 * Server-side file descriptor management
 *
 * Copyright (C) 2000, 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#ifdef HAVE_SYS_POLL_H
#include <sys/types.h>

#if defined(HAVE_SYS_EPOLL_H) && defined(HAVE_EPOLL_CREATE)
# include <sys/epoll.h>
#elif defined(linux) && defined(__i386__) && defined(HAVE_STDINT_H)

# define EPOLLIN POLLIN
# define EPOLLOUT POLLOUT
# define EPOLLERR POLLERR
# define EPOLLHUP POLLHUP
# define EPOLL_CTL_ADD 1
# define EPOLL_CTL_DEL 2
# define EPOLL_CTL_MOD 3

typedef union epoll_data

#define SYSCALL_RET(ret) do { \
    if (ret < 0) { errno = -ret; ret = -1; } \

static inline int epoll_create( int size )
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "=a" (ret) : "0" (254 /*NR_epoll_create*/), "r" (size) );

static inline int epoll_ctl( int epfd, int op, int fd, const struct epoll_event *event )
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "0" (255 /*NR_epoll_ctl*/), "r" (epfd), "c" (op), "d" (fd), "S" (event), "m" (*event) );

static inline int epoll_wait( int epfd, struct epoll_event *events, int maxevents, int timeout )
    __asm__( "pushl %%ebx; movl %2,%%ebx; int $0x80; popl %%ebx"
             : "0" (256 /*NR_epoll_wait*/), "r" (epfd), "c" (events), "d" (maxevents), "S" (timeout)

#endif /* linux && __i386__ && HAVE_STDINT_H */
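/* (the fallback wrappers above invoke the raw Linux epoll syscalls, numbers 254/255/256
 * on i386, directly via int $0x80; %ebx is saved and restored by hand so the code
 * stays valid when built as position-independent code) */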
/* Because of the stupid Posix locking semantics, we need to keep
 * track of all file descriptors referencing a given file, and not
 * close a single one until all the locks are gone (sigh).
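 * (Closing any one descriptor drops every POSIX lock the process holds on the
 * file, including locks that were taken through other descriptors.)
 */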
/* file descriptor object */

/* closed_fd is used to keep track of the unix fd belonging to a closed fd object */
    struct list entry;       /* entry in inode closed list */
    int         fd;          /* the unix file descriptor */
    char        unlink[1];   /* name to unlink on close (if any) */

    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct inode        *inode;       /* inode that this fd belongs to */
    struct list          inode_entry; /* entry in inode fd list */
    struct closed_fd    *closed;      /* structure to store the unix fd at destroy time */
    struct object       *user;        /* object using this file descriptor */
    struct list          locks;       /* list of locks on this fd */
    unsigned int         access;      /* file access (GENERIC_READ/WRITE) */
    unsigned int         sharing;     /* file sharing mode */
    int                  unix_fd;     /* unix file descriptor */
    int                  fs_locks;    /* can we use filesystem locks for this fd? */
    int                  poll_index;  /* index of fd in poll array */
    struct list          read_q;      /* async readers of this fd */
    struct list          write_q;     /* async writers of this fd */
static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );

static const struct object_ops fd_ops =
    sizeof(struct fd),        /* size */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    fd_destroy                /* destroy */
    struct object obj;        /* object header */
    struct list entry;        /* inode hash list entry */
    unsigned int hash;        /* hashing code */
    dev_t dev;                /* device number */
    ino_t ino;                /* inode number */
    struct list open;         /* list of open file descriptors */
    struct list locks;        /* list of file locks */
    struct list closed;       /* list of file descriptors to close at destroy time */

static void inode_dump( struct object *obj, int verbose );
static void inode_destroy( struct object *obj );

static const struct object_ops inode_ops =
    sizeof(struct inode),     /* size */
    inode_dump,               /* dump */
    no_add_queue,             /* add_queue */
    NULL,                     /* remove_queue */
    NULL,                     /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    inode_destroy             /* destroy */
/* file lock object */

    struct object obj;        /* object header */
    struct fd *fd;            /* fd owning this lock */
    struct list fd_entry;     /* entry in list of locks on a given fd */
    struct list inode_entry;  /* entry in inode list of locks */
    int shared;               /* shared lock? */
    file_pos_t start;         /* locked region is interval [start;end) */
    struct process *process;  /* process owning this lock */
    struct list proc_entry;   /* entry in list of locks owned by the process */

static void file_lock_dump( struct object *obj, int verbose );
static int file_lock_signaled( struct object *obj, struct thread *thread );

static const struct object_ops file_lock_ops =
    sizeof(struct file_lock), /* size */
    file_lock_dump,           /* dump */
    add_queue,                /* add_queue */
    remove_queue,             /* remove_queue */
    file_lock_signaled,       /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    no_get_fd,                /* get_fd */
    no_destroy                /* destroy */
#define OFF_T_MAX       (~((file_pos_t)1 << (8*sizeof(off_t)-1)))
#define FILE_POS_T_MAX  (~(file_pos_t)0)

static file_pos_t max_unix_offset = OFF_T_MAX;
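/* note: set_unix_lock() below shrinks this limit to INT_MAX at runtime if the
 * kernel turns out to support only 32-bit file offsets */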
#define DUMP_LONG_LONG(val) do { \
    if (sizeof(val) > sizeof(unsigned long) && (val) > ~0UL) \
        fprintf( stderr, "%lx%08lx", (unsigned long)((val) >> 32), (unsigned long)(val) ); \
    else \
        fprintf( stderr, "%lx", (unsigned long)(val) ); \
/****************************************************************/
/* timeouts support */

    struct list entry;          /* entry in sorted timeout list */
    struct timeval when;        /* timeout expiry (absolute time) */
    timeout_callback callback;  /* callback function */
    void *private;              /* callback private data */

static struct list timeout_list = LIST_INIT(timeout_list);   /* sorted timeouts list */
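/* the list is kept sorted by expiry time, so get_next_timeout() below only ever
 * needs to look at entries popped off the head */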
/* add a timeout user */
struct timeout_user *add_timeout_user( const struct timeval *when, timeout_callback func,
    struct timeout_user *user;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->callback = func;
    user->private = private;

    /* Now insert it in the linked list */

    LIST_FOR_EACH( ptr, &timeout_list )
        struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
        if (!time_before( &timeout->when, when )) break;
    list_add_before( ptr, &user->entry );

/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
    list_remove( &user->entry );
/* add a timeout in milliseconds to an absolute time */
void add_timeout( struct timeval *when, int timeout )
        long sec = timeout / 1000;
        if ((when->tv_usec += (timeout - 1000*sec) * 1000) >= 1000000)
            when->tv_usec -= 1000000;
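/* Usage sketch (this mirrors what create_async() does further down): to run a
 * callback some milliseconds from now, start from the current time and add the
 * delay before registering it; "my_callback" and "my_data" are placeholder
 * names, not part of this file:
 *
 *     struct timeval when;
 *     gettimeofday( &when, NULL );
 *     add_timeout( &when, 500 );
 *     user = add_timeout_user( &when, my_callback, my_data );
 */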
/****************************************************************/

static struct fd **poll_users;        /* users array */
static struct pollfd *pollfd;         /* poll fd array */
static int nb_users;                  /* count of array entries actually in use */
static int active_users;              /* current number of active users */
static int allocated_users;           /* count of allocated entries in the array */
static struct fd **freelist;          /* list of free entries in the array */
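/* free slots are chained through the poll_users array itself: a free slot stores
 * the previous head of the free list (see add_poll_user and remove_poll_user) */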
static struct epoll_event *epoll_events;

/* set the events that epoll waits for on this fd; helper for set_fd_events */
static inline void set_fd_epoll_events( struct fd *fd, int user, int events )
    struct epoll_event ev;

    if (epoll_fd == -1) return;

    if (events == -1)  /* stop waiting on this fd completely */
        if (pollfd[user].fd == -1) return;  /* already removed */
    else if (pollfd[user].fd == -1)
        if (pollfd[user].events) return;  /* stopped waiting on it, don't restart */
        if (pollfd[user].events == events) return;  /* nothing to do */

    if (epoll_ctl( epoll_fd, ctl, fd->unix_fd, &ev ) == -1)
        if (errno == ENOMEM)  /* not enough memory, give up on epoll */
        else perror( "epoll_ctl" );  /* should not happen */

#else /* USE_EPOLL */

static inline void set_fd_epoll_events( struct fd *fd, int user, int events )

#endif /* USE_EPOLL */
/* add a user in the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];

        if (nb_users == allocated_users)
            struct fd **newusers;
            struct pollfd *newpoll;
            int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
            if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
            if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
                poll_users = newusers;
            poll_users = newusers;

            if (!allocated_users) epoll_fd = epoll_create( new_count );
                struct epoll_event *new_events;
                if (!(new_events = realloc( epoll_events, new_count * sizeof(*epoll_events) )))
                epoll_events = new_events;
            allocated_users = new_count;

    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
    assert( poll_users[user] == fd );

    if (epoll_fd != -1 && pollfd[user].fd != -1)
        struct epoll_event dummy;
        epoll_ctl( epoll_fd, EPOLL_CTL_DEL, fd->unix_fd, &dummy );

    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
/* process pending timeouts and return the time until the next timeout, in milliseconds */
static int get_next_timeout(void)
    if (!list_empty( &timeout_list ))
        struct list expired_list, *ptr;

        gettimeofday( &now, NULL );

        /* first remove all expired timers from the list */

        list_init( &expired_list );
        while ((ptr = list_head( &timeout_list )) != NULL)
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );

            if (!time_before( &now, &timeout->when ))
                list_remove( &timeout->entry );
                list_add_tail( &expired_list, &timeout->entry );

        /* now call the callback for all the removed timers */

        while ((ptr = list_head( &expired_list )) != NULL)
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            list_remove( &timeout->entry );
            timeout->callback( timeout->private );

        if ((ptr = list_head( &timeout_list )) != NULL)
            struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
            int diff = (timeout->when.tv_sec - now.tv_sec) * 1000
                       + (timeout->when.tv_usec - now.tv_usec) / 1000;
            if (diff < 0) diff = 0;

    return -1;  /* no pending timeouts */
/* server main poll() loop */

    assert( POLLIN == EPOLLIN );
    assert( POLLOUT == EPOLLOUT );
    assert( POLLERR == EPOLLERR );
    assert( POLLHUP == EPOLLHUP );

        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */
        if (epoll_fd == -1) break;  /* an error occurred with epoll */

        ret = epoll_wait( epoll_fd, epoll_events, allocated_users, timeout );

        /* put the events into the pollfd array first, like poll does */
        for (i = 0; i < ret; i++)
            int user = epoll_events[i].data.u32;
            pollfd[user].revents = epoll_events[i].events;

        /* read events from the pollfd array, as set_fd_events may modify them */
        for (i = 0; i < ret; i++)
            int user = epoll_events[i].data.u32;
            if (pollfd[user].revents) fd_poll_event( poll_users[user], pollfd[user].revents );

    /* fall through to normal poll loop */
#endif /* USE_EPOLL */

        timeout = get_next_timeout();

        if (!active_users) break;  /* last user removed by a timeout */

        ret = poll( pollfd, nb_users, timeout );

            for (i = 0; i < nb_users; i++)
                if (pollfd[i].revents)
                    fd_poll_event( poll_users[i], pollfd[i].revents );
/****************************************************************/
/* inode functions */

static struct list inode_hash[HASH_SIZE];
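/* buckets are initialized lazily: get_inode() calls list_init() on a bucket the
 * first time it is used (an untouched bucket still has a NULL next pointer) */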
/* close all pending file descriptors in the closed list */
static void inode_close_pending( struct inode *inode )
    struct list *ptr = list_head( &inode->closed );

        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );
        struct list *next = list_next( &inode->closed, ptr );

        if (!fd->unlink[0])  /* get rid of it unless there's an unlink pending on that file */
static void inode_dump( struct object *obj, int verbose )
    struct inode *inode = (struct inode *)obj;
    fprintf( stderr, "Inode dev=" );
    DUMP_LONG_LONG( inode->dev );
    fprintf( stderr, " ino=" );
    DUMP_LONG_LONG( inode->ino );
    fprintf( stderr, "\n" );
static void inode_destroy( struct object *obj )
    struct inode *inode = (struct inode *)obj;

    assert( list_empty(&inode->open) );
    assert( list_empty(&inode->locks) );

    list_remove( &inode->entry );

    while ((ptr = list_head( &inode->closed )))
        struct closed_fd *fd = LIST_ENTRY( ptr, struct closed_fd, entry );

        if (fd->fd != -1) close( fd->fd );

            /* make sure it is still the same file */
            if (!stat( fd->unlink, &st ) && st.st_dev == inode->dev && st.st_ino == inode->ino)
                if (S_ISDIR(st.st_mode)) rmdir( fd->unlink );
                else unlink( fd->unlink );
/* retrieve the inode object for a given fd, creating it if needed */
static struct inode *get_inode( dev_t dev, ino_t ino )
    unsigned int hash = (dev ^ ino) % HASH_SIZE;

    if (inode_hash[hash].next)
        LIST_FOR_EACH( ptr, &inode_hash[hash] )
            inode = LIST_ENTRY( ptr, struct inode, entry );
            if (inode->dev == dev && inode->ino == ino)
                return (struct inode *)grab_object( inode );
    else list_init( &inode_hash[hash] );

    /* not found, create it */
    if ((inode = alloc_object( &inode_ops )))
        list_init( &inode->open );
        list_init( &inode->locks );
        list_init( &inode->closed );
        list_add_head( &inode_hash[hash], &inode->entry );
/* add fd to the inode list of file descriptors to close */
static void inode_add_closed_fd( struct inode *inode, struct closed_fd *fd )
    if (!list_empty( &inode->locks ))
        list_add_head( &inode->closed, &fd->entry );
    else if (fd->unlink[0])  /* close the fd but keep the structure around for unlink */
        list_add_head( &inode->closed, &fd->entry );
    else  /* no locks on this inode and no unlink, get rid of the fd */
/****************************************************************/
/* file lock functions */

static void file_lock_dump( struct object *obj, int verbose )
    struct file_lock *lock = (struct file_lock *)obj;
    fprintf( stderr, "Lock %s fd=%p proc=%p start=",
             lock->shared ? "shared" : "excl", lock->fd, lock->process );
    DUMP_LONG_LONG( lock->start );
    fprintf( stderr, " end=" );
    DUMP_LONG_LONG( lock->end );
    fprintf( stderr, "\n" );

static int file_lock_signaled( struct object *obj, struct thread *thread )
    struct file_lock *lock = (struct file_lock *)obj;
    /* lock is signaled if it has lost its owner */
    return !lock->process;
/* set (or remove) a Unix lock if possible for the given range */
static int set_unix_lock( struct fd *fd, file_pos_t start, file_pos_t end, int type )
    if (!fd->fs_locks) return 1;  /* no fs locks possible for this fd */

        if (start == end) return 1;  /* can't set zero-byte lock */
        if (start > max_unix_offset) return 1;  /* ignore it */
        fl.l_whence = SEEK_SET;
        if (!end || end > max_unix_offset) fl.l_len = 0;
        else fl.l_len = end - start;
        if (fcntl( fd->unix_fd, F_SETLK, &fl ) != -1) return 1;

            /* check whether locks work at all on this file system */
            if (fcntl( fd->unix_fd, F_GETLK, &fl ) != -1)
                set_error( STATUS_FILE_LOCK_CONFLICT );
            /* no locking on this fs, just ignore it */

            set_error( STATUS_FILE_LOCK_CONFLICT );

            /* this can happen if we try to set a write lock on a read-only file */
            /* we just ignore that error */
            if (fl.l_type == F_WRLCK) return 1;
            set_error( STATUS_ACCESS_DENIED );

            /* this can happen if off_t is 64-bit but the kernel only supports 32-bit */
            /* in that case we shrink the limit and retry */
            if (max_unix_offset > INT_MAX)
                max_unix_offset = INT_MAX;
/* check if interval [start;end) overlaps the lock */
inline static int lock_overlaps( struct file_lock *lock, file_pos_t start, file_pos_t end )
    if (lock->end && start >= lock->end) return 0;
    if (end && lock->start >= end) return 0;

/* remove Unix locks for all bytes in the specified area that are no longer locked */
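/* The idea: build the list of "holes", i.e. the byte ranges inside [start;end) that
 * are no longer covered by any lock on the inode, then issue F_UNLCK only for those
 * holes, so that ranges still locked through other fd objects stay locked. */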
static void remove_unix_locks( struct fd *fd, file_pos_t start, file_pos_t end )
    } *first, *cur, *next, *buffer;

    if (!fd->inode) return;
    if (!fd->fs_locks) return;
    if (start == end || start > max_unix_offset) return;
    if (!end || end > max_unix_offset) end = max_unix_offset + 1;

    /* count the number of locks overlapping the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (lock_overlaps( lock, start, end )) count++;

    if (!count)  /* no locks at all, we can unlock everything */
        set_unix_lock( fd, start, end, F_UNLCK );

    /* allocate space for the list of holes */
    /* max. number of holes is number of locks + 1 */

    if (!(buffer = malloc( sizeof(*buffer) * (count+1) ))) return;
    first->start = start;

    /* build a sorted list of unlocked holes in the specified area */

    LIST_FOR_EACH( ptr, &fd->inode->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (lock->start == lock->end) continue;
        if (!lock_overlaps( lock, start, end )) continue;

        /* go through all the holes touched by this lock */
        for (cur = first; cur; cur = cur->next)
            if (cur->end <= lock->start) continue;            /* hole is before start of lock */
            if (lock->end && cur->start >= lock->end) break;  /* hole is after end of lock */

            /* now we know that lock is overlapping hole */

            if (cur->start >= lock->start)  /* lock starts before hole, shrink from start */
                cur->start = lock->end;
                if (cur->start && cur->start < cur->end) break;  /* done with this lock */
                /* now hole is empty, remove it */
                if (cur->next) cur->next->prev = cur->prev;
                if (cur->prev) cur->prev->next = cur->next;
                else if (!(first = cur->next)) goto done;  /* no more holes at all */
            else if (!lock->end || cur->end <= lock->end)  /* lock larger than hole, shrink from end */
                cur->end = lock->start;
                assert( cur->start < cur->end );
            else  /* lock is in the middle of hole, split hole in two */
                next->next = cur->next;
                next->start = lock->end;
                next->end = cur->end;
                cur->end = lock->start;
                assert( next->start < next->end );
                assert( cur->end < next->start );
                break;  /* done with this lock */

    /* clear Unix locks for all the holes */

    for (cur = first; cur; cur = cur->next)
        set_unix_lock( fd, cur->start, cur->end, F_UNLCK );
/* create a new lock on a fd */
static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, file_pos_t end )
    struct file_lock *lock;

    if (!fd->inode)  /* not a regular file */
        set_error( STATUS_INVALID_HANDLE );

    if (!(lock = alloc_object( &file_lock_ops ))) return NULL;
    lock->shared = shared;
    lock->process = current->process;

    /* now try to set a Unix lock */
    if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK ))
        release_object( lock );
    list_add_head( &fd->locks, &lock->fd_entry );
    list_add_head( &fd->inode->locks, &lock->inode_entry );
    list_add_head( &lock->process->locks, &lock->proc_entry );
/* remove an existing lock */
static void remove_lock( struct file_lock *lock, int remove_unix )
    struct inode *inode = lock->fd->inode;

    list_remove( &lock->fd_entry );
    list_remove( &lock->inode_entry );
    list_remove( &lock->proc_entry );
    if (remove_unix) remove_unix_locks( lock->fd, lock->start, lock->end );
    if (list_empty( &inode->locks )) inode_close_pending( inode );
    lock->process = NULL;
    wake_up( &lock->obj, 0 );
    release_object( lock );
/* remove all locks owned by a given process */
void remove_process_locks( struct process *process )
    while ((ptr = list_head( &process->locks )))
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, proc_entry );
        remove_lock( lock, 1 );  /* this removes it from the list */

/* remove all locks on a given fd */
static void remove_fd_locks( struct fd *fd )
    file_pos_t start = FILE_POS_T_MAX, end = 0;

    while ((ptr = list_head( &fd->locks )))
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if (lock->start < start) start = lock->start;
        if (!lock->end || lock->end > end) end = lock->end - 1;
        remove_lock( lock, 0 );
    if (start < end) remove_unix_locks( fd, start, end + 1 );
/* add a lock on an fd */
/* returns handle to wait on */
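/* (0 is returned both when the lock is granted and when it is refused outright;
 * a wait handle is only returned when the client asked to wait on a conflict) */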
obj_handle_t lock_fd( struct fd *fd, file_pos_t start, file_pos_t count, int shared, int wait )
    file_pos_t end = start + count;

    /* don't allow wrapping locks */
    if (end && end < start)
        set_error( STATUS_INVALID_PARAMETER );

    /* check if another lock on that file overlaps the area */
    LIST_FOR_EACH( ptr, &fd->inode->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, inode_entry );
        if (!lock_overlaps( lock, start, end )) continue;
        if (lock->shared && shared) continue;
        set_error( STATUS_FILE_LOCK_CONFLICT );
        set_error( STATUS_PENDING );
        return alloc_handle( current->process, lock, SYNCHRONIZE, 0 );

    /* not found, add it */
    if (add_lock( fd, shared, start, end )) return 0;
    if (get_error() == STATUS_FILE_LOCK_CONFLICT)
        /* Unix lock conflict -> tell client to wait and retry */
        if (wait) set_error( STATUS_PENDING );
/* remove a lock on an fd */
void unlock_fd( struct fd *fd, file_pos_t start, file_pos_t count )
    file_pos_t end = start + count;

    /* find an existing lock with the exact same parameters */
    LIST_FOR_EACH( ptr, &fd->locks )
        struct file_lock *lock = LIST_ENTRY( ptr, struct file_lock, fd_entry );
        if ((lock->start == start) && (lock->end == end))
            remove_lock( lock, 1 );
    set_error( STATUS_FILE_LOCK_CONFLICT );
/****************************************************************/
/* asynchronous operations support */

    struct thread *thread;
    struct timeout_user *timeout;

/* notifies client thread of new status of its async request */
/* destroys the server side of it */
static void async_terminate( struct async *async, int status )
    thread_queue_apc( async->thread, NULL, async->apc, APC_ASYNC_IO,
                      1, async->user, async->sb, (void *)status );

    if (async->timeout) remove_timeout_user( async->timeout );
    async->timeout = NULL;
    list_remove( &async->entry );
    release_object( async->thread );
/* cb for timeout on an async request */
static void async_callback(void *private)
    struct async *async = (struct async *)private;

    /* fprintf(stderr, "async timeout out %p\n", async); */
    async->timeout = NULL;
    async_terminate( async, STATUS_TIMEOUT );

/* create an async on a given queue of a fd */
struct async *create_async(struct thread *thread, int *timeout, struct list *queue,
                           void *io_apc, void *io_user, void *io_sb)
    struct async *async = mem_alloc( sizeof(struct async) );

    if (!async) return NULL;

    async->thread = (struct thread *)grab_object(thread);
    async->apc = io_apc;
    async->user = io_user;

    list_add_tail( queue, &async->entry );

        struct timeval when;

        gettimeofday( &when, NULL );
        add_timeout( &when, *timeout );
        async->timeout = add_timeout_user( &when, async_callback, async );
    else async->timeout = NULL;
/* terminate the async operation at the head of the queue */
void async_terminate_head( struct list *queue, int status )
    struct list *ptr = list_head( queue );
    if (ptr) async_terminate( LIST_ENTRY( ptr, struct async, entry ), status );
/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d user=%p", fd->unix_fd, fd->user );
    if (fd->inode) fprintf( stderr, " inode=%p unlink='%s'", fd->inode, fd->closed->unlink );
    fprintf( stderr, "\n" );

static void fd_destroy( struct object *obj )
    struct fd *fd = (struct fd *)obj;

    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );

    remove_fd_locks( fd );
    list_remove( &fd->inode_entry );
    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
        inode_add_closed_fd( fd->inode, fd->closed );
        release_object( fd->inode );
    else  /* no inode, close it right away */
        if (fd->unix_fd != -1) close( fd->unix_fd );
/* set the events that select waits for on this fd */
void set_fd_events( struct fd *fd, int events )
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    set_fd_epoll_events( fd, user, events );

    if (events == -1)  /* stop waiting on this fd completely */
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
/* allocate an fd object, without setting the unix fd yet */
struct fd *alloc_fd( const struct fd_ops *fd_user_ops, struct object *user )
    struct fd *fd = alloc_object( &fd_ops );

    if (!fd) return NULL;

    fd->fd_ops = fd_user_ops;
    fd->poll_index = -1;
    list_init( &fd->inode_entry );
    list_init( &fd->locks );
    list_init( &fd->read_q );
    list_init( &fd->write_q );

    if ((fd->poll_index = add_poll_user( fd )) == -1)
        release_object( fd );
/* check if the desired access is possible without violating */
/* the sharing mode of other opens of the same file */
static int check_sharing( struct fd *fd, unsigned int access, unsigned int sharing )
    unsigned int existing_sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
    unsigned int existing_access = 0;

    /* if access mode is 0, sharing mode is ignored */
    if (!access) sharing = existing_sharing;
    fd->access = access;
    fd->sharing = sharing;

    LIST_FOR_EACH( ptr, &fd->inode->open )
        struct fd *fd_ptr = LIST_ENTRY( ptr, struct fd, inode_entry );
            existing_sharing &= fd_ptr->sharing;
            existing_access |= fd_ptr->access;
            if (fd_ptr->closed->unlink[0]) unlink = 1;

    if ((access & GENERIC_READ) && !(existing_sharing & FILE_SHARE_READ)) return 0;
    if ((access & GENERIC_WRITE) && !(existing_sharing & FILE_SHARE_WRITE)) return 0;
    if ((existing_access & GENERIC_READ) && !(sharing & FILE_SHARE_READ)) return 0;
    if ((existing_access & GENERIC_WRITE) && !(sharing & FILE_SHARE_WRITE)) return 0;
    if (fd->closed->unlink[0] && !(existing_sharing & FILE_SHARE_DELETE)) return 0;
    if (unlink && !(sharing & FILE_SHARE_DELETE)) return 0;
/* open() wrapper using a struct fd */
/* the fd must have been created with alloc_fd */
/* on error the fd object is released */
struct fd *open_fd( struct fd *fd, const char *name, int flags, mode_t *mode,
                    unsigned int access, unsigned int sharing, unsigned int options )
    struct closed_fd *closed_fd;
    const char *unlink_name = "";

    assert( fd->unix_fd == -1 );

    if (options & FILE_DELETE_ON_CLOSE) unlink_name = name;
    if (!(closed_fd = mem_alloc( sizeof(*closed_fd) + strlen(unlink_name) )))
        release_object( fd );

    /* create the directory if needed */
    if ((options & FILE_DIRECTORY_FILE) && (flags & O_CREAT))
        if (mkdir( name, 0777 ) == -1)
            if (errno != EEXIST || (flags & O_EXCL))
                release_object( fd );
        flags &= ~(O_CREAT | O_EXCL | O_TRUNC);

    if ((fd->unix_fd = open( name, flags & ~O_TRUNC, *mode )) == -1)
        release_object( fd );

    closed_fd->fd = fd->unix_fd;
    closed_fd->unlink[0] = 0;
    fstat( fd->unix_fd, &st );

    /* only bother with an inode for normal files and directories */
    if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode))
        struct inode *inode = get_inode( st.st_dev, st.st_ino );

            /* we can close the fd because there are no others open on the same file,
             * otherwise we wouldn't have failed to allocate a new inode */

        fd->closed = closed_fd;
        list_add_head( &inode->open, &fd->inode_entry );

        /* check directory options */
        if ((options & FILE_DIRECTORY_FILE) && !S_ISDIR(st.st_mode))
            release_object( fd );
            set_error( STATUS_NOT_A_DIRECTORY );
        if ((options & FILE_NON_DIRECTORY_FILE) && S_ISDIR(st.st_mode))
            release_object( fd );
            set_error( STATUS_FILE_IS_A_DIRECTORY );
        if (!check_sharing( fd, access, sharing ))
            release_object( fd );
            set_error( STATUS_SHARING_VIOLATION );
        strcpy( closed_fd->unlink, unlink_name );
        if (flags & O_TRUNC) ftruncate( fd->unix_fd, 0 );
    else  /* special file */
        if (options & FILE_DIRECTORY_FILE)
            set_error( STATUS_NOT_A_DIRECTORY );
        if (unlink_name[0])  /* we can't unlink special files */
            set_error( STATUS_INVALID_PARAMETER );

    release_object( fd );
/* create an fd for an anonymous file */
/* if the function fails the unix fd is closed */
struct fd *create_anonymous_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user )
    struct fd *fd = alloc_fd( fd_user_ops, user );

        fd->unix_fd = unix_fd;

/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )

/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )

/* check if two file descriptors point to the same file */
int is_same_file_fd( struct fd *fd1, struct fd *fd2 )
    return fd1->inode == fd2->inode;

/* callback for event happening in the main poll() loop */
void fd_poll_event( struct fd *fd, int event )
    return fd->fd_ops->poll_event( fd, event );
/* check if events are pending and if yes return which one(s) */
int check_fd_events( struct fd *fd, int events )
    pfd.fd = fd->unix_fd;
    pfd.events = events;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;

/* default add_queue() routine for objects that poll() on an fd */
int default_fd_add_queue( struct object *obj, struct wait_queue_entry *entry )
    struct fd *fd = get_obj_fd( obj );

    if (list_empty( &obj->wait_queue ))  /* first on the queue */
        set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    add_queue( obj, entry );
    release_object( fd );
/* default remove_queue() routine for objects that poll() on an fd */
void default_fd_remove_queue( struct object *obj, struct wait_queue_entry *entry )
    struct fd *fd = get_obj_fd( obj );

    remove_queue( obj, entry );
    if (list_empty( &obj->wait_queue ))  /* last on the queue is gone */
        set_fd_events( fd, 0 );
    release_object( obj );
    release_object( fd );

/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
    struct fd *fd = get_obj_fd( obj );

    if (fd->inode) return 1;  /* regular files are always signaled */

    events = fd->fd_ops->get_poll_events( fd );
    ret = check_fd_events( fd, events ) != 0;

        set_fd_events( fd, 0 );  /* stop waiting on select() if we are signaled */
    else if (!list_empty( &obj->wait_queue ))
        set_fd_events( fd, events );  /* restart waiting on poll() if we are no longer signaled */

    release_object( fd );
int default_fd_get_poll_events( struct fd *fd )
    if (!list_empty( &fd->read_q ))
    if (!list_empty( &fd->write_q ))

/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
    if (!list_empty( &fd->read_q ) && (POLLIN & event))
        async_terminate_head( &fd->read_q, STATUS_ALERTED );
    if (!list_empty( &fd->write_q ) && (POLLOUT & event))
        async_terminate_head( &fd->write_q, STATUS_ALERTED );

    /* if an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    wake_up( fd->user, 0 );
void default_fd_queue_async( struct fd *fd, void *apc, void *user, void *io_sb, int type, int count )
    if (!(fd->fd_ops->get_file_info( fd ) & FD_FLAG_OVERLAPPED))
        set_error( STATUS_INVALID_HANDLE );

    case ASYNC_TYPE_READ:
        queue = &fd->read_q;
    case ASYNC_TYPE_WRITE:
        queue = &fd->write_q;
        set_error( STATUS_INVALID_PARAMETER );

    if (!create_async( current, NULL, queue, apc, user, io_sb ))

    /* Check if the new pending request can be served immediately */
    events = check_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    if (events) fd->fd_ops->poll_event( fd, events );

    set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );

void default_fd_cancel_async( struct fd *fd )
    async_terminate_queue( &fd->read_q, STATUS_CANCELLED );
    async_terminate_queue( &fd->write_q, STATUS_CANCELLED );
/* default flush() routine */
int no_flush( struct fd *fd, struct event **event )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );

/* default get_file_info() routine */
int no_get_file_info( struct fd *fd )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );

/* default queue_async() routine */
void no_queue_async( struct fd *fd, void *apc, void *user, void *io_sb,
                     int type, int count )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );

/* default cancel_async() routine */
void no_cancel_async( struct fd *fd )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
/* same as get_handle_obj but retrieves the struct fd associated with the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
    struct fd *fd = NULL;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
        fd = get_obj_fd( obj );
        release_object( obj );
/* flush a file's buffers */
DECL_HANDLER(flush_file)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );
    struct event *event = NULL;

        fd->fd_ops->flush( fd, &event );
            reply->event = alloc_handle( current->process, event, SYNCHRONIZE, 0 );
        release_object( fd );
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
    if ((fd = get_handle_fd_obj( current->process, req->handle, req->access )))
        int unix_fd = get_handle_unix_fd( current->process, req->handle, req->access );
        if (unix_fd != -1) reply->fd = unix_fd;
        else if (!get_error())
            assert( fd->unix_fd != -1 );
            send_client_fd( current->process, fd->unix_fd, req->handle );
        reply->flags = fd->fd_ops->get_file_info( fd );
        release_object( fd );
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    /*
     * The queue_async method must do the following:
     *
     * 1. Get the async_queue for the request of the given type.
     * 2. Create a new asynchronous request for the selected queue.
     * 3. Carry out any operations necessary to adjust the object's poll events;
     *    usually: set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) ).
     * 4. When the async request is triggered, send back (with a proper APC)
     *    the trigger (STATUS_ALERTED) to the thread that posted the request.
     *    async_destroy() is to be called: it will both notify the sender about
     *    the trigger and destroy the request by itself.
     * See also the implementations in file.c, serial.c, and sock.c.
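     * default_fd_queue_async() earlier in this file follows exactly this pattern.
     */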
        fd->fd_ops->queue_async( fd, req->io_apc, req->io_user, req->io_sb,
                                 req->type, req->count );
        release_object( fd );
/* cancels all async I/O */
DECL_HANDLER(cancel_async)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

        /* Note: we don't kill the queued APC_ASYNC_IO on this thread because
         * NtCancelIoFile() will force the pending APC to be run.  Since
         * Windows only guarantees that the current thread will have no async
         * operation on the current fd when NtCancelIoFile returns, this shall
         * be enough.
         */
        fd->fd_ops->cancel_async( fd );
        release_object( fd );