/*
 * Server-side file descriptor management
 *
 * Copyright (C) 2003 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif

#include <sys/types.h>
    struct object        obj;         /* object header */
    const struct fd_ops *fd_ops;      /* file descriptor operations */
    struct object       *user;        /* object using this file descriptor */
    int                  unix_fd;     /* unix file descriptor */
    int                  poll_index;  /* index of fd in poll array */
    int                  mode;        /* file protection mode */
static void fd_dump( struct object *obj, int verbose );
static void fd_destroy( struct object *obj );

static const struct object_ops fd_ops =
    sizeof(struct fd),          /* size */
    no_add_queue,               /* add_queue */
    NULL,                       /* remove_queue */
    no_get_fd,                  /* get_fd */
    fd_destroy                  /* destroy */
/****************************************************************/
/* timeouts support */

    struct timeout_user  *next;       /* next in sorted timeout list */
    struct timeout_user  *prev;       /* prev in sorted timeout list */
    struct timeval        when;       /* timeout expiry (absolute time) */
    timeout_callback      callback;   /* callback function */
    void                 *private;    /* callback private data */

static struct timeout_user *timeout_head;   /* sorted timeouts list head */
static struct timeout_user *timeout_tail;   /* sorted timeouts list tail */
/* add a timeout user */
struct timeout_user *add_timeout_user( struct timeval *when, timeout_callback func, void *private )
    struct timeout_user *user;
    struct timeout_user *pos;

    if (!(user = mem_alloc( sizeof(*user) ))) return NULL;
    user->callback = func;
    user->private = private;

    /* Now insert it in the linked list */

    for (pos = timeout_head; pos; pos = pos->next)
        if (!time_before( &pos->when, when )) break;

    if (pos)  /* insert it before 'pos' */
        if ((user->prev = pos->prev)) user->prev->next = user;
        else timeout_head = user;
    else  /* insert it at the tail */
        if (timeout_tail) timeout_tail->next = user;
        else timeout_head = user;
        user->prev = timeout_tail;
/* remove a timeout user */
void remove_timeout_user( struct timeout_user *user )
    if (user->next) user->next->prev = user->prev;
    else timeout_tail = user->prev;
    if (user->prev) user->prev->next = user->next;
    else timeout_head = user->next;
/* add a timeout in milliseconds to an absolute time */
void add_timeout( struct timeval *when, int timeout )
    long sec = timeout / 1000;
    if ((when->tv_usec += (timeout - 1000*sec) * 1000) >= 1000000)
        when->tv_usec -= 1000000;
/* handle the next expired timeout */
inline static void handle_timeout(void)
    struct timeout_user *user = timeout_head;

    timeout_head = user->next;
    if (user->next) user->next->prev = user->prev;
    else timeout_tail = user->prev;
    user->callback( user->private );
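/* Example (illustrative): a caller schedules a relative timeout by turning the
 * current time into an absolute expiry with add_timeout() and registering a
 * callback with add_timeout_user(); the returned pointer is only needed to
 * cancel the timeout early with remove_timeout_user().  The my_object type and
 * function names below are hypothetical.
 *
 *     static void my_expiry_callback( void *private )
 *     {
 *         struct my_object *obj = private;
 *         obj->timeout = NULL;   // the registration is gone once the callback fires
 *         // ... handle the expiry ...
 *     }
 *
 *     static void my_start_timeout( struct my_object *obj, int milliseconds )
 *     {
 *         struct timeval when;
 *         gettimeofday( &when, NULL );        // now
 *         add_timeout( &when, milliseconds ); // now + milliseconds
 *         obj->timeout = add_timeout_user( &when, my_expiry_callback, obj );
 *     }
 *
 *     static void my_cancel_timeout( struct my_object *obj )
 *     {
 *         if (obj->timeout) remove_timeout_user( obj->timeout );
 *         obj->timeout = NULL;
 *     }
 */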
/****************************************************************/

static struct fd **poll_users;        /* users array */
static struct pollfd *pollfd;         /* poll fd array */
static int nb_users;                  /* count of array entries actually in use */
static int active_users;              /* current number of active users */
static int allocated_users;           /* count of allocated entries in the array */
static struct fd **freelist;          /* list of free entries in the array */
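/* Free slots are chained through the poll_users array itself: a free entry
 * stores (cast to struct fd *) the address of the next free entry, and
 * 'freelist' points at the first one, so grabbing and releasing a slot are
 * both O(1).  The matching pollfd entry keeps fd = -1 so that poll() ignores it.
 */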
/* add a user to the poll array and return its index, or -1 on failure */
static int add_poll_user( struct fd *fd )
        ret = freelist - poll_users;
        freelist = (struct fd **)poll_users[ret];

    if (nb_users == allocated_users)
        struct fd **newusers;
        struct pollfd *newpoll;
        int new_count = allocated_users ? (allocated_users + allocated_users / 2) : 16;
        if (!(newusers = realloc( poll_users, new_count * sizeof(*poll_users) ))) return -1;
        if (!(newpoll = realloc( pollfd, new_count * sizeof(*pollfd) )))
            poll_users = newusers;
        poll_users = newusers;
        allocated_users = new_count;

    pollfd[ret].events = 0;
    pollfd[ret].revents = 0;
    poll_users[ret] = fd;
/* remove a user from the poll list */
static void remove_poll_user( struct fd *fd, int user )
    assert( poll_users[user] == fd );
    pollfd[user].fd = -1;
    pollfd[user].events = 0;
    pollfd[user].revents = 0;
    poll_users[user] = (struct fd *)freelist;
    freelist = &poll_users[user];
static void sighup_handler()

/* SIGTERM handler */
static void sigterm_handler()

static void sigint_handler()
    kill_all_processes( NULL, 1 );
/* server main poll() loop */
    struct sigaction action;

    /* block the signals we use */
    sigemptyset( &sigset );
    sigaddset( &sigset, SIGCHLD );
    sigaddset( &sigset, SIGHUP );
    sigaddset( &sigset, SIGINT );
    sigaddset( &sigset, SIGQUIT );
    sigaddset( &sigset, SIGTERM );
    sigprocmask( SIG_BLOCK, &sigset, NULL );

    /* set the handlers */
    action.sa_mask = sigset;
    action.sa_handler = sigchld_handler;
    sigaction( SIGCHLD, &action, NULL );
    action.sa_handler = sighup_handler;
    sigaction( SIGHUP, &action, NULL );
    action.sa_handler = sigint_handler;
    sigaction( SIGINT, &action, NULL );
    action.sa_handler = sigterm_handler;
    sigaction( SIGQUIT, &action, NULL );
    sigaction( SIGTERM, &action, NULL );
        gettimeofday( &now, NULL );
        if (!time_before( &now, &timeout_head->when )) handle_timeout();

            diff = (timeout_head->when.tv_sec - now.tv_sec) * 1000
                 + (timeout_head->when.tv_usec - now.tv_usec) / 1000;

        if (!active_users) break;  /* last user removed by a timeout */

        sigprocmask( SIG_UNBLOCK, &sigset, NULL );

        /* Note: we assume that the signal handlers do not manipulate the pollfd array
         * or the timeout list, otherwise there is a race here.
         */
        ret = poll( pollfd, nb_users, diff );
        sigprocmask( SIG_BLOCK, &sigset, NULL );

        for (i = 0; i < nb_users; i++)
            if (pollfd[i].revents)
                fd_poll_event( poll_users[i], pollfd[i].revents );
/****************************************************************/
/* file descriptor functions */

static void fd_dump( struct object *obj, int verbose )
    struct fd *fd = (struct fd *)obj;
    fprintf( stderr, "Fd unix_fd=%d mode=%06o user=%p\n", fd->unix_fd, fd->mode, fd->user );

static void fd_destroy( struct object *obj )
    struct fd *fd = (struct fd *)obj;

    if (fd->poll_index != -1) remove_poll_user( fd, fd->poll_index );
    close( fd->unix_fd );
/* set the events that poll() waits for on this fd */
void set_fd_events( struct fd *fd, int events )
    int user = fd->poll_index;
    assert( poll_users[user] == fd );

    if (events == -1)  /* stop waiting on this fd completely */
        pollfd[user].fd = -1;
        pollfd[user].events = POLLERR;
        pollfd[user].revents = 0;
    else if (pollfd[user].fd != -1 || !pollfd[user].events)
        pollfd[user].fd = fd->unix_fd;
        pollfd[user].events = events;
/* allocate an fd object */
/* if the function fails the unix fd is closed */
struct fd *alloc_fd( const struct fd_ops *fd_user_ops, int unix_fd, struct object *user )
    struct fd *fd = alloc_object( &fd_ops );

    fd->fd_ops = fd_user_ops;
    fd->unix_fd = unix_fd;

    if ((unix_fd != -1) && ((fd->poll_index = add_poll_user( fd )) == -1))
        release_object( fd );
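/* Example (illustrative): how an object type typically attaches itself to a
 * struct fd.  The my_device names are hypothetical and the member order of the
 * fd_ops table below is assumed; only alloc_fd(), alloc_object(),
 * release_object() and the default_ and no_ helpers in this file are real.
 *
 *     static const struct fd_ops my_device_fd_ops =
 *     {
 *         my_device_get_poll_events,   // get_poll_events
 *         default_poll_event,          // poll_event
 *         no_flush,                    // flush
 *         no_get_file_info,            // get_file_info
 *         no_queue_async               // queue_async
 *     };
 *
 *     static struct my_device *create_my_device( int unix_fd )
 *     {
 *         struct my_device *dev = alloc_object( &my_device_ops );
 *         if (!dev)
 *         {
 *             close( unix_fd );
 *             return NULL;
 *         }
 *         // alloc_fd() owns unix_fd from here on and closes it on failure
 *         if (!(dev->fd = alloc_fd( &my_device_fd_ops, unix_fd, &dev->obj )))
 *         {
 *             release_object( dev );
 *             return NULL;
 *         }
 *         return dev;
 *     }
 */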
/* retrieve the object that is using an fd */
void *get_fd_user( struct fd *fd )

/* retrieve the unix fd for an object */
int get_unix_fd( struct fd *fd )

/* callback for an event happening in the main poll() loop */
void fd_poll_event( struct fd *fd, int event )
    return fd->fd_ops->poll_event( fd, event );

/* check if events are pending and, if so, return which one(s) */
int check_fd_events( struct fd *fd, int events )
    pfd.fd = fd->unix_fd;
    if (poll( &pfd, 1, 0 ) <= 0) return 0;
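/* Note: the zero timeout makes poll() return immediately, so check_fd_events()
 * never blocks; it simply reports which of the requested events are already
 * pending on the fd.
 */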
/* default add_queue() routine for objects that poll() on an fd */
int default_fd_add_queue( struct object *obj, struct wait_queue_entry *entry )
    struct fd *fd = get_obj_fd( obj );

    if (!obj->head)  /* first on the queue */
        set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );
    add_queue( obj, entry );
    release_object( fd );

/* default remove_queue() routine for objects that poll() on an fd */
void default_fd_remove_queue( struct object *obj, struct wait_queue_entry *entry )
    struct fd *fd = get_obj_fd( obj );

    remove_queue( obj, entry );
    if (!obj->head)  /* last on the queue is gone */
        set_fd_events( fd, 0 );
    release_object( obj );
    release_object( fd );
/* default signaled() routine for objects that poll() on an fd */
int default_fd_signaled( struct object *obj, struct thread *thread )
    struct fd *fd = get_obj_fd( obj );
    int events = fd->fd_ops->get_poll_events( fd );
    int ret = check_fd_events( fd, events ) != 0;

        set_fd_events( fd, 0 );       /* stop waiting on poll() if we are signaled */
        set_fd_events( fd, events );  /* restart waiting on poll() if we are no longer signaled */
    release_object( fd );
/* default handler for poll() events */
void default_poll_event( struct fd *fd, int event )
    /* an error occurred, stop polling this fd to avoid busy-looping */
    if (event & (POLLERR | POLLHUP)) set_fd_events( fd, -1 );
    wake_up( fd->user, 0 );
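/* Taken together, the default_* routines implement the usual pattern for
 * pollable objects: default_fd_add_queue() turns on the poll() events returned
 * by the object's get_poll_events method when the first waiter arrives; the
 * main loop reports activity through fd_poll_event(), which dispatches to the
 * object's poll_event handler (typically default_poll_event(), waking the
 * waiters); default_fd_signaled() then re-checks the real state with a
 * non-blocking poll() and enables or disables polling accordingly; and
 * default_fd_remove_queue() stops polling once the last waiter is gone.
 */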
/* default flush() routine */
int no_flush( struct fd *fd )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );

/* default get_file_info() routine */
int no_get_file_info( struct fd *fd, struct get_file_info_reply *info, int *flags )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
    return FD_TYPE_INVALID;

/* default queue_async() routine */
void no_queue_async( struct fd *fd, void* ptr, unsigned int status, int type, int count )
    set_error( STATUS_OBJECT_TYPE_MISMATCH );
/* same as get_handle_obj but retrieves the struct fd associated with the object */
static struct fd *get_handle_fd_obj( struct process *process, obj_handle_t handle,
                                     unsigned int access )
    struct fd *fd = NULL;

    if ((obj = get_handle_obj( process, handle, access, NULL )))
        if (!(fd = get_obj_fd( obj ))) set_error( STATUS_OBJECT_TYPE_MISMATCH );
        release_object( obj );
/* flush a file's buffers */
DECL_HANDLER(flush_file)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

        fd->fd_ops->flush( fd );
        release_object( fd );
/* get a Unix fd to access a file */
DECL_HANDLER(get_handle_fd)
    reply->type = FD_TYPE_INVALID;

    if ((fd = get_handle_fd_obj( current->process, req->handle, req->access )))
        int unix_fd = get_handle_unix_fd( current->process, req->handle, req->access );
        if (unix_fd != -1) reply->fd = unix_fd;
        else if (!get_error())
            unix_fd = fd->unix_fd;
            if (unix_fd != -1) send_client_fd( current->process, unix_fd, req->handle );
        reply->type = fd->fd_ops->get_file_info( fd, NULL, &reply->flags );
        release_object( fd );
    else  /* check for console handle (FIXME: should be done in the client) */
        if ((obj = get_handle_obj( current->process, req->handle, req->access, NULL )))
            if (is_console_object( obj )) reply->type = FD_TYPE_CONSOLE;
            release_object( obj );
/* get file information */
DECL_HANDLER(get_file_info)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

        fd->fd_ops->get_file_info( fd, reply, &flags );
        release_object( fd );
/* create / reschedule an async I/O */
DECL_HANDLER(register_async)
    struct fd *fd = get_handle_fd_obj( current->process, req->handle, 0 );

    /*
     * The queue_async method must do the following:
     *
     * 1. Get the async_queue for the request of the given type.
     * 2. Call find_async() to look for the specific client request in the queue (=> NULL if not found).
     * 3. If status is STATUS_PENDING:
     *      a) If no async request was found in step 2 (new request): call create_async() to initialize one.
     *      b) Set the request's status to STATUS_PENDING.
     *      c) If the "queue" field of the async request is NULL: call async_insert() to put it into the queue.
     *    Otherwise:
     *      If the async request was found in step 2, destroy it by calling destroy_async().
     * 4. Carry out any operations necessary to adjust the object's poll events,
     *    usually set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) ).
     *
     * See also the implementations in file.c, serial.c, and sock.c; a schematic
     * sketch follows after this handler.
     */

        fd->fd_ops->queue_async( fd, req->overlapped, req->status, req->type, req->count );
        release_object( fd );
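/* Schematic example of a queue_async implementation following the steps listed
 * in the comment above.  This is illustrative only: the my_device names are
 * hypothetical and the async helper signatures are simplified; see file.c,
 * serial.c and sock.c for the real implementations.
 *
 *     static void my_device_queue_async( struct fd *fd, void *overlapped,
 *                                        unsigned int status, int type, int count )
 *     {
 *         struct my_device *dev = get_fd_user( fd );
 *         struct async_queue *q = &dev->read_q;                        // 1. queue for this request type
 *         struct async *async = find_async( q, current, overlapped );  // 2.
 *
 *         if (status == STATUS_PENDING)                                // 3.
 *         {
 *             if (!async) async = create_async( &dev->obj, current, overlapped );  // 3a.
 *             if (!async) return;
 *             async->status = STATUS_PENDING;                          // 3b.
 *             if (!async->queue) async_insert( q, async );             // 3c.
 *         }
 *         else if (async) destroy_async( async );                      // otherwise: cancel it
 *
 *         set_fd_events( fd, fd->fd_ops->get_poll_events( fd ) );      // 4. adjust poll() events
 *     }
 */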