2 * Server-side thread management
4 * Copyright (C) 1998 Alexandre Julliard
17 #ifdef HAVE_SYS_MMAN_H
20 #include <sys/types.h>
36 struct thread_wait *next; /* next wait structure for this thread */
37 struct thread *thread; /* owner thread */
38 int count; /* count of objects */
40 void *cookie; /* magic cookie to return to client */
41 struct timeval timeout; /* wait timeout (used when SELECT_TIMEOUT flag is set) */
42 struct timeout_user *user; /* registered timeout callback, or NULL if none pending */
43 struct wait_queue_entry queues[1]; /* variable-size trailing array: one entry per waited object (count entries) */
46 /* asynchronous procedure calls */
50 struct thread_apc *next; /* queue linked list */
51 struct thread_apc *prev; /* previous APC in the doubly-linked queue */
52 struct object *owner; /* object that queued this apc */
53 void *func; /* function to call in client */
54 enum apc_type type; /* type of apc function */
55 int nb_args; /* number of arguments */
56 void *args[1]; /* function arguments */ /* variable-size trailing array (nb_args entries) */
60 /* thread operations */
/* forward declarations for the object_ops callbacks referenced by thread_ops below */
62 static void dump_thread( struct object *obj, int verbose );
63 static int thread_signaled( struct object *obj, struct thread *thread );
64 static void thread_poll_event( struct object *obj, int event );
65 static void destroy_thread( struct object *obj );
66 static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only );
/* generic object vtable: a thread is itself a waitable server object */
68 static const struct object_ops thread_ops =
70 sizeof(struct thread), /* size */
71 dump_thread, /* dump */
72 add_queue, /* add_queue */
73 remove_queue, /* remove_queue */
74 thread_signaled, /* signaled */
75 no_satisfied, /* satisfied */
76 NULL, /* get_poll_events */
77 thread_poll_event, /* poll_event */
78 no_get_fd, /* get_fd */
80 no_get_file_info, /* get_file_info */
81 destroy_thread /* destroy */
84 static struct thread *first_thread; /* head of the global doubly-linked list of threads */
85 static struct thread *booting_thread; /* very first thread created; holds the master socket lock until boot_done */
87 /* initialize the structure for a newly allocated thread */
88 inline static void init_thread_structure( struct thread *thread )
92 thread->unix_pid = 0; /* not known yet */
93 thread->context = NULL;
96 thread->debug_ctx = NULL;
97 thread->debug_event = NULL;
/* both APC queues (kernel/system and user) start out empty */
101 thread->system_apc.head = NULL;
102 thread->system_apc.tail = NULL;
103 thread->user_apc.head = NULL;
104 thread->user_apc.tail = NULL;
106 thread->request_fd = NULL;
107 thread->reply_fd = -1;
108 thread->wait_fd = -1;
109 thread->state = RUNNING;
110 thread->attached = 0;
111 thread->exit_code = 0;
114 thread->priority = THREAD_PRIORITY_NORMAL;
115 thread->affinity = 1; /* only affinity value accepted by set_thread_info */
117 thread->buffer = (void *)-1; /* (void *)-1 == no shared request buffer mapped yet */
/* mark all in-flight fd slots free (-1 on both client and server side) */
119 for (i = 0; i < MAX_INFLIGHT_FDS; i++)
120 thread->inflight[i].server = thread->inflight[i].client = -1;
123 /* create a new thread */
124 struct thread *create_thread( int fd, struct process *process )
126 struct thread *thread;
128 if (!(thread = alloc_object( &thread_ops, fd ))) return NULL;
130 init_thread_structure( thread );
/* the thread keeps a reference on its process for its whole lifetime */
132 thread->process = (struct process *)grab_object( process );
133 if (!current) current = thread;
135 if (!booting_thread) /* first thread ever */
137 booting_thread = thread;
138 lock_master_socket(1); /* block other clients until booting is done */
/* link at the head of the global thread list */
141 if ((thread->next = first_thread) != NULL) thread->next->prev = thread;
142 first_thread = thread;
/* request socket must be non-blocking so the main select loop never stalls */
144 fcntl( fd, F_SETFL, O_NONBLOCK );
145 set_select_events( &thread->obj, POLLIN ); /* start listening to events */
146 add_process_thread( thread->process, thread );
150 /* handle a client event */
151 static void thread_poll_event( struct object *obj, int event )
153 struct thread *thread = (struct thread *)obj;
154 assert( obj->ops == &thread_ops );
156 if (event & (POLLERR | POLLHUP)) kill_thread( thread, 0 );
157 else if (event & POLLIN) read_request( thread );
160 /* cleanup everything that is no longer needed by a dead thread */
161 /* used by destroy_thread and kill_thread */
162 static void cleanup_thread( struct thread *thread )
165 struct thread_apc *apc;
/* drain and free all pending APCs (both queues) */
167 while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
168 if (thread->buffer != (void *)-1) munmap( thread->buffer, MAX_REQUEST_LENGTH );
169 if (thread->reply_fd != -1) close( thread->reply_fd );
170 if (thread->wait_fd != -1) close( thread->wait_fd );
171 if (thread->request_fd) release_object( thread->request_fd );
/* if this thread's message queue is also the process default queue, detach it */
174 if (thread->process->queue == thread->queue)
176 release_object( thread->process->queue );
177 thread->process->queue = NULL;
179 release_object( thread->queue );
180 thread->queue = NULL;
/* close any server-side fds still parked in the in-flight table */
182 for (i = 0; i < MAX_INFLIGHT_FDS; i++)
184 if (thread->inflight[i].client != -1)
186 close( thread->inflight[i].server );
187 thread->inflight[i].client = thread->inflight[i].server = -1;
/* reset to the "not allocated" sentinels so a second cleanup is harmless */
190 thread->buffer = (void *)-1;
191 thread->reply_fd = -1;
192 thread->wait_fd = -1;
193 thread->request_fd = NULL;
196 /* destroy a thread when its refcount is 0 */
197 static void destroy_thread( struct object *obj )
199 struct thread_apc *apc;
200 struct thread *thread = (struct thread *)obj;
201 assert( obj->ops == &thread_ops );
203 assert( !thread->debug_ctx ); /* cannot still be debugging something */
/* unlink from the global thread list */
204 if (thread->next) thread->next->prev = thread->prev;
205 if (thread->prev) thread->prev->next = thread->next;
206 else first_thread = thread->next;
/* free any APCs queued since kill_thread's cleanup ran */
207 while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
208 if (thread->info) release_object( thread->info );
209 cleanup_thread( thread );
/* drop the reference on the owning process taken in create_thread */
210 release_object( thread->process );
213 /* dump a thread on stdout for debugging purposes */
214 static void dump_thread( struct object *obj, int verbose )
216 struct thread *thread = (struct thread *)obj;
217 assert( obj->ops == &thread_ops );
219 fprintf( stderr, "Thread pid=%d teb=%p state=%d\n",
220 thread->unix_pid, thread->teb, thread->state );
223 static int thread_signaled( struct object *obj, struct thread *thread )
225 struct thread *mythread = (struct thread *)obj;
226 return (mythread->state == TERMINATED);
229 /* get a thread pointer from a thread id (and increment the refcount) */
230 struct thread *get_thread_from_id( void *id )
232 struct thread *t = first_thread;
/* a thread id is simply the server-side pointer, so lookup is a pointer-compare list walk */
233 while (t && (t != id)) t = t->next;
234 if (t) grab_object( t );
235 else set_error( STATUS_INVALID_PARAMETER );
239 /* get a thread from a handle (and increment the refcount) */
240 struct thread *get_thread_from_handle( handle_t handle, unsigned int access )
242 return (struct thread *)get_handle_obj( current->process, handle,
243 access, &thread_ops );
246 /* find a thread from a Unix pid */
247 struct thread *get_thread_from_pid( int pid )
249 struct thread *t = first_thread;
/* NOTE(review): unlike get_thread_from_id, this does not grab a reference — caller must not keep the pointer */
250 while (t && (t->unix_pid != pid)) t = t->next;
254 /* set all information about a thread */
255 static void set_thread_info( struct thread *thread,
256 struct set_thread_info_request *req )
258 if (req->mask & SET_THREAD_INFO_PRIORITY)
259 thread->priority = req->priority;
260 if (req->mask & SET_THREAD_INFO_AFFINITY)
/* only an affinity of exactly 1 (first CPU) is supported */
262 if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
263 else thread->affinity = req->affinity;
267 /* suspend a thread */
268 int suspend_thread( struct thread *thread, int check_limit )
270 int old_count = thread->suspend;
271 if (thread->suspend < MAXIMUM_SUSPEND_COUNT || !check_limit)
273 if (!(thread->process->suspend + thread->suspend++)) stop_thread( thread );
275 else set_error( STATUS_SUSPEND_COUNT_EXCEEDED );
279 /* resume a thread */
280 int resume_thread( struct thread *thread )
282 int old_count = thread->suspend;
283 if (thread->suspend > 0)
/* only continue the unix process once both thread and process suspend counts reach zero */
285 if (!(--thread->suspend + thread->process->suspend)) continue_thread( thread );
290 /* suspend all threads but the current */
291 void suspend_all_threads( void )
293 struct thread *thread;
294 for ( thread = first_thread; thread; thread = thread->next )
295 if ( thread != current )
296 suspend_thread( thread, 0 );
299 /* resume all threads but the current */
300 void resume_all_threads( void )
302 struct thread *thread;
303 for ( thread = first_thread; thread; thread = thread->next )
304 if ( thread != current )
305 resume_thread( thread );
308 /* add a thread to an object wait queue; return 1 if OK, 0 on error */
309 int add_queue( struct object *obj, struct wait_queue_entry *entry )
/* append the entry at the tail of the object's doubly-linked wait queue */
313 entry->prev = obj->tail;
315 if (obj->tail) obj->tail->next = entry;
316 else obj->head = entry;
321 /* remove a thread from an object wait queue */
322 void remove_queue( struct object *obj, struct wait_queue_entry *entry )
324 if (entry->next) entry->next->prev = entry->prev;
325 else obj->tail = entry->prev;
326 if (entry->prev) entry->prev->next = entry->next;
327 else obj->head = entry->next;
328 release_object( obj );
/* finish the current (top-level) wait of a thread: dequeue from all objects,
 * cancel any pending timeout, and pop the wait off the thread's wait stack */
332 static void end_wait( struct thread *thread )
334 struct thread_wait *wait = thread->wait;
335 struct wait_queue_entry *entry;
/* remove this thread from every waited object's queue */
339 for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
340 entry->obj->ops->remove_queue( entry->obj, entry );
341 if (wait->user) remove_timeout_user( wait->user );
342 thread->wait = wait->next;
346 /* build the thread wait structure */
347 static int wait_on( int count, struct object *objects[], int flags, int sec, int usec )
349 struct thread_wait *wait;
350 struct wait_queue_entry *entry;
/* queues[] is declared with one element, so allocate count-1 extra entries */
353 if (!(wait = mem_alloc( sizeof(*wait) + (count-1) * sizeof(*entry) ))) return 0;
/* waits nest: push this one on top of the thread's wait stack */
354 wait->next = current->wait;
355 wait->thread = current;
359 current->wait = wait;
360 if (flags & SELECT_TIMEOUT)
/* NOTE(review): sec/usec appear to be an absolute timeout supplied by the client — confirm with caller */
362 wait->timeout.tv_sec = sec;
363 wait->timeout.tv_usec = usec;
/* enqueue this thread on each object's wait queue */
366 for (i = 0, entry = wait->queues; i < count; i++, entry++)
368 struct object *obj = objects[i];
369 entry->thread = current;
370 if (!obj->ops->add_queue( obj, entry ))
380 /* check if the thread waiting condition is satisfied */
/* returns the wakeup status to send to the client, or -1 if still not satisfied
 * (callers compare the result against -1) */
381 static int check_wait( struct thread *thread )
384 struct thread_wait *wait = thread->wait;
385 struct wait_queue_entry *entry = wait->queues;
388 if (wait->flags & SELECT_ALL)
391 /* Note: we must check them all anyway, as some objects may
392 * want to do something when signaled, even if others are not */
393 for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
394 not_ok |= !entry->obj->ops->signaled( entry->obj, thread );
395 if (not_ok) goto other_checks;
396 /* Wait satisfied: tell it to all objects */
/* any abandoned mutex turns the whole wait-all result into STATUS_ABANDONED_WAIT_0 */
398 for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
399 if (entry->obj->ops->satisfied( entry->obj, thread ))
400 signaled = STATUS_ABANDONED_WAIT_0
/* wait-any: first signaled object wins; its index is encoded in the status */
405 for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
407 if (!entry->obj->ops->signaled( entry->obj, thread )) continue;
408 /* Wait satisfied: tell it to the object */
410 if (entry->obj->ops->satisfied( entry->obj, thread ))
411 signaled = i + STATUS_ABANDONED_WAIT_0;
/* no object signaled: check APC delivery and timeout expiry */
417 if ((wait->flags & SELECT_INTERRUPTIBLE) && thread->system_apc.head) return STATUS_USER_APC;
418 if ((wait->flags & SELECT_ALERTABLE) && thread->user_apc.head) return STATUS_USER_APC;
419 if (wait->flags & SELECT_TIMEOUT)
422 gettimeofday( &now, NULL );
423 if (!time_before( &now, &wait->timeout )) return STATUS_TIMEOUT;
428 /* send the wakeup signal to a thread */
429 static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
431 struct wake_up_reply reply;
/* the reply is written on the dedicated wait fd the client blocks on */
434 reply.cookie = cookie;
435 reply.signaled = signaled;
436 if ((ret = write( thread->wait_fd, &reply, sizeof(reply) )) == sizeof(reply)) return 0;
/* a short write is a protocol violation; EPIPE just means the client died */
438 fatal_protocol_error( thread, "partial wakeup write %d\n", ret );
439 else if (errno == EPIPE)
440 kill_thread( thread, 0 ); /* normal death */
442 fatal_protocol_perror( thread, "write" );
446 /* attempt to wake up a thread */
447 /* return >0 if OK, 0 if the wait condition is still not satisfied */
448 static int wake_thread( struct thread *thread )
/* waits can be stacked, so keep waking until the top-level wait is unsatisfied */
453 for (count = 0; thread->wait; count++)
455 if ((signaled = check_wait( thread )) == -1) break;
/* save the cookie before end_wait frees the wait structure */
457 cookie = thread->wait->cookie;
458 if (debug_level) fprintf( stderr, "%08x: *wakeup* signaled=%d cookie=%p\n",
459 (unsigned int)thread, signaled, cookie );
461 send_thread_wakeup( thread, cookie, signaled );
466 /* thread wait timeout */
/* timeout callback registered by select_on via add_timeout_user */
467 static void thread_timeout( void *ptr )
469 struct thread_wait *wait = ptr;
470 struct thread *thread = wait->thread;
471 void *cookie = wait->cookie;
/* stale timer for a nested wait that is no longer on top: ignore it */
474 if (thread->wait != wait) return; /* not the top-level wait, ignore it */
476 if (debug_level) fprintf( stderr, "%08x: *wakeup* signaled=%d cookie=%p\n",
477 (unsigned int)thread, STATUS_TIMEOUT, cookie );
479 send_thread_wakeup( thread, cookie, STATUS_TIMEOUT );
480 /* check if other objects have become signaled in the meantime */
481 wake_thread( thread );
484 /* select on a list of handles */
485 static void select_on( int count, void *cookie, handle_t *handles, int flags, int sec, int usec )
488 struct object *objects[MAXIMUM_WAIT_OBJECTS];
490 if ((count < 0) || (count > MAXIMUM_WAIT_OBJECTS))
492 set_error( STATUS_INVALID_PARAMETER );
495 for (i = 0; i < count; i++)
497 if (!(objects[i] = get_handle_obj( current->process, handles[i], SYNCHRONIZE, NULL )))
501 if (i < count) goto done;
502 if (!wait_on( count, objects, flags, sec, usec )) goto done;
504 if ((ret = check_wait( current )) != -1)
506 /* condition is already satisfied */
512 /* now we need to wait */
513 if (flags & SELECT_TIMEOUT)
515 if (!(current->wait->user = add_timeout_user( ¤t->wait->timeout,
516 thread_timeout, current->wait )))
522 current->wait->cookie = cookie;
523 set_error( STATUS_PENDING );
526 while (--i >= 0) release_object( objects[i] );
529 /* attempt to wake threads sleeping on the object wait queue */
/* max == 0 means wake as many as possible; otherwise stop after max wakeups */
530 void wake_up( struct object *obj, int max )
532 struct wait_queue_entry *entry = obj->head;
536 struct thread *thread = entry->thread;
538 if (wake_thread( thread ))
540 if (max && !--max) break;
545 /* queue an async procedure call */
546 int thread_queue_apc( struct thread *thread, struct object *owner, void *func,
547 enum apc_type type, int system, int nb_args, ... )
549 struct thread_apc *apc;
/* system APCs and user APCs live in separate queues with different delivery rules */
550 struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;
552 /* cancel a possible previous APC with the same owner */
553 if (owner) thread_cancel_apc( thread, owner, system );
/* args[] is declared with one element, so allocate nb_args-1 extra slots */
555 if (!(apc = mem_alloc( sizeof(*apc) + (nb_args-1)*sizeof(apc->args[0]) ))) return 0;
556 apc->prev = queue->tail;
561 apc->nb_args = nb_args;
/* copy the variadic arguments into the APC */
566 va_start( args, nb_args );
567 for (i = 0; i < nb_args; i++) apc->args[i] = va_arg( args, void * );
/* if the queue was empty the thread may be in an alertable wait: try to wake it */
571 if (!apc->prev) /* first one */
574 wake_thread( thread );
579 /* cancel the async procedure call owned by a specific object */
580 void thread_cancel_apc( struct thread *thread, struct object *owner, int system )
582 struct thread_apc *apc;
583 struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;
/* at most one APC per owner can exist (see thread_queue_apc), so unlink the first match */
584 for (apc = queue->head; apc; apc = apc->next)
586 if (apc->owner != owner) continue;
587 if (apc->next) apc->next->prev = apc->prev;
588 else queue->tail = apc->prev;
589 if (apc->prev) apc->prev->next = apc->next;
590 else queue->head = apc->next;
596 /* remove the head apc from the queue; the returned pointer must be freed by the caller */
597 static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only )
599 struct thread_apc *apc;
/* system APCs always have priority; fall back to the user queue only when allowed */
600 struct apc_queue *queue = &thread->system_apc;
602 if (!queue->head && !system_only) queue = &thread->user_apc;
603 if ((apc = queue->head))
605 if (apc->next) apc->next->prev = NULL;
606 else queue->tail = NULL;
607 queue->head = apc->next;
612 /* add an fd to the inflight list */
613 /* return list index, or -1 on error */
614 int thread_add_inflight_fd( struct thread *thread, int client, int server )
618 if (server == -1) return -1;
625 /* first check if we already have an entry for this fd */
/* the client resent the same fd: replace the stale server-side copy */
626 for (i = 0; i < MAX_INFLIGHT_FDS; i++)
627 if (thread->inflight[i].client == client)
629 close( thread->inflight[i].server );
630 thread->inflight[i].server = server;
634 /* now find a free spot to store it */
635 for (i = 0; i < MAX_INFLIGHT_FDS; i++)
636 if (thread->inflight[i].client == -1)
638 thread->inflight[i].client = client;
639 thread->inflight[i].server = server;
645 /* get an inflight fd and purge it from the list */
646 /* the fd must be closed when no longer used */
647 int thread_get_inflight_fd( struct thread *thread, int client )
651 if (client == -1) return -1;
/* search the table; on a miss, try to pull more fds off the process socket and retry */
655 for (i = 0; i < MAX_INFLIGHT_FDS; i++)
657 if (thread->inflight[i].client == client)
659 ret = thread->inflight[i].server;
660 thread->inflight[i].server = thread->inflight[i].client = -1;
664 } while (!receive_fd( thread->process )); /* in case it is still in the socket buffer */
668 /* retrieve an LDT selector entry */
669 static void get_selector_entry( struct thread *thread, int entry,
670 unsigned int *base, unsigned int *limit,
671 unsigned char *flags )
673 if (!thread->process->ldt_copy)
675 set_error( STATUS_ACCESS_DENIED );
680 set_error( STATUS_INVALID_PARAMETER ); /* FIXME */
/* reading the client's memory requires it to be stopped under ptrace */
683 if (suspend_for_ptrace( thread ))
685 unsigned char flags_buf[4];
/* ldt_copy layout in the client: 8192 ints of bases, then 8192 ints of limits,
 * then one flags byte per entry (read 4 at a time as an int below) */
686 int *addr = (int *)thread->process->ldt_copy + entry;
687 if (read_thread_int( thread, addr, base ) == -1) goto done;
688 if (read_thread_int( thread, addr + 8192, limit ) == -1) goto done;
689 addr = (int *)thread->process->ldt_copy + 2*8192 + (entry >> 2);
690 if (read_thread_int( thread, addr, (int *)flags_buf ) == -1) goto done;
/* pick the flags byte for this entry out of the int we just read */
691 *flags = flags_buf[entry & 3];
693 resume_thread( thread );
697 /* kill a thread on the spot */
698 void kill_thread( struct thread *thread, int violent_death )
700 if (thread->state == TERMINATED) return; /* already killed */
701 thread->state = TERMINATED;
702 if (current == thread) current = NULL;
704 fprintf( stderr,"%08x: *killed* exit_code=%d\n",
705 (unsigned int)thread, thread->exit_code );
/* abort all pending waits and tell the client side (status PENDING == "dying") */
708 while (thread->wait) end_wait( thread );
709 send_thread_wakeup( thread, NULL, STATUS_PENDING );
710 /* if it is waiting on the socket, we don't need to send a SIGTERM */
713 debug_exit_thread( thread );
714 abandon_mutexes( thread );
715 remove_process_thread( thread->process, thread );
/* wake anybody waiting on the thread object itself */
716 wake_up( &thread->obj, 0 );
717 detach_thread( thread, violent_death ? SIGTERM : 0 );
718 remove_select_user( &thread->obj );
719 cleanup_thread( thread );
/* drop the list's reference; destroy_thread runs when the refcount hits 0 */
720 release_object( thread );
723 /* take a snapshot of currently running threads */
/* the returned array (and the reference grabbed on each thread) must be released by the caller */
724 struct thread_snapshot *thread_snap( int *count )
726 struct thread_snapshot *snapshot, *ptr;
727 struct thread *thread;
/* first pass: count live threads so we can size the array */
730 for (thread = first_thread; thread; thread = thread->next)
731 if (thread->state != TERMINATED) total++;
732 if (!total || !(snapshot = mem_alloc( sizeof(*snapshot) * total ))) return NULL;
/* second pass: fill the entries, grabbing a reference on each thread */
734 for (thread = first_thread; thread; thread = thread->next)
736 if (thread->state == TERMINATED) continue;
737 ptr->thread = thread;
738 ptr->count = thread->obj.refcount;
739 ptr->priority = thread->priority;
740 grab_object( thread );
747 /* signal that we are finished booting on the client side */
748 DECL_HANDLER(boot_done)
750 debug_level = max( debug_level, req->debug_level );
751 if (current == booting_thread)
/* boot is over: unblock the other clients that were held off in create_thread */
753 booting_thread = (struct thread *)~0UL; /* make sure it doesn't match other threads */
754 lock_master_socket(0); /* allow other clients now */
758 /* create a new thread */
759 DECL_HANDLER(new_thread)
761 struct thread *thread;
/* the request fd was passed in-flight over the process socket */
762 int request_fd = thread_get_inflight_fd( current, req->request_fd );
764 if (request_fd == -1)
766 set_error( STATUS_INVALID_HANDLE );
770 if ((thread = create_thread( request_fd, current->process )))
772 if (req->suspend) thread->suspend++;
774 if ((req->handle = alloc_handle( current->process, thread,
775 THREAD_ALL_ACCESS, req->inherit )))
777 /* thread object will be released when the thread gets killed */
/* handle allocation failed: undo the creation */
780 kill_thread( thread, 1 );
785 /* initialize a new thread */
786 DECL_HANDLER(init_thread)
/* fetch the reply and wait fds that were passed in-flight */
788 int reply_fd = thread_get_inflight_fd( current, req->reply_fd );
789 int wait_fd = thread_get_inflight_fd( current, req->wait_fd );
/* a non-zero unix_pid means init_thread already ran for this thread */
791 if (current->unix_pid)
793 fatal_protocol_error( current, "init_thread: already running\n" );
798 fatal_protocol_error( current, "bad reply fd\n" );
803 fatal_protocol_error( current, "bad wait fd\n" );
807 current->unix_pid = req->unix_pid;
808 current->teb = req->teb;
809 current->reply_fd = reply_fd;
810 current->wait_fd = wait_fd;
/* honor suspend requests that accumulated before the thread was running */
812 if (current->suspend + current->process->suspend > 0) stop_thread( current );
813 if (current->process->running_threads > 1)
814 generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );
816 req->pid = get_process_id( current->process );
817 req->tid = get_thread_id( current );
818 req->boot = (current == booting_thread);
819 req->version = SERVER_PROTOCOL_VERSION;
/* error path: close the fds we took out of the in-flight table */
823 if (reply_fd != -1) close( reply_fd );
824 if (wait_fd != -1) close( wait_fd );
827 /* set the shared buffer for a thread */
828 DECL_HANDLER(set_thread_buffer)
830 unsigned int size = MAX_REQUEST_LENGTH;
831 unsigned int offset = 0;
832 int fd = thread_get_inflight_fd( current, req->fd );
835 req->offset = offset;
/* grow the file to the buffer size before mapping it */
839 if (ftruncate( fd, size ) == -1) file_set_error();
842 void *buffer = mmap( 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset );
843 if (buffer == (void *)-1) file_set_error();
/* replace any previously mapped buffer */
846 if (current->buffer != (void *)-1) munmap( current->buffer, size );
847 current->buffer = buffer;
852 else set_error( STATUS_INVALID_HANDLE );
855 /* terminate a thread */
856 DECL_HANDLER(terminate_thread)
858 struct thread *thread;
862 if ((thread = get_thread_from_handle( req->handle, THREAD_TERMINATE )))
864 thread->exit_code = req->exit_code;
/* killing the current thread is deferred (we are still handling its request) */
865 if (thread != current) kill_thread( thread, 1 );
869 req->last = (thread->process->running_threads == 1);
871 release_object( thread );
875 /* fetch information about a thread */
876 DECL_HANDLER(get_thread_info)
878 struct thread *thread;
879 handle_t handle = req->handle;
/* a null handle means lookup by thread id instead */
881 if (!handle) thread = get_thread_from_id( req->tid_in );
882 else thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION );
886 req->tid = get_thread_id( thread );
887 req->teb = thread->teb;
/* STILL_ACTIVE is reported until the thread has actually terminated */
888 req->exit_code = (thread->state == TERMINATED) ? thread->exit_code : STILL_ACTIVE;
889 req->priority = thread->priority;
890 release_object( thread );
894 /* set information about a thread */
895 DECL_HANDLER(set_thread_info)
897 struct thread *thread;
898 /* delegate to the static helper after the access check */
899 if ((thread = get_thread_from_handle( req->handle, THREAD_SET_INFORMATION )))
901 set_thread_info( thread, req );
902 release_object( thread );
906 /* suspend a thread */
907 DECL_HANDLER(suspend_thread)
909 struct thread *thread;
/* check_limit=1: client-initiated suspends honor MAXIMUM_SUSPEND_COUNT */
911 if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
913 req->count = suspend_thread( thread, 1 );
914 release_object( thread );
918 /* resume a thread */
919 DECL_HANDLER(resume_thread)
921 struct thread *thread;
/* returns the previous suspend count to the client */
923 if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
925 req->count = resume_thread( thread );
926 release_object( thread );
930 /* select on a handle list */
/* the handle array travels in the variable-size request data; its length gives the count */
933 int count = get_req_data_size(req) / sizeof(int);
934 select_on( count, req->cookie, get_req_data(req), req->flags, req->sec, req->usec );
937 /* queue an APC for a thread */
938 DECL_HANDLER(queue_apc)
940 struct thread *thread;
/* req->user selects the user queue, otherwise the system queue; single param APC */
941 if ((thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT )))
943 thread_queue_apc( thread, NULL, req->func, APC_USER, !req->user, 1, req->param );
944 release_object( thread );
948 /* get next APC to call */
949 DECL_HANDLER(get_apc)
951 struct thread_apc *apc;
/* non-alertable requests only drain the system queue */
956 if (!(apc = thread_dequeue_apc( current, !req->alertable )))
960 req->type = APC_NONE;
961 set_req_data_size( req, 0 );
964 /* Optimization: ignore APCs that have a NULL func; they are only used
965 * to wake up a thread, but since we got here the thread woke up already.
967 if (apc->func) break;
/* clamp the copied argument block to the reply buffer size */
970 size = apc->nb_args * sizeof(apc->args[0]);
971 if (size > get_req_data_size(req)) size = get_req_data_size(req);
972 req->func = apc->func;
973 req->type = apc->type;
974 memcpy( get_req_data(req), apc->args, size );
975 set_req_data_size( req, size );
979 /* fetch a selector entry for a thread */
980 DECL_HANDLER(get_selector_entry)
982 struct thread *thread;
/* delegate to the static helper, which reads the client's LDT copy via ptrace */
983 if ((thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION )))
985 get_selector_entry( thread, req->entry, &req->base, &req->limit, &req->flags );
986 release_object( thread );