/*
 * Server-side thread management
 *
 * Copyright (C) 1998 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include "wine/port.h"

#include <sys/types.h>
struct thread_wait
{
    struct thread_wait      *next;      /* next wait structure for this thread */
    struct thread           *thread;    /* owner thread */
    int                      count;     /* count of objects */
    int                      flags;     /* wait flags (SELECT_*) */
    void                    *cookie;    /* magic cookie to return to client */
    struct timeval           timeout;   /* absolute timeout */
    struct timeout_user     *user;      /* timeout user for SELECT_TIMEOUT waits */
    struct wait_queue_entry  queues[1]; /* one entry per object waited on */
};
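
/* Pending waits for a thread are kept as a stack: wait_on() pushes a new
 * thread_wait onto thread->wait through the next pointer, and end_wait() pops
 * it.  The queues[] array is over-allocated so that it holds one
 * wait_queue_entry per object being waited on (see wait_on() below). */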

/* asynchronous procedure calls */

struct thread_apc
{
    struct thread_apc  *next;     /* queue linked list */
    struct thread_apc  *prev;
    struct object      *owner;    /* object that queued this apc */
    void               *func;     /* function to call in client */
    enum apc_type       type;     /* type of apc function */
    int                 nb_args;  /* number of arguments */
    void               *args[1];  /* function arguments */
};
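
/* Each thread carries two APC queues (head/tail pairs, as initialised in
 * init_thread_structure below): system APCs, which can interrupt any
 * SELECT_INTERRUPTIBLE wait, and user APCs, which are only delivered while a
 * wait is SELECT_ALERTABLE.  thread_queue_apc() picks the queue from its
 * 'system' argument. */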

/* thread operations */

static void dump_thread( struct object *obj, int verbose );
static int thread_signaled( struct object *obj, struct thread *thread );
static void thread_poll_event( struct fd *fd, int event );
static void destroy_thread( struct object *obj );
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only );

static const struct object_ops thread_ops =
{
    sizeof(struct thread),      /* size */
    dump_thread,                /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    thread_signaled,            /* signaled */
    no_satisfied,               /* satisfied */
    no_get_fd,                  /* get_fd */
    destroy_thread              /* destroy */
};

static const struct fd_ops thread_fd_ops =
{
    NULL,                       /* get_poll_events */
    thread_poll_event,          /* poll_event */
    no_flush,                   /* flush */
    no_get_file_info,           /* get_file_info */
    no_queue_async              /* queue_async */
};

static struct thread *first_thread;
static struct thread *booting_thread;

/* initialize the structure for a newly allocated thread */
inline static void init_thread_structure( struct thread *thread )
{
    int i;

    thread->unix_pid        = -1;  /* not known yet */
    thread->unix_tid        = -1;  /* not known yet */
    thread->context         = NULL;
    thread->mutex           = NULL;
    thread->debug_ctx       = NULL;
    thread->debug_event     = NULL;
    thread->queue           = NULL;
    thread->hooks           = NULL;
    thread->system_apc.head = NULL;
    thread->system_apc.tail = NULL;
    thread->user_apc.head   = NULL;
    thread->user_apc.tail   = NULL;
    thread->req_data        = NULL;
    thread->req_toread      = 0;
    thread->reply_data      = NULL;
    thread->reply_towrite   = 0;
    thread->request_fd      = NULL;
    thread->reply_fd        = NULL;
    thread->wait_fd         = NULL;
    thread->state           = RUNNING;
    thread->attached        = 0;
    thread->exit_code       = 0;
    thread->priority        = THREAD_PRIORITY_NORMAL;
    thread->affinity        = 1;
    thread->creation_time   = time(NULL);
    thread->exit_time       = 0;

    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        thread->inflight[i].server = thread->inflight[i].client = -1;
}

/* create a new thread */
struct thread *create_thread( int fd, struct process *process )
{
    struct thread *thread;

    if (!(thread = alloc_object( &thread_ops ))) return NULL;

    init_thread_structure( thread );

    thread->process = (struct process *)grab_object( process );
    if (!current) current = thread;

    if (!booting_thread)  /* first thread ever */
    {
        booting_thread = thread;
        lock_master_socket(1);
    }

    if ((thread->next = first_thread) != NULL) thread->next->prev = thread;
    first_thread = thread;

    if (!(thread->id = alloc_ptid( thread )))
    {
        release_object( thread );
        return NULL;
    }
    if (!(thread->request_fd = create_anonymous_fd( &thread_fd_ops, fd, &thread->obj )))
    {
        release_object( thread );
        return NULL;
    }

    set_fd_events( thread->request_fd, POLLIN );  /* start listening to events */
    add_process_thread( thread->process, thread );
    return thread;
}
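
/* Note: at this point only request_fd is set up; the reply and wait fds are
 * installed later, once the client has passed them over the socket and sent
 * the init_thread request (see DECL_HANDLER(init_thread) below). */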

/* handle a client event */
static void thread_poll_event( struct fd *fd, int event )
{
    struct thread *thread = get_fd_user( fd );
    assert( thread->obj.ops == &thread_ops );

    if (event & (POLLERR | POLLHUP)) kill_thread( thread, 0 );
    else if (event & POLLIN) read_request( thread );
    else if (event & POLLOUT) write_reply( thread );
}

/* cleanup everything that is no longer needed by a dead thread */
/* used by destroy_thread and kill_thread */
static void cleanup_thread( struct thread *thread )
{
    int i;
    struct thread_apc *apc;

    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    if (thread->req_data) free( thread->req_data );
    if (thread->reply_data) free( thread->reply_data );
    if (thread->request_fd) release_object( thread->request_fd );
    if (thread->reply_fd) release_object( thread->reply_fd );
    if (thread->wait_fd) release_object( thread->wait_fd );
    if (thread->hooks) release_object( thread->hooks );
    free_msg_queue( thread );
    destroy_thread_windows( thread );
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
    {
        if (thread->inflight[i].client != -1)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].client = thread->inflight[i].server = -1;
        }
    }
    thread->req_data   = NULL;
    thread->reply_data = NULL;
    thread->request_fd = NULL;
    thread->reply_fd   = NULL;
    thread->wait_fd    = NULL;
    thread->hooks      = NULL;

    if (thread == booting_thread)  /* killing booting thread */
    {
        booting_thread = NULL;
        lock_master_socket(0);
    }
}

/* destroy a thread when its refcount is 0 */
static void destroy_thread( struct object *obj )
{
    struct thread_apc *apc;
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    assert( !thread->debug_ctx );  /* cannot still be debugging something */
    if (thread->next) thread->next->prev = thread->prev;
    if (thread->prev) thread->prev->next = thread->next;
    else first_thread = thread->next;
    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    cleanup_thread( thread );
    release_object( thread->process );
    if (thread->id) free_ptid( thread->id );
}

/* dump a thread on stderr for debugging purposes */
static void dump_thread( struct object *obj, int verbose )
{
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d teb=%p state=%d\n",
             thread->id, thread->unix_pid, thread->unix_tid, thread->teb, thread->state );
}

static int thread_signaled( struct object *obj, struct thread *thread )
{
    struct thread *mythread = (struct thread *)obj;
    return (mythread->state == TERMINATED);
}

/* get a thread pointer from a thread id (and increment the refcount) */
struct thread *get_thread_from_id( thread_id_t id )
{
    struct object *obj = get_ptid_entry( id );

    if (obj && obj->ops == &thread_ops) return (struct thread *)grab_object( obj );
    set_error( STATUS_INVALID_PARAMETER );
    return NULL;
}

/* get a thread from a handle (and increment the refcount) */
struct thread *get_thread_from_handle( obj_handle_t handle, unsigned int access )
{
    return (struct thread *)get_handle_obj( current->process, handle,
                                            access, &thread_ops );
}

/* find a thread from a Unix pid */
struct thread *get_thread_from_pid( int pid )
{
    struct thread *t;

    for (t = first_thread; t; t = t->next) if (t->unix_tid == pid) return t;
    for (t = first_thread; t; t = t->next) if (t->unix_pid == pid) return t;
    return NULL;
}

/* set all information about a thread */
static void set_thread_info( struct thread *thread,
                             const struct set_thread_info_request *req )
{
    if (req->mask & SET_THREAD_INFO_PRIORITY)
        thread->priority = req->priority;
    if (req->mask & SET_THREAD_INFO_AFFINITY)
    {
        if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
        else thread->affinity = req->affinity;
    }
}

/* stop a thread (at the Unix level) */
void stop_thread( struct thread *thread )
{
    /* can't stop a thread while initialisation is in progress */
    if (is_process_init_done(thread->process)) send_thread_signal( thread, SIGUSR1 );
}

/* suspend a thread */
static int suspend_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend < MAXIMUM_SUSPEND_COUNT)
    {
        if (!(thread->process->suspend + thread->suspend++)) stop_thread( thread );
    }
    else set_error( STATUS_SUSPEND_COUNT_EXCEEDED );
    return old_count;
}

/* resume a thread */
static int resume_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend > 0)
    {
        if (!(--thread->suspend + thread->process->suspend)) wake_thread( thread );
    }
    return old_count;
}
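
/* A thread only actually runs while thread->suspend + thread->process->suspend
 * is zero; suspend_thread() and resume_thread() both return the suspend count
 * as it was before the call, which is what the corresponding request handlers
 * report back to the client. */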

/* add a thread to an object wait queue; return 1 if OK, 0 on error */
int add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    grab_object( obj );
    entry->obj  = obj;
    entry->prev = obj->tail;
    entry->next = NULL;
    if (obj->tail) obj->tail->next = entry;
    else obj->head = entry;
    obj->tail = entry;
    return 1;
}

/* remove a thread from an object wait queue */
void remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    if (entry->next) entry->next->prev = entry->prev;
    else obj->tail = entry->prev;
    if (entry->prev) entry->prev->next = entry->next;
    else obj->head = entry->next;
    release_object( obj );
}

/* finish waiting */
static void end_wait( struct thread *thread )
{
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry;
    int i;

    assert( wait );
    for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        entry->obj->ops->remove_queue( entry->obj, entry );
    if (wait->user) remove_timeout_user( wait->user );
    thread->wait = wait->next;
    free( wait );
}

/* build the thread wait structure */
static int wait_on( int count, struct object *objects[], int flags, int sec, int usec )
{
    struct thread_wait *wait;
    struct wait_queue_entry *entry;
    int i;

    if (!(wait = mem_alloc( sizeof(*wait) + (count-1) * sizeof(*entry) ))) return 0;
    wait->next    = current->wait;
    wait->thread  = current;
    wait->count   = count;
    wait->flags   = flags;
    wait->user    = NULL;
    current->wait = wait;
    if (flags & SELECT_TIMEOUT)
    {
        wait->timeout.tv_sec  = sec;
        wait->timeout.tv_usec = usec;
    }

    for (i = 0, entry = wait->queues; i < count; i++, entry++)
    {
        struct object *obj = objects[i];
        entry->thread = current;
        if (!obj->ops->add_queue( obj, entry ))
        {
            wait->count = i;
            end_wait( current );
            return 0;
        }
    }
    return 1;
}

/* check if the thread waiting condition is satisfied */
static int check_wait( struct thread *thread )
{
    int i, signaled;
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry = wait->queues;

    /* Suspended threads may not acquire locks */
    if (thread->process->suspend + thread->suspend > 0) return -1;

    if (wait->flags & SELECT_ALL)
    {
        int not_ok = 0;
        /* Note: we must check them all anyway, as some objects may
         * want to do something when signaled, even if others are not */
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            not_ok |= !entry->obj->ops->signaled( entry->obj, thread );
        if (not_ok) goto other_checks;
        /* Wait satisfied: tell it to all objects */
        signaled = 0;
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = STATUS_ABANDONED_WAIT_0;
        return signaled;
    }
    else
    {
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        {
            if (!entry->obj->ops->signaled( entry->obj, thread )) continue;
            /* Wait satisfied: tell it to the object */
            signaled = i;
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = i + STATUS_ABANDONED_WAIT_0;
            return signaled;
        }
    }

 other_checks:
    if ((wait->flags & SELECT_INTERRUPTIBLE) && thread->system_apc.head) return STATUS_USER_APC;
    if ((wait->flags & SELECT_ALERTABLE) && thread->user_apc.head) return STATUS_USER_APC;
    if (wait->flags & SELECT_TIMEOUT)
    {
        struct timeval now;
        gettimeofday( &now, NULL );
        if (!time_before( &now, &wait->timeout )) return STATUS_TIMEOUT;
    }
    return -1;
}
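
/* check_wait() return convention: -1 means the wait is still not satisfied;
 * any other value is sent back to the client as the wakeup status: the index
 * of the object that ended the wait (offset by STATUS_ABANDONED_WAIT_0 when an
 * abandoned mutex is involved), STATUS_USER_APC when an APC should be
 * delivered, or STATUS_TIMEOUT when the timeout has expired. */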

/* send the wakeup signal to a thread */
static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
{
    struct wake_up_reply reply;
    int ret;

    reply.cookie   = cookie;
    reply.signaled = signaled;
    if ((ret = write( get_unix_fd( thread->wait_fd ), &reply, sizeof(reply) )) == sizeof(reply))
        return 0;
    if (ret >= 0)
        fatal_protocol_error( thread, "partial wakeup write %d\n", ret );
    else if (errno == EPIPE)
        kill_thread( thread, 0 );  /* normal death */
    else
        fatal_protocol_perror( thread, "write" );
    return -1;
}

/* attempt to wake up a thread */
/* return >0 if OK, 0 if the wait condition is still not satisfied */
int wake_thread( struct thread *thread )
{
    int signaled, count;
    void *cookie;

    for (count = 0; thread->wait; count++)
    {
        if ((signaled = check_wait( thread )) == -1) break;

        cookie = thread->wait->cookie;
        if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                                  thread->id, signaled, cookie );
        end_wait( thread );
        if (send_thread_wakeup( thread, cookie, signaled ) == -1) /* error */
            break;
    }
    return count;
}

/* thread wait timeout */
static void thread_timeout( void *ptr )
{
    struct thread_wait *wait = ptr;
    struct thread *thread = wait->thread;
    void *cookie = wait->cookie;

    wait->user = NULL;
    if (thread->wait != wait) return;  /* not the top-level wait, ignore it */
    if (thread->suspend + thread->process->suspend > 0) return;  /* suspended, ignore it */

    if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                              thread->id, STATUS_TIMEOUT, cookie );
    end_wait( thread );
    if (send_thread_wakeup( thread, cookie, STATUS_TIMEOUT ) == -1) return;
    /* check if other objects have become signaled in the meantime */
    wake_thread( thread );
}
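
/* The cookie passed to select_on() is an opaque client-side value; it is
 * stored in the wait structure and echoed back in the wake_up_reply so that
 * the client can tell which of its pending selects a wakeup belongs to. */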

/* select on a list of handles */
static void select_on( int count, void *cookie, const obj_handle_t *handles,
                       int flags, int sec, int usec )
{
    int ret, i;
    struct object *objects[MAXIMUM_WAIT_OBJECTS];

    if ((count < 0) || (count > MAXIMUM_WAIT_OBJECTS))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    for (i = 0; i < count; i++)
    {
        if (!(objects[i] = get_handle_obj( current->process, handles[i], SYNCHRONIZE, NULL )))
            break;
    }

    if (i < count) goto done;
    if (!wait_on( count, objects, flags, sec, usec )) goto done;

    if ((ret = check_wait( current )) != -1)
    {
        /* condition is already satisfied */
        end_wait( current );
        set_error( ret );
        goto done;
    }

    /* now we need to wait */
    if (flags & SELECT_TIMEOUT)
    {
        if (!(current->wait->user = add_timeout_user( &current->wait->timeout,
                                                      thread_timeout, current->wait )))
        {
            end_wait( current );
            goto done;
        }
    }
    current->wait->cookie = cookie;
    set_error( STATUS_PENDING );

done:
    while (--i >= 0) release_object( objects[i] );
}

/* attempt to wake threads sleeping on the object wait queue */
void wake_up( struct object *obj, int max )
{
    struct wait_queue_entry *entry = obj->head;

    while (entry)
    {
        struct thread *thread = entry->thread;
        entry = entry->next;  /* save the next entry: waking the thread may dequeue it */
        if (wake_thread( thread ))
        {
            if (max && !--max) break;
        }
    }
}

/* queue an async procedure call */
int thread_queue_apc( struct thread *thread, struct object *owner, void *func,
                      enum apc_type type, int system, int nb_args, ... )
{
    int i;
    va_list args;
    struct thread_apc *apc;
    struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;

    /* cancel a possible previous APC with the same owner */
    if (owner) thread_cancel_apc( thread, owner, system );
    if (thread->state == TERMINATED) return 0;

    if (!(apc = mem_alloc( sizeof(*apc) + (nb_args-1)*sizeof(apc->args[0]) ))) return 0;
    apc->prev    = queue->tail;
    apc->next    = NULL;
    apc->owner   = owner;
    apc->func    = func;
    apc->type    = type;
    apc->nb_args = nb_args;
    va_start( args, nb_args );
    for (i = 0; i < nb_args; i++) apc->args[i] = va_arg( args, void * );
    va_end( args );
    queue->tail = apc;
    if (!apc->prev)  /* first one */
    {
        queue->head = apc;
        wake_thread( thread );
    }
    else apc->prev->next = apc;
    return 1;
}

/* cancel the async procedure call owned by a specific object */
void thread_cancel_apc( struct thread *thread, struct object *owner, int system )
{
    struct thread_apc *apc;
    struct apc_queue *queue = system ? &thread->system_apc : &thread->user_apc;
    for (apc = queue->head; apc; apc = apc->next)
    {
        if (apc->owner != owner) continue;
        if (apc->next) apc->next->prev = apc->prev;
        else queue->tail = apc->prev;
        if (apc->prev) apc->prev->next = apc->next;
        else queue->head = apc->next;
        free( apc );
        return;
    }
}
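
/* System APCs take priority over user APCs: thread_dequeue_apc() below always
 * looks at the system queue first and only falls back to the user queue when
 * the system queue is empty and the caller did not ask for system APCs only. */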

/* remove the head apc from the queue; the returned pointer must be freed by the caller */
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only )
{
    struct thread_apc *apc;
    struct apc_queue *queue = &thread->system_apc;

    if (!queue->head && !system_only) queue = &thread->user_apc;
    if ((apc = queue->head))
    {
        if (apc->next) apc->next->prev = NULL;
        else queue->tail = NULL;
        queue->head = apc->next;
    }
    return apc;
}

/* add an fd to the inflight list */
/* return list index, or -1 on error */
int thread_add_inflight_fd( struct thread *thread, int client, int server )
{
    int i;

    if (server == -1) return -1;

    /* first check if we already have an entry for this fd */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == client)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].server = server;
            return i;
        }

    /* now find a free spot to store it */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == -1)
        {
            thread->inflight[i].client = client;
            thread->inflight[i].server = server;
            return i;
        }
    return -1;
}
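
/* The inflight table parks file descriptors that the client has sent over the
 * request socket until the request that refers to them is processed; getting
 * an fd removes it from the table and hands ownership to the caller. */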

/* get an inflight fd and purge it from the list */
/* the fd must be closed when no longer used */
int thread_get_inflight_fd( struct thread *thread, int client )
{
    int i, ret;

    if (client == -1) return -1;

    do
    {
        for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        {
            if (thread->inflight[i].client == client)
            {
                ret = thread->inflight[i].server;
                thread->inflight[i].server = thread->inflight[i].client = -1;
                return ret;
            }
        }
    } while (!receive_fd( thread->process ));  /* in case it is still in the socket buffer */
    return -1;
}
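
/* get_selector_entry() below reads the client's LDT copy directly out of the
 * target process memory; the arithmetic assumes the usual Wine layout of that
 * copy: 8192 base values, then 8192 limits, then the flag bytes packed four
 * to an int. */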

/* retrieve an LDT selector entry */
static void get_selector_entry( struct thread *thread, int entry,
                                unsigned int *base, unsigned int *limit,
                                unsigned char *flags )
{
    if (!thread->process->ldt_copy)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }
    if (suspend_for_ptrace( thread ))
    {
        unsigned char flags_buf[4];
        int *addr = (int *)thread->process->ldt_copy + entry;
        if (read_thread_int( thread, addr, base ) == -1) goto done;
        if (read_thread_int( thread, addr + 8192, limit ) == -1) goto done;
        addr = (int *)thread->process->ldt_copy + 2*8192 + (entry >> 2);
        if (read_thread_int( thread, addr, (int *)flags_buf ) == -1) goto done;
        *flags = flags_buf[entry & 3];
    done:
        resume_after_ptrace( thread );
    }
}

/* kill a thread on the spot */
void kill_thread( struct thread *thread, int violent_death )
{
    if (thread->state == TERMINATED) return;  /* already killed */
    thread->state = TERMINATED;
    thread->exit_time = time(NULL);
    if (current == thread) current = NULL;
    if (debug_level)
        fprintf( stderr, "%04x: *killed* exit_code=%d\n",
                 thread->id, thread->exit_code );
    if (thread->wait)
    {
        while (thread->wait) end_wait( thread );
        send_thread_wakeup( thread, NULL, STATUS_PENDING );
        /* if it is waiting on the socket, we don't need to send a SIGTERM */
        violent_death = 0;
    }
    kill_console_processes( thread, 0 );
    debug_exit_thread( thread );
    abandon_mutexes( thread );
    remove_process_thread( thread->process, thread );
    wake_up( &thread->obj, 0 );
    detach_thread( thread, violent_death ? SIGTERM : 0 );
    cleanup_thread( thread );
    release_object( thread );
}

/* take a snapshot of currently running threads */
struct thread_snapshot *thread_snap( int *count )
{
    struct thread_snapshot *snapshot, *ptr;
    struct thread *thread;
    int total = 0;

    for (thread = first_thread; thread; thread = thread->next)
        if (thread->state != TERMINATED) total++;
    if (!total || !(snapshot = mem_alloc( sizeof(*snapshot) * total ))) return NULL;
    ptr = snapshot;
    for (thread = first_thread; thread; thread = thread->next)
    {
        if (thread->state == TERMINATED) continue;
        ptr->thread   = thread;
        ptr->count    = thread->obj.refcount;
        ptr->priority = thread->priority;
        grab_object( thread );
        ptr++;
    }
    *count = total;
    return snapshot;
}

/* signal that we are finished booting on the client side */
DECL_HANDLER(boot_done)
{
    debug_level = max( debug_level, req->debug_level );
    if (current == booting_thread)
    {
        booting_thread = (struct thread *)~0UL;  /* make sure it doesn't match other threads */
        lock_master_socket(0);  /* allow other clients now */
    }
}

/* create a new thread */
DECL_HANDLER(new_thread)
{
    struct thread *thread;
    int request_fd = thread_get_inflight_fd( current, req->request_fd );

    if (request_fd == -1 || fcntl( request_fd, F_SETFL, O_NONBLOCK ) == -1)
    {
        if (request_fd != -1) close( request_fd );
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    if ((thread = create_thread( request_fd, current->process )))
    {
        if (req->suspend) thread->suspend++;
        reply->tid = get_thread_id( thread );
        if ((reply->handle = alloc_handle( current->process, thread,
                                           THREAD_ALL_ACCESS, req->inherit )))
        {
            /* thread object will be released when the thread gets killed */
            return;
        }
        kill_thread( thread, 1 );
    }
}

/* initialize a new thread */
DECL_HANDLER(init_thread)
{
    int reply_fd = thread_get_inflight_fd( current, req->reply_fd );
    int wait_fd = thread_get_inflight_fd( current, req->wait_fd );

    if (current->unix_pid != -1)
    {
        fatal_protocol_error( current, "init_thread: already running\n" );
        goto error;
    }
    if (reply_fd == -1 || fcntl( reply_fd, F_SETFL, O_NONBLOCK ) == -1)
    {
        fatal_protocol_error( current, "bad reply fd\n" );
        goto error;
    }
    if (wait_fd == -1)
    {
        fatal_protocol_error( current, "bad wait fd\n" );
        goto error;
    }
    current->reply_fd = create_anonymous_fd( &thread_fd_ops, reply_fd, &current->obj );
    current->wait_fd  = create_anonymous_fd( &thread_fd_ops, wait_fd, &current->obj );
    if (!current->reply_fd || !current->wait_fd) return;

    current->unix_pid = req->unix_pid;
    current->unix_tid = req->unix_tid;
    current->teb      = req->teb;

    if (current->suspend + current->process->suspend > 0) stop_thread( current );
    if (current->process->running_threads > 1)
        generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );

    reply->pid     = get_process_id( current->process );
    reply->tid     = get_thread_id( current );
    reply->boot    = (current == booting_thread);
    reply->version = SERVER_PROTOCOL_VERSION;
    return;

 error:
    if (reply_fd != -1) close( reply_fd );
    if (wait_fd != -1) close( wait_fd );
}

/* terminate a thread */
DECL_HANDLER(terminate_thread)
{
    struct thread *thread;

    reply->self = 0;
    reply->last = 0;
    if ((thread = get_thread_from_handle( req->handle, THREAD_TERMINATE )))
    {
        thread->exit_code = req->exit_code;
        if (thread != current) kill_thread( thread, 1 );
        else
        {
            reply->self = 1;
            reply->last = (thread->process->running_threads == 1);
        }
        release_object( thread );
    }
}

/* open a handle to a thread */
DECL_HANDLER(open_thread)
{
    struct thread *thread = get_thread_from_id( req->tid );

    if (thread)
    {
        reply->handle = alloc_handle( current->process, thread, req->access, req->inherit );
        release_object( thread );
    }
}

/* fetch information about a thread */
DECL_HANDLER(get_thread_info)
{
    struct thread *thread;
    obj_handle_t handle = req->handle;

    if (!handle) thread = get_thread_from_id( req->tid_in );
    else thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION );

    if (thread)
    {
        reply->tid           = get_thread_id( thread );
        reply->teb           = thread->teb;
        reply->exit_code     = (thread->state == TERMINATED) ? thread->exit_code : STILL_ACTIVE;
        reply->priority      = thread->priority;
        reply->creation_time = thread->creation_time;
        reply->exit_time     = thread->exit_time;
        release_object( thread );
    }
}

/* set information about a thread */
DECL_HANDLER(set_thread_info)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_INFORMATION )))
    {
        set_thread_info( thread, req );
        release_object( thread );
    }
}

/* suspend a thread */
DECL_HANDLER(suspend_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = suspend_thread( thread );
        release_object( thread );
    }
}

/* resume a thread */
DECL_HANDLER(resume_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = resume_thread( thread );
        release_object( thread );
    }
}

/* select on a handle list */
DECL_HANDLER(select)
{
    int count = get_req_data_size() / sizeof(int);
    select_on( count, req->cookie, get_req_data(), req->flags, req->sec, req->usec );
}

/* queue an APC for a thread */
DECL_HANDLER(queue_apc)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT )))
    {
        thread_queue_apc( thread, NULL, req->func, APC_USER, !req->user, 1, req->param );
        release_object( thread );
    }
}

/* get next APC to call */
DECL_HANDLER(get_apc)
{
    struct thread_apc *apc;
    size_t size;

    for (;;)
    {
        if (!(apc = thread_dequeue_apc( current, !req->alertable )))
        {
            /* no more APCs */
            reply->type = APC_NONE;
            return;
        }
        /* Optimization: ignore APCs that have a NULL func; they are only used
         * to wake up a thread, but since we got here the thread woke up already.
         * Exception: for APC_ASYNC_IO, func == NULL is legal.
         */
        if (apc->func || apc->type == APC_ASYNC_IO) break;
        free( apc );
    }
    size = apc->nb_args * sizeof(apc->args[0]);
    if (size > get_reply_max_size()) size = get_reply_max_size();
    reply->func = apc->func;
    reply->type = apc->type;
    set_reply_data( apc->args, size );
    free( apc );
}

/* fetch a selector entry for a thread */
DECL_HANDLER(get_selector_entry)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION )))
    {
        get_selector_entry( thread, req->entry, &reply->base, &reply->limit, &reply->flags );
        release_object( thread );
    }
}