/*
 * Server-side thread management
 *
 * Copyright (C) 1998 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef HAVE_POLL_H
#include <poll.h>
#endif

#include "file.h"
#include "handle.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "user.h"
#include "security.h"
/* thread wait structure */

struct thread_wait
{
    struct thread_wait     *next;       /* next wait structure for this thread */
    struct thread          *thread;     /* owner thread */
    int                     count;      /* count of objects */
    int                     flags;      /* wait flags (SELECT_*) */
    void                   *cookie;     /* magic cookie to return to client */
    struct timeval          timeout;    /* absolute timeout */
    struct timeout_user    *user;       /* timeout callback, if any */
    struct wait_queue_entry queues[1];  /* wait queue entries (count of them) */
};
/* asynchronous procedure calls */

struct thread_apc
{
    struct list    entry;    /* queue linked list */
    struct object *owner;    /* object that queued this apc */
    void          *func;     /* function to call in client */
    enum apc_type  type;     /* type of apc function */
    int            nb_args;  /* number of arguments */
    void          *arg1;     /* function arguments */
    void          *arg2;
    void          *arg3;
};
/* thread operations */

static void dump_thread( struct object *obj, int verbose );
static int thread_signaled( struct object *obj, struct thread *thread );
static void thread_poll_event( struct fd *fd, int event );
static void destroy_thread( struct object *obj );
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only );

static const struct object_ops thread_ops =
{
    sizeof(struct thread),      /* size */
    dump_thread,                /* dump */
    add_queue,                  /* add_queue */
    remove_queue,               /* remove_queue */
    thread_signaled,            /* signaled */
    no_satisfied,               /* satisfied */
    no_signal,                  /* signal */
    no_get_fd,                  /* get_fd */
    destroy_thread              /* destroy */
};
static const struct fd_ops thread_fd_ops =
{
    NULL,                       /* get_poll_events */
    thread_poll_event,          /* poll_event */
    no_flush,                   /* flush */
    no_get_file_info,           /* get_file_info */
    no_queue_async,             /* queue_async */
    no_cancel_async             /* cancel_async */
};
static struct list thread_list = LIST_INIT(thread_list);
static struct thread *booting_thread;
/* initialize the structure for a newly allocated thread */
inline static void init_thread_structure( struct thread *thread )
{
    int i;

    thread->unix_pid = -1;  /* not known yet */
    thread->unix_tid = -1;  /* not known yet */
    thread->context = NULL;
    thread->teb = NULL;
    thread->debug_ctx = NULL;
    thread->debug_event = NULL;
    thread->queue = NULL;
    thread->wait = NULL;
    thread->error = 0;
    thread->req_data = NULL;
    thread->req_toread = 0;
    thread->reply_data = NULL;
    thread->reply_towrite = 0;
    thread->request_fd = NULL;
    thread->reply_fd = NULL;
    thread->wait_fd = NULL;
    thread->state = RUNNING;
    thread->attached = 0;
    thread->exit_code = 0;
    thread->priority = THREAD_PRIORITY_NORMAL;
    thread->affinity = 1;
    thread->suspend = 0;
    thread->token = NULL;
    thread->creation_time = time(NULL);
    thread->exit_time = 0;

    list_init( &thread->mutex_list );
    list_init( &thread->system_apc );
    list_init( &thread->user_apc );

    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        thread->inflight[i].server = thread->inflight[i].client = -1;
}
/* create a new thread */
struct thread *create_thread( int fd, struct process *process )
{
    struct thread *thread;

    if (!(thread = alloc_object( &thread_ops ))) return NULL;

    init_thread_structure( thread );

    thread->process = (struct process *)grab_object( process );
    if (!current) current = thread;

    if (!booting_thread)  /* first thread ever */
    {
        booting_thread = thread;
        lock_master_socket(1);
    }

    list_add_head( &thread_list, &thread->entry );

    if (!(thread->id = alloc_ptid( thread )))
    {
        release_object( thread );
        return NULL;
    }
    if (!(thread->request_fd = create_anonymous_fd( &thread_fd_ops, fd, &thread->obj )))
    {
        release_object( thread );
        return NULL;
    }

    thread->token = (struct token *) grab_object( process->token );

    set_fd_events( thread->request_fd, POLLIN );  /* start listening to events */
    add_process_thread( thread->process, thread );
    return thread;
}
/* handle a client event */
static void thread_poll_event( struct fd *fd, int event )
{
    struct thread *thread = get_fd_user( fd );
    assert( thread->obj.ops == &thread_ops );

    if (event & (POLLERR | POLLHUP)) kill_thread( thread, 0 );
    else if (event & POLLIN) read_request( thread );
    else if (event & POLLOUT) write_reply( thread );
}
/* cleanup everything that is no longer needed by a dead thread */
/* used by destroy_thread and kill_thread */
static void cleanup_thread( struct thread *thread )
{
    int i;
    struct thread_apc *apc;

    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    if (thread->req_data) free( thread->req_data );
    if (thread->reply_data) free( thread->reply_data );
    if (thread->request_fd) release_object( thread->request_fd );
    if (thread->reply_fd) release_object( thread->reply_fd );
    if (thread->wait_fd) release_object( thread->wait_fd );
    free_msg_queue( thread );
    cleanup_clipboard_thread( thread );
    destroy_thread_windows( thread );
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
    {
        if (thread->inflight[i].client != -1)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].client = thread->inflight[i].server = -1;
        }
    }
    thread->req_data = NULL;
    thread->reply_data = NULL;
    thread->request_fd = NULL;
    thread->reply_fd = NULL;
    thread->wait_fd = NULL;

    if (thread == booting_thread)  /* killing booting thread */
    {
        booting_thread = NULL;
        lock_master_socket(0);
    }
}
/* destroy a thread when its refcount is 0 */
static void destroy_thread( struct object *obj )
{
    struct thread_apc *apc;
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    assert( !thread->debug_ctx );  /* cannot still be debugging something */
    list_remove( &thread->entry );
    while ((apc = thread_dequeue_apc( thread, 0 ))) free( apc );
    cleanup_thread( thread );
    release_object( thread->process );
    if (thread->id) free_ptid( thread->id );
    if (thread->token) release_object( thread->token );
}
/* dump a thread on stderr for debugging purposes */
static void dump_thread( struct object *obj, int verbose )
{
    struct thread *thread = (struct thread *)obj;
    assert( obj->ops == &thread_ops );

    fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d teb=%p state=%d\n",
             thread->id, thread->unix_pid, thread->unix_tid, thread->teb, thread->state );
}

static int thread_signaled( struct object *obj, struct thread *thread )
{
    struct thread *mythread = (struct thread *)obj;
    return (mythread->state == TERMINATED);
}
/* get a thread pointer from a thread id (and increment the refcount) */
struct thread *get_thread_from_id( thread_id_t id )
{
    struct object *obj = get_ptid_entry( id );

    if (obj && obj->ops == &thread_ops) return (struct thread *)grab_object( obj );
    set_win32_error( ERROR_INVALID_THREAD_ID );
    return NULL;
}

/* get a thread from a handle (and increment the refcount) */
struct thread *get_thread_from_handle( obj_handle_t handle, unsigned int access )
{
    return (struct thread *)get_handle_obj( current->process, handle,
                                            access, &thread_ops );
}

/* find a thread from a Unix pid */
struct thread *get_thread_from_pid( int pid )
{
    struct thread *thread;

    /* check the Unix tids first, then fall back to the Unix pids */
    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
    {
        if (thread->unix_tid == pid) return thread;
    }
    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
    {
        if (thread->unix_pid == pid) return thread;
    }
    return NULL;
}
/* set all information about a thread */
static void set_thread_info( struct thread *thread,
                             const struct set_thread_info_request *req )
{
    if (req->mask & SET_THREAD_INFO_PRIORITY)
        thread->priority = req->priority;
    if (req->mask & SET_THREAD_INFO_AFFINITY)
    {
        if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
        else thread->affinity = req->affinity;
    }
}

/* stop a thread (at the Unix level) */
void stop_thread( struct thread *thread )
{
    /* can't stop a thread while initialisation is in progress */
    if (is_process_init_done(thread->process)) send_thread_signal( thread, SIGUSR1 );
}
/* suspend a thread */
static int suspend_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend < MAXIMUM_SUSPEND_COUNT)
    {
        if (!(thread->process->suspend + thread->suspend++)) stop_thread( thread );
    }
    else set_error( STATUS_SUSPEND_COUNT_EXCEEDED );
    return old_count;
}

/* resume a thread */
static int resume_thread( struct thread *thread )
{
    int old_count = thread->suspend;
    if (thread->suspend > 0)
    {
        if (!(--thread->suspend + thread->process->suspend)) wake_thread( thread );
    }
    return old_count;
}
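/* Illustrative sketch (not part of the original source): how the two suspend
 * counts combine.  A thread only runs while thread->suspend +
 * thread->process->suspend == 0, and both functions return the thread count as
 * it was before the call, matching Win32 SuspendThread/ResumeThread semantics.
 * Assuming the process itself is not suspended:
 *
 *     suspend_thread( thread );   returns 0, count 0 -> 1, stop_thread() is called
 *     suspend_thread( thread );   returns 1, count 1 -> 2, already stopped
 *     resume_thread( thread );    returns 2, count 2 -> 1, still stopped
 *     resume_thread( thread );    returns 1, count 1 -> 0, wake_thread() is called
 */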
/* add a thread to an object wait queue; return 1 if OK, 0 on error */
int add_queue( struct object *obj, struct wait_queue_entry *entry )
{
    grab_object( obj );
    entry->obj = obj;
    list_add_tail( &obj->wait_queue, &entry->entry );
    return 1;
}

/* remove a thread from an object wait queue */
void remove_queue( struct object *obj, struct wait_queue_entry *entry )
{
    list_remove( &entry->entry );
    release_object( obj );
}

/* finish waiting: remove the thread from all the object wait queues */
static void end_wait( struct thread *thread )
{
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry;
    int i;

    assert( wait );
    for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        entry->obj->ops->remove_queue( entry->obj, entry );
    if (wait->user) remove_timeout_user( wait->user );
    thread->wait = wait->next;
    free( wait );
}
/* build the thread wait structure */
static int wait_on( int count, struct object *objects[], int flags, const abs_time_t *timeout )
{
    struct thread_wait *wait;
    struct wait_queue_entry *entry;
    int i;

    if (!(wait = mem_alloc( sizeof(*wait) + (count-1) * sizeof(*entry) ))) return 0;
    wait->next    = current->wait;
    wait->thread  = current;
    wait->count   = count;
    wait->flags   = flags;
    wait->user    = NULL;
    current->wait = wait;
    if (flags & SELECT_TIMEOUT)
    {
        wait->timeout.tv_sec  = timeout->sec;
        wait->timeout.tv_usec = timeout->usec;
    }

    for (i = 0, entry = wait->queues; i < count; i++, entry++)
    {
        struct object *obj = objects[i];
        entry->thread = current;
        if (!obj->ops->add_queue( obj, entry ))
        {
            wait->count = i;
            end_wait( current );
            return 0;
        }
    }
    return 1;
}
/* check if the thread waiting condition is satisfied */
static int check_wait( struct thread *thread )
{
    int i, signaled;
    struct thread_wait *wait = thread->wait;
    struct wait_queue_entry *entry = wait->queues;

    /* Suspended threads may not acquire locks */
    if (thread->process->suspend + thread->suspend > 0) return -1;

    assert( wait );
    if (wait->flags & SELECT_ALL)
    {
        int not_ok = 0;
        /* Note: we must check them all anyway, as some objects may
         * want to do something when signaled, even if others are not */
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            not_ok |= !entry->obj->ops->signaled( entry->obj, thread );
        if (not_ok) goto other_checks;
        /* Wait satisfied: tell it to all objects */
        signaled = 0;
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = STATUS_ABANDONED_WAIT_0;
        return signaled;
    }
    else
    {
        for (i = 0, entry = wait->queues; i < wait->count; i++, entry++)
        {
            if (!entry->obj->ops->signaled( entry->obj, thread )) continue;
            /* Wait satisfied: tell it to the object */
            signaled = i;
            if (entry->obj->ops->satisfied( entry->obj, thread ))
                signaled = i + STATUS_ABANDONED_WAIT_0;
            return signaled;
        }
    }

 other_checks:
    if ((wait->flags & SELECT_INTERRUPTIBLE) && !list_empty(&thread->system_apc)) return STATUS_USER_APC;
    if ((wait->flags & SELECT_ALERTABLE) && !list_empty(&thread->user_apc)) return STATUS_USER_APC;
    if (wait->flags & SELECT_TIMEOUT)
    {
        struct timeval now;
        gettimeofday( &now, NULL );
        if (!time_before( &now, &wait->timeout )) return STATUS_TIMEOUT;
    }
    return -1;
}
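/* Worked example (not from the original source): for a wait on three objects
 * without SELECT_ALL where only the second object is signaled, check_wait()
 * returns 1 (the zero-based index), or 1 + STATUS_ABANDONED_WAIT_0 if that
 * object was an abandoned mutex.  A pending system APC yields STATUS_USER_APC,
 * an expired timeout yields STATUS_TIMEOUT, and -1 means keep waiting. */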
/* send the wakeup signal to a thread */
static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
{
    struct wake_up_reply reply;
    int ret;

    reply.cookie   = cookie;
    reply.signaled = signaled;
    if ((ret = write( get_unix_fd( thread->wait_fd ), &reply, sizeof(reply) )) == sizeof(reply))
        return 0;
    if (ret >= 0)
        fatal_protocol_error( thread, "partial wakeup write %d\n", ret );
    else if (errno == EPIPE)
        kill_thread( thread, 0 );  /* normal death */
    else
        fatal_protocol_perror( thread, "write" );
    return -1;
}
/* attempt to wake up a thread */
/* return >0 if OK, 0 if the wait condition is still not satisfied */
int wake_thread( struct thread *thread )
{
    int signaled, count;
    void *cookie;

    for (count = 0; thread->wait; count++)
    {
        if ((signaled = check_wait( thread )) == -1) break;

        cookie = thread->wait->cookie;
        if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                                  thread->id, signaled, cookie );
        end_wait( thread );
        if (send_thread_wakeup( thread, cookie, signaled ) == -1)  /* error */
            break;
    }
    return count;
}
/* thread wait timeout */
static void thread_timeout( void *ptr )
{
    struct thread_wait *wait = ptr;
    struct thread *thread = wait->thread;
    void *cookie = wait->cookie;

    wait->user = NULL;
    if (thread->wait != wait) return;  /* not the top-level wait, ignore it */
    if (thread->suspend + thread->process->suspend > 0) return;  /* suspended, ignore it */

    if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
                              thread->id, STATUS_TIMEOUT, cookie );
    end_wait( thread );
    if (send_thread_wakeup( thread, cookie, STATUS_TIMEOUT ) == -1) return;
    /* check if other objects have become signaled in the meantime */
    wake_thread( thread );
}
/* try signaling an event flag, a semaphore or a mutex */
static int signal_object( obj_handle_t handle )
{
    struct object *obj;
    int ret = 0;

    obj = get_handle_obj( current->process, handle, 0, NULL );
    if (obj)
    {
        ret = obj->ops->signal( obj, get_handle_access( current->process, handle ));
        release_object( obj );
    }
    return ret;
}
/* select on a list of handles */
static void select_on( int count, void *cookie, const obj_handle_t *handles,
                       int flags, const abs_time_t *timeout, obj_handle_t signal_obj )
{
    int ret, i;
    struct object *objects[MAXIMUM_WAIT_OBJECTS];

    if ((count < 0) || (count > MAXIMUM_WAIT_OBJECTS))
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }
    for (i = 0; i < count; i++)
    {
        if (!(objects[i] = get_handle_obj( current->process, handles[i], SYNCHRONIZE, NULL )))
            break;
    }

    if (i < count) goto done;
    if (!wait_on( count, objects, flags, timeout )) goto done;

    /* signal the object */
    if (signal_obj)
    {
        if (!signal_object( signal_obj ))
        {
            end_wait( current );
            goto done;
        }
        /* check if we woke ourselves up */
        if (!current->wait) goto done;
    }

    if ((ret = check_wait( current )) != -1)
    {
        /* condition is already satisfied */
        end_wait( current );
        set_error( ret );
        goto done;
    }

    /* now we need to wait */
    if (flags & SELECT_TIMEOUT)
    {
        if (!(current->wait->user = add_timeout_user( &current->wait->timeout,
                                                      thread_timeout, current->wait )))
        {
            end_wait( current );
            goto done;
        }
    }
    current->wait->cookie = cookie;
    set_error( STATUS_PENDING );

done:
    while (--i >= 0) release_object( objects[i] );
}
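/* Illustrative sketch (not part of the original source): a call as it would be
 * issued from the select request handler below; the handle array and cookie
 * are hypothetical values taken from the client request:
 *
 *     obj_handle_t handles[2] = { event_handle, mutex_handle };
 *     select_on( 2, client_cookie, handles,
 *                SELECT_INTERRUPTIBLE | SELECT_TIMEOUT, &req->timeout, 0 );
 *
 * On return the request status is either the immediate wait result or
 * STATUS_PENDING, in which case the client sleeps on its wait fd until
 * send_thread_wakeup() delivers the final status together with the cookie. */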
/* attempt to wake threads sleeping on the object wait queue */
void wake_up( struct object *obj, int max )
{
    struct list *ptr, *next;

    LIST_FOR_EACH_SAFE( ptr, next, &obj->wait_queue )
    {
        struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry );
        if (wake_thread( entry->thread ))
        {
            if (max && !--max) break;
        }
    }
}
/* queue an async procedure call */
int thread_queue_apc( struct thread *thread, struct object *owner, void *func,
                      enum apc_type type, int system, void *arg1, void *arg2, void *arg3 )
{
    struct thread_apc *apc;
    struct list *queue = system ? &thread->system_apc : &thread->user_apc;

    /* cancel a possible previous APC with the same owner */
    if (owner) thread_cancel_apc( thread, owner, system );
    if (thread->state == TERMINATED) return 0;

    if (!(apc = mem_alloc( sizeof(*apc) ))) return 0;
    apc->owner = owner;
    apc->func  = func;
    apc->type  = type;
    apc->arg1  = arg1;
    apc->arg2  = arg2;
    apc->arg3  = arg3;
    list_add_tail( queue, &apc->entry );
    if (!list_prev( queue, &apc->entry ))  /* first one */
        wake_thread( thread );

    return 1;
}
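/* Illustrative sketch (not part of the original source): queueing a user APC
 * the same way the queue_apc request handler below does; the arguments are
 * hypothetical:
 *
 *     thread_queue_apc( thread, NULL, client_func, APC_USER, 0, arg1, arg2, arg3 );
 *
 * A non-NULL owner would first cancel any previous APC queued by that owner,
 * and queueing onto an empty queue wakes the thread so it can fetch the APC
 * with the get_apc request. */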
/* cancel the async procedure call owned by a specific object */
void thread_cancel_apc( struct thread *thread, struct object *owner, int system )
{
    struct thread_apc *apc;
    struct list *queue = system ? &thread->system_apc : &thread->user_apc;
    LIST_FOR_EACH_ENTRY( apc, queue, struct thread_apc, entry )
    {
        if (apc->owner != owner) continue;
        list_remove( &apc->entry );
        free( apc );
        return;
    }
}

/* remove the head apc from the queue; the returned pointer must be freed by the caller */
static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system_only )
{
    struct thread_apc *apc = NULL;
    struct list *ptr = list_head( &thread->system_apc );

    if (!ptr && !system_only) ptr = list_head( &thread->user_apc );
    if (ptr)
    {
        apc = LIST_ENTRY( ptr, struct thread_apc, entry );
        list_remove( ptr );
    }
    return apc;
}
/* add an fd to the inflight list */
/* return list index, or -1 on error */
int thread_add_inflight_fd( struct thread *thread, int client, int server )
{
    int i;

    if (server == -1) return -1;
    if (client == -1)
    {
        close( server );
        return -1;
    }

    /* first check if we already have an entry for this fd */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == client)
        {
            close( thread->inflight[i].server );
            thread->inflight[i].server = server;
            return i;
        }

    /* now find a free spot to store it */
    for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        if (thread->inflight[i].client == -1)
        {
            thread->inflight[i].client = client;
            thread->inflight[i].server = server;
            return i;
        }
    return -1;
}
/* get an inflight fd and purge it from the list */
/* the fd must be closed when no longer used */
int thread_get_inflight_fd( struct thread *thread, int client )
{
    int i, ret = -1;

    if (client == -1) return -1;

    do
    {
        for (i = 0; i < MAX_INFLIGHT_FDS; i++)
        {
            if (thread->inflight[i].client == client)
            {
                ret = thread->inflight[i].server;
                thread->inflight[i].server = thread->inflight[i].client = -1;
                return ret;
            }
        }
    } while (!receive_fd( thread->process ));  /* in case it is still in the socket buffer */
    return -1;
}
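/* Illustrative sketch (not part of the original source): the usual round trip
 * for an in-flight fd.  When the client passes a descriptor over the request
 * socket, the receiving code records the server-side duplicate under the
 * client-side value (both fds hypothetical here):
 *
 *     thread_add_inflight_fd( thread, client_fd, server_fd );
 *
 * A request handler that needs the descriptor later claims it back and then
 * owns it, so it must close it when done:
 *
 *     int fd = thread_get_inflight_fd( current, req->some_fd );
 *     if (fd != -1) { ... use fd ... ; close( fd ); }
 */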
/* retrieve an LDT selector entry */
static void get_selector_entry( struct thread *thread, int entry,
                                unsigned int *base, unsigned int *limit,
                                unsigned char *flags )
{
    if (!thread->process->ldt_copy)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }
    if (suspend_for_ptrace( thread ))
    {
        unsigned char flags_buf[4];
        int *addr = (int *)thread->process->ldt_copy + entry;
        if (read_thread_int( thread, addr, base ) == -1) goto done;
        if (read_thread_int( thread, addr + 8192, limit ) == -1) goto done;
        addr = (int *)thread->process->ldt_copy + 2*8192 + (entry >> 2);
        if (read_thread_int( thread, addr, (int *)flags_buf ) == -1) goto done;
        *flags = flags_buf[entry & 3];
    done:
        resume_after_ptrace( thread );
    }
}
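/* Clarifying sketch (assumption, not stated in the original source): the reads
 * above treat process->ldt_copy as three consecutive per-entry arrays, which is
 * why the limit lives 8192 ints past the base and the flags are packed four per
 * int after both arrays; a hypothetical equivalent view of that memory is:
 *
 *     struct ldt_copy_layout
 *     {
 *         int           base[8192];
 *         int           limit[8192];
 *         unsigned char flags[8192];
 *     };
 */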
/* kill a thread on the spot */
void kill_thread( struct thread *thread, int violent_death )
{
    if (thread->state == TERMINATED) return;  /* already killed */
    thread->state = TERMINATED;
    thread->exit_time = time(NULL);
    if (current == thread) current = NULL;
    if (debug_level)
        fprintf( stderr, "%04x: *killed* exit_code=%d\n",
                 thread->id, thread->exit_code );
    if (thread->wait)
    {
        while (thread->wait) end_wait( thread );
        send_thread_wakeup( thread, NULL, STATUS_PENDING );
        /* if it is waiting on the socket, we don't need to send a SIGTERM */
        violent_death = 0;
    }
    kill_console_processes( thread, 0 );
    debug_exit_thread( thread );
    abandon_mutexes( thread );
    remove_process_thread( thread->process, thread );
    wake_up( &thread->obj, 0 );
    detach_thread( thread, violent_death ? SIGTERM : 0 );
    cleanup_thread( thread );
    release_object( thread );
}
/* take a snapshot of currently running threads */
struct thread_snapshot *thread_snap( int *count )
{
    struct thread_snapshot *snapshot, *ptr;
    struct thread *thread;
    int total = 0;

    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
        if (thread->state != TERMINATED) total++;
    if (!total || !(snapshot = mem_alloc( sizeof(*snapshot) * total ))) return NULL;
    ptr = snapshot;
    LIST_FOR_EACH_ENTRY( thread, &thread_list, struct thread, entry )
    {
        if (thread->state == TERMINATED) continue;
        ptr->thread   = thread;
        ptr->count    = thread->obj.refcount;
        ptr->priority = thread->priority;
        grab_object( thread );
        ptr++;
    }
    *count = total;
    return snapshot;
}
/* gets the current impersonation token */
struct token *thread_get_impersonation_token( struct thread *thread )
{
    if (thread->token)
        return thread->token;
    else
        return thread->process->token;
}
/* signal that we are finished booting on the client side */
DECL_HANDLER(boot_done)
{
    debug_level = max( debug_level, req->debug_level );
    if (current == booting_thread)
    {
        booting_thread = (struct thread *)~0UL;  /* make sure it doesn't match other threads */
        lock_master_socket(0);  /* allow other clients now */
    }
}
/* create a new thread */
DECL_HANDLER(new_thread)
{
    struct thread *thread;
    int request_fd = thread_get_inflight_fd( current, req->request_fd );

    if (request_fd == -1 || fcntl( request_fd, F_SETFL, O_NONBLOCK ) == -1)
    {
        if (request_fd != -1) close( request_fd );
        set_error( STATUS_INVALID_HANDLE );
        return;
    }

    if ((thread = create_thread( request_fd, current->process )))
    {
        if (req->suspend) thread->suspend++;
        reply->tid = get_thread_id( thread );
        if ((reply->handle = alloc_handle( current->process, thread,
                                           THREAD_ALL_ACCESS, req->inherit )))
        {
            /* thread object will be released when the thread gets killed */
            return;
        }
        kill_thread( thread, 1 );
    }
}
/* initialize a new thread */
DECL_HANDLER(init_thread)
{
    int reply_fd = thread_get_inflight_fd( current, req->reply_fd );
    int wait_fd = thread_get_inflight_fd( current, req->wait_fd );

    if (current->unix_pid != -1)
    {
        fatal_protocol_error( current, "init_thread: already running\n" );
        goto error;
    }
    if (reply_fd == -1 || fcntl( reply_fd, F_SETFL, O_NONBLOCK ) == -1)
    {
        fatal_protocol_error( current, "bad reply fd\n" );
        goto error;
    }
    if (wait_fd == -1)
    {
        fatal_protocol_error( current, "bad wait fd\n" );
        goto error;
    }
    if (!(current->reply_fd = create_anonymous_fd( &thread_fd_ops, reply_fd, &current->obj )))
    {
        reply_fd = -1;
        fatal_protocol_error( current, "could not allocate reply fd\n" );
        goto error;
    }
    if (!(current->wait_fd = create_anonymous_fd( &thread_fd_ops, wait_fd, &current->obj )))
        return;

    current->unix_pid = req->unix_pid;
    current->unix_tid = req->unix_tid;
    current->teb      = req->teb;

    if (current->suspend + current->process->suspend > 0) stop_thread( current );
    if (current->process->running_threads > 1)
        generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );

    reply->pid     = get_process_id( current->process );
    reply->tid     = get_thread_id( current );
    reply->boot    = (current == booting_thread);
    reply->version = SERVER_PROTOCOL_VERSION;
    return;

 error:
    if (reply_fd != -1) close( reply_fd );
    if (wait_fd != -1) close( wait_fd );
}
/* terminate a thread */
DECL_HANDLER(terminate_thread)
{
    struct thread *thread;

    reply->self = 0;
    reply->last = 0;
    if ((thread = get_thread_from_handle( req->handle, THREAD_TERMINATE )))
    {
        thread->exit_code = req->exit_code;
        if (thread != current) kill_thread( thread, 1 );
        else
        {
            reply->self = 1;
            reply->last = (thread->process->running_threads == 1);
        }
        release_object( thread );
    }
}
/* open a handle to a thread */
DECL_HANDLER(open_thread)
{
    struct thread *thread = get_thread_from_id( req->tid );

    reply->handle = 0;
    if (thread)
    {
        reply->handle = alloc_handle( current->process, thread, req->access, req->inherit );
        release_object( thread );
    }
}
/* fetch information about a thread */
DECL_HANDLER(get_thread_info)
{
    struct thread *thread;
    obj_handle_t handle = req->handle;

    if (!handle) thread = get_thread_from_id( req->tid_in );
    else thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION );

    if (thread)
    {
        reply->pid           = get_process_id( thread->process );
        reply->tid           = get_thread_id( thread );
        reply->teb           = thread->teb;
        reply->exit_code     = (thread->state == TERMINATED) ? thread->exit_code : STILL_ACTIVE;
        reply->priority      = thread->priority;
        reply->affinity      = thread->affinity;
        reply->creation_time = thread->creation_time;
        reply->exit_time     = thread->exit_time;

        release_object( thread );
    }
}
/* set information about a thread */
DECL_HANDLER(set_thread_info)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_INFORMATION )))
    {
        set_thread_info( thread, req );
        release_object( thread );
    }
}
/* suspend a thread */
DECL_HANDLER(suspend_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = suspend_thread( thread );
        release_object( thread );
    }
}
/* resume a thread */
DECL_HANDLER(resume_thread)
{
    struct thread *thread;

    if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
    {
        if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
        else reply->count = resume_thread( thread );
        release_object( thread );
    }
}
/* select on a handle list */
DECL_HANDLER(select)
{
    int count = get_req_data_size() / sizeof(int);
    select_on( count, req->cookie, get_req_data(), req->flags, &req->timeout, req->signal );
}
/* queue an APC for a thread */
DECL_HANDLER(queue_apc)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT )))
    {
        thread_queue_apc( thread, NULL, req->func, APC_USER, !req->user,
                          req->arg1, req->arg2, req->arg3 );
        release_object( thread );
    }
}
/* get next APC to call */
DECL_HANDLER(get_apc)
{
    struct thread_apc *apc;

    for (;;)
    {
        if (!(apc = thread_dequeue_apc( current, !req->alertable )))
        {
            /* no more APCs */
            reply->type = APC_NONE;
            return;
        }
        /* Optimization: ignore APCs that have a NULL func; they are only used
         * to wake up a thread, but since we got here the thread woke up already.
         * Exception: for APC_ASYNC_IO, func == NULL is legal.
         */
        if (apc->func || apc->type == APC_ASYNC_IO) break;
        free( apc );
    }
    reply->func = apc->func;
    reply->type = apc->type;
    reply->arg1 = apc->arg1;
    reply->arg2 = apc->arg2;
    reply->arg3 = apc->arg3;
    free( apc );
}
/* fetch a selector entry for a thread */
DECL_HANDLER(get_selector_entry)
{
    struct thread *thread;
    if ((thread = get_thread_from_handle( req->handle, THREAD_QUERY_INFORMATION )))
    {
        get_selector_entry( thread, req->entry, &reply->base, &reply->limit, &reply->flags );
        release_object( thread );
    }
}