#ifdef HAVE_POLL_H
#include <poll.h>
#endif
+#ifdef HAVE_SCHED_H
+#include <sched.h>
+#endif
#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "security.h"
+#define CPU_FLAG(cpu) (1 << (cpu))
+#ifdef __i386__
+static const unsigned int supported_cpus = CPU_FLAG(CPU_x86);
+#elif defined(__x86_64__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_x86_64) | CPU_FLAG(CPU_x86);
+#elif defined(__ALPHA__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_ALPHA);
+#elif defined(__powerpc__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_POWERPC);
+#elif defined(__sparc__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_SPARC);
+#else
+#error Unsupported CPU
+#endif
+
/* thread queues */
struct thread_wait
struct thread *thread; /* owner thread */
int count; /* count of objects */
int flags;
- void *cookie; /* magic cookie to return to client */
- struct timeval timeout;
+ client_ptr_t cookie; /* magic cookie to return to client */
+ timeout_t timeout;
struct timeout_user *user;
struct wait_queue_entry queues[1];
};
{
sizeof(struct thread_apc), /* size */
dump_thread_apc, /* dump */
+ no_get_type, /* get_type */
add_queue, /* add_queue */
remove_queue, /* remove_queue */
thread_apc_signaled, /* signaled */
no_signal, /* signal */
no_get_fd, /* get_fd */
no_map_access, /* map_access */
+ default_get_sd, /* get_sd */
+ default_set_sd, /* set_sd */
no_lookup_name, /* lookup_name */
+ no_open_file, /* open_file */
no_close_handle, /* close_handle */
thread_apc_destroy /* destroy */
};
{
sizeof(struct thread), /* size */
dump_thread, /* dump */
+ no_get_type, /* get_type */
add_queue, /* add_queue */
remove_queue, /* remove_queue */
thread_signaled, /* signaled */
no_signal, /* signal */
no_get_fd, /* get_fd */
thread_map_access, /* map_access */
+ default_get_sd, /* get_sd */
+ default_set_sd, /* set_sd */
no_lookup_name, /* lookup_name */
+ no_open_file, /* open_file */
no_close_handle, /* close_handle */
destroy_thread /* destroy */
};
{
NULL, /* get_poll_events */
thread_poll_event, /* poll_event */
- no_flush, /* flush */
- no_get_file_info, /* get_file_info */
- no_queue_async, /* queue_async */
- no_cancel_async /* cancel_async */
+ NULL, /* flush */
+ NULL, /* get_fd_type */
+ NULL, /* ioctl */
+ NULL, /* queue_async */
+ NULL, /* reselect_async */
+ NULL /* cancel_async */
};
static struct list thread_list = LIST_INIT(thread_list);
thread->unix_tid = -1; /* not known yet */
thread->context = NULL;
thread->suspend_context = NULL;
- thread->teb = NULL;
+ thread->teb = 0;
thread->debug_ctx = NULL;
thread->debug_event = NULL;
thread->debug_break = 0;
thread->state = RUNNING;
thread->exit_code = 0;
thread->priority = 0;
- thread->affinity = 1;
+ thread->affinity = ~0;
thread->suspend = 0;
thread->desktop_users = 0;
thread->token = NULL;
thread->creation_time = current_time;
- thread->exit_time.tv_sec = thread->exit_time.tv_usec = 0;
+ thread->exit_time = 0;
list_init( &thread->mutex_list );
list_init( &thread->system_apc );
}
/* check if address looks valid for a client-side data structure (TEB etc.) */
+/* NOTE(review): the address is now a client_ptr_t, which may be wider than a
+ * server-side pointer; only non-nullness and int alignment are checked here. */
-static inline int is_valid_address( void *addr )
+static inline int is_valid_address( client_ptr_t addr )
{
- return addr && !((unsigned long)addr % sizeof(int));
+ return addr && !(addr % sizeof(int));
}
/* create a new thread */
release_object( thread );
return NULL;
}
- if (!(thread->request_fd = create_anonymous_fd( &thread_fd_ops, fd, &thread->obj )))
+ if (!(thread->request_fd = create_anonymous_fd( &thread_fd_ops, fd, &thread->obj, 0 )))
{
release_object( thread );
return NULL;
if (thread->reply_fd) release_object( thread->reply_fd );
if (thread->wait_fd) release_object( thread->wait_fd );
free( thread->suspend_context );
- free_msg_queue( thread );
cleanup_clipboard_thread(thread);
destroy_thread_windows( thread );
+ free_msg_queue( thread );
close_thread_desktop( thread );
for (i = 0; i < MAX_INFLIGHT_FDS; i++)
{
struct thread *thread = (struct thread *)obj;
assert( obj->ops == &thread_ops );
- fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d teb=%p state=%d\n",
- thread->id, thread->unix_pid, thread->unix_tid, thread->teb, thread->state );
+ fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d state=%d\n",
+ thread->id, thread->unix_pid, thread->unix_tid, thread->state );
}
static int thread_signaled( struct object *obj, struct thread *thread )
return NULL;
}
+/* set the affinity mask of a thread; the mask must be a subset of the
+ * owning process affinity, otherwise STATUS_INVALID_PARAMETER is set */
+void set_thread_affinity( struct thread *thread, affinity_t affinity )
+{
+ if ((affinity & thread->process->affinity) != affinity)
+ {
+ set_error( STATUS_INVALID_PARAMETER );
+ return;
+ }
+#ifdef HAVE_SCHED_SETAFFINITY
+ if (thread->unix_tid != -1)
+ {
+ cpu_set_t set;
+ int i;
+ affinity_t mask;
+
+ /* translate the affinity bitmask into a cpu_set_t, one CPU per set bit */
+ CPU_ZERO( &set );
+ for (i = 0, mask = 1; mask; i++, mask <<= 1)
+ if (affinity & mask) CPU_SET( i, &set );
+
+ /* only record the new mask once the kernel has accepted it */
+ if (!sched_setaffinity( thread->unix_tid, sizeof(set), &set ))
+ thread->affinity = affinity;
+ else
+ file_set_error();
+ }
+ /* unix tid not known yet, so we cannot apply the mask to the kernel thread */
+ else set_error( STATUS_ACCESS_DENIED );
+#else
+ /* no kernel support for thread affinity: just remember the mask */
+ thread->affinity = affinity;
+#endif
+}
+
+#define THREAD_PRIORITY_REALTIME_HIGHEST 6
+#define THREAD_PRIORITY_REALTIME_LOWEST -7
+
/* set all information about a thread */
static void set_thread_info( struct thread *thread,
const struct set_thread_info_request *req )
{
if (req->mask & SET_THREAD_INFO_PRIORITY)
- thread->priority = req->priority;
- if (req->mask & SET_THREAD_INFO_AFFINITY)
{
- if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
- else thread->affinity = req->affinity;
+ int max = THREAD_PRIORITY_HIGHEST;
+ int min = THREAD_PRIORITY_LOWEST;
+ /* the realtime priority class allows a wider priority range */
+ if (thread->process->priority == PROCESS_PRIOCLASS_REALTIME)
+ {
+ max = THREAD_PRIORITY_REALTIME_HIGHEST;
+ min = THREAD_PRIORITY_REALTIME_LOWEST;
+ }
+ /* IDLE and TIME_CRITICAL are accepted even outside the regular range */
+ if ((req->priority >= min && req->priority <= max) ||
+ req->priority == THREAD_PRIORITY_IDLE ||
+ req->priority == THREAD_PRIORITY_TIME_CRITICAL)
+ thread->priority = req->priority;
+ else
+ set_error( STATUS_INVALID_PARAMETER );
}
+ if (req->mask & SET_THREAD_INFO_AFFINITY)
+ set_thread_affinity( thread, req->affinity );
if (req->mask & SET_THREAD_INFO_TOKEN)
security_set_thread_token( thread, req->token );
}
}
/* build the thread wait structure */
-static int wait_on( int count, struct object *objects[], int flags, const abs_time_t *timeout )
+static int wait_on( unsigned int count, struct object *objects[], int flags, timeout_t timeout )
{
struct thread_wait *wait;
struct wait_queue_entry *entry;
- int i;
+ unsigned int i;
- if (!(wait = mem_alloc( sizeof(*wait) + (count-1) * sizeof(*entry) ))) return 0;
+ if (!(wait = mem_alloc( FIELD_OFFSET(struct thread_wait, queues[count]) ))) return 0;
wait->next = current->wait;
wait->thread = current;
wait->count = count;
wait->flags = flags;
wait->user = NULL;
+ wait->timeout = timeout;
current->wait = wait;
- if (flags & SELECT_TIMEOUT)
- {
- wait->timeout.tv_sec = timeout->sec;
- wait->timeout.tv_usec = timeout->usec;
- }
for (i = 0, entry = wait->queues; i < count; i++, entry++)
{
struct thread_wait *wait = thread->wait;
struct wait_queue_entry *entry = wait->queues;
+ assert( wait );
+
+ if ((wait->flags & SELECT_INTERRUPTIBLE) && !list_empty( &thread->system_apc ))
+ return STATUS_USER_APC;
+
/* Suspended threads may not acquire locks, but they can run system APCs */
- if (thread->process->suspend + thread->suspend > 0)
- {
- if ((wait->flags & SELECT_INTERRUPTIBLE) && !list_empty( &thread->system_apc ))
- return STATUS_USER_APC;
- return -1;
- }
+ if (thread->process->suspend + thread->suspend > 0) return -1;
- assert( wait );
if (wait->flags & SELECT_ALL)
{
int not_ok = 0;
}
other_checks:
- if ((wait->flags & SELECT_INTERRUPTIBLE) && !list_empty(&thread->system_apc)) return STATUS_USER_APC;
if ((wait->flags & SELECT_ALERTABLE) && !list_empty(&thread->user_apc)) return STATUS_USER_APC;
- if (wait->flags & SELECT_TIMEOUT)
- {
- if (!time_before( ¤t_time, &wait->timeout )) return STATUS_TIMEOUT;
- }
+ if (wait->timeout <= current_time) return STATUS_TIMEOUT;
return -1;
}
/* send the wakeup signal to a thread */
-static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
+static int send_thread_wakeup( struct thread *thread, client_ptr_t cookie, int signaled )
{
struct wake_up_reply reply;
int ret;
+ memset( &reply, 0, sizeof(reply) );
reply.cookie = cookie;
reply.signaled = signaled;
if ((ret = write( get_unix_fd( thread->wait_fd ), &reply, sizeof(reply) )) == sizeof(reply))
int wake_thread( struct thread *thread )
{
int signaled, count;
- void *cookie;
+ client_ptr_t cookie;
for (count = 0; thread->wait; count++)
{
if ((signaled = check_wait( thread )) == -1) break;
cookie = thread->wait->cookie;
- if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
- thread->id, signaled, cookie );
+ if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d\n", thread->id, signaled );
end_wait( thread );
if (send_thread_wakeup( thread, cookie, signaled ) == -1) /* error */
break;
{
struct thread_wait *wait = ptr;
struct thread *thread = wait->thread;
- void *cookie = wait->cookie;
+ client_ptr_t cookie = wait->cookie;
wait->user = NULL;
if (thread->wait != wait) return; /* not the top-level wait, ignore it */
if (thread->suspend + thread->process->suspend > 0) return; /* suspended, ignore it */
- if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
- thread->id, (int)STATUS_TIMEOUT, cookie );
+ if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=TIMEOUT\n", thread->id );
end_wait( thread );
if (send_thread_wakeup( thread, cookie, STATUS_TIMEOUT ) == -1) return;
/* check if other objects have become signaled in the meantime */
}
/* select on a list of handles */
-static void select_on( int count, void *cookie, const obj_handle_t *handles,
- int flags, const abs_time_t *timeout, obj_handle_t signal_obj )
+static timeout_t select_on( unsigned int count, client_ptr_t cookie, const obj_handle_t *handles,
+ int flags, timeout_t timeout, obj_handle_t signal_obj )
{
- int ret, i;
+ int ret;
+ unsigned int i;
struct object *objects[MAXIMUM_WAIT_OBJECTS];
- if ((count < 0) || (count > MAXIMUM_WAIT_OBJECTS))
+ if (timeout <= 0) timeout = current_time - timeout;
+
+ if (count > MAXIMUM_WAIT_OBJECTS)
{
set_error( STATUS_INVALID_PARAMETER );
- return;
+ return 0;
}
for (i = 0; i < count; i++)
{
}
/* now we need to wait */
- if (flags & SELECT_TIMEOUT)
+ if (current->wait->timeout != TIMEOUT_INFINITE)
{
- if (!(current->wait->user = add_timeout_user( ¤t->wait->timeout,
+ if (!(current->wait->user = add_timeout_user( current->wait->timeout,
thread_timeout, current->wait )))
{
end_wait( current );
set_error( STATUS_PENDING );
done:
- while (--i >= 0) release_object( objects[i] );
+ while (i > 0) release_object( objects[--i] );
+ return timeout;
}
/* attempt to wake threads sleeping on the object wait queue */
void wake_up( struct object *obj, int max )
{
- struct list *ptr, *next;
+ struct list *ptr;
- LIST_FOR_EACH_SAFE( ptr, next, &obj->wait_queue )
+ LIST_FOR_EACH( ptr, &obj->wait_queue )
{
struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry );
- if (wake_thread( entry->thread ))
- {
- if (max && !--max) break;
- }
+ if (!wake_thread( entry->thread )) continue;
+ if (max && !--max) break;
+ /* restart at the head of the list since a wake up can change the object wait queue */
+ ptr = &obj->wait_queue;
}
}
}
}
+/* check if thread is currently waiting for a (system) apc */
+/* (a suspended thread, or one blocked in an interruptible select,
+ *  can be picked to run a system APC) */
+static inline int is_in_apc_wait( struct thread *thread )
+{
+ return (thread->process->suspend || thread->suspend ||
+ (thread->wait && (thread->wait->flags & SELECT_INTERRUPTIBLE)));
+}
+
/* queue an existing APC to a given thread */
static int queue_apc( struct process *process, struct thread *thread, struct thread_apc *apc )
{
LIST_FOR_EACH_ENTRY( candidate, &process->thread_list, struct thread, proc_entry )
{
if (candidate->state == TERMINATED) continue;
- if (process->suspend || candidate->suspend ||
- (candidate->wait && (candidate->wait->flags & SELECT_INTERRUPTIBLE)))
+ if (is_in_apc_wait( candidate ))
{
thread = candidate;
break;
}
}
if (!thread) return 0; /* nothing found */
+ queue = get_apc_queue( thread, apc->call.type );
}
else
{
if (thread->state == TERMINATED) return 0;
+ queue = get_apc_queue( thread, apc->call.type );
+ /* send signal for system APCs if needed */
+ if (queue == &thread->system_apc && list_empty( queue ) && !is_in_apc_wait( thread ))
+ {
+ if (!send_thread_signal( thread, SIGUSR1 )) return 0;
+ }
/* cancel a possible previous APC with the same owner */
if (apc->owner) thread_cancel_apc( thread, apc->owner, apc->call.type );
}
- queue = get_apc_queue( thread, apc->call.type );
grab_object( apc );
list_add_tail( queue, &apc->entry );
if (!list_prev( queue, &apc->entry )) /* first one */
if (thread->wait)
{
while (thread->wait) end_wait( thread );
- send_thread_wakeup( thread, NULL, STATUS_PENDING );
- /* if it is waiting on the socket, we don't need to send a SIGTERM */
+ send_thread_wakeup( thread, 0, STATUS_PENDING );
+ /* if it is waiting on the socket, we don't need to send a SIGQUIT */
violent_death = 0;
}
kill_console_processes( thread, 0 );
debug_exit_thread( thread );
abandon_mutexes( thread );
wake_up( &thread->obj, 0 );
- if (violent_death) send_thread_signal( thread, SIGTERM );
+ if (violent_death) send_thread_signal( thread, SIGQUIT );
cleanup_thread( thread );
remove_process_thread( thread->process, thread );
release_object( thread );
}
+/* copy parts of a context structure */
+/* flags select which register groups get copied from 'from' into 'to';
+ * the destination accumulates the flags of everything copied so far */
+static void copy_context( context_t *to, const context_t *from, unsigned int flags )
+{
+ /* both contexts must describe the same cpu type */
+ assert( to->cpu == from->cpu );
+ to->flags |= flags;
+ if (flags & SERVER_CTX_CONTROL) to->ctl = from->ctl;
+ if (flags & SERVER_CTX_INTEGER) to->integer = from->integer;
+ if (flags & SERVER_CTX_SEGMENTS) to->seg = from->seg;
+ if (flags & SERVER_CTX_FLOATING_POINT) to->fp = from->fp;
+ if (flags & SERVER_CTX_DEBUG_REGISTERS) to->debug = from->debug;
+ if (flags & SERVER_CTX_EXTENDED_REGISTERS) to->ext = from->ext;
+}
+
+/* return the context flags that correspond to system regs */
+/* (system regs are the ones we can't access on the client side) */
+static unsigned int get_context_system_regs( enum cpu_type cpu )
+{
+ switch (cpu)
+ {
+ /* only the x86 families have registers (debug regs) that must be
+  * read/written on the server side */
+ case CPU_x86: return SERVER_CTX_DEBUG_REGISTERS;
+ case CPU_x86_64: return SERVER_CTX_DEBUG_REGISTERS;
+ case CPU_ALPHA: return 0;
+ case CPU_POWERPC: return 0;
+ case CPU_SPARC: return 0;
+ }
+ /* not reached for any known cpu type; keeps the compiler happy */
+ return 0;
+}
+
/* trigger a breakpoint event in a given thread */
void break_thread( struct thread *thread )
{
- struct debug_event_exception data;
+ debug_event_t data;
assert( thread->context );
- data.record.ExceptionCode = STATUS_BREAKPOINT;
- data.record.ExceptionFlags = EXCEPTION_CONTINUABLE;
- data.record.ExceptionRecord = NULL;
- data.record.ExceptionAddress = get_context_ip( thread->context );
- data.record.NumberParameters = 0;
- data.first = 1;
+ /* build a first-chance, continuable breakpoint exception event */
+ memset( &data, 0, sizeof(data) );
+ data.exception.first = 1;
+ data.exception.exc_code = STATUS_BREAKPOINT;
+ data.exception.flags = EXCEPTION_CONTINUABLE;
+ /* the exception address is the current instruction pointer,
+  * fetched from the cpu-specific control registers */
+ switch (thread->context->cpu)
+ {
+ case CPU_x86:
+ data.exception.address = thread->context->ctl.i386_regs.eip;
+ break;
+ case CPU_x86_64:
+ data.exception.address = thread->context->ctl.x86_64_regs.rip;
+ break;
+ case CPU_ALPHA:
+ data.exception.address = thread->context->ctl.alpha_regs.fir;
+ break;
+ case CPU_POWERPC:
+ data.exception.address = thread->context->ctl.powerpc_regs.iar;
+ break;
+ case CPU_SPARC:
+ data.exception.address = thread->context->ctl.sparc_regs.pc;
+ break;
+ }
generate_debug_event( thread, EXCEPTION_DEBUG_EVENT, &data );
thread->debug_break = 0;
}
if (reply_fd == -1 || fcntl( reply_fd, F_SETFL, O_NONBLOCK ) == -1) goto error;
- current->reply_fd = create_anonymous_fd( &thread_fd_ops, reply_fd, ¤t->obj );
+ current->reply_fd = create_anonymous_fd( &thread_fd_ops, reply_fd, ¤t->obj, 0 );
reply_fd = -1;
if (!current->reply_fd) goto error;
set_error( STATUS_TOO_MANY_OPENED_FILES ); /* most likely reason */
return;
}
- if (!(current->wait_fd = create_anonymous_fd( &thread_fd_ops, wait_fd, ¤t->obj )))
+ if (!(current->wait_fd = create_anonymous_fd( &thread_fd_ops, wait_fd, ¤t->obj, 0 )))
return;
- if (!is_valid_address(req->teb) || !is_valid_address(req->peb) || !is_valid_address(req->ldt_copy))
+ if (!is_valid_address(req->teb))
{
set_error( STATUS_INVALID_PARAMETER );
return;
if (!process->peb) /* first thread, initialize the process too */
{
+ if (!CPU_FLAG(req->cpu) || !(supported_cpus & CPU_FLAG(req->cpu)))
+ {
+ set_error( STATUS_NOT_SUPPORTED );
+ return;
+ }
process->unix_pid = current->unix_pid;
- process->peb = req->peb;
- process->ldt_copy = req->ldt_copy;
+ process->peb = req->entry;
+ process->cpu = req->cpu;
reply->info_size = init_process( current );
}
else
{
+ if (req->cpu != process->cpu)
+ {
+ set_error( STATUS_INVALID_PARAMETER );
+ return;
+ }
if (process->unix_pid != current->unix_pid)
process->unix_pid = -1; /* can happen with linuxthreads */
if (current->suspend + process->suspend > 0) stop_thread( current );
- generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );
+ generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, &req->entry );
}
debug_level = max( debug_level, req->debug_level );
reply->pid = get_process_id( process );
reply->tid = get_thread_id( current );
reply->version = SERVER_PROTOCOL_VERSION;
- reply->server_start.sec = server_start_time.tv_sec;
- reply->server_start.usec = server_start_time.tv_usec;
+ reply->server_start = server_start_time;
+ reply->all_cpus = supported_cpus;
return;
error:
reply->exit_code = (thread->state == TERMINATED) ? thread->exit_code : STATUS_PENDING;
reply->priority = thread->priority;
reply->affinity = thread->affinity;
- reply->creation_time.sec = thread->creation_time.tv_sec;
- reply->creation_time.usec = thread->creation_time.tv_usec;
- reply->exit_time.sec = thread->exit_time.tv_sec;
- reply->exit_time.usec = thread->exit_time.tv_usec;
+ reply->creation_time = thread->creation_time;
+ reply->exit_time = thread->exit_time;
reply->last = thread->process->running_threads == 1;
release_object( thread );
if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
{
- if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
- else reply->count = resume_thread( thread );
+ reply->count = resume_thread( thread );
release_object( thread );
}
}
/* select on a handle list */
+/* request data layout: an apc_result_t (result of the previously returned
+ * APC, if any) immediately followed by the array of handles to wait on */
DECL_HANDLER(select)
{
- int count = get_req_data_size() / sizeof(obj_handle_t);
- select_on( count, req->cookie, get_req_data(), req->flags, &req->timeout, req->signal );
+ struct thread_apc *apc;
+ unsigned int count;
+ const apc_result_t *result = get_req_data();
+ const obj_handle_t *handles = (const obj_handle_t *)(result + 1);
+
+ if (get_req_data_size() < sizeof(*result))
+ {
+ set_error( STATUS_INVALID_PARAMETER );
+ return;
+ }
+ count = (get_req_data_size() - sizeof(*result)) / sizeof(obj_handle_t);
+
+ /* first store results of previous apc */
+ if (req->prev_apc)
+ {
+ if (!(apc = (struct thread_apc *)get_handle_obj( current->process, req->prev_apc,
+ 0, &thread_apc_ops ))) return;
+ apc->result = *result;
+ apc->executed = 1;
+ if (apc->result.type == APC_CREATE_THREAD) /* transfer the handle to the caller process */
+ {
+ obj_handle_t handle = duplicate_handle( current->process, apc->result.create_thread.handle,
+ apc->caller->process, 0, 0, DUP_HANDLE_SAME_ACCESS );
+ close_handle( current->process, apc->result.create_thread.handle );
+ apc->result.create_thread.handle = handle;
+ clear_error(); /* ignore errors from the above calls */
+ }
+ else if (apc->result.type == APC_ASYNC_IO)
+ {
+ /* forward the async i/o completion to its owner async object */
+ if (apc->owner)
+ async_set_result( apc->owner, apc->result.async_io.status,
+ apc->result.async_io.total, apc->result.async_io.apc );
+ }
+ /* wake whoever is waiting for this apc result, then drop our refs */
+ wake_up( &apc->obj, 0 );
+ close_handle( current->process, req->prev_apc );
+ release_object( apc );
+ }
+
+ reply->timeout = select_on( count, req->cookie, handles, req->flags, req->timeout, req->signal );
+
+ /* if the wait was interrupted by an APC, hand the next one to the client */
+ if (get_error() == STATUS_USER_APC)
+ {
+ for (;;)
+ {
+ if (!(apc = thread_dequeue_apc( current, !(req->flags & SELECT_ALERTABLE) )))
+ break;
+ /* Optimization: ignore APC_NONE calls, they are only used to
+ * wake up a thread, but since we got here the thread woke up already.
+ */
+ if (apc->call.type != APC_NONE)
+ {
+ if ((reply->apc_handle = alloc_handle( current->process, apc, SYNCHRONIZE, 0 )))
+ reply->call = apc->call;
+ release_object( apc );
+ break;
+ }
+ apc->executed = 1;
+ wake_up( &apc->obj, 0 );
+ release_object( apc );
+ }
+ }
}
/* queue an APC for a thread or process */
{
case APC_NONE:
case APC_USER:
- thread = get_thread_from_handle( req->thread, THREAD_SET_CONTEXT );
+ thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT );
break;
case APC_VIRTUAL_ALLOC:
case APC_VIRTUAL_FREE:
case APC_VIRTUAL_LOCK:
case APC_VIRTUAL_UNLOCK:
case APC_UNMAP_VIEW:
- process = get_process_from_handle( req->process, PROCESS_VM_OPERATION );
+ process = get_process_from_handle( req->handle, PROCESS_VM_OPERATION );
break;
case APC_VIRTUAL_QUERY:
- process = get_process_from_handle( req->process, PROCESS_QUERY_INFORMATION );
+ process = get_process_from_handle( req->handle, PROCESS_QUERY_INFORMATION );
break;
case APC_MAP_VIEW:
- process = get_process_from_handle( req->process, PROCESS_VM_OPERATION );
+ process = get_process_from_handle( req->handle, PROCESS_VM_OPERATION );
if (process && process != current->process)
{
/* duplicate the handle into the target process */
}
break;
case APC_CREATE_THREAD:
- process = get_process_from_handle( req->process, PROCESS_CREATE_THREAD );
+ process = get_process_from_handle( req->handle, PROCESS_CREATE_THREAD );
break;
default:
set_error( STATUS_INVALID_PARAMETER );
release_object( apc );
}
-/* get next APC to call */
-DECL_HANDLER(get_apc)
-{
- struct thread_apc *apc;
- int system_only = !req->alertable;
-
- if (req->prev)
- {
- if (!(apc = (struct thread_apc *)get_handle_obj( current->process, req->prev,
- 0, &thread_apc_ops ))) return;
- apc->result = req->result;
- apc->executed = 1;
- if (apc->result.type == APC_CREATE_THREAD) /* transfer the handle to the caller process */
- {
- obj_handle_t handle = duplicate_handle( current->process, apc->result.create_thread.handle,
- apc->caller->process, 0, 0, DUP_HANDLE_SAME_ACCESS );
- close_handle( current->process, apc->result.create_thread.handle );
- apc->result.create_thread.handle = handle;
- clear_error(); /* ignore errors from the above calls */
- }
- wake_up( &apc->obj, 0 );
- close_handle( current->process, req->prev );
- release_object( apc );
- }
-
- if (current->suspend + current->process->suspend > 0) system_only = 1;
-
- for (;;)
- {
- if (!(apc = thread_dequeue_apc( current, system_only )))
- {
- /* no more APCs */
- set_error( STATUS_PENDING );
- return;
- }
- /* Optimization: ignore APC_NONE calls, they are only used to
- * wake up a thread, but since we got here the thread woke up already.
- */
- if (apc->call.type != APC_NONE) break;
- apc->executed = 1;
- wake_up( &apc->obj, 0 );
- release_object( apc );
- }
-
- if ((reply->handle = alloc_handle( current->process, apc, SYNCHRONIZE, 0 )))
- reply->call = apc->call;
- release_object( apc );
-}
-
/* Get the result of an APC call */
DECL_HANDLER(get_apc_result)
{
DECL_HANDLER(get_thread_context)
{
struct thread *thread;
- CONTEXT *context;
+ context_t *context;
- if (get_reply_max_size() < sizeof(CONTEXT))
+ if (get_reply_max_size() < sizeof(context_t))
{
set_error( STATUS_INVALID_PARAMETER );
return;
else
{
if (thread->context == thread->suspend_context) thread->context = NULL;
- set_reply_data_ptr( thread->suspend_context, sizeof(CONTEXT) );
+ set_reply_data_ptr( thread->suspend_context, sizeof(context_t) );
thread->suspend_context = NULL;
}
}
if (thread->state != RUNNING) set_error( STATUS_ACCESS_DENIED );
else set_error( STATUS_PENDING );
}
- else if ((context = set_reply_data_size( sizeof(CONTEXT) )))
+ else if ((context = set_reply_data_size( sizeof(context_t) )))
{
- unsigned int flags = get_context_system_regs( req->flags );
+ unsigned int flags = get_context_system_regs( thread->process->cpu );
- memset( context, 0, sizeof(CONTEXT) );
- context->ContextFlags = get_context_cpu_flag();
+ memset( context, 0, sizeof(context_t) );
+ context->cpu = thread->process->cpu;
if (thread->context) copy_context( context, thread->context, req->flags & ~flags );
if (flags) get_thread_context( thread, context, flags );
}
DECL_HANDLER(set_thread_context)
{
struct thread *thread;
+ const context_t *context = get_req_data();
- if (get_req_data_size() < sizeof(CONTEXT))
+ if (get_req_data_size() < sizeof(context_t))
{
set_error( STATUS_INVALID_PARAMETER );
return;
if (req->suspend)
{
- if (thread != current || thread->context)
+ if (thread != current || thread->context || context->cpu != thread->process->cpu)
{
/* nested suspend or exception, shouldn't happen */
set_error( STATUS_INVALID_PARAMETER );
}
- else if ((thread->suspend_context = mem_alloc( sizeof(CONTEXT) )))
+ else if ((thread->suspend_context = mem_alloc( sizeof(context_t) )))
{
- memcpy( thread->suspend_context, get_req_data(), sizeof(CONTEXT) );
+ memcpy( thread->suspend_context, get_req_data(), sizeof(context_t) );
thread->context = thread->suspend_context;
if (thread->debug_break) break_thread( thread );
}
if (thread->state != RUNNING) set_error( STATUS_ACCESS_DENIED );
else set_error( STATUS_PENDING );
}
- else
+ else if (context->cpu == thread->process->cpu)
{
- const CONTEXT *context = get_req_data();
- unsigned int flags = get_context_system_regs( req->flags );
+ unsigned int system_flags = get_context_system_regs(context->cpu) & context->flags;
+ unsigned int client_flags = context->flags & ~system_flags;
- if (flags) set_thread_context( thread, context, flags );
- if (thread->context && !get_error())
- copy_context( thread->context, context, req->flags & ~flags );
+ if (system_flags) set_thread_context( thread, context, system_flags );
+ if (thread->context && !get_error()) copy_context( thread->context, context, client_flags );
}
+ else set_error( STATUS_INVALID_PARAMETER );
+
reply->self = (thread == current);
release_object( thread );
}