#ifdef HAVE_POLL_H
#include <poll.h>
#endif
+#ifdef HAVE_SCHED_H
+#include <sched.h>
+#endif
#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "security.h"
+/* bitmask of client CPU types this server build can host, chosen from the */
+/* architecture the server binary itself was compiled for */
+#ifdef __i386__
+static const unsigned int supported_cpus = CPU_FLAG(CPU_x86);
+#elif defined(__x86_64__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_x86_64) | CPU_FLAG(CPU_x86);
+#elif defined(__ALPHA__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_ALPHA);
+#elif defined(__powerpc__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_POWERPC);
+#elif defined(__sparc__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_SPARC);
+#elif defined(__arm__)
+static const unsigned int supported_cpus = CPU_FLAG(CPU_ARM);
+#else
+#error Unsupported CPU
+#endif
+
/* thread queues */
struct thread_wait
struct thread *thread; /* owner thread */
int count; /* count of objects */
int flags;
- void *cookie; /* magic cookie to return to client */
+ client_ptr_t cookie; /* magic cookie to return to client */
timeout_t timeout;
struct timeout_user *user;
struct wait_queue_entry queues[1];
{
sizeof(struct thread_apc), /* size */
dump_thread_apc, /* dump */
+ no_get_type, /* get_type */
add_queue, /* add_queue */
remove_queue, /* remove_queue */
thread_apc_signaled, /* signaled */
{
sizeof(struct thread), /* size */
dump_thread, /* dump */
+ no_get_type, /* get_type */
add_queue, /* add_queue */
remove_queue, /* remove_queue */
thread_signaled, /* signaled */
thread->unix_tid = -1; /* not known yet */
thread->context = NULL;
thread->suspend_context = NULL;
- thread->teb = NULL;
+ thread->teb = 0;
thread->debug_ctx = NULL;
thread->debug_event = NULL;
thread->debug_break = 0;
thread->state = RUNNING;
thread->exit_code = 0;
thread->priority = 0;
- thread->affinity = 1;
+ thread->affinity = ~0;
thread->suspend = 0;
thread->desktop_users = 0;
thread->token = NULL;
}
/* check if address looks valid for a client-side data structure (TEB etc.) */
-static inline int is_valid_address( void *addr )
+static inline int is_valid_address( client_ptr_t addr )
{
- return addr && !((unsigned long)addr % sizeof(int));
+ return addr && !(addr % sizeof(int));
}
/* create a new thread */
thread->process = (struct process *)grab_object( process );
thread->desktop = process->desktop;
+ thread->affinity = process->affinity;
if (!current) current = thread;
list_add_head( &thread_list, &thread->entry );
struct thread *thread = get_fd_user( fd );
assert( thread->obj.ops == &thread_ops );
+ grab_object( thread );
if (event & (POLLERR | POLLHUP)) kill_thread( thread, 0 );
else if (event & POLLIN) read_request( thread );
else if (event & POLLOUT) write_reply( thread );
+ release_object( thread );
}
/* cleanup everything that is no longer needed by a dead thread */
if (thread->reply_fd) release_object( thread->reply_fd );
if (thread->wait_fd) release_object( thread->wait_fd );
free( thread->suspend_context );
- free_msg_queue( thread );
cleanup_clipboard_thread(thread);
destroy_thread_windows( thread );
+ free_msg_queue( thread );
close_thread_desktop( thread );
for (i = 0; i < MAX_INFLIGHT_FDS; i++)
{
struct thread *thread = (struct thread *)obj;
assert( obj->ops == &thread_ops );
- fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d teb=%p state=%d\n",
- thread->id, thread->unix_pid, thread->unix_tid, thread->teb, thread->state );
+ fprintf( stderr, "Thread id=%04x unix pid=%d unix tid=%d state=%d\n",
+ thread->id, thread->unix_pid, thread->unix_tid, thread->state );
}
static int thread_signaled( struct object *obj, struct thread *thread )
return NULL;
}
+/* set the affinity of a thread: update the server-side bookkeeping and, */
+/* where sched_setaffinity() is available, apply it to the underlying */
+/* unix thread as well; returns 0 on success, non-zero on failure */
+int set_thread_affinity( struct thread *thread, affinity_t affinity )
+{
+    int ret = 0;
+#ifdef HAVE_SCHED_SETAFFINITY
+    if (thread->unix_tid != -1)  /* unix tid is -1 until the thread has initialized */
+    {
+        cpu_set_t set;
+        int i;
+        affinity_t mask;
+
+        CPU_ZERO( &set );
+        /* translate the affinity bitmask into a cpu_set_t, bit i -> cpu i */
+        for (i = 0, mask = 1; mask; i++, mask <<= 1)
+            if (affinity & mask) CPU_SET( i, &set );
+
+        ret = sched_setaffinity( thread->unix_tid, sizeof(set), &set );
+    }
+#endif
+    /* record the new mask only if the unix-level call (if any) succeeded */
+    if (!ret) thread->affinity = affinity;
+    return ret;
+}
+
#define THREAD_PRIORITY_REALTIME_HIGHEST 6
#define THREAD_PRIORITY_REALTIME_LOWEST -7
}
if (req->mask & SET_THREAD_INFO_AFFINITY)
{
- if (req->affinity != 1) set_error( STATUS_INVALID_PARAMETER );
- else thread->affinity = req->affinity;
+ if ((req->affinity & thread->process->affinity) != req->affinity)
+ set_error( STATUS_INVALID_PARAMETER );
+ else if (thread->state == TERMINATED)
+ set_error( STATUS_ACCESS_DENIED );
+ else if (set_thread_affinity( thread, req->affinity ))
+ file_set_error();
}
if (req->mask & SET_THREAD_INFO_TOKEN)
security_set_thread_token( thread, req->token );
}
/* send the wakeup signal to a thread */
-static int send_thread_wakeup( struct thread *thread, void *cookie, int signaled )
+static int send_thread_wakeup( struct thread *thread, client_ptr_t cookie, int signaled )
{
struct wake_up_reply reply;
int ret;
+ memset( &reply, 0, sizeof(reply) );
reply.cookie = cookie;
reply.signaled = signaled;
if ((ret = write( get_unix_fd( thread->wait_fd ), &reply, sizeof(reply) )) == sizeof(reply))
int wake_thread( struct thread *thread )
{
int signaled, count;
- void *cookie;
+ client_ptr_t cookie;
for (count = 0; thread->wait; count++)
{
if ((signaled = check_wait( thread )) == -1) break;
cookie = thread->wait->cookie;
- if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
- thread->id, signaled, cookie );
+ if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d\n", thread->id, signaled );
end_wait( thread );
if (send_thread_wakeup( thread, cookie, signaled ) == -1) /* error */
break;
{
struct thread_wait *wait = ptr;
struct thread *thread = wait->thread;
- void *cookie = wait->cookie;
+ client_ptr_t cookie = wait->cookie;
wait->user = NULL;
if (thread->wait != wait) return; /* not the top-level wait, ignore it */
if (thread->suspend + thread->process->suspend > 0) return; /* suspended, ignore it */
- if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=%d cookie=%p\n",
- thread->id, (int)STATUS_TIMEOUT, cookie );
+ if (debug_level) fprintf( stderr, "%04x: *wakeup* signaled=TIMEOUT\n", thread->id );
end_wait( thread );
if (send_thread_wakeup( thread, cookie, STATUS_TIMEOUT ) == -1) return;
/* check if other objects have become signaled in the meantime */
}
/* select on a list of handles */
-static timeout_t select_on( unsigned int count, void *cookie, const obj_handle_t *handles,
+static timeout_t select_on( unsigned int count, client_ptr_t cookie, const obj_handle_t *handles,
int flags, timeout_t timeout, obj_handle_t signal_obj )
{
int ret;
/* attempt to wake threads sleeping on the object wait queue */
void wake_up( struct object *obj, int max )
{
-    struct list *ptr, *next;
+    struct list *ptr;
-    LIST_FOR_EACH_SAFE( ptr, next, &obj->wait_queue )
+    LIST_FOR_EACH( ptr, &obj->wait_queue )
{
struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry );
-        if (wake_thread( entry->thread ))
-        {
-            if (max && !--max) break;
-        }
+        if (!wake_thread( entry->thread )) continue;
+        /* max == 0 means no limit on the number of threads to wake */
+        if (max && !--max) break;
+        /* restart at the head of the list since a wake up can change the object wait queue */
+        ptr = &obj->wait_queue;
}
}
if (thread->wait)
{
while (thread->wait) end_wait( thread );
- send_thread_wakeup( thread, NULL, STATUS_PENDING );
- /* if it is waiting on the socket, we don't need to send a SIGTERM */
+ send_thread_wakeup( thread, 0, STATUS_PENDING );
+ /* if it is waiting on the socket, we don't need to send a SIGQUIT */
violent_death = 0;
}
kill_console_processes( thread, 0 );
debug_exit_thread( thread );
abandon_mutexes( thread );
wake_up( &thread->obj, 0 );
- if (violent_death) send_thread_signal( thread, SIGTERM );
+ if (violent_death) send_thread_signal( thread, SIGQUIT );
cleanup_thread( thread );
remove_process_thread( thread->process, thread );
release_object( thread );
}
+/* copy parts of a context structure */
+/* only the register groups selected by flags are copied; both contexts */
+/* must be for the same cpu type */
+static void copy_context( context_t *to, const context_t *from, unsigned int flags )
+{
+    assert( to->cpu == from->cpu );
+    to->flags |= flags;  /* mark the copied groups as valid in the destination */
+    if (flags & SERVER_CTX_CONTROL) to->ctl = from->ctl;
+    if (flags & SERVER_CTX_INTEGER) to->integer = from->integer;
+    if (flags & SERVER_CTX_SEGMENTS) to->seg = from->seg;
+    if (flags & SERVER_CTX_FLOATING_POINT) to->fp = from->fp;
+    if (flags & SERVER_CTX_DEBUG_REGISTERS) to->debug = from->debug;
+    if (flags & SERVER_CTX_EXTENDED_REGISTERS) to->ext = from->ext;
+}
+
+/* return the context flags that correspond to system regs */
+/* (system regs are the ones we can't access on the client side) */
+static unsigned int get_context_system_regs( enum cpu_type cpu )
+{
+    switch (cpu)
+    {
+    case CPU_x86: return SERVER_CTX_DEBUG_REGISTERS;
+    case CPU_x86_64: return SERVER_CTX_DEBUG_REGISTERS;
+    case CPU_ALPHA: return 0;
+    case CPU_POWERPC: return 0;
+    case CPU_ARM: return 0;
+    case CPU_SPARC: return 0;
+    }
+    /* fallback for any cpu value not covered above: no system regs */
+    return 0;
+}
+
/* trigger a breakpoint event in a given thread */
void break_thread( struct thread *thread )
{
-    struct debug_event_exception data;
+    debug_event_t data;
assert( thread->context );
-    data.record.ExceptionCode = STATUS_BREAKPOINT;
-    data.record.ExceptionFlags = EXCEPTION_CONTINUABLE;
-    data.record.ExceptionRecord = NULL;
-    data.record.ExceptionAddress = get_context_ip( thread->context );
-    data.record.NumberParameters = 0;
-    data.first = 1;
+    memset( &data, 0, sizeof(data) );
+    data.exception.first = 1;
+    data.exception.exc_code = STATUS_BREAKPOINT;
+    data.exception.flags = EXCEPTION_CONTINUABLE;
+    /* the exception address is the current instruction pointer, whose */
+    /* location in the context depends on the cpu type */
+    switch (thread->context->cpu)
+    {
+    case CPU_x86:
+        data.exception.address = thread->context->ctl.i386_regs.eip;
+        break;
+    case CPU_x86_64:
+        data.exception.address = thread->context->ctl.x86_64_regs.rip;
+        break;
+    case CPU_ALPHA:
+        data.exception.address = thread->context->ctl.alpha_regs.fir;
+        break;
+    case CPU_POWERPC:
+        data.exception.address = thread->context->ctl.powerpc_regs.iar;
+        break;
+    case CPU_SPARC:
+        data.exception.address = thread->context->ctl.sparc_regs.pc;
+        break;
+    case CPU_ARM:
+        data.exception.address = thread->context->ctl.arm_regs.pc;
+        break;
+    }
generate_debug_event( thread, EXCEPTION_DEBUG_EVENT, &data );
thread->debug_break = 0;
}
/* initialize a new thread */
DECL_HANDLER(init_thread)
{
+ unsigned int prefix_cpu_mask = get_prefix_cpu_mask();
struct process *process = current->process;
- int reply_fd = thread_get_inflight_fd( current, req->reply_fd );
- int wait_fd = thread_get_inflight_fd( current, req->wait_fd );
+ int wait_fd, reply_fd;
+
+ if ((reply_fd = thread_get_inflight_fd( current, req->reply_fd )) == -1)
+ {
+ set_error( STATUS_TOO_MANY_OPENED_FILES );
+ return;
+ }
+ if ((wait_fd = thread_get_inflight_fd( current, req->wait_fd )) == -1)
+ {
+ set_error( STATUS_TOO_MANY_OPENED_FILES );
+ goto error;
+ }
if (current->reply_fd) /* already initialised */
{
goto error;
}
- if (reply_fd == -1 || fcntl( reply_fd, F_SETFL, O_NONBLOCK ) == -1) goto error;
+ if (fcntl( reply_fd, F_SETFL, O_NONBLOCK ) == -1) goto error;
current->reply_fd = create_anonymous_fd( &thread_fd_ops, reply_fd, ¤t->obj, 0 );
- reply_fd = -1;
- if (!current->reply_fd) goto error;
+ current->wait_fd = create_anonymous_fd( &thread_fd_ops, wait_fd, ¤t->obj, 0 );
+ if (!current->reply_fd || !current->wait_fd) return;
- if (wait_fd == -1)
- {
- set_error( STATUS_TOO_MANY_OPENED_FILES ); /* most likely reason */
- return;
- }
- if (!(current->wait_fd = create_anonymous_fd( &thread_fd_ops, wait_fd, ¤t->obj, 0 )))
- return;
-
- if (!is_valid_address(req->teb) || !is_valid_address(req->peb) || !is_valid_address(req->ldt_copy))
+ if (!is_valid_address(req->teb))
{
set_error( STATUS_INVALID_PARAMETER );
return;
if (!process->peb) /* first thread, initialize the process too */
{
+ if (!CPU_FLAG(req->cpu) || !(supported_cpus & prefix_cpu_mask & CPU_FLAG(req->cpu)))
+ {
+ if (!(supported_cpus & CPU_64BIT_MASK))
+ set_error( STATUS_NOT_SUPPORTED );
+ else
+ set_error( STATUS_NOT_REGISTRY_FILE ); /* server supports it but not the prefix */
+ return;
+ }
process->unix_pid = current->unix_pid;
- process->peb = req->peb;
- process->ldt_copy = req->ldt_copy;
+ process->peb = req->entry;
+ process->cpu = req->cpu;
reply->info_size = init_process( current );
}
else
{
+ if (req->cpu != process->cpu)
+ {
+ set_error( STATUS_INVALID_PARAMETER );
+ return;
+ }
if (process->unix_pid != current->unix_pid)
process->unix_pid = -1; /* can happen with linuxthreads */
if (current->suspend + process->suspend > 0) stop_thread( current );
- generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, req->entry );
+ generate_debug_event( current, CREATE_THREAD_DEBUG_EVENT, &req->entry );
}
debug_level = max( debug_level, req->debug_level );
+ set_thread_affinity( current, current->affinity );
reply->pid = get_process_id( process );
reply->tid = get_thread_id( current );
reply->version = SERVER_PROTOCOL_VERSION;
reply->server_start = server_start_time;
+ reply->all_cpus = supported_cpus & prefix_cpu_mask;
return;
error:
if ((thread = get_thread_from_handle( req->handle, THREAD_SUSPEND_RESUME )))
{
- if (thread->state == TERMINATED) set_error( STATUS_ACCESS_DENIED );
- else reply->count = resume_thread( thread );
+ reply->count = resume_thread( thread );
release_object( thread );
}
}
}
else if (apc->result.type == APC_ASYNC_IO)
{
- if (apc->owner) async_set_result( apc->owner, apc->result.async_io.status );
+ if (apc->owner)
+ async_set_result( apc->owner, apc->result.async_io.status,
+ apc->result.async_io.total, apc->result.async_io.apc );
}
wake_up( &apc->obj, 0 );
close_handle( current->process, req->prev_apc );
{
case APC_NONE:
case APC_USER:
- thread = get_thread_from_handle( req->thread, THREAD_SET_CONTEXT );
+ thread = get_thread_from_handle( req->handle, THREAD_SET_CONTEXT );
break;
case APC_VIRTUAL_ALLOC:
case APC_VIRTUAL_FREE:
case APC_VIRTUAL_LOCK:
case APC_VIRTUAL_UNLOCK:
case APC_UNMAP_VIEW:
- process = get_process_from_handle( req->process, PROCESS_VM_OPERATION );
+ process = get_process_from_handle( req->handle, PROCESS_VM_OPERATION );
break;
case APC_VIRTUAL_QUERY:
- process = get_process_from_handle( req->process, PROCESS_QUERY_INFORMATION );
+ process = get_process_from_handle( req->handle, PROCESS_QUERY_INFORMATION );
break;
case APC_MAP_VIEW:
- process = get_process_from_handle( req->process, PROCESS_VM_OPERATION );
+ process = get_process_from_handle( req->handle, PROCESS_VM_OPERATION );
if (process && process != current->process)
{
/* duplicate the handle into the target process */
}
break;
case APC_CREATE_THREAD:
- process = get_process_from_handle( req->process, PROCESS_CREATE_THREAD );
+ process = get_process_from_handle( req->handle, PROCESS_CREATE_THREAD );
break;
default:
set_error( STATUS_INVALID_PARAMETER );
DECL_HANDLER(get_thread_context)
{
struct thread *thread;
- CONTEXT *context;
+ context_t *context;
- if (get_reply_max_size() < sizeof(CONTEXT))
+ if (get_reply_max_size() < sizeof(context_t))
{
set_error( STATUS_INVALID_PARAMETER );
return;
else
{
if (thread->context == thread->suspend_context) thread->context = NULL;
- set_reply_data_ptr( thread->suspend_context, sizeof(CONTEXT) );
+ set_reply_data_ptr( thread->suspend_context, sizeof(context_t) );
thread->suspend_context = NULL;
}
}
if (thread->state != RUNNING) set_error( STATUS_ACCESS_DENIED );
else set_error( STATUS_PENDING );
}
- else if ((context = set_reply_data_size( sizeof(CONTEXT) )))
+ else if ((context = set_reply_data_size( sizeof(context_t) )))
{
- unsigned int flags = get_context_system_regs( req->flags );
+ unsigned int flags = get_context_system_regs( thread->process->cpu );
- memset( context, 0, sizeof(CONTEXT) );
- context->ContextFlags = get_context_cpu_flag();
+ memset( context, 0, sizeof(context_t) );
+ context->cpu = thread->process->cpu;
if (thread->context) copy_context( context, thread->context, req->flags & ~flags );
if (flags) get_thread_context( thread, context, flags );
}
DECL_HANDLER(set_thread_context)
{
struct thread *thread;
+ const context_t *context = get_req_data();
- if (get_req_data_size() < sizeof(CONTEXT))
+ if (get_req_data_size() < sizeof(context_t))
{
set_error( STATUS_INVALID_PARAMETER );
return;
if (req->suspend)
{
- if (thread != current || thread->context)
+ if (thread != current || thread->context || context->cpu != thread->process->cpu)
{
/* nested suspend or exception, shouldn't happen */
set_error( STATUS_INVALID_PARAMETER );
}
- else if ((thread->suspend_context = mem_alloc( sizeof(CONTEXT) )))
+ else if ((thread->suspend_context = mem_alloc( sizeof(context_t) )))
{
- memcpy( thread->suspend_context, get_req_data(), sizeof(CONTEXT) );
+ memcpy( thread->suspend_context, get_req_data(), sizeof(context_t) );
thread->context = thread->suspend_context;
if (thread->debug_break) break_thread( thread );
}
if (thread->state != RUNNING) set_error( STATUS_ACCESS_DENIED );
else set_error( STATUS_PENDING );
}
- else
+ else if (context->cpu == thread->process->cpu)
{
- const CONTEXT *context = get_req_data();
- unsigned int flags = get_context_system_regs( req->flags );
+ unsigned int system_flags = get_context_system_regs(context->cpu) & context->flags;
+ unsigned int client_flags = context->flags & ~system_flags;
- if (flags) set_thread_context( thread, context, flags );
- if (thread->context && !get_error())
- copy_context( thread->context, context, req->flags & ~flags );
+ if (system_flags) set_thread_context( thread, context, system_flags );
+ if (thread->context && !get_error()) copy_context( thread->context, context, client_flags );
}
+ else set_error( STATUS_INVALID_PARAMETER );
+
reply->self = (thread == current);
release_object( thread );
}