/*
 * Server-side debugger support using Mach primitives
 *
 * Copyright (C) 1999, 2006 Alexandre Julliard
 * Copyright (C) 2006 Ken Thomases for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "winternl.h"

#include "file.h"
#include "process.h"
#include "thread.h"
#include "request.h"
#include "wine/library.h"

#ifdef USE_MACH

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_act.h>
#include <servers/bootstrap.h>

#if defined(__APPLE__) && defined(__i386__)
extern int pthread_kill_syscall( mach_port_t, int );
__ASM_GLOBAL_FUNC( pthread_kill_syscall,
                   "movl $328,%eax\n\t"  /* SYS___pthread_kill */
                   "int $0x80\n\t"
                   "jae 1f\n\t"
                   "negl %eax\n"
                   "1:\tret" )
#else
static inline int pthread_kill_syscall( mach_port_t msg_port, int signal )
{
    return -ENOSYS;
}
#endif

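/* receive port created by init_tracing_mechanism() and registered with the
 * bootstrap server; client processes send their task ports to it */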
static mach_port_t server_mach_port;

void sigchld_callback(void)
{
    assert(0);  /* should never be called on MacOS */
}

/* map a Mach kern_return_t to an NTSTATUS error */
static void mach_set_error(kern_return_t mach_error)
{
    switch (mach_error)
    {
        case KERN_SUCCESS:            break;
        case KERN_INVALID_ARGUMENT:   set_error(STATUS_INVALID_PARAMETER); break;
        case KERN_NO_SPACE:           set_error(STATUS_NO_MEMORY); break;
        case KERN_PROTECTION_FAILURE: set_error(STATUS_ACCESS_DENIED); break;
        case KERN_INVALID_ADDRESS:    set_error(STATUS_ACCESS_VIOLATION); break;
        default:                      set_error(STATUS_UNSUCCESSFUL); break;
    }
}

static mach_port_t get_process_port( struct process *process )
{
    return process->trace_data;  /* the task port is stored in trace_data */
}

/* initialize the process control mechanism */
void init_tracing_mechanism(void)
{
    mach_port_t bp;

    if (task_get_bootstrap_port(mach_task_self(), &bp) != KERN_SUCCESS)
        fatal_error("Can't find bootstrap port\n");
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't allocate port\n");
    if (mach_port_insert_right( mach_task_self(),
                                server_mach_port,
                                server_mach_port,
                                MACH_MSG_TYPE_MAKE_SEND ) != KERN_SUCCESS)
        fatal_error("Error inserting rights\n");
    if (bootstrap_register(bp, (char*)wine_get_server_dir(), server_mach_port) != KERN_SUCCESS)
        fatal_error("Can't check in server_mach_port\n");
    mach_port_deallocate(mach_task_self(), bp);
}

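/*
 * Each newly started client process looks up the port registered above and
 * sends a Mach message carrying a send right to its own task port.  Those
 * messages are drained in init_process_tracing() and each task port is
 * attached to the matching process, keyed on the Unix pid reported by
 * pid_for_task().
 */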
/* initialize the per-process tracing mechanism */
void init_process_tracing( struct process *process )
{
    int pid, ret;
    struct
    {
        mach_msg_header_t           header;
        mach_msg_body_t             body;
        mach_msg_port_descriptor_t  task_port;
        mach_msg_trailer_t          trailer; /* only present on receive */
    } msg;

    for (;;)
    {
        ret = mach_msg( &msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, sizeof(msg),
                        server_mach_port, 0, 0 );
        if (ret)
        {
            if (ret != MACH_RCV_TIMED_OUT && debug_level)
                fprintf( stderr, "warning: mach port receive failed with %x\n", ret );
            return;  /* no more queued messages */
        }

        /* if anything in the message is invalid, ignore it */
        if (msg.header.msgh_size != offsetof(typeof(msg), trailer)) continue;
        if (msg.body.msgh_descriptor_count != 1) continue;
        if (msg.task_port.type != MACH_MSG_PORT_DESCRIPTOR) continue;
        if (msg.task_port.disposition != MACH_MSG_TYPE_PORT_SEND) continue;
        if (msg.task_port.name == MACH_PORT_NULL) continue;
        if (msg.task_port.name == MACH_PORT_DEAD) continue;

        if (!pid_for_task( msg.task_port.name, &pid ))
        {
            struct thread *thread = get_thread_from_pid( pid );

            if (thread && !thread->process->trace_data)
                thread->process->trace_data = msg.task_port.name;
            else
                mach_port_deallocate( mach_task_self(), msg.task_port.name );
        }
    }
}

/* terminate the per-process tracing mechanism */
void finish_process_tracing( struct process *process )
{
    if (process->trace_data)
    {
        mach_port_deallocate( mach_task_self(), process->trace_data );
        process->trace_data = 0;
    }
}

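/*
 * Only the debug registers are transferred by the two functions below; all
 * other registers are handled on the client side.  Access to another thread's
 * debug registers goes through its Mach thread port, using thread_get_state()
 * and thread_set_state().
 */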
/* retrieve the thread x86 registers */
void get_thread_context( struct thread *thread, context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    /* extract a send right for the thread's port; unix_tid names it in the target task's port space */
    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    if (!thread_get_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, &count ))
    {
        /* work around silly renaming of struct members in OS X 10.5 */
#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
        context->debug.i386_regs.dr0 = state.__dr0;
        context->debug.i386_regs.dr1 = state.__dr1;
        context->debug.i386_regs.dr2 = state.__dr2;
        context->debug.i386_regs.dr3 = state.__dr3;
        context->debug.i386_regs.dr6 = state.__dr6;
        context->debug.i386_regs.dr7 = state.__dr7;
#else
        context->debug.i386_regs.dr0 = state.dr0;
        context->debug.i386_regs.dr1 = state.dr1;
        context->debug.i386_regs.dr2 = state.dr2;
        context->debug.i386_regs.dr3 = state.dr3;
        context->debug.i386_regs.dr6 = state.dr6;
        context->debug.i386_regs.dr7 = state.dr7;
#endif
        context->flags |= SERVER_CTX_DEBUG_REGISTERS;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

/* set the thread x86 registers */
void set_thread_context( struct thread *thread, const context_t *context, unsigned int flags )
{
#ifdef __i386__
    x86_debug_state32_t state;
    mach_msg_type_number_t count = sizeof(state) / sizeof(int);
    mach_msg_type_name_t type;
    mach_port_t port, process_port = get_process_port( thread->process );
    unsigned int dr7;

    /* all other regs are handled on the client side */
    assert( flags == SERVER_CTX_DEBUG_REGISTERS );

    if (thread->unix_pid == -1 || !process_port ||
        mach_port_extract_right( process_port, thread->unix_tid,
                                 MACH_MSG_TYPE_COPY_SEND, &port, &type ))
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }

    /* Mac OS doesn't allow setting the global breakpoint flags;
     * move the global enable bits (1,3,5,7) down onto the local ones (0,2,4,6) */
    dr7 = (context->debug.i386_regs.dr7 & ~0xaa) | ((context->debug.i386_regs.dr7 & 0xaa) >> 1);

#if __DARWIN_UNIX03 && defined(_STRUCT_X86_DEBUG_STATE32)
    state.__dr0 = context->debug.i386_regs.dr0;
    state.__dr1 = context->debug.i386_regs.dr1;
    state.__dr2 = context->debug.i386_regs.dr2;
    state.__dr3 = context->debug.i386_regs.dr3;
    state.__dr4 = 0;
    state.__dr5 = 0;
    state.__dr6 = context->debug.i386_regs.dr6;
    state.__dr7 = dr7;
#else
    state.dr0 = context->debug.i386_regs.dr0;
    state.dr1 = context->debug.i386_regs.dr1;
    state.dr2 = context->debug.i386_regs.dr2;
    state.dr3 = context->debug.i386_regs.dr3;
    state.dr4 = 0;
    state.dr5 = 0;
    state.dr6 = context->debug.i386_regs.dr6;
    state.dr7 = dr7;
#endif
    if (!thread_set_state( port, x86_DEBUG_STATE32, (thread_state_t)&state, count ))
    {
        if (thread->context)  /* update the cached values */
            thread->context->debug.i386_regs = context->debug.i386_regs;
    }
    mach_port_deallocate( mach_task_self(), port );
#endif
}

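/* send a signal to a specific thread of the process, addressing it by its
 * Mach thread port rather than by a Unix thread id */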
int send_thread_signal( struct thread *thread, int sig )
{
    int ret = -1;
    mach_port_t process_port = get_process_port( thread->process );

    if (thread->unix_pid != -1 && process_port)
    {
        mach_msg_type_name_t type;
        mach_port_t port;

        if (!mach_port_extract_right( process_port, thread->unix_tid,
                                      MACH_MSG_TYPE_COPY_SEND, &port, &type ))
        {
            if ((ret = pthread_kill_syscall( port, sig )) < 0)
            {
                errno = -ret;
                ret = -1;
            }
            mach_port_deallocate( mach_task_self(), port );
        }
        else errno = ESRCH;

        if (ret == -1 && errno == ESRCH) /* thread got killed */
        {
            thread->unix_pid = -1;
            thread->unix_tid = -1;
        }
    }
    if (debug_level && ret != -1)
        fprintf( stderr, "%04x: *sent signal* signal=%d\n", thread->id, sig );
    return (ret != -1);
}

/* read data from a process memory space */
int read_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, char *dest )
{
    kern_return_t ret;
    mach_msg_type_number_t bytes_read;
    vm_offset_t offset, data;
    vm_address_t aligned_address;
    vm_size_t aligned_size;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((vm_address_t)ptr != ptr)  /* the client address must fit in a vm_address_t */
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    /* round the request out to whole pages */
    offset = ptr % page_size;
    aligned_address = (vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        memcpy( dest, (char *)data + offset, size );
        vm_deallocate( mach_task_self(), data, bytes_read );
    }
    task_resume( process_port );
    return (ret == KERN_SUCCESS);
}

/* write data to a process memory space */
int write_process_memory( struct process *process, client_ptr_t ptr, data_size_t size, const char *src )
{
    kern_return_t ret;
    vm_address_t aligned_address, region_address;
    vm_size_t aligned_size, region_size;
    mach_msg_type_number_t info_size, bytes_read;
    vm_offset_t offset, task_mem = 0;
    struct vm_region_basic_info info;
    mach_port_t dummy;
    unsigned int page_size = get_page_size();
    mach_port_t process_port = get_process_port( process );

    if (!process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }
    if ((vm_address_t)ptr != ptr)
    {
        set_error( STATUS_ACCESS_DENIED );
        return 0;
    }

    /* round the request out to whole pages */
    offset = ptr % page_size;
    aligned_address = (vm_address_t)(ptr - offset);
    aligned_size = (size + offset + page_size - 1) / page_size * page_size;

    if ((ret = task_suspend( process_port )) != KERN_SUCCESS)
    {
        mach_set_error( ret );
        return 0;
    }

    /* read the affected pages, modify them locally, then write them back */
    ret = vm_read( process_port, aligned_address, aligned_size, &task_mem, &bytes_read );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* the write must fall entirely within a single VM region */
    region_address = aligned_address;
    info_size = sizeof(info);
    ret = vm_region( process_port, &region_address, &region_size, VM_REGION_BASIC_INFO,
                     (vm_region_info_t)&info, &info_size, &dummy );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }
    if (region_address > aligned_address ||
        region_address + region_size < aligned_address + aligned_size)
    {
        /* FIXME: should support multiple regions */
        set_error( STATUS_ACCESS_DENIED );
        goto failed;
    }

    /* make the pages writable; the original protection is restored below */
    ret = vm_protect( process_port, aligned_address, aligned_size, 0, VM_PROT_READ | VM_PROT_WRITE );
    if (ret != KERN_SUCCESS)
    {
        mach_set_error( ret );
        goto failed;
    }

    /* FIXME: there's an optimization that can be made: check first and last */
    /* pages for writability; read first and last pages; write interior */
    /* pages to task without ever reading&modifying them; if that succeeds, */
    /* modify first and last pages and write them. */

    memcpy( (char *)task_mem + offset, src, size );

    ret = vm_write( process_port, aligned_address, task_mem, bytes_read );
    if (ret != KERN_SUCCESS) mach_set_error( ret );
    else
    {
        vm_deallocate( mach_task_self(), task_mem, bytes_read );
        /* restore protection */
        vm_protect( process_port, aligned_address, aligned_size, 0, info.protection );
        task_resume( process_port );
        return 1;
    }

failed:
    if (task_mem) vm_deallocate( mach_task_self(), task_mem, bytes_read );
    task_resume( process_port );
    return 0;
}

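/* The client-side LDT copy at process->ldt_copy is laid out as three arrays
 * of 8192 entries each: the bases (ints), the limits (ints) and the flag
 * bytes, which is what the total_size computation below accounts for. */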
/* retrieve an LDT selector entry */
void get_selector_entry( struct thread *thread, int entry, unsigned int *base,
                         unsigned int *limit, unsigned char *flags )
{
    const unsigned int total_size = (2 * sizeof(int) + 1) * 8192;
    struct process *process = thread->process;
    unsigned int page_size = get_page_size();
    kern_return_t ret;
    vm_offset_t data;
    mach_msg_type_number_t bytes_read;
    mach_port_t process_port = get_process_port( thread->process );

    if (!process->ldt_copy || !process_port)
    {
        set_error( STATUS_ACCESS_DENIED );
        return;
    }
    if (entry >= 8192)
    {
        set_error( STATUS_INVALID_PARAMETER );  /* FIXME */
        return;
    }

    if ((ret = task_suspend( process_port )) == KERN_SUCCESS)
    {
        vm_offset_t offset = process->ldt_copy % page_size;
        vm_address_t aligned_address = (vm_address_t)(process->ldt_copy - offset);
        vm_size_t aligned_size = (total_size + offset + page_size - 1) / page_size * page_size;

        ret = vm_read( process_port, aligned_address, aligned_size, &data, &bytes_read );
        if (ret != KERN_SUCCESS) mach_set_error( ret );
        else
        {
            const int *ldt = (const int *)((char *)data + offset);
            memcpy( base, ldt + entry, sizeof(int) );
            memcpy( limit, ldt + entry + 8192, sizeof(int) );
            memcpy( flags, (char *)(ldt + 2 * 8192) + entry, 1 );
            vm_deallocate( mach_task_self(), data, bytes_read );
        }
        task_resume( process_port );
    }
    else mach_set_error( ret );
}

#endif  /* USE_MACH */