#ifdef LISP_FEATURE_SB_THREAD
#include <architecture/i386/table.h>
#include <i386/user_ldt.h>
#include <mach/mach_init.h>
#include "interrupt.h"
#include "x86-darwin-os.h"
#include "genesis/fdefn.h"
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/vm_region.h>
#include <mach/machine/thread_state.h>
#include <mach/machine/thread_status.h>
#include <sys/_types.h>
#include <sys/ucontext.h>
#ifdef LISP_FEATURE_SB_THREAD
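/* Serializes LDT updates: both thread setup and teardown call
 * i386_set_ldt() under this lock. */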
pthread_mutex_t modify_ldt_lock = PTHREAD_MUTEX_INITIALIZER;
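/* Helpers for filling in an i386 data-segment descriptor, whose base
 * address and limit are split across several bit-fields. */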
void set_data_desc_size(data_desc_t* desc, unsigned long size)
{
    desc->limit00 = (size - 1) & 0xffff;
    desc->limit16 = ((size - 1) >> 16) & 0xf;
}

void set_data_desc_addr(data_desc_t* desc, void* addr)
{
    desc->base00 = (unsigned int)addr & 0xffff;
    desc->base16 = ((unsigned int)addr & 0xff0000) >> 16;
    desc->base24 = ((unsigned int)addr & 0xff000000) >> 24;
}
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
kern_return_t mach_thread_init(mach_port_t thread_exception_port);

int arch_os_thread_init(struct thread *thread) {
#ifdef LISP_FEATURE_SB_THREAD
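    /* Give this thread its own LDT data segment, covering the thread
     * structure and its dynamic (thread-local) symbol values, so that
     * they can be addressed relative to %fs. */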
    data_desc_t ldt_entry = { 0, 0, 0, DESC_DATA_WRITE,
                              3, 1, 0, DESC_DATA_32B, DESC_GRAN_BYTE, 0 };

    set_data_desc_addr(&ldt_entry, thread);
    set_data_desc_size(&ldt_entry, dynamic_values_bytes);
    thread_mutex_lock(&modify_ldt_lock);
    n = i386_set_ldt(LDT_AUTO_ALLOC, (union ldt_entry*) &ldt_entry, 1);
        perror("i386_set_ldt");
        lose("unexpected i386_set_ldt(..) failure\n");
    thread_mutex_unlock(&modify_ldt_lock);

    FSHOW_SIGNAL((stderr, "/ TLS: Allocated LDT %x\n", n));
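    /* Install the selector for the freshly allocated LDT entry in %fs. */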
    __asm__ __volatile__ ("mov %0, %%fs" : : "r"(sel));

    pthread_setspecific(specials, thread);
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    mach_thread_init(THREAD_STRUCT_TO_EXCEPTION_PORT(thread));
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    stack_t sigstack;

    /* Signal handlers are run on the control stack, so if it is exhausted
     * we had better use an alternate stack for whatever signal tells us
     * we've exhausted it. */
    sigstack.ss_sp = ((void *) thread) + dynamic_values_bytes;
    sigstack.ss_size = 32 * SIGSTKSZ;
    sigaltstack(&sigstack, 0);
    return 1; /* success */
int arch_os_thread_cleanup(struct thread *thread) {
#if defined(LISP_FEATURE_SB_THREAD)
    int n = thread->tls_cookie;

    /* Set the %%fs register back to 0 and free the LDT entry by passing
     * NULL to i386_set_ldt(). */
    FSHOW_SIGNAL((stderr, "/ TLS: Freeing LDT %x\n", n));

    __asm__ __volatile__ ("mov %0, %%fs" : : "r"(0));
    thread_mutex_lock(&modify_ldt_lock);
    i386_set_ldt(n, NULL, 1);
    thread_mutex_unlock(&modify_ldt_lock);

    return 1; /* success */
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER

void sigill_handler(int signal, siginfo_t *siginfo, void *void_context);
void sigtrap_handler(int signal, siginfo_t *siginfo, void *void_context);
void memory_fault_handler(int signal, siginfo_t *siginfo, void *void_context);
/* exc_server handles mach exception messages from the kernel and
 * calls catch_exception_raise. We use the system-provided
 * mach_msg_server, which, I assume, calls exc_server in a loop. */
extern boolean_t exc_server();
/* This executes in the faulting thread as part of the signal
 * emulation. It is passed a context with the uc_mcontext field
 * pointing to a valid block of memory. */
void build_fake_signal_context(struct ucontext *context,
                               x86_thread_state32_t *thread_state,
                               x86_float_state32_t *float_state) {
    pthread_sigmask(0, NULL, &context->uc_sigmask);
    context->uc_mcontext->ss = *thread_state;
    context->uc_mcontext->fs = *float_state;
}
/* This executes in the faulting thread as part of the signal
 * emulation. It is effectively the inverse operation of the above. */
void update_thread_state_from_context(x86_thread_state32_t *thread_state,
                                      x86_float_state32_t *float_state,
                                      struct ucontext *context) {
    *thread_state = context->uc_mcontext->ss;
    *float_state = context->uc_mcontext->fs;
    pthread_sigmask(SIG_SETMASK, &context->uc_sigmask, NULL);
}
/* Modify a context to push new data on its stack. */
void push_context(u32 data, x86_thread_state32_t *context)
{
    u32 *stack_pointer;

    stack_pointer = (u32*) context->esp;
    *(--stack_pointer) = data;
    context->esp = (unsigned int) stack_pointer;
}
void align_context_stack(x86_thread_state32_t *context)
{
    /* 16-byte align the stack (provided that the stack is, as it
     * should be, 4-byte aligned). */
    while (context->esp & 15) push_context(0, context);
}
/* Stack allocation starts with a context that has a mod-4 ESP value
 * and needs to leave a context with a mod-16 ESP that will restore
 * the old ESP value and other register state when activated. The
 * first part of this is the recovery trampoline, which loads ESP from
 * EBP, pops EBP, and returns. */
asm("_stack_allocation_recover: movl %ebp, %esp; popl %ebp; ret;");
void open_stack_allocation(x86_thread_state32_t *context)
{
    void stack_allocation_recover(void);

    push_context(context->eip, context);
    push_context(context->ebp, context);
    context->ebp = context->esp;
    context->eip = (unsigned int) stack_allocation_recover;

    align_context_stack(context);
}
/* Stack allocation of data starts with a context with a mod-16 ESP
 * value and reserves some space on it by manipulating the ESP
 * value. */
void *stack_allocate(x86_thread_state32_t *context, size_t size)
{
    /* round up size to a 16-byte multiple */
    size = (size + 15) & -16;

    context->esp = ((u32)context->esp) - size;

    return (void *)context->esp;
}
/* Arranging to invoke a C function is tricky, as we have to assume
 * cdecl calling conventions (caller removes args) and x86/darwin
 * alignment requirements. The simplest way to arrange this,
 * actually, is to open a new stack allocation.
 * WARNING!!! THIS DOES NOT PRESERVE REGISTERS! */
void call_c_function_in_context(x86_thread_state32_t *context,
    /* Set up to restore stack on exit. */
    open_stack_allocation(context);

    /* Have to keep stack 16-byte aligned on x86/darwin. */
    for (i = (3 & -nargs); i; i--) {
        push_context(0, context);
    }
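    /* Make room on the stack for the nargs argument words. */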
    context->esp = ((u32)context->esp) - nargs * 4;
    stack_pointer = (u32 *)context->esp;

    for (i = 0; i < nargs; i++) {
        //push_context(va_arg(ap, u32), context);
        stack_pointer[i] = va_arg(ap, u32);
    }
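    /* Push the current EIP (the recovery trampoline installed by
     * open_stack_allocation) as the return address, then point EIP at the
     * C function being called. */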
    push_context(context->eip, context);
    context->eip = (unsigned int) function;
}
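
/* Runs on the faulting thread, on a stack area set up by
 * call_handler_on_thread: builds a BSD-style signal context from the saved
 * Mach thread state, invokes the ordinary signal handler on it, copies any
 * changes back, and finally traps so that the state can be restored. */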
void signal_emulation_wrapper(x86_thread_state32_t *thread_state,
                              x86_float_state32_t *float_state,
                              int signal,
                              siginfo_t *siginfo,
                              void (*handler)(int, siginfo_t *, void *))
    /* CLH: FIXME **NOTE: HACK ALERT!** Ideally, we would allocate
     * context and regs on the stack as local variables, but this
     * causes problems for the lisp debugger. When it walks the stack
     * for a back trace, it sees 1) the address of the local variable
     * on the stack and thinks that is a frame pointer to a lisp
     * frame, and 2) the address of the sap that we alloc'ed in
     * dynamic space and thinks that is a return address, so it,
     * heuristically (and wrongly), chooses that this should be
     * interpreted as a lisp frame instead of as a C frame.
     * We can work around this in this case by os_validating the
     * context (and regs just for symmetry). */
    struct ucontext *context;
    struct mcontext *regs;

    context = (struct ucontext*) os_validate(0, sizeof(struct ucontext));
    regs = (struct mcontext*) os_validate(0, sizeof(struct mcontext));
    context->uc_mcontext = regs;
    /* When BSD signals are fired, they mask the signals in sa_mask
     * (which always seems to be the blockable_sigset, for us), so we:
     * 1) save the current sigmask
     * 2) block blockable signals
     * 3) call the signal handler
     * 4) restore the sigmask */
    build_fake_signal_context(context, thread_state, float_state);

    block_blockable_signals();

    handler(signal, siginfo, context);

    update_thread_state_from_context(thread_state, float_state, context);

    os_invalidate((os_vm_address_t)context, sizeof(struct ucontext));
    os_invalidate((os_vm_address_t)regs, sizeof(struct mcontext));
    /* Trap to restore the signal context: UD2 (0x0f 0x0b) followed by the
     * 0xffff marker that catch_exception_raise looks for, with the saved
     * states passed in EAX/EBX. */
    asm volatile ("movl %0, %%eax; movl %1, %%ebx; .long 0xffff0b0f"
                  : : "r" (thread_state), "r" (float_state));
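    /* Not reached: catch_exception_raise recognizes the 0xffff marker,
     * restores the states saved in EAX/EBX, and resumes the interrupted
     * code. */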

/* Convenience wrapper for the above */
void call_handler_on_thread(mach_port_t thread,
                            x86_thread_state32_t *thread_state,
                            int signal,
                            siginfo_t *siginfo,
                            void (*handler)(int, siginfo_t *, void *))
    x86_thread_state32_t new_state;
    x86_thread_state32_t *save_thread_state;
    x86_float_state32_t *save_float_state;
    mach_msg_type_number_t state_count;
    siginfo_t *save_siginfo;
    /* Initialize the new state */
    new_state = *thread_state;
    open_stack_allocation(&new_state);

    save_thread_state = (x86_thread_state32_t *)stack_allocate(&new_state, sizeof(*save_thread_state));
    *save_thread_state = *thread_state;

    /* Save float state */
    save_float_state = (x86_float_state32_t *)stack_allocate(&new_state, sizeof(*save_float_state));
    state_count = x86_FLOAT_STATE32_COUNT;
    if ((ret = thread_get_state(thread,
                                x86_FLOAT_STATE32,
                                (thread_state_t)save_float_state,
                                &state_count)) != KERN_SUCCESS)
        lose("thread_get_state (x86_FLOAT_STATE32) failed %d\n", ret);

    save_siginfo = stack_allocate(&new_state, sizeof(*siginfo));
    if (siginfo == NULL)
        save_siginfo = siginfo;
    else
        *save_siginfo = *siginfo;
    /* Prepare to call */
    call_c_function_in_context(&new_state,
                               signal_emulation_wrapper,
    /* Update the thread state */
    state_count = x86_THREAD_STATE32_COUNT;
    if ((ret = thread_set_state(thread,
                                x86_THREAD_STATE32,
                                (thread_state_t)&new_state,
                                state_count)) != KERN_SUCCESS)
        lose("thread_set_state (x86_THREAD_STATE32) failed %d\n", ret);
#if defined DUMP_CONTEXT
void dump_context(x86_thread_state32_t *context)
    printf("eax: %08lx ecx: %08lx edx: %08lx ebx: %08lx\n",
           context->eax, context->ecx, context->edx, context->ebx);
    printf("esp: %08lx ebp: %08lx esi: %08lx edi: %08lx\n",
           context->esp, context->ebp, context->esi, context->edi);
    printf("eip: %08lx eflags: %08lx\n",
           context->eip, context->eflags);
    printf("cs: %04hx ds: %04hx es: %04hx "
           "ss: %04hx fs: %04hx gs: %04hx\n",
           context->cs, context->ds, context->es,
           context->ss, context->fs, context->gs);

    stack_pointer = (u32 *)context->esp;
    for (i = 0; i < 48; i+=4) {
        printf("%08x: %08x %08x %08x %08x\n",
               context->esp + (i * 4),
void
control_stack_exhausted_handler(int signal, siginfo_t *siginfo, void *void_context) {
    os_context_t *context = arch_os_get_context(&void_context);

    arrange_return_to_lisp_function
        (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
}

void
undefined_alien_handler(int signal, siginfo_t *siginfo, void *void_context) {
    os_context_t *context = arch_os_get_context(&void_context);

    arrange_return_to_lisp_function
        (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
}
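
/* Runs on the exception-handling thread (called by exc_server): maps a
 * Mach exception on a Lisp thread to the appropriate BSD-style signal
 * handler and arranges for it to run on the faulting thread. */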
catch_exception_raise(mach_port_t exception_port,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
    struct thread *th = (struct thread*) exception_port;
    x86_thread_state32_t thread_state;
    mach_msg_type_number_t state_count;
    vm_address_t region_addr;
    vm_size_t region_size;
    vm_region_basic_info_data_t region_info;
    mach_msg_type_number_t info_count;
    mach_port_t region_name;

    void (*handler)(int, siginfo_t *, void *) = NULL;
    /* Get state and info */
    state_count = x86_THREAD_STATE32_COUNT;
    if ((ret = thread_get_state(thread,
                                x86_THREAD_STATE32,
                                (thread_state_t)&thread_state,
                                &state_count)) != KERN_SUCCESS)
        lose("thread_get_state (x86_THREAD_STATE32) failed %d\n", ret);
        /* Check if write protection fault */
        if ((code_vector[0] & OS_VM_PROT_ALL) == 0) {
            ret = KERN_INVALID_RIGHT;

        /* Get vm_region info */
        region_addr = (vm_address_t)code_vector[1];
        info_count = VM_REGION_BASIC_INFO_COUNT;
        if ((ret = vm_region(mach_task_self(),
                             VM_REGION_BASIC_INFO,
                             (vm_region_info_t)&region_info,
            lose("vm_region (VM_REGION_BASIC_INFO) failed %d\n", ret);
        /* Check if still protected */
        if ((region_info.protection & OS_VM_PROT_ALL) == 0) {
            /* If two threads fault on the same page, the protection
             * is cleared as the first thread runs memory_fault_handler.
             * Grep for "not marked as write-protected" in gencgc.c. */
        addr = (void*)code_vector[1];

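        /* Control stack guard page hit: unprotect it, arm the return guard
         * page, and report a control-stack-exhausted error to Lisp. */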
        if (os_trunc_to_page(addr) == CONTROL_STACK_GUARD_PAGE(th)) {
            protect_control_stack_guard_page_thread(0, th);
            protect_control_stack_return_guard_page_thread(1, th);
            handler = control_stack_exhausted_handler;
        /* Return from stack guard */
        if (os_trunc_to_page(addr) == CONTROL_STACK_RETURN_GUARD_PAGE(th)) {
            protect_control_stack_guard_page_thread(1, th);
            protect_control_stack_return_guard_page_thread(0, th);
        /* Undefined alien */
        if (os_trunc_to_page(addr) == undefined_alien_address) {
            handler = undefined_alien_handler;

        /* Regular memory fault */
        handler = memory_fault_handler;
    case EXC_BAD_INSTRUCTION:

        /* Check if illegal instruction trap */
        if (code_vector[0] != EXC_I386_INVOP) {
            ret = KERN_INVALID_RIGHT;

        /* Check if UD2 instruction */
        if (*(unsigned short *)thread_state.eip != 0x0b0f) {
            /* KLUDGE: There are two ways we could get here:
             * 1) We're executing data and we've hit some truly
             *    illegal opcode, of which there are a few, see
             *    Intel 64 and IA-32 Architectures
             *    Software Developer's Manual
             *    Volume 3A page 5-34)
             * 2) The kernel started an unrelated signal handler
             *    before we got a chance to run. The context that
             *    caused the exception is saved in a stack frame
             *    somewhere down below.
             * In either case we rely on the exception to retrigger,
             * eventually bailing out if we're spinning on case 2). */
            static mach_port_t last_thread;
            static unsigned int last_eip;
            if (last_thread == thread && last_eip == thread_state.eip)
                ret = KERN_INVALID_RIGHT;

            last_thread = thread;
            last_eip = thread_state.eip;
        /* Skip the trap code */
        thread_state.eip += 2;
        /* Return from handler? */
        if (*(unsigned short *)thread_state.eip == 0xffff) {
            if ((ret = thread_set_state(thread,
                                        x86_THREAD_STATE32,
                                        (thread_state_t)thread_state.eax,
                                        x86_THREAD_STATE32_COUNT)) != KERN_SUCCESS)
                lose("thread_set_state (x86_THREAD_STATE32) failed %d\n", ret);
            if ((ret = thread_set_state(thread,
                                        x86_FLOAT_STATE32,
                                        (thread_state_t)thread_state.ebx,
                                        x86_FLOAT_STATE32_COUNT)) != KERN_SUCCESS)
                lose("thread_set_state (x86_FLOAT_STATE32) failed %d\n", ret);
        handler = sigtrap_handler;

        ret = KERN_INVALID_RIGHT;
    siginfo.si_signo = signal;
    siginfo.si_addr = addr;
    call_handler_on_thread(thread, &thread_state, signal, &siginfo, handler);
mach_exception_handler(void *port)
    mach_msg_server(exc_server, 2048, (mach_port_t) port, 0);
    /* mach_msg_server should never return, but it should dispatch mach
     * exceptions to our catch_exception_raise function. */
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER

/* Sets up the thread that will listen for mach exceptions. Note that
 * the exception handlers will be run on this thread. This is
 * different from the BSD-style signal handling situation in which the
 * signal handlers run in the relevant thread directly. */

mach_port_t mach_exception_handler_port_set = MACH_PORT_NULL;
setup_mach_exception_handling_thread()
    pthread_t mach_exception_handling_thread = NULL;

    /* allocate a mach_port for this process */
    ret = mach_port_allocate(mach_task_self(),
                             MACH_PORT_RIGHT_PORT_SET,
                             &mach_exception_handler_port_set);

    /* create the thread that will receive the mach exceptions */

    FSHOW((stderr, "Creating mach_exception_handler thread!\n"));

    pthread_attr_init(&attr);
    pthread_create(&mach_exception_handling_thread,
                   &attr,
                   mach_exception_handler,
                   (void*) mach_exception_handler_port_set);
    pthread_attr_destroy(&attr);

    return mach_exception_handling_thread;
/* tell the kernel that we want EXC_BAD_ACCESS exceptions sent to the
 * exception port (which is being listened to by the mach
 * exception handling thread). */
kern_return_t
mach_thread_init(mach_port_t thread_exception_port)
    /* allocate a named port for the thread */

    FSHOW((stderr, "Allocating mach port %x\n", thread_exception_port));

    ret = mach_port_allocate_name(mach_task_self(),
                                  MACH_PORT_RIGHT_RECEIVE,
                                  thread_exception_port);
        lose("mach_port_allocate_name failed with return_code %d\n", ret);
    /* establish the right for the thread_exception_port to send messages */
    ret = mach_port_insert_right(mach_task_self(),
                                 thread_exception_port,
                                 thread_exception_port,
                                 MACH_MSG_TYPE_MAKE_SEND);
        lose("mach_port_insert_right failed with return_code %d\n", ret);
    ret = thread_set_exception_ports(mach_thread_self(),
                                     EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
                                     thread_exception_port,
        lose("thread_set_exception_ports failed with return_code %d\n", ret);
    ret = mach_port_move_member(mach_task_self(),
                                thread_exception_port,
                                mach_exception_handler_port_set);
        lose("mach_port_move_member failed with return_code %d\n", ret);
setup_mach_exceptions() {
    setup_mach_exception_handling_thread();
    mach_thread_init(THREAD_STRUCT_TO_EXCEPTION_PORT(all_threads));

    setup_mach_exceptions();