1.0.46.28: fix mach port leakage on x86 too
[sbcl.git] / src / runtime / x86-darwin-os.c
index 0c46dff..6205445 100644 (file)
@@ -1,5 +1,3 @@
-
-
 #ifdef LISP_FEATURE_SB_THREAD
 #include <architecture/i386/table.h>
 #include <i386/user_ldt.h>
@@ -25,6 +23,7 @@
 #include <pthread.h>
 #include <assert.h>
 #include <stdlib.h>
+#include <stdio.h>
 
 #ifdef LISP_FEATURE_SB_THREAD
 
@@ -116,53 +115,47 @@ int arch_os_thread_cleanup(struct thread *thread) {
 
 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
 
-void sigill_handler(int signal, siginfo_t *siginfo, void *void_context);
-void sigtrap_handler(int signal, siginfo_t *siginfo, void *void_context);
-void memory_fault_handler(int signal, siginfo_t *siginfo, void *void_context);
-
-/* exc_server handles mach exception messages from the kernel and
- * calls catch exception raise. We use the system-provided
- * mach_msg_server, which, I assume, calls exc_server in a loop.
- *
- */
-extern boolean_t exc_server();
+void sigill_handler(int signal, siginfo_t *siginfo, os_context_t *context);
+void sigtrap_handler(int signal, siginfo_t *siginfo, os_context_t *context);
+void memory_fault_handler(int signal, siginfo_t *siginfo,
+                          os_context_t *context);
 
 /* This executes in the faulting thread as part of the signal
  * emulation.  It is passed a context with the uc_mcontext field
  * pointing to a valid block of memory. */
-void build_fake_signal_context(struct ucontext *context,
+void build_fake_signal_context(os_context_t *context,
                                x86_thread_state32_t *thread_state,
                                x86_float_state32_t *float_state) {
     pthread_sigmask(0, NULL, &context->uc_sigmask);
-    context->uc_mcontext->ss = *thread_state;
-    context->uc_mcontext->fs = *float_state;
+    context->uc_mcontext->SS = *thread_state;
+    context->uc_mcontext->FS = *float_state;
 }
 
 /* This executes in the faulting thread as part of the signal
  * emulation.  It is effectively the inverse operation from above. */
 void update_thread_state_from_context(x86_thread_state32_t *thread_state,
                                       x86_float_state32_t *float_state,
-                                      struct ucontext *context) {
-    *thread_state = context->uc_mcontext->ss;
-    *float_state = context->uc_mcontext->fs;
+                                      os_context_t *context) {
+    *thread_state = context->uc_mcontext->SS;
+    *float_state = context->uc_mcontext->FS;
     pthread_sigmask(SIG_SETMASK, &context->uc_sigmask, NULL);
 }
 
 /* Modify a context to push new data on its stack. */
-void push_context(u32 data, x86_thread_state32_t *context)
+void push_context(u32 data, x86_thread_state32_t *thread_state)
 {
     u32 *stack_pointer;
 
-    stack_pointer = (u32*) context->esp;
+    stack_pointer = (u32*) thread_state->ESP;
     *(--stack_pointer) = data;
-    context->esp = (unsigned int) stack_pointer;
+    thread_state->ESP = (unsigned int) stack_pointer;
 }
 
-void align_context_stack(x86_thread_state32_t *context)
+void align_context_stack(x86_thread_state32_t *thread_state)
 {
     /* 16byte align the stack (provided that the stack is, as it
      * should be, 4byte aligned). */
-    while (context->esp & 15) push_context(0, context);
+    while (thread_state->ESP & 15) push_context(0, thread_state);
 }
 
 /* Stack allocation starts with a context that has a mod-4 ESP value
@@ -172,29 +165,29 @@ void align_context_stack(x86_thread_state32_t *context)
  * EBP, pops EBP, and returns. */
 asm("_stack_allocation_recover: movl %ebp, %esp; popl %ebp; ret;");
 
-void open_stack_allocation(x86_thread_state32_t *context)
+void open_stack_allocation(x86_thread_state32_t *thread_state)
 {
     void stack_allocation_recover(void);
 
-    push_context(context->eip, context);
-    push_context(context->ebp, context);
-    context->ebp = context->esp;
-    context->eip = (unsigned int) stack_allocation_recover;
+    push_context(thread_state->EIP, thread_state);
+    push_context(thread_state->EBP, thread_state);
+    thread_state->EBP = thread_state->ESP;
+    thread_state->EIP = (unsigned int) stack_allocation_recover;
 
-    align_context_stack(context);
+    align_context_stack(thread_state);
 }
 
 /* Stack allocation of data starts with a context with a mod-16 ESP
  * value and reserves some space on it by manipulating the ESP
  * register. */
-void *stack_allocate(x86_thread_state32_t *context, size_t size)
+void *stack_allocate(x86_thread_state32_t *thread_state, size_t size)
 {
     /* round up size to 16byte multiple */
     size = (size + 15) & -16;
 
-    context->esp = ((u32)context->esp) - size;
+    thread_state->ESP = ((u32)thread_state->ESP) - size;
 
-    return (void *)context->esp;
+    return (void *)thread_state->ESP;
 }
 
 /* Arranging to invoke a C function is tricky, as we have to assume
@@ -202,7 +195,7 @@ void *stack_allocate(x86_thread_state32_t *context, size_t size)
  * alignment requirements.  The simplest way to arrange this,
  * actually, is to open a new stack allocation.
  * WARNING!!! THIS DOES NOT PRESERVE REGISTERS! */
-void call_c_function_in_context(x86_thread_state32_t *context,
+void call_c_function_in_context(x86_thread_state32_t *thread_state,
                                 void *function,
                                 int nargs,
                                 ...)
@@ -212,25 +205,25 @@ void call_c_function_in_context(x86_thread_state32_t *context,
     u32 *stack_pointer;
 
     /* Set up to restore stack on exit. */
-    open_stack_allocation(context);
+    open_stack_allocation(thread_state);
 
     /* Have to keep stack 16byte aligned on x86/darwin. */
     for (i = (3 & -nargs); i; i--) {
-        push_context(0, context);
+        push_context(0, thread_state);
     }
 
-    context->esp = ((u32)context->esp) - nargs * 4;
-    stack_pointer = (u32 *)context->esp;
+    thread_state->ESP = ((u32)thread_state->ESP) - nargs * 4;
+    stack_pointer = (u32 *)thread_state->ESP;
 
     va_start(ap, nargs);
     for (i = 0; i < nargs; i++) {
-        //push_context(va_arg(ap, u32), context);
+        //push_context(va_arg(ap, u32), thread_state);
         stack_pointer[i] = va_arg(ap, u32);
     }
     va_end(ap);
 
-    push_context(context->eip, context);
-    context->eip = (unsigned int) function;
+    push_context(thread_state->EIP, thread_state);
+    thread_state->EIP = (unsigned int) function;
 }
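
Not part of the patch: as an illustration of how the stack helpers above compose, the sketch below injects a call into a copy of a suspended thread's state and installs it. The function my_handler and its single argument are hypothetical, and error handling is omitted.

/* Hypothetical sketch: make a suspended thread call my_handler(42) the
 * next time it runs, using only the helpers defined above. */
static void example_inject_call(mach_port_t thread,
                                x86_thread_state32_t *state,
                                void (*my_handler)(int))
{
    x86_thread_state32_t new_state = *state;   /* work on a copy */
    /* Save the original EIP/EBP and install the recovery trampoline. */
    open_stack_allocation(&new_state);
    /* Build a cdecl frame for my_handler(42) and point EIP at it. */
    call_c_function_in_context(&new_state, my_handler, 1, 42);
    /* Install the modified register state back into the thread. */
    thread_set_state(thread, x86_THREAD_STATE32,
                     (thread_state_t)&new_state, x86_THREAD_STATE32_COUNT);
}
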
 
 void signal_emulation_wrapper(x86_thread_state32_t *thread_state,
@@ -253,11 +246,11 @@ void signal_emulation_wrapper(x86_thread_state32_t *thread_state,
      * context (and regs just for symmetry).
      */
 
-    struct ucontext *context;
-    struct mcontext *regs;
+    os_context_t *context;
+    mcontext_t *regs;
 
-    context = (struct ucontext*) os_validate(0, sizeof(struct ucontext));
-    regs = (struct mcontext*) os_validate(0, sizeof(struct mcontext));
+    context = (os_context_t*) os_validate(0, sizeof(os_context_t));
+    regs = (mcontext_t*) os_validate(0, sizeof(mcontext_t));
     context->uc_mcontext = regs;
 
     /* when BSD signals are fired, they mask the signals in sa_mask
@@ -270,14 +263,14 @@ void signal_emulation_wrapper(x86_thread_state32_t *thread_state,
 
     build_fake_signal_context(context, thread_state, float_state);
 
-    block_blockable_signals();
+    block_blockable_signals(0, 0);
 
     handler(signal, siginfo, context);
 
     update_thread_state_from_context(thread_state, float_state, context);
 
-    os_invalidate((os_vm_address_t)context, sizeof(struct ucontext));
-    os_invalidate((os_vm_address_t)regs, sizeof(struct mcontext));
+    os_invalidate((os_vm_address_t)context, sizeof(os_context_t));
+    os_invalidate((os_vm_address_t)regs, sizeof(mcontext_t));
 
     /* Trap to restore the signal context. */
     asm volatile ("movl %0, %%eax; movl %1, %%ebx; .long 0xffff0b0f"
@@ -300,6 +293,7 @@ void call_handler_on_thread(mach_port_t thread,
     /* Initialize the new state */
     new_state = *thread_state;
     open_stack_allocation(&new_state);
+    stack_allocate(&new_state, 256);
     /* Save old state */
     save_thread_state = (x86_thread_state32_t *)stack_allocate(&new_state, sizeof(*save_thread_state));
     *save_thread_state = *thread_state;
@@ -337,26 +331,30 @@ void call_handler_on_thread(mach_port_t thread,
 }
 
 #if defined DUMP_CONTEXT
-void dump_context(x86_thread_state32_t *context)
+void dump_context(x86_thread_state32_t *thread_state)
 {
     int i;
     u32 *stack_pointer;
 
     printf("eax: %08lx  ecx: %08lx  edx: %08lx  ebx: %08lx\n",
-           context->eax, context->ecx, context->edx, context->ebx);
+           thread_state->EAX, thread_state->ECX, thread_state->EDX, thread_state->EBX);
     printf("esp: %08lx  ebp: %08lx  esi: %08lx  edi: %08lx\n",
-           context->esp, context->ebp, context->esi, context->edi);
+           thread_state->ESP, thread_state->EBP, thread_state->ESI, thread_state->EDI);
     printf("eip: %08lx  eflags: %08lx\n",
-           context->eip, context->eflags);
+           thread_state->EIP, thread_state->EFLAGS);
     printf("cs: %04hx  ds: %04hx  es: %04hx  "
            "ss: %04hx  fs: %04hx  gs: %04hx\n",
-           context->cs, context->ds, context->es,
-           context->ss, context->fs, context->gs);
-
-    stack_pointer = (u32 *)context->esp;
+           thread_state->CS,
+           thread_state->DS,
+           thread_state->ES,
+           thread_state->SS,
+           thread_state->FS,
+           thread_state->GS);
+
+    stack_pointer = (u32 *)thread_state->ESP;
     for (i = 0; i < 48; i+=4) {
         printf("%08x:  %08x %08x %08x %08x\n",
-               context->esp + (i * 4),
+               thread_state->ESP + (i * 4),
                stack_pointer[i],
                stack_pointer[i+1],
                stack_pointer[i+2],
@@ -366,19 +364,18 @@ void dump_context(x86_thread_state32_t *context)
 #endif
 
 void
-control_stack_exhausted_handler(int signal, siginfo_t *siginfo, void *void_context) {
-    os_context_t *context = arch_os_get_context(&void_context);
+control_stack_exhausted_handler(int signal, siginfo_t *siginfo,
+                                os_context_t *context) {
 
+    unblock_signals_in_context_and_maybe_warn(context);
     arrange_return_to_lisp_function
-        (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
+        (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
 }
 
 void
-undefined_alien_handler(int signal, siginfo_t *siginfo, void *void_context) {
-    os_context_t *context = arch_os_get_context(&void_context);
-
+undefined_alien_handler(int signal, siginfo_t *siginfo, os_context_t *context) {
     arrange_return_to_lisp_function
-        (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
+        (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
 }
 
 kern_return_t
@@ -418,44 +415,21 @@ catch_exception_raise(mach_port_t exception_port,
             ret = KERN_INVALID_RIGHT;
             break;
         }
-        /* Get vm_region info */
-        region_addr = (vm_address_t)code_vector[1];
-        info_count = VM_REGION_BASIC_INFO_COUNT;
-        if ((ret = vm_region(mach_task_self(),
-                             &region_addr,
-                             &region_size,
-                             VM_REGION_BASIC_INFO,
-                             (vm_region_info_t)&region_info,
-                             &info_count,
-                             &region_name)))
-            lose("vm_region (VM_REGION_BASIC_INFO) failed failed %d\n", ret);
-        /* Check if still protected */
-        if ((region_info.protection & OS_VM_PROT_ALL) == 0) {
-          /* KLUDGE:
-           * If two threads fault on the same page, the protection
-           * is cleared as the first thread runs memory_fault_handler.
-           * Grep for "not marked as write-protected" in gencgc.c
-           */
-            ret = KERN_SUCCESS;
+        addr = (void*)code_vector[1];
+        /* Undefined alien */
+        if (os_trunc_to_page(addr) == undefined_alien_address) {
+            handler = undefined_alien_handler;
             break;
         }
-        addr = (void*)code_vector[1];
         /* At stack guard */
         if (os_trunc_to_page(addr) == CONTROL_STACK_GUARD_PAGE(th)) {
-            protect_control_stack_guard_page_thread(0, th);
-            protect_control_stack_return_guard_page_thread(1, th);
+            lower_thread_control_stack_guard_page(th);
             handler = control_stack_exhausted_handler;
             break;
         }
         /* Return from stack guard */
         if (os_trunc_to_page(addr) == CONTROL_STACK_RETURN_GUARD_PAGE(th)) {
-            protect_control_stack_guard_page_thread(1, th);
-            protect_control_stack_return_guard_page_thread(0, th);
-            break;
-        }
-        /* Undefined alien */
-        if (os_trunc_to_page(addr) == undefined_alien_address) {
-            handler = undefined_alien_handler;
+            reset_thread_control_stack_guard_page(th);
             break;
         }
         /* Regular memory fault */
@@ -469,7 +443,7 @@ catch_exception_raise(mach_port_t exception_port,
             break;
         }
         /* Check if UD2 instruction */
-        if (*(unsigned short *)thread_state.eip != 0x0b0f) {
+        if (*(unsigned short *)thread_state.EIP != 0x0b0f) {
             /* KLUDGE: There are two ways we could get here:
              * 1) We're executing data and we've hit some truly
              *    illegal opcode, of which there are a few, see
@@ -485,26 +459,26 @@ catch_exception_raise(mach_port_t exception_port,
              */
             static mach_port_t last_thread;
             static unsigned int last_eip;
-            if (last_thread == thread && last_eip == thread_state.eip)
+            if (last_thread == thread && last_eip == thread_state.EIP)
                 ret = KERN_INVALID_RIGHT;
             else
                 ret = KERN_SUCCESS;
             last_thread = thread;
-            last_eip = thread_state.eip;
+            last_eip = thread_state.EIP;
             break;
         }
         /* Skip the trap code */
-        thread_state.eip += 2;
+        thread_state.EIP += 2;
         /* Return from handler? */
-        if (*(unsigned short *)thread_state.eip == 0xffff) {
+        if (*(unsigned short *)thread_state.EIP == 0xffff) {
             if ((ret = thread_set_state(thread,
                                         x86_THREAD_STATE32,
-                                        (thread_state_t)thread_state.eax,
+                                        (thread_state_t)thread_state.EAX,
                                         x86_THREAD_STATE32_COUNT)) != KERN_SUCCESS)
                 lose("thread_set_state (x86_THREAD_STATE32) failed %d\n", ret);
             if ((ret = thread_set_state(thread,
                                         x86_FLOAT_STATE32,
-                                        (thread_state_t)thread_state.ebx,
+                                        (thread_state_t)thread_state.EBX,
                                         x86_FLOAT_STATE32_COUNT)) != KERN_SUCCESS)
                 lose("thread_set_state (x86_FLOAT_STATE32) failed %d\n", ret);
             break;
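
Explanatory sketch, not code from the patch: the ".long 0xffff0b0f" emitted by signal_emulation_wrapper decodes little-endian as the two-byte ud2 opcode (0x0f 0x0b) followed by an 0xffff marker, so after skipping the ud2 the handler can tell a return from signal emulation apart from an ordinary Lisp trap and recover the saved state pointers from EAX/EBX.

/* Hypothetical helper mirroring the dispatch above: nonzero when the
 * trapping instruction is the emulation-return marker rather than a
 * plain ud2 trap. */
static int is_emulation_return(x86_thread_state32_t *ts)
{
    if (*(unsigned short *)ts->EIP != 0x0b0f)           /* not ud2 at all */
        return 0;
    return *(unsigned short *)(ts->EIP + 2) == 0xffff;  /* marker after ud2 */
}
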
@@ -521,117 +495,12 @@ catch_exception_raise(mach_port_t exception_port,
       siginfo.si_addr = addr;
       call_handler_on_thread(thread, &thread_state, signal, &siginfo, handler);
     }
-    return ret;
-}
 
-void *
-mach_exception_handler(void *port)
-{
-  mach_msg_server(exc_server, 2048, (mach_port_t) port, 0);
-  /* mach_msg_server should never return, but it should dispatch mach
-   * exceptions to our catch_exception_raise function
-   */
-  abort();
-}
-
-#endif
-
-#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
-
-/* Sets up the thread that will listen for mach exceptions. note that
-   the exception handlers will be run on this thread. This is
-   different from the BSD-style signal handling situation in which the
-   signal handlers run in the relevant thread directly. */
-
-mach_port_t mach_exception_handler_port_set = MACH_PORT_NULL;
-
-pthread_t
-setup_mach_exception_handling_thread()
-{
-    kern_return_t ret;
-    pthread_t mach_exception_handling_thread = NULL;
-    pthread_attr_t attr;
-
-    /* allocate a mach_port for this process */
-    ret = mach_port_allocate(mach_task_self(),
-                             MACH_PORT_RIGHT_PORT_SET,
-                             &mach_exception_handler_port_set);
-
-    /* create the thread that will receive the mach exceptions */
-
-    FSHOW((stderr, "Creating mach_exception_handler thread!\n"));
-
-    pthread_attr_init(&attr);
-    pthread_create(&mach_exception_handling_thread,
-                   &attr,
-                   mach_exception_handler,
-                   (void*) mach_exception_handler_port_set);
-    pthread_attr_destroy(&attr);
-
-    return mach_exception_handling_thread;
-}
-
-/* tell the kernel that we want EXC_BAD_ACCESS exceptions sent to the
-   exception port (which is being listened to do by the mach
-   exception handling thread). */
-kern_return_t
-mach_thread_init(mach_port_t thread_exception_port)
-{
-    kern_return_t ret;
-    /* allocate a named port for the thread */
-
-    FSHOW((stderr, "Allocating mach port %x\n", thread_exception_port));
-
-    ret = mach_port_allocate_name(mach_task_self(),
-                                  MACH_PORT_RIGHT_RECEIVE,
-                                  thread_exception_port);
-    if (ret) {
-        lose("mach_port_allocate_name failed with return_code %d\n", ret);
-    }
-
-    /* establish the right for the thread_exception_port to send messages */
-    ret = mach_port_insert_right(mach_task_self(),
-                                 thread_exception_port,
-                                 thread_exception_port,
-                                 MACH_MSG_TYPE_MAKE_SEND);
-    if (ret) {
-        lose("mach_port_insert_right failed with return_code %d\n", ret);
-    }
-
-    ret = thread_set_exception_ports(mach_thread_self(),
-                                     EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
-                                     thread_exception_port,
-                                     EXCEPTION_DEFAULT,
-                                     THREAD_STATE_NONE);
-    if (ret) {
-        lose("thread_set_exception_port failed with return_code %d\n", ret);
-    }
-
-    ret = mach_port_move_member(mach_task_self(),
-                                thread_exception_port,
-                                mach_exception_handler_port_set);
-    if (ret) {
-        lose("mach_port_ failed with return_code %d\n", ret);
-    }
+    mach_port_deallocate(current_mach_task, exception_port);
+    mach_port_deallocate(current_mach_task, thread);
+    mach_port_deallocate(current_mach_task, task);
 
     return ret;
 }
 
-void
-setup_mach_exceptions() {
-    setup_mach_exception_handling_thread();
-    mach_thread_init(THREAD_STRUCT_TO_EXCEPTION_PORT(all_threads));
-}
-
-pid_t
-mach_fork() {
-    pid_t pid = fork();
-    if (pid == 0) {
-        setup_mach_exceptions();
-        return pid;
-    } else {
-        return pid;
-    }
-}
-
 #endif
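
The leak fix itself is the three mach_port_deallocate() calls added at the end of catch_exception_raise: every Mach exception message delivers send rights for the faulting thread, its task, and the exception port, and unless the handler releases them the rights accumulate in the task's port name space with each fault handled. Below is a minimal sketch of the pattern, using mach_task_self() directly where the patch goes through its current_mach_task variable; the function name is hypothetical.

/* Sketch only: release the rights received with an exception message
 * once the handler is done with them. */
kern_return_t handle_exception_and_release(mach_port_t exception_port,
                                           mach_port_t thread,
                                           mach_port_t task)
{
    kern_return_t ret = KERN_SUCCESS;
    /* ... inspect the fault and redirect the thread as needed ... */
    mach_port_deallocate(mach_task_self(), exception_port);
    mach_port_deallocate(mach_task_self(), thread);
    mach_port_deallocate(mach_task_self(), task);
    return ret;
}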