diff --git a/src/runtime/interrupt.c b/src/runtime/interrupt.c
index e277dde..0067a10 100644
--- a/src/runtime/interrupt.c
+++ b/src/runtime/interrupt.c
@@ -108,7 +108,6 @@ void sigaddset_blockable(sigset_t *s)
 #ifdef LISP_FEATURE_SB_THREAD
     sigaddset(s, SIG_STOP_FOR_GC);
     sigaddset(s, SIG_INTERRUPT_THREAD);
-    sigaddset(s, SIG_THREAD_EXIT);
 #endif
 }
 
@@ -552,16 +551,6 @@ sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
     sigset_t ss;
     int i;
 
-    /* KLUDGE: at least on Linux, the kernel apparently schedules a
-       thread immediately it is signalled. However, we signal
-       SIG_STOP_FOR_GC while holding the spinlock, and consequently we
-       can easily end up with a kind of thundering herd of threads all
-       wanting to acquire the lock at the same time so that they can
-       tell the system that they've gone to sleep. So we yield here.
-       Whether this is the right fix or not is unknown. -- CSR,
-       2004-07-16 */
-    sched_yield();
-
     if(maybe_defer_handler(sig_stop_for_gc_handler,data,
                            signal,info,context)) {
         return;
@@ -573,9 +562,13 @@ sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
     for(i=1;i<NSIG;i++) sigaddset(&ss,i);
     sigprocmask(SIG_BLOCK,&ss,0);
     get_spinlock(&all_threads_lock,thread->pid);
+    /* The GC can't tell if a thread is a zombie, so this would be a
+     * good time to let the kernel reap any of our children in that
+     * awful state, to stop them from being waited for indefinitely.
+     * Userland reaping is done later when GC is finished */
+    mark_dead_threads();
+    thread->state=STATE_STOPPED;
 
-    release_spinlock(&all_threads_lock);
     sigemptyset(&ss);
     sigaddset(&ss,SIG_STOP_FOR_GC);
     sigwaitinfo(&ss,0);
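
The hunk above relies on a standard POSIX handshake: the thread to be
stopped blocks every signal, publishes its state, then parks in
sigwaitinfo() until SIG_STOP_FOR_GC is sent a second time as the resume
message. A minimal standalone sketch of that handshake, assuming
Linux/glibc and using SIGUSR1 as a stand-in for SIG_STOP_FOR_GC (none
of the names below are SBCL's):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>

    static volatile int state;          /* 0 = running, 1 = stopped */

    static void stop_for_gc(void)
    {
        sigset_t ss;
        int i;

        /* Block everything, as the handler above does. */
        sigemptyset(&ss);
        for (i = 1; i < NSIG; i++) sigaddset(&ss, i);
        sigprocmask(SIG_BLOCK, &ss, 0);

        state = 1;                      /* cf. thread->state=STATE_STOPPED */

        /* Park until the resume signal arrives. Because it is blocked,
         * sigwaitinfo() consumes it synchronously and no handler runs,
         * so the thread resumes exactly where it parked. */
        sigemptyset(&ss);
        sigaddset(&ss, SIGUSR1);        /* stand-in for SIG_STOP_FOR_GC */
        sigwaitinfo(&ss, 0);

        state = 0;
        sigprocmask(SIG_UNBLOCK, &ss, 0);
    }

    int main(void)
    {
        sigset_t ss;
        sigemptyset(&ss);
        sigaddset(&ss, SIGUSR1);
        sigprocmask(SIG_BLOCK, &ss, 0); /* keep the raise() below pending */
        raise(SIGUSR1);                 /* queue the "resume" in advance... */
        stop_for_gc();                  /* ...so the park returns at once */
        printf("resumed, state=%d\n", state);
        return 0;
    }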
@@ -632,10 +625,31 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function)
 #ifdef LISP_FEATURE_X86
     /* Suppose the existence of some function that saved all
      * registers, called call_into_lisp, then restored GP registers and
-     * returned. We shortcut this: fake the stack that call_into_lisp
-     * would see, then arrange to have it called directly. post_signal_tramp
-     * is the second half of this function
+     * returned. It would look something like this:
+
+     push ebp
+     mov ebp esp
+     pushad
+     push $0
+     push $0
+     pushl {address of function to call}
+     call 0x8058db0 <call_into_lisp>
+     addl $12,%esp
+     popa
+     leave
+     ret
+
+     * What we do here is set up the stack that call_into_lisp would
+     * expect to see if it had been called by this code, and frob the
+     * signal context so that signal return goes directly to call_into_lisp,
+     * and when that function (and the lisp function it invoked) returns,
+     * it returns to the second half of this imaginary function which
+     * restores all registers and returns to C
+
+     * For this to work, the latter part of the imaginary function
+     * must obviously exist in reality. That would be post_signal_tramp
      */
+
     u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
 
     *(sp-14) = post_signal_tramp; /* return address for call_into_lisp */
@@ -645,9 +659,9 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function)
     /* this order matches that used in POPAD */
     *(sp-10)=*os_context_register_addr(context,reg_EDI);
     *(sp-9)=*os_context_register_addr(context,reg_ESI);
-    /* this gets overwritten again before it's used, anyway */
-    *(sp-8)=*os_context_register_addr(context,reg_EBP);
-    *(sp-7)=0 ; /* POPAD doesn't set ESP, but expects a gap for it anyway */
+
+    *(sp-8)=*os_context_register_addr(context,reg_ESP)-8;
+    *(sp-7)=0;
 
     *(sp-6)=*os_context_register_addr(context,reg_EBX);
     *(sp-5)=*os_context_register_addr(context,reg_EDX);
@@ -665,7 +679,11 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function)
     *os_context_pc_addr(context) = call_into_lisp;
     *os_context_register_addr(context,reg_ECX) = 0;
     *os_context_register_addr(context,reg_EBP) = sp-2;
+#ifdef __NetBSD__
+    *os_context_register_addr(context,reg_UESP) = sp-14;
+#else
     *os_context_register_addr(context,reg_ESP) = sp-14;
+#endif
 #else
     /* this much of the calling convention is common to all
        non-x86 ports */
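
The comment above describes the general trick arrange_return_to_lisp_function
is built on: rewrite the saved program counter in the signal context, and
signal return will resume execution somewhere else entirely. A minimal sketch
of just that PC-rewriting half, assuming Linux/x86-64 and glibc's ucontext
layout (the diff itself targets i386, and additionally fakes a call frame
with the *(sp-N) stores so the diverted function has somewhere to return to;
every name below is illustrative, not SBCL's):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    static void landing_pad(void)
    {
        /* This sketch built no return frame, so exit instead of returning;
         * in the diff, post_signal_tramp provides the return path. */
        printf("signal return diverted here\n");
        exit(0);
    }

    static void handler(int sig, siginfo_t *info, void *v_context)
    {
        ucontext_t *context = v_context;
        /* The analogue of *os_context_pc_addr(context) = call_into_lisp; */
        context->uc_mcontext.gregs[REG_RIP] = (greg_t)landing_pad;
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = handler,
                                .sa_flags = SA_SIGINFO };
        sigaction(SIGUSR1, &sa, 0);
        raise(SIGUSR1);         /* handler diverts the point of resumption */
        printf("not reached\n");
        return 1;
    }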
@@ -700,40 +718,43 @@ void interrupt_thread_handler(int num, siginfo_t *info, void *v_context)
 void thread_exit_handler(int num, siginfo_t *info, void *v_context)
 {   /* called when a child thread exits */
-    os_context_t *context = (os_context_t*)arch_os_get_context(&v_context);
-    struct thread *th=arch_os_get_current_thread();
-    pid_t kid;
-    int *status;
-    struct interrupt_data *data=
-        th ? th->interrupt_data : global_interrupt_data;
-    if(maybe_defer_handler(thread_exit_handler,data,num,info,context)){
-        return ;
-    }
-    while(1) {
-        kid=waitpid(-1,&status,__WALL|WNOHANG);
-        if(kid<1) break;
-        if(WIFEXITED(status) || WIFSIGNALED(status)) {
-            struct thread *th=find_thread_by_pid(kid);
-            if(!th) continue;
-            funcall1(SymbolFunction(HANDLE_THREAD_EXIT),make_fixnum(kid));
-            destroy_thread(th);
-        }
-    }
+    mark_dead_threads();
 }
+
 #endif
 
-boolean handle_control_stack_guard_triggered(os_context_t *context,void *addr){
+boolean handle_guard_page_triggered(os_context_t *context,void *addr){
     struct thread *th=arch_os_get_current_thread();
+
     /* note the os_context hackery here. When the signal handler returns,
      * it won't go back to what it was doing ... */
-    if(addr>=(void *)CONTROL_STACK_GUARD_PAGE(th) &&
-       addr<(void *)(CONTROL_STACK_GUARD_PAGE(th)+os_vm_page_size)) {
-        /* we hit the end of the control stack. disable protection
-         * temporarily so the error handler has some headroom */
-        protect_control_stack_guard_page(th->pid,0L);
-
+    if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
+       addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
+        /* We hit the end of the control stack: disable guard page
+         * protection so the error handler has some headroom, protect the
+         * previous page so that we can catch returns from the guard page
+         * and restore it. */
+        protect_control_stack_guard_page(th->pid,0);
+        protect_control_stack_return_guard_page(th->pid,1);
+
+        arrange_return_to_lisp_function
+            (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
+        return 1;
+    }
+    else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
+            addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
+        /* We're returning from the guard page: reprotect it, and
+         * unprotect this one. This works even if we somehow missed
+         * the return-guard-page, and hit it on our way to new
+         * exhaustion instead. */
+        protect_control_stack_guard_page(th->pid,1);
+        protect_control_stack_return_guard_page(th->pid,0);
+        return 1;
+    }
+    else if (addr >= undefined_alien_address &&
+             addr < undefined_alien_address + os_vm_page_size) {
         arrange_return_to_lisp_function
-            (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
+            (context, SymbolFunction(UNDEFINED_ALIEN_ERROR));
         return 1;
     }
     else return 0;
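
The control-stack branches above implement a two-page mprotect protocol:
hitting the guard page opens it (headroom for the error handler) and arms a
second "return guard" page; faulting on that page later proves the stack has
unwound, so the guard is re-armed. A standalone sketch of the same state
machine, assuming Linux and using an anonymous mapping rather than a real
control stack (all names hypothetical; which side the return guard sits on
depends on the direction of stack growth, here it is simply the neighbouring
page):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char *region;
    static long page;

    static char *guard(void)        { return region + page; }
    static char *return_guard(void) { return region; }

    static void handler(int sig, siginfo_t *info, void *ctx)
    {
        char *addr = info->si_addr;
        if (addr >= guard() && addr < guard() + page) {
            /* "Exhaustion": open the guard page, arm the return guard. */
            mprotect(guard(), page, PROT_READ|PROT_WRITE);
            mprotect(return_guard(), page, PROT_NONE);
        } else if (addr >= return_guard() && addr < return_guard() + page) {
            /* "Unwound past the guard": re-arm it, open this one. */
            mprotect(guard(), page, PROT_NONE);
            mprotect(return_guard(), page, PROT_READ|PROT_WRITE);
        } else {
            _exit(1);               /* a fault we don't own */
        }
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = handler,
                                .sa_flags = SA_SIGINFO };
        page = sysconf(_SC_PAGESIZE);
        region = mmap(0, 2*page, PROT_READ|PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        sigaction(SIGSEGV, &sa, 0);

        mprotect(guard(), page, PROT_NONE); /* arm the guard page */
        guard()[0] = 1;        /* faults; handler opens it; retry succeeds */
        return_guard()[0] = 1; /* faults; handler re-arms the guard */
        puts("guard protocol cycled");
        return 0;
    }

After a SIGSEGV handler returns, the kernel retries the faulting instruction,
so flipping the page protections inside the handler is all it takes to drive
the cycle.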