X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Finterrupt.c;h=f24c454adf13b2dcbac463615749773b944f718d;hb=e4542bc034db18cf98f005b2dac53a6d7d5c7260;hp=e734ea11d0bc7700e33d30e66eb719eb46fd83e8;hpb=426bde0954ef91387b8ab0d4528fad9ec02fa24c;p=sbcl.git diff --git a/src/runtime/interrupt.c b/src/runtime/interrupt.c index e734ea1..f24c454 100644 --- a/src/runtime/interrupt.c +++ b/src/runtime/interrupt.c @@ -43,7 +43,6 @@ #include #include -#include #include #include #include @@ -69,14 +68,15 @@ -void run_deferred_handler(struct interrupt_data *data, void *v_context) ; +static void run_deferred_handler(struct interrupt_data *data, void *v_context); static void store_signal_data_for_later (struct interrupt_data *data, void *handler, int signal, siginfo_t *info, os_context_t *context); boolean interrupt_maybe_gc_int(int signal, siginfo_t *info, void *v_context); -void sigaddset_blockable(sigset_t *s) +void +sigaddset_deferrable(sigset_t *s) { sigaddset(s, SIGHUP); sigaddset(s, SIGINT); @@ -84,7 +84,6 @@ void sigaddset_blockable(sigset_t *s) sigaddset(s, SIGPIPE); sigaddset(s, SIGALRM); sigaddset(s, SIGURG); - sigaddset(s, SIGFPE); sigaddset(s, SIGTSTP); sigaddset(s, SIGCHLD); sigaddset(s, SIGIO); @@ -96,27 +95,39 @@ void sigaddset_blockable(sigset_t *s) sigaddset(s, SIGUSR1); sigaddset(s, SIGUSR2); #ifdef LISP_FEATURE_SB_THREAD - sigaddset(s, SIG_STOP_FOR_GC); sigaddset(s, SIG_INTERRUPT_THREAD); #endif } +void +sigaddset_blockable(sigset_t *s) +{ + sigaddset_deferrable(s); +#ifdef LISP_FEATURE_SB_THREAD + sigaddset(s, SIG_STOP_FOR_GC); +#endif +} + +/* initialized in interrupt_init */ +static sigset_t deferrable_sigset; static sigset_t blockable_sigset; -inline static void check_blockables_blocked_or_lose() +void +check_blockables_blocked_or_lose() { /* Get the current sigmask, by blocking the empty set. */ sigset_t empty,current; int i; sigemptyset(&empty); thread_sigmask(SIG_BLOCK, &empty, ¤t); - for(i=0;iinterrupt_data; - /* Pseudo atomic may trigger several times for a single interrupt, - * and while without-interrupts should not, a false trigger by - * pseudo-atomic may eat a pending handler even from - * without-interrupts. */ - if (data->pending_handler) { - - /* If we're here as the result of a pseudo-atomic as opposed - * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already - * NIL, because maybe_defer_handler sets - * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/ - SetSymbolValue(INTERRUPT_PENDING, NIL,thread); - - /* restore the saved signal mask from the original signal (the - * one that interrupted us during the critical section) into the - * os_context for the signal we're currently in the handler for. - * This should ensure that when we return from the handler the - * blocked signals are unblocked */ - sigcopyset(os_context_sigmask_addr(context), &data->pending_mask); - - sigemptyset(&data->pending_mask); - /* This will break on sparc linux: the deferred handler really wants - * to be called with a void_context */ - run_deferred_handler(data,(void *)context); +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + /* If pseudo_atomic_interrupted is set then the interrupt is going + * to be handled now, ergo it's safe to clear it. 
*/ + arch_clear_pseudo_atomic_interrupted(context); +#endif + + if (SymbolValue(GC_INHIBIT,thread)==NIL) { +#ifdef LISP_FEATURE_SB_THREAD + if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) { + /* another thread has already initiated a gc, this attempt + * might as well be cancelled */ + SetSymbolValue(GC_PENDING,NIL,thread); + SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread); + sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context); + } else +#endif + if (SymbolValue(GC_PENDING,thread) != NIL) { + /* GC_PENDING is cleared in SUB-GC, or if another thread + * is doing a gc already we will get a SIG_STOP_FOR_GC and + * that will clear it. */ + interrupt_maybe_gc_int(0,NULL,context); + } + check_blockables_blocked_or_lose(); + } + + /* we may be here only to do the gc stuff, if interrupts are + * enabled run the pending handler */ + if (!((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) || + ( +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + (!foreign_function_call_active) && +#endif + arch_pseudo_atomic_atomic(context)))) { + + /* There may be no pending handler, because it was only a gc + * that had to be executed or because pseudo atomic triggered + * twice for a single interrupt. For the interested reader, + * that may happen if an interrupt hits after the interrupted + * flag is cleared but before pseduo-atomic is set and a + * pseudo atomic is interrupted in that interrupt. */ + if (data->pending_handler) { + + /* If we're here as the result of a pseudo-atomic as opposed + * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already + * NIL, because maybe_defer_handler sets + * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/ + SetSymbolValue(INTERRUPT_PENDING, NIL,thread); + + /* restore the saved signal mask from the original signal (the + * one that interrupted us during the critical section) into the + * os_context for the signal we're currently in the handler for. + * This should ensure that when we return from the handler the + * blocked signals are unblocked */ + sigcopyset(os_context_sigmask_addr(context), &data->pending_mask); + + sigemptyset(&data->pending_mask); + /* This will break on sparc linux: the deferred handler really wants + * to be called with a void_context */ + run_deferred_handler(data,(void *)context); + } } } @@ -379,13 +428,13 @@ void interrupt_handle_now(int signal, siginfo_t *info, void *void_context) { os_context_t *context = (os_context_t*)void_context; - struct thread *thread=arch_os_get_current_thread(); #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) boolean were_in_lisp; #endif union interrupt_handler handler; check_blockables_blocked_or_lose(); - check_interrupts_enabled_or_lose(context); + if (sigismember(&deferrable_sigset,signal)) + check_interrupts_enabled_or_lose(context); #ifdef LISP_FEATURE_LINUX /* Under Linux on some architectures, we appear to have to restore @@ -393,7 +442,7 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) delivered we appear to have a null FPU control word. 
*/ os_restore_fp_control(context); #endif - handler = thread->interrupt_data->interrupt_handlers[signal]; + handler = interrupt_handlers[signal]; if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) { return; @@ -407,11 +456,9 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) fake_foreign_function_call(context); } -#ifdef QSHOW_SIGNALS - FSHOW((stderr, - "/entering interrupt_handle_now(%d, info, context)\n", - signal)); -#endif + FSHOW_SIGNAL((stderr, + "/entering interrupt_handle_now(%d, info, context)\n", + signal)); if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) { @@ -431,26 +478,33 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) * because we're not in pseudoatomic and allocation shouldn't * be interrupted. In which case it's no longer an issue as * all our allocation from C now goes through a PA wrapper, - * but still, doesn't hurt */ + * but still, doesn't hurt. + * + * Yeah, but non-gencgc platforms don't really wrap allocation + * in PA. MG - 2005-08-29 */ lispobj info_sap,context_sap = alloc_sap(context); info_sap = alloc_sap(info); - /* Allow signals again. */ - thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); - -#ifdef QSHOW_SIGNALS - SHOW("calling Lisp-level handler"); + /* Leave deferrable signals blocked, the handler itself will + * allow signals again when it sees fit. */ +#ifdef LISP_FEATURE_SB_THREAD + { + sigset_t unblock; + sigemptyset(&unblock); + sigaddset(&unblock, SIG_STOP_FOR_GC); + thread_sigmask(SIG_UNBLOCK, &unblock, 0); + } #endif + FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n")); + funcall3(handler.lisp, make_fixnum(signal), info_sap, context_sap); } else { -#ifdef QSHOW_SIGNALS - SHOW("calling C-level handler"); -#endif + FSHOW_SIGNAL((stderr,"/calling C-level handler\n")); /* Allow signals again. */ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); @@ -465,11 +519,9 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) undo_fake_foreign_function_call(context); /* block signals again */ } -#ifdef QSHOW_SIGNALS - FSHOW((stderr, - "/returning from interrupt_handle_now(%d, info, context)\n", - signal)); -#endif + FSHOW_SIGNAL((stderr, + "/returning from interrupt_handle_now(%d, info, context)\n", + signal)); } /* This is called at the end of a critical section if the indications @@ -477,14 +529,12 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) * far as C or the kernel is concerned we dealt with the signal * already; we're just doing the Lisp-level processing now that we * put off then */ - -void +static void run_deferred_handler(struct interrupt_data *data, void *v_context) { - /* The pending_handler may enable interrupts (see - * interrupt_maybe_gc_int) and then another interrupt may hit, - * overwrite interrupt_data, so reset the pending handler before - * calling it. Trust the handler to finish with the siginfo before - * enabling interrupts. */ + /* The pending_handler may enable interrupts and then another + * interrupt may hit, overwrite interrupt_data, so reset the + * pending handler before calling it. Trust the handler to finish + * with the siginfo before enabling interrupts. 
*/ void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler; data->pending_handler=0; (*pending_handler)(data->pending_signal,&(data->pending_info), v_context); @@ -502,16 +552,15 @@ maybe_defer_handler(void *handler, struct interrupt_data *data, lose("interrupt already pending"); /* If interrupts are disabled then INTERRUPT_PENDING is set and * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo - * atomic section inside a without-interrupts. + * atomic section inside a WITHOUT-INTERRUPTS. */ if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) { store_signal_data_for_later(data,handler,signal,info,context); SetSymbolValue(INTERRUPT_PENDING, T,thread); -#ifdef QSHOW_SIGNALS - FSHOW((stderr, - "/maybe_defer_handler(%x,%d),thread=%ld: deferred\n", - (unsigned int)handler,signal,thread->os_thread)); -#endif + FSHOW_SIGNAL((stderr, + "/maybe_defer_handler(%x,%d),thread=%lu: deferred\n", + (unsigned int)handler,signal, + (unsigned long)thread->os_thread)); return 1; } /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't @@ -519,23 +568,27 @@ maybe_defer_handler(void *handler, struct interrupt_data *data, * may succeed even when context is null (gencgc alloc()) */ if ( #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + /* FIXME: this foreign_function_call_active test is dubious at + * best. If a foreign call is made in a pseudo atomic section + * (?) or more likely a pseudo atomic section is in a foreign + * call then an interrupt is executed immediately. Maybe it + * has to do with C code not maintaining pseudo atomic + * properly. MG - 2005-08-10 */ (!foreign_function_call_active) && #endif arch_pseudo_atomic_atomic(context)) { store_signal_data_for_later(data,handler,signal,info,context); arch_set_pseudo_atomic_interrupted(context); -#ifdef QSHOW_SIGNALS - FSHOW((stderr, - "/maybe_defer_handler(%x,%d),thread=%ld: deferred(PA)\n", - (unsigned int)handler,signal,thread->os_thread)); -#endif + FSHOW_SIGNAL((stderr, + "/maybe_defer_handler(%x,%d),thread=%lu: deferred(PA)\n", + (unsigned int)handler,signal, + (unsigned long)thread->os_thread)); return 1; } -#ifdef QSHOW_SIGNALS - FSHOW((stderr, - "/maybe_defer_handler(%x,%d),thread=%ld: not deferred\n", - (unsigned int)handler,signal,thread->os_thread)); -#endif + FSHOW_SIGNAL((stderr, + "/maybe_defer_handler(%x,%d),thread=%lu: not deferred\n", + (unsigned int)handler,signal, + (unsigned long)thread->os_thread)); return 0; } @@ -560,7 +613,7 @@ store_signal_data_for_later (struct interrupt_data *data, void *handler, * signals are added to the mask in the context so that we are * running with blocked signals when the handler returns */ sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context)); - sigaddset_blockable(os_context_sigmask_addr(context)); + sigaddset_deferrable(os_context_sigmask_addr(context)); } } @@ -573,8 +626,7 @@ maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) #ifdef LISP_FEATURE_LINUX os_restore_fp_control(context); #endif - if(maybe_defer_handler(interrupt_handle_now,data, - signal,info,context)) + if(maybe_defer_handler(interrupt_handle_now,data,signal,info,context)) return; interrupt_handle_now(signal, info, context); #ifdef LISP_FEATURE_DARWIN @@ -587,15 +639,13 @@ static void low_level_interrupt_handle_now(int signal, siginfo_t *info, void *void_context) { os_context_t *context = (os_context_t*)void_context; - struct thread *thread=arch_os_get_current_thread(); #ifdef LISP_FEATURE_LINUX os_restore_fp_control(context); #endif 
check_blockables_blocked_or_lose(); check_interrupts_enabled_or_lose(context); - (*thread->interrupt_data->interrupt_low_level_handlers[signal]) - (signal, info, void_context); + interrupt_low_level_handlers[signal](signal, info, void_context); #ifdef LISP_FEATURE_DARWIN /* Work around G5 bug */ DARWIN_FIX_CONTEXT(context); @@ -622,40 +672,47 @@ low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) } #ifdef LISP_FEATURE_SB_THREAD + void sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); struct thread *thread=arch_os_get_current_thread(); sigset_t ss; - int i; - /* need the context stored so it can have registers scavenged */ - fake_foreign_function_call(context); + if ((arch_pseudo_atomic_atomic(context) || + SymbolValue(GC_INHIBIT,thread) != NIL)) { + SetSymbolValue(STOP_FOR_GC_PENDING,T,thread); + if (SymbolValue(GC_INHIBIT,thread) == NIL) + arch_set_pseudo_atomic_interrupted(context); + FSHOW_SIGNAL((stderr,"thread=%lu sig_stop_for_gc deferred\n", + thread->os_thread)); + } else { + /* need the context stored so it can have registers scavenged */ + fake_foreign_function_call(context); - sigemptyset(&ss); - for(i=1;istate!=STATE_RUNNING) { - lose("sig_stop_for_gc_handler: wrong thread state: %ld\n", - fixnum_value(thread->state)); - } - thread->state=STATE_SUSPENDED; + sigfillset(&ss); /* Block everything. */ + thread_sigmask(SIG_BLOCK,&ss,0); - sigemptyset(&ss); sigaddset(&ss,SIG_STOP_FOR_GC); - sigwaitinfo(&ss,0); - if(thread->state!=STATE_SUSPENDED) { - lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n", - fixnum_value(thread->state)); - } - thread->state=STATE_RUNNING; + if(thread->state!=STATE_RUNNING) { + lose("sig_stop_for_gc_handler: wrong thread state: %ld\n", + fixnum_value(thread->state)); + } + thread->state=STATE_SUSPENDED; + FSHOW_SIGNAL((stderr,"thread=%lu suspended\n",thread->os_thread)); + + sigemptyset(&ss); sigaddset(&ss,SIG_STOP_FOR_GC); + /* It is possible to get SIGCONT (and probably other + * non-blockable signals) here. */ + while (sigwaitinfo(&ss,0) != SIG_STOP_FOR_GC); + FSHOW_SIGNAL((stderr,"thread=%lu resumed\n",thread->os_thread)); + if(thread->state!=STATE_RUNNING) { + lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n", + fixnum_value(thread->state)); + } - undo_fake_foreign_function_call(context); + undo_fake_foreign_function_call(context); + } } #endif @@ -694,12 +751,13 @@ gc_trigger_hit(int signal, siginfo_t *info, os_context_t *context) */ #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) -int *context_eflags_addr(os_context_t *context); +extern int *context_eflags_addr(os_context_t *context); #endif extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs); extern void post_signal_tramp(void); -void arrange_return_to_lisp_function(os_context_t *context, lispobj function) +void +arrange_return_to_lisp_function(os_context_t *context, lispobj function) { #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) void * fun=native_pointer(function); @@ -741,10 +799,10 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) * must obviously exist in reality. 
That would be post_signal_tramp */ - uint32_t *sp=(uint32_t *)*os_context_register_addr(context,reg_ESP); + u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP); /* return address for call_into_lisp: */ - *(sp-15) = (uint32_t)post_signal_tramp; + *(sp-15) = (u32)post_signal_tramp; *(sp-14) = function; /* args for call_into_lisp : function*/ *(sp-13) = 0; /* arg array */ *(sp-12) = 0; /* no. args */ @@ -765,9 +823,9 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) *(sp-1)=*os_context_pc_addr(context); #elif defined(LISP_FEATURE_X86_64) - uint64_t *sp=(uint64_t *)*os_context_register_addr(context,reg_RSP); + u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP); /* return address for call_into_lisp: */ - *(sp-18) = (uint64_t)post_signal_tramp; + *(sp-18) = (u64)post_signal_tramp; *(sp-17)=*os_context_register_addr(context,reg_R15); *(sp-16)=*os_context_register_addr(context,reg_R14); @@ -815,11 +873,12 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) #else /* this much of the calling convention is common to all non-x86 ports */ - *os_context_pc_addr(context) = (os_context_register_t)code; + *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code; *os_context_register_addr(context,reg_NARGS) = 0; - *os_context_register_addr(context,reg_LIP) = (os_context_register_t)code; + *os_context_register_addr(context,reg_LIP) = + (os_context_register_t)(unsigned long)code; *os_context_register_addr(context,reg_CFP) = - (os_context_register_t)current_control_frame_pointer; + (os_context_register_t)(unsigned long)current_control_frame_pointer; #endif #ifdef ARCH_HAS_NPC_REGISTER *os_context_npc_addr(context) = @@ -832,26 +891,16 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) } #ifdef LISP_FEATURE_SB_THREAD -void interrupt_thread_handler(int num, siginfo_t *info, void *v_context) + +/* FIXME: this function can go away when all lisp handlers are invoked + * via arrange_return_to_lisp_function. */ +void +interrupt_thread_handler(int num, siginfo_t *info, void *v_context) { os_context_t *context = (os_context_t*)arch_os_get_context(&v_context); - /* The order of interrupt execution is peculiar. If thread A - * interrupts thread B with I1, I2 and B for some reason recieves - * I1 when FUN2 is already on the list, then it is FUN2 that gets - * to run first. But when FUN2 is run SIG_INTERRUPT_THREAD is - * enabled again and I2 hits pretty soon in FUN2 and run - * FUN1. This is of course just one scenario, and the order of - * thread interrupt execution is undefined. */ - struct thread *th=arch_os_get_current_thread(); - struct cons *c; - if (th->state != STATE_RUNNING) - lose("interrupt_thread_handler: thread %ld in wrong state: %d\n", - th->os_thread,fixnum_value(th->state)); - get_spinlock(&th->interrupt_fun_lock,(long)th); - c=((struct cons *)native_pointer(th->interrupt_fun)); - arrange_return_to_lisp_function(context,c->car); - th->interrupt_fun=c->cdr; - release_spinlock(&th->interrupt_fun_lock); + /* let the handler enable interrupts again when it sees fit */ + sigaddset_deferrable(os_context_sigmask_addr(context)); + arrange_return_to_lisp_function(context, SymbolFunction(RUN_INTERRUPTION)); } #endif @@ -862,11 +911,13 @@ void interrupt_thread_handler(int num, siginfo_t *info, void *v_context) * that has the added benefit of automatically discriminating between * functions and variables. 
*/ -void undefined_alien_function() { +void +undefined_alien_function() { funcall0(SymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR)); } -boolean handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) +boolean +handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) { struct thread *th=arch_os_get_current_thread(); @@ -878,8 +929,8 @@ boolean handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) * protection so the error handler has some headroom, protect the * previous page so that we can catch returns from the guard page * and restore it. */ - protect_control_stack_guard_page(th,0); - protect_control_stack_return_guard_page(th,1); + protect_control_stack_guard_page(0); + protect_control_stack_return_guard_page(1); arrange_return_to_lisp_function (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR)); @@ -891,8 +942,8 @@ boolean handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) * unprotect this one. This works even if we somehow missed * the return-guard-page, and hit it on our way to new * exhaustion instead. */ - protect_control_stack_guard_page(th,1); - protect_control_stack_return_guard_page(th,0); + protect_control_stack_guard_page(1); + protect_control_stack_return_guard_page(0); return 1; } else if (addr >= undefined_alien_address && @@ -915,16 +966,27 @@ boolean interrupt_maybe_gc(int signal, siginfo_t *info, void *void_context) { os_context_t *context=(os_context_t *) void_context; - struct thread *th=arch_os_get_current_thread(); - struct interrupt_data *data= - th ? th->interrupt_data : global_interrupt_data; - if(!data->pending_handler && !foreign_function_call_active && - gc_trigger_hit(signal, info, context)){ + if(!foreign_function_call_active && gc_trigger_hit(signal, info, context)){ + struct thread *thread=arch_os_get_current_thread(); clear_auto_gc_trigger(); - if(!maybe_defer_handler(interrupt_maybe_gc_int, - data,signal,info,void_context)) - interrupt_maybe_gc_int(signal,info,void_context); + /* Don't flood the system with interrupts if the need to gc is + * already noted. This can happen for example when SUB-GC + * allocates or after a gc triggered in a WITHOUT-GCING. */ + if (SymbolValue(GC_PENDING,thread) == NIL) { + if (SymbolValue(GC_INHIBIT,thread) == NIL) { + if (arch_pseudo_atomic_atomic(context)) { + /* set things up so that GC happens when we finish + * the PA section */ + SetSymbolValue(GC_PENDING,T,thread); + arch_set_pseudo_atomic_interrupted(context); + } else { + interrupt_maybe_gc_int(signal,info,void_context); + } + } else { + SetSymbolValue(GC_PENDING,T,thread); + } + } return 1; } return 0; @@ -937,8 +999,8 @@ boolean interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context) { os_context_t *context=(os_context_t *) void_context; + struct thread *thread=arch_os_get_current_thread(); - check_blockables_blocked_or_lose(); fake_foreign_function_call(context); /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in @@ -950,11 +1012,29 @@ interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context) * and signal a storage condition from there. */ - /* restore the signal mask from the interrupted context before - * calling into Lisp */ - if (context) + /* Restore the signal mask from the interrupted context before + * calling into Lisp if interrupts are enabled. Why not always? + * + * Suppose there is a WITHOUT-INTERRUPTS block far, far out. 
If an + * interrupt hits while in SUB-GC, it is deferred and the + * os_context_sigmask of that interrupt is set to block further + * deferrable interrupts (until the first one is + * handled). Unfortunately, that context refers to this place and + * when we return from here the signals will not be blocked. + * + * A kludgy alternative is to propagate the sigmask change to the + * outer context. + */ + if(SymbolValue(INTERRUPTS_ENABLED,thread)!=NIL) thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); - +#ifdef LISP_FEATURE_SB_THREAD + else { + sigset_t new; + sigemptyset(&new); + sigaddset(&new,SIG_STOP_FOR_GC); + thread_sigmask(SIG_UNBLOCK,&new,0); + } +#endif funcall0(SymbolFunction(SUB_GC)); undo_fake_foreign_function_call(context); @@ -966,6 +1046,75 @@ interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context) * noise to install handlers */ +/* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if + * they are blocked, in Linux 2.6 the default handler is invoked + * instead that usually coredumps. One might hastily think that adding + * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then + * the whole sa_mask is ignored and instead of not adding the signal + * in question to the mask. That means if it's not blockable the + * signal must be unblocked at the beginning of signal handlers. + */ +static volatile int sigaction_nodefer_works = -1; + +static void +sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context) +{ + sigset_t empty, current; + int i; + sigemptyset(&empty); + sigprocmask(SIG_BLOCK, &empty, ¤t); + for(i = 1; i < NSIG; i++) + if (sigismember(¤t, i) != ((i == SIGABRT) ? 1 : 0)) { + FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i)); + sigaction_nodefer_works = 0; + } + if (sigaction_nodefer_works == -1) + sigaction_nodefer_works = 1; +} + +static void +see_if_sigaction_nodefer_works() +{ + struct sigaction sa; + + sa.sa_flags = SA_SIGINFO | SA_NODEFER; + sa.sa_sigaction = sigaction_nodefer_test_handler; + sigemptyset(&sa.sa_mask); + sigaddset(&sa.sa_mask, SIGABRT); + /* We can use any signal for which a handler will be installed + * later. Let's go with SIGINT because gdb barfs on SIGTRAP on + * Darwin. */ + sigaction(SIGINT, &sa, NULL); + /* Make sure no signals are blocked. */ + { + sigset_t empty; + sigemptyset(&empty); + sigprocmask(SIG_SETMASK, &empty, 0); + } + kill(getpid(), SIGINT); + while (sigaction_nodefer_works == -1); +} + +static void +unblock_me_trampoline(int signal, siginfo_t *info, void *void_context) +{ + sigset_t unblock; + sigemptyset(&unblock); + sigaddset(&unblock, signal); + thread_sigmask(SIG_UNBLOCK, &unblock, 0); + interrupt_handle_now_handler(signal, info, void_context); +} + +static void +low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context) +{ + sigset_t unblock; + sigemptyset(&unblock); + sigaddset(&unblock, signal); + thread_sigmask(SIG_UNBLOCK, &unblock, 0); + (*interrupt_low_level_handlers[signal])(signal, info, void_context); +} + void undoably_install_low_level_interrupt_handler (int signal, void handler(int, @@ -973,33 +1122,35 @@ undoably_install_low_level_interrupt_handler (int signal, void*)) { struct sigaction sa; - struct thread *th=arch_os_get_current_thread(); - struct interrupt_data *data= - th ? 
th->interrupt_data : global_interrupt_data; if (0 > signal || signal >= NSIG) { lose("bad signal number %d", signal); } - if (sigismember(&blockable_sigset,signal)) + if (ARE_SAME_HANDLER(handler, SIG_DFL)) + sa.sa_sigaction = handler; + else if (sigismember(&deferrable_sigset,signal)) sa.sa_sigaction = low_level_maybe_now_maybe_later; + else if (!sigaction_nodefer_works && + !sigismember(&blockable_sigset, signal)) + sa.sa_sigaction = low_level_unblock_me_trampoline; else sa.sa_sigaction = handler; - sigemptyset(&sa.sa_mask); - sigaddset_blockable(&sa.sa_mask); - sa.sa_flags = SA_SIGINFO | SA_RESTART; + sigcopyset(&sa.sa_mask, &blockable_sigset); + sa.sa_flags = SA_SIGINFO | SA_RESTART | + (sigaction_nodefer_works ? SA_NODEFER : 0); #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK if((signal==SIG_MEMORY_FAULT) #ifdef SIG_INTERRUPT_THREAD || (signal==SIG_INTERRUPT_THREAD) #endif ) - sa.sa_flags|= SA_ONSTACK; + sa.sa_flags |= SA_ONSTACK; #endif sigaction(signal, &sa, NULL); - data->interrupt_low_level_handlers[signal] = + interrupt_low_level_handlers[signal] = (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler); } @@ -1010,9 +1161,6 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) struct sigaction sa; sigset_t old, new; union interrupt_handler oldhandler; - struct thread *th=arch_os_get_current_thread(); - struct interrupt_data *data= - th ? th->interrupt_data : global_interrupt_data; FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal)); @@ -1020,29 +1168,28 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) sigaddset(&new, signal); thread_sigmask(SIG_BLOCK, &new, &old); - sigemptyset(&new); - sigaddset_blockable(&new); - - FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%x\n", - (unsigned int)data->interrupt_low_level_handlers[signal])); - if (data->interrupt_low_level_handlers[signal]==0) { + FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n", + (unsigned int)interrupt_low_level_handlers[signal])); + if (interrupt_low_level_handlers[signal]==0) { if (ARE_SAME_HANDLER(handler, SIG_DFL) || - ARE_SAME_HANDLER(handler, SIG_IGN)) { + ARE_SAME_HANDLER(handler, SIG_IGN)) sa.sa_sigaction = handler; - } else if (sigismember(&new, signal)) { + else if (sigismember(&deferrable_sigset, signal)) sa.sa_sigaction = maybe_now_maybe_later; - } else { + else if (!sigaction_nodefer_works && + !sigismember(&blockable_sigset, signal)) + sa.sa_sigaction = unblock_me_trampoline; + else sa.sa_sigaction = interrupt_handle_now_handler; - } - sigemptyset(&sa.sa_mask); - sigaddset_blockable(&sa.sa_mask); - sa.sa_flags = SA_SIGINFO | SA_RESTART; + sigcopyset(&sa.sa_mask, &blockable_sigset); + sa.sa_flags = SA_SIGINFO | SA_RESTART | + (sigaction_nodefer_works ? SA_NODEFER : 0); sigaction(signal, &sa, NULL); } - oldhandler = data->interrupt_handlers[signal]; - data->interrupt_handlers[signal].c = handler; + oldhandler = interrupt_handlers[signal]; + interrupt_handlers[signal].c = handler; thread_sigmask(SIG_SETMASK, &old, 0); @@ -1056,14 +1203,15 @@ interrupt_init() { int i; SHOW("entering interrupt_init()"); + see_if_sigaction_nodefer_works(); + sigemptyset(&deferrable_sigset); sigemptyset(&blockable_sigset); + sigaddset_deferrable(&deferrable_sigset); sigaddset_blockable(&blockable_sigset); - global_interrupt_data=calloc(sizeof(struct interrupt_data), 1); - /* Set up high level handler information. 
*/ for (i = 0; i < NSIG; i++) { - global_interrupt_data->interrupt_handlers[i].c = + interrupt_handlers[i].c = /* (The cast here blasts away the distinction between * SA_SIGACTION-style three-argument handlers and * signal(..)-style one-argument handlers, which is OK
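
The core mechanism this patch refines is signal deferral: when a signal arrives while Lisp "interrupts" are disabled (or inside a pseudo-atomic section), the wrapper handler records it and the real work runs later, at a safe point, from run_deferred_handler. Below is a minimal, self-contained sketch of that pattern, not SBCL code: the names interrupts_enabled, pending_signal, real_handler and maybe_now_maybe_later are hypothetical stand-ins for INTERRUPTS-ENABLED, the interrupt_data fields and the handlers installed above, and the flag checks are greatly simplified.

/* Simplified deferral sketch (illustrative only, assumed names). */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t interrupts_enabled = 0; /* stand-in for INTERRUPTS-ENABLED */
static volatile sig_atomic_t pending_signal = 0;     /* stand-in for data->pending_signal */

static void real_handler(int sig)
{
    /* write() is async-signal-safe, unlike printf(). */
    const char msg[] = "running handler now\n";
    (void)sig;
    write(STDOUT_FILENO, msg, sizeof msg - 1);
}

static void maybe_now_maybe_later(int sig)
{
    if (!interrupts_enabled)
        pending_signal = sig;   /* defer: remember the signal for later */
    else
        real_handler(sig);      /* safe to handle immediately */
}

int main(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof sa);
    sa.sa_handler = maybe_now_maybe_later;
    sigfillset(&sa.sa_mask);    /* keep other signals out while the wrapper runs */
    sa.sa_flags = SA_RESTART;
    sigaction(SIGUSR1, &sa, NULL);

    raise(SIGUSR1);             /* arrives while "disabled": gets deferred */

    interrupts_enabled = 1;
    if (pending_signal) {       /* safe point: run the deferred handler now */
        int sig = pending_signal;
        pending_signal = 0;
        real_handler(sig);
    }
    return 0;
}

In the real runtime the "safe point" is reached when WITHOUT-INTERRUPTS ends or the pseudo-atomic section finishes, and the saved sigmask from the original context is restored before the deferred handler is called, which this sketch omits.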