X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2Fruntime%2Finterrupt.c;h=5164c303675a4480f71568e170258ea44e42f3da;hb=94ea2b2082deaa0331dfb66fa6af6ca12dd8dc83;hp=81c8e0944d5a814dd3d15074d7711e636aa37788;hpb=a8166c109a5ae1ebaa0204946c0f720e1acb700e;p=sbcl.git diff --git a/src/runtime/interrupt.c b/src/runtime/interrupt.c index 81c8e09..5164c30 100644 --- a/src/runtime/interrupt.c +++ b/src/runtime/interrupt.c @@ -13,14 +13,45 @@ * files for more information. */ + +/* As far as I can tell, what's going on here is: + * + * In the case of most signals, when Lisp asks us to handle the + * signal, the outermost handler (the one actually passed to UNIX) is + * either interrupt_handle_now(..) or maybe_now_maybe_later(..). + * In that case, the Lisp-level handler is stored in interrupt_handlers[..] + * and interrupt_low_level_handlers[..] is cleared. + * + * However, some signals need special handling, e.g. + * + * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the + * garbage collector to detect violations of write protection, + * because some cases of such signals (e.g. GC-related violations of + * write protection) are handled at C level and never passed on to + * Lisp. For such signals, we still store any Lisp-level handler + * in interrupt_handlers[..], but for the outermost handle we use + * the value from interrupt_low_level_handlers[..], instead of the + * ordinary interrupt_handle_now(..) or interrupt_handle_later(..). + * + * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints, + * pseudo-atomic sections, and some classes of error (e.g. "function + * not defined"). This never goes anywhere near the Lisp handlers at all. + * See runtime/alpha-arch.c and code/signal.lisp + * + * - WHN 20000728, dan 20010128 */ + + #include #include #include #include +#include +#include +#include +#include "sbcl.h" #include "runtime.h" #include "arch.h" -#include "sbcl.h" #include "os.h" #include "interrupt.h" #include "globals.h" @@ -33,6 +64,16 @@ #include "interr.h" #include "genesis/fdefn.h" #include "genesis/simple-fun.h" +#include "genesis/cons.h" + + + +void run_deferred_handler(struct interrupt_data *data, void *v_context) ; +static void store_signal_data_for_later (struct interrupt_data *data, + void *handler, int signal, + siginfo_t *info, + os_context_t *context); +boolean interrupt_maybe_gc_int(int signal, siginfo_t *info, void *v_context); void sigaddset_blockable(sigset_t *s) { @@ -53,6 +94,38 @@ void sigaddset_blockable(sigset_t *s) sigaddset(s, SIGWINCH); sigaddset(s, SIGUSR1); sigaddset(s, SIGUSR2); +#ifdef LISP_FEATURE_SB_THREAD + sigaddset(s, SIG_STOP_FOR_GC); + sigaddset(s, SIG_INTERRUPT_THREAD); +#endif +} + +static sigset_t blockable_sigset; + +inline static void check_blockables_blocked_or_lose() +{ + /* Get the current sigmask, by blocking the empty set. */ + sigset_t empty,current; + int i; + sigemptyset(&empty); + thread_sigmask(SIG_BLOCK, &empty, ¤t); + for(i=0;iinterrupt_data; - SetSymbolValue(INTERRUPT_PENDING, NIL,thread); - if (maybe_gc_pending) { -#ifndef __i386__ - if (were_in_lisp) -#endif - { - fake_foreign_function_call(context); - } - funcall0(SymbolFunction(SUB_GC)); -#ifndef __i386__ - if (were_in_lisp) -#endif - { - undo_fake_foreign_function_call(context); - } - } - - /* FIXME: This isn't very clear. It would be good to reverse - * engineer it and rewrite the code more clearly, or write a clear - * explanation of what's going on in the comments, or both. 
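/* Editorial sketch, not part of this patch: a minimal, self-contained
 * illustration of the mask-checking idea behind
 * check_blockables_blocked_or_lose() introduced earlier in this diff.
 * The current mask is read by "blocking" the empty set, then each signal
 * that ought to be blocked is asserted to be a member.  The function name
 * assert_signal_blocked is illustrative only; the runtime itself goes
 * through its thread_sigmask wrapper rather than plain sigprocmask. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void
assert_signal_blocked(int sig)
{
    sigset_t empty, current;

    sigemptyset(&empty);
    /* Blocking the empty set changes nothing but returns the old mask. */
    sigprocmask(SIG_BLOCK, &empty, &current);
    if (!sigismember(&current, sig)) {
        fprintf(stderr, "signal %d unexpectedly unblocked\n", sig);
        abort();
    }
}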
- * - * WHN's question 1a: How come we unconditionally copy from - * pending_mask into the context, and then test whether - * pending_signal is set? - * - * WHN's question 1b: If pending_signal wasn't set, how could - * pending_mask be valid? - * - * Dan Barlow's reply (sbcl-devel 2001-03-13): And the answer is - - * or appears to be - because interrupt_maybe_gc set it that way - * (look in the #ifndef __i386__ bit). We can't GC during a - * pseudo-atomic, so we set maybe_gc_pending=1 and - * arch_set_pseudo_atomic_interrupted(..) When we come out of - * pseudo_atomic we're marked as interrupted, so we call - * interrupt_handle_pending, which does the GC using the pending - * context (it needs a context so that it has registers to use as - * GC roots) then notices there's no actual interrupt handler to - * call, so doesn't. That's the second question [1b] answered, - * anyway. Why we still need to copy the pending_mask into the - * context given that we're now done with the context anyway, I - * couldn't say. */ -#if 0 - memcpy(os_context_sigmask_addr(context), &pending_mask, - 4 /* sizeof(sigset_t) */ ); -#endif - sigemptyset(&data->pending_mask); - if (data->pending_signal) { - int signal = data->pending_signal; - siginfo_t info; - memcpy(&info, &data->pending_info, sizeof(siginfo_t)); - data->pending_signal = 0; - interrupt_handle_now(signal, &info, context); + /* Pseudo atomic may trigger several times for a single interrupt, + * and while without-interrupts should not, a false trigger by + * pseudo-atomic may eat a pending handler even from + * without-interrupts. */ + if (data->pending_handler) { + + /* If we're here as the result of a pseudo-atomic as opposed + * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already + * NIL, because maybe_defer_handler sets + * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/ + SetSymbolValue(INTERRUPT_PENDING, NIL,thread); + + /* restore the saved signal mask from the original signal (the + * one that interrupted us during the critical section) into the + * os_context for the signal we're currently in the handler for. 
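/* Editorial sketch, not part of this patch, of why copying a saved mask
 * into the os_context (as the surrounding comment describes) works: with
 * SA_SIGINFO handlers the third argument is the interrupted context, and
 * the signal mask stored in it is what the kernel reinstates when the
 * handler returns.  This assumes a platform (e.g. Linux) where that
 * argument really is a ucontext_t; the handler name is illustrative. */
#include <signal.h>
#include <string.h>
#include <ucontext.h>

static void
unblock_on_return_handler(int sig, siginfo_t *info, void *void_context)
{
    ucontext_t *context = (ucontext_t *)void_context;
    sigset_t unblocked;

    (void)sig; (void)info;
    sigemptyset(&unblocked);
    /* Whatever ends up in uc_sigmask here is restored by sigreturn, so
     * after this handler returns no signals remain blocked. */
    memcpy(&context->uc_sigmask, &unblocked, sizeof(sigset_t));
}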
+ * This should ensure that when we return from the handler the + * blocked signals are unblocked */ + sigcopyset(os_context_sigmask_addr(context), &data->pending_mask); + + sigemptyset(&data->pending_mask); + /* This will break on sparc linux: the deferred handler really wants + * to be called with a void_context */ + run_deferred_handler(data,(void *)context); } } @@ -355,10 +379,12 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) { os_context_t *context = (os_context_t*)void_context; struct thread *thread=arch_os_get_current_thread(); -#ifndef __i386__ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) boolean were_in_lisp; #endif union interrupt_handler handler; + check_blockables_blocked_or_lose(); + check_interrupts_enabled_or_lose(context); #ifdef LISP_FEATURE_LINUX /* Under Linux on some architectures, we appear to have to restore @@ -372,7 +398,7 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) return; } -#ifndef __i386__ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) were_in_lisp = !foreign_function_call_active; if (were_in_lisp) #endif @@ -394,15 +420,22 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) lose("no handler for signal %d in interrupt_handle_now(..)", signal); } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) { + /* Once we've decided what to do about contexts in a + * return-elsewhere world (the original context will no longer + * be available; should we copy it or was nobody using it anyway?) + * then we should convert this to return-elsewhere */ + + /* CMUCL comment said "Allocate the SAPs while the interrupts + * are still disabled.". I (dan, 2003.08.21) assume this is + * because we're not in pseudoatomic and allocation shouldn't + * be interrupted. In which case it's no longer an issue as + * all our allocation from C now goes through a PA wrapper, + * but still, doesn't hurt */ - /* Allocate the SAPs while the interrupts are still disabled. - * (FIXME: Why? This is the way it was done in CMU CL, and it - * even had the comment noting that this is the way it was - * done, but no motivation..) */ lispobj info_sap,context_sap = alloc_sap(context); info_sap = alloc_sap(info); /* Allow signals again. */ - sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); #ifdef QSHOW_SIGNALS SHOW("calling Lisp-level handler"); @@ -419,16 +452,16 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) #endif /* Allow signals again. */ - sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); (*handler.c)(signal, info, void_context); } -#ifndef __i386__ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) if (were_in_lisp) #endif { - undo_fake_foreign_function_call(context); + undo_fake_foreign_function_call(context); /* block signals again */ } #ifdef QSHOW_SIGNALS @@ -438,19 +471,98 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) #endif } +/* This is called at the end of a critical section if the indications + * are that some signal was deferred during the section. 
Note that as + * far as C or the kernel is concerned we dealt with the signal + * already; we're just doing the Lisp-level processing now that we + * put off then */ + +void +run_deferred_handler(struct interrupt_data *data, void *v_context) { + /* The pending_handler may enable interrupts (see + * interrupt_maybe_gc_int) and then another interrupt may hit, + * overwrite interrupt_data, so reset the pending handler before + * calling it. Trust the handler to finish with the siginfo before + * enabling interrupts. */ + void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler; + data->pending_handler=0; + (*pending_handler)(data->pending_signal,&(data->pending_info), v_context); +} + +boolean +maybe_defer_handler(void *handler, struct interrupt_data *data, + int signal, siginfo_t *info, os_context_t *context) +{ + struct thread *thread=arch_os_get_current_thread(); + + check_blockables_blocked_or_lose(); + + if (SymbolValue(INTERRUPT_PENDING,thread) != NIL) + lose("interrupt already pending"); + /* If interrupts are disabled then INTERRUPT_PENDING is set and + * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo + * atomic section inside a without-interrupts. + */ + if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) { + store_signal_data_for_later(data,handler,signal,info,context); + SetSymbolValue(INTERRUPT_PENDING, T,thread); +#ifdef QSHOW_SIGNALS + FSHOW((stderr, + "/maybe_defer_handler(%x,%d),thread=%ld: deferred\n", + (unsigned int)handler,signal,thread->os_thread)); +#endif + return 1; + } + /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't + * actually use its argument for anything on x86, so this branch + * may succeed even when context is null (gencgc alloc()) */ + if ( +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + (!foreign_function_call_active) && +#endif + arch_pseudo_atomic_atomic(context)) { + store_signal_data_for_later(data,handler,signal,info,context); + arch_set_pseudo_atomic_interrupted(context); +#ifdef QSHOW_SIGNALS + FSHOW((stderr, + "/maybe_defer_handler(%x,%d),thread=%ld: deferred(PA)\n", + (unsigned int)handler,signal,thread->os_thread)); +#endif + return 1; + } +#ifdef QSHOW_SIGNALS + FSHOW((stderr, + "/maybe_defer_handler(%x,%d),thread=%ld: not deferred\n", + (unsigned int)handler,signal,thread->os_thread)); +#endif + return 0; +} + static void -store_signal_data_for_later (struct interrupt_data *data, int signal, +store_signal_data_for_later (struct interrupt_data *data, void *handler, + int signal, siginfo_t *info, os_context_t *context) { + if (data->pending_handler) + lose("tried to overwrite pending interrupt handler %x with %x\n", + data->pending_handler, handler); + if (!handler) + lose("tried to defer null interrupt handler\n"); + data->pending_handler = handler; data->pending_signal = signal; - memcpy(&(data->pending_info), info, sizeof(siginfo_t)); - memcpy(&(data->pending_mask), - os_context_sigmask_addr(context), - sizeof(sigset_t)); - sigaddset_blockable(os_context_sigmask_addr(context)); + if(info) + memcpy(&(data->pending_info), info, sizeof(siginfo_t)); + if(context) { + /* the signal mask in the context (from before we were + * interrupted) is copied to be restored when + * run_deferred_handler happens. 
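/* Editorial sketch, not from the SBCL sources: the bare bones of the
 * defer-and-replay pattern implemented by maybe_defer_handler(),
 * store_signal_data_for_later() and run_deferred_handler().  Inside a
 * critical section the handler only records the signal; the recorded
 * data is replayed when the section is left.  In the real runtime this
 * happens with the blockable signals masked; the names below
 * (real_work, deferring_handler, ...) are illustrative only. */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t in_critical_section;
static volatile sig_atomic_t handler_pending;
static int pending_signal;
static siginfo_t pending_info;

/* Stand-in for the expensive, interruptible part ("the Lisp-level handler"). */
static void
real_work(int sig, siginfo_t *info)
{
    fprintf(stderr, "handling signal %d (code %d)\n", sig, info->si_code);
}

static void
deferring_handler(int sig, siginfo_t *info, void *context)
{
    (void)context;
    if (in_critical_section) {
        /* Record everything needed to replay the signal later. */
        pending_signal = sig;
        pending_info = *info;
        handler_pending = 1;
    } else {
        real_work(sig, info);
    }
}

static void
leave_critical_section(void)
{
    in_critical_section = 0;
    if (handler_pending) {
        handler_pending = 0;   /* reset first, as run_deferred_handler does */
        real_work(pending_signal, &pending_info);
    }
}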
Then the usually-blocked + * signals are added to the mask in the context so that we are + * running with blocked signals when the handler returns */ + sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context)); + sigaddset_blockable(os_context_sigmask_addr(context)); + } } - static void maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) { @@ -460,30 +572,100 @@ maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) #ifdef LISP_FEATURE_LINUX os_restore_fp_control(context); #endif - /* see comments at top of code/signal.lisp for what's going on here - * with INTERRUPTS_ENABLED/INTERRUPT_HANDLE_NOW - */ - if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) { - store_signal_data_for_later(data,signal,info,context); - SetSymbolValue(INTERRUPT_PENDING, T,thread); - } else if ( -#ifndef __i386__ - (!foreign_function_call_active) && + if(maybe_defer_handler(interrupt_handle_now,data, + signal,info,context)) + return; + interrupt_handle_now(signal, info, context); +#ifdef LISP_FEATURE_DARWIN + /* Work around G5 bug */ + DARWIN_FIX_CONTEXT(context); #endif - arch_pseudo_atomic_atomic(context)) { - store_signal_data_for_later(data,signal,info,context); - arch_set_pseudo_atomic_interrupted(context); - } else { - interrupt_handle_now(signal, info, context); +} + +static void +low_level_interrupt_handle_now(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context = (os_context_t*)void_context; + struct thread *thread=arch_os_get_current_thread(); + +#ifdef LISP_FEATURE_LINUX + os_restore_fp_control(context); +#endif + check_blockables_blocked_or_lose(); + check_interrupts_enabled_or_lose(context); + (*thread->interrupt_data->interrupt_low_level_handlers[signal]) + (signal, info, void_context); +#ifdef LISP_FEATURE_DARWIN + /* Work around G5 bug */ + DARWIN_FIX_CONTEXT(context); +#endif +} + +static void +low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context = arch_os_get_context(&void_context); + struct thread *thread=arch_os_get_current_thread(); + struct interrupt_data *data=thread->interrupt_data; +#ifdef LISP_FEATURE_LINUX + os_restore_fp_control(context); +#endif + if(maybe_defer_handler(low_level_interrupt_handle_now,data, + signal,info,context)) + return; + low_level_interrupt_handle_now(signal, info, context); +#ifdef LISP_FEATURE_DARWIN + /* Work around G5 bug */ + DARWIN_FIX_CONTEXT(context); +#endif +} + +#ifdef LISP_FEATURE_SB_THREAD +void +sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context = arch_os_get_context(&void_context); + struct thread *thread=arch_os_get_current_thread(); + sigset_t ss; + int i; + + /* need the context stored so it can have registers scavenged */ + fake_foreign_function_call(context); + + sigemptyset(&ss); + for(i=1;istate!=STATE_RUNNING) { + lose("sig_stop_for_gc_handler: wrong thread state: %ld\n", + fixnum_value(thread->state)); } + thread->state=STATE_SUSPENDED; + + sigemptyset(&ss); sigaddset(&ss,SIG_STOP_FOR_GC); + sigwaitinfo(&ss,0); + if(thread->state!=STATE_SUSPENDED) { + lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n", + fixnum_value(thread->state)); + } + thread->state=STATE_RUNNING; + + undo_fake_foreign_function_call(context); } - +#endif void interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); interrupt_handle_now(signal, info, context); +#ifdef LISP_FEATURE_DARWIN + 
DARWIN_FIX_CONTEXT(context); +#endif } /* @@ -510,42 +692,103 @@ gc_trigger_hit(int signal, siginfo_t *info, os_context_t *context) * previously */ +#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) +int *context_eflags_addr(os_context_t *context); +#endif + extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs); extern void post_signal_tramp(void); void arrange_return_to_lisp_function(os_context_t *context, lispobj function) { +#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) void * fun=native_pointer(function); - char *code = &(((struct simple_fun *) fun)->code); - + void *code = &(((struct simple_fun *) fun)->code); +#endif + /* Build a stack frame showing `interrupted' so that the * user's backtrace makes (as much) sense (as usual) */ + + /* FIXME: what about restoring fp state? */ + /* FIXME: what about restoring errno? */ #ifdef LISP_FEATURE_X86 /* Suppose the existence of some function that saved all * registers, called call_into_lisp, then restored GP registers and - * returned. We shortcut this: fake the stack that call_into_lisp - * would see, then arrange to have it called directly. post_signal_tramp - * is the second half of this function + * returned. It would look something like this: + + push ebp + mov ebp esp + pushfl + pushal + push $0 + push $0 + pushl {address of function to call} + call 0x8058db0 + addl $12,%esp + popal + popfl + leave + ret + + * What we do here is set up the stack that call_into_lisp would + * expect to see if it had been called by this code, and frob the + * signal context so that signal return goes directly to call_into_lisp, + * and when that function (and the lisp function it invoked) returns, + * it returns to the second half of this imaginary function which + * restores all registers and returns to C + + * For this to work, the latter part of the imaginary function + * must obviously exist in reality. That would be post_signal_tramp */ + u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP); - *(sp-14) = post_signal_tramp; /* return address for call_into_lisp */ - *(sp-13) = function; /* args for call_into_lisp : function*/ - *(sp-12) = 0; /* arg array */ - *(sp-11) = 0; /* no. args */ + *(sp-15) = post_signal_tramp; /* return address for call_into_lisp */ + *(sp-14) = function; /* args for call_into_lisp : function*/ + *(sp-13) = 0; /* arg array */ + *(sp-12) = 0; /* no. 
args */ /* this order matches that used in POPAD */ - *(sp-10)=*os_context_register_addr(context,reg_EDI); - *(sp-9)=*os_context_register_addr(context,reg_ESI); - /* this gets overwritten again before it's used, anyway */ - *(sp-8)=*os_context_register_addr(context,reg_EBP); - *(sp-7)=0 ; /* POPAD doesn't set ESP, but expects a gap for it anyway */ - *(sp-6)=*os_context_register_addr(context,reg_EBX); - - *(sp-5)=*os_context_register_addr(context,reg_EDX); - *(sp-4)=*os_context_register_addr(context,reg_ECX); - *(sp-3)=*os_context_register_addr(context,reg_EAX); + *(sp-11)=*os_context_register_addr(context,reg_EDI); + *(sp-10)=*os_context_register_addr(context,reg_ESI); + + *(sp-9)=*os_context_register_addr(context,reg_ESP)-8; + /* POPAD ignores the value of ESP: */ + *(sp-8)=0; + *(sp-7)=*os_context_register_addr(context,reg_EBX); + + *(sp-6)=*os_context_register_addr(context,reg_EDX); + *(sp-5)=*os_context_register_addr(context,reg_ECX); + *(sp-4)=*os_context_register_addr(context,reg_EAX); + *(sp-3)=*context_eflags_addr(context); *(sp-2)=*os_context_register_addr(context,reg_EBP); *(sp-1)=*os_context_pc_addr(context); +#elif defined(LISP_FEATURE_X86_64) + u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP); + *(sp-20) = post_signal_tramp; /* return address for call_into_lisp */ + + *(sp-19)=*os_context_register_addr(context,reg_R15); + *(sp-18)=*os_context_register_addr(context,reg_R14); + *(sp-17)=*os_context_register_addr(context,reg_R13); + *(sp-16)=*os_context_register_addr(context,reg_R12); + *(sp-15)=*os_context_register_addr(context,reg_R11); + *(sp-14)=*os_context_register_addr(context,reg_R10); + *(sp-13)=*os_context_register_addr(context,reg_R9); + *(sp-12)=*os_context_register_addr(context,reg_R8); + *(sp-11)=*os_context_register_addr(context,reg_RDI); + *(sp-10)=*os_context_register_addr(context,reg_RSI); + *(sp-9)=*os_context_register_addr(context,reg_RSP)-16; + *(sp-8)=0; + *(sp-7)=*os_context_register_addr(context,reg_RBX); + *(sp-6)=*os_context_register_addr(context,reg_RDX); + *(sp-5)=*os_context_register_addr(context,reg_RCX); + *(sp-4)=*os_context_register_addr(context,reg_RAX); + *(sp-3)=*context_eflags_addr(context); + *(sp-2)=*os_context_register_addr(context,reg_RBP); + *(sp-1)=*os_context_pc_addr(context); + + *os_context_register_addr(context,reg_RDI) = function; /* function */ + *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */ + *os_context_register_addr(context,reg_RDX) = 0; /* no. 
args */ #else struct thread *th=arch_os_get_current_thread(); build_fake_control_stack_frames(th,context); @@ -555,7 +798,16 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) *os_context_pc_addr(context) = call_into_lisp; *os_context_register_addr(context,reg_ECX) = 0; *os_context_register_addr(context,reg_EBP) = sp-2; - *os_context_register_addr(context,reg_ESP) = sp-14; +#ifdef __NetBSD__ + *os_context_register_addr(context,reg_UESP) = sp-15; +#else + *os_context_register_addr(context,reg_ESP) = sp-15; +#endif +#elif defined(LISP_FEATURE_X86_64) + *os_context_pc_addr(context) = call_into_lisp; + *os_context_register_addr(context,reg_RCX) = 0; + *os_context_register_addr(context,reg_RBP) = sp-2; + *os_context_register_addr(context,reg_RSP) = sp-20; #else /* this much of the calling convention is common to all non-x86 ports */ @@ -576,34 +828,79 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) } #ifdef LISP_FEATURE_SB_THREAD -boolean handle_rt_signal(int num, siginfo_t *info, void *v_context) +void interrupt_thread_handler(int num, siginfo_t *info, void *v_context) { - struct - os_context_t *context = (os_context_t*)arch_os_get_context(&v_context); - arrange_return_to_lisp_function(context,info->si_value.sival_int); + os_context_t *context = (os_context_t*)arch_os_get_context(&v_context); + /* The order of interrupt execution is peculiar. If thread A + * interrupts thread B with I1, I2 and B for some reason recieves + * I1 when FUN2 is already on the list, then it is FUN2 that gets + * to run first. But when FUN2 is run SIG_INTERRUPT_THREAD is + * enabled again and I2 hits pretty soon in FUN2 and run + * FUN1. This is of course just one scenario, and the order of + * thread interrupt execution is undefined. */ + struct thread *th=arch_os_get_current_thread(); + struct cons *c; + if (th->state != STATE_RUNNING) + lose("interrupt_thread_handler: thread %ld in wrong state: %d\n", + th->os_thread,fixnum_value(th->state)); + get_spinlock(&th->interrupt_fun_lock,(long)th); + c=((struct cons *)native_pointer(th->interrupt_fun)); + arrange_return_to_lisp_function(context,c->car); + th->interrupt_fun=(lispobj *)(c->cdr); + release_spinlock(&th->interrupt_fun_lock); } + #endif -boolean handle_control_stack_guard_triggered(os_context_t *context,void *addr) -{ +/* KLUDGE: Theoretically the approach we use for undefined alien + * variables should work for functions as well, but on PPC/Darwin + * we get bus error at bogus addresses instead, hence this workaround, + * that has the added benefit of automatically discriminating between + * functions and variables. + */ +void undefined_alien_function() { + funcall0(SymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR)); +} + +boolean handle_guard_page_triggered(os_context_t *context,void *addr){ struct thread *th=arch_os_get_current_thread(); + /* note the os_context hackery here. When the signal handler returns, * it won't go back to what it was doing ... */ - if(addr>=(void *)CONTROL_STACK_GUARD_PAGE(th) && - addr<(void *)(CONTROL_STACK_GUARD_PAGE(th)+os_vm_page_size)) { - /* we hit the end of the control stack. 
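/* Editorial sketch, not part of this patch, of the guard-page technique
 * used by handle_guard_page_triggered() below: a page at the end of a
 * stack-like region is kept unmapped for access (PROT_NONE); the
 * SIGSEGV/SIGBUS handler recognises faults on it by address and lifts the
 * protection so the error path has headroom.  guard_page, guard_size and
 * maybe_handle_guard_fault are illustrative names, not SBCL's. */
#include <signal.h>
#include <sys/mman.h>

static char  *guard_page;    /* set up when the stack region is created */
static size_t guard_size;    /* normally one OS page */

static int
maybe_handle_guard_fault(siginfo_t *info)
{
    char *addr = (char *)info->si_addr;

    if (addr >= guard_page && addr < guard_page + guard_size) {
        /* Give the error path headroom; the patch additionally arms a
         * separate "return guard page" so this one can be reprotected
         * once control has left the danger zone. */
        mprotect(guard_page, guard_size, PROT_READ | PROT_WRITE);
        return 1;    /* the fault was ours */
    }
    return 0;        /* not a guard-page fault; leave it to other handlers */
}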
disable protection - * temporarily so the error handler has some headroom */ - protect_control_stack_guard_page(th->pid,0L); - + if(addr >= CONTROL_STACK_GUARD_PAGE(th) && + addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) { + /* We hit the end of the control stack: disable guard page + * protection so the error handler has some headroom, protect the + * previous page so that we can catch returns from the guard page + * and restore it. */ + protect_control_stack_guard_page(th,0); + protect_control_stack_return_guard_page(th,1); + + arrange_return_to_lisp_function + (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR)); + return 1; + } + else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) && + addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) { + /* We're returning from the guard page: reprotect it, and + * unprotect this one. This works even if we somehow missed + * the return-guard-page, and hit it on our way to new + * exhaustion instead. */ + protect_control_stack_guard_page(th,1); + protect_control_stack_return_guard_page(th,0); + return 1; + } + else if (addr >= undefined_alien_address && + addr < undefined_alien_address + os_vm_page_size) { arrange_return_to_lisp_function - (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR)); + (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR)); return 1; } else return 0; } #ifndef LISP_FEATURE_GENCGC -/* This function gets called from the SIGSEGV (for e.g. Linux or +/* This function gets called from the SIGSEGV (for e.g. Linux, NetBSD, & * OpenBSD) or SIGBUS (for e.g. FreeBSD) handler. Here we check * whether the signal was due to treading on the mprotect()ed zone - * and if so, arrange for a GC to happen. */ @@ -617,69 +914,53 @@ interrupt_maybe_gc(int signal, siginfo_t *info, void *void_context) struct interrupt_data *data= th ? th->interrupt_data : global_interrupt_data; - if(!foreign_function_call_active && gc_trigger_hit(signal, info, context)){ - clear_auto_gc_trigger(); - - if (arch_pseudo_atomic_atomic(context)) { - /* don't GC during an atomic operation. Instead, copy the - * signal mask somewhere safe. interrupt_handle_pending - * will detect pending_signal==0 and know to do a GC with the - * signal context instead of calling a Lisp-level handler */ - maybe_gc_pending = 1; - if (data->pending_signal == 0) { - /* FIXME: This copy-pending_mask-then-sigaddset_blockable - * idiom occurs over and over. It should be factored out - * into a function with a descriptive name. */ - memcpy(&(data->pending_mask), - os_context_sigmask_addr(context), - sizeof(sigset_t)); - sigaddset_blockable(os_context_sigmask_addr(context)); - } - arch_set_pseudo_atomic_interrupted(context); - } - else { - fake_foreign_function_call(context); - /* SUB-GC may return without GCing if *GC-INHIBIT* is set, - * in which case we will be running with no gc trigger - * barrier thing for a while. But it shouldn't be long - * until the end of WITHOUT-GCING. 
*/ - funcall0(SymbolFunction(SUB_GC)); - undo_fake_foreign_function_call(context); - } - return 1; - } else { - return 0; + if(!data->pending_handler && !foreign_function_call_active && + gc_trigger_hit(signal, info, context)){ + clear_auto_gc_trigger(); + if(!maybe_defer_handler(interrupt_maybe_gc_int, + data,signal,info,void_context)) + interrupt_maybe_gc_int(signal,info,void_context); + return 1; } + return 0; } + #endif - -/* - * noise to install handlers - */ -/* SBCL used to have code to restore signal handlers on exit, which - * has been removed from the threaded version until we decide: exit of - * _what_ ? */ +/* this is also used by gencgc, in alloc() */ +boolean +interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context=(os_context_t *) void_context; -/* SBCL comment: The "undoably" aspect is because we also arrange with - * atexit() for the handler to be restored to its old value. This is - * for tidiness: it shouldn't matter much ordinarily, but it does - * remove a window where e.g. memory fault signals (SIGSEGV or SIGBUS, - * which in ordinary operation of SBCL are sent to the generational - * garbage collector, then possibly onward to Lisp code) or SIGINT - * (which is ordinarily passed to Lisp code) could otherwise be - * handled bizarrely/brokenly because the Lisp code would try to deal - * with them using machinery (like stream output buffers) which has - * already been dismantled. */ + check_blockables_blocked_or_lose(); + fake_foreign_function_call(context); -/* I'm not sure (a) whether this is a real concern, (b) how it helps - anyway */ + /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in + * which case we will be running with no gc trigger barrier + * thing for a while. But it shouldn't be long until the end + * of WITHOUT-GCING. + * + * FIXME: It would be good to protect the end of dynamic space + * and signal a storage condition from there. 
+ */ -void -uninstall_low_level_interrupt_handlers_atexit(void) -{ + /* restore the signal mask from the interrupted context before + * calling into Lisp */ + if (context) + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + + funcall0(SymbolFunction(SUB_GC)); + + undo_fake_foreign_function_call(context); + return 1; } + +/* + * noise to install handlers + */ + void undoably_install_low_level_interrupt_handler (int signal, void handler(int, @@ -695,7 +976,11 @@ undoably_install_low_level_interrupt_handler (int signal, lose("bad signal number %d", signal); } - sa.sa_sigaction = handler; + if (sigismember(&blockable_sigset,signal)) + sa.sa_sigaction = low_level_maybe_now_maybe_later; + else + sa.sa_sigaction = handler; + sigemptyset(&sa.sa_mask); sigaddset_blockable(&sa.sa_mask); sa.sa_flags = SA_SIGINFO | SA_RESTART; @@ -728,13 +1013,13 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) sigemptyset(&new); sigaddset(&new, signal); - sigprocmask(SIG_BLOCK, &new, &old); + thread_sigmask(SIG_BLOCK, &new, &old); sigemptyset(&new); sigaddset_blockable(&new); - FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%d\n", - interrupt_low_level_handlers[signal])); + FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%x\n", + (unsigned int)data->interrupt_low_level_handlers[signal])); if (data->interrupt_low_level_handlers[signal]==0) { if (ARE_SAME_HANDLER(handler, SIG_DFL) || ARE_SAME_HANDLER(handler, SIG_IGN)) { @@ -754,7 +1039,7 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) oldhandler = data->interrupt_handlers[signal]; data->interrupt_handlers[signal].c = handler; - sigprocmask(SIG_SETMASK, &old, 0); + thread_sigmask(SIG_SETMASK, &old, 0); FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal)); @@ -766,6 +1051,9 @@ interrupt_init() { int i; SHOW("entering interrupt_init()"); + sigemptyset(&blockable_sigset); + sigaddset_blockable(&blockable_sigset); + global_interrupt_data=calloc(sizeof(struct interrupt_data), 1); /* Set up high level handler information. */
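/* Editorial sketch, not from the SBCL sources: the sigaction pattern used
 * by undoably_install_low_level_interrupt_handler() and install_handler()
 * above -- a three-argument SA_SIGINFO handler, all deferrable signals
 * masked while it runs, and SA_RESTART so interrupted system calls are
 * resumed.  install_siginfo_handler is an illustrative name only. */
#include <signal.h>
#include <string.h>

static void
install_siginfo_handler(int sig,
                        void (*handler)(int, siginfo_t *, void *),
                        const sigset_t *mask_while_handling)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = handler;
    sa.sa_mask = *mask_while_handling;   /* e.g. the blockable signals */
    sa.sa_flags = SA_SIGINFO | SA_RESTART;
    sigaction(sig, &sa, 0);
}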