X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2Fruntime%2Finterrupt.c;h=ac1abcf122649e032ec8d7fe84c2c96310da2f51;hb=d8e682fdfb7e8ba067e15aea0f3d1f8d37ca9eb1;hp=c5e70aaec181f0b0fe21217289a16e5595c905f9;hpb=ee61a0d8cefb5ccfba266a1e0407415adb88b150;p=sbcl.git diff --git a/src/runtime/interrupt.c b/src/runtime/interrupt.c index c5e70aa..ac1abcf 100644 --- a/src/runtime/interrupt.c +++ b/src/runtime/interrupt.c @@ -13,14 +13,45 @@ * files for more information. */ + +/* As far as I can tell, what's going on here is: + * + * In the case of most signals, when Lisp asks us to handle the + * signal, the outermost handler (the one actually passed to UNIX) is + * either interrupt_handle_now(..) or maybe_now_maybe_later(..). + * In that case, the Lisp-level handler is stored in interrupt_handlers[..] + * and interrupt_low_level_handlers[..] is cleared. + * + * However, some signals need special handling, e.g. + * + * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the + * garbage collector to detect violations of write protection, + * because some cases of such signals (e.g. GC-related violations of + * write protection) are handled at C level and never passed on to + * Lisp. For such signals, we still store any Lisp-level handler + * in interrupt_handlers[..], but for the outermost handle we use + * the value from interrupt_low_level_handlers[..], instead of the + * ordinary interrupt_handle_now(..) or interrupt_handle_later(..). + * + * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints, + * pseudo-atomic sections, and some classes of error (e.g. "function + * not defined"). This never goes anywhere near the Lisp handlers at all. + * See runtime/alpha-arch.c and code/signal.lisp + * + * - WHN 20000728, dan 20010128 */ + + #include #include #include #include +#include +#include +#include +#include "sbcl.h" #include "runtime.h" #include "arch.h" -#include "sbcl.h" #include "os.h" #include "interrupt.h" #include "globals.h" @@ -31,6 +62,20 @@ #include "alloc.h" #include "dynbind.h" #include "interr.h" +#include "genesis/fdefn.h" +#include "genesis/simple-fun.h" +#include "genesis/cons.h" + + + +void run_deferred_handler(struct interrupt_data *data, void *v_context) ; +static void store_signal_data_for_later (struct interrupt_data *data, + void *handler, int signal, + siginfo_t *info, + os_context_t *context); +boolean interrupt_maybe_gc_int(int signal, siginfo_t *info, void *v_context); + +extern volatile lispobj all_threads_lock; void sigaddset_blockable(sigset_t *s) { @@ -51,6 +96,38 @@ void sigaddset_blockable(sigset_t *s) sigaddset(s, SIGWINCH); sigaddset(s, SIGUSR1); sigaddset(s, SIGUSR2); +#ifdef LISP_FEATURE_SB_THREAD + sigaddset(s, SIG_STOP_FOR_GC); + sigaddset(s, SIG_INTERRUPT_THREAD); +#endif +} + +static sigset_t blockable_sigset; + +inline static void check_blockables_blocked_or_lose() +{ + /* Get the current sigmask, by blocking the empty set. */ + sigset_t empty,current; + int i; + sigemptyset(&empty); + thread_sigmask(SIG_BLOCK, &empty, ¤t); + for(i=0;i>2; - /* FIXME: Ick! Why use abstract "make_fixnum" in some places if - * you're going to convert from fixnum by bare >>2 in other - * places? Use fixnum_value(..) here, and look for other places - * which do bare >> and << for fixnum_value and make_fixnum. 
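     * As a sketch (assuming the 2-bit fixnum tag of the 32-bit ports;
     * the real macros live elsewhere in the runtime, roughly):
     *
     *     #define fixnum_value(n) (((long)(n)) >> 2)
     *     #define make_fixnum(n)  ((lispobj)((n) << 2))
     *
     * so SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX)>>2 and
     * fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX)) compute
     * the same index, but only the latter survives a change of tag
     * width.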
*/ - + context_index = + fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread)); + if (context_index >= MAX_INTERRUPTS) { lose("maximum interrupt nesting depth (%d) exceeded", MAX_INTERRUPTS); } bind_variable(FREE_INTERRUPT_CONTEXT_INDEX, - make_fixnum(context_index + 1)); + make_fixnum(context_index + 1),thread); - lisp_interrupt_contexts[context_index] = context; + thread->interrupt_contexts[context_index] = context; /* no longer in Lisp now */ foreign_function_call_active = 1; } +/* blocks all blockable signals. If you are calling from a signal handler, + * the usual signal mask will be restored from the context when the handler + * finishes. Otherwise, be careful */ + void undo_fake_foreign_function_call(os_context_t *context) { + struct thread *thread=arch_os_get_current_thread(); /* Block all blockable signals. */ sigset_t block; sigemptyset(&block); sigaddset_blockable(&block); - sigprocmask(SIG_BLOCK, &block, 0); + thread_sigmask(SIG_BLOCK, &block, 0); /* going back into Lisp */ foreign_function_call_active = 0; - /* Undo dynamic binding. */ - /* ### Do I really need to unbind_to_here()? */ - /* FIXME: Is this to undo the binding of - * FREE_INTERRUPT_CONTEXT_INDEX? If so, we should say so. And - * perhaps yes, unbind_to_here() really would be clearer and less - * fragile.. */ - /* dan (2001.08.10) thinks the above supposition is probably correct */ - unbind(); + /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */ + unbind(thread); #ifdef reg_ALLOC /* Put the dynamic space free pointer back into the context. */ @@ -235,6 +287,7 @@ interrupt_internal_error(int signal, siginfo_t *info, os_context_t *context, { lispobj context_sap = 0; + check_blockables_blocked_or_lose(); fake_foreign_function_call(context); /* Allocate the SAP object while the interrupts are still @@ -243,11 +296,11 @@ interrupt_internal_error(int signal, siginfo_t *info, os_context_t *context, context_sap = alloc_sap(context); } - sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); if (internal_errors_enabled) { SHOW("in interrupt_internal_error"); -#if QSHOW +#ifdef QSHOW /* Display some rudimentary debugging information about the * error, so that even if the Lisp error handler gets badly * confused, we have a chance to determine what's going on. */ @@ -261,80 +314,47 @@ interrupt_internal_error(int signal, siginfo_t *info, os_context_t *context, * before the Lisp error handling mechanism is set up. */ lose("internal error too early in init, can't recover"); } - undo_fake_foreign_function_call(context); + undo_fake_foreign_function_call(context); /* blocks signals again */ if (continuable) { arch_skip_instruction(context); } } -/* This function handles pending interrupts. Note that in C/kernel - * terms we dealt with the signal already; we just haven't decided - * whether to call a Lisp handler or do a GC or something like that. 
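 * In the threaded rewrite below this per-signal bookkeeping moves from
 * C globals into the per-thread struct interrupt_data, which (going by
 * the way it is used in this file; the authoritative definition is in
 * interrupt.h) looks roughly like:
 *
 *     struct interrupt_data {
 *         union interrupt_handler interrupt_handlers[NSIG];
 *         void (*interrupt_low_level_handlers[NSIG])(int, siginfo_t*, void*);
 *         void (*pending_handler)(int, siginfo_t*, void*);
 *         int pending_signal;
 *         siginfo_t pending_info;
 *         sigset_t pending_mask;
 *     };
 *
 * with pending_handler == 0 meaning "nothing queued".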
- * If it helps, you can think of pending_{signal,mask,info} as a - * one-element queue of signals that we have acknowledged but not - * processed */ - void interrupt_handle_pending(os_context_t *context) { -#ifndef __i386__ - boolean were_in_lisp = !foreign_function_call_active; -#endif - - SetSymbolValue(INTERRUPT_PENDING, NIL); - - if (maybe_gc_pending) { - maybe_gc_pending = 0; -#ifndef __i386__ - if (were_in_lisp) -#endif - { - fake_foreign_function_call(context); - } - funcall0(SymbolFunction(MAYBE_GC)); -#ifndef __i386__ - if (were_in_lisp) -#endif - { - undo_fake_foreign_function_call(context); - } - } - - /* FIXME: This isn't very clear. It would be good to reverse - * engineer it and rewrite the code more clearly, or write a clear - * explanation of what's going on in the comments, or both. - * - * WHN's question 1a: How come we unconditionally copy from - * pending_mask into the context, and then test whether - * pending_signal is set? - * - * WHN's question 1b: If pending_signal wasn't set, how could - * pending_mask be valid? - * - * Dan Barlow's reply (sbcl-devel 2001-03-13): And the answer is - - * or appears to be - because interrupt_maybe_gc set it that way - * (look in the #ifndef __i386__ bit). We can't GC during a - * pseudo-atomic, so we set maybe_gc_pending=1 and - * arch_set_pseudo_atomic_interrupted(..) When we come out of - * pseudo_atomic we're marked as interrupted, so we call - * interrupt_handle_pending, which does the GC using the pending - * context (it needs a context so that it has registers to use as - * GC roots) then notices there's no actual interrupt handler to - * call, so doesn't. That's the second question [1b] answered, - * anyway. Why we still need to copy the pending_mask into the - * context given that we're now done with the context anyway, I - * couldn't say. */ -#if 0 - memcpy(os_context_sigmask_addr(context), &pending_mask, - 4 /* sizeof(sigset_t) */ ); -#endif - sigemptyset(&pending_mask); - if (pending_signal) { - int signal = pending_signal; - siginfo_t info; - memcpy(&info, &pending_info, sizeof(siginfo_t)); - pending_signal = 0; - interrupt_handle_now(signal, &info, context); + struct thread *thread; + struct interrupt_data *data; + + check_blockables_blocked_or_lose(); + check_interrupts_enabled_or_lose(context); + + thread=arch_os_get_current_thread(); + data=thread->interrupt_data; + + /* Pseudo atomic may trigger several times for a single interrupt, + * and while without-interrupts should not, a false trigger by + * pseudo-atomic may eat a pending handler even from + * without-interrupts. */ + if (data->pending_handler) { + + /* If we're here as the result of a pseudo-atomic as opposed + * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already + * NIL, because maybe_defer_handler sets + * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/ + SetSymbolValue(INTERRUPT_PENDING, NIL,thread); + + /* restore the saved signal mask from the original signal (the + * one that interrupted us during the critical section) into the + * os_context for the signal we're currently in the handler for. 
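         * (sigcopyset is not a POSIX call; as used in this patch it is
         * assumed to be a plain whole-set copy, something like
         *
         *     #define sigcopyset(new, old) memcpy((new), (old), sizeof(sigset_t))
         *
         * i.e. it overwrites rather than merges the destination mask.)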
+ * This should ensure that when we return from the handler the + * blocked signals are unblocked */ + sigcopyset(os_context_sigmask_addr(context), &data->pending_mask); + + sigemptyset(&data->pending_mask); + /* This will break on sparc linux: the deferred handler really wants + * to be called with a void_context */ + run_deferred_handler(data,(void *)context); } } @@ -357,10 +377,13 @@ void interrupt_handle_now(int signal, siginfo_t *info, void *void_context) { os_context_t *context = (os_context_t*)void_context; -#ifndef __i386__ + struct thread *thread=arch_os_get_current_thread(); +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) boolean were_in_lisp; #endif union interrupt_handler handler; + check_blockables_blocked_or_lose(); + check_interrupts_enabled_or_lose(context); #ifdef LISP_FEATURE_LINUX /* Under Linux on some architectures, we appear to have to restore @@ -368,13 +391,13 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) delivered we appear to have a null FPU control word. */ os_restore_fp_control(context); #endif - handler = interrupt_handlers[signal]; + handler = thread->interrupt_data->interrupt_handlers[signal]; if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) { return; } -#ifndef __i386__ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) were_in_lisp = !foreign_function_call_active; if (were_in_lisp) #endif @@ -396,15 +419,22 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) lose("no handler for signal %d in interrupt_handle_now(..)", signal); } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) { + /* Once we've decided what to do about contexts in a + * return-elsewhere world (the original context will no longer + * be available; should we copy it or was nobody using it anyway?) + * then we should convert this to return-elsewhere */ + + /* CMUCL comment said "Allocate the SAPs while the interrupts + * are still disabled.". I (dan, 2003.08.21) assume this is + * because we're not in pseudoatomic and allocation shouldn't + * be interrupted. In which case it's no longer an issue as + * all our allocation from C now goes through a PA wrapper, + * but still, doesn't hurt */ - /* Allocate the SAPs while the interrupts are still disabled. - * (FIXME: Why? This is the way it was done in CMU CL, and it - * even had the comment noting that this is the way it was - * done, but no motivation..) */ lispobj info_sap,context_sap = alloc_sap(context); info_sap = alloc_sap(info); /* Allow signals again. */ - sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); #ifdef QSHOW_SIGNALS SHOW("calling Lisp-level handler"); @@ -421,16 +451,16 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) #endif /* Allow signals again. */ - sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); (*handler.c)(signal, info, void_context); } -#ifndef __i386__ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) if (were_in_lisp) #endif { - undo_fake_foreign_function_call(context); + undo_fake_foreign_function_call(context); /* block signals again */ } #ifdef QSHOW_SIGNALS @@ -440,69 +470,209 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) #endif } +/* This is called at the end of a critical section if the indications + * are that some signal was deferred during the section. 
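 * In terms of the functions defined in this file the round trip is
 * roughly:
 *
 *     signal arrives inside WITHOUT-INTERRUPTS or pseudo-atomic
 *       -> maybe_defer_handler(interrupt_handle_now, data, sig, info, ctx)
 *            which calls store_signal_data_for_later() and raises
 *            INTERRUPT_PENDING or the pseudo-atomic-interrupted flag;
 *     the critical section ends and compiled code traps back into C
 *       -> interrupt_handle_pending(context)
 *       -> run_deferred_handler(data, context)
 *       -> the handler we put off, e.g. interrupt_handle_now().
 *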
Note that as + * far as C or the kernel is concerned we dealt with the signal + * already; we're just doing the Lisp-level processing now that we + * put off then */ + +void +run_deferred_handler(struct interrupt_data *data, void *v_context) { + /* The pending_handler may enable interrupts (see + * interrupt_maybe_gc_int) and then another interrupt may hit, + * overwrite interrupt_data, so reset the pending handler before + * calling it. Trust the handler to finish with the siginfo before + * enabling interrupts. */ + void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler; + data->pending_handler=0; + (*pending_handler)(data->pending_signal,&(data->pending_info), v_context); +} + +boolean +maybe_defer_handler(void *handler, struct interrupt_data *data, + int signal, siginfo_t *info, os_context_t *context) +{ + struct thread *thread=arch_os_get_current_thread(); + + check_blockables_blocked_or_lose(); + + if (SymbolValue(INTERRUPT_PENDING,thread) != NIL) + lose("interrupt already pending"); + /* If interrupts are disabled then INTERRUPT_PENDING is set and + * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo + * atomic section inside a without-interrupts. + */ + if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) { + store_signal_data_for_later(data,handler,signal,info,context); + SetSymbolValue(INTERRUPT_PENDING, T,thread); +#ifdef QSHOW_SIGNALS + FSHOW((stderr, + "/maybe_defer_handler(%x,%d),thread=%ld: deferred\n", + (unsigned int)handler,signal,thread->os_thread)); +#endif + return 1; + } + /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't + * actually use its argument for anything on x86, so this branch + * may succeed even when context is null (gencgc alloc()) */ + if ( +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + (!foreign_function_call_active) && +#endif + arch_pseudo_atomic_atomic(context)) { + store_signal_data_for_later(data,handler,signal,info,context); + arch_set_pseudo_atomic_interrupted(context); +#ifdef QSHOW_SIGNALS + FSHOW((stderr, + "/maybe_defer_handler(%x,%d),thread=%ld: deferred(PA)\n", + (unsigned int)handler,signal,thread->os_thread)); +#endif + return 1; + } +#ifdef QSHOW_SIGNALS + FSHOW((stderr, + "/maybe_defer_handler(%x,%d),thread=%ld: not deferred\n", + (unsigned int)handler,signal,thread->os_thread)); +#endif + return 0; +} + +static void +store_signal_data_for_later (struct interrupt_data *data, void *handler, + int signal, + siginfo_t *info, os_context_t *context) +{ + if (data->pending_handler) + lose("tried to overwrite pending interrupt handler %x with %x\n", + data->pending_handler, handler); + if (!handler) + lose("tried to defer null interrupt handler\n"); + data->pending_handler = handler; + data->pending_signal = signal; + if(info) + memcpy(&(data->pending_info), info, sizeof(siginfo_t)); + if(context) { + /* the signal mask in the context (from before we were + * interrupted) is copied to be restored when + * run_deferred_handler happens. 
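         * Concretely: if, say, SIGALRM is deferred while the thread was
         * running with only SIGPROF blocked, data->pending_mask ends up
         * holding that one-signal mask, and that is what the thread will
         * run under again once the deferred SIGALRM handler has been
         * dealt with.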
Then the usually-blocked + * signals are added to the mask in the context so that we are + * running with blocked signals when the handler returns */ + sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context)); + sigaddset_blockable(os_context_sigmask_addr(context)); + } +} + static void maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); + struct thread *thread=arch_os_get_current_thread(); + struct interrupt_data *data=thread->interrupt_data; +#ifdef LISP_FEATURE_LINUX + os_restore_fp_control(context); +#endif + if(maybe_defer_handler(interrupt_handle_now,data, + signal,info,context)) + return; + interrupt_handle_now(signal, info, context); +#ifdef LISP_FEATURE_DARWIN + /* Work around G5 bug */ + DARWIN_FIX_CONTEXT(context); +#endif +} + +static void +low_level_interrupt_handle_now(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context = (os_context_t*)void_context; + struct thread *thread=arch_os_get_current_thread(); #ifdef LISP_FEATURE_LINUX os_restore_fp_control(context); +#endif + check_blockables_blocked_or_lose(); + check_interrupts_enabled_or_lose(context); + (*thread->interrupt_data->interrupt_low_level_handlers[signal]) + (signal, info, void_context); +#ifdef LISP_FEATURE_DARWIN + /* Work around G5 bug */ + DARWIN_FIX_CONTEXT(context); +#endif +} + +static void +low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context = arch_os_get_context(&void_context); + struct thread *thread=arch_os_get_current_thread(); + struct interrupt_data *data=thread->interrupt_data; +#ifdef LISP_FEATURE_LINUX + os_restore_fp_control(context); #endif - - /* see comments at top of code/signal.lisp for what's going on here - * with INTERRUPTS_ENABLED/INTERRUPT_HANDLE_NOW - */ - if (SymbolValue(INTERRUPTS_ENABLED) == NIL) { - - /* FIXME: This code is exactly the same as the code in the - * other leg of the if(..), and should be factored out into - * a shared function. */ - pending_signal = signal; - memcpy(&pending_info, info, sizeof(siginfo_t)); - memcpy(&pending_mask, - os_context_sigmask_addr(context), - sizeof(sigset_t)); - sigaddset_blockable(os_context_sigmask_addr(context)); - SetSymbolValue(INTERRUPT_PENDING, T); - - } else if ( -#ifndef __i386__ - (!foreign_function_call_active) && -#endif - arch_pseudo_atomic_atomic(context)) { - - /* FIXME: It would probably be good to replace these bare - * memcpy(..) calls with calls to cpy_siginfo_t and - * cpy_sigset_t, so that we only have to get the sizeof - * expressions right in one place, and after that static type - * checking takes over. 
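         * (In the threaded rewrite this duplication disappears anyway:
         * both the WITHOUT-INTERRUPTS leg and the pseudo-atomic leg of
         * maybe_defer_handler() go through store_signal_data_for_later()
         * above, which does each copy in exactly one place.)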
*/ - pending_signal = signal; - memcpy(&pending_info, info, sizeof(siginfo_t)); - memcpy(&pending_mask, - os_context_sigmask_addr(context), - sizeof(sigset_t)); - sigaddset_blockable(os_context_sigmask_addr(context)); + if(maybe_defer_handler(low_level_interrupt_handle_now,data, + signal,info,context)) + return; + low_level_interrupt_handle_now(signal, info, context); +#ifdef LISP_FEATURE_DARWIN + /* Work around G5 bug */ + DARWIN_FIX_CONTEXT(context); +#endif +} - arch_set_pseudo_atomic_interrupted(context); +#ifdef LISP_FEATURE_SB_THREAD +void +sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context = arch_os_get_context(&void_context); + struct thread *thread=arch_os_get_current_thread(); + sigset_t ss; + int i; + + /* need the context stored so it can have registers scavenged */ + fake_foreign_function_call(context); + + sigemptyset(&ss); + for(i=1;istate!=STATE_STOPPING) { + lose("sig_stop_for_gc_handler: wrong thread state: %ld\n", + fixnum_value(thread->state)); + } + thread->state=STATE_STOPPED; - } else { - interrupt_handle_now(signal, info, context); + sigemptyset(&ss); sigaddset(&ss,SIG_STOP_FOR_GC); + sigwaitinfo(&ss,0); + if(thread->state!=STATE_STOPPED) { + lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n", + fixnum_value(thread->state)); } + thread->state=STATE_RUNNING; + + undo_fake_foreign_function_call(context); } - +#endif void interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); interrupt_handle_now(signal, info, context); +#ifdef LISP_FEATURE_DARWIN + DARWIN_FIX_CONTEXT(context); +#endif } /* * stuff to detect and handle hitting the GC trigger */ -#ifndef GENCGC /* since GENCGC has its own way to record trigger */ +#ifndef LISP_FEATURE_GENCGC +/* since GENCGC has its own way to record trigger */ static boolean gc_trigger_hit(int signal, siginfo_t *info, os_context_t *context) { @@ -516,162 +686,277 @@ gc_trigger_hit(int signal, siginfo_t *info, os_context_t *context) } #endif -/* and similarly for the control stack guard page */ +/* manipulate the signal context and stack such that when the handler + * returns, it will call function instead of whatever it was doing + * previously + */ + +#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) +int *context_eflags_addr(os_context_t *context); +#endif -boolean handle_control_stack_guard_triggered(os_context_t *context,void *addr) +extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs); +extern void post_signal_tramp(void); +void arrange_return_to_lisp_function(os_context_t *context, lispobj function) { - /* note the os_context hackery here. When the signal handler returns, - * it won't go back to what it was doing ... */ - if(addr>=CONTROL_STACK_GUARD_PAGE && - addr<(CONTROL_STACK_GUARD_PAGE+os_vm_page_size)) { - void *function; - /* we hit the end of the control stack. 
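         * (Its replacement, handle_guard_page_triggered() further down,
         * keeps this scheme but also arms a return guard page so the
         * protection can be re-established once the stack has unwound,
         * and reuses the same machinery for the undefined-alien page.)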
disable protection - * temporarily so the error handler has some headroom */ - protect_control_stack_guard_page(0); - - function= - &(((struct simple_fun *) - native_pointer(SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR))) - ->code); - - /* Build a stack frame showing `interrupted' so that the - * user's backtrace makes (as much) sense (as usual) */ - build_fake_control_stack_frames(context); - /* signal handler will "return" to this error-causing function */ - *os_context_pc_addr(context) = function; +#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) + void * fun=native_pointer(function); + void *code = &(((struct simple_fun *) fun)->code); +#endif + + /* Build a stack frame showing `interrupted' so that the + * user's backtrace makes (as much) sense (as usual) */ + + /* FIXME: what about restoring fp state? */ + /* FIXME: what about restoring errno? */ #ifdef LISP_FEATURE_X86 - /* this much of the calling convention is common to all - non-x86 ports */ - *os_context_register_addr(context,reg_ECX) = 0; + /* Suppose the existence of some function that saved all + * registers, called call_into_lisp, then restored GP registers and + * returned. It would look something like this: + + push ebp + mov ebp esp + pushfl + pushal + push $0 + push $0 + pushl {address of function to call} + call 0x8058db0 + addl $12,%esp + popal + popfl + leave + ret + + * What we do here is set up the stack that call_into_lisp would + * expect to see if it had been called by this code, and frob the + * signal context so that signal return goes directly to call_into_lisp, + * and when that function (and the lisp function it invoked) returns, + * it returns to the second half of this imaginary function which + * restores all registers and returns to C + + * For this to work, the latter part of the imaginary function + * must obviously exist in reality. That would be post_signal_tramp + */ + + u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP); + + *(sp-15) = post_signal_tramp; /* return address for call_into_lisp */ + *(sp-14) = function; /* args for call_into_lisp : function*/ + *(sp-13) = 0; /* arg array */ + *(sp-12) = 0; /* no. 
args */ + /* this order matches that used in POPAD */ + *(sp-11)=*os_context_register_addr(context,reg_EDI); + *(sp-10)=*os_context_register_addr(context,reg_ESI); + + *(sp-9)=*os_context_register_addr(context,reg_ESP)-8; + /* POPAD ignores the value of ESP: */ + *(sp-8)=0; + *(sp-7)=*os_context_register_addr(context,reg_EBX); + + *(sp-6)=*os_context_register_addr(context,reg_EDX); + *(sp-5)=*os_context_register_addr(context,reg_ECX); + *(sp-4)=*os_context_register_addr(context,reg_EAX); + *(sp-3)=*context_eflags_addr(context); + *(sp-2)=*os_context_register_addr(context,reg_EBP); + *(sp-1)=*os_context_pc_addr(context); + +#elif defined(LISP_FEATURE_X86_64) + u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP); + *(sp-20) = post_signal_tramp; /* return address for call_into_lisp */ + + *(sp-19)=*os_context_register_addr(context,reg_R15); + *(sp-18)=*os_context_register_addr(context,reg_R14); + *(sp-17)=*os_context_register_addr(context,reg_R13); + *(sp-16)=*os_context_register_addr(context,reg_R12); + *(sp-15)=*os_context_register_addr(context,reg_R11); + *(sp-14)=*os_context_register_addr(context,reg_R10); + *(sp-13)=*os_context_register_addr(context,reg_R9); + *(sp-12)=*os_context_register_addr(context,reg_R8); + *(sp-11)=*os_context_register_addr(context,reg_RDI); + *(sp-10)=*os_context_register_addr(context,reg_RSI); + *(sp-9)=*os_context_register_addr(context,reg_RSP)-16; + *(sp-8)=0; + *(sp-7)=*os_context_register_addr(context,reg_RBX); + *(sp-6)=*os_context_register_addr(context,reg_RDX); + *(sp-5)=*os_context_register_addr(context,reg_RCX); + *(sp-4)=*os_context_register_addr(context,reg_RAX); + *(sp-3)=*context_eflags_addr(context); + *(sp-2)=*os_context_register_addr(context,reg_RBP); + *(sp-1)=*os_context_pc_addr(context); + + *os_context_register_addr(context,reg_RDI) = function; /* function */ + *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */ + *os_context_register_addr(context,reg_RDX) = 0; /* no. args */ +#else + struct thread *th=arch_os_get_current_thread(); + build_fake_control_stack_frames(th,context); +#endif + +#ifdef LISP_FEATURE_X86 + *os_context_pc_addr(context) = call_into_lisp; + *os_context_register_addr(context,reg_ECX) = 0; + *os_context_register_addr(context,reg_EBP) = sp-2; +#ifdef __NetBSD__ + *os_context_register_addr(context,reg_UESP) = sp-15; +#else + *os_context_register_addr(context,reg_ESP) = sp-15; +#endif +#elif defined(LISP_FEATURE_X86_64) + *os_context_pc_addr(context) = call_into_lisp; + *os_context_register_addr(context,reg_RCX) = 0; + *os_context_register_addr(context,reg_RBP) = sp-2; + *os_context_register_addr(context,reg_RSP) = sp-20; #else - *os_context_register_addr(context,reg_NARGS) = 0; - *os_context_register_addr(context,reg_LIP) = function; - *os_context_register_addr(context,reg_CFP) = - current_control_frame_pointer; + /* this much of the calling convention is common to all + non-x86 ports */ + *os_context_pc_addr(context) = code; + *os_context_register_addr(context,reg_NARGS) = 0; + *os_context_register_addr(context,reg_LIP) = code; + *os_context_register_addr(context,reg_CFP) = + current_control_frame_pointer; #endif #ifdef ARCH_HAS_NPC_REGISTER - *os_context_npc_addr(context) = - 4 + *os_context_pc_addr(context); + *os_context_npc_addr(context) = + 4 + *os_context_pc_addr(context); #endif #ifdef LISP_FEATURE_SPARC - /* Bletch. This is a feature of the SPARC calling convention, - which sadly I'm not going to go into in large detail here, - as I don't know it well enough. 
Suffice to say that if the - line + *os_context_register_addr(context,reg_CODE) = + fun + FUN_POINTER_LOWTAG; +#endif +} - (INST MOVE CODE-TN FUNCTION) +#ifdef LISP_FEATURE_SB_THREAD +void interrupt_thread_handler(int num, siginfo_t *info, void *v_context) +{ + os_context_t *context = (os_context_t*)arch_os_get_context(&v_context); + /* The order of interrupt execution is peculiar. If thread A + * interrupts thread B with I1, I2 and B for some reason recieves + * I1 when FUN2 is already on the list, then it is FUN2 that gets + * to run first. But when FUN2 is run SIG_INTERRUPT_THREAD is + * enabled again and I2 hits pretty soon in FUN2 and run + * FUN1. This is of course just one scenario, and the order of + * thread interrupt execution is undefined. */ + struct thread *th=arch_os_get_current_thread(); + struct cons *c; + get_spinlock(&th->interrupt_fun_lock,(long)th); + c=((struct cons *)native_pointer(th->interrupt_fun)); + arrange_return_to_lisp_function(context,c->car); + th->interrupt_fun=(lispobj *)(c->cdr); + release_spinlock(&th->interrupt_fun_lock); +} - in compiler/sparc/call.lisp is changed, then this bit can - probably go away. -- CSR, 2002-07-24 */ - *os_context_register_addr(context,reg_CODE) = - function - SIMPLE_FUN_CODE_OFFSET; #endif + +/* KLUDGE: Theoretically the approach we use for undefined alien + * variables should work for functions as well, but on PPC/Darwin + * we get bus error at bogus addresses instead, hence this workaround, + * that has the added benefit of automatically discriminating between + * functions and variables. + */ +void undefined_alien_function() { + funcall0(SymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR)); +} + +boolean handle_guard_page_triggered(os_context_t *context,void *addr){ + struct thread *th=arch_os_get_current_thread(); + + /* note the os_context hackery here. When the signal handler returns, + * it won't go back to what it was doing ... */ + if(addr >= CONTROL_STACK_GUARD_PAGE(th) && + addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) { + /* We hit the end of the control stack: disable guard page + * protection so the error handler has some headroom, protect the + * previous page so that we can catch returns from the guard page + * and restore it. */ + protect_control_stack_guard_page(th->os_thread,0); + protect_control_stack_return_guard_page(th->os_thread,1); + + arrange_return_to_lisp_function + (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR)); + return 1; + } + else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) && + addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) { + /* We're returning from the guard page: reprotect it, and + * unprotect this one. This works even if we somehow missed + * the return-guard-page, and hit it on our way to new + * exhaustion instead. */ + protect_control_stack_guard_page(th->os_thread,1); + protect_control_stack_return_guard_page(th->os_thread,0); + return 1; + } + else if (addr >= undefined_alien_address && + addr < undefined_alien_address + os_vm_page_size) { + arrange_return_to_lisp_function + (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR)); return 1; } else return 0; } -#ifndef __i386__ -/* This function gets called from the SIGSEGV (for e.g. Linux or +#ifndef LISP_FEATURE_GENCGC +/* This function gets called from the SIGSEGV (for e.g. Linux, NetBSD, & * OpenBSD) or SIGBUS (for e.g. FreeBSD) handler. Here we check * whether the signal was due to treading on the mprotect()ed zone - * and if so, arrange for a GC to happen. 
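 * The trigger itself (cheneygc only, hence the #ifndef): allocation
 * advances the free pointer towards a page that set_auto_gc_trigger()
 * has mprotect()ed, the first write past it faults, and
 * gc_trigger_hit() -- whose body is not shown in this hunk -- is
 * assumed to boil down to something like
 *
 *     return current_auto_gc_trigger
 *         && arch_get_bad_addr(signal, info, context)
 *              >= (os_vm_address_t)current_auto_gc_trigger;
 *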
*/ +extern unsigned long bytes_consed_between_gcs; /* gc-common.c */ + boolean interrupt_maybe_gc(int signal, siginfo_t *info, void *void_context) { os_context_t *context=(os_context_t *) void_context; - - if (!foreign_function_call_active -#ifndef GENCGC /* since GENCGC has its own way to record trigger */ - && gc_trigger_hit(signal, info, context) -#endif - ) { -#ifndef GENCGC /* since GENCGC has its own way to record trigger */ - clear_auto_gc_trigger(); -#endif - - if (arch_pseudo_atomic_atomic(context)) { - /* don't GC during an atomic operation. Instead, copy the - * signal mask somewhere safe. interrupt_handle_pending - * will detect pending_signal==0 and know to do a GC with the - * signal context instead of calling a Lisp-level handler */ - maybe_gc_pending = 1; - if (pending_signal == 0) { - /* FIXME: This copy-pending_mask-then-sigaddset_blockable - * idiom occurs over and over. It should be factored out - * into a function with a descriptive name. */ - memcpy(&pending_mask, - os_context_sigmask_addr(context), - sizeof(sigset_t)); - sigaddset_blockable(os_context_sigmask_addr(context)); - } - arch_set_pseudo_atomic_interrupted(context); - } - else { - lispobj *old_free_space=current_dynamic_space; - fake_foreign_function_call(context); - funcall0(SymbolFunction(MAYBE_GC)); - undo_fake_foreign_function_call(context); - if(current_dynamic_space==old_free_space) - /* MAYBE-GC (as the name suggest) might not. If it - * doesn't, it won't reset the GC trigger either, so we - * have to do it ourselves. Put it near the end of - * dynamic space so we're not running into it continually - */ - set_auto_gc_trigger(DYNAMIC_SPACE_SIZE - -(u32)os_vm_page_size); - } - return 1; - } else { - return 0; + struct thread *th=arch_os_get_current_thread(); + struct interrupt_data *data= + th ? th->interrupt_data : global_interrupt_data; + + if(!data->pending_handler && !foreign_function_call_active && + gc_trigger_hit(signal, info, context)){ + clear_auto_gc_trigger(); + if(!maybe_defer_handler(interrupt_maybe_gc_int, + data,signal,info,void_context)) + interrupt_maybe_gc_int(signal,info,void_context); + return 1; } + return 0; } + #endif + +/* this is also used by gencgc, in alloc() */ +boolean +interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context) +{ + os_context_t *context=(os_context_t *) void_context; + + check_blockables_blocked_or_lose(); + fake_foreign_function_call(context); + + /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in + * which case we will be running with no gc trigger barrier + * thing for a while. But it shouldn't be long until the end + * of WITHOUT-GCING. + * + * FIXME: It would be good to protect the end of dynamic space + * and signal a storage condition from there. 
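     * The "also used by gencgc, in alloc()" case noted above is the C
     * allocator noticing it has passed the GC trigger while it is
     * pseudo-atomic; that call is assumed to look roughly like
     *
     *     maybe_defer_handler(interrupt_maybe_gc_int, data, 0, 0, 0);
     *
     * with null info/context, which works because the pseudo-atomic
     * test ignores its argument on x86 (see maybe_defer_handler), so
     * the request is simply deferred and the GC runs later from
     * interrupt_handle_pending() with a real context.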
+ */ + + /* restore the signal mask from the interrupted context before + * calling into Lisp */ + if (context) + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + + funcall0(SymbolFunction(SUB_GC)); + + undo_fake_foreign_function_call(context); + return 1; +} + /* * noise to install handlers */ -/* - * what low-level signal handlers looked like before - * undoably_install_low_level_interrupt_handler() got involved - */ -struct low_level_signal_handler_state { - int was_modified; - void (*handler)(int, siginfo_t*, void*); -} old_low_level_signal_handler_states[NSIG]; - -void -uninstall_low_level_interrupt_handlers_atexit(void) -{ - int signal; - for (signal = 0; signal < NSIG; ++signal) { - struct low_level_signal_handler_state - *old_low_level_signal_handler_state = - old_low_level_signal_handler_states + signal; - if (old_low_level_signal_handler_state->was_modified) { - struct sigaction sa; - sa.sa_sigaction = old_low_level_signal_handler_state->handler; - sigemptyset(&sa.sa_mask); - sa.sa_flags = SA_SIGINFO | SA_RESTART; - sigaction(signal, &sa, NULL); - } - } -} - -/* Undoably install a special low-level handler for signal; or if - * handler is SIG_DFL, remove any special handling for signal. - * - * The "undoably" aspect is because we also arrange with atexit() for - * the handler to be restored to its old value. This is for tidiness: - * it shouldn't matter much ordinarily, but it does remove a window - * where e.g. memory fault signals (SIGSEGV or SIGBUS, which in - * ordinary operation of SBCL are sent to the generational garbage - * collector, then possibly onward to Lisp code) or SIGINT (which is - * ordinarily passed to Lisp code) could otherwise be handled - * bizarrely/brokenly because the Lisp code would try to deal with - * them using machinery (like stream output buffers) which has already - * been dismantled. */ void undoably_install_low_level_interrupt_handler (int signal, void handler(int, @@ -679,43 +964,33 @@ undoably_install_low_level_interrupt_handler (int signal, void*)) { struct sigaction sa; - struct low_level_signal_handler_state *old_low_level_signal_handler_state = - old_low_level_signal_handler_states + signal; + struct thread *th=arch_os_get_current_thread(); + struct interrupt_data *data= + th ? th->interrupt_data : global_interrupt_data; if (0 > signal || signal >= NSIG) { lose("bad signal number %d", signal); } - sa.sa_sigaction = handler; + if (sigismember(&blockable_sigset,signal)) + sa.sa_sigaction = low_level_maybe_now_maybe_later; + else + sa.sa_sigaction = handler; + sigemptyset(&sa.sa_mask); sigaddset_blockable(&sa.sa_mask); sa.sa_flags = SA_SIGINFO | SA_RESTART; #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK - /* Signal handlers are run on the control stack, so if it is exhausted - * we had better use an alternate stack for whatever signal tells us - * we've exhausted it */ - if(signal==SIG_MEMORY_FAULT) { - stack_t sigstack; - sigstack.ss_sp=(void *) ALTERNATE_SIGNAL_STACK_START; - sigstack.ss_flags=0; - sigstack.ss_size = SIGSTKSZ; - sigaltstack(&sigstack,0); - sa.sa_flags|=SA_ONSTACK; - } + if((signal==SIG_MEMORY_FAULT) +#ifdef SIG_INTERRUPT_THREAD + || (signal==SIG_INTERRUPT_THREAD) +#endif + ) + sa.sa_flags|= SA_ONSTACK; #endif - /* In the case of interrupt handlers which are modified more than - * once, we only save the original unmodified copy. 
*/ - if (!old_low_level_signal_handler_state->was_modified) { - struct sigaction *old_handler = - (struct sigaction*) &old_low_level_signal_handler_state->handler; - old_low_level_signal_handler_state->was_modified = 1; - sigaction(signal, &sa, old_handler); - } else { - sigaction(signal, &sa, NULL); - } - - interrupt_low_level_handlers[signal] = + sigaction(signal, &sa, NULL); + data->interrupt_low_level_handlers[signal] = (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler); } @@ -726,19 +1001,22 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) struct sigaction sa; sigset_t old, new; union interrupt_handler oldhandler; + struct thread *th=arch_os_get_current_thread(); + struct interrupt_data *data= + th ? th->interrupt_data : global_interrupt_data; FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal)); sigemptyset(&new); sigaddset(&new, signal); - sigprocmask(SIG_BLOCK, &new, &old); + thread_sigmask(SIG_BLOCK, &new, &old); sigemptyset(&new); sigaddset_blockable(&new); - FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%d\n", - interrupt_low_level_handlers[signal])); - if (interrupt_low_level_handlers[signal]==0) { + FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%x\n", + (unsigned int)data->interrupt_low_level_handlers[signal])); + if (data->interrupt_low_level_handlers[signal]==0) { if (ARE_SAME_HANDLER(handler, SIG_DFL) || ARE_SAME_HANDLER(handler, SIG_IGN)) { sa.sa_sigaction = handler; @@ -751,14 +1029,13 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) sigemptyset(&sa.sa_mask); sigaddset_blockable(&sa.sa_mask); sa.sa_flags = SA_SIGINFO | SA_RESTART; - sigaction(signal, &sa, NULL); } - oldhandler = interrupt_handlers[signal]; - interrupt_handlers[signal].c = handler; + oldhandler = data->interrupt_handlers[signal]; + data->interrupt_handlers[signal].c = handler; - sigprocmask(SIG_SETMASK, &old, 0); + thread_sigmask(SIG_SETMASK, &old, 0); FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal)); @@ -766,18 +1043,18 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) } void -interrupt_init(void) +interrupt_init() { int i; - SHOW("entering interrupt_init()"); - - /* Set up for recovery from any installed low-level handlers. */ - atexit(&uninstall_low_level_interrupt_handlers_atexit); + sigemptyset(&blockable_sigset); + sigaddset_blockable(&blockable_sigset); + + global_interrupt_data=calloc(sizeof(struct interrupt_data), 1); /* Set up high level handler information. */ for (i = 0; i < NSIG; i++) { - interrupt_handlers[i].c = + global_interrupt_data->interrupt_handlers[i].c = /* (The cast here blasts away the distinction between * SA_SIGACTION-style three-argument handlers and * signal(..)-style one-argument handlers, which is OK
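             * For reference, the union being assigned here is (roughly,
             * from interrupt.h):
             *
             *     union interrupt_handler {
             *         lispobj lisp;
             *         void (*c)(int, siginfo_t*, void*);
             *     };
             *
             * and interrupt_handle_now() tells the two cases apart by
             * checking lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG
             * before deciding whether to funcall the Lisp object or
             * simply call handler.c.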