X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Finterrupt.c;h=584cee2d21a3daa99f01988b3750a0e137aae0b8;hb=f815f89eb5b1a7d5e6fefaf5b19321d8870931f9;hp=79275ef2854d40b59b60d41235cb9e1af8f6ca8c;hpb=c0d30332957c6f0cab8c0a3670f2903546fc0ad8;p=sbcl.git diff --git a/src/runtime/interrupt.c b/src/runtime/interrupt.c index 79275ef..584cee2 100644 --- a/src/runtime/interrupt.c +++ b/src/runtime/interrupt.c @@ -40,16 +40,18 @@ * * - WHN 20000728, dan 20010128 */ +#include "sbcl.h" #include #include #include #include #include +#ifndef LISP_FEATURE_WIN32 #include +#endif #include -#include "sbcl.h" #include "runtime.h" #include "arch.h" #include "os.h" @@ -57,7 +59,6 @@ #include "globals.h" #include "lispregs.h" #include "validate.h" -#include "monitor.h" #include "gc.h" #include "alloc.h" #include "dynbind.h" @@ -66,16 +67,15 @@ #include "genesis/simple-fun.h" #include "genesis/cons.h" - - -void run_deferred_handler(struct interrupt_data *data, void *v_context) ; +static void run_deferred_handler(struct interrupt_data *data, void *v_context); +#ifndef LISP_FEATURE_WIN32 static void store_signal_data_for_later (struct interrupt_data *data, void *handler, int signal, siginfo_t *info, os_context_t *context); -boolean interrupt_maybe_gc_int(int signal, siginfo_t *info, void *v_context); -void sigaddset_deferrable(sigset_t *s) +void +sigaddset_deferrable(sigset_t *s) { sigaddset(s, SIGHUP); sigaddset(s, SIGINT); @@ -83,26 +83,35 @@ void sigaddset_deferrable(sigset_t *s) sigaddset(s, SIGPIPE); sigaddset(s, SIGALRM); sigaddset(s, SIGURG); - sigaddset(s, SIGFPE); sigaddset(s, SIGTSTP); sigaddset(s, SIGCHLD); sigaddset(s, SIGIO); +#ifndef LISP_FEATURE_HPUX sigaddset(s, SIGXCPU); sigaddset(s, SIGXFSZ); +#endif sigaddset(s, SIGVTALRM); sigaddset(s, SIGPROF); sigaddset(s, SIGWINCH); + +#if !((defined(LISP_FEATURE_DARWIN) || defined(LISP_FEATURE_FREEBSD)) && defined(LISP_FEATURE_SB_THREAD)) sigaddset(s, SIGUSR1); sigaddset(s, SIGUSR2); +#endif + #ifdef 
LISP_FEATURE_SB_THREAD sigaddset(s, SIG_INTERRUPT_THREAD); #endif } -void sigaddset_blockable(sigset_t *s) +void +sigaddset_blockable(sigset_t *s) { sigaddset_deferrable(s); #ifdef LISP_FEATURE_SB_THREAD +#ifdef SIG_RESUME_FROM_GC + sigaddset(s, SIG_RESUME_FROM_GC); +#endif sigaddset(s, SIG_STOP_FOR_GC); #endif } @@ -110,31 +119,46 @@ void sigaddset_blockable(sigset_t *s) /* initialized in interrupt_init */ static sigset_t deferrable_sigset; static sigset_t blockable_sigset; +#endif -inline static void check_blockables_blocked_or_lose() +void +check_blockables_blocked_or_lose(void) { +#if !defined(LISP_FEATURE_WIN32) /* Get the current sigmask, by blocking the empty set. */ sigset_t empty,current; int i; sigemptyset(&empty); thread_sigmask(SIG_BLOCK, &empty, ¤t); - for(i=0;i= MAX_INTERRUPTS) { - lose("maximum interrupt nesting depth (%d) exceeded", MAX_INTERRUPTS); + lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS); } bind_variable(FREE_INTERRUPT_CONTEXT_INDEX, @@ -261,14 +313,14 @@ fake_foreign_function_call(os_context_t *context) thread->interrupt_contexts[context_index] = context; - /* no longer in Lisp now */ +#ifdef FOREIGN_FUNCTION_CALL_FLAG foreign_function_call_active = 1; +#endif } /* blocks all blockable signals. If you are calling from a signal handler, * the usual signal mask will be restored from the context when the handler * finishes. Otherwise, be careful */ - void undo_fake_foreign_function_call(os_context_t *context) { @@ -276,8 +328,9 @@ undo_fake_foreign_function_call(os_context_t *context) /* Block all blockable signals. */ block_blockable_signals(); - /* going back into Lisp */ +#ifdef FOREIGN_FUNCTION_CALL_FLAG foreign_function_call_active = 0; +#endif /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */ unbind(thread); @@ -285,69 +338,98 @@ undo_fake_foreign_function_call(os_context_t *context) #ifdef reg_ALLOC /* Put the dynamic space free pointer back into the context. 
*/ *os_context_register_addr(context, reg_ALLOC) = - (unsigned long) dynamic_space_free_pointer; + (unsigned long) dynamic_space_free_pointer + | (*os_context_register_addr(context, reg_ALLOC) + & LOWTAG_MASK); + /* + ((unsigned long)(*os_context_register_addr(context, reg_ALLOC)) + & ~LOWTAG_MASK) + | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK); + */ #endif } /* a handler for the signal caused by execution of a trap opcode * signalling an internal error */ void -interrupt_internal_error(int signal, siginfo_t *info, os_context_t *context, - boolean continuable) +interrupt_internal_error(os_context_t *context, boolean continuable) { - lispobj context_sap = 0; + lispobj context_sap; - check_blockables_blocked_or_lose(); fake_foreign_function_call(context); + if (!internal_errors_enabled) { + describe_internal_error(context); + /* There's no good way to recover from an internal error + * before the Lisp error handling mechanism is set up. */ + lose("internal error too early in init, can't recover\n"); + } + /* Allocate the SAP object while the interrupts are still * disabled. */ - if (internal_errors_enabled) { - context_sap = alloc_sap(context); - } + context_sap = alloc_sap(context); +#ifndef LISP_FEATURE_WIN32 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); +#endif - if (internal_errors_enabled) { - SHOW("in interrupt_internal_error"); + SHOW("in interrupt_internal_error"); #ifdef QSHOW - /* Display some rudimentary debugging information about the - * error, so that even if the Lisp error handler gets badly - * confused, we have a chance to determine what's going on. */ - describe_internal_error(context); + /* Display some rudimentary debugging information about the + * error, so that even if the Lisp error handler gets badly + * confused, we have a chance to determine what's going on. */ + describe_internal_error(context); #endif - funcall2(SymbolFunction(INTERNAL_ERROR), context_sap, - continuable ? 
T : NIL); - } else { - describe_internal_error(context); - /* There's no good way to recover from an internal error - * before the Lisp error handling mechanism is set up. */ - lose("internal error too early in init, can't recover"); - } + funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap, + continuable ? T : NIL); + undo_fake_foreign_function_call(context); /* blocks signals again */ - if (continuable) { + if (continuable) arch_skip_instruction(context); - } } void interrupt_handle_pending(os_context_t *context) { + /* There are three ways we can get here. First, if an interrupt + * occurs within pseudo-atomic, it will be deferred, and we'll + * trap to here at the end of the pseudo-atomic block. Second, if + * the GC (in alloc()) decides that a GC is required, it will set + * *GC-PENDING* and pseudo-atomic-interrupted, and alloc() is + * always called from within pseudo-atomic, and thus we end up + * here again. Third, when calling GC-ON or at the end of a + * WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to here if + * there is a pending GC. */ + + /* Win32 only needs to handle the GC cases (for now?) */ + struct thread *thread; - struct interrupt_data *data; + + /* Punt if in PA section, marking it as interrupted. This can + * happenat least if we pick up a GC request while in a + * WITHOUT-GCING with an outer PA -- it is not immediately clear + * to me that this should/could ever happen, but better safe then + * sorry. --NS 2007-05-15 */ + if (arch_pseudo_atomic_atomic(context)) { + arch_set_pseudo_atomic_interrupted(context); + return; + } + + thread = arch_os_get_current_thread(); + + FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n")); check_blockables_blocked_or_lose(); - thread=arch_os_get_current_thread(); - data=thread->interrupt_data; + /* If pseudo_atomic_interrupted is set then the interrupt is going + * to be handled now, ergo it's safe to clear it. 
*/ + arch_clear_pseudo_atomic_interrupted(context); if (SymbolValue(GC_INHIBIT,thread)==NIL) { #ifdef LISP_FEATURE_SB_THREAD if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) { - /* another thread has already initiated a gc, this attempt - * might as well be cancelled */ - SetSymbolValue(GC_PENDING,NIL,thread); - SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread); + /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by + * the signal handler if it actually stops us. */ sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context); } else #endif @@ -355,25 +437,22 @@ interrupt_handle_pending(os_context_t *context) /* GC_PENDING is cleared in SUB-GC, or if another thread * is doing a gc already we will get a SIG_STOP_FOR_GC and * that will clear it. */ - interrupt_maybe_gc_int(0,NULL,context); + maybe_gc(context); } check_blockables_blocked_or_lose(); } +#ifndef LISP_FEATURE_WIN32 /* we may be here only to do the gc stuff, if interrupts are * enabled run the pending handler */ - if (!((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) || - ( -#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) - (!foreign_function_call_active) && -#endif - arch_pseudo_atomic_atomic(context)))) { + if (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) { + struct interrupt_data *data = thread->interrupt_data; /* There may be no pending handler, because it was only a gc * that had to be executed or because pseudo atomic triggered * twice for a single interrupt. For the interested reader, * that may happen if an interrupt hits after the interrupted - * flag is cleared but before pseduo-atomic is set and a + * flag is cleared but before pseudo-atomic is set and a * pseudo atomic is interrupted in that interrupt. 
*/ if (data->pending_handler) { @@ -381,7 +460,7 @@ interrupt_handle_pending(os_context_t *context) * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already * NIL, because maybe_defer_handler sets * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/ - SetSymbolValue(INTERRUPT_PENDING, NIL,thread); + SetSymbolValue(INTERRUPT_PENDING, NIL, thread); /* restore the saved signal mask from the original signal (the * one that interrupted us during the critical section) into the @@ -396,6 +475,7 @@ interrupt_handle_pending(os_context_t *context) run_deferred_handler(data,(void *)context); } } +#endif } /* @@ -414,30 +494,34 @@ interrupt_handle_pending(os_context_t *context) */ void -interrupt_handle_now(int signal, siginfo_t *info, void *void_context) +interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context) { - os_context_t *context = (os_context_t*)void_context; - struct thread *thread=arch_os_get_current_thread(); -#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) +#ifdef FOREIGN_FUNCTION_CALL_FLAG boolean were_in_lisp; #endif union interrupt_handler handler; + check_blockables_blocked_or_lose(); - check_interrupts_enabled_or_lose(context); -#ifdef LISP_FEATURE_LINUX +#ifndef LISP_FEATURE_WIN32 + if (sigismember(&deferrable_sigset,signal)) + check_interrupts_enabled_or_lose(context); +#endif + +#if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT) /* Under Linux on some architectures, we appear to have to restore the FPU control word from the context, as after the signal is delivered we appear to have a null FPU control word. 
*/ os_restore_fp_control(context); #endif - handler = thread->interrupt_data->interrupt_handlers[signal]; + + handler = interrupt_handlers[signal]; if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) { return; } -#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) +#ifdef FOREIGN_FUNCTION_CALL_FLAG were_in_lisp = !foreign_function_call_active; if (were_in_lisp) #endif @@ -454,7 +538,7 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) /* This can happen if someone tries to ignore or default one * of the signals we need for runtime support, and the runtime * support decides to pass on it. */ - lose("no handler for signal %d in interrupt_handle_now(..)", signal); + lose("no handler for signal %d in interrupt_handle_now(..)\n", signal); } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) { /* Once we've decided what to do about contexts in a @@ -467,12 +551,26 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) * because we're not in pseudoatomic and allocation shouldn't * be interrupted. In which case it's no longer an issue as * all our allocation from C now goes through a PA wrapper, - * but still, doesn't hurt */ + * but still, doesn't hurt. + * + * Yeah, but non-gencgc platforms don't really wrap allocation + * in PA. MG - 2005-08-29 */ lispobj info_sap,context_sap = alloc_sap(context); info_sap = alloc_sap(info); - /* Allow signals again. */ - thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); + /* Leave deferrable signals blocked, the handler itself will + * allow signals again when it sees fit. 
*/ +#ifdef LISP_FEATURE_SB_THREAD + { + sigset_t unblock; + sigemptyset(&unblock); + sigaddset(&unblock, SIG_STOP_FOR_GC); +#ifdef SIG_RESUME_FROM_GC + sigaddset(&unblock, SIG_RESUME_FROM_GC); +#endif + thread_sigmask(SIG_UNBLOCK, &unblock, 0); + } +#endif FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n")); @@ -484,13 +582,14 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) FSHOW_SIGNAL((stderr,"/calling C-level handler\n")); +#ifndef LISP_FEATURE_WIN32 /* Allow signals again. */ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); - - (*handler.c)(signal, info, void_context); +#endif + (*handler.c)(signal, info, context); } -#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) +#ifdef FOREIGN_FUNCTION_CALL_FLAG if (were_in_lisp) #endif { @@ -507,18 +606,20 @@ interrupt_handle_now(int signal, siginfo_t *info, void *void_context) * far as C or the kernel is concerned we dealt with the signal * already; we're just doing the Lisp-level processing now that we * put off then */ - -void -run_deferred_handler(struct interrupt_data *data, void *v_context) { +static void +run_deferred_handler(struct interrupt_data *data, void *v_context) +{ /* The pending_handler may enable interrupts and then another * interrupt may hit, overwrite interrupt_data, so reset the * pending handler before calling it. Trust the handler to finish * with the siginfo before enabling interrupts. 
*/ void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler; + data->pending_handler=0; (*pending_handler)(data->pending_signal,&(data->pending_info), v_context); } +#ifndef LISP_FEATURE_WIN32 boolean maybe_defer_handler(void *handler, struct interrupt_data *data, int signal, siginfo_t *info, os_context_t *context) @@ -528,7 +629,7 @@ maybe_defer_handler(void *handler, struct interrupt_data *data, check_blockables_blocked_or_lose(); if (SymbolValue(INTERRUPT_PENDING,thread) != NIL) - lose("interrupt already pending"); + lose("interrupt already pending\n"); /* If interrupts are disabled then INTERRUPT_PENDING is set and * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo * atomic section inside a WITHOUT-INTERRUPTS. @@ -542,20 +643,10 @@ maybe_defer_handler(void *handler, struct interrupt_data *data, (unsigned long)thread->os_thread)); return 1; } - /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't + /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't * actually use its argument for anything on x86, so this branch * may succeed even when context is null (gencgc alloc()) */ - if ( -#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) - /* FIXME: this foreign_function_call_active test is dubious at - * best. If a foreign call is made in a pseudo atomic section - * (?) or more likely a pseudo atomic section is in a foreign - * call then an interrupt is executed immediately. Maybe it - * has to do with C code not maintaining pseudo atomic - * properly. 
MG - 2005-08-10 */ - (!foreign_function_call_active) && -#endif - arch_pseudo_atomic_atomic(context)) { + if (arch_pseudo_atomic_atomic(context)) { store_signal_data_for_later(data,handler,signal,info,context); arch_set_pseudo_atomic_interrupted(context); FSHOW_SIGNAL((stderr, @@ -585,6 +676,10 @@ store_signal_data_for_later (struct interrupt_data *data, void *handler, data->pending_signal = signal; if(info) memcpy(&(data->pending_info), info, sizeof(siginfo_t)); + + FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n", + signal)); + if(context) { /* the signal mask in the context (from before we were * interrupted) is copied to be restored when @@ -600,58 +695,44 @@ static void maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); - struct thread *thread=arch_os_get_current_thread(); - struct interrupt_data *data=thread->interrupt_data; -#ifdef LISP_FEATURE_LINUX + struct thread *thread = arch_os_get_current_thread(); + struct interrupt_data *data = thread->interrupt_data; + +#if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT) os_restore_fp_control(context); #endif - if(maybe_defer_handler(interrupt_handle_now,data,signal,info,context)) - return; - interrupt_handle_now(signal, info, context); -#ifdef LISP_FEATURE_DARWIN - /* Work around G5 bug */ - DARWIN_FIX_CONTEXT(context); -#endif + + if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context)) + interrupt_handle_now(signal, info, context); } static void -low_level_interrupt_handle_now(int signal, siginfo_t *info, void *void_context) +low_level_interrupt_handle_now(int signal, siginfo_t *info, + os_context_t *context) { - os_context_t *context = (os_context_t*)void_context; - struct thread *thread=arch_os_get_current_thread(); - struct interrupt_data *data=thread->interrupt_data; - -#ifdef LISP_FEATURE_LINUX - os_restore_fp_control(context); -#endif + /* No FP control fixage needed, 
caller has done that. */ check_blockables_blocked_or_lose(); check_interrupts_enabled_or_lose(context); - (*data->interrupt_low_level_handlers[signal]) - (signal, info, void_context); -#ifdef LISP_FEATURE_DARWIN - /* Work around G5 bug */ - DARWIN_FIX_CONTEXT(context); -#endif + interrupt_low_level_handlers[signal](signal, info, context); + /* No Darwin context fixage needed, caller does that. */ } static void low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); - struct thread *thread=arch_os_get_current_thread(); - struct interrupt_data *data=thread->interrupt_data; -#ifdef LISP_FEATURE_LINUX + struct thread *thread = arch_os_get_current_thread(); + struct interrupt_data *data = thread->interrupt_data; + +#if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT) os_restore_fp_control(context); #endif - if(maybe_defer_handler(low_level_interrupt_handle_now,data, - signal,info,context)) - return; - low_level_interrupt_handle_now(signal, info, context); -#ifdef LISP_FEATURE_DARWIN - /* Work around G5 bug */ - DARWIN_FIX_CONTEXT(context); -#endif + + if(!maybe_defer_handler(low_level_interrupt_handle_now,data, + signal,info,context)) + low_level_interrupt_handle_now(signal, info, context); } +#endif #ifdef LISP_FEATURE_SB_THREAD @@ -659,46 +740,72 @@ void sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); + struct thread *thread=arch_os_get_current_thread(); sigset_t ss; - int i; - if ((arch_pseudo_atomic_atomic(context) || - SymbolValue(GC_INHIBIT,thread) != NIL)) { + if (arch_pseudo_atomic_atomic(context)) { SetSymbolValue(STOP_FOR_GC_PENDING,T,thread); - if (SymbolValue(GC_INHIBIT,thread) == NIL) - arch_set_pseudo_atomic_interrupted(context); - FSHOW_SIGNAL((stderr,"thread=%lu sig_stop_for_gc deferred\n", + arch_set_pseudo_atomic_interrupted(context); + 
FSHOW_SIGNAL((stderr,"thread=%lu sig_stop_for_gc deferred (PA)\n", thread->os_thread)); - } else { - /* need the context stored so it can have registers scavenged */ - fake_foreign_function_call(context); + return; + } + else if (SymbolValue(GC_INHIBIT,thread) != NIL) { + SetSymbolValue(STOP_FOR_GC_PENDING,T,thread); + FSHOW_SIGNAL((stderr, + "thread=%lu sig_stop_for_gc deferred (*GC-INHIBIT*)\n", + thread->os_thread)); + return; + } - sigemptyset(&ss); - for(i=1;istate!=STATE_RUNNING) { - lose("sig_stop_for_gc_handler: wrong thread state: %ld\n", - fixnum_value(thread->state)); - } - thread->state=STATE_SUSPENDED; - FSHOW_SIGNAL((stderr,"thread=%lu suspended\n",thread->os_thread)); - - sigemptyset(&ss); sigaddset(&ss,SIG_STOP_FOR_GC); - sigwaitinfo(&ss,0); - FSHOW_SIGNAL((stderr,"thread=%lu resumed\n",thread->os_thread)); - if(thread->state!=STATE_RUNNING) { - lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n", - fixnum_value(thread->state)); - } + /* Not PA and GC not inhibited -- we can stop now. */ + + /* need the context stored so it can have registers scavenged */ + fake_foreign_function_call(context); - undo_fake_foreign_function_call(context); + /* Block everything. */ + sigfillset(&ss); + thread_sigmask(SIG_BLOCK,&ss,0); + + /* Not pending anymore. */ + SetSymbolValue(GC_PENDING,NIL,thread); + SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread); + + if(thread->state!=STATE_RUNNING) { + lose("sig_stop_for_gc_handler: wrong thread state: %ld\n", + fixnum_value(thread->state)); } + + thread->state=STATE_SUSPENDED; + FSHOW_SIGNAL((stderr,"thread=%lu suspended\n",thread->os_thread)); + + sigemptyset(&ss); +#if defined(SIG_RESUME_FROM_GC) + sigaddset(&ss,SIG_RESUME_FROM_GC); +#else + sigaddset(&ss,SIG_STOP_FOR_GC); +#endif + + /* It is possible to get SIGCONT (and probably other non-blockable + * signals) here. 
*/ +#ifdef SIG_RESUME_FROM_GC + { + int sigret; + do { sigwait(&ss, &sigret); } + while (sigret != SIG_RESUME_FROM_GC); + } +#else + while (sigwaitinfo(&ss,0) != SIG_STOP_FOR_GC); +#endif + + FSHOW_SIGNAL((stderr,"thread=%lu resumed\n",thread->os_thread)); + if(thread->state!=STATE_RUNNING) { + lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n", + fixnum_value(thread->state)); + } + + undo_fake_foreign_function_call(context); } #endif @@ -706,43 +813,26 @@ void interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context) { os_context_t *context = arch_os_get_context(&void_context); - interrupt_handle_now(signal, info, context); -#ifdef LISP_FEATURE_DARWIN - DARWIN_FIX_CONTEXT(context); +#if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT) + os_restore_fp_control(context); #endif + interrupt_handle_now(signal, info, context); } -/* - * stuff to detect and handle hitting the GC trigger - */ - -#ifndef LISP_FEATURE_GENCGC -/* since GENCGC has its own way to record trigger */ -static boolean -gc_trigger_hit(int signal, siginfo_t *info, os_context_t *context) -{ - if (current_auto_gc_trigger == NULL) - return 0; - else{ - void *badaddr=arch_get_bad_addr(signal,info,context); - return (badaddr >= (void *)current_auto_gc_trigger && - badaddr <((void *)current_dynamic_space + DYNAMIC_SPACE_SIZE)); - } -} -#endif - /* manipulate the signal context and stack such that when the handler * returns, it will call function instead of whatever it was doing * previously */ #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) -int *context_eflags_addr(os_context_t *context); +extern int *context_eflags_addr(os_context_t *context); #endif extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs); extern void post_signal_tramp(void); -void arrange_return_to_lisp_function(os_context_t *context, lispobj function) +extern void call_into_lisp_tramp(void); +void +arrange_return_to_lisp_function(os_context_t 
*context, lispobj function) { #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) void * fun=native_pointer(function); @@ -786,6 +876,37 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP); +#if defined(LISP_FEATURE_DARWIN) + u32 *register_save_area = (u32 *)os_validate(0, 0x40); + + FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp)); + FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context)); + + /* 1. os_validate (malloc/mmap) register_save_block + * 2. copy register state into register_save_block + * 3. put a pointer to register_save_block in a register in the context + * 4. set the context's EIP to point to a trampoline which: + * a. builds the fake stack frame from the block + * b. frees the block + * c. calls the function + */ + + *register_save_area = *os_context_pc_addr(context); + *(register_save_area + 1) = function; + *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI); + *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI); + *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX); + *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX); + *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX); + *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX); + *(register_save_area + 8) = *context_eflags_addr(context); + + *os_context_pc_addr(context) = + (os_context_register_t) call_into_lisp_tramp; + *os_context_register_addr(context,reg_ECX) = + (os_context_register_t) register_save_area; +#else + /* return address for call_into_lisp: */ *(sp-15) = (u32)post_signal_tramp; *(sp-14) = function; /* args for call_into_lisp : function*/ @@ -807,8 +928,11 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) 
*(sp-2)=*os_context_register_addr(context,reg_EBP); *(sp-1)=*os_context_pc_addr(context); +#endif + #elif defined(LISP_FEATURE_X86_64) u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP); + /* return address for call_into_lisp: */ *(sp-18) = (u64)post_signal_tramp; @@ -841,6 +965,8 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) #endif #ifdef LISP_FEATURE_X86 + +#if !defined(LISP_FEATURE_DARWIN) *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp; *os_context_register_addr(context,reg_ECX) = 0; *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2); @@ -849,7 +975,9 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) (os_context_register_t)(sp-15); #else *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15); -#endif +#endif /* __NETBSD__ */ +#endif /* LISP_FEATURE_DARWIN */ + #elif defined(LISP_FEATURE_X86_64) *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp; *os_context_register_addr(context,reg_RCX) = 0; @@ -858,11 +986,12 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) #else /* this much of the calling convention is common to all non-x86 ports */ - *os_context_pc_addr(context) = (os_context_register_t)code; + *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code; *os_context_register_addr(context,reg_NARGS) = 0; - *os_context_register_addr(context,reg_LIP) = (os_context_register_t)code; + *os_context_register_addr(context,reg_LIP) = + (os_context_register_t)(unsigned long)code; *os_context_register_addr(context,reg_CFP) = - (os_context_register_t)current_control_frame_pointer; + (os_context_register_t)(unsigned long)current_control_frame_pointer; #endif #ifdef ARCH_HAS_NPC_REGISTER *os_context_npc_addr(context) = @@ -875,30 +1004,18 @@ void arrange_return_to_lisp_function(os_context_t *context, lispobj function) } #ifdef LISP_FEATURE_SB_THREAD -void 
interrupt_thread_handler(int num, siginfo_t *info, void *v_context) + +/* FIXME: this function can go away when all lisp handlers are invoked + * via arrange_return_to_lisp_function. */ +void +interrupt_thread_handler(int num, siginfo_t *info, void *v_context) { os_context_t *context = (os_context_t*)arch_os_get_context(&v_context); - /* The order of interrupt execution is peculiar. If thread A - * interrupts thread B with I1, I2 and B for some reason receives - * I1 when FUN2 is already on the list, then it is FUN2 that gets - * to run first. But when FUN2 is run SIG_INTERRUPT_THREAD is - * enabled again and I2 hits pretty soon in FUN2 and run - * FUN1. This is of course just one scenario, and the order of - * thread interrupt execution is undefined. */ - struct thread *th=arch_os_get_current_thread(); - struct cons *c; - lispobj function; - if (th->state != STATE_RUNNING) - lose("interrupt_thread_handler: thread %lu in wrong state: %d\n", - th->os_thread,fixnum_value(th->state)); - get_spinlock(&th->interrupt_fun_lock,(long)th); - c=((struct cons *)native_pointer(th->interrupt_fun)); - function=c->car; - th->interrupt_fun=c->cdr; - release_spinlock(&th->interrupt_fun_lock); - if (function==NIL) - lose("interrupt_thread_handler: NIL function\n"); - arrange_return_to_lisp_function(context,function); + + /* let the handler enable interrupts again when it sees fit */ + sigaddset_deferrable(os_context_sigmask_addr(context)); + arrange_return_to_lisp_function(context, + StaticSymbolFunction(RUN_INTERRUPTION)); } #endif @@ -909,11 +1026,14 @@ void interrupt_thread_handler(int num, siginfo_t *info, void *v_context) * that has the added benefit of automatically discriminating between * functions and variables. 
*/ -void undefined_alien_function() { - funcall0(SymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR)); +void +undefined_alien_function(void) +{ + funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR)); } -boolean handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) +boolean +handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) { struct thread *th=arch_os_get_current_thread(); @@ -925,11 +1045,11 @@ boolean handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) * protection so the error handler has some headroom, protect the * previous page so that we can catch returns from the guard page * and restore it. */ - protect_control_stack_guard_page(th,0); - protect_control_stack_return_guard_page(th,1); + protect_control_stack_guard_page(0); + protect_control_stack_return_guard_page(1); arrange_return_to_lisp_function - (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR)); + (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR)); return 1; } else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) && @@ -938,161 +1058,163 @@ boolean handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr) * unprotect this one. This works even if we somehow missed * the return-guard-page, and hit it on our way to new * exhaustion instead. */ - protect_control_stack_guard_page(th,1); - protect_control_stack_return_guard_page(th,0); + protect_control_stack_guard_page(1); + protect_control_stack_return_guard_page(0); return 1; } else if (addr >= undefined_alien_address && addr < undefined_alien_address + os_vm_page_size) { arrange_return_to_lisp_function - (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR)); + (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR)); return 1; } else return 0; } + +/* + * noise to install handlers + */ -#ifndef LISP_FEATURE_GENCGC -/* This function gets called from the SIGSEGV (for e.g. Linux, NetBSD, & - * OpenBSD) or SIGBUS (for e.g. FreeBSD) handler. 
Here we check - * whether the signal was due to treading on the mprotect()ed zone - - * and if so, arrange for a GC to happen. */ -extern unsigned long bytes_consed_between_gcs; /* gc-common.c */ +#ifndef LISP_FEATURE_WIN32 +/* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if + * they are blocked, in Linux 2.6 the default handler is invoked + * instead that usually coredumps. One might hastily think that adding + * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then + * the whole sa_mask is ignored and instead of not adding the signal + * in question to the mask. That means if it's not blockable the + * signal must be unblocked at the beginning of signal handlers. + * + * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different + * way: if SA_NODEFER is set and the signal is in sa_mask, the signal + * will be unblocked in the sigmask during the signal handler. -- RMK + * X-mas day, 2005 + */ +static volatile int sigaction_nodefer_works = -1; -boolean -interrupt_maybe_gc(int signal, siginfo_t *info, void *void_context) +#define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT +#define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1 + +static void +sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context) { - os_context_t *context=(os_context_t *) void_context; - struct thread *th=arch_os_get_current_thread(); - struct interrupt_data *data=th->interrupt_data; - - if(!foreign_function_call_active && gc_trigger_hit(signal, info, context)){ - struct thread *thread=arch_os_get_current_thread(); - clear_auto_gc_trigger(); - /* Don't flood the system with interrupts if the need to gc is - * already noted. This can happen for example when SUB-GC - * allocates or after a gc triggered in a WITHOUT-GCING. 
*/ - if (SymbolValue(GC_PENDING,thread) == NIL) { - if (SymbolValue(GC_INHIBIT,thread) == NIL) { - if (arch_pseudo_atomic_atomic(context)) { - /* set things up so that GC happens when we finish - * the PA section */ - SetSymbolValue(GC_PENDING,T,thread); - arch_set_pseudo_atomic_interrupted(context); - } else { - interrupt_maybe_gc_int(signal,info,void_context); - } - } else { - SetSymbolValue(GC_PENDING,T,thread); - } + sigset_t empty, current; + int i; + sigemptyset(&empty); + thread_sigmask(SIG_BLOCK, &empty, ¤t); + /* There should be exactly two blocked signals: the two we added + * to sa_mask when setting up the handler. NetBSD doesn't block + * the signal we're handling when SA_NODEFER is set; Linux before + * 2.6.13 or so also doesn't block the other signal when + * SA_NODEFER is set. */ + for(i = 1; i < NSIG; i++) + if (sigismember(¤t, i) != + (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) { + FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i)); + sigaction_nodefer_works = 0; } - return 1; - } - return 0; + if (sigaction_nodefer_works == -1) + sigaction_nodefer_works = 1; } -#endif - -/* this is also used by gencgc, in alloc() */ -boolean -interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context) +static void +see_if_sigaction_nodefer_works(void) { - os_context_t *context=(os_context_t *) void_context; - struct thread *thread=arch_os_get_current_thread(); + struct sigaction sa, old_sa; - check_blockables_blocked_or_lose(); - fake_foreign_function_call(context); + sa.sa_flags = SA_SIGINFO | SA_NODEFER; + sa.sa_sigaction = sigaction_nodefer_test_handler; + sigemptyset(&sa.sa_mask); + sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL); + sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL); + sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa); + /* Make sure no signals are blocked. 
*/ + { + sigset_t empty; + sigemptyset(&empty); + thread_sigmask(SIG_SETMASK, &empty, 0); + } + kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL); + while (sigaction_nodefer_works == -1); + sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL); +} - /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in - * which case we will be running with no gc trigger barrier - * thing for a while. But it shouldn't be long until the end - * of WITHOUT-GCING. - * - * FIXME: It would be good to protect the end of dynamic space - * and signal a storage condition from there. - */ +#undef SA_NODEFER_TEST_BLOCK_SIGNAL +#undef SA_NODEFER_TEST_KILL_SIGNAL - /* Restore the signal mask from the interrupted context before - * calling into Lisp if interrupts are enabled. Why not always? - * - * Suppose there is a WITHOUT-INTERRUPTS block far, far out. If an - * interrupt hits while in SUB-GC, it is deferred and the - * os_context_sigmask of that interrupt is set to block further - * deferrable interrupts (until the first one is - * handled). Unfortunately, that context refers to this place and - * when we return from here the signals will not be blocked. - * - * A kludgy alternative is to propagate the sigmask change to the - * outer context. 
- */
-    if(SymbolValue(INTERRUPTS_ENABLED,thread)!=NIL)
-        thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
-#ifdef LISP_FEATURE_SB_THREAD
-    else {
-        sigset_t new;
-        sigaddset(&new,SIG_STOP_FOR_GC);
-        thread_sigmask(SIG_UNBLOCK,&new,0);
-    }
-#endif
-    funcall0(SymbolFunction(SUB_GC));
+static void
+unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
+{
+    sigset_t unblock;

-    undo_fake_foreign_function_call(context);
-    return 1;
+    sigemptyset(&unblock);
+    sigaddset(&unblock, signal);
+    thread_sigmask(SIG_UNBLOCK, &unblock, 0);
+    interrupt_handle_now_handler(signal, info, void_context);
 }
-
-/*
- * noise to install handlers
- */

+static void
+low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
+{
+    sigset_t unblock;
+
+    sigemptyset(&unblock);
+    sigaddset(&unblock, signal);
+    thread_sigmask(SIG_UNBLOCK, &unblock, 0);
+    (*interrupt_low_level_handlers[signal])(signal, info, void_context);
+}

 void
 undoably_install_low_level_interrupt_handler (int signal,
-                                              void handler(int,
-                                                           siginfo_t*,
-                                                           void*))
+                                              interrupt_handler_t handler)
 {
     struct sigaction sa;
-    struct thread *th=arch_os_get_current_thread();
-    /* It may be before the initial thread is started. */
-    struct interrupt_data *data=
-        th ? th->interrupt_data : global_interrupt_data;

     if (0 > signal || signal >= NSIG) {
-        lose("bad signal number %d", signal);
+        lose("bad signal number %d\n", signal);
     }

-    if (sigismember(&deferrable_sigset,signal))
+    if (ARE_SAME_HANDLER(handler, SIG_DFL))
+        sa.sa_sigaction = handler;
+    else if (sigismember(&deferrable_sigset,signal))
         sa.sa_sigaction = low_level_maybe_now_maybe_later;
+    /* The use of a trampoline appears to break the
+       arch_os_get_context() workaround for SPARC/Linux.  For now,
+       don't use the trampoline (and so be vulnerable to the problems
+       that SA_NODEFER is meant to solve.)
*/ +#if !(defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_LINUX)) + else if (!sigaction_nodefer_works && + !sigismember(&blockable_sigset, signal)) + sa.sa_sigaction = low_level_unblock_me_trampoline; +#endif else sa.sa_sigaction = handler; - sigemptyset(&sa.sa_mask); - sigaddset_blockable(&sa.sa_mask); - sa.sa_flags = SA_SIGINFO | SA_RESTART; + sigcopyset(&sa.sa_mask, &blockable_sigset); + sa.sa_flags = SA_SIGINFO | SA_RESTART + | (sigaction_nodefer_works ? SA_NODEFER : 0); #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK if((signal==SIG_MEMORY_FAULT) #ifdef SIG_INTERRUPT_THREAD || (signal==SIG_INTERRUPT_THREAD) #endif ) - sa.sa_flags|= SA_ONSTACK; + sa.sa_flags |= SA_ONSTACK; #endif sigaction(signal, &sa, NULL); - data->interrupt_low_level_handlers[signal] = + interrupt_low_level_handlers[signal] = (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler); } +#endif /* This is called from Lisp. */ unsigned long install_handler(int signal, void handler(int, siginfo_t*, void*)) { +#ifndef LISP_FEATURE_WIN32 struct sigaction sa; sigset_t old, new; union interrupt_handler oldhandler; - struct thread *th=arch_os_get_current_thread(); - /* It may be before the initial thread is started. */ - struct interrupt_data *data= - th ? 
th->interrupt_data : global_interrupt_data; FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal)); @@ -1100,49 +1222,55 @@ install_handler(int signal, void handler(int, siginfo_t*, void*)) sigaddset(&new, signal); thread_sigmask(SIG_BLOCK, &new, &old); - FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%x\n", - (unsigned int)data->interrupt_low_level_handlers[signal])); - if (data->interrupt_low_level_handlers[signal]==0) { + FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n", + (unsigned int)interrupt_low_level_handlers[signal])); + if (interrupt_low_level_handlers[signal]==0) { if (ARE_SAME_HANDLER(handler, SIG_DFL) || - ARE_SAME_HANDLER(handler, SIG_IGN)) { + ARE_SAME_HANDLER(handler, SIG_IGN)) sa.sa_sigaction = handler; - } else if (sigismember(&deferrable_sigset, signal)) { + else if (sigismember(&deferrable_sigset, signal)) sa.sa_sigaction = maybe_now_maybe_later; - } else { + else if (!sigaction_nodefer_works && + !sigismember(&blockable_sigset, signal)) + sa.sa_sigaction = unblock_me_trampoline; + else sa.sa_sigaction = interrupt_handle_now_handler; - } - sigemptyset(&sa.sa_mask); - sigaddset_blockable(&sa.sa_mask); - sa.sa_flags = SA_SIGINFO | SA_RESTART; + sigcopyset(&sa.sa_mask, &blockable_sigset); + sa.sa_flags = SA_SIGINFO | SA_RESTART | + (sigaction_nodefer_works ? 
SA_NODEFER : 0);
         sigaction(signal, &sa, NULL);
     }

-    oldhandler = data->interrupt_handlers[signal];
-    data->interrupt_handlers[signal].c = handler;
+    oldhandler = interrupt_handlers[signal];
+    interrupt_handlers[signal].c = handler;

     thread_sigmask(SIG_SETMASK, &old, 0);

     FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));

     return (unsigned long)oldhandler.lisp;
+#else
+    /* Probably-wrong Win32 hack */
+    return 0;
+#endif
 }

 void
-interrupt_init()
+interrupt_init(void)
 {
+#ifndef LISP_FEATURE_WIN32
     int i;
     SHOW("entering interrupt_init()");

+    see_if_sigaction_nodefer_works();
     sigemptyset(&deferrable_sigset);
     sigemptyset(&blockable_sigset);
     sigaddset_deferrable(&deferrable_sigset);
     sigaddset_blockable(&blockable_sigset);

-    global_interrupt_data=calloc(sizeof(struct interrupt_data), 1);
-
     /* Set up high level handler information. */
     for (i = 0; i < NSIG; i++) {
-        global_interrupt_data->interrupt_handlers[i].c =
+        interrupt_handlers[i].c =
             /* (The cast here blasts away the distinction between
              * SA_SIGACTION-style three-argument handlers and
              * signal(..)-style one-argument handlers, which is OK
@@ -1152,4 +1280,83 @@ interrupt_init()
     }

     SHOW("returning from interrupt_init()");
+#endif
+}
+
+#ifndef LISP_FEATURE_WIN32
+int
+siginfo_code(siginfo_t *info)
+{
+    return info->si_code;
+}
+os_vm_address_t current_memory_fault_address;
+
+void
+lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
+{
+    /* FIXME: This is lossy: if we get another memory fault (eg. from
+     * another thread) before lisp has read this, we lose the information.
+     * However, since this is mostly informative, we'll live with that for
+     * now -- some address is better than no address in this case.
+ */ + current_memory_fault_address = addr; + arrange_return_to_lisp_function(context, + StaticSymbolFunction(MEMORY_FAULT_ERROR)); } +#endif + +static void +unhandled_trap_error(os_context_t *context) +{ + lispobj context_sap; + fake_foreign_function_call(context); + context_sap = alloc_sap(context); +#ifndef LISP_FEATURE_WIN32 + thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0); +#endif + funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap); + lose("UNHANDLED-TRAP-ERROR fell through"); +} + +/* Common logic for trapping instructions. How we actually handle each + * case is highly architecture dependent, but the overall shape is + * this. */ +void +handle_trap(os_context_t *context, int trap) +{ + switch(trap) { + case trap_PendingInterrupt: + FSHOW((stderr, "/\n")); + arch_skip_instruction(context); + interrupt_handle_pending(context); + break; + case trap_Error: + case trap_Cerror: + FSHOW((stderr, "/\n", trap)); + interrupt_internal_error(context, trap==trap_Cerror); + break; + case trap_Breakpoint: + arch_handle_breakpoint(context); + break; + case trap_FunEndBreakpoint: + arch_handle_fun_end_breakpoint(context); + break; +#ifdef trap_AfterBreakpoint + case trap_AfterBreakpoint: + arch_handle_after_breakpoint(context); + break; +#endif +#ifdef trap_SingleStepAround + case trap_SingleStepAround: + case trap_SingleStepBefore: + arch_handle_single_step_trap(context, trap); + break; +#endif + case trap_Halt: + fake_foreign_function_call(context); + lose("%%PRIMITIVE HALT called; the party is over.\n"); + default: + unhandled_trap_error(context); + } +} +