2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* When we catch an internal error, should we pass it back to Lisp to
72 * be handled in a high-level way? (Early in cold init, the answer is
73 * 'no', because Lisp is still too brain-dead to handle anything.
74 * After sufficient initialization has been completed, the answer
76 boolean internal_errors_enabled = 0;
78 #ifndef LISP_FEATURE_WIN32
80 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
82 union interrupt_handler interrupt_handlers[NSIG];
84 /* Under Linux on some architectures, we appear to have to restore the
85 * FPU control word from the context, as after the signal is delivered
86 * we appear to have a null FPU control word. */
87 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
88 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
89 os_context_t *context = arch_os_get_context(&void_context); \
90 os_restore_fp_control(context);
92 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
93 os_context_t *context = arch_os_get_context(&void_context);
96 /* Foreign code may want to start some threads on its own.
97 * Non-targeted, truly asynchronous signals can be delivered to
98 * basically any thread, but invoking Lisp handlers in such foreign
99 * threads is really bad, so let's resignal it.
101 * This should at least bring attention to the problem, but it cannot
102 * work for SIGSEGV and similar. It is good enough for timers, and
103 * maybe all deferrables. */
105 #ifdef LISP_FEATURE_SB_THREAD
107 add_handled_signals(sigset_t *sigset)
110 for(i = 1; i < NSIG; i++) {
111 if (!(ARE_SAME_HANDLER(interrupt_low_level_handlers[i], SIG_DFL)) ||
112 !(ARE_SAME_HANDLER(interrupt_handlers[i].c, SIG_DFL))) {
113 sigaddset(sigset, i);
118 void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
122 maybe_resignal_to_lisp_thread(int signal, os_context_t *context)
124 #ifdef LISP_FEATURE_SB_THREAD
125 if (!pthread_getspecific(lisp_thread)) {
126 if (!(sigismember(&deferrable_sigset,signal))) {
127 corruption_warning_and_maybe_lose
128 ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
134 sigemptyset(&sigset);
135 add_handled_signals(&sigset);
136 block_signals(&sigset, 0, 0);
137 block_signals(&sigset, os_context_sigmask_addr(context), 0);
138 kill(getpid(), signal);
146 /* These are to be used in signal handlers. Currently all handlers are
147 * called from one of:
149 * interrupt_handle_now_handler
150 * maybe_now_maybe_later
151 * unblock_me_trampoline
152 * low_level_handle_now_handler
153 * low_level_maybe_now_maybe_later
154 * low_level_unblock_me_trampoline
156 * This gives us a single point of control (or six) over errno, fp
157 * control word, and fixing up signal context on sparc.
159 * The SPARC/Linux platform doesn't quite do signals the way we want
160 * them done. The third argument in the handler isn't filled in by the
161 * kernel properly, so we fix it up ourselves in the
162 * arch_os_get_context(..) function. -- CSR, 2002-07-23
164 #define SAVE_ERRNO(signal,context,void_context) \
166 int _saved_errno = errno; \
167 RESTORE_FP_CONTROL_WORD(context,void_context); \
168 if (!maybe_resignal_to_lisp_thread(signal, context)) \
171 #define RESTORE_ERRNO \
173 errno = _saved_errno; \
176 static void run_deferred_handler(struct interrupt_data *data,
177 os_context_t *context);
178 #ifndef LISP_FEATURE_WIN32
179 static void store_signal_data_for_later (struct interrupt_data *data,
180 void *handler, int signal,
182 os_context_t *context);
185 /* Generic signal related utilities. */
188 get_current_sigmask(sigset_t *sigset)
190 /* Get the current sigmask, by blocking the empty set. */
191 thread_sigmask(SIG_BLOCK, 0, sigset);
195 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
200 sigcopyset(old, where);
201 for(i = 1; i < NSIG; i++) {
202 if (sigismember(what, i))
206 thread_sigmask(SIG_BLOCK, what, old);
211 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
216 sigcopyset(old, where);
217 for(i = 1; i < NSIG; i++) {
218 if (sigismember(what, i))
222 thread_sigmask(SIG_UNBLOCK, what, old);
227 print_sigset(sigset_t *sigset)
230 for(i = 1; i < NSIG; i++) {
231 if (sigismember(sigset, i))
232 fprintf(stderr, "Signal %d masked\n", i);
236 /* Return 1 if all signals in sigset2 are masked in sigset, return 0
237 * if all are unmasked; else die. Passing NULL for sigset is a shorthand
238 * for the current sigmask. */
240 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
243 #if !defined(LISP_FEATURE_WIN32)
245 boolean has_blocked = 0, has_unblocked = 0;
248 get_current_sigmask(&current);
251 for(i = 1; i < NSIG; i++) {
252 if (sigismember(sigset2, i)) {
253 if (sigismember(sigset, i))
259 if (has_blocked && has_unblocked) {
260 print_sigset(sigset);
261 lose("some %s signals blocked, some unblocked\n", name);
271 /* Deferrables, blockables, gc signals. */
274 sigaddset_deferrable(sigset_t *s)
276 sigaddset(s, SIGHUP);
277 sigaddset(s, SIGINT);
278 sigaddset(s, SIGTERM);
279 sigaddset(s, SIGQUIT);
280 sigaddset(s, SIGPIPE);
281 sigaddset(s, SIGALRM);
282 sigaddset(s, SIGURG);
283 sigaddset(s, SIGTSTP);
284 sigaddset(s, SIGCHLD);
286 #ifndef LISP_FEATURE_HPUX
287 sigaddset(s, SIGXCPU);
288 sigaddset(s, SIGXFSZ);
290 sigaddset(s, SIGVTALRM);
291 sigaddset(s, SIGPROF);
292 sigaddset(s, SIGWINCH);
296 sigaddset_blockable(sigset_t *sigset)
298 sigaddset_deferrable(sigset);
299 sigaddset_gc(sigset);
303 sigaddset_gc(sigset_t *sigset)
305 #ifdef THREADS_USING_GCSIGNAL
306 sigaddset(sigset,SIG_STOP_FOR_GC);
310 /* initialized in interrupt_init */
311 sigset_t deferrable_sigset;
312 sigset_t blockable_sigset;
317 #if !defined(LISP_FEATURE_WIN32)
319 deferrables_blocked_p(sigset_t *sigset)
321 return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
326 check_deferrables_unblocked_or_lose(sigset_t *sigset)
328 #if !defined(LISP_FEATURE_WIN32)
329 if (deferrables_blocked_p(sigset))
330 lose("deferrables blocked\n");
335 check_deferrables_blocked_or_lose(sigset_t *sigset)
337 #if !defined(LISP_FEATURE_WIN32)
338 if (!deferrables_blocked_p(sigset))
339 lose("deferrables unblocked\n");
343 #if !defined(LISP_FEATURE_WIN32)
345 blockables_blocked_p(sigset_t *sigset)
347 return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
352 check_blockables_unblocked_or_lose(sigset_t *sigset)
354 #if !defined(LISP_FEATURE_WIN32)
355 if (blockables_blocked_p(sigset))
356 lose("blockables blocked\n");
361 check_blockables_blocked_or_lose(sigset_t *sigset)
363 #if !defined(LISP_FEATURE_WIN32)
364 if (!blockables_blocked_p(sigset))
365 lose("blockables unblocked\n");
369 #ifndef LISP_FEATURE_SB_SAFEPOINT
370 #if !defined(LISP_FEATURE_WIN32)
372 gc_signals_blocked_p(sigset_t *sigset)
374 return all_signals_blocked_p(sigset, &gc_sigset, "gc");
379 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
381 #if !defined(LISP_FEATURE_WIN32)
382 if (gc_signals_blocked_p(sigset))
383 lose("gc signals blocked\n");
388 check_gc_signals_blocked_or_lose(sigset_t *sigset)
390 #if !defined(LISP_FEATURE_WIN32)
391 if (!gc_signals_blocked_p(sigset))
392 lose("gc signals unblocked\n");
398 block_deferrable_signals(sigset_t *where, sigset_t *old)
400 #ifndef LISP_FEATURE_WIN32
401 block_signals(&deferrable_sigset, where, old);
406 block_blockable_signals(sigset_t *where, sigset_t *old)
408 #ifndef LISP_FEATURE_WIN32
409 block_signals(&blockable_sigset, where, old);
413 #ifndef LISP_FEATURE_SB_SAFEPOINT
415 block_gc_signals(sigset_t *where, sigset_t *old)
417 #ifndef LISP_FEATURE_WIN32
418 block_signals(&gc_sigset, where, old);
424 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
426 #ifndef LISP_FEATURE_WIN32
427 if (interrupt_handler_pending_p())
428 lose("unblock_deferrable_signals: losing proposition\n");
429 #ifndef LISP_FEATURE_SB_SAFEPOINT
430 check_gc_signals_unblocked_or_lose(where);
432 unblock_signals(&deferrable_sigset, where, old);
437 unblock_blockable_signals(sigset_t *where, sigset_t *old)
439 #ifndef LISP_FEATURE_WIN32
440 unblock_signals(&blockable_sigset, where, old);
444 #ifndef LISP_FEATURE_SB_SAFEPOINT
446 unblock_gc_signals(sigset_t *where, sigset_t *old)
448 #ifndef LISP_FEATURE_WIN32
449 unblock_signals(&gc_sigset, where, old);
455 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
457 #ifndef LISP_FEATURE_WIN32
458 sigset_t *sigset = os_context_sigmask_addr(context);
459 #ifndef LISP_FEATURE_SB_SAFEPOINT
460 if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
461 corruption_warning_and_maybe_lose(
462 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
463 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
464 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
465 unblock_gc_signals(sigset, 0);
468 if (!interrupt_handler_pending_p()) {
469 unblock_deferrable_signals(sigset, 0);
476 check_interrupts_enabled_or_lose(os_context_t *context)
478 struct thread *thread=arch_os_get_current_thread();
479 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
480 lose("interrupts not enabled\n");
481 if (arch_pseudo_atomic_atomic(context))
482 lose ("in pseudo atomic section\n");
485 /* Save sigset (or the current sigmask if 0) if there is no pending
486 * handler, because that means that deferrables are already blocked.
487 * The purpose is to avoid losing the pending gc signal if a
488 * deferrable interrupt async unwinds between clearing the pseudo
489 * atomic and trapping to GC.*/
490 #ifndef LISP_FEATURE_SB_SAFEPOINT
492 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
494 #ifndef LISP_FEATURE_WIN32
495 struct thread *thread = arch_os_get_current_thread();
496 struct interrupt_data *data = thread->interrupt_data;
498 /* Obviously, this function is called when signals may not be
499 * blocked. Let's make sure we are not interrupted. */
500 block_blockable_signals(0, &oldset);
501 #ifndef LISP_FEATURE_SB_THREAD
502 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
504 if (data->gc_blocked_deferrables)
505 lose("gc_blocked_deferrables already true\n");
507 if ((!data->pending_handler) &&
508 (!data->gc_blocked_deferrables)) {
509 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
510 data->gc_blocked_deferrables = 1;
512 /* This is the sigmask of some context. */
513 sigcopyset(&data->pending_mask, sigset);
514 sigaddset_deferrable(sigset);
515 thread_sigmask(SIG_SETMASK,&oldset,0);
518 /* Operating on the current sigmask. Save oldset and
519 * unblock gc signals. In the end, this is equivalent to
520 * blocking the deferrables. */
521 sigcopyset(&data->pending_mask, &oldset);
522 thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
526 thread_sigmask(SIG_SETMASK,&oldset,0);
531 /* Are we leaving WITH-GCING and already running with interrupts
532 * enabled, without the protection of *GC-INHIBIT* T and there is gc
533 * (or stop for gc) pending, but we haven't trapped yet? */
535 in_leaving_without_gcing_race_p(struct thread *thread)
537 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
538 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
539 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
540 ((SymbolValue(GC_PENDING,thread) != NIL)
541 #if defined(LISP_FEATURE_SB_THREAD)
542 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
547 /* Check our baroque invariants. */
549 check_interrupt_context_or_lose(os_context_t *context)
551 #ifndef LISP_FEATURE_WIN32
552 struct thread *thread = arch_os_get_current_thread();
553 struct interrupt_data *data = thread->interrupt_data;
554 int interrupt_deferred_p = (data->pending_handler != 0);
555 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
556 sigset_t *sigset = os_context_sigmask_addr(context);
557 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
558 * handle_allocation_trap. */
559 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
560 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
561 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
562 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
563 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
564 int in_race_p = in_leaving_without_gcing_race_p(thread);
565 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
566 * section and trapping, a SIG_STOP_FOR_GC would see the next
567 * check fail, for this reason sig_stop_for_gc handler does not
568 * call this function. */
569 if (interrupt_deferred_p) {
570 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
571 lose("Stray deferred interrupt.\n");
574 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
575 lose("GC_PENDING, but why?\n");
576 #if defined(LISP_FEATURE_SB_THREAD)
578 int stop_for_gc_pending =
579 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
580 if (stop_for_gc_pending)
581 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
582 lose("STOP_FOR_GC_PENDING, but why?\n");
583 if (pseudo_atomic_interrupted)
584 if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
585 lose("pseudo_atomic_interrupted, but why?\n");
588 if (pseudo_atomic_interrupted)
589 if (!(gc_pending || interrupt_deferred_p))
590 lose("pseudo_atomic_interrupted, but why?\n");
593 if (interrupt_pending && !interrupt_deferred_p)
594 lose("INTERRUPT_PENDING but not pending handler.\n");
595 if ((data->gc_blocked_deferrables) && interrupt_pending)
596 lose("gc_blocked_deferrables and interrupt pending\n.");
597 if (data->gc_blocked_deferrables)
598 check_deferrables_blocked_or_lose(sigset);
599 if (interrupt_pending || interrupt_deferred_p ||
600 data->gc_blocked_deferrables)
601 check_deferrables_blocked_or_lose(sigset);
603 check_deferrables_unblocked_or_lose(sigset);
604 #ifndef LISP_FEATURE_SB_SAFEPOINT
605 /* If deferrables are unblocked then we are open to signals
606 * that run lisp code. */
607 check_gc_signals_unblocked_or_lose(sigset);
614 * utility routines used by various signal handlers
618 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
620 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
624 /* Build a fake stack frame or frames */
626 access_control_frame_pointer(th) =
627 (lispobj *)(unsigned long)
628 (*os_context_register_addr(context, reg_CSP));
629 if ((lispobj *)(unsigned long)
630 (*os_context_register_addr(context, reg_CFP))
631 == access_control_frame_pointer(th)) {
632 /* There is a small window during call where the callee's
633 * frame isn't built yet. */
634 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
635 == FUN_POINTER_LOWTAG) {
636 /* We have called, but not built the new frame, so
637 * build it for them. */
638 access_control_frame_pointer(th)[0] =
639 *os_context_register_addr(context, reg_OCFP);
640 access_control_frame_pointer(th)[1] =
641 *os_context_register_addr(context, reg_LRA);
642 access_control_frame_pointer(th) += 8;
643 /* Build our frame on top of it. */
644 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
647 /* We haven't yet called, build our frame as if the
648 * partial frame wasn't there. */
649 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
652 /* We can't tell whether we are still in the caller if it had to
653 * allocate a stack frame due to stack arguments. */
654 /* This observation provoked some past CMUCL maintainer to ask
655 * "Can anything strange happen during return?" */
658 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
661 access_control_stack_pointer(th) = access_control_frame_pointer(th) + 8;
663 access_control_frame_pointer(th)[0] = oldcont;
664 access_control_frame_pointer(th)[1] = NIL;
665 access_control_frame_pointer(th)[2] =
666 (lispobj)(*os_context_register_addr(context, reg_CODE));
670 /* Stores the context for gc to scavange and builds fake stack
673 fake_foreign_function_call(os_context_t *context)
676 struct thread *thread=arch_os_get_current_thread();
678 /* context_index incrementing must not be interrupted */
679 check_blockables_blocked_or_lose(0);
681 /* Get current Lisp state from context. */
683 #ifdef LISP_FEATURE_SB_THREAD
684 thread->pseudo_atomic_bits =
686 dynamic_space_free_pointer =
687 (lispobj *)(unsigned long)
689 (*os_context_register_addr(context, reg_ALLOC));
690 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
691 /* dynamic_space_free_pointer); */
692 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
693 if ((long)dynamic_space_free_pointer & 1) {
694 lose("dead in fake_foreign_function_call, context = %x\n", context);
697 /* why don't PPC and SPARC do something like this: */
698 #if defined(LISP_FEATURE_HPPA)
699 if ((long)dynamic_space_free_pointer & 4) {
700 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
705 set_binding_stack_pointer(thread,
706 *os_context_register_addr(context, reg_BSP));
709 build_fake_control_stack_frames(thread,context);
711 /* Do dynamic binding of the active interrupt context index
712 * and save the context in the context array. */
714 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
716 if (context_index >= MAX_INTERRUPTS) {
717 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
720 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
721 make_fixnum(context_index + 1),thread);
723 thread->interrupt_contexts[context_index] = context;
725 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
726 /* x86oid targets don't maintain the foreign function call flag at
727 * all, so leave them to believe that they are never in foreign
729 foreign_function_call_active_p(thread) = 1;
733 /* blocks all blockable signals. If you are calling from a signal handler,
734 * the usual signal mask will be restored from the context when the handler
735 * finishes. Otherwise, be careful */
737 undo_fake_foreign_function_call(os_context_t *context)
739 struct thread *thread=arch_os_get_current_thread();
740 /* Block all blockable signals. */
741 block_blockable_signals(0, 0);
743 foreign_function_call_active_p(thread) = 0;
745 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
748 #if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
749 /* Put the dynamic space free pointer back into the context. */
750 *os_context_register_addr(context, reg_ALLOC) =
751 (unsigned long) dynamic_space_free_pointer
752 | (*os_context_register_addr(context, reg_ALLOC)
755 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
757 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
760 #if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
761 /* Put the pseudo-atomic bits and dynamic space free pointer back
762 * into the context (p-a-bits for p-a, and dynamic space free
763 * pointer for ROOM). */
764 *os_context_register_addr(context, reg_ALLOC) =
765 (unsigned long) dynamic_space_free_pointer
766 | (thread->pseudo_atomic_bits & LOWTAG_MASK);
767 /* And clear them so we don't get bit later by call-in/call-out
768 * not updating them. */
769 thread->pseudo_atomic_bits = 0;
773 /* a handler for the signal caused by execution of a trap opcode
774 * signalling an internal error */
776 interrupt_internal_error(os_context_t *context, boolean continuable)
780 fake_foreign_function_call(context);
782 if (!internal_errors_enabled) {
783 describe_internal_error(context);
784 /* There's no good way to recover from an internal error
785 * before the Lisp error handling mechanism is set up. */
786 lose("internal error too early in init, can't recover\n");
789 /* Allocate the SAP object while the interrupts are still
791 #ifndef LISP_FEATURE_SB_SAFEPOINT
792 unblock_gc_signals(0, 0);
794 context_sap = alloc_sap(context);
796 #ifndef LISP_FEATURE_WIN32
797 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
800 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
801 /* Workaround for blocked SIGTRAP. */
804 sigemptyset(&newset);
805 sigaddset(&newset, SIGTRAP);
806 thread_sigmask(SIG_UNBLOCK, &newset, 0);
810 SHOW("in interrupt_internal_error");
812 /* Display some rudimentary debugging information about the
813 * error, so that even if the Lisp error handler gets badly
814 * confused, we have a chance to determine what's going on. */
815 describe_internal_error(context);
817 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
818 continuable ? T : NIL);
820 undo_fake_foreign_function_call(context); /* blocks signals again */
822 arch_skip_instruction(context);
826 interrupt_handler_pending_p(void)
828 struct thread *thread = arch_os_get_current_thread();
829 struct interrupt_data *data = thread->interrupt_data;
830 return (data->pending_handler != 0);
834 interrupt_handle_pending(os_context_t *context)
836 /* There are three ways we can get here. First, if an interrupt
837 * occurs within pseudo-atomic, it will be deferred, and we'll
838 * trap to here at the end of the pseudo-atomic block. Second, if
839 * the GC (in alloc()) decides that a GC is required, it will set
840 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
841 * and alloc() is always called from within pseudo-atomic, and
842 * thus we end up here again. Third, when calling GC-ON or at the
843 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
844 * here if there is a pending GC. Fourth, ahem, at the end of
845 * WITHOUT-INTERRUPTS (bar complications with nesting).
847 * A fifth way happens with safepoints: In addition to a stop for
848 * GC that is pending, there are thruptions. Both mechanisms are
849 * mostly signal-free, yet also of an asynchronous nature, so it makes
850 * sense to let interrupt_handle_pending take care of running them:
851 * It gets run precisely at those places where it is safe to process
852 * pending asynchronous tasks. */
854 struct thread *thread = arch_os_get_current_thread();
855 struct interrupt_data *data = thread->interrupt_data;
857 if (arch_pseudo_atomic_atomic(context)) {
858 lose("Handling pending interrupt in pseudo atomic.");
861 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
863 check_blockables_blocked_or_lose(0);
864 #ifndef LISP_FEATURE_SB_SAFEPOINT
866 * (On safepoint builds, there is no gc_blocked_deferrables nor
869 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
870 * handler, then the pending mask was saved and
871 * gc_blocked_deferrables set. Hence, there can be no pending
872 * handler and it's safe to restore the pending mask.
874 * Note, that if gc_blocked_deferrables is false we may still have
875 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
876 * pseudo atomic was interrupted by a deferrable first. */
877 if (data->gc_blocked_deferrables) {
878 if (data->pending_handler)
879 lose("GC blocked deferrables but still got a pending handler.");
880 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
881 lose("GC blocked deferrables while GC is inhibited.");
882 /* Restore the saved signal mask from the original signal (the
883 * one that interrupted us during the critical section) into
884 * the os_context for the signal we're currently in the
885 * handler for. This should ensure that when we return from
886 * the handler the blocked signals are unblocked. */
887 #ifndef LISP_FEATURE_WIN32
888 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
890 data->gc_blocked_deferrables = 0;
894 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
895 void *original_pending_handler = data->pending_handler;
897 #ifdef LISP_FEATURE_SB_SAFEPOINT
898 /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
899 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL
900 # ifdef LISP_FEATURE_SB_THRUPTION
901 || SymbolValue(THRUPTION_PENDING,thread) != NIL
905 /* We ought to take this chance to do a pitstop now. */
907 /* Now, it goes without saying that the context sigmask
908 * tweaking around this call is not pretty. However, it
909 * currently seems to be "needed" for the following
910 * situation. (So let's find a better solution and remove
911 * this comment afterwards.)
913 * Suppose we are in a signal handler (let's say SIGALRM).
914 * At the end of a WITHOUT-INTERRUPTS, the lisp code notices
915 * that a thruption is pending, and says to itself "let's
916 * receive pending interrupts then". We trust that the
917 * caller is happy to run those sorts of things now,
918 * including thruptions, otherwise it wouldn't have called
919 * us. But that's the problem: Even though we can guess the
920 * caller's intention, may_thrupt() would see that signals
921 * are blocked in the signal context (because that context
922 * itself points to a signal handler). So we cheat and
923 * pretend that signals weren't blocked.
925 #ifndef LISP_FEATURE_WIN32
926 sigset_t old, *ctxset = os_context_sigmask_addr(context);
927 unblock_signals(&deferrable_sigset, ctxset, &old);
929 thread_pitstop(context);
930 #ifndef LISP_FEATURE_WIN32
931 sigcopyset(&old, ctxset);
934 #elif defined(LISP_FEATURE_SB_THREAD)
935 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
936 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
937 * the signal handler if it actually stops us. */
938 arch_clear_pseudo_atomic_interrupted(context);
939 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
942 /* Test for T and not for != NIL since the value :IN-PROGRESS
943 * is used in SUB-GC as part of the mechanism to suppress
945 if (SymbolValue(GC_PENDING,thread) == T) {
947 /* Two reasons for doing this. First, if there is a
948 * pending handler we don't want to run. Second, we are
949 * going to clear pseudo atomic interrupted to avoid
950 * spurious trapping on every allocation in SUB_GC and
951 * having a pending handler with interrupts enabled and
952 * without pseudo atomic interrupted breaks an
954 if (data->pending_handler) {
955 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
956 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
959 arch_clear_pseudo_atomic_interrupted(context);
961 /* GC_PENDING is cleared in SUB-GC, or if another thread
962 * is doing a gc already we will get a SIG_STOP_FOR_GC and
963 * that will clear it.
965 * If there is a pending handler or gc was triggered in a
966 * signal handler then maybe_gc won't run POST_GC and will
967 * return normally. */
968 if (!maybe_gc(context))
969 lose("GC not inhibited but maybe_gc did not GC.");
971 if (data->pending_handler) {
975 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
976 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
977 * GC-PENDING is not NIL then we cannot trap on pseudo
978 * atomic due to GC (see if(GC_PENDING) logic in
979 * cheneygc.c and gencgc.c), plus there is an outer
980 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
982 lose("Trapping to run pending handler while GC in progress.");
985 check_blockables_blocked_or_lose(0);
987 /* No GC shall be lost. If SUB_GC triggers another GC then
988 * that should be handled on the spot. */
989 if (SymbolValue(GC_PENDING,thread) != NIL)
990 lose("GC_PENDING after doing gc.");
991 #ifdef THREADS_USING_GCSIGNAL
992 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
993 lose("STOP_FOR_GC_PENDING after doing gc.");
995 /* Check two things. First, that gc does not clobber a handler
996 * that's already pending. Second, that there is no interrupt
997 * lossage: if original_pending_handler was NULL then even if
998 * an interrupt arrived during GC (POST-GC, really) it was
1000 if (original_pending_handler != data->pending_handler)
1001 lose("pending handler changed in gc: %x -> %d.",
1002 original_pending_handler, data->pending_handler);
1005 #ifndef LISP_FEATURE_WIN32
1006 /* There may be no pending handler, because it was only a gc that
1007 * had to be executed or because Lisp is a bit too eager to call
1008 * DO-PENDING-INTERRUPT. */
1009 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
1010 (data->pending_handler)) {
1011 /* No matter how we ended up here, clear both
1012 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
1013 * because we checked above that there is no GC pending. */
1014 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
1015 arch_clear_pseudo_atomic_interrupted(context);
1016 /* Restore the sigmask in the context. */
1017 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
1018 run_deferred_handler(data, context);
1020 #ifdef LISP_FEATURE_SB_THRUPTION
1021 if (SymbolValue(THRUPTION_PENDING,thread)==T)
1022 /* Special case for the following situation: There is a
1023 * thruption pending, but a signal had been deferred. The
1024 * pitstop at the top of this function could only take care
1025 * of GC, and skipped the thruption, so we need to try again
1026 * now that INTERRUPT_PENDING and the sigmask have been
1028 while (check_pending_thruptions(context))
1032 #ifdef LISP_FEATURE_GENCGC
1033 if (get_pseudo_atomic_interrupted(thread))
1034 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
1036 /* It is possible that the end of this function was reached
1037 * without ever actually doing anything; the tests in Lisp for
1038 * when to call receive-pending-interrupt are not exact. */
1039 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
/* Deliver SIGNAL to whichever handler is installed in
 * interrupt_handlers[signal]: either a Lisp function (called via
 * funcall3 with SAPs wrapping info/context) or a C handler.
 * Precondition: all blockable signals are blocked; for deferrable
 * signals, Lisp-level interrupts must be enabled.
 * NOTE(review): elided extract — several original lines (braces,
 * locals, #endif lines) are missing between the numbered lines. */
1044 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
1046 boolean were_in_lisp;
1047 union interrupt_handler handler;
1049 check_blockables_blocked_or_lose(0);
1051 #ifndef LISP_FEATURE_WIN32
/* Deferrable signals must not arrive while Lisp has interrupts off. */
1052 if (sigismember(&deferrable_sigset,signal))
1053 check_interrupts_enabled_or_lose(context);
1056 handler = interrupt_handlers[signal];
1058 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
/* were_in_lisp: true when interrupted code was Lisp, so we must fake a
 * foreign-function-call frame before running C-level machinery. */
1062 were_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1065 fake_foreign_function_call(context);
1068 FSHOW_SIGNAL((stderr,
1069 "/entering interrupt_handle_now(%d, info, context)\n",
1072 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
1074 /* This can happen if someone tries to ignore or default one
1075 * of the signals we need for runtime support, and the runtime
1076 * support decides to pass on it. */
1077 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
1079 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
1080 /* Once we've decided what to do about contexts in a
1081 * return-elsewhere world (the original context will no longer
1082 * be available; should we copy it or was nobody using it anyway?)
1083 * then we should convert this to return-elsewhere */
1085 /* CMUCL comment said "Allocate the SAPs while the interrupts
1086 * are still disabled.". I (dan, 2003.08.21) assume this is
1087 * because we're not in pseudoatomic and allocation shouldn't
1088 * be interrupted. In which case it's no longer an issue as
1089 * all our allocation from C now goes through a PA wrapper,
1090 * but still, doesn't hurt.
1092 * Yeah, but non-gencgc platforms don't really wrap allocation
1093 * in PA. MG - 2005-08-29 */
1095 lispobj info_sap, context_sap;
1096 /* Leave deferrable signals blocked, the handler itself will
1097 * allow signals again when it sees fit. */
1098 #ifndef LISP_FEATURE_SB_SAFEPOINT
1099 unblock_gc_signals(0, 0);
/* Wrap the raw C pointers as Lisp system-area-pointers. */
1101 context_sap = alloc_sap(context);
1102 info_sap = alloc_sap(info);
1104 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
1106 #ifdef LISP_FEATURE_SB_SAFEPOINT
1107 WITH_GC_AT_SAFEPOINTS_ONLY()
1109 funcall3(handler.lisp,
1110 make_fixnum(signal),
1114 /* This cannot happen in sane circumstances. */
1116 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
1118 #ifndef LISP_FEATURE_WIN32
1119 /* Allow signals again. */
1120 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1122 (*handler.c)(signal, info, context);
1127 undo_fake_foreign_function_call(context); /* block signals again */
1130 FSHOW_SIGNAL((stderr,
1131 "/returning from interrupt_handle_now(%d, info, context)\n",
1135 /* This is called at the end of a critical section if the indications
1136 * are that some signal was deferred during the section. Note that as
1137 * far as C or the kernel is concerned we dealt with the signal
1138 * already; we're just doing the Lisp-level processing now that we
/* Runs the previously-stashed handler with the stashed signal/siginfo. */
1141 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
1143 /* The pending_handler may enable interrupts and then another
1144 * interrupt may hit, overwrite interrupt_data, so reset the
1145 * pending handler before calling it. Trust the handler to finish
1146 * with the siginfo before enabling interrupts. */
1147 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1148 data->pending_handler;
/* Clear before calling: see the overwrite hazard described above. */
1150 data->pending_handler=0;
1151 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
1152 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
1155 #ifndef LISP_FEATURE_WIN32
/* Decide whether HANDLER for SIGNAL must be deferred.  Defers (stashes
 * the signal data and returns via the deferred paths below) when Lisp
 * interrupts are disabled, when we are racing out of WITHOUT-GCING, or
 * when inside a pseudo-atomic section; otherwise falls through so the
 * caller runs the handler immediately.
 * NOTE(review): elided extract — return statements and closing braces
 * are among the missing lines. */
1157 maybe_defer_handler(void *handler, struct interrupt_data *data,
1158 int signal, siginfo_t *info, os_context_t *context)
1160 struct thread *thread=arch_os_get_current_thread();
1162 check_blockables_blocked_or_lose(0);
/* Sanity: deferral state must be clean before we stash anything. */
1164 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
1165 lose("interrupt already pending\n");
1166 if (thread->interrupt_data->pending_handler)
1167 lose("there is a pending handler already (PA)\n");
1168 if (data->gc_blocked_deferrables)
1169 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1170 check_interrupt_context_or_lose(context);
1171 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1172 * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo
1173 * atomic section inside a WITHOUT-INTERRUPTS.
1175 * Also, if in_leaving_without_gcing_race_p then
1176 * interrupt_handle_pending is going to be called soon, so
1177 * stashing the signal away is safe.
1179 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
1180 in_leaving_without_gcing_race_p(thread)) {
1181 FSHOW_SIGNAL((stderr,
1182 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1183 (unsigned int)handler,signal,
1184 in_leaving_without_gcing_race_p(thread)));
1185 store_signal_data_for_later(data,handler,signal,info,context);
1186 SetSymbolValue(INTERRUPT_PENDING, T,thread);
1187 check_interrupt_context_or_lose(context);
1190 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1191 * actually use its argument for anything on x86, so this branch
1192 * may succeed even when context is null (gencgc alloc()) */
1193 if (arch_pseudo_atomic_atomic(context)) {
1194 FSHOW_SIGNAL((stderr,
1195 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1196 (unsigned int)handler,signal));
1197 store_signal_data_for_later(data,handler,signal,info,context);
/* Mark the PA section interrupted so it traps to
 * interrupt_handle_pending when it ends. */
1198 arch_set_pseudo_atomic_interrupted(context);
1199 check_interrupt_context_or_lose(context);
1202 FSHOW_SIGNAL((stderr,
1203 "/maybe_defer_handler(%x,%d): not deferred\n",
1204 (unsigned int)handler,signal));
/* Stash HANDLER, SIGNAL and a copy of INFO in DATA so that
 * run_deferred_handler can replay the signal later; also save the
 * context's sigmask and block the deferrable signals in the context. */
1209 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1211 siginfo_t *info, os_context_t *context)
1213 if (data->pending_handler)
1214 lose("tried to overwrite pending interrupt handler %x with %x\n",
1215 data->pending_handler, handler);
1217 lose("tried to defer null interrupt handler\n");
1218 data->pending_handler = handler;
1219 data->pending_signal = signal;
/* siginfo_t is copied by value: the kernel's copy is gone once the
 * outer handler returns. */
1221 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1223 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1227 lose("Null context");
1229 /* the signal mask in the context (from before we were
1230 * interrupted) is copied to be restored when run_deferred_handler
1231 * happens. Then the usually-blocked signals are added to the mask
1232 * in the context so that we are running with blocked signals when
1233 * the handler returns */
1234 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1235 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (sigaction-installed) handler for deferrable signals:
 * either defers the signal or runs interrupt_handle_now immediately.
 * SAVE_ERRNO also binds `context` from void_context (macro magic). */
1239 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1241 SAVE_ERRNO(signal,context,void_context);
1242 struct thread *thread = arch_os_get_current_thread();
1243 struct interrupt_data *data = thread->interrupt_data;
1244 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1245 interrupt_handle_now(signal, info, context);
/* Immediately invoke the registered low-level (C-only) handler for
 * SIGNAL.  Callers are responsible for FP-control and Darwin context
 * fixups, as noted below. */
1250 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1251 os_context_t *context)
1253 /* No FP control fixage needed, caller has done that. */
1254 check_blockables_blocked_or_lose(0);
1255 check_interrupts_enabled_or_lose(context);
1256 (*interrupt_low_level_handlers[signal])(signal, info, context);
1257 /* No Darwin context fixage needed, caller does that. */
/* Outermost handler for deferrable signals that have a low-level
 * handler: defer if required, else dispatch right away. */
1261 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1263 SAVE_ERRNO(signal,context,void_context);
1264 struct thread *thread = arch_os_get_current_thread();
1265 struct interrupt_data *data = thread->interrupt_data;
1267 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1268 signal,info,context))
1269 low_level_interrupt_handle_now(signal, info, context);
1274 #ifdef THREADS_USING_GCSIGNAL
1276 /* This function must not cons, because that may trigger a GC. */
/* Handler for the stop-the-world GC signal: unless GC is inhibited or
 * we are pseudo-atomic (both cases defer via STOP_FOR_GC_PENDING),
 * park this thread in STATE_STOPPED until the GC releases it. */
1278 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1280 struct thread *thread=arch_os_get_current_thread();
1281 boolean was_in_lisp;
1283 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1284 * pseudo atomic until gc is finally allowed. */
1285 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1286 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1287 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1289 } else if (arch_pseudo_atomic_atomic(context)) {
1290 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1291 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1292 arch_set_pseudo_atomic_interrupted(context);
1293 maybe_save_gc_mask_and_block_deferrables
1294 (os_context_sigmask_addr(context));
1298 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1300 /* Not PA and GC not inhibited -- we can stop now. */
1302 was_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1305 /* need the context stored so it can have registers scavenged */
1306 fake_foreign_function_call(context);
1309 /* Not pending anymore. */
1310 SetSymbolValue(GC_PENDING,NIL,thread);
1311 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1313 /* Consider this: in a PA section GC is requested: GC_PENDING,
1314 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1315 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1316 * but a SIG_STOP_FOR_GC arrives before trapping to
1317 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1318 * pseudo_atomic_interrupted is not and we go on running with
1319 * pseudo_atomic_interrupted but without a pending interrupt or
1320 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1322 if (thread->interrupt_data->gc_blocked_deferrables) {
1323 FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
1324 clear_pseudo_atomic_interrupted(thread);
1325 sigcopyset(os_context_sigmask_addr(context),
1326 &thread->interrupt_data->pending_mask);
1327 thread->interrupt_data->gc_blocked_deferrables = 0;
1330 if(thread_state(thread)!=STATE_RUNNING) {
1331 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1332 fixnum_value(thread->state));
1335 set_thread_state(thread,STATE_STOPPED);
1336 FSHOW_SIGNAL((stderr,"suspended\n"));
1338 /* While waiting for gc to finish occupy ourselves with zeroing
1339 * the unused portion of the control stack to reduce conservatism.
1340 * On hypothetic platforms with threads and exact gc it is
1341 * actually a must. */
1342 scrub_control_stack();
/* Blocks until the GC thread moves us out of STATE_STOPPED. */
1344 wait_for_thread_state_change(thread, STATE_STOPPED);
1345 FSHOW_SIGNAL((stderr,"resumed\n"));
1347 if(thread_state(thread)!=STATE_RUNNING) {
1348 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1349 fixnum_value(thread_state(thread)));
1353 undo_fake_foreign_function_call(context);
/* Plain outermost handler: warn about possible corruption when one of
 * the "serious" synchronous signals arrives, then dispatch to
 * interrupt_handle_now. */
1360 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1362 SAVE_ERRNO(signal,context,void_context);
1363 #ifndef LISP_FEATURE_WIN32
/* SIGILL/SIGBUS (and SIGEMT where it exists) reaching this generic
 * handler usually indicate heap/image corruption. */
1364 if ((signal == SIGILL) || (signal == SIGBUS)
1365 #ifndef LISP_FEATURE_LINUX
1366 || (signal == SIGEMT)
1369 corruption_warning_and_maybe_lose("Signal %d received", signal);
1371 interrupt_handle_now(signal, info, context);
1375 /* manipulate the signal context and stack such that when the handler
1376 * returns, it will call function instead of whatever it was doing
1380 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1381 extern int *context_eflags_addr(os_context_t *context);
1384 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1385 extern void post_signal_tramp(void);
1386 extern void call_into_lisp_tramp(void);
/* Rewrite CONTEXT so that sigreturn resumes in FUNPTR (a
 * call_into_lisp-compatible entry point) applied to FUNCTION, with a
 * stack/register layout that lets post_signal_tramp restore the
 * interrupted state afterwards.  Highly architecture-specific.
 * NOTE(review): elided extract — many original lines (locals, braces,
 * #else/#endif lines, some *(sp-N) stores) are missing below. */
1389 arrange_return_to_c_function(os_context_t *context,
1390 call_into_lisp_lookalike funptr,
1393 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1394 check_gc_signals_unblocked_or_lose
1395 (os_context_sigmask_addr(context));
1397 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1398 void * fun=native_pointer(function);
1399 void *code = &(((struct simple_fun *) fun)->code);
1402 /* Build a stack frame showing `interrupted' so that the
1403 * user's backtrace makes (as much) sense (as usual) */
1405 /* fp state is saved and restored by call_into_lisp */
1406 /* FIXME: errno is not restored, but since current uses of this
1407 * function only call Lisp code that signals an error, it's not
1408 * much of a problem. In other words, running out of the control
1409 * stack between a syscall and (GET-ERRNO) may clobber errno if
1410 * something fails during signalling or in the handler. But I
1411 * can't see what can go wrong as long as there is no CONTINUE
1412 * like restart on them. */
1413 #ifdef LISP_FEATURE_X86
1414 /* Suppose the existence of some function that saved all
1415 * registers, called call_into_lisp, then restored GP registers and
1416 * returned. It would look something like this:
1424 pushl {address of function to call}
1425 call 0x8058db0 <call_into_lisp>
1432 * What we do here is set up the stack that call_into_lisp would
1433 * expect to see if it had been called by this code, and frob the
1434 * signal context so that signal return goes directly to call_into_lisp,
1435 * and when that function (and the lisp function it invoked) returns,
1436 * it returns to the second half of this imaginary function which
1437 * restores all registers and returns to C
1439 * For this to work, the latter part of the imaginary function
1440 * must obviously exist in reality. That would be post_signal_tramp
1443 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1445 #if defined(LISP_FEATURE_DARWIN)
/* Darwin/x86: the interrupted state is parked in a freshly mapped
 * save area instead of on the stack; call_into_lisp_tramp rebuilds
 * the frame from it (steps 1-4 below). */
1446 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1448 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1449 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1451 /* 1. os_validate (malloc/mmap) register_save_block
1452 * 2. copy register state into register_save_block
1453 * 3. put a pointer to register_save_block in a register in the context
1454 * 4. set the context's EIP to point to a trampoline which:
1455 * a. builds the fake stack frame from the block
1456 * b. frees the block
1457 * c. calls the function
1460 *register_save_area = *os_context_pc_addr(context);
1461 *(register_save_area + 1) = function;
1462 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1463 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1464 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1465 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1466 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1467 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1468 *(register_save_area + 8) = *context_eflags_addr(context);
1470 *os_context_pc_addr(context) =
1471 (os_context_register_t) funptr;
1472 *os_context_register_addr(context,reg_ECX) =
1473 (os_context_register_t) register_save_area;
/* Non-Darwin x86: build the fake frame directly below SP. */
1476 /* return address for call_into_lisp: */
1477 *(sp-15) = (u32)post_signal_tramp;
1478 *(sp-14) = function; /* args for call_into_lisp : function*/
1479 *(sp-13) = 0; /* arg array */
1480 *(sp-12) = 0; /* no. args */
1481 /* this order matches that used in POPAD */
1482 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1483 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1485 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1486 /* POPAD ignores the value of ESP: */
1488 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1490 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1491 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1492 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1493 *(sp-3)=*context_eflags_addr(context);
1494 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1495 *(sp-1)=*os_context_pc_addr(context);
1499 #elif defined(LISP_FEATURE_X86_64)
1500 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1502 /* return address for call_into_lisp: */
1503 *(sp-18) = (u64)post_signal_tramp;
1505 *(sp-17)=*os_context_register_addr(context,reg_R15);
1506 *(sp-16)=*os_context_register_addr(context,reg_R14);
1507 *(sp-15)=*os_context_register_addr(context,reg_R13);
1508 *(sp-14)=*os_context_register_addr(context,reg_R12);
1509 *(sp-13)=*os_context_register_addr(context,reg_R11);
1510 *(sp-12)=*os_context_register_addr(context,reg_R10);
1511 *(sp-11)=*os_context_register_addr(context,reg_R9);
1512 *(sp-10)=*os_context_register_addr(context,reg_R8);
1513 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1514 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1515 /* skip RBP and RSP */
1516 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1517 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1518 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1519 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1520 *(sp-3)=*context_eflags_addr(context);
1521 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1522 *(sp-1)=*os_context_pc_addr(context);
/* x86-64 calling convention: function/args go in registers. */
1524 *os_context_register_addr(context,reg_RDI) =
1525 (os_context_register_t)function; /* function */
1526 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1527 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1529 struct thread *th=arch_os_get_current_thread();
1530 build_fake_control_stack_frames(th,context);
1533 #ifdef LISP_FEATURE_X86
1535 #if !defined(LISP_FEATURE_DARWIN)
1536 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1537 *os_context_register_addr(context,reg_ECX) = 0;
1538 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1540 *os_context_register_addr(context,reg_UESP) =
1541 (os_context_register_t)(sp-15);
1543 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1544 #endif /* __NETBSD__ */
1545 #endif /* LISP_FEATURE_DARWIN */
1547 #elif defined(LISP_FEATURE_X86_64)
1548 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1549 *os_context_register_addr(context,reg_RCX) = 0;
1550 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1551 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
/* Non-x86 ports: jump straight at the simple-fun's code. */
1553 /* this much of the calling convention is common to all
1555 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1556 *os_context_register_addr(context,reg_NARGS) = 0;
1557 *os_context_register_addr(context,reg_LIP) =
1558 (os_context_register_t)(unsigned long)code;
1559 *os_context_register_addr(context,reg_CFP) =
1560 (os_context_register_t)(unsigned long)access_control_frame_pointer(th);
1562 #ifdef ARCH_HAS_NPC_REGISTER
1563 *os_context_npc_addr(context) =
1564 4 + *os_context_pc_addr(context);
1566 #ifdef LISP_FEATURE_SPARC
1567 *os_context_register_addr(context,reg_CODE) =
1568 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1570 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
/* Convenience wrapper: arrange for the interrupted code to call the
 * Lisp FUNCTION on signal return, using the platform's trampoline
 * (Darwin/x86 needs call_into_lisp_tramp). */
1575 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1577 #if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_X86)
1578 arrange_return_to_c_function(context, call_into_lisp_tramp, function);
1580 arrange_return_to_c_function(context, call_into_lisp, function);
1584 /* KLUDGE: Theoretically the approach we use for undefined alien
1585 * variables should work for functions as well, but on PPC/Darwin
1586 * we get bus error at bogus addresses instead, hence this workaround,
1587 * that has the added benefit of automatically discriminating between
1588 * functions and variables.
/* Signals UNDEFINED-ALIEN-FUNCTION-ERROR on the Lisp side. */
1591 undefined_alien_function(void)
1593 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Unprotect TH's control-stack guard page (and arm the return-guard
 * page) so an exhausted stack gets headroom for error handling. */
1596 void lower_thread_control_stack_guard_page(struct thread *th)
1598 protect_control_stack_guard_page(0, th);
1599 protect_control_stack_return_guard_page(1, th);
1600 th->control_stack_guard_page_protected = NIL;
1601 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
/* Re-arm TH's control-stack guard page after recovery; the page is
 * zeroed first so conservative GC sees no stale data there. */
1604 void reset_thread_control_stack_guard_page(struct thread *th)
1606 memset(CONTROL_STACK_GUARD_PAGE(th), 0, os_vm_page_size);
1607 protect_control_stack_guard_page(1, th);
1608 protect_control_stack_return_guard_page(0, th);
1609 th->control_stack_guard_page_protected = T;
1610 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1613 /* Called from the REPL, too. */
/* Re-arm the current thread's guard page, but only if it is
 * currently lowered. */
1614 void reset_control_stack_guard_page(void)
1616 struct thread *th=arch_os_get_current_thread();
1617 if (th->control_stack_guard_page_protected == NIL) {
1618 reset_thread_control_stack_guard_page(th);
/* Lower the guard page for the current thread. */
1622 void lower_control_stack_guard_page(void)
1624 lower_thread_control_stack_guard_page(arch_os_get_current_thread());
/* Fault dispatcher for the various guard pages (control, binding and
 * alien stacks, plus the undefined-alien page).  Classifies ADDR,
 * flips the relevant protections, and arranges a return into the
 * appropriate Lisp error function.  Hard guard pages mean we are
 * beyond recovery and lose().
 * NOTE(review): elided extract — the return statements and several
 * closing braces are among the missing lines. */
1628 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1630 struct thread *th=arch_os_get_current_thread();
1632 if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
1633 addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1634 lose("Control stack exhausted");
1636 else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1637 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1638 /* We hit the end of the control stack: disable guard page
1639 * protection so the error handler has some headroom, protect the
1640 * previous page so that we can catch returns from the guard page
1641 * and restore it. */
1642 if (th->control_stack_guard_page_protected == NIL)
1643 lose("control_stack_guard_page_protected NIL");
1644 lower_control_stack_guard_page();
1645 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1646 /* For the unfortunate case, when the control stack is
1647 * exhausted in a signal handler. */
1648 unblock_signals_in_context_and_maybe_warn(context);
1650 arrange_return_to_lisp_function
1651 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1654 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1655 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1656 /* We're returning from the guard page: reprotect it, and
1657 * unprotect this one. This works even if we somehow missed
1658 * the return-guard-page, and hit it on our way to new
1659 * exhaustion instead. */
1660 if (th->control_stack_guard_page_protected != NIL)
1661 lose("control_stack_guard_page_protected not NIL");
1662 reset_control_stack_guard_page();
1665 else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
1666 addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1667 lose("Binding stack exhausted");
1669 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1670 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1671 protect_binding_stack_guard_page(0, NULL);
1672 protect_binding_stack_return_guard_page(1, NULL);
1673 fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
1675 /* For the unfortunate case, when the binding stack is
1676 * exhausted in a signal handler. */
1677 unblock_signals_in_context_and_maybe_warn(context);
1678 arrange_return_to_lisp_function
1679 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1682 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1683 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1684 protect_binding_stack_guard_page(1, NULL);
1685 protect_binding_stack_return_guard_page(0, NULL);
1686 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1689 else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
1690 addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1691 lose("Alien stack exhausted");
1693 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1694 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1695 protect_alien_stack_guard_page(0, NULL);
1696 protect_alien_stack_return_guard_page(1, NULL);
1697 fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
1699 /* For the unfortunate case, when the alien stack is
1700 * exhausted in a signal handler. */
1701 unblock_signals_in_context_and_maybe_warn(context);
1702 arrange_return_to_lisp_function
1703 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1706 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1707 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1708 protect_alien_stack_guard_page(1, NULL);
1709 protect_alien_stack_return_guard_page(0, NULL);
1710 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1713 else if (addr >= undefined_alien_address &&
1714 addr < undefined_alien_address + os_vm_page_size) {
1715 arrange_return_to_lisp_function
1716 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1723 * noise to install handlers
1726 #ifndef LISP_FEATURE_WIN32
1727 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1728 * they are blocked, in Linux 2.6 the default handler is invoked
1729 * instead that usually coredumps. One might hastily think that adding
1730 * SA_NODEFER helps, but until ~2.6.13, if SA_NODEFER is specified
1731 * then the whole sa_mask is ignored: the only effect is that the
1732 * signal in question is not added to the mask. That means if it's not blockable the
1733 * signal must be unblocked at the beginning of signal handlers.
1735 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1736 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1737 * will be unblocked in the sigmask during the signal handler. -- RMK
/* Tri-state result of the runtime SA_NODEFER probe:
 * -1 = not yet tested, 0 = broken, 1 = works. Volatile because it is
 * written from the test signal handler and spun on by the main line. */
1740 static volatile int sigaction_nodefer_works = -1;
1742 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1743 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Handler for the probe signal: inspects the current sigmask and
 * records whether SA_NODEFER behaved as POSIX specifies. */
1746 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1750 get_current_sigmask(&current);
1751 /* There should be exactly two blocked signals: the two we added
1752 * to sa_mask when setting up the handler. NetBSD doesn't block
1753 * the signal we're handling when SA_NODEFER is set; Linux before
1754 * 2.6.13 or so also doesn't block the other signal when
1755 * SA_NODEFER is set. */
1756 for(i = 1; i < NSIG; i++)
1757 if (sigismember(&current, i) !=
1758 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1759 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1760 sigaction_nodefer_works = 0;
/* Only claim success if no mismatch set the flag to 0 above. */
1762 if (sigaction_nodefer_works == -1)
1763 sigaction_nodefer_works = 1;
/* Probe the kernel's SA_NODEFER behavior by installing a test handler,
 * raising the test signal, and spinning until the handler reports its
 * verdict in sigaction_nodefer_works. Run once at init. */
1767 see_if_sigaction_nodefer_works(void)
1769 struct sigaction sa, old_sa;
1771 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1772 sa.sa_sigaction = sigaction_nodefer_test_handler;
1773 sigemptyset(&sa.sa_mask);
1774 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1775 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1776 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1777 /* Make sure no signals are blocked. */
1780 sigemptyset(&empty);
1781 thread_sigmask(SIG_SETMASK, &empty, 0);
1783 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* Busy-wait for the handler to record its result. */
1784 while (sigaction_nodefer_works == -1);
1785 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1788 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1789 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Fallback outermost handler used when SA_NODEFER is broken: manually
 * unblock the arriving signal, then dispatch normally. */
1792 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1794 SAVE_ERRNO(signal,context,void_context);
1797 sigemptyset(&unblock);
1798 sigaddset(&unblock, signal);
1799 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1800 interrupt_handle_now(signal, info, context);
/* As unblock_me_trampoline, but dispatches to the registered
 * low-level (C-only) handler instead of the generic path. */
1805 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1807 SAVE_ERRNO(signal,context,void_context);
1810 sigemptyset(&unblock);
1811 sigaddset(&unblock, signal);
1812 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1813 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Plain outermost handler that forwards directly to the registered
 * low-level handler, preserving errno around the call. */
1818 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1820 SAVE_ERRNO(signal,context,void_context);
1821 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Install HANDLER as the low-level (C runtime) handler for SIGNAL,
 * choosing the appropriate outermost trampoline based on whether the
 * signal is deferrable and whether SA_NODEFER works, then record it
 * in interrupt_low_level_handlers[]. SIG_DFL uninstalls. */
1826 undoably_install_low_level_interrupt_handler (int signal,
1827 interrupt_handler_t handler)
1829 struct sigaction sa;
1831 if (0 > signal || signal >= NSIG) {
1832 lose("bad signal number %d\n", signal);
1835 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1836 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1837 else if (sigismember(&deferrable_sigset,signal))
1838 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1839 else if (!sigaction_nodefer_works &&
1840 !sigismember(&blockable_sigset, signal))
1841 sa.sa_sigaction = low_level_unblock_me_trampoline;
1843 sa.sa_sigaction = low_level_handle_now_handler;
1845 #ifdef LISP_FEATURE_SB_THRUPTION
1846 /* It's in `deferrable_sigset' so that we block&unblock it properly,
1847 * but we don't actually want to defer it. And if we put it only
1848 * into blockable_sigset, we'd have to special-case it around thread
1849 * creation at least. */
1850 if (signal == SIGPIPE)
1851 sa.sa_sigaction = low_level_handle_now_handler;
/* Block everything blockable while the handler runs. */
1854 sigcopyset(&sa.sa_mask, &blockable_sigset);
1855 sa.sa_flags = SA_SIGINFO | SA_RESTART
1856 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1857 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Memory faults may arrive on an exhausted stack: use sigaltstack. */
1858 if(signal==SIG_MEMORY_FAULT) {
1859 sa.sa_flags |= SA_ONSTACK;
1860 # ifdef LISP_FEATURE_SB_SAFEPOINT
1861 sigaddset(&sa.sa_mask, SIGRTMIN);
1862 sigaddset(&sa.sa_mask, SIGRTMIN+1);
1867 sigaction(signal, &sa, NULL);
1868 interrupt_low_level_handlers[signal] =
1869 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1873 /* This is called from Lisp. */
/* Install HANDLER as the Lisp-visible handler for SIGNAL and return
 * the previous handler (as a lispobj). If a low-level handler is
 * already installed for the signal, only interrupt_handlers[] is
 * updated; the low-level handler stays the outermost one.
 * NOTE(review): elided extract — locals (e.g. `old`) and some braces
 * are missing between the numbered lines. */
1875 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*))
1877 #ifndef LISP_FEATURE_WIN32
1878 struct sigaction sa;
1880 union interrupt_handler oldhandler;
1882 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
/* Keep signals out while we swap handler state. */
1884 block_blockable_signals(0, &old);
1886 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1887 (unsigned int)interrupt_low_level_handlers[signal]));
1888 if (interrupt_low_level_handlers[signal]==0) {
1889 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1890 ARE_SAME_HANDLER(handler, SIG_IGN))
1891 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1892 else if (sigismember(&deferrable_sigset, signal))
1893 sa.sa_sigaction = maybe_now_maybe_later;
1894 else if (!sigaction_nodefer_works &&
1895 !sigismember(&blockable_sigset, signal))
1896 sa.sa_sigaction = unblock_me_trampoline;
1898 sa.sa_sigaction = interrupt_handle_now_handler;
1900 sigcopyset(&sa.sa_mask, &blockable_sigset);
1901 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1902 (sigaction_nodefer_works ? SA_NODEFER : 0);
1903 sigaction(signal, &sa, NULL);
1906 oldhandler = interrupt_handlers[signal];
1907 interrupt_handlers[signal].c = handler;
/* Restore the caller's signal mask. */
1909 thread_sigmask(SIG_SETMASK, &old, 0);
1911 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1913 return (unsigned long)oldhandler.lisp;
1915 /* Probably-wrong Win32 hack */
1920 /* This must not go through lisp as it's allowed anytime, even when on
/* SIGABRT is always fatal to the runtime: report and die. */
1923 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
1925 lose("SIGABRT received.\n");
/* One-time signal-subsystem initialization: probe SA_NODEFER, build
 * the deferrable/blockable/gc signal sets, default all high-level
 * handlers, and install the low-level SIGABRT handler. */
1929 interrupt_init(void)
1931 #ifndef LISP_FEATURE_WIN32
1933 SHOW("entering interrupt_init()");
1934 see_if_sigaction_nodefer_works();
1935 sigemptyset(&deferrable_sigset);
1936 sigemptyset(&blockable_sigset);
1937 sigemptyset(&gc_sigset);
1938 sigaddset_deferrable(&deferrable_sigset);
1939 sigaddset_blockable(&blockable_sigset);
1940 sigaddset_gc(&gc_sigset);
1942 /* Set up high level handler information. */
1943 for (i = 0; i < NSIG; i++) {
1944 interrupt_handlers[i].c =
1945 /* (The cast here blasts away the distinction between
1946 * SA_SIGACTION-style three-argument handlers and
1947 * signal(..)-style one-argument handlers, which is OK
1948 * because it works to call the 1-argument form where the
1949 * 3-argument form is expected.) */
1950 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
1952 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1953 SHOW("returning from interrupt_init()");
1957 #ifndef LISP_FEATURE_WIN32
/* Accessor for siginfo_t.si_code, for callers that only have the
 * opaque pointer (e.g. Lisp via a SAP). */
1959 siginfo_code(siginfo_t *info)
1961 return info->si_code;
/* Last faulting address, published for Lisp-side error reporting. */
1963 os_vm_address_t current_memory_fault_address;
/* Report a memory fault at ADDR and hand control to Lisp's
 * MEMORY-FAULT-ERROR, either by rewriting the context (when the C
 * stack is the control stack) or by a direct funcall. */
1966 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1968 /* FIXME: This is lossy: if we get another memory fault (eg. from
1969 * another thread) before lisp has read this, we lose the information.
1970 * However, since this is mostly informative, we'll live with that for
1971 * now -- some address is better then no address in this case.
1973 current_memory_fault_address = addr;
1974 /* To allow debugging memory faults in signal handlers and such. */
1975 corruption_warning_and_maybe_lose("Memory fault at %x (pc=%p, sp=%p)",
1977 *os_context_pc_addr(context),
1978 #ifdef ARCH_HAS_STACK_POINTER
1979 *os_context_sp_addr(context)
1984 unblock_signals_in_context_and_maybe_warn(context);
1985 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1986 arrange_return_to_lisp_function(context,
1987 StaticSymbolFunction(MEMORY_FAULT_ERROR));
1989 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Last-resort trap handling: wrap the context in a SAP and hand it to
 * Lisp's UNHANDLED-TRAP-ERROR, which must not return. */
1995 unhandled_trap_error(os_context_t *context)
1997 lispobj context_sap;
1998 fake_foreign_function_call(context);
1999 #ifndef LISP_FEATURE_SB_SAFEPOINT
2000 unblock_gc_signals(0, 0);
2002 context_sap = alloc_sap(context);
2003 #ifndef LISP_FEATURE_WIN32
/* Restore the interrupted mask so Lisp error handling can run. */
2004 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
2006 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
2007 lose("UNHANDLED-TRAP-ERROR fell through");
2010 /* Common logic for trapping instructions. How we actually handle each
2011 * case is highly architecture dependent, but the overall shape is
2014 handle_trap(os_context_t *context, int trap)
2017 case trap_PendingInterrupt:
2018 FSHOW((stderr, "/<trap pending interrupt>\n"));
2019 arch_skip_instruction(context);
2020 interrupt_handle_pending(context);
2024 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
2025 interrupt_internal_error(context, trap==trap_Cerror);
2027 case trap_Breakpoint:
2028 arch_handle_breakpoint(context);
2030 case trap_FunEndBreakpoint:
2031 arch_handle_fun_end_breakpoint(context);
2033 #ifdef trap_AfterBreakpoint
2034 case trap_AfterBreakpoint:
2035 arch_handle_after_breakpoint(context);
2038 #ifdef trap_SingleStepAround
2039 case trap_SingleStepAround:
2040 case trap_SingleStepBefore:
2041 arch_handle_single_step_trap(context, trap);
2044 #ifdef LISP_FEATURE_SB_SAFEPOINT
2045 case trap_GlobalSafepoint:
2046 fake_foreign_function_call(context);
2047 thread_in_lisp_raised(context);
2048 undo_fake_foreign_function_call(context);
2049 arch_skip_instruction(context);
2051 case trap_CspSafepoint:
2052 fake_foreign_function_call(context);
2053 thread_in_safety_transition(context);
2054 undo_fake_foreign_function_call(context);
2055 arch_skip_instruction(context);
2059 fake_foreign_function_call(context);
2060 lose("%%PRIMITIVE HALT called; the party is over.\n");
2062 unhandled_trap_error(context);