2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* When we catch an internal error, should we pass it back to Lisp to
72 * be handled in a high-level way? (Early in cold init, the answer is
73 * 'no', because Lisp is still too brain-dead to handle anything.
74 * After sufficient initialization has been completed, the answer
76 boolean internal_errors_enabled = 0;
78 #ifndef LISP_FEATURE_WIN32
80 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
82 union interrupt_handler interrupt_handlers[NSIG];
84 /* Under Linux on some architectures, we appear to have to restore the
85 * FPU control word from the context, as after the signal is delivered
86 * we appear to have a null FPU control word. */
87 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
88 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
89 os_context_t *context = arch_os_get_context(&void_context); \
90 os_restore_fp_control(context);
92 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
93 os_context_t *context = arch_os_get_context(&void_context);
96 /* Foreign code may want to start some threads on its own.
97 * Non-targeted, truly asynchronous signals can be delivered to
98 * basically any thread, but invoking Lisp handlers in such foreign
99 * threads is really bad, so let's resignal it.
101 * This should at least bring attention to the problem, but it cannot
102 * work for SIGSEGV and similar. It is good enough for timers, and
103 * maybe all deferrables. */
105 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
107 add_handled_signals(sigset_t *sigset)
110 for(i = 1; i < NSIG; i++) {
111 if (!(ARE_SAME_HANDLER(interrupt_low_level_handlers[i], SIG_DFL)) ||
112 !(ARE_SAME_HANDLER(interrupt_handlers[i].c, SIG_DFL))) {
113 sigaddset(sigset, i);
118 void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
122 maybe_resignal_to_lisp_thread(int signal, os_context_t *context)
124 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
125 if (!pthread_getspecific(lisp_thread)) {
126 if (!(sigismember(&deferrable_sigset,signal))) {
127 corruption_warning_and_maybe_lose
128 ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
134 sigemptyset(&sigset);
135 add_handled_signals(&sigset);
136 block_signals(&sigset, 0, 0);
137 block_signals(&sigset, os_context_sigmask_addr(context), 0);
138 kill(getpid(), signal);
146 /* These are to be used in signal handlers. Currently all handlers are
147 * called from one of:
149 * interrupt_handle_now_handler
150 * maybe_now_maybe_later
151 * unblock_me_trampoline
152 * low_level_handle_now_handler
153 * low_level_maybe_now_maybe_later
154 * low_level_unblock_me_trampoline
156 * This gives us a single point of control (or six) over errno, fp
157 * control word, and fixing up signal context on sparc.
159 * The SPARC/Linux platform doesn't quite do signals the way we want
160 * them done. The third argument in the handler isn't filled in by the
161 * kernel properly, so we fix it up ourselves in the
162 * arch_os_get_context(..) function. -- CSR, 2002-07-23
164 #define SAVE_ERRNO(signal,context,void_context) \
166 int _saved_errno = errno; \
167 RESTORE_FP_CONTROL_WORD(context,void_context); \
168 if (!maybe_resignal_to_lisp_thread(signal, context)) \
171 #define RESTORE_ERRNO \
173 errno = _saved_errno; \
176 static void run_deferred_handler(struct interrupt_data *data,
177 os_context_t *context);
178 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
179 static void store_signal_data_for_later (struct interrupt_data *data,
180 void *handler, int signal,
182 os_context_t *context);
185 /* Generic signal related utilities. */
188 get_current_sigmask(sigset_t *sigset)
190 /* Get the current sigmask, by blocking the empty set. */
191 thread_sigmask(SIG_BLOCK, 0, sigset);
195 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
200 sigcopyset(old, where);
201 for(i = 1; i < NSIG; i++) {
202 if (sigismember(what, i))
206 thread_sigmask(SIG_BLOCK, what, old);
211 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
216 sigcopyset(old, where);
217 for(i = 1; i < NSIG; i++) {
218 if (sigismember(what, i))
222 thread_sigmask(SIG_UNBLOCK, what, old);
227 print_sigset(sigset_t *sigset)
230 for(i = 1; i < NSIG; i++) {
231 if (sigismember(sigset, i))
232 fprintf(stderr, "Signal %d masked\n", i);
236 /* Return 1 if all signals in sigset2 are masked in sigset, return 0
237 * if all are unmasked; otherwise die. Passing NULL for sigset is a shorthand
238 * for the current sigmask. */
240 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
243 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
245 boolean has_blocked = 0, has_unblocked = 0;
248 get_current_sigmask(&current);
251 for(i = 1; i < NSIG; i++) {
252 if (sigismember(sigset2, i)) {
253 if (sigismember(sigset, i))
259 if (has_blocked && has_unblocked) {
260 print_sigset(sigset);
261 lose("some %s signals blocked, some unblocked\n", name);
271 /* Deferrables, blockables, gc signals. */
274 sigaddset_deferrable(sigset_t *s)
276 sigaddset(s, SIGHUP);
277 sigaddset(s, SIGINT);
278 sigaddset(s, SIGTERM);
279 sigaddset(s, SIGQUIT);
280 sigaddset(s, SIGPIPE);
281 sigaddset(s, SIGALRM);
282 sigaddset(s, SIGURG);
283 sigaddset(s, SIGTSTP);
284 sigaddset(s, SIGCHLD);
286 #ifndef LISP_FEATURE_HPUX
287 sigaddset(s, SIGXCPU);
288 sigaddset(s, SIGXFSZ);
290 sigaddset(s, SIGVTALRM);
291 sigaddset(s, SIGPROF);
292 sigaddset(s, SIGWINCH);
296 sigaddset_blockable(sigset_t *sigset)
298 sigaddset_deferrable(sigset);
299 sigaddset_gc(sigset);
303 sigaddset_gc(sigset_t *sigset)
305 #ifdef THREADS_USING_GCSIGNAL
306 sigaddset(sigset,SIG_STOP_FOR_GC);
310 /* initialized in interrupt_init */
311 sigset_t deferrable_sigset;
312 sigset_t blockable_sigset;
317 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
319 deferrables_blocked_p(sigset_t *sigset)
321 return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
326 check_deferrables_unblocked_or_lose(sigset_t *sigset)
328 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
329 if (deferrables_blocked_p(sigset))
330 lose("deferrables blocked\n");
335 check_deferrables_blocked_or_lose(sigset_t *sigset)
337 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
338 if (!deferrables_blocked_p(sigset))
339 lose("deferrables unblocked\n");
343 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
345 blockables_blocked_p(sigset_t *sigset)
347 return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
352 check_blockables_unblocked_or_lose(sigset_t *sigset)
354 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
355 if (blockables_blocked_p(sigset))
356 lose("blockables blocked\n");
361 check_blockables_blocked_or_lose(sigset_t *sigset)
363 #if !defined(LISP_FEATURE_WIN32)
364 /* On Windows, there are no actual signals, but since the win32 port
365 * tracks the sigmask and checks it explicitly, some functions are
366 * still required to keep the mask set up properly. (After all, the
367 * goal of the sigmask emulation is to not have to change all the
368 * call sites in the first place.)
370 * However, this does not hold for all signals equally: While
371 * deferrables matter ("is interrupt-thread okay?"), it is not worth
372 * having to set up blockables properly (which include the
373 * non-existing GC signals).
375 * Yet, as the original comment explains it:
376 * Adjusting FREE-INTERRUPT-CONTEXT-INDEX* and other aspecs of
377 * fake_foreign_function_call machinery are sometimes useful here[...].
379 * So we merely skip this assertion.
380 * -- DFL, trying to expand on a comment by AK.
382 if (!blockables_blocked_p(sigset))
383 lose("blockables unblocked\n");
387 #ifndef LISP_FEATURE_SB_SAFEPOINT
388 #if !defined(LISP_FEATURE_WIN32)
390 gc_signals_blocked_p(sigset_t *sigset)
392 return all_signals_blocked_p(sigset, &gc_sigset, "gc");
397 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
399 #if !defined(LISP_FEATURE_WIN32)
400 if (gc_signals_blocked_p(sigset))
401 lose("gc signals blocked\n");
406 check_gc_signals_blocked_or_lose(sigset_t *sigset)
408 #if !defined(LISP_FEATURE_WIN32)
409 if (!gc_signals_blocked_p(sigset))
410 lose("gc signals unblocked\n");
416 block_deferrable_signals(sigset_t *where, sigset_t *old)
418 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
419 block_signals(&deferrable_sigset, where, old);
424 block_blockable_signals(sigset_t *where, sigset_t *old)
426 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
427 block_signals(&blockable_sigset, where, old);
431 #ifndef LISP_FEATURE_SB_SAFEPOINT
433 block_gc_signals(sigset_t *where, sigset_t *old)
435 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
436 block_signals(&gc_sigset, where, old);
442 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
444 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
445 if (interrupt_handler_pending_p())
446 lose("unblock_deferrable_signals: losing proposition\n");
447 #ifndef LISP_FEATURE_SB_SAFEPOINT
448 check_gc_signals_unblocked_or_lose(where);
450 unblock_signals(&deferrable_sigset, where, old);
455 unblock_blockable_signals(sigset_t *where, sigset_t *old)
457 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
458 unblock_signals(&blockable_sigset, where, old);
462 #ifndef LISP_FEATURE_SB_SAFEPOINT
464 unblock_gc_signals(sigset_t *where, sigset_t *old)
466 #ifndef LISP_FEATURE_WIN32
467 unblock_signals(&gc_sigset, where, old);
473 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
475 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
476 sigset_t *sigset = os_context_sigmask_addr(context);
477 #ifndef LISP_FEATURE_SB_SAFEPOINT
478 if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
479 corruption_warning_and_maybe_lose(
480 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
481 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
482 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
483 unblock_gc_signals(sigset, 0);
486 if (!interrupt_handler_pending_p()) {
487 unblock_deferrable_signals(sigset, 0);
494 check_interrupts_enabled_or_lose(os_context_t *context)
496 struct thread *thread=arch_os_get_current_thread();
497 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
498 lose("interrupts not enabled\n");
499 if (arch_pseudo_atomic_atomic(context))
500 lose ("in pseudo atomic section\n");
503 /* Save sigset (or the current sigmask if 0) if there is no pending
504 * handler, because that means that deferrables are already blocked.
505 * The purpose is to avoid losing the pending gc signal if a
506 * deferrable interrupt async unwinds between clearing the pseudo
507 * atomic and trapping to GC.*/
508 #ifndef LISP_FEATURE_SB_SAFEPOINT
510 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
512 #ifndef LISP_FEATURE_WIN32
513 struct thread *thread = arch_os_get_current_thread();
514 struct interrupt_data *data = thread->interrupt_data;
516 /* Obviously, this function is called when signals may not be
517 * blocked. Let's make sure we are not interrupted. */
518 block_blockable_signals(0, &oldset);
519 #ifndef LISP_FEATURE_SB_THREAD
520 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
522 if (data->gc_blocked_deferrables)
523 lose("gc_blocked_deferrables already true\n");
525 if ((!data->pending_handler) &&
526 (!data->gc_blocked_deferrables)) {
527 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
528 data->gc_blocked_deferrables = 1;
530 /* This is the sigmask of some context. */
531 sigcopyset(&data->pending_mask, sigset);
532 sigaddset_deferrable(sigset);
533 thread_sigmask(SIG_SETMASK,&oldset,0);
536 /* Operating on the current sigmask. Save oldset and
537 * unblock gc signals. In the end, this is equivalent to
538 * blocking the deferrables. */
539 sigcopyset(&data->pending_mask, &oldset);
540 thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
544 thread_sigmask(SIG_SETMASK,&oldset,0);
549 /* Are we leaving WITH-GCING and already running with interrupts
550 * enabled, without the protection of *GC-INHIBIT* T and there is gc
551 * (or stop for gc) pending, but we haven't trapped yet? */
553 in_leaving_without_gcing_race_p(struct thread *thread)
555 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
556 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
557 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
558 ((SymbolValue(GC_PENDING,thread) != NIL)
559 #if defined(LISP_FEATURE_SB_THREAD)
560 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
565 /* Check our baroque invariants. */
567 check_interrupt_context_or_lose(os_context_t *context)
569 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
570 struct thread *thread = arch_os_get_current_thread();
571 struct interrupt_data *data = thread->interrupt_data;
572 int interrupt_deferred_p = (data->pending_handler != 0);
573 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
574 sigset_t *sigset = os_context_sigmask_addr(context);
575 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
576 * handle_allocation_trap. */
577 #if defined(LISP_FEATURE_GENCGC) && !defined(GENCGC_IS_PRECISE)
578 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
579 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
580 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
581 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
582 int in_race_p = in_leaving_without_gcing_race_p(thread);
583 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
584 * section and trapping, a SIG_STOP_FOR_GC would see the next
585 * check fail, for this reason sig_stop_for_gc handler does not
586 * call this function. */
587 if (interrupt_deferred_p) {
588 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
589 lose("Stray deferred interrupt.\n");
592 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
593 lose("GC_PENDING, but why?\n");
594 #if defined(LISP_FEATURE_SB_THREAD)
596 int stop_for_gc_pending =
597 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
598 if (stop_for_gc_pending)
599 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
600 lose("STOP_FOR_GC_PENDING, but why?\n");
601 if (pseudo_atomic_interrupted)
602 if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
603 lose("pseudo_atomic_interrupted, but why?\n");
606 if (pseudo_atomic_interrupted)
607 if (!(gc_pending || interrupt_deferred_p))
608 lose("pseudo_atomic_interrupted, but why?\n");
611 if (interrupt_pending && !interrupt_deferred_p)
612 lose("INTERRUPT_PENDING but not pending handler.\n");
613 if ((data->gc_blocked_deferrables) && interrupt_pending)
614 lose("gc_blocked_deferrables and interrupt pending\n.");
615 if (data->gc_blocked_deferrables)
616 check_deferrables_blocked_or_lose(sigset);
617 if (interrupt_pending || interrupt_deferred_p ||
618 data->gc_blocked_deferrables)
619 check_deferrables_blocked_or_lose(sigset);
621 check_deferrables_unblocked_or_lose(sigset);
622 #ifndef LISP_FEATURE_SB_SAFEPOINT
623 /* If deferrables are unblocked then we are open to signals
624 * that run lisp code. */
625 check_gc_signals_unblocked_or_lose(sigset);
632 * utility routines used by various signal handlers
636 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
638 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
642 /* Build a fake stack frame or frames */
644 access_control_frame_pointer(th) =
646 (*os_context_register_addr(context, reg_CSP));
647 if ((lispobj *)(uword_t)
648 (*os_context_register_addr(context, reg_CFP))
649 == access_control_frame_pointer(th)) {
650 /* There is a small window during call where the callee's
651 * frame isn't built yet. */
652 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
653 == FUN_POINTER_LOWTAG) {
654 /* We have called, but not built the new frame, so
655 * build it for them. */
656 access_control_frame_pointer(th)[0] =
657 *os_context_register_addr(context, reg_OCFP);
658 access_control_frame_pointer(th)[1] =
659 *os_context_register_addr(context, reg_LRA);
660 access_control_frame_pointer(th) += 8;
661 /* Build our frame on top of it. */
662 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
665 /* We haven't yet called, build our frame as if the
666 * partial frame wasn't there. */
667 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
670 /* We can't tell whether we are still in the caller if it had to
671 * allocate a stack frame due to stack arguments. */
672 /* This observation provoked some past CMUCL maintainer to ask
673 * "Can anything strange happen during return?" */
676 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
679 access_control_stack_pointer(th) = access_control_frame_pointer(th) + 8;
681 access_control_frame_pointer(th)[0] = oldcont;
682 access_control_frame_pointer(th)[1] = NIL;
683 access_control_frame_pointer(th)[2] =
684 (lispobj)(*os_context_register_addr(context, reg_CODE));
688 /* Stores the context for gc to scavenge and builds fake stack
691 fake_foreign_function_call(os_context_t *context)
694 struct thread *thread=arch_os_get_current_thread();
696 /* context_index incrementing must not be interrupted */
697 check_blockables_blocked_or_lose(0);
699 /* Get current Lisp state from context. */
701 #ifdef LISP_FEATURE_SB_THREAD
702 thread->pseudo_atomic_bits =
704 dynamic_space_free_pointer =
707 (*os_context_register_addr(context, reg_ALLOC));
708 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
709 /* dynamic_space_free_pointer); */
710 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
711 if ((sword_t)dynamic_space_free_pointer & 1) {
712 lose("dead in fake_foreign_function_call, context = %x\n", context);
715 /* why doesn't PPC and SPARC do something like this: */
716 #if defined(LISP_FEATURE_HPPA)
717 if ((sword_t)dynamic_space_free_pointer & 4) {
718 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
723 set_binding_stack_pointer(thread,
724 *os_context_register_addr(context, reg_BSP));
727 build_fake_control_stack_frames(thread,context);
729 /* Do dynamic binding of the active interrupt context index
730 * and save the context in the context array. */
732 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
734 if (context_index >= MAX_INTERRUPTS) {
735 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
738 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
739 make_fixnum(context_index + 1),thread);
741 thread->interrupt_contexts[context_index] = context;
743 #if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
744 /* x86oid targets don't maintain the foreign function call flag at
745 * all, so leave them to believe that they are never in foreign
747 foreign_function_call_active_p(thread) = 1;
751 /* blocks all blockable signals. If you are calling from a signal handler,
752 * the usual signal mask will be restored from the context when the handler
753 * finishes. Otherwise, be careful */
755 undo_fake_foreign_function_call(os_context_t *context)
757 struct thread *thread=arch_os_get_current_thread();
758 /* Block all blockable signals. */
759 block_blockable_signals(0, 0);
761 foreign_function_call_active_p(thread) = 0;
763 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
766 #if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
767 /* Put the dynamic space free pointer back into the context. */
768 *os_context_register_addr(context, reg_ALLOC) =
769 (uword_t) dynamic_space_free_pointer
770 | (*os_context_register_addr(context, reg_ALLOC)
773 ((uword_t)(*os_context_register_addr(context, reg_ALLOC))
775 | ((uword_t) dynamic_space_free_pointer & LOWTAG_MASK);
778 #if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
779 /* Put the pseudo-atomic bits and dynamic space free pointer back
780 * into the context (p-a-bits for p-a, and dynamic space free
781 * pointer for ROOM). */
782 *os_context_register_addr(context, reg_ALLOC) =
783 (uword_t) dynamic_space_free_pointer
784 | (thread->pseudo_atomic_bits & LOWTAG_MASK);
785 /* And clear them so we don't get bit later by call-in/call-out
786 * not updating them. */
787 thread->pseudo_atomic_bits = 0;
791 /* a handler for the signal caused by execution of a trap opcode
792 * signalling an internal error */
794 interrupt_internal_error(os_context_t *context, boolean continuable)
798 fake_foreign_function_call(context);
800 if (!internal_errors_enabled) {
801 describe_internal_error(context);
802 /* There's no good way to recover from an internal error
803 * before the Lisp error handling mechanism is set up. */
804 lose("internal error too early in init, can't recover\n");
807 /* Allocate the SAP object while the interrupts are still
809 #ifndef LISP_FEATURE_SB_SAFEPOINT
810 unblock_gc_signals(0, 0);
812 context_sap = alloc_sap(context);
814 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
815 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
818 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
819 /* Workaround for blocked SIGTRAP. */
822 sigemptyset(&newset);
823 sigaddset(&newset, SIGTRAP);
824 thread_sigmask(SIG_UNBLOCK, &newset, 0);
828 SHOW("in interrupt_internal_error");
830 /* Display some rudimentary debugging information about the
831 * error, so that even if the Lisp error handler gets badly
832 * confused, we have a chance to determine what's going on. */
833 describe_internal_error(context);
835 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
836 continuable ? T : NIL);
838 undo_fake_foreign_function_call(context); /* blocks signals again */
840 arch_skip_instruction(context);
844 interrupt_handler_pending_p(void)
846 struct thread *thread = arch_os_get_current_thread();
847 struct interrupt_data *data = thread->interrupt_data;
848 return (data->pending_handler != 0);
852 interrupt_handle_pending(os_context_t *context)
854 /* There are three ways we can get here. First, if an interrupt
855 * occurs within pseudo-atomic, it will be deferred, and we'll
856 * trap to here at the end of the pseudo-atomic block. Second, if
857 * the GC (in alloc()) decides that a GC is required, it will set
858 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
859 * and alloc() is always called from within pseudo-atomic, and
860 * thus we end up here again. Third, when calling GC-ON or at the
861 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
862 * here if there is a pending GC. Fourth, ahem, at the end of
863 * WITHOUT-INTERRUPTS (bar complications with nesting).
865 * A fourth way happens with safepoints: In addition to a stop for
866 * GC that is pending, there are thruptions. Both mechanisms are
867 * mostly signal-free, yet also of an asynchronous nature, so it makes
868 * sense to let interrupt_handle_pending take care of running them:
869 * It gets run precisely at those places where it is safe to process
870 * pending asynchronous tasks. */
872 struct thread *thread = arch_os_get_current_thread();
873 struct interrupt_data *data = thread->interrupt_data;
875 if (arch_pseudo_atomic_atomic(context)) {
876 lose("Handling pending interrupt in pseudo atomic.");
879 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
881 check_blockables_blocked_or_lose(0);
882 #ifndef LISP_FEATURE_SB_SAFEPOINT
884 * (On safepoint builds, there is no gc_blocked_deferrables nor
887 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
888 * handler, then the pending mask was saved and
889 * gc_blocked_deferrables set. Hence, there can be no pending
890 * handler and it's safe to restore the pending mask.
892 * Note, that if gc_blocked_deferrables is false we may still have
893 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
894 * pseudo atomic was interrupted by a deferrable first. */
895 if (data->gc_blocked_deferrables) {
896 if (data->pending_handler)
897 lose("GC blocked deferrables but still got a pending handler.");
898 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
899 lose("GC blocked deferrables while GC is inhibited.");
900 /* Restore the saved signal mask from the original signal (the
901 * one that interrupted us during the critical section) into
902 * the os_context for the signal we're currently in the
903 * handler for. This should ensure that when we return from
904 * the handler the blocked signals are unblocked. */
905 #ifndef LISP_FEATURE_WIN32
906 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
908 data->gc_blocked_deferrables = 0;
912 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
913 void *original_pending_handler = data->pending_handler;
915 #ifdef LISP_FEATURE_SB_SAFEPOINT
916 /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
917 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL
918 # ifdef LISP_FEATURE_SB_THRUPTION
919 || (SymbolValue(THRUPTION_PENDING,thread) != NIL
920 && SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
923 /* We ought to take this chance to do a pitstop now. */
924 thread_in_lisp_raised(context);
925 #elif defined(LISP_FEATURE_SB_THREAD)
926 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
927 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
928 * the signal handler if it actually stops us. */
929 arch_clear_pseudo_atomic_interrupted(context);
930 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
933 /* Test for T and not for != NIL since the value :IN-PROGRESS
934 * is used in SUB-GC as part of the mechanism to suppress
936 if (SymbolValue(GC_PENDING,thread) == T) {
938 /* Two reasons for doing this. First, if there is a
939 * pending handler we don't want to run. Second, we are
940 * going to clear pseudo atomic interrupted to avoid
941 * spurious trapping on every allocation in SUB_GC and
942 * having a pending handler with interrupts enabled and
943 * without pseudo atomic interrupted breaks an
945 if (data->pending_handler) {
946 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
947 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
950 arch_clear_pseudo_atomic_interrupted(context);
952 /* GC_PENDING is cleared in SUB-GC, or if another thread
953 * is doing a gc already we will get a SIG_STOP_FOR_GC and
954 * that will clear it.
956 * If there is a pending handler or gc was triggered in a
957 * signal handler then maybe_gc won't run POST_GC and will
958 * return normally. */
959 if (!maybe_gc(context))
960 lose("GC not inhibited but maybe_gc did not GC.");
962 if (data->pending_handler) {
966 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
967 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
968 * GC-PENDING is not NIL then we cannot trap on pseudo
969 * atomic due to GC (see if(GC_PENDING) logic in
970 * cheneygc.c and gencgc.c), plus there is an outer
971 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
973 lose("Trapping to run pending handler while GC in progress.");
976 check_blockables_blocked_or_lose(0);
978 /* No GC shall be lost. If SUB_GC triggers another GC then
979 * that should be handled on the spot. */
980 if (SymbolValue(GC_PENDING,thread) != NIL)
981 lose("GC_PENDING after doing gc.");
982 #ifdef THREADS_USING_GCSIGNAL
983 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
984 lose("STOP_FOR_GC_PENDING after doing gc.");
986 /* Check two things. First, that gc does not clobber a handler
987 * that's already pending. Second, that there is no interrupt
988 * lossage: if original_pending_handler was NULL then even if
989 * an interrupt arrived during GC (POST-GC, really) it was
991 if (original_pending_handler != data->pending_handler)
992 lose("pending handler changed in gc: %x -> %x.",
993 original_pending_handler, data->pending_handler);
996 #ifndef LISP_FEATURE_WIN32
997 /* There may be no pending handler, because it was only a gc that
998 * had to be executed or because Lisp is a bit too eager to call
999 * DO-PENDING-INTERRUPT. */
1000 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
1001 (data->pending_handler)) {
1002 /* No matter how we ended up here, clear both
1003 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
1004 * because we checked above that there is no GC pending. */
1005 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
1006 arch_clear_pseudo_atomic_interrupted(context);
1007 /* Restore the sigmask in the context. */
1008 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
1009 run_deferred_handler(data, context);
1011 #ifdef LISP_FEATURE_SB_THRUPTION
1012 if (SymbolValue(THRUPTION_PENDING,thread)==T)
1013 /* Special case for the following situation: There is a
1014 * thruption pending, but a signal had been deferred. The
1015 * pitstop at the top of this function could only take care
1016 * of GC, and skipped the thruption, so we need to try again
1017 * now that INTERRUPT_PENDING and the sigmask have been
1019 while (check_pending_thruptions(context))
1023 #ifdef LISP_FEATURE_GENCGC
1024 if (get_pseudo_atomic_interrupted(thread))
1025 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
1027 /* It is possible that the end of this function was reached
1028 * without ever actually doing anything, the tests in Lisp for
1029 * when to call receive-pending-interrupt are not exact. */
1030 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
/* Run the installed handler for SIGNAL right now: either a Lisp
 * function stored in interrupt_handlers[signal] (called via funcall3
 * with SAPs wrapping info/context) or a raw C handler.  Entered with
 * blockable signals blocked (checked below). */
1035 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
1037 boolean were_in_lisp;
1038 union interrupt_handler handler;
1040 check_blockables_blocked_or_lose(0);
1042 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
/* Deferrable signals must only arrive here when Lisp-level
 * interrupts are enabled; anything else is a deferral bug. */
1043 if (sigismember(&deferrable_sigset,signal))
1044 check_interrupts_enabled_or_lose(context);
1047 handler = interrupt_handlers[signal];
1049 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
/* Record whether we interrupted Lisp (vs. foreign) code; if so,
 * set up a fake foreign-function-call frame so GC can see the
 * interrupted context's registers. */
1053 were_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1056 fake_foreign_function_call(context);
1059 FSHOW_SIGNAL((stderr,
1060 "/entering interrupt_handle_now(%d, info, context)\n",
1063 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
1065 /* This can happen if someone tries to ignore or default one
1066 * of the signals we need for runtime support, and the runtime
1067 * support decides to pass on it. */
1068 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
1070 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
1071 /* Once we've decided what to do about contexts in a
1072 * return-elsewhere world (the original context will no longer
1073 * be available; should we copy it or was nobody using it anyway?)
1074 * then we should convert this to return-elsewhere */
1076 /* CMUCL comment said "Allocate the SAPs while the interrupts
1077 * are still disabled.". I (dan, 2003.08.21) assume this is
1078 * because we're not in pseudoatomic and allocation shouldn't
1079 * be interrupted. In which case it's no longer an issue as
1080 * all our allocation from C now goes through a PA wrapper,
1081 * but still, doesn't hurt.
1083 * Yeah, but non-gencgc platforms don't really wrap allocation
1084 * in PA. MG - 2005-08-29 */
1086 lispobj info_sap, context_sap;
1088 #ifndef LISP_FEATURE_SB_SAFEPOINT
1089 /* Leave deferrable signals blocked, the handler itself will
1090 * allow signals again when it sees fit. */
1091 unblock_gc_signals(0, 0);
1093 WITH_GC_AT_SAFEPOINTS_ONLY()
1095 { // the block is needed for WITH_GC_AT_SAFEPOINTS_ONLY() to work
1096 context_sap = alloc_sap(context);
1097 info_sap = alloc_sap(info);
1099 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
1101 funcall3(handler.lisp,
1102 make_fixnum(signal),
1107 /* This cannot happen in sane circumstances. */
1109 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
1111 #if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
1112 /* Allow signals again. */
1113 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1114 (*handler.c)(signal, info, context);
1120 undo_fake_foreign_function_call(context); /* block signals again */
1123 FSHOW_SIGNAL((stderr,
1124 "/returning from interrupt_handle_now(%d, info, context)\n",
1128 /* This is called at the end of a critical section if the indications
1129 * are that some signal was deferred during the section. Note that as
1130 * far as C or the kernel is concerned we dealt with the signal
1131 * already; we're just doing the Lisp-level processing now that we
1134 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
1136 /* The pending_handler may enable interrupts and then another
1137 * interrupt may hit, overwrite interrupt_data, so reset the
1138 * pending handler before calling it. Trust the handler to finish
1139 * with the siginfo before enabling interrupts. */
1140 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1141 data->pending_handler;
/* Clear the slot BEFORE invoking: once the handler runs, a new
 * signal may legitimately be deferred into the same slot. */
1143 data->pending_handler=0;
1144 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
1145 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
1148 #ifndef LISP_FEATURE_WIN32
/* Decide whether HANDLER for SIGNAL can run now or must be deferred.
 * Defers (stashing signal data via store_signal_data_for_later) when
 * Lisp-level interrupts are disabled, when we're racing with
 * LEAVING-WITHOUT-GCING, or when inside a pseudo-atomic section.
 * Caller is expected to have blockable signals blocked (checked). */
1150 maybe_defer_handler(void *handler, struct interrupt_data *data,
1151 int signal, siginfo_t *info, os_context_t *context)
1153 struct thread *thread=arch_os_get_current_thread();
1155 check_blockables_blocked_or_lose(0);
/* Sanity checks: a second deferral on top of an unprocessed one
 * would silently drop a signal. */
1157 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
1158 lose("interrupt already pending\n");
1159 if (thread->interrupt_data->pending_handler)
1160 lose("there is a pending handler already (PA)\n");
1161 if (data->gc_blocked_deferrables)
1162 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1163 check_interrupt_context_or_lose(context);
1164 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1165 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
1166 * atomic section inside a WITHOUT-INTERRUPTS.
1168 * Also, if in_leaving_without_gcing_race_p then
1169 * interrupt_handle_pending is going to be called soon, so
1170 * stashing the signal away is safe.
1172 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
1173 in_leaving_without_gcing_race_p(thread)) {
1174 FSHOW_SIGNAL((stderr,
1175 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1176 (unsigned int)handler,signal,
1177 in_leaving_without_gcing_race_p(thread)));
1178 store_signal_data_for_later(data,handler,signal,info,context);
1179 SetSymbolValue(INTERRUPT_PENDING, T,thread);
1180 check_interrupt_context_or_lose(context);
1183 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1184 * actually use its argument for anything on x86, so this branch
1185 * may succeed even when context is null (gencgc alloc()) */
1186 if (arch_pseudo_atomic_atomic(context)) {
1187 FSHOW_SIGNAL((stderr,
1188 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1189 (unsigned int)handler,signal));
1190 store_signal_data_for_later(data,handler,signal,info,context);
/* Mark the PA section interrupted so it traps to
 * interrupt_handle_pending on exit. */
1191 arch_set_pseudo_atomic_interrupted(context);
1192 check_interrupt_context_or_lose(context);
1195 FSHOW_SIGNAL((stderr,
1196 "/maybe_defer_handler(%x,%d): not deferred\n",
1197 (unsigned int)handler,signal));
/* Stash HANDLER, SIGNAL, a copy of INFO, and the context's signal
 * mask into DATA so run_deferred_handler can replay the signal later.
 * Also blocks deferrable signals in the context's mask so that the
 * interrupted code resumes with them blocked. */
1202 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1204 siginfo_t *info, os_context_t *context)
1206 if (data->pending_handler)
1207 lose("tried to overwrite pending interrupt handler %x with %x\n",
1208 data->pending_handler, handler);
1210 lose("tried to defer null interrupt handler\n");
1211 data->pending_handler = handler;
1212 data->pending_signal = signal;
/* siginfo_t is copied by value: the kernel-owned original is not
 * valid after the outer handler returns. */
1214 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1216 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1220 lose("Null context");
1222 /* the signal mask in the context (from before we were
1223 * interrupted) is copied to be restored when run_deferred_handler
1224 * happens. Then the usually-blocked signals are added to the mask
1225 * in the context so that we are running with blocked signals when
1226 * the handler returns */
1227 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1228 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (sigaction-installed) handler for deferrable signals:
 * either defers via maybe_defer_handler or runs
 * interrupt_handle_now immediately.  SAVE_ERRNO preserves errno
 * around the whole handler and binds `context'. */
1232 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1234 SAVE_ERRNO(signal,context,void_context);
1235 struct thread *thread = arch_os_get_current_thread();
1236 struct interrupt_data *data = thread->interrupt_data;
1237 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1238 interrupt_handle_now(signal, info, context);
/* Immediately invoke the low-level (C runtime) handler for SIGNAL
 * from interrupt_low_level_handlers[].  Expects blockables blocked
 * and Lisp interrupts enabled (both checked). */
1243 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1244 os_context_t *context)
1246 /* No FP control fixage needed, caller has done that. */
1247 check_blockables_blocked_or_lose(0);
1248 check_interrupts_enabled_or_lose(context);
1249 (*interrupt_low_level_handlers[signal])(signal, info, context);
1250 /* No Darwin context fixage needed, caller does that. */
/* Deferring outermost handler for low-level (C runtime) signals;
 * mirrors maybe_now_maybe_later but dispatches to the low-level
 * handler table instead of the Lisp-level one. */
1254 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1256 SAVE_ERRNO(signal,context,void_context);
1257 struct thread *thread = arch_os_get_current_thread();
1258 struct interrupt_data *data = thread->interrupt_data;
1260 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1261 signal,info,context))
1262 low_level_interrupt_handle_now(signal, info, context);
1267 #ifdef THREADS_USING_GCSIGNAL
1269 /* This function must not cons, because that may trigger a GC. */
/* Handler for the stop-for-GC signal: parks this thread in
 * STATE_STOPPED until the GCing thread releases it, unless stopping
 * must be postponed because *GC-INHIBIT* is set or we are inside a
 * pseudo-atomic section (in which case STOP_FOR_GC_PENDING is set
 * and we return). */
1271 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1273 struct thread *thread=arch_os_get_current_thread();
1274 boolean was_in_lisp;
1276 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1277 * pseudo atomic until gc is finally allowed. */
1278 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1279 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1280 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1282 } else if (arch_pseudo_atomic_atomic(context)) {
1283 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1284 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1285 arch_set_pseudo_atomic_interrupted(context);
1286 maybe_save_gc_mask_and_block_deferrables
1287 (os_context_sigmask_addr(context));
1291 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1293 /* Not PA and GC not inhibited -- we can stop now. */
1295 was_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
1298 /* need the context stored so it can have registers scavenged */
1299 fake_foreign_function_call(context);
1302 /* Not pending anymore. */
1303 SetSymbolValue(GC_PENDING,NIL,thread);
1304 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1306 /* Consider this: in a PA section GC is requested: GC_PENDING,
1307 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1308 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1309 * but a SIG_STOP_FOR_GC arrives before trapping to
1310 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1311 * pseudo_atomic_interrupted is not and we go on running with
1312 * pseudo_atomic_interrupted but without a pending interrupt or
1313 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1315 if (thread->interrupt_data->gc_blocked_deferrables) {
1316 FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
1317 clear_pseudo_atomic_interrupted(thread);
1318 sigcopyset(os_context_sigmask_addr(context),
1319 &thread->interrupt_data->pending_mask);
1320 thread->interrupt_data->gc_blocked_deferrables = 0;
1323 if(thread_state(thread)!=STATE_RUNNING) {
1324 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1325 fixnum_value(thread->state));
1328 set_thread_state(thread,STATE_STOPPED);
1329 FSHOW_SIGNAL((stderr,"suspended\n"));
1331 /* While waiting for gc to finish occupy ourselves with zeroing
1332 * the unused portion of the control stack to reduce conservatism.
1333 * On hypothetic platforms with threads and exact gc it is
1334 * actually a must. */
1335 scrub_control_stack();
1337 wait_for_thread_state_change(thread, STATE_STOPPED);
1338 FSHOW_SIGNAL((stderr,"resumed\n"));
1340 if(thread_state(thread)!=STATE_RUNNING) {
1341 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1342 fixnum_value(thread_state(thread)));
1346 undo_fake_foreign_function_call(context);
/* Thin sigaction-installable wrapper around interrupt_handle_now;
 * additionally warns about possible heap corruption when a
 * synchronous fault signal (SIGILL/SIGBUS/SIGEMT) reaches the
 * generic handler path. */
1353 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1355 SAVE_ERRNO(signal,context,void_context);
1356 #ifndef LISP_FEATURE_WIN32
1357 if ((signal == SIGILL) || (signal == SIGBUS)
1358 #ifndef LISP_FEATURE_LINUX
1359 || (signal == SIGEMT)
1362 corruption_warning_and_maybe_lose("Signal %d received", signal);
1364 interrupt_handle_now(signal, info, context);
1368 /* manipulate the signal context and stack such that when the handler
1369 * returns, it will call function instead of whatever it was doing
1373 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1374 extern int *context_eflags_addr(os_context_t *context);
1377 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1378 extern void post_signal_tramp(void);
1379 extern void call_into_lisp_tramp(void);
/* Frob CONTEXT so that when the signal handler returns, control
 * transfers to FUNPTR (a call_into_lisp-compatible entry point) which
 * will invoke the Lisp object FUNCTION, instead of resuming the
 * interrupted code.  On x86/x86-64 this is done by faking the stack
 * call_into_lisp would expect and pointing the context's PC at
 * FUNPTR; the post_signal_tramp epilogue later restores the saved
 * registers.  On other platforms a fake control stack frame is built
 * and the context's PC/registers are aimed at FUNCTION's code. */
1382 arrange_return_to_c_function(os_context_t *context,
1383 call_into_lisp_lookalike funptr,
1386 #if !(defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_SAFEPOINT))
1387 check_gc_signals_unblocked_or_lose
1388 (os_context_sigmask_addr(context));
1390 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1391 void * fun=native_pointer(function);
1392 void *code = &(((struct simple_fun *) fun)->code);
1395 /* Build a stack frame showing `interrupted' so that the
1396 * user's backtrace makes (as much) sense (as usual) */
1398 /* fp state is saved and restored by call_into_lisp */
1399 /* FIXME: errno is not restored, but since current uses of this
1400 * function only call Lisp code that signals an error, it's not
1401 * much of a problem. In other words, running out of the control
1402 * stack between a syscall and (GET-ERRNO) may clobber errno if
1403 * something fails during signalling or in the handler. But I
1404 * can't see what can go wrong as long as there is no CONTINUE
1405 * like restart on them. */
1406 #ifdef LISP_FEATURE_X86
1407 /* Suppose the existence of some function that saved all
1408 * registers, called call_into_lisp, then restored GP registers and
1409 * returned. It would look something like this:
1417 pushl {address of function to call}
1418 call 0x8058db0 <call_into_lisp>
1425 * What we do here is set up the stack that call_into_lisp would
1426 * expect to see if it had been called by this code, and frob the
1427 * signal context so that signal return goes directly to call_into_lisp,
1428 * and when that function (and the lisp function it invoked) returns,
1429 * it returns to the second half of this imaginary function which
1430 * restores all registers and returns to C
1432 * For this to work, the latter part of the imaginary function
1433 * must obviously exist in reality. That would be post_signal_tramp
1436 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1438 #if defined(LISP_FEATURE_DARWIN)
/* Darwin/x86: registers are parked in a freshly os_validate'd
 * save area instead of below SP; the trampoline rebuilds the
 * frame from it and frees the block. */
1439 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1441 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1442 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1444 /* 1. os_validate (malloc/mmap) register_save_block
1445 * 2. copy register state into register_save_block
1446 * 3. put a pointer to register_save_block in a register in the context
1447 * 4. set the context's EIP to point to a trampoline which:
1448 * a. builds the fake stack frame from the block
1449 * b. frees the block
1450 * c. calls the function
1453 *register_save_area = *os_context_pc_addr(context);
1454 *(register_save_area + 1) = function;
1455 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1456 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1457 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1458 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1459 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1460 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1461 *(register_save_area + 8) = *context_eflags_addr(context);
1463 *os_context_pc_addr(context) =
1464 (os_context_register_t) funptr;
1465 *os_context_register_addr(context,reg_ECX) =
1466 (os_context_register_t) register_save_area;
1469 /* return address for call_into_lisp: */
1470 *(sp-15) = (u32)post_signal_tramp;
1471 *(sp-14) = function; /* args for call_into_lisp : function*/
1472 *(sp-13) = 0; /* arg array */
1473 *(sp-12) = 0; /* no. args */
1474 /* this order matches that used in POPAD */
1475 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1476 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1478 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1479 /* POPAD ignores the value of ESP: */
1481 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1483 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1484 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1485 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1486 *(sp-3)=*context_eflags_addr(context);
1487 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1488 *(sp-1)=*os_context_pc_addr(context);
1492 #elif defined(LISP_FEATURE_X86_64)
/* x86-64: same trick, wider frame (R8-R15 also saved). */
1493 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1495 /* return address for call_into_lisp: */
1496 *(sp-18) = (u64)post_signal_tramp;
1498 *(sp-17)=*os_context_register_addr(context,reg_R15);
1499 *(sp-16)=*os_context_register_addr(context,reg_R14);
1500 *(sp-15)=*os_context_register_addr(context,reg_R13);
1501 *(sp-14)=*os_context_register_addr(context,reg_R12);
1502 *(sp-13)=*os_context_register_addr(context,reg_R11);
1503 *(sp-12)=*os_context_register_addr(context,reg_R10);
1504 *(sp-11)=*os_context_register_addr(context,reg_R9);
1505 *(sp-10)=*os_context_register_addr(context,reg_R8);
1506 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1507 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1508 /* skip RBP and RSP */
1509 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1510 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1511 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1512 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1513 *(sp-3)=*context_eflags_addr(context);
1514 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1515 *(sp-1)=*os_context_pc_addr(context);
/* System V AMD64 convention: first three args in RDI/RSI/RDX. */
1517 *os_context_register_addr(context,reg_RDI) =
1518 (os_context_register_t)function; /* function */
1519 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1520 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1522 struct thread *th=arch_os_get_current_thread();
1523 build_fake_control_stack_frames(th,context);
1526 #ifdef LISP_FEATURE_X86
1528 #if !defined(LISP_FEATURE_DARWIN)
1529 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1530 *os_context_register_addr(context,reg_ECX) = 0;
1531 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1533 *os_context_register_addr(context,reg_UESP) =
1534 (os_context_register_t)(sp-15);
1536 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1537 #endif /* __NETBSD__ */
1538 #endif /* LISP_FEATURE_DARWIN */
1540 #elif defined(LISP_FEATURE_X86_64)
1541 *os_context_pc_addr(context) = (os_context_register_t)funptr;
1542 *os_context_register_addr(context,reg_RCX) = 0;
1543 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1544 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1546 /* this much of the calling convention is common to all
1548 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1549 *os_context_register_addr(context,reg_NARGS) = 0;
1550 *os_context_register_addr(context,reg_LIP) =
1551 (os_context_register_t)(unsigned long)code;
1552 *os_context_register_addr(context,reg_CFP) =
1553 (os_context_register_t)(unsigned long)access_control_frame_pointer(th);
1555 #ifdef ARCH_HAS_NPC_REGISTER
1556 *os_context_npc_addr(context) =
1557 4 + *os_context_pc_addr(context);
1559 #ifdef LISP_FEATURE_SPARC
1560 *os_context_register_addr(context,reg_CODE) =
1561 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1563 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
/* Public entry point: arrange for the signal return to invoke the
 * Lisp object FUNCTION.  Darwin/x86 needs the special trampoline
 * (see the register-save-area path above); everyone else goes
 * straight through call_into_lisp. */
1568 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1570 #if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_X86)
1571 arrange_return_to_c_function(context, call_into_lisp_tramp, function);
1573 arrange_return_to_c_function(context, call_into_lisp, function);
1577 /* KLUDGE: Theoretically the approach we use for undefined alien
1578 * variables should work for functions as well, but on PPC/Darwin
1579 * we get bus error at bogus addresses instead, hence this workaround,
1580 * that has the added benefit of automatically discriminating between
1581 * functions and variables.
/* Signal UNDEFINED-ALIEN-FUNCTION-ERROR in Lisp; called when a
 * never-resolved alien function stub is invoked. */
1584 undefined_alien_function(void)
1586 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Unprotect TH's control stack guard page (giving the error handler
 * headroom) and arm the return guard page so we can detect when the
 * stack has retreated far enough to re-arm the guard. */
1589 void lower_thread_control_stack_guard_page(struct thread *th)
1591 protect_control_stack_guard_page(0, th);
1592 protect_control_stack_return_guard_page(1, th);
1593 th->control_stack_guard_page_protected = NIL;
1594 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
/* Re-arm TH's control stack guard page (zeroing it first) and
 * disarm the return guard page; inverse of
 * lower_thread_control_stack_guard_page. */
1597 void reset_thread_control_stack_guard_page(struct thread *th)
1599 memset(CONTROL_STACK_GUARD_PAGE(th), 0, os_vm_page_size);
1600 protect_control_stack_guard_page(1, th);
1601 protect_control_stack_return_guard_page(0, th);
1602 th->control_stack_guard_page_protected = T;
1603 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1606 /* Called from the REPL, too. */
/* Re-arm the current thread's guard page, but only if it is
 * currently lowered (idempotent). */
1607 void reset_control_stack_guard_page(void)
1609 struct thread *th=arch_os_get_current_thread();
1610 if (th->control_stack_guard_page_protected == NIL) {
1611 reset_thread_control_stack_guard_page(th);
/* Convenience wrapper: lower the guard page of the current thread. */
1615 void lower_control_stack_guard_page(void)
1617 lower_thread_control_stack_guard_page(arch_os_get_current_thread());
/* Dispatch on a faulting address ADDR that hit one of the guard
 * pages (control/binding/alien stack, or the undefined-alien page):
 * hard guard pages lose(); soft guard pages are lowered and the
 * corresponding Lisp exhaustion error is signalled via
 * arrange_return_to_lisp_function; return guard pages re-arm the
 * soft guard. */
1621 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1623 struct thread *th=arch_os_get_current_thread();
1625 if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
1626 addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1627 lose("Control stack exhausted");
1629 else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1630 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1631 /* We hit the end of the control stack: disable guard page
1632 * protection so the error handler has some headroom, protect the
1633 * previous page so that we can catch returns from the guard page
1634 * and restore it. */
1635 if (th->control_stack_guard_page_protected == NIL)
1636 lose("control_stack_guard_page_protected NIL");
1637 lower_control_stack_guard_page();
1638 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1639 /* For the unfortunate case, when the control stack is
1640 * exhausted in a signal handler. */
1641 unblock_signals_in_context_and_maybe_warn(context);
1643 arrange_return_to_lisp_function
1644 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1647 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1648 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1649 /* We're returning from the guard page: reprotect it, and
1650 * unprotect this one. This works even if we somehow missed
1651 * the return-guard-page, and hit it on our way to new
1652 * exhaustion instead. */
1653 if (th->control_stack_guard_page_protected != NIL)
1654 lose("control_stack_guard_page_protected not NIL");
1655 reset_control_stack_guard_page();
1658 else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
1659 addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1660 lose("Binding stack exhausted");
1662 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1663 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1664 protect_binding_stack_guard_page(0, NULL);
1665 protect_binding_stack_return_guard_page(1, NULL);
1666 fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
1668 /* For the unfortunate case, when the binding stack is
1669 * exhausted in a signal handler. */
1670 unblock_signals_in_context_and_maybe_warn(context);
1671 arrange_return_to_lisp_function
1672 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1675 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1676 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1677 protect_binding_stack_guard_page(1, NULL);
1678 protect_binding_stack_return_guard_page(0, NULL);
1679 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1682 else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
1683 addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1684 lose("Alien stack exhausted");
1686 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1687 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1688 protect_alien_stack_guard_page(0, NULL);
1689 protect_alien_stack_return_guard_page(1, NULL);
1690 fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
1692 /* For the unfortunate case, when the alien stack is
1693 * exhausted in a signal handler. */
1694 unblock_signals_in_context_and_maybe_warn(context);
1695 arrange_return_to_lisp_function
1696 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1699 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1700 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1701 protect_alien_stack_guard_page(1, NULL);
1702 protect_alien_stack_return_guard_page(0, NULL);
1703 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1706 else if (addr >= undefined_alien_address &&
1707 addr < undefined_alien_address + os_vm_page_size) {
1708 arrange_return_to_lisp_function
1709 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1716 * noise to install handlers
1719 #ifndef LISP_FEATURE_WIN32
1720 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1721 * they are blocked, in Linux 2.6 the default handler is invoked
1722 * instead that usually coredumps. One might hastily think that adding
1723 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1724 * the whole sa_mask is ignored, instead of merely not adding the signal
1725 * in question to the mask. That means if it's not blockable the
1726 * signal must be unblocked at the beginning of signal handlers.
1728 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1729 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1730 * will be unblocked in the sigmask during the signal handler. -- RMK
1733 static volatile int sigaction_nodefer_works = -1;
1735 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1736 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Probe handler for see_if_sigaction_nodefer_works: inspects the
 * current sigmask and records in sigaction_nodefer_works whether
 * exactly the expected two signals (the sa_mask entry plus the
 * delivered signal itself) are blocked. */
1739 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1743 get_current_sigmask(&current);
1744 /* There should be exactly two blocked signals: the two we added
1745 * to sa_mask when setting up the handler. NetBSD doesn't block
1746 * the signal we're handling when SA_NODEFER is set; Linux before
1747 * 2.6.13 or so also doesn't block the other signal when
1748 * SA_NODEFER is set. */
1749 for(i = 1; i < NSIG; i++)
1750 if (sigismember(&current, i) !=
1751 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1752 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1753 sigaction_nodefer_works = 0;
/* Only claim success if no mismatch set the flag to 0 above. */
1755 if (sigaction_nodefer_works == -1)
1756 sigaction_nodefer_works = 1;
/* Run-time probe: install sigaction_nodefer_test_handler with
 * SA_NODEFER, deliver the test signal to ourselves, and spin until
 * the handler has recorded whether SA_NODEFER behaves correctly on
 * this kernel.  Restores the previous disposition afterwards. */
1760 see_if_sigaction_nodefer_works(void)
1762 struct sigaction sa, old_sa;
1764 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1765 sa.sa_sigaction = sigaction_nodefer_test_handler;
1766 sigemptyset(&sa.sa_mask);
1767 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1768 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1769 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1770 /* Make sure no signals are blocked. */
1773 sigemptyset(&empty);
1774 thread_sigmask(SIG_SETMASK, &empty, 0);
1776 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* sigaction_nodefer_works is volatile; busy-wait for delivery. */
1777 while (sigaction_nodefer_works == -1);
1778 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1781 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1782 #undef SA_NODEFER_TEST_KILL_SIGNAL
1784 #if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
/* Entry point for a thread spawned by spawn_signal_thread_handler
 * (safepoint builds): fabricates a zeroed siginfo/context pair and
 * runs the Lisp-level handler for the signal number smuggled in via
 * the pthread argument. */
1787 signal_thread_trampoline(void *pthread_arg)
1789 int signo = (int) pthread_arg;
1790 os_context_t fake_context;
1791 siginfo_t fake_info;
1792 #ifdef LISP_FEATURE_PPC
1796 memset(&fake_info, 0, sizeof(fake_info));
1797 memset(&fake_context, 0, sizeof(fake_context));
1798 #ifdef LISP_FEATURE_PPC
1799 memset(&uc_regs, 0, sizeof(uc_regs));
1800 fake_context.uc_mcontext.uc_regs = &uc_regs;
/* Fill in plausible PC/SP so backtraces of the fake context do not
 * chase garbage. */
1803 *os_context_pc_addr(&fake_context) = &signal_thread_trampoline;
1804 #ifdef ARCH_HAS_STACK_POINTER /* aka x86(-64) */
1805 *os_context_sp_addr(&fake_context) = __builtin_frame_address(0);
1808 signal_handler_callback(interrupt_handlers[signo].lisp,
1809 signo, &fake_info, &fake_context);
/* SIGPROF wrapper (safepoint builds): temporarily swaps in the
 * thread's dedicated sprof_alloc_region so profiler-driven consing
 * cannot corrupt the normal allocation region, then delegates to
 * interrupt_handle_now_handler and swaps back. */
1814 sigprof_handler_trampoline(int signal, siginfo_t *info, void *void_context)
1816 SAVE_ERRNO(signal,context,void_context);
1817 struct thread *self = arch_os_get_current_thread();
1819 /* alloc() is not re-entrant and still uses pseudo atomic (even though
1820 * inline allocation does not). In this case, give up. */
1821 if (get_pseudo_atomic_atomic(self))
1824 struct alloc_region tmp = self->alloc_region;
1825 self->alloc_region = self->sprof_alloc_region;
1826 self->sprof_alloc_region = tmp;
1828 interrupt_handle_now_handler(signal, info, void_context);
1830 /* And we're back. We know that the SIGPROF handler never unwinds
1831 * non-locally, and can simply swap things back: */
1833 tmp = self->alloc_region;
1834 self->alloc_region = self->sprof_alloc_region;
1835 self->sprof_alloc_region = tmp;
1838 ; /* Dear C compiler, it's OK to have a label here. */
/* Asynchronous-signal handler (safepoint builds): instead of running
 * the Lisp handler in signal context, spawn a fresh pthread (with a
 * full control-stack-sized stack) that runs it via
 * signal_thread_trampoline.  Any pthread failure is fatal. */
1843 spawn_signal_thread_handler(int signal, siginfo_t *info, void *void_context)
1845 SAVE_ERRNO(signal,context,void_context);
1847 pthread_attr_t attr;
1850 if (pthread_attr_init(&attr))
1852 if (pthread_attr_setstacksize(&attr, thread_control_stack_size))
1854 if (pthread_create(&th, &attr, &signal_thread_trampoline, (void*) signal))
1856 if (pthread_attr_destroy(&attr))
1863 lose("spawn_signal_thread_handler");
/* Wrapper used when SA_NODEFER is broken (see probe above): manually
 * unblock the delivered signal before running the normal handler,
 * emulating what SA_NODEFER should have done. */
1868 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1870 SAVE_ERRNO(signal,context,void_context);
1873 sigemptyset(&unblock);
1874 sigaddset(&unblock, signal);
1875 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1876 interrupt_handle_now(signal, info, context);
/* Same SA_NODEFER workaround as unblock_me_trampoline, but for
 * signals dispatched through the low-level handler table. */
1881 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1883 SAVE_ERRNO(signal,context,void_context);
1886 sigemptyset(&unblock);
1887 sigaddset(&unblock, signal);
1888 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1889 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Minimal outermost handler: preserve errno and immediately run the
 * low-level handler for SIGNAL with no deferral or unblocking. */
1894 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1896 SAVE_ERRNO(signal,context,void_context);
1897 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Install HANDLER as the low-level (C runtime) handler for SIGNAL,
 * choosing the appropriate outermost trampoline (deferring,
 * unblocking, or direct) and recording HANDLER in
 * interrupt_low_level_handlers[] (0 for SIG_DFL, i.e. "uninstall"). */
1902 undoably_install_low_level_interrupt_handler (int signal,
1903 interrupt_handler_t handler)
1905 struct sigaction sa;
1907 if (0 > signal || signal >= NSIG) {
1908 lose("bad signal number %d\n", signal);
1911 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1912 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1913 else if (sigismember(&deferrable_sigset,signal))
1914 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1915 else if (!sigaction_nodefer_works &&
1916 !sigismember(&blockable_sigset, signal))
1917 sa.sa_sigaction = low_level_unblock_me_trampoline;
1919 sa.sa_sigaction = low_level_handle_now_handler;
1921 #ifdef LISP_FEATURE_SB_THRUPTION
1922 /* It's in `deferrable_sigset' so that we block&unblock it properly,
1923 * but we don't actually want to defer it. And if we put it only
1924 * into blockable_sigset, we'd have to special-case it around thread
1925 * creation at least. */
1926 if (signal == SIGPIPE)
1927 sa.sa_sigaction = low_level_handle_now_handler;
/* Block everything blockable while the handler runs; rely on
 * SA_NODEFER only where the probe said it works. */
1930 sigcopyset(&sa.sa_mask, &blockable_sigset);
1931 sa.sa_flags = SA_SIGINFO | SA_RESTART
1932 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1933 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1934 if(signal==SIG_MEMORY_FAULT) {
/* Memory faults may arrive on an exhausted stack: use sigaltstack. */
1935 sa.sa_flags |= SA_ONSTACK;
1936 # ifdef LISP_FEATURE_SB_SAFEPOINT
1937 sigaddset(&sa.sa_mask, SIGRTMIN);
1938 sigaddset(&sa.sa_mask, SIGRTMIN+1);
1943 sigaction(signal, &sa, NULL);
1944 interrupt_low_level_handlers[signal] =
1945 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1949 /* This is called from Lisp. */
/* Install HANDLER as the Lisp-level handler for SIGNAL and return
 * the previous handler (as a uword_t-encoded lispobj).  The actual
 * sigaction is only (re)installed when no low-level handler owns
 * the signal; otherwise only interrupt_handlers[] is updated.
 * Blockable signals are masked for the duration of the update. */
1951 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*),
1954 #ifndef LISP_FEATURE_WIN32
1955 struct sigaction sa;
1957 union interrupt_handler oldhandler;
1959 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1961 block_blockable_signals(0, &old);
1963 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1964 (unsigned int)interrupt_low_level_handlers[signal]));
1965 if (interrupt_low_level_handlers[signal]==0) {
1966 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1967 ARE_SAME_HANDLER(handler, SIG_IGN))
1968 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1969 #ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY
1970 else if (signal == SIGPROF)
1971 sa.sa_sigaction = sigprof_handler_trampoline;
1972 else if (!synchronous)
1973 sa.sa_sigaction = spawn_signal_thread_handler;
1975 else if (sigismember(&deferrable_sigset, signal))
1976 sa.sa_sigaction = maybe_now_maybe_later;
1977 else if (!sigaction_nodefer_works &&
1978 !sigismember(&blockable_sigset, signal))
1979 sa.sa_sigaction = unblock_me_trampoline;
1981 sa.sa_sigaction = interrupt_handle_now_handler;
1983 sigcopyset(&sa.sa_mask, &blockable_sigset);
1984 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1985 (sigaction_nodefer_works ? SA_NODEFER : 0);
1986 sigaction(signal, &sa, NULL);
1989 oldhandler = interrupt_handlers[signal];
1990 interrupt_handlers[signal].c = handler;
/* Restore the signal mask saved by block_blockable_signals. */
1992 thread_sigmask(SIG_SETMASK, &old, 0);
1994 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1996 return (uword_t)oldhandler.lisp;
1998 /* Probably-wrong Win32 hack */
/* This must not go through lisp as it's allowed anytime, even when on
/* Low-level SIGABRT handler: abort is unconditional and may arrive at
 * any time, so we never hand it to Lisp — just bail out of the runtime
 * with a backtrace via lose(). */
sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
    lose("SIGABRT received.\n");
/* One-time signal-subsystem initialization: probe sigaction quirks,
 * build the deferrable/blockable/gc signal sets, point every
 * Lisp-level handler slot at SIG_DFL, and install the low-level
 * SIGABRT handler. Called once during runtime startup. */
interrupt_init(void)
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    SHOW("entering interrupt_init()");
#ifndef LISP_FEATURE_WIN32
    /* Detect whether this platform's SA_NODEFER actually works; the
     * answer controls which trampoline install_handler() picks. */
    see_if_sigaction_nodefer_works();
    /* Build the three signal classification sets from scratch. */
    sigemptyset(&deferrable_sigset);
    sigemptyset(&blockable_sigset);
    sigemptyset(&gc_sigset);
    sigaddset_deferrable(&deferrable_sigset);
    sigaddset_blockable(&blockable_sigset);
    sigaddset_gc(&gc_sigset);
#ifndef LISP_FEATURE_WIN32
    /* Set up high level handler information. */
    for (i = 0; i < NSIG; i++) {
        interrupt_handlers[i].c =
            /* (The cast here blasts away the distinction between
             * SA_SIGACTION-style three-argument handlers and
             * signal(..)-style one-argument handlers, which is OK
             * because it works to call the 1-argument form where the
             * 3-argument form is expected.) */
            (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
    /* SIGABRT must never reach Lisp; route it to sigabrt_handler. */
    undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
    SHOW("returning from interrupt_init()");
2044 #ifndef LISP_FEATURE_WIN32
2046 siginfo_code(siginfo_t *info)
2048 return info->si_code;
2050 os_vm_address_t current_memory_fault_address;
/* Report a memory fault at ADDR to Lisp: stash the faulting address
 * where Lisp can read it, print a C-level corruption warning (which
 * may lose() if things look too broken), then arrange for the Lisp
 * MEMORY-FAULT-ERROR handler to run. */
lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
    /* FIXME: This is lossy: if we get another memory fault (eg. from
     * another thread) before lisp has read this, we lose the information.
     * However, since this is mostly informative, we'll live with that for
     * now -- some address is better than no address in this case.
    current_memory_fault_address = addr;
    /* To allow debugging memory faults in signal handlers and such. */
    corruption_warning_and_maybe_lose("Memory fault at %x (pc=%p, sp=%p)",
                                      *os_context_pc_addr(context),
#ifdef ARCH_HAS_STACK_POINTER
                                      *os_context_sp_addr(context)
    unblock_signals_in_context_and_maybe_warn(context);
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    /* On these platforms we can rewrite the context so that returning
     * from the signal handler lands in the Lisp error function. */
    arrange_return_to_lisp_function(context,
                                    StaticSymbolFunction(MEMORY_FAULT_ERROR));
    /* Otherwise call the Lisp handler directly from the C handler. */
    funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Escalate a trap the C runtime doesn't know how to handle: build a
 * SAP wrapping the signal context, restore the pre-signal mask, and
 * hand everything to the Lisp UNHANDLED-TRAP-ERROR function, which is
 * expected never to return. */
unhandled_trap_error(os_context_t *context)
    lispobj context_sap;
    /* Make the thread look like it's in C so Lisp code can run. */
    fake_foreign_function_call(context);
#ifndef LISP_FEATURE_SB_SAFEPOINT
    unblock_gc_signals(0, 0);
    context_sap = alloc_sap(context);
#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
    /* Restore the signal mask that was in effect when the trap hit. */
    thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
    funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
    /* The Lisp handler must not return; if it does, give up. */
    lose("UNHANDLED-TRAP-ERROR fell through");
2097 /* Common logic for trapping instructions. How we actually handle each
2098 * case is highly architecture dependent, but the overall shape is
2101 handle_trap(os_context_t *context, int trap)
2104 #if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
2105 case trap_PendingInterrupt:
2106 FSHOW((stderr, "/<trap pending interrupt>\n"));
2107 arch_skip_instruction(context);
2108 interrupt_handle_pending(context);
2113 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
2114 interrupt_internal_error(context, trap==trap_Cerror);
2116 case trap_Breakpoint:
2117 arch_handle_breakpoint(context);
2119 case trap_FunEndBreakpoint:
2120 arch_handle_fun_end_breakpoint(context);
2122 #ifdef trap_AfterBreakpoint
2123 case trap_AfterBreakpoint:
2124 arch_handle_after_breakpoint(context);
2127 #ifdef trap_SingleStepAround
2128 case trap_SingleStepAround:
2129 case trap_SingleStepBefore:
2130 arch_handle_single_step_trap(context, trap);
2133 #ifdef trap_GlobalSafepoint
2134 case trap_GlobalSafepoint:
2135 fake_foreign_function_call(context);
2136 thread_in_lisp_raised(context);
2137 undo_fake_foreign_function_call(context);
2138 arch_skip_instruction(context);
2140 case trap_CspSafepoint:
2141 fake_foreign_function_call(context);
2142 thread_in_safety_transition(context);
2143 undo_fake_foreign_function_call(context);
2144 arch_skip_instruction(context);
2147 #if defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_GENCGC)
2148 case trap_Allocation:
2149 arch_handle_allocation_trap(context);
2150 arch_skip_instruction(context);
2154 fake_foreign_function_call(context);
2155 lose("%%PRIMITIVE HALT called; the party is over.\n");
2157 unhandled_trap_error(context);