2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* Under Linux on some architectures, we appear to have to restore the
72 * FPU control word from the context, as after the signal is delivered
73 * we appear to have a null FPU control word. */
74 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
75 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
76 os_context_t *context = arch_os_get_context(&void_context); \
77 os_restore_fp_control(context);
79 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
80 os_context_t *context = arch_os_get_context(&void_context);
83 /* These are to be used in signal handlers. Currently all handlers are
86 * interrupt_handle_now_handler
87 * maybe_now_maybe_later
88 * unblock_me_trampoline
89 * low_level_handle_now_handler
90 * low_level_maybe_now_maybe_later
91 * low_level_unblock_me_trampoline
93 * This gives us a single point of control (or six) over errno, fp
94 * control word, and fixing up signal context on sparc.
96 * The SPARC/Linux platform doesn't quite do signals the way we want
97 * them done. The third argument in the handler isn't filled in by the
98 * kernel properly, so we fix it up ourselves in the
99 * arch_os_get_context(..) function. -- CSR, 2002-07-23
101 #define SAVE_ERRNO(context,void_context) \
103 int _saved_errno = errno; \
104 RESTORE_FP_CONTROL_WORD(context,void_context); \
107 #define RESTORE_ERRNO \
109 errno = _saved_errno; \
112 static void run_deferred_handler(struct interrupt_data *data,
113 os_context_t *context);
114 #ifndef LISP_FEATURE_WIN32
115 static void store_signal_data_for_later (struct interrupt_data *data,
116 void *handler, int signal,
118 os_context_t *context);
121 /* Generic signal related utilities. */
124 get_current_sigmask(sigset_t *sigset)
126 /* Get the current sigmask, by blocking the empty set. */
127 thread_sigmask(SIG_BLOCK, 0, sigset);
131 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
136 sigcopyset(old, where);
137 for(i = 1; i < NSIG; i++) {
138 if (sigismember(what, i))
142 thread_sigmask(SIG_BLOCK, what, old);
147 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
152 sigcopyset(old, where);
153 for(i = 1; i < NSIG; i++) {
154 if (sigismember(what, i))
158 thread_sigmask(SIG_UNBLOCK, what, old);
163 print_sigset(sigset_t *sigset)
166 for(i = 1; i < NSIG; i++) {
167 if (sigismember(sigset, i))
168 fprintf(stderr, "Signal %d masked\n", i);
172 /* Return 1 if all signals in sigset2 are masked in sigset, return 0
173 * if all are unmasked; else die. Passing NULL for sigset is a shorthand
174 * for the current sigmask. */
176 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
179 #if !defined(LISP_FEATURE_WIN32)
181 boolean has_blocked = 0, has_unblocked = 0;
184 get_current_sigmask(&current);
187 for(i = 1; i < NSIG; i++) {
188 if (sigismember(sigset2, i)) {
189 if (sigismember(sigset, i))
195 if (has_blocked && has_unblocked) {
196 print_sigset(sigset);
197 lose("some %s signals blocked, some unblocked\n", name);
207 /* Deferrables, blockables, gc signals. */
210 sigaddset_deferrable(sigset_t *s)
212 sigaddset(s, SIGHUP);
213 sigaddset(s, SIGINT);
214 sigaddset(s, SIGTERM);
215 sigaddset(s, SIGQUIT);
216 sigaddset(s, SIGPIPE);
217 sigaddset(s, SIGALRM);
218 sigaddset(s, SIGURG);
219 sigaddset(s, SIGTSTP);
220 sigaddset(s, SIGCHLD);
222 #ifndef LISP_FEATURE_HPUX
223 sigaddset(s, SIGXCPU);
224 sigaddset(s, SIGXFSZ);
226 sigaddset(s, SIGVTALRM);
227 sigaddset(s, SIGPROF);
228 sigaddset(s, SIGWINCH);
232 sigaddset_blockable(sigset_t *sigset)
234 sigaddset_deferrable(sigset);
235 sigaddset_gc(sigset);
239 sigaddset_gc(sigset_t *sigset)
241 #ifdef LISP_FEATURE_SB_THREAD
242 sigaddset(sigset,SIG_STOP_FOR_GC);
246 /* initialized in interrupt_init */
247 sigset_t deferrable_sigset;
248 sigset_t blockable_sigset;
254 deferrables_blocked_p(sigset_t *sigset)
256 return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
260 check_deferrables_unblocked_or_lose(sigset_t *sigset)
262 #if !defined(LISP_FEATURE_WIN32)
263 if (deferrables_blocked_p(sigset))
264 lose("deferrables blocked\n");
269 check_deferrables_blocked_or_lose(sigset_t *sigset)
271 #if !defined(LISP_FEATURE_WIN32)
272 if (!deferrables_blocked_p(sigset))
273 lose("deferrables unblocked\n");
278 blockables_blocked_p(sigset_t *sigset)
280 return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
284 check_blockables_unblocked_or_lose(sigset_t *sigset)
286 #if !defined(LISP_FEATURE_WIN32)
287 if (blockables_blocked_p(sigset))
288 lose("blockables blocked\n");
293 check_blockables_blocked_or_lose(sigset_t *sigset)
295 #if !defined(LISP_FEATURE_WIN32)
296 if (!blockables_blocked_p(sigset))
297 lose("blockables unblocked\n");
302 gc_signals_blocked_p(sigset_t *sigset)
304 return all_signals_blocked_p(sigset, &gc_sigset, "gc");
308 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
310 #if !defined(LISP_FEATURE_WIN32)
311 if (gc_signals_blocked_p(sigset))
312 lose("gc signals blocked\n");
317 check_gc_signals_blocked_or_lose(sigset_t *sigset)
319 #if !defined(LISP_FEATURE_WIN32)
320 if (!gc_signals_blocked_p(sigset))
321 lose("gc signals unblocked\n");
326 block_deferrable_signals(sigset_t *where, sigset_t *old)
328 #ifndef LISP_FEATURE_WIN32
329 block_signals(&deferrable_sigset, where, old);
334 block_blockable_signals(sigset_t *where, sigset_t *old)
336 #ifndef LISP_FEATURE_WIN32
337 block_signals(&blockable_sigset, where, old);
342 block_gc_signals(sigset_t *where, sigset_t *old)
344 #ifndef LISP_FEATURE_WIN32
345 block_signals(&gc_sigset, where, old);
350 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
352 #ifndef LISP_FEATURE_WIN32
353 if (interrupt_handler_pending_p())
354 lose("unblock_deferrable_signals: losing proposition\n");
355 check_gc_signals_unblocked_or_lose(where);
356 unblock_signals(&deferrable_sigset, where, old);
361 unblock_blockable_signals(sigset_t *where, sigset_t *old)
363 #ifndef LISP_FEATURE_WIN32
364 unblock_signals(&blockable_sigset, where, old);
369 unblock_gc_signals(sigset_t *where, sigset_t *old)
371 #ifndef LISP_FEATURE_WIN32
372 unblock_signals(&gc_sigset, where, old);
377 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
379 #ifndef LISP_FEATURE_WIN32
380 sigset_t *sigset = os_context_sigmask_addr(context);
381 if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
382 corruption_warning_and_maybe_lose(
383 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
384 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
385 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
386 unblock_gc_signals(sigset, 0);
388 if (!interrupt_handler_pending_p()) {
389 unblock_deferrable_signals(sigset, 0);
396 check_interrupts_enabled_or_lose(os_context_t *context)
398 struct thread *thread=arch_os_get_current_thread();
399 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
400 lose("interrupts not enabled\n");
401 if (arch_pseudo_atomic_atomic(context))
402 lose ("in pseudo atomic section\n");
405 /* Save sigset (or the current sigmask if 0) if there is no pending
406 * handler, because that means that deferrables are already blocked.
407 * The purpose is to avoid losing the pending gc signal if a
408 * deferrable interrupt async unwinds between clearing the pseudo
409 * atomic and trapping to GC.*/
411 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
413 #ifndef LISP_FEATURE_WIN32
414 struct thread *thread = arch_os_get_current_thread();
415 struct interrupt_data *data = thread->interrupt_data;
417 /* Obviously, this function is called when signals may not be
418 * blocked. Let's make sure we are not interrupted. */
419 block_blockable_signals(0, &oldset);
420 #ifndef LISP_FEATURE_SB_THREAD
421 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
423 if (data->gc_blocked_deferrables)
424 lose("gc_blocked_deferrables already true\n");
426 if ((!data->pending_handler) &&
427 (!data->gc_blocked_deferrables)) {
428 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
429 data->gc_blocked_deferrables = 1;
431 /* This is the sigmask of some context. */
432 sigcopyset(&data->pending_mask, sigset);
433 sigaddset_deferrable(sigset);
434 thread_sigmask(SIG_SETMASK,&oldset,0);
437 /* Operating on the current sigmask. Save oldset and
438 * unblock gc signals. In the end, this is equivalent to
439 * blocking the deferrables. */
440 sigcopyset(&data->pending_mask, &oldset);
441 thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
445 thread_sigmask(SIG_SETMASK,&oldset,0);
449 /* Are we leaving WITH-GCING and already running with interrupts
450 * enabled, without the protection of *GC-INHIBIT* T and there is gc
451 * (or stop for gc) pending, but we haven't trapped yet? */
453 in_leaving_without_gcing_race_p(struct thread *thread)
455 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
456 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
457 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
458 ((SymbolValue(GC_PENDING,thread) != NIL)
459 #if defined(LISP_FEATURE_SB_THREAD)
460 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
465 /* Check our baroque invariants. */
467 check_interrupt_context_or_lose(os_context_t *context)
469 #ifndef LISP_FEATURE_WIN32
470 struct thread *thread = arch_os_get_current_thread();
471 struct interrupt_data *data = thread->interrupt_data;
472 int interrupt_deferred_p = (data->pending_handler != 0);
473 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
474 sigset_t *sigset = os_context_sigmask_addr(context);
475 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
476 * handle_allocation_trap. */
477 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
478 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
479 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
480 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
481 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
482 int in_race_p = in_leaving_without_gcing_race_p(thread);
483 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
484 * section and trapping, a SIG_STOP_FOR_GC would see the next
485 * check fail, for this reason sig_stop_for_gc handler does not
486 * call this function. */
487 if (interrupt_deferred_p) {
488 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
489 lose("Stray deferred interrupt.\n");
492 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
493 lose("GC_PENDING, but why?\n");
494 #if defined(LISP_FEATURE_SB_THREAD)
496 int stop_for_gc_pending =
497 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
498 if (stop_for_gc_pending)
499 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
500 lose("STOP_FOR_GC_PENDING, but why?\n");
501 if (pseudo_atomic_interrupted)
502 if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
503 lose("pseudo_atomic_interrupted, but why?\n");
506 if (pseudo_atomic_interrupted)
507 if (!(gc_pending || interrupt_deferred_p))
508 lose("pseudo_atomic_interrupted, but why?\n");
511 if (interrupt_pending && !interrupt_deferred_p)
512 lose("INTERRUPT_PENDING but not pending handler.\n");
513 if ((data->gc_blocked_deferrables) && interrupt_pending)
514 lose("gc_blocked_deferrables and interrupt pending\n.");
515 if (data->gc_blocked_deferrables)
516 check_deferrables_blocked_or_lose(sigset);
517 if (interrupt_pending || interrupt_deferred_p ||
518 data->gc_blocked_deferrables)
519 check_deferrables_blocked_or_lose(sigset);
521 check_deferrables_unblocked_or_lose(sigset);
522 /* If deferrables are unblocked then we are open to signals
523 * that run lisp code. */
524 check_gc_signals_unblocked_or_lose(sigset);
529 /* When we catch an internal error, should we pass it back to Lisp to
530 * be handled in a high-level way? (Early in cold init, the answer is
531 * 'no', because Lisp is still too brain-dead to handle anything.
532 * After sufficient initialization has been completed, the answer
534 boolean internal_errors_enabled = 0;
536 #ifndef LISP_FEATURE_WIN32
538 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
540 union interrupt_handler interrupt_handlers[NSIG];
544 * utility routines used by various signal handlers
548 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
550 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
554 /* Build a fake stack frame or frames */
556 current_control_frame_pointer =
557 (lispobj *)(unsigned long)
558 (*os_context_register_addr(context, reg_CSP));
559 if ((lispobj *)(unsigned long)
560 (*os_context_register_addr(context, reg_CFP))
561 == current_control_frame_pointer) {
562 /* There is a small window during call where the callee's
563 * frame isn't built yet. */
564 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
565 == FUN_POINTER_LOWTAG) {
566 /* We have called, but not built the new frame, so
567 * build it for them. */
568 current_control_frame_pointer[0] =
569 *os_context_register_addr(context, reg_OCFP);
570 current_control_frame_pointer[1] =
571 *os_context_register_addr(context, reg_LRA);
572 current_control_frame_pointer += 8;
573 /* Build our frame on top of it. */
574 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
577 /* We haven't yet called, build our frame as if the
578 * partial frame wasn't there. */
579 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
582 /* We can't tell whether we are still in the caller if it had to
583 * allocate a stack frame due to stack arguments. */
584 /* This observation provoked some past CMUCL maintainer to ask
585 * "Can anything strange happen during return?" */
588 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
591 current_control_stack_pointer = current_control_frame_pointer + 8;
593 current_control_frame_pointer[0] = oldcont;
594 current_control_frame_pointer[1] = NIL;
595 current_control_frame_pointer[2] =
596 (lispobj)(*os_context_register_addr(context, reg_CODE));
600 /* Stores the context for gc to scavenge and builds fake stack
603 fake_foreign_function_call(os_context_t *context)
606 struct thread *thread=arch_os_get_current_thread();
608 /* context_index incrementing must not be interrupted */
609 check_blockables_blocked_or_lose(0);
611 /* Get current Lisp state from context. */
613 dynamic_space_free_pointer =
614 (lispobj *)(unsigned long)
615 (*os_context_register_addr(context, reg_ALLOC));
616 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
617 /* dynamic_space_free_pointer); */
618 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
619 if ((long)dynamic_space_free_pointer & 1) {
620 lose("dead in fake_foreign_function_call, context = %x\n", context);
623 /* why don't PPC and SPARC do something like this: */
624 #if defined(LISP_FEATURE_HPPA)
625 if ((long)dynamic_space_free_pointer & 4) {
626 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
631 current_binding_stack_pointer =
632 (lispobj *)(unsigned long)
633 (*os_context_register_addr(context, reg_BSP));
636 build_fake_control_stack_frames(thread,context);
638 /* Do dynamic binding of the active interrupt context index
639 * and save the context in the context array. */
641 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
643 if (context_index >= MAX_INTERRUPTS) {
644 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
647 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
648 make_fixnum(context_index + 1),thread);
650 thread->interrupt_contexts[context_index] = context;
652 #ifdef FOREIGN_FUNCTION_CALL_FLAG
653 foreign_function_call_active = 1;
657 /* blocks all blockable signals. If you are calling from a signal handler,
658 * the usual signal mask will be restored from the context when the handler
659 * finishes. Otherwise, be careful */
661 undo_fake_foreign_function_call(os_context_t *context)
663 struct thread *thread=arch_os_get_current_thread();
664 /* Block all blockable signals. */
665 block_blockable_signals(0, 0);
667 #ifdef FOREIGN_FUNCTION_CALL_FLAG
668 foreign_function_call_active = 0;
671 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
675 /* Put the dynamic space free pointer back into the context. */
676 *os_context_register_addr(context, reg_ALLOC) =
677 (unsigned long) dynamic_space_free_pointer
678 | (*os_context_register_addr(context, reg_ALLOC)
681 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
683 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
688 /* a handler for the signal caused by execution of a trap opcode
689 * signalling an internal error */
691 interrupt_internal_error(os_context_t *context, boolean continuable)
695 fake_foreign_function_call(context);
697 if (!internal_errors_enabled) {
698 describe_internal_error(context);
699 /* There's no good way to recover from an internal error
700 * before the Lisp error handling mechanism is set up. */
701 lose("internal error too early in init, can't recover\n");
704 /* Allocate the SAP object while the interrupts are still
706 unblock_gc_signals(0, 0);
707 context_sap = alloc_sap(context);
709 #ifndef LISP_FEATURE_WIN32
710 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
713 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
714 /* Workaround for blocked SIGTRAP. */
717 sigemptyset(&newset);
718 sigaddset(&newset, SIGTRAP);
719 thread_sigmask(SIG_UNBLOCK, &newset, 0);
723 SHOW("in interrupt_internal_error");
725 /* Display some rudimentary debugging information about the
726 * error, so that even if the Lisp error handler gets badly
727 * confused, we have a chance to determine what's going on. */
728 describe_internal_error(context);
730 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
731 continuable ? T : NIL);
733 undo_fake_foreign_function_call(context); /* blocks signals again */
735 arch_skip_instruction(context);
/* Return non-zero iff the current thread has a deferred signal
 * handler stashed in its interrupt_data (pending_handler set by
 * store_signal_data_for_later and not yet run). */
739 interrupt_handler_pending_p(void)
741 struct thread *thread = arch_os_get_current_thread();
742 struct interrupt_data *data = thread->interrupt_data;
743 return (data->pending_handler != 0);
747 interrupt_handle_pending(os_context_t *context)
749 /* There are three ways we can get here. First, if an interrupt
750 * occurs within pseudo-atomic, it will be deferred, and we'll
751 * trap to here at the end of the pseudo-atomic block. Second, if
752 * the GC (in alloc()) decides that a GC is required, it will set
753 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
754 * and alloc() is always called from within pseudo-atomic, and
755 * thus we end up here again. Third, when calling GC-ON or at the
756 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
757 * here if there is a pending GC. Fourth, ahem, at the end of
758 * WITHOUT-INTERRUPTS (bar complications with nesting). */
760 /* Win32 only needs to handle the GC cases (for now?) */
762 struct thread *thread = arch_os_get_current_thread();
763 struct interrupt_data *data = thread->interrupt_data;
765 if (arch_pseudo_atomic_atomic(context)) {
766 lose("Handling pending interrupt in pseudo atomic.");
769 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
771 check_blockables_blocked_or_lose(0);
773 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
774 * handler, then the pending mask was saved and
775 * gc_blocked_deferrables set. Hence, there can be no pending
776 * handler and it's safe to restore the pending mask.
778 * Note, that if gc_blocked_deferrables is false we may still have
779 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
780 * a pseudo atomic was interrupted by a deferrable first. */
781 if (data->gc_blocked_deferrables) {
782 if (data->pending_handler)
783 lose("GC blocked deferrables but still got a pending handler.");
784 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
785 lose("GC blocked deferrables while GC is inhibited.");
786 /* Restore the saved signal mask from the original signal (the
787 * one that interrupted us during the critical section) into
788 * the os_context for the signal we're currently in the
789 * handler for. This should ensure that when we return from
790 * the handler the blocked signals are unblocked. */
791 #ifndef LISP_FEATURE_WIN32
792 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
794 data->gc_blocked_deferrables = 0;
797 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
798 void *original_pending_handler = data->pending_handler;
800 #ifdef LISP_FEATURE_SB_THREAD
801 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
802 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
803 * the signal handler if it actually stops us. */
804 arch_clear_pseudo_atomic_interrupted(context);
805 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
808 /* Test for T and not for != NIL since the value :IN-PROGRESS
809 * is used in SUB-GC as part of the mechanism to suppress
811 if (SymbolValue(GC_PENDING,thread) == T) {
813 /* Two reasons for doing this. First, if there is a
814 * pending handler we don't want to run. Second, we are
815 * going to clear pseudo atomic interrupted to avoid
816 * spurious trapping on every allocation in SUB_GC and
817 * having a pending handler with interrupts enabled and
818 * without pseudo atomic interrupted breaks an
820 if (data->pending_handler) {
821 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
822 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
825 arch_clear_pseudo_atomic_interrupted(context);
827 /* GC_PENDING is cleared in SUB-GC, or if another thread
828 * is doing a gc already we will get a SIG_STOP_FOR_GC and
829 * that will clear it.
831 * If there is a pending handler or gc was triggered in a
832 * signal handler then maybe_gc won't run POST_GC and will
833 * return normally. */
834 if (!maybe_gc(context))
835 lose("GC not inhibited but maybe_gc did not GC.");
837 if (data->pending_handler) {
841 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
842 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
843 * GC-PENDING is not NIL then we cannot trap on pseudo
844 * atomic due to GC (see if(GC_PENDING) logic in
845 * cheneygc.c and gencgc.c), plus there is an outer
846 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
848 lose("Trapping to run pending handler while GC in progress.");
851 check_blockables_blocked_or_lose(0);
853 /* No GC shall be lost. If SUB_GC triggers another GC then
854 * that should be handled on the spot. */
855 if (SymbolValue(GC_PENDING,thread) != NIL)
856 lose("GC_PENDING after doing gc.");
857 #ifdef LISP_FEATURE_SB_THREAD
858 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
859 lose("STOP_FOR_GC_PENDING after doing gc.");
861 /* Check two things. First, that gc does not clobber a handler
862 * that's already pending. Second, that there is no interrupt
863 * lossage: if original_pending_handler was NULL then even if
864 * an interrupt arrived during GC (POST-GC, really) it was
866 if (original_pending_handler != data->pending_handler)
867 lose("pending handler changed in gc: %x -> %d.",
868 original_pending_handler, data->pending_handler);
871 #ifndef LISP_FEATURE_WIN32
872 /* There may be no pending handler, because it was only a gc that
873 * had to be executed or because Lisp is a bit too eager to call
874 * DO-PENDING-INTERRUPT. */
875 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
876 (data->pending_handler)) {
877 /* No matter how we ended up here, clear both
878 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
879 * because we checked above that there is no GC pending. */
880 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
881 arch_clear_pseudo_atomic_interrupted(context);
882 /* Restore the sigmask in the context. */
883 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
884 run_deferred_handler(data, context);
887 #ifdef LISP_FEATURE_GENCGC
888 if (get_pseudo_atomic_interrupted(thread))
889 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
891 /* It is possible that the end of this function was reached
892 * without ever actually doing anything, the tests in Lisp for
893 * when to call receive-pending-interrupt are not exact. */
894 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
899 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
901 #ifdef FOREIGN_FUNCTION_CALL_FLAG
902 boolean were_in_lisp;
904 union interrupt_handler handler;
906 check_blockables_blocked_or_lose(0);
908 #ifndef LISP_FEATURE_WIN32
909 if (sigismember(&deferrable_sigset,signal))
910 check_interrupts_enabled_or_lose(context);
913 handler = interrupt_handlers[signal];
915 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
919 #ifdef FOREIGN_FUNCTION_CALL_FLAG
920 were_in_lisp = !foreign_function_call_active;
924 fake_foreign_function_call(context);
927 FSHOW_SIGNAL((stderr,
928 "/entering interrupt_handle_now(%d, info, context)\n",
931 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
933 /* This can happen if someone tries to ignore or default one
934 * of the signals we need for runtime support, and the runtime
935 * support decides to pass on it. */
936 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
938 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
939 /* Once we've decided what to do about contexts in a
940 * return-elsewhere world (the original context will no longer
941 * be available; should we copy it or was nobody using it anyway?)
942 * then we should convert this to return-elsewhere */
944 /* CMUCL comment said "Allocate the SAPs while the interrupts
945 * are still disabled.". I (dan, 2003.08.21) assume this is
946 * because we're not in pseudoatomic and allocation shouldn't
947 * be interrupted. In which case it's no longer an issue as
948 * all our allocation from C now goes through a PA wrapper,
949 * but still, doesn't hurt.
951 * Yeah, but non-gencgc platforms don't really wrap allocation
952 * in PA. MG - 2005-08-29 */
954 lispobj info_sap, context_sap;
955 /* Leave deferrable signals blocked, the handler itself will
956 * allow signals again when it sees fit. */
957 unblock_gc_signals(0, 0);
958 context_sap = alloc_sap(context);
959 info_sap = alloc_sap(info);
961 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
963 funcall3(handler.lisp,
968 /* This cannot happen in sane circumstances. */
970 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
972 #ifndef LISP_FEATURE_WIN32
973 /* Allow signals again. */
974 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
976 (*handler.c)(signal, info, context);
979 #ifdef FOREIGN_FUNCTION_CALL_FLAG
983 undo_fake_foreign_function_call(context); /* block signals again */
986 FSHOW_SIGNAL((stderr,
987 "/returning from interrupt_handle_now(%d, info, context)\n",
991 /* This is called at the end of a critical section if the indications
992 * are that some signal was deferred during the section. Note that as
993 * far as C or the kernel is concerned we dealt with the signal
994 * already; we're just doing the Lisp-level processing now that we
997 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
999 /* The pending_handler may enable interrupts and then another
1000 * interrupt may hit, overwrite interrupt_data, so reset the
1001 * pending handler before calling it. Trust the handler to finish
1002 * with the siginfo before enabling interrupts. */
1003 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1004 data->pending_handler;
1006 data->pending_handler=0;
1007 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
1008 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
1011 #ifndef LISP_FEATURE_WIN32
1013 maybe_defer_handler(void *handler, struct interrupt_data *data,
1014 int signal, siginfo_t *info, os_context_t *context)
1016 struct thread *thread=arch_os_get_current_thread();
1018 check_blockables_blocked_or_lose(0);
1020 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
1021 lose("interrupt already pending\n");
1022 if (thread->interrupt_data->pending_handler)
1023 lose("there is a pending handler already (PA)\n");
1024 if (data->gc_blocked_deferrables)
1025 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1026 check_interrupt_context_or_lose(context);
1027 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1028 * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo
1029 * atomic section inside a WITHOUT-INTERRUPTS.
1031 * Also, if in_leaving_without_gcing_race_p then
1032 * interrupt_handle_pending is going to be called soon, so
1033 * stashing the signal away is safe.
1035 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
1036 in_leaving_without_gcing_race_p(thread)) {
1037 FSHOW_SIGNAL((stderr,
1038 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1039 (unsigned int)handler,signal,
1040 in_leaving_without_gcing_race_p(thread)));
1041 store_signal_data_for_later(data,handler,signal,info,context);
1042 SetSymbolValue(INTERRUPT_PENDING, T,thread);
1043 check_interrupt_context_or_lose(context);
1046 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1047 * actually use its argument for anything on x86, so this branch
1048 * may succeed even when context is null (gencgc alloc()) */
1049 if (arch_pseudo_atomic_atomic(context)) {
1050 FSHOW_SIGNAL((stderr,
1051 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1052 (unsigned int)handler,signal));
1053 store_signal_data_for_later(data,handler,signal,info,context);
1054 arch_set_pseudo_atomic_interrupted(context);
1055 check_interrupt_context_or_lose(context);
1058 FSHOW_SIGNAL((stderr,
1059 "/maybe_defer_handler(%x,%d): not deferred\n",
1060 (unsigned int)handler,signal));
1065 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1067 siginfo_t *info, os_context_t *context)
1069 if (data->pending_handler)
1070 lose("tried to overwrite pending interrupt handler %x with %x\n",
1071 data->pending_handler, handler);
1073 lose("tried to defer null interrupt handler\n");
1074 data->pending_handler = handler;
1075 data->pending_signal = signal;
1077 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1079 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1083 lose("Null context");
1085 /* the signal mask in the context (from before we were
1086 * interrupted) is copied to be restored when run_deferred_handler
1087 * happens. Then the usually-blocked signals are added to the mask
1088 * in the context so that we are running with blocked signals when
1089 * the handler returns */
1090 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1091 sigaddset_deferrable(os_context_sigmask_addr(context));
1095 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1097 SAVE_ERRNO(context,void_context);
1098 struct thread *thread = arch_os_get_current_thread();
1099 struct interrupt_data *data = thread->interrupt_data;
1101 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1102 interrupt_handle_now(signal, info, context);
1107 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1108 os_context_t *context)
1110 /* No FP control fixage needed, caller has done that. */
1111 check_blockables_blocked_or_lose(0);
1112 check_interrupts_enabled_or_lose(context);
1113 (*interrupt_low_level_handlers[signal])(signal, info, context);
1114 /* No Darwin context fixage needed, caller does that. */
1118 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1120 SAVE_ERRNO(context,void_context);
1121 struct thread *thread = arch_os_get_current_thread();
1122 struct interrupt_data *data = thread->interrupt_data;
1124 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1125 signal,info,context))
1126 low_level_interrupt_handle_now(signal, info, context);
1131 #ifdef LISP_FEATURE_SB_THREAD
1133 /* This function must not cons, because that may trigger a GC. */
1135 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1137 struct thread *thread=arch_os_get_current_thread();
1139 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1140 * pseudo atomic until gc is finally allowed. */
1141 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1142 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1143 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1145 } else if (arch_pseudo_atomic_atomic(context)) {
1146 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1147 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1148 arch_set_pseudo_atomic_interrupted(context);
1149 maybe_save_gc_mask_and_block_deferrables
1150 (os_context_sigmask_addr(context));
1154 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1156 /* Not PA and GC not inhibited -- we can stop now. */
1158 /* need the context stored so it can have registers scavenged */
1159 fake_foreign_function_call(context);
1161 /* Not pending anymore. */
1162 SetSymbolValue(GC_PENDING,NIL,thread);
1163 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1165 /* Consider this: in a PA section GC is requested: GC_PENDING,
1166 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1167 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1168 * but a SIG_STOP_FOR_GC arrives before trapping to
1169 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1170 * pseudo_atomic_interrupted is not and we go on running with
1171 * pseudo_atomic_interrupted but without a pending interrupt or
1172 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1174 if (thread->interrupt_data->gc_blocked_deferrables) {
1175 FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
1176 clear_pseudo_atomic_interrupted(thread);
1177 sigcopyset(os_context_sigmask_addr(context),
1178 &thread->interrupt_data->pending_mask);
1179 thread->interrupt_data->gc_blocked_deferrables = 0;
1182 if(thread_state(thread)!=STATE_RUNNING) {
1183 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1184 fixnum_value(thread->state));
1187 set_thread_state(thread,STATE_SUSPENDED);
1188 FSHOW_SIGNAL((stderr,"suspended\n"));
1190 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1191 FSHOW_SIGNAL((stderr,"resumed\n"));
1193 if(thread_state(thread)!=STATE_RUNNING) {
1194 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1195 fixnum_value(thread_state(thread)));
1198 undo_fake_foreign_function_call(context);
1204 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1206 SAVE_ERRNO(context,void_context);
1207 #ifndef LISP_FEATURE_WIN32
1208 if ((signal == SIGILL) || (signal == SIGBUS)
1209 #ifndef LISP_FEATURE_LINUX
1210 || (signal == SIGEMT)
1213 corruption_warning_and_maybe_lose("Signal %d recieved", signal);
1215 interrupt_handle_now(signal, info, context);
1219 /* manipulate the signal context and stack such that when the handler
1220 * returns, it will call function instead of whatever it was doing
1224 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1225 extern int *context_eflags_addr(os_context_t *context);
1228 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1229 extern void post_signal_tramp(void);
1230 extern void call_into_lisp_tramp(void);
1232 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1234 #ifndef LISP_FEATURE_WIN32
1235 check_gc_signals_unblocked_or_lose
1236 (os_context_sigmask_addr(context));
1238 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1239 void * fun=native_pointer(function);
1240 void *code = &(((struct simple_fun *) fun)->code);
1243 /* Build a stack frame showing `interrupted' so that the
1244 * user's backtrace makes (as much) sense (as usual) */
1246 /* fp state is saved and restored by call_into_lisp */
1247 /* FIXME: errno is not restored, but since current uses of this
1248 * function only call Lisp code that signals an error, it's not
1249 * much of a problem. In other words, running out of the control
1250 * stack between a syscall and (GET-ERRNO) may clobber errno if
1251 * something fails during signalling or in the handler. But I
1252 * can't see what can go wrong as long as there is no CONTINUE
1253 * like restart on them. */
1254 #ifdef LISP_FEATURE_X86
1255 /* Suppose the existence of some function that saved all
1256 * registers, called call_into_lisp, then restored GP registers and
1257 * returned. It would look something like this:
1265 pushl {address of function to call}
1266 call 0x8058db0 <call_into_lisp>
1273 * What we do here is set up the stack that call_into_lisp would
1274 * expect to see if it had been called by this code, and frob the
1275 * signal context so that signal return goes directly to call_into_lisp,
1276 * and when that function (and the lisp function it invoked) returns,
1277 * it returns to the second half of this imaginary function which
1278 * restores all registers and returns to C
1280 * For this to work, the latter part of the imaginary function
1281 * must obviously exist in reality. That would be post_signal_tramp
1284 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1286 #if defined(LISP_FEATURE_DARWIN)
1287 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1289 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1290 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1292 /* 1. os_validate (malloc/mmap) register_save_block
1293 * 2. copy register state into register_save_block
1294 * 3. put a pointer to register_save_block in a register in the context
1295 * 4. set the context's EIP to point to a trampoline which:
1296 * a. builds the fake stack frame from the block
1297 * b. frees the block
1298 * c. calls the function
1301 *register_save_area = *os_context_pc_addr(context);
1302 *(register_save_area + 1) = function;
1303 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1304 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1305 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1306 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1307 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1308 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1309 *(register_save_area + 8) = *context_eflags_addr(context);
1311 *os_context_pc_addr(context) =
1312 (os_context_register_t) call_into_lisp_tramp;
1313 *os_context_register_addr(context,reg_ECX) =
1314 (os_context_register_t) register_save_area;
1317 /* return address for call_into_lisp: */
1318 *(sp-15) = (u32)post_signal_tramp;
1319 *(sp-14) = function; /* args for call_into_lisp : function*/
1320 *(sp-13) = 0; /* arg array */
1321 *(sp-12) = 0; /* no. args */
1322 /* this order matches that used in POPAD */
1323 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1324 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1326 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1327 /* POPAD ignores the value of ESP: */
1329 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1331 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1332 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1333 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1334 *(sp-3)=*context_eflags_addr(context);
1335 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1336 *(sp-1)=*os_context_pc_addr(context);
1340 #elif defined(LISP_FEATURE_X86_64)
1341 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1343 /* return address for call_into_lisp: */
1344 *(sp-18) = (u64)post_signal_tramp;
1346 *(sp-17)=*os_context_register_addr(context,reg_R15);
1347 *(sp-16)=*os_context_register_addr(context,reg_R14);
1348 *(sp-15)=*os_context_register_addr(context,reg_R13);
1349 *(sp-14)=*os_context_register_addr(context,reg_R12);
1350 *(sp-13)=*os_context_register_addr(context,reg_R11);
1351 *(sp-12)=*os_context_register_addr(context,reg_R10);
1352 *(sp-11)=*os_context_register_addr(context,reg_R9);
1353 *(sp-10)=*os_context_register_addr(context,reg_R8);
1354 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1355 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1356 /* skip RBP and RSP */
1357 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1358 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1359 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1360 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1361 *(sp-3)=*context_eflags_addr(context);
1362 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1363 *(sp-1)=*os_context_pc_addr(context);
1365 *os_context_register_addr(context,reg_RDI) =
1366 (os_context_register_t)function; /* function */
1367 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1368 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1370 struct thread *th=arch_os_get_current_thread();
1371 build_fake_control_stack_frames(th,context);
1374 #ifdef LISP_FEATURE_X86
1376 #if !defined(LISP_FEATURE_DARWIN)
1377 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1378 *os_context_register_addr(context,reg_ECX) = 0;
1379 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1381 *os_context_register_addr(context,reg_UESP) =
1382 (os_context_register_t)(sp-15);
1384 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1385 #endif /* __NETBSD__ */
1386 #endif /* LISP_FEATURE_DARWIN */
1388 #elif defined(LISP_FEATURE_X86_64)
1389 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1390 *os_context_register_addr(context,reg_RCX) = 0;
1391 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1392 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1394 /* this much of the calling convention is common to all
1396 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1397 *os_context_register_addr(context,reg_NARGS) = 0;
1398 *os_context_register_addr(context,reg_LIP) =
1399 (os_context_register_t)(unsigned long)code;
1400 *os_context_register_addr(context,reg_CFP) =
1401 (os_context_register_t)(unsigned long)current_control_frame_pointer;
1403 #ifdef ARCH_HAS_NPC_REGISTER
1404 *os_context_npc_addr(context) =
1405 4 + *os_context_pc_addr(context);
1407 #ifdef LISP_FEATURE_SPARC
1408 *os_context_register_addr(context,reg_CODE) =
1409 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1411 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1415 /* KLUDGE: Theoretically the approach we use for undefined alien
1416 * variables should work for functions as well, but on PPC/Darwin
1417 * we get bus error at bogus addresses instead, hence this workaround,
1418 * that has the added benefit of automatically discriminating between
1419 * functions and variables.
1422 undefined_alien_function(void)
1424 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
1428 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1430 struct thread *th=arch_os_get_current_thread();
1432 if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
1433 addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1434 lose("Control stack exhausted");
1436 else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1437 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1438 /* We hit the end of the control stack: disable guard page
1439 * protection so the error handler has some headroom, protect the
1440 * previous page so that we can catch returns from the guard page
1441 * and restore it. */
1442 protect_control_stack_guard_page(0, NULL);
1443 protect_control_stack_return_guard_page(1, NULL);
1444 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
1446 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1447 /* For the unfortunate case, when the control stack is
1448 * exhausted in a signal handler. */
1449 unblock_signals_in_context_and_maybe_warn(context);
1451 arrange_return_to_lisp_function
1452 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1455 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1456 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1457 /* We're returning from the guard page: reprotect it, and
1458 * unprotect this one. This works even if we somehow missed
1459 * the return-guard-page, and hit it on our way to new
1460 * exhaustion instead. */
1461 protect_control_stack_guard_page(1, NULL);
1462 protect_control_stack_return_guard_page(0, NULL);
1463 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1466 else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
1467 addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1468 lose("Binding stack exhausted");
1470 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1471 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1472 protect_binding_stack_guard_page(0, NULL);
1473 protect_binding_stack_return_guard_page(1, NULL);
1474 fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
1476 /* For the unfortunate case, when the binding stack is
1477 * exhausted in a signal handler. */
1478 unblock_signals_in_context_and_maybe_warn(context);
1479 arrange_return_to_lisp_function
1480 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1483 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1484 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1485 protect_binding_stack_guard_page(1, NULL);
1486 protect_binding_stack_return_guard_page(0, NULL);
1487 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1490 else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
1491 addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1492 lose("Alien stack exhausted");
1494 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1495 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1496 protect_alien_stack_guard_page(0, NULL);
1497 protect_alien_stack_return_guard_page(1, NULL);
1498 fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
1500 /* For the unfortunate case, when the alien stack is
1501 * exhausted in a signal handler. */
1502 unblock_signals_in_context_and_maybe_warn(context);
1503 arrange_return_to_lisp_function
1504 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1507 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1508 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1509 protect_alien_stack_guard_page(1, NULL);
1510 protect_alien_stack_return_guard_page(0, NULL);
1511 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1514 else if (addr >= undefined_alien_address &&
1515 addr < undefined_alien_address + os_vm_page_size) {
1516 arrange_return_to_lisp_function
1517 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1524 * noise to install handlers
1527 #ifndef LISP_FEATURE_WIN32
1528 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1529 * they are blocked, in Linux 2.6 the default handler is invoked
1530 * instead that usually coredumps. One might hastily think that adding
 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
 * the whole sa_mask is ignored, instead of just not adding the signal
 * in question to the mask. That means if it's not blockable the
1534 * signal must be unblocked at the beginning of signal handlers.
1536 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1537 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1538 * will be unblocked in the sigmask during the signal handler. -- RMK
1541 static volatile int sigaction_nodefer_works = -1;
1543 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1544 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1547 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1551 get_current_sigmask(¤t);
1552 /* There should be exactly two blocked signals: the two we added
1553 * to sa_mask when setting up the handler. NetBSD doesn't block
1554 * the signal we're handling when SA_NODEFER is set; Linux before
1555 * 2.6.13 or so also doesn't block the other signal when
1556 * SA_NODEFER is set. */
1557 for(i = 1; i < NSIG; i++)
1558 if (sigismember(¤t, i) !=
1559 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1560 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1561 sigaction_nodefer_works = 0;
1563 if (sigaction_nodefer_works == -1)
1564 sigaction_nodefer_works = 1;
1568 see_if_sigaction_nodefer_works(void)
1570 struct sigaction sa, old_sa;
1572 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1573 sa.sa_sigaction = sigaction_nodefer_test_handler;
1574 sigemptyset(&sa.sa_mask);
1575 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1576 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1577 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1578 /* Make sure no signals are blocked. */
1581 sigemptyset(&empty);
1582 thread_sigmask(SIG_SETMASK, &empty, 0);
1584 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
1585 while (sigaction_nodefer_works == -1);
1586 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1589 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1590 #undef SA_NODEFER_TEST_KILL_SIGNAL
1593 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1595 SAVE_ERRNO(context,void_context);
1598 sigemptyset(&unblock);
1599 sigaddset(&unblock, signal);
1600 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1601 interrupt_handle_now(signal, info, context);
1606 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1608 SAVE_ERRNO(context,void_context);
1611 sigemptyset(&unblock);
1612 sigaddset(&unblock, signal);
1613 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1614 (*interrupt_low_level_handlers[signal])(signal, info, context);
1619 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1621 SAVE_ERRNO(context,void_context);
1622 (*interrupt_low_level_handlers[signal])(signal, info, context);
1627 undoably_install_low_level_interrupt_handler (int signal,
1628 interrupt_handler_t handler)
1630 struct sigaction sa;
1632 if (0 > signal || signal >= NSIG) {
1633 lose("bad signal number %d\n", signal);
1636 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1637 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1638 else if (sigismember(&deferrable_sigset,signal))
1639 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1640 else if (!sigaction_nodefer_works &&
1641 !sigismember(&blockable_sigset, signal))
1642 sa.sa_sigaction = low_level_unblock_me_trampoline;
1644 sa.sa_sigaction = low_level_handle_now_handler;
1646 sigcopyset(&sa.sa_mask, &blockable_sigset);
1647 sa.sa_flags = SA_SIGINFO | SA_RESTART
1648 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1649 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1650 if((signal==SIG_MEMORY_FAULT))
1651 sa.sa_flags |= SA_ONSTACK;
1654 sigaction(signal, &sa, NULL);
1655 interrupt_low_level_handlers[signal] =
1656 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1660 /* This is called from Lisp. */
1662 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*))
1664 #ifndef LISP_FEATURE_WIN32
1665 struct sigaction sa;
1667 union interrupt_handler oldhandler;
1669 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1671 block_blockable_signals(0, &old);
1673 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1674 (unsigned int)interrupt_low_level_handlers[signal]));
1675 if (interrupt_low_level_handlers[signal]==0) {
1676 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1677 ARE_SAME_HANDLER(handler, SIG_IGN))
1678 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1679 else if (sigismember(&deferrable_sigset, signal))
1680 sa.sa_sigaction = maybe_now_maybe_later;
1681 else if (!sigaction_nodefer_works &&
1682 !sigismember(&blockable_sigset, signal))
1683 sa.sa_sigaction = unblock_me_trampoline;
1685 sa.sa_sigaction = interrupt_handle_now_handler;
1687 sigcopyset(&sa.sa_mask, &blockable_sigset);
1688 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1689 (sigaction_nodefer_works ? SA_NODEFER : 0);
1690 sigaction(signal, &sa, NULL);
1693 oldhandler = interrupt_handlers[signal];
1694 interrupt_handlers[signal].c = handler;
1696 thread_sigmask(SIG_SETMASK, &old, 0);
1698 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1700 return (unsigned long)oldhandler.lisp;
1702 /* Probably-wrong Win32 hack */
1707 /* This must not go through lisp as it's allowed anytime, even when on
1710 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
1712 lose("SIGABRT received.\n");
1716 interrupt_init(void)
1718 #ifndef LISP_FEATURE_WIN32
1720 SHOW("entering interrupt_init()");
1721 see_if_sigaction_nodefer_works();
1722 sigemptyset(&deferrable_sigset);
1723 sigemptyset(&blockable_sigset);
1724 sigemptyset(&gc_sigset);
1725 sigaddset_deferrable(&deferrable_sigset);
1726 sigaddset_blockable(&blockable_sigset);
1727 sigaddset_gc(&gc_sigset);
1729 /* Set up high level handler information. */
1730 for (i = 0; i < NSIG; i++) {
1731 interrupt_handlers[i].c =
1732 /* (The cast here blasts away the distinction between
1733 * SA_SIGACTION-style three-argument handlers and
1734 * signal(..)-style one-argument handlers, which is OK
1735 * because it works to call the 1-argument form where the
1736 * 3-argument form is expected.) */
1737 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
1739 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1740 SHOW("returning from interrupt_init()");
1744 #ifndef LISP_FEATURE_WIN32
1746 siginfo_code(siginfo_t *info)
1748 return info->si_code;
1750 os_vm_address_t current_memory_fault_address;
1753 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1755 /* FIXME: This is lossy: if we get another memory fault (eg. from
1756 * another thread) before lisp has read this, we lose the information.
1757 * However, since this is mostly informative, we'll live with that for
1758 * now -- some address is better then no address in this case.
1760 current_memory_fault_address = addr;
1761 /* To allow debugging memory faults in signal handlers and such. */
1762 corruption_warning_and_maybe_lose("Memory fault at %x (pc=%p, sp=%p)",
1764 *os_context_pc_addr(context),
1765 #ifdef ARCH_HAS_STACK_POINTER
1766 *os_context_sp_addr(context)
1771 unblock_signals_in_context_and_maybe_warn(context);
1772 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1773 arrange_return_to_lisp_function(context,
1774 StaticSymbolFunction(MEMORY_FAULT_ERROR));
1776 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
1782 unhandled_trap_error(os_context_t *context)
1784 lispobj context_sap;
1785 fake_foreign_function_call(context);
1786 unblock_gc_signals(0, 0);
1787 context_sap = alloc_sap(context);
1788 #ifndef LISP_FEATURE_WIN32
1789 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1791 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1792 lose("UNHANDLED-TRAP-ERROR fell through");
1795 /* Common logic for trapping instructions. How we actually handle each
1796 * case is highly architecture dependent, but the overall shape is
1799 handle_trap(os_context_t *context, int trap)
1802 case trap_PendingInterrupt:
1803 FSHOW((stderr, "/<trap pending interrupt>\n"));
1804 arch_skip_instruction(context);
1805 interrupt_handle_pending(context);
1809 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1810 interrupt_internal_error(context, trap==trap_Cerror);
1812 case trap_Breakpoint:
1813 arch_handle_breakpoint(context);
1815 case trap_FunEndBreakpoint:
1816 arch_handle_fun_end_breakpoint(context);
1818 #ifdef trap_AfterBreakpoint
1819 case trap_AfterBreakpoint:
1820 arch_handle_after_breakpoint(context);
1823 #ifdef trap_SingleStepAround
1824 case trap_SingleStepAround:
1825 case trap_SingleStepBefore:
1826 arch_handle_single_step_trap(context, trap);
1830 fake_foreign_function_call(context);
1831 lose("%%PRIMITIVE HALT called; the party is over.\n");
1833 unhandled_trap_error(context);