2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* When we catch an internal error, should we pass it back to Lisp to
72 * be handled in a high-level way? (Early in cold init, the answer is
73 * 'no', because Lisp is still too brain-dead to handle anything.
74 * After sufficient initialization has been completed, the answer
76 boolean internal_errors_enabled = 0;
78 #ifndef LISP_FEATURE_WIN32
80 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
82 union interrupt_handler interrupt_handlers[NSIG];
84 /* Under Linux on some architectures, we appear to have to restore the
85 * FPU control word from the context, as after the signal is delivered
86 * we appear to have a null FPU control word. */
87 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
88 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
89 os_context_t *context = arch_os_get_context(&void_context); \
90 os_restore_fp_control(context);
92 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
93 os_context_t *context = arch_os_get_context(&void_context);
96 /* Foreign code may want to start some threads on its own.
97 * Non-targeted, truly asynchronous signals can be delivered to
98 * basically any thread, but invoking Lisp handlers in such foreign
99 * threads is really bad, so let's resignal it.
101 * This should at least bring attention to the problem, but it cannot
102 * work for SIGSEGV and similar. It is good enough for timers, and
103 * maybe all deferrables. */
106 add_handled_signals(sigset_t *sigset)
109 for(i = 1; i < NSIG; i++) {
110 if (!(ARE_SAME_HANDLER(interrupt_low_level_handlers[i], SIG_DFL)) ||
111 !(ARE_SAME_HANDLER(interrupt_handlers[i].c, SIG_DFL))) {
112 sigaddset(sigset, i);
117 void block_signals(sigset_t *what, sigset_t *where, sigset_t *old);
120 maybe_resignal_to_lisp_thread(int signal, os_context_t *context)
122 #ifdef LISP_FEATURE_SB_THREAD
123 if (!pthread_getspecific(lisp_thread)) {
124 if (!(sigismember(&deferrable_sigset,signal))) {
125 corruption_warning_and_maybe_lose
126 ("Received signal %d in non-lisp thread %lu, resignalling to a lisp thread.",
132 sigemptyset(&sigset);
133 add_handled_signals(&sigset);
134 block_signals(&sigset, 0, 0);
135 block_signals(&sigset, os_context_sigmask_addr(context), 0);
136 kill(getpid(), signal);
144 /* These are to be used in signal handlers. Currently all handlers are
145 * called from one of:
147 * interrupt_handle_now_handler
148 * maybe_now_maybe_later
149 * unblock_me_trampoline
150 * low_level_handle_now_handler
151 * low_level_maybe_now_maybe_later
152 * low_level_unblock_me_trampoline
154 * This gives us a single point of control (or six) over errno, fp
155 * control word, and fixing up signal context on sparc.
157 * The SPARC/Linux platform doesn't quite do signals the way we want
158 * them done. The third argument in the handler isn't filled in by the
159 * kernel properly, so we fix it up ourselves in the
160 * arch_os_get_context(..) function. -- CSR, 2002-07-23
162 #define SAVE_ERRNO(signal,context,void_context) \
164 int _saved_errno = errno; \
165 RESTORE_FP_CONTROL_WORD(context,void_context); \
166 if (!maybe_resignal_to_lisp_thread(signal, context)) \
169 #define RESTORE_ERRNO \
171 errno = _saved_errno; \
174 static void run_deferred_handler(struct interrupt_data *data,
175 os_context_t *context);
176 #ifndef LISP_FEATURE_WIN32
177 static void store_signal_data_for_later (struct interrupt_data *data,
178 void *handler, int signal,
180 os_context_t *context);
183 /* Generic signal related utilities. */
186 get_current_sigmask(sigset_t *sigset)
188 /* Get the current sigmask, by blocking the empty set. */
189 thread_sigmask(SIG_BLOCK, 0, sigset);
193 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
198 sigcopyset(old, where);
199 for(i = 1; i < NSIG; i++) {
200 if (sigismember(what, i))
204 thread_sigmask(SIG_BLOCK, what, old);
209 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
214 sigcopyset(old, where);
215 for(i = 1; i < NSIG; i++) {
216 if (sigismember(what, i))
220 thread_sigmask(SIG_UNBLOCK, what, old);
225 print_sigset(sigset_t *sigset)
228 for(i = 1; i < NSIG; i++) {
229 if (sigismember(sigset, i))
230 fprintf(stderr, "Signal %d masked\n", i);
234 /* Return 1 if all signals in sigset2 are masked in sigset, return 0
235 * if all are unmasked, else die. Passing NULL for sigset is a shorthand
236 * for the current sigmask. */
238 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
241 #if !defined(LISP_FEATURE_WIN32)
243 boolean has_blocked = 0, has_unblocked = 0;
246 get_current_sigmask(¤t);
249 for(i = 1; i < NSIG; i++) {
250 if (sigismember(sigset2, i)) {
251 if (sigismember(sigset, i))
257 if (has_blocked && has_unblocked) {
258 print_sigset(sigset);
259 lose("some %s signals blocked, some unblocked\n", name);
269 /* Deferrables, blockables, gc signals. */
272 sigaddset_deferrable(sigset_t *s)
274 sigaddset(s, SIGHUP);
275 sigaddset(s, SIGINT);
276 sigaddset(s, SIGTERM);
277 sigaddset(s, SIGQUIT);
278 sigaddset(s, SIGPIPE);
279 sigaddset(s, SIGALRM);
280 sigaddset(s, SIGURG);
281 sigaddset(s, SIGTSTP);
282 sigaddset(s, SIGCHLD);
284 #ifndef LISP_FEATURE_HPUX
285 sigaddset(s, SIGXCPU);
286 sigaddset(s, SIGXFSZ);
288 sigaddset(s, SIGVTALRM);
289 sigaddset(s, SIGPROF);
290 sigaddset(s, SIGWINCH);
294 sigaddset_blockable(sigset_t *sigset)
296 sigaddset_deferrable(sigset);
297 sigaddset_gc(sigset);
301 sigaddset_gc(sigset_t *sigset)
303 #ifdef LISP_FEATURE_SB_THREAD
304 sigaddset(sigset,SIG_STOP_FOR_GC);
308 /* initialized in interrupt_init */
309 sigset_t deferrable_sigset;
310 sigset_t blockable_sigset;
315 #if !defined(LISP_FEATURE_WIN32)
317 deferrables_blocked_p(sigset_t *sigset)
319 return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
324 check_deferrables_unblocked_or_lose(sigset_t *sigset)
326 #if !defined(LISP_FEATURE_WIN32)
327 if (deferrables_blocked_p(sigset))
328 lose("deferrables blocked\n");
333 check_deferrables_blocked_or_lose(sigset_t *sigset)
335 #if !defined(LISP_FEATURE_WIN32)
336 if (!deferrables_blocked_p(sigset))
337 lose("deferrables unblocked\n");
341 #if !defined(LISP_FEATURE_WIN32)
343 blockables_blocked_p(sigset_t *sigset)
345 return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
350 check_blockables_unblocked_or_lose(sigset_t *sigset)
352 #if !defined(LISP_FEATURE_WIN32)
353 if (blockables_blocked_p(sigset))
354 lose("blockables blocked\n");
359 check_blockables_blocked_or_lose(sigset_t *sigset)
361 #if !defined(LISP_FEATURE_WIN32)
362 if (!blockables_blocked_p(sigset))
363 lose("blockables unblocked\n");
367 #if !defined(LISP_FEATURE_WIN32)
369 gc_signals_blocked_p(sigset_t *sigset)
371 return all_signals_blocked_p(sigset, &gc_sigset, "gc");
376 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
378 #if !defined(LISP_FEATURE_WIN32)
379 if (gc_signals_blocked_p(sigset))
380 lose("gc signals blocked\n");
385 check_gc_signals_blocked_or_lose(sigset_t *sigset)
387 #if !defined(LISP_FEATURE_WIN32)
388 if (!gc_signals_blocked_p(sigset))
389 lose("gc signals unblocked\n");
394 block_deferrable_signals(sigset_t *where, sigset_t *old)
396 #ifndef LISP_FEATURE_WIN32
397 block_signals(&deferrable_sigset, where, old);
402 block_blockable_signals(sigset_t *where, sigset_t *old)
404 #ifndef LISP_FEATURE_WIN32
405 block_signals(&blockable_sigset, where, old);
410 block_gc_signals(sigset_t *where, sigset_t *old)
412 #ifndef LISP_FEATURE_WIN32
413 block_signals(&gc_sigset, where, old);
418 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
420 #ifndef LISP_FEATURE_WIN32
421 if (interrupt_handler_pending_p())
422 lose("unblock_deferrable_signals: losing proposition\n");
423 check_gc_signals_unblocked_or_lose(where);
424 unblock_signals(&deferrable_sigset, where, old);
429 unblock_blockable_signals(sigset_t *where, sigset_t *old)
431 #ifndef LISP_FEATURE_WIN32
432 unblock_signals(&blockable_sigset, where, old);
437 unblock_gc_signals(sigset_t *where, sigset_t *old)
439 #ifndef LISP_FEATURE_WIN32
440 unblock_signals(&gc_sigset, where, old);
445 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
447 #ifndef LISP_FEATURE_WIN32
448 sigset_t *sigset = os_context_sigmask_addr(context);
449 if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
450 corruption_warning_and_maybe_lose(
451 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
452 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
453 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
454 unblock_gc_signals(sigset, 0);
456 if (!interrupt_handler_pending_p()) {
457 unblock_deferrable_signals(sigset, 0);
464 check_interrupts_enabled_or_lose(os_context_t *context)
466 struct thread *thread=arch_os_get_current_thread();
467 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
468 lose("interrupts not enabled\n");
469 if (arch_pseudo_atomic_atomic(context))
470 lose ("in pseudo atomic section\n");
473 /* Save sigset (or the current sigmask if 0) if there is no pending
474 * handler, because that means that deferrables are already blocked.
475 * The purpose is to avoid losing the pending gc signal if a
476 * deferrable interrupt async unwinds between clearing the pseudo
477 * atomic and trapping to GC.*/
479 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
481 #ifndef LISP_FEATURE_WIN32
482 struct thread *thread = arch_os_get_current_thread();
483 struct interrupt_data *data = thread->interrupt_data;
485 /* Obviously, this function is called when signals may not be
486 * blocked. Let's make sure we are not interrupted. */
487 block_blockable_signals(0, &oldset);
488 #ifndef LISP_FEATURE_SB_THREAD
489 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
491 if (data->gc_blocked_deferrables)
492 lose("gc_blocked_deferrables already true\n");
494 if ((!data->pending_handler) &&
495 (!data->gc_blocked_deferrables)) {
496 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
497 data->gc_blocked_deferrables = 1;
499 /* This is the sigmask of some context. */
500 sigcopyset(&data->pending_mask, sigset);
501 sigaddset_deferrable(sigset);
502 thread_sigmask(SIG_SETMASK,&oldset,0);
505 /* Operating on the current sigmask. Save oldset and
506 * unblock gc signals. In the end, this is equivalent to
507 * blocking the deferrables. */
508 sigcopyset(&data->pending_mask, &oldset);
509 thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
513 thread_sigmask(SIG_SETMASK,&oldset,0);
517 /* Are we leaving WITH-GCING and already running with interrupts
518 * enabled, without the protection of *GC-INHIBIT* T and there is gc
519 * (or stop for gc) pending, but we haven't trapped yet? */
521 in_leaving_without_gcing_race_p(struct thread *thread)
523 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
524 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
525 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
526 ((SymbolValue(GC_PENDING,thread) != NIL)
527 #if defined(LISP_FEATURE_SB_THREAD)
528 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
533 /* Check our baroque invariants. */
535 check_interrupt_context_or_lose(os_context_t *context)
537 #ifndef LISP_FEATURE_WIN32
538 struct thread *thread = arch_os_get_current_thread();
539 struct interrupt_data *data = thread->interrupt_data;
540 int interrupt_deferred_p = (data->pending_handler != 0);
541 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
542 sigset_t *sigset = os_context_sigmask_addr(context);
543 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
544 * handle_allocation_trap. */
545 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
546 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
547 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
548 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
549 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
550 int in_race_p = in_leaving_without_gcing_race_p(thread);
551 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
552 * section and trapping, a SIG_STOP_FOR_GC would see the next
553 * check fail, for this reason sig_stop_for_gc handler does not
554 * call this function. */
555 if (interrupt_deferred_p) {
556 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
557 lose("Stray deferred interrupt.\n");
560 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
561 lose("GC_PENDING, but why?\n");
562 #if defined(LISP_FEATURE_SB_THREAD)
564 int stop_for_gc_pending =
565 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
566 if (stop_for_gc_pending)
567 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
568 lose("STOP_FOR_GC_PENDING, but why?\n");
569 if (pseudo_atomic_interrupted)
570 if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
571 lose("pseudo_atomic_interrupted, but why?\n");
574 if (pseudo_atomic_interrupted)
575 if (!(gc_pending || interrupt_deferred_p))
576 lose("pseudo_atomic_interrupted, but why?\n");
579 if (interrupt_pending && !interrupt_deferred_p)
580 lose("INTERRUPT_PENDING but not pending handler.\n");
581 if ((data->gc_blocked_deferrables) && interrupt_pending)
582 lose("gc_blocked_deferrables and interrupt pending\n.");
583 if (data->gc_blocked_deferrables)
584 check_deferrables_blocked_or_lose(sigset);
585 if (interrupt_pending || interrupt_deferred_p ||
586 data->gc_blocked_deferrables)
587 check_deferrables_blocked_or_lose(sigset);
589 check_deferrables_unblocked_or_lose(sigset);
590 /* If deferrables are unblocked then we are open to signals
591 * that run lisp code. */
592 check_gc_signals_unblocked_or_lose(sigset);
598 * utility routines used by various signal handlers
602 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
604 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
608 /* Build a fake stack frame or frames */
610 current_control_frame_pointer =
611 (lispobj *)(unsigned long)
612 (*os_context_register_addr(context, reg_CSP));
613 if ((lispobj *)(unsigned long)
614 (*os_context_register_addr(context, reg_CFP))
615 == current_control_frame_pointer) {
616 /* There is a small window during call where the callee's
617 * frame isn't built yet. */
618 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
619 == FUN_POINTER_LOWTAG) {
620 /* We have called, but not built the new frame, so
621 * build it for them. */
622 current_control_frame_pointer[0] =
623 *os_context_register_addr(context, reg_OCFP);
624 current_control_frame_pointer[1] =
625 *os_context_register_addr(context, reg_LRA);
626 current_control_frame_pointer += 8;
627 /* Build our frame on top of it. */
628 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
631 /* We haven't yet called, build our frame as if the
632 * partial frame wasn't there. */
633 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
636 /* We can't tell whether we are still in the caller if it had to
637 * allocate a stack frame due to stack arguments. */
638 /* This observation provoked some past CMUCL maintainer to ask
639 * "Can anything strange happen during return?" */
642 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
645 current_control_stack_pointer = current_control_frame_pointer + 8;
647 current_control_frame_pointer[0] = oldcont;
648 current_control_frame_pointer[1] = NIL;
649 current_control_frame_pointer[2] =
650 (lispobj)(*os_context_register_addr(context, reg_CODE));
654 /* Stores the context for gc to scavenge and builds fake stack
657 fake_foreign_function_call(os_context_t *context)
660 struct thread *thread=arch_os_get_current_thread();
662 /* context_index incrementing must not be interrupted */
663 check_blockables_blocked_or_lose(0);
665 /* Get current Lisp state from context. */
667 dynamic_space_free_pointer =
668 (lispobj *)(unsigned long)
669 (*os_context_register_addr(context, reg_ALLOC));
670 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
671 /* dynamic_space_free_pointer); */
672 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
673 if ((long)dynamic_space_free_pointer & 1) {
674 lose("dead in fake_foreign_function_call, context = %x\n", context);
677 /* why don't PPC and SPARC do something like this: */
678 #if defined(LISP_FEATURE_HPPA)
679 if ((long)dynamic_space_free_pointer & 4) {
680 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
685 current_binding_stack_pointer =
686 (lispobj *)(unsigned long)
687 (*os_context_register_addr(context, reg_BSP));
690 build_fake_control_stack_frames(thread,context);
692 /* Do dynamic binding of the active interrupt context index
693 * and save the context in the context array. */
695 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
697 if (context_index >= MAX_INTERRUPTS) {
698 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
701 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
702 make_fixnum(context_index + 1),thread);
704 thread->interrupt_contexts[context_index] = context;
706 #ifdef FOREIGN_FUNCTION_CALL_FLAG
707 foreign_function_call_active = 1;
711 /* blocks all blockable signals. If you are calling from a signal handler,
712 * the usual signal mask will be restored from the context when the handler
713 * finishes. Otherwise, be careful */
715 undo_fake_foreign_function_call(os_context_t *context)
717 struct thread *thread=arch_os_get_current_thread();
718 /* Block all blockable signals. */
719 block_blockable_signals(0, 0);
721 #ifdef FOREIGN_FUNCTION_CALL_FLAG
722 foreign_function_call_active = 0;
725 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
729 /* Put the dynamic space free pointer back into the context. */
730 *os_context_register_addr(context, reg_ALLOC) =
731 (unsigned long) dynamic_space_free_pointer
732 | (*os_context_register_addr(context, reg_ALLOC)
735 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
737 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
742 /* a handler for the signal caused by execution of a trap opcode
743 * signalling an internal error */
745 interrupt_internal_error(os_context_t *context, boolean continuable)
749 fake_foreign_function_call(context);
751 if (!internal_errors_enabled) {
752 describe_internal_error(context);
753 /* There's no good way to recover from an internal error
754 * before the Lisp error handling mechanism is set up. */
755 lose("internal error too early in init, can't recover\n");
758 /* Allocate the SAP object while the interrupts are still
760 unblock_gc_signals(0, 0);
761 context_sap = alloc_sap(context);
763 #ifndef LISP_FEATURE_WIN32
764 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
767 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
768 /* Workaround for blocked SIGTRAP. */
771 sigemptyset(&newset);
772 sigaddset(&newset, SIGTRAP);
773 thread_sigmask(SIG_UNBLOCK, &newset, 0);
777 SHOW("in interrupt_internal_error");
779 /* Display some rudimentary debugging information about the
780 * error, so that even if the Lisp error handler gets badly
781 * confused, we have a chance to determine what's going on. */
782 describe_internal_error(context);
784 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
785 continuable ? T : NIL);
787 undo_fake_foreign_function_call(context); /* blocks signals again */
789 arch_skip_instruction(context);
793 interrupt_handler_pending_p(void)
795 struct thread *thread = arch_os_get_current_thread();
796 struct interrupt_data *data = thread->interrupt_data;
797 return (data->pending_handler != 0);
801 interrupt_handle_pending(os_context_t *context)
803 /* There are three ways we can get here. First, if an interrupt
804 * occurs within pseudo-atomic, it will be deferred, and we'll
805 * trap to here at the end of the pseudo-atomic block. Second, if
806 * the GC (in alloc()) decides that a GC is required, it will set
807 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
808 * and alloc() is always called from within pseudo-atomic, and
809 * thus we end up here again. Third, when calling GC-ON or at the
810 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
811 * here if there is a pending GC. Fourth, ahem, at the end of
812 * WITHOUT-INTERRUPTS (bar complications with nesting). */
814 /* Win32 only needs to handle the GC cases (for now?) */
816 struct thread *thread = arch_os_get_current_thread();
817 struct interrupt_data *data = thread->interrupt_data;
819 if (arch_pseudo_atomic_atomic(context)) {
820 lose("Handling pending interrupt in pseudo atomic.");
823 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
825 check_blockables_blocked_or_lose(0);
827 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
828 * handler, then the pending mask was saved and
829 * gc_blocked_deferrables set. Hence, there can be no pending
830 * handler and it's safe to restore the pending mask.
832 * Note, that if gc_blocked_deferrables is false we may still have
833 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
834 * pseudo atomic was interrupted by a deferrable first. */
835 if (data->gc_blocked_deferrables) {
836 if (data->pending_handler)
837 lose("GC blocked deferrables but still got a pending handler.");
838 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
839 lose("GC blocked deferrables while GC is inhibited.");
840 /* Restore the saved signal mask from the original signal (the
841 * one that interrupted us during the critical section) into
842 * the os_context for the signal we're currently in the
843 * handler for. This should ensure that when we return from
844 * the handler the blocked signals are unblocked. */
845 #ifndef LISP_FEATURE_WIN32
846 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
848 data->gc_blocked_deferrables = 0;
851 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
852 void *original_pending_handler = data->pending_handler;
854 #ifdef LISP_FEATURE_SB_THREAD
855 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
856 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
857 * the signal handler if it actually stops us. */
858 arch_clear_pseudo_atomic_interrupted(context);
859 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
862 /* Test for T and not for != NIL since the value :IN-PROGRESS
863 * is used in SUB-GC as part of the mechanism to suppress
865 if (SymbolValue(GC_PENDING,thread) == T) {
867 /* Two reasons for doing this. First, if there is a
868 * pending handler we don't want to run. Second, we are
869 * going to clear pseudo atomic interrupted to avoid
870 * spurious trapping on every allocation in SUB_GC and
871 * having a pending handler with interrupts enabled and
872 * without pseudo atomic interrupted breaks an
874 if (data->pending_handler) {
875 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
876 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
879 arch_clear_pseudo_atomic_interrupted(context);
881 /* GC_PENDING is cleared in SUB-GC, or if another thread
882 * is doing a gc already we will get a SIG_STOP_FOR_GC and
883 * that will clear it.
885 * If there is a pending handler or gc was triggered in a
886 * signal handler then maybe_gc won't run POST_GC and will
887 * return normally. */
888 if (!maybe_gc(context))
889 lose("GC not inhibited but maybe_gc did not GC.");
891 if (data->pending_handler) {
895 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
896 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
897 * GC-PENDING is not NIL then we cannot trap on pseudo
898 * atomic due to GC (see if(GC_PENDING) logic in
899 * cheneygc.c and gencgc.c), plus there is an outer
900 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
902 lose("Trapping to run pending handler while GC in progress.");
905 check_blockables_blocked_or_lose(0);
907 /* No GC shall be lost. If SUB_GC triggers another GC then
908 * that should be handled on the spot. */
909 if (SymbolValue(GC_PENDING,thread) != NIL)
910 lose("GC_PENDING after doing gc.");
911 #ifdef LISP_FEATURE_SB_THREAD
912 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
913 lose("STOP_FOR_GC_PENDING after doing gc.");
915 /* Check two things. First, that gc does not clobber a handler
916 * that's already pending. Second, that there is no interrupt
917 * lossage: if original_pending_handler was NULL then even if
918 * an interrupt arrived during GC (POST-GC, really) it was
920 if (original_pending_handler != data->pending_handler)
921 lose("pending handler changed in gc: %x -> %d.",
922 original_pending_handler, data->pending_handler);
925 #ifndef LISP_FEATURE_WIN32
926 /* There may be no pending handler, because it was only a gc that
927 * had to be executed or because Lisp is a bit too eager to call
928 * DO-PENDING-INTERRUPT. */
929 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
930 (data->pending_handler)) {
931 /* No matter how we ended up here, clear both
932 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
933 * because we checked above that there is no GC pending. */
934 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
935 arch_clear_pseudo_atomic_interrupted(context);
936 /* Restore the sigmask in the context. */
937 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
938 run_deferred_handler(data, context);
941 #ifdef LISP_FEATURE_GENCGC
942 if (get_pseudo_atomic_interrupted(thread))
943 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
945 /* It is possible that the end of this function was reached
946 * without ever actually doing anything, the tests in Lisp for
947 * when to call receive-pending-interrupt are not exact. */
948 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
953 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
955 #ifdef FOREIGN_FUNCTION_CALL_FLAG
956 boolean were_in_lisp;
958 union interrupt_handler handler;
960 check_blockables_blocked_or_lose(0);
962 #ifndef LISP_FEATURE_WIN32
963 if (sigismember(&deferrable_sigset,signal))
964 check_interrupts_enabled_or_lose(context);
967 handler = interrupt_handlers[signal];
969 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
973 #ifdef FOREIGN_FUNCTION_CALL_FLAG
974 were_in_lisp = !foreign_function_call_active;
978 fake_foreign_function_call(context);
981 FSHOW_SIGNAL((stderr,
982 "/entering interrupt_handle_now(%d, info, context)\n",
985 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
987 /* This can happen if someone tries to ignore or default one
988 * of the signals we need for runtime support, and the runtime
989 * support decides to pass on it. */
990 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
992 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
993 /* Once we've decided what to do about contexts in a
994 * return-elsewhere world (the original context will no longer
995 * be available; should we copy it or was nobody using it anyway?)
996 * then we should convert this to return-elsewhere */
998 /* CMUCL comment said "Allocate the SAPs while the interrupts
999 * are still disabled.". I (dan, 2003.08.21) assume this is
1000 * because we're not in pseudoatomic and allocation shouldn't
1001 * be interrupted. In which case it's no longer an issue as
1002 * all our allocation from C now goes through a PA wrapper,
1003 * but still, doesn't hurt.
1005 * Yeah, but non-gencgc platforms don't really wrap allocation
1006 * in PA. MG - 2005-08-29 */
1008 lispobj info_sap, context_sap;
1009 /* Leave deferrable signals blocked, the handler itself will
1010 * allow signals again when it sees fit. */
1011 unblock_gc_signals(0, 0);
1012 context_sap = alloc_sap(context);
1013 info_sap = alloc_sap(info);
1015 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
1017 funcall3(handler.lisp,
1018 make_fixnum(signal),
1022 /* This cannot happen in sane circumstances. */
1024 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
1026 #ifndef LISP_FEATURE_WIN32
1027 /* Allow signals again. */
1028 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1030 (*handler.c)(signal, info, context);
1033 #ifdef FOREIGN_FUNCTION_CALL_FLAG
1037 undo_fake_foreign_function_call(context); /* block signals again */
1040 FSHOW_SIGNAL((stderr,
1041 "/returning from interrupt_handle_now(%d, info, context)\n",
1045 /* This is called at the end of a critical section if the indications
1046 * are that some signal was deferred during the section. Note that as
1047 * far as C or the kernel is concerned we dealt with the signal
1048 * already; we're just doing the Lisp-level processing now that we
1051 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
1053 /* The pending_handler may enable interrupts and then another
1054 * interrupt may hit, overwrite interrupt_data, so reset the
1055 * pending handler before calling it. Trust the handler to finish
1056 * with the siginfo before enabling interrupts. */
1057 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1058 data->pending_handler;
1060 data->pending_handler=0;
1061 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
1062 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
1065 #ifndef LISP_FEATURE_WIN32
1067 maybe_defer_handler(void *handler, struct interrupt_data *data,
1068 int signal, siginfo_t *info, os_context_t *context)
1070 struct thread *thread=arch_os_get_current_thread();
1072 check_blockables_blocked_or_lose(0);
1074 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
1075 lose("interrupt already pending\n");
1076 if (thread->interrupt_data->pending_handler)
1077 lose("there is a pending handler already (PA)\n");
1078 if (data->gc_blocked_deferrables)
1079 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1080 check_interrupt_context_or_lose(context);
1081 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1082 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
1083 * atomic section inside a WITHOUT-INTERRUPTS.
1085 * Also, if in_leaving_without_gcing_race_p then
1086 * interrupt_handle_pending is going to be called soon, so
1087 * stashing the signal away is safe.
1089 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
1090 in_leaving_without_gcing_race_p(thread)) {
1091 FSHOW_SIGNAL((stderr,
1092 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1093 (unsigned int)handler,signal,
1094 in_leaving_without_gcing_race_p(thread)));
1095 store_signal_data_for_later(data,handler,signal,info,context);
1096 SetSymbolValue(INTERRUPT_PENDING, T,thread);
1097 check_interrupt_context_or_lose(context);
1100 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1101 * actually use its argument for anything on x86, so this branch
1102 * may succeed even when context is null (gencgc alloc()) */
1103 if (arch_pseudo_atomic_atomic(context)) {
1104 FSHOW_SIGNAL((stderr,
1105 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1106 (unsigned int)handler,signal));
1107 store_signal_data_for_later(data,handler,signal,info,context);
1108 arch_set_pseudo_atomic_interrupted(context);
1109 check_interrupt_context_or_lose(context);
1112 FSHOW_SIGNAL((stderr,
1113 "/maybe_defer_handler(%x,%d): not deferred\n",
1114 (unsigned int)handler,signal));
/* Stash `signal', its siginfo and `handler' in `data' so that
 * run_deferred_handler() can replay the signal later.  Also saves the
 * interrupted context's signal mask and blocks deferrables in the
 * context so we return from the (outer) handler with them blocked. */
1119 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1121 siginfo_t *info, os_context_t *context)
1123 if (data->pending_handler)
1124 lose("tried to overwrite pending interrupt handler %x with %x\n",
1125 data->pending_handler, handler);
1127 lose("tried to defer null interrupt handler\n");
1128 data->pending_handler = handler;
1129 data->pending_signal = signal;
/* siginfo is copied by value: the kernel-provided one is only valid
 * for the duration of the outer handler. */
1131 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1133 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1137 lose("Null context");
1139 /* the signal mask in the context (from before we were
1140 * interrupted) is copied to be restored when run_deferred_handler
1141 * happens. Then the usually-blocked signals are added to the mask
1142 * in the context so that we are running with blocked signals when
1143 * the handler returns */
1144 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1145 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (UNIX-installed) handler for deferrable signals with a
 * Lisp-level handler: run interrupt_handle_now() immediately unless
 * maybe_defer_handler() decides the signal must wait. */
1149 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1151 SAVE_ERRNO(signal,context,void_context);
1152 struct thread *thread = arch_os_get_current_thread();
1153 struct interrupt_data *data = thread->interrupt_data;
1154 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1155 interrupt_handle_now(signal, info, context);
/* Dispatch to the C-level (low-level) handler registered for `signal'.
 * Callers are expected to have dealt with FP-control and (Darwin)
 * context fixups already, per the comments below. */
1160 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1161 os_context_t *context)
1163 /* No FP control fixage needed, caller has done that. */
1164 check_blockables_blocked_or_lose(0);
1165 check_interrupts_enabled_or_lose(context);
1166 (*interrupt_low_level_handlers[signal])(signal, info, context);
1167 /* No Darwin context fixage needed, caller does that. */
/* Like maybe_now_maybe_later(), but for signals handled at C level
 * (interrupt_low_level_handlers[]) rather than by Lisp. */
1171 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1173 SAVE_ERRNO(signal,context,void_context);
1174 struct thread *thread = arch_os_get_current_thread();
1175 struct interrupt_data *data = thread->interrupt_data;
1177 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1178 signal,info,context))
1179 low_level_interrupt_handle_now(signal, info, context);
1184 #ifdef LISP_FEATURE_SB_THREAD
1186 /* This function must not cons, because that may trigger a GC. */
/* Handler for the stop-the-world signal sent by the GCing thread.
 * Either defers the stop (when *GC-INHIBIT* is set or we're in a
 * pseudo-atomic section) or suspends this thread until the GC thread
 * signals the state change back to STATE_RUNNING. */
1188 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1190 struct thread *thread=arch_os_get_current_thread();
1192 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1193 * pseudo atomic until gc is finally allowed. */
1194 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1195 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1196 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1198 } else if (arch_pseudo_atomic_atomic(context)) {
1199 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1200 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1201 arch_set_pseudo_atomic_interrupted(context);
1202 maybe_save_gc_mask_and_block_deferrables
1203 (os_context_sigmask_addr(context));
1207 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1209 /* Not PA and GC not inhibited -- we can stop now. */
1211 /* need the context stored so it can have registers scavenged */
1212 fake_foreign_function_call(context);
1214 /* Not pending anymore. */
1215 SetSymbolValue(GC_PENDING,NIL,thread);
1216 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1218 /* Consider this: in a PA section GC is requested: GC_PENDING,
1219 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1220 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1221 * but a SIG_STOP_FOR_GC arrives before trapping to
1222 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1223 * pseudo_atomic_interrupted is not and we go on running with
1224 * pseudo_atomic_interrupted but without a pending interrupt or
1225 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1227 if (thread->interrupt_data->gc_blocked_deferrables) {
1228 FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
1229 clear_pseudo_atomic_interrupted(thread);
/* Restore the mask saved when deferrables were blocked for GC. */
1230 sigcopyset(os_context_sigmask_addr(context),
1231 &thread->interrupt_data->pending_mask);
1232 thread->interrupt_data->gc_blocked_deferrables = 0;
1235 if(thread_state(thread)!=STATE_RUNNING) {
1236 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1237 fixnum_value(thread->state));
/* Announce suspension, then block until the GC thread resumes us. */
1240 set_thread_state(thread,STATE_SUSPENDED);
1241 FSHOW_SIGNAL((stderr,"suspended\n"));
1243 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1244 FSHOW_SIGNAL((stderr,"resumed\n"));
1246 if(thread_state(thread)!=STATE_RUNNING) {
1247 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1248 fixnum_value(thread_state(thread)));
1251 undo_fake_foreign_function_call(context);
1257 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1259 SAVE_ERRNO(signal,context,void_context);
1260 #ifndef LISP_FEATURE_WIN32
1261 if ((signal == SIGILL) || (signal == SIGBUS)
1262 #ifndef LISP_FEATURE_LINUX
1263 || (signal == SIGEMT)
1266 corruption_warning_and_maybe_lose("Signal %d recieved", signal);
1268 interrupt_handle_now(signal, info, context);
1272 /* manipulate the signal context and stack such that when the handler
1273 * returns, it will call function instead of whatever it was doing
1277 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1278 extern int *context_eflags_addr(os_context_t *context);
1281 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1282 extern void post_signal_tramp(void);
1283 extern void call_into_lisp_tramp(void);
/* Frob `context' (and, on x86/x86-64, the interrupted stack) so that
 * when the signal handler returns, execution continues in the Lisp
 * function `function' instead of where the signal struck.  The
 * mechanism is highly architecture-dependent; see the per-arch
 * comments below.  NOTE(review): this listing has dropped a number of
 * lines (braces, #endifs, some assignments); code kept byte-identical. */
1285 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1287 #ifndef LISP_FEATURE_WIN32
1288 check_gc_signals_unblocked_or_lose
1289 (os_context_sigmask_addr(context));
1291 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1292 void * fun=native_pointer(function);
1293 void *code = &(((struct simple_fun *) fun)->code);
1296 /* Build a stack frame showing `interrupted' so that the
1297 * user's backtrace makes (as much) sense (as usual) */
1299 /* fp state is saved and restored by call_into_lisp */
1300 /* FIXME: errno is not restored, but since current uses of this
1301 * function only call Lisp code that signals an error, it's not
1302 * much of a problem. In other words, running out of the control
1303 * stack between a syscall and (GET-ERRNO) may clobber errno if
1304 * something fails during signalling or in the handler. But I
1305 * can't see what can go wrong as long as there is no CONTINUE
1306 * like restart on them. */
1307 #ifdef LISP_FEATURE_X86
1308 /* Suppose the existence of some function that saved all
1309 * registers, called call_into_lisp, then restored GP registers and
1310 * returned. It would look something like this:
1318 pushl {address of function to call}
1319 call 0x8058db0 <call_into_lisp>
1326 * What we do here is set up the stack that call_into_lisp would
1327 * expect to see if it had been called by this code, and frob the
1328 * signal context so that signal return goes directly to call_into_lisp,
1329 * and when that function (and the lisp function it invoked) returns,
1330 * it returns to the second half of this imaginary function which
1331 * restores all registers and returns to C
1333 * For this to work, the latter part of the imaginary function
1334 * must obviously exist in reality. That would be post_signal_tramp
1337 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1339 #if defined(LISP_FEATURE_DARWIN)
1340 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1342 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1343 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1345 /* 1. os_validate (malloc/mmap) register_save_block
1346 * 2. copy register state into register_save_block
1347 * 3. put a pointer to register_save_block in a register in the context
1348 * 4. set the context's EIP to point to a trampoline which:
1349 * a. builds the fake stack frame from the block
1350 * b. frees the block
1351 * c. calls the function
1354 *register_save_area = *os_context_pc_addr(context);
1355 *(register_save_area + 1) = function;
1356 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1357 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1358 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1359 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1360 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1361 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1362 *(register_save_area + 8) = *context_eflags_addr(context);
1364 *os_context_pc_addr(context) =
1365 (os_context_register_t) call_into_lisp_tramp;
1366 *os_context_register_addr(context,reg_ECX) =
1367 (os_context_register_t) register_save_area;
/* Non-Darwin x86: build the fake call_into_lisp frame directly on the
 * interrupted stack, below the current SP. */
1370 /* return address for call_into_lisp: */
1371 *(sp-15) = (u32)post_signal_tramp;
1372 *(sp-14) = function; /* args for call_into_lisp : function*/
1373 *(sp-13) = 0; /* arg array */
1374 *(sp-12) = 0; /* no. args */
1375 /* this order matches that used in POPAD */
1376 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1377 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1379 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1380 /* POPAD ignores the value of ESP: */
1382 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1384 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1385 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1386 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1387 *(sp-3)=*context_eflags_addr(context);
1388 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1389 *(sp-1)=*os_context_pc_addr(context);
1393 #elif defined(LISP_FEATURE_X86_64)
1394 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1396 /* return address for call_into_lisp: */
1397 *(sp-18) = (u64)post_signal_tramp;
/* Saved GP registers for post_signal_tramp to restore, pushed in the
 * order it pops them. */
1399 *(sp-17)=*os_context_register_addr(context,reg_R15);
1400 *(sp-16)=*os_context_register_addr(context,reg_R14);
1401 *(sp-15)=*os_context_register_addr(context,reg_R13);
1402 *(sp-14)=*os_context_register_addr(context,reg_R12);
1403 *(sp-13)=*os_context_register_addr(context,reg_R11);
1404 *(sp-12)=*os_context_register_addr(context,reg_R10);
1405 *(sp-11)=*os_context_register_addr(context,reg_R9);
1406 *(sp-10)=*os_context_register_addr(context,reg_R8);
1407 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1408 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1409 /* skip RBP and RSP */
1410 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1411 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1412 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1413 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1414 *(sp-3)=*context_eflags_addr(context);
1415 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1416 *(sp-1)=*os_context_pc_addr(context);
/* System V AMD64 argument registers for call_into_lisp(fun, args, nargs). */
1418 *os_context_register_addr(context,reg_RDI) =
1419 (os_context_register_t)function; /* function */
1420 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1421 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1423 struct thread *th=arch_os_get_current_thread();
1424 build_fake_control_stack_frames(th,context);
1427 #ifdef LISP_FEATURE_X86
1429 #if !defined(LISP_FEATURE_DARWIN)
1430 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1431 *os_context_register_addr(context,reg_ECX) = 0;
1432 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
/* NetBSD exposes the user stack pointer as reg_UESP; elsewhere reg_ESP. */
1434 *os_context_register_addr(context,reg_UESP) =
1435 (os_context_register_t)(sp-15);
1437 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1438 #endif /* __NETBSD__ */
1439 #endif /* LISP_FEATURE_DARWIN */
1441 #elif defined(LISP_FEATURE_X86_64)
1442 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1443 *os_context_register_addr(context,reg_RCX) = 0;
1444 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1445 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
/* Non-x86 ports: jump straight to the function's code, setting up the
 * register-based calling convention. */
1447 /* this much of the calling convention is common to all
1449 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1450 *os_context_register_addr(context,reg_NARGS) = 0;
1451 *os_context_register_addr(context,reg_LIP) =
1452 (os_context_register_t)(unsigned long)code;
1453 *os_context_register_addr(context,reg_CFP) =
1454 (os_context_register_t)(unsigned long)current_control_frame_pointer;
1456 #ifdef ARCH_HAS_NPC_REGISTER
1457 *os_context_npc_addr(context) =
1458 4 + *os_context_pc_addr(context);
1460 #ifdef LISP_FEATURE_SPARC
1461 *os_context_register_addr(context,reg_CODE) =
1462 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1464 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1468 /* KLUDGE: Theoretically the approach we use for undefined alien
1469 * variables should work for functions as well, but on PPC/Darwin
1470 * we get bus error at bogus addresses instead, hence this workaround,
1471 * that has the added benefit of automatically discriminating between
1472 * functions and variables.
/* Called when an undefined alien function is invoked: signal the
 * corresponding Lisp error. */
1475 undefined_alien_function(void)
1477 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Dispatch a memory fault at `addr' against the per-thread guard
 * pages (control/binding/alien stacks and the undefined-alien page).
 * Each soft guard page is unprotected to give the error handler
 * headroom, the adjacent return-guard page is armed to re-protect it
 * later, and control is diverted to the matching Lisp error function.
 * Hard guard pages are unrecoverable and lose().  NOTE(review): the
 * listing has dropped return statements/braces between branches. */
1481 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1483 struct thread *th=arch_os_get_current_thread();
1485 if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
1486 addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1487 lose("Control stack exhausted");
1489 else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1490 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1491 /* We hit the end of the control stack: disable guard page
1492 * protection so the error handler has some headroom, protect the
1493 * previous page so that we can catch returns from the guard page
1494 * and restore it. */
1495 protect_control_stack_guard_page(0, NULL);
1496 protect_control_stack_return_guard_page(1, NULL);
1497 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
1499 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1500 /* For the unfortunate case, when the control stack is
1501 * exhausted in a signal handler. */
1502 unblock_signals_in_context_and_maybe_warn(context);
1504 arrange_return_to_lisp_function
1505 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1508 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1509 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1510 /* We're returning from the guard page: reprotect it, and
1511 * unprotect this one. This works even if we somehow missed
1512 * the return-guard-page, and hit it on our way to new
1513 * exhaustion instead. */
1514 protect_control_stack_guard_page(1, NULL);
1515 protect_control_stack_return_guard_page(0, NULL);
1516 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
/* Binding stack: same three-page scheme as the control stack. */
1519 else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
1520 addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1521 lose("Binding stack exhausted");
1523 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1524 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1525 protect_binding_stack_guard_page(0, NULL);
1526 protect_binding_stack_return_guard_page(1, NULL);
1527 fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
1529 /* For the unfortunate case, when the binding stack is
1530 * exhausted in a signal handler. */
1531 unblock_signals_in_context_and_maybe_warn(context);
1532 arrange_return_to_lisp_function
1533 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1536 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1537 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1538 protect_binding_stack_guard_page(1, NULL);
1539 protect_binding_stack_return_guard_page(0, NULL);
1540 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
/* Alien stack: same three-page scheme again. */
1543 else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
1544 addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1545 lose("Alien stack exhausted");
1547 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1548 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1549 protect_alien_stack_guard_page(0, NULL);
1550 protect_alien_stack_return_guard_page(1, NULL);
1551 fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
1553 /* For the unfortunate case, when the alien stack is
1554 * exhausted in a signal handler. */
1555 unblock_signals_in_context_and_maybe_warn(context);
1556 arrange_return_to_lisp_function
1557 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1560 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1561 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1562 protect_alien_stack_guard_page(1, NULL);
1563 protect_alien_stack_return_guard_page(0, NULL);
1564 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
/* Read of the poisoned page backing undefined alien variables. */
1567 else if (addr >= undefined_alien_address &&
1568 addr < undefined_alien_address + os_vm_page_size) {
1569 arrange_return_to_lisp_function
1570 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1577 * noise to install handlers
1580 #ifndef LISP_FEATURE_WIN32
1581 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1582 * they are blocked, in Linux 2.6 the default handler is invoked
1583 * instead that usually coredumps. One might hastily think that adding
1584 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified
1585 * then the whole sa_mask is ignored, instead of merely not adding
1586 * the signal in question to the mask. That means if it's not blockable
1587 * the signal must be unblocked at the beginning of signal handlers.
1589 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1590 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1591 * will be unblocked in the sigmask during the signal handler. -- RMK
1594 static volatile int sigaction_nodefer_works = -1;
1596 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1597 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1600 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1604 get_current_sigmask(¤t);
1605 /* There should be exactly two blocked signals: the two we added
1606 * to sa_mask when setting up the handler. NetBSD doesn't block
1607 * the signal we're handling when SA_NODEFER is set; Linux before
1608 * 2.6.13 or so also doesn't block the other signal when
1609 * SA_NODEFER is set. */
1610 for(i = 1; i < NSIG; i++)
1611 if (sigismember(¤t, i) !=
1612 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1613 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1614 sigaction_nodefer_works = 0;
1616 if (sigaction_nodefer_works == -1)
1617 sigaction_nodefer_works = 1;
/* Run once at startup: install sigaction_nodefer_test_handler with
 * SA_NODEFER, send ourselves the test signal, and spin until the
 * handler has set sigaction_nodefer_works (volatile, -1 = unknown). */
1621 see_if_sigaction_nodefer_works(void)
1623 struct sigaction sa, old_sa;
1625 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1626 sa.sa_sigaction = sigaction_nodefer_test_handler;
1627 sigemptyset(&sa.sa_mask);
1628 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1629 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1630 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1631 /* Make sure no signals are blocked. */
1634 sigemptyset(&empty);
1635 thread_sigmask(SIG_SETMASK, &empty, 0);
1637 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* Busy-wait on the volatile flag set by the test handler. */
1638 while (sigaction_nodefer_works == -1);
1639 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1642 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1643 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Outer handler used when SA_NODEFER is broken (see comment above):
 * explicitly unblock the signal being handled, then proceed as
 * interrupt_handle_now() would. */
1646 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1648 SAVE_ERRNO(signal,context,void_context);
1651 sigemptyset(&unblock);
1652 sigaddset(&unblock, signal);
1653 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1654 interrupt_handle_now(signal, info, context);
/* As unblock_me_trampoline(), but dispatches to the C-level handler
 * in interrupt_low_level_handlers[] instead of interrupt_handle_now(). */
1659 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1661 SAVE_ERRNO(signal,context,void_context);
1664 sigemptyset(&unblock);
1665 sigaddset(&unblock, signal);
1666 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1667 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Plain outer handler for C-level signals when neither deferral nor
 * the unblock trampoline is needed: just save errno and dispatch. */
1672 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1674 SAVE_ERRNO(signal,context,void_context);
1675 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Install `handler' as the C-level (low-level) handler for `signal',
 * choosing the appropriate outer trampoline based on whether the
 * signal is deferrable and whether SA_NODEFER works on this system.
 * Records the handler in interrupt_low_level_handlers[] (0 for SIG_DFL)
 * so install_handler() knows not to override it. */
1680 undoably_install_low_level_interrupt_handler (int signal,
1681 interrupt_handler_t handler)
1683 struct sigaction sa;
1685 if (0 > signal || signal >= NSIG) {
1686 lose("bad signal number %d\n", signal);
1689 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1690 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1691 else if (sigismember(&deferrable_sigset,signal))
1692 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1693 else if (!sigaction_nodefer_works &&
1694 !sigismember(&blockable_sigset, signal))
1695 sa.sa_sigaction = low_level_unblock_me_trampoline;
1697 sa.sa_sigaction = low_level_handle_now_handler;
1699 sigcopyset(&sa.sa_mask, &blockable_sigset);
1700 sa.sa_flags = SA_SIGINFO | SA_RESTART
1701 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1702 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Memory faults may arrive with the control (C) stack exhausted, so
 * they must run on the alternate signal stack. */
1703 if((signal==SIG_MEMORY_FAULT))
1704 sa.sa_flags |= SA_ONSTACK;
1707 sigaction(signal, &sa, NULL);
1708 interrupt_low_level_handlers[signal] =
1709 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1713 /* This is called from Lisp. */
/* Register a Lisp-level handler for `signal'.  If no low-level C
 * handler claims the signal, also (re)install the appropriate outer
 * UNIX handler; either way the previous Lisp handler is returned
 * (as a lispobj) so the caller can restore it.  Runs with blockable
 * signals masked to make the handler-table update atomic. */
1715 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*))
1717 #ifndef LISP_FEATURE_WIN32
1718 struct sigaction sa;
1720 union interrupt_handler oldhandler;
1722 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1724 block_blockable_signals(0, &old);
1726 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1727 (unsigned int)interrupt_low_level_handlers[signal]));
1728 if (interrupt_low_level_handlers[signal]==0) {
1729 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1730 ARE_SAME_HANDLER(handler, SIG_IGN))
1731 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1732 else if (sigismember(&deferrable_sigset, signal))
1733 sa.sa_sigaction = maybe_now_maybe_later;
1734 else if (!sigaction_nodefer_works &&
1735 !sigismember(&blockable_sigset, signal))
1736 sa.sa_sigaction = unblock_me_trampoline;
1738 sa.sa_sigaction = interrupt_handle_now_handler;
1740 sigcopyset(&sa.sa_mask, &blockable_sigset);
1741 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1742 (sigaction_nodefer_works ? SA_NODEFER : 0);
1743 sigaction(signal, &sa, NULL);
1746 oldhandler = interrupt_handlers[signal];
1747 interrupt_handlers[signal].c = handler;
1749 thread_sigmask(SIG_SETMASK, &old, 0);
1751 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1753 return (unsigned long)oldhandler.lisp;
1755 /* Probably-wrong Win32 hack */
1760 /* This must not go through lisp as it's allowed anytime, even when on
/* Low-level SIGABRT handler: abort immediately via lose(). */
1763 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
1765 lose("SIGABRT received.\n");
/* One-time runtime startup: probe SA_NODEFER behaviour, build the
 * deferrable/blockable/gc signal sets, default every Lisp-level
 * handler slot to SIG_DFL, and claim SIGABRT at the C level. */
1769 interrupt_init(void)
1771 #ifndef LISP_FEATURE_WIN32
1773 SHOW("entering interrupt_init()");
1774 see_if_sigaction_nodefer_works();
1775 sigemptyset(&deferrable_sigset);
1776 sigemptyset(&blockable_sigset);
1777 sigemptyset(&gc_sigset);
1778 sigaddset_deferrable(&deferrable_sigset);
1779 sigaddset_blockable(&blockable_sigset);
1780 sigaddset_gc(&gc_sigset);
1782 /* Set up high level handler information. */
1783 for (i = 0; i < NSIG; i++) {
1784 interrupt_handlers[i].c =
1785 /* (The cast here blasts away the distinction between
1786 * SA_SIGACTION-style three-argument handlers and
1787 * signal(..)-style one-argument handlers, which is OK
1788 * because it works to call the 1-argument form where the
1789 * 3-argument form is expected.) */
1790 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
1792 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1793 SHOW("returning from interrupt_init()");
1797 #ifndef LISP_FEATURE_WIN32
/* Accessor so Lisp code can read si_code without knowing siginfo_t's layout. */
1799 siginfo_code(siginfo_t *info)
1801 return info->si_code;
1803 os_vm_address_t current_memory_fault_address;
/* Report a memory fault at `addr' to Lisp: record the address in
 * current_memory_fault_address, warn, then raise MEMORY-FAULT-ERROR —
 * by frobbing the context where C and control stacks are shared,
 * otherwise by a direct funcall. */
1806 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1808 /* FIXME: This is lossy: if we get another memory fault (eg. from
1809 * another thread) before lisp has read this, we lose the information.
1810 * However, since this is mostly informative, we'll live with that for
1811 * now -- some address is better than no address in this case.
1813 current_memory_fault_address = addr;
1814 /* To allow debugging memory faults in signal handlers and such. */
1815 corruption_warning_and_maybe_lose("Memory fault at %x (pc=%p, sp=%p)",
1817 *os_context_pc_addr(context),
1818 #ifdef ARCH_HAS_STACK_POINTER
1819 *os_context_sp_addr(context)
1824 unblock_signals_in_context_and_maybe_warn(context);
1825 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1826 arrange_return_to_lisp_function(context,
1827 StaticSymbolFunction(MEMORY_FAULT_ERROR));
1829 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Last-resort path for a trap no case in handle_trap() recognizes:
 * hand the raw context to Lisp's UNHANDLED-TRAP-ERROR.  Does not
 * return; lose() if the funcall somehow comes back. */
1835 unhandled_trap_error(os_context_t *context)
1837 lispobj context_sap;
1838 fake_foreign_function_call(context);
1839 unblock_gc_signals(0, 0);
/* Wrap the context pointer as a SAP so Lisp can inspect it. */
1840 context_sap = alloc_sap(context);
1841 #ifndef LISP_FEATURE_WIN32
1842 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1844 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1845 lose("UNHANDLED-TRAP-ERROR fell through");
1848 /* Common logic for trapping instructions. How we actually handle each
1849 * case is highly architecture dependent, but the overall shape is
1852 handle_trap(os_context_t *context, int trap)
1855 case trap_PendingInterrupt:
1856 FSHOW((stderr, "/<trap pending interrupt>\n"));
1857 arch_skip_instruction(context);
1858 interrupt_handle_pending(context);
1862 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1863 interrupt_internal_error(context, trap==trap_Cerror);
1865 case trap_Breakpoint:
1866 arch_handle_breakpoint(context);
1868 case trap_FunEndBreakpoint:
1869 arch_handle_fun_end_breakpoint(context);
1871 #ifdef trap_AfterBreakpoint
1872 case trap_AfterBreakpoint:
1873 arch_handle_after_breakpoint(context);
1876 #ifdef trap_SingleStepAround
1877 case trap_SingleStepAround:
1878 case trap_SingleStepBefore:
1879 arch_handle_single_step_trap(context, trap);
1883 fake_foreign_function_call(context);
1884 lose("%%PRIMITIVE HALT called; the party is over.\n");
1886 unhandled_trap_error(context);