2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handle we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* Under Linux on some architectures, we appear to have to restore the
72 * FPU control word from the context, as after the signal is delivered
73 * we appear to have a null FPU control word. */
74 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
75 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
76 os_context_t *context = arch_os_get_context(&void_context); \
77 os_restore_fp_control(context);
/* NOTE(review): the #else that should separate the two definitions of
 * RESTORE_FP_CONTROL_WORD (original line 78) is missing from this copy
 * of the file — restore it before building. The fallback below merely
 * fetches the context without touching the FP control word. */
79 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
80 os_context_t *context = arch_os_get_context(&void_context);
83 /* These are to be used in signal handlers. Currently all handlers are
86 * interrupt_handle_now_handler
87 * maybe_now_maybe_later
88 * unblock_me_trampoline
89 * low_level_handle_now_handler
90 * low_level_maybe_now_maybe_later
91 * low_level_unblock_me_trampoline
93 * This gives us a single point of control (or six) over errno, fp
94 * control word, and fixing up signal context on sparc.
96 * The SPARC/Linux platform doesn't quite do signals the way we want
97 * them done. The third argument in the handler isn't filled in by the
98 * kernel properly, so we fix it up ourselves in the
99 * arch_os_get_context(..) function. -- CSR, 2002-07-23
/* SAVE_ERRNO is used on entry to every outermost signal handler: it
 * saves errno (and, where configured, restores the FP control word and
 * fixes up the signal context) so the interrupted code never observes
 * a clobbered errno.  RESTORE_ERRNO undoes it on handler exit.
 * NOTE(review): the brace/continuation lines of these macros (original
 * lines 102, 105-106, 108, 110-111) are missing from this copy. */
101 #define SAVE_ERRNO(context,void_context) \
103 int _saved_errno = errno; \
104 RESTORE_FP_CONTROL_WORD(context,void_context); \
107 #define RESTORE_ERRNO \
109 errno = _saved_errno; \
/* Forward declarations for the deferred-interrupt machinery below.
 * NOTE(review): the siginfo_t parameter line of the second prototype
 * (original line 117) and the matching #endif are missing here. */
112 static void run_deferred_handler(struct interrupt_data *data,
113 os_context_t *context);
114 #ifndef LISP_FEATURE_WIN32
115 static void store_signal_data_for_later (struct interrupt_data *data,
116 void *handler, int signal,
118 os_context_t *context);
121 /* Generic signal related utilities. */
/* Fetch the thread's current signal mask into *sigset without changing
 * it: blocking the empty set is the portable way to read the mask. */
124 get_current_sigmask(sigset_t *sigset)
126 /* Get the current sigmask, by blocking the empty set. */
127 thread_sigmask(SIG_BLOCK, 0, sigset);
/* Add the signals in *what to the mask *where (presumably only when a
 * non-NULL *where is supplied — the if/else lines are not visible in
 * this copy), otherwise block them in the thread's own mask via
 * thread_sigmask.  *old, when given, receives the previous mask.
 * NOTE(review): the loop body line (sigaddset, original lines 139-141)
 * is missing from this copy. */
131 block_signals(sigset_t *what, sigset_t *where, sigset_t *old)
136 sigcopyset(old, where);
137 for(i = 1; i < NSIG; i++) {
138 if (sigismember(what, i))
142 thread_sigmask(SIG_BLOCK, what, old);
/* Mirror image of block_signals: remove the signals in *what from the
 * target mask (or SIG_UNBLOCK them on the thread).
 * NOTE(review): the loop body line (original lines 155-157) is missing
 * from this copy. */
147 unblock_signals(sigset_t *what, sigset_t *where, sigset_t *old)
152 sigcopyset(old, where);
153 for(i = 1; i < NSIG; i++) {
154 if (sigismember(what, i))
158 thread_sigmask(SIG_UNBLOCK, what, old);
/* Debugging aid: print every signal that is a member of *sigset. */
163 print_sigset(sigset_t *sigset)
166 for(i = 1; i < NSIG; i++) {
167 if (sigismember(sigset, i))
168 fprintf(stderr, "Signal %d masked\n", i);
172 /* Return 1 is all signals is sigset2 are masked in sigset, return 0
173 * if all re unmasked else die. Passing NULL for sigset is a shorthand
174 * for the current sigmask. */
176 all_signals_blocked_p(sigset_t *sigset, sigset_t *sigset2,
179 #if !defined(LISP_FEATURE_WIN32)
181 boolean has_blocked = 0, has_unblocked = 0;
184 get_current_sigmask(¤t);
187 for(i = 1; i < NSIG; i++) {
188 if (sigismember(sigset2, i)) {
189 if (sigismember(sigset, i))
195 if (has_blocked && has_unblocked) {
196 print_sigset(sigset);
197 lose("some %s signals blocked, some unblocked\n", name);
207 /* Deferrables, blockables, gc signals. */
/* Add to *s every asynchronous signal whose Lisp-level handling may be
 * deferred (e.g. while interrupts are disabled or inside pseudo-atomic).
 * NOTE(review): the function-header/brace lines and the #endif closing
 * the LISP_FEATURE_HPUX conditional are missing from this copy. */
210 sigaddset_deferrable(sigset_t *s)
212 sigaddset(s, SIGHUP);
213 sigaddset(s, SIGINT);
214 sigaddset(s, SIGTERM);
215 sigaddset(s, SIGQUIT);
216 sigaddset(s, SIGPIPE);
217 sigaddset(s, SIGALRM);
218 sigaddset(s, SIGURG);
219 sigaddset(s, SIGTSTP);
220 sigaddset(s, SIGCHLD);
222 #ifndef LISP_FEATURE_HPUX
223 sigaddset(s, SIGXCPU);
224 sigaddset(s, SIGXFSZ);
226 sigaddset(s, SIGVTALRM);
227 sigaddset(s, SIGPROF);
228 sigaddset(s, SIGWINCH);
/* Blockable = deferrable plus the gc signals. */
232 sigaddset_blockable(sigset_t *sigset)
234 sigaddset_deferrable(sigset);
235 sigaddset_gc(sigset);
/* GC signals: with threads, SIG_STOP_FOR_GC is used to stop the world. */
239 sigaddset_gc(sigset_t *sigset)
241 #ifdef LISP_FEATURE_SB_THREAD
242 sigaddset(sigset,SIG_STOP_FOR_GC);
246 /* initialized in interrupt_init */
247 sigset_t deferrable_sigset;
248 sigset_t blockable_sigset;
/* NOTE(review): the declaration of gc_sigset (referenced below) is
 * missing from this copy — it belongs here. */
/* Predicates and invariant checks over the three signal classes.  Each
 * *_blocked_p returns whether the whole class is masked in *sigset
 * (NULL = current mask), dying on a partial mask; each check_*_or_lose
 * asserts the expected state.  All are no-ops on Win32.
 * NOTE(review): function-header/brace/#endif lines are missing
 * throughout this copy (original numbering is non-contiguous). */
253 #if !defined(LISP_FEATURE_WIN32)
255 deferrables_blocked_p(sigset_t *sigset)
257 return all_signals_blocked_p(sigset, &deferrable_sigset, "deferrable");
/* Die if any deferrable signal is blocked in *sigset. */
262 check_deferrables_unblocked_or_lose(sigset_t *sigset)
264 #if !defined(LISP_FEATURE_WIN32)
265 if (deferrables_blocked_p(sigset))
266 lose("deferrables blocked\n");
/* Die unless all deferrable signals are blocked in *sigset. */
271 check_deferrables_blocked_or_lose(sigset_t *sigset)
273 #if !defined(LISP_FEATURE_WIN32)
274 if (!deferrables_blocked_p(sigset))
275 lose("deferrables unblocked\n");
279 #if !defined(LISP_FEATURE_WIN32)
281 blockables_blocked_p(sigset_t *sigset)
283 return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
/* Die if any blockable signal is blocked in *sigset. */
288 check_blockables_unblocked_or_lose(sigset_t *sigset)
290 #if !defined(LISP_FEATURE_WIN32)
291 if (blockables_blocked_p(sigset))
292 lose("blockables blocked\n");
/* Die unless all blockable signals are blocked in *sigset. */
297 check_blockables_blocked_or_lose(sigset_t *sigset)
299 #if !defined(LISP_FEATURE_WIN32)
300 if (!blockables_blocked_p(sigset))
301 lose("blockables unblocked\n");
305 #if !defined(LISP_FEATURE_WIN32)
307 gc_signals_blocked_p(sigset_t *sigset)
309 return all_signals_blocked_p(sigset, &gc_sigset, "gc");
/* Die if any gc signal is blocked in *sigset. */
314 check_gc_signals_unblocked_or_lose(sigset_t *sigset)
316 #if !defined(LISP_FEATURE_WIN32)
317 if (gc_signals_blocked_p(sigset))
318 lose("gc signals blocked\n");
/* Die unless all gc signals are blocked in *sigset. */
323 check_gc_signals_blocked_or_lose(sigset_t *sigset)
325 #if !defined(LISP_FEATURE_WIN32)
326 if (!gc_signals_blocked_p(sigset))
327 lose("gc signals unblocked\n");
/* Convenience wrappers: block or unblock a whole signal class, either
 * in the mask *where or (when where is NULL, per block_signals) in the
 * thread's own mask.  *old receives the previous mask when non-NULL.
 * All are no-ops on Win32. */
332 block_deferrable_signals(sigset_t *where, sigset_t *old)
334 #ifndef LISP_FEATURE_WIN32
335 block_signals(&deferrable_sigset, where, old);
340 block_blockable_signals(sigset_t *where, sigset_t *old)
342 #ifndef LISP_FEATURE_WIN32
343 block_signals(&blockable_sigset, where, old);
348 block_gc_signals(sigset_t *where, sigset_t *old)
350 #ifndef LISP_FEATURE_WIN32
351 block_signals(&gc_sigset, where, old);
/* Unblocking deferrables while an interrupt handler is pending would
 * let the deferred interrupt run at an unsafe point — refuse.  GC
 * signals must already be unblocked here, or masks got corrupted. */
356 unblock_deferrable_signals(sigset_t *where, sigset_t *old)
358 #ifndef LISP_FEATURE_WIN32
359 if (interrupt_handler_pending_p())
360 lose("unblock_deferrable_signals: losing proposition\n");
361 check_gc_signals_unblocked_or_lose(where);
362 unblock_signals(&deferrable_sigset, where, old);
367 unblock_blockable_signals(sigset_t *where, sigset_t *old)
369 #ifndef LISP_FEATURE_WIN32
370 unblock_signals(&blockable_sigset, where, old);
375 unblock_gc_signals(sigset_t *where, sigset_t *old)
377 #ifndef LISP_FEATURE_WIN32
378 unblock_signals(&gc_sigset, where, old);
/* Sanitize the signal mask stored in *context before returning to Lisp:
 * if gc signals are blocked there (which should only ever happen inside
 * uninterruptible handlers), warn loudly and unblock them; likewise
 * unblock deferrables when no interrupt handler is pending. */
383 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
385 #ifndef LISP_FEATURE_WIN32
386 sigset_t *sigset = os_context_sigmask_addr(context);
387 if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
388 corruption_warning_and_maybe_lose(
389 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
390 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
391 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
392 unblock_gc_signals(sigset, 0);
394 if (!interrupt_handler_pending_p()) {
395 unblock_deferrable_signals(sigset, 0);
/* Die unless the Lisp-level *INTERRUPTS-ENABLED* is true and we are not
 * inside a pseudo-atomic section — i.e. unless it is currently legal to
 * run a Lisp-visible interrupt in this context. */
402 check_interrupts_enabled_or_lose(os_context_t *context)
404 struct thread *thread=arch_os_get_current_thread();
405 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
406 lose("interrupts not enabled\n");
407 if (arch_pseudo_atomic_atomic(context))
408 lose ("in pseudo atomic section\n");
411 /* Save sigset (or the current sigmask if 0) if there is no pending
412 * handler, because that means that deferrables are already blocked.
413 * The purpose is to avoid losing the pending gc signal if a
414 * deferrable interrupt async unwinds between clearing the pseudo
415 * atomic and trapping to GC. */
/* Record the pre-GC signal mask in data->pending_mask and arrange for
 * deferrables to be blocked until interrupt_handle_pending restores it
 * (see the comment above).  The two arms handle a caller-supplied
 * context sigmask vs. the thread's own mask.
 * NOTE(review): the if (sigset) / else structure and the oldset
 * declaration are among the lines missing from this copy. */
417 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
419 #ifndef LISP_FEATURE_WIN32
420 struct thread *thread = arch_os_get_current_thread();
421 struct interrupt_data *data = thread->interrupt_data;
423 /* Obviously, this function is called when signals may not be
424 * blocked. Let's make sure we are not interrupted. */
425 block_blockable_signals(0, &oldset);
426 #ifndef LISP_FEATURE_SB_THREAD
/* Without threads only one party can be saving the mask at a time, so
 * finding it already saved is a bug. */
427 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
429 if (data->gc_blocked_deferrables)
430 lose("gc_blocked_deferrables already true\n");
/* Only the first saver wins: a pending handler means deferrables are
 * already blocked and the mask already stashed. */
432 if ((!data->pending_handler) &&
433 (!data->gc_blocked_deferrables)) {
434 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
435 data->gc_blocked_deferrables = 1;
/* Caller passed the sigmask of some interrupted context: save it and
 * make that context resume with deferrables blocked. */
437 /* This is the sigmask of some context. */
438 sigcopyset(&data->pending_mask, sigset);
439 sigaddset_deferrable(sigset);
440 thread_sigmask(SIG_SETMASK,&oldset,0);
443 /* Operating on the current sigmask. Save oldset and
444 * unblock gc signals. In the end, this is equivalent to
445 * blocking the deferrables. */
446 sigcopyset(&data->pending_mask, &oldset);
447 thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
451 thread_sigmask(SIG_SETMASK,&oldset,0);
455 /* Are we leaving WITHOUT-GCING and already running with interrupts
456 * enabled, without the protection of *GC-INHIBIT* T and there is gc
457 * (or stop for gc) pending, but we haven't trapped yet? */
/* In that window a signal may be deferred safely because
 * interrupt_handle_pending is guaranteed to run soon anyway. */
459 in_leaving_without_gcing_race_p(struct thread *thread)
461 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
462 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
463 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
464 ((SymbolValue(GC_PENDING,thread) != NIL)
465 #if defined(LISP_FEATURE_SB_THREAD)
466 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
471 /* Check our baroque invariants. */
/* Cross-checks the deferred-interrupt state (pending handler,
 * INTERRUPT-PENDING, pseudo-atomic-interrupted, GC flags) against the
 * signal mask in *context, and lose()s on any inconsistency. */
473 check_interrupt_context_or_lose(os_context_t *context)
475 #ifndef LISP_FEATURE_WIN32
476 struct thread *thread = arch_os_get_current_thread();
477 struct interrupt_data *data = thread->interrupt_data;
478 int interrupt_deferred_p = (data->pending_handler != 0);
479 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
480 sigset_t *sigset = os_context_sigmask_addr(context);
481 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
482 * handle_allocation_trap. */
483 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
484 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
485 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
486 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
487 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
488 int in_race_p = in_leaving_without_gcing_race_p(thread);
489 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
490 * section and trapping, a SIG_STOP_FOR_GC would see the next
491 * check fail, for this reason sig_stop_for_gc handler does not
492 * call this function. */
493 if (interrupt_deferred_p) {
494 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
495 lose("Stray deferred interrupt.\n");
/* NOTE(review): the `if (gc_pending)` guard that should precede the
 * next check (original lines 496-497) is missing from this copy. */
498 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
499 lose("GC_PENDING, but why?\n");
500 #if defined(LISP_FEATURE_SB_THREAD)
502 int stop_for_gc_pending =
503 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
504 if (stop_for_gc_pending)
505 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
506 lose("STOP_FOR_GC_PENDING, but why?\n");
507 if (pseudo_atomic_interrupted)
508 if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
509 lose("pseudo_atomic_interrupted, but why?\n");
512 if (pseudo_atomic_interrupted)
513 if (!(gc_pending || interrupt_deferred_p))
514 lose("pseudo_atomic_interrupted, but why?\n");
517 if (interrupt_pending && !interrupt_deferred_p)
518 lose("INTERRUPT_PENDING but not pending handler.\n");
519 if ((data->gc_blocked_deferrables) && interrupt_pending)
/* NOTE(review): the "\n." at the end of this message looks transposed
 * — presumably ".\n" was intended. */
520 lose("gc_blocked_deferrables and interrupt pending\n.");
521 if (data->gc_blocked_deferrables)
522 check_deferrables_blocked_or_lose(sigset);
/* Deferrables must be blocked whenever a deferred interrupt exists. */
523 if (interrupt_pending || interrupt_deferred_p ||
524 data->gc_blocked_deferrables)
525 check_deferrables_blocked_or_lose(sigset);
527 check_deferrables_unblocked_or_lose(sigset);
528 /* If deferrables are unblocked then we are open to signals
529 * that run lisp code. */
530 check_gc_signals_unblocked_or_lose(sigset);
535 /* When we catch an internal error, should we pass it back to Lisp to
536 * be handled in a high-level way? (Early in cold init, the answer is
537 * 'no', because Lisp is still too brain-dead to handle anything.
538 * After sufficient initialization has been completed, the answer
 * becomes 'yes'.) */
540 boolean internal_errors_enabled = 0;
542 #ifndef LISP_FEATURE_WIN32
/* Outermost C-level handlers for signals that need special treatment
 * (GC write barrier, SIGTRAP, ...): bypass the Lisp dispatch entirely. */
544 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
/* Per-signal Lisp-or-C handlers installed from Lisp (see file header). */
546 union interrupt_handler interrupt_handlers[NSIG];
550 /* utility routines used by various signal handlers */
/* On non-C-stack platforms, synthesize Lisp control-stack frame(s) on
 * top of the interrupted frame so the debugger/GC can walk the stack
 * across the signal boundary.
 * NOTE(review): the function header, local declarations and several
 * closing braces are missing from this copy (numbering 554->556->560). */
554 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
556 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
560 /* Build a fake stack frame or frames */
562 current_control_frame_pointer =
563 (lispobj *)(unsigned long)
564 (*os_context_register_addr(context, reg_CSP));
565 if ((lispobj *)(unsigned long)
566 (*os_context_register_addr(context, reg_CFP))
567 == current_control_frame_pointer) {
568 /* There is a small window during call where the callee's
569 * frame isn't built yet. */
570 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
571 == FUN_POINTER_LOWTAG) {
572 /* We have called, but not built the new frame, so
573 * build it for them. */
574 current_control_frame_pointer[0] =
575 *os_context_register_addr(context, reg_OCFP);
576 current_control_frame_pointer[1] =
577 *os_context_register_addr(context, reg_LRA);
578 current_control_frame_pointer += 8;
579 /* Build our frame on top of it. */
580 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
583 /* We haven't yet called, build our frame as if the
584 * partial frame wasn't there. */
585 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
588 /* We can't tell whether we are still in the caller if it had to
589 * allocate a stack frame due to stack arguments. */
590 /* This observation provoked some past CMUCL maintainer to ask
591 * "Can anything strange happen during return?" */
/* Normal case: CSP != CFP, so just chain to the interrupted frame. */
594 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
/* Reserve the fake frame and fill in old-FP / LRA / code slots. */
597 current_control_stack_pointer = current_control_frame_pointer + 8;
599 current_control_frame_pointer[0] = oldcont;
600 current_control_frame_pointer[1] = NIL;
601 current_control_frame_pointer[2] =
602 (lispobj)(*os_context_register_addr(context, reg_CODE));
606 /* Stores the context for gc to scavenge and builds fake stack
 * frames so the interrupt context looks like an ordinary foreign call. */
609 fake_foreign_function_call(os_context_t *context)
612 struct thread *thread=arch_os_get_current_thread();
614 /* context_index incrementing must not be interrupted */
615 check_blockables_blocked_or_lose(0);
617 /* Get current Lisp state from context. */
619 dynamic_space_free_pointer =
620 (lispobj *)(unsigned long)
621 (*os_context_register_addr(context, reg_ALLOC));
622 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
623 /* dynamic_space_free_pointer); */
/* Sanity-check alignment of the allocation pointer on platforms that
 * stash pseudo-atomic flag bits in its low bits. */
624 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
625 if ((long)dynamic_space_free_pointer & 1) {
626 lose("dead in fake_foreign_function_call, context = %x\n", context);
629 /* why don't PPC and SPARC do something like this: */
630 #if defined(LISP_FEATURE_HPPA)
631 if ((long)dynamic_space_free_pointer & 4) {
632 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
637 current_binding_stack_pointer =
638 (lispobj *)(unsigned long)
639 (*os_context_register_addr(context, reg_BSP));
642 build_fake_control_stack_frames(thread,context);
644 /* Do dynamic binding of the active interrupt context index
645 * and save the context in the context array. */
/* NOTE(review): the `context_index =` line (original line 646) is
 * missing from this copy. */
647 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
649 if (context_index >= MAX_INTERRUPTS) {
650 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
653 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
654 make_fixnum(context_index + 1),thread);
656 thread->interrupt_contexts[context_index] = context;
/* On platforms with an explicit flag, mark "in foreign code". */
658 #ifdef FOREIGN_FUNCTION_CALL_FLAG
659 foreign_function_call_active = 1;
663 /* blocks all blockable signals. If you are calling from a signal handler,
664 * the usual signal mask will be restored from the context when the handler
665 * finishes. Otherwise, be careful */
/* Reverses fake_foreign_function_call: unwinds the context-index
 * binding and writes the (possibly advanced) allocation pointer back
 * into the interrupted context, preserving its low flag bits. */
667 undo_fake_foreign_function_call(os_context_t *context)
669 struct thread *thread=arch_os_get_current_thread();
670 /* Block all blockable signals. */
671 block_blockable_signals(0, 0);
673 #ifdef FOREIGN_FUNCTION_CALL_FLAG
674 foreign_function_call_active = 0;
677 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
/* NOTE(review): the unbind call itself (original lines 678-680) is
 * missing from this copy. */
681 /* Put the dynamic space free pointer back into the context. */
682 *os_context_register_addr(context, reg_ALLOC) =
683 (unsigned long) dynamic_space_free_pointer
684 | (*os_context_register_addr(context, reg_ALLOC)
687 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
689 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
694 /* a handler for the signal caused by execution of a trap opcode
695 * signalling an internal error */
/* Early in init (internal_errors_enabled false) we can only describe
 * the error and die; otherwise hand a SAP wrapping the context to the
 * Lisp INTERNAL-ERROR function, then (for continuable errors) skip the
 * trap instruction. */
697 interrupt_internal_error(os_context_t *context, boolean continuable)
701 fake_foreign_function_call(context);
703 if (!internal_errors_enabled) {
704 describe_internal_error(context);
705 /* There's no good way to recover from an internal error
706 * before the Lisp error handling mechanism is set up. */
707 lose("internal error too early in init, can't recover\n");
710 /* Allocate the SAP object while the interrupts are still
 * disabled (allocation must not be interrupted here). */
712 unblock_gc_signals(0, 0);
713 context_sap = alloc_sap(context);
/* Restore the interrupted code's signal mask before calling Lisp. */
715 #ifndef LISP_FEATURE_WIN32
716 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
719 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
720 /* Workaround for blocked SIGTRAP. */
723 sigemptyset(&newset);
724 sigaddset(&newset, SIGTRAP);
725 thread_sigmask(SIG_UNBLOCK, &newset, 0);
729 SHOW("in interrupt_internal_error");
731 /* Display some rudimentary debugging information about the
732 * error, so that even if the Lisp error handler gets badly
733 * confused, we have a chance to determine what's going on. */
734 describe_internal_error(context);
736 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
737 continuable ? T : NIL);
739 undo_fake_foreign_function_call(context); /* blocks signals again */
/* NOTE(review): an `if (continuable)` guard (original line 740) appears
 * to be missing before this skip in this copy — verify. */
741 arch_skip_instruction(context);
/* Is a deferred (stashed) interrupt handler waiting to run on this
 * thread? */
745 interrupt_handler_pending_p(void)
747 struct thread *thread = arch_os_get_current_thread();
748 struct interrupt_data *data = thread->interrupt_data;
749 return (data->pending_handler != 0);
/* Run whatever was deferred during a critical section: restore a
 * GC-saved signal mask, perform pending (stop-for-)GC, then run any
 * deferred Lisp interrupt handler.  Must be entered with blockables
 * blocked and outside pseudo-atomic. */
753 interrupt_handle_pending(os_context_t *context)
755 /* There are three ways we can get here. First, if an interrupt
756 * occurs within pseudo-atomic, it will be deferred, and we'll
757 * trap to here at the end of the pseudo-atomic block. Second, if
758 * the GC (in alloc()) decides that a GC is required, it will set
759 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
760 * and alloc() is always called from within pseudo-atomic, and
761 * thus we end up here again. Third, when calling GC-ON or at the
762 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
763 * here if there is a pending GC. Fourth, ahem, at the end of
764 * WITHOUT-INTERRUPTS (bar complications with nesting). */
766 /* Win32 only needs to handle the GC cases (for now?) */
768 struct thread *thread = arch_os_get_current_thread();
769 struct interrupt_data *data = thread->interrupt_data;
771 if (arch_pseudo_atomic_atomic(context)) {
772 lose("Handling pending interrupt in pseudo atomic.");
775 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
777 check_blockables_blocked_or_lose(0);
779 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
780 * handler, then the pending mask was saved and
781 * gc_blocked_deferrables set. Hence, there can be no pending
782 * handler and it's safe to restore the pending mask.
784 * Note, that if gc_blocked_deferrables is false we may still have
785 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
786 * pseudo atomic was interrupted by a deferrable first. */
787 if (data->gc_blocked_deferrables) {
788 if (data->pending_handler)
789 lose("GC blocked deferrables but still got a pending handler.");
790 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
791 lose("GC blocked deferrables while GC is inhibited.");
792 /* Restore the saved signal mask from the original signal (the
793 * one that interrupted us during the critical section) into
794 * the os_context for the signal we're currently in the
795 * handler for. This should ensure that when we return from
796 * the handler the blocked signals are unblocked. */
797 #ifndef LISP_FEATURE_WIN32
798 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
800 data->gc_blocked_deferrables = 0;
803 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
804 void *original_pending_handler = data->pending_handler;
806 #ifdef LISP_FEATURE_SB_THREAD
807 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
808 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
809 * the signal handler if it actually stops us. */
810 arch_clear_pseudo_atomic_interrupted(context);
811 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
814 /* Test for T and not for != NIL since the value :IN-PROGRESS
815 * is used in SUB-GC as part of the mechanism to suppress
 * recursive gcs. */
817 if (SymbolValue(GC_PENDING,thread) == T) {
819 /* Two reasons for doing this. First, if there is a
820 * pending handler we don't want to run. Second, we are
821 * going to clear pseudo atomic interrupted to avoid
822 * spurious trapping on every allocation in SUB_GC and
823 * having a pending handler with interrupts enabled and
824 * without pseudo atomic interrupted breaks an
 * invariant (see check_interrupt_context_or_lose). */
826 if (data->pending_handler) {
827 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
828 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
831 arch_clear_pseudo_atomic_interrupted(context);
833 /* GC_PENDING is cleared in SUB-GC, or if another thread
834 * is doing a gc already we will get a SIG_STOP_FOR_GC and
835 * that will clear it.
837 * If there is a pending handler or gc was triggered in a
838 * signal handler then maybe_gc won't run POST_GC and will
839 * return normally. */
840 if (!maybe_gc(context))
841 lose("GC not inhibited but maybe_gc did not GC.");
/* NOTE(review): the unbind calls that should follow here (original
 * lines 844-846) are missing from this copy. */
843 if (data->pending_handler) {
847 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
848 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
849 * GC-PENDING is not NIL then we cannot trap on pseudo
850 * atomic due to GC (see if(GC_PENDING) logic in
851 * cheneygc.c and gencgc.c), plus there is an outer
852 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
 * here? */
854 lose("Trapping to run pending handler while GC in progress.");
857 check_blockables_blocked_or_lose(0);
859 /* No GC shall be lost. If SUB_GC triggers another GC then
860 * that should be handled on the spot. */
861 if (SymbolValue(GC_PENDING,thread) != NIL)
862 lose("GC_PENDING after doing gc.");
863 #ifdef LISP_FEATURE_SB_THREAD
864 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
865 lose("STOP_FOR_GC_PENDING after doing gc.");
867 /* Check two things. First, that gc does not clobber a handler
868 * that's already pending. Second, that there is no interrupt
869 * lossage: if original_pending_handler was NULL then even if
870 * an interrupt arrived during GC (POST-GC, really) it was
 * handled. */
872 if (original_pending_handler != data->pending_handler)
/* NOTE(review): '%d' for a pointer argument looks wrong here; '%p' for
 * both arguments would be safer — verify against upstream. */
873 lose("pending handler changed in gc: %x -> %d.",
874 original_pending_handler, data->pending_handler);
877 #ifndef LISP_FEATURE_WIN32
878 /* There may be no pending handler, because it was only a gc that
879 * had to be executed or because Lisp is a bit too eager to call
880 * DO-PENDING-INTERRUPT. */
881 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
882 (data->pending_handler)) {
883 /* No matter how we ended up here, clear both
884 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
885 * because we checked above that there is no GC pending. */
886 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
887 arch_clear_pseudo_atomic_interrupted(context);
888 /* Restore the sigmask in the context. */
889 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
890 run_deferred_handler(data, context);
893 #ifdef LISP_FEATURE_GENCGC
894 if (get_pseudo_atomic_interrupted(thread))
895 lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
897 /* It is possible that the end of this function was reached
898 * without ever actually doing anything, the tests in Lisp for
899 * when to call receive-pending-interrupt are not exact. */
900 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
/* Dispatch a signal to its installed handler right now: either a Lisp
 * function (called with signal number, siginfo SAP and context SAP
 * around a fake foreign call) or a plain C handler.  Entered with
 * blockables blocked. */
905 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
907 #ifdef FOREIGN_FUNCTION_CALL_FLAG
908 boolean were_in_lisp;
910 union interrupt_handler handler;
912 check_blockables_blocked_or_lose(0);
/* Deferrable signals must only arrive with interrupts enabled. */
914 #ifndef LISP_FEATURE_WIN32
915 if (sigismember(&deferrable_sigset,signal))
916 check_interrupts_enabled_or_lose(context);
919 handler = interrupt_handlers[signal];
/* Ignored signal: nothing to do. */
921 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
925 #ifdef FOREIGN_FUNCTION_CALL_FLAG
926 were_in_lisp = !foreign_function_call_active;
/* Interrupted Lisp code: make the context scavengeable first. */
930 fake_foreign_function_call(context);
933 FSHOW_SIGNAL((stderr,
934 "/entering interrupt_handle_now(%d, info, context)\n",
937 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
939 /* This can happen if someone tries to ignore or default one
940 * of the signals we need for runtime support, and the runtime
941 * support decides to pass on it. */
942 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
944 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
945 /* Once we've decided what to do about contexts in a
946 * return-elsewhere world (the original context will no longer
947 * be available; should we copy it or was nobody using it anyway?)
948 * then we should convert this to return-elsewhere */
950 /* CMUCL comment said "Allocate the SAPs while the interrupts
951 * are still disabled.". I (dan, 2003.08.21) assume this is
952 * because we're not in pseudoatomic and allocation shouldn't
953 * be interrupted. In which case it's no longer an issue as
954 * all our allocation from C now goes through a PA wrapper,
955 * but still, doesn't hurt.
957 * Yeah, but non-gencgc platforms don't really wrap allocation
958 * in PA. MG - 2005-08-29 */
960 lispobj info_sap, context_sap;
961 /* Leave deferrable signals blocked, the handler itself will
962 * allow signals again when it sees fit. */
963 unblock_gc_signals(0, 0);
964 context_sap = alloc_sap(context);
965 info_sap = alloc_sap(info);
967 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
969 funcall3(handler.lisp,
974 /* This cannot happen in sane circumstances. */
976 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
978 #ifndef LISP_FEATURE_WIN32
979 /* Allow signals again. */
980 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
982 (*handler.c)(signal, info, context);
985 #ifdef FOREIGN_FUNCTION_CALL_FLAG
989 undo_fake_foreign_function_call(context); /* block signals again */
992 FSHOW_SIGNAL((stderr,
993 "/returning from interrupt_handle_now(%d, info, context)\n",
997 /* This is called at the end of a critical section if the indications
998 * are that some signal was deferred during the section. Note that as
999 * far as C or the kernel is concerned we dealt with the signal
1000 * already; we're just doing the Lisp-level processing now that we
 * put off during the critical section. */
1003 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
1005 /* The pending_handler may enable interrupts and then another
1006 * interrupt may hit, overwrite interrupt_data, so reset the
1007 * pending handler before calling it. Trust the handler to finish
1008 * with the siginfo before enabling interrupts. */
1009 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
1010 data->pending_handler;
1012 data->pending_handler=0;
1013 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
1014 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
1017 #ifndef LISP_FEATURE_WIN32
/* Decide whether HANDLER can run now.  If interrupts are disabled (or
 * we're in the leaving-WITHOUT-GCING race window) stash it and set
 * INTERRUPT-PENDING; if we're pseudo-atomic, stash it and mark
 * pseudo-atomic-interrupted so the PA exit traps.  Returns non-zero
 * when deferred (return statements are among the missing lines). */
1019 maybe_defer_handler(void *handler, struct interrupt_data *data,
1020 int signal, siginfo_t *info, os_context_t *context)
1022 struct thread *thread=arch_os_get_current_thread();
1024 check_blockables_blocked_or_lose(0);
1026 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
1027 lose("interrupt already pending\n");
1028 if (thread->interrupt_data->pending_handler)
1029 lose("there is a pending handler already (PA)\n");
1030 if (data->gc_blocked_deferrables)
1031 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
1032 check_interrupt_context_or_lose(context);
1033 /* If interrupts are disabled then INTERRUPT_PENDING is set and
1034 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
1035 * atomic section inside a WITHOUT-INTERRUPTS.
1037 * Also, if in_leaving_without_gcing_race_p then
1038 * interrupt_handle_pending is going to be called soon, so
1039 * stashing the signal away is safe.
 */
1041 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
1042 in_leaving_without_gcing_race_p(thread)) {
1043 FSHOW_SIGNAL((stderr,
1044 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
1045 (unsigned int)handler,signal,
1046 in_leaving_without_gcing_race_p(thread)));
1047 store_signal_data_for_later(data,handler,signal,info,context);
1048 SetSymbolValue(INTERRUPT_PENDING, T,thread);
1049 check_interrupt_context_or_lose(context);
1052 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1053 * actually use its argument for anything on x86, so this branch
1054 * may succeed even when context is null (gencgc alloc()) */
1055 if (arch_pseudo_atomic_atomic(context)) {
1056 FSHOW_SIGNAL((stderr,
1057 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1058 (unsigned int)handler,signal));
1059 store_signal_data_for_later(data,handler,signal,info,context);
1060 arch_set_pseudo_atomic_interrupted(context);
1061 check_interrupt_context_or_lose(context);
/* Neither condition held: caller should run the handler now. */
1064 FSHOW_SIGNAL((stderr,
1065 "/maybe_defer_handler(%x,%d): not deferred\n",
1066 (unsigned int)handler,signal));
/* Stash HANDLER, SIGNAL and a copy of *info in *data so that
 * run_deferred_handler can replay the signal later; also arrange for
 * the interrupted context to resume with deferrables blocked. */
1071 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1073 siginfo_t *info, os_context_t *context)
1075 if (data->pending_handler)
1076 lose("tried to overwrite pending interrupt handler %x with %x\n",
1077 data->pending_handler, handler);
/* NOTE(review): the `if (!handler)` guard line (original line 1078)
 * preceding this lose() is missing from this copy. */
1079 lose("tried to defer null interrupt handler\n");
1080 data->pending_handler = handler;
1081 data->pending_signal = signal;
/* siginfo may live on the handler's stack — copy it by value. */
1083 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1085 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1089 lose("Null context");
1091 /* the signal mask in the context (from before we were
1092 * interrupted) is copied to be restored when run_deferred_handler
1093 * happens. Then the usually-blocked signals are added to the mask
1094 * in the context so that we are running with blocked signals when
1095 * the handler returns */
1096 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1097 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (UNIX-level) handler for deferrable signals with a Lisp
 * handler: defer the signal if we are pseudo-atomic or interrupts are
 * disabled, otherwise run interrupt_handle_now immediately.  errno is
 * saved by SAVE_ERRNO (and, presumably, restored by the elided
 * RESTORE_ERRNO at the end -- the listing omits it). */
1101 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1103 SAVE_ERRNO(context,void_context);
1104 struct thread *thread = arch_os_get_current_thread();
1105 struct interrupt_data *data = thread->interrupt_data;
1107 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1108 interrupt_handle_now(signal, info, context);
/* Dispatch to the C-level (low-level) handler registered for SIGNAL.
 * Sanity-checks that blockable signals are blocked and that interrupts
 * are enabled before calling through interrupt_low_level_handlers[]. */
1113 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1114 os_context_t *context)
1116 /* No FP control fixage needed, caller has done that. */
1117 check_blockables_blocked_or_lose(0);
1118 check_interrupts_enabled_or_lose(context);
1119 (*interrupt_low_level_handlers[signal])(signal, info, context);
1120 /* No Darwin context fixage needed, caller does that. */
/* Like maybe_now_maybe_later, but for signals with a C-level handler:
 * defer if unsafe, otherwise call low_level_interrupt_handle_now. */
1124 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1126 SAVE_ERRNO(context,void_context);
1127 struct thread *thread = arch_os_get_current_thread();
1128 struct interrupt_data *data = thread->interrupt_data;
1130 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1131 signal,info,context))
1132 low_level_interrupt_handle_now(signal, info, context);
1137 #ifdef LISP_FEATURE_SB_THREAD
1139 /* This function must not cons, because that may trigger a GC. */
/* Handler for SIG_STOP_FOR_GC: suspend this thread so another thread
 * can run the collector.  The stop is deferred while *GC-INHIBIT* is
 * set or while pseudo-atomic; otherwise the thread records its context
 * (so its registers can be scavenged), marks itself SUSPENDED, and
 * blocks until the GC-ing thread sets it back to RUNNING. */
1141 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1143 struct thread *thread=arch_os_get_current_thread();
1145 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1146 * pseudo atomic until gc is finally allowed. */
1147 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1148 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1149 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1151 } else if (arch_pseudo_atomic_atomic(context)) {
1152 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1153 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
/* Trap out of the PA section as soon as it ends, and keep the
 * deferrables blocked until then. */
1154 arch_set_pseudo_atomic_interrupted(context);
1155 maybe_save_gc_mask_and_block_deferrables
1156 (os_context_sigmask_addr(context));
1160 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1162 /* Not PA and GC not inhibited -- we can stop now. */
1164 /* need the context stored so it can have registers scavenged */
1165 fake_foreign_function_call(context);
1167 /* Not pending anymore. */
1168 SetSymbolValue(GC_PENDING,NIL,thread);
1169 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1171 /* Consider this: in a PA section GC is requested: GC_PENDING,
1172 * pseudo_atomic_interrupted and gc_blocked_deferrables are set,
1173 * deferrables are blocked then pseudo_atomic_atomic is cleared,
1174 * but a SIG_STOP_FOR_GC arrives before trapping to
1175 * interrupt_handle_pending. Here, GC_PENDING is cleared but
1176 * pseudo_atomic_interrupted is not and we go on running with
1177 * pseudo_atomic_interrupted but without a pending interrupt or
1178 * GC. GC_BLOCKED_DEFERRABLES is also left at 1. So let's tidy it
1180 if (thread->interrupt_data->gc_blocked_deferrables) {
1181 FSHOW_SIGNAL((stderr,"cleaning up after gc_blocked_deferrables\n"));
1182 clear_pseudo_atomic_interrupted(thread);
1183 sigcopyset(os_context_sigmask_addr(context),
1184 &thread->interrupt_data->pending_mask);
1185 thread->interrupt_data->gc_blocked_deferrables = 0;
1188 if(thread_state(thread)!=STATE_RUNNING) {
1189 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1190 fixnum_value(thread->state));
/* Announce suspension, then sleep until the collector resumes us. */
1193 set_thread_state(thread,STATE_SUSPENDED);
1194 FSHOW_SIGNAL((stderr,"suspended\n"));
1196 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1197 FSHOW_SIGNAL((stderr,"resumed\n"));
1199 if(thread_state(thread)!=STATE_RUNNING) {
1200 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1201 fixnum_value(thread_state(thread)));
1204 undo_fake_foreign_function_call(context);
1210 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1212 SAVE_ERRNO(context,void_context);
1213 #ifndef LISP_FEATURE_WIN32
1214 if ((signal == SIGILL) || (signal == SIGBUS)
1215 #ifndef LISP_FEATURE_LINUX
1216 || (signal == SIGEMT)
1219 corruption_warning_and_maybe_lose("Signal %d recieved", signal);
1221 interrupt_handle_now(signal, info, context);
1225 /* manipulate the signal context and stack such that when the handler
1226 * returns, it will call function instead of whatever it was doing
1230 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1231 extern int *context_eflags_addr(os_context_t *context);
1234 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1235 extern void post_signal_tramp(void);
1236 extern void call_into_lisp_tramp(void);
/* Rewrite CONTEXT so that, when the signal handler returns, execution
 * resumes in the Lisp function FUNCTION instead of at the interrupted
 * pc.  On x86/x86-64 this is done by faking the stack/registers that
 * call_into_lisp expects and pointing the context's pc at it (with a
 * trampoline detour on Darwin/x86); on other architectures it jumps
 * straight to the simple-fun's code with a fake control stack frame.
 * GC signals must be unblocked in the context's sigmask on entry. */
1238 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1240 #ifndef LISP_FEATURE_WIN32
1241 check_gc_signals_unblocked_or_lose
1242 (os_context_sigmask_addr(context));
1244 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1245 void * fun=native_pointer(function);
1246 void *code = &(((struct simple_fun *) fun)->code);
1249 /* Build a stack frame showing `interrupted' so that the
1250 * user's backtrace makes (as much) sense (as usual) */
1252 /* fp state is saved and restored by call_into_lisp */
1253 /* FIXME: errno is not restored, but since current uses of this
1254 * function only call Lisp code that signals an error, it's not
1255 * much of a problem. In other words, running out of the control
1256 * stack between a syscall and (GET-ERRNO) may clobber errno if
1257 * something fails during signalling or in the handler. But I
1258 * can't see what can go wrong as long as there is no CONTINUE
1259 * like restart on them. */
1260 #ifdef LISP_FEATURE_X86
1261 /* Suppose the existence of some function that saved all
1262 * registers, called call_into_lisp, then restored GP registers and
1263 * returned. It would look something like this:
1271 pushl {address of function to call}
1272 call 0x8058db0 <call_into_lisp>
1279 * What we do here is set up the stack that call_into_lisp would
1280 * expect to see if it had been called by this code, and frob the
1281 * signal context so that signal return goes directly to call_into_lisp,
1282 * and when that function (and the lisp function it invoked) returns,
1283 * it returns to the second half of this imaginary function which
1284 * restores all registers and returns to C
1286 * For this to work, the latter part of the imaginary function
1287 * must obviously exist in reality. That would be post_signal_tramp
1290 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1292 #if defined(LISP_FEATURE_DARWIN)
1293 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1295 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1296 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1298 /* 1. os_validate (malloc/mmap) register_save_block
1299 * 2. copy register state into register_save_block
1300 * 3. put a pointer to register_save_block in a register in the context
1301 * 4. set the context's EIP to point to a trampoline which:
1302 * a. builds the fake stack frame from the block
1303 * b. frees the block
1304 * c. calls the function
1307 *register_save_area = *os_context_pc_addr(context);
1308 *(register_save_area + 1) = function;
1309 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1310 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1311 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1312 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1313 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1314 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1315 *(register_save_area + 8) = *context_eflags_addr(context);
1317 *os_context_pc_addr(context) =
1318 (os_context_register_t) call_into_lisp_tramp;
1319 *os_context_register_addr(context,reg_ECX) =
1320 (os_context_register_t) register_save_area;
/* Non-Darwin x86: build the fake call_into_lisp frame directly below
 * the interrupted stack pointer. */
1323 /* return address for call_into_lisp: */
1324 *(sp-15) = (u32)post_signal_tramp;
1325 *(sp-14) = function; /* args for call_into_lisp : function*/
1326 *(sp-13) = 0; /* arg array */
1327 *(sp-12) = 0; /* no. args */
1328 /* this order matches that used in POPAD */
1329 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1330 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1332 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1333 /* POPAD ignores the value of ESP: */
1335 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1337 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1338 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1339 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1340 *(sp-3)=*context_eflags_addr(context);
1341 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1342 *(sp-1)=*os_context_pc_addr(context);
1346 #elif defined(LISP_FEATURE_X86_64)
1347 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1349 /* return address for call_into_lisp: */
1350 *(sp-18) = (u64)post_signal_tramp;
1352 *(sp-17)=*os_context_register_addr(context,reg_R15);
1353 *(sp-16)=*os_context_register_addr(context,reg_R14);
1354 *(sp-15)=*os_context_register_addr(context,reg_R13);
1355 *(sp-14)=*os_context_register_addr(context,reg_R12);
1356 *(sp-13)=*os_context_register_addr(context,reg_R11);
1357 *(sp-12)=*os_context_register_addr(context,reg_R10);
1358 *(sp-11)=*os_context_register_addr(context,reg_R9);
1359 *(sp-10)=*os_context_register_addr(context,reg_R8);
1360 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1361 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1362 /* skip RBP and RSP */
1363 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1364 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1365 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1366 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1367 *(sp-3)=*context_eflags_addr(context);
1368 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1369 *(sp-1)=*os_context_pc_addr(context);
/* x86-64 calling convention: args to call_into_lisp in registers. */
1371 *os_context_register_addr(context,reg_RDI) =
1372 (os_context_register_t)function; /* function */
1373 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1374 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1376 struct thread *th=arch_os_get_current_thread();
1377 build_fake_control_stack_frames(th,context);
1380 #ifdef LISP_FEATURE_X86
1382 #if !defined(LISP_FEATURE_DARWIN)
1383 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1384 *os_context_register_addr(context,reg_ECX) = 0;
1385 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
/* NetBSD exposes the user stack pointer as reg_UESP; others as reg_ESP. */
1387 *os_context_register_addr(context,reg_UESP) =
1388 (os_context_register_t)(sp-15);
1390 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1391 #endif /* __NETBSD__ */
1392 #endif /* LISP_FEATURE_DARWIN */
1394 #elif defined(LISP_FEATURE_X86_64)
1395 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1396 *os_context_register_addr(context,reg_RCX) = 0;
1397 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1398 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1400 /* this much of the calling convention is common to all
1402 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1403 *os_context_register_addr(context,reg_NARGS) = 0;
1404 *os_context_register_addr(context,reg_LIP) =
1405 (os_context_register_t)(unsigned long)code;
1406 *os_context_register_addr(context,reg_CFP) =
1407 (os_context_register_t)(unsigned long)current_control_frame_pointer;
1409 #ifdef ARCH_HAS_NPC_REGISTER
1410 *os_context_npc_addr(context) =
1411 4 + *os_context_pc_addr(context);
1413 #ifdef LISP_FEATURE_SPARC
1414 *os_context_register_addr(context,reg_CODE) =
1415 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1417 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1421 /* KLUDGE: Theoretically the approach we use for undefined alien
1422 * variables should work for functions as well, but on PPC/Darwin
1423 * we get bus error at bogus addresses instead, hence this workaround,
1424 * that has the added benefit of automatically discriminating between
1425 * functions and variables.
/* Trampoline target for calls through an undefined alien function:
 * signals UNDEFINED-ALIEN-FUNCTION-ERROR on the Lisp side. */
1428 undefined_alien_function(void)
1430 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Decide what to do about a fault at ADDR if it lies in one of the
 * guard pages of the control, binding or alien stacks (or in the
 * undefined-alien page).  For a soft guard page: unprotect it, arm the
 * return-guard page, and arrange for the corresponding Lisp
 * ...-EXHAUSTED-ERROR to be signalled; for a return-guard page: re-arm
 * the guard; for a hard guard page: lose().  (Presumably returns a
 * flag saying whether the fault was handled -- the return statements
 * are elided in this listing; confirm against the full source.) */
1434 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1436 struct thread *th=arch_os_get_current_thread();
1438 if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
1439 addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1440 lose("Control stack exhausted");
1442 else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1443 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1444 /* We hit the end of the control stack: disable guard page
1445 * protection so the error handler has some headroom, protect the
1446 * previous page so that we can catch returns from the guard page
1447 * and restore it. */
1448 protect_control_stack_guard_page(0, NULL);
1449 protect_control_stack_return_guard_page(1, NULL);
1450 fprintf(stderr, "INFO: Control stack guard page unprotected\n");
1452 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1453 /* For the unfortunate case, when the control stack is
1454 * exhausted in a signal handler. */
1455 unblock_signals_in_context_and_maybe_warn(context);
1457 arrange_return_to_lisp_function
1458 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1461 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1462 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1463 /* We're returning from the guard page: reprotect it, and
1464 * unprotect this one. This works even if we somehow missed
1465 * the return-guard-page, and hit it on our way to new
1466 * exhaustion instead. */
1467 protect_control_stack_guard_page(1, NULL);
1468 protect_control_stack_return_guard_page(0, NULL);
1469 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
/* Binding stack: same three-way guard/return-guard/hard-guard dance. */
1472 else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
1473 addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1474 lose("Binding stack exhausted");
1476 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1477 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1478 protect_binding_stack_guard_page(0, NULL);
1479 protect_binding_stack_return_guard_page(1, NULL);
1480 fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
1482 /* For the unfortunate case, when the binding stack is
1483 * exhausted in a signal handler. */
1484 unblock_signals_in_context_and_maybe_warn(context);
1485 arrange_return_to_lisp_function
1486 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1489 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1490 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1491 protect_binding_stack_guard_page(1, NULL);
1492 protect_binding_stack_return_guard_page(0, NULL);
1493 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
/* Alien (C) stack: same pattern again. */
1496 else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
1497 addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
1498 lose("Alien stack exhausted");
1500 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1501 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1502 protect_alien_stack_guard_page(0, NULL);
1503 protect_alien_stack_return_guard_page(1, NULL);
1504 fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
1506 /* For the unfortunate case, when the alien stack is
1507 * exhausted in a signal handler. */
1508 unblock_signals_in_context_and_maybe_warn(context);
1509 arrange_return_to_lisp_function
1510 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1513 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1514 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1515 protect_alien_stack_guard_page(1, NULL);
1516 protect_alien_stack_return_guard_page(0, NULL);
1517 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
/* Reference into the page mapped at undefined_alien_address: an
 * undefined alien variable was read. */
1520 else if (addr >= undefined_alien_address &&
1521 addr < undefined_alien_address + os_vm_page_size) {
1522 arrange_return_to_lisp_function
1523 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1530 * noise to install handlers
1533 #ifndef LISP_FEATURE_WIN32
1534 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1535 * they are blocked, in Linux 2.6 the default handler is invoked
1536 * instead that usually coredumps. One might hastily think that adding
1537 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1538 * the whole sa_mask is ignored, instead of merely not adding the signal
1539 * in question to the mask. That means if it's not blockable the
1540 * signal must be unblocked at the beginning of signal handlers.
1542 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1543 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1544 * will be unblocked in the sigmask during the signal handler. -- RMK
1547 static volatile int sigaction_nodefer_works = -1;
1549 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1550 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1553 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1557 get_current_sigmask(¤t);
1558 /* There should be exactly two blocked signals: the two we added
1559 * to sa_mask when setting up the handler. NetBSD doesn't block
1560 * the signal we're handling when SA_NODEFER is set; Linux before
1561 * 2.6.13 or so also doesn't block the other signal when
1562 * SA_NODEFER is set. */
1563 for(i = 1; i < NSIG; i++)
1564 if (sigismember(¤t, i) !=
1565 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1566 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1567 sigaction_nodefer_works = 0;
1569 if (sigaction_nodefer_works == -1)
1570 sigaction_nodefer_works = 1;
/* Probe, at startup, whether SA_NODEFER works on this kernel: install
 * sigaction_nodefer_test_handler for the test signal with SA_NODEFER,
 * unblock everything, raise the signal, and busy-wait for the handler
 * to record its verdict in sigaction_nodefer_works. */
1574 see_if_sigaction_nodefer_works(void)
1576 struct sigaction sa, old_sa;
1578 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1579 sa.sa_sigaction = sigaction_nodefer_test_handler;
1580 sigemptyset(&sa.sa_mask);
1581 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1582 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1583 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1584 /* Make sure no signals are blocked. */
1587 sigemptyset(&empty);
1588 thread_sigmask(SIG_SETMASK, &empty, 0);
1590 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* sigaction_nodefer_works is volatile; the handler sets it to 0 or 1. */
1591 while (sigaction_nodefer_works == -1);
1592 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1595 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1596 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Wrapper used when SA_NODEFER is broken and the signal is not
 * blockable: explicitly unblock the signal being handled, then
 * dispatch through interrupt_handle_now. */
1599 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1601 SAVE_ERRNO(context,void_context);
1604 sigemptyset(&unblock);
1605 sigaddset(&unblock, signal);
1606 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1607 interrupt_handle_now(signal, info, context);
/* Same as unblock_me_trampoline, but dispatches to the C-level
 * handler table instead of interrupt_handle_now. */
1612 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1614 SAVE_ERRNO(context,void_context);
1617 sigemptyset(&unblock);
1618 sigaddset(&unblock, signal);
1619 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1620 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Thin errno-preserving wrapper that calls the registered C-level
 * handler for SIGNAL directly, with no unblocking or deferral. */
1625 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1627 SAVE_ERRNO(context,void_context);
1628 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Install HANDLER as the C-level (low-level) handler for SIGNAL,
 * choosing the appropriate outer trampoline: deferral wrapper for
 * deferrable signals, unblock wrapper where SA_NODEFER is broken and
 * the signal is unblockable, plain dispatch otherwise.  Records the
 * handler in interrupt_low_level_handlers[] (0 for SIG_DFL) so it can
 * later be undone/queried. */
1633 undoably_install_low_level_interrupt_handler (int signal,
1634 interrupt_handler_t handler)
1636 struct sigaction sa;
1638 if (0 > signal || signal >= NSIG) {
1639 lose("bad signal number %d\n", signal);
1642 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1643 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1644 else if (sigismember(&deferrable_sigset,signal))
1645 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1646 else if (!sigaction_nodefer_works &&
1647 !sigismember(&blockable_sigset, signal))
1648 sa.sa_sigaction = low_level_unblock_me_trampoline;
1650 sa.sa_sigaction = low_level_handle_now_handler;
/* Block all blockable signals while the handler runs. */
1652 sigcopyset(&sa.sa_mask, &blockable_sigset);
1653 sa.sa_flags = SA_SIGINFO | SA_RESTART
1654 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1655 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Memory faults may arrive on an exhausted control stack: use the
 * alternate signal stack for them. */
1656 if((signal==SIG_MEMORY_FAULT))
1657 sa.sa_flags |= SA_ONSTACK;
1660 sigaction(signal, &sa, NULL);
1661 interrupt_low_level_handlers[signal] =
1662 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1666 /* This is called from Lisp. */
/* Install HANDLER as the Lisp-level handler for SIGNAL and return the
 * previous handler (as a lispobj).  The UNIX-level sigaction is only
 * changed when no C-level handler owns the signal; the outer handler
 * is chosen with the same trampoline logic as for low-level handlers.
 * Blockable signals are blocked around the update. */
1668 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*))
1670 #ifndef LISP_FEATURE_WIN32
1671 struct sigaction sa;
1673 union interrupt_handler oldhandler;
1675 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1677 block_blockable_signals(0, &old);
1679 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1680 (unsigned int)interrupt_low_level_handlers[signal]));
1681 if (interrupt_low_level_handlers[signal]==0) {
1682 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1683 ARE_SAME_HANDLER(handler, SIG_IGN))
1684 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1685 else if (sigismember(&deferrable_sigset, signal))
1686 sa.sa_sigaction = maybe_now_maybe_later;
1687 else if (!sigaction_nodefer_works &&
1688 !sigismember(&blockable_sigset, signal))
1689 sa.sa_sigaction = unblock_me_trampoline;
1691 sa.sa_sigaction = interrupt_handle_now_handler;
1693 sigcopyset(&sa.sa_mask, &blockable_sigset);
1694 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1695 (sigaction_nodefer_works ? SA_NODEFER : 0);
1696 sigaction(signal, &sa, NULL);
1699 oldhandler = interrupt_handlers[signal];
1700 interrupt_handlers[signal].c = handler;
1702 thread_sigmask(SIG_SETMASK, &old, 0);
1704 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1706 return (unsigned long)oldhandler.lisp;
1708 /* Probably-wrong Win32 hack */
1713 /* This must not go through lisp as it's allowed anytime, even when on
/* SIGABRT terminates the runtime immediately via lose(), bypassing
 * all Lisp-side handling. */
1716 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
1718 lose("SIGABRT received.\n");
/* One-time runtime signal setup: probe SA_NODEFER behaviour, build
 * the deferrable/blockable/gc signal sets, default every Lisp-level
 * handler slot to SIG_DFL, and install the low-level SIGABRT handler. */
1722 interrupt_init(void)
1724 #ifndef LISP_FEATURE_WIN32
1726 SHOW("entering interrupt_init()");
1727 see_if_sigaction_nodefer_works();
1728 sigemptyset(&deferrable_sigset);
1729 sigemptyset(&blockable_sigset);
1730 sigemptyset(&gc_sigset);
1731 sigaddset_deferrable(&deferrable_sigset);
1732 sigaddset_blockable(&blockable_sigset);
1733 sigaddset_gc(&gc_sigset);
1735 /* Set up high level handler information. */
1736 for (i = 0; i < NSIG; i++) {
1737 interrupt_handlers[i].c =
1738 /* (The cast here blasts away the distinction between
1739 * SA_SIGACTION-style three-argument handlers and
1740 * signal(..)-style one-argument handlers, which is OK
1741 * because it works to call the 1-argument form where the
1742 * 3-argument form is expected.) */
1743 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
1745 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1746 SHOW("returning from interrupt_init()");
1750 #ifndef LISP_FEATURE_WIN32
/* Accessor exposing siginfo's si_code field (used from Lisp/FFI). */
1752 siginfo_code(siginfo_t *info)
1754 return info->si_code;
1756 os_vm_address_t current_memory_fault_address;
/* Report a memory fault at ADDR to Lisp: record the address in
 * current_memory_fault_address, warn (and possibly lose) at C level,
 * then signal MEMORY-FAULT-ERROR -- by rewriting the context where
 * the C stack is the control stack, otherwise by a direct funcall. */
1759 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1761 /* FIXME: This is lossy: if we get another memory fault (eg. from
1762 * another thread) before lisp has read this, we lose the information.
1763 * However, since this is mostly informative, we'll live with that for
1764 * now -- some address is better then no address in this case.
1766 current_memory_fault_address = addr;
1767 /* To allow debugging memory faults in signal handlers and such. */
1768 corruption_warning_and_maybe_lose("Memory fault at %x (pc=%p, sp=%p)",
1770 *os_context_pc_addr(context),
1771 #ifdef ARCH_HAS_STACK_POINTER
1772 *os_context_sp_addr(context)
1777 unblock_signals_in_context_and_maybe_warn(context);
1778 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1779 arrange_return_to_lisp_function(context,
1780 StaticSymbolFunction(MEMORY_FAULT_ERROR));
1782 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Last-resort handler for traps no case recognizes: enter a fake
 * foreign-function-call frame, hand the context to Lisp as a SAP via
 * UNHANDLED-TRAP-ERROR, and lose() if that ever returns. */
1788 unhandled_trap_error(os_context_t *context)
1790 lispobj context_sap;
1791 fake_foreign_function_call(context);
1792 unblock_gc_signals(0, 0);
1793 context_sap = alloc_sap(context);
1794 #ifndef LISP_FEATURE_WIN32
/* Restore the pre-signal mask so the Lisp error machinery runs with
 * normal signal handling. */
1795 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1797 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1798 lose("UNHANDLED-TRAP-ERROR fell through");
1801 /* Common logic for trapping instructions. How we actually handle each
1802 * case is highly architecture dependent, but the overall shape is
1805 handle_trap(os_context_t *context, int trap)
1808 case trap_PendingInterrupt:
1809 FSHOW((stderr, "/<trap pending interrupt>\n"));
1810 arch_skip_instruction(context);
1811 interrupt_handle_pending(context);
1815 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1816 interrupt_internal_error(context, trap==trap_Cerror);
1818 case trap_Breakpoint:
1819 arch_handle_breakpoint(context);
1821 case trap_FunEndBreakpoint:
1822 arch_handle_fun_end_breakpoint(context);
1824 #ifdef trap_AfterBreakpoint
1825 case trap_AfterBreakpoint:
1826 arch_handle_after_breakpoint(context);
1829 #ifdef trap_SingleStepAround
1830 case trap_SingleStepAround:
1831 case trap_SingleStepBefore:
1832 arch_handle_single_step_trap(context, trap);
1836 fake_foreign_function_call(context);
1837 lose("%%PRIMITIVE HALT called; the party is over.\n");
1839 unhandled_trap_error(context);