2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32  * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
/* Under Linux on some architectures, we appear to have to restore the
 * FPU control word from the context, as after the signal is delivered
 * we appear to have a null FPU control word. */
#if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context); \
    os_restore_fp_control(context);
#else
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context);
#endif
/* These are to be used in signal handlers. Currently all handlers are
 * called from one of:
 *
 * interrupt_handle_now_handler
 * maybe_now_maybe_later
 * unblock_me_trampoline
 * low_level_handle_now_handler
 * low_level_maybe_now_maybe_later
 * low_level_unblock_me_trampoline
 *
 * This gives us a single point of control (or six) over errno, fp
 * control word, and fixing up signal context on sparc.
 *
 * The SPARC/Linux platform doesn't quite do signals the way we want
 * them done. The third argument in the handler isn't filled in by the
 * kernel properly, so we fix it up ourselves in the
 * arch_os_get_context(..) function. -- CSR, 2002-07-23
 */
/* SAVE_ERRNO opens a scope (closed by RESTORE_ERRNO) that preserves
 * errno across the handler body and derives the os_context_t. */
#define SAVE_ERRNO(context,void_context)                        \
    {                                                           \
        int _saved_errno = errno;                               \
        RESTORE_FP_CONTROL_WORD(context,void_context);          \
        {

#define RESTORE_ERRNO                                           \
        }                                                       \
        errno = _saved_errno;                                   \
    }
112 static void run_deferred_handler(struct interrupt_data *data,
113 os_context_t *context);
114 #ifndef LISP_FEATURE_WIN32
115 static void store_signal_data_for_later (struct interrupt_data *data,
116 void *handler, int signal,
118 os_context_t *context);
121 fill_current_sigmask(sigset_t *sigset)
123 /* Get the current sigmask, by blocking the empty set. */
126 thread_sigmask(SIG_BLOCK, &empty, sigset);
130 sigaddset_deferrable(sigset_t *s)
132 sigaddset(s, SIGHUP);
133 sigaddset(s, SIGINT);
134 sigaddset(s, SIGTERM);
135 sigaddset(s, SIGQUIT);
136 sigaddset(s, SIGPIPE);
137 sigaddset(s, SIGALRM);
138 sigaddset(s, SIGURG);
139 sigaddset(s, SIGTSTP);
140 sigaddset(s, SIGCHLD);
142 #ifndef LISP_FEATURE_HPUX
143 sigaddset(s, SIGXCPU);
144 sigaddset(s, SIGXFSZ);
146 sigaddset(s, SIGVTALRM);
147 sigaddset(s, SIGPROF);
148 sigaddset(s, SIGWINCH);
152 sigdelset_deferrable(sigset_t *s)
154 sigdelset(s, SIGHUP);
155 sigdelset(s, SIGINT);
156 sigdelset(s, SIGQUIT);
157 sigdelset(s, SIGPIPE);
158 sigdelset(s, SIGALRM);
159 sigdelset(s, SIGURG);
160 sigdelset(s, SIGTSTP);
161 sigdelset(s, SIGCHLD);
163 #ifndef LISP_FEATURE_HPUX
164 sigdelset(s, SIGXCPU);
165 sigdelset(s, SIGXFSZ);
167 sigdelset(s, SIGVTALRM);
168 sigdelset(s, SIGPROF);
169 sigdelset(s, SIGWINCH);
173 sigaddset_blockable(sigset_t *sigset)
175 sigaddset_deferrable(sigset);
176 sigaddset_gc(sigset);
180 sigaddset_gc(sigset_t *sigset)
182 #ifdef LISP_FEATURE_SB_THREAD
183 sigaddset(sigset,SIG_STOP_FOR_GC);
188 sigdelset_gc(sigset_t *sigset)
190 #ifdef LISP_FEATURE_SB_THREAD
191 sigdelset(sigset,SIG_STOP_FOR_GC);
195 /* initialized in interrupt_init */
196 sigset_t deferrable_sigset;
197 sigset_t blockable_sigset;
202 deferrables_blocked_in_sigset_p(sigset_t *sigset)
204 #if !defined(LISP_FEATURE_WIN32)
206 for(i = 1; i < NSIG; i++) {
207 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
215 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
217 #if !defined(LISP_FEATURE_WIN32)
219 for(i = 1; i < NSIG; i++) {
220 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
221 lose("deferrable signal %d blocked\n",i);
227 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
229 #if !defined(LISP_FEATURE_WIN32)
231 for(i = 1; i < NSIG; i++) {
232 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
233 lose("deferrable signal %d not blocked\n",i);
239 check_deferrables_unblocked_or_lose(void)
241 #if !defined(LISP_FEATURE_WIN32)
243 fill_current_sigmask(¤t);
244 check_deferrables_unblocked_in_sigset_or_lose(¤t);
249 check_deferrables_blocked_or_lose(void)
251 #if !defined(LISP_FEATURE_WIN32)
253 fill_current_sigmask(¤t);
254 check_deferrables_blocked_in_sigset_or_lose(¤t);
259 check_blockables_blocked_or_lose(void)
261 #if !defined(LISP_FEATURE_WIN32)
264 fill_current_sigmask(¤t);
265 for(i = 1; i < NSIG; i++) {
266 if (sigismember(&blockable_sigset, i) && !sigismember(¤t, i))
267 lose("blockable signal %d not blocked\n",i);
273 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
275 #if !defined(LISP_FEATURE_WIN32)
277 for(i = 1; i < NSIG; i++) {
278 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
279 lose("gc signal %d blocked\n",i);
285 check_gc_signals_unblocked_or_lose(void)
287 #if !defined(LISP_FEATURE_WIN32)
289 fill_current_sigmask(¤t);
290 check_gc_signals_unblocked_in_sigset_or_lose(¤t);
295 check_interrupts_enabled_or_lose(os_context_t *context)
297 struct thread *thread=arch_os_get_current_thread();
298 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
299 lose("interrupts not enabled\n");
300 if (arch_pseudo_atomic_atomic(context))
301 lose ("in pseudo atomic section\n");
304 /* Save sigset (or the current sigmask if 0) if there is no pending
305 * handler, because that means that deferabbles are already blocked.
306 * The purpose is to avoid losing the pending gc signal if a
307 * deferrable interrupt async unwinds between clearing the pseudo
308 * atomic and trapping to GC.*/
310 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
312 struct thread *thread = arch_os_get_current_thread();
313 struct interrupt_data *data = thread->interrupt_data;
315 /* Obviously, this function is called when signals may not be
316 * blocked. Let's make sure we are not interrupted. */
317 thread_sigmask(SIG_BLOCK, &blockable_sigset, &oldset);
318 #ifndef LISP_FEATURE_SB_THREAD
319 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
321 if (data->gc_blocked_deferrables)
322 lose("gc_blocked_deferrables already true\n");
324 if ((!data->pending_handler) &&
325 (!data->gc_blocked_deferrables)) {
326 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
327 data->gc_blocked_deferrables = 1;
329 /* This is the sigmask of some context. */
330 sigcopyset(&data->pending_mask, sigset);
331 sigaddset_deferrable(sigset);
332 thread_sigmask(SIG_SETMASK,&oldset,0);
335 /* Operating on the current sigmask. Save oldset and
336 * unblock gc signals. In the end, this is equivalent to
337 * blocking the deferrables. */
338 sigcopyset(&data->pending_mask, &oldset);
339 unblock_gc_signals();
343 thread_sigmask(SIG_SETMASK,&oldset,0);
346 /* Are we leaving WITH-GCING and already running with interrupts
347 * enabled, without the protection of *GC-INHIBIT* T and there is gc
348 * (or stop for gc) pending, but we haven't trapped yet? */
350 in_leaving_without_gcing_race_p(struct thread *thread)
352 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
353 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
354 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
355 ((SymbolValue(GC_PENDING,thread) != NIL)
356 #if defined(LISP_FEATURE_SB_THREAD)
357 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
362 /* Check our baroque invariants. */
364 check_interrupt_context_or_lose(os_context_t *context)
366 struct thread *thread = arch_os_get_current_thread();
367 struct interrupt_data *data = thread->interrupt_data;
368 int interrupt_deferred_p = (data->pending_handler != 0);
369 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
370 sigset_t *sigset = os_context_sigmask_addr(context);
371 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
372 * handle_allocation_trap. */
373 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
374 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
375 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
376 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
377 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
378 int in_race_p = in_leaving_without_gcing_race_p(thread);
379 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
380 * section and trapping, a SIG_STOP_FOR_GC would see the next
381 * check fail, for this reason sig_stop_for_gc handler does not
382 * call this function. */
383 if (interrupt_deferred_p) {
384 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
385 lose("Stray deferred interrupt.\n");
388 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
389 lose("GC_PENDING, but why?\n");
390 #if defined(LISP_FEATURE_SB_THREAD)
392 int stop_for_gc_pending =
393 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
394 if (stop_for_gc_pending)
395 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
396 lose("STOP_FOR_GC_PENDING, but why?\n");
400 if (interrupt_pending && !interrupt_deferred_p)
401 lose("INTERRUPT_PENDING but not pending handler.\n");
402 if ((data->gc_blocked_deferrables) && interrupt_pending)
403 lose("gc_blocked_deferrables and interrupt pending\n.");
404 if (data->gc_blocked_deferrables)
405 check_deferrables_blocked_in_sigset_or_lose(sigset);
406 if (interrupt_pending || interrupt_deferred_p)
407 check_deferrables_blocked_in_sigset_or_lose(sigset);
409 check_deferrables_unblocked_in_sigset_or_lose(sigset);
410 /* If deferrables are unblocked then we are open to signals
411 * that run lisp code. */
412 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
416 /* When we catch an internal error, should we pass it back to Lisp to
417 * be handled in a high-level way? (Early in cold init, the answer is
418 * 'no', because Lisp is still too brain-dead to handle anything.
419 * After sufficient initialization has been completed, the answer
421 boolean internal_errors_enabled = 0;
423 #ifndef LISP_FEATURE_WIN32
425 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
427 union interrupt_handler interrupt_handlers[NSIG];
430 block_blockable_signals(void)
432 #ifndef LISP_FEATURE_WIN32
433 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
438 block_deferrable_signals(void)
440 #ifndef LISP_FEATURE_WIN32
441 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
446 unblock_deferrable_signals_in_sigset(sigset_t *sigset)
448 #ifndef LISP_FEATURE_WIN32
449 if (interrupt_handler_pending_p())
450 lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
451 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
452 sigdelset_deferrable(sigset);
457 unblock_deferrable_signals(void)
459 #ifndef LISP_FEATURE_WIN32
460 if (interrupt_handler_pending_p())
461 lose("unblock_deferrable_signals: losing proposition\n");
462 check_gc_signals_unblocked_or_lose();
463 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
/* Unblock the gc signal(s) in this thread's live sigmask; no-op on
 * non-threaded builds where no gc signal exists. */
void
unblock_gc_signals(void)
{
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
    thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
#endif
}
476 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
478 #ifndef LISP_FEATURE_WIN32
480 sigset_t *sigset=os_context_sigmask_addr(context);
481 for(i = 1; i < NSIG; i++) {
482 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
485 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
486 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
487 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
492 sigdelset_gc(sigset);
493 if (!interrupt_handler_pending_p()) {
494 unblock_deferrable_signals_in_sigset(sigset);
501 * utility routines used by various signal handlers
505 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
507 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
511 /* Build a fake stack frame or frames */
513 current_control_frame_pointer =
514 (lispobj *)(unsigned long)
515 (*os_context_register_addr(context, reg_CSP));
516 if ((lispobj *)(unsigned long)
517 (*os_context_register_addr(context, reg_CFP))
518 == current_control_frame_pointer) {
519 /* There is a small window during call where the callee's
520 * frame isn't built yet. */
521 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
522 == FUN_POINTER_LOWTAG) {
523 /* We have called, but not built the new frame, so
524 * build it for them. */
525 current_control_frame_pointer[0] =
526 *os_context_register_addr(context, reg_OCFP);
527 current_control_frame_pointer[1] =
528 *os_context_register_addr(context, reg_LRA);
529 current_control_frame_pointer += 8;
530 /* Build our frame on top of it. */
531 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
534 /* We haven't yet called, build our frame as if the
535 * partial frame wasn't there. */
536 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
539 /* We can't tell whether we are still in the caller if it had to
540 * allocate a stack frame due to stack arguments. */
541 /* This observation provoked some past CMUCL maintainer to ask
542 * "Can anything strange happen during return?" */
545 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
548 current_control_stack_pointer = current_control_frame_pointer + 8;
550 current_control_frame_pointer[0] = oldcont;
551 current_control_frame_pointer[1] = NIL;
552 current_control_frame_pointer[2] =
553 (lispobj)(*os_context_register_addr(context, reg_CODE));
557 /* Stores the context for gc to scavange and builds fake stack
560 fake_foreign_function_call(os_context_t *context)
563 struct thread *thread=arch_os_get_current_thread();
565 /* context_index incrementing must not be interrupted */
566 check_blockables_blocked_or_lose();
568 /* Get current Lisp state from context. */
570 dynamic_space_free_pointer =
571 (lispobj *)(unsigned long)
572 (*os_context_register_addr(context, reg_ALLOC));
573 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
574 /* dynamic_space_free_pointer); */
575 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
576 if ((long)dynamic_space_free_pointer & 1) {
577 lose("dead in fake_foreign_function_call, context = %x\n", context);
580 /* why doesnt PPC and SPARC do something like this: */
581 #if defined(LISP_FEATURE_HPPA)
582 if ((long)dynamic_space_free_pointer & 4) {
583 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
588 current_binding_stack_pointer =
589 (lispobj *)(unsigned long)
590 (*os_context_register_addr(context, reg_BSP));
593 build_fake_control_stack_frames(thread,context);
595 /* Do dynamic binding of the active interrupt context index
596 * and save the context in the context array. */
598 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
600 if (context_index >= MAX_INTERRUPTS) {
601 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
604 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
605 make_fixnum(context_index + 1),thread);
607 thread->interrupt_contexts[context_index] = context;
609 #ifdef FOREIGN_FUNCTION_CALL_FLAG
610 foreign_function_call_active = 1;
614 /* blocks all blockable signals. If you are calling from a signal handler,
615 * the usual signal mask will be restored from the context when the handler
616 * finishes. Otherwise, be careful */
618 undo_fake_foreign_function_call(os_context_t *context)
620 struct thread *thread=arch_os_get_current_thread();
621 /* Block all blockable signals. */
622 block_blockable_signals();
624 #ifdef FOREIGN_FUNCTION_CALL_FLAG
625 foreign_function_call_active = 0;
628 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
632 /* Put the dynamic space free pointer back into the context. */
633 *os_context_register_addr(context, reg_ALLOC) =
634 (unsigned long) dynamic_space_free_pointer
635 | (*os_context_register_addr(context, reg_ALLOC)
638 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
640 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
645 /* a handler for the signal caused by execution of a trap opcode
646 * signalling an internal error */
648 interrupt_internal_error(os_context_t *context, boolean continuable)
652 fake_foreign_function_call(context);
654 if (!internal_errors_enabled) {
655 describe_internal_error(context);
656 /* There's no good way to recover from an internal error
657 * before the Lisp error handling mechanism is set up. */
658 lose("internal error too early in init, can't recover\n");
661 /* Allocate the SAP object while the interrupts are still
663 unblock_gc_signals();
664 context_sap = alloc_sap(context);
666 #ifndef LISP_FEATURE_WIN32
667 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
670 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
671 /* Workaround for blocked SIGTRAP. */
674 sigemptyset(&newset);
675 sigaddset(&newset, SIGTRAP);
676 thread_sigmask(SIG_UNBLOCK, &newset, 0);
680 SHOW("in interrupt_internal_error");
682 /* Display some rudimentary debugging information about the
683 * error, so that even if the Lisp error handler gets badly
684 * confused, we have a chance to determine what's going on. */
685 describe_internal_error(context);
687 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
688 continuable ? T : NIL);
690 undo_fake_foreign_function_call(context); /* blocks signals again */
692 arch_skip_instruction(context);
696 interrupt_handler_pending_p(void)
698 struct thread *thread = arch_os_get_current_thread();
699 struct interrupt_data *data = thread->interrupt_data;
700 return (data->pending_handler != 0);
704 interrupt_handle_pending(os_context_t *context)
706 /* There are three ways we can get here. First, if an interrupt
707 * occurs within pseudo-atomic, it will be deferred, and we'll
708 * trap to here at the end of the pseudo-atomic block. Second, if
709 * the GC (in alloc()) decides that a GC is required, it will set
710 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
711 * and alloc() is always called from within pseudo-atomic, and
712 * thus we end up here again. Third, when calling GC-ON or at the
713 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
714 * here if there is a pending GC. Fourth, ahem, at the end of
715 * WITHOUT-INTERRUPTS (bar complications with nesting). */
717 /* Win32 only needs to handle the GC cases (for now?) */
719 struct thread *thread = arch_os_get_current_thread();
720 struct interrupt_data *data = thread->interrupt_data;
722 if (arch_pseudo_atomic_atomic(context)) {
723 lose("Handling pending interrupt in pseduo atomic.");
726 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
728 check_blockables_blocked_or_lose();
730 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
731 * handler, then the pending mask was saved and
732 * gc_blocked_deferrables set. Hence, there can be no pending
733 * handler and it's safe to restore the pending mask.
735 * Note, that if gc_blocked_deferrables is false we may still have
736 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
737 * pseudo atomic was interrupt be a deferrable first. */
738 if (data->gc_blocked_deferrables) {
739 if (data->pending_handler)
740 lose("GC blocked deferrables but still got a pending handler.");
741 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
742 lose("GC blocked deferrables while GC is inhibited.");
743 /* Restore the saved signal mask from the original signal (the
744 * one that interrupted us during the critical section) into
745 * the os_context for the signal we're currently in the
746 * handler for. This should ensure that when we return from
747 * the handler the blocked signals are unblocked. */
748 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
749 data->gc_blocked_deferrables = 0;
752 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
753 void *original_pending_handler = data->pending_handler;
755 #ifdef LISP_FEATURE_SB_THREAD
756 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
757 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
758 * the signal handler if it actually stops us. */
759 arch_clear_pseudo_atomic_interrupted(context);
760 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
763 /* Test for T and not for != NIL since the value :IN-PROGRESS
764 * is used in SUB-GC as part of the mechanism to supress
766 if (SymbolValue(GC_PENDING,thread) == T) {
768 /* Two reasons for doing this. First, if there is a
769 * pending handler we don't want to run. Second, we are
770 * going to clear pseudo atomic interrupted to avoid
771 * spurious trapping on every allocation in SUB_GC and
772 * having a pending handler with interrupts enabled and
773 * without pseudo atomic interrupted breaks an
775 if (data->pending_handler) {
776 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
777 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
780 arch_clear_pseudo_atomic_interrupted(context);
782 /* GC_PENDING is cleared in SUB-GC, or if another thread
783 * is doing a gc already we will get a SIG_STOP_FOR_GC and
784 * that will clear it.
786 * If there is a pending handler or gc was triggerred in a
787 * signal handler then maybe_gc won't run POST_GC and will
788 * return normally. */
789 if (!maybe_gc(context))
790 lose("GC not inhibited but maybe_gc did not GC.");
792 if (data->pending_handler) {
796 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
797 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
798 * GC-PENDING is not NIL then we cannot trap on pseudo
799 * atomic due to GC (see if(GC_PENDING) logic in
800 * cheneygc.c an gengcgc.c), plus there is a outer
801 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
803 lose("Trapping to run pending handler while GC in progress.");
806 check_blockables_blocked_or_lose();
808 /* No GC shall be lost. If SUB_GC triggers another GC then
809 * that should be handled on the spot. */
810 if (SymbolValue(GC_PENDING,thread) != NIL)
811 lose("GC_PENDING after doing gc.");
812 #ifdef LISP_FEATURE_SB_THREAD
813 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
814 lose("STOP_FOR_GC_PENDING after doing gc.");
816 /* Check two things. First, that gc does not clobber a handler
817 * that's already pending. Second, that there is no interrupt
818 * lossage: if original_pending_handler was NULL then even if
819 * an interrupt arrived during GC (POST-GC, really) it was
821 if (original_pending_handler != data->pending_handler)
822 lose("pending handler changed in gc: %x -> %d.",
823 original_pending_handler, data->pending_handler);
826 #ifndef LISP_FEATURE_WIN32
827 /* There may be no pending handler, because it was only a gc that
828 * had to be executed or because Lisp is a bit too eager to call
829 * DO-PENDING-INTERRUPT. */
830 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
831 (data->pending_handler)) {
832 /* No matter how we ended up here, clear both
833 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
834 * because we checked above that there is no GC pending. */
835 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
836 arch_clear_pseudo_atomic_interrupted(context);
837 /* Restore the sigmask in the context. */
838 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
839 run_deferred_handler(data, context);
841 /* It is possible that the end of this function was reached
842 * without never actually doing anything, the tests in Lisp for
843 * when to call receive-pending-interrupt are not exact. */
844 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
850 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
852 #ifdef FOREIGN_FUNCTION_CALL_FLAG
853 boolean were_in_lisp;
855 union interrupt_handler handler;
857 check_blockables_blocked_or_lose();
859 #ifndef LISP_FEATURE_WIN32
860 if (sigismember(&deferrable_sigset,signal))
861 check_interrupts_enabled_or_lose(context);
864 handler = interrupt_handlers[signal];
866 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
870 #ifdef FOREIGN_FUNCTION_CALL_FLAG
871 were_in_lisp = !foreign_function_call_active;
875 fake_foreign_function_call(context);
878 FSHOW_SIGNAL((stderr,
879 "/entering interrupt_handle_now(%d, info, context)\n",
882 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
884 /* This can happen if someone tries to ignore or default one
885 * of the signals we need for runtime support, and the runtime
886 * support decides to pass on it. */
887 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
889 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
890 /* Once we've decided what to do about contexts in a
891 * return-elsewhere world (the original context will no longer
892 * be available; should we copy it or was nobody using it anyway?)
893 * then we should convert this to return-elsewhere */
895 /* CMUCL comment said "Allocate the SAPs while the interrupts
896 * are still disabled.". I (dan, 2003.08.21) assume this is
897 * because we're not in pseudoatomic and allocation shouldn't
898 * be interrupted. In which case it's no longer an issue as
899 * all our allocation from C now goes through a PA wrapper,
900 * but still, doesn't hurt.
902 * Yeah, but non-gencgc platforms don't really wrap allocation
903 * in PA. MG - 2005-08-29 */
905 lispobj info_sap, context_sap;
906 /* Leave deferrable signals blocked, the handler itself will
907 * allow signals again when it sees fit. */
908 unblock_gc_signals();
909 context_sap = alloc_sap(context);
910 info_sap = alloc_sap(info);
912 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
914 funcall3(handler.lisp,
919 /* This cannot happen in sane circumstances. */
921 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
923 #ifndef LISP_FEATURE_WIN32
924 /* Allow signals again. */
925 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
927 (*handler.c)(signal, info, context);
930 #ifdef FOREIGN_FUNCTION_CALL_FLAG
934 undo_fake_foreign_function_call(context); /* block signals again */
937 FSHOW_SIGNAL((stderr,
938 "/returning from interrupt_handle_now(%d, info, context)\n",
942 /* This is called at the end of a critical section if the indications
943 * are that some signal was deferred during the section. Note that as
944 * far as C or the kernel is concerned we dealt with the signal
945 * already; we're just doing the Lisp-level processing now that we
948 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
950 /* The pending_handler may enable interrupts and then another
951 * interrupt may hit, overwrite interrupt_data, so reset the
952 * pending handler before calling it. Trust the handler to finish
953 * with the siginfo before enabling interrupts. */
954 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
955 data->pending_handler;
957 data->pending_handler=0;
958 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
959 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
962 #ifndef LISP_FEATURE_WIN32
964 maybe_defer_handler(void *handler, struct interrupt_data *data,
965 int signal, siginfo_t *info, os_context_t *context)
967 struct thread *thread=arch_os_get_current_thread();
969 check_blockables_blocked_or_lose();
971 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
972 lose("interrupt already pending\n");
973 if (thread->interrupt_data->pending_handler)
974 lose("there is a pending handler already (PA)\n");
975 if (data->gc_blocked_deferrables)
976 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
977 check_interrupt_context_or_lose(context);
978 /* If interrupts are disabled then INTERRUPT_PENDING is set and
979 * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo
980 * atomic section inside a WITHOUT-INTERRUPTS.
982 * Also, if in_leaving_without_gcing_race_p then
983 * interrupt_handle_pending is going to be called soon, so
984 * stashing the signal away is safe.
986 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
987 in_leaving_without_gcing_race_p(thread)) {
988 store_signal_data_for_later(data,handler,signal,info,context);
989 SetSymbolValue(INTERRUPT_PENDING, T,thread);
990 FSHOW_SIGNAL((stderr,
991 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
992 (unsigned int)handler,signal,
993 in_leaving_without_gcing_race_p(thread)));
994 check_interrupt_context_or_lose(context);
997 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
998 * actually use its argument for anything on x86, so this branch
999 * may succeed even when context is null (gencgc alloc()) */
1000 if (arch_pseudo_atomic_atomic(context)) {
1001 store_signal_data_for_later(data,handler,signal,info,context);
1002 arch_set_pseudo_atomic_interrupted(context);
1003 FSHOW_SIGNAL((stderr,
1004 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1005 (unsigned int)handler,signal));
1006 check_interrupt_context_or_lose(context);
1009 FSHOW_SIGNAL((stderr,
1010 "/maybe_defer_handler(%x,%d): not deferred\n",
1011 (unsigned int)handler,signal));
1016 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1018 siginfo_t *info, os_context_t *context)
1020 if (data->pending_handler)
1021 lose("tried to overwrite pending interrupt handler %x with %x\n",
1022 data->pending_handler, handler);
1024 lose("tried to defer null interrupt handler\n");
1025 data->pending_handler = handler;
1026 data->pending_signal = signal;
1028 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1030 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1034 lose("Null context");
1036 /* the signal mask in the context (from before we were
1037 * interrupted) is copied to be restored when run_deferred_handler
1038 * happens. Then the usually-blocked signals are added to the mask
1039 * in the context so that we are running with blocked signals when
1040 * the handler returns */
1041 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1042 sigaddset_deferrable(os_context_sigmask_addr(context));
1046 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1048 SAVE_ERRNO(context,void_context);
1049 struct thread *thread = arch_os_get_current_thread();
1050 struct interrupt_data *data = thread->interrupt_data;
1052 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1053 interrupt_handle_now(signal, info, context);
/* Dispatch SIGNAL directly to its C-level (low-level) handler from
 * interrupt_low_level_handlers[]. The caller is responsible for any
 * FP-control and Darwin context fixups, as noted below. */
1058 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1059 os_context_t *context)
1061 /* No FP control fixage needed, caller has done that. */
1062 check_blockables_blocked_or_lose();
1063 check_interrupts_enabled_or_lose(context);
1064 (*interrupt_low_level_handlers[signal])(signal, info, context);
1065 /* No Darwin context fixage needed, caller does that. */
/* Outermost handler for deferrable signals that have a low-level (C)
 * handler installed: defer while pseudo-atomic / interrupts disabled,
 * otherwise run the low-level handler right away. Mirrors
 * maybe_now_maybe_later for the Lisp-handler case. */
1069 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1071 SAVE_ERRNO(context,void_context);
1072 struct thread *thread = arch_os_get_current_thread();
1073 struct interrupt_data *data = thread->interrupt_data;
1075 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1076 signal,info,context))
1077 low_level_interrupt_handle_now(signal, info, context);
1082 #ifdef LISP_FEATURE_SB_THREAD
1084 /* This function must not cons, because that may trigger a GC. */
/* Handler for the stop-the-world signal sent by a thread that wants to
 * GC: park this thread in STATE_SUSPENDED until the GCing thread lets
 * it run again. Stopping is deferred when *GC-INHIBIT* is set or
 * while pseudo-atomic. */
1086 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1088 struct thread *thread=arch_os_get_current_thread();
1091 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1092 * pseudo atomic until gc is finally allowed. */
1093 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1094 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1095 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1097 } else if (arch_pseudo_atomic_atomic(context)) {
1098 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1099 arch_set_pseudo_atomic_interrupted(context);
1100 maybe_save_gc_mask_and_block_deferrables
1101 (os_context_sigmask_addr(context));
1102 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1106 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1108 /* Not PA and GC not inhibited -- we can stop now. */
1110 /* need the context stored so it can have registers scavenged */
1111 fake_foreign_function_call(context);
1113 /* Block everything. */
/* NOTE(review): `ss' is presumably a local sigset_t filled in an
 * elided line above -- confirm against full source. */
1115 thread_sigmask(SIG_BLOCK,&ss,0);
1117 /* Not pending anymore. */
1118 SetSymbolValue(GC_PENDING,NIL,thread);
1119 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
/* Sanity check: we must still be RUNNING before flipping ourselves to
 * SUSPENDED. */
1121 if(thread_state(thread)!=STATE_RUNNING) {
1122 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1123 fixnum_value(thread->state));
1126 set_thread_state(thread,STATE_SUSPENDED);
1127 FSHOW_SIGNAL((stderr,"suspended\n"));
/* Block here until the GCing thread moves us out of STATE_SUSPENDED. */
1129 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1130 FSHOW_SIGNAL((stderr,"resumed\n"));
1132 if(thread_state(thread)!=STATE_RUNNING) {
1133 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1134 fixnum_value(thread_state(thread)));
1137 undo_fake_foreign_function_call(context);
/* Outermost handler for signals that need no deferral machinery: hand
 * straight to interrupt_handle_now. SIGILL/SIGBUS (and SIGEMT off
 * Linux) arriving through this generic path usually indicate memory
 * corruption, so emit a corruption warning first. */
1143 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1145 SAVE_ERRNO(context,void_context);
1146 #ifndef LISP_FEATURE_WIN32
1147 if ((signal == SIGILL) || (signal == SIGBUS)
1148 #ifndef LISP_FEATURE_LINUX
1149 || (signal == SIGEMT)
/* Fixed typo in the user-visible diagnostic: "recieved" -> "received". */
1152 corruption_warning_and_maybe_lose("Signal %d received", signal);
1154 interrupt_handle_now(signal, info, context);
1158 /* manipulate the signal context and stack such that when the handler
1159 * returns, it will call function instead of whatever it was doing
1163 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1164 extern int *context_eflags_addr(os_context_t *context);
1167 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1168 extern void post_signal_tramp(void);
1169 extern void call_into_lisp_tramp(void);
/* Frob CONTEXT so that when the outermost signal handler returns,
 * control passes to the Lisp callable FUNCTION instead of resuming the
 * interrupted code. Highly architecture dependent: on x86/x86-64 a
 * fake call_into_lisp frame is built on the interrupted stack; on
 * other architectures the context PC is pointed at the function's code
 * directly. */
1171 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1173 check_gc_signals_unblocked_in_sigset_or_lose
1174 (os_context_sigmask_addr(context));
1175 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1176 void * fun=native_pointer(function);
1177 void *code = &(((struct simple_fun *) fun)->code);
1180 /* Build a stack frame showing `interrupted' so that the
1181 * user's backtrace makes (as much) sense (as usual) */
1183 /* fp state is saved and restored by call_into_lisp */
1184 /* FIXME: errno is not restored, but since current uses of this
1185 * function only call Lisp code that signals an error, it's not
1186 * much of a problem. In other words, running out of the control
1187 * stack between a syscall and (GET-ERRNO) may clobber errno if
1188 * something fails during signalling or in the handler. But I
1189 * can't see what can go wrong as long as there is no CONTINUE
1190 * like restart on them. */
1191 #ifdef LISP_FEATURE_X86
1192 /* Suppose the existence of some function that saved all
1193 * registers, called call_into_lisp, then restored GP registers and
1194 * returned. It would look something like this:
1202 pushl {address of function to call}
1203 call 0x8058db0 <call_into_lisp>
1210 * What we do here is set up the stack that call_into_lisp would
1211 * expect to see if it had been called by this code, and frob the
1212 * signal context so that signal return goes directly to call_into_lisp,
1213 * and when that function (and the lisp function it invoked) returns,
1214 * it returns to the second half of this imaginary function which
1215 * restores all registers and returns to C
1217 * For this to work, the latter part of the imaginary function
1218 * must obviously exist in reality. That would be post_signal_tramp
1221 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1223 #if defined(LISP_FEATURE_DARWIN)
/* Darwin path parks the register state in a freshly mapped save area
 * instead of on the interrupted stack.
 * NOTE(review): nothing visible here frees this os_validate'd block;
 * presumably the trampoline does (step b below) -- confirm. */
1224 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1226 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1227 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1229 /* 1. os_validate (malloc/mmap) register_save_block
1230 * 2. copy register state into register_save_block
1231 * 3. put a pointer to register_save_block in a register in the context
1232 * 4. set the context's EIP to point to a trampoline which:
1233 * a. builds the fake stack frame from the block
1234 * b. frees the block
1235 * c. calls the function
1238 *register_save_area = *os_context_pc_addr(context);
1239 *(register_save_area + 1) = function;
1240 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1241 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1242 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1243 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1244 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1245 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1246 *(register_save_area + 8) = *context_eflags_addr(context);
/* Resume at the trampoline, with ECX carrying the save-area pointer. */
1248 *os_context_pc_addr(context) =
1249 (os_context_register_t) call_into_lisp_tramp;
1250 *os_context_register_addr(context,reg_ECX) =
1251 (os_context_register_t) register_save_area;
1254 /* return address for call_into_lisp: */
1255 *(sp-15) = (u32)post_signal_tramp;
1256 *(sp-14) = function; /* args for call_into_lisp : function*/
1257 *(sp-13) = 0; /* arg array */
1258 *(sp-12) = 0; /* no. args */
1259 /* this order matches that used in POPAD */
1260 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1261 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1263 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1264 /* POPAD ignores the value of ESP: */
1266 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1268 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1269 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1270 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1271 *(sp-3)=*context_eflags_addr(context);
1272 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1273 *(sp-1)=*os_context_pc_addr(context);
1277 #elif defined(LISP_FEATURE_X86_64)
1278 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1280 /* return address for call_into_lisp: */
1281 *(sp-18) = (u64)post_signal_tramp;
/* Saved GP state, in the order post_signal_tramp restores it. */
1283 *(sp-17)=*os_context_register_addr(context,reg_R15);
1284 *(sp-16)=*os_context_register_addr(context,reg_R14);
1285 *(sp-15)=*os_context_register_addr(context,reg_R13);
1286 *(sp-14)=*os_context_register_addr(context,reg_R12);
1287 *(sp-13)=*os_context_register_addr(context,reg_R11);
1288 *(sp-12)=*os_context_register_addr(context,reg_R10);
1289 *(sp-11)=*os_context_register_addr(context,reg_R9);
1290 *(sp-10)=*os_context_register_addr(context,reg_R8);
1291 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1292 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1293 /* skip RBP and RSP */
1294 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1295 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1296 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1297 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1298 *(sp-3)=*context_eflags_addr(context);
1299 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1300 *(sp-1)=*os_context_pc_addr(context);
/* call_into_lisp's arguments go in the SysV AMD64 argument
 * registers: function in RDI, arg array in RSI, nargs in RDX. */
1302 *os_context_register_addr(context,reg_RDI) =
1303 (os_context_register_t)function; /* function */
1304 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1305 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1307 struct thread *th=arch_os_get_current_thread();
1308 build_fake_control_stack_frames(th,context);
1311 #ifdef LISP_FEATURE_X86
1313 #if !defined(LISP_FEATURE_DARWIN)
1314 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1315 *os_context_register_addr(context,reg_ECX) = 0;
1316 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
/* NetBSD exposes the user stack pointer as UESP rather than ESP. */
1318 *os_context_register_addr(context,reg_UESP) =
1319 (os_context_register_t)(sp-15);
1321 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1322 #endif /* __NETBSD__ */
1323 #endif /* LISP_FEATURE_DARWIN */
1325 #elif defined(LISP_FEATURE_X86_64)
1326 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1327 *os_context_register_addr(context,reg_RCX) = 0;
1328 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1329 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
/* Non-x86 path: jump straight at the function's code object, setting
 * up the Lisp calling-convention registers. */
1331 /* this much of the calling convention is common to all
1333 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1334 *os_context_register_addr(context,reg_NARGS) = 0;
1335 *os_context_register_addr(context,reg_LIP) =
1336 (os_context_register_t)(unsigned long)code;
1337 *os_context_register_addr(context,reg_CFP) =
1338 (os_context_register_t)(unsigned long)current_control_frame_pointer;
1340 #ifdef ARCH_HAS_NPC_REGISTER
1341 *os_context_npc_addr(context) =
1342 4 + *os_context_pc_addr(context);
1344 #ifdef LISP_FEATURE_SPARC
1345 *os_context_register_addr(context,reg_CODE) =
1346 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1348 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1352 /* KLUDGE: Theoretically the approach we use for undefined alien
1353 * variables should work for functions as well, but on PPC/Darwin
1354 * we get bus error at bogus addresses instead, hence this workaround,
1355 * that has the added benefit of automatically discriminating between
1356 * functions and variables.
/* Signal an UNDEFINED-ALIEN-FUNCTION-ERROR in Lisp; reached when a
 * call lands on the undefined-alien trampoline. */
1359 undefined_alien_function(void)
1361 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Dispatch a page-protection fault at ADDR that hit one of the guard
 * pages (control/binding/alien stack, or the undefined-alien page):
 * either unprotect for headroom and arrange to signal the matching
 * exhaustion error in Lisp, or reprotect when returning off the guard
 * region. */
1365 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1367 struct thread *th=arch_os_get_current_thread();
1369 /* note the os_context hackery here. When the signal handler returns,
1370 * it won't go back to what it was doing ... */
1371 if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1372 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1373 /* We hit the end of the control stack: disable guard page
1374 * protection so the error handler has some headroom, protect the
1375 * previous page so that we can catch returns from the guard page
1376 * and restore it. */
1377 corruption_warning_and_maybe_lose("Control stack exhausted");
1378 protect_control_stack_guard_page(0, NULL);
1379 protect_control_stack_return_guard_page(1, NULL);
1381 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1382 /* For the unfortunate case, when the control stack is
1383 * exhausted in a signal handler. */
1384 unblock_signals_in_context_and_maybe_warn(context);
1386 arrange_return_to_lisp_function
1387 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1390 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1391 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1392 /* We're returning from the guard page: reprotect it, and
1393 * unprotect this one. This works even if we somehow missed
1394 * the return-guard-page, and hit it on our way to new
1395 * exhaustion instead. */
1396 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1397 protect_control_stack_guard_page(1, NULL);
1398 protect_control_stack_return_guard_page(0, NULL);
/* Binding stack: same two-phase guard / return-guard dance as the
 * control stack above. */
1401 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1402 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1403 corruption_warning_and_maybe_lose("Binding stack exhausted");
1404 protect_binding_stack_guard_page(0, NULL);
1405 protect_binding_stack_return_guard_page(1, NULL);
1407 /* For the unfortunate case, when the binding stack is
1408 * exhausted in a signal handler. */
1409 unblock_signals_in_context_and_maybe_warn(context);
1410 arrange_return_to_lisp_function
1411 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1414 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1415 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1416 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1417 protect_binding_stack_guard_page(1, NULL);
1418 protect_binding_stack_return_guard_page(0, NULL);
/* Alien (C) stack: ditto. */
1421 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1422 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1423 corruption_warning_and_maybe_lose("Alien stack exhausted");
1424 protect_alien_stack_guard_page(0, NULL);
1425 protect_alien_stack_return_guard_page(1, NULL);
1427 /* For the unfortunate case, when the alien stack is
1428 * exhausted in a signal handler. */
1429 unblock_signals_in_context_and_maybe_warn(context);
1430 arrange_return_to_lisp_function
1431 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1434 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1435 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1436 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1437 protect_alien_stack_guard_page(1, NULL);
1438 protect_alien_stack_return_guard_page(0, NULL);
/* Accesses to the undefined-alien page: report the undefined alien
 * variable in Lisp. */
1441 else if (addr >= undefined_alien_address &&
1442 addr < undefined_alien_address + os_vm_page_size) {
1443 arrange_return_to_lisp_function
1444 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1451 * noise to install handlers
1454 #ifndef LISP_FEATURE_WIN32
1455 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1456 * they are blocked, in Linux 2.6 the default handler is invoked
1457 * instead that usually coredumps. One might hastily think that adding
1458 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1459 * the whole sa_mask is ignored, instead of merely not adding the
1460 * signal in question to the mask. That means if it's not blockable the
1461 * signal must be unblocked at the beginning of signal handlers.
1463 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1464 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1465 * will be unblocked in the sigmask during the signal handler. -- RMK
1468 static volatile int sigaction_nodefer_works = -1;
1470 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1471 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Handler used by see_if_sigaction_nodefer_works to probe whether
 * SA_NODEFER behaves as specified on this kernel; publishes the
 * verdict through sigaction_nodefer_works. */
1474 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1476 sigset_t empty, current;
1478 sigemptyset(&empty);
/* SIG_BLOCK with an empty set reads the current mask without changing
 * it. Fixed mojibake here: "&current" had been corrupted into a
 * CURRENCY SIGN sequence (HTML-entity damage to "&curren..."). */
1479 thread_sigmask(SIG_BLOCK, &empty, &current);
1480 /* There should be exactly two blocked signals: the two we added
1481 * to sa_mask when setting up the handler. NetBSD doesn't block
1482 * the signal we're handling when SA_NODEFER is set; Linux before
1483 * 2.6.13 or so also doesn't block the other signal when
1484 * SA_NODEFER is set. */
1485 for(i = 1; i < NSIG; i++)
1486 if (sigismember(&current, i) !=
1487 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1488 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1489 sigaction_nodefer_works = 0;
/* No mismatch found and no earlier verdict: SA_NODEFER works. */
1491 if (sigaction_nodefer_works == -1)
1492 sigaction_nodefer_works = 1;
/* Empirically determine whether SA_NODEFER works correctly on this
 * system: install sigaction_nodefer_test_handler with a known sa_mask,
 * send ourselves the test signal, and spin until the handler has
 * recorded its verdict in sigaction_nodefer_works. */
1496 see_if_sigaction_nodefer_works(void)
1498 struct sigaction sa, old_sa;
1500 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1501 sa.sa_sigaction = sigaction_nodefer_test_handler;
1502 sigemptyset(&sa.sa_mask);
1503 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1504 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1505 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1506 /* Make sure no signals are blocked. */
/* NOTE(review): `empty' is presumably a sigset_t declared on an
 * elided line above -- confirm against full source. */
1509 sigemptyset(&empty);
1510 thread_sigmask(SIG_SETMASK, &empty, 0);
/* Deliver the test signal and busy-wait for the handler's verdict. */
1512 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
1513 while (sigaction_nodefer_works == -1);
1514 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1517 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1518 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Outermost handler used when SA_NODEFER is broken: explicitly
 * unblock the signal being handled, then handle it normally via
 * interrupt_handle_now. */
1521 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1523 SAVE_ERRNO(context,void_context);
/* NOTE(review): `unblock' is presumably a local sigset_t declared on
 * an elided line above -- confirm against full source. */
1526 sigemptyset(&unblock);
1527 sigaddset(&unblock, signal);
1528 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1529 interrupt_handle_now(signal, info, context);
/* As unblock_me_trampoline, but for signals with a C-level handler:
 * unblock the signal (SA_NODEFER workaround) and dispatch to
 * interrupt_low_level_handlers[]. */
1534 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1536 SAVE_ERRNO(context,void_context);
/* NOTE(review): `unblock' is presumably a local sigset_t declared on
 * an elided line above -- confirm against full source. */
1539 sigemptyset(&unblock);
1540 sigaddset(&unblock, signal);
1541 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1542 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Outermost handler for signals with a C-level handler and no
 * deferral or unblocking needs: dispatch directly, preserving errno. */
1547 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1549 SAVE_ERRNO(context,void_context);
1550 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Install HANDLER as the C-level handler for SIGNAL, choosing the
 * outermost trampoline according to whether the signal is deferrable,
 * blockable, and whether SA_NODEFER works; record the handler in
 * interrupt_low_level_handlers[] so it can later be undone. */
1555 undoably_install_low_level_interrupt_handler (int signal,
1556 interrupt_handler_t handler)
1558 struct sigaction sa;
1560 if (0 > signal || signal >= NSIG) {
1561 lose("bad signal number %d\n", signal);
/* SIG_DFL is passed straight through to sigaction unchanged. */
1564 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1565 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1566 else if (sigismember(&deferrable_sigset,signal))
1567 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1568 else if (!sigaction_nodefer_works &&
1569 !sigismember(&blockable_sigset, signal))
1570 sa.sa_sigaction = low_level_unblock_me_trampoline;
1572 sa.sa_sigaction = low_level_handle_now_handler;
1574 sigcopyset(&sa.sa_mask, &blockable_sigset);
1575 sa.sa_flags = SA_SIGINFO | SA_RESTART
1576 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1577 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Memory faults may arrive on an exhausted control stack, so handle
 * them on the alternate signal stack. */
1578 if((signal==SIG_MEMORY_FAULT))
1579 sa.sa_flags |= SA_ONSTACK;
1582 sigaction(signal, &sa, NULL);
/* Remember the handler so it can be queried/uninstalled later;
 * SIG_DFL is recorded as 0. */
1583 interrupt_low_level_handlers[signal] =
1584 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1588 /* This is called from Lisp. */
/* Install a Lisp-visible handler for SIGNAL. If a low-level handler
 * already owns the signal, only the Lisp-side table is updated;
 * otherwise an appropriate outermost trampoline is chosen as in
 * undoably_install_low_level_interrupt_handler. Returns the previous
 * Lisp handler. */
1590 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*))
1592 #ifndef LISP_FEATURE_WIN32
1593 struct sigaction sa;
/* NOTE(review): `new' and `old' are presumably sigset_t locals whose
 * declarations are on elided lines above -- confirm. */
1595 union interrupt_handler oldhandler;
1597 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
/* Block SIGNAL while swapping handlers to avoid a delivery race. */
1600 sigaddset(&new, signal);
1601 thread_sigmask(SIG_BLOCK, &new, &old);
1603 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1604 (unsigned int)interrupt_low_level_handlers[signal]));
/* Only touch the UNIX-level handler when no low-level handler claims
 * this signal. */
1605 if (interrupt_low_level_handlers[signal]==0) {
1606 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1607 ARE_SAME_HANDLER(handler, SIG_IGN))
1608 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1609 else if (sigismember(&deferrable_sigset, signal))
1610 sa.sa_sigaction = maybe_now_maybe_later;
1611 else if (!sigaction_nodefer_works &&
1612 !sigismember(&blockable_sigset, signal))
1613 sa.sa_sigaction = unblock_me_trampoline;
1615 sa.sa_sigaction = interrupt_handle_now_handler;
1617 sigcopyset(&sa.sa_mask, &blockable_sigset);
1618 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1619 (sigaction_nodefer_works ? SA_NODEFER : 0);
1620 sigaction(signal, &sa, NULL);
1623 oldhandler = interrupt_handlers[signal];
1624 interrupt_handlers[signal].c = handler;
1626 thread_sigmask(SIG_SETMASK, &old, 0);
1628 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1630 return (unsigned long)oldhandler.lisp;
1632 /* Probably-wrong Win32 hack */
1637 /* This must not go through lisp as it's allowed anytime, even when on
/* SIGABRT is always fatal and is handled entirely in C via lose(). */
1640 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
1642 lose("SIGABRT received.\n");
/* One-time signal-subsystem initialization: probe SA_NODEFER, build
 * the deferrable/blockable/gc signal sets, point every Lisp handler
 * entry at SIG_DFL, and claim SIGABRT with the fatal C handler. */
1646 interrupt_init(void)
1648 #ifndef LISP_FEATURE_WIN32
1650 SHOW("entering interrupt_init()");
1651 see_if_sigaction_nodefer_works();
1652 sigemptyset(&deferrable_sigset);
1653 sigemptyset(&blockable_sigset);
1654 sigemptyset(&gc_sigset);
1655 sigaddset_deferrable(&deferrable_sigset);
1656 sigaddset_blockable(&blockable_sigset);
1657 sigaddset_gc(&gc_sigset);
1659 /* Set up high level handler information. */
1660 for (i = 0; i < NSIG; i++) {
1661 interrupt_handlers[i].c =
1662 /* (The cast here blasts away the distinction between
1663 * SA_SIGACTION-style three-argument handlers and
1664 * signal(..)-style one-argument handlers, which is OK
1665 * because it works to call the 1-argument form where the
1666 * 3-argument form is expected.) */
1667 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
1669 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1670 SHOW("returning from interrupt_init()");
1674 #ifndef LISP_FEATURE_WIN32
/* Accessor for Lisp: expose the si_code field of a siginfo_t. */
1676 siginfo_code(siginfo_t *info)
1678 return info->si_code;
1680 os_vm_address_t current_memory_fault_address;
/* Report an unhandleable memory fault at ADDR to Lisp: record the
 * address in current_memory_fault_address, warn (or lose), and arrange
 * for MEMORY-FAULT-ERROR to be signalled. */
1683 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1685 /* FIXME: This is lossy: if we get another memory fault (eg. from
1686 * another thread) before lisp has read this, we lose the information.
1687 * However, since this is mostly informative, we'll live with that for
1688 * now -- some address is better than no address in this case.
1690 current_memory_fault_address = addr;
1691 /* To allow debugging memory faults in signal handlers and such. */
1692 corruption_warning_and_maybe_lose("Memory fault at %x", addr);
1693 unblock_signals_in_context_and_maybe_warn(context);
1694 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Where the C stack is the control stack we can return into Lisp via
 * the context; otherwise call into Lisp directly. */
1695 arrange_return_to_lisp_function(context,
1696 StaticSymbolFunction(MEMORY_FAULT_ERROR));
1698 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Hand an unrecognized trap to Lisp as UNHANDLED-TRAP-ERROR, passing a
 * SAP onto the signal context. Ends in lose() if the Lisp side ever
 * returns. */
1704 unhandled_trap_error(os_context_t *context)
1706 lispobj context_sap;
1707 fake_foreign_function_call(context);
1708 unblock_gc_signals();
1709 context_sap = alloc_sap(context);
1710 #ifndef LISP_FEATURE_WIN32
/* Restore the interrupted code's signal mask before calling Lisp. */
1711 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1713 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1714 lose("UNHANDLED-TRAP-ERROR fell through");
1717 /* Common logic for trapping instructions. How we actually handle each
1718 * case is highly architecture dependent, but the overall shape is
1721 handle_trap(os_context_t *context, int trap)
1724 case trap_PendingInterrupt:
1725 FSHOW((stderr, "/<trap pending interrupt>\n"));
1726 arch_skip_instruction(context);
1727 interrupt_handle_pending(context);
1731 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1732 interrupt_internal_error(context, trap==trap_Cerror);
1734 case trap_Breakpoint:
1735 arch_handle_breakpoint(context);
1737 case trap_FunEndBreakpoint:
1738 arch_handle_fun_end_breakpoint(context);
1740 #ifdef trap_AfterBreakpoint
1741 case trap_AfterBreakpoint:
1742 arch_handle_after_breakpoint(context);
1745 #ifdef trap_SingleStepAround
1746 case trap_SingleStepAround:
1747 case trap_SingleStepBefore:
1748 arch_handle_single_step_trap(context, trap);
1752 fake_foreign_function_call(context);
1753 lose("%%PRIMITIVE HALT called; the party is over.\n");
1755 unhandled_trap_error(context);