2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
/* Under Linux on some architectures, we appear to have to restore the
 * FPU control word from the context, as after the signal is delivered
 * we appear to have a null FPU control word. */
#if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context); \
    os_restore_fp_control(context);
#else
/* No FP restore needed: only fetch the context pointer. */
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context);
#endif
83 /* These are to be used in signal handlers. Currently all handlers are
86 * interrupt_handle_now_handler
87 * maybe_now_maybe_later
88 * unblock_me_trampoline
89 * low_level_handle_now_handler
90 * low_level_maybe_now_maybe_later
91 * low_level_unblock_me_trampoline
93 * This gives us a single point of control (or six) over errno, fp
94 * control word, and fixing up signal context on sparc.
96 * The SPARC/Linux platform doesn't quite do signals the way we want
97 * them done. The third argument in the handler isn't filled in by the
98 * kernel properly, so we fix it up ourselves in the
99 * arch_os_get_context(..) function. -- CSR, 2002-07-23
/* Save errno at signal-handler entry and set up the os_context_t
 * pointer; RESTORE_ERRNO restores errno at exit.  The unbalanced
 * braces are deliberate: every SAVE_ERRNO must be paired with exactly
 * one RESTORE_ERRNO in the same handler, together forming one scope. */
#define SAVE_ERRNO(context,void_context)                        \
    {                                                           \
        int _saved_errno = errno;                               \
        RESTORE_FP_CONTROL_WORD(context,void_context);          \
        {

#define RESTORE_ERRNO                                           \
        }                                                       \
        errno = _saved_errno;                                   \
    }
112 static void run_deferred_handler(struct interrupt_data *data,
113 os_context_t *context);
114 #ifndef LISP_FEATURE_WIN32
115 static void store_signal_data_for_later (struct interrupt_data *data,
116 void *handler, int signal,
118 os_context_t *context);
121 fill_current_sigmask(sigset_t *sigset)
123 /* Get the current sigmask, by blocking the empty set. */
126 thread_sigmask(SIG_BLOCK, &empty, sigset);
130 sigaddset_deferrable(sigset_t *s)
132 sigaddset(s, SIGHUP);
133 sigaddset(s, SIGINT);
134 sigaddset(s, SIGTERM);
135 sigaddset(s, SIGQUIT);
136 sigaddset(s, SIGPIPE);
137 sigaddset(s, SIGALRM);
138 sigaddset(s, SIGURG);
139 sigaddset(s, SIGTSTP);
140 sigaddset(s, SIGCHLD);
142 #ifndef LISP_FEATURE_HPUX
143 sigaddset(s, SIGXCPU);
144 sigaddset(s, SIGXFSZ);
146 sigaddset(s, SIGVTALRM);
147 sigaddset(s, SIGPROF);
148 sigaddset(s, SIGWINCH);
152 sigdelset_deferrable(sigset_t *s)
154 sigdelset(s, SIGHUP);
155 sigdelset(s, SIGINT);
156 sigdelset(s, SIGQUIT);
157 sigdelset(s, SIGPIPE);
158 sigdelset(s, SIGALRM);
159 sigdelset(s, SIGURG);
160 sigdelset(s, SIGTSTP);
161 sigdelset(s, SIGCHLD);
163 #ifndef LISP_FEATURE_HPUX
164 sigdelset(s, SIGXCPU);
165 sigdelset(s, SIGXFSZ);
167 sigdelset(s, SIGVTALRM);
168 sigdelset(s, SIGPROF);
169 sigdelset(s, SIGWINCH);
173 sigaddset_blockable(sigset_t *sigset)
175 sigaddset_deferrable(sigset);
176 sigaddset_gc(sigset);
180 sigaddset_gc(sigset_t *sigset)
182 #ifdef LISP_FEATURE_SB_THREAD
183 sigaddset(sigset,SIG_STOP_FOR_GC);
188 sigdelset_gc(sigset_t *sigset)
190 #ifdef LISP_FEATURE_SB_THREAD
191 sigdelset(sigset,SIG_STOP_FOR_GC);
195 /* initialized in interrupt_init */
196 sigset_t deferrable_sigset;
197 sigset_t blockable_sigset;
202 deferrables_blocked_in_sigset_p(sigset_t *sigset)
204 #if !defined(LISP_FEATURE_WIN32)
206 for(i = 1; i < NSIG; i++) {
207 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
215 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
217 #if !defined(LISP_FEATURE_WIN32)
219 for(i = 1; i < NSIG; i++) {
220 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
221 lose("deferrable signal %d blocked\n",i);
227 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
229 #if !defined(LISP_FEATURE_WIN32)
231 for(i = 1; i < NSIG; i++) {
232 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
233 lose("deferrable signal %d not blocked\n",i);
239 check_deferrables_unblocked_or_lose(void)
241 #if !defined(LISP_FEATURE_WIN32)
243 fill_current_sigmask(¤t);
244 check_deferrables_unblocked_in_sigset_or_lose(¤t);
249 check_deferrables_blocked_or_lose(void)
251 #if !defined(LISP_FEATURE_WIN32)
253 fill_current_sigmask(¤t);
254 check_deferrables_blocked_in_sigset_or_lose(¤t);
259 check_blockables_blocked_or_lose(void)
261 #if !defined(LISP_FEATURE_WIN32)
264 fill_current_sigmask(¤t);
265 for(i = 1; i < NSIG; i++) {
266 if (sigismember(&blockable_sigset, i) && !sigismember(¤t, i))
267 lose("blockable signal %d not blocked\n",i);
273 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
275 #if !defined(LISP_FEATURE_WIN32)
277 for(i = 1; i < NSIG; i++) {
278 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
279 lose("gc signal %d blocked\n",i);
285 check_gc_signals_unblocked_or_lose(void)
287 #if !defined(LISP_FEATURE_WIN32)
289 fill_current_sigmask(¤t);
290 check_gc_signals_unblocked_in_sigset_or_lose(¤t);
295 check_interrupts_enabled_or_lose(os_context_t *context)
297 struct thread *thread=arch_os_get_current_thread();
298 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
299 lose("interrupts not enabled\n");
300 if (arch_pseudo_atomic_atomic(context))
301 lose ("in pseudo atomic section\n");
304 /* Save sigset (or the current sigmask if 0) if there is no pending
305 * handler, because that means that deferrables are already blocked.
306 * The purpose is to avoid losing the pending gc signal if a
307 * deferrable interrupt async unwinds between clearing the pseudo
308 * atomic and trapping to GC.*/
/* NOTE(review): partial listing -- the return-type line, braces,
 * the declaration of `oldset`, the early `return`, and the if/else
 * closing lines are elided here; code lines below are byte-identical
 * to the listing. */
310 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
312 #ifndef LISP_FEATURE_WIN32
313 struct thread *thread = arch_os_get_current_thread();
314 struct interrupt_data *data = thread->interrupt_data;
316 /* Obviously, this function is called when signals may not be
317 * blocked. Let's make sure we are not interrupted. */
318 thread_sigmask(SIG_BLOCK, &blockable_sigset, &oldset);
319 #ifndef LISP_FEATURE_SB_THREAD
320 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
322 if (data->gc_blocked_deferrables)
323 lose("gc_blocked_deferrables already true\n");
325 if ((!data->pending_handler) &&
326 (!data->gc_blocked_deferrables)) {
327 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
328 data->gc_blocked_deferrables = 1;
/* Branch taken when the caller passed an explicit mask (sigset != 0). */
330 /* This is the sigmask of some context. */
331 sigcopyset(&data->pending_mask, sigset);
332 sigaddset_deferrable(sigset);
333 thread_sigmask(SIG_SETMASK,&oldset,0);
/* Else-branch: sigset == 0, operate on the thread's own mask. */
336 /* Operating on the current sigmask. Save oldset and
337 * unblock gc signals. In the end, this is equivalent to
338 * blocking the deferrables. */
339 sigcopyset(&data->pending_mask, &oldset);
340 unblock_gc_signals();
344 thread_sigmask(SIG_SETMASK,&oldset,0);
352 in_leaving_without_gcing_race_p(struct thread *thread)
354 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
355 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
356 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
357 ((SymbolValue(GC_PENDING,thread) != NIL)
358 #if defined(LISP_FEATURE_SB_THREAD)
359 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
364 /* Check our baroque invariants. */
/* NOTE(review): partial listing -- braces, #endif lines, and the
 * `if (gc_pending)` / `else` lines are elided; code lines below are
 * byte-identical to the listing. */
366 check_interrupt_context_or_lose(os_context_t *context)
368 #ifndef LISP_FEATURE_WIN32
369 struct thread *thread = arch_os_get_current_thread();
370 struct interrupt_data *data = thread->interrupt_data;
371 int interrupt_deferred_p = (data->pending_handler != 0);
372 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
373 sigset_t *sigset = os_context_sigmask_addr(context);
374 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
375 * handle_allocation_trap. */
376 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
377 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
378 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
379 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
380 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
381 int in_race_p = in_leaving_without_gcing_race_p(thread);
382 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
383 * section and trapping, a SIG_STOP_FOR_GC would see the next
384 * check fail, for this reason sig_stop_for_gc handler does not
385 * call this function. */
386 if (interrupt_deferred_p) {
387 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
388 lose("Stray deferred interrupt.\n");
/* A deferred GC must be explainable by PA, *GC-INHIBIT* or the race. */
391 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
392 lose("GC_PENDING, but why?\n");
393 #if defined(LISP_FEATURE_SB_THREAD)
395 int stop_for_gc_pending =
396 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
397 if (stop_for_gc_pending)
398 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
399 lose("STOP_FOR_GC_PENDING, but why?\n");
403 if (interrupt_pending && !interrupt_deferred_p)
404 lose("INTERRUPT_PENDING but not pending handler.\n");
/* NOTE(review): the message below has "\n." transposed -- should be ".\n". */
405 if ((data->gc_blocked_deferrables) && interrupt_pending)
406 lose("gc_blocked_deferrables and interrupt pending\n.");
/* NOTE(review): this check is subsumed by the combined condition just
 * below, which also tests gc_blocked_deferrables. */
407 if (data->gc_blocked_deferrables)
408 check_deferrables_blocked_in_sigset_or_lose(sigset);
409 if (interrupt_pending || interrupt_deferred_p ||
410 data->gc_blocked_deferrables)
411 check_deferrables_blocked_in_sigset_or_lose(sigset);
413 check_deferrables_unblocked_in_sigset_or_lose(sigset);
414 /* If deferrables are unblocked then we are open to signals
415 * that run lisp code. */
416 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
421 /* When we catch an internal error, should we pass it back to Lisp to
422 * be handled in a high-level way? (Early in cold init, the answer is
423 * 'no', because Lisp is still too brain-dead to handle anything.
424 * After sufficient initialization has been completed, the answer
426 boolean internal_errors_enabled = 0;
428 #ifndef LISP_FEATURE_WIN32
430 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
432 union interrupt_handler interrupt_handlers[NSIG];
435 block_blockable_signals(void)
437 #ifndef LISP_FEATURE_WIN32
438 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
443 block_deferrable_signals(void)
445 #ifndef LISP_FEATURE_WIN32
446 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
451 unblock_deferrable_signals_in_sigset(sigset_t *sigset)
453 #ifndef LISP_FEATURE_WIN32
454 if (interrupt_handler_pending_p())
455 lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
456 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
457 sigdelset_deferrable(sigset);
462 unblock_deferrable_signals(void)
464 #ifndef LISP_FEATURE_WIN32
465 if (interrupt_handler_pending_p())
466 lose("unblock_deferrable_signals: losing proposition\n");
467 check_gc_signals_unblocked_or_lose();
468 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
/* Unblock the stop-for-GC signal in the calling thread; meaningful
 * only on threaded, non-Win32 builds. */
void
unblock_gc_signals(void)
{
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
    thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
#endif
}
481 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
483 #ifndef LISP_FEATURE_WIN32
485 sigset_t *sigset=os_context_sigmask_addr(context);
486 for(i = 1; i < NSIG; i++) {
487 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
490 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
491 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
492 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
497 sigdelset_gc(sigset);
498 if (!interrupt_handler_pending_p()) {
499 unblock_deferrable_signals_in_sigset(sigset);
506 * utility routines used by various signal handlers
/* NOTE(review): partial listing -- return type, braces, the `lispobj
 * oldcont;` declaration and the else lines are elided; code lines
 * below are byte-identical to the listing.  On ports where the C
 * stack is not the control stack, this synthesizes Lisp control-stack
 * frames from the context's registers. */
510 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
512 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
516 /* Build a fake stack frame or frames */
518 current_control_frame_pointer =
519 (lispobj *)(unsigned long)
520 (*os_context_register_addr(context, reg_CSP));
521 if ((lispobj *)(unsigned long)
522 (*os_context_register_addr(context, reg_CFP))
523 == current_control_frame_pointer) {
524 /* There is a small window during call where the callee's
525 * frame isn't built yet. */
526 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
527 == FUN_POINTER_LOWTAG) {
528 /* We have called, but not built the new frame, so
529 * build it for them. */
530 current_control_frame_pointer[0] =
531 *os_context_register_addr(context, reg_OCFP);
532 current_control_frame_pointer[1] =
533 *os_context_register_addr(context, reg_LRA);
534 current_control_frame_pointer += 8;
535 /* Build our frame on top of it. */
536 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
539 /* We haven't yet called, build our frame as if the
540 * partial frame wasn't there. */
541 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
544 /* We can't tell whether we are still in the caller if it had to
545 * allocate a stack frame due to stack arguments. */
546 /* This observation provoked some past CMUCL maintainer to ask
547 * "Can anything strange happen during return?" */
550 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
553 current_control_stack_pointer = current_control_frame_pointer + 8;
555 current_control_frame_pointer[0] = oldcont;
556 current_control_frame_pointer[1] = NIL;
557 current_control_frame_pointer[2] =
558 (lispobj)(*os_context_register_addr(context, reg_CODE));
562 /* Stores the context for gc to scavenge and builds fake stack
/* NOTE(review): partial listing -- braces, `#endif` lines and the
 * `context_index` declaration are elided; code lines below are
 * byte-identical to the listing.  Makes an interrupted Lisp thread
 * look as though it had made a foreign call, so GC can run. */
565 fake_foreign_function_call(os_context_t *context)
568 struct thread *thread=arch_os_get_current_thread();
570 /* context_index incrementing must not be interrupted */
571 check_blockables_blocked_or_lose();
573 /* Get current Lisp state from context. */
575 dynamic_space_free_pointer =
576 (lispobj *)(unsigned long)
577 (*os_context_register_addr(context, reg_ALLOC));
578 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
579 /* dynamic_space_free_pointer); */
/* Low bit set in reg_ALLOC means pseudo-atomic state leaked through. */
580 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
581 if ((long)dynamic_space_free_pointer & 1) {
582 lose("dead in fake_foreign_function_call, context = %x\n", context);
585 /* why doesn't PPC and SPARC do something like this: */
586 #if defined(LISP_FEATURE_HPPA)
587 if ((long)dynamic_space_free_pointer & 4) {
588 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
593 current_binding_stack_pointer =
594 (lispobj *)(unsigned long)
595 (*os_context_register_addr(context, reg_BSP));
598 build_fake_control_stack_frames(thread,context);
600 /* Do dynamic binding of the active interrupt context index
601 * and save the context in the context array. */
603 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
605 if (context_index >= MAX_INTERRUPTS) {
606 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
609 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
610 make_fixnum(context_index + 1),thread);
612 thread->interrupt_contexts[context_index] = context;
614 #ifdef FOREIGN_FUNCTION_CALL_FLAG
615 foreign_function_call_active = 1;
619 /* blocks all blockable signals. If you are calling from a signal handler,
620 * the usual signal mask will be restored from the context when the handler
621 * finishes. Otherwise, be careful */
/* NOTE(review): partial listing -- braces, the unbind call, #endif and
 * the #ifdef reg_ALLOC branches joining the two reg_ALLOC expressions
 * are elided; code lines below are byte-identical to the listing. */
623 undo_fake_foreign_function_call(os_context_t *context)
625 struct thread *thread=arch_os_get_current_thread();
626 /* Block all blockable signals. */
627 block_blockable_signals();
629 #ifdef FOREIGN_FUNCTION_CALL_FLAG
630 foreign_function_call_active = 0;
633 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
637 /* Put the dynamic space free pointer back into the context. */
638 *os_context_register_addr(context, reg_ALLOC) =
639 (unsigned long) dynamic_space_free_pointer
640 | (*os_context_register_addr(context, reg_ALLOC)
/* Alternative form: keep the pointer bits from reg_ALLOC and the
 * lowtag bits from the free pointer. */
643 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
645 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
650 /* a handler for the signal caused by execution of a trap opcode
651 * signalling an internal error */
/* NOTE(review): partial listing -- braces, the `lispobj context_sap`
 * declaration, #endif lines and (at line 696) the `if (continuable)`
 * guarding arch_skip_instruction are elided; code lines below are
 * byte-identical to the listing. */
653 interrupt_internal_error(os_context_t *context, boolean continuable)
657 fake_foreign_function_call(context);
659 if (!internal_errors_enabled) {
660 describe_internal_error(context);
661 /* There's no good way to recover from an internal error
662 * before the Lisp error handling mechanism is set up. */
663 lose("internal error too early in init, can't recover\n");
666 /* Allocate the SAP object while the interrupts are still
668 unblock_gc_signals();
669 context_sap = alloc_sap(context);
671 #ifndef LISP_FEATURE_WIN32
672 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
675 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
676 /* Workaround for blocked SIGTRAP. */
679 sigemptyset(&newset);
680 sigaddset(&newset, SIGTRAP);
681 thread_sigmask(SIG_UNBLOCK, &newset, 0);
685 SHOW("in interrupt_internal_error");
687 /* Display some rudimentary debugging information about the
688 * error, so that even if the Lisp error handler gets badly
689 * confused, we have a chance to determine what's going on. */
690 describe_internal_error(context);
692 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
693 continuable ? T : NIL);
695 undo_fake_foreign_function_call(context); /* blocks signals again */
697 arch_skip_instruction(context);
701 interrupt_handler_pending_p(void)
703 struct thread *thread = arch_os_get_current_thread();
704 struct interrupt_data *data = thread->interrupt_data;
705 return (data->pending_handler != 0);
/* NOTE(review): partial listing -- return type, braces, several
 * #endif/else lines and the bodies at lines 799-802 are elided; code
 * lines below are byte-identical to the listing.  Known string typos
 * that cannot be touched in a comments-only pass: "pseduo" (line 728)
 * and the "%x -> %d" format at line 829, whose second argument is a
 * pointer and should use a pointer conversion. */
709 interrupt_handle_pending(os_context_t *context)
711 /* There are three ways we can get here. First, if an interrupt
712 * occurs within pseudo-atomic, it will be deferred, and we'll
713 * trap to here at the end of the pseudo-atomic block. Second, if
714 * the GC (in alloc()) decides that a GC is required, it will set
715 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
716 * and alloc() is always called from within pseudo-atomic, and
717 * thus we end up here again. Third, when calling GC-ON or at the
718 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
719 * here if there is a pending GC. Fourth, ahem, at the end of
720 * WITHOUT-INTERRUPTS (bar complications with nesting). */
722 /* Win32 only needs to handle the GC cases (for now?) */
724 struct thread *thread = arch_os_get_current_thread();
725 struct interrupt_data *data = thread->interrupt_data;
727 if (arch_pseudo_atomic_atomic(context)) {
728 lose("Handling pending interrupt in pseduo atomic.");
731 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
733 check_blockables_blocked_or_lose();
735 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
736 * handler, then the pending mask was saved and
737 * gc_blocked_deferrables set. Hence, there can be no pending
738 * handler and it's safe to restore the pending mask.
740 * Note, that if gc_blocked_deferrables is false we may still have
741 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
742 * pseudo atomic was interrupted by a deferrable first. */
743 if (data->gc_blocked_deferrables) {
744 if (data->pending_handler)
745 lose("GC blocked deferrables but still got a pending handler.");
746 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
747 lose("GC blocked deferrables while GC is inhibited.");
748 /* Restore the saved signal mask from the original signal (the
749 * one that interrupted us during the critical section) into
750 * the os_context for the signal we're currently in the
751 * handler for. This should ensure that when we return from
752 * the handler the blocked signals are unblocked. */
753 #ifndef LISP_FEATURE_WIN32
754 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
756 data->gc_blocked_deferrables = 0;
759 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
760 void *original_pending_handler = data->pending_handler;
762 #ifdef LISP_FEATURE_SB_THREAD
763 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
764 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
765 * the signal handler if it actually stops us. */
766 arch_clear_pseudo_atomic_interrupted(context);
767 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
770 /* Test for T and not for != NIL since the value :IN-PROGRESS
771 * is used in SUB-GC as part of the mechanism to suppress
773 if (SymbolValue(GC_PENDING,thread) == T) {
775 /* Two reasons for doing this. First, if there is a
776 * pending handler we don't want to run. Second, we are
777 * going to clear pseudo atomic interrupted to avoid
778 * spurious trapping on every allocation in SUB_GC and
779 * having a pending handler with interrupts enabled and
780 * without pseudo atomic interrupted breaks an
782 if (data->pending_handler) {
783 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
784 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
787 arch_clear_pseudo_atomic_interrupted(context);
789 /* GC_PENDING is cleared in SUB-GC, or if another thread
790 * is doing a gc already we will get a SIG_STOP_FOR_GC and
791 * that will clear it.
793 * If there is a pending handler or gc was triggered in a
794 * signal handler then maybe_gc won't run POST_GC and will
795 * return normally. */
796 if (!maybe_gc(context))
797 lose("GC not inhibited but maybe_gc did not GC.");
799 if (data->pending_handler) {
803 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
804 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
805 * GC-PENDING is not NIL then we cannot trap on pseudo
806 * atomic due to GC (see if(GC_PENDING) logic in
807 * cheneygc.c and gencgc.c), plus there is a outer
808 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
810 lose("Trapping to run pending handler while GC in progress.");
813 check_blockables_blocked_or_lose();
815 /* No GC shall be lost. If SUB_GC triggers another GC then
816 * that should be handled on the spot. */
817 if (SymbolValue(GC_PENDING,thread) != NIL)
818 lose("GC_PENDING after doing gc.");
819 #ifdef LISP_FEATURE_SB_THREAD
820 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
821 lose("STOP_FOR_GC_PENDING after doing gc.");
823 /* Check two things. First, that gc does not clobber a handler
824 * that's already pending. Second, that there is no interrupt
825 * lossage: if original_pending_handler was NULL then even if
826 * an interrupt arrived during GC (POST-GC, really) it was
828 if (original_pending_handler != data->pending_handler)
829 lose("pending handler changed in gc: %x -> %d.",
830 original_pending_handler, data->pending_handler);
833 #ifndef LISP_FEATURE_WIN32
834 /* There may be no pending handler, because it was only a gc that
835 * had to be executed or because Lisp is a bit too eager to call
836 * DO-PENDING-INTERRUPT. */
837 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
838 (data->pending_handler)) {
839 /* No matter how we ended up here, clear both
840 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
841 * because we checked above that there is no GC pending. */
842 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
843 arch_clear_pseudo_atomic_interrupted(context);
844 /* Restore the sigmask in the context. */
845 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
846 run_deferred_handler(data, context);
848 /* It is possible that the end of this function was reached
849 * without ever actually doing anything, the tests in Lisp for
850 * when to call receive-pending-interrupt are not exact. */
851 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
/* NOTE(review): partial listing -- return type, braces, the SIG_IGN
 * early-return body, the funcall3 argument lines (922-925) and
 * several #endif lines are elided; code lines below are byte-identical
 * to the listing.  Dispatches a signal to its stored Lisp or C
 * handler while inside a fake foreign-function call. */
857 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
859 #ifdef FOREIGN_FUNCTION_CALL_FLAG
860 boolean were_in_lisp;
862 union interrupt_handler handler;
864 check_blockables_blocked_or_lose();
866 #ifndef LISP_FEATURE_WIN32
867 if (sigismember(&deferrable_sigset,signal))
868 check_interrupts_enabled_or_lose(context);
871 handler = interrupt_handlers[signal];
873 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
877 #ifdef FOREIGN_FUNCTION_CALL_FLAG
878 were_in_lisp = !foreign_function_call_active;
882 fake_foreign_function_call(context);
885 FSHOW_SIGNAL((stderr,
886 "/entering interrupt_handle_now(%d, info, context)\n",
889 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
891 /* This can happen if someone tries to ignore or default one
892 * of the signals we need for runtime support, and the runtime
893 * support decides to pass on it. */
894 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
896 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
897 /* Once we've decided what to do about contexts in a
898 * return-elsewhere world (the original context will no longer
899 * be available; should we copy it or was nobody using it anyway?)
900 * then we should convert this to return-elsewhere */
902 /* CMUCL comment said "Allocate the SAPs while the interrupts
903 * are still disabled.". I (dan, 2003.08.21) assume this is
904 * because we're not in pseudoatomic and allocation shouldn't
905 * be interrupted. In which case it's no longer an issue as
906 * all our allocation from C now goes through a PA wrapper,
907 * but still, doesn't hurt.
909 * Yeah, but non-gencgc platforms don't really wrap allocation
910 * in PA. MG - 2005-08-29 */
912 lispobj info_sap, context_sap;
913 /* Leave deferrable signals blocked, the handler itself will
914 * allow signals again when it sees fit. */
915 unblock_gc_signals();
916 context_sap = alloc_sap(context);
917 info_sap = alloc_sap(info);
919 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
921 funcall3(handler.lisp,
926 /* This cannot happen in sane circumstances. */
928 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
930 #ifndef LISP_FEATURE_WIN32
931 /* Allow signals again. */
932 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
934 (*handler.c)(signal, info, context);
937 #ifdef FOREIGN_FUNCTION_CALL_FLAG
941 undo_fake_foreign_function_call(context); /* block signals again */
944 FSHOW_SIGNAL((stderr,
945 "/returning from interrupt_handle_now(%d, info, context)\n",
949 /* This is called at the end of a critical section if the indications
950 * are that some signal was deferred during the section. Note that as
951 * far as C or the kernel is concerned we dealt with the signal
952 * already; we're just doing the Lisp-level processing now that we
955 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
957 /* The pending_handler may enable interrupts and then another
958 * interrupt may hit, overwrite interrupt_data, so reset the
959 * pending handler before calling it. Trust the handler to finish
960 * with the siginfo before enabling interrupts. */
961 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
962 data->pending_handler;
964 data->pending_handler=0;
965 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
966 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
969 #ifndef LISP_FEATURE_WIN32
/* NOTE(review): partial listing -- the `static boolean` return-type
 * line, braces, and the `return 1;`/`return 0;` statements closing
 * each branch are elided; code lines below are byte-identical to the
 * listing.  Decides whether to run `handler` now or stash it for
 * later: defers when interrupts are disabled or inside pseudo-atomic. */
971 maybe_defer_handler(void *handler, struct interrupt_data *data,
972 int signal, siginfo_t *info, os_context_t *context)
974 struct thread *thread=arch_os_get_current_thread();
976 check_blockables_blocked_or_lose();
978 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
979 lose("interrupt already pending\n");
980 if (thread->interrupt_data->pending_handler)
981 lose("there is a pending handler already (PA)\n");
982 if (data->gc_blocked_deferrables)
983 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
984 check_interrupt_context_or_lose(context);
985 /* If interrupts are disabled then INTERRUPT_PENDING is set and
986 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
987 * atomic section inside a WITHOUT-INTERRUPTS.
989 * Also, if in_leaving_without_gcing_race_p then
990 * interrupt_handle_pending is going to be called soon, so
991 * stashing the signal away is safe.
993 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
994 in_leaving_without_gcing_race_p(thread)) {
995 store_signal_data_for_later(data,handler,signal,info,context);
996 SetSymbolValue(INTERRUPT_PENDING, T,thread);
997 FSHOW_SIGNAL((stderr,
998 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
999 (unsigned int)handler,signal,
1000 in_leaving_without_gcing_race_p(thread)));
1001 check_interrupt_context_or_lose(context);
1004 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1005 * actually use its argument for anything on x86, so this branch
1006 * may succeed even when context is null (gencgc alloc()) */
1007 if (arch_pseudo_atomic_atomic(context)) {
1008 store_signal_data_for_later(data,handler,signal,info,context);
1009 arch_set_pseudo_atomic_interrupted(context);
1010 FSHOW_SIGNAL((stderr,
1011 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1012 (unsigned int)handler,signal));
1013 check_interrupt_context_or_lose(context);
1016 FSHOW_SIGNAL((stderr,
1017 "/maybe_defer_handler(%x,%d): not deferred\n",
1018 (unsigned int)handler,signal));
/* NOTE(review): partial listing -- the `static void` line, the
 * `int signal` parameter line (1024), braces, the `if (!handler)`
 * guard (1030), the `if (info)` around the memcpy, and the
 * `if (!context)` guard before line 1041 are elided; code lines below
 * are byte-identical to the listing.  Stashes a deferred signal so
 * run_deferred_handler can replay it later. */
1023 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1025 siginfo_t *info, os_context_t *context)
1027 if (data->pending_handler)
1028 lose("tried to overwrite pending interrupt handler %x with %x\n",
1029 data->pending_handler, handler);
1031 lose("tried to defer null interrupt handler\n");
1032 data->pending_handler = handler;
1033 data->pending_signal = signal;
1035 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1037 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1041 lose("Null context");
1043 /* the signal mask in the context (from before we were
1044 * interrupted) is copied to be restored when run_deferred_handler
1045 * happens. Then the usually-blocked signals are added to the mask
1046 * in the context so that we are running with blocked signals when
1047 * the handler returns */
1048 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1049 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (UNIX-installed) handler for deferrable signals: either runs
 * the Lisp-level handler immediately via interrupt_handle_now, or stores
 * it for later if we are in a pseudo-atomic section or interrupts are
 * disabled (decided by maybe_defer_handler). */
1053 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1055 SAVE_ERRNO(context,void_context);
1056 struct thread *thread = arch_os_get_current_thread();
1057 struct interrupt_data *data = thread->interrupt_data;
1059 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1060 interrupt_handle_now(signal, info, context);
/* Invoke the C-level ("low level") handler registered for SIGNAL in
 * interrupt_low_level_handlers[].  Sanity-checks that blockable signals
 * are blocked and that interrupts are enabled before dispatching.
 * Callers are responsible for FP-control and (Darwin) context fixups. */
1065 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1066                                os_context_t *context)
1068 /* No FP control fixage needed, caller has done that. */
1069 check_blockables_blocked_or_lose();
1070 check_interrupts_enabled_or_lose(context);
1071 (*interrupt_low_level_handlers[signal])(signal, info, context);
1072 /* No Darwin context fixage needed, caller does that. */
/* Like maybe_now_maybe_later, but dispatching to the C-level handler in
 * interrupt_low_level_handlers[] instead of the Lisp-level one: defers
 * the delivery when pseudo-atomic / interrupts-disabled, else runs it. */
1076 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1078 SAVE_ERRNO(context,void_context);
1079 struct thread *thread = arch_os_get_current_thread();
1080 struct interrupt_data *data = thread->interrupt_data;
1082 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1083                         signal,info,context))
1084 low_level_interrupt_handle_now(signal, info, context);
1089 #ifdef LISP_FEATURE_SB_THREAD
1091 /* This function must not cons, because that may trigger a GC. */
/* Handler for the stop-for-GC signal sent by the GCing thread.  Either
 * defers (GC inhibited, or in pseudo-atomic) by setting
 * STOP-FOR-GC-PENDING, or suspends this thread until the GC thread
 * resumes it.  The ordering of the tests below is significant. */
1093 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1095 struct thread *thread=arch_os_get_current_thread();
1098 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1099 * pseudo atomic until gc is finally allowed. */
1100 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1101 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1102 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1104 } else if (arch_pseudo_atomic_atomic(context)) {
1105 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
/* Arrange for the pseudo-atomic section to trap on exit, at which
 * point the pending stop will be processed. */
1106 arch_set_pseudo_atomic_interrupted(context);
1107 maybe_save_gc_mask_and_block_deferrables
1108 (os_context_sigmask_addr(context));
1109 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1113 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1115 /* Not PA and GC not inhibited -- we can stop now. */
1117 /* need the context stored so it can have registers scavenged */
1118 fake_foreign_function_call(context);
1120 /* Block everything. */
1122 thread_sigmask(SIG_BLOCK,&ss,0);
1124 /* Not pending anymore. */
1125 SetSymbolValue(GC_PENDING,NIL,thread);
1126 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1128 if(thread_state(thread)!=STATE_RUNNING) {
1129 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1130      fixnum_value(thread->state));
/* Park here until the GC thread moves us out of STATE_SUSPENDED. */
1133 set_thread_state(thread,STATE_SUSPENDED);
1134 FSHOW_SIGNAL((stderr,"suspended\n"));
1136 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1137 FSHOW_SIGNAL((stderr,"resumed\n"));
1139 if(thread_state(thread)!=STATE_RUNNING) {
1140 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1141      fixnum_value(thread_state(thread)));
1144 undo_fake_foreign_function_call(context);
/* Outermost (UNIX-installed) handler for non-deferrable ordinary
 * signals: on hard faults (SIGILL/SIGBUS, and SIGEMT off Linux) emits a
 * corruption warning first, then dispatches to interrupt_handle_now. */
1150 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1152 SAVE_ERRNO(context,void_context);
1153 #ifndef LISP_FEATURE_WIN32
1154 if ((signal == SIGILL) || (signal == SIGBUS)
1155 #ifndef LISP_FEATURE_LINUX
1156 || (signal == SIGEMT)
/* Fixed user-visible typo in the warning message: "recieved" -> "received". */
1159 corruption_warning_and_maybe_lose("Signal %d received", signal);
1161 interrupt_handle_now(signal, info, context);
1165 /* manipulate the signal context and stack such that when the handler
1166 * returns, it will call function instead of whatever it was doing
1170 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1171 extern int *context_eflags_addr(os_context_t *context);
1174 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1175 extern void post_signal_tramp(void);
1176 extern void call_into_lisp_tramp(void);
/* Rewrite CONTEXT so that, when the signal handler returns, execution
 * resumes in the Lisp function FUNCTION instead of at the interrupted
 * PC.  On x86/x86-64 this fakes a call frame for call_into_lisp on the
 * interrupted stack (returning through post_signal_tramp); on Darwin/x86
 * it goes through call_into_lisp_tramp with a heap-allocated register
 * save area; on other architectures it redirects the PC straight at the
 * function's code with fake control stack frames built for backtraces. */
1178 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1180 #ifndef LISP_FEATURE_WIN32
1181 check_gc_signals_unblocked_in_sigset_or_lose
1182 (os_context_sigmask_addr(context));
1184 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1185 void * fun=native_pointer(function);
1186 void *code = &(((struct simple_fun *) fun)->code);
1189 /* Build a stack frame showing `interrupted' so that the
1190 * user's backtrace makes (as much) sense (as usual) */
1192 /* fp state is saved and restored by call_into_lisp */
1193 /* FIXME: errno is not restored, but since current uses of this
1194 * function only call Lisp code that signals an error, it's not
1195 * much of a problem. In other words, running out of the control
1196 * stack between a syscall and (GET-ERRNO) may clobber errno if
1197 * something fails during signalling or in the handler. But I
1198 * can't see what can go wrong as long as there is no CONTINUE
1199 * like restart on them. */
1200 #ifdef LISP_FEATURE_X86
1201 /* Suppose the existence of some function that saved all
1202 * registers, called call_into_lisp, then restored GP registers and
1203 * returned. It would look something like this:
1211 pushl {address of function to call}
1212 call 0x8058db0 <call_into_lisp>
1219 * What we do here is set up the stack that call_into_lisp would
1220 * expect to see if it had been called by this code, and frob the
1221 * signal context so that signal return goes directly to call_into_lisp,
1222 * and when that function (and the lisp function it invoked) returns,
1223 * it returns to the second half of this imaginary function which
1224 * restores all registers and returns to C
1226 * For this to work, the latter part of the imaginary function
1227 * must obviously exist in reality. That would be post_signal_tramp
1230 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1232 #if defined(LISP_FEATURE_DARWIN)
1233 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1235 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1236 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1238 /* 1. os_validate (malloc/mmap) register_save_block
1239 * 2. copy register state into register_save_block
1240 * 3. put a pointer to register_save_block in a register in the context
1241 * 4. set the context's EIP to point to a trampoline which:
1242 * a. builds the fake stack frame from the block
1243 * b. frees the block
1244 * c. calls the function
1247 *register_save_area = *os_context_pc_addr(context);
1248 *(register_save_area + 1) = function;
1249 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1250 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1251 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1252 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1253 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1254 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1255 *(register_save_area + 8) = *context_eflags_addr(context);
1257 *os_context_pc_addr(context) =
1258 (os_context_register_t) call_into_lisp_tramp;
1259 *os_context_register_addr(context,reg_ECX) =
1260 (os_context_register_t) register_save_area;
1263 /* return address for call_into_lisp: */
1264 *(sp-15) = (u32)post_signal_tramp;
1265 *(sp-14) = function; /* args for call_into_lisp : function*/
1266 *(sp-13) = 0; /* arg array */
1267 *(sp-12) = 0; /* no. args */
1268 /* this order matches that used in POPAD */
1269 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1270 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1272 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1273 /* POPAD ignores the value of ESP: */
1275 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1277 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1278 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1279 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1280 *(sp-3)=*context_eflags_addr(context);
1281 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1282 *(sp-1)=*os_context_pc_addr(context);
1286 #elif defined(LISP_FEATURE_X86_64)
1287 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1289 /* return address for call_into_lisp: */
1290 *(sp-18) = (u64)post_signal_tramp;
/* Saved in the order post_signal_tramp's pops expect. */
1292 *(sp-17)=*os_context_register_addr(context,reg_R15);
1293 *(sp-16)=*os_context_register_addr(context,reg_R14);
1294 *(sp-15)=*os_context_register_addr(context,reg_R13);
1295 *(sp-14)=*os_context_register_addr(context,reg_R12);
1296 *(sp-13)=*os_context_register_addr(context,reg_R11);
1297 *(sp-12)=*os_context_register_addr(context,reg_R10);
1298 *(sp-11)=*os_context_register_addr(context,reg_R9);
1299 *(sp-10)=*os_context_register_addr(context,reg_R8);
1300 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1301 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1302 /* skip RBP and RSP */
1303 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1304 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1305 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1306 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1307 *(sp-3)=*context_eflags_addr(context);
1308 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1309 *(sp-1)=*os_context_pc_addr(context);
/* x86-64 System V calling convention: args in RDI/RSI/RDX. */
1311 *os_context_register_addr(context,reg_RDI) =
1312 (os_context_register_t)function; /* function */
1313 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1314 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1316 struct thread *th=arch_os_get_current_thread();
1317 build_fake_control_stack_frames(th,context);
1320 #ifdef LISP_FEATURE_X86
1322 #if !defined(LISP_FEATURE_DARWIN)
1323 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1324 *os_context_register_addr(context,reg_ECX) = 0;
1325 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
/* NetBSD exposes the user stack pointer as UESP rather than ESP. */
1327 *os_context_register_addr(context,reg_UESP) =
1328 (os_context_register_t)(sp-15);
1330 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1331 #endif /* __NETBSD__ */
1332 #endif /* LISP_FEATURE_DARWIN */
1334 #elif defined(LISP_FEATURE_X86_64)
1335 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1336 *os_context_register_addr(context,reg_RCX) = 0;
1337 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1338 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1340 /* this much of the calling convention is common to all
1342 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1343 *os_context_register_addr(context,reg_NARGS) = 0;
1344 *os_context_register_addr(context,reg_LIP) =
1345 (os_context_register_t)(unsigned long)code;
1346 *os_context_register_addr(context,reg_CFP) =
1347 (os_context_register_t)(unsigned long)current_control_frame_pointer;
1349 #ifdef ARCH_HAS_NPC_REGISTER
/* Delay-slot architectures: next-PC follows the redirected PC. */
1350 *os_context_npc_addr(context) =
1351 4 + *os_context_pc_addr(context);
1353 #ifdef LISP_FEATURE_SPARC
1354 *os_context_register_addr(context,reg_CODE) =
1355 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1357 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1361 /* KLUDGE: Theoretically the approach we use for undefined alien
1362 * variables should work for functions as well, but on PPC/Darwin
1363 * we get bus error at bogus addresses instead, hence this workaround,
1364 * that has the added benefit of automatically discriminating between
1365 * functions and variables.
/* Trampoline target for calls through an undefined alien function:
 * signals UNDEFINED-ALIEN-FUNCTION-ERROR on the Lisp side. */
1368 undefined_alien_function(void)
1370 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Dispatch a memory fault at ADDR against the per-thread guard pages
 * (control, binding and alien stacks, plus the undefined-alien page).
 * For an exhausted stack: drop that stack's guard-page protection (so
 * the error handler has headroom), arm the return-guard page, and
 * redirect the context to the corresponding Lisp error function.  For a
 * hit on a return-guard page: re-arm the guard page.  Returns whether
 * the fault was one of ours (visible tail elided here). */
1374 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1376 struct thread *th=arch_os_get_current_thread();
1378 /* note the os_context hackery here. When the signal handler returns,
1379 * it won't go back to what it was doing ... */
1380 if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1381 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1382 /* We hit the end of the control stack: disable guard page
1383 * protection so the error handler has some headroom, protect the
1384 * previous page so that we can catch returns from the guard page
1385 * and restore it. */
1386 corruption_warning_and_maybe_lose("Control stack exhausted");
1387 protect_control_stack_guard_page(0, NULL);
1388 protect_control_stack_return_guard_page(1, NULL);
1390 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1391 /* For the unfortunate case, when the control stack is
1392 * exhausted in a signal handler. */
1393 unblock_signals_in_context_and_maybe_warn(context);
1395 arrange_return_to_lisp_function
1396 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1399 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1400 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1401 /* We're returning from the guard page: reprotect it, and
1402 * unprotect this one. This works even if we somehow missed
1403 * the return-guard-page, and hit it on our way to new
1404 * exhaustion instead. */
1405 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1406 protect_control_stack_guard_page(1, NULL);
1407 protect_control_stack_return_guard_page(0, NULL);
/* Binding stack: same exhaustion/reprotect protocol as above. */
1410 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1411 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1412 corruption_warning_and_maybe_lose("Binding stack exhausted");
1413 protect_binding_stack_guard_page(0, NULL);
1414 protect_binding_stack_return_guard_page(1, NULL);
1416 /* For the unfortunate case, when the binding stack is
1417 * exhausted in a signal handler. */
1418 unblock_signals_in_context_and_maybe_warn(context);
1419 arrange_return_to_lisp_function
1420 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1423 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1424 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1425 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1426 protect_binding_stack_guard_page(1, NULL);
1427 protect_binding_stack_return_guard_page(0, NULL);
/* Alien stack: same exhaustion/reprotect protocol as above. */
1430 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1431 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1432 corruption_warning_and_maybe_lose("Alien stack exhausted");
1433 protect_alien_stack_guard_page(0, NULL);
1434 protect_alien_stack_return_guard_page(1, NULL);
1436 /* For the unfortunate case, when the alien stack is
1437 * exhausted in a signal handler. */
1438 unblock_signals_in_context_and_maybe_warn(context);
1439 arrange_return_to_lisp_function
1440 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1443 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1444 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1445 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1446 protect_alien_stack_guard_page(1, NULL);
1447 protect_alien_stack_return_guard_page(0, NULL);
/* Read/write of an undefined alien variable: signal the Lisp error. */
1450 else if (addr >= undefined_alien_address &&
1451 addr < undefined_alien_address + os_vm_page_size) {
1452 arrange_return_to_lisp_function
1453 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1460 * noise to install handlers
1463 #ifndef LISP_FEATURE_WIN32
1464 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1465 * they are blocked, in Linux 2.6 the default handler is invoked
1466 * instead that usually coredumps. One might hastily think that adding
1467 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1468 * the whole sa_mask is ignored, instead of merely not adding the signal
1469 * in question to the mask. That means if it's not blockable the
1470 * signal must be unblocked at the beginning of signal handlers.
1472 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1473 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1474 * will be unblocked in the sigmask during the signal handler. -- RMK
/* Result of the runtime SA_NODEFER probe: -1 = not yet tested,
 * 0 = broken on this kernel, 1 = works.  Written from a signal handler,
 * hence volatile.  The two signals below are used only by the probe. */
1477 static volatile int sigaction_nodefer_works = -1;
1479 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1480 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Probe handler for see_if_sigaction_nodefer_works: reads the current
 * blocked-signal set from inside the handler and records in
 * sigaction_nodefer_works whether SA_NODEFER left exactly the two
 * sa_mask signals blocked. */
1483 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1485 sigset_t empty, current;
1487 sigemptyset(&empty);
/* Fixed mis-encoded "&current" (text was garbled to "&curren;t"). */
1488 thread_sigmask(SIG_BLOCK, &empty, &current);
1489 /* There should be exactly two blocked signals: the two we added
1490 * to sa_mask when setting up the handler. NetBSD doesn't block
1491 * the signal we're handling when SA_NODEFER is set; Linux before
1492 * 2.6.13 or so also doesn't block the other signal when
1493 * SA_NODEFER is set. */
1494 for(i = 1; i < NSIG; i++)
1495 if (sigismember(&current, i) !=
1496 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1497 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1498 sigaction_nodefer_works = 0;
1500 if (sigaction_nodefer_works == -1)
1501 sigaction_nodefer_works = 1;
/* Install the probe handler with SA_NODEFER and a two-signal sa_mask,
 * send ourselves the test signal, and spin until the handler has set
 * sigaction_nodefer_works; then restore the previous disposition. */
1505 see_if_sigaction_nodefer_works(void)
1507 struct sigaction sa, old_sa;
1509 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1510 sa.sa_sigaction = sigaction_nodefer_test_handler;
1511 sigemptyset(&sa.sa_mask);
1512 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1513 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1514 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1515 /* Make sure no signals are blocked. */
1518 sigemptyset(&empty);
1519 thread_sigmask(SIG_SETMASK, &empty, 0);
1521 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* Busy-wait for the (synchronously delivered) handler to finish. */
1522 while (sigaction_nodefer_works == -1);
1523 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1526 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1527 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Outermost handler used when SA_NODEFER is broken: explicitly unblocks
 * the signal being handled before dispatching to interrupt_handle_now. */
1530 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1532 SAVE_ERRNO(context,void_context);
1535 sigemptyset(&unblock);
1536 sigaddset(&unblock, signal);
1537 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1538 interrupt_handle_now(signal, info, context);
/* As unblock_me_trampoline, but for signals with a C-level handler in
 * interrupt_low_level_handlers[]. */
1543 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1545 SAVE_ERRNO(context,void_context);
1548 sigemptyset(&unblock);
1549 sigaddset(&unblock, signal);
1550 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1551 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Outermost handler for non-deferrable signals with a C-level handler:
 * saves errno and dispatches directly, no unblocking needed. */
1556 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1558 SAVE_ERRNO(context,void_context);
1559 (*interrupt_low_level_handlers[signal])(signal, info, context);
/* Install HANDLER as the C-level ("low level") handler for SIGNAL,
 * choosing the appropriate outermost trampoline: deferral for
 * deferrable signals, explicit unblocking when SA_NODEFER is broken,
 * direct dispatch otherwise.  Also records HANDLER in
 * interrupt_low_level_handlers[] (0 for SIG_DFL). */
1564 undoably_install_low_level_interrupt_handler (int signal,
1565                                               interrupt_handler_t handler)
1567 struct sigaction sa;
1569 if (0 > signal || signal >= NSIG) {
1570 lose("bad signal number %d\n", signal);
1573 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1574 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1575 else if (sigismember(&deferrable_sigset,signal))
1576 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1577 else if (!sigaction_nodefer_works &&
1578 !sigismember(&blockable_sigset, signal))
1579 sa.sa_sigaction = low_level_unblock_me_trampoline;
1581 sa.sa_sigaction = low_level_handle_now_handler;
/* Block all blockable signals while the handler runs. */
1583 sigcopyset(&sa.sa_mask, &blockable_sigset);
1584 sa.sa_flags = SA_SIGINFO | SA_RESTART
1585 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1586 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Memory faults may arrive on an exhausted control stack, so run the
 * fault handler on the alternate signal stack. */
1587 if((signal==SIG_MEMORY_FAULT))
1588 sa.sa_flags |= SA_ONSTACK;
1591 sigaction(signal, &sa, NULL);
1592 interrupt_low_level_handlers[signal] =
1593 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1597 /* This is called from Lisp. */
/* Install HANDLER as the Lisp-level handler for SIGNAL and return the
 * previous handler (as a lispobj).  The outermost UNIX handler is only
 * replaced when no C-level handler owns the signal; otherwise just the
 * interrupt_handlers[] entry is updated.  Performed with blockable
 * signals masked to avoid racing with delivery. */
1599 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*))
1601 #ifndef LISP_FEATURE_WIN32
1602 struct sigaction sa;
1604 union interrupt_handler oldhandler;
1606 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1608 thread_sigmask(SIG_BLOCK, &blockable_sigset, &old);
1610 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1611 (unsigned int)interrupt_low_level_handlers[signal]));
1612 if (interrupt_low_level_handlers[signal]==0) {
1613 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1614 ARE_SAME_HANDLER(handler, SIG_IGN))
1615 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1616 else if (sigismember(&deferrable_sigset, signal))
1617 sa.sa_sigaction = maybe_now_maybe_later;
1618 else if (!sigaction_nodefer_works &&
1619 !sigismember(&blockable_sigset, signal))
1620 sa.sa_sigaction = unblock_me_trampoline;
1622 sa.sa_sigaction = interrupt_handle_now_handler;
1624 sigcopyset(&sa.sa_mask, &blockable_sigset);
1625 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1626 (sigaction_nodefer_works ? SA_NODEFER : 0);
1627 sigaction(signal, &sa, NULL);
1630 oldhandler = interrupt_handlers[signal];
1631 interrupt_handlers[signal].c = handler;
1633 thread_sigmask(SIG_SETMASK, &old, 0);
1635 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1637 return (unsigned long)oldhandler.lisp;
1639 /* Probably-wrong Win32 hack */
1644 /* This must not go through lisp as it's allowed anytime, even when on
/* SIGABRT is always fatal: report via lose() rather than the Lisp
 * error machinery. */
1647 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
1649 lose("SIGABRT received.\n");
/* One-time signal-handling initialization: probe SA_NODEFER, build the
 * deferrable/blockable/gc signal sets, default all Lisp-level handler
 * slots, and install the always-fatal SIGABRT handler. */
1653 interrupt_init(void)
1655 #ifndef LISP_FEATURE_WIN32
1657 SHOW("entering interrupt_init()");
1658 see_if_sigaction_nodefer_works();
1659 sigemptyset(&deferrable_sigset);
1660 sigemptyset(&blockable_sigset);
1661 sigemptyset(&gc_sigset);
1662 sigaddset_deferrable(&deferrable_sigset);
1663 sigaddset_blockable(&blockable_sigset);
1664 sigaddset_gc(&gc_sigset);
1666 /* Set up high level handler information. */
1667 for (i = 0; i < NSIG; i++) {
1668 interrupt_handlers[i].c =
1669 /* (The cast here blasts away the distinction between
1670 * SA_SIGACTION-style three-argument handlers and
1671 * signal(..)-style one-argument handlers, which is OK
1672 * because it works to call the 1-argument form where the
1673 * 3-argument form is expected.) */
1674 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
1676 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1677 SHOW("returning from interrupt_init()");
1681 #ifndef LISP_FEATURE_WIN32
/* Accessor for siginfo's si_code field (kept out of Lisp-visible
 * headers; POSIX-only). */
1683 siginfo_code(siginfo_t *info)
1685 return info->si_code;
/* Most recent faulting address, exposed so Lisp can report it. */
1687 os_vm_address_t current_memory_fault_address;
/* Report an unhandled memory fault at ADDR: record the address, warn
 * about probable corruption, and transfer to the Lisp
 * MEMORY-FAULT-ERROR function (via context redirection where the C
 * stack is the control stack, otherwise by direct funcall). */
1690 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1692 /* FIXME: This is lossy: if we get another memory fault (eg. from
1693 * another thread) before lisp has read this, we lose the information.
1694 * However, since this is mostly informative, we'll live with that for
1695 * now -- some address is better than no address in this case.
1697 current_memory_fault_address = addr;
1698 /* To allow debugging memory faults in signal handlers and such. */
1699 corruption_warning_and_maybe_lose("Memory fault at %x", addr);
1700 unblock_signals_in_context_and_maybe_warn(context);
1701 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1702 arrange_return_to_lisp_function(context,
1703 StaticSymbolFunction(MEMORY_FAULT_ERROR));
1705 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Last-resort handler for trap codes handle_trap doesn't recognize:
 * wraps the context in a SAP and calls UNHANDLED-TRAP-ERROR on the Lisp
 * side, which is expected not to return. */
1711 unhandled_trap_error(os_context_t *context)
1713 lispobj context_sap;
1714 fake_foreign_function_call(context);
/* alloc_sap below may cons, so GC signals must be deliverable. */
1715 unblock_gc_signals();
1716 context_sap = alloc_sap(context);
1717 #ifndef LISP_FEATURE_WIN32
1718 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1720 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1721 lose("UNHANDLED-TRAP-ERROR fell through");
1724 /* Common logic for trapping instructions. How we actually handle each
1725 * case is highly architecture dependent, but the overall shape is
1728 handle_trap(os_context_t *context, int trap)
1731 case trap_PendingInterrupt:
1732 FSHOW((stderr, "/<trap pending interrupt>\n"));
1733 arch_skip_instruction(context);
1734 interrupt_handle_pending(context);
1738 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1739 interrupt_internal_error(context, trap==trap_Cerror);
1741 case trap_Breakpoint:
1742 arch_handle_breakpoint(context);
1744 case trap_FunEndBreakpoint:
1745 arch_handle_fun_end_breakpoint(context);
1747 #ifdef trap_AfterBreakpoint
1748 case trap_AfterBreakpoint:
1749 arch_handle_after_breakpoint(context);
1752 #ifdef trap_SingleStepAround
1753 case trap_SingleStepAround:
1754 case trap_SingleStepBefore:
1755 arch_handle_single_step_trap(context, trap);
1759 fake_foreign_function_call(context);
1760 lose("%%PRIMITIVE HALT called; the party is over.\n");
1762 unhandled_trap_error(context);