2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or maybe_now_maybe_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
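/* Schematically, then (an informal sketch of the above, eliding the
 * various trampolines involved):
 *
 *   ordinary signal:  kernel -> interrupt_handle_now_handler(..) or
 *                               maybe_now_maybe_later(..)
 *                            -> interrupt_handle_now(..)
 *                            -> interrupt_handlers[signal] (Lisp or C handler)
 *
 *   special signal:   kernel -> a low-level trampoline
 *                            -> interrupt_low_level_handlers[signal]
 *                               (which may never pass control to Lisp) */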
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* Under Linux on some architectures, we appear to have to restore the
72 * FPU control word from the context, as after the signal is delivered
73 * we appear to have a null FPU control word. */
74 #if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
75 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
76 os_context_t *context = arch_os_get_context(&void_context); \
77 os_restore_fp_control(context);
79 #define RESTORE_FP_CONTROL_WORD(context,void_context) \
80 os_context_t *context = arch_os_get_context(&void_context);
83 /* These are to be used in signal handlers. Currently all handlers are
86 * interrupt_handle_now_handler
87 * maybe_now_maybe_later
88 * unblock_me_trampoline
89 * low_level_handle_now_handler
90 * low_level_maybe_now_maybe_later
91 * low_level_unblock_me_trampoline
93 * This gives us a single point of control (or six) over errno, fp
94 * control word, and fixing up signal context on sparc.
96 * The SPARC/Linux platform doesn't quite do signals the way we want
97 * them done. The third argument in the handler isn't filled in by the
98 * kernel properly, so we fix it up ourselves in the
99 * arch_os_get_context(..) function. -- CSR, 2002-07-23
101 #define SAVE_ERRNO(context,void_context) \
103 int _saved_errno = errno; \
104 RESTORE_FP_CONTROL_WORD(context,void_context); \
107 #define RESTORE_ERRNO \
109 errno = _saved_errno; \
112 static void run_deferred_handler(struct interrupt_data *data,
113 os_context_t *context);
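/* For exposition only, the shape shared by the six outermost handlers
 * listed above looks roughly like this (not compiled; handle_it() is a
 * stand-in for the real per-handler work, e.g. deferring the signal or
 * calling interrupt_handle_now()): */
#if 0
static void
example_outermost_handler(int signal, siginfo_t *info, void *void_context)
{
    SAVE_ERRNO(context,void_context);
    handle_it(signal, info, context);
    RESTORE_ERRNO;
}
#endif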
114 #ifndef LISP_FEATURE_WIN32
115 static void store_signal_data_for_later (struct interrupt_data *data,
116 void *handler, int signal,
118 os_context_t *context);
121 fill_current_sigmask(sigset_t *sigset)
123 /* Get the current sigmask, by blocking the empty set. */
126 thread_sigmask(SIG_BLOCK, &empty, sigset);
130 sigaddset_deferrable(sigset_t *s)
132 sigaddset(s, SIGHUP);
133 sigaddset(s, SIGINT);
134 sigaddset(s, SIGTERM);
135 sigaddset(s, SIGQUIT);
136 sigaddset(s, SIGPIPE);
137 sigaddset(s, SIGALRM);
138 sigaddset(s, SIGURG);
139 sigaddset(s, SIGTSTP);
140 sigaddset(s, SIGCHLD);
142 #ifndef LISP_FEATURE_HPUX
143 sigaddset(s, SIGXCPU);
144 sigaddset(s, SIGXFSZ);
146 sigaddset(s, SIGVTALRM);
147 sigaddset(s, SIGPROF);
148 sigaddset(s, SIGWINCH);
152 sigdelset_deferrable(sigset_t *s)
154 sigdelset(s, SIGHUP);
155 sigdelset(s, SIGINT);
156 sigdelset(s, SIGQUIT);
157 sigdelset(s, SIGPIPE);
158 sigdelset(s, SIGALRM);
159 sigdelset(s, SIGURG);
160 sigdelset(s, SIGTSTP);
161 sigdelset(s, SIGCHLD);
163 #ifndef LISP_FEATURE_HPUX
164 sigdelset(s, SIGXCPU);
165 sigdelset(s, SIGXFSZ);
167 sigdelset(s, SIGVTALRM);
168 sigdelset(s, SIGPROF);
169 sigdelset(s, SIGWINCH);
173 sigaddset_blockable(sigset_t *sigset)
175 sigaddset_deferrable(sigset);
176 sigaddset_gc(sigset);
180 sigaddset_gc(sigset_t *sigset)
182 #ifdef LISP_FEATURE_SB_THREAD
183 sigaddset(sigset,SIG_STOP_FOR_GC);
188 sigdelset_gc(sigset_t *sigset)
190 #ifdef LISP_FEATURE_SB_THREAD
191 sigdelset(sigset,SIG_STOP_FOR_GC);
195 /* initialized in interrupt_init */
196 sigset_t deferrable_sigset;
197 sigset_t blockable_sigset;
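/* Note: as built by sigaddset_blockable() above and interrupt_init()
 * below, blockable_sigset is deferrable_sigset plus the gc signal
 * (SIG_STOP_FOR_GC, when built with threads). */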
202 deferrables_blocked_in_sigset_p(sigset_t *sigset)
204 #if !defined(LISP_FEATURE_WIN32)
206 for(i = 1; i < NSIG; i++) {
207 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
215 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
217 #if !defined(LISP_FEATURE_WIN32)
219 for(i = 1; i < NSIG; i++) {
220 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
221 lose("deferrable signal %d blocked\n",i);
227 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
229 #if !defined(LISP_FEATURE_WIN32)
231 for(i = 1; i < NSIG; i++) {
232 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
233 lose("deferrable signal %d not blocked\n",i);
239 check_deferrables_unblocked_or_lose(void)
241 #if !defined(LISP_FEATURE_WIN32)
243 fill_current_sigmask(&current);
244 check_deferrables_unblocked_in_sigset_or_lose(&current);
249 check_deferrables_blocked_or_lose(void)
251 #if !defined(LISP_FEATURE_WIN32)
253 fill_current_sigmask(&current);
254 check_deferrables_blocked_in_sigset_or_lose(&current);
259 check_blockables_blocked_or_lose(void)
261 #if !defined(LISP_FEATURE_WIN32)
264 fill_current_sigmask(&current);
265 for(i = 1; i < NSIG; i++) {
266 if (sigismember(&blockable_sigset, i) && !sigismember(&current, i))
267 lose("blockable signal %d not blocked\n",i);
273 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
275 #if !defined(LISP_FEATURE_WIN32)
277 for(i = 1; i < NSIG; i++) {
278 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
279 lose("gc signal %d blocked\n",i);
285 check_gc_signals_unblocked_or_lose(void)
287 #if !defined(LISP_FEATURE_WIN32)
289 fill_current_sigmask(&current);
290 check_gc_signals_unblocked_in_sigset_or_lose(&current);
295 check_interrupts_enabled_or_lose(os_context_t *context)
297 struct thread *thread=arch_os_get_current_thread();
298 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
299 lose("interrupts not enabled\n");
300 if (arch_pseudo_atomic_atomic(context))
301 lose ("in pseudo atomic section\n");
304 /* Save sigset (or the current sigmask if 0) if there is no pending
305 * handler, because that means that deferrables are already blocked.
306 * The purpose is to avoid losing the pending gc signal if a
307 * deferrable interrupt async unwinds between clearing the pseudo
308 * atomic and trapping to GC.*/
310 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
312 #ifndef LISP_FEATURE_WIN32
313 struct thread *thread = arch_os_get_current_thread();
314 struct interrupt_data *data = thread->interrupt_data;
316 /* Obviously, this function is called when signals may not be
317 * blocked. Let's make sure we are not interrupted. */
318 thread_sigmask(SIG_BLOCK, &blockable_sigset, &oldset);
319 #ifndef LISP_FEATURE_SB_THREAD
320 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
322 if (data->gc_blocked_deferrables)
323 lose("gc_blocked_deferrables already true\n");
325 if ((!data->pending_handler) &&
326 (!data->gc_blocked_deferrables)) {
327 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
328 data->gc_blocked_deferrables = 1;
330 /* This is the sigmask of some context. */
331 sigcopyset(&data->pending_mask, sigset);
332 sigaddset_deferrable(sigset);
333 thread_sigmask(SIG_SETMASK,&oldset,0);
336 /* Operating on the current sigmask. Save oldset and
337 * unblock gc signals. In the end, this is equivalent to
338 * blocking the deferrables. */
339 sigcopyset(&data->pending_mask, &oldset);
340 unblock_gc_signals();
344 thread_sigmask(SIG_SETMASK,&oldset,0);
348 /* Are we leaving WITH-GCING and already running with interrupts
349 * enabled, without the protection of *GC-INHIBIT* T and there is gc
350 * (or stop for gc) pending, but we haven't trapped yet? */
352 in_leaving_without_gcing_race_p(struct thread *thread)
354 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
355 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
356 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
357 ((SymbolValue(GC_PENDING,thread) != NIL)
358 #if defined(LISP_FEATURE_SB_THREAD)
359 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
364 /* Check our baroque invariants. */
366 check_interrupt_context_or_lose(os_context_t *context)
368 #ifndef LISP_FEATURE_WIN32
369 struct thread *thread = arch_os_get_current_thread();
370 struct interrupt_data *data = thread->interrupt_data;
371 int interrupt_deferred_p = (data->pending_handler != 0);
372 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
373 sigset_t *sigset = os_context_sigmask_addr(context);
374 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
375 * handle_allocation_trap. */
376 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
377 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
378 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
379 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
380 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
381 int in_race_p = in_leaving_without_gcing_race_p(thread);
382 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
383 * section and trapping, a SIG_STOP_FOR_GC would see the next
384 * check fail; for this reason the sig_stop_for_gc handler does not
385 * call this function. */
386 if (interrupt_deferred_p) {
387 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
388 lose("Stray deferred interrupt.\n");
391 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
392 lose("GC_PENDING, but why?\n");
393 #if defined(LISP_FEATURE_SB_THREAD)
395 int stop_for_gc_pending =
396 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
397 if (stop_for_gc_pending)
398 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
399 lose("STOP_FOR_GC_PENDING, but why?\n");
403 if (interrupt_pending && !interrupt_deferred_p)
404 lose("INTERRUPT_PENDING but not pending handler.\n");
405 if ((data->gc_blocked_deferrables) && interrupt_pending)
406 lose("gc_blocked_deferrables and interrupt pending\n.");
407 if (data->gc_blocked_deferrables)
408 check_deferrables_blocked_in_sigset_or_lose(sigset);
409 if (interrupt_pending || interrupt_deferred_p)
410 check_deferrables_blocked_in_sigset_or_lose(sigset);
412 check_deferrables_unblocked_in_sigset_or_lose(sigset);
413 /* If deferrables are unblocked then we are open to signals
414 * that run lisp code. */
415 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
420 /* When we catch an internal error, should we pass it back to Lisp to
421 * be handled in a high-level way? (Early in cold init, the answer is
422 * 'no', because Lisp is still too brain-dead to handle anything.
423 * After sufficient initialization has been completed, the answer
425 boolean internal_errors_enabled = 0;
427 #ifndef LISP_FEATURE_WIN32
429 void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, os_context_t*);
431 union interrupt_handler interrupt_handlers[NSIG];
434 block_blockable_signals(void)
436 #ifndef LISP_FEATURE_WIN32
437 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
442 block_deferrable_signals(void)
444 #ifndef LISP_FEATURE_WIN32
445 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
450 unblock_deferrable_signals_in_sigset(sigset_t *sigset)
452 #ifndef LISP_FEATURE_WIN32
453 if (interrupt_handler_pending_p())
454 lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
455 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
456 sigdelset_deferrable(sigset);
461 unblock_deferrable_signals(void)
463 #ifndef LISP_FEATURE_WIN32
464 if (interrupt_handler_pending_p())
465 lose("unblock_deferrable_signals: losing proposition\n");
466 check_gc_signals_unblocked_or_lose();
467 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
472 unblock_gc_signals(void)
474 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
475 thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
480 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
482 #ifndef LISP_FEATURE_WIN32
484 sigset_t *sigset=os_context_sigmask_addr(context);
485 for(i = 1; i < NSIG; i++) {
486 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
489 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
490 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
491 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
496 sigdelset_gc(sigset);
497 if (!interrupt_handler_pending_p()) {
498 unblock_deferrable_signals_in_sigset(sigset);
505 * utility routines used by various signal handlers
509 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
511 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
515 /* Build a fake stack frame or frames */
517 current_control_frame_pointer =
518 (lispobj *)(unsigned long)
519 (*os_context_register_addr(context, reg_CSP));
520 if ((lispobj *)(unsigned long)
521 (*os_context_register_addr(context, reg_CFP))
522 == current_control_frame_pointer) {
523 /* There is a small window during call where the callee's
524 * frame isn't built yet. */
525 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
526 == FUN_POINTER_LOWTAG) {
527 /* We have called, but not built the new frame, so
528 * build it for them. */
529 current_control_frame_pointer[0] =
530 *os_context_register_addr(context, reg_OCFP);
531 current_control_frame_pointer[1] =
532 *os_context_register_addr(context, reg_LRA);
533 current_control_frame_pointer += 8;
534 /* Build our frame on top of it. */
535 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
538 /* We haven't yet called, build our frame as if the
539 * partial frame wasn't there. */
540 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
543 /* We can't tell whether we are still in the caller if it had to
544 * allocate a stack frame due to stack arguments. */
545 /* This observation provoked some past CMUCL maintainer to ask
546 * "Can anything strange happen during return?" */
549 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
552 current_control_stack_pointer = current_control_frame_pointer + 8;
554 current_control_frame_pointer[0] = oldcont;
555 current_control_frame_pointer[1] = NIL;
556 current_control_frame_pointer[2] =
557 (lispobj)(*os_context_register_addr(context, reg_CODE));
561 /* Stores the context for gc to scavenge and builds fake stack
564 fake_foreign_function_call(os_context_t *context)
567 struct thread *thread=arch_os_get_current_thread();
569 /* context_index incrementing must not be interrupted */
570 check_blockables_blocked_or_lose();
572 /* Get current Lisp state from context. */
574 dynamic_space_free_pointer =
575 (lispobj *)(unsigned long)
576 (*os_context_register_addr(context, reg_ALLOC));
577 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
578 /* dynamic_space_free_pointer); */
579 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
580 if ((long)dynamic_space_free_pointer & 1) {
581 lose("dead in fake_foreign_function_call, context = %x\n", context);
584 /* why don't PPC and SPARC do something like this: */
585 #if defined(LISP_FEATURE_HPPA)
586 if ((long)dynamic_space_free_pointer & 4) {
587 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
592 current_binding_stack_pointer =
593 (lispobj *)(unsigned long)
594 (*os_context_register_addr(context, reg_BSP));
597 build_fake_control_stack_frames(thread,context);
599 /* Do dynamic binding of the active interrupt context index
600 * and save the context in the context array. */
602 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
604 if (context_index >= MAX_INTERRUPTS) {
605 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
608 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
609 make_fixnum(context_index + 1),thread);
611 thread->interrupt_contexts[context_index] = context;
613 #ifdef FOREIGN_FUNCTION_CALL_FLAG
614 foreign_function_call_active = 1;
618 /* blocks all blockable signals. If you are calling from a signal handler,
619 * the usual signal mask will be restored from the context when the handler
620 * finishes. Otherwise, be careful */
622 undo_fake_foreign_function_call(os_context_t *context)
624 struct thread *thread=arch_os_get_current_thread();
625 /* Block all blockable signals. */
626 block_blockable_signals();
628 #ifdef FOREIGN_FUNCTION_CALL_FLAG
629 foreign_function_call_active = 0;
632 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
636 /* Put the dynamic space free pointer back into the context. */
637 *os_context_register_addr(context, reg_ALLOC) =
638 (unsigned long) dynamic_space_free_pointer
639 | (*os_context_register_addr(context, reg_ALLOC)
642 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
644 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
649 /* a handler for the signal caused by execution of a trap opcode
650 * signalling an internal error */
652 interrupt_internal_error(os_context_t *context, boolean continuable)
656 fake_foreign_function_call(context);
658 if (!internal_errors_enabled) {
659 describe_internal_error(context);
660 /* There's no good way to recover from an internal error
661 * before the Lisp error handling mechanism is set up. */
662 lose("internal error too early in init, can't recover\n");
665 /* Allocate the SAP object while the interrupts are still
667 unblock_gc_signals();
668 context_sap = alloc_sap(context);
670 #ifndef LISP_FEATURE_WIN32
671 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
674 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
675 /* Workaround for blocked SIGTRAP. */
678 sigemptyset(&newset);
679 sigaddset(&newset, SIGTRAP);
680 thread_sigmask(SIG_UNBLOCK, &newset, 0);
684 SHOW("in interrupt_internal_error");
686 /* Display some rudimentary debugging information about the
687 * error, so that even if the Lisp error handler gets badly
688 * confused, we have a chance to determine what's going on. */
689 describe_internal_error(context);
691 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
692 continuable ? T : NIL);
694 undo_fake_foreign_function_call(context); /* blocks signals again */
696 arch_skip_instruction(context);
700 interrupt_handler_pending_p(void)
702 struct thread *thread = arch_os_get_current_thread();
703 struct interrupt_data *data = thread->interrupt_data;
704 return (data->pending_handler != 0);
708 interrupt_handle_pending(os_context_t *context)
710 /* There are three ways we can get here. First, if an interrupt
711 * occurs within pseudo-atomic, it will be deferred, and we'll
712 * trap to here at the end of the pseudo-atomic block. Second, if
713 * the GC (in alloc()) decides that a GC is required, it will set
714 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
715 * and alloc() is always called from within pseudo-atomic, and
716 * thus we end up here again. Third, when calling GC-ON or at the
717 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
718 * here if there is a pending GC. Fourth, ahem, at the end of
719 * WITHOUT-INTERRUPTS (bar complications with nesting). */
721 /* Win32 only needs to handle the GC cases (for now?) */
723 struct thread *thread = arch_os_get_current_thread();
724 struct interrupt_data *data = thread->interrupt_data;
726 if (arch_pseudo_atomic_atomic(context)) {
727 lose("Handling pending interrupt in pseduo atomic.");
730 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
732 check_blockables_blocked_or_lose();
734 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
735 * handler, then the pending mask was saved and
736 * gc_blocked_deferrables set. Hence, there can be no pending
737 * handler and it's safe to restore the pending mask.
739 * Note that if gc_blocked_deferrables is false we may still have
740 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
741 * pseudo atomic was interrupted by a deferrable first. */
742 if (data->gc_blocked_deferrables) {
743 if (data->pending_handler)
744 lose("GC blocked deferrables but still got a pending handler.");
745 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
746 lose("GC blocked deferrables while GC is inhibited.");
747 /* Restore the saved signal mask from the original signal (the
748 * one that interrupted us during the critical section) into
749 * the os_context for the signal we're currently in the
750 * handler for. This should ensure that when we return from
751 * the handler the blocked signals are unblocked. */
752 #ifndef LISP_FEATURE_WIN32
753 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
755 data->gc_blocked_deferrables = 0;
758 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
759 void *original_pending_handler = data->pending_handler;
761 #ifdef LISP_FEATURE_SB_THREAD
762 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
763 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
764 * the signal handler if it actually stops us. */
765 arch_clear_pseudo_atomic_interrupted(context);
766 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
769 /* Test for T and not for != NIL since the value :IN-PROGRESS
770 * is used in SUB-GC as part of the mechanism to suppress
772 if (SymbolValue(GC_PENDING,thread) == T) {
774 /* Two reasons for doing this. First, if there is a
775 * pending handler we don't want to run. Second, we are
776 * going to clear pseudo atomic interrupted to avoid
777 * spurious trapping on every allocation in SUB_GC and
778 * having a pending handler with interrupts enabled and
779 * without pseudo atomic interrupted breaks an
781 if (data->pending_handler) {
782 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
783 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
786 arch_clear_pseudo_atomic_interrupted(context);
788 /* GC_PENDING is cleared in SUB-GC, or if another thread
789 * is doing a gc already we will get a SIG_STOP_FOR_GC and
790 * that will clear it.
792 * If there is a pending handler or gc was triggered in a
793 * signal handler then maybe_gc won't run POST_GC and will
794 * return normally. */
795 if (!maybe_gc(context))
796 lose("GC not inhibited but maybe_gc did not GC.");
798 if (data->pending_handler) {
802 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
803 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
804 * GC-PENDING is not NIL then we cannot trap on pseudo
805 * atomic due to GC (see if(GC_PENDING) logic in
806 * cheneygc.c and gencgc.c), plus there is an outer
807 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
809 lose("Trapping to run pending handler while GC in progress.");
812 check_blockables_blocked_or_lose();
814 /* No GC shall be lost. If SUB_GC triggers another GC then
815 * that should be handled on the spot. */
816 if (SymbolValue(GC_PENDING,thread) != NIL)
817 lose("GC_PENDING after doing gc.");
818 #ifdef LISP_FEATURE_SB_THREAD
819 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
820 lose("STOP_FOR_GC_PENDING after doing gc.");
822 /* Check two things. First, that gc does not clobber a handler
823 * that's already pending. Second, that there is no interrupt
824 * lossage: if original_pending_handler was NULL then even if
825 * an interrupt arrived during GC (POST-GC, really) it was
827 if (original_pending_handler != data->pending_handler)
828 lose("pending handler changed in gc: %x -> %d.",
829 original_pending_handler, data->pending_handler);
832 #ifndef LISP_FEATURE_WIN32
833 /* There may be no pending handler, because it was only a gc that
834 * had to be executed or because Lisp is a bit too eager to call
835 * DO-PENDING-INTERRUPT. */
836 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
837 (data->pending_handler)) {
838 /* No matter how we ended up here, clear both
839 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
840 * because we checked above that there is no GC pending. */
841 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
842 arch_clear_pseudo_atomic_interrupted(context);
843 /* Restore the sigmask in the context. */
844 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
845 run_deferred_handler(data, context);
847 /* It is possible that the end of this function was reached
848 * without ever actually doing anything; the tests in Lisp for
849 * when to call receive-pending-interrupt are not exact. */
850 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
856 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
858 #ifdef FOREIGN_FUNCTION_CALL_FLAG
859 boolean were_in_lisp;
861 union interrupt_handler handler;
863 check_blockables_blocked_or_lose();
865 #ifndef LISP_FEATURE_WIN32
866 if (sigismember(&deferrable_sigset,signal))
867 check_interrupts_enabled_or_lose(context);
870 handler = interrupt_handlers[signal];
872 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
876 #ifdef FOREIGN_FUNCTION_CALL_FLAG
877 were_in_lisp = !foreign_function_call_active;
881 fake_foreign_function_call(context);
884 FSHOW_SIGNAL((stderr,
885 "/entering interrupt_handle_now(%d, info, context)\n",
888 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
890 /* This can happen if someone tries to ignore or default one
891 * of the signals we need for runtime support, and the runtime
892 * support decides to pass on it. */
893 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
895 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
896 /* Once we've decided what to do about contexts in a
897 * return-elsewhere world (the original context will no longer
898 * be available; should we copy it or was nobody using it anyway?)
899 * then we should convert this to return-elsewhere */
901 /* CMUCL comment said "Allocate the SAPs while the interrupts
902 * are still disabled.". I (dan, 2003.08.21) assume this is
903 * because we're not in pseudoatomic and allocation shouldn't
904 * be interrupted. In which case it's no longer an issue as
905 * all our allocation from C now goes through a PA wrapper,
906 * but still, doesn't hurt.
908 * Yeah, but non-gencgc platforms don't really wrap allocation
909 * in PA. MG - 2005-08-29 */
911 lispobj info_sap, context_sap;
912 /* Leave deferrable signals blocked, the handler itself will
913 * allow signals again when it sees fit. */
914 unblock_gc_signals();
915 context_sap = alloc_sap(context);
916 info_sap = alloc_sap(info);
918 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
920 funcall3(handler.lisp,
925 /* This cannot happen in sane circumstances. */
927 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
929 #ifndef LISP_FEATURE_WIN32
930 /* Allow signals again. */
931 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
933 (*handler.c)(signal, info, context);
936 #ifdef FOREIGN_FUNCTION_CALL_FLAG
940 undo_fake_foreign_function_call(context); /* block signals again */
943 FSHOW_SIGNAL((stderr,
944 "/returning from interrupt_handle_now(%d, info, context)\n",
948 /* This is called at the end of a critical section if the indications
949 * are that some signal was deferred during the section. Note that as
950 * far as C or the kernel is concerned we dealt with the signal
951 * already; we're just doing the Lisp-level processing now that we
954 run_deferred_handler(struct interrupt_data *data, os_context_t *context)
956 /* The pending_handler may enable interrupts and then another
957 * interrupt may hit, overwrite interrupt_data, so reset the
958 * pending handler before calling it. Trust the handler to finish
959 * with the siginfo before enabling interrupts. */
960 void (*pending_handler) (int, siginfo_t*, os_context_t*) =
961 data->pending_handler;
963 data->pending_handler=0;
964 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
965 (*pending_handler)(data->pending_signal,&(data->pending_info), context);
968 #ifndef LISP_FEATURE_WIN32
970 maybe_defer_handler(void *handler, struct interrupt_data *data,
971 int signal, siginfo_t *info, os_context_t *context)
973 struct thread *thread=arch_os_get_current_thread();
975 check_blockables_blocked_or_lose();
977 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
978 lose("interrupt already pending\n");
979 if (thread->interrupt_data->pending_handler)
980 lose("there is a pending handler already (PA)\n");
981 if (data->gc_blocked_deferrables)
982 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
983 check_interrupt_context_or_lose(context);
984 /* If interrupts are disabled then INTERRUPT_PENDING is set and
985 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
986 * atomic section inside a WITHOUT-INTERRUPTS.
988 * Also, if in_leaving_without_gcing_race_p then
989 * interrupt_handle_pending is going to be called soon, so
990 * stashing the signal away is safe.
992 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
993 in_leaving_without_gcing_race_p(thread)) {
994 store_signal_data_for_later(data,handler,signal,info,context);
995 SetSymbolValue(INTERRUPT_PENDING, T,thread);
996 FSHOW_SIGNAL((stderr,
997 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
998 (unsigned int)handler,signal,
999 in_leaving_without_gcing_race_p(thread)));
1000 check_interrupt_context_or_lose(context);
1003 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1004 * actually use its argument for anything on x86, so this branch
1005 * may succeed even when context is null (gencgc alloc()) */
1006 if (arch_pseudo_atomic_atomic(context)) {
1007 store_signal_data_for_later(data,handler,signal,info,context);
1008 arch_set_pseudo_atomic_interrupted(context);
1009 FSHOW_SIGNAL((stderr,
1010 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1011 (unsigned int)handler,signal));
1012 check_interrupt_context_or_lose(context);
1015 FSHOW_SIGNAL((stderr,
1016 "/maybe_defer_handler(%x,%d): not deferred\n",
1017 (unsigned int)handler,signal));
1022 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1024 siginfo_t *info, os_context_t *context)
1026 if (data->pending_handler)
1027 lose("tried to overwrite pending interrupt handler %x with %x\n",
1028 data->pending_handler, handler);
1030 lose("tried to defer null interrupt handler\n");
1031 data->pending_handler = handler;
1032 data->pending_signal = signal;
1034 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1036 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1040 lose("Null context");
1042 /* the signal mask in the context (from before we were
1043 * interrupted) is copied to be restored when run_deferred_handler
1044 * happens. Then the usually-blocked signals are added to the mask
1045 * in the context so that we are running with blocked signals when
1046 * the handler returns */
1047 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1048 sigaddset_deferrable(os_context_sigmask_addr(context));
1052 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1054 SAVE_ERRNO(context,void_context);
1055 struct thread *thread = arch_os_get_current_thread();
1056 struct interrupt_data *data = thread->interrupt_data;
1058 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1059 interrupt_handle_now(signal, info, context);
1064 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1065 os_context_t *context)
1067 /* No FP control fixage needed, caller has done that. */
1068 check_blockables_blocked_or_lose();
1069 check_interrupts_enabled_or_lose(context);
1070 (*interrupt_low_level_handlers[signal])(signal, info, context);
1071 /* No Darwin context fixage needed, caller does that. */
1075 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1077 SAVE_ERRNO(context,void_context);
1078 struct thread *thread = arch_os_get_current_thread();
1079 struct interrupt_data *data = thread->interrupt_data;
1081 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1082 signal,info,context))
1083 low_level_interrupt_handle_now(signal, info, context);
1088 #ifdef LISP_FEATURE_SB_THREAD
1090 /* This function must not cons, because that may trigger a GC. */
1092 sig_stop_for_gc_handler(int signal, siginfo_t *info, os_context_t *context)
1094 struct thread *thread=arch_os_get_current_thread();
1097 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1098 * pseudo atomic until gc is finally allowed. */
1099 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1100 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1101 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1103 } else if (arch_pseudo_atomic_atomic(context)) {
1104 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1105 arch_set_pseudo_atomic_interrupted(context);
1106 maybe_save_gc_mask_and_block_deferrables
1107 (os_context_sigmask_addr(context));
1108 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1112 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1114 /* Not PA and GC not inhibited -- we can stop now. */
1116 /* need the context stored so it can have registers scavenged */
1117 fake_foreign_function_call(context);
1119 /* Block everything. */
1121 thread_sigmask(SIG_BLOCK,&ss,0);
1123 /* Not pending anymore. */
1124 SetSymbolValue(GC_PENDING,NIL,thread);
1125 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1127 if(thread_state(thread)!=STATE_RUNNING) {
1128 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1129 fixnum_value(thread->state));
1132 set_thread_state(thread,STATE_SUSPENDED);
1133 FSHOW_SIGNAL((stderr,"suspended\n"));
1135 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1136 FSHOW_SIGNAL((stderr,"resumed\n"));
1138 if(thread_state(thread)!=STATE_RUNNING) {
1139 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1140 fixnum_value(thread_state(thread)));
1143 undo_fake_foreign_function_call(context);
1149 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1151 SAVE_ERRNO(context,void_context);
1152 #ifndef LISP_FEATURE_WIN32
1153 if ((signal == SIGILL) || (signal == SIGBUS)
1154 #ifndef LISP_FEATURE_LINUX
1155 || (signal == SIGEMT)
1158 corruption_warning_and_maybe_lose("Signal %d received", signal);
1160 interrupt_handle_now(signal, info, context);
1164 /* manipulate the signal context and stack such that when the handler
1165 * returns, it will call function instead of whatever it was doing
1169 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1170 extern int *context_eflags_addr(os_context_t *context);
1173 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1174 extern void post_signal_tramp(void);
1175 extern void call_into_lisp_tramp(void);
1177 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1179 #ifndef LISP_FEATURE_WIN32
1180 check_gc_signals_unblocked_in_sigset_or_lose
1181 (os_context_sigmask_addr(context));
1183 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1184 void * fun=native_pointer(function);
1185 void *code = &(((struct simple_fun *) fun)->code);
1188 /* Build a stack frame showing `interrupted' so that the
1189 * user's backtrace makes (as much) sense (as usual) */
1191 /* fp state is saved and restored by call_into_lisp */
1192 /* FIXME: errno is not restored, but since current uses of this
1193 * function only call Lisp code that signals an error, it's not
1194 * much of a problem. In other words, running out of the control
1195 * stack between a syscall and (GET-ERRNO) may clobber errno if
1196 * something fails during signalling or in the handler. But I
1197 * can't see what can go wrong as long as there is no CONTINUE
1198 * like restart on them. */
1199 #ifdef LISP_FEATURE_X86
1200 /* Suppose the existence of some function that saved all
1201 * registers, called call_into_lisp, then restored GP registers and
1202 * returned. It would look something like this:
1210 pushl {address of function to call}
1211 call 0x8058db0 <call_into_lisp>
1218 * What we do here is set up the stack that call_into_lisp would
1219 * expect to see if it had been called by this code, and frob the
1220 * signal context so that signal return goes directly to call_into_lisp,
1221 * and when that function (and the lisp function it invoked) returns,
1222 * it returns to the second half of this imaginary function which
1223 * restores all registers and returns to C
1225 * For this to work, the latter part of the imaginary function
1226 * must obviously exist in reality. That would be post_signal_tramp
1229 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1231 #if defined(LISP_FEATURE_DARWIN)
1232 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1234 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1235 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1237 /* 1. os_validate (malloc/mmap) register_save_block
1238 * 2. copy register state into register_save_block
1239 * 3. put a pointer to register_save_block in a register in the context
1240 * 4. set the context's EIP to point to a trampoline which:
1241 * a. builds the fake stack frame from the block
1242 * b. frees the block
1243 * c. calls the function
1246 *register_save_area = *os_context_pc_addr(context);
1247 *(register_save_area + 1) = function;
1248 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1249 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1250 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1251 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1252 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1253 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1254 *(register_save_area + 8) = *context_eflags_addr(context);
1256 *os_context_pc_addr(context) =
1257 (os_context_register_t) call_into_lisp_tramp;
1258 *os_context_register_addr(context,reg_ECX) =
1259 (os_context_register_t) register_save_area;
1262 /* return address for call_into_lisp: */
1263 *(sp-15) = (u32)post_signal_tramp;
1264 *(sp-14) = function; /* args for call_into_lisp : function*/
1265 *(sp-13) = 0; /* arg array */
1266 *(sp-12) = 0; /* no. args */
1267 /* this order matches that used in POPAD */
1268 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1269 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1271 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1272 /* POPAD ignores the value of ESP: */
1274 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1276 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1277 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1278 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1279 *(sp-3)=*context_eflags_addr(context);
1280 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1281 *(sp-1)=*os_context_pc_addr(context);
1285 #elif defined(LISP_FEATURE_X86_64)
1286 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1288 /* return address for call_into_lisp: */
1289 *(sp-18) = (u64)post_signal_tramp;
1291 *(sp-17)=*os_context_register_addr(context,reg_R15);
1292 *(sp-16)=*os_context_register_addr(context,reg_R14);
1293 *(sp-15)=*os_context_register_addr(context,reg_R13);
1294 *(sp-14)=*os_context_register_addr(context,reg_R12);
1295 *(sp-13)=*os_context_register_addr(context,reg_R11);
1296 *(sp-12)=*os_context_register_addr(context,reg_R10);
1297 *(sp-11)=*os_context_register_addr(context,reg_R9);
1298 *(sp-10)=*os_context_register_addr(context,reg_R8);
1299 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1300 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1301 /* skip RBP and RSP */
1302 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1303 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1304 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1305 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1306 *(sp-3)=*context_eflags_addr(context);
1307 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1308 *(sp-1)=*os_context_pc_addr(context);
1310 *os_context_register_addr(context,reg_RDI) =
1311 (os_context_register_t)function; /* function */
1312 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1313 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1315 struct thread *th=arch_os_get_current_thread();
1316 build_fake_control_stack_frames(th,context);
1319 #ifdef LISP_FEATURE_X86
1321 #if !defined(LISP_FEATURE_DARWIN)
1322 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1323 *os_context_register_addr(context,reg_ECX) = 0;
1324 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1326 *os_context_register_addr(context,reg_UESP) =
1327 (os_context_register_t)(sp-15);
1329 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1330 #endif /* __NETBSD__ */
1331 #endif /* LISP_FEATURE_DARWIN */
1333 #elif defined(LISP_FEATURE_X86_64)
1334 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1335 *os_context_register_addr(context,reg_RCX) = 0;
1336 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1337 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1339 /* this much of the calling convention is common to all
1341 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1342 *os_context_register_addr(context,reg_NARGS) = 0;
1343 *os_context_register_addr(context,reg_LIP) =
1344 (os_context_register_t)(unsigned long)code;
1345 *os_context_register_addr(context,reg_CFP) =
1346 (os_context_register_t)(unsigned long)current_control_frame_pointer;
1348 #ifdef ARCH_HAS_NPC_REGISTER
1349 *os_context_npc_addr(context) =
1350 4 + *os_context_pc_addr(context);
1352 #ifdef LISP_FEATURE_SPARC
1353 *os_context_register_addr(context,reg_CODE) =
1354 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1356 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1360 /* KLUDGE: Theoretically the approach we use for undefined alien
1361 * variables should work for functions as well, but on PPC/Darwin
1362 * we get a bus error at bogus addresses instead, hence this workaround,
1363 * which has the added benefit of automatically discriminating between
1364 * functions and variables.
1367 undefined_alien_function(void)
1369 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
1373 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1375 struct thread *th=arch_os_get_current_thread();
1377 /* note the os_context hackery here. When the signal handler returns,
1378 * it won't go back to what it was doing ... */
1379 if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1380 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1381 /* We hit the end of the control stack: disable guard page
1382 * protection so the error handler has some headroom, protect the
1383 * previous page so that we can catch returns from the guard page
1384 * and restore it. */
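/* (The same guard-page/return-guard-page handshake is repeated below
 * for the binding stack and the alien stack.) */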
1385 corruption_warning_and_maybe_lose("Control stack exhausted");
1386 protect_control_stack_guard_page(0, NULL);
1387 protect_control_stack_return_guard_page(1, NULL);
1389 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1390 /* For the unfortunate case when the control stack is
1391 * exhausted in a signal handler. */
1392 unblock_signals_in_context_and_maybe_warn(context);
1394 arrange_return_to_lisp_function
1395 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1398 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1399 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1400 /* We're returning from the guard page: reprotect it, and
1401 * unprotect this one. This works even if we somehow missed
1402 * the return-guard-page, and hit it on our way to new
1403 * exhaustion instead. */
1404 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1405 protect_control_stack_guard_page(1, NULL);
1406 protect_control_stack_return_guard_page(0, NULL);
1409 else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
1410 addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1411 corruption_warning_and_maybe_lose("Binding stack exhausted");
1412 protect_binding_stack_guard_page(0, NULL);
1413 protect_binding_stack_return_guard_page(1, NULL);
1415 /* For the unfortunate case when the binding stack is
1416 * exhausted in a signal handler. */
1417 unblock_signals_in_context_and_maybe_warn(context);
1418 arrange_return_to_lisp_function
1419 (context, StaticSymbolFunction(BINDING_STACK_EXHAUSTED_ERROR));
1422 else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
1423 addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1424 fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
1425 protect_binding_stack_guard_page(1, NULL);
1426 protect_binding_stack_return_guard_page(0, NULL);
1429 else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
1430 addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1431 corruption_warning_and_maybe_lose("Alien stack exhausted");
1432 protect_alien_stack_guard_page(0, NULL);
1433 protect_alien_stack_return_guard_page(1, NULL);
1435 /* For the unfortunate case when the alien stack is
1436 * exhausted in a signal handler. */
1437 unblock_signals_in_context_and_maybe_warn(context);
1438 arrange_return_to_lisp_function
1439 (context, StaticSymbolFunction(ALIEN_STACK_EXHAUSTED_ERROR));
1442 else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
1443 addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1444 fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
1445 protect_alien_stack_guard_page(1, NULL);
1446 protect_alien_stack_return_guard_page(0, NULL);
1449 else if (addr >= undefined_alien_address &&
1450 addr < undefined_alien_address + os_vm_page_size) {
1451 arrange_return_to_lisp_function
1452 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1459 * noise to install handlers
1462 #ifndef LISP_FEATURE_WIN32
1463 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered even if
1464 * they are blocked; in Linux 2.6 the default handler is invoked
1465 * instead, which usually coredumps. One might hastily think that adding
1466 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1467 * the whole sa_mask is ignored, instead of merely not adding the signal
1468 * in question to the mask. That means that if it's not blockable the
1469 * signal must be unblocked at the beginning of signal handlers.
1471 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1472 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1473 * will be unblocked in the sigmask during the signal handler. -- RMK
1476 static volatile int sigaction_nodefer_works = -1;
1478 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1479 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1482 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1484 sigset_t empty, current;
1486 sigemptyset(&empty);
1487 thread_sigmask(SIG_BLOCK, &empty, &current);
1488 /* There should be exactly two blocked signals: the two we added
1489 * to sa_mask when setting up the handler. NetBSD doesn't block
1490 * the signal we're handling when SA_NODEFER is set; Linux before
1491 * 2.6.13 or so also doesn't block the other signal when
1492 * SA_NODEFER is set. */
1493 for(i = 1; i < NSIG; i++)
1494 if (sigismember(&current, i) !=
1495 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1496 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1497 sigaction_nodefer_works = 0;
1499 if (sigaction_nodefer_works == -1)
1500 sigaction_nodefer_works = 1;
1504 see_if_sigaction_nodefer_works(void)
1506 struct sigaction sa, old_sa;
1508 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1509 sa.sa_sigaction = sigaction_nodefer_test_handler;
1510 sigemptyset(&sa.sa_mask);
1511 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1512 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1513 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1514 /* Make sure no signals are blocked. */
1517 sigemptyset(&empty);
1518 thread_sigmask(SIG_SETMASK, &empty, 0);
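/* Send ourselves the test signal and spin until the handler above has
 * recorded its verdict in sigaction_nodefer_works. */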
1520 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
1521 while (sigaction_nodefer_works == -1);
1522 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1525 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1526 #undef SA_NODEFER_TEST_KILL_SIGNAL
1529 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1531 SAVE_ERRNO(context,void_context);
1534 sigemptyset(&unblock);
1535 sigaddset(&unblock, signal);
1536 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1537 interrupt_handle_now(signal, info, context);
1542 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1544 SAVE_ERRNO(context,void_context);
1547 sigemptyset(&unblock);
1548 sigaddset(&unblock, signal);
1549 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1550 (*interrupt_low_level_handlers[signal])(signal, info, context);
1555 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1557 SAVE_ERRNO(context,void_context);
1558 (*interrupt_low_level_handlers[signal])(signal, info, context);
1563 undoably_install_low_level_interrupt_handler (int signal,
1564 interrupt_handler_t handler)
1566 struct sigaction sa;
1568 if (0 > signal || signal >= NSIG) {
1569 lose("bad signal number %d\n", signal);
1572 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1573 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1574 else if (sigismember(&deferrable_sigset,signal))
1575 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1576 else if (!sigaction_nodefer_works &&
1577 !sigismember(&blockable_sigset, signal))
1578 sa.sa_sigaction = low_level_unblock_me_trampoline;
1580 sa.sa_sigaction = low_level_handle_now_handler;
1582 sigcopyset(&sa.sa_mask, &blockable_sigset);
1583 sa.sa_flags = SA_SIGINFO | SA_RESTART
1584 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1585 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1586 if((signal==SIG_MEMORY_FAULT))
1587 sa.sa_flags |= SA_ONSTACK;
1590 sigaction(signal, &sa, NULL);
1591 interrupt_low_level_handlers[signal] =
1592 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
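/* A typical use (illustrative only; the real call sites live in the
 * architecture/OS-specific files, and the handler name here is just a
 * stand-in):
 *
 *   undoably_install_low_level_interrupt_handler(SIG_MEMORY_FAULT,
 *                                                memory_fault_handler);
 */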
1596 /* This is called from Lisp. */
1598 install_handler(int signal, void handler(int, siginfo_t*, os_context_t*))
1600 #ifndef LISP_FEATURE_WIN32
1601 struct sigaction sa;
1603 union interrupt_handler oldhandler;
1605 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1608 sigaddset(&new, signal);
1609 thread_sigmask(SIG_BLOCK, &new, &old);
1611 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1612 (unsigned int)interrupt_low_level_handlers[signal]));
1613 if (interrupt_low_level_handlers[signal]==0) {
1614 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1615 ARE_SAME_HANDLER(handler, SIG_IGN))
1616 sa.sa_sigaction = (void (*)(int, siginfo_t*, void*))handler;
1617 else if (sigismember(&deferrable_sigset, signal))
1618 sa.sa_sigaction = maybe_now_maybe_later;
1619 else if (!sigaction_nodefer_works &&
1620 !sigismember(&blockable_sigset, signal))
1621 sa.sa_sigaction = unblock_me_trampoline;
1623 sa.sa_sigaction = interrupt_handle_now_handler;
1625 sigcopyset(&sa.sa_mask, &blockable_sigset);
1626 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1627 (sigaction_nodefer_works ? SA_NODEFER : 0);
1628 sigaction(signal, &sa, NULL);
1631 oldhandler = interrupt_handlers[signal];
1632 interrupt_handlers[signal].c = handler;
1634 thread_sigmask(SIG_SETMASK, &old, 0);
1636 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1638 return (unsigned long)oldhandler.lisp;
1640 /* Probably-wrong Win32 hack */
1645 /* This must not go through lisp as it's allowed anytime, even when on
1648 sigabrt_handler(int signal, siginfo_t *info, os_context_t *context)
1650 lose("SIGABRT received.\n");
1654 interrupt_init(void)
1656 #ifndef LISP_FEATURE_WIN32
1658 SHOW("entering interrupt_init()");
1659 see_if_sigaction_nodefer_works();
1660 sigemptyset(&deferrable_sigset);
1661 sigemptyset(&blockable_sigset);
1662 sigemptyset(&gc_sigset);
1663 sigaddset_deferrable(&deferrable_sigset);
1664 sigaddset_blockable(&blockable_sigset);
1665 sigaddset_gc(&gc_sigset);
1667 /* Set up high level handler information. */
1668 for (i = 0; i < NSIG; i++) {
1669 interrupt_handlers[i].c =
1670 /* (The cast here blasts away the distinction between
1671 * SA_SIGACTION-style three-argument handlers and
1672 * signal(..)-style one-argument handlers, which is OK
1673 * because it works to call the 1-argument form where the
1674 * 3-argument form is expected.) */
1675 (void (*)(int, siginfo_t*, os_context_t*))SIG_DFL;
1677 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1678 SHOW("returning from interrupt_init()");
1682 #ifndef LISP_FEATURE_WIN32
1684 siginfo_code(siginfo_t *info)
1686 return info->si_code;
1688 os_vm_address_t current_memory_fault_address;
1691 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1693 /* FIXME: This is lossy: if we get another memory fault (e.g. from
1694 * another thread) before lisp has read this, we lose the information.
1695 * However, since this is mostly informative, we'll live with that for
1696 * now -- some address is better than no address in this case.
1698 current_memory_fault_address = addr;
1699 /* To allow debugging memory faults in signal handlers and such. */
1700 corruption_warning_and_maybe_lose("Memory fault at %x", addr);
1701 unblock_signals_in_context_and_maybe_warn(context);
1702 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1703 arrange_return_to_lisp_function(context,
1704 StaticSymbolFunction(MEMORY_FAULT_ERROR));
1706 funcall0(StaticSymbolFunction(MEMORY_FAULT_ERROR));
1712 unhandled_trap_error(os_context_t *context)
1714 lispobj context_sap;
1715 fake_foreign_function_call(context);
1716 unblock_gc_signals();
1717 context_sap = alloc_sap(context);
1718 #ifndef LISP_FEATURE_WIN32
1719 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1721 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1722 lose("UNHANDLED-TRAP-ERROR fell through");
1725 /* Common logic for trapping instructions. How we actually handle each
1726 * case is highly architecture dependent, but the overall shape is
1729 handle_trap(os_context_t *context, int trap)
1732 case trap_PendingInterrupt:
1733 FSHOW((stderr, "/<trap pending interrupt>\n"));
1734 arch_skip_instruction(context);
1735 interrupt_handle_pending(context);
1739 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1740 interrupt_internal_error(context, trap==trap_Cerror);
1742 case trap_Breakpoint:
1743 arch_handle_breakpoint(context);
1745 case trap_FunEndBreakpoint:
1746 arch_handle_fun_end_breakpoint(context);
1748 #ifdef trap_AfterBreakpoint
1749 case trap_AfterBreakpoint:
1750 arch_handle_after_breakpoint(context);
1753 #ifdef trap_SingleStepAround
1754 case trap_SingleStepAround:
1755 case trap_SingleStepBefore:
1756 arch_handle_single_step_trap(context, trap);
1760 fake_foreign_function_call(context);
1761 lose("%%PRIMITIVE HALT called; the party is over.\n");
1763 unhandled_trap_error(context);