2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handle we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 static void run_deferred_handler(struct interrupt_data *data, void *v_context);
72 #ifndef LISP_FEATURE_WIN32
73 static void store_signal_data_for_later (struct interrupt_data *data,
74 void *handler, int signal,
76 os_context_t *context);
79 fill_current_sigmask(sigset_t *sigset)
81 /* Get the current sigmask, by blocking the empty set. */
84 thread_sigmask(SIG_BLOCK, &empty, sigset);
/* Add every "deferrable" signal -- those whose handling Lisp may ask
 * us to postpone while *INTERRUPTS-ENABLED* is NIL -- to the set *s.
 * NOTE(review): SIGHUP/SIGINT/SIGURG/SIGIO were restored here from the
 * matching sigdelset_deferrable list -- confirm against upstream. */
void
sigaddset_deferrable(sigset_t *s)
{
    sigaddset(s, SIGHUP);
    sigaddset(s, SIGINT);
    sigaddset(s, SIGTERM);
    sigaddset(s, SIGQUIT);
    sigaddset(s, SIGPIPE);
    sigaddset(s, SIGALRM);
    sigaddset(s, SIGURG);
    sigaddset(s, SIGTSTP);
    sigaddset(s, SIGCHLD);
    sigaddset(s, SIGIO);
#ifndef LISP_FEATURE_HPUX
    /* Not usable on HP-UX. */
    sigaddset(s, SIGXCPU);
    sigaddset(s, SIGXFSZ);
#endif
    sigaddset(s, SIGVTALRM);
    sigaddset(s, SIGPROF);
    sigaddset(s, SIGWINCH);
#ifdef LISP_FEATURE_SB_THREAD
    /* Thread interruption is delivered as an ordinary deferrable signal. */
    sigaddset(s, SIG_INTERRUPT_THREAD);
#endif
}
/* Remove every deferrable signal from the set *s.  This is the exact
 * inverse of sigaddset_deferrable and must stay in sync with it.
 * Fix: SIGTERM was missing from this list even though
 * sigaddset_deferrable adds it, so a mask built by add could not be
 * fully undone by del. */
void
sigdelset_deferrable(sigset_t *s)
{
    sigdelset(s, SIGHUP);
    sigdelset(s, SIGINT);
    sigdelset(s, SIGTERM);
    sigdelset(s, SIGQUIT);
    sigdelset(s, SIGPIPE);
    sigdelset(s, SIGALRM);
    sigdelset(s, SIGURG);
    sigdelset(s, SIGTSTP);
    sigdelset(s, SIGCHLD);
    sigdelset(s, SIGIO);
#ifndef LISP_FEATURE_HPUX
    /* Not usable on HP-UX. */
    sigdelset(s, SIGXCPU);
    sigdelset(s, SIGXFSZ);
#endif
    sigdelset(s, SIGVTALRM);
    sigdelset(s, SIGPROF);
    sigdelset(s, SIGWINCH);
#ifdef LISP_FEATURE_SB_THREAD
    sigdelset(s, SIG_INTERRUPT_THREAD);
#endif
}
139 sigaddset_blockable(sigset_t *sigset)
141 sigaddset_deferrable(sigset);
142 sigaddset_gc(sigset);
146 sigaddset_gc(sigset_t *sigset)
148 #ifdef LISP_FEATURE_SB_THREAD
149 sigaddset(sigset,SIG_STOP_FOR_GC);
154 sigdelset_gc(sigset_t *sigset)
156 #ifdef LISP_FEATURE_SB_THREAD
157 sigdelset(sigset,SIG_STOP_FOR_GC);
161 /* initialized in interrupt_init */
162 sigset_t deferrable_sigset;
163 sigset_t blockable_sigset;
168 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
170 #if !defined(LISP_FEATURE_WIN32)
172 for(i = 1; i < NSIG; i++) {
173 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
174 lose("deferrable signal %d blocked\n",i);
180 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
182 #if !defined(LISP_FEATURE_WIN32)
184 for(i = 1; i < NSIG; i++) {
185 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
186 lose("deferrable signal %d not blocked\n",i);
192 check_deferrables_blocked_or_lose(void)
194 #if !defined(LISP_FEATURE_WIN32)
196 fill_current_sigmask(¤t);
197 check_deferrables_blocked_in_sigset_or_lose(¤t);
202 check_blockables_blocked_or_lose(void)
204 #if !defined(LISP_FEATURE_WIN32)
207 fill_current_sigmask(¤t);
208 for(i = 1; i < NSIG; i++) {
209 if (sigismember(&blockable_sigset, i) && !sigismember(¤t, i))
210 lose("blockable signal %d not blocked\n",i);
216 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
218 #if !defined(LISP_FEATURE_WIN32)
220 for(i = 1; i < NSIG; i++) {
221 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
222 lose("gc signal %d blocked\n",i);
228 check_gc_signals_unblocked_or_lose(void)
230 #if !defined(LISP_FEATURE_WIN32)
232 fill_current_sigmask(¤t);
233 check_gc_signals_unblocked_in_sigset_or_lose(¤t);
238 check_interrupts_enabled_or_lose(os_context_t *context)
240 struct thread *thread=arch_os_get_current_thread();
241 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
242 lose("interrupts not enabled\n");
243 if (arch_pseudo_atomic_atomic(context))
244 lose ("in pseudo atomic section\n");
247 /* Are we leaving WITH-GCING and already running with interrupts
248 * enabled, without the protection of *GC-INHIBIT* T and there is gc
249 * (or stop for gc) pending, but we haven't trapped yet? */
251 in_leaving_without_gcing_race_p(struct thread *thread)
253 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
254 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
255 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
256 ((SymbolValue(GC_PENDING,thread) != NIL)
257 #if defined(LISP_FEATURE_SB_THREAD)
258 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
263 /* Check our baroque invariants. */
/* NOTE(review): this region is a garbled extraction -- original source
 * line numbers are fused onto each line and some lines (braces,
 * declarations, #endifs) are missing; the text below is preserved
 * byte-for-byte.  The function cross-checks the deferred-interrupt,
 * GC-pending and pseudo-atomic state against each other and against
 * the signal mask in the context, calling lose() on any violation. */
265 check_interrupt_context_or_lose(os_context_t *context)
267 struct thread *thread = arch_os_get_current_thread();
268 struct interrupt_data *data = thread->interrupt_data;
269 int interrupt_deferred_p = (data->pending_handler != 0);
270 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
271 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
272 * handle_allocation_trap. */
273 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
275 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
276 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
/* GC_PENDING is compared against T, not "!= NIL": :IN-PROGRESS is a
 * distinct non-pending state (see interrupt_handle_pending). */
277 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
278 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
279 int in_race_p = in_leaving_without_gcing_race_p(thread);
281 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
282 * section and trapping, a SIG_STOP_FOR_GC would see the next
283 * check fail, for this reason sig_stop_for_gc handler does not
284 * call this function. Plus, there may be interrupt lossage when a
285 * pseudo atomic is interrupted by a deferrable signal and gc is
286 * triggered, too. */
288 if (interrupt_deferred_p)
289 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
290 lose("Stray deferred interrupt.");
/* A pending GC is only legitimate under PA-interrupted, *GC-INHIBIT*,
 * or the leaving-WITHOUT-GCING race documented above. */
294 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
295 lose("GC_PENDING, but why?.");
296 #if defined(LISP_FEATURE_SB_THREAD)
298 int stop_for_gc_pending =
299 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
300 if (stop_for_gc_pending)
301 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
302 lose("STOP_FOR_GC_PENDING, but why?.");
/* A deferred handler implies deferrables blocked in the interrupted
 * context's mask, and vice versa. */
307 if (interrupt_pending && !interrupt_deferred_p)
308 lose("INTERRUPT_PENDING but not pending handler.");
309 if (interrupt_deferred_p)
310 check_deferrables_blocked_in_sigset_or_lose
311 (os_context_sigmask_addr(context));
313 check_deferrables_unblocked_in_sigset_or_lose
314 (os_context_sigmask_addr(context));
317 /* When we catch an internal error, should we pass it back to Lisp to
318 * be handled in a high-level way? (Early in cold init, the answer is
319 * 'no', because Lisp is still too brain-dead to handle anything.
320 * After sufficient initialization has been completed, the answer
322 boolean internal_errors_enabled = 0;
324 #ifndef LISP_FEATURE_WIN32
325 static void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, void*);
327 union interrupt_handler interrupt_handlers[NSIG];
330 block_blockable_signals(void)
332 #ifndef LISP_FEATURE_WIN32
333 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
338 block_deferrable_signals(void)
340 #ifndef LISP_FEATURE_WIN32
341 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
346 unblock_deferrable_signals_in_sigset(sigset_t *sigset)
348 #ifndef LISP_FEATURE_WIN32
349 if (interrupt_handler_pending_p())
350 lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
351 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
352 sigdelset_deferrable(sigset);
357 unblock_deferrable_signals(void)
359 #ifndef LISP_FEATURE_WIN32
360 if (interrupt_handler_pending_p())
361 lose("unblock_deferrable_signals: losing proposition\n");
362 check_gc_signals_unblocked_or_lose();
363 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
/* Unblock the GC signals in the calling thread's live mask.  Only
 * meaningful on threaded non-Win32 builds, which have SIG_STOP_FOR_GC. */
void
unblock_gc_signals(void)
{
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
    thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
#endif
}
/* Make GC signals (and, when safe, deferrables) deliverable again in
 * the given context's saved mask before returning to Lisp, warning
 * loudly if GC signals were found blocked -- that state is only
 * legitimate inside handlers that must not be interrupted at all.
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, and the statements that actually emit the warning (between
 * the membership test and the message text) are missing from this
 * view; code below preserved byte-for-byte. */
376 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
378 #ifndef LISP_FEATURE_WIN32
380 sigset_t *sigset=os_context_sigmask_addr(context);
381 for(i = 1; i < NSIG; i++) {
382 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
385 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
386 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
387 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
392 sigdelset_gc(sigset);
/* Deferrables may only be re-enabled if no handler is pending. */
393 if (!interrupt_handler_pending_p()) {
394 unblock_deferrable_signals_in_sigset(sigset);
401 * utility routines used by various signal handlers
/* On non-C-stack platforms, synthesize one or two Lisp control-stack
 * frames from the register state in `context` so that backtraces
 * through a signal handler look sane.
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, several lines (locals, braces) missing; preserved byte-for-byte. */
405 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
407 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
411 /* Build a fake stack frame or frames */
413 current_control_frame_pointer =
414 (lispobj *)(unsigned long)
415 (*os_context_register_addr(context, reg_CSP));
416 if ((lispobj *)(unsigned long)
417 (*os_context_register_addr(context, reg_CFP))
418 == current_control_frame_pointer) {
419 /* There is a small window during call where the callee's
420 * frame isn't built yet. */
421 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
422 == FUN_POINTER_LOWTAG) {
423 /* We have called, but not built the new frame, so
424 * build it for them. */
425 current_control_frame_pointer[0] =
426 *os_context_register_addr(context, reg_OCFP);
427 current_control_frame_pointer[1] =
428 *os_context_register_addr(context, reg_LRA);
429 current_control_frame_pointer += 8;
430 /* Build our frame on top of it. */
431 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
434 /* We haven't yet called, build our frame as if the
435 * partial frame wasn't there. */
436 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
439 /* We can't tell whether we are still in the caller if it had to
440 * allocate a stack frame due to stack arguments. */
441 /* This observation provoked some past CMUCL maintainer to ask
442 * "Can anything strange happen during return?" */
445 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
/* Reserve the fake frame and fill in old-FP / LRA-slot / code. */
448 current_control_stack_pointer = current_control_frame_pointer + 8;
450 current_control_frame_pointer[0] = oldcont;
451 current_control_frame_pointer[1] = NIL;
452 current_control_frame_pointer[2] =
453 (lispobj)(*os_context_register_addr(context, reg_CODE));
457 /* Stores the context for gc to scavenge and builds fake stack
 * frames; pairs with undo_fake_foreign_function_call below.
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, some lines (locals, braces, #endifs) missing; preserved
 * byte-for-byte. */
460 fake_foreign_function_call(os_context_t *context)
463 struct thread *thread=arch_os_get_current_thread();
465 /* context_index incrementing must not be interrupted */
466 check_blockables_blocked_or_lose();
468 /* Get current Lisp state from context. */
470 dynamic_space_free_pointer =
471 (lispobj *)(unsigned long)
472 (*os_context_register_addr(context, reg_ALLOC));
473 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
474 /* dynamic_space_free_pointer); */
475 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
/* reg_ALLOC doubles as the pseudo-atomic flag in its low bits on
 * these platforms; a set low bit here means corrupted state. */
476 if ((long)dynamic_space_free_pointer & 1) {
477 lose("dead in fake_foreign_function_call, context = %x\n", context);
480 /* why doesn't PPC and SPARC do something like this: */
481 #if defined(LISP_FEATURE_HPPA)
482 if ((long)dynamic_space_free_pointer & 4) {
483 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
488 current_binding_stack_pointer =
489 (lispobj *)(unsigned long)
490 (*os_context_register_addr(context, reg_BSP));
493 build_fake_control_stack_frames(thread,context);
495 /* Do dynamic binding of the active interrupt context index
496 * and save the context in the context array. */
498 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
500 if (context_index >= MAX_INTERRUPTS) {
501 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
504 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
505 make_fixnum(context_index + 1),thread);
507 thread->interrupt_contexts[context_index] = context;
509 #ifdef FOREIGN_FUNCTION_CALL_FLAG
510 foreign_function_call_active = 1;
514 /* blocks all blockable signals. If you are calling from a signal handler,
515 * the usual signal mask will be restored from the context when the handler
516 * finishes. Otherwise, be careful */
/* NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, some lines missing (unbinding call, #if arms); preserved
 * byte-for-byte. */
518 undo_fake_foreign_function_call(os_context_t *context)
520 struct thread *thread=arch_os_get_current_thread();
521 /* Block all blockable signals. */
522 block_blockable_signals();
524 #ifdef FOREIGN_FUNCTION_CALL_FLAG
525 foreign_function_call_active = 0;
528 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
532 /* Put the dynamic space free pointer back into the context. */
/* The free pointer is merged with the low (tag/flag) bits already in
 * reg_ALLOC rather than stored wholesale. */
533 *os_context_register_addr(context, reg_ALLOC) =
534 (unsigned long) dynamic_space_free_pointer
535 | (*os_context_register_addr(context, reg_ALLOC)
538 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
540 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
545 /* a handler for the signal caused by execution of a trap opcode
546 * signalling an internal error */
/* NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, some lines (locals, braces, #endifs) missing; preserved
 * byte-for-byte.  Hands the error off to the Lisp INTERNAL_ERROR
 * function when internal_errors_enabled, else loses. */
548 interrupt_internal_error(os_context_t *context, boolean continuable)
552 fake_foreign_function_call(context);
554 if (!internal_errors_enabled) {
555 describe_internal_error(context);
556 /* There's no good way to recover from an internal error
557 * before the Lisp error handling mechanism is set up. */
558 lose("internal error too early in init, can't recover\n");
561 /* Allocate the SAP object while the interrupts are still
563 unblock_gc_signals();
564 context_sap = alloc_sap(context);
566 #ifndef LISP_FEATURE_WIN32
/* Restore the interrupted code's own mask before calling into Lisp. */
567 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
570 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
571 /* Workaround for blocked SIGTRAP. */
574 sigemptyset(&newset);
575 sigaddset(&newset, SIGTRAP);
576 thread_sigmask(SIG_UNBLOCK, &newset, 0);
580 SHOW("in interrupt_internal_error");
582 /* Display some rudimentary debugging information about the
583 * error, so that even if the Lisp error handler gets badly
584 * confused, we have a chance to determine what's going on. */
585 describe_internal_error(context);
587 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
588 continuable ? T : NIL);
590 undo_fake_foreign_function_call(context); /* blocks signals again */
/* Continuable errors resume at the instruction after the trap. */
592 arch_skip_instruction(context);
596 interrupt_handler_pending_p(void)
598 struct thread *thread = arch_os_get_current_thread();
599 struct interrupt_data *data = thread->interrupt_data;
600 return (data->pending_handler != 0);
/* Run whatever was deferred -- a stop-for-GC, a pending GC, and/or a
 * deferred Lisp interrupt -- now that it is safe to do so.
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, many lines (braces, #endifs, locals) missing; preserved
 * byte-for-byte. */
604 interrupt_handle_pending(os_context_t *context)
606 /* There are three ways we can get here. First, if an interrupt
607 * occurs within pseudo-atomic, it will be deferred, and we'll
608 * trap to here at the end of the pseudo-atomic block. Second, if
609 * the GC (in alloc()) decides that a GC is required, it will set
610 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
611 * and alloc() is always called from within pseudo-atomic, and
612 * thus we end up here again. Third, when calling GC-ON or at the
613 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
614 * here if there is a pending GC. Fourth, ahem, at the end of
615 * WITHOUT-INTERRUPTS (bar complications with nesting). */
617 /* Win32 only needs to handle the GC cases (for now?) */
619 struct thread *thread;
/* NOTE(review): "pseduo" typo in the message below is in the original
 * runtime string; left untouched here. */
621 if (arch_pseudo_atomic_atomic(context)) {
622 lose("Handling pending interrupt in pseduo atomic.");
625 thread = arch_os_get_current_thread();
627 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
629 check_blockables_blocked_or_lose();
631 /* If pseudo_atomic_interrupted is set then the interrupt is going
632 * to be handled now, ergo it's safe to clear it. */
633 arch_clear_pseudo_atomic_interrupted(context);
635 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
636 #ifdef LISP_FEATURE_SB_THREAD
637 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
638 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
639 * the signal handler if it actually stops us. */
640 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
643 /* Test for T and not for != NIL since the value :IN-PROGRESS
644 * is used in SUB-GC as part of the mechanism to suppress
646 if (SymbolValue(GC_PENDING,thread) == T) {
647 /* GC_PENDING is cleared in SUB-GC, or if another thread
648 * is doing a gc already we will get a SIG_STOP_FOR_GC and
649 * that will clear it. */
652 check_blockables_blocked_or_lose();
655 #ifndef LISP_FEATURE_WIN32
656 /* we may be here only to do the gc stuff, if interrupts are
657 * enabled run the pending handler */
658 if (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) {
659 struct interrupt_data *data = thread->interrupt_data;
661 /* There may be no pending handler, because it was only a gc
662 * that had to be executed or because pseudo atomic triggered
663 * twice for a single interrupt. For the interested reader,
664 * that may happen if an interrupt hits after the interrupted
665 * flag is cleared but before pseudo-atomic is set and a
666 * pseudo atomic is interrupted in that interrupt. */
667 if (data->pending_handler) {
669 /* If we're here as the result of a pseudo-atomic as opposed
670 * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already
671 * NIL, because maybe_defer_handler sets
672 * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/
673 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
675 /* restore the saved signal mask from the original signal (the
676 * one that interrupted us during the critical section) into the
677 * os_context for the signal we're currently in the handler for.
678 * This should ensure that when we return from the handler the
679 * blocked signals are unblocked */
680 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
682 /* This will break on sparc linux: the deferred handler really wants
683 * to be called with a void_context */
684 run_deferred_handler(data,(void *)context);
691 * the two main signal handlers:
692 * interrupt_handle_now(..)
693 * maybe_now_maybe_later(..)
695 * to which we have added interrupt_handle_now_handler(..). Why?
696 * Well, mostly because the SPARC/Linux platform doesn't quite do
697 * signals the way we want them done. The third argument in the
698 * handler isn't filled in by the kernel properly, so we fix it up
699 * ourselves in the arch_os_get_context(..) function; however, we only
700 * want to do this when we first hit the handler, and not when
701 * interrupt_handle_now(..) is being called from some other handler
702 * (when the fixup will already have been done). -- CSR, 2002-07-23
/* The ordinary outermost signal handler body: dispatch the signal to
 * the C- or Lisp-level handler registered in interrupt_handlers[].
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, many lines (braces, #endifs, call arguments) missing;
 * preserved byte-for-byte. */
706 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
708 #ifdef FOREIGN_FUNCTION_CALL_FLAG
709 boolean were_in_lisp;
711 union interrupt_handler handler;
713 check_blockables_blocked_or_lose();
715 #ifndef LISP_FEATURE_WIN32
716 if (sigismember(&deferrable_sigset,signal))
717 check_interrupts_enabled_or_lose(context);
720 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
721 /* Under Linux on some architectures, we appear to have to restore
722 the FPU control word from the context, as after the signal is
723 delivered we appear to have a null FPU control word. */
724 os_restore_fp_control(context);
727 handler = interrupt_handlers[signal];
/* SIG_IGN: nothing to do at all. */
729 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
733 #ifdef FOREIGN_FUNCTION_CALL_FLAG
734 were_in_lisp = !foreign_function_call_active;
738 fake_foreign_function_call(context);
741 FSHOW_SIGNAL((stderr,
742 "/entering interrupt_handle_now(%d, info, context)\n",
745 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
747 /* This can happen if someone tries to ignore or default one
748 * of the signals we need for runtime support, and the runtime
749 * support decides to pass on it. */
750 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
/* A Lisp function pointer: box the context and siginfo as SAPs and
 * funcall the Lisp-level handler. */
752 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
753 /* Once we've decided what to do about contexts in a
754 * return-elsewhere world (the original context will no longer
755 * be available; should we copy it or was nobody using it anyway?)
756 * then we should convert this to return-elsewhere */
758 /* CMUCL comment said "Allocate the SAPs while the interrupts
759 * are still disabled.". I (dan, 2003.08.21) assume this is
760 * because we're not in pseudoatomic and allocation shouldn't
761 * be interrupted. In which case it's no longer an issue as
762 * all our allocation from C now goes through a PA wrapper,
763 * but still, doesn't hurt.
765 * Yeah, but non-gencgc platforms don't really wrap allocation
766 * in PA. MG - 2005-08-29 */
768 lispobj info_sap, context_sap;
769 /* Leave deferrable signals blocked, the handler itself will
770 * allow signals again when it sees fit. */
771 unblock_gc_signals();
772 context_sap = alloc_sap(context);
773 info_sap = alloc_sap(info);
775 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
777 funcall3(handler.lisp,
/* Otherwise: a plain C handler -- restore the mask and call it. */
783 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
785 #ifndef LISP_FEATURE_WIN32
786 /* Allow signals again. */
787 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
789 (*handler.c)(signal, info, context);
792 #ifdef FOREIGN_FUNCTION_CALL_FLAG
796 undo_fake_foreign_function_call(context); /* block signals again */
799 FSHOW_SIGNAL((stderr,
800 "/returning from interrupt_handle_now(%d, info, context)\n",
804 /* This is called at the end of a critical section if the indications
805 * are that some signal was deferred during the section. Note that as
806 * far as C or the kernel is concerned we dealt with the signal
807 * already; we're just doing the Lisp-level processing now that we
810 run_deferred_handler(struct interrupt_data *data, void *v_context)
812 /* The pending_handler may enable interrupts and then another
813 * interrupt may hit, overwrite interrupt_data, so reset the
814 * pending handler before calling it. Trust the handler to finish
815 * with the siginfo before enabling interrupts. */
816 void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler;
818 data->pending_handler=0;
819 (*pending_handler)(data->pending_signal,&(data->pending_info), v_context);
822 #ifndef LISP_FEATURE_WIN32
/* Decide whether `handler` for `signal` may run now (return 0) or must
 * be stashed for later (return nonzero), either because Lisp-level
 * interrupts are disabled or because we are inside pseudo-atomic.
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, some lines (return type, braces, returns) missing; preserved
 * byte-for-byte. */
824 maybe_defer_handler(void *handler, struct interrupt_data *data,
825 int signal, siginfo_t *info, os_context_t *context)
827 struct thread *thread=arch_os_get_current_thread();
829 check_blockables_blocked_or_lose();
831 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
832 lose("interrupt already pending\n");
833 check_interrupt_context_or_lose(context);
834 /* If interrupts are disabled then INTERRUPT_PENDING is set and
835 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
836 * atomic section inside a WITHOUT-INTERRUPTS.
838 * Also, if in_leaving_without_gcing_race_p then
839 * interrupt_handle_pending is going to be called soon, so
840 * stashing the signal away is safe.
842 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
843 in_leaving_without_gcing_race_p(thread)) {
844 store_signal_data_for_later(data,handler,signal,info,context);
845 SetSymbolValue(INTERRUPT_PENDING, T,thread);
846 FSHOW_SIGNAL((stderr,
847 "/maybe_defer_handler(%x,%d): deferred\n",
848 (unsigned int)handler,signal));
849 check_interrupt_context_or_lose(context);
852 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
853 * actually use its argument for anything on x86, so this branch
854 * may succeed even when context is null (gencgc alloc()) */
855 if (arch_pseudo_atomic_atomic(context)) {
856 store_signal_data_for_later(data,handler,signal,info,context);
857 arch_set_pseudo_atomic_interrupted(context);
858 FSHOW_SIGNAL((stderr,
859 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
860 (unsigned int)handler,signal));
861 check_interrupt_context_or_lose(context);
864 FSHOW_SIGNAL((stderr,
865 "/maybe_defer_handler(%x,%d): not deferred\n",
866 (unsigned int)handler,signal));
/* Stash handler/signal/siginfo in `data` so run_deferred_handler can
 * replay them later, and arrange for deferrables to stay blocked when
 * the current handler returns.
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, some lines (signal parameter, braces, null checks) missing;
 * preserved byte-for-byte. */
871 store_signal_data_for_later (struct interrupt_data *data, void *handler,
873 siginfo_t *info, os_context_t *context)
875 if (data->pending_handler)
876 lose("tried to overwrite pending interrupt handler %x with %x\n",
877 data->pending_handler, handler);
879 lose("tried to defer null interrupt handler\n");
880 data->pending_handler = handler;
881 data->pending_signal = signal;
/* Copy the siginfo now; the kernel's copy dies with this handler frame. */
883 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
885 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
889 /* the signal mask in the context (from before we were
890 * interrupted) is copied to be restored when
891 * run_deferred_handler happens. Then the usually-blocked
892 * signals are added to the mask in the context so that we are
893 * running with blocked signals when the handler returns */
894 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
895 sigaddset_deferrable(os_context_sigmask_addr(context));
900 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
902 os_context_t *context = arch_os_get_context(&void_context);
903 struct thread *thread = arch_os_get_current_thread();
904 struct interrupt_data *data = thread->interrupt_data;
906 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
907 os_restore_fp_control(context);
910 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
911 interrupt_handle_now(signal, info, context);
915 low_level_interrupt_handle_now(int signal, siginfo_t *info,
916 os_context_t *context)
918 /* No FP control fixage needed, caller has done that. */
919 check_blockables_blocked_or_lose();
920 check_interrupts_enabled_or_lose(context);
921 (*interrupt_low_level_handlers[signal])(signal, info, context);
922 /* No Darwin context fixage needed, caller does that. */
926 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
928 os_context_t *context = arch_os_get_context(&void_context);
929 struct thread *thread = arch_os_get_current_thread();
930 struct interrupt_data *data = thread->interrupt_data;
932 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
933 os_restore_fp_control(context);
936 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
937 signal,info,context))
938 low_level_interrupt_handle_now(signal, info, context);
942 #ifdef LISP_FEATURE_SB_THREAD
/* Handler for SIG_STOP_FOR_GC: park this thread in STATE_SUSPENDED
 * until the GCing thread releases it, unless *GC-INHIBIT* or
 * pseudo-atomic force the stop to be deferred.
 * NOTE(review): garbled extraction -- original line numbers fused onto
 * lines, some lines (locals such as `ss`, braces, returns) missing;
 * preserved byte-for-byte. */
945 sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
947 os_context_t *context = arch_os_get_context(&void_context);
949 struct thread *thread=arch_os_get_current_thread();
952 /* Test for GC_INHIBIT _first_, else we'd trap on every single
953 * pseudo atomic until gc is finally allowed. */
954 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
955 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
956 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
958 } else if (arch_pseudo_atomic_atomic(context)) {
959 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
960 arch_set_pseudo_atomic_interrupted(context);
961 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
965 /* Not PA and GC not inhibited -- we can stop now. */
967 /* need the context stored so it can have registers scavenged */
968 fake_foreign_function_call(context);
970 /* Block everything. */
972 thread_sigmask(SIG_BLOCK,&ss,0);
974 /* Not pending anymore. */
975 SetSymbolValue(GC_PENDING,NIL,thread);
976 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
978 if(thread_state(thread)!=STATE_RUNNING) {
979 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
980 fixnum_value(thread->state));
/* Announce suspension, then block until the GC thread flips us back. */
983 set_thread_state(thread,STATE_SUSPENDED);
984 FSHOW_SIGNAL((stderr,"suspended\n"));
986 wait_for_thread_state_change(thread, STATE_SUSPENDED);
987 FSHOW_SIGNAL((stderr,"resumed\n"));
989 if(thread_state(thread)!=STATE_RUNNING) {
990 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
991 fixnum_value(thread_state(thread)));
994 undo_fake_foreign_function_call(context);
1000 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1002 os_context_t *context = arch_os_get_context(&void_context);
1003 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1004 os_restore_fp_control(context);
1005 #ifndef LISP_FEATURE_WIN32
1006 if ((signal == SIGILL) || (signal == SIGBUS)
1007 #ifndef LISP_FEATURE_LINUX
1008 || (signal == SIGEMT)
1011 corruption_warning_and_maybe_lose("Signal %d recieved", signal);
1014 interrupt_handle_now(signal, info, context);
1017 /* manipulate the signal context and stack such that when the handler
1018 * returns, it will call function instead of whatever it was doing
1022 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1023 extern int *context_eflags_addr(os_context_t *context);
1026 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1027 extern void post_signal_tramp(void);
1028 extern void call_into_lisp_tramp(void);
/* Frob CONTEXT so that when the currently-executing signal handler
 * returns, control resumes in call_into_lisp (or, on x86/Darwin, in
 * call_into_lisp_tramp) applied to FUNCTION instead of at the
 * interrupted PC.  The interrupted register state is saved (on the
 * stack, or on x86/Darwin in an os_validate'd block) so that
 * post_signal_tramp can restore it after the Lisp function returns.
 * NOTE(review): several interior lines of this definition (return
 * type, braces, parts of the explanatory comments) are elided from
 * this extract. */
1030 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1032 check_gc_signals_unblocked_in_sigset_or_lose
1033 (os_context_sigmask_addr(context));
1034 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1035 void * fun=native_pointer(function);
1036 void *code = &(((struct simple_fun *) fun)->code);
1039 /* Build a stack frame showing `interrupted' so that the
1040 * user's backtrace makes (as much) sense (as usual) */
1042 /* FIXME: what about restoring fp state? */
1043 /* FIXME: what about restoring errno? */
1044 #ifdef LISP_FEATURE_X86
1045 /* Suppose the existence of some function that saved all
1046 * registers, called call_into_lisp, then restored GP registers and
1047 * returned. It would look something like this:
1055 pushl {address of function to call}
1056 call 0x8058db0 <call_into_lisp>
1063 * What we do here is set up the stack that call_into_lisp would
1064 * expect to see if it had been called by this code, and frob the
1065 * signal context so that signal return goes directly to call_into_lisp,
1066 * and when that function (and the lisp function it invoked) returns,
1067 * it returns to the second half of this imaginary function which
1068 * restores all registers and returns to C
1070 * For this to work, the latter part of the imaginary function
1071 * must obviously exist in reality. That would be post_signal_tramp
1074 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1076 #if defined(LISP_FEATURE_DARWIN)
1077 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1079 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1080 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1082 /* 1. os_validate (malloc/mmap) register_save_block
1083 * 2. copy register state into register_save_block
1084 * 3. put a pointer to register_save_block in a register in the context
1085 * 4. set the context's EIP to point to a trampoline which:
1086 * a. builds the fake stack frame from the block
1087 * b. frees the block
1088 * c. calls the function
/* Save area layout: [0]=old PC, [1]=function, [2..7]=GP registers,
 * [8]=eflags.  The trampoline rebuilds the frame from this block. */
1091 *register_save_area = *os_context_pc_addr(context);
1092 *(register_save_area + 1) = function;
1093 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1094 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1095 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1096 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1097 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1098 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1099 *(register_save_area + 8) = *context_eflags_addr(context);
1101 *os_context_pc_addr(context) =
1102 (os_context_register_t) call_into_lisp_tramp;
1103 *os_context_register_addr(context,reg_ECX) =
1104 (os_context_register_t) register_save_area;
/* Non-Darwin x86: build the fake call_into_lisp frame directly below
 * the interrupted stack pointer. */
1107 /* return address for call_into_lisp: */
1108 *(sp-15) = (u32)post_signal_tramp;
1109 *(sp-14) = function; /* args for call_into_lisp : function*/
1110 *(sp-13) = 0; /* arg array */
1111 *(sp-12) = 0; /* no. args */
1112 /* this order matches that used in POPAD */
1113 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1114 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1116 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1117 /* POPAD ignores the value of ESP: */
1119 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1121 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1122 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1123 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1124 *(sp-3)=*context_eflags_addr(context);
1125 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1126 *(sp-1)=*os_context_pc_addr(context);
1130 #elif defined(LISP_FEATURE_X86_64)
/* x86-64: same trick, saving all the GP registers that
 * post_signal_tramp will pop back. */
1131 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1133 /* return address for call_into_lisp: */
1134 *(sp-18) = (u64)post_signal_tramp;
1136 *(sp-17)=*os_context_register_addr(context,reg_R15);
1137 *(sp-16)=*os_context_register_addr(context,reg_R14);
1138 *(sp-15)=*os_context_register_addr(context,reg_R13);
1139 *(sp-14)=*os_context_register_addr(context,reg_R12);
1140 *(sp-13)=*os_context_register_addr(context,reg_R11);
1141 *(sp-12)=*os_context_register_addr(context,reg_R10);
1142 *(sp-11)=*os_context_register_addr(context,reg_R9);
1143 *(sp-10)=*os_context_register_addr(context,reg_R8);
1144 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1145 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1146 /* skip RBP and RSP */
1147 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1148 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1149 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1150 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1151 *(sp-3)=*context_eflags_addr(context);
1152 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1153 *(sp-1)=*os_context_pc_addr(context);
/* x86-64 calling convention: function and the two dummy arguments for
 * call_into_lisp go in registers, not on the stack. */
1155 *os_context_register_addr(context,reg_RDI) =
1156 (os_context_register_t)function; /* function */
1157 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1158 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
/* Non-x86 ports build a fake control-stack frame instead. */
1160 struct thread *th=arch_os_get_current_thread();
1161 build_fake_control_stack_frames(th,context);
1164 #ifdef LISP_FEATURE_X86
1166 #if !defined(LISP_FEATURE_DARWIN)
1167 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1168 *os_context_register_addr(context,reg_ECX) = 0;
1169 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
/* NOTE(review): reg_UESP vs reg_ESP here is presumably an OS-specific
 * (NetBSD vs others) distinction -- the #ifdef line itself is elided
 * from this extract; see the #endif comments below. */
1171 *os_context_register_addr(context,reg_UESP) =
1172 (os_context_register_t)(sp-15);
1174 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1175 #endif /* __NETBSD__ */
1176 #endif /* LISP_FEATURE_DARWIN */
1178 #elif defined(LISP_FEATURE_X86_64)
1179 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1180 *os_context_register_addr(context,reg_RCX) = 0;
1181 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1182 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1184 /* this much of the calling convention is common to all
1186 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1187 *os_context_register_addr(context,reg_NARGS) = 0;
1188 *os_context_register_addr(context,reg_LIP) =
1189 (os_context_register_t)(unsigned long)code;
1190 *os_context_register_addr(context,reg_CFP) =
1191 (os_context_register_t)(unsigned long)current_control_frame_pointer;
1193 #ifdef ARCH_HAS_NPC_REGISTER
/* Delay-slot architectures: next-PC is PC + one instruction word. */
1194 *os_context_npc_addr(context) =
1195 4 + *os_context_pc_addr(context);
1197 #ifdef LISP_FEATURE_SPARC
1198 *os_context_register_addr(context,reg_CODE) =
1199 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1201 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1205 #ifdef LISP_FEATURE_SB_THREAD
1207 /* FIXME: this function can go away when all lisp handlers are invoked
1208 * via arrange_return_to_lisp_function. */
/* Handler for the interrupt-thread signal: adds the deferrable signals
 * to the interrupted context's mask (so they stay blocked until Lisp
 * re-enables them) and arranges for RUN-INTERRUPTION to run in Lisp on
 * handler return.  NOTE(review): the return-type line and closing
 * lines of this definition are elided from this extract. */
1210 interrupt_thread_handler(int num, siginfo_t *info, void *v_context)
1212 os_context_t *context = (os_context_t*)arch_os_get_context(&v_context);
1214 FSHOW_SIGNAL((stderr,"/interrupt_thread_handler\n"));
1215 check_blockables_blocked_or_lose();
1217 /* let the handler enable interrupts again when it sees fit */
1218 sigaddset_deferrable(os_context_sigmask_addr(context));
1219 arrange_return_to_lisp_function(context,
1220 StaticSymbolFunction(RUN_INTERRUPTION));
1225 /* KLUDGE: Theoretically the approach we use for undefined alien
1226 * variables should work for functions as well, but on PPC/Darwin
1227 * we get bus error at bogus addresses instead, hence this workaround,
1228 * that has the added benefit of automatically discriminating between
1229 * functions and variables.
/* Entry point hit when an undefined alien function is called
 * (presumably reached via a linkage-table stub -- TODO confirm):
 * funcalls the Lisp-side UNDEFINED-ALIEN-FUNCTION-ERROR handler.
 * NOTE(review): the return-type and brace lines are elided. */
1232 undefined_alien_function(void)
1234 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Dispatch on the faulting address ADDR: a hit on the control-stack
 * guard page (stack exhaustion), on the return-guard page (reprotect
 * the guard page on the way back out), or on the undefined-alien page.
 * For the exhaustion and undefined-alien cases, CONTEXT is frobbed so
 * that the corresponding Lisp error handler runs when the signal
 * handler returns.  NOTE(review): return type, braces and the final
 * return-value lines are elided from this extract. */
1238 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1240 struct thread *th=arch_os_get_current_thread();
1242 /* note the os_context hackery here. When the signal handler returns,
1243 * it won't go back to what it was doing ... */
1244 if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1245 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1246 /* We hit the end of the control stack: disable guard page
1247 * protection so the error handler has some headroom, protect the
1248 * previous page so that we can catch returns from the guard page
1249 * and restore it. */
1250 corruption_warning_and_maybe_lose("Control stack exhausted");
1251 protect_control_stack_guard_page(0);
1252 protect_control_stack_return_guard_page(1);
1254 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1255 /* For the unfortunate case, when the control stack is
1256 * exhausted in a signal handler. */
1257 unblock_signals_in_context_and_maybe_warn(context);
1259 arrange_return_to_lisp_function
1260 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1263 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1264 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1265 /* We're returning from the guard page: reprotect it, and
1266 * unprotect this one. This works even if we somehow missed
1267 * the return-guard-page, and hit it on our way to new
1268 * exhaustion instead. */
1269 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1270 protect_control_stack_guard_page(1);
1271 protect_control_stack_return_guard_page(0);
1274 else if (addr >= undefined_alien_address &&
1275 addr < undefined_alien_address + os_vm_page_size) {
1276 arrange_return_to_lisp_function
1277 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1284 * noise to install handlers
1287 #ifndef LISP_FEATURE_WIN32
1288 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1289 * they are blocked, in Linux 2.6 the default handler is invoked
1290 * instead that usually coredumps. One might hastily think that adding
1291 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1292 * the whole sa_mask is ignored, instead of just the signal in question
1293 * not being added to the mask. That means if it's not blockable the
1294 * signal must be unblocked at the beginning of signal handlers.
1296 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1297 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1298 * will be unblocked in the sigmask during the signal handler. -- RMK
/* Result of the run-time SA_NODEFER probe below: -1 = not yet tested,
 * 0 = broken, 1 = works.  `volatile' because it is written from the
 * probe's signal handler while see_if_sigaction_nodefer_works() spins
 * waiting for it to change. */
1301 static volatile int sigaction_nodefer_works = -1;
1303 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1304 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1307 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1309 sigset_t empty, current;
1311 sigemptyset(&empty);
1312 thread_sigmask(SIG_BLOCK, &empty, ¤t);
1313 /* There should be exactly two blocked signals: the two we added
1314 * to sa_mask when setting up the handler. NetBSD doesn't block
1315 * the signal we're handling when SA_NODEFER is set; Linux before
1316 * 2.6.13 or so also doesn't block the other signal when
1317 * SA_NODEFER is set. */
1318 for(i = 1; i < NSIG; i++)
1319 if (sigismember(¤t, i) !=
1320 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1321 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1322 sigaction_nodefer_works = 0;
1324 if (sigaction_nodefer_works == -1)
1325 sigaction_nodefer_works = 1;
/* Run-time probe for working SA_NODEFER (see the commentary above):
 * install sigaction_nodefer_test_handler with SA_NODEFER set and two
 * known signals in sa_mask, clear the process signal mask, raise the
 * test signal, spin until the handler records its verdict in
 * sigaction_nodefer_works, then restore the old disposition.
 * NOTE(review): the return-type line and a local `sigset_t empty;'
 * declaration appear to be elided from this extract. */
1329 see_if_sigaction_nodefer_works(void)
1331 struct sigaction sa, old_sa;
1333 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1334 sa.sa_sigaction = sigaction_nodefer_test_handler;
1335 sigemptyset(&sa.sa_mask);
1336 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1337 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1338 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1339 /* Make sure no signals are blocked. */
1342 sigemptyset(&empty);
1343 thread_sigmask(SIG_SETMASK, &empty, 0);
1345 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* Busy-wait for the handler; sigaction_nodefer_works is volatile. */
1346 while (sigaction_nodefer_works == -1);
1347 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1350 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1351 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Outermost handler used (see install_handler) when SA_NODEFER is
 * broken and the signal is not blockable: explicitly unblock the
 * signal being handled, then chain to interrupt_handle_now_handler.
 * NOTE(review): return type, the `sigset_t unblock;' declaration and
 * braces are elided from this extract. */
1354 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1358 sigemptyset(&unblock);
1359 sigaddset(&unblock, signal);
1360 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1361 interrupt_handle_now_handler(signal, info, void_context);
/* Low-level counterpart of unblock_me_trampoline: unblock the signal
 * being handled, then invoke the registered low-level handler from
 * interrupt_low_level_handlers[].  NOTE(review): return type, the
 * `sigset_t unblock;' declaration and braces are elided. */
1365 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1369 sigemptyset(&unblock);
1370 sigaddset(&unblock, signal);
1371 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1372 (*interrupt_low_level_handlers[signal])(signal, info, void_context);
/* Install HANDLER as the low-level (C) handler for SIGNAL, choosing
 * the outermost wrapper by deferrability and by whether SA_NODEFER
 * works, and record the handler (or 0 for SIG_DFL) in
 * interrupt_low_level_handlers[].  lose()s on an out-of-range signal
 * number.  NOTE(review): return type, braces, the final `else' line
 * and some `#endif's are elided from this extract. */
1376 undoably_install_low_level_interrupt_handler (int signal,
1377 interrupt_handler_t handler)
1379 struct sigaction sa;
1381 if (0 > signal || signal >= NSIG) {
1382 lose("bad signal number %d\n", signal);
1385 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1386 sa.sa_sigaction = handler;
1387 else if (sigismember(&deferrable_sigset,signal))
1388 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1389 /* The use of a trampoline appears to break the
1390 arch_os_get_context() workaround for SPARC/Linux. For now,
1391 don't use the trampoline (and so be vulnerable to the problems
1392 that SA_NODEFER is meant to solve.) */
1393 #if !(defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_LINUX))
1394 else if (!sigaction_nodefer_works &&
1395 !sigismember(&blockable_sigset, signal))
1396 sa.sa_sigaction = low_level_unblock_me_trampoline;
1399 sa.sa_sigaction = handler;
1401 sigcopyset(&sa.sa_mask, &blockable_sigset);
1402 sa.sa_flags = SA_SIGINFO | SA_RESTART
1403 | (sigaction_nodefer_works ? SA_NODEFER : 0);
1404 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Memory faults (and thread interruption) may fire on an exhausted
 * control stack, so run them on the alternate signal stack. */
1405 if((signal==SIG_MEMORY_FAULT)
1406 #ifdef SIG_INTERRUPT_THREAD
1407 || (signal==SIG_INTERRUPT_THREAD)
1410 sa.sa_flags |= SA_ONSTACK;
1413 sigaction(signal, &sa, NULL);
1414 interrupt_low_level_handlers[signal] =
1415 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1419 /* This is called from Lisp. */
/* Install a Lisp-visible handler for SIGNAL.  With SIGNAL temporarily
 * blocked: unless a low-level C handler owns this signal, pick the
 * outermost sigaction wrapper (direct, maybe_now_maybe_later for
 * deferrable signals, unblock trampoline, or
 * interrupt_handle_now_handler) and install it; record HANDLER in
 * interrupt_handlers[]; return the previous Lisp handler object.
 * NOTE(review): the return-type line, the `sigset_t new, old;'
 * declarations and most of the Win32 #else branch are elided from
 * this extract. */
1421 install_handler(int signal, void handler(int, siginfo_t*, void*))
1423 #ifndef LISP_FEATURE_WIN32
1424 struct sigaction sa;
1426 union interrupt_handler oldhandler;
1428 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1431 sigaddset(&new, signal);
1432 thread_sigmask(SIG_BLOCK, &new, &old);
1434 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1435 (unsigned int)interrupt_low_level_handlers[signal]));
1436 if (interrupt_low_level_handlers[signal]==0) {
1437 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1438 ARE_SAME_HANDLER(handler, SIG_IGN))
1439 sa.sa_sigaction = handler;
1440 else if (sigismember(&deferrable_sigset, signal))
1441 sa.sa_sigaction = maybe_now_maybe_later;
1442 else if (!sigaction_nodefer_works &&
1443 !sigismember(&blockable_sigset, signal))
1444 sa.sa_sigaction = unblock_me_trampoline;
1446 sa.sa_sigaction = interrupt_handle_now_handler;
1448 sigcopyset(&sa.sa_mask, &blockable_sigset);
1449 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1450 (sigaction_nodefer_works ? SA_NODEFER : 0);
1451 sigaction(signal, &sa, NULL);
1454 oldhandler = interrupt_handlers[signal];
1455 interrupt_handlers[signal].c = handler;
1457 thread_sigmask(SIG_SETMASK, &old, 0);
1459 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1461 return (unsigned long)oldhandler.lisp;
1463 /* Probably-wrong Win32 hack */
1468 /* This must not go through lisp as it's allowed anytime, even when on
/* (NOTE(review): the remainder of the comment above and the handler's
 * return-type/brace lines are elided from this extract.)
 * SIGABRT handler: terminate immediately via lose(). */
1471 sigabrt_handler(int signal, siginfo_t *info, void *void_context)
1473 lose("SIGABRT received.\n");
/* One-time initialization of the signal subsystem: run the SA_NODEFER
 * probe, build the deferrable/blockable/gc signal sets, point every
 * interrupt_handlers[] entry at SIG_DFL, and claim SIGABRT with a
 * low-level handler.  NOTE(review): the `int i;' declaration and
 * brace/#endif lines are elided from this extract. */
1477 interrupt_init(void)
1479 #ifndef LISP_FEATURE_WIN32
1481 SHOW("entering interrupt_init()");
1482 see_if_sigaction_nodefer_works();
1483 sigemptyset(&deferrable_sigset);
1484 sigemptyset(&blockable_sigset);
1485 sigemptyset(&gc_sigset);
1486 sigaddset_deferrable(&deferrable_sigset);
1487 sigaddset_blockable(&blockable_sigset);
1488 sigaddset_gc(&gc_sigset);
1490 /* Set up high level handler information. */
1491 for (i = 0; i < NSIG; i++) {
1492 interrupt_handlers[i].c =
1493 /* (The cast here blasts away the distinction between
1494 * SA_SIGACTION-style three-argument handlers and
1495 * signal(..)-style one-argument handlers, which is OK
1496 * because it works to call the 1-argument form where the
1497 * 3-argument form is expected.) */
1498 (void (*)(int, siginfo_t*, void*))SIG_DFL;
1500 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1501 SHOW("returning from interrupt_init()");
1505 #ifndef LISP_FEATURE_WIN32
/* Return the si_code field of INFO.  NOTE(review): the return-type
 * line and braces are elided from this extract. */
1507 siginfo_code(siginfo_t *info)
1509 return info->si_code;
/* Address of the most recent memory fault, stored for later
 * inspection; see the FIXME about lossiness in
 * lisp_memory_fault_error below. */
1511 os_vm_address_t current_memory_fault_address;
/* Record the faulting address ADDR, emit a corruption warning, and
 * arrange for the Lisp-side MEMORY-FAULT-ERROR handler to run when
 * the signal handler returns.  NOTE(review): return type and brace
 * lines are elided from this extract. */
1514 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1516 /* FIXME: This is lossy: if we get another memory fault (eg. from
1517 * another thread) before lisp has read this, we lose the information.
1518 * However, since this is mostly informative, we'll live with that for
1519 * now -- some address is better than no address in this case.
1521 current_memory_fault_address = addr;
1522 /* To allow debugging memory faults in signal handlers and such. */
1523 corruption_warning_and_maybe_lose("Memory fault");
1524 unblock_signals_in_context_and_maybe_warn(context);
1525 arrange_return_to_lisp_function(context,
1526 StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Last-resort handling for a trap the runtime doesn't recognize:
 * enter a fake foreign-function-call frame, hand the context (as a
 * SAP) to the Lisp UNHANDLED-TRAP-ERROR function, and lose() if that
 * ever returns.  NOTE(review): return type, braces and an #endif are
 * elided from this extract. */
1531 unhandled_trap_error(os_context_t *context)
1533 lispobj context_sap;
1534 fake_foreign_function_call(context);
1535 unblock_gc_signals();
1536 context_sap = alloc_sap(context);
1537 #ifndef LISP_FEATURE_WIN32
1538 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1540 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1541 lose("UNHANDLED-TRAP-ERROR fell through");
1544 /* Common logic for trapping instructions. How we actually handle each
1545 * case is highly architecture dependent, but the overall shape is
1548 handle_trap(os_context_t *context, int trap)
1551 case trap_PendingInterrupt:
1552 FSHOW((stderr, "/<trap pending interrupt>\n"));
1553 arch_skip_instruction(context);
1554 interrupt_handle_pending(context);
1558 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1559 interrupt_internal_error(context, trap==trap_Cerror);
1561 case trap_Breakpoint:
1562 arch_handle_breakpoint(context);
1564 case trap_FunEndBreakpoint:
1565 arch_handle_fun_end_breakpoint(context);
1567 #ifdef trap_AfterBreakpoint
1568 case trap_AfterBreakpoint:
1569 arch_handle_after_breakpoint(context);
1572 #ifdef trap_SingleStepAround
1573 case trap_SingleStepAround:
1574 case trap_SingleStepBefore:
1575 arch_handle_single_step_trap(context, trap);
1579 fake_foreign_function_call(context);
1580 lose("%%PRIMITIVE HALT called; the party is over.\n");
1582 unhandled_trap_error(context);