2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
/* Forward declarations for static helpers defined later in this file. */
71 static void run_deferred_handler(struct interrupt_data *data, void *v_context);
72 #ifndef LISP_FEATURE_WIN32
73 static void store_signal_data_for_later (struct interrupt_data *data,
74 void *handler, int signal,
76 os_context_t *context);
/* Store the calling thread's current signal mask into *sigset.
 * Blocking the empty set is a no-op whose side effect is that
 * thread_sigmask returns the old (i.e. current) mask. */
79 fill_current_sigmask(sigset_t *sigset)
81 /* Get the current sigmask, by blocking the empty set. */
84 thread_sigmask(SIG_BLOCK, &empty, sigset);
/* Add every "deferrable" signal -- one whose Lisp-level handling may
 * be postponed (e.g. during WITHOUT-INTERRUPTS or pseudo-atomic) --
 * to the set *s. */
88 sigaddset_deferrable(sigset_t *s)
92 sigaddset(s, SIGTERM);
93 sigaddset(s, SIGQUIT);
94 sigaddset(s, SIGPIPE);
95 sigaddset(s, SIGALRM);
97 sigaddset(s, SIGTSTP);
98 sigaddset(s, SIGCHLD);
100 #ifndef LISP_FEATURE_HPUX
/* Excluded on HP-UX -- presumably these signals misbehave there;
 * TODO confirm the original rationale. */
101 sigaddset(s, SIGXCPU);
102 sigaddset(s, SIGXFSZ);
104 sigaddset(s, SIGVTALRM);
105 sigaddset(s, SIGPROF);
106 sigaddset(s, SIGWINCH);
/* Remove every deferrable signal from the set *s; the exact inverse
 * of sigaddset_deferrable above (keep the two lists in sync). */
110 sigdelset_deferrable(sigset_t *s)
112 sigdelset(s, SIGHUP);
113 sigdelset(s, SIGINT);
114 sigdelset(s, SIGQUIT);
115 sigdelset(s, SIGPIPE);
116 sigdelset(s, SIGALRM);
117 sigdelset(s, SIGURG);
118 sigdelset(s, SIGTSTP);
119 sigdelset(s, SIGCHLD);
121 #ifndef LISP_FEATURE_HPUX
122 sigdelset(s, SIGXCPU);
123 sigdelset(s, SIGXFSZ);
125 sigdelset(s, SIGVTALRM);
126 sigdelset(s, SIGPROF);
127 sigdelset(s, SIGWINCH);
/* Blockable = deferrable signals plus the GC signals. */
131 sigaddset_blockable(sigset_t *sigset)
133 sigaddset_deferrable(sigset);
134 sigaddset_gc(sigset);
/* Add the GC signals to *sigset: just SIG_STOP_FOR_GC, and only in
 * threaded builds (non-threaded builds have no GC signal). */
138 sigaddset_gc(sigset_t *sigset)
140 #ifdef LISP_FEATURE_SB_THREAD
141 sigaddset(sigset,SIG_STOP_FOR_GC);
/* Remove the GC signals from *sigset; inverse of sigaddset_gc. */
146 sigdelset_gc(sigset_t *sigset)
148 #ifdef LISP_FEATURE_SB_THREAD
149 sigdelset(sigset,SIG_STOP_FOR_GC);
153 /* initialized in interrupt_init */
/* Cached signal sets matching sigaddset_deferrable / sigaddset_blockable. */
154 sigset_t deferrable_sigset;
155 sigset_t blockable_sigset;
/* Return true iff any deferrable signal is a member of *sigset.
 * On Win32 there are no POSIX signals, so this is trivially false. */
160 deferrables_blocked_in_sigset_p(sigset_t *sigset)
162 #if !defined(LISP_FEATURE_WIN32)
164 for(i = 1; i < NSIG; i++) {
165 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
/* Invariant check: lose() if any deferrable signal is blocked in
 * *sigset.  No-op on Win32. */
173 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
175 #if !defined(LISP_FEATURE_WIN32)
177 for(i = 1; i < NSIG; i++) {
178 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
179 lose("deferrable signal %d blocked\n",i);
/* Invariant check: lose() unless every deferrable signal is blocked
 * in *sigset.  No-op on Win32. */
185 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
187 #if !defined(LISP_FEATURE_WIN32)
189 for(i = 1; i < NSIG; i++) {
190 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
191 lose("deferrable signal %d not blocked\n",i);
197 check_deferrables_unblocked_or_lose(void)
199 #if !defined(LISP_FEATURE_WIN32)
201 fill_current_sigmask(¤t);
202 check_deferrables_unblocked_in_sigset_or_lose(¤t);
207 check_deferrables_blocked_or_lose(void)
209 #if !defined(LISP_FEATURE_WIN32)
211 fill_current_sigmask(¤t);
212 check_deferrables_blocked_in_sigset_or_lose(¤t);
217 check_blockables_blocked_or_lose(void)
219 #if !defined(LISP_FEATURE_WIN32)
222 fill_current_sigmask(¤t);
223 for(i = 1; i < NSIG; i++) {
224 if (sigismember(&blockable_sigset, i) && !sigismember(¤t, i))
225 lose("blockable signal %d not blocked\n",i);
/* Invariant check: lose() if any GC signal is blocked in *sigset.
 * No-op on Win32. */
231 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
233 #if !defined(LISP_FEATURE_WIN32)
235 for(i = 1; i < NSIG; i++) {
236 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
237 lose("gc signal %d blocked\n",i);
243 check_gc_signals_unblocked_or_lose(void)
245 #if !defined(LISP_FEATURE_WIN32)
247 fill_current_sigmask(¤t);
248 check_gc_signals_unblocked_in_sigset_or_lose(¤t);
/* Invariant check: lose() unless Lisp-level interrupts are enabled
 * (*INTERRUPTS-ENABLED* non-NIL) and CONTEXT is not inside a
 * pseudo-atomic section. */
253 check_interrupts_enabled_or_lose(os_context_t *context)
255 struct thread *thread=arch_os_get_current_thread();
256 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
257 lose("interrupts not enabled\n");
258 if (arch_pseudo_atomic_atomic(context))
259 lose ("in pseudo atomic section\n");
262 /* Save sigset (or the current sigmask if 0) if there is no pending
263 * handler, because that means that deferrables are already blocked.
264 * The purpose is to avoid losing the pending gc signal if a
265 * deferrable interrupt async unwinds between clearing the pseudo
266 * atomic and trapping to GC.*/
/* If no handler is pending, save the given sigset (or, when sigset is
 * null, the current signal mask) into data->pending_mask, mark
 * gc_blocked_deferrables, and leave the deferrables blocked.  See the
 * comment above for why: this keeps a pending GC signal from being
 * lost if a deferrable async unwinds between clearing pseudo-atomic
 * and trapping to GC. */
268 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
270 struct thread *thread = arch_os_get_current_thread();
271 struct interrupt_data *data = thread->interrupt_data;
273 /* Obviously, this function is called when signals may not be
274 * blocked. Let's make sure we are not interrupted. */
275 thread_sigmask(SIG_BLOCK, &blockable_sigset, &oldset);
276 #ifndef LISP_FEATURE_SB_THREAD
/* NOTE(review): sanity check compiled only for non-threaded builds,
 * yet the (elided) comment talks about threads -- confirm the #ifndef
 * polarity against upstream. */
277 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
279 if (data->gc_blocked_deferrables)
280 lose("gc_blocked_deferrables already true\n");
282 if ((!data->pending_handler) &&
283 (!data->gc_blocked_deferrables)) {
284 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
285 data->gc_blocked_deferrables = 1;
/* Caller supplied a context's sigmask: save it, then block the
 * deferrables in that context. */
287 /* This is the sigmask of some context. */
288 sigcopyset(&data->pending_mask, sigset);
289 sigaddset_deferrable(sigset);
290 thread_sigmask(SIG_SETMASK,&oldset,0);
293 /* Operating on the current sigmask. Save oldset and
294 * unblock gc signals. In the end, this is equivalent to
295 * blocking the deferrables. */
296 sigcopyset(&data->pending_mask, &oldset);
297 unblock_gc_signals();
301 thread_sigmask(SIG_SETMASK,&oldset,0);
304 /* Are we leaving WITH-GCING and already running with interrupts
305 * enabled, without the protection of *GC-INHIBIT* T and there is gc
306 * (or stop for gc) pending, but we haven't trapped yet? */
/* True iff THREAD is in the race window described above: leaving
 * WITHOUT-GCING with interrupts enabled, *GC-INHIBIT* NIL, and a GC
 * (or, in threaded builds, a stop-for-GC) pending but not yet
 * trapped. */
308 in_leaving_without_gcing_race_p(struct thread *thread)
310 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
311 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
312 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
313 ((SymbolValue(GC_PENDING,thread) != NIL)
314 #if defined(LISP_FEATURE_SB_THREAD)
315 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
320 /* Check our baroque invariants. */
/* Consistency checks tying together the pending-handler state, the
 * special variables (*INTERRUPT-PENDING*, *GC-PENDING*, etc.) and the
 * signal mask stored in CONTEXT.  Any violated invariant lose()s. */
322 check_interrupt_context_or_lose(os_context_t *context)
324 struct thread *thread = arch_os_get_current_thread();
325 struct interrupt_data *data = thread->interrupt_data;
326 int interrupt_deferred_p = (data->pending_handler != 0);
327 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
328 sigset_t *sigset = os_context_sigmask_addr(context);
329 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
330 * handle_allocation_trap. */
331 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
332 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
333 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
/* Compared against T, not non-NIL: :IN-PROGRESS is treated separately. */
334 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
335 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
336 int in_race_p = in_leaving_without_gcing_race_p(thread);
337 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
338 * section and trapping, a SIG_STOP_FOR_GC would see the next
339 * check fail, for this reason sig_stop_for_gc handler does not
340 * call this function. */
341 if (interrupt_deferred_p) {
342 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
343 lose("Stray deferred interrupt.\n");
346 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
347 lose("GC_PENDING, but why?\n");
348 #if defined(LISP_FEATURE_SB_THREAD)
350 int stop_for_gc_pending =
351 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
352 if (stop_for_gc_pending)
353 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
354 lose("STOP_FOR_GC_PENDING, but why?\n");
358 if (interrupt_pending && !interrupt_deferred_p)
359 lose("INTERRUPT_PENDING but not pending handler.\n");
360 if ((data->gc_blocked_deferrables) && interrupt_pending)
/* NOTE(review): the "\n." in this message looks transposed -- the
 * newline presumably belongs after the period. */
361 lose("gc_blocked_deferrables and interrupt pending\n.");
362 if (data->gc_blocked_deferrables)
363 check_deferrables_blocked_in_sigset_or_lose(sigset);
364 if (interrupt_pending || interrupt_deferred_p)
365 check_deferrables_blocked_in_sigset_or_lose(sigset);
367 check_deferrables_unblocked_in_sigset_or_lose(sigset);
368 /* If deferrables are unblocked then we are open to signals
369 * that run lisp code. */
370 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
374 /* When we catch an internal error, should we pass it back to Lisp to
375 * be handled in a high-level way? (Early in cold init, the answer is
376 * 'no', because Lisp is still too brain-dead to handle anything.
377 * After sufficient initialization has been completed, the answer
/* Flipped to true once Lisp's error machinery is up. */
379 boolean internal_errors_enabled = 0;
381 #ifndef LISP_FEATURE_WIN32
/* Outermost C handlers for signals needing special (GC/trap) treatment;
 * when non-null these take precedence over the ordinary dispatch. */
382 static void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, void*);
/* Per-signal Lisp-level (or plain C) handlers installed from Lisp. */
384 union interrupt_handler interrupt_handlers[NSIG];
/* Block all blockable signals (deferrables + GC) in the calling
 * thread.  No-op on Win32. */
387 block_blockable_signals(void)
389 #ifndef LISP_FEATURE_WIN32
390 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
/* Block all deferrable signals in the calling thread.  No-op on Win32. */
395 block_deferrable_signals(void)
397 #ifndef LISP_FEATURE_WIN32
398 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
/* Remove the deferrable signals from *sigset (a context's saved mask).
 * Refuses to run -- lose()s -- if a deferred handler is still pending,
 * since unblocking would let another interrupt clobber it. */
403 unblock_deferrable_signals_in_sigset(sigset_t *sigset)
405 #ifndef LISP_FEATURE_WIN32
406 if (interrupt_handler_pending_p())
407 lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
408 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
409 sigdelset_deferrable(sigset);
/* Unblock the deferrable signals in the calling thread's live mask;
 * same pending-handler precondition as the sigset variant above. */
414 unblock_deferrable_signals(void)
416 #ifndef LISP_FEATURE_WIN32
417 if (interrupt_handler_pending_p())
418 lose("unblock_deferrable_signals: losing proposition\n");
419 check_gc_signals_unblocked_or_lose();
420 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
/* Unblock the GC signals in the calling thread; meaningful only in
 * threaded non-Win32 builds (otherwise compiles to nothing). */
425 unblock_gc_signals(void)
427 #if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
428 thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
/* Before returning to Lisp: unblock GC signals in CONTEXT's saved mask
 * (warning loudly, since they are only ever blocked when interruption
 * is unsafe), and, when no handler is pending, the deferrables too. */
433 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
435 #ifndef LISP_FEATURE_WIN32
437 sigset_t *sigset=os_context_sigmask_addr(context);
438 for(i = 1; i < NSIG; i++) {
439 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
442 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
443 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
444 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
449 sigdelset_gc(sigset);
450 if (!interrupt_handler_pending_p()) {
451 unblock_deferrable_signals_in_sigset(sigset);
458 * utility routines used by various signal handlers
/* On ports with a Lisp control stack distinct from the C stack, build
 * fake control-stack frame(s) from CONTEXT's registers so backtraces
 * and GC see a well-formed stack above the interrupted code.  No-op
 * when the C stack is the control stack. */
462 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
464 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
468 /* Build a fake stack frame or frames */
470 current_control_frame_pointer =
471 (lispobj *)(unsigned long)
472 (*os_context_register_addr(context, reg_CSP));
473 if ((lispobj *)(unsigned long)
474 (*os_context_register_addr(context, reg_CFP))
475 == current_control_frame_pointer) {
476 /* There is a small window during call where the callee's
477 * frame isn't built yet. */
478 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
479 == FUN_POINTER_LOWTAG) {
480 /* We have called, but not built the new frame, so
481 * build it for them. */
482 current_control_frame_pointer[0] =
483 *os_context_register_addr(context, reg_OCFP);
484 current_control_frame_pointer[1] =
485 *os_context_register_addr(context, reg_LRA);
/* 8 slots per frame -- presumably matches the Lisp frame layout on
 * these ports; confirm against the backend's frame size. */
486 current_control_frame_pointer += 8;
487 /* Build our frame on top of it. */
488 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
491 /* We haven't yet called, build our frame as if the
492 * partial frame wasn't there. */
493 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
496 /* We can't tell whether we are still in the caller if it had to
497 * allocate a stack frame due to stack arguments. */
498 /* This observation provoked some past CMUCL maintainer to ask
499 * "Can anything strange happen during return?" */
502 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
505 current_control_stack_pointer = current_control_frame_pointer + 8;
507 current_control_frame_pointer[0] = oldcont;
508 current_control_frame_pointer[1] = NIL;
509 current_control_frame_pointer[2] =
510 (lispobj)(*os_context_register_addr(context, reg_CODE));
514 /* Stores the context for gc to scavenge and builds fake stack
/* Make the runtime look as though Lisp called out to C: pull the Lisp
 * state (alloc pointer, binding stack, control frames) out of CONTEXT,
 * push CONTEXT onto the thread's interrupt-context array, and bump
 * FREE_INTERRUPT_CONTEXT_INDEX via dynamic binding.  Paired with
 * undo_fake_foreign_function_call. */
517 fake_foreign_function_call(os_context_t *context)
520 struct thread *thread=arch_os_get_current_thread();
522 /* context_index incrementing must not be interrupted */
523 check_blockables_blocked_or_lose();
525 /* Get current Lisp state from context. */
527 dynamic_space_free_pointer =
528 (lispobj *)(unsigned long)
529 (*os_context_register_addr(context, reg_ALLOC));
530 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
531 /* dynamic_space_free_pointer); */
532 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
/* Low bit set would mean a corrupted (non-word-aligned) alloc pointer. */
533 if ((long)dynamic_space_free_pointer & 1) {
534 lose("dead in fake_foreign_function_call, context = %x\n", context);
537 /* why don't PPC and SPARC do something like this: */
538 #if defined(LISP_FEATURE_HPPA)
539 if ((long)dynamic_space_free_pointer & 4) {
540 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
545 current_binding_stack_pointer =
546 (lispobj *)(unsigned long)
547 (*os_context_register_addr(context, reg_BSP));
550 build_fake_control_stack_frames(thread,context);
552 /* Do dynamic binding of the active interrupt context index
553 * and save the context in the context array. */
555 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
557 if (context_index >= MAX_INTERRUPTS) {
558 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
561 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
562 make_fixnum(context_index + 1),thread);
564 thread->interrupt_contexts[context_index] = context;
566 #ifdef FOREIGN_FUNCTION_CALL_FLAG
567 foreign_function_call_active = 1;
571 /* blocks all blockable signals. If you are calling from a signal handler,
572 * the usual signal mask will be restored from the context when the handler
573 * finishes. Otherwise, be careful */
/* Reverse fake_foreign_function_call: unbind the context index and
 * write the (possibly moved) dynamic-space free pointer back into
 * CONTEXT's reg_ALLOC, preserving the flag/lowtag bits that live in
 * that register's low bits.  Leaves all blockable signals blocked. */
575 undo_fake_foreign_function_call(os_context_t *context)
577 struct thread *thread=arch_os_get_current_thread();
578 /* Block all blockable signals. */
579 block_blockable_signals();
581 #ifdef FOREIGN_FUNCTION_CALL_FLAG
582 foreign_function_call_active = 0;
585 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
589 /* Put the dynamic space free pointer back into the context. */
590 *os_context_register_addr(context, reg_ALLOC) =
591 (unsigned long) dynamic_space_free_pointer
592 | (*os_context_register_addr(context, reg_ALLOC)
595 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
597 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
602 /* a handler for the signal caused by execution of a trap opcode
603 * signalling an internal error */
/* Handle a trap-opcode internal error: either lose() (too early in
 * init) or hand a SAP to CONTEXT over to the Lisp INTERNAL-ERROR
 * function.  CONTINUABLE says whether Lisp may resume past the trap;
 * if it does, skip the trapping instruction. */
605 interrupt_internal_error(os_context_t *context, boolean continuable)
609 fake_foreign_function_call(context);
611 if (!internal_errors_enabled) {
612 describe_internal_error(context);
613 /* There's no good way to recover from an internal error
614 * before the Lisp error handling mechanism is set up. */
615 lose("internal error too early in init, can't recover\n");
618 /* Allocate the SAP object while the interrupts are still
620 unblock_gc_signals();
621 context_sap = alloc_sap(context);
623 #ifndef LISP_FEATURE_WIN32
/* Restore the interrupted code's signal mask before calling Lisp. */
624 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
627 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
628 /* Workaround for blocked SIGTRAP. */
631 sigemptyset(&newset);
632 sigaddset(&newset, SIGTRAP);
633 thread_sigmask(SIG_UNBLOCK, &newset, 0);
637 SHOW("in interrupt_internal_error");
639 /* Display some rudimentary debugging information about the
640 * error, so that even if the Lisp error handler gets badly
641 * confused, we have a chance to determine what's going on. */
642 describe_internal_error(context);
644 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
645 continuable ? T : NIL);
647 undo_fake_foreign_function_call(context); /* blocks signals again */
649 arch_skip_instruction(context);
/* True iff the current thread has a deferred interrupt handler waiting
 * in its interrupt_data. */
653 interrupt_handler_pending_p(void)
655 struct thread *thread = arch_os_get_current_thread();
656 struct interrupt_data *data = thread->interrupt_data;
657 return (data->pending_handler != 0);
/* Run whatever was deferred during pseudo-atomic / WITHOUT-INTERRUPTS:
 * first any pending GC (or stop-for-GC), then any deferred signal
 * handler.  Called from the trap at the end of such sections. */
661 interrupt_handle_pending(os_context_t *context)
663 /* There are four ways we can get here. First, if an interrupt
664 * occurs within pseudo-atomic, it will be deferred, and we'll
665 * trap to here at the end of the pseudo-atomic block. Second, if
666 * the GC (in alloc()) decides that a GC is required, it will set
667 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
668 * and alloc() is always called from within pseudo-atomic, and
669 * thus we end up here again. Third, when calling GC-ON or at the
670 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
671 * here if there is a pending GC. Fourth, ahem, at the end of
672 * WITHOUT-INTERRUPTS (bar complications with nesting). */
674 /* Win32 only needs to handle the GC cases (for now?) */
676 struct thread *thread = arch_os_get_current_thread();
677 struct interrupt_data *data = thread->interrupt_data;
679 if (arch_pseudo_atomic_atomic(context)) {
/* NOTE(review): "pseduo" is a typo for "pseudo" in this runtime
 * message (left untouched here; message text is behavior). */
680 lose("Handling pending interrupt in pseduo atomic.");
683 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
685 check_blockables_blocked_or_lose();
687 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
688 * handler, then the pending mask was saved and
689 * gc_blocked_deferrables set. Hence, there can be no pending
690 * handler and it's safe to restore the pending mask.
692 * Note, that if gc_blocked_deferrables is false we may still have
693 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
694 * pseudo atomic was interrupted by a deferrable first. */
695 if (data->gc_blocked_deferrables) {
696 if (data->pending_handler)
697 lose("GC blocked deferrables but still got a pending handler.");
698 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
699 lose("GC blocked deferrables while GC is inhibited.");
700 /* Restore the saved signal mask from the original signal (the
701 * one that interrupted us during the critical section) into
702 * the os_context for the signal we're currently in the
703 * handler for. This should ensure that when we return from
704 * the handler the blocked signals are unblocked. */
705 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
706 data->gc_blocked_deferrables = 0;
709 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
710 void *original_pending_handler = data->pending_handler;
712 #ifdef LISP_FEATURE_SB_THREAD
713 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
714 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
715 * the signal handler if it actually stops us. */
716 arch_clear_pseudo_atomic_interrupted(context);
717 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
720 /* Test for T and not for != NIL since the value :IN-PROGRESS
721 * is used in SUB-GC as part of the mechanism to suppress
723 if (SymbolValue(GC_PENDING,thread) == T) {
725 /* Two reasons for doing this. First, if there is a
726 * pending handler we don't want to run. Second, we are
727 * going to clear pseudo atomic interrupted to avoid
728 * spurious trapping on every allocation in SUB_GC and
729 * having a pending handler with interrupts enabled and
730 * without pseudo atomic interrupted breaks an
732 if (data->pending_handler) {
733 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
734 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
737 arch_clear_pseudo_atomic_interrupted(context);
739 /* GC_PENDING is cleared in SUB-GC, or if another thread
740 * is doing a gc already we will get a SIG_STOP_FOR_GC and
741 * that will clear it.
743 * If there is a pending handler or gc was triggered in a
744 * signal handler then maybe_gc won't run POST_GC and will
745 * return normally. */
746 if (!maybe_gc(context))
747 lose("GC not inhibited but maybe_gc did not GC.");
749 if (data->pending_handler) {
753 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
754 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
755 * GC-PENDING is not NIL then we cannot trap on pseudo
756 * atomic due to GC (see if(GC_PENDING) logic in
757 * cheneygc.c and gencgc.c), plus there is an outer
758 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
760 lose("Trapping to run pending handler while GC in progress.");
763 check_blockables_blocked_or_lose();
765 /* No GC shall be lost. If SUB_GC triggers another GC then
766 * that should be handled on the spot. */
767 if (SymbolValue(GC_PENDING,thread) != NIL)
768 lose("GC_PENDING after doing gc.");
769 #ifdef LISP_FEATURE_SB_THREAD
770 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
771 lose("STOP_FOR_GC_PENDING after doing gc.");
773 /* Check two things. First, that gc does not clobber a handler
774 * that's already pending. Second, that there is no interrupt
775 * lossage: if original_pending_handler was NULL then even if
776 * an interrupt arrived during GC (POST-GC, really) it was
/* NOTE(review): "%d" formats a pointer here; presumably both
 * specifiers should be "%x" -- confirm against lose()'s contract. */
778 if (original_pending_handler != data->pending_handler)
779 lose("pending handler changed in gc: %x -> %d.",
780 original_pending_handler, data->pending_handler);
783 #ifndef LISP_FEATURE_WIN32
784 /* There may be no pending handler, because it was only a gc that
785 * had to be executed or because Lisp is a bit too eager to call
786 * DO-PENDING-INTERRUPT. */
787 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
788 (data->pending_handler)) {
789 /* No matter how we ended up here, clear both
790 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
791 * because we checked above that there is no GC pending. */
792 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
793 arch_clear_pseudo_atomic_interrupted(context);
794 /* Restore the sigmask in the context. */
795 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
796 /* This will break on sparc linux: the deferred handler really
797 * wants to be called with a void_context */
798 run_deferred_handler(data,(void *)context);
800 /* It is possible that the end of this function was reached
801 * without ever actually doing anything, the tests in Lisp for
802 * when to call receive-pending-interrupt are not exact. */
803 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
808 * the two main signal handlers:
809 * interrupt_handle_now(..)
810 * maybe_now_maybe_later(..)
812 * to which we have added interrupt_handle_now_handler(..). Why?
813 * Well, mostly because the SPARC/Linux platform doesn't quite do
814 * signals the way we want them done. The third argument in the
815 * handler isn't filled in by the kernel properly, so we fix it up
816 * ourselves in the arch_os_get_context(..) function; however, we only
817 * want to do this when we first hit the handler, and not when
818 * interrupt_handle_now(..) is being called from some other handler
819 * (when the fixup will already have been done). -- CSR, 2002-07-23
/* Core signal dispatch: look up the handler installed for SIGNAL and
 * run it -- a Lisp function (called with SAPs to info/context inside a
 * fake foreign call), a C function (with the original mask restored),
 * or nothing for SIG_IGN; SIG_DFL here is a fatal bug.  Runs with all
 * blockables blocked. */
823 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
825 #ifdef FOREIGN_FUNCTION_CALL_FLAG
826 boolean were_in_lisp;
828 union interrupt_handler handler;
830 check_blockables_blocked_or_lose();
832 #ifndef LISP_FEATURE_WIN32
833 if (sigismember(&deferrable_sigset,signal))
834 check_interrupts_enabled_or_lose(context);
837 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
838 /* Under Linux on some architectures, we appear to have to restore
839 the FPU control word from the context, as after the signal is
840 delivered we appear to have a null FPU control word. */
841 os_restore_fp_control(context);
844 handler = interrupt_handlers[signal];
/* SIG_IGN: nothing to do, return immediately. */
846 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
850 #ifdef FOREIGN_FUNCTION_CALL_FLAG
851 were_in_lisp = !foreign_function_call_active;
855 fake_foreign_function_call(context);
858 FSHOW_SIGNAL((stderr,
859 "/entering interrupt_handle_now(%d, info, context)\n",
862 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
864 /* This can happen if someone tries to ignore or default one
865 * of the signals we need for runtime support, and the runtime
866 * support decides to pass on it. */
867 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
869 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
870 /* Once we've decided what to do about contexts in a
871 * return-elsewhere world (the original context will no longer
872 * be available; should we copy it or was nobody using it anyway?)
873 * then we should convert this to return-elsewhere */
875 /* CMUCL comment said "Allocate the SAPs while the interrupts
876 * are still disabled.". I (dan, 2003.08.21) assume this is
877 * because we're not in pseudoatomic and allocation shouldn't
878 * be interrupted. In which case it's no longer an issue as
879 * all our allocation from C now goes through a PA wrapper,
880 * but still, doesn't hurt.
882 * Yeah, but non-gencgc platforms don't really wrap allocation
883 * in PA. MG - 2005-08-29 */
885 lispobj info_sap, context_sap;
886 /* Leave deferrable signals blocked, the handler itself will
887 * allow signals again when it sees fit. */
888 unblock_gc_signals();
889 context_sap = alloc_sap(context);
890 info_sap = alloc_sap(info);
892 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
894 funcall3(handler.lisp,
899 /* This cannot happen in sane circumstances. */
901 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
903 #ifndef LISP_FEATURE_WIN32
904 /* Allow signals again. */
905 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
907 (*handler.c)(signal, info, context);
910 #ifdef FOREIGN_FUNCTION_CALL_FLAG
914 undo_fake_foreign_function_call(context); /* block signals again */
917 FSHOW_SIGNAL((stderr,
918 "/returning from interrupt_handle_now(%d, info, context)\n",
922 /* This is called at the end of a critical section if the indications
923 * are that some signal was deferred during the section. Note that as
924 * far as C or the kernel is concerned we dealt with the signal
925 * already; we're just doing the Lisp-level processing now that we
/* Invoke the deferred handler stashed in DATA with the stashed signal
 * and siginfo, passing V_CONTEXT through as the context argument. */
928 run_deferred_handler(struct interrupt_data *data, void *v_context)
930 /* The pending_handler may enable interrupts and then another
931 * interrupt may hit, overwrite interrupt_data, so reset the
932 * pending handler before calling it. Trust the handler to finish
933 * with the siginfo before enabling interrupts. */
934 void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler;
936 data->pending_handler=0;
937 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
938 (*pending_handler)(data->pending_signal,&(data->pending_info), v_context);
941 #ifndef LISP_FEATURE_WIN32
/* Decide whether HANDLER must run now or be deferred.  Defers (stashes
 * the signal data and returns nonzero) when Lisp interrupts are
 * disabled or we are in the leaving-WITHOUT-GCING race window, or when
 * inside pseudo-atomic (also marking pseudo-atomic-interrupted).
 * Returns 0 when the caller should run the handler immediately. */
943 maybe_defer_handler(void *handler, struct interrupt_data *data,
944 int signal, siginfo_t *info, os_context_t *context)
946 struct thread *thread=arch_os_get_current_thread();
948 check_blockables_blocked_or_lose();
950 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
951 lose("interrupt already pending\n");
952 if (thread->interrupt_data->pending_handler)
953 lose("there is a pending handler already (PA)\n");
954 if (data->gc_blocked_deferrables)
955 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
956 check_interrupt_context_or_lose(context);
957 /* If interrupts are disabled then INTERRUPT_PENDING is set and
958 * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
959 * atomic section inside a WITHOUT-INTERRUPTS.
961 * Also, if in_leaving_without_gcing_race_p then
962 * interrupt_handle_pending is going to be called soon, so
963 * stashing the signal away is safe.
965 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
966 in_leaving_without_gcing_race_p(thread)) {
967 store_signal_data_for_later(data,handler,signal,info,context);
968 SetSymbolValue(INTERRUPT_PENDING, T,thread);
969 FSHOW_SIGNAL((stderr,
970 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
971 (unsigned int)handler,signal,
972 in_leaving_without_gcing_race_p(thread)));
973 check_interrupt_context_or_lose(context);
976 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
977 * actually use its argument for anything on x86, so this branch
978 * may succeed even when context is null (gencgc alloc()) */
979 if (arch_pseudo_atomic_atomic(context)) {
980 store_signal_data_for_later(data,handler,signal,info,context);
981 arch_set_pseudo_atomic_interrupted(context);
982 FSHOW_SIGNAL((stderr,
983 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
984 (unsigned int)handler,signal));
985 check_interrupt_context_or_lose(context);
988 FSHOW_SIGNAL((stderr,
989 "/maybe_defer_handler(%x,%d): not deferred\n",
990 (unsigned int)handler,signal));
/* Stash HANDLER plus the signal number/siginfo into DATA so that
 * run_deferred_handler can replay them later; also saves CONTEXT's
 * sigmask and blocks the deferrables in CONTEXT so that the code we
 * return into runs with them blocked. */
995 store_signal_data_for_later (struct interrupt_data *data, void *handler,
997 siginfo_t *info, os_context_t *context)
999 if (data->pending_handler)
1000 lose("tried to overwrite pending interrupt handler %x with %x\n",
1001 data->pending_handler, handler);
1003 lose("tried to defer null interrupt handler\n");
1004 data->pending_handler = handler;
1005 data->pending_signal = signal;
1007 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1009 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1013 lose("Null context");
1015 /* the signal mask in the context (from before we were
1016 * interrupted) is copied to be restored when run_deferred_handler
1017 * happens. Then the usually-blocked signals are added to the mask
1018 * in the context so that we are running with blocked signals when
1019 * the handler returns */
1020 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1021 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost handler for deferrable signals: run interrupt_handle_now
 * immediately unless maybe_defer_handler decided to stash it. */
1025 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1027 os_context_t *context = arch_os_get_context(&void_context);
1028 struct thread *thread = arch_os_get_current_thread();
1029 struct interrupt_data *data = thread->interrupt_data;
1031 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1032 os_restore_fp_control(context);
1035 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1036 interrupt_handle_now(signal, info, context);
/* Dispatch SIGNAL straight to its low-level (C) handler, bypassing
 * the Lisp-level machinery.  Caller has already fixed up FP control
 * and (on Darwin) the context. */
1040 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1041 os_context_t *context)
1043 /* No FP control fixage needed, caller has done that. */
1044 check_blockables_blocked_or_lose();
/* NOTE(review): requiring Lisp interrupts enabled here looks
 * suspicious for low-level signals (e.g. SIGTRAP inside
 * WITHOUT-INTERRUPTS) -- confirm against upstream. */
1045 check_interrupts_enabled_or_lose(context);
1046 (*interrupt_low_level_handlers[signal])(signal, info, context);
1047 /* No Darwin context fixage needed, caller does that. */
/* Low-level analogue of maybe_now_maybe_later: run the C-level handler
 * now if it is safe, otherwise defer it via maybe_defer_handler. */
1051 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1053 os_context_t *context = arch_os_get_context(&void_context);
1054 struct thread *thread = arch_os_get_current_thread();
1055 struct interrupt_data *data = thread->interrupt_data;
/* Restore the FP control word clobbered by signal delivery. */
1057 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1058 os_restore_fp_control(context);
1061 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1062 signal,info,context))
1063 low_level_interrupt_handle_now(signal, info, context);
1067 #ifdef LISP_FEATURE_SB_THREAD
1069 /* This function must not cons, because that may trigger a GC. */
/* Handler for the stop-the-world signal sent by the GCing thread.
 * Defers if GC is inhibited or we are pseudo-atomic; otherwise parks
 * this thread in STATE_SUSPENDED until the GC releases it. */
1071 sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
1073 os_context_t *context = arch_os_get_context(&void_context);
1075 struct thread *thread=arch_os_get_current_thread();
1078 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1079 * pseudo atomic until gc is finally allowed. */
1080 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1081 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1082 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1084 } else if (arch_pseudo_atomic_atomic(context)) {
/* Pseudo-atomic: arrange for a trap at the end of the PA section,
 * and remember which signals to restore when it fires. */
1085 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1086 arch_set_pseudo_atomic_interrupted(context);
1087 maybe_save_gc_mask_and_block_deferrables
1088 (os_context_sigmask_addr(context));
1089 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1093 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1095 /* Not PA and GC not inhibited -- we can stop now. */
1097 /* need the context stored so it can have registers scavenged */
1098 fake_foreign_function_call(context);
1100 /* Block everything. */
/* NOTE(review): ss is declared/filled on a line not visible in this
 * chunk; presumably a full signal set -- confirm against the file. */
1102 thread_sigmask(SIG_BLOCK,&ss,0);
1104 /* Not pending anymore. */
1105 SetSymbolValue(GC_PENDING,NIL,thread);
1106 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1108 if(thread_state(thread)!=STATE_RUNNING) {
1109 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1110 fixnum_value(thread->state));
/* Announce suspension, then wait for the GC thread to flip us back. */
1113 set_thread_state(thread,STATE_SUSPENDED);
1114 FSHOW_SIGNAL((stderr,"suspended\n"));
1116 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1117 FSHOW_SIGNAL((stderr,"resumed\n"));
1119 if(thread_state(thread)!=STATE_RUNNING) {
1120 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1121 fixnum_value(thread_state(thread)));
1124 undo_fake_foreign_function_call(context);
1130 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1132 os_context_t *context = arch_os_get_context(&void_context);
1133 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1134 os_restore_fp_control(context);
1135 #ifndef LISP_FEATURE_WIN32
1136 if ((signal == SIGILL) || (signal == SIGBUS)
1137 #ifndef LISP_FEATURE_LINUX
1138 || (signal == SIGEMT)
1141 corruption_warning_and_maybe_lose("Signal %d recieved", signal);
1144 interrupt_handle_now(signal, info, context);
1147 /* manipulate the signal context and stack such that when the handler
1148 * returns, it will call function instead of whatever it was doing
1152 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1153 extern int *context_eflags_addr(os_context_t *context);
1156 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1157 extern void post_signal_tramp(void);
1158 extern void call_into_lisp_tramp(void);
/* Frob the signal context so that, when this handler returns, control
 * transfers to the given Lisp function instead of resuming the
 * interrupted code. On x86/x86-64 this builds the stack image that
 * call_into_lisp expects and points the saved PC at it; on other
 * architectures it builds fake control-stack frames and jumps to the
 * function's code directly. Extremely order-sensitive: every register
 * slot below must match post_signal_tramp / POPA(D) layout exactly. */
1160 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1162 check_gc_signals_unblocked_in_sigset_or_lose
1163 (os_context_sigmask_addr(context));
1164 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1165 void * fun=native_pointer(function);
1166 void *code = &(((struct simple_fun *) fun)->code);
1169 /* Build a stack frame showing `interrupted' so that the
1170 * user's backtrace makes (as much) sense (as usual) */
1172 /* FIXME: what about restoring fp state? */
1173 /* FIXME: what about restoring errno? */
1174 #ifdef LISP_FEATURE_X86
1175 /* Suppose the existence of some function that saved all
1176 * registers, called call_into_lisp, then restored GP registers and
1177 * returned. It would look something like this:
1185 pushl {address of function to call}
1186 call 0x8058db0 <call_into_lisp>
1193 * What we do here is set up the stack that call_into_lisp would
1194 * expect to see if it had been called by this code, and frob the
1195 * signal context so that signal return goes directly to call_into_lisp,
1196 * and when that function (and the lisp function it invoked) returns,
1197 * it returns to the second half of this imaginary function which
1198 * restores all registers and returns to C
1200 * For this to work, the latter part of the imaginary function
1201 * must obviously exist in reality. That would be post_signal_tramp
1204 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1206 #if defined(LISP_FEATURE_DARWIN)
/* Darwin: sigreturn rewrites the stack, so registers are parked in a
 * freshly mapped save area and rebuilt by call_into_lisp_tramp. */
1207 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1209 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1210 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1212 /* 1. os_validate (malloc/mmap) register_save_block
1213 * 2. copy register state into register_save_block
1214 * 3. put a pointer to register_save_block in a register in the context
1215 * 4. set the context's EIP to point to a trampoline which:
1216 * a. builds the fake stack frame from the block
1217 * b. frees the block
1218 * c. calls the function
1221 *register_save_area = *os_context_pc_addr(context);
1222 *(register_save_area + 1) = function;
1223 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1224 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1225 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1226 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1227 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1228 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1229 *(register_save_area + 8) = *context_eflags_addr(context);
1231 *os_context_pc_addr(context) =
1232 (os_context_register_t) call_into_lisp_tramp;
1233 *os_context_register_addr(context,reg_ECX) =
1234 (os_context_register_t) register_save_area;
1237 /* return address for call_into_lisp: */
1238 *(sp-15) = (u32)post_signal_tramp;
1239 *(sp-14) = function; /* args for call_into_lisp : function*/
1240 *(sp-13) = 0; /* arg array */
1241 *(sp-12) = 0; /* no. args */
1242 /* this order matches that used in POPAD */
1243 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1244 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1246 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1247 /* POPAD ignores the value of ESP: */
1249 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1251 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1252 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1253 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1254 *(sp-3)=*context_eflags_addr(context);
1255 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1256 *(sp-1)=*os_context_pc_addr(context);
1260 #elif defined(LISP_FEATURE_X86_64)
/* x86-64 variant of the same trick; slot order matches the register
 * pops performed by post_signal_tramp. */
1261 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1263 /* return address for call_into_lisp: */
1264 *(sp-18) = (u64)post_signal_tramp;
1266 *(sp-17)=*os_context_register_addr(context,reg_R15);
1267 *(sp-16)=*os_context_register_addr(context,reg_R14);
1268 *(sp-15)=*os_context_register_addr(context,reg_R13);
1269 *(sp-14)=*os_context_register_addr(context,reg_R12);
1270 *(sp-13)=*os_context_register_addr(context,reg_R11);
1271 *(sp-12)=*os_context_register_addr(context,reg_R10);
1272 *(sp-11)=*os_context_register_addr(context,reg_R9);
1273 *(sp-10)=*os_context_register_addr(context,reg_R8);
1274 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1275 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1276 /* skip RBP and RSP */
1277 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1278 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1279 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1280 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1281 *(sp-3)=*context_eflags_addr(context);
1282 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1283 *(sp-1)=*os_context_pc_addr(context);
/* call_into_lisp arguments go in the SysV argument registers. */
1285 *os_context_register_addr(context,reg_RDI) =
1286 (os_context_register_t)function; /* function */
1287 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1288 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1290 struct thread *th=arch_os_get_current_thread();
1291 build_fake_control_stack_frames(th,context);
1294 #ifdef LISP_FEATURE_X86
1296 #if !defined(LISP_FEATURE_DARWIN)
1297 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1298 *os_context_register_addr(context,reg_ECX) = 0;
1299 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
/* NetBSD exposes the user stack pointer as UESP rather than ESP. */
1301 *os_context_register_addr(context,reg_UESP) =
1302 (os_context_register_t)(sp-15);
1304 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1305 #endif /* __NETBSD__ */
1306 #endif /* LISP_FEATURE_DARWIN */
1308 #elif defined(LISP_FEATURE_X86_64)
1309 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1310 *os_context_register_addr(context,reg_RCX) = 0;
1311 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1312 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1314 /* this much of the calling convention is common to all
1316 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1317 *os_context_register_addr(context,reg_NARGS) = 0;
1318 *os_context_register_addr(context,reg_LIP) =
1319 (os_context_register_t)(unsigned long)code;
1320 *os_context_register_addr(context,reg_CFP) =
1321 (os_context_register_t)(unsigned long)current_control_frame_pointer;
/* Delay-slot architectures also need the next-PC updated. */
1323 #ifdef ARCH_HAS_NPC_REGISTER
1324 *os_context_npc_addr(context) =
1325 4 + *os_context_pc_addr(context);
1327 #ifdef LISP_FEATURE_SPARC
1328 *os_context_register_addr(context,reg_CODE) =
1329 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1331 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1335 /* KLUDGE: Theoretically the approach we use for undefined alien
1336 * variables should work for functions as well, but on PPC/Darwin
1337 * we get bus error at bogus addresses instead, hence this workaround,
1338 * that has the added benefit of automatically discriminating between
1339 * functions and variables.
/* Called when an undefined alien function is invoked: signals the
 * corresponding Lisp error via the static symbol's function. */
1342 undefined_alien_function(void)
1344 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Decide what a fault at ADDR means when it lands on one of our guard
 * pages: control-stack guard (exhaustion), control-stack return guard
 * (recovery), or the undefined-alien page. Returns via the else-if
 * chain; callers fall through for ordinary faults. */
1348 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1350 struct thread *th=arch_os_get_current_thread();
1352 /* note the os_context hackery here. When the signal handler returns,
1353 * it won't go back to what it was doing ... */
1354 if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1355 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1356 /* We hit the end of the control stack: disable guard page
1357 * protection so the error handler has some headroom, protect the
1358 * previous page so that we can catch returns from the guard page
1359 * and restore it. */
1360 corruption_warning_and_maybe_lose("Control stack exhausted");
1361 protect_control_stack_guard_page(0);
1362 protect_control_stack_return_guard_page(1);
1364 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1365 /* For the unfortunate case, when the control stack is
1366 * exhausted in a signal handler. */
1367 unblock_signals_in_context_and_maybe_warn(context);
1369 arrange_return_to_lisp_function
1370 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1373 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1374 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1375 /* We're returning from the guard page: reprotect it, and
1376 * unprotect this one. This works even if we somehow missed
1377 * the return-guard-page, and hit it on our way to new
1378 * exhaustion instead. */
1379 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1380 protect_control_stack_guard_page(1);
1381 protect_control_stack_return_guard_page(0);
1384 else if (addr >= undefined_alien_address &&
1385 addr < undefined_alien_address + os_vm_page_size) {
/* Read of an undefined alien variable: raise the Lisp error. */
1386 arrange_return_to_lisp_function
1387 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1394 * noise to install handlers
1397 #ifndef LISP_FEATURE_WIN32
1398 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1399 * they are blocked, in Linux 2.6 the default handler is invoked
1400 * instead that usually coredumps. One might hastily think that adding
1401 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1402 * the whole sa_mask was ignored; the only effect was that the signal
1403 * in question was not added to the mask. That means if it's not blockable the
1404 * signal must be unblocked at the beginning of signal handlers.
1406 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1407 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1408 * will be unblocked in the sigmask during the signal handler. -- RMK
1411 static volatile int sigaction_nodefer_works = -1;
1413 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1414 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1417 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1419 sigset_t empty, current;
1421 sigemptyset(&empty);
1422 thread_sigmask(SIG_BLOCK, &empty, ¤t);
1423 /* There should be exactly two blocked signals: the two we added
1424 * to sa_mask when setting up the handler. NetBSD doesn't block
1425 * the signal we're handling when SA_NODEFER is set; Linux before
1426 * 2.6.13 or so also doesn't block the other signal when
1427 * SA_NODEFER is set. */
1428 for(i = 1; i < NSIG; i++)
1429 if (sigismember(¤t, i) !=
1430 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1431 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1432 sigaction_nodefer_works = 0;
1434 if (sigaction_nodefer_works == -1)
1435 sigaction_nodefer_works = 1;
/* Probe whether this kernel honors SA_NODEFER + sa_mask correctly:
 * install sigaction_nodefer_test_handler with two signals in sa_mask,
 * raise the kill signal, and busy-wait until the handler has set the
 * volatile flag sigaction_nodefer_works. Runs once at startup. */
1439 see_if_sigaction_nodefer_works(void)
1441 struct sigaction sa, old_sa;
1443 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1444 sa.sa_sigaction = sigaction_nodefer_test_handler;
1445 sigemptyset(&sa.sa_mask);
1446 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1447 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1448 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1449 /* Make sure no signals are blocked. */
1452 sigemptyset(&empty);
1453 thread_sigmask(SIG_SETMASK, &empty, 0);
/* Deliver the probe signal to ourselves; the handler decides. */
1455 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* Spin until the handler has recorded its verdict (flag is volatile). */
1456 while (sigaction_nodefer_works == -1);
1457 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1460 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1461 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Wrapper used when SA_NODEFER is broken: explicitly unblock the
 * signal being handled, then run the normal handler. */
1464 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1468 sigemptyset(&unblock);
1469 sigaddset(&unblock, signal);
1470 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1471 interrupt_handle_now_handler(signal, info, void_context);
/* Low-level analogue of unblock_me_trampoline: unblock the signal,
 * then dispatch straight to the registered C-level handler. */
1475 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1479 sigemptyset(&unblock);
1480 sigaddset(&unblock, signal);
1481 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1482 (*interrupt_low_level_handlers[signal])(signal, info, void_context);
/* Install HANDLER as the C-level ("low level") handler for SIGNAL,
 * choosing an appropriate outer wrapper (deferral trampoline,
 * unblocking trampoline, or the handler directly) and recording the
 * handler in interrupt_low_level_handlers[] so it can be undone. */
1486 undoably_install_low_level_interrupt_handler (int signal,
1487 interrupt_handler_t handler)
1489 struct sigaction sa;
1491 if (0 > signal || signal >= NSIG) {
1492 lose("bad signal number %d\n", signal);
1495 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1496 sa.sa_sigaction = handler;
1497 else if (sigismember(&deferrable_sigset,signal))
1498 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1499 /* The use of a trampoline appears to break the
1500 arch_os_get_context() workaround for SPARC/Linux. For now,
1501 don't use the trampoline (and so be vulnerable to the problems
1502 that SA_NODEFER is meant to solve). */
1503 #if !(defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_LINUX))
1504 else if (!sigaction_nodefer_works &&
1505 !sigismember(&blockable_sigset, signal))
1506 sa.sa_sigaction = low_level_unblock_me_trampoline;
1509 sa.sa_sigaction = handler;
/* Block all blockable signals while any handler runs. */
1511 sigcopyset(&sa.sa_mask, &blockable_sigset);
1512 sa.sa_flags = SA_SIGINFO | SA_RESTART
1513 | (sigaction_nodefer_works ? SA_NODEFER : 0);
/* Memory faults may arrive on an exhausted stack: use the altstack. */
1514 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1515 if((signal==SIG_MEMORY_FAULT))
1516 sa.sa_flags |= SA_ONSTACK;
1519 sigaction(signal, &sa, NULL);
1520 interrupt_low_level_handlers[signal] =
1521 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1525 /* This is called from Lisp. */
/* Install a Lisp-level handler for SIGNAL. If no low-level handler
 * owns the signal, pick the outer wrapper the same way the low-level
 * installer does; either way record HANDLER in interrupt_handlers[]
 * and return the previous Lisp handler. The signal is blocked around
 * the update so the tables never disagree with the kernel state. */
1527 install_handler(int signal, void handler(int, siginfo_t*, void*))
1529 #ifndef LISP_FEATURE_WIN32
1530 struct sigaction sa;
1532 union interrupt_handler oldhandler;
1534 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1537 sigaddset(&new, signal);
1538 thread_sigmask(SIG_BLOCK, &new, &old);
1540 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1541 (unsigned int)interrupt_low_level_handlers[signal]));
/* Only touch the kernel disposition when no C-level handler owns
 * this signal; otherwise just swap the Lisp-side table entry. */
1542 if (interrupt_low_level_handlers[signal]==0) {
1543 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1544 ARE_SAME_HANDLER(handler, SIG_IGN))
1545 sa.sa_sigaction = handler;
1546 else if (sigismember(&deferrable_sigset, signal))
1547 sa.sa_sigaction = maybe_now_maybe_later;
1548 else if (!sigaction_nodefer_works &&
1549 !sigismember(&blockable_sigset, signal))
1550 sa.sa_sigaction = unblock_me_trampoline;
1552 sa.sa_sigaction = interrupt_handle_now_handler;
1554 sigcopyset(&sa.sa_mask, &blockable_sigset);
1555 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1556 (sigaction_nodefer_works ? SA_NODEFER : 0);
1557 sigaction(signal, &sa, NULL);
1560 oldhandler = interrupt_handlers[signal];
1561 interrupt_handlers[signal].c = handler;
/* Restore the caller's mask saved above. */
1563 thread_sigmask(SIG_SETMASK, &old, 0);
1565 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1567 return (unsigned long)oldhandler.lisp;
1569 /* Probably-wrong Win32 hack */
1574 /* This must not go through lisp as it's allowed anytime, even when on
/* Terminal SIGABRT handler: abort the runtime immediately via lose(),
 * bypassing Lisp entirely. */
1577 sigabrt_handler(int signal, siginfo_t *info, void *void_context)
1579 lose("SIGABRT received.\n");
/* One-time signal-system initialization: probe SA_NODEFER, build the
 * deferrable/blockable/gc signal sets, default all Lisp-level handler
 * slots to SIG_DFL, and claim SIGABRT with the low-level aborter. */
1583 interrupt_init(void)
1585 #ifndef LISP_FEATURE_WIN32
1587 SHOW("entering interrupt_init()");
1588 see_if_sigaction_nodefer_works();
1589 sigemptyset(&deferrable_sigset);
1590 sigemptyset(&blockable_sigset);
1591 sigemptyset(&gc_sigset);
1592 sigaddset_deferrable(&deferrable_sigset);
1593 sigaddset_blockable(&blockable_sigset);
1594 sigaddset_gc(&gc_sigset);
1596 /* Set up high level handler information. */
1597 for (i = 0; i < NSIG; i++) {
1598 interrupt_handlers[i].c =
1599 /* (The cast here blasts away the distinction between
1600 * SA_SIGACTION-style three-argument handlers and
1601 * signal(..)-style one-argument handlers, which is OK
1602 * because it works to call the 1-argument form where the
1603 * 3-argument form is expected.) */
1604 (void (*)(int, siginfo_t*, void*))SIG_DFL;
1606 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1607 SHOW("returning from interrupt_init()");
1611 #ifndef LISP_FEATURE_WIN32
/* Accessor for si_code, exported so Lisp need not know siginfo_t's layout. */
1613 siginfo_code(siginfo_t *info)
1615 return info->si_code;
/* Last faulting address, readable from Lisp when reporting the error. */
1617 os_vm_address_t current_memory_fault_address;
/* Report an unhandled memory fault at ADDR to Lisp: record the address,
 * warn (and possibly die) at C level, then arrange for the Lisp
 * MEMORY-FAULT-ERROR to be signalled on return from the handler. */
1620 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1622 /* FIXME: This is lossy: if we get another memory fault (eg. from
1623 * another thread) before lisp has read this, we lose the information.
1624 * However, since this is mostly informative, we'll live with that for
1625 * now -- some address is better than no address in this case.
1627 current_memory_fault_address = addr;
1628 /* To allow debugging memory faults in signal handlers and such. */
1629 corruption_warning_and_maybe_lose("Memory fault");
1630 unblock_signals_in_context_and_maybe_warn(context);
1631 arrange_return_to_lisp_function(context,
1632 StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* A trap we have no C-level handling for: hand the raw context to Lisp
 * as a SAP and signal UNHANDLED-TRAP-ERROR; never returns. */
1637 unhandled_trap_error(os_context_t *context)
1639 lispobj context_sap;
/* Registers must be visible to GC while we cons the SAP and call Lisp. */
1640 fake_foreign_function_call(context);
1641 unblock_gc_signals();
1642 context_sap = alloc_sap(context);
1643 #ifndef LISP_FEATURE_WIN32
/* Restore the pre-trap signal mask before entering Lisp. */
1644 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1646 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1647 lose("UNHANDLED-TRAP-ERROR fell through");
1650 /* Common logic for trapping instructions. How we actually handle each
1651 * case is highly architecture dependent, but the overall shape is
1654 handle_trap(os_context_t *context, int trap)
1657 case trap_PendingInterrupt:
1658 FSHOW((stderr, "/<trap pending interrupt>\n"));
1659 arch_skip_instruction(context);
1660 interrupt_handle_pending(context);
1664 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1665 interrupt_internal_error(context, trap==trap_Cerror);
1667 case trap_Breakpoint:
1668 arch_handle_breakpoint(context);
1670 case trap_FunEndBreakpoint:
1671 arch_handle_fun_end_breakpoint(context);
1673 #ifdef trap_AfterBreakpoint
1674 case trap_AfterBreakpoint:
1675 arch_handle_after_breakpoint(context);
1678 #ifdef trap_SingleStepAround
1679 case trap_SingleStepAround:
1680 case trap_SingleStepBefore:
1681 arch_handle_single_step_trap(context, trap);
1685 fake_foreign_function_call(context);
1686 lose("%%PRIMITIVE HALT called; the party is over.\n");
1688 unhandled_trap_error(context);