2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handler we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
/* Under Linux on some architectures, we appear to have to restore the
 * FPU control word from the context, as after the signal is delivered
 * we appear to have a null FPU control word. */
#if defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context); \
    os_restore_fp_control(context);
#else
/* No FPU fixup needed: just recover the os_context_t from the raw
 * third handler argument. */
#define RESTORE_FP_CONTROL_WORD(context,void_context)           \
    os_context_t *context = arch_os_get_context(&void_context);
#endif
/* These are to be used in signal handlers. Currently all handlers are
 * called from one of:
 *
 * interrupt_handle_now_handler
 * maybe_now_maybe_later
 * unblock_me_trampoline
 * low_level_handle_now_handler
 * low_level_maybe_now_maybe_later
 * low_level_unblock_me_trampoline
 *
 * SAVE_ERRNO opens a scope that preserves the interrupted code's
 * errno (signal handlers must not clobber it) and binds `context`;
 * RESTORE_ERRNO closes that scope and puts errno back. The two
 * macros must always be used as a bracketing pair. */
#define SAVE_ERRNO(context,void_context)                        \
    {                                                           \
        int _saved_errno = errno;                               \
        RESTORE_FP_CONTROL_WORD(context,void_context);          \
        {

#define RESTORE_ERRNO                                           \
        }                                                       \
        errno = _saved_errno;                                   \
    }
104 static void run_deferred_handler(struct interrupt_data *data, void *v_context);
105 #ifndef LISP_FEATURE_WIN32
106 static void store_signal_data_for_later (struct interrupt_data *data,
107 void *handler, int signal,
109 os_context_t *context);
112 fill_current_sigmask(sigset_t *sigset)
114 /* Get the current sigmask, by blocking the empty set. */
117 thread_sigmask(SIG_BLOCK, &empty, sigset);
121 sigaddset_deferrable(sigset_t *s)
123 sigaddset(s, SIGHUP);
124 sigaddset(s, SIGINT);
125 sigaddset(s, SIGTERM);
126 sigaddset(s, SIGQUIT);
127 sigaddset(s, SIGPIPE);
128 sigaddset(s, SIGALRM);
129 sigaddset(s, SIGURG);
130 sigaddset(s, SIGTSTP);
131 sigaddset(s, SIGCHLD);
133 #ifndef LISP_FEATURE_HPUX
134 sigaddset(s, SIGXCPU);
135 sigaddset(s, SIGXFSZ);
137 sigaddset(s, SIGVTALRM);
138 sigaddset(s, SIGPROF);
139 sigaddset(s, SIGWINCH);
143 sigdelset_deferrable(sigset_t *s)
145 sigdelset(s, SIGHUP);
146 sigdelset(s, SIGINT);
147 sigdelset(s, SIGQUIT);
148 sigdelset(s, SIGPIPE);
149 sigdelset(s, SIGALRM);
150 sigdelset(s, SIGURG);
151 sigdelset(s, SIGTSTP);
152 sigdelset(s, SIGCHLD);
154 #ifndef LISP_FEATURE_HPUX
155 sigdelset(s, SIGXCPU);
156 sigdelset(s, SIGXFSZ);
158 sigdelset(s, SIGVTALRM);
159 sigdelset(s, SIGPROF);
160 sigdelset(s, SIGWINCH);
164 sigaddset_blockable(sigset_t *sigset)
166 sigaddset_deferrable(sigset);
167 sigaddset_gc(sigset);
171 sigaddset_gc(sigset_t *sigset)
173 #ifdef LISP_FEATURE_SB_THREAD
174 sigaddset(sigset,SIG_STOP_FOR_GC);
179 sigdelset_gc(sigset_t *sigset)
181 #ifdef LISP_FEATURE_SB_THREAD
182 sigdelset(sigset,SIG_STOP_FOR_GC);
186 /* initialized in interrupt_init */
187 sigset_t deferrable_sigset;
188 sigset_t blockable_sigset;
193 deferrables_blocked_in_sigset_p(sigset_t *sigset)
195 #if !defined(LISP_FEATURE_WIN32)
197 for(i = 1; i < NSIG; i++) {
198 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
206 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
208 #if !defined(LISP_FEATURE_WIN32)
210 for(i = 1; i < NSIG; i++) {
211 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
212 lose("deferrable signal %d blocked\n",i);
218 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
220 #if !defined(LISP_FEATURE_WIN32)
222 for(i = 1; i < NSIG; i++) {
223 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
224 lose("deferrable signal %d not blocked\n",i);
230 check_deferrables_unblocked_or_lose(void)
232 #if !defined(LISP_FEATURE_WIN32)
234 fill_current_sigmask(¤t);
235 check_deferrables_unblocked_in_sigset_or_lose(¤t);
240 check_deferrables_blocked_or_lose(void)
242 #if !defined(LISP_FEATURE_WIN32)
244 fill_current_sigmask(¤t);
245 check_deferrables_blocked_in_sigset_or_lose(¤t);
250 check_blockables_blocked_or_lose(void)
252 #if !defined(LISP_FEATURE_WIN32)
255 fill_current_sigmask(¤t);
256 for(i = 1; i < NSIG; i++) {
257 if (sigismember(&blockable_sigset, i) && !sigismember(¤t, i))
258 lose("blockable signal %d not blocked\n",i);
264 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
266 #if !defined(LISP_FEATURE_WIN32)
268 for(i = 1; i < NSIG; i++) {
269 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
270 lose("gc signal %d blocked\n",i);
276 check_gc_signals_unblocked_or_lose(void)
278 #if !defined(LISP_FEATURE_WIN32)
280 fill_current_sigmask(¤t);
281 check_gc_signals_unblocked_in_sigset_or_lose(¤t);
286 check_interrupts_enabled_or_lose(os_context_t *context)
288 struct thread *thread=arch_os_get_current_thread();
289 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
290 lose("interrupts not enabled\n");
291 if (arch_pseudo_atomic_atomic(context))
292 lose ("in pseudo atomic section\n");
295 /* Save sigset (or the current sigmask if 0) if there is no pending
296 * handler, because that means that deferabbles are already blocked.
297 * The purpose is to avoid losing the pending gc signal if a
298 * deferrable interrupt async unwinds between clearing the pseudo
299 * atomic and trapping to GC.*/
301 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
303 struct thread *thread = arch_os_get_current_thread();
304 struct interrupt_data *data = thread->interrupt_data;
306 /* Obviously, this function is called when signals may not be
307 * blocked. Let's make sure we are not interrupted. */
308 thread_sigmask(SIG_BLOCK, &blockable_sigset, &oldset);
309 #ifndef LISP_FEATURE_SB_THREAD
310 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
312 if (data->gc_blocked_deferrables)
313 lose("gc_blocked_deferrables already true\n");
315 if ((!data->pending_handler) &&
316 (!data->gc_blocked_deferrables)) {
317 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
318 data->gc_blocked_deferrables = 1;
320 /* This is the sigmask of some context. */
321 sigcopyset(&data->pending_mask, sigset);
322 sigaddset_deferrable(sigset);
323 thread_sigmask(SIG_SETMASK,&oldset,0);
326 /* Operating on the current sigmask. Save oldset and
327 * unblock gc signals. In the end, this is equivalent to
328 * blocking the deferrables. */
329 sigcopyset(&data->pending_mask, &oldset);
330 unblock_gc_signals();
334 thread_sigmask(SIG_SETMASK,&oldset,0);
337 /* Are we leaving WITH-GCING and already running with interrupts
338 * enabled, without the protection of *GC-INHIBIT* T and there is gc
339 * (or stop for gc) pending, but we haven't trapped yet? */
341 in_leaving_without_gcing_race_p(struct thread *thread)
343 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
344 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
345 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
346 ((SymbolValue(GC_PENDING,thread) != NIL)
347 #if defined(LISP_FEATURE_SB_THREAD)
348 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
353 /* Check our baroque invariants. */
355 check_interrupt_context_or_lose(os_context_t *context)
357 struct thread *thread = arch_os_get_current_thread();
358 struct interrupt_data *data = thread->interrupt_data;
359 int interrupt_deferred_p = (data->pending_handler != 0);
360 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
361 sigset_t *sigset = os_context_sigmask_addr(context);
362 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
363 * handle_allocation_trap. */
364 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
365 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
366 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
367 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
368 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
369 int in_race_p = in_leaving_without_gcing_race_p(thread);
370 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
371 * section and trapping, a SIG_STOP_FOR_GC would see the next
372 * check fail, for this reason sig_stop_for_gc handler does not
373 * call this function. */
374 if (interrupt_deferred_p) {
375 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
376 lose("Stray deferred interrupt.\n");
379 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
380 lose("GC_PENDING, but why?\n");
381 #if defined(LISP_FEATURE_SB_THREAD)
383 int stop_for_gc_pending =
384 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
385 if (stop_for_gc_pending)
386 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
387 lose("STOP_FOR_GC_PENDING, but why?\n");
391 if (interrupt_pending && !interrupt_deferred_p)
392 lose("INTERRUPT_PENDING but not pending handler.\n");
393 if ((data->gc_blocked_deferrables) && interrupt_pending)
394 lose("gc_blocked_deferrables and interrupt pending\n.");
395 if (data->gc_blocked_deferrables)
396 check_deferrables_blocked_in_sigset_or_lose(sigset);
397 if (interrupt_pending || interrupt_deferred_p)
398 check_deferrables_blocked_in_sigset_or_lose(sigset);
400 check_deferrables_unblocked_in_sigset_or_lose(sigset);
401 /* If deferrables are unblocked then we are open to signals
402 * that run lisp code. */
403 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
407 /* When we catch an internal error, should we pass it back to Lisp to
408 * be handled in a high-level way? (Early in cold init, the answer is
409 * 'no', because Lisp is still too brain-dead to handle anything.
410 * After sufficient initialization has been completed, the answer
 * becomes 'yes'.) */
412 boolean internal_errors_enabled = 0;
414 #ifndef LISP_FEATURE_WIN32
415 static void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, void*);
417 union interrupt_handler interrupt_handlers[NSIG];
420 block_blockable_signals(void)
422 #ifndef LISP_FEATURE_WIN32
423 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
428 block_deferrable_signals(void)
430 #ifndef LISP_FEATURE_WIN32
431 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
436 unblock_deferrable_signals_in_sigset(sigset_t *sigset)
438 #ifndef LISP_FEATURE_WIN32
439 if (interrupt_handler_pending_p())
440 lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
441 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
442 sigdelset_deferrable(sigset);
447 unblock_deferrable_signals(void)
449 #ifndef LISP_FEATURE_WIN32
450 if (interrupt_handler_pending_p())
451 lose("unblock_deferrable_signals: losing proposition\n");
452 check_gc_signals_unblocked_or_lose();
453 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
/* Unblock the gc coordination signals in the calling thread.  Only
 * meaningful on threaded non-Win32 builds. */
void
unblock_gc_signals(void)
{
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
    thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
#endif
}
466 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
468 #ifndef LISP_FEATURE_WIN32
470 sigset_t *sigset=os_context_sigmask_addr(context);
471 for(i = 1; i < NSIG; i++) {
472 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
475 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
476 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
477 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
482 sigdelset_gc(sigset);
483 if (!interrupt_handler_pending_p()) {
484 unblock_deferrable_signals_in_sigset(sigset);
491 * utility routines used by various signal handlers
495 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
497 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
501 /* Build a fake stack frame or frames */
503 current_control_frame_pointer =
504 (lispobj *)(unsigned long)
505 (*os_context_register_addr(context, reg_CSP));
506 if ((lispobj *)(unsigned long)
507 (*os_context_register_addr(context, reg_CFP))
508 == current_control_frame_pointer) {
509 /* There is a small window during call where the callee's
510 * frame isn't built yet. */
511 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
512 == FUN_POINTER_LOWTAG) {
513 /* We have called, but not built the new frame, so
514 * build it for them. */
515 current_control_frame_pointer[0] =
516 *os_context_register_addr(context, reg_OCFP);
517 current_control_frame_pointer[1] =
518 *os_context_register_addr(context, reg_LRA);
519 current_control_frame_pointer += 8;
520 /* Build our frame on top of it. */
521 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
524 /* We haven't yet called, build our frame as if the
525 * partial frame wasn't there. */
526 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
529 /* We can't tell whether we are still in the caller if it had to
530 * allocate a stack frame due to stack arguments. */
531 /* This observation provoked some past CMUCL maintainer to ask
532 * "Can anything strange happen during return?" */
535 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
538 current_control_stack_pointer = current_control_frame_pointer + 8;
540 current_control_frame_pointer[0] = oldcont;
541 current_control_frame_pointer[1] = NIL;
542 current_control_frame_pointer[2] =
543 (lispobj)(*os_context_register_addr(context, reg_CODE));
547 /* Stores the context for gc to scavange and builds fake stack
550 fake_foreign_function_call(os_context_t *context)
553 struct thread *thread=arch_os_get_current_thread();
555 /* context_index incrementing must not be interrupted */
556 check_blockables_blocked_or_lose();
558 /* Get current Lisp state from context. */
560 dynamic_space_free_pointer =
561 (lispobj *)(unsigned long)
562 (*os_context_register_addr(context, reg_ALLOC));
563 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
564 /* dynamic_space_free_pointer); */
565 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
566 if ((long)dynamic_space_free_pointer & 1) {
567 lose("dead in fake_foreign_function_call, context = %x\n", context);
570 /* why doesnt PPC and SPARC do something like this: */
571 #if defined(LISP_FEATURE_HPPA)
572 if ((long)dynamic_space_free_pointer & 4) {
573 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
578 current_binding_stack_pointer =
579 (lispobj *)(unsigned long)
580 (*os_context_register_addr(context, reg_BSP));
583 build_fake_control_stack_frames(thread,context);
585 /* Do dynamic binding of the active interrupt context index
586 * and save the context in the context array. */
588 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
590 if (context_index >= MAX_INTERRUPTS) {
591 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
594 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
595 make_fixnum(context_index + 1),thread);
597 thread->interrupt_contexts[context_index] = context;
599 #ifdef FOREIGN_FUNCTION_CALL_FLAG
600 foreign_function_call_active = 1;
604 /* blocks all blockable signals. If you are calling from a signal handler,
605 * the usual signal mask will be restored from the context when the handler
606 * finishes. Otherwise, be careful */
608 undo_fake_foreign_function_call(os_context_t *context)
610 struct thread *thread=arch_os_get_current_thread();
611 /* Block all blockable signals. */
612 block_blockable_signals();
614 #ifdef FOREIGN_FUNCTION_CALL_FLAG
615 foreign_function_call_active = 0;
618 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
622 /* Put the dynamic space free pointer back into the context. */
623 *os_context_register_addr(context, reg_ALLOC) =
624 (unsigned long) dynamic_space_free_pointer
625 | (*os_context_register_addr(context, reg_ALLOC)
628 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
630 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
635 /* a handler for the signal caused by execution of a trap opcode
636 * signalling an internal error */
638 interrupt_internal_error(os_context_t *context, boolean continuable)
642 fake_foreign_function_call(context);
644 if (!internal_errors_enabled) {
645 describe_internal_error(context);
646 /* There's no good way to recover from an internal error
647 * before the Lisp error handling mechanism is set up. */
648 lose("internal error too early in init, can't recover\n");
651 /* Allocate the SAP object while the interrupts are still
653 unblock_gc_signals();
654 context_sap = alloc_sap(context);
656 #ifndef LISP_FEATURE_WIN32
657 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
660 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
661 /* Workaround for blocked SIGTRAP. */
664 sigemptyset(&newset);
665 sigaddset(&newset, SIGTRAP);
666 thread_sigmask(SIG_UNBLOCK, &newset, 0);
670 SHOW("in interrupt_internal_error");
672 /* Display some rudimentary debugging information about the
673 * error, so that even if the Lisp error handler gets badly
674 * confused, we have a chance to determine what's going on. */
675 describe_internal_error(context);
677 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
678 continuable ? T : NIL);
680 undo_fake_foreign_function_call(context); /* blocks signals again */
682 arch_skip_instruction(context);
686 interrupt_handler_pending_p(void)
688 struct thread *thread = arch_os_get_current_thread();
689 struct interrupt_data *data = thread->interrupt_data;
690 return (data->pending_handler != 0);
694 interrupt_handle_pending(os_context_t *context)
696 /* There are three ways we can get here. First, if an interrupt
697 * occurs within pseudo-atomic, it will be deferred, and we'll
698 * trap to here at the end of the pseudo-atomic block. Second, if
699 * the GC (in alloc()) decides that a GC is required, it will set
700 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
701 * and alloc() is always called from within pseudo-atomic, and
702 * thus we end up here again. Third, when calling GC-ON or at the
703 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
704 * here if there is a pending GC. Fourth, ahem, at the end of
705 * WITHOUT-INTERRUPTS (bar complications with nesting). */
707 /* Win32 only needs to handle the GC cases (for now?) */
709 struct thread *thread = arch_os_get_current_thread();
710 struct interrupt_data *data = thread->interrupt_data;
712 if (arch_pseudo_atomic_atomic(context)) {
713 lose("Handling pending interrupt in pseduo atomic.");
716 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
718 check_blockables_blocked_or_lose();
720 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
721 * handler, then the pending mask was saved and
722 * gc_blocked_deferrables set. Hence, there can be no pending
723 * handler and it's safe to restore the pending mask.
725 * Note, that if gc_blocked_deferrables is false we may still have
726 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
727 * pseudo atomic was interrupt be a deferrable first. */
728 if (data->gc_blocked_deferrables) {
729 if (data->pending_handler)
730 lose("GC blocked deferrables but still got a pending handler.");
731 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
732 lose("GC blocked deferrables while GC is inhibited.");
733 /* Restore the saved signal mask from the original signal (the
734 * one that interrupted us during the critical section) into
735 * the os_context for the signal we're currently in the
736 * handler for. This should ensure that when we return from
737 * the handler the blocked signals are unblocked. */
738 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
739 data->gc_blocked_deferrables = 0;
742 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
743 void *original_pending_handler = data->pending_handler;
745 #ifdef LISP_FEATURE_SB_THREAD
746 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
747 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
748 * the signal handler if it actually stops us. */
749 arch_clear_pseudo_atomic_interrupted(context);
750 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
753 /* Test for T and not for != NIL since the value :IN-PROGRESS
754 * is used in SUB-GC as part of the mechanism to supress
756 if (SymbolValue(GC_PENDING,thread) == T) {
758 /* Two reasons for doing this. First, if there is a
759 * pending handler we don't want to run. Second, we are
760 * going to clear pseudo atomic interrupted to avoid
761 * spurious trapping on every allocation in SUB_GC and
762 * having a pending handler with interrupts enabled and
763 * without pseudo atomic interrupted breaks an
765 if (data->pending_handler) {
766 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
767 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
770 arch_clear_pseudo_atomic_interrupted(context);
772 /* GC_PENDING is cleared in SUB-GC, or if another thread
773 * is doing a gc already we will get a SIG_STOP_FOR_GC and
774 * that will clear it.
776 * If there is a pending handler or gc was triggerred in a
777 * signal handler then maybe_gc won't run POST_GC and will
778 * return normally. */
779 if (!maybe_gc(context))
780 lose("GC not inhibited but maybe_gc did not GC.");
782 if (data->pending_handler) {
786 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
787 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
788 * GC-PENDING is not NIL then we cannot trap on pseudo
789 * atomic due to GC (see if(GC_PENDING) logic in
790 * cheneygc.c an gengcgc.c), plus there is a outer
791 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
793 lose("Trapping to run pending handler while GC in progress.");
796 check_blockables_blocked_or_lose();
798 /* No GC shall be lost. If SUB_GC triggers another GC then
799 * that should be handled on the spot. */
800 if (SymbolValue(GC_PENDING,thread) != NIL)
801 lose("GC_PENDING after doing gc.");
802 #ifdef LISP_FEATURE_SB_THREAD
803 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
804 lose("STOP_FOR_GC_PENDING after doing gc.");
806 /* Check two things. First, that gc does not clobber a handler
807 * that's already pending. Second, that there is no interrupt
808 * lossage: if original_pending_handler was NULL then even if
809 * an interrupt arrived during GC (POST-GC, really) it was
811 if (original_pending_handler != data->pending_handler)
812 lose("pending handler changed in gc: %x -> %d.",
813 original_pending_handler, data->pending_handler);
816 #ifndef LISP_FEATURE_WIN32
817 /* There may be no pending handler, because it was only a gc that
818 * had to be executed or because Lisp is a bit too eager to call
819 * DO-PENDING-INTERRUPT. */
820 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
821 (data->pending_handler)) {
822 /* No matter how we ended up here, clear both
823 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
824 * because we checked above that there is no GC pending. */
825 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
826 arch_clear_pseudo_atomic_interrupted(context);
827 /* Restore the sigmask in the context. */
828 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
829 /* This will break on sparc linux: the deferred handler really
830 * wants to be called with a void_context */
831 run_deferred_handler(data,(void *)context);
833 /* It is possible that the end of this function was reached
834 * without never actually doing anything, the tests in Lisp for
835 * when to call receive-pending-interrupt are not exact. */
836 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
841 * the two main signal handlers:
842 * interrupt_handle_now(..)
843 * maybe_now_maybe_later(..)
845 * to which we have added interrupt_handle_now_handler(..). Why?
846 * Well, mostly because the SPARC/Linux platform doesn't quite do
847 * signals the way we want them done. The third argument in the
848 * handler isn't filled in by the kernel properly, so we fix it up
849 * ourselves in the arch_os_get_context(..) function; however, we only
850 * want to do this when we first hit the handler, and not when
851 * interrupt_handle_now(..) is being called from some other handler
852 * (when the fixup will already have been done). -- CSR, 2002-07-23
856 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
858 #ifdef FOREIGN_FUNCTION_CALL_FLAG
859 boolean were_in_lisp;
861 union interrupt_handler handler;
863 check_blockables_blocked_or_lose();
865 #ifndef LISP_FEATURE_WIN32
866 if (sigismember(&deferrable_sigset,signal))
867 check_interrupts_enabled_or_lose(context);
870 handler = interrupt_handlers[signal];
872 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
876 #ifdef FOREIGN_FUNCTION_CALL_FLAG
877 were_in_lisp = !foreign_function_call_active;
881 fake_foreign_function_call(context);
884 FSHOW_SIGNAL((stderr,
885 "/entering interrupt_handle_now(%d, info, context)\n",
888 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
890 /* This can happen if someone tries to ignore or default one
891 * of the signals we need for runtime support, and the runtime
892 * support decides to pass on it. */
893 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
895 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
896 /* Once we've decided what to do about contexts in a
897 * return-elsewhere world (the original context will no longer
898 * be available; should we copy it or was nobody using it anyway?)
899 * then we should convert this to return-elsewhere */
901 /* CMUCL comment said "Allocate the SAPs while the interrupts
902 * are still disabled.". I (dan, 2003.08.21) assume this is
903 * because we're not in pseudoatomic and allocation shouldn't
904 * be interrupted. In which case it's no longer an issue as
905 * all our allocation from C now goes through a PA wrapper,
906 * but still, doesn't hurt.
908 * Yeah, but non-gencgc platforms don't really wrap allocation
909 * in PA. MG - 2005-08-29 */
911 lispobj info_sap, context_sap;
912 /* Leave deferrable signals blocked, the handler itself will
913 * allow signals again when it sees fit. */
914 unblock_gc_signals();
915 context_sap = alloc_sap(context);
916 info_sap = alloc_sap(info);
918 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
920 funcall3(handler.lisp,
925 /* This cannot happen in sane circumstances. */
927 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
929 #ifndef LISP_FEATURE_WIN32
930 /* Allow signals again. */
931 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
933 (*handler.c)(signal, info, context);
936 #ifdef FOREIGN_FUNCTION_CALL_FLAG
940 undo_fake_foreign_function_call(context); /* block signals again */
943 FSHOW_SIGNAL((stderr,
944 "/returning from interrupt_handle_now(%d, info, context)\n",
948 /* This is called at the end of a critical section if the indications
949 * are that some signal was deferred during the section. Note that as
950 * far as C or the kernel is concerned we dealt with the signal
951 * already; we're just doing the Lisp-level processing now that we
954 run_deferred_handler(struct interrupt_data *data, void *v_context)
956 /* The pending_handler may enable interrupts and then another
957 * interrupt may hit, overwrite interrupt_data, so reset the
958 * pending handler before calling it. Trust the handler to finish
959 * with the siginfo before enabling interrupts. */
960 void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler;
962 data->pending_handler=0;
963 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
964 (*pending_handler)(data->pending_signal,&(data->pending_info), v_context);
967 #ifndef LISP_FEATURE_WIN32
969 maybe_defer_handler(void *handler, struct interrupt_data *data,
970 int signal, siginfo_t *info, os_context_t *context)
972 struct thread *thread=arch_os_get_current_thread();
974 check_blockables_blocked_or_lose();
976 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
977 lose("interrupt already pending\n");
978 if (thread->interrupt_data->pending_handler)
979 lose("there is a pending handler already (PA)\n");
980 if (data->gc_blocked_deferrables)
981 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
982 check_interrupt_context_or_lose(context);
983 /* If interrupts are disabled then INTERRUPT_PENDING is set and
984 * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo
985 * atomic section inside a WITHOUT-INTERRUPTS.
987 * Also, if in_leaving_without_gcing_race_p then
988 * interrupt_handle_pending is going to be called soon, so
989 * stashing the signal away is safe.
991 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
992 in_leaving_without_gcing_race_p(thread)) {
993 store_signal_data_for_later(data,handler,signal,info,context);
994 SetSymbolValue(INTERRUPT_PENDING, T,thread);
995 FSHOW_SIGNAL((stderr,
996 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
997 (unsigned int)handler,signal,
998 in_leaving_without_gcing_race_p(thread)));
999 check_interrupt_context_or_lose(context);
1002 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
1003 * actually use its argument for anything on x86, so this branch
1004 * may succeed even when context is null (gencgc alloc()) */
1005 if (arch_pseudo_atomic_atomic(context)) {
1006 store_signal_data_for_later(data,handler,signal,info,context);
1007 arch_set_pseudo_atomic_interrupted(context);
1008 FSHOW_SIGNAL((stderr,
1009 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1010 (unsigned int)handler,signal));
1011 check_interrupt_context_or_lose(context);
1014 FSHOW_SIGNAL((stderr,
1015 "/maybe_defer_handler(%x,%d): not deferred\n",
1016 (unsigned int)handler,signal));
1021 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1023 siginfo_t *info, os_context_t *context)
1025 if (data->pending_handler)
1026 lose("tried to overwrite pending interrupt handler %x with %x\n",
1027 data->pending_handler, handler);
1029 lose("tried to defer null interrupt handler\n");
1030 data->pending_handler = handler;
1031 data->pending_signal = signal;
1033 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1035 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1039 lose("Null context");
1041 /* the signal mask in the context (from before we were
1042 * interrupted) is copied to be restored when run_deferred_handler
1043 * happens. Then the usually-blocked signals are added to the mask
1044 * in the context so that we are running with blocked signals when
1045 * the handler returns */
1046 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1047 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (UNIX-level) handler for deferrable signals: run the
 * Lisp-visible handling immediately via interrupt_handle_now, unless
 * maybe_defer_handler decides it is currently unsafe (interrupts
 * disabled, pseudo-atomic, ...) and stores the signal for later.
 * Errno is saved/restored around the whole affair by SAVE_ERRNO. */
1051 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1053     SAVE_ERRNO(context,void_context);
1054     struct thread *thread = arch_os_get_current_thread();
1055     struct interrupt_data *data = thread->interrupt_data;
1057     if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1058         interrupt_handle_now(signal, info, context);
/* Dispatch SIGNAL to the C-level handler registered in
 * interrupt_low_level_handlers[].  The caller is responsible for the
 * FP-control-word and (on Darwin) context fixups, and blockable
 * signals are expected to be blocked on entry (checked below). */
1063 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1064                                os_context_t *context)
1066     /* No FP control fixage needed, caller has done that. */
1067     check_blockables_blocked_or_lose();
1068     check_interrupts_enabled_or_lose(context);
1069     (*interrupt_low_level_handlers[signal])(signal, info, context);
1070     /* No Darwin context fixage needed, caller does that. */
/* Analogue of maybe_now_maybe_later for signals with a C-level
 * (low-level) handler: run low_level_interrupt_handle_now right away
 * unless maybe_defer_handler decides it must wait. */
1074 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1076     SAVE_ERRNO(context,void_context);
1077     struct thread *thread = arch_os_get_current_thread();
1078     struct interrupt_data *data = thread->interrupt_data;
1080     if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1081                             signal,info,context))
1082         low_level_interrupt_handle_now(signal, info, context);
1087 #ifdef LISP_FEATURE_SB_THREAD
1089 /* This function must not cons, because that may trigger a GC. */
/* Handler for the signal another thread sends us to stop the world for
 * GC.  If *GC-INHIBIT* is set, or we are in a pseudo-atomic section,
 * only note that the stop is pending.  Otherwise: save the register
 * state so it can be scavenged, mark this thread STATE_SUSPENDED, and
 * block until the GCing thread moves us back to STATE_RUNNING. */
1091 sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
1093     os_context_t *context = arch_os_get_context(&void_context);
1095     struct thread *thread=arch_os_get_current_thread();
1098     /* Test for GC_INHIBIT _first_, else we'd trap on every single
1099      * pseudo atomic until gc is finally allowed. */
1100     if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1101         SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1102         FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1104     } else if (arch_pseudo_atomic_atomic(context)) {
1105         SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1106         arch_set_pseudo_atomic_interrupted(context);
1107         maybe_save_gc_mask_and_block_deferrables
1108             (os_context_sigmask_addr(context));
1109         FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1113     FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1115     /* Not PA and GC not inhibited -- we can stop now. */
1117     /* need the context stored so it can have registers scavenged */
1118     fake_foreign_function_call(context);
1120     /* Block everything. */
1122     thread_sigmask(SIG_BLOCK,&ss,0);
1124     /* Not pending anymore. */
1125     SetSymbolValue(GC_PENDING,NIL,thread);
1126     SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1128     if(thread_state(thread)!=STATE_RUNNING) {
1129         lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1130              fixnum_value(thread->state));
1133     set_thread_state(thread,STATE_SUSPENDED);
1134     FSHOW_SIGNAL((stderr,"suspended\n"));
     /* Sleep until the GCing thread changes our state away from
      * STATE_SUSPENDED. */
1136     wait_for_thread_state_change(thread, STATE_SUSPENDED);
1137     FSHOW_SIGNAL((stderr,"resumed\n"));
1139     if(thread_state(thread)!=STATE_RUNNING) {
1140         lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1141              fixnum_value(thread_state(thread)));
1144     undo_fake_foreign_function_call(context);
1150 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1152 SAVE_ERRNO(context,void_context);
1153 #ifndef LISP_FEATURE_WIN32
1154 if ((signal == SIGILL) || (signal == SIGBUS)
1155 #ifndef LISP_FEATURE_LINUX
1156 || (signal == SIGEMT)
1159 corruption_warning_and_maybe_lose("Signal %d recieved", signal);
1161 interrupt_handle_now(signal, info, context);
1165 /* manipulate the signal context and stack such that when the handler
1166 * returns, it will call function instead of whatever it was doing
1170 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1171 extern int *context_eflags_addr(os_context_t *context);
1174 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1175 extern void post_signal_tramp(void);
1176 extern void call_into_lisp_tramp(void);
/* Frob CONTEXT so that, when the signal handler returns, the
 * interrupted thread calls FUNCTION (a Lisp function object) instead
 * of resuming what it was doing, and so that when FUNCTION returns the
 * original register state is restored and execution continues.  GC
 * signals must already be unblocked in the context's sigmask (checked
 * immediately below).  The mechanism is highly architecture-specific:
 * on x86/x86-64 a fake call_into_lisp frame is built on the
 * interrupted stack (or, on x86/Darwin, register state is stashed in a
 * freshly mapped save area consumed by call_into_lisp_tramp); on other
 * architectures registers in the context are pointed directly at the
 * function's code. */
1178 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1180     check_gc_signals_unblocked_in_sigset_or_lose
1181         (os_context_sigmask_addr(context));
1182 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1183     void * fun=native_pointer(function);
1184     void *code = &(((struct simple_fun *) fun)->code);
1187     /* Build a stack frame showing `interrupted' so that the
1188      * user's backtrace makes (as much) sense (as usual) */
1190     /* fp state is saved and restored by call_into_lisp */
1191     /* FIXME: errno is not restored, but since current uses of this
1192      * function only call Lisp code that signals an error, it's not
1193      * much of a problem. In other words, running out of the control
1194      * stack between a syscall and (GET-ERRNO) may clobber errno if
1195      * something fails during signalling or in the handler. But I
1196      * can't see what can go wrong as long as there is no CONTINUE
1197      * like restart on them. */
1198 #ifdef LISP_FEATURE_X86
1199     /* Suppose the existence of some function that saved all
1200      * registers, called call_into_lisp, then restored GP registers and
1201      * returned. It would look something like this:
1209      pushl {address of function to call}
1210      call 0x8058db0 <call_into_lisp>
1217      * What we do here is set up the stack that call_into_lisp would
1218      * expect to see if it had been called by this code, and frob the
1219      * signal context so that signal return goes directly to call_into_lisp,
1220      * and when that function (and the lisp function it invoked) returns,
1221      * it returns to the second half of this imaginary function which
1222      * restores all registers and returns to C
1224      * For this to work, the latter part of the imaginary function
1225      * must obviously exist in reality. That would be post_signal_tramp
1228     u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1230 #if defined(LISP_FEATURE_DARWIN)
     /* Darwin cannot take the direct stack-frobbing route; stash the
      * state in a mapped save area instead (steps 1-4 below). */
1231     u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1233     FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1234     FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1236     /* 1. os_validate (malloc/mmap) register_save_block
1237      * 2. copy register state into register_save_block
1238      * 3. put a pointer to register_save_block in a register in the context
1239      * 4. set the context's EIP to point to a trampoline which:
1240      *    a. builds the fake stack frame from the block
1241      *    b. frees the block
1242      *    c. calls the function
1245     *register_save_area = *os_context_pc_addr(context);
1246     *(register_save_area + 1) = function;
1247     *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1248     *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1249     *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1250     *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1251     *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1252     *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1253     *(register_save_area + 8) = *context_eflags_addr(context);
1255     *os_context_pc_addr(context) =
1256         (os_context_register_t) call_into_lisp_tramp;
1257     *os_context_register_addr(context,reg_ECX) =
1258         (os_context_register_t) register_save_area;
1261     /* return address for call_into_lisp: */
1262     *(sp-15) = (u32)post_signal_tramp;
1263     *(sp-14) = function;        /* args for call_into_lisp : function*/
1264     *(sp-13) = 0;               /* arg array */
1265     *(sp-12) = 0;               /* no. args */
1266     /* this order matches that used in POPAD */
1267     *(sp-11)=*os_context_register_addr(context,reg_EDI);
1268     *(sp-10)=*os_context_register_addr(context,reg_ESI);
1270     *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1271     /* POPAD ignores the value of ESP: */
1273     *(sp-7)=*os_context_register_addr(context,reg_EBX);
1275     *(sp-6)=*os_context_register_addr(context,reg_EDX);
1276     *(sp-5)=*os_context_register_addr(context,reg_ECX);
1277     *(sp-4)=*os_context_register_addr(context,reg_EAX);
1278     *(sp-3)=*context_eflags_addr(context);
1279     *(sp-2)=*os_context_register_addr(context,reg_EBP);
1280     *(sp-1)=*os_context_pc_addr(context);
1284 #elif defined(LISP_FEATURE_X86_64)
1285     u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1287     /* return address for call_into_lisp: */
1288     *(sp-18) = (u64)post_signal_tramp;
1290     *(sp-17)=*os_context_register_addr(context,reg_R15);
1291     *(sp-16)=*os_context_register_addr(context,reg_R14);
1292     *(sp-15)=*os_context_register_addr(context,reg_R13);
1293     *(sp-14)=*os_context_register_addr(context,reg_R12);
1294     *(sp-13)=*os_context_register_addr(context,reg_R11);
1295     *(sp-12)=*os_context_register_addr(context,reg_R10);
1296     *(sp-11)=*os_context_register_addr(context,reg_R9);
1297     *(sp-10)=*os_context_register_addr(context,reg_R8);
1298     *(sp-9)=*os_context_register_addr(context,reg_RDI);
1299     *(sp-8)=*os_context_register_addr(context,reg_RSI);
1300     /* skip RBP and RSP */
1301     *(sp-7)=*os_context_register_addr(context,reg_RBX);
1302     *(sp-6)=*os_context_register_addr(context,reg_RDX);
1303     *(sp-5)=*os_context_register_addr(context,reg_RCX);
1304     *(sp-4)=*os_context_register_addr(context,reg_RAX);
1305     *(sp-3)=*context_eflags_addr(context);
1306     *(sp-2)=*os_context_register_addr(context,reg_RBP);
1307     *(sp-1)=*os_context_pc_addr(context);
     /* x86-64 calling convention: the three call_into_lisp arguments go
      * in registers rather than on the stack. */
1309     *os_context_register_addr(context,reg_RDI) =
1310         (os_context_register_t)function; /* function */
1311     *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1312     *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1314     struct thread *th=arch_os_get_current_thread();
1315     build_fake_control_stack_frames(th,context);
1318 #ifdef LISP_FEATURE_X86
1320 #if !defined(LISP_FEATURE_DARWIN)
1321     *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1322     *os_context_register_addr(context,reg_ECX) = 0;
1323     *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1325     *os_context_register_addr(context,reg_UESP) =
1326         (os_context_register_t)(sp-15);
1328     *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1329 #endif /* __NETBSD__ */
1330 #endif /* LISP_FEATURE_DARWIN */
1332 #elif defined(LISP_FEATURE_X86_64)
1333     *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1334     *os_context_register_addr(context,reg_RCX) = 0;
1335     *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1336     *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1338     /* this much of the calling convention is common to all
1340     *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1341     *os_context_register_addr(context,reg_NARGS) = 0;
1342     *os_context_register_addr(context,reg_LIP) =
1343         (os_context_register_t)(unsigned long)code;
1344     *os_context_register_addr(context,reg_CFP) =
1345         (os_context_register_t)(unsigned long)current_control_frame_pointer;
1347 #ifdef ARCH_HAS_NPC_REGISTER
1348     *os_context_npc_addr(context) =
1349         4 + *os_context_pc_addr(context);
1351 #ifdef LISP_FEATURE_SPARC
1352     *os_context_register_addr(context,reg_CODE) =
1353         (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1355     FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1359 /* KLUDGE: Theoretically the approach we use for undefined alien
1360 * variables should work for functions as well, but on PPC/Darwin
1361 * we get bus error at bogus addresses instead, hence this workaround,
1362 * that has the added benefit of automatically discriminating between
1363 * functions and variables.
/* Trampoline target for calls through an undefined alien function:
 * enters Lisp and signals UNDEFINED-ALIEN-FUNCTION-ERROR. */
1366 undefined_alien_function(void)
1368     funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Classify a faulting address ADDR against the guard pages we manage:
 * the control-stack guard page (stack exhaustion), the return-guard
 * page (recovery from a previous exhaustion), and the undefined-alien
 * page.  For exhaustion and undefined aliens the context is frobbed so
 * that "returning" from the signal handler lands in the corresponding
 * Lisp error function. */
1372 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1374     struct thread *th=arch_os_get_current_thread();
1376     /* note the os_context hackery here. When the signal handler returns,
1377      * it won't go back to what it was doing ... */
1378     if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1379        addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1380         /* We hit the end of the control stack: disable guard page
1381          * protection so the error handler has some headroom, protect the
1382          * previous page so that we can catch returns from the guard page
1383          * and restore it. */
1384         corruption_warning_and_maybe_lose("Control stack exhausted");
1385         protect_control_stack_guard_page(0);
1386         protect_control_stack_return_guard_page(1);
1388 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1389         /* For the unfortunate case, when the control stack is
1390          * exhausted in a signal handler. */
1391         unblock_signals_in_context_and_maybe_warn(context);
1393         arrange_return_to_lisp_function
1394             (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1397     else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1398             addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1399         /* We're returning from the guard page: reprotect it, and
1400          * unprotect this one. This works even if we somehow missed
1401          * the return-guard-page, and hit it on our way to new
1402          * exhaustion instead. */
1403         fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1404         protect_control_stack_guard_page(1);
1405         protect_control_stack_return_guard_page(0);
1408     else if (addr >= undefined_alien_address &&
1409              addr < undefined_alien_address + os_vm_page_size) {
1410         arrange_return_to_lisp_function
1411             (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1418 * noise to install handlers
1421 #ifndef LISP_FEATURE_WIN32
1422 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1423 * they are blocked, in Linux 2.6 the default handler is invoked
1424 * instead that usually coredumps. One might hastily think that adding
1425 * SA_NODEFER helps, but until ~2.6.13, if SA_NODEFER is specified
1426 * then the whole sa_mask is ignored, rather than just the signal in
1427 * question not being added to the mask. That means if it's not blockable the
1428 * signal must be unblocked at the beginning of signal handlers.
1430 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1431 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1432 * will be unblocked in the sigmask during the signal handler. -- RMK
1435 static volatile int sigaction_nodefer_works = -1;
1437 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1438 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1441 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1443 sigset_t empty, current;
1445 sigemptyset(&empty);
1446 thread_sigmask(SIG_BLOCK, &empty, ¤t);
1447 /* There should be exactly two blocked signals: the two we added
1448 * to sa_mask when setting up the handler. NetBSD doesn't block
1449 * the signal we're handling when SA_NODEFER is set; Linux before
1450 * 2.6.13 or so also doesn't block the other signal when
1451 * SA_NODEFER is set. */
1452 for(i = 1; i < NSIG; i++)
1453 if (sigismember(¤t, i) !=
1454 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1455 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1456 sigaction_nodefer_works = 0;
1458 if (sigaction_nodefer_works == -1)
1459 sigaction_nodefer_works = 1;
/* One-time startup probe: install sigaction_nodefer_test_handler with
 * SA_NODEFER and a two-signal sa_mask, raise the test signal, and wait
 * for the handler to record whether SA_NODEFER behaves usably on this
 * kernel (result lands in sigaction_nodefer_works). */
1463 see_if_sigaction_nodefer_works(void)
1465     struct sigaction sa, old_sa;
1467     sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1468     sa.sa_sigaction = sigaction_nodefer_test_handler;
1469     sigemptyset(&sa.sa_mask);
1470     sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1471     sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1472     sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1473     /* Make sure no signals are blocked. */
1476     sigemptyset(&empty);
1477     thread_sigmask(SIG_SETMASK, &empty, 0);
1479     kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
     /* The signal is normally delivered before kill() returns; the spin
      * covers any kernel where delivery is not synchronous. */
1480     while (sigaction_nodefer_works == -1);
1481     sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1484 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1485 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Outermost handler wrapper used when SA_NODEFER is known broken:
 * explicitly unblock the signal being handled, then do the normal
 * Lisp-visible handling via interrupt_handle_now. */
1488 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1490     SAVE_ERRNO(context,void_context);
1493     sigemptyset(&unblock);
1494     sigaddset(&unblock, signal);
1495     thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1496     interrupt_handle_now(signal, info, context);
/* Low-level analogue of unblock_me_trampoline: unblock the signal,
 * then call the registered C-level handler directly.
 * NOTE(review): the handler is passed void_context rather than the
 * SAVE_ERRNO-derived context -- presumably equivalent on the platforms
 * that take this path, but worth confirming. */
1501 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1503     SAVE_ERRNO(context,void_context);
1506     sigemptyset(&unblock);
1507     sigaddset(&unblock, signal);
1508     thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1509     (*interrupt_low_level_handlers[signal])(signal, info, void_context);
/* Outermost wrapper for non-deferrable signals with a C-level handler:
 * just save/restore errno around a direct call to the registered
 * handler (again passed the raw void_context). */
1514 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1516     SAVE_ERRNO(context,void_context);
1517     (*interrupt_low_level_handlers[signal])(signal, info, void_context);
/* Install HANDLER as the C-level handler for SIGNAL: pick the right
 * outermost trampoline (deferral, unblocking, or direct dispatch),
 * set up the sigaction, and record HANDLER in
 * interrupt_low_level_handlers[] so install_handler() knows the signal
 * is claimed at the C level. */
1522 undoably_install_low_level_interrupt_handler (int signal,
1523                                               interrupt_handler_t handler)
1525     struct sigaction sa;
1527     if (0 > signal || signal >= NSIG) {
1528         lose("bad signal number %d\n", signal);
1531     if (ARE_SAME_HANDLER(handler, SIG_DFL))
1532         sa.sa_sigaction = handler;
1533     else if (sigismember(&deferrable_sigset,signal))
1534         sa.sa_sigaction = low_level_maybe_now_maybe_later;
1535     /* The use of a trampoline appears to break the
1536        arch_os_get_context() workaround for SPARC/Linux.  For now,
1537        don't use the trampoline (and so be vulnerable to the problems
1538        that SA_NODEFER is meant to solve). */
1539 #if !(defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_LINUX))
1540     else if (!sigaction_nodefer_works &&
1541              !sigismember(&blockable_sigset, signal))
1542         sa.sa_sigaction = low_level_unblock_me_trampoline;
1545         sa.sa_sigaction = low_level_handle_now_handler;
1547     sigcopyset(&sa.sa_mask, &blockable_sigset);
1548     sa.sa_flags = SA_SIGINFO | SA_RESTART
1549         | (sigaction_nodefer_works ? SA_NODEFER : 0);
1550 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
     /* Memory faults must run on the alternate stack: the fault may BE
      * stack exhaustion. */
1551     if((signal==SIG_MEMORY_FAULT))
1552         sa.sa_flags |= SA_ONSTACK;
1555     sigaction(signal, &sa, NULL);
1556     interrupt_low_level_handlers[signal] =
1557         (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1561 /* This is called from Lisp. */
/* Install HANDLER as the Lisp-level handler for SIGNAL.  If no C-level
 * handler has claimed the signal (interrupt_low_level_handlers[] is
 * empty for it), also set up the UNIX sigaction with the appropriate
 * outermost trampoline.  The signal is blocked around the update, the
 * handler is recorded in interrupt_handlers[], and the previous
 * Lisp-level handler is returned (as an unsigned long). */
1563 install_handler(int signal, void handler(int, siginfo_t*, void*))
1565 #ifndef LISP_FEATURE_WIN32
1566     struct sigaction sa;
1568     union interrupt_handler oldhandler;
1570     FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
     /* Block SIGNAL while we swap handlers so no delivery sees a
      * half-updated state. */
1573     sigaddset(&new, signal);
1574     thread_sigmask(SIG_BLOCK, &new, &old);
1576     FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1577            (unsigned int)interrupt_low_level_handlers[signal]));
1578     if (interrupt_low_level_handlers[signal]==0) {
1579         if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1580             ARE_SAME_HANDLER(handler, SIG_IGN))
1581             sa.sa_sigaction = handler;
1582         else if (sigismember(&deferrable_sigset, signal))
1583             sa.sa_sigaction = maybe_now_maybe_later;
1584         else if (!sigaction_nodefer_works &&
1585                  !sigismember(&blockable_sigset, signal))
1586             sa.sa_sigaction = unblock_me_trampoline;
1588             sa.sa_sigaction = interrupt_handle_now_handler;
1590         sigcopyset(&sa.sa_mask, &blockable_sigset);
1591         sa.sa_flags = SA_SIGINFO | SA_RESTART |
1592             (sigaction_nodefer_works ? SA_NODEFER : 0);
1593         sigaction(signal, &sa, NULL);
1596     oldhandler = interrupt_handlers[signal];
1597     interrupt_handlers[signal].c = handler;
1599     thread_sigmask(SIG_SETMASK, &old, 0);
1601     FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1603     return (unsigned long)oldhandler.lisp;
1605 /* Probably-wrong Win32 hack */
1610 /* This must not go through lisp as it's allowed anytime, even when on
/* SIGABRT means something has already gone badly wrong; terminate
 * immediately via lose() without involving Lisp at all. */
1613 sigabrt_handler(int signal, siginfo_t *info, void *void_context)
1615     lose("SIGABRT received.\n");
/* One-time signal-system setup: probe SA_NODEFER behavior, build the
 * deferrable/blockable/gc signal sets, point every Lisp-level handler
 * slot at SIG_DFL, and claim SIGABRT at the C level. */
1619 interrupt_init(void)
1621 #ifndef LISP_FEATURE_WIN32
1623     SHOW("entering interrupt_init()");
1624     see_if_sigaction_nodefer_works();
1625     sigemptyset(&deferrable_sigset);
1626     sigemptyset(&blockable_sigset);
1627     sigemptyset(&gc_sigset);
1628     sigaddset_deferrable(&deferrable_sigset);
1629     sigaddset_blockable(&blockable_sigset);
1630     sigaddset_gc(&gc_sigset);
1632     /* Set up high level handler information. */
1633     for (i = 0; i < NSIG; i++) {
1634         interrupt_handlers[i].c =
1635             /* (The cast here blasts away the distinction between
1636              * SA_SIGACTION-style three-argument handlers and
1637              * signal(..)-style one-argument handlers, which is OK
1638              * because it works to call the 1-argument form where the
1639              * 3-argument form is expected.) */
1640             (void (*)(int, siginfo_t*, void*))SIG_DFL;
1642     undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1643     SHOW("returning from interrupt_init()");
1647 #ifndef LISP_FEATURE_WIN32
/* Trivial accessor: expose a siginfo_t's si_code without exposing the
 * struct layout to callers outside this file. */
1649 siginfo_code(siginfo_t *info)
1651     return info->si_code;
1653 os_vm_address_t current_memory_fault_address;
/* Record the faulting address for Lisp's benefit, warn (or die, under
 * lose-on-corruption), and frob CONTEXT so that the handler "returns"
 * into Lisp's MEMORY-FAULT-ERROR. */
1656 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1658     /* FIXME: This is lossy: if we get another memory fault (eg. from
1659      * another thread) before lisp has read this, we lose the information.
1660      * However, since this is mostly informative, we'll live with that for
1661      * now -- some address is better than no address in this case.
1663     current_memory_fault_address = addr;
1664     /* To allow debugging memory faults in signal handlers and such. */
1665     corruption_warning_and_maybe_lose("Memory fault");
1666     unblock_signals_in_context_and_maybe_warn(context);
1667     arrange_return_to_lisp_function(context,
1668                                     StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* A trap code we don't recognize: enter Lisp via UNHANDLED-TRAP-ERROR,
 * handing it a SAP to the signal context.  The Lisp side is expected
 * not to return; if it does, we lose(). */
1673 unhandled_trap_error(os_context_t *context)
1675     lispobj context_sap;
1676     fake_foreign_function_call(context);
1677     unblock_gc_signals();
1678     context_sap = alloc_sap(context);
1679 #ifndef LISP_FEATURE_WIN32
1680     thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1682     funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1683     lose("UNHANDLED-TRAP-ERROR fell through");
1686 /* Common logic for trapping instructions. How we actually handle each
1687 * case is highly architecture dependent, but the overall shape is
1690 handle_trap(os_context_t *context, int trap)
1693 case trap_PendingInterrupt:
1694 FSHOW((stderr, "/<trap pending interrupt>\n"));
1695 arch_skip_instruction(context);
1696 interrupt_handle_pending(context);
1700 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1701 interrupt_internal_error(context, trap==trap_Cerror);
1703 case trap_Breakpoint:
1704 arch_handle_breakpoint(context);
1706 case trap_FunEndBreakpoint:
1707 arch_handle_fun_end_breakpoint(context);
1709 #ifdef trap_AfterBreakpoint
1710 case trap_AfterBreakpoint:
1711 arch_handle_after_breakpoint(context);
1714 #ifdef trap_SingleStepAround
1715 case trap_SingleStepAround:
1716 case trap_SingleStepBefore:
1717 arch_handle_single_step_trap(context, trap);
1721 fake_foreign_function_call(context);
1722 lose("%%PRIMITIVE HALT called; the party is over.\n");
1724 unhandled_trap_error(context);