2 * interrupt-handling magic
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
17 /* As far as I can tell, what's going on here is:
19 * In the case of most signals, when Lisp asks us to handle the
20 * signal, the outermost handler (the one actually passed to UNIX) is
21 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
22 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
23 * and interrupt_low_level_handlers[..] is cleared.
25 * However, some signals need special handling, e.g.
27 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
28 * garbage collector to detect violations of write protection,
29 * because some cases of such signals (e.g. GC-related violations of
30 * write protection) are handled at C level and never passed on to
31 * Lisp. For such signals, we still store any Lisp-level handler
32 * in interrupt_handlers[..], but for the outermost handle we use
33 * the value from interrupt_low_level_handlers[..], instead of the
34 * ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
36 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
37 * pseudo-atomic sections, and some classes of error (e.g. "function
38 * not defined"). This never goes anywhere near the Lisp handlers at all.
39 * See runtime/alpha-arch.c and code/signal.lisp
41 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 /* These are to be used in signal handlers. Currently all handlers are
74 * interrupt_handle_now_handler
75 * maybe_now_maybe_later
76 * unblock_me_trampoline
77 * low_level_handle_now_handler
78 * low_level_maybe_now_maybe_later
79 * low_level_unblock_me_trampoline
/* SAVE_ERRNO()/RESTORE_ERRNO bracket signal-handler bodies: a signal
 * can arrive between any two instructions of the interrupted code, so
 * errno must be preserved across handler execution.  SAVE_ERRNO()
 * opens a block that RESTORE_ERRNO closes — they must always be used
 * as a matching pair in the same function. */
#define SAVE_ERRNO()                            \
    {                                           \
        int _saved_errno = errno;

#define RESTORE_ERRNO                           \
        errno = _saved_errno;                   \
    }
91 static void run_deferred_handler(struct interrupt_data *data, void *v_context);
92 #ifndef LISP_FEATURE_WIN32
93 static void store_signal_data_for_later (struct interrupt_data *data,
94 void *handler, int signal,
96 os_context_t *context);
/* Fill *sigset with the calling thread's current signal mask.
 * Blocking the empty set is a side-effect-free way to read the mask. */
static void
fill_current_sigmask(sigset_t *sigset)
{
    /* Get the current sigmask, by blocking the empty set. */
    sigset_t empty;
    sigemptyset(&empty);
    thread_sigmask(SIG_BLOCK, &empty, sigset);
}
/* Add every "deferrable" signal — those whose Lisp-level handling may
 * be postponed by WITHOUT-INTERRUPTS / pseudo-atomic — to *s. */
void
sigaddset_deferrable(sigset_t *s)
{
    sigaddset(s, SIGHUP);
    sigaddset(s, SIGINT);
    sigaddset(s, SIGTERM);
    sigaddset(s, SIGQUIT);
    sigaddset(s, SIGPIPE);
    sigaddset(s, SIGALRM);
    sigaddset(s, SIGURG);
    sigaddset(s, SIGTSTP);
    sigaddset(s, SIGCHLD);
    sigaddset(s, SIGIO);
#ifndef LISP_FEATURE_HPUX
    /* HP-UX's handling of these two differs; see the HPUX port. */
    sigaddset(s, SIGXCPU);
    sigaddset(s, SIGXFSZ);
#endif
    sigaddset(s, SIGVTALRM);
    sigaddset(s, SIGPROF);
    sigaddset(s, SIGWINCH);
}
/* Remove every deferrable signal from *s — the exact inverse of
 * sigaddset_deferrable.  BUG FIX: the original listing omitted
 * SIGTERM here although sigaddset_deferrable adds it, so SIGTERM
 * stayed blocked forever after one block/unblock cycle; it is now
 * deleted symmetrically. */
void
sigdelset_deferrable(sigset_t *s)
{
    sigdelset(s, SIGHUP);
    sigdelset(s, SIGINT);
    sigdelset(s, SIGTERM);
    sigdelset(s, SIGQUIT);
    sigdelset(s, SIGPIPE);
    sigdelset(s, SIGALRM);
    sigdelset(s, SIGURG);
    sigdelset(s, SIGTSTP);
    sigdelset(s, SIGCHLD);
    sigdelset(s, SIGIO);
#ifndef LISP_FEATURE_HPUX
    sigdelset(s, SIGXCPU);
    sigdelset(s, SIGXFSZ);
#endif
    sigdelset(s, SIGVTALRM);
    sigdelset(s, SIGPROF);
    sigdelset(s, SIGWINCH);
}
/* The "blockable" set is the deferrables plus the GC stop signal. */
void
sigaddset_blockable(sigset_t *sigset)
{
    sigaddset_deferrable(sigset);
    sigaddset_gc(sigset);
}
/* Add the GC-related signal(s) to *sigset.  Only the threaded build
 * uses a dedicated SIG_STOP_FOR_GC; elsewhere this is a no-op. */
void
sigaddset_gc(sigset_t *sigset)
{
#ifdef LISP_FEATURE_SB_THREAD
    sigaddset(sigset,SIG_STOP_FOR_GC);
#endif
}
/* Remove the GC-related signal(s) from *sigset — inverse of
 * sigaddset_gc; a no-op on non-threaded builds. */
void
sigdelset_gc(sigset_t *sigset)
{
#ifdef LISP_FEATURE_SB_THREAD
    sigdelset(sigset,SIG_STOP_FOR_GC);
#endif
}
/* Canonical signal sets, initialized once in interrupt_init().
 * gc_sigset is referenced by the gc-signal checks below; its
 * definition was missing from this listing and is restored here. */
sigset_t deferrable_sigset;
sigset_t blockable_sigset;
sigset_t gc_sigset;
180 deferrables_blocked_in_sigset_p(sigset_t *sigset)
182 #if !defined(LISP_FEATURE_WIN32)
184 for(i = 1; i < NSIG; i++) {
185 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
193 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
195 #if !defined(LISP_FEATURE_WIN32)
197 for(i = 1; i < NSIG; i++) {
198 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
199 lose("deferrable signal %d blocked\n",i);
205 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
207 #if !defined(LISP_FEATURE_WIN32)
209 for(i = 1; i < NSIG; i++) {
210 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
211 lose("deferrable signal %d not blocked\n",i);
/* Die unless all deferrable signals are unblocked in the current
 * thread's sigmask.  (The listing's "¤t" was HTML-entity
 * mangling of "&current"; restored.) */
void
check_deferrables_unblocked_or_lose(void)
{
#if !defined(LISP_FEATURE_WIN32)
    sigset_t current;
    fill_current_sigmask(&current);
    check_deferrables_unblocked_in_sigset_or_lose(&current);
#endif
}
/* Die unless all deferrable signals are blocked in the current
 * thread's sigmask.  ("¤t" mojibake for "&current" repaired.) */
void
check_deferrables_blocked_or_lose(void)
{
#if !defined(LISP_FEATURE_WIN32)
    sigset_t current;
    fill_current_sigmask(&current);
    check_deferrables_blocked_in_sigset_or_lose(&current);
#endif
}
237 check_blockables_blocked_or_lose(void)
239 #if !defined(LISP_FEATURE_WIN32)
242 fill_current_sigmask(¤t);
243 for(i = 1; i < NSIG; i++) {
244 if (sigismember(&blockable_sigset, i) && !sigismember(¤t, i))
245 lose("blockable signal %d not blocked\n",i);
251 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
253 #if !defined(LISP_FEATURE_WIN32)
255 for(i = 1; i < NSIG; i++) {
256 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
257 lose("gc signal %d blocked\n",i);
/* Die if any gc signal is blocked in the current thread's sigmask.
 * ("¤t" mojibake for "&current" repaired.) */
void
check_gc_signals_unblocked_or_lose(void)
{
#if !defined(LISP_FEATURE_WIN32)
    sigset_t current;
    fill_current_sigmask(&current);
    check_gc_signals_unblocked_in_sigset_or_lose(&current);
#endif
}
273 check_interrupts_enabled_or_lose(os_context_t *context)
275 struct thread *thread=arch_os_get_current_thread();
276 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
277 lose("interrupts not enabled\n");
278 if (arch_pseudo_atomic_atomic(context))
279 lose ("in pseudo atomic section\n");
282 /* Save sigset (or the current sigmask if 0) if there is no pending
283 * handler, because that means that deferabbles are already blocked.
284 * The purpose is to avoid losing the pending gc signal if a
285 * deferrable interrupt async unwinds between clearing the pseudo
286 * atomic and trapping to GC.*/
288 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
290 struct thread *thread = arch_os_get_current_thread();
291 struct interrupt_data *data = thread->interrupt_data;
293 /* Obviously, this function is called when signals may not be
294 * blocked. Let's make sure we are not interrupted. */
295 thread_sigmask(SIG_BLOCK, &blockable_sigset, &oldset);
296 #ifndef LISP_FEATURE_SB_THREAD
297 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
299 if (data->gc_blocked_deferrables)
300 lose("gc_blocked_deferrables already true\n");
302 if ((!data->pending_handler) &&
303 (!data->gc_blocked_deferrables)) {
304 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
305 data->gc_blocked_deferrables = 1;
307 /* This is the sigmask of some context. */
308 sigcopyset(&data->pending_mask, sigset);
309 sigaddset_deferrable(sigset);
310 thread_sigmask(SIG_SETMASK,&oldset,0);
313 /* Operating on the current sigmask. Save oldset and
314 * unblock gc signals. In the end, this is equivalent to
315 * blocking the deferrables. */
316 sigcopyset(&data->pending_mask, &oldset);
317 unblock_gc_signals();
321 thread_sigmask(SIG_SETMASK,&oldset,0);
324 /* Are we leaving WITH-GCING and already running with interrupts
325 * enabled, without the protection of *GC-INHIBIT* T and there is gc
326 * (or stop for gc) pending, but we haven't trapped yet? */
328 in_leaving_without_gcing_race_p(struct thread *thread)
330 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
331 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
332 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
333 ((SymbolValue(GC_PENDING,thread) != NIL)
334 #if defined(LISP_FEATURE_SB_THREAD)
335 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
340 /* Check our baroque invariants. */
/* NOTE(review): this listing has gaps — the original line numbers
 * jump (e.g. 363->366, 374->378), so guard lines such as the
 * "if (gc_pending)" test before line 366 and several closing braces
 * are not visible here; do not treat this span as compilable. */
342 check_interrupt_context_or_lose(os_context_t *context)
344 struct thread *thread = arch_os_get_current_thread();
345 struct interrupt_data *data = thread->interrupt_data;
/* A deferred interrupt means a handler was stashed for later. */
346 int interrupt_deferred_p = (data->pending_handler != 0);
347 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
348 sigset_t *sigset = os_context_sigmask_addr(context);
349 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
350 * handle_allocation_trap. */
351 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
352 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
353 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
/* GC_PENDING is compared against T, not merely non-NIL, because
 * :IN-PROGRESS is also a possible value (see SUB-GC). */
354 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
355 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
356 int in_race_p = in_leaving_without_gcing_race_p(thread);
357 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
358 * section and trapping, a SIG_STOP_FOR_GC would see the next
359 * check fail, for this reason sig_stop_for_gc handler does not
360 * call this function. */
361 if (interrupt_deferred_p) {
362 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
363 lose("Stray deferred interrupt.\n")
366 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
367 lose("GC_PENDING, but why?\n");
368 #if defined(LISP_FEATURE_SB_THREAD)
370 int stop_for_gc_pending =
371 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
372 if (stop_for_gc_pending)
373 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
374 lose("STOP_FOR_GC_PENDING, but why?\n");
378 if (interrupt_pending && !interrupt_deferred_p)
379 lose("INTERRUPT_PENDING but not pending handler.\n");
380 if ((data->gc_blocked_deferrables) && interrupt_pending)
381 lose("gc_blocked_deferrables and interrupt pending\n.");
382 if (data->gc_blocked_deferrables)
383 check_deferrables_blocked_in_sigset_or_lose(sigset);
/* With a pending interrupt the deferrables must be blocked in the
 * context's mask; otherwise they must all be unblocked. */
384 if (interrupt_pending || interrupt_deferred_p)
385 check_deferrables_blocked_in_sigset_or_lose(sigset);
387 check_deferrables_unblocked_in_sigset_or_lose(sigset);
388 /* If deferrables are unblocked then we are open to signals
389 * that run lisp code. */
390 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
394 /* When we catch an internal error, should we pass it back to Lisp to
395 * be handled in a high-level way? (Early in cold init, the answer is
396 * 'no', because Lisp is still too brain-dead to handle anything.
397 * After sufficient initialization has been completed, the answer
399 boolean internal_errors_enabled = 0;
401 #ifndef LISP_FEATURE_WIN32
402 static void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, void*);
404 union interrupt_handler interrupt_handlers[NSIG];
407 block_blockable_signals(void)
409 #ifndef LISP_FEATURE_WIN32
410 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
415 block_deferrable_signals(void)
417 #ifndef LISP_FEATURE_WIN32
418 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
/* Remove the deferrable signals from *sigset (e.g. a context's saved
 * mask).  Unblocking them while a handler is pending would run it at
 * the wrong time, hence the check. */
void
unblock_deferrable_signals_in_sigset(sigset_t *sigset)
{
#ifndef LISP_FEATURE_WIN32
    if (interrupt_handler_pending_p())
        lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
    check_gc_signals_unblocked_in_sigset_or_lose(sigset);
    sigdelset_deferrable(sigset);
#endif
}
434 unblock_deferrable_signals(void)
436 #ifndef LISP_FEATURE_WIN32
437 if (interrupt_handler_pending_p())
438 lose("unblock_deferrable_signals: losing proposition\n");
439 check_gc_signals_unblocked_or_lose();
440 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
/* Unblock the gc signal(s) in the calling thread; meaningful only on
 * threaded non-Win32 builds. */
void
unblock_gc_signals(void)
{
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
    thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
#endif
}
453 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
455 #ifndef LISP_FEATURE_WIN32
457 sigset_t *sigset=os_context_sigmask_addr(context);
458 for(i = 1; i < NSIG; i++) {
459 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
462 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
463 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
464 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
469 sigdelset_gc(sigset);
470 if (!interrupt_handler_pending_p()) {
471 unblock_deferrable_signals_in_sigset(sigset);
478 * utility routines used by various signal handlers
/* Build fake control-stack frame(s) describing the interrupted Lisp
 * code, so the debugger/GC can walk the stack across the interrupt.
 * Only needed on ports where the C stack is not the control stack.
 * NOTE(review): the listing has gaps (original lines 483, 485-487,
 * 489, 509-510, 514-515, 520-521, 523-524, 526, 531-532 missing:
 * braces, the `lispobj oldcont;` declaration, and else-arms), so this
 * span is not compilable as shown. */
482 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
484 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
488 /* Build a fake stack frame or frames */
/* Start from the interrupted code's control stack pointer (reg_CSP). */
490 current_control_frame_pointer =
491 (lispobj *)(unsigned long)
492 (*os_context_register_addr(context, reg_CSP));
493 if ((lispobj *)(unsigned long)
494 (*os_context_register_addr(context, reg_CFP))
495 == current_control_frame_pointer) {
496 /* There is a small window during call where the callee's
497 * frame isn't built yet. */
498 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
499 == FUN_POINTER_LOWTAG) {
500 /* We have called, but not built the new frame, so
501 * build it for them. */
/* Slot 0 = old frame pointer (reg_OCFP), slot 1 = return address
 * (reg_LRA), matching a normal frame layout. */
502 current_control_frame_pointer[0] =
503 *os_context_register_addr(context, reg_OCFP);
504 current_control_frame_pointer[1] =
505 *os_context_register_addr(context, reg_LRA);
506 current_control_frame_pointer += 8;
507 /* Build our frame on top of it. */
508 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
511 /* We haven't yet called, build our frame as if the
512 * partial frame wasn't there. */
513 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
516 /* We can't tell whether we are still in the caller if it had to
517 * allocate a stack frame due to stack arguments. */
518 /* This observation provoked some past CMUCL maintainer to ask
519 * "Can anything strange happen during return?" */
521 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
/* Reserve one 8-word frame and fill the fake frame's back-pointer,
 * a NIL return address, and the interrupted code object. */
525 current_control_stack_pointer = current_control_frame_pointer + 8;
527 current_control_frame_pointer[0] = oldcont;
528 current_control_frame_pointer[1] = NIL;
529 current_control_frame_pointer[2] =
530 (lispobj)(*os_context_register_addr(context, reg_CODE));
534 /* Stores the context for gc to scavange and builds fake stack
 * frames, making the interrupted Lisp state look like an ordinary
 * call-out to C so that GC and the debugger can cope.  Must be
 * undone with undo_fake_foreign_function_call.
 * NOTE(review): listing has gaps (missing braces, the declarations of
 * `context_index`, the #endif lines, etc.); not compilable as shown. */
537 fake_foreign_function_call(os_context_t *context)
540 struct thread *thread=arch_os_get_current_thread();
542 /* context_index incrementing must not be interrupted */
543 check_blockables_blocked_or_lose();
545 /* Get current Lisp state from context. */
/* reg_ALLOC holds the allocation (dynamic-space free) pointer on
 * these register-windowed/RISC ports. */
547 dynamic_space_free_pointer =
548 (lispobj *)(unsigned long)
549 (*os_context_register_addr(context, reg_ALLOC));
550 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
551 /* dynamic_space_free_pointer); */
/* On Alpha/MIPS the low bit of reg_ALLOC doubles as the
 * pseudo-atomic flag; a set bit here means corrupted state. */
552 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
553 if ((long)dynamic_space_free_pointer & 1) {
554 lose("dead in fake_foreign_function_call, context = %x\n", context);
557 /* why doesnt PPC and SPARC do something like this: */
558 #if defined(LISP_FEATURE_HPPA)
559 if ((long)dynamic_space_free_pointer & 4) {
560 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
565 current_binding_stack_pointer =
566 (lispobj *)(unsigned long)
567 (*os_context_register_addr(context, reg_BSP));
570 build_fake_control_stack_frames(thread,context);
572 /* Do dynamic binding of the active interrupt context index
573 * and save the context in the context array. */
575 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
577 if (context_index >= MAX_INTERRUPTS) {
578 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
581 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
582 make_fixnum(context_index + 1),thread);
584 thread->interrupt_contexts[context_index] = context;
586 #ifdef FOREIGN_FUNCTION_CALL_FLAG
587 foreign_function_call_active = 1;
591 /* blocks all blockable signals. If you are calling from a signal handler,
592 * the usual signal mask will be restored from the context when the handler
593 * finishes. Otherwise, be careful */
/* Reverses fake_foreign_function_call: unbinds the interrupt-context
 * index and writes the dynamic-space free pointer back into reg_ALLOC.
 * NOTE(review): listing has gaps — the unbind() call after line 605
 * and the #ifdef arms selecting between the two reg_ALLOC merge
 * expressions (lines 606-608, 613-614, 616, 618-620) are missing. */
595 undo_fake_foreign_function_call(os_context_t *context)
597 struct thread *thread=arch_os_get_current_thread();
598 /* Block all blockable signals. */
599 block_blockable_signals();
601 #ifdef FOREIGN_FUNCTION_CALL_FLAG
602 foreign_function_call_active = 0;
605 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
609 /* Put the dynamic space free pointer back into the context. */
/* Two merge forms exist: one keeps the low (flag) bits from the
 * context's reg_ALLOC, the other keeps the lowtag bits from the
 * free pointer — selected by a port #ifdef not visible here. */
610 *os_context_register_addr(context, reg_ALLOC) =
611 (unsigned long) dynamic_space_free_pointer
612 | (*os_context_register_addr(context, reg_ALLOC)
615 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
617 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
622 /* a handler for the signal caused by execution of a trap opcode
623 * signalling an internal error */
/* If Lisp error handling is up (internal_errors_enabled), allocate a
 * SAP wrapping the context and funcall the Lisp INTERNAL_ERROR
 * handler; otherwise describe the error and lose().  Runs inside a
 * fake foreign function call so GC/debugger see consistent state.
 * NOTE(review): listing has gaps (braces, `lispobj context_sap`
 * declaration, the sigset_t newset declaration, #endif lines). */
625 interrupt_internal_error(os_context_t *context, boolean continuable)
629 fake_foreign_function_call(context);
631 if (!internal_errors_enabled) {
632 describe_internal_error(context);
633 /* There's no good way to recover from an internal error
634 * before the Lisp error handling mechanism is set up. */
635 lose("internal error too early in init, can't recover\n");
638 /* Allocate the SAP object while the interrupts are still
640 unblock_gc_signals();
641 context_sap = alloc_sap(context);
/* Restore the interrupted code's signal mask before calling Lisp. */
643 #ifndef LISP_FEATURE_WIN32
644 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
647 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
648 /* Workaround for blocked SIGTRAP. */
651 sigemptyset(&newset);
652 sigaddset(&newset, SIGTRAP);
653 thread_sigmask(SIG_UNBLOCK, &newset, 0);
657 SHOW("in interrupt_internal_error");
659 /* Display some rudimentary debugging information about the
660 * error, so that even if the Lisp error handler gets badly
661 * confused, we have a chance to determine what's going on. */
662 describe_internal_error(context);
664 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
665 continuable ? T : NIL);
667 undo_fake_foreign_function_call(context); /* blocks signals again */
/* For a continuable error, step past the trap instruction and resume. */
669 arch_skip_instruction(context);
673 interrupt_handler_pending_p(void)
675 struct thread *thread = arch_os_get_current_thread();
676 struct interrupt_data *data = thread->interrupt_data;
677 return (data->pending_handler != 0);
/* Run whatever was deferred — a GC, a stashed signal handler, or both
 * — now that the pseudo-atomic / WITHOUT-INTERRUPTS section is over.
 * NOTE(review): this listing has gaps (original line numbers jump),
 * so several closing braces, the unbind() calls after line 769, and
 * assorted guard lines are not visible; not compilable as shown. */
681 interrupt_handle_pending(os_context_t *context)
683 /* There are three ways we can get here. First, if an interrupt
684 * occurs within pseudo-atomic, it will be deferred, and we'll
685 * trap to here at the end of the pseudo-atomic block. Second, if
686 * the GC (in alloc()) decides that a GC is required, it will set
687 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
688 * and alloc() is always called from within pseudo-atomic, and
689 * thus we end up here again. Third, when calling GC-ON or at the
690 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
691 * here if there is a pending GC. Fourth, ahem, at the end of
692 * WITHOUT-INTERRUPTS (bar complications with nesting). */
694 /* Win32 only needs to handle the GC cases (for now?) */
696 struct thread *thread = arch_os_get_current_thread();
697 struct interrupt_data *data = thread->interrupt_data;
/* By the time we trap here pseudo-atomic must already be over. */
699 if (arch_pseudo_atomic_atomic(context)) {
700 lose("Handling pending interrupt in pseduo atomic.");
703 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
705 check_blockables_blocked_or_lose();
707 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
708 * handler, then the pending mask was saved and
709 * gc_blocked_deferrables set. Hence, there can be no pending
710 * handler and it's safe to restore the pending mask.
712 * Note, that if gc_blocked_deferrables is false we may still have
713 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
714 * pseudo atomic was interrupt be a deferrable first. */
715 if (data->gc_blocked_deferrables) {
716 if (data->pending_handler)
717 lose("GC blocked deferrables but still got a pending handler.");
718 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
719 lose("GC blocked deferrables while GC is inhibited.");
720 /* Restore the saved signal mask from the original signal (the
721 * one that interrupted us during the critical section) into
722 * the os_context for the signal we're currently in the
723 * handler for. This should ensure that when we return from
724 * the handler the blocked signals are unblocked. */
725 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
726 data->gc_blocked_deferrables = 0;
729 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
/* Remembered so we can check after GC that no handler was lost. */
730 void *original_pending_handler = data->pending_handler;
732 #ifdef LISP_FEATURE_SB_THREAD
733 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
734 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
735 * the signal handler if it actually stops us. */
736 arch_clear_pseudo_atomic_interrupted(context);
737 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
740 /* Test for T and not for != NIL since the value :IN-PROGRESS
741 * is used in SUB-GC as part of the mechanism to supress
743 if (SymbolValue(GC_PENDING,thread) == T) {
745 /* Two reasons for doing this. First, if there is a
746 * pending handler we don't want to run. Second, we are
747 * going to clear pseudo atomic interrupted to avoid
748 * spurious trapping on every allocation in SUB_GC and
749 * having a pending handler with interrupts enabled and
750 * without pseudo atomic interrupted breaks an
/* Interrupts are bound off around the GC itself. */
752 if (data->pending_handler) {
753 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
754 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
757 arch_clear_pseudo_atomic_interrupted(context);
759 /* GC_PENDING is cleared in SUB-GC, or if another thread
760 * is doing a gc already we will get a SIG_STOP_FOR_GC and
761 * that will clear it.
763 * If there is a pending handler or gc was triggerred in a
764 * signal handler then maybe_gc won't run POST_GC and will
765 * return normally. */
766 if (!maybe_gc(context))
767 lose("GC not inhibited but maybe_gc did not GC.");
/* NOTE(review): the unbind() calls matching the bind_variable
 * pair above (original lines 770-772) are missing from this listing. */
769 if (data->pending_handler) {
773 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
774 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
775 * GC-PENDING is not NIL then we cannot trap on pseudo
776 * atomic due to GC (see if(GC_PENDING) logic in
777 * cheneygc.c an gengcgc.c), plus there is a outer
778 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
780 lose("Trapping to run pending handler while GC in progress.");
783 check_blockables_blocked_or_lose();
785 /* No GC shall be lost. If SUB_GC triggers another GC then
786 * that should be handled on the spot. */
787 if (SymbolValue(GC_PENDING,thread) != NIL)
788 lose("GC_PENDING after doing gc.");
789 #ifdef LISP_FEATURE_SB_THREAD
790 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
791 lose("STOP_FOR_GC_PENDING after doing gc.");
793 /* Check two things. First, that gc does not clobber a handler
794 * that's already pending. Second, that there is no interrupt
795 * lossage: if original_pending_handler was NULL then even if
796 * an interrupt arrived during GC (POST-GC, really) it was
798 if (original_pending_handler != data->pending_handler)
799 lose("pending handler changed in gc: %x -> %d.",
800 original_pending_handler, data->pending_handler);
803 #ifndef LISP_FEATURE_WIN32
804 /* There may be no pending handler, because it was only a gc that
805 * had to be executed or because Lisp is a bit too eager to call
806 * DO-PENDING-INTERRUPT. */
807 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
808 (data->pending_handler)) {
809 /* No matter how we ended up here, clear both
810 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
811 * because we checked above that there is no GC pending. */
812 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
813 arch_clear_pseudo_atomic_interrupted(context);
814 /* Restore the sigmask in the context. */
815 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
816 /* This will break on sparc linux: the deferred handler really
817 * wants to be called with a void_context */
818 run_deferred_handler(data,(void *)context);
820 /* It is possible that the end of this function was reached
821 * without never actually doing anything, the tests in Lisp for
822 * when to call receive-pending-interrupt are not exact. */
823 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
828 * the two main signal handlers:
829 * interrupt_handle_now(..)
830 * maybe_now_maybe_later(..)
832 * to which we have added interrupt_handle_now_handler(..). Why?
833 * Well, mostly because the SPARC/Linux platform doesn't quite do
834 * signals the way we want them done. The third argument in the
835 * handler isn't filled in by the kernel properly, so we fix it up
836 * ourselves in the arch_os_get_context(..) function; however, we only
837 * want to do this when we first hit the handler, and not when
838 * interrupt_handle_now(..) is being called from some other handler
839 * (when the fixup will already have been done). -- CSR, 2002-07-23
/* The main signal dispatcher: look up the registered handler for
 * `signal` and run it — a Lisp function gets called via funcall3 with
 * SAPs wrapping info and context; a C function is called directly
 * with signals re-enabled.  SIG_IGN returns immediately; SIG_DFL
 * loses, since the runtime should never have left a needed signal
 * defaulted.  NOTE(review): this listing has gaps (braces, #endif
 * lines, the funcall3 argument lines 915-917); not compilable as shown. */
843 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
845 #ifdef FOREIGN_FUNCTION_CALL_FLAG
846 boolean were_in_lisp;
848 union interrupt_handler handler;
850 check_blockables_blocked_or_lose();
852 #ifndef LISP_FEATURE_WIN32
853 if (sigismember(&deferrable_sigset,signal))
854 check_interrupts_enabled_or_lose(context);
857 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
858 /* Under Linux on some architectures, we appear to have to restore
859 the FPU control word from the context, as after the signal is
860 delivered we appear to have a null FPU control word. */
861 os_restore_fp_control(context);
864 handler = interrupt_handlers[signal];
866 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
870 #ifdef FOREIGN_FUNCTION_CALL_FLAG
871 were_in_lisp = !foreign_function_call_active;
/* Make the interrupted Lisp state walkable before calling out. */
875 fake_foreign_function_call(context);
878 FSHOW_SIGNAL((stderr,
879 "/entering interrupt_handle_now(%d, info, context)\n",
882 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
884 /* This can happen if someone tries to ignore or default one
885 * of the signals we need for runtime support, and the runtime
886 * support decides to pass on it. */
887 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
/* A function-pointer lowtag means the handler is a Lisp closure. */
889 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
890 /* Once we've decided what to do about contexts in a
891 * return-elsewhere world (the original context will no longer
892 * be available; should we copy it or was nobody using it anyway?)
893 * then we should convert this to return-elsewhere */
895 /* CMUCL comment said "Allocate the SAPs while the interrupts
896 * are still disabled.". I (dan, 2003.08.21) assume this is
897 * because we're not in pseudoatomic and allocation shouldn't
898 * be interrupted. In which case it's no longer an issue as
899 * all our allocation from C now goes through a PA wrapper,
900 * but still, doesn't hurt.
902 * Yeah, but non-gencgc platforms don't really wrap allocation
903 * in PA. MG - 2005-08-29 */
905 lispobj info_sap, context_sap;
906 /* Leave deferrable signals blocked, the handler itself will
907 * allow signals again when it sees fit. */
908 unblock_gc_signals();
909 context_sap = alloc_sap(context);
910 info_sap = alloc_sap(info);
912 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
914 funcall3(handler.lisp,
919 /* This cannot happen in sane circumstances. */
921 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
923 #ifndef LISP_FEATURE_WIN32
924 /* Allow signals again. */
925 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
927 (*handler.c)(signal, info, context);
930 #ifdef FOREIGN_FUNCTION_CALL_FLAG
934 undo_fake_foreign_function_call(context); /* block signals again */
937 FSHOW_SIGNAL((stderr,
938 "/returning from interrupt_handle_now(%d, info, context)\n",
942 /* This is called at the end of a critical section if the indications
943 * are that some signal was deferred during the section. Note that as
944 * far as C or the kernel is concerned we dealt with the signal
945 * already; we're just doing the Lisp-level processing now that we
948 run_deferred_handler(struct interrupt_data *data, void *v_context)
950 /* The pending_handler may enable interrupts and then another
951 * interrupt may hit, overwrite interrupt_data, so reset the
952 * pending handler before calling it. Trust the handler to finish
953 * with the siginfo before enabling interrupts. */
954 void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler;
956 data->pending_handler=0;
957 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
958 (*pending_handler)(data->pending_signal,&(data->pending_info), v_context);
961 #ifndef LISP_FEATURE_WIN32
963 maybe_defer_handler(void *handler, struct interrupt_data *data,
964 int signal, siginfo_t *info, os_context_t *context)
966 struct thread *thread=arch_os_get_current_thread();
968 check_blockables_blocked_or_lose();
970 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
971 lose("interrupt already pending\n");
972 if (thread->interrupt_data->pending_handler)
973 lose("there is a pending handler already (PA)\n");
974 if (data->gc_blocked_deferrables)
975 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
976 check_interrupt_context_or_lose(context);
977 /* If interrupts are disabled then INTERRUPT_PENDING is set and
978 * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo
979 * atomic section inside a WITHOUT-INTERRUPTS.
981 * Also, if in_leaving_without_gcing_race_p then
982 * interrupt_handle_pending is going to be called soon, so
983 * stashing the signal away is safe.
985 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
986 in_leaving_without_gcing_race_p(thread)) {
987 store_signal_data_for_later(data,handler,signal,info,context);
988 SetSymbolValue(INTERRUPT_PENDING, T,thread);
989 FSHOW_SIGNAL((stderr,
990 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
991 (unsigned int)handler,signal,
992 in_leaving_without_gcing_race_p(thread)));
993 check_interrupt_context_or_lose(context);
996 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
997 * actually use its argument for anything on x86, so this branch
998 * may succeed even when context is null (gencgc alloc()) */
999 if (arch_pseudo_atomic_atomic(context)) {
1000 store_signal_data_for_later(data,handler,signal,info,context);
1001 arch_set_pseudo_atomic_interrupted(context);
1002 FSHOW_SIGNAL((stderr,
1003 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
1004 (unsigned int)handler,signal));
1005 check_interrupt_context_or_lose(context);
1008 FSHOW_SIGNAL((stderr,
1009 "/maybe_defer_handler(%x,%d): not deferred\n",
1010 (unsigned int)handler,signal));
1015 store_signal_data_for_later (struct interrupt_data *data, void *handler,
1017 siginfo_t *info, os_context_t *context)
1019 if (data->pending_handler)
1020 lose("tried to overwrite pending interrupt handler %x with %x\n",
1021 data->pending_handler, handler);
1023 lose("tried to defer null interrupt handler\n");
1024 data->pending_handler = handler;
1025 data->pending_signal = signal;
1027 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1029 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1033 lose("Null context");
1035 /* the signal mask in the context (from before we were
1036 * interrupted) is copied to be restored when run_deferred_handler
1037 * happens. Then the usually-blocked signals are added to the mask
1038 * in the context so that we are running with blocked signals when
1039 * the handler returns */
1040 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1041 sigaddset_deferrable(os_context_sigmask_addr(context));
/* Outermost (UNIX-installed) handler for deferrable signals that have
 * a Lisp-level handler: run interrupt_handle_now() immediately unless
 * maybe_defer_handler() decides we must postpone (pseudo-atomic or
 * interrupts disabled), in which case the signal is stashed for
 * run_deferred_handler(). */
1045 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1048     os_context_t *context = arch_os_get_context(&void_context);
1049     struct thread *thread = arch_os_get_current_thread();
1050     struct interrupt_data *data = thread->interrupt_data;
1052 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
    /* The kernel may have reset the FP control word on signal delivery;
     * restore it from the context before doing anything else. */
1053     os_restore_fp_control(context);
1056     if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1057         interrupt_handle_now(signal, info, context);
/* Dispatch SIGNAL straight to the C-level handler recorded in
 * interrupt_low_level_handlers[].  Sanity-checks that blockable
 * signals are blocked and that interrupts are enabled first.  The
 * caller is responsible for FP-control and Darwin context fixups, as
 * the inline comments note. */
1062 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1063                                os_context_t *context)
1065     /* No FP control fixage needed, caller has done that. */
1066     check_blockables_blocked_or_lose();
1067     check_interrupts_enabled_or_lose(context);
1068     (*interrupt_low_level_handlers[signal])(signal, info, context);
1069     /* No Darwin context fixage needed, caller does that. */
/* Outermost handler for deferrable signals that have a low-level (C)
 * handler: defer via maybe_defer_handler() when it is unsafe to run
 * now, otherwise dispatch to low_level_interrupt_handle_now(). */
1073 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1076     os_context_t *context = arch_os_get_context(&void_context);
1077     struct thread *thread = arch_os_get_current_thread();
1078     struct interrupt_data *data = thread->interrupt_data;
1080 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
    /* Restore the FP control word the kernel may have clobbered. */
1081     os_restore_fp_control(context);
1084     if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1085                             signal,info,context))
1086         low_level_interrupt_handle_now(signal, info, context);
1091 #ifdef LISP_FEATURE_SB_THREAD
1093 /* This function must not cons, because that may trigger a GC. */
/* Threaded builds: another thread wants this one parked for GC.
 * Defer (setting STOP-FOR-GC-PENDING) if *GC-INHIBIT* is set or we
 * are pseudo-atomic; otherwise save the context for register
 * scavenging, block all signals, flip the thread to STATE_SUSPENDED
 * and wait to be resumed.  Must not cons, because that may trigger a
 * GC.  NOTE(review): the declaration/initialization of the sigset
 * `ss' used below is on lines not visible in this chunk. */
1095 sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
1097     os_context_t *context = arch_os_get_context(&void_context);
1099     struct thread *thread=arch_os_get_current_thread();
1102     /* Test for GC_INHIBIT _first_, else we'd trap on every single
1103      * pseudo atomic until gc is finally allowed. */
1104     if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1105         SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1106         FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1108     } else if (arch_pseudo_atomic_atomic(context)) {
1109         SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1110         arch_set_pseudo_atomic_interrupted(context);
1111         maybe_save_gc_mask_and_block_deferrables
1112             (os_context_sigmask_addr(context));
1113         FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1117     FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1119     /* Not PA and GC not inhibited -- we can stop now. */
1121     /* need the context stored so it can have registers scavenged */
1122     fake_foreign_function_call(context);
1124     /* Block everything. */
1126     thread_sigmask(SIG_BLOCK,&ss,0);
1128     /* Not pending anymore. */
1129     SetSymbolValue(GC_PENDING,NIL,thread);
1130     SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1132     if(thread_state(thread)!=STATE_RUNNING) {
1133         lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1134              fixnum_value(thread->state));
1137     set_thread_state(thread,STATE_SUSPENDED);
1138     FSHOW_SIGNAL((stderr,"suspended\n"));
    /* Park here until the GC'ing thread moves us out of SUSPENDED. */
1140     wait_for_thread_state_change(thread, STATE_SUSPENDED);
1141     FSHOW_SIGNAL((stderr,"resumed\n"));
1143     if(thread_state(thread)!=STATE_RUNNING) {
1144         lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1145              fixnum_value(thread_state(thread)));
1148     undo_fake_foreign_function_call(context);
1154 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1157 os_context_t *context = arch_os_get_context(&void_context);
1158 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1159 os_restore_fp_control(context);
1160 #ifndef LISP_FEATURE_WIN32
1161 if ((signal == SIGILL) || (signal == SIGBUS)
1162 #ifndef LISP_FEATURE_LINUX
1163 || (signal == SIGEMT)
1166 corruption_warning_and_maybe_lose("Signal %d recieved", signal);
1169 interrupt_handle_now(signal, info, context);
1173 /* manipulate the signal context and stack such that when the handler
1174 * returns, it will call function instead of whatever it was doing
/* Frob CONTEXT so that when this signal handler returns, control goes
 * to FUNCTION instead of back to the interrupted code: on x86/x86-64
 * by faking the stack/registers call_into_lisp (or, on Darwin/x86,
 * call_into_lisp_tramp) expects and pointing the saved PC at it; on
 * other backends by building fake control-stack frames and jumping
 * straight at FUNCTION's code.  GC signals must be unblocked in the
 * context's mask, or we lose.
 * NOTE(review): several lines of this function (braces, closing `*/'
 * of some comment blocks, parts of the x86 stack image, #endif
 * markers, the final lines) are not visible in this chunk; do not
 * infer behavior from the gaps. */
1178 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1179 extern int *context_eflags_addr(os_context_t *context);
1182 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1183 extern void post_signal_tramp(void);
1184 extern void call_into_lisp_tramp(void);
1186 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1188     check_gc_signals_unblocked_in_sigset_or_lose
1189         (os_context_sigmask_addr(context));
1190 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1191     void * fun=native_pointer(function);
1192     void *code = &(((struct simple_fun *) fun)->code);
1195     /* Build a stack frame showing `interrupted' so that the
1196      * user's backtrace makes (as much) sense (as usual) */
1198     /* FIXME: what about restoring fp state? */
1199     /* FIXME: errno is not restored, but since current uses of this
1200      * function only call Lisp code that signals an error, it's not
1201      * much of a problem. In other words, running out of the control
1202      * stack between a syscall and (GET-ERRNO) may clobber errno if
1203      * something fails during signalling or in the handler. But I
1204      * can't see what can go wrong as long as there is no CONTINUE
1205      * like restart on them. */
1206 #ifdef LISP_FEATURE_X86
1207     /* Suppose the existence of some function that saved all
1208      * registers, called call_into_lisp, then restored GP registers and
1209      * returned. It would look something like this:
1217     pushl {address of function to call}
1218     call 0x8058db0 <call_into_lisp>
1225     * What we do here is set up the stack that call_into_lisp would
1226     * expect to see if it had been called by this code, and frob the
1227     * signal context so that signal return goes directly to call_into_lisp,
1228     * and when that function (and the lisp function it invoked) returns,
1229     * it returns to the second half of this imaginary function which
1230     * restores all registers and returns to C
1232     * For this to work, the latter part of the imaginary function
1233     * must obviously exist in reality. That would be post_signal_tramp
1236     u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
1238 #if defined(LISP_FEATURE_DARWIN)
1239     u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1241     FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1242     FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1244     /* 1. os_validate (malloc/mmap) register_save_block
1245      * 2. copy register state into register_save_block
1246      * 3. put a pointer to register_save_block in a register in the context
1247      * 4. set the context's EIP to point to a trampoline which:
1248      *    a. builds the fake stack frame from the block
1249      *    b. frees the block
1250      *    c. calls the function
1253     *register_save_area = *os_context_pc_addr(context);
1254     *(register_save_area + 1) = function;
1255     *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1256     *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1257     *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1258     *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1259     *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1260     *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1261     *(register_save_area + 8) = *context_eflags_addr(context);
1263     *os_context_pc_addr(context) =
1264         (os_context_register_t) call_into_lisp_tramp;
1265     *os_context_register_addr(context,reg_ECX) =
1266         (os_context_register_t) register_save_area;
1269     /* return address for call_into_lisp: */
1270     *(sp-15) = (u32)post_signal_tramp;
1271     *(sp-14) = function;        /* args for call_into_lisp : function*/
1272     *(sp-13) = 0;               /* arg array */
1273     *(sp-12) = 0;               /* no. args */
1274     /* this order matches that used in POPAD */
1275     *(sp-11)=*os_context_register_addr(context,reg_EDI);
1276     *(sp-10)=*os_context_register_addr(context,reg_ESI);
1278     *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1279     /* POPAD ignores the value of ESP:  */
1281     *(sp-7)=*os_context_register_addr(context,reg_EBX);
1283     *(sp-6)=*os_context_register_addr(context,reg_EDX);
1284     *(sp-5)=*os_context_register_addr(context,reg_ECX);
1285     *(sp-4)=*os_context_register_addr(context,reg_EAX);
1286     *(sp-3)=*context_eflags_addr(context);
1287     *(sp-2)=*os_context_register_addr(context,reg_EBP);
1288     *(sp-1)=*os_context_pc_addr(context);
1292 #elif defined(LISP_FEATURE_X86_64)
1293     u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1295     /* return address for call_into_lisp: */
1296     *(sp-18) = (u64)post_signal_tramp;
1298     *(sp-17)=*os_context_register_addr(context,reg_R15);
1299     *(sp-16)=*os_context_register_addr(context,reg_R14);
1300     *(sp-15)=*os_context_register_addr(context,reg_R13);
1301     *(sp-14)=*os_context_register_addr(context,reg_R12);
1302     *(sp-13)=*os_context_register_addr(context,reg_R11);
1303     *(sp-12)=*os_context_register_addr(context,reg_R10);
1304     *(sp-11)=*os_context_register_addr(context,reg_R9);
1305     *(sp-10)=*os_context_register_addr(context,reg_R8);
1306     *(sp-9)=*os_context_register_addr(context,reg_RDI);
1307     *(sp-8)=*os_context_register_addr(context,reg_RSI);
1308     /* skip RBP and RSP */
1309     *(sp-7)=*os_context_register_addr(context,reg_RBX);
1310     *(sp-6)=*os_context_register_addr(context,reg_RDX);
1311     *(sp-5)=*os_context_register_addr(context,reg_RCX);
1312     *(sp-4)=*os_context_register_addr(context,reg_RAX);
1313     *(sp-3)=*context_eflags_addr(context);
1314     *(sp-2)=*os_context_register_addr(context,reg_RBP);
1315     *(sp-1)=*os_context_pc_addr(context);
1317     *os_context_register_addr(context,reg_RDI) =
1318         (os_context_register_t)function; /* function */
1319     *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1320     *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
1322     struct thread *th=arch_os_get_current_thread();
1323     build_fake_control_stack_frames(th,context);
1326 #ifdef LISP_FEATURE_X86
1328 #if !defined(LISP_FEATURE_DARWIN)
1329     *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1330     *os_context_register_addr(context,reg_ECX) = 0;
1331     *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
1333     *os_context_register_addr(context,reg_UESP) =
1334         (os_context_register_t)(sp-15);
1336     *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1337 #endif /* __NETBSD__ */
1338 #endif /* LISP_FEATURE_DARWIN */
1340 #elif defined(LISP_FEATURE_X86_64)
1341     *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1342     *os_context_register_addr(context,reg_RCX) = 0;
1343     *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1344     *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1346     /* this much of the calling convention is common to all
1348     *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1349     *os_context_register_addr(context,reg_NARGS) = 0;
1350     *os_context_register_addr(context,reg_LIP) =
1351         (os_context_register_t)(unsigned long)code;
1352     *os_context_register_addr(context,reg_CFP) =
1353         (os_context_register_t)(unsigned long)current_control_frame_pointer;
1355 #ifdef ARCH_HAS_NPC_REGISTER
1356     *os_context_npc_addr(context) =
1357         4 + *os_context_pc_addr(context);
1359 #ifdef LISP_FEATURE_SPARC
1360     *os_context_register_addr(context,reg_CODE) =
1361         (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1363     FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1367 /* KLUDGE: Theoretically the approach we use for undefined alien
1368 * variables should work for functions as well, but on PPC/Darwin
1369 * we get bus error at bogus addresses instead, hence this workaround,
1370 * that has the added benefit of automatically discriminating between
1371 * functions and variables.
/* Report an UNDEFINED-ALIEN-FUNCTION-ERROR to Lisp.  Reached when
 * control lands on the undefined-alien trampoline; see the KLUDGE
 * comment immediately preceding this function in the file. */
1374 undefined_alien_function(void)
1376     funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Classify a fault at ADDR against the known guard regions and react:
 * control-stack exhaustion (drop the guard, arm the return guard,
 * signal CONTROL-STACK-EXHAUSTED-ERROR), return from the guard page
 * (re-arm protections), or a touch of the undefined-alien page
 * (signal UNDEFINED-ALIEN-VARIABLE-ERROR).  Presumably returns
 * whether the fault was one of these cases -- the return statements
 * fall on lines not visible in this chunk; confirm against the full
 * file. */
1380 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1382     struct thread *th=arch_os_get_current_thread();
1384     /* note the os_context hackery here. When the signal handler returns,
1385      * it won't go back to what it was doing ... */
1386     if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1387        addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1388         /* We hit the end of the control stack: disable guard page
1389          * protection so the error handler has some headroom, protect the
1390          * previous page so that we can catch returns from the guard page
1391          * and restore it. */
1392         corruption_warning_and_maybe_lose("Control stack exhausted");
1393         protect_control_stack_guard_page(0);
1394         protect_control_stack_return_guard_page(1);
1396 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1397         /* For the unfortunate case, when the control stack is
1398          * exhausted in a signal handler. */
1399         unblock_signals_in_context_and_maybe_warn(context);
1401         arrange_return_to_lisp_function
1402             (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1405     else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1406             addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1407         /* We're returning from the guard page: reprotect it, and
1408          * unprotect this one. This works even if we somehow missed
1409          * the return-guard-page, and hit it on our way to new
1410          * exhaustion instead. */
1411         fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1412         protect_control_stack_guard_page(1);
1413         protect_control_stack_return_guard_page(0);
1416     else if (addr >= undefined_alien_address &&
1417              addr < undefined_alien_address + os_vm_page_size) {
1418         arrange_return_to_lisp_function
1419             (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1426 * noise to install handlers
1429 #ifndef LISP_FEATURE_WIN32
1430 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1431 * they are blocked, in Linux 2.6 the default handler is invoked
1432 * instead that usually coredumps. One might hastily think that adding
1433  * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1434  * the whole sa_mask is ignored, instead of merely not adding the signal
1435  * in question to the mask. That means if it's not blockable the
1436 * signal must be unblocked at the beginning of signal handlers.
1438 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1439 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1440 * will be unblocked in the sigmask during the signal handler. -- RMK
/* Verdict of the SA_NODEFER probe below: -1 = not yet tested,
 * 0 = broken on this kernel, 1 = works.  Written from a signal
 * handler while the main thread spins on it, hence volatile. */
1443 static volatile int sigaction_nodefer_works = -1;
1445 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1446 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
1449 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1451 sigset_t empty, current;
1453 sigemptyset(&empty);
1454 thread_sigmask(SIG_BLOCK, &empty, ¤t);
1455 /* There should be exactly two blocked signals: the two we added
1456 * to sa_mask when setting up the handler. NetBSD doesn't block
1457 * the signal we're handling when SA_NODEFER is set; Linux before
1458 * 2.6.13 or so also doesn't block the other signal when
1459 * SA_NODEFER is set. */
1460 for(i = 1; i < NSIG; i++)
1461 if (sigismember(¤t, i) !=
1462 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1463 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1464 sigaction_nodefer_works = 0;
1466 if (sigaction_nodefer_works == -1)
1467 sigaction_nodefer_works = 1;
/* One-shot startup probe for the SA_NODEFER kernel bugs described in
 * the long comment above: install a test handler for
 * SA_NODEFER_TEST_KILL_SIGNAL with a known sa_mask, raise the signal,
 * and spin until the handler records its verdict in
 * sigaction_nodefer_works.  NOTE(review): the declaration of the
 * sigset `empty' used below is on a line not visible in this chunk. */
1471 see_if_sigaction_nodefer_works(void)
1473     struct sigaction sa, old_sa;
1475     sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1476     sa.sa_sigaction = sigaction_nodefer_test_handler;
1477     sigemptyset(&sa.sa_mask);
1478     sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1479     sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1480     sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1481     /* Make sure no signals are blocked. */
1484         sigemptyset(&empty);
1485         thread_sigmask(SIG_SETMASK, &empty, 0);
1487     kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
    /* The handler runs synchronously after kill(); spin until it has
     * written its verdict. */
1488     while (sigaction_nodefer_works == -1);
1489     sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1492 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1493 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Outermost handler used when SA_NODEFER is known broken: explicitly
 * unblock the signal being handled, then run the ordinary
 * interrupt_handle_now().  NOTE(review): the declaration of the
 * sigset `unblock' is on a line not visible in this chunk. */
1496 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1499     os_context_t *context = arch_os_get_context(&void_context);
1502     sigemptyset(&unblock);
1503     sigaddset(&unblock, signal);
1504     thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1505     interrupt_handle_now(signal, info, context);
/* As unblock_me_trampoline, but for signals owned by a low-level (C)
 * handler: unblock the signal, then dispatch via
 * interrupt_low_level_handlers[].  NOTE(review): the declaration of
 * the sigset `unblock' is on a line not visible in this chunk. */
1510 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1515     sigemptyset(&unblock);
1516     sigaddset(&unblock, signal);
1517     thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1518     (*interrupt_low_level_handlers[signal])(signal, info, void_context);
/* Outermost handler for non-deferrable low-level signals when
 * SA_NODEFER works: dispatch straight to the low-level handler
 * table with no deferral or mask games. */
1523 low_level_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1526     (*interrupt_low_level_handlers[signal])(signal, info, void_context);
/* Install HANDLER as the C-level handler for SIGNAL, choosing the
 * appropriate outermost trampoline (defer for deferrable signals,
 * unblock-trampoline when SA_NODEFER is broken, direct dispatch
 * otherwise) and recording HANDLER in
 * interrupt_low_level_handlers[].  SIG_DFL uninstalls.
 * NOTE(review): "undoably" suggests the previous handler is returned;
 * the return statement is on a line not visible in this chunk. */
1531 undoably_install_low_level_interrupt_handler (int signal,
1532                                               interrupt_handler_t handler)
1534     struct sigaction sa;
1536     if (0 > signal || signal >= NSIG) {
1537         lose("bad signal number %d\n", signal);
1540     if (ARE_SAME_HANDLER(handler, SIG_DFL))
1541         sa.sa_sigaction = handler;
1542     else if (sigismember(&deferrable_sigset,signal))
1543         sa.sa_sigaction = low_level_maybe_now_maybe_later;
1544     /* The use of a trampoline appears to break the
1545        arch_os_get_context() workaround for SPARC/Linux.  For now,
1546        don't use the trampoline (and so be vulnerable to the problems
1547        that SA_NODEFER is meant to solve. */
1548 #if !(defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_LINUX))
1549     else if (!sigaction_nodefer_works &&
1550              !sigismember(&blockable_sigset, signal))
1551         sa.sa_sigaction = low_level_unblock_me_trampoline;
1554         sa.sa_sigaction = low_level_handle_now_handler;
1556     sigcopyset(&sa.sa_mask, &blockable_sigset);
1557     sa.sa_flags = SA_SIGINFO | SA_RESTART
1558         | (sigaction_nodefer_works ? SA_NODEFER : 0);
1559 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    /* The memory-fault signal must run on the alternate stack so that
     * control-stack exhaustion can still be reported. */
1560     if((signal==SIG_MEMORY_FAULT))
1561         sa.sa_flags |= SA_ONSTACK;
1564     sigaction(signal, &sa, NULL);
1565     interrupt_low_level_handlers[signal] =
1566         (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1570 /* This is called from Lisp. */
/* Install HANDLER as the Lisp-visible handler for SIGNAL.  If no
 * low-level handler owns the signal, also (re)install the right
 * outermost C trampoline via sigaction(); in all cases record HANDLER
 * in interrupt_handlers[] and return the previous Lisp handler.  Runs
 * with SIGNAL blocked to avoid racing against delivery.
 * NOTE(review): the declarations of the sigsets `new' and `old' are
 * on lines not visible in this chunk. */
1572 install_handler(int signal, void handler(int, siginfo_t*, void*))
1574 #ifndef LISP_FEATURE_WIN32
1575     struct sigaction sa;
1577     union interrupt_handler oldhandler;
1579     FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1582     sigaddset(&new, signal);
1583     thread_sigmask(SIG_BLOCK, &new, &old);
1585     FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1586            (unsigned int)interrupt_low_level_handlers[signal]));
1587     if (interrupt_low_level_handlers[signal]==0) {
1588         if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1589             ARE_SAME_HANDLER(handler, SIG_IGN))
1590             sa.sa_sigaction = handler;
1591         else if (sigismember(&deferrable_sigset, signal))
1592             sa.sa_sigaction = maybe_now_maybe_later;
1593         else if (!sigaction_nodefer_works &&
1594                  !sigismember(&blockable_sigset, signal))
1595             sa.sa_sigaction = unblock_me_trampoline;
1597             sa.sa_sigaction = interrupt_handle_now_handler;
1599         sigcopyset(&sa.sa_mask, &blockable_sigset);
1600         sa.sa_flags = SA_SIGINFO | SA_RESTART |
1601             (sigaction_nodefer_works ? SA_NODEFER : 0);
1602         sigaction(signal, &sa, NULL);
1605     oldhandler = interrupt_handlers[signal];
1606     interrupt_handlers[signal].c = handler;
1608     thread_sigmask(SIG_SETMASK, &old, 0);
1610     FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
1612     return (unsigned long)oldhandler.lisp;
1614     /* Probably-wrong Win32 hack */
/* SIGABRT is fatal: take down the runtime immediately via lose(). */
1619 /* This must not go through lisp as it's allowed anytime, even when on
1622 sigabrt_handler(int signal, siginfo_t *info, void *void_context)
1624     lose("SIGABRT received.\n");
/* One-time initialization of the signal machinery: probe SA_NODEFER
 * behavior, build the deferrable/blockable/gc signal sets, default
 * every Lisp-level handler to SIG_DFL, and claim SIGABRT as a
 * low-level fatal handler.  NOTE(review): the declaration of the
 * loop variable `i' is on a line not visible in this chunk. */
1628 interrupt_init(void)
1630 #ifndef LISP_FEATURE_WIN32
1632     SHOW("entering interrupt_init()");
1633     see_if_sigaction_nodefer_works();
1634     sigemptyset(&deferrable_sigset);
1635     sigemptyset(&blockable_sigset);
1636     sigemptyset(&gc_sigset);
1637     sigaddset_deferrable(&deferrable_sigset);
1638     sigaddset_blockable(&blockable_sigset);
1639     sigaddset_gc(&gc_sigset);
1641     /* Set up high level handler information. */
1642     for (i = 0; i < NSIG; i++) {
1643         interrupt_handlers[i].c =
1644             /* (The cast here blasts away the distinction between
1645              * SA_SIGACTION-style three-argument handlers and
1646              * signal(..)-style one-argument handlers, which is OK
1647              * because it works to call the 1-argument form where the
1648              * 3-argument form is expected.) */
1649             (void (*)(int, siginfo_t*, void*))SIG_DFL;
1651     undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1652     SHOW("returning from interrupt_init()");
1656 #ifndef LISP_FEATURE_WIN32
/* Accessor so callers (e.g. Lisp-side groveling) can read si_code
 * without knowing the platform-specific siginfo_t layout. */
1658 siginfo_code(siginfo_t *info)
1660     return info->si_code;
/* Faulting address of the most recent unhandled memory fault, for
 * Lisp to report. */
1662 os_vm_address_t current_memory_fault_address;
/* Handle a memory fault that no guard-page logic claimed: record the
 * address, warn (to allow debugging faults in signal handlers), and
 * arrange a non-local return into Lisp's MEMORY-FAULT-ERROR. */
1665 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1667     /* FIXME: This is lossy: if we get another memory fault (eg. from
1668      * another thread) before lisp has read this, we lose the information.
1669      * However, since this is mostly informative, we'll live with that for
1670      * now -- some address is better then no address in this case.
1672     current_memory_fault_address = addr;
1673     /* To allow debugging memory faults in signal handlers and such. */
1674     corruption_warning_and_maybe_lose("Memory fault");
1675     unblock_signals_in_context_and_maybe_warn(context);
1676     arrange_return_to_lisp_function(context,
1677                                     StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* A trap code the runtime does not know how to service: box the raw
 * context into a SAP and hand it to Lisp's UNHANDLED-TRAP-ERROR
 * (which should not return); lose() if it somehow does. */
1682 unhandled_trap_error(os_context_t *context)
1684     lispobj context_sap;
1685     fake_foreign_function_call(context);
1686     unblock_gc_signals();
1687     context_sap = alloc_sap(context);
1688 #ifndef LISP_FEATURE_WIN32
1689     thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1691     funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1692     lose("UNHANDLED-TRAP-ERROR fell through");
1695 /* Common logic for trapping instructions. How we actually handle each
1696 * case is highly architecture dependent, but the overall shape is
1699 handle_trap(os_context_t *context, int trap)
1702 case trap_PendingInterrupt:
1703 FSHOW((stderr, "/<trap pending interrupt>\n"));
1704 arch_skip_instruction(context);
1705 interrupt_handle_pending(context);
1709 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
1710 interrupt_internal_error(context, trap==trap_Cerror);
1712 case trap_Breakpoint:
1713 arch_handle_breakpoint(context);
1715 case trap_FunEndBreakpoint:
1716 arch_handle_fun_end_breakpoint(context);
1718 #ifdef trap_AfterBreakpoint
1719 case trap_AfterBreakpoint:
1720 arch_handle_after_breakpoint(context);
1723 #ifdef trap_SingleStepAround
1724 case trap_SingleStepAround:
1725 case trap_SingleStepBefore:
1726 arch_handle_single_step_trap(context, trap);
1730 fake_foreign_function_call(context);
1731 lose("%%PRIMITIVE HALT called; the party is over.\n");
1733 unhandled_trap_error(context);