/*
 * interrupt-handling magic
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/* As far as I can tell, what's going on here is:
 *
 * In the case of most signals, when Lisp asks us to handle the
 * signal, the outermost handler (the one actually passed to UNIX) is
 * either interrupt_handle_now(..) or maybe_now_maybe_later(..).
 * In that case, the Lisp-level handler is stored in interrupt_handlers[..]
 * and interrupt_low_level_handlers[..] is cleared.
 *
 * However, some signals need special handling, e.g.
 *
 * o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
 *   garbage collector to detect violations of write protection,
 *   because some cases of such signals (e.g. GC-related violations of
 *   write protection) are handled at C level and never passed on to
 *   Lisp. For such signals, we still store any Lisp-level handler
 *   in interrupt_handlers[..], but for the outermost handle we use
 *   the value from interrupt_low_level_handlers[..], instead of the
 *   ordinary interrupt_handle_now(..) or interrupt_handle_later(..).
 *
 * o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
 *   pseudo-atomic sections, and some classes of error (e.g. "function
 *   not defined").  This never goes anywhere near the Lisp handlers at all.
 *   See runtime/alpha-arch.c and code/signal.lisp
 *
 * - WHN 20000728, dan 20010128 */
49 #include <sys/types.h>
50 #ifndef LISP_FEATURE_WIN32
58 #include "interrupt.h"
66 #include "pseudo-atomic.h"
67 #include "genesis/fdefn.h"
68 #include "genesis/simple-fun.h"
69 #include "genesis/cons.h"
71 static void run_deferred_handler(struct interrupt_data *data, void *v_context);
72 #ifndef LISP_FEATURE_WIN32
73 static void store_signal_data_for_later (struct interrupt_data *data,
74 void *handler, int signal,
76 os_context_t *context);
79 fill_current_sigmask(sigset_t *sigset)
81 /* Get the current sigmask, by blocking the empty set. */
84 thread_sigmask(SIG_BLOCK, &empty, sigset);
88 sigaddset_deferrable(sigset_t *s)
92 sigaddset(s, SIGTERM);
93 sigaddset(s, SIGQUIT);
94 sigaddset(s, SIGPIPE);
95 sigaddset(s, SIGALRM);
97 sigaddset(s, SIGTSTP);
98 sigaddset(s, SIGCHLD);
100 #ifndef LISP_FEATURE_HPUX
101 sigaddset(s, SIGXCPU);
102 sigaddset(s, SIGXFSZ);
104 sigaddset(s, SIGVTALRM);
105 sigaddset(s, SIGPROF);
106 sigaddset(s, SIGWINCH);
108 #ifdef LISP_FEATURE_SB_THREAD
109 sigaddset(s, SIG_INTERRUPT_THREAD);
114 sigdelset_deferrable(sigset_t *s)
116 sigdelset(s, SIGHUP);
117 sigdelset(s, SIGINT);
118 sigdelset(s, SIGQUIT);
119 sigdelset(s, SIGPIPE);
120 sigdelset(s, SIGALRM);
121 sigdelset(s, SIGURG);
122 sigdelset(s, SIGTSTP);
123 sigdelset(s, SIGCHLD);
125 #ifndef LISP_FEATURE_HPUX
126 sigdelset(s, SIGXCPU);
127 sigdelset(s, SIGXFSZ);
129 sigdelset(s, SIGVTALRM);
130 sigdelset(s, SIGPROF);
131 sigdelset(s, SIGWINCH);
133 #ifdef LISP_FEATURE_SB_THREAD
134 sigdelset(s, SIG_INTERRUPT_THREAD);
139 sigaddset_blockable(sigset_t *sigset)
141 sigaddset_deferrable(sigset);
142 sigaddset_gc(sigset);
146 sigaddset_gc(sigset_t *sigset)
148 #ifdef LISP_FEATURE_SB_THREAD
149 sigaddset(sigset,SIG_STOP_FOR_GC);
154 sigdelset_gc(sigset_t *sigset)
156 #ifdef LISP_FEATURE_SB_THREAD
157 sigdelset(sigset,SIG_STOP_FOR_GC);
161 /* initialized in interrupt_init */
162 sigset_t deferrable_sigset;
163 sigset_t blockable_sigset;
168 deferrables_blocked_in_sigset_p(sigset_t *sigset)
170 #if !defined(LISP_FEATURE_WIN32)
172 for(i = 1; i < NSIG; i++) {
173 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
181 check_deferrables_unblocked_in_sigset_or_lose(sigset_t *sigset)
183 #if !defined(LISP_FEATURE_WIN32)
185 for(i = 1; i < NSIG; i++) {
186 if (sigismember(&deferrable_sigset, i) && sigismember(sigset, i))
187 lose("deferrable signal %d blocked\n",i);
193 check_deferrables_blocked_in_sigset_or_lose(sigset_t *sigset)
195 #if !defined(LISP_FEATURE_WIN32)
197 for(i = 1; i < NSIG; i++) {
198 if (sigismember(&deferrable_sigset, i) && !sigismember(sigset, i))
199 lose("deferrable signal %d not blocked\n",i);
205 check_deferrables_blocked_or_lose(void)
207 #if !defined(LISP_FEATURE_WIN32)
209 fill_current_sigmask(¤t);
210 check_deferrables_blocked_in_sigset_or_lose(¤t);
215 check_blockables_blocked_or_lose(void)
217 #if !defined(LISP_FEATURE_WIN32)
220 fill_current_sigmask(¤t);
221 for(i = 1; i < NSIG; i++) {
222 if (sigismember(&blockable_sigset, i) && !sigismember(¤t, i))
223 lose("blockable signal %d not blocked\n",i);
229 check_gc_signals_unblocked_in_sigset_or_lose(sigset_t *sigset)
231 #if !defined(LISP_FEATURE_WIN32)
233 for(i = 1; i < NSIG; i++) {
234 if (sigismember(&gc_sigset, i) && sigismember(sigset, i))
235 lose("gc signal %d blocked\n",i);
241 check_gc_signals_unblocked_or_lose(void)
243 #if !defined(LISP_FEATURE_WIN32)
245 fill_current_sigmask(¤t);
246 check_gc_signals_unblocked_in_sigset_or_lose(¤t);
251 check_interrupts_enabled_or_lose(os_context_t *context)
253 struct thread *thread=arch_os_get_current_thread();
254 if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
255 lose("interrupts not enabled\n");
256 if (arch_pseudo_atomic_atomic(context))
257 lose ("in pseudo atomic section\n");
260 /* Save sigset (or the current sigmask if 0) if there is no pending
261 * handler, because that means that deferabbles are already blocked.
262 * The purpose is to avoid losing the pending gc signal if a
263 * deferrable interrupt async unwinds between clearing the pseudo
264 * atomic and trapping to GC.*/
266 maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
268 struct thread *thread = arch_os_get_current_thread();
269 struct interrupt_data *data = thread->interrupt_data;
271 /* Obviously, this function is called when signals may not be
272 * blocked. Let's make sure we are not interrupted. */
273 thread_sigmask(SIG_BLOCK, &blockable_sigset, &oldset);
274 #ifndef LISP_FEATURE_SB_THREAD
275 /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
277 if (data->gc_blocked_deferrables)
278 lose("gc_blocked_deferrables already true\n");
280 if ((!data->pending_handler) &&
281 (!data->gc_blocked_deferrables)) {
282 FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
283 data->gc_blocked_deferrables = 1;
285 /* This is the sigmask of some context. */
286 sigcopyset(&data->pending_mask, sigset);
287 sigaddset_deferrable(sigset);
288 thread_sigmask(SIG_SETMASK,&oldset,0);
291 /* Operating on the current sigmask. Save oldset and
292 * unblock gc signals. In the end, this is equivalent to
293 * blocking the deferrables. */
294 sigcopyset(&data->pending_mask, &oldset);
295 unblock_gc_signals();
299 thread_sigmask(SIG_SETMASK,&oldset,0);
302 /* Are we leaving WITH-GCING and already running with interrupts
303 * enabled, without the protection of *GC-INHIBIT* T and there is gc
304 * (or stop for gc) pending, but we haven't trapped yet? */
306 in_leaving_without_gcing_race_p(struct thread *thread)
308 return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
309 (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
310 (SymbolValue(GC_INHIBIT,thread) == NIL) &&
311 ((SymbolValue(GC_PENDING,thread) != NIL)
312 #if defined(LISP_FEATURE_SB_THREAD)
313 || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
318 /* Check our baroque invariants. */
320 check_interrupt_context_or_lose(os_context_t *context)
322 struct thread *thread = arch_os_get_current_thread();
323 struct interrupt_data *data = thread->interrupt_data;
324 int interrupt_deferred_p = (data->pending_handler != 0);
325 int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
326 sigset_t *sigset = os_context_sigmask_addr(context);
327 /* On PPC pseudo_atomic_interrupted is cleared when coming out of
328 * handle_allocation_trap. */
329 #if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
330 int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
331 int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
332 int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
333 int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
334 int in_race_p = in_leaving_without_gcing_race_p(thread);
335 /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
336 * section and trapping, a SIG_STOP_FOR_GC would see the next
337 * check fail, for this reason sig_stop_for_gc handler does not
338 * call this function. */
339 if (interrupt_deferred_p) {
340 if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
341 lose("Stray deferred interrupt.\n");
344 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
345 lose("GC_PENDING, but why?\n");
346 #if defined(LISP_FEATURE_SB_THREAD)
348 int stop_for_gc_pending =
349 (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
350 if (stop_for_gc_pending)
351 if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
352 lose("STOP_FOR_GC_PENDING, but why?\n");
356 if (interrupt_pending && !interrupt_deferred_p)
357 lose("INTERRUPT_PENDING but not pending handler.\n");
358 if ((data->gc_blocked_deferrables) && interrupt_pending)
359 lose("gc_blocked_deferrables and interrupt pending\n.");
360 if (data->gc_blocked_deferrables)
361 check_deferrables_blocked_in_sigset_or_lose(sigset);
362 if (interrupt_pending || interrupt_deferred_p)
363 check_deferrables_blocked_in_sigset_or_lose(sigset);
365 check_deferrables_unblocked_in_sigset_or_lose(sigset);
366 /* If deferrables are unblocked then we are open to signals
367 * that run lisp code. */
368 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
372 /* When we catch an internal error, should we pass it back to Lisp to
373 * be handled in a high-level way? (Early in cold init, the answer is
374 * 'no', because Lisp is still too brain-dead to handle anything.
375 * After sufficient initialization has been completed, the answer
377 boolean internal_errors_enabled = 0;
379 #ifndef LISP_FEATURE_WIN32
380 static void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, void*);
382 union interrupt_handler interrupt_handlers[NSIG];
385 block_blockable_signals(void)
387 #ifndef LISP_FEATURE_WIN32
388 thread_sigmask(SIG_BLOCK, &blockable_sigset, 0);
393 block_deferrable_signals(void)
395 #ifndef LISP_FEATURE_WIN32
396 thread_sigmask(SIG_BLOCK, &deferrable_sigset, 0);
401 unblock_deferrable_signals_in_sigset(sigset_t *sigset)
403 #ifndef LISP_FEATURE_WIN32
404 if (interrupt_handler_pending_p())
405 lose("unblock_deferrable_signals_in_sigset: losing proposition\n");
406 check_gc_signals_unblocked_in_sigset_or_lose(sigset);
407 sigdelset_deferrable(sigset);
412 unblock_deferrable_signals(void)
414 #ifndef LISP_FEATURE_WIN32
415 if (interrupt_handler_pending_p())
416 lose("unblock_deferrable_signals: losing proposition\n");
417 check_gc_signals_unblocked_or_lose();
418 thread_sigmask(SIG_UNBLOCK, &deferrable_sigset, 0);
/* Unblock the GC signals in the calling thread's live mask.  Only
 * meaningful on threaded non-Win32 builds. */
void
unblock_gc_signals(void)
{
#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_WIN32)
    thread_sigmask(SIG_UNBLOCK,&gc_sigset,0);
#endif
}
431 unblock_signals_in_context_and_maybe_warn(os_context_t *context)
433 #ifndef LISP_FEATURE_WIN32
435 sigset_t *sigset=os_context_sigmask_addr(context);
436 for(i = 1; i < NSIG; i++) {
437 if (sigismember(&gc_sigset, i) && sigismember(sigset, i)) {
440 "Enabling blocked gc signals to allow returning to Lisp without risking\n\
441 gc deadlocks. Since GC signals are only blocked in signal handlers when \n\
442 they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
447 sigdelset_gc(sigset);
448 if (!interrupt_handler_pending_p()) {
449 unblock_deferrable_signals_in_sigset(sigset);
456 * utility routines used by various signal handlers
460 build_fake_control_stack_frames(struct thread *th,os_context_t *context)
462 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
466 /* Build a fake stack frame or frames */
468 current_control_frame_pointer =
469 (lispobj *)(unsigned long)
470 (*os_context_register_addr(context, reg_CSP));
471 if ((lispobj *)(unsigned long)
472 (*os_context_register_addr(context, reg_CFP))
473 == current_control_frame_pointer) {
474 /* There is a small window during call where the callee's
475 * frame isn't built yet. */
476 if (lowtag_of(*os_context_register_addr(context, reg_CODE))
477 == FUN_POINTER_LOWTAG) {
478 /* We have called, but not built the new frame, so
479 * build it for them. */
480 current_control_frame_pointer[0] =
481 *os_context_register_addr(context, reg_OCFP);
482 current_control_frame_pointer[1] =
483 *os_context_register_addr(context, reg_LRA);
484 current_control_frame_pointer += 8;
485 /* Build our frame on top of it. */
486 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
489 /* We haven't yet called, build our frame as if the
490 * partial frame wasn't there. */
491 oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
494 /* We can't tell whether we are still in the caller if it had to
495 * allocate a stack frame due to stack arguments. */
496 /* This observation provoked some past CMUCL maintainer to ask
497 * "Can anything strange happen during return?" */
500 oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
503 current_control_stack_pointer = current_control_frame_pointer + 8;
505 current_control_frame_pointer[0] = oldcont;
506 current_control_frame_pointer[1] = NIL;
507 current_control_frame_pointer[2] =
508 (lispobj)(*os_context_register_addr(context, reg_CODE));
512 /* Stores the context for gc to scavange and builds fake stack
515 fake_foreign_function_call(os_context_t *context)
518 struct thread *thread=arch_os_get_current_thread();
520 /* context_index incrementing must not be interrupted */
521 check_blockables_blocked_or_lose();
523 /* Get current Lisp state from context. */
525 dynamic_space_free_pointer =
526 (lispobj *)(unsigned long)
527 (*os_context_register_addr(context, reg_ALLOC));
528 /* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
529 /* dynamic_space_free_pointer); */
530 #if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
531 if ((long)dynamic_space_free_pointer & 1) {
532 lose("dead in fake_foreign_function_call, context = %x\n", context);
535 /* why doesnt PPC and SPARC do something like this: */
536 #if defined(LISP_FEATURE_HPPA)
537 if ((long)dynamic_space_free_pointer & 4) {
538 lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
543 current_binding_stack_pointer =
544 (lispobj *)(unsigned long)
545 (*os_context_register_addr(context, reg_BSP));
548 build_fake_control_stack_frames(thread,context);
550 /* Do dynamic binding of the active interrupt context index
551 * and save the context in the context array. */
553 fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
555 if (context_index >= MAX_INTERRUPTS) {
556 lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
559 bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
560 make_fixnum(context_index + 1),thread);
562 thread->interrupt_contexts[context_index] = context;
564 #ifdef FOREIGN_FUNCTION_CALL_FLAG
565 foreign_function_call_active = 1;
569 /* blocks all blockable signals. If you are calling from a signal handler,
570 * the usual signal mask will be restored from the context when the handler
571 * finishes. Otherwise, be careful */
573 undo_fake_foreign_function_call(os_context_t *context)
575 struct thread *thread=arch_os_get_current_thread();
576 /* Block all blockable signals. */
577 block_blockable_signals();
579 #ifdef FOREIGN_FUNCTION_CALL_FLAG
580 foreign_function_call_active = 0;
583 /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
587 /* Put the dynamic space free pointer back into the context. */
588 *os_context_register_addr(context, reg_ALLOC) =
589 (unsigned long) dynamic_space_free_pointer
590 | (*os_context_register_addr(context, reg_ALLOC)
593 ((unsigned long)(*os_context_register_addr(context, reg_ALLOC))
595 | ((unsigned long) dynamic_space_free_pointer & LOWTAG_MASK);
600 /* a handler for the signal caused by execution of a trap opcode
601 * signalling an internal error */
603 interrupt_internal_error(os_context_t *context, boolean continuable)
607 fake_foreign_function_call(context);
609 if (!internal_errors_enabled) {
610 describe_internal_error(context);
611 /* There's no good way to recover from an internal error
612 * before the Lisp error handling mechanism is set up. */
613 lose("internal error too early in init, can't recover\n");
616 /* Allocate the SAP object while the interrupts are still
618 unblock_gc_signals();
619 context_sap = alloc_sap(context);
621 #ifndef LISP_FEATURE_WIN32
622 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
625 #if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
626 /* Workaround for blocked SIGTRAP. */
629 sigemptyset(&newset);
630 sigaddset(&newset, SIGTRAP);
631 thread_sigmask(SIG_UNBLOCK, &newset, 0);
635 SHOW("in interrupt_internal_error");
637 /* Display some rudimentary debugging information about the
638 * error, so that even if the Lisp error handler gets badly
639 * confused, we have a chance to determine what's going on. */
640 describe_internal_error(context);
642 funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
643 continuable ? T : NIL);
645 undo_fake_foreign_function_call(context); /* blocks signals again */
647 arch_skip_instruction(context);
651 interrupt_handler_pending_p(void)
653 struct thread *thread = arch_os_get_current_thread();
654 struct interrupt_data *data = thread->interrupt_data;
655 return (data->pending_handler != 0);
659 interrupt_handle_pending(os_context_t *context)
661 /* There are three ways we can get here. First, if an interrupt
662 * occurs within pseudo-atomic, it will be deferred, and we'll
663 * trap to here at the end of the pseudo-atomic block. Second, if
664 * the GC (in alloc()) decides that a GC is required, it will set
665 * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
666 * and alloc() is always called from within pseudo-atomic, and
667 * thus we end up here again. Third, when calling GC-ON or at the
668 * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
669 * here if there is a pending GC. Fourth, ahem, at the end of
670 * WITHOUT-INTERRUPTS (bar complications with nesting). */
672 /* Win32 only needs to handle the GC cases (for now?) */
674 struct thread *thread = arch_os_get_current_thread();
675 struct interrupt_data *data = thread->interrupt_data;
677 if (arch_pseudo_atomic_atomic(context)) {
678 lose("Handling pending interrupt in pseduo atomic.");
681 FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
683 check_blockables_blocked_or_lose();
685 /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
686 * handler, then the pending mask was saved and
687 * gc_blocked_deferrables set. Hence, there can be no pending
688 * handler and it's safe to restore the pending mask.
690 * Note, that if gc_blocked_deferrables is false we may still have
691 * to GC. In this case, we are coming out of a WITHOUT-GCING or a
692 * pseudo atomic was interrupt be a deferrable first. */
693 if (data->gc_blocked_deferrables) {
694 if (data->pending_handler)
695 lose("GC blocked deferrables but still got a pending handler.");
696 if (SymbolValue(GC_INHIBIT,thread)!=NIL)
697 lose("GC blocked deferrables while GC is inhibited.");
698 /* Restore the saved signal mask from the original signal (the
699 * one that interrupted us during the critical section) into
700 * the os_context for the signal we're currently in the
701 * handler for. This should ensure that when we return from
702 * the handler the blocked signals are unblocked. */
703 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
704 data->gc_blocked_deferrables = 0;
707 if (SymbolValue(GC_INHIBIT,thread)==NIL) {
708 void *original_pending_handler = data->pending_handler;
710 #ifdef LISP_FEATURE_SB_THREAD
711 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
712 /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
713 * the signal handler if it actually stops us. */
714 arch_clear_pseudo_atomic_interrupted(context);
715 sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
718 /* Test for T and not for != NIL since the value :IN-PROGRESS
719 * is used in SUB-GC as part of the mechanism to supress
721 if (SymbolValue(GC_PENDING,thread) == T) {
723 /* Two reasons for doing this. First, if there is a
724 * pending handler we don't want to run. Second, we are
725 * going to clear pseudo atomic interrupted to avoid
726 * spurious trapping on every allocation in SUB_GC and
727 * having a pending handler with interrupts enabled and
728 * without pseudo atomic interrupted breaks an
730 if (data->pending_handler) {
731 bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
732 bind_variable(INTERRUPTS_ENABLED, NIL, thread);
735 arch_clear_pseudo_atomic_interrupted(context);
737 /* GC_PENDING is cleared in SUB-GC, or if another thread
738 * is doing a gc already we will get a SIG_STOP_FOR_GC and
739 * that will clear it.
741 * If there is a pending handler or gc was triggerred in a
742 * signal handler then maybe_gc won't run POST_GC and will
743 * return normally. */
744 if (!maybe_gc(context))
745 lose("GC not inhibited but maybe_gc did not GC.");
747 if (data->pending_handler) {
751 } else if (SymbolValue(GC_PENDING,thread) != NIL) {
752 /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
753 * GC-PENDING is not NIL then we cannot trap on pseudo
754 * atomic due to GC (see if(GC_PENDING) logic in
755 * cheneygc.c an gengcgc.c), plus there is a outer
756 * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
758 lose("Trapping to run pending handler while GC in progress.");
761 check_blockables_blocked_or_lose();
763 /* No GC shall be lost. If SUB_GC triggers another GC then
764 * that should be handled on the spot. */
765 if (SymbolValue(GC_PENDING,thread) != NIL)
766 lose("GC_PENDING after doing gc.");
767 #ifdef LISP_FEATURE_SB_THREAD
768 if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
769 lose("STOP_FOR_GC_PENDING after doing gc.");
771 /* Check two things. First, that gc does not clobber a handler
772 * that's already pending. Second, that there is no interrupt
773 * lossage: if original_pending_handler was NULL then even if
774 * an interrupt arrived during GC (POST-GC, really) it was
776 if (original_pending_handler != data->pending_handler)
777 lose("pending handler changed in gc: %x -> %d.",
778 original_pending_handler, data->pending_handler);
781 #ifndef LISP_FEATURE_WIN32
782 /* There may be no pending handler, because it was only a gc that
783 * had to be executed or because Lisp is a bit too eager to call
784 * DO-PENDING-INTERRUPT. */
785 if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
786 (data->pending_handler)) {
787 /* No matter how we ended up here, clear both
788 * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
789 * because we checked above that there is no GC pending. */
790 SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
791 arch_clear_pseudo_atomic_interrupted(context);
792 /* Restore the sigmask in the context. */
793 sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
794 /* This will break on sparc linux: the deferred handler really
795 * wants to be called with a void_context */
796 run_deferred_handler(data,(void *)context);
798 /* It is possible that the end of this function was reached
799 * without never actually doing anything, the tests in Lisp for
800 * when to call receive-pending-interrupt are not exact. */
801 FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
806 * the two main signal handlers:
807 * interrupt_handle_now(..)
808 * maybe_now_maybe_later(..)
810 * to which we have added interrupt_handle_now_handler(..). Why?
811 * Well, mostly because the SPARC/Linux platform doesn't quite do
812 * signals the way we want them done. The third argument in the
813 * handler isn't filled in by the kernel properly, so we fix it up
814 * ourselves in the arch_os_get_context(..) function; however, we only
815 * want to do this when we first hit the handler, and not when
816 * interrupt_handle_now(..) is being called from some other handler
817 * (when the fixup will already have been done). -- CSR, 2002-07-23
821 interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
823 #ifdef FOREIGN_FUNCTION_CALL_FLAG
824 boolean were_in_lisp;
826 union interrupt_handler handler;
828 check_blockables_blocked_or_lose();
830 #ifndef LISP_FEATURE_WIN32
831 if (sigismember(&deferrable_sigset,signal))
832 check_interrupts_enabled_or_lose(context);
835 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
836 /* Under Linux on some architectures, we appear to have to restore
837 the FPU control word from the context, as after the signal is
838 delivered we appear to have a null FPU control word. */
839 os_restore_fp_control(context);
842 handler = interrupt_handlers[signal];
844 if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
848 #ifdef FOREIGN_FUNCTION_CALL_FLAG
849 were_in_lisp = !foreign_function_call_active;
853 fake_foreign_function_call(context);
856 FSHOW_SIGNAL((stderr,
857 "/entering interrupt_handle_now(%d, info, context)\n",
860 if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
862 /* This can happen if someone tries to ignore or default one
863 * of the signals we need for runtime support, and the runtime
864 * support decides to pass on it. */
865 lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
867 } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
868 /* Once we've decided what to do about contexts in a
869 * return-elsewhere world (the original context will no longer
870 * be available; should we copy it or was nobody using it anyway?)
871 * then we should convert this to return-elsewhere */
873 /* CMUCL comment said "Allocate the SAPs while the interrupts
874 * are still disabled.". I (dan, 2003.08.21) assume this is
875 * because we're not in pseudoatomic and allocation shouldn't
876 * be interrupted. In which case it's no longer an issue as
877 * all our allocation from C now goes through a PA wrapper,
878 * but still, doesn't hurt.
880 * Yeah, but non-gencgc platforms don't really wrap allocation
881 * in PA. MG - 2005-08-29 */
883 lispobj info_sap, context_sap;
884 /* Leave deferrable signals blocked, the handler itself will
885 * allow signals again when it sees fit. */
886 unblock_gc_signals();
887 context_sap = alloc_sap(context);
888 info_sap = alloc_sap(info);
890 FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
892 funcall3(handler.lisp,
897 /* This cannot happen in sane circumstances. */
899 FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
901 #ifndef LISP_FEATURE_WIN32
902 /* Allow signals again. */
903 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
905 (*handler.c)(signal, info, context);
908 #ifdef FOREIGN_FUNCTION_CALL_FLAG
912 undo_fake_foreign_function_call(context); /* block signals again */
915 FSHOW_SIGNAL((stderr,
916 "/returning from interrupt_handle_now(%d, info, context)\n",
920 /* This is called at the end of a critical section if the indications
921 * are that some signal was deferred during the section. Note that as
922 * far as C or the kernel is concerned we dealt with the signal
923 * already; we're just doing the Lisp-level processing now that we
926 run_deferred_handler(struct interrupt_data *data, void *v_context)
928 /* The pending_handler may enable interrupts and then another
929 * interrupt may hit, overwrite interrupt_data, so reset the
930 * pending handler before calling it. Trust the handler to finish
931 * with the siginfo before enabling interrupts. */
932 void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler;
934 data->pending_handler=0;
935 FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
936 (*pending_handler)(data->pending_signal,&(data->pending_info), v_context);
939 #ifndef LISP_FEATURE_WIN32
941 maybe_defer_handler(void *handler, struct interrupt_data *data,
942 int signal, siginfo_t *info, os_context_t *context)
944 struct thread *thread=arch_os_get_current_thread();
946 check_blockables_blocked_or_lose();
948 if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
949 lose("interrupt already pending\n");
950 if (thread->interrupt_data->pending_handler)
951 lose("there is a pending handler already (PA)\n");
952 if (data->gc_blocked_deferrables)
953 lose("maybe_defer_handler: gc_blocked_deferrables true\n");
954 check_interrupt_context_or_lose(context);
955 /* If interrupts are disabled then INTERRUPT_PENDING is set and
956 * not PSEDUO_ATOMIC_INTERRUPTED. This is important for a pseudo
957 * atomic section inside a WITHOUT-INTERRUPTS.
959 * Also, if in_leaving_without_gcing_race_p then
960 * interrupt_handle_pending is going to be called soon, so
961 * stashing the signal away is safe.
963 if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
964 in_leaving_without_gcing_race_p(thread)) {
965 store_signal_data_for_later(data,handler,signal,info,context);
966 SetSymbolValue(INTERRUPT_PENDING, T,thread);
967 FSHOW_SIGNAL((stderr,
968 "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
969 (unsigned int)handler,signal,
970 in_leaving_without_gcing_race_p(thread)));
971 check_interrupt_context_or_lose(context);
974 /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
975 * actually use its argument for anything on x86, so this branch
976 * may succeed even when context is null (gencgc alloc()) */
977 if (arch_pseudo_atomic_atomic(context)) {
978 store_signal_data_for_later(data,handler,signal,info,context);
979 arch_set_pseudo_atomic_interrupted(context);
980 FSHOW_SIGNAL((stderr,
981 "/maybe_defer_handler(%x,%d): deferred(PA)\n",
982 (unsigned int)handler,signal));
983 check_interrupt_context_or_lose(context);
986 FSHOW_SIGNAL((stderr,
987 "/maybe_defer_handler(%x,%d): not deferred\n",
988 (unsigned int)handler,signal));
993 store_signal_data_for_later (struct interrupt_data *data, void *handler,
995 siginfo_t *info, os_context_t *context)
997 if (data->pending_handler)
998 lose("tried to overwrite pending interrupt handler %x with %x\n",
999 data->pending_handler, handler);
1001 lose("tried to defer null interrupt handler\n");
1002 data->pending_handler = handler;
1003 data->pending_signal = signal;
1005 memcpy(&(data->pending_info), info, sizeof(siginfo_t));
1007 FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
1011 lose("Null context");
1013 /* the signal mask in the context (from before we were
1014 * interrupted) is copied to be restored when run_deferred_handler
1015 * happens. Then the usually-blocked signals are added to the mask
1016 * in the context so that we are running with blocked signals when
1017 * the handler returns */
1018 sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
1019 sigaddset_deferrable(os_context_sigmask_addr(context));
1023 maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1025 os_context_t *context = arch_os_get_context(&void_context);
1026 struct thread *thread = arch_os_get_current_thread();
1027 struct interrupt_data *data = thread->interrupt_data;
1029 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1030 os_restore_fp_control(context);
1033 if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
1034 interrupt_handle_now(signal, info, context);
/* Invoke the C-level ("low level") handler registered for SIGNAL in
 * interrupt_low_level_handlers[].  Sanity-checks that blockable
 * signals are blocked and that interrupts are enabled before
 * dispatching; platform fixups are the caller's responsibility. */
1038 low_level_interrupt_handle_now(int signal, siginfo_t *info,
1039 os_context_t *context)
1041 /* No FP control fixage needed, caller has done that. */
1042 check_blockables_blocked_or_lose();
1043 check_interrupts_enabled_or_lose(context);
1044 (*interrupt_low_level_handlers[signal])(signal, info, context);
1045 /* No Darwin context fixage needed, caller does that. */
/* Like maybe_now_maybe_later, but for signals with C-level handlers:
 * run low_level_interrupt_handle_now immediately unless
 * maybe_defer_handler decides the signal must be deferred. */
1049 low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
1051 os_context_t *context = arch_os_get_context(&void_context);
1052 struct thread *thread = arch_os_get_current_thread();
1053 struct interrupt_data *data = thread->interrupt_data;
/* Restore the FP control word clobbered by signal delivery. */
1055 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1056 os_restore_fp_control(context);
1059 if(!maybe_defer_handler(low_level_interrupt_handle_now,data,
1060 signal,info,context))
1061 low_level_interrupt_handle_now(signal, info, context);
1065 #ifdef LISP_FEATURE_SB_THREAD
1067 /* This function must not cons, because that may trigger a GC. */
/* Stop-the-world handler: another thread wants to GC and has signalled
 * us to suspend.  If *GC-INHIBIT* is set or we are pseudo-atomic, only
 * record STOP-FOR-GC-PENDING and return; otherwise block all signals,
 * mark this thread SUSPENDED, and wait until the GCing thread moves us
 * back to a running state. */
1069 sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
1071 os_context_t *context = arch_os_get_context(&void_context);
1073 struct thread *thread=arch_os_get_current_thread();
1076 /* Test for GC_INHIBIT _first_, else we'd trap on every single
1077 * pseudo atomic until gc is finally allowed. */
1078 if (SymbolValue(GC_INHIBIT,thread) != NIL) {
1079 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1080 FSHOW_SIGNAL((stderr, "sig_stop_for_gc deferred (*GC-INHIBIT*)\n"));
1082 } else if (arch_pseudo_atomic_atomic(context)) {
/* Pseudo-atomic: arrange to be re-trapped when the PA section ends. */
1083 SetSymbolValue(STOP_FOR_GC_PENDING,T,thread);
1084 arch_set_pseudo_atomic_interrupted(context);
1085 maybe_save_gc_mask_and_block_deferrables
1086 (os_context_sigmask_addr(context));
1087 FSHOW_SIGNAL((stderr,"sig_stop_for_gc deferred (PA)\n"));
1091 FSHOW_SIGNAL((stderr, "/sig_stop_for_gc_handler\n"));
1093 /* Not PA and GC not inhibited -- we can stop now. */
1095 /* need the context stored so it can have registers scavenged */
1096 fake_foreign_function_call(context);
1098 /* Block everything. */
/* NOTE(review): ss is presumably a sigfillset()-ed sigset_t declared
 * on an elided line above — confirm against the full source. */
1100 thread_sigmask(SIG_BLOCK,&ss,0);
1102 /* Not pending anymore. */
1103 SetSymbolValue(GC_PENDING,NIL,thread);
1104 SetSymbolValue(STOP_FOR_GC_PENDING,NIL,thread);
1106 if(thread_state(thread)!=STATE_RUNNING) {
1107 lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
1108 fixnum_value(thread->state));
1111 set_thread_state(thread,STATE_SUSPENDED);
1112 FSHOW_SIGNAL((stderr,"suspended\n"));
/* Park here until the GCing thread changes our state away from
 * STATE_SUSPENDED. */
1114 wait_for_thread_state_change(thread, STATE_SUSPENDED);
1115 FSHOW_SIGNAL((stderr,"resumed\n"));
1117 if(thread_state(thread)!=STATE_RUNNING) {
1118 lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
1119 fixnum_value(thread_state(thread)));
1122 undo_fake_foreign_function_call(context);
/* Outermost handler that runs the Lisp-level handler immediately
 * (never defers).  For signals that commonly indicate corruption
 * (SIGILL, SIGBUS, and SIGEMT where it exists) a warning is printed
 * first — and may lose — before handing off to interrupt_handle_now. */
1128 interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
1130 os_context_t *context = arch_os_get_context(&void_context);
/* Restore the FP control word clobbered by signal delivery. */
1131 #if defined(LISP_FEATURE_LINUX) || defined(RESTORE_FP_CONTROL_FROM_CONTEXT)
1132 os_restore_fp_control(context);
1133 #ifndef LISP_FEATURE_WIN32
1134 if ((signal == SIGILL) || (signal == SIGBUS)
1135 #ifndef LISP_FEATURE_LINUX
1136 || (signal == SIGEMT)
/* Fixed typo in the user-visible warning: "recieved" -> "received". */
1139 corruption_warning_and_maybe_lose("Signal %d received", signal);
1142 interrupt_handle_now(signal, info, context);
1145 /* manipulate the signal context and stack such that when the handler
1146 * returns, it will call function instead of whatever it was doing
/* On x86/x86-64 this needs direct access to EFLAGS and the two
 * assembly trampolines defined in the arch-specific runtime. */
1150 #if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
1151 extern int *context_eflags_addr(os_context_t *context);
1154 extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
1155 extern void post_signal_tramp(void);
1156 extern void call_into_lisp_tramp(void);
/* Rewrite CONTEXT (pc, stack/frame pointers, argument registers) so
 * that returning from the signal handler calls the Lisp FUNCTION
 * instead of resuming the interrupted code.  Highly architecture
 * dependent; the original registers are saved so post_signal_tramp
 * can restore them when FUNCTION returns. */
1158 arrange_return_to_lisp_function(os_context_t *context, lispobj function)
1160 check_gc_signals_unblocked_in_sigset_or_lose
1161 (os_context_sigmask_addr(context));
1162 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
/* Non-x86 ports jump straight to the function's code vector. */
1163 void * fun=native_pointer(function);
1164 void *code = &(((struct simple_fun *) fun)->code);
1167 /* Build a stack frame showing `interrupted' so that the
1168 * user's backtrace makes (as much) sense (as usual) */
1170 /* FIXME: what about restoring fp state? */
1171 /* FIXME: what about restoring errno? */
1172 #ifdef LISP_FEATURE_X86
1173 /* Suppose the existence of some function that saved all
1174 * registers, called call_into_lisp, then restored GP registers and
1175 * returned. It would look something like this:
1183 pushl {address of function to call}
1184 call 0x8058db0 <call_into_lisp>
1191 * What we do here is set up the stack that call_into_lisp would
1192 * expect to see if it had been called by this code, and frob the
1193 * signal context so that signal return goes directly to call_into_lisp,
1194 * and when that function (and the lisp function it invoked) returns,
1195 * it returns to the second half of this imaginary function which
1196 * restores all registers and returns to C
1198 * For this to work, the latter part of the imaginary function
1199 * must obviously exist in reality. That would be post_signal_tramp
1202 u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
/* Darwin/x86 cannot write below the signal-context stack pointer
 * safely, so registers go into a freshly mapped save area instead,
 * and call_into_lisp_tramp rebuilds the frame from it. */
1204 #if defined(LISP_FEATURE_DARWIN)
1205 u32 *register_save_area = (u32 *)os_validate(0, 0x40);
1207 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: preparing to go to function %x, sp: %x\n", function, sp));
1208 FSHOW_SIGNAL((stderr, "/arrange_return_to_lisp_function: context: %x, &context %x\n", context, &context));
1210 /* 1. os_validate (malloc/mmap) register_save_block
1211 * 2. copy register state into register_save_block
1212 * 3. put a pointer to register_save_block in a register in the context
1213 * 4. set the context's EIP to point to a trampoline which:
1214 * a. builds the fake stack frame from the block
1215 * b. frees the block
1216 * c. calls the function
1219 *register_save_area = *os_context_pc_addr(context);
1220 *(register_save_area + 1) = function;
1221 *(register_save_area + 2) = *os_context_register_addr(context,reg_EDI);
1222 *(register_save_area + 3) = *os_context_register_addr(context,reg_ESI);
1223 *(register_save_area + 4) = *os_context_register_addr(context,reg_EDX);
1224 *(register_save_area + 5) = *os_context_register_addr(context,reg_ECX);
1225 *(register_save_area + 6) = *os_context_register_addr(context,reg_EBX);
1226 *(register_save_area + 7) = *os_context_register_addr(context,reg_EAX);
1227 *(register_save_area + 8) = *context_eflags_addr(context);
1229 *os_context_pc_addr(context) =
1230 (os_context_register_t) call_into_lisp_tramp;
/* The trampoline finds the save area through ECX. */
1231 *os_context_register_addr(context,reg_ECX) =
1232 (os_context_register_t) register_save_area;
/* Non-Darwin x86: build the fake call_into_lisp frame directly below
 * the interrupted stack pointer. */
1235 /* return address for call_into_lisp: */
1236 *(sp-15) = (u32)post_signal_tramp;
1237 *(sp-14) = function; /* args for call_into_lisp : function*/
1238 *(sp-13) = 0; /* arg array */
1239 *(sp-12) = 0; /* no. args */
1240 /* this order matches that used in POPAD */
1241 *(sp-11)=*os_context_register_addr(context,reg_EDI);
1242 *(sp-10)=*os_context_register_addr(context,reg_ESI);
1244 *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
1245 /* POPAD ignores the value of ESP: */
1247 *(sp-7)=*os_context_register_addr(context,reg_EBX);
1249 *(sp-6)=*os_context_register_addr(context,reg_EDX);
1250 *(sp-5)=*os_context_register_addr(context,reg_ECX);
1251 *(sp-4)=*os_context_register_addr(context,reg_EAX);
1252 *(sp-3)=*context_eflags_addr(context);
1253 *(sp-2)=*os_context_register_addr(context,reg_EBP);
1254 *(sp-1)=*os_context_pc_addr(context);
1258 #elif defined(LISP_FEATURE_X86_64)
/* x86-64: same scheme, but all callee-visible GP registers are saved
 * on the stack for post_signal_tramp, and the three call_into_lisp
 * arguments travel in RDI/RSI/RDX per the SysV ABI. */
1259 u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
1261 /* return address for call_into_lisp: */
1262 *(sp-18) = (u64)post_signal_tramp;
1264 *(sp-17)=*os_context_register_addr(context,reg_R15);
1265 *(sp-16)=*os_context_register_addr(context,reg_R14);
1266 *(sp-15)=*os_context_register_addr(context,reg_R13);
1267 *(sp-14)=*os_context_register_addr(context,reg_R12);
1268 *(sp-13)=*os_context_register_addr(context,reg_R11);
1269 *(sp-12)=*os_context_register_addr(context,reg_R10);
1270 *(sp-11)=*os_context_register_addr(context,reg_R9);
1271 *(sp-10)=*os_context_register_addr(context,reg_R8);
1272 *(sp-9)=*os_context_register_addr(context,reg_RDI);
1273 *(sp-8)=*os_context_register_addr(context,reg_RSI);
1274 /* skip RBP and RSP */
1275 *(sp-7)=*os_context_register_addr(context,reg_RBX);
1276 *(sp-6)=*os_context_register_addr(context,reg_RDX);
1277 *(sp-5)=*os_context_register_addr(context,reg_RCX);
1278 *(sp-4)=*os_context_register_addr(context,reg_RAX);
1279 *(sp-3)=*context_eflags_addr(context);
1280 *(sp-2)=*os_context_register_addr(context,reg_RBP);
1281 *(sp-1)=*os_context_pc_addr(context);
1283 *os_context_register_addr(context,reg_RDI) =
1284 (os_context_register_t)function; /* function */
1285 *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
1286 *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
/* Non-x86: fake frames so the Lisp backtrace stays sensible. */
1288 struct thread *th=arch_os_get_current_thread();
1289 build_fake_control_stack_frames(th,context);
1292 #ifdef LISP_FEATURE_X86
1294 #if !defined(LISP_FEATURE_DARWIN)
1295 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1296 *os_context_register_addr(context,reg_ECX) = 0;
1297 *os_context_register_addr(context,reg_EBP) = (os_context_register_t)(sp-2);
/* NetBSD exposes the user stack pointer as UESP rather than ESP. */
1299 *os_context_register_addr(context,reg_UESP) =
1300 (os_context_register_t)(sp-15);
1302 *os_context_register_addr(context,reg_ESP) = (os_context_register_t)(sp-15);
1303 #endif /* __NETBSD__ */
1304 #endif /* LISP_FEATURE_DARWIN */
1306 #elif defined(LISP_FEATURE_X86_64)
1307 *os_context_pc_addr(context) = (os_context_register_t)call_into_lisp;
1308 *os_context_register_addr(context,reg_RCX) = 0;
1309 *os_context_register_addr(context,reg_RBP) = (os_context_register_t)(sp-2);
1310 *os_context_register_addr(context,reg_RSP) = (os_context_register_t)(sp-18);
1312 /* this much of the calling convention is common to all
1314 *os_context_pc_addr(context) = (os_context_register_t)(unsigned long)code;
1315 *os_context_register_addr(context,reg_NARGS) = 0;
1316 *os_context_register_addr(context,reg_LIP) =
1317 (os_context_register_t)(unsigned long)code;
1318 *os_context_register_addr(context,reg_CFP) =
1319 (os_context_register_t)(unsigned long)current_control_frame_pointer;
/* Delay-slot architectures also need the next-pc register updated. */
1321 #ifdef ARCH_HAS_NPC_REGISTER
1322 *os_context_npc_addr(context) =
1323 4 + *os_context_pc_addr(context);
1325 #ifdef LISP_FEATURE_SPARC
1326 *os_context_register_addr(context,reg_CODE) =
1327 (os_context_register_t)(fun + FUN_POINTER_LOWTAG);
1329 FSHOW((stderr, "/arranged return to Lisp function (0x%lx)\n",
1333 #ifdef LISP_FEATURE_SB_THREAD
1335 /* FIXME: this function can go away when all lisp handlers are invoked
1336 * via arrange_return_to_lisp_function. */
/* Handler for the interrupt-thread signal: arrange for the target
 * thread to call RUN-INTERRUPTION when the handler returns, with all
 * deferrable signals blocked until the Lisp side re-enables them. */
1338 interrupt_thread_handler(int num, siginfo_t *info, void *v_context)
1340 os_context_t *context = (os_context_t*)arch_os_get_context(&v_context);
1342 FSHOW_SIGNAL((stderr,"/interrupt_thread_handler\n"));
1343 check_blockables_blocked_or_lose();
1345 /* let the handler enable interrupts again when it sees fit */
1346 sigaddset_deferrable(os_context_sigmask_addr(context));
1347 arrange_return_to_lisp_function(context,
1348 StaticSymbolFunction(RUN_INTERRUPTION));
1353 /* KLUDGE: Theoretically the approach we use for undefined alien
1354 * variables should work for functions as well, but on PPC/Darwin
1355 * we get bus error at bogus addresses instead, hence this workaround,
1356 * that has the added benefit of automatically discriminating between
1357 * functions and variables.
/* Called when Lisp code invokes an undefined alien function: signal
 * the corresponding Lisp error by funcalling the static handler. */
1360 undefined_alien_function(void)
1362 funcall0(StaticSymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
/* Dispatch a memory fault at ADDR when it falls on one of our guard
 * pages: control-stack guard (stack exhaustion), control-stack
 * return-guard (recovery after exhaustion), or the undefined-alien
 * page.  In the first and last cases the context is frobbed so the
 * handler "returns" into a Lisp error function. */
1366 handle_guard_page_triggered(os_context_t *context,os_vm_address_t addr)
1368 struct thread *th=arch_os_get_current_thread();
1370 /* note the os_context hackery here. When the signal handler returns,
1371 * it won't go back to what it was doing ... */
1372 if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
1373 addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
1374 /* We hit the end of the control stack: disable guard page
1375 * protection so the error handler has some headroom, protect the
1376 * previous page so that we can catch returns from the guard page
1377 * and restore it. */
1378 corruption_warning_and_maybe_lose("Control stack exhausted");
1379 protect_control_stack_guard_page(0);
1380 protect_control_stack_return_guard_page(1);
1382 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1383 /* For the unfortunate case, when the control stack is
1384 * exhausted in a signal handler. */
1385 unblock_signals_in_context_and_maybe_warn(context);
1387 arrange_return_to_lisp_function
1388 (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
1391 else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
1392 addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
1393 /* We're returning from the guard page: reprotect it, and
1394 * unprotect this one. This works even if we somehow missed
1395 * the return-guard-page, and hit it on our way to new
1396 * exhaustion instead. */
1397 fprintf(stderr, "INFO: Control stack guard page reprotected\n");
1398 protect_control_stack_guard_page(1);
1399 protect_control_stack_return_guard_page(0);
/* Fault on the page backing undefined alien variables: raise the
 * Lisp-level error on handler return. */
1402 else if (addr >= undefined_alien_address &&
1403 addr < undefined_alien_address + os_vm_page_size) {
1404 arrange_return_to_lisp_function
1405 (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
1412 * noise to install handlers
1415 #ifndef LISP_FEATURE_WIN32
1416 /* In Linux 2.4 synchronous signals (sigtrap & co) can be delivered if
1417 * they are blocked, in Linux 2.6 the default handler is invoked
1418 * instead that usually coredumps. One might hastily think that adding
1419 * SA_NODEFER helps, but until ~2.6.13 if SA_NODEFER is specified then
1420 * the whole sa_mask is ignored and the signal in question is not
1421 * added to the mask. That means if it's not blockable the
1422 * signal must be unblocked at the beginning of signal handlers.
1424 * It turns out that NetBSD's SA_NODEFER doesn't DTRT in a different
1425 * way: if SA_NODEFER is set and the signal is in sa_mask, the signal
1426 * will be unblocked in the sigmask during the signal handler. -- RMK
/* Result of the runtime SA_NODEFER probe below: -1 = not yet tested,
 * 0 = broken, 1 = works.  Written from a signal handler and polled by
 * see_if_sigaction_nodefer_works, hence volatile. */
1429 static volatile int sigaction_nodefer_works = -1;
/* Two arbitrary signals used only by the probe: one blocked via
 * sa_mask, one actually delivered. */
1431 #define SA_NODEFER_TEST_BLOCK_SIGNAL SIGABRT
1432 #define SA_NODEFER_TEST_KILL_SIGNAL SIGUSR1
/* Probe handler installed by see_if_sigaction_nodefer_works: inspects
 * the blocked-signal mask seen inside the handler and records in
 * sigaction_nodefer_works whether it matches what a correctly working
 * SA_NODEFER would produce. */
1435 sigaction_nodefer_test_handler(int signal, siginfo_t *info, void *void_context)
1437 sigset_t empty, current;
1439 sigemptyset(&empty);
/* SIG_BLOCK with an empty set is a portable way to read the current
 * mask into `current` without changing it.  (Fixed mojibake: the
 * original text's "&current" had been mangled into an HTML
 * currency-sign entity.) */
1440 thread_sigmask(SIG_BLOCK, &empty, &current);
1441 /* There should be exactly two blocked signals: the two we added
1442 * to sa_mask when setting up the handler. NetBSD doesn't block
1443 * the signal we're handling when SA_NODEFER is set; Linux before
1444 * 2.6.13 or so also doesn't block the other signal when
1445 * SA_NODEFER is set. */
1446 for(i = 1; i < NSIG; i++)
1447 if (sigismember(&current, i) !=
1448 (((i == SA_NODEFER_TEST_BLOCK_SIGNAL) || (i == signal)) ? 1 : 0)) {
1449 FSHOW_SIGNAL((stderr, "SA_NODEFER doesn't work, signal %d\n", i));
1450 sigaction_nodefer_works = 0;
/* Only report success if no mismatch set the flag to 0 above. */
1452 if (sigaction_nodefer_works == -1)
1453 sigaction_nodefer_works = 1;
/* Runtime probe for the SA_NODEFER bugs described above: install the
 * test handler with SA_NODEFER and a two-signal sa_mask, raise the
 * kill signal, spin until the handler records its verdict in
 * sigaction_nodefer_works, then restore the previous disposition. */
1457 see_if_sigaction_nodefer_works(void)
1459 struct sigaction sa, old_sa;
1461 sa.sa_flags = SA_SIGINFO | SA_NODEFER;
1462 sa.sa_sigaction = sigaction_nodefer_test_handler;
1463 sigemptyset(&sa.sa_mask);
1464 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_BLOCK_SIGNAL);
1465 sigaddset(&sa.sa_mask, SA_NODEFER_TEST_KILL_SIGNAL);
1466 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &sa, &old_sa);
1467 /* Make sure no signals are blocked. */
1470 sigemptyset(&empty);
1471 thread_sigmask(SIG_SETMASK, &empty, 0);
1473 kill(getpid(), SA_NODEFER_TEST_KILL_SIGNAL);
/* Busy-wait: the volatile flag is set by the handler. */
1474 while (sigaction_nodefer_works == -1);
1475 sigaction(SA_NODEFER_TEST_KILL_SIGNAL, &old_sa, NULL);
1478 #undef SA_NODEFER_TEST_BLOCK_SIGNAL
1479 #undef SA_NODEFER_TEST_KILL_SIGNAL
/* Wrapper used when SA_NODEFER is broken: explicitly unblock the
 * delivered signal, then run the ordinary immediate handler. */
1482 unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1486 sigemptyset(&unblock);
1487 sigaddset(&unblock, signal);
1488 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1489 interrupt_handle_now_handler(signal, info, void_context);
/* Same workaround as unblock_me_trampoline, but dispatching to the
 * C-level handler registered in interrupt_low_level_handlers[]. */
1493 low_level_unblock_me_trampoline(int signal, siginfo_t *info, void *void_context)
1497 sigemptyset(&unblock);
1498 sigaddset(&unblock, signal);
1499 thread_sigmask(SIG_UNBLOCK, &unblock, 0);
1500 (*interrupt_low_level_handlers[signal])(signal, info, void_context);
/* Install HANDLER as the C-level ("low level") handler for SIGNAL,
 * choosing the appropriate outer wrapper (deferral wrapper, unblock
 * trampoline, or the handler directly), and record it in
 * interrupt_low_level_handlers[] so install_handler knows not to
 * override it from Lisp. */
1504 undoably_install_low_level_interrupt_handler (int signal,
1505 interrupt_handler_t handler)
1507 struct sigaction sa;
1509 if (0 > signal || signal >= NSIG) {
1510 lose("bad signal number %d\n", signal);
/* SIG_DFL is installed verbatim; deferrable signals get the deferral
 * wrapper so they respect WITHOUT-INTERRUPTS/pseudo-atomic. */
1513 if (ARE_SAME_HANDLER(handler, SIG_DFL))
1514 sa.sa_sigaction = handler;
1515 else if (sigismember(&deferrable_sigset,signal))
1516 sa.sa_sigaction = low_level_maybe_now_maybe_later;
1517 /* The use of a trampoline appears to break the
1518 arch_os_get_context() workaround for SPARC/Linux. For now,
1519 don't use the trampoline (and so be vulnerable to the problems
1520 that SA_NODEFER is meant to solve. */
1521 #if !(defined(LISP_FEATURE_SPARC) && defined(LISP_FEATURE_LINUX))
1522 else if (!sigaction_nodefer_works &&
1523 !sigismember(&blockable_sigset, signal))
1524 sa.sa_sigaction = low_level_unblock_me_trampoline;
1527 sa.sa_sigaction = handler;
/* Block all blockable signals while the handler runs; only request
 * SA_NODEFER where the probe showed it behaves correctly. */
1529 sigcopyset(&sa.sa_mask, &blockable_sigset);
1530 sa.sa_flags = SA_SIGINFO | SA_RESTART
1531 | (sigaction_nodefer_works ? SA_NODEFER : 0);
/* Memory faults (and thread interruption, where it is a signal) may
 * arrive on an exhausted control stack, so run them on the altstack. */
1532 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
1533 if((signal==SIG_MEMORY_FAULT)
1534 #ifdef SIG_INTERRUPT_THREAD
1535 || (signal==SIG_INTERRUPT_THREAD)
1538 sa.sa_flags |= SA_ONSTACK;
1541 sigaction(signal, &sa, NULL);
/* Record the handler (0 for SIG_DFL) so install_handler can tell that
 * this signal is claimed at the C level. */
1542 interrupt_low_level_handlers[signal] =
1543 (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
1547 /* This is called from Lisp. */
/* Install HANDLER as the Lisp-visible handler for SIGNAL and return
 * the previous Lisp handler (as an integer).  If the signal is claimed
 * by a low-level C handler, only the interrupt_handlers[] table entry
 * is updated — the UNIX-level disposition is left alone.  The signal
 * is blocked around the update to avoid racing its own delivery. */
1549 install_handler(int signal, void handler(int, siginfo_t*, void*))
1551 #ifndef LISP_FEATURE_WIN32
1552 struct sigaction sa;
1554 union interrupt_handler oldhandler;
1556 FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
1559 sigaddset(&new, signal);
1560 thread_sigmask(SIG_BLOCK, &new, &old);
1562 FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%x\n",
1563 (unsigned int)interrupt_low_level_handlers[signal]));
1564 if (interrupt_low_level_handlers[signal]==0) {
/* Same wrapper-selection logic as the low-level installer, but
 * dispatching through the Lisp-level entry points. */
1565 if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
1566 ARE_SAME_HANDLER(handler, SIG_IGN))
1567 sa.sa_sigaction = handler;
1568 else if (sigismember(&deferrable_sigset, signal))
1569 sa.sa_sigaction = maybe_now_maybe_later;
1570 else if (!sigaction_nodefer_works &&
1571 !sigismember(&blockable_sigset, signal))
1572 sa.sa_sigaction = unblock_me_trampoline;
1574 sa.sa_sigaction = interrupt_handle_now_handler;
1576 sigcopyset(&sa.sa_mask, &blockable_sigset);
1577 sa.sa_flags = SA_SIGINFO | SA_RESTART |
1578 (sigaction_nodefer_works ? SA_NODEFER : 0);
1579 sigaction(signal, &sa, NULL);
1582 oldhandler = interrupt_handlers[signal];
1583 interrupt_handlers[signal].c = handler;
1585 thread_sigmask(SIG_SETMASK, &old, 0);
1587 FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
/* Return the old handler as a Lisp object / integer for the caller. */
1589 return (unsigned long)oldhandler.lisp;
1591 /* Probably-wrong Win32 hack */
1596 /* This must not go through lisp as it's allowed anytime, even when on
/* SIGABRT handler: abort the runtime immediately with a ldb-style
 * lose(), never re-entering Lisp. */
1599 sigabrt_handler(int signal, siginfo_t *info, void *void_context)
1601 lose("SIGABRT received.\n");
/* One-time signal-subsystem initialization: probe SA_NODEFER, build
 * the deferrable/blockable/gc signal sets, default every Lisp-level
 * handler to SIG_DFL, and claim SIGABRT at the C level. */
1605 interrupt_init(void)
1607 #ifndef LISP_FEATURE_WIN32
1609 SHOW("entering interrupt_init()");
1610 see_if_sigaction_nodefer_works();
1611 sigemptyset(&deferrable_sigset);
1612 sigemptyset(&blockable_sigset);
1613 sigemptyset(&gc_sigset);
1614 sigaddset_deferrable(&deferrable_sigset);
1615 sigaddset_blockable(&blockable_sigset);
1616 sigaddset_gc(&gc_sigset);
1618 /* Set up high level handler information. */
1619 for (i = 0; i < NSIG; i++) {
1620 interrupt_handlers[i].c =
1621 /* (The cast here blasts away the distinction between
1622 * SA_SIGACTION-style three-argument handlers and
1623 * signal(..)-style one-argument handlers, which is OK
1624 * because it works to call the 1-argument form where the
1625 * 3-argument form is expected.) */
1626 (void (*)(int, siginfo_t*, void*))SIG_DFL;
1628 undoably_install_low_level_interrupt_handler(SIGABRT, sigabrt_handler);
1629 SHOW("returning from interrupt_init()");
1633 #ifndef LISP_FEATURE_WIN32
/* Accessor exposing siginfo_t's si_code field (used from Lisp, which
 * cannot poke at the C struct layout directly). */
1635 siginfo_code(siginfo_t *info)
1637 return info->si_code;
/* Most recent faulting address, read by Lisp to report the error. */
1639 os_vm_address_t current_memory_fault_address;
/* Handle a memory fault that is not one of ours (not a GC write
 * barrier or guard page): record the address, warn (possibly losing),
 * and arrange for the Lisp MEMORY-FAULT-ERROR to be signalled when the
 * handler returns. */
1642 lisp_memory_fault_error(os_context_t *context, os_vm_address_t addr)
1644 /* FIXME: This is lossy: if we get another memory fault (eg. from
1645 * another thread) before lisp has read this, we lose the information.
1646 * However, since this is mostly informative, we'll live with that for
1647 * now -- some address is better then no address in this case.
1649 current_memory_fault_address = addr;
1650 /* To allow debugging memory faults in signal handlers and such. */
1651 corruption_warning_and_maybe_lose("Memory fault");
1652 unblock_signals_in_context_and_maybe_warn(context);
1653 arrange_return_to_lisp_function(context,
1654 StaticSymbolFunction(MEMORY_FAULT_ERROR));
/* Last-resort trap handling: enter a fake foreign-function-call frame,
 * wrap the context in a SAP, and hand it to the Lisp
 * UNHANDLED-TRAP-ERROR, which is expected not to return. */
1659 unhandled_trap_error(os_context_t *context)
1661 lispobj context_sap;
1662 fake_foreign_function_call(context);
/* GC signals must be deliverable so allocation (alloc_sap) is safe. */
1663 unblock_gc_signals();
1664 context_sap = alloc_sap(context);
1665 #ifndef LISP_FEATURE_WIN32
1666 thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
1668 funcall1(StaticSymbolFunction(UNHANDLED_TRAP_ERROR), context_sap);
1669 lose("UNHANDLED-TRAP-ERROR fell through");
1672 /* Common logic for trapping instructions. How we actually handle each
1673 * case is highly architecture dependent, but the overall shape is
/* Dispatch on the trap code embedded in a trapping instruction:
 * pending interrupts, internal errors, breakpoints of various
 * flavours, single-stepping, %PRIMITIVE HALT, and (default) traps
 * nobody claims. */
1676 handle_trap(os_context_t *context, int trap)
1679 case trap_PendingInterrupt:
1680 FSHOW((stderr, "/<trap pending interrupt>\n"));
/* Skip the trapping instruction, then run the deferred interrupt. */
1681 arch_skip_instruction(context);
1682 interrupt_handle_pending(context);
1686 FSHOW((stderr, "/<trap error/cerror %d>\n", trap));
/* Second argument says whether the error is continuable (cerror). */
1687 interrupt_internal_error(context, trap==trap_Cerror);
1689 case trap_Breakpoint:
1690 arch_handle_breakpoint(context);
1692 case trap_FunEndBreakpoint:
1693 arch_handle_fun_end_breakpoint(context);
1695 #ifdef trap_AfterBreakpoint
1696 case trap_AfterBreakpoint:
1697 arch_handle_after_breakpoint(context);
1700 #ifdef trap_SingleStepAround
1701 case trap_SingleStepAround:
1702 case trap_SingleStepBefore:
1703 arch_handle_single_step_trap(context, trap);
1707 fake_foreign_function_call(context);
1708 lose("%%PRIMITIVE HALT called; the party is over.\n");
1710 unhandled_trap_error(context);