+void
+check_deferrables_blocked_or_lose(sigset_t *sigset)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ if (!deferrables_blocked_p(sigset))
+ lose("deferrables unblocked\n");
+#endif
+}
+
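+/* Return true if all blockable signals are blocked in SIGSET, or in
+ * the current signal mask when SIGSET is null (as when called via
+ * check_blockables_blocked_or_lose(0)). */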
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+boolean
+blockables_blocked_p(sigset_t *sigset)
+{
+ return all_signals_blocked_p(sigset, &blockable_sigset, "blockable");
+}
+#endif
+
+void
+check_blockables_unblocked_or_lose(sigset_t *sigset)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ if (blockables_blocked_p(sigset))
+ lose("blockables blocked\n");
+#endif
+}
+
+void
+check_blockables_blocked_or_lose(sigset_t *sigset)
+{
+#if !defined(LISP_FEATURE_WIN32)
+ /* On Windows, there are no actual signals, but since the win32 port
+ * tracks the sigmask and checks it explicitly, some functions are
+ * still required to keep the mask set up properly. (After all, the
+ * goal of the sigmask emulation is to not have to change all the
+ * call sites in the first place.)
+ *
+ * However, this does not hold for all signals equally: While
+ * deferrables matter ("is interrupt-thread okay?"), it is not worth
+ * having to set up blockables properly (which include the
+ * non-existing GC signals).
+ *
+ * Yet, as the original comment explains it:
+ * Adjusting FREE-INTERRUPT-CONTEXT-INDEX* and other aspects of
+ * fake_foreign_function_call machinery are sometimes useful here[...].
+ *
+ * So we merely skip this assertion.
+ * -- DFL, trying to expand on a comment by AK.
+ */
+ if (!blockables_blocked_p(sigset))
+ lose("blockables unblocked\n");
+#endif
+}
+
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+#if !defined(LISP_FEATURE_WIN32)
+boolean
+gc_signals_blocked_p(sigset_t *sigset)
+{
+ return all_signals_blocked_p(sigset, &gc_sigset, "gc");
+}
+#endif
+
+void
+check_gc_signals_unblocked_or_lose(sigset_t *sigset)
+{
+#if !defined(LISP_FEATURE_WIN32)
+ if (gc_signals_blocked_p(sigset))
+ lose("gc signals blocked\n");
+#endif
+}
+
+void
+check_gc_signals_blocked_or_lose(sigset_t *sigset)
+{
+#if !defined(LISP_FEATURE_WIN32)
+ if (!gc_signals_blocked_p(sigset))
+ lose("gc signals unblocked\n");
+#endif
+}
+#endif
+
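+/* Convenience wrappers around block_signals()/unblock_signals() for
+ * the standard signal sets. On win32 without sb-thread there is no
+ * sigmask emulation, so these are no-ops there. */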
+void
+block_deferrable_signals(sigset_t *where, sigset_t *old)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ block_signals(&deferrable_sigset, where, old);
+#endif
+}
+
+void
+block_blockable_signals(sigset_t *where, sigset_t *old)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ block_signals(&blockable_sigset, where, old);
+#endif
+}
+
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+void
+block_gc_signals(sigset_t *where, sigset_t *old)
+{
+#ifndef LISP_FEATURE_WIN32
+ block_signals(&gc_sigset, where, old);
+#endif
+}
+#endif
+
+void
+unblock_deferrable_signals(sigset_t *where, sigset_t *old)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ if (interrupt_handler_pending_p())
+ lose("unblock_deferrable_signals: losing proposition\n");
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+ check_gc_signals_unblocked_or_lose(where);
+#endif
+ unblock_signals(&deferrable_sigset, where, old);
+#endif
+}
+
+void
+unblock_blockable_signals(sigset_t *where, sigset_t *old)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ unblock_signals(&blockable_sigset, where, old);
+#endif
+}
+
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+void
+unblock_gc_signals(sigset_t *where, sigset_t *old)
+{
+#ifndef LISP_FEATURE_WIN32
+ unblock_signals(&gc_sigset, where, old);
+#endif
+}
+#endif
+
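+/* Fix up the sigmask in CONTEXT before returning to Lisp: unblock gc
+ * signals with a corruption warning if they were blocked, and
+ * unblock deferrables unless a handler is pending. */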
+void
+unblock_signals_in_context_and_maybe_warn(os_context_t *context)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ sigset_t *sigset = os_context_sigmask_addr(context);
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+ if (all_signals_blocked_p(sigset, &gc_sigset, "gc")) {
+ corruption_warning_and_maybe_lose(
+"Enabling blocked gc signals to allow returning to Lisp without risking\n\
gc deadlocks. Since GC signals are only blocked in signal handlers when\n\
+they are not safe to interrupt at all, this is a pretty severe occurrence.\n");
+ unblock_gc_signals(sigset, 0);
+ }
+#endif
+ if (!interrupt_handler_pending_p()) {
+ unblock_deferrable_signals(sigset, 0);
+ }
+#endif
+}
+\f
+
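+/* Lose if Lisp-level interrupts are disabled or if we are inside a
+ * pseudo-atomic section. */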
+inline static void
+check_interrupts_enabled_or_lose(os_context_t *context)
+{
+ struct thread *thread=arch_os_get_current_thread();
+ if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
+ lose("interrupts not enabled\n");
+ if (arch_pseudo_atomic_atomic(context))
+ lose ("in pseudo atomic section\n");
+}
+
+/* Save sigset (or the current sigmask if 0) if there is no pending
+ * handler, because that means that deferrables are already blocked.
+ * The purpose is to avoid losing the pending gc signal if a
+ * deferrable interrupt async unwinds between clearing the pseudo
+ * atomic and trapping to GC.*/
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+void
+maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
+{
+#ifndef LISP_FEATURE_WIN32
+ struct thread *thread = arch_os_get_current_thread();
+ struct interrupt_data *data = thread->interrupt_data;
+ sigset_t oldset;
+ /* Obviously, this function is called when signals may not be
+ * blocked. Let's make sure we are not interrupted. */
+ block_blockable_signals(0, &oldset);
+#ifndef LISP_FEATURE_SB_THREAD
+    /* Without threads gc_blocked_deferrables must not be set on
+     * entry. (With threads a SIG_STOP_FOR_GC and a normal GC may
+     * both want to block deferrables, so this check is skipped.) */
+ if (data->gc_blocked_deferrables)
+ lose("gc_blocked_deferrables already true\n");
+#endif
+ if ((!data->pending_handler) &&
+ (!data->gc_blocked_deferrables)) {
+ FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
+ data->gc_blocked_deferrables = 1;
+ if (sigset) {
+ /* This is the sigmask of some context. */
+ sigcopyset(&data->pending_mask, sigset);
+ sigaddset_deferrable(sigset);
+ thread_sigmask(SIG_SETMASK,&oldset,0);
+ return;
+ } else {
+ /* Operating on the current sigmask. Save oldset and
+ * unblock gc signals. In the end, this is equivalent to
+ * blocking the deferrables. */
+ sigcopyset(&data->pending_mask, &oldset);
+ thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
+ return;
+ }
+ }
+ thread_sigmask(SIG_SETMASK,&oldset,0);
+#endif
+}
+#endif
+
+/* Are we leaving WITHOUT-GCING, already running with interrupts
+ * enabled and without the protection of *GC-INHIBIT* T, with a gc
+ * (or stop for gc) pending, but we haven't trapped yet? */
+int
+in_leaving_without_gcing_race_p(struct thread *thread)
+{
+ return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
+ (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
+ (SymbolValue(GC_INHIBIT,thread) == NIL) &&
+ ((SymbolValue(GC_PENDING,thread) != NIL)
+#if defined(LISP_FEATURE_SB_THREAD)
+ || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
+#endif
+ ));
+}
+
+/* Check our baroque invariants. */
+void
+check_interrupt_context_or_lose(os_context_t *context)
+{
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ struct thread *thread = arch_os_get_current_thread();
+ struct interrupt_data *data = thread->interrupt_data;
+ int interrupt_deferred_p = (data->pending_handler != 0);
+ int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
+ sigset_t *sigset = os_context_sigmask_addr(context);
+ /* On PPC pseudo_atomic_interrupted is cleared when coming out of
+ * handle_allocation_trap. */
+#if defined(LISP_FEATURE_GENCGC) && !defined(GENCGC_IS_PRECISE)
+ int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
+ int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
+ int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
+ int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
+ int in_race_p = in_leaving_without_gcing_race_p(thread);
+ /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
+ * section and trapping, a SIG_STOP_FOR_GC would see the next
+     * check fail; for this reason the sig_stop_for_gc handler does
+     * not call this function. */
+ if (interrupt_deferred_p) {
+ if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
+ lose("Stray deferred interrupt.\n");
+ }
+ if (gc_pending)
+ if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
+ lose("GC_PENDING, but why?\n");
+#if defined(LISP_FEATURE_SB_THREAD)
+ {
+ int stop_for_gc_pending =
+ (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
+ if (stop_for_gc_pending)
+ if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
+ lose("STOP_FOR_GC_PENDING, but why?\n");
+ if (pseudo_atomic_interrupted)
+ if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
+ lose("pseudo_atomic_interrupted, but why?\n");
+ }
+#else
+ if (pseudo_atomic_interrupted)
+ if (!(gc_pending || interrupt_deferred_p))
+ lose("pseudo_atomic_interrupted, but why?\n");
+#endif
+#endif
+    if (interrupt_pending && !interrupt_deferred_p)
+        lose("INTERRUPT_PENDING but no pending handler.\n");
+    if ((data->gc_blocked_deferrables) && interrupt_pending)
+        lose("gc_blocked_deferrables and interrupt pending.\n");
+    /* The gc_blocked_deferrables case is covered by the next check. */
+    if (interrupt_pending || interrupt_deferred_p ||
+        data->gc_blocked_deferrables)
+        check_deferrables_blocked_or_lose(sigset);
+ else {
+ check_deferrables_unblocked_or_lose(sigset);
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+ /* If deferrables are unblocked then we are open to signals
+ * that run lisp code. */
+ check_gc_signals_unblocked_or_lose(sigset);
+#endif
+ }
+#endif
+}
+\f
+/*
+ * utility routines used by various signal handlers
+ */
+
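+/* On platforms where the C stack is not also the control stack,
+ * synthesize control stack frames for the interrupted context so
+ * that the control stack remains walkable (e.g. for backtraces). */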
+static void
+build_fake_control_stack_frames(struct thread *th,os_context_t *context)
+{
+#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+
+ lispobj oldcont;
+
+ /* Build a fake stack frame or frames */
+
+ access_control_frame_pointer(th) =
+ (lispobj *)(uword_t)
+ (*os_context_register_addr(context, reg_CSP));
+ if ((lispobj *)(uword_t)
+ (*os_context_register_addr(context, reg_CFP))
+ == access_control_frame_pointer(th)) {
+ /* There is a small window during call where the callee's
+ * frame isn't built yet. */
+ if (lowtag_of(*os_context_register_addr(context, reg_CODE))
+ == FUN_POINTER_LOWTAG) {
+ /* We have called, but not built the new frame, so
+ * build it for them. */
+ access_control_frame_pointer(th)[0] =
+ *os_context_register_addr(context, reg_OCFP);
+ access_control_frame_pointer(th)[1] =
+ *os_context_register_addr(context, reg_LRA);
+ access_control_frame_pointer(th) += 8;
+ /* Build our frame on top of it. */
+ oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
+ }
+ else {
+ /* We haven't yet called, build our frame as if the
+ * partial frame wasn't there. */
+ oldcont = (lispobj)(*os_context_register_addr(context, reg_OCFP));
+ }
+ }
+ /* We can't tell whether we are still in the caller if it had to
+ * allocate a stack frame due to stack arguments. */
+ /* This observation provoked some past CMUCL maintainer to ask
+ * "Can anything strange happen during return?" */
+ else {
+ /* normal case */
+ oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
+ }
+
+ access_control_stack_pointer(th) = access_control_frame_pointer(th) + 8;
+
+ access_control_frame_pointer(th)[0] = oldcont;
+ access_control_frame_pointer(th)[1] = NIL;
+ access_control_frame_pointer(th)[2] =
+ (lispobj)(*os_context_register_addr(context, reg_CODE));
+#endif
+}
+
+/* Stores the context for gc to scavenge and builds fake stack
+ * frames. */
+void
+fake_foreign_function_call(os_context_t *context)
+{
+ int context_index;
+ struct thread *thread=arch_os_get_current_thread();
+
+ /* context_index incrementing must not be interrupted */
+ check_blockables_blocked_or_lose(0);
+
+ /* Get current Lisp state from context. */
+#ifdef reg_ALLOC
+#ifdef LISP_FEATURE_SB_THREAD
+ thread->pseudo_atomic_bits =
+#else
+ dynamic_space_free_pointer =
+ (lispobj *)(uword_t)
+#endif
+ (*os_context_register_addr(context, reg_ALLOC));
+/* fprintf(stderr,"dynamic_space_free_pointer: %p\n", */
+/* dynamic_space_free_pointer); */
+#if defined(LISP_FEATURE_ALPHA) || defined(LISP_FEATURE_MIPS)
+ if ((sword_t)dynamic_space_free_pointer & 1) {
+ lose("dead in fake_foreign_function_call, context = %x\n", context);
+ }
+#endif
+/* why don't PPC and SPARC do something like this: */
+#if defined(LISP_FEATURE_HPPA)
+ if ((sword_t)dynamic_space_free_pointer & 4) {
+ lose("dead in fake_foreign_function_call, context = %x, d_s_f_p = %x\n", context, dynamic_space_free_pointer);
+ }
+#endif
+#endif
+#ifdef reg_BSP
+ set_binding_stack_pointer(thread,
+ *os_context_register_addr(context, reg_BSP));
+#endif
+
+ build_fake_control_stack_frames(thread,context);
+
+ /* Do dynamic binding of the active interrupt context index
+ * and save the context in the context array. */
+ context_index =
+ fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
+
+ if (context_index >= MAX_INTERRUPTS) {
+ lose("maximum interrupt nesting depth (%d) exceeded\n", MAX_INTERRUPTS);
+ }
+
+ bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
+ make_fixnum(context_index + 1),thread);
+
+ thread->interrupt_contexts[context_index] = context;
+
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+ /* x86oid targets don't maintain the foreign function call flag at
+ * all, so leave them to believe that they are never in foreign
+ * code. */
+ foreign_function_call_active_p(thread) = 1;
+#endif
+}
+
+/* Blocks all blockable signals. If you are calling from a signal
+ * handler, the usual signal mask will be restored from the context
+ * when the handler finishes. Otherwise, be careful. */
+void
+undo_fake_foreign_function_call(os_context_t *context)
+{
+ struct thread *thread=arch_os_get_current_thread();
+ /* Block all blockable signals. */
+ block_blockable_signals(0, 0);
+
+ foreign_function_call_active_p(thread) = 0;
+
+ /* Undo dynamic binding of FREE_INTERRUPT_CONTEXT_INDEX */
+ unbind(thread);
+
+#if defined(reg_ALLOC) && !defined(LISP_FEATURE_SB_THREAD)
+ /* Put the dynamic space free pointer back into the context. */
+ *os_context_register_addr(context, reg_ALLOC) =
+ (uword_t) dynamic_space_free_pointer
+ | (*os_context_register_addr(context, reg_ALLOC)
+ & LOWTAG_MASK);
+ /*
+ ((uword_t)(*os_context_register_addr(context, reg_ALLOC))
+ & ~LOWTAG_MASK)
+ | ((uword_t) dynamic_space_free_pointer & LOWTAG_MASK);
+ */
+#endif
+#if defined(reg_ALLOC) && defined(LISP_FEATURE_SB_THREAD)
+ /* Put the pseudo-atomic bits and dynamic space free pointer back
+ * into the context (p-a-bits for p-a, and dynamic space free
+ * pointer for ROOM). */
+ *os_context_register_addr(context, reg_ALLOC) =
+ (uword_t) dynamic_space_free_pointer
+ | (thread->pseudo_atomic_bits & LOWTAG_MASK);
+ /* And clear them so we don't get bit later by call-in/call-out
+ * not updating them. */
+ thread->pseudo_atomic_bits = 0;
+#endif
+}
+
+/* a handler for the signal caused by execution of a trap opcode
+ * signalling an internal error */
+void
+interrupt_internal_error(os_context_t *context, boolean continuable)
+{
+ lispobj context_sap;
+
+ fake_foreign_function_call(context);
+
+ if (!internal_errors_enabled) {
+ describe_internal_error(context);
+ /* There's no good way to recover from an internal error
+ * before the Lisp error handling mechanism is set up. */
+ lose("internal error too early in init, can't recover\n");
+ }
+
+ /* Allocate the SAP object while the interrupts are still
+ * disabled. */
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+ unblock_gc_signals(0, 0);
+#endif
+ context_sap = alloc_sap(context);
+
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+#endif
+
+#if defined(LISP_FEATURE_LINUX) && defined(LISP_FEATURE_MIPS)
+ /* Workaround for blocked SIGTRAP. */
+ {
+ sigset_t newset;
+ sigemptyset(&newset);
+ sigaddset(&newset, SIGTRAP);
+ thread_sigmask(SIG_UNBLOCK, &newset, 0);
+ }
+#endif
+
+ SHOW("in interrupt_internal_error");
+#if QSHOW == 2
+ /* Display some rudimentary debugging information about the
+ * error, so that even if the Lisp error handler gets badly
+ * confused, we have a chance to determine what's going on. */
+ describe_internal_error(context);
+#endif
+ funcall2(StaticSymbolFunction(INTERNAL_ERROR), context_sap,
+ continuable ? T : NIL);
+
+ undo_fake_foreign_function_call(context); /* blocks signals again */
+ if (continuable)
+ arch_skip_instruction(context);
+}
+
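+/* Return true if a deferred signal handler is waiting to be run. */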
+boolean
+interrupt_handler_pending_p(void)
+{
+ struct thread *thread = arch_os_get_current_thread();
+ struct interrupt_data *data = thread->interrupt_data;
+ return (data->pending_handler != 0);
+}
+
+void
+interrupt_handle_pending(os_context_t *context)
+{
+    /* There are four ways we can get here. First, if an interrupt
+ * occurs within pseudo-atomic, it will be deferred, and we'll
+ * trap to here at the end of the pseudo-atomic block. Second, if
+ * the GC (in alloc()) decides that a GC is required, it will set
+ * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
+ * and alloc() is always called from within pseudo-atomic, and
+ * thus we end up here again. Third, when calling GC-ON or at the
+ * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
+ * here if there is a pending GC. Fourth, ahem, at the end of
+ * WITHOUT-INTERRUPTS (bar complications with nesting).
+ *
+     * A fifth way happens with safepoints: In addition to a stop for
+ * GC that is pending, there are thruptions. Both mechanisms are
+ * mostly signal-free, yet also of an asynchronous nature, so it makes
+ * sense to let interrupt_handle_pending take care of running them:
+ * It gets run precisely at those places where it is safe to process
+ * pending asynchronous tasks. */
+
+ struct thread *thread = arch_os_get_current_thread();
+ struct interrupt_data *data = thread->interrupt_data;
+
+ if (arch_pseudo_atomic_atomic(context)) {
+ lose("Handling pending interrupt in pseudo atomic.");
+ }
+
+ FSHOW_SIGNAL((stderr, "/entering interrupt_handle_pending\n"));
+
+ check_blockables_blocked_or_lose(0);
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+ /*
+     * (On safepoint builds, there is neither gc_blocked_deferrables
+     * nor SIG_STOP_FOR_GC.)
+ */
+ /* If GC/SIG_STOP_FOR_GC struck during PA and there was no pending
+ * handler, then the pending mask was saved and
+ * gc_blocked_deferrables set. Hence, there can be no pending
+ * handler and it's safe to restore the pending mask.
+ *
+     * Note that if gc_blocked_deferrables is false we may still have
+     * to GC. In this case, we are coming out of a WITHOUT-GCING or a
+     * pseudo atomic was interrupted by a deferrable first. */
+ if (data->gc_blocked_deferrables) {
+ if (data->pending_handler)
+ lose("GC blocked deferrables but still got a pending handler.");
+ if (SymbolValue(GC_INHIBIT,thread)!=NIL)
+ lose("GC blocked deferrables while GC is inhibited.");
+ /* Restore the saved signal mask from the original signal (the
+ * one that interrupted us during the critical section) into
+ * the os_context for the signal we're currently in the
+ * handler for. This should ensure that when we return from
+ * the handler the blocked signals are unblocked. */
+#ifndef LISP_FEATURE_WIN32
+ sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
+#endif
+ data->gc_blocked_deferrables = 0;
+ }
+#endif
+
+ if (SymbolValue(GC_INHIBIT,thread)==NIL) {
+ void *original_pending_handler = data->pending_handler;
+
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+ /* handles the STOP_FOR_GC_PENDING case, plus THRUPTIONS */
+ if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL
+# ifdef LISP_FEATURE_SB_THRUPTION
+ || (SymbolValue(THRUPTION_PENDING,thread) != NIL
+ && SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
+# endif
+ )
+ /* We ought to take this chance to do a pitstop now. */
+ thread_in_lisp_raised(context);
+#elif defined(LISP_FEATURE_SB_THREAD)
+ if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL) {
+ /* STOP_FOR_GC_PENDING and GC_PENDING are cleared by
+ * the signal handler if it actually stops us. */
+ arch_clear_pseudo_atomic_interrupted(context);
+ sig_stop_for_gc_handler(SIG_STOP_FOR_GC,NULL,context);
+ } else
+#endif
+            /* Test for T and not for != NIL since the value :IN-PROGRESS
+             * is used in SUB-GC as part of the mechanism to suppress
+             * recursive GCs. */
+ if (SymbolValue(GC_PENDING,thread) == T) {
+
+            /* Two reasons for doing this. First, if there is a
+             * pending handler we don't want it to run. Second, we are
+ * going to clear pseudo atomic interrupted to avoid
+ * spurious trapping on every allocation in SUB_GC and
+ * having a pending handler with interrupts enabled and
+ * without pseudo atomic interrupted breaks an
+ * invariant. */
+ if (data->pending_handler) {
+ bind_variable(ALLOW_WITH_INTERRUPTS, NIL, thread);
+ bind_variable(INTERRUPTS_ENABLED, NIL, thread);
+ }
+
+ arch_clear_pseudo_atomic_interrupted(context);
+
+ /* GC_PENDING is cleared in SUB-GC, or if another thread
+ * is doing a gc already we will get a SIG_STOP_FOR_GC and
+ * that will clear it.
+ *
+             * If there is a pending handler or GC was triggered in a
+ * signal handler then maybe_gc won't run POST_GC and will
+ * return normally. */
+ if (!maybe_gc(context))
+ lose("GC not inhibited but maybe_gc did not GC.");
+
+ if (data->pending_handler) {
+ unbind(thread);
+ unbind(thread);
+ }
+ } else if (SymbolValue(GC_PENDING,thread) != NIL) {
+ /* It's not NIL or T so GC_PENDING is :IN-PROGRESS. If
+ * GC-PENDING is not NIL then we cannot trap on pseudo
+ * atomic due to GC (see if(GC_PENDING) logic in
+             * cheneygc.c and gencgc.c), plus there is an outer
+ * WITHOUT-INTERRUPTS SUB_GC, so how did we end up
+ * here? */
+ lose("Trapping to run pending handler while GC in progress.");
+ }
+
+ check_blockables_blocked_or_lose(0);
+
+ /* No GC shall be lost. If SUB_GC triggers another GC then
+ * that should be handled on the spot. */
+ if (SymbolValue(GC_PENDING,thread) != NIL)
+ lose("GC_PENDING after doing gc.");
+#ifdef THREADS_USING_GCSIGNAL
+ if (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
+ lose("STOP_FOR_GC_PENDING after doing gc.");
+#endif
+ /* Check two things. First, that gc does not clobber a handler
+ * that's already pending. Second, that there is no interrupt
+ * lossage: if original_pending_handler was NULL then even if
+ * an interrupt arrived during GC (POST-GC, really) it was
+ * handled. */
+ if (original_pending_handler != data->pending_handler)
+ lose("pending handler changed in gc: %x -> %x.",
+ original_pending_handler, data->pending_handler);
+ }
+
+#ifndef LISP_FEATURE_WIN32
+ /* There may be no pending handler, because it was only a gc that
+ * had to be executed or because Lisp is a bit too eager to call
+ * DO-PENDING-INTERRUPT. */
+ if ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
+ (data->pending_handler)) {
+ /* No matter how we ended up here, clear both
+ * INTERRUPT_PENDING and pseudo atomic interrupted. It's safe
+ * because we checked above that there is no GC pending. */
+ SetSymbolValue(INTERRUPT_PENDING, NIL, thread);
+ arch_clear_pseudo_atomic_interrupted(context);
+ /* Restore the sigmask in the context. */
+ sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
+ run_deferred_handler(data, context);
+ }
+#ifdef LISP_FEATURE_SB_THRUPTION
+ if (SymbolValue(THRUPTION_PENDING,thread)==T)
+ /* Special case for the following situation: There is a
+ * thruption pending, but a signal had been deferred. The
+ * pitstop at the top of this function could only take care
+ * of GC, and skipped the thruption, so we need to try again
+ * now that INTERRUPT_PENDING and the sigmask have been
+ * reset. */
+ while (check_pending_thruptions(context))
+ ;
+#endif
+#endif
+#ifdef LISP_FEATURE_GENCGC
+ if (get_pseudo_atomic_interrupted(thread))
+ lose("pseudo_atomic_interrupted after interrupt_handle_pending\n");
+#endif
+ /* It is possible that the end of this function was reached
+     * without ever actually doing anything, as the tests in Lisp for
+ * when to call receive-pending-interrupt are not exact. */
+ FSHOW_SIGNAL((stderr, "/exiting interrupt_handle_pending\n"));
+}
+\f
+
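+/* Dispatch SIGNAL to the installed handler right now: a Lisp handler
+ * is called with SAPs for INFO and CONTEXT from within a fake foreign
+ * function call, while a C handler runs with the context's sigmask
+ * restored. */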
+void
+interrupt_handle_now(int signal, siginfo_t *info, os_context_t *context)
+{
+ boolean were_in_lisp;
+ union interrupt_handler handler;
+
+ check_blockables_blocked_or_lose(0);
+
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ if (sigismember(&deferrable_sigset,signal))
+ check_interrupts_enabled_or_lose(context);
+#endif
+
+ handler = interrupt_handlers[signal];
+
+ if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
+ return;
+ }
+
+ were_in_lisp = !foreign_function_call_active_p(arch_os_get_current_thread());
+ if (were_in_lisp)
+ {
+ fake_foreign_function_call(context);
+ }
+
+ FSHOW_SIGNAL((stderr,
+ "/entering interrupt_handle_now(%d, info, context)\n",
+ signal));
+
+ if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
+
+ /* This can happen if someone tries to ignore or default one
+ * of the signals we need for runtime support, and the runtime
+ * support decides to pass on it. */
+ lose("no handler for signal %d in interrupt_handle_now(..)\n", signal);
+
+ } else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
+ /* Once we've decided what to do about contexts in a
+ * return-elsewhere world (the original context will no longer
+ * be available; should we copy it or was nobody using it anyway?)
+ * then we should convert this to return-elsewhere */
+
+ /* CMUCL comment said "Allocate the SAPs while the interrupts
+ * are still disabled.". I (dan, 2003.08.21) assume this is
+ * because we're not in pseudoatomic and allocation shouldn't
+ * be interrupted. In which case it's no longer an issue as
+ * all our allocation from C now goes through a PA wrapper,
+ * but still, doesn't hurt.
+ *
+ * Yeah, but non-gencgc platforms don't really wrap allocation
+ * in PA. MG - 2005-08-29 */
+
+ lispobj info_sap, context_sap;
+ /* Leave deferrable signals blocked, the handler itself will
+ * allow signals again when it sees fit. */
+#ifndef LISP_FEATURE_SB_SAFEPOINT
+ unblock_gc_signals(0, 0);
+#endif
+ context_sap = alloc_sap(context);
+ info_sap = alloc_sap(info);
+
+ FSHOW_SIGNAL((stderr,"/calling Lisp-level handler\n"));
+
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+ WITH_GC_AT_SAFEPOINTS_ONLY()
+#endif
+ funcall3(handler.lisp,
+ make_fixnum(signal),
+ info_sap,
+ context_sap);
+ } else {
+ /* This cannot happen in sane circumstances. */
+
+ FSHOW_SIGNAL((stderr,"/calling C-level handler\n"));
+
+#if !defined(LISP_FEATURE_WIN32) || defined(LISP_FEATURE_SB_THREAD)
+ /* Allow signals again. */
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+ (*handler.c)(signal, info, context);
+#endif
+ }
+
+ if (were_in_lisp)
+ {
+ undo_fake_foreign_function_call(context); /* block signals again */
+ }
+
+ FSHOW_SIGNAL((stderr,
+ "/returning from interrupt_handle_now(%d, info, context)\n",
+ signal));
+}
+
+/* This is called at the end of a critical section if the indications
+ * are that some signal was deferred during the section. Note that as
+ * far as C or the kernel is concerned we dealt with the signal
+ * already; we're just doing the Lisp-level processing now that we
+     * put off then. */
+static void
+run_deferred_handler(struct interrupt_data *data, os_context_t *context)
+{
+ /* The pending_handler may enable interrupts and then another
+     * interrupt may hit and overwrite interrupt_data, so reset the
+ * pending handler before calling it. Trust the handler to finish
+ * with the siginfo before enabling interrupts. */
+ void (*pending_handler) (int, siginfo_t*, os_context_t*) =
+ data->pending_handler;
+
+ data->pending_handler=0;
+ FSHOW_SIGNAL((stderr, "/running deferred handler %p\n", pending_handler));
+ (*pending_handler)(data->pending_signal,&(data->pending_info), context);
+}
+
+#ifndef LISP_FEATURE_WIN32
+boolean
+maybe_defer_handler(void *handler, struct interrupt_data *data,
+ int signal, siginfo_t *info, os_context_t *context)
+{
+ struct thread *thread=arch_os_get_current_thread();
+
+ check_blockables_blocked_or_lose(0);
+
+ if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
+ lose("interrupt already pending\n");
+ if (thread->interrupt_data->pending_handler)
+ lose("there is a pending handler already (PA)\n");
+ if (data->gc_blocked_deferrables)
+ lose("maybe_defer_handler: gc_blocked_deferrables true\n");
+ check_interrupt_context_or_lose(context);
+ /* If interrupts are disabled then INTERRUPT_PENDING is set and
+     * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
+ * atomic section inside a WITHOUT-INTERRUPTS.
+ *
+ * Also, if in_leaving_without_gcing_race_p then
+ * interrupt_handle_pending is going to be called soon, so
+ * stashing the signal away is safe.
+ */
+ if ((SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) ||
+ in_leaving_without_gcing_race_p(thread)) {
+ FSHOW_SIGNAL((stderr,
+ "/maybe_defer_handler(%x,%d): deferred (RACE=%d)\n",
+ (unsigned int)handler,signal,
+ in_leaving_without_gcing_race_p(thread)));
+ store_signal_data_for_later(data,handler,signal,info,context);
+ SetSymbolValue(INTERRUPT_PENDING, T,thread);
+ check_interrupt_context_or_lose(context);
+ return 1;
+ }
+ /* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
+ * actually use its argument for anything on x86, so this branch
+ * may succeed even when context is null (gencgc alloc()) */
+ if (arch_pseudo_atomic_atomic(context)) {
+ FSHOW_SIGNAL((stderr,
+ "/maybe_defer_handler(%x,%d): deferred(PA)\n",
+ (unsigned int)handler,signal));
+ store_signal_data_for_later(data,handler,signal,info,context);
+ arch_set_pseudo_atomic_interrupted(context);
+ check_interrupt_context_or_lose(context);
+ return 1;
+ }
+ FSHOW_SIGNAL((stderr,
+ "/maybe_defer_handler(%x,%d): not deferred\n",
+ (unsigned int)handler,signal));
+ return 0;
+}
+
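+/* Stash HANDLER, SIGNAL and INFO in DATA for run_deferred_handler,
+ * and arrange for deferrables to remain blocked in CONTEXT's sigmask
+ * until the deferred handler has run. */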
+static void
+store_signal_data_for_later (struct interrupt_data *data, void *handler,
+ int signal,
+ siginfo_t *info, os_context_t *context)
+{
+ if (data->pending_handler)
+ lose("tried to overwrite pending interrupt handler %x with %x\n",
+ data->pending_handler, handler);
+ if (!handler)
+ lose("tried to defer null interrupt handler\n");
+ data->pending_handler = handler;
+ data->pending_signal = signal;
+ if(info)
+ memcpy(&(data->pending_info), info, sizeof(siginfo_t));
+
+ FSHOW_SIGNAL((stderr, "/store_signal_data_for_later: signal: %d\n",
+ signal));
+
+ if(!context)
+ lose("Null context");
+
+ /* the signal mask in the context (from before we were
+ * interrupted) is copied to be restored when run_deferred_handler
+ * happens. Then the usually-blocked signals are added to the mask
+ * in the context so that we are running with blocked signals when
+ * the handler returns */
+ sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
+ sigaddset_deferrable(os_context_sigmask_addr(context));
+}
+
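+/* Handler installed for deferrable signals: defer the signal when in
+ * pseudo-atomic or when interrupts are disabled, otherwise handle it
+ * immediately. */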
+static void
+maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
+{
+ SAVE_ERRNO(signal,context,void_context);
+ struct thread *thread = arch_os_get_current_thread();
+ struct interrupt_data *data = thread->interrupt_data;
+ if(!maybe_defer_handler(interrupt_handle_now,data,signal,info,context))
+ interrupt_handle_now(signal, info, context);
+ RESTORE_ERRNO;
+}
+
+static void
+low_level_interrupt_handle_now(int signal, siginfo_t *info,
+ os_context_t *context)
+{
+ /* No FP control fixage needed, caller has done that. */
+ check_blockables_blocked_or_lose(0);
+ check_interrupts_enabled_or_lose(context);
+ (*interrupt_low_level_handlers[signal])(signal, info, context);
+ /* No Darwin context fixage needed, caller does that. */
+}