+inline static void
+check_interrupts_enabled_or_lose(os_context_t *context)
+{
+ struct thread *thread=arch_os_get_current_thread();
+ if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
+ lose("interrupts not enabled\n");
+ if (arch_pseudo_atomic_atomic(context))
+ lose ("in pseudo atomic section\n");
+}
+
/* Save sigset (or the current sigmask if 0) if there is no pending
 * handler, because that means that deferrables are already blocked.
 * The purpose is to avoid losing the pending gc signal if a
 * deferrable interrupt async unwinds between clearing the pseudo
 * atomic and trapping to GC. */
+void
+maybe_save_gc_mask_and_block_deferrables(sigset_t *sigset)
+{
+#ifndef LISP_FEATURE_WIN32
+ struct thread *thread = arch_os_get_current_thread();
+ struct interrupt_data *data = thread->interrupt_data;
+ sigset_t oldset;
+ /* Obviously, this function is called when signals may not be
+ * blocked. Let's make sure we are not interrupted. */
+ block_blockable_signals(0, &oldset);
+#ifndef LISP_FEATURE_SB_THREAD
+ /* With threads a SIG_STOP_FOR_GC and a normal GC may also want to
+ * block. */
+ if (data->gc_blocked_deferrables)
+ lose("gc_blocked_deferrables already true\n");
+#endif
+ if ((!data->pending_handler) &&
+ (!data->gc_blocked_deferrables)) {
+ FSHOW_SIGNAL((stderr,"/setting gc_blocked_deferrables\n"));
+ data->gc_blocked_deferrables = 1;
+ if (sigset) {
+ /* This is the sigmask of some context. */
+ sigcopyset(&data->pending_mask, sigset);
+ sigaddset_deferrable(sigset);
+ thread_sigmask(SIG_SETMASK,&oldset,0);
+ return;
+ } else {
+ /* Operating on the current sigmask. Save oldset and
+ * unblock gc signals. In the end, this is equivalent to
+ * blocking the deferrables. */
+ sigcopyset(&data->pending_mask, &oldset);
+ thread_sigmask(SIG_UNBLOCK, &gc_sigset, 0);
+ return;
+ }
+ }
+ thread_sigmask(SIG_SETMASK,&oldset,0);
+#endif
+}
+
+/* Are we leaving WITH-GCING and already running with interrupts
+ * enabled, without the protection of *GC-INHIBIT* T and there is gc
+ * (or stop for gc) pending, but we haven't trapped yet? */
+int
+in_leaving_without_gcing_race_p(struct thread *thread)
+{
+ return ((SymbolValue(IN_WITHOUT_GCING,thread) != NIL) &&
+ (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) &&
+ (SymbolValue(GC_INHIBIT,thread) == NIL) &&
+ ((SymbolValue(GC_PENDING,thread) != NIL)
+#if defined(LISP_FEATURE_SB_THREAD)
+ || (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL)
+#endif
+ ));
+}
+
+/* Check our baroque invariants. */
+void
+check_interrupt_context_or_lose(os_context_t *context)
+{
+#ifndef LISP_FEATURE_WIN32
+ struct thread *thread = arch_os_get_current_thread();
+ struct interrupt_data *data = thread->interrupt_data;
+ int interrupt_deferred_p = (data->pending_handler != 0);
+ int interrupt_pending = (SymbolValue(INTERRUPT_PENDING,thread) != NIL);
+ sigset_t *sigset = os_context_sigmask_addr(context);
+ /* On PPC pseudo_atomic_interrupted is cleared when coming out of
+ * handle_allocation_trap. */
+#if defined(LISP_FEATURE_GENCGC) && !defined(LISP_FEATURE_PPC)
+ int interrupts_enabled = (SymbolValue(INTERRUPTS_ENABLED,thread) != NIL);
+ int gc_inhibit = (SymbolValue(GC_INHIBIT,thread) != NIL);
+ int gc_pending = (SymbolValue(GC_PENDING,thread) == T);
+ int pseudo_atomic_interrupted = get_pseudo_atomic_interrupted(thread);
+ int in_race_p = in_leaving_without_gcing_race_p(thread);
+ /* In the time window between leaving the *INTERRUPTS-ENABLED* NIL
+ * section and trapping, a SIG_STOP_FOR_GC would see the next
+ * check fail, for this reason sig_stop_for_gc handler does not
+ * call this function. */
+ if (interrupt_deferred_p) {
+ if (!(!interrupts_enabled || pseudo_atomic_interrupted || in_race_p))
+ lose("Stray deferred interrupt.\n");
+ }
+ if (gc_pending)
+ if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
+ lose("GC_PENDING, but why?\n");
+#if defined(LISP_FEATURE_SB_THREAD)
+ {
+ int stop_for_gc_pending =
+ (SymbolValue(STOP_FOR_GC_PENDING,thread) != NIL);
+ if (stop_for_gc_pending)
+ if (!(pseudo_atomic_interrupted || gc_inhibit || in_race_p))
+ lose("STOP_FOR_GC_PENDING, but why?\n");
+ if (pseudo_atomic_interrupted)
+ if (!(gc_pending || stop_for_gc_pending || interrupt_deferred_p))
+ lose("pseudo_atomic_interrupted, but why?\n");
+ }
+#else
+ if (pseudo_atomic_interrupted)
+ if (!(gc_pending || interrupt_deferred_p))
+ lose("pseudo_atomic_interrupted, but why?\n");
+#endif
+#endif
+ if (interrupt_pending && !interrupt_deferred_p)
+ lose("INTERRUPT_PENDING but not pending handler.\n");
+ if ((data->gc_blocked_deferrables) && interrupt_pending)
+ lose("gc_blocked_deferrables and interrupt pending\n.");
+ if (data->gc_blocked_deferrables)
+ check_deferrables_blocked_or_lose(sigset);
+ if (interrupt_pending || interrupt_deferred_p ||
+ data->gc_blocked_deferrables)
+ check_deferrables_blocked_or_lose(sigset);
+ else {
+ check_deferrables_unblocked_or_lose(sigset);
+ /* If deferrables are unblocked then we are open to signals
+ * that run lisp code. */
+ check_gc_signals_unblocked_or_lose(sigset);
+ }
+#endif
+}