* In that case, the Lisp-level handler is stored in interrupt_handlers[..]
* and interrupt_low_level_handlers[..] is cleared.
*
- * However, some signals need special handling, e.g.
+ * However, some signals need special handling, e.g.
*
* o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
* garbage collector to detect violations of write protection,
* o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
* pseudo-atomic sections, and some classes of error (e.g. "function
* not defined"). This never goes anywhere near the Lisp handlers at all.
- * See runtime/alpha-arch.c and code/signal.lisp
- *
+ * See runtime/alpha-arch.c and code/signal.lisp
+ *
* - WHN 20000728, dan 20010128 */
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <errno.h>
+#include "sbcl.h"
#include "runtime.h"
#include "arch.h"
-#include "sbcl.h"
#include "os.h"
#include "interrupt.h"
#include "globals.h"
#include "interr.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
+#include "genesis/cons.h"
void run_deferred_handler(struct interrupt_data *data, void *v_context) ;
-static void store_signal_data_for_later (struct interrupt_data *data,
- void *handler, int signal,
- siginfo_t *info,
- os_context_t *context);
+static void store_signal_data_for_later (struct interrupt_data *data,
+ void *handler, int signal,
+ siginfo_t *info,
+ os_context_t *context);
boolean interrupt_maybe_gc_int(int signal, siginfo_t *info, void *v_context);
-extern volatile lispobj all_threads_lock;
-extern volatile int countdown_to_gc;
-
-/*
- * This is a workaround for some slightly silly Linux/GNU Libc
- * behaviour: glibc defines sigset_t to support 1024 signals, which is
- * more than the kernel. This is usually not a problem, but becomes
- * one when we want to save a signal mask from a ucontext, and restore
- * it later into another ucontext: the ucontext is allocated on the
- * stack by the kernel, so copying a libc-sized sigset_t into it will
- * overflow and cause other data on the stack to be corrupted */
-
-#define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
-
void sigaddset_blockable(sigset_t *s)
{
sigaddset(s, SIGHUP);
#ifdef LISP_FEATURE_SB_THREAD
sigaddset(s, SIG_STOP_FOR_GC);
sigaddset(s, SIG_INTERRUPT_THREAD);
- sigaddset(s, SIG_THREAD_EXIT);
#endif
}
+static sigset_t blockable_sigset;
+
+inline static void check_blockables_blocked_or_lose()
+{
+ /* Get the current sigmask, by blocking the empty set. */
+ sigset_t empty,current;
+ int i;
+ sigemptyset(&empty);
+    thread_sigmask(SIG_BLOCK, &empty, &current);
+ for(i=0;i<NSIG;i++) {
+        if (sigismember(&blockable_sigset, i) && !sigismember(&current, i))
+ lose("blockable signal %d not blocked",i);
+ }
+}
+
+inline static void check_interrupts_enabled_or_lose(os_context_t *context)
+{
+ struct thread *thread=arch_os_get_current_thread();
+ if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
+ lose("interrupts not enabled");
+ if (
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+ (!foreign_function_call_active) &&
+#endif
+ arch_pseudo_atomic_atomic(context))
+ lose ("in pseudo atomic section");
+}
+
/* When we catch an internal error, should we pass it back to Lisp to
* be handled in a high-level way? (Early in cold init, the answer is
* 'no', because Lisp is still too brain-dead to handle anything.
* mask ought to be clear anyway most of the time, but may be non-zero
* if we were interrupted e.g. while waiting for a queue. */
-#if 1
-void reset_signal_mask ()
+void reset_signal_mask ()
{
sigset_t new;
sigemptyset(&new);
- sigprocmask(SIG_SETMASK,&new,0);
+ thread_sigmask(SIG_SETMASK,&new,0);
}
-#else
-void reset_signal_mask ()
+
+void block_blockable_signals ()
{
- sigset_t new,old;
- int i;
- int wrong=0;
- sigemptyset(&new);
- sigprocmask(SIG_SETMASK,&new,&old);
- for(i=1; i<NSIG; i++) {
- if(sigismember(&old,i)) {
- fprintf(stderr,
- "Warning: signal %d is masked: this is unexpected\n",i);
- wrong=1;
- }
- }
- if(wrong)
- fprintf(stderr,"If this version of SBCL is less than three months old, please report this.\nOtherwise, please try a newer version first\n. Reset signal mask.\n");
+ sigset_t block;
+ sigemptyset(&block);
+ sigaddset_blockable(&block);
+ thread_sigmask(SIG_BLOCK, &block, 0);
}
-#endif
-
-
\f
/*
* utility routines used by various signal handlers
*/
-void
+void
build_fake_control_stack_frames(struct thread *th,os_context_t *context)
{
-#ifndef LISP_FEATURE_X86
-
+#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+
lispobj oldcont;
/* Build a fake stack frame or frames */
current_control_frame_pointer =
- (lispobj *)(*os_context_register_addr(context, reg_CSP));
+ (lispobj *)(*os_context_register_addr(context, reg_CSP));
if ((lispobj *)(*os_context_register_addr(context, reg_CFP))
- == current_control_frame_pointer) {
+ == current_control_frame_pointer) {
/* There is a small window during call where the callee's
* frame isn't built yet. */
if (lowtag_of(*os_context_register_addr(context, reg_CODE))
- == FUN_POINTER_LOWTAG) {
+ == FUN_POINTER_LOWTAG) {
/* We have called, but not built the new frame, so
* build it for them. */
current_control_frame_pointer[0] =
- *os_context_register_addr(context, reg_OCFP);
+ *os_context_register_addr(context, reg_OCFP);
current_control_frame_pointer[1] =
- *os_context_register_addr(context, reg_LRA);
+ *os_context_register_addr(context, reg_LRA);
current_control_frame_pointer += 8;
/* Build our frame on top of it. */
oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
current_control_frame_pointer[0] = oldcont;
current_control_frame_pointer[1] = NIL;
current_control_frame_pointer[2] =
- (lispobj)(*os_context_register_addr(context, reg_CODE));
+ (lispobj)(*os_context_register_addr(context, reg_CODE));
#endif
}
int context_index;
struct thread *thread=arch_os_get_current_thread();
+ /* context_index incrementing must not be interrupted */
+ check_blockables_blocked_or_lose();
+
/* Get current Lisp state from context. */
#ifdef reg_ALLOC
dynamic_space_free_pointer =
- (lispobj *)(*os_context_register_addr(context, reg_ALLOC));
-#ifdef alpha
+ (lispobj *)(*os_context_register_addr(context, reg_ALLOC));
+#if defined(LISP_FEATURE_ALPHA)
if ((long)dynamic_space_free_pointer & 1) {
- lose("dead in fake_foreign_function_call, context = %x", context);
+ lose("dead in fake_foreign_function_call, context = %x", context);
}
#endif
#endif
#ifdef reg_BSP
current_binding_stack_pointer =
- (lispobj *)(*os_context_register_addr(context, reg_BSP));
+ (lispobj *)(*os_context_register_addr(context, reg_BSP));
#endif
build_fake_control_stack_frames(thread,context);
/* Do dynamic binding of the active interrupt context index
* and save the context in the context array. */
context_index =
- fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
-
+ fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
+
if (context_index >= MAX_INTERRUPTS) {
lose("maximum interrupt nesting depth (%d) exceeded", MAX_INTERRUPTS);
}
bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
- make_fixnum(context_index + 1),thread);
+ make_fixnum(context_index + 1),thread);
thread->interrupt_contexts[context_index] = context;
}
/* blocks all blockable signals. If you are calling from a signal handler,
- * the usual signal mask will be restored from the context when the handler
+ * the usual signal mask will be restored from the context when the handler
* finishes. Otherwise, be careful */
void
{
struct thread *thread=arch_os_get_current_thread();
/* Block all blockable signals. */
- sigset_t block;
- sigemptyset(&block);
- sigaddset_blockable(&block);
- sigprocmask(SIG_BLOCK, &block, 0);
+ block_blockable_signals();
/* going back into Lisp */
foreign_function_call_active = 0;
* signalling an internal error */
void
interrupt_internal_error(int signal, siginfo_t *info, os_context_t *context,
- boolean continuable)
+ boolean continuable)
{
lispobj context_sap = 0;
+ check_blockables_blocked_or_lose();
fake_foreign_function_call(context);
/* Allocate the SAP object while the interrupts are still
* disabled. */
if (internal_errors_enabled) {
- context_sap = alloc_sap(context);
+ context_sap = alloc_sap(context);
}
- sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
if (internal_errors_enabled) {
SHOW("in interrupt_internal_error");
-#if QSHOW
- /* Display some rudimentary debugging information about the
- * error, so that even if the Lisp error handler gets badly
- * confused, we have a chance to determine what's going on. */
- describe_internal_error(context);
-#endif
- funcall2(SymbolFunction(INTERNAL_ERROR), context_sap,
- continuable ? T : NIL);
+#ifdef QSHOW
+ /* Display some rudimentary debugging information about the
+ * error, so that even if the Lisp error handler gets badly
+ * confused, we have a chance to determine what's going on. */
+ describe_internal_error(context);
+#endif
+ funcall2(SymbolFunction(INTERNAL_ERROR), context_sap,
+ continuable ? T : NIL);
} else {
- describe_internal_error(context);
- /* There's no good way to recover from an internal error
- * before the Lisp error handling mechanism is set up. */
- lose("internal error too early in init, can't recover");
+ describe_internal_error(context);
+ /* There's no good way to recover from an internal error
+ * before the Lisp error handling mechanism is set up. */
+ lose("internal error too early in init, can't recover");
}
undo_fake_foreign_function_call(context); /* blocks signals again */
if (continuable) {
- arch_skip_instruction(context);
+ arch_skip_instruction(context);
}
}
struct thread *thread;
struct interrupt_data *data;
+ check_blockables_blocked_or_lose();
+ check_interrupts_enabled_or_lose(context);
+
thread=arch_os_get_current_thread();
data=thread->interrupt_data;
- /* FIXME I'm not altogether sure this is appropriate if we're
- * here as the result of a pseudo-atomic */
- SetSymbolValue(INTERRUPT_PENDING, NIL,thread);
-
- /* restore the saved signal mask from the original signal (the
- * one that interrupted us during the critical section) into the
- * os_context for the signal we're currently in the handler for.
- * This should ensure that when we return from the handler the
- * blocked signals are unblocked */
-
- memcpy(os_context_sigmask_addr(context), &data->pending_mask,
- REAL_SIGSET_SIZE_BYTES);
-
- sigemptyset(&data->pending_mask);
- /* This will break on sparc linux: the deferred handler really wants
- * to be called with a void_context */
- run_deferred_handler(data,(void *)context);
+
+ /* Pseudo atomic may trigger several times for a single interrupt,
+ * and while without-interrupts should not, a false trigger by
+ * pseudo-atomic may eat a pending handler even from
+ * without-interrupts. */
+ if (data->pending_handler) {
+
+ /* If we're here as the result of a pseudo-atomic as opposed
+ * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already
+ * NIL, because maybe_defer_handler sets
+ * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/
+ SetSymbolValue(INTERRUPT_PENDING, NIL,thread);
+
+ /* restore the saved signal mask from the original signal (the
+ * one that interrupted us during the critical section) into the
+ * os_context for the signal we're currently in the handler for.
+ * This should ensure that when we return from the handler the
+ * blocked signals are unblocked */
+ sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
+
+ sigemptyset(&data->pending_mask);
+ /* This will break on sparc linux: the deferred handler really wants
+ * to be called with a void_context */
+ run_deferred_handler(data,(void *)context);
+ }
}
\f
/*
{
os_context_t *context = (os_context_t*)void_context;
struct thread *thread=arch_os_get_current_thread();
-#ifndef LISP_FEATURE_X86
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
boolean were_in_lisp;
#endif
union interrupt_handler handler;
+ check_blockables_blocked_or_lose();
+ check_interrupts_enabled_or_lose(context);
#ifdef LISP_FEATURE_LINUX
/* Under Linux on some architectures, we appear to have to restore
the FPU control word from the context, as after the signal is
delivered we appear to have a null FPU control word. */
os_restore_fp_control(context);
-#endif
+#endif
handler = thread->interrupt_data->interrupt_handlers[signal];
if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
- return;
+ return;
}
-
-#ifndef LISP_FEATURE_X86
+
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
were_in_lisp = !foreign_function_call_active;
if (were_in_lisp)
#endif
#ifdef QSHOW_SIGNALS
FSHOW((stderr,
- "/entering interrupt_handle_now(%d, info, context)\n",
- signal));
+ "/entering interrupt_handle_now(%d, info, context)\n",
+ signal));
#endif
if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- /* This can happen if someone tries to ignore or default one
- * of the signals we need for runtime support, and the runtime
- * support decides to pass on it. */
- lose("no handler for signal %d in interrupt_handle_now(..)", signal);
+ /* This can happen if someone tries to ignore or default one
+ * of the signals we need for runtime support, and the runtime
+ * support decides to pass on it. */
+ lose("no handler for signal %d in interrupt_handle_now(..)", signal);
} else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
- /* Once we've decided what to do about contexts in a
- * return-elsewhere world (the original context will no longer
- * be available; should we copy it or was nobody using it anyway?)
- * then we should convert this to return-elsewhere */
+ /* Once we've decided what to do about contexts in a
+ * return-elsewhere world (the original context will no longer
+ * be available; should we copy it or was nobody using it anyway?)
+ * then we should convert this to return-elsewhere */
/* CMUCL comment said "Allocate the SAPs while the interrupts
- * are still disabled.". I (dan, 2003.08.21) assume this is
- * because we're not in pseudoatomic and allocation shouldn't
- * be interrupted. In which case it's no longer an issue as
- * all our allocation from C now goes through a PA wrapper,
- * but still, doesn't hurt */
+ * are still disabled.". I (dan, 2003.08.21) assume this is
+ * because we're not in pseudoatomic and allocation shouldn't
+ * be interrupted. In which case it's no longer an issue as
+ * all our allocation from C now goes through a PA wrapper,
+ * but still, doesn't hurt */
lispobj info_sap,context_sap = alloc_sap(context);
info_sap = alloc_sap(info);
/* Allow signals again. */
- sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
#ifdef QSHOW_SIGNALS
- SHOW("calling Lisp-level handler");
+ SHOW("calling Lisp-level handler");
#endif
funcall3(handler.lisp,
- make_fixnum(signal),
- info_sap,
- context_sap);
+ make_fixnum(signal),
+ info_sap,
+ context_sap);
} else {
#ifdef QSHOW_SIGNALS
- SHOW("calling C-level handler");
+ SHOW("calling C-level handler");
#endif
/* Allow signals again. */
- sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
-
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+
(*handler.c)(signal, info, void_context);
}
-#ifndef LISP_FEATURE_X86
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
if (were_in_lisp)
#endif
{
#ifdef QSHOW_SIGNALS
FSHOW((stderr,
- "/returning from interrupt_handle_now(%d, info, context)\n",
- signal));
+ "/returning from interrupt_handle_now(%d, info, context)\n",
+ signal));
#endif
}
void
run_deferred_handler(struct interrupt_data *data, void *v_context) {
- (*(data->pending_handler))
- (data->pending_signal,&(data->pending_info), v_context);
+ /* The pending_handler may enable interrupts (see
+ * interrupt_maybe_gc_int) and then another interrupt may hit,
+ * overwrite interrupt_data, so reset the pending handler before
+ * calling it. Trust the handler to finish with the siginfo before
+ * enabling interrupts. */
+ void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler;
data->pending_handler=0;
+ (*pending_handler)(data->pending_signal,&(data->pending_info), v_context);
}
boolean
maybe_defer_handler(void *handler, struct interrupt_data *data,
- int signal, siginfo_t *info, os_context_t *context)
+ int signal, siginfo_t *info, os_context_t *context)
{
struct thread *thread=arch_os_get_current_thread();
+
+ check_blockables_blocked_or_lose();
+
+ if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
+ lose("interrupt already pending");
+ /* If interrupts are disabled then INTERRUPT_PENDING is set and
+     * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
+ * atomic section inside a without-interrupts.
+ */
if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) {
- store_signal_data_for_later(data,handler,signal,info,context);
+ store_signal_data_for_later(data,handler,signal,info,context);
SetSymbolValue(INTERRUPT_PENDING, T,thread);
- return 1;
- }
+#ifdef QSHOW_SIGNALS
+ FSHOW((stderr,
+ "/maybe_defer_handler(%x,%d),thread=%ld: deferred\n",
+ (unsigned int)handler,signal,thread->os_thread));
+#endif
+ return 1;
+ }
/* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
* actually use its argument for anything on x86, so this branch
* may succeed even when context is null (gencgc alloc()) */
if (
-#ifndef LISP_FEATURE_X86
- (!foreign_function_call_active) &&
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+ (!foreign_function_call_active) &&
#endif
- arch_pseudo_atomic_atomic(context)) {
- store_signal_data_for_later(data,handler,signal,info,context);
- arch_set_pseudo_atomic_interrupted(context);
- return 1;
+ arch_pseudo_atomic_atomic(context)) {
+ store_signal_data_for_later(data,handler,signal,info,context);
+ arch_set_pseudo_atomic_interrupted(context);
+#ifdef QSHOW_SIGNALS
+ FSHOW((stderr,
+ "/maybe_defer_handler(%x,%d),thread=%ld: deferred(PA)\n",
+ (unsigned int)handler,signal,thread->os_thread));
+#endif
+ return 1;
}
+#ifdef QSHOW_SIGNALS
+ FSHOW((stderr,
+ "/maybe_defer_handler(%x,%d),thread=%ld: not deferred\n",
+ (unsigned int)handler,signal,thread->os_thread));
+#endif
return 0;
}
+
static void
store_signal_data_for_later (struct interrupt_data *data, void *handler,
- int signal,
- siginfo_t *info, os_context_t *context)
+ int signal,
+ siginfo_t *info, os_context_t *context)
{
+ if (data->pending_handler)
+ lose("tried to overwrite pending interrupt handler %x with %x\n",
+ data->pending_handler, handler);
+ if (!handler)
+ lose("tried to defer null interrupt handler\n");
data->pending_handler = handler;
data->pending_signal = signal;
if(info)
- memcpy(&(data->pending_info), info, sizeof(siginfo_t));
+ memcpy(&(data->pending_info), info, sizeof(siginfo_t));
if(context) {
- /* the signal mask in the context (from before we were
- * interrupted) is copied to be restored when
- * run_deferred_handler happens. Then the usually-blocked
- * signals are added to the mask in the context so that we are
- * running with blocked signals when the handler returns */
- sigemptyset(&(data->pending_mask));
- memcpy(&(data->pending_mask),
- os_context_sigmask_addr(context),
- REAL_SIGSET_SIZE_BYTES);
- sigaddset_blockable(os_context_sigmask_addr(context));
- } else {
- /* this is also called from gencgc alloc(), in which case
- * there has been no signal and is therefore no context. */
- sigset_t new;
- sigemptyset(&new);
- sigaddset_blockable(&new);
- sigprocmask(SIG_BLOCK,&new,&(data->pending_mask));
+ /* the signal mask in the context (from before we were
+ * interrupted) is copied to be restored when
+ * run_deferred_handler happens. Then the usually-blocked
+ * signals are added to the mask in the context so that we are
+ * running with blocked signals when the handler returns */
+ sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
+ sigaddset_blockable(os_context_sigmask_addr(context));
}
}
-
static void
maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
{
struct interrupt_data *data=thread->interrupt_data;
#ifdef LISP_FEATURE_LINUX
os_restore_fp_control(context);
-#endif
+#endif
if(maybe_defer_handler(interrupt_handle_now,data,
- signal,info,context))
- return;
+ signal,info,context))
+ return;
interrupt_handle_now(signal, info, context);
+#ifdef LISP_FEATURE_DARWIN
+ /* Work around G5 bug */
+ DARWIN_FIX_CONTEXT(context);
+#endif
}
+static void
+low_level_interrupt_handle_now(int signal, siginfo_t *info, void *void_context)
+{
+ os_context_t *context = (os_context_t*)void_context;
+ struct thread *thread=arch_os_get_current_thread();
+
+#ifdef LISP_FEATURE_LINUX
+ os_restore_fp_control(context);
+#endif
+ check_blockables_blocked_or_lose();
+ check_interrupts_enabled_or_lose(context);
+ (*thread->interrupt_data->interrupt_low_level_handlers[signal])
+ (signal, info, void_context);
+#ifdef LISP_FEATURE_DARWIN
+ /* Work around G5 bug */
+ DARWIN_FIX_CONTEXT(context);
+#endif
+}
+
+static void
+low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
+{
+ os_context_t *context = arch_os_get_context(&void_context);
+ struct thread *thread=arch_os_get_current_thread();
+ struct interrupt_data *data=thread->interrupt_data;
+#ifdef LISP_FEATURE_LINUX
+ os_restore_fp_control(context);
+#endif
+ if(maybe_defer_handler(low_level_interrupt_handle_now,data,
+ signal,info,context))
+ return;
+ low_level_interrupt_handle_now(signal, info, context);
+#ifdef LISP_FEATURE_DARWIN
+ /* Work around G5 bug */
+ DARWIN_FIX_CONTEXT(context);
+#endif
+}
+
+#ifdef LISP_FEATURE_SB_THREAD
void
sig_stop_for_gc_handler(int signal, siginfo_t *info, void *void_context)
{
os_context_t *context = arch_os_get_context(&void_context);
struct thread *thread=arch_os_get_current_thread();
- struct interrupt_data *data=thread->interrupt_data;
+ sigset_t ss;
+ int i;
-
- if(maybe_defer_handler(sig_stop_for_gc_handler,data,
- signal,info,context)){
- return;
- }
/* need the context stored so it can have registers scavenged */
- fake_foreign_function_call(context);
+ fake_foreign_function_call(context);
+
+ sigemptyset(&ss);
+ for(i=1;i<NSIG;i++) sigaddset(&ss,i); /* Block everything. */
+ thread_sigmask(SIG_BLOCK,&ss,0);
+
+ /* The GC can't tell if a thread is a zombie, so this would be a
+ * good time to let the kernel reap any of our children in that
+ * awful state, to stop them from being waited for indefinitely.
+ * Userland reaping is done later when GC is finished */
+ if(thread->state!=STATE_RUNNING) {
+ lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
+ fixnum_value(thread->state));
+ }
+ thread->state=STATE_SUSPENDED;
- get_spinlock(&all_threads_lock,thread->pid);
- countdown_to_gc--;
- thread->state=STATE_STOPPED;
- release_spinlock(&all_threads_lock);
- kill(thread->pid,SIGSTOP);
+ sigemptyset(&ss); sigaddset(&ss,SIG_STOP_FOR_GC);
+ sigwaitinfo(&ss,0);
+ if(thread->state!=STATE_SUSPENDED) {
+ lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
+ fixnum_value(thread->state));
+ }
+ thread->state=STATE_RUNNING;
undo_fake_foreign_function_call(context);
}
+#endif
void
interrupt_handle_now_handler(int signal, siginfo_t *info, void *void_context)
{
os_context_t *context = arch_os_get_context(&void_context);
interrupt_handle_now(signal, info, context);
+#ifdef LISP_FEATURE_DARWIN
+ DARWIN_FIX_CONTEXT(context);
+#endif
}
/*
* stuff to detect and handle hitting the GC trigger
*/
-#ifndef LISP_FEATURE_GENCGC
+#ifndef LISP_FEATURE_GENCGC
/* since GENCGC has its own way to record trigger */
static boolean
gc_trigger_hit(int signal, siginfo_t *info, os_context_t *context)
{
if (current_auto_gc_trigger == NULL)
- return 0;
+ return 0;
else{
- void *badaddr=arch_get_bad_addr(signal,info,context);
- return (badaddr >= (void *)current_auto_gc_trigger &&
- badaddr <((void *)current_dynamic_space + DYNAMIC_SPACE_SIZE));
+ void *badaddr=arch_get_bad_addr(signal,info,context);
+ return (badaddr >= (void *)current_auto_gc_trigger &&
+ badaddr <((void *)current_dynamic_space + DYNAMIC_SPACE_SIZE));
}
}
#endif
* previously
*/
+#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
+int *context_eflags_addr(os_context_t *context);
+#endif
+
extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
extern void post_signal_tramp(void);
void arrange_return_to_lisp_function(os_context_t *context, lispobj function)
{
+#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
void * fun=native_pointer(function);
- char *code = &(((struct simple_fun *) fun)->code);
-
+ void *code = &(((struct simple_fun *) fun)->code);
+#endif
+
/* Build a stack frame showing `interrupted' so that the
* user's backtrace makes (as much) sense (as usual) */
+
+ /* FIXME: what about restoring fp state? */
+ /* FIXME: what about restoring errno? */
#ifdef LISP_FEATURE_X86
/* Suppose the existence of some function that saved all
* registers, called call_into_lisp, then restored GP registers and
- * returned. We shortcut this: fake the stack that call_into_lisp
- * would see, then arrange to have it called directly. post_signal_tramp
- * is the second half of this function
+ * returned. It would look something like this:
+
+ push ebp
+ mov ebp esp
+ pushfl
+ pushal
+ push $0
+ push $0
+ pushl {address of function to call}
+ call 0x8058db0 <call_into_lisp>
+ addl $12,%esp
+ popal
+ popfl
+ leave
+ ret
+
+ * What we do here is set up the stack that call_into_lisp would
+ * expect to see if it had been called by this code, and frob the
+ * signal context so that signal return goes directly to call_into_lisp,
+ * and when that function (and the lisp function it invoked) returns,
+ * it returns to the second half of this imaginary function which
+ * restores all registers and returns to C
+
+ * For this to work, the latter part of the imaginary function
+ * must obviously exist in reality. That would be post_signal_tramp
*/
+
u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
- *(sp-14) = post_signal_tramp; /* return address for call_into_lisp */
- *(sp-13) = function; /* args for call_into_lisp : function*/
- *(sp-12) = 0; /* arg array */
- *(sp-11) = 0; /* no. args */
+ *(sp-15) = post_signal_tramp; /* return address for call_into_lisp */
+ *(sp-14) = function; /* args for call_into_lisp : function*/
+ *(sp-13) = 0; /* arg array */
+ *(sp-12) = 0; /* no. args */
/* this order matches that used in POPAD */
- *(sp-10)=*os_context_register_addr(context,reg_EDI);
- *(sp-9)=*os_context_register_addr(context,reg_ESI);
- /* this gets overwritten again before it's used, anyway */
- *(sp-8)=*os_context_register_addr(context,reg_EBP);
- *(sp-7)=0 ; /* POPAD doesn't set ESP, but expects a gap for it anyway */
- *(sp-6)=*os_context_register_addr(context,reg_EBX);
-
- *(sp-5)=*os_context_register_addr(context,reg_EDX);
- *(sp-4)=*os_context_register_addr(context,reg_ECX);
- *(sp-3)=*os_context_register_addr(context,reg_EAX);
+ *(sp-11)=*os_context_register_addr(context,reg_EDI);
+ *(sp-10)=*os_context_register_addr(context,reg_ESI);
+
+ *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
+ /* POPAD ignores the value of ESP: */
+ *(sp-8)=0;
+ *(sp-7)=*os_context_register_addr(context,reg_EBX);
+
+ *(sp-6)=*os_context_register_addr(context,reg_EDX);
+ *(sp-5)=*os_context_register_addr(context,reg_ECX);
+ *(sp-4)=*os_context_register_addr(context,reg_EAX);
+ *(sp-3)=*context_eflags_addr(context);
*(sp-2)=*os_context_register_addr(context,reg_EBP);
*(sp-1)=*os_context_pc_addr(context);
-#else
+#elif defined(LISP_FEATURE_X86_64)
+ u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
+ *(sp-18) = post_signal_tramp; /* return address for call_into_lisp */
+
+ *(sp-17)=*os_context_register_addr(context,reg_R15);
+ *(sp-16)=*os_context_register_addr(context,reg_R14);
+ *(sp-15)=*os_context_register_addr(context,reg_R13);
+ *(sp-14)=*os_context_register_addr(context,reg_R12);
+ *(sp-13)=*os_context_register_addr(context,reg_R11);
+ *(sp-12)=*os_context_register_addr(context,reg_R10);
+ *(sp-11)=*os_context_register_addr(context,reg_R9);
+ *(sp-10)=*os_context_register_addr(context,reg_R8);
+ *(sp-9)=*os_context_register_addr(context,reg_RDI);
+ *(sp-8)=*os_context_register_addr(context,reg_RSI);
+ /* skip RBP and RSP */
+ *(sp-7)=*os_context_register_addr(context,reg_RBX);
+ *(sp-6)=*os_context_register_addr(context,reg_RDX);
+ *(sp-5)=*os_context_register_addr(context,reg_RCX);
+ *(sp-4)=*os_context_register_addr(context,reg_RAX);
+ *(sp-3)=*context_eflags_addr(context);
+ *(sp-2)=*os_context_register_addr(context,reg_RBP);
+ *(sp-1)=*os_context_pc_addr(context);
+
+ *os_context_register_addr(context,reg_RDI) = function; /* function */
+ *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
+ *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
+#else
struct thread *th=arch_os_get_current_thread();
build_fake_control_stack_frames(th,context);
#endif
#ifdef LISP_FEATURE_X86
*os_context_pc_addr(context) = call_into_lisp;
- *os_context_register_addr(context,reg_ECX) = 0;
+ *os_context_register_addr(context,reg_ECX) = 0;
*os_context_register_addr(context,reg_EBP) = sp-2;
- *os_context_register_addr(context,reg_ESP) = sp-14;
+#ifdef __NetBSD__
+ *os_context_register_addr(context,reg_UESP) = sp-15;
+#else
+ *os_context_register_addr(context,reg_ESP) = sp-15;
+#endif
+#elif defined(LISP_FEATURE_X86_64)
+ *os_context_pc_addr(context) = call_into_lisp;
+ *os_context_register_addr(context,reg_RCX) = 0;
+ *os_context_register_addr(context,reg_RBP) = sp-2;
+ *os_context_register_addr(context,reg_RSP) = sp-18;
#else
/* this much of the calling convention is common to all
non-x86 ports */
*os_context_pc_addr(context) = code;
- *os_context_register_addr(context,reg_NARGS) = 0;
+ *os_context_register_addr(context,reg_NARGS) = 0;
*os_context_register_addr(context,reg_LIP) = code;
- *os_context_register_addr(context,reg_CFP) =
- current_control_frame_pointer;
+ *os_context_register_addr(context,reg_CFP) =
+ current_control_frame_pointer;
#endif
#ifdef ARCH_HAS_NPC_REGISTER
*os_context_npc_addr(context) =
- 4 + *os_context_pc_addr(context);
+ 4 + *os_context_pc_addr(context);
#endif
#ifdef LISP_FEATURE_SPARC
- *os_context_register_addr(context,reg_CODE) =
- fun + FUN_POINTER_LOWTAG;
+ *os_context_register_addr(context,reg_CODE) =
+ fun + FUN_POINTER_LOWTAG;
#endif
}
void interrupt_thread_handler(int num, siginfo_t *info, void *v_context)
{
os_context_t *context = (os_context_t*)arch_os_get_context(&v_context);
+ /* The order of interrupt execution is peculiar. If thread A
+     * interrupts thread B with I1, I2 and B for some reason receives
+ * I1 when FUN2 is already on the list, then it is FUN2 that gets
+ * to run first. But when FUN2 is run SIG_INTERRUPT_THREAD is
+ * enabled again and I2 hits pretty soon in FUN2 and run
+ * FUN1. This is of course just one scenario, and the order of
+ * thread interrupt execution is undefined. */
struct thread *th=arch_os_get_current_thread();
- struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
- if(maybe_defer_handler(interrupt_thread_handler,data,num,info,context)){
- return ;
- }
- arrange_return_to_lisp_function(context,info->si_value.sival_int);
+ struct cons *c;
+ if (th->state != STATE_RUNNING)
+ lose("interrupt_thread_handler: thread %ld in wrong state: %d\n",
+ th->os_thread,fixnum_value(th->state));
+ get_spinlock(&th->interrupt_fun_lock,(long)th);
+ c=((struct cons *)native_pointer(th->interrupt_fun));
+ arrange_return_to_lisp_function(context,c->car);
+ th->interrupt_fun=(lispobj *)(c->cdr);
+ release_spinlock(&th->interrupt_fun_lock);
}
-void thread_exit_handler(int num, siginfo_t *info, void *v_context)
-{ /* called when a child thread exits */
- os_context_t *context = (os_context_t*)arch_os_get_context(&v_context);
- struct thread *th=arch_os_get_current_thread();
- pid_t kid;
- int *status;
- struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
- if(maybe_defer_handler(thread_exit_handler,data,num,info,context)){
- return ;
- }
- while(1) {
- kid=waitpid(-1,&status,__WALL|WNOHANG);
- if(kid<1) break;
- if(WIFEXITED(status) || WIFSIGNALED(status)) {
- struct thread *th=find_thread_by_pid(kid);
- if(!th) continue;
- funcall1(SymbolFunction(HANDLE_THREAD_EXIT),make_fixnum(kid));
- destroy_thread(th);
- }
- }
-}
#endif
-boolean handle_control_stack_guard_triggered(os_context_t *context,void *addr){
+/* KLUDGE: Theoretically the approach we use for undefined alien
+ * variables should work for functions as well, but on PPC/Darwin
+ * we get a bus error at bogus addresses instead, hence this workaround,
+ * that has the added benefit of automatically discriminating between
+ * functions and variables.
+ */
+void undefined_alien_function() {
+ funcall0(SymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
+}
+
+boolean handle_guard_page_triggered(os_context_t *context,void *addr){
struct thread *th=arch_os_get_current_thread();
- /* note the os_context hackery here. When the signal handler returns,
+
+ /* note the os_context hackery here. When the signal handler returns,
* it won't go back to what it was doing ... */
- if(addr>=(void *)CONTROL_STACK_GUARD_PAGE(th) &&
- addr<(void *)(CONTROL_STACK_GUARD_PAGE(th)+os_vm_page_size)) {
- /* we hit the end of the control stack. disable protection
- * temporarily so the error handler has some headroom */
- protect_control_stack_guard_page(th->pid,0L);
-
- arrange_return_to_lisp_function
- (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
- return 1;
+ if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
+ addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
+ /* We hit the end of the control stack: disable guard page
+ * protection so the error handler has some headroom, protect the
+ * previous page so that we can catch returns from the guard page
+ * and restore it. */
+ protect_control_stack_guard_page(th,0);
+ protect_control_stack_return_guard_page(th,1);
+
+ arrange_return_to_lisp_function
+ (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
+ return 1;
+ }
+ else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
+ addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
+ /* We're returning from the guard page: reprotect it, and
+ * unprotect this one. This works even if we somehow missed
+ * the return-guard-page, and hit it on our way to new
+ * exhaustion instead. */
+ protect_control_stack_guard_page(th,1);
+ protect_control_stack_return_guard_page(th,0);
+ return 1;
+ }
+ else if (addr >= undefined_alien_address &&
+ addr < undefined_alien_address + os_vm_page_size) {
+ arrange_return_to_lisp_function
+ (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
+ return 1;
}
else return 0;
}
#ifndef LISP_FEATURE_GENCGC
-/* This function gets called from the SIGSEGV (for e.g. Linux or
+/* This function gets called from the SIGSEGV (for e.g. Linux, NetBSD, &
* OpenBSD) or SIGBUS (for e.g. FreeBSD) handler. Here we check
* whether the signal was due to treading on the mprotect()ed zone -
* and if so, arrange for a GC to happen. */
os_context_t *context=(os_context_t *) void_context;
struct thread *th=arch_os_get_current_thread();
struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
-
- if(!foreign_function_call_active && gc_trigger_hit(signal, info, context)){
- clear_auto_gc_trigger();
- if(!maybe_defer_handler
- (interrupt_maybe_gc_int,data,signal,info,void_context))
- interrupt_maybe_gc_int(signal,info,void_context);
- return 1;
+ th ? th->interrupt_data : global_interrupt_data;
+
+ if(!data->pending_handler && !foreign_function_call_active &&
+ gc_trigger_hit(signal, info, context)){
+ clear_auto_gc_trigger();
+ if(!maybe_defer_handler(interrupt_maybe_gc_int,
+ data,signal,info,void_context))
+ interrupt_maybe_gc_int(signal,info,void_context);
+ return 1;
}
return 0;
}
boolean
interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context)
{
- sigset_t new;
os_context_t *context=(os_context_t *) void_context;
+
+ check_blockables_blocked_or_lose();
fake_foreign_function_call(context);
+
/* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
* which case we will be running with no gc trigger barrier
* thing for a while. But it shouldn't be long until the end
- * of WITHOUT-GCING. */
+ * of WITHOUT-GCING.
+ *
+ * FIXME: It would be good to protect the end of dynamic space
+ * and signal a storage condition from there.
+ */
+
+ /* restore the signal mask from the interrupted context before
+ * calling into Lisp */
+ if (context)
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
- sigemptyset(&new);
- sigaddset_blockable(&new);
- /* enable signals before calling into Lisp */
- sigprocmask(SIG_UNBLOCK,&new,0);
funcall0(SymbolFunction(SUB_GC));
+
undo_fake_foreign_function_call(context);
return 1;
}
void
undoably_install_low_level_interrupt_handler (int signal,
- void handler(int,
- siginfo_t*,
- void*))
+ void handler(int,
+ siginfo_t*,
+ void*))
{
struct sigaction sa;
struct thread *th=arch_os_get_current_thread();
struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
+ th ? th->interrupt_data : global_interrupt_data;
if (0 > signal || signal >= NSIG) {
- lose("bad signal number %d", signal);
+ lose("bad signal number %d", signal);
}
- sa.sa_sigaction = handler;
+ if (sigismember(&blockable_sigset,signal))
+ sa.sa_sigaction = low_level_maybe_now_maybe_later;
+ else
+ sa.sa_sigaction = handler;
+
sigemptyset(&sa.sa_mask);
sigaddset_blockable(&sa.sa_mask);
sa.sa_flags = SA_SIGINFO | SA_RESTART;
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
- if((signal==SIG_MEMORY_FAULT)
+ if((signal==SIG_MEMORY_FAULT)
#ifdef SIG_INTERRUPT_THREAD
|| (signal==SIG_INTERRUPT_THREAD)
#endif
)
- sa.sa_flags|= SA_ONSTACK;
+ sa.sa_flags|= SA_ONSTACK;
#endif
-
+
sigaction(signal, &sa, NULL);
data->interrupt_low_level_handlers[signal] =
- (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
+ (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
}
/* This is called from Lisp. */
union interrupt_handler oldhandler;
struct thread *th=arch_os_get_current_thread();
struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
+ th ? th->interrupt_data : global_interrupt_data;
FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
sigemptyset(&new);
sigaddset(&new, signal);
- sigprocmask(SIG_BLOCK, &new, &old);
+ thread_sigmask(SIG_BLOCK, &new, &old);
sigemptyset(&new);
sigaddset_blockable(&new);
- FSHOW((stderr, "/interrupt_low_level_handlers[signal]=%d\n",
- interrupt_low_level_handlers[signal]));
+ FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%x\n",
+ (unsigned int)data->interrupt_low_level_handlers[signal]));
if (data->interrupt_low_level_handlers[signal]==0) {
- if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
- ARE_SAME_HANDLER(handler, SIG_IGN)) {
- sa.sa_sigaction = handler;
- } else if (sigismember(&new, signal)) {
- sa.sa_sigaction = maybe_now_maybe_later;
- } else {
- sa.sa_sigaction = interrupt_handle_now_handler;
- }
-
- sigemptyset(&sa.sa_mask);
- sigaddset_blockable(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO | SA_RESTART;
- sigaction(signal, &sa, NULL);
+ if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
+ ARE_SAME_HANDLER(handler, SIG_IGN)) {
+ sa.sa_sigaction = handler;
+ } else if (sigismember(&new, signal)) {
+ sa.sa_sigaction = maybe_now_maybe_later;
+ } else {
+ sa.sa_sigaction = interrupt_handle_now_handler;
+ }
+
+ sigemptyset(&sa.sa_mask);
+ sigaddset_blockable(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO | SA_RESTART;
+ sigaction(signal, &sa, NULL);
}
oldhandler = data->interrupt_handlers[signal];
data->interrupt_handlers[signal].c = handler;
- sigprocmask(SIG_SETMASK, &old, 0);
+ thread_sigmask(SIG_SETMASK, &old, 0);
FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
{
int i;
SHOW("entering interrupt_init()");
+ sigemptyset(&blockable_sigset);
+ sigaddset_blockable(&blockable_sigset);
+
global_interrupt_data=calloc(sizeof(struct interrupt_data), 1);
/* Set up high level handler information. */
for (i = 0; i < NSIG; i++) {
global_interrupt_data->interrupt_handlers[i].c =
- /* (The cast here blasts away the distinction between
- * SA_SIGACTION-style three-argument handlers and
- * signal(..)-style one-argument handlers, which is OK
- * because it works to call the 1-argument form where the
- * 3-argument form is expected.) */
- (void (*)(int, siginfo_t*, void*))SIG_DFL;
+ /* (The cast here blasts away the distinction between
+ * SA_SIGACTION-style three-argument handlers and
+ * signal(..)-style one-argument handlers, which is OK
+ * because it works to call the 1-argument form where the
+ * 3-argument form is expected.) */
+ (void (*)(int, siginfo_t*, void*))SIG_DFL;
}
SHOW("returning from interrupt_init()");