#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <errno.h>
#include "sbcl.h"
#include "runtime.h"
#include "interr.h"
#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
+#include "genesis/cons.h"
extern volatile lispobj all_threads_lock;
-/*
- * This is a workaround for some slightly silly Linux/GNU Libc
- * behaviour: glibc defines sigset_t to support 1024 signals, which is
- * more than the kernel. This is usually not a problem, but becomes
- * one when we want to save a signal mask from a ucontext, and restore
- * it later into another ucontext: the ucontext is allocated on the
- * stack by the kernel, so copying a libc-sized sigset_t into it will
- * overflow and cause other data on the stack to be corrupted */
-
-#define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
-
void sigaddset_blockable(sigset_t *s)
{
sigaddset(s, SIGHUP);
#ifdef LISP_FEATURE_SB_THREAD
sigaddset(s, SIG_STOP_FOR_GC);
sigaddset(s, SIG_INTERRUPT_THREAD);
- sigaddset(s, SIG_THREAD_EXIT);
#endif
}
+static sigset_t blockable_sigset;
+
+/* Sanity check: verify that every signal in blockable_sigset is
+ * currently blocked in this thread, and lose() if any is not.  Called
+ * on entry to code that must run with the blockable signals masked. */
+inline static void check_blockables_blocked_or_lose(void)
+{
+    /* Get the current sigmask, by blocking the empty set. */
+    sigset_t empty,current;
+    int i;
+    sigemptyset(&empty);
+    thread_sigmask(SIG_BLOCK, &empty, &current);
+    for(i=0;i<NSIG;i++) {
+        if (sigismember(&blockable_sigset, i) && !sigismember(&current, i))
+            lose("blockable signal %d not blocked",i);
+    }
+}
+
+/* Sanity check: lose() unless it is currently legal to run Lisp-level
+ * interrupt code, i.e. Lisp-visible interrupts are enabled and we are
+ * not inside a pseudo-atomic section. */
+inline static void check_interrupts_enabled_or_lose(os_context_t *context)
+{
+    struct thread *thread=arch_os_get_current_thread();
+    if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
+        lose("interrupts not enabled");
+    if (
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+        /* On non-x86 the pseudo-atomic state is only meaningful while
+         * running Lisp code, not during a foreign function call. */
+        (!foreign_function_call_active) &&
+#endif
+        arch_pseudo_atomic_atomic(context))
+        lose ("in pseudo atomic section");
+}
+
/* When we catch an internal error, should we pass it back to Lisp to
* be handled in a high-level way? (Early in cold init, the answer is
* 'no', because Lisp is still too brain-dead to handle anything.
* mask ought to be clear anyway most of the time, but may be non-zero
* if we were interrupted e.g. while waiting for a queue. */
-#if 1
void reset_signal_mask ()
{
sigset_t new;
sigemptyset(&new);
- sigprocmask(SIG_SETMASK,&new,0);
-}
-#else
-void reset_signal_mask ()
-{
- sigset_t new,old;
- int i;
- int wrong=0;
- sigemptyset(&new);
- sigprocmask(SIG_SETMASK,&new,&old);
- for(i=1; i<NSIG; i++) {
- if(sigismember(&old,i)) {
- fprintf(stderr,
- "Warning: signal %d is masked: this is unexpected\n",i);
- wrong=1;
- }
- }
- if(wrong)
- fprintf(stderr,"If this version of SBCL is less than three months old, please report this.\nOtherwise, please try a newer version first\n. Reset signal mask.\n");
+ thread_sigmask(SIG_SETMASK,&new,0);
}
-#endif
void
build_fake_control_stack_frames(struct thread *th,os_context_t *context)
{
-#ifndef LISP_FEATURE_X86
+#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
lispobj oldcont;
int context_index;
struct thread *thread=arch_os_get_current_thread();
+ /* context_index incrementing must not be interrupted */
+ check_blockables_blocked_or_lose();
+
/* Get current Lisp state from context. */
#ifdef reg_ALLOC
dynamic_space_free_pointer =
(lispobj *)(*os_context_register_addr(context, reg_ALLOC));
-#ifdef alpha
+#if defined(LISP_FEATURE_ALPHA)
if ((long)dynamic_space_free_pointer & 1) {
lose("dead in fake_foreign_function_call, context = %x", context);
}
sigset_t block;
sigemptyset(&block);
sigaddset_blockable(&block);
- sigprocmask(SIG_BLOCK, &block, 0);
+ thread_sigmask(SIG_BLOCK, &block, 0);
/* going back into Lisp */
foreign_function_call_active = 0;
{
lispobj context_sap = 0;
+ check_blockables_blocked_or_lose();
fake_foreign_function_call(context);
/* Allocate the SAP object while the interrupts are still
context_sap = alloc_sap(context);
}
- sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
if (internal_errors_enabled) {
SHOW("in interrupt_internal_error");
-#if QSHOW
+#ifdef QSHOW
/* Display some rudimentary debugging information about the
* error, so that even if the Lisp error handler gets badly
* confused, we have a chance to determine what's going on. */
struct thread *thread;
struct interrupt_data *data;
+ check_blockables_blocked_or_lose();
+ check_interrupts_enabled_or_lose(context);
+
thread=arch_os_get_current_thread();
data=thread->interrupt_data;
- /* FIXME I'm not altogether sure this is appropriate if we're
- * here as the result of a pseudo-atomic */
- SetSymbolValue(INTERRUPT_PENDING, NIL,thread);
-
- /* restore the saved signal mask from the original signal (the
- * one that interrupted us during the critical section) into the
- * os_context for the signal we're currently in the handler for.
- * This should ensure that when we return from the handler the
- * blocked signals are unblocked */
-
- memcpy(os_context_sigmask_addr(context), &data->pending_mask,
- REAL_SIGSET_SIZE_BYTES);
-
- sigemptyset(&data->pending_mask);
- /* This will break on sparc linux: the deferred handler really wants
- * to be called with a void_context */
- run_deferred_handler(data,(void *)context);
+
+ /* Pseudo atomic may trigger several times for a single interrupt,
+ * and while without-interrupts should not, a false trigger by
+ * pseudo-atomic may eat a pending handler even from
+ * without-interrupts. */
+ if (data->pending_handler) {
+
+ /* If we're here as the result of a pseudo-atomic as opposed
+ * to WITHOUT-INTERRUPTS, then INTERRUPT_PENDING is already
+ * NIL, because maybe_defer_handler sets
+ * PSEUDO_ATOMIC_INTERRUPTED only if interrupts are enabled.*/
+ SetSymbolValue(INTERRUPT_PENDING, NIL,thread);
+
+ /* restore the saved signal mask from the original signal (the
+ * one that interrupted us during the critical section) into the
+ * os_context for the signal we're currently in the handler for.
+ * This should ensure that when we return from the handler the
+ * blocked signals are unblocked */
+ sigcopyset(os_context_sigmask_addr(context), &data->pending_mask);
+
+ sigemptyset(&data->pending_mask);
+ /* This will break on sparc linux: the deferred handler really wants
+ * to be called with a void_context */
+ run_deferred_handler(data,(void *)context);
+ }
}
\f
/*
{
os_context_t *context = (os_context_t*)void_context;
struct thread *thread=arch_os_get_current_thread();
-#ifndef LISP_FEATURE_X86
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
boolean were_in_lisp;
#endif
union interrupt_handler handler;
+ check_blockables_blocked_or_lose();
+ check_interrupts_enabled_or_lose(context);
#ifdef LISP_FEATURE_LINUX
/* Under Linux on some architectures, we appear to have to restore
return;
}
-#ifndef LISP_FEATURE_X86
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
were_in_lisp = !foreign_function_call_active;
if (were_in_lisp)
#endif
lispobj info_sap,context_sap = alloc_sap(context);
info_sap = alloc_sap(info);
/* Allow signals again. */
- sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
#ifdef QSHOW_SIGNALS
SHOW("calling Lisp-level handler");
#endif
/* Allow signals again. */
- sigprocmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
(*handler.c)(signal, info, void_context);
}
-#ifndef LISP_FEATURE_X86
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
if (were_in_lisp)
#endif
{
void
run_deferred_handler(struct interrupt_data *data, void *v_context) {
- (*(data->pending_handler))
- (data->pending_signal,&(data->pending_info), v_context);
+ /* The pending_handler may enable interrupts (see
+ * interrupt_maybe_gc_int) and then another interrupt may hit,
+ * overwrite interrupt_data, so reset the pending handler before
+ * calling it. Trust the handler to finish with the siginfo before
+ * enabling interrupts. */
+ void (*pending_handler) (int, siginfo_t*, void*)=data->pending_handler;
data->pending_handler=0;
+ (*pending_handler)(data->pending_signal,&(data->pending_info), v_context);
}
boolean
int signal, siginfo_t *info, os_context_t *context)
{
struct thread *thread=arch_os_get_current_thread();
+
+ check_blockables_blocked_or_lose();
+
+ if (SymbolValue(INTERRUPT_PENDING,thread) != NIL)
+ lose("interrupt already pending");
+    /* If interrupts are disabled then INTERRUPT_PENDING is set and
+     * not PSEUDO_ATOMIC_INTERRUPTED. This is important for a pseudo
+     * atomic section inside a without-interrupts.
+     */
if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) {
store_signal_data_for_later(data,handler,signal,info,context);
SetSymbolValue(INTERRUPT_PENDING, T,thread);
+#ifdef QSHOW_SIGNALS
+ FSHOW((stderr,
+ "/maybe_defer_handler(%x,%d),thread=%ld: deferred\n",
+ (unsigned int)handler,signal,thread->os_thread));
+#endif
return 1;
}
/* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
* actually use its argument for anything on x86, so this branch
* may succeed even when context is null (gencgc alloc()) */
if (
-#ifndef LISP_FEATURE_X86
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
(!foreign_function_call_active) &&
#endif
arch_pseudo_atomic_atomic(context)) {
store_signal_data_for_later(data,handler,signal,info,context);
arch_set_pseudo_atomic_interrupted(context);
+#ifdef QSHOW_SIGNALS
+ FSHOW((stderr,
+ "/maybe_defer_handler(%x,%d),thread=%ld: deferred(PA)\n",
+ (unsigned int)handler,signal,thread->os_thread));
+#endif
return 1;
}
+#ifdef QSHOW_SIGNALS
+ FSHOW((stderr,
+ "/maybe_defer_handler(%x,%d),thread=%ld: not deferred\n",
+ (unsigned int)handler,signal,thread->os_thread));
+#endif
return 0;
}
+
static void
store_signal_data_for_later (struct interrupt_data *data, void *handler,
int signal,
siginfo_t *info, os_context_t *context)
{
+ if (data->pending_handler)
+ lose("tried to overwrite pending interrupt handler %x with %x\n",
+ data->pending_handler, handler);
+ if (!handler)
+ lose("tried to defer null interrupt handler\n");
data->pending_handler = handler;
data->pending_signal = signal;
if(info)
* run_deferred_handler happens. Then the usually-blocked
* signals are added to the mask in the context so that we are
* running with blocked signals when the handler returns */
- sigemptyset(&(data->pending_mask));
- memcpy(&(data->pending_mask),
- os_context_sigmask_addr(context),
- REAL_SIGSET_SIZE_BYTES);
+ sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
sigaddset_blockable(os_context_sigmask_addr(context));
- } else {
- /* this is also called from gencgc alloc(), in which case
- * there has been no signal and is therefore no context. */
- sigset_t new;
- sigemptyset(&new);
- sigaddset_blockable(&new);
- sigprocmask(SIG_BLOCK,&new,&(data->pending_mask));
}
}
-
static void
maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
{
interrupt_handle_now(signal, info, context);
#ifdef LISP_FEATURE_DARWIN
/* Work around G5 bug */
- sigreturn(void_context);
+ DARWIN_FIX_CONTEXT(context);
+#endif
+}
+
+/* Run the low-level (C) handler registered for this signal right now.
+ * Asserts that the blockable signals are masked and that interrupt
+ * handling is currently permitted before dispatching. */
+static void
+low_level_interrupt_handle_now(int signal, siginfo_t *info, void *void_context)
+{
+    os_context_t *context = (os_context_t*)void_context;
+    struct thread *thread=arch_os_get_current_thread();
+
+#ifdef LISP_FEATURE_LINUX
+    /* Restore the FP control state saved in the interrupted context
+     * before running any handler code. */
+    os_restore_fp_control(context);
+#endif
+    check_blockables_blocked_or_lose();
+    check_interrupts_enabled_or_lose(context);
+    (*thread->interrupt_data->interrupt_low_level_handlers[signal])
+        (signal, info, void_context);
+#ifdef LISP_FEATURE_DARWIN
+    /* Work around G5 bug */
+    DARWIN_FIX_CONTEXT(context);
+#endif
+}
+
+/* Signal-handler wrapper for low-level handlers: if the interrupt
+ * cannot be taken right now (interrupts disabled, or inside a
+ * pseudo-atomic section), maybe_defer_handler stores it for later and
+ * we return; otherwise dispatch to the handler immediately. */
+static void
+low_level_maybe_now_maybe_later(int signal, siginfo_t *info, void *void_context)
+{
+    os_context_t *context = arch_os_get_context(&void_context);
+    struct thread *thread=arch_os_get_current_thread();
+    struct interrupt_data *data=thread->interrupt_data;
+#ifdef LISP_FEATURE_LINUX
+    os_restore_fp_control(context);
+#endif
+    /* Non-zero return means the handler was stored to run later. */
+    if(maybe_defer_handler(low_level_interrupt_handle_now,data,
+                           signal,info,context))
+        return;
+    low_level_interrupt_handle_now(signal, info, context);
+#ifdef LISP_FEATURE_DARWIN
+    /* Work around G5 bug */
+    DARWIN_FIX_CONTEXT(context);
+#endif
+}
{
os_context_t *context = arch_os_get_context(&void_context);
struct thread *thread=arch_os_get_current_thread();
- struct interrupt_data *data=thread->interrupt_data;
sigset_t ss;
int i;
- if(maybe_defer_handler(sig_stop_for_gc_handler,data,
- signal,info,context)) {
- return;
- }
/* need the context stored so it can have registers scavenged */
fake_foreign_function_call(context);
sigemptyset(&ss);
for(i=1;i<NSIG;i++) sigaddset(&ss,i); /* Block everything. */
- sigprocmask(SIG_BLOCK,&ss,0);
-
- get_spinlock(&all_threads_lock,thread->pid);
+ thread_sigmask(SIG_BLOCK,&ss,0);
+
+ /* The GC can't tell if a thread is a zombie, so this would be a
+ * good time to let the kernel reap any of our children in that
+ * awful state, to stop them from being waited for indefinitely.
+ * Userland reaping is done later when GC is finished */
+ if(thread->state!=STATE_STOPPING) {
+ lose("sig_stop_for_gc_handler: wrong thread state: %ld\n",
+ fixnum_value(thread->state));
+ }
thread->state=STATE_STOPPED;
- release_spinlock(&all_threads_lock);
sigemptyset(&ss); sigaddset(&ss,SIG_STOP_FOR_GC);
sigwaitinfo(&ss,0);
+ if(thread->state!=STATE_STOPPED) {
+ lose("sig_stop_for_gc_handler: wrong thread state on wakeup: %ld\n",
+ fixnum_value(thread->state));
+ }
+ thread->state=STATE_RUNNING;
undo_fake_foreign_function_call(context);
}
os_context_t *context = arch_os_get_context(&void_context);
interrupt_handle_now(signal, info, context);
#ifdef LISP_FEATURE_DARWIN
- sigreturn(void_context);
+ DARWIN_FIX_CONTEXT(context);
#endif
}
* previously
*/
+#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
+int *context_eflags_addr(os_context_t *context);
+#endif
+
extern lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
extern void post_signal_tramp(void);
void arrange_return_to_lisp_function(os_context_t *context, lispobj function)
{
+#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
void * fun=native_pointer(function);
- char *code = &(((struct simple_fun *) fun)->code);
-
+ void *code = &(((struct simple_fun *) fun)->code);
+#endif
+
/* Build a stack frame showing `interrupted' so that the
* user's backtrace makes (as much) sense (as usual) */
+
+ /* FIXME: what about restoring fp state? */
+ /* FIXME: what about restoring errno? */
#ifdef LISP_FEATURE_X86
/* Suppose the existence of some function that saved all
* registers, called call_into_lisp, then restored GP registers and
- * returned. We shortcut this: fake the stack that call_into_lisp
- * would see, then arrange to have it called directly. post_signal_tramp
- * is the second half of this function
+ * returned. It would look something like this:
+
+ push ebp
+ mov ebp esp
+ pushfl
+ pushal
+ push $0
+ push $0
+ pushl {address of function to call}
+ call 0x8058db0 <call_into_lisp>
+ addl $12,%esp
+ popal
+ popfl
+ leave
+ ret
+
+ * What we do here is set up the stack that call_into_lisp would
+ * expect to see if it had been called by this code, and frob the
+ * signal context so that signal return goes directly to call_into_lisp,
+ * and when that function (and the lisp function it invoked) returns,
+ * it returns to the second half of this imaginary function which
+ * restores all registers and returns to C
+
+ * For this to work, the latter part of the imaginary function
+ * must obviously exist in reality. That would be post_signal_tramp
*/
+
u32 *sp=(u32 *)*os_context_register_addr(context,reg_ESP);
- *(sp-14) = post_signal_tramp; /* return address for call_into_lisp */
- *(sp-13) = function; /* args for call_into_lisp : function*/
- *(sp-12) = 0; /* arg array */
- *(sp-11) = 0; /* no. args */
+ *(sp-15) = post_signal_tramp; /* return address for call_into_lisp */
+ *(sp-14) = function; /* args for call_into_lisp : function*/
+ *(sp-13) = 0; /* arg array */
+ *(sp-12) = 0; /* no. args */
/* this order matches that used in POPAD */
- *(sp-10)=*os_context_register_addr(context,reg_EDI);
- *(sp-9)=*os_context_register_addr(context,reg_ESI);
- /* this gets overwritten again before it's used, anyway */
- *(sp-8)=*os_context_register_addr(context,reg_EBP);
- *(sp-7)=0 ; /* POPAD doesn't set ESP, but expects a gap for it anyway */
- *(sp-6)=*os_context_register_addr(context,reg_EBX);
-
- *(sp-5)=*os_context_register_addr(context,reg_EDX);
- *(sp-4)=*os_context_register_addr(context,reg_ECX);
- *(sp-3)=*os_context_register_addr(context,reg_EAX);
+ *(sp-11)=*os_context_register_addr(context,reg_EDI);
+ *(sp-10)=*os_context_register_addr(context,reg_ESI);
+
+ *(sp-9)=*os_context_register_addr(context,reg_ESP)-8;
+ /* POPAD ignores the value of ESP: */
+ *(sp-8)=0;
+ *(sp-7)=*os_context_register_addr(context,reg_EBX);
+
+ *(sp-6)=*os_context_register_addr(context,reg_EDX);
+ *(sp-5)=*os_context_register_addr(context,reg_ECX);
+ *(sp-4)=*os_context_register_addr(context,reg_EAX);
+ *(sp-3)=*context_eflags_addr(context);
*(sp-2)=*os_context_register_addr(context,reg_EBP);
*(sp-1)=*os_context_pc_addr(context);
+#elif defined(LISP_FEATURE_X86_64)
+ u64 *sp=(u64 *)*os_context_register_addr(context,reg_RSP);
+ *(sp-20) = post_signal_tramp; /* return address for call_into_lisp */
+
+ *(sp-19)=*os_context_register_addr(context,reg_R15);
+ *(sp-18)=*os_context_register_addr(context,reg_R14);
+ *(sp-17)=*os_context_register_addr(context,reg_R13);
+ *(sp-16)=*os_context_register_addr(context,reg_R12);
+ *(sp-15)=*os_context_register_addr(context,reg_R11);
+ *(sp-14)=*os_context_register_addr(context,reg_R10);
+ *(sp-13)=*os_context_register_addr(context,reg_R9);
+ *(sp-12)=*os_context_register_addr(context,reg_R8);
+ *(sp-11)=*os_context_register_addr(context,reg_RDI);
+ *(sp-10)=*os_context_register_addr(context,reg_RSI);
+ *(sp-9)=*os_context_register_addr(context,reg_RSP)-16;
+ *(sp-8)=0;
+ *(sp-7)=*os_context_register_addr(context,reg_RBX);
+ *(sp-6)=*os_context_register_addr(context,reg_RDX);
+ *(sp-5)=*os_context_register_addr(context,reg_RCX);
+ *(sp-4)=*os_context_register_addr(context,reg_RAX);
+ *(sp-3)=*context_eflags_addr(context);
+ *(sp-2)=*os_context_register_addr(context,reg_RBP);
+ *(sp-1)=*os_context_pc_addr(context);
+
+ *os_context_register_addr(context,reg_RDI) = function; /* function */
+ *os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
+ *os_context_register_addr(context,reg_RDX) = 0; /* no. args */
#else
struct thread *th=arch_os_get_current_thread();
build_fake_control_stack_frames(th,context);
*os_context_pc_addr(context) = call_into_lisp;
*os_context_register_addr(context,reg_ECX) = 0;
*os_context_register_addr(context,reg_EBP) = sp-2;
- *os_context_register_addr(context,reg_ESP) = sp-14;
+#ifdef __NetBSD__
+ *os_context_register_addr(context,reg_UESP) = sp-15;
+#else
+ *os_context_register_addr(context,reg_ESP) = sp-15;
+#endif
+#elif defined(LISP_FEATURE_X86_64)
+ *os_context_pc_addr(context) = call_into_lisp;
+ *os_context_register_addr(context,reg_RCX) = 0;
+ *os_context_register_addr(context,reg_RBP) = sp-2;
+ *os_context_register_addr(context,reg_RSP) = sp-20;
#else
/* this much of the calling convention is common to all
non-x86 ports */
void interrupt_thread_handler(int num, siginfo_t *info, void *v_context)
{
os_context_t *context = (os_context_t*)arch_os_get_context(&v_context);
+    /* The order of interrupt execution is peculiar. If thread A
+     * interrupts thread B with I1, I2 and B for some reason receives
+     * I1 when FUN2 is already on the list, then it is FUN2 that gets
+     * to run first. But when FUN2 is run SIG_INTERRUPT_THREAD is
+     * enabled again and I2 hits pretty soon in FUN2 and runs
+     * FUN1. This is of course just one scenario, and the order of
+     * thread interrupt execution is undefined. */
struct thread *th=arch_os_get_current_thread();
- struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
- if(maybe_defer_handler(interrupt_thread_handler,data,num,info,context)){
- return ;
- }
- arrange_return_to_lisp_function(context,info->si_value.sival_int);
+ struct cons *c;
+ get_spinlock(&th->interrupt_fun_lock,(long)th);
+ c=((struct cons *)native_pointer(th->interrupt_fun));
+ arrange_return_to_lisp_function(context,c->car);
+ th->interrupt_fun=(lispobj *)(c->cdr);
+ release_spinlock(&th->interrupt_fun_lock);
}
-void thread_exit_handler(int num, siginfo_t *info, void *v_context)
-{ /* called when a child thread exits */
- os_context_t *context = (os_context_t*)arch_os_get_context(&v_context);
- struct thread *th=arch_os_get_current_thread();
- pid_t kid;
- int *status;
- struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
- if(maybe_defer_handler(thread_exit_handler,data,num,info,context)){
- return ;
- }
- while(1) {
- kid=waitpid(-1,&status,__WALL|WNOHANG);
- if(kid<1) break;
- if(WIFEXITED(status) || WIFSIGNALED(status)) {
- struct thread *th=find_thread_by_pid(kid);
- if(!th) continue;
- funcall1(SymbolFunction(HANDLE_THREAD_EXIT),make_fixnum(kid));
- destroy_thread(th);
- }
- }
-}
#endif
-boolean handle_control_stack_guard_triggered(os_context_t *context,void *addr){
+/* KLUDGE: Theoretically the approach we use for undefined alien
+ * variables should work for functions as well, but on PPC/Darwin
+ * we get bus error at bogus addresses instead, hence this workaround,
+ * that has the added benefit of automatically discriminating between
+ * functions and variables.
+ */
+void undefined_alien_function() {
+ funcall0(SymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
+}
+
+boolean handle_guard_page_triggered(os_context_t *context,void *addr){
struct thread *th=arch_os_get_current_thread();
+
/* note the os_context hackery here. When the signal handler returns,
* it won't go back to what it was doing ... */
- if(addr>=(void *)CONTROL_STACK_GUARD_PAGE(th) &&
- addr<(void *)(CONTROL_STACK_GUARD_PAGE(th)+os_vm_page_size)) {
- /* we hit the end of the control stack. disable protection
- * temporarily so the error handler has some headroom */
- protect_control_stack_guard_page(th->pid,0L);
-
+ if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
+ addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
+ /* We hit the end of the control stack: disable guard page
+ * protection so the error handler has some headroom, protect the
+ * previous page so that we can catch returns from the guard page
+ * and restore it. */
+ protect_control_stack_guard_page(th->os_thread,0);
+ protect_control_stack_return_guard_page(th->os_thread,1);
+
+ arrange_return_to_lisp_function
+ (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
+ return 1;
+ }
+ else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
+ addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
+ /* We're returning from the guard page: reprotect it, and
+ * unprotect this one. This works even if we somehow missed
+ * the return-guard-page, and hit it on our way to new
+ * exhaustion instead. */
+ protect_control_stack_guard_page(th->os_thread,1);
+ protect_control_stack_return_guard_page(th->os_thread,0);
+ return 1;
+ }
+ else if (addr >= undefined_alien_address &&
+ addr < undefined_alien_address + os_vm_page_size) {
arrange_return_to_lisp_function
- (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
+ (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
return 1;
}
else return 0;
}
#ifndef LISP_FEATURE_GENCGC
-/* This function gets called from the SIGSEGV (for e.g. Linux or
+/* This function gets called from the SIGSEGV (for e.g. Linux, NetBSD, &
* OpenBSD) or SIGBUS (for e.g. FreeBSD) handler. Here we check
* whether the signal was due to treading on the mprotect()ed zone -
* and if so, arrange for a GC to happen. */
struct interrupt_data *data=
th ? th->interrupt_data : global_interrupt_data;
- if(!foreign_function_call_active && gc_trigger_hit(signal, info, context)){
- clear_auto_gc_trigger();
- if(!maybe_defer_handler
- (interrupt_maybe_gc_int,data,signal,info,void_context))
- interrupt_maybe_gc_int(signal,info,void_context);
- return 1;
+ if(!data->pending_handler && !foreign_function_call_active &&
+ gc_trigger_hit(signal, info, context)){
+ clear_auto_gc_trigger();
+ if(!maybe_defer_handler(interrupt_maybe_gc_int,
+ data,signal,info,void_context))
+ interrupt_maybe_gc_int(signal,info,void_context);
+ return 1;
}
return 0;
}
boolean
interrupt_maybe_gc_int(int signal, siginfo_t *info, void *void_context)
{
- sigset_t new;
os_context_t *context=(os_context_t *) void_context;
+
+ check_blockables_blocked_or_lose();
fake_foreign_function_call(context);
+
/* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
* which case we will be running with no gc trigger barrier
* thing for a while. But it shouldn't be long until the end
- * of WITHOUT-GCING. */
+ * of WITHOUT-GCING.
+ *
+ * FIXME: It would be good to protect the end of dynamic space
+ * and signal a storage condition from there.
+ */
+
+ /* restore the signal mask from the interrupted context before
+ * calling into Lisp */
+ if (context)
+ thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
- sigemptyset(&new);
- sigaddset_blockable(&new);
- /* enable signals before calling into Lisp */
- sigprocmask(SIG_UNBLOCK,&new,0);
funcall0(SymbolFunction(SUB_GC));
+
undo_fake_foreign_function_call(context);
return 1;
}
lose("bad signal number %d", signal);
}
- sa.sa_sigaction = handler;
+ if (sigismember(&blockable_sigset,signal))
+ sa.sa_sigaction = low_level_maybe_now_maybe_later;
+ else
+ sa.sa_sigaction = handler;
+
sigemptyset(&sa.sa_mask);
sigaddset_blockable(&sa.sa_mask);
sa.sa_flags = SA_SIGINFO | SA_RESTART;
sigemptyset(&new);
sigaddset(&new, signal);
- sigprocmask(SIG_BLOCK, &new, &old);
+ thread_sigmask(SIG_BLOCK, &new, &old);
sigemptyset(&new);
sigaddset_blockable(&new);
- FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%d\n",
- data->interrupt_low_level_handlers[signal]));
+ FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%x\n",
+ (unsigned int)data->interrupt_low_level_handlers[signal]));
if (data->interrupt_low_level_handlers[signal]==0) {
if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
ARE_SAME_HANDLER(handler, SIG_IGN)) {
oldhandler = data->interrupt_handlers[signal];
data->interrupt_handlers[signal].c = handler;
- sigprocmask(SIG_SETMASK, &old, 0);
+ thread_sigmask(SIG_SETMASK, &old, 0);
FSHOW((stderr, "/leaving POSIX install_handler(%d, ..)\n", signal));
{
int i;
SHOW("entering interrupt_init()");
+ sigemptyset(&blockable_sigset);
+ sigaddset_blockable(&blockable_sigset);
+
global_interrupt_data=calloc(sizeof(struct interrupt_data), 1);
/* Set up high level handler information. */