#define ALIEN_STACK_SIZE (1*1024*1024) /* 1 MiB; size chosen arbitrarily */
int dynamic_values_bytes=4096*sizeof(lispobj); /* same for all threads */
-struct thread *all_threads;
-volatile lispobj all_threads_lock;
+struct thread * volatile all_threads;
extern struct interrupt_data * global_interrupt_data;
extern int linux_no_threads_p;
#ifdef LISP_FEATURE_SB_THREAD
+
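+/* Protects the all_threads list; acquired around thread creation,
+ * reaping and stop/start-the-world (see the macros below). */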
+pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
+
/* When trying to get all_threads_lock one should make sure that
* sig_stop_for_gc is not blocked. Else there would be a possible
* deadlock: gc locks it, other thread blocks signals, gc sends stop
check_sig_stop_for_gc_can_arrive_or_lose(); \
-    FSHOW_SIGNAL((stderr,"/%s:waiting on lock=%ld, thread=%lu\n",name, \
-           all_threads_lock,arch_os_get_current_thread()->os_thread)); \
+    FSHOW_SIGNAL((stderr,"/%s:waiting on lock, thread=%lu\n",name, \
+           arch_os_get_current_thread()->os_thread)); \
- get_spinlock(&all_threads_lock,(long)arch_os_get_current_thread()); \
+ pthread_mutex_lock(&all_threads_lock); \
FSHOW_SIGNAL((stderr,"/%s:got lock, thread=%lu\n", \
name,arch_os_get_current_thread()->os_thread));
#define RELEASE_ALL_THREADS_LOCK(name) \
FSHOW_SIGNAL((stderr,"/%s:released lock\n",name)); \
- release_spinlock(&all_threads_lock); \
+ pthread_mutex_unlock(&all_threads_lock); \
thread_sigmask(SIG_SETMASK,&_oldset,0); \
}
#endif
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif
-int
+static int
initial_thread_trampoline(struct thread *th)
{
lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
lispobj *args = NULL;
#endif
- function = th->unbound_marker;
- th->unbound_marker = UNBOUND_MARKER_WIDETAG;
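+    /* create_thread_struct() passes the initial function in the
+     * no_tls_value_marker slot; read it out and restore the slot to
+     * its marker value before calling into Lisp. */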
+ function = th->no_tls_value_marker;
+ th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
if(arch_os_thread_init(th)==0) return 1;
if(th->os_thread < 1) lose("th->os_thread not set up right");
{
lispobj function;
int result;
- function = th->unbound_marker;
- th->unbound_marker = UNBOUND_MARKER_WIDETAG;
- if(arch_os_thread_init(th)==0) return 1;
+ function = th->no_tls_value_marker;
+ th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
+ if(arch_os_thread_init(th)==0) {
+ /* FIXME: handle error */
+ lose("arch_os_thread_init failed\n");
+ }
/* wait here until our thread is linked into all_threads: see below */
- while(th->os_thread<1) sched_yield();
+ {
+ volatile os_thread_t *tid=&th->os_thread;
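+        /* Read through a volatile pointer so the compiler reloads
+         * os_thread on each iteration; the parent stores it in
+         * link_thread(). */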
+ while(*tid<1) sched_yield();
+ }
th->state=STATE_RUNNING;
result = funcall0(function);
}
#endif /* LISP_FEATURE_SB_THREAD */
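+
+/* Size of the single os_validate()d block that backs a thread:
+ * control stack, binding stack, alien stack, the dynamic (TLS)
+ * values, plus 32*SIGSTKSZ; see create_thread_struct() and
+ * free_thread_struct(). */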
+#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
+ ALIEN_STACK_SIZE + dynamic_values_bytes + \
+ 32 * SIGSTKSZ)
+
+static void
+free_thread_struct(struct thread *th)
+{
+ if (th->interrupt_data)
+ os_invalidate((os_vm_address_t) th->interrupt_data,
+ (sizeof (struct interrupt_data)));
+ os_invalidate((os_vm_address_t) th->control_stack_start,
+ THREAD_STRUCT_SIZE);
+}
+
/* this is called from any other thread to create the new one, and
* initialize all parts of it that can be initialized from another
* thread
*/
-struct thread * create_thread_struct(lispobj initial_function) {
+static struct thread *
+create_thread_struct(lispobj initial_function) {
union per_thread_data *per_thread;
struct thread *th=0; /* subdue gcc */
void *spaces=0;
/* may as well allocate all the spaces at once: it saves us from
* having to decide what to do if only some of the allocations
* succeed */
- spaces=os_validate(0,
- THREAD_CONTROL_STACK_SIZE+
- BINDING_STACK_SIZE+
- ALIEN_STACK_SIZE+
- dynamic_values_bytes+
- 32*SIGSTKSZ);
+ spaces=os_validate(0, THREAD_STRUCT_SIZE);
if(!spaces)
return NULL;
per_thread=(union per_thread_data *)
#ifdef LISP_FEATURE_SB_THREAD
int i;
for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
- per_thread->dynamic_values[i]=UNBOUND_MARKER_WIDETAG;
- if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG)
+ per_thread->dynamic_values[i]=NO_TLS_VALUE_MARKER_WIDETAG;
+ if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
SetSymbolValue
(FREE_TLS_INDEX,
make_fixnum(MAX_INTERRUPTS+
sizeof(struct thread)/sizeof(lispobj)),
0);
+ SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
+ }
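+/* Point a static symbol's tls_index at a slot of struct thread, so
+ * that its thread-local value lives in the thread structure itself. */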
#define STATIC_TLS_INIT(sym,field) \
((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))
th->interrupt_data = (struct interrupt_data *)
os_validate(0,(sizeof (struct interrupt_data)));
- if(all_threads)
- memcpy(th->interrupt_data,
- arch_os_get_current_thread()->interrupt_data,
- sizeof (struct interrupt_data));
- else
- memcpy(th->interrupt_data,global_interrupt_data,
- sizeof (struct interrupt_data));
-
- th->unbound_marker=initial_function;
+ if (!th->interrupt_data) {
+ free_thread_struct(th);
+ return 0;
+ }
+ th->interrupt_data->pending_handler = 0;
+ th->no_tls_value_marker=initial_function;
return th;
}
-void link_thread(struct thread *th,os_thread_t kid_tid)
+static void
+link_thread(struct thread *th,os_thread_t kid_tid)
{
if (all_threads) all_threads->prev=th;
th->next=all_threads;
protect_control_stack_guard_page(th,1);
/* child will not start until this is set */
th->os_thread=kid_tid;
+ FSHOW((stderr,"/created thread %lu\n",kid_tid));
}
void create_initial_thread(lispobj initial_function) {
sigset_t newset,oldset;
boolean r=1;
sigemptyset(&newset);
+    /* Blocking deferrable signals is enough, since gc_stop_the_world
+     * waits until the child leaves STATE_STARTING. Leaving
+     * SIG_STOP_FOR_GC unblocked lets gc proceed as soon as possible. */
sigaddset_deferrable(&newset);
thread_sigmask(SIG_BLOCK, &newset, &oldset);
if (success)
link_thread(th,kid_tid);
else
- os_invalidate((os_vm_address_t) th->control_stack_start,
- ((sizeof (lispobj))
- * (th->control_stack_end-th->control_stack_start)) +
- BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
- 32*SIGSTKSZ);
+ free_thread_struct(th);
RELEASE_ALL_THREADS_LOCK("create_thread")
sigaddset_blockable(&newset);
thread_sigmask(SIG_BLOCK, &newset, &oldset);
gc_alloc_update_page_tables(0, &th->alloc_region);
- release_spinlock(&all_threads_lock);
thread_sigmask(SIG_SETMASK,&oldset,0);
}
#endif
RELEASE_ALL_THREADS_LOCK("reap_dead_thread")
if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
gc_assert(pthread_join(th->os_thread,NULL)==0);
- os_invalidate((os_vm_address_t) th->control_stack_start,
- ((sizeof (lispobj))
- * (th->control_stack_end-th->control_stack_start)) +
- BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
- 32*SIGSTKSZ);
+ free_thread_struct(th);
+}
+
+/* Send the signo to os_thread, retry if the rt signal queue is
+ * full. */
+static int kill_thread_safely(os_thread_t os_thread, int signo)
+{
+ int r;
+ /* The man page does not mention EAGAIN as a valid return value
+ * for either pthread_kill or kill. But that's theory, this is
+ * practice. By waiting here we assume that the delivery of this
+ * signal is not necessary for the delivery of the signals in the
+ * queue. In other words, we _assume_ there are no deadlocks. */
+ while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
+ /* wait a bit then try again in the hope of the rt signal
+ * queue not being full */
+ FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
+ /* FIXME: some kind of backoff (random, exponential) would be
+ * nice. */
+ sleep(1);
+ }
+ return r;
}
int interrupt_thread(struct thread *th, lispobj function)
{
- /* A thread may also become dead after this test. */
- if ((th->state != STATE_DEAD)) {
- /* In clone_threads, if A and B both interrupt C at
- * approximately the same time, it does not matter: the
- * second signal will be masked until the handler has
- * returned from the first one. In pthreads though, we
- * can't put the knowledge of what function to call into
- * the siginfo, so we have to store it in the destination
- * thread, and do it in such a way that A won't clobber
- * B's interrupt. Hence this stupid linked list.
- *
- * This does depend on SIG_INTERRUPT_THREAD being queued
- * (as POSIX RT signals are): we need to keep
- * interrupt_fun data for exactly as many signals as are
- * going to be received by the destination thread.
- */
- lispobj c=alloc_cons(function,NIL);
- int kill_status;
- /* interrupt_thread_handler locks this spinlock with
- * interrupts blocked (it does so for the sake of
- * arrange_return_to_lisp_function), so we must also block
- * them or else SIG_STOP_FOR_GC and all_threads_lock will find
- * a way to deadlock. */
- sigset_t newset,oldset;
- sigemptyset(&newset);
- sigaddset_blockable(&newset);
- thread_sigmask(SIG_BLOCK, &newset, &oldset);
- get_spinlock(&th->interrupt_fun_lock,
- (long)arch_os_get_current_thread());
- kill_status=thread_kill(th->os_thread,SIG_INTERRUPT_THREAD);
- if(kill_status==0) {
- ((struct cons *)native_pointer(c))->cdr=th->interrupt_fun;
- th->interrupt_fun=c;
+ /* In clone_threads, if A and B both interrupt C at approximately
+ * the same time, it does not matter: the second signal will be
+ * masked until the handler has returned from the first one. In
+ * pthreads though, we can't put the knowledge of what function to
+ * call into the siginfo, so we have to store it in the
+ * destination thread, and do it in such a way that A won't
+ * clobber B's interrupt. Hence, this stupid linked list.
+ *
+ * This does depend on SIG_INTERRUPT_THREAD being queued (as POSIX
+ * RT signals are): we need to keep interrupt_fun data for exactly
+ * as many signals as are going to be received by the destination
+ * thread.
+ */
+ lispobj c=alloc_cons(function,NIL);
+ sigset_t newset,oldset;
+ sigemptyset(&newset);
+ /* interrupt_thread_handler locks this spinlock with blockables
+ * blocked (it does so for the sake of
+ * arrange_return_to_lisp_function), so we must also block them or
+ * else SIG_STOP_FOR_GC and all_threads_lock will find a way to
+ * deadlock. */
+ sigaddset_blockable(&newset);
+ thread_sigmask(SIG_BLOCK, &newset, &oldset);
+ if (th == arch_os_get_current_thread())
+ lose("cannot interrupt current thread");
+ get_spinlock(&th->interrupt_fun_lock,
+ (long)arch_os_get_current_thread());
+ ((struct cons *)native_pointer(c))->cdr=th->interrupt_fun;
+ th->interrupt_fun=c;
+ release_spinlock(&th->interrupt_fun_lock);
+ thread_sigmask(SIG_SETMASK,&oldset,0);
+    /* This is called from Lisp with the thread object as a parameter,
+     * so the object cannot be garbage collected, and consequently the
+     * thread cannot have been reaped and joined. Because it has not
+     * been joined, pthread_kill should work (even if the thread has
+     * already died/exited). */
+ {
+ int status=kill_thread_safely(th->os_thread,SIG_INTERRUPT_THREAD);
+ if (status==0) {
+ return 0;
+ } else if (status==ESRCH) {
+ /* This thread has exited. */
+ th->interrupt_fun=NIL;
+ errno=ESRCH;
+ return -1;
+ } else {
+ lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s",
+ th->os_thread,status,strerror(status));
}
- release_spinlock(&th->interrupt_fun_lock);
- thread_sigmask(SIG_SETMASK,&oldset,0);
- return (kill_status ? -1 : 0);
}
- errno=EPERM; return -1;
}
/* stopping the world is a two-stage process. From this thread we signal
void gc_stop_the_world()
{
struct thread *p,*th=arch_os_get_current_thread();
+ int status;
FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
th->os_thread));
/* keep threads from starting while the world is stopped. */
- get_spinlock(&all_threads_lock,(long)th);
+    pthread_mutex_lock(&all_threads_lock);
FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
th->os_thread));
/* stop all other threads by sending them SIG_STOP_FOR_GC */
for(p=all_threads; p; p=p->next) {
while(p->state==STATE_STARTING) sched_yield();
if((p!=th) && (p->state==STATE_RUNNING)) {
- FSHOW_SIGNAL((stderr, "/gc_stop_the_world: suspending %lu\n",
+ FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %lu\n",
p->os_thread));
- if(thread_kill(p->os_thread,SIG_STOP_FOR_GC)==-1) {
- /* we can't kill the thread; assume because it died
- * since we last checked */
- p->state=STATE_DEAD;
- FSHOW_SIGNAL((stderr,"/gc_stop_the_world:assuming %lu dead\n",
- p->os_thread));
+ status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
+ if (status==ESRCH) {
+ /* This thread has exited. */
+ gc_assert(p->state==STATE_DEAD);
+ } else if (status) {
+ lose("cannot send suspend thread=%lu: %d, %s",
+ p->os_thread,status,strerror(status));
}
}
}
void gc_start_the_world()
{
struct thread *p,*th=arch_os_get_current_thread();
+ int status;
/* if a resumed thread creates a new thread before we're done with
* this loop, the new thread will get consed on the front of
* all_threads, but it won't have been stopped so won't need
FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
p->os_thread));
p->state=STATE_RUNNING;
- thread_kill(p->os_thread,SIG_STOP_FOR_GC);
+ status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
+ if (status) {
+ lose("cannot resume thread=%lu: %d, %s",
+ p->os_thread,status,strerror(status));
+ }
}
}
- release_spinlock(&all_threads_lock);
+ /* If we waited here until all threads leave STATE_SUSPENDED, then
+ * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
+ * performance implications, but does away with the 'rt signal
+ * queue full' problem. */
+    pthread_mutex_unlock(&all_threads_lock);
FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}
#endif