X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fthread.c;h=c0166d0b8f439ce2b537013170958ca04d07f3e3;hb=c3699db2053ff3b5ac6a98d4431c3789496002d8;hp=eb8f2414cab6213a764724aa578d30db65fd5cab;hpb=2675adcb29d689ee6d270f52658af17f2deeaf77;p=sbcl.git

diff --git a/src/runtime/thread.c b/src/runtime/thread.c
index eb8f241..c0166d0 100644
--- a/src/runtime/thread.c
+++ b/src/runtime/thread.c
@@ -4,9 +4,6 @@
 #include 
 #include 
 #include 
-#ifndef CLONE_PARENT            /* lameass glibc 2.2 doesn't define this */
-#define CLONE_PARENT 0x00008000 /* even though the manpage documents it */
-#endif
 #include "runtime.h"
 #include "sbcl.h"
 #include "validate.h"           /* for CONTROL_STACK_SIZE etc */
@@ -15,16 +12,13 @@
 #include "target-arch-os.h"
 #include "os.h"
 #include "globals.h"
-#ifdef LISP_FEATURE_GENCGC
-#include "gencgc.h"
-#endif
 #include "dynbind.h"
 #include "genesis/cons.h"
 
 #define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */
 
 int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
 struct thread *all_threads;
-lispobj all_threads_lock;
+volatile lispobj all_threads_lock;
 volatile int countdown_to_gc;
 extern struct interrupt_data * global_interrupt_data;
@@ -53,6 +47,8 @@ new_thread_trampoline(struct thread *th)
         fprintf(stderr, "/continue\n");
     }
     th->unbound_marker = UNBOUND_MARKER_WIDETAG;
+    if(arch_os_thread_init(th)==0)
+        return 1;               /* failure.  no, really */
 #ifdef LISP_FEATURE_SB_THREAD
     /* wait here until our thread is linked into all_threads: see below */
     while(th->pid<1) sched_yield();
@@ -61,8 +57,7 @@ new_thread_trampoline(struct thread *th)
         lose("th->pid not set up right");
 #endif
 
-    if(arch_os_thread_init(th)==0)
-        return 1;               /* failure.  no, really */
+    th->state=STATE_RUNNING;
 #if !defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_X86)
     return call_into_lisp_first_time(function,args,0);
 #else
@@ -139,6 +134,7 @@ pid_t create_thread(lispobj initial_function) {
     th->binding_stack_pointer=th->binding_stack_start;
     th->this=th;
     th->pid=0;
+    th->state=STATE_STOPPED;
 #ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
     th->alien_stack_pointer=((void *)th->alien_stack_start
                              + ALIEN_STACK_SIZE-4); /* naked 4.  FIXME */
@@ -147,9 +143,7 @@ pid_t create_thread(lispobj initial_function) {
 #endif
 #ifdef LISP_FEATURE_X86
     th->pseudo_atomic_interrupted=0;
-    /* runtime.c used to set PSEUDO_ATOMIC_ATOMIC =1 globally.  I'm not
-     * sure why, but it appears to help */
-    th->pseudo_atomic_atomic=make_fixnum(1);
+    th->pseudo_atomic_atomic=0;
 #endif
 #ifdef LISP_FEATURE_GENCGC
     gc_set_region_empty(&th->alloc_region);
@@ -196,8 +190,7 @@ pid_t create_thread(lispobj initial_function) {
     kid_pid=
         clone(new_thread_trampoline,
               (((void*)th->control_stack_start)+THREAD_CONTROL_STACK_SIZE-4),
-              (((getpid()!=parent_pid)?(CLONE_PARENT):0)
-               |CLONE_FILES|SIGALRM|CLONE_VM),th);
+              CLONE_FILES|SIG_THREAD_EXIT|CLONE_VM,th);
     if(kid_pid<=0)
         goto cleanup;
 #else
@@ -238,6 +231,7 @@ void destroy_thread (struct thread *th)
 #endif
     get_spinlock(&all_threads_lock,th->pid);
     if(countdown_to_gc>0) countdown_to_gc--;
+    th->state=STATE_STOPPED;
     if(th==all_threads)
         all_threads=th->next;
     else {
@@ -263,6 +257,10 @@ struct thread *find_thread_by_pid(pid_t pid)
     return 0;
 }
 
+/* These are not needed unless #+SB-THREAD, and since sigwaitinfo()
+ * doesn't seem to be easily available everywhere (OpenBSD...) it's
+ * more trouble than it's worth to compile it when not needed. */
+#if defined LISP_FEATURE_SB_THREAD
 void block_sigcont(void)
 {
     /* don't allow ourselves to receive SIGCONT while we're in the
@@ -274,10 +272,6 @@ void block_sigcont(void)
     sigprocmask(SIG_BLOCK, &newset, 0);
 }
 
-/* This is not needed unless #+SB-THREAD, and since sigwaitinfo()
- * doesn't seem to be easily available everywhere (OpenBSD...) it's
- * more trouble than it's worth to compile it when not needed. */
-#if defined LISP_FEATURE_SB_THREAD
 void unblock_sigcont_and_sleep(void)
 {
     sigset_t set;
@@ -298,6 +292,12 @@ int interrupt_thread(pid_t pid, lispobj function)
     return sigqueue(pid, SIG_INTERRUPT_THREAD, sigval);
 }
 
+int signal_thread_to_dequeue (pid_t pid)
+{
+    return kill (pid, SIG_DEQUEUE);
+}
+
+
 /* stopping the world is a two-stage process.  From this thread we signal
  * all the others with SIG_STOP_FOR_GC.  The handler for this thread does
  * the usual pseudo-atomic checks (we don't want to stop a thread while
@@ -310,26 +310,36 @@ void gc_stop_the_world()
 {
     /* stop all other threads by sending them SIG_STOP_FOR_GC */
     struct thread *p,*th=arch_os_get_current_thread();
-    struct thread *tail=0;
+    pid_t old_pid;
     int finished=0;
     do {
         get_spinlock(&all_threads_lock,th->pid);
-        if(tail!=all_threads) {
-            /* new threads always get consed onto the front of all_threads,
-             * and may be created by any thread that we haven't signalled
-             * yet or hasn't received our signal and stopped yet.  So, check
-             * for them on each time around */
-            for(p=all_threads;p!=tail;p=p->next) {
-                if(p==th) continue;
-                countdown_to_gc++;
-                kill(p->pid,SIG_STOP_FOR_GC);
-            }
-            tail=all_threads;
-        } else {
-            finished=(countdown_to_gc==0);
+        for(p=all_threads,old_pid=p->pid; p; p=p->next) {
+            if(p==th) continue;
+            if(p->state!=STATE_RUNNING) continue;
+            countdown_to_gc++;
+            p->state=STATE_STOPPING;
+            /* Note no return value check from kill().  If the
+             * thread had been reaped already, we kill it and
+             * increment countdown_to_gc anyway.  This is to avoid
+             * complicating the logic in destroy_thread, which would
+             * otherwise have to know whether the thread died before or
+             * after it was killed
+             */
+            kill(p->pid,SIG_STOP_FOR_GC);
         }
         release_spinlock(&all_threads_lock);
         sched_yield();
+        /* if everything has stopped, and there is no possibility that
+         * a new thread has been created, we're done.  Otherwise go
+         * round again and signal anything that sprang up since last
+         * time */
+        if(old_pid==all_threads->pid) {
+            finished=1;
+            for_each_thread(p)
+                finished = finished &&
+                ((p==th) || (p->state==STATE_STOPPED));
+        }
     } while(!finished);
 }
 
@@ -339,6 +349,7 @@ void gc_start_the_world()
     get_spinlock(&all_threads_lock,th->pid);
     for(p=all_threads;p;p=p->next) {
         if(p==th) continue;
+        p->state=STATE_RUNNING;
         kill(p->pid,SIGCONT);
    }
     release_spinlock(&all_threads_lock);
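
The heart of this patch is the per-thread state field (STATE_STOPPED / STATE_RUNNING / STATE_STOPPING) that gc_stop_the_world() and gc_start_the_world() now drive, replacing the old tail-pointer-plus-countdown bookkeeping. Below is a minimal, self-contained sketch of that handshake with the signal delivery replaced by a direct function call, so the loop structure can be read on its own. The toy_* names and the simulated handler are invented for this illustration and are not part of SBCL.

#include <stdio.h>

enum toy_state { STATE_STOPPED, STATE_RUNNING, STATE_STOPPING };

struct toy_thread {
    int pid;                        /* stands in for the clone()d child's pid */
    enum toy_state state;
    struct toy_thread *next;
};

/* Stands in for the SIG_STOP_FOR_GC handler: the target thread notices the
 * request and parks itself.  In SBCL this happens asynchronously in a signal
 * handler; here it is a direct call so the example terminates. */
static void toy_handle_stop_for_gc(struct toy_thread *p)
{
    p->state = STATE_STOPPED;
}

/* Mirror of the new gc_stop_the_world() loop: keep sweeping the thread list,
 * requesting a stop from every RUNNING thread, until every thread other than
 * the caller reports STATE_STOPPED. */
static void toy_stop_the_world(struct toy_thread *all, struct toy_thread *self)
{
    int finished = 0;
    do {
        struct toy_thread *p;
        for (p = all; p; p = p->next) {
            if (p == self || p->state != STATE_RUNNING) continue;
            p->state = STATE_STOPPING;  /* real code: kill(p->pid, SIG_STOP_FOR_GC) */
            toy_handle_stop_for_gc(p);  /* real code: done in p's signal handler */
        }
        finished = 1;
        for (p = all; p; p = p->next)
            finished = finished && (p == self || p->state == STATE_STOPPED);
    } while (!finished);
}

/* Mirror of gc_start_the_world(): flip everyone back to RUNNING. */
static void toy_start_the_world(struct toy_thread *all, struct toy_thread *self)
{
    struct toy_thread *p;
    for (p = all; p; p = p->next) {
        if (p == self) continue;
        p->state = STATE_RUNNING;       /* real code: also kill(p->pid, SIGCONT) */
    }
}

int main(void)
{
    struct toy_thread c = { 102, STATE_RUNNING, 0 };
    struct toy_thread b = { 101, STATE_RUNNING, &c };
    struct toy_thread a = { 100, STATE_RUNNING, &b };  /* 'a' plays the GCing thread */

    toy_stop_the_world(&a, &a);
    printf("after stop:  b=%d c=%d\n", b.state, c.state);  /* both STATE_STOPPED */
    toy_start_the_world(&a, &a);
    printf("after start: b=%d c=%d\n", b.state, c.state);  /* both STATE_RUNNING */
    return 0;
}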
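Both world-stopping functions serialize access to all_threads with get_spinlock()/release_spinlock(), which is also why the patch makes all_threads_lock volatile. SBCL's real spinlock primitives live in architecture-specific runtime code; the version below is only an illustrative equivalent, assuming a GCC/Clang-style __sync_val_compare_and_swap builtin, with the names reused purely for readability.

#include <sched.h>

typedef unsigned long lispobj;          /* stand-in for SBCL's lispobj */

/* Spin until the lock word goes from 0 (free) to `value` (the holder's pid in
 * the patch above), using an atomic compare-and-swap. */
static void get_spinlock(volatile lispobj *word, lispobj value)
{
    while (__sync_val_compare_and_swap(word, 0, value) != 0)
        sched_yield();                  /* give the current holder a chance to run */
}

/* Release is a plain store; `volatile` keeps the compiler from caching the
 * lock word in a register across the spin loop.  A production version on a
 * weakly ordered CPU would also want a memory barrier here. */
static void release_spinlock(volatile lispobj *word)
{
    *word = 0;
}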