diff --git a/src/runtime/thread.c b/src/runtime/thread.c
index 9e1b7a6..ab0db08 100644
--- a/src/runtime/thread.c
+++ b/src/runtime/thread.c
@@ -27,11 +27,13 @@ int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
 struct thread * volatile all_threads;
-volatile lispobj all_threads_lock;
 extern struct interrupt_data * global_interrupt_data;
 extern int linux_no_threads_p;
 
 #ifdef LISP_FEATURE_SB_THREAD
+
+pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
+
 /* When trying to get all_threads_lock one should make sure that
  * sig_stop_for_gc is not blocked. Else there would be a possible
  * deadlock: gc locks it, other thread blocks signals, gc sends stop
@@ -59,13 +61,13 @@ void check_sig_stop_for_gc_can_arrive_or_lose()
     check_sig_stop_for_gc_can_arrive_or_lose(); \
     FSHOW_SIGNAL((stderr,"/%s:waiting on lock=%ld, thread=%lu\n",name, \
            all_threads_lock,arch_os_get_current_thread()->os_thread)); \
-    get_spinlock(&all_threads_lock,(long)arch_os_get_current_thread()); \
+    pthread_mutex_lock(&all_threads_lock); \
     FSHOW_SIGNAL((stderr,"/%s:got lock, thread=%lu\n", \
            name,arch_os_get_current_thread()->os_thread));
 
 #define RELEASE_ALL_THREADS_LOCK(name) \
     FSHOW_SIGNAL((stderr,"/%s:released lock\n",name)); \
-    release_spinlock(&all_threads_lock); \
+    pthread_mutex_unlock(&all_threads_lock); \
     thread_sigmask(SIG_SETMASK,&_oldset,0); \
 }
 #endif
@@ -82,8 +84,8 @@ initial_thread_trampoline(struct thread *th)
 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
     lispobj *args = NULL;
 #endif
-    function = th->unbound_marker;
-    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
+    function = th->no_tls_value_marker;
+    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
     if(arch_os_thread_init(th)==0) return 1;
 
     if(th->os_thread < 1) lose("th->os_thread not set up right");
@@ -107,8 +109,8 @@ new_thread_trampoline(struct thread *th)
 {
     lispobj function;
     int result;
-    function = th->unbound_marker;
-    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
+    function = th->no_tls_value_marker;
+    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
     if(arch_os_thread_init(th)==0) {
         /* FIXME: handle error */
         lose("arch_os_thread_init failed\n");
     }
@@ -127,12 +129,27 @@ new_thread_trampoline(struct thread *th)
 }
 #endif /* LISP_FEATURE_SB_THREAD */
 
+#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
+                            ALIEN_STACK_SIZE + dynamic_values_bytes + \
+                            32 * SIGSTKSZ)
+
+static void
+free_thread_struct(struct thread *th)
+{
+    if (th->interrupt_data)
+        os_invalidate((os_vm_address_t) th->interrupt_data,
+                      (sizeof (struct interrupt_data)));
+    os_invalidate((os_vm_address_t) th->control_stack_start,
+                  THREAD_STRUCT_SIZE);
+}
+
 /* this is called from any other thread to create the new one, and
  * initialize all parts of it that can be initialized from another
  * thread */
-struct thread * create_thread_struct(lispobj initial_function) {
+static struct thread *
+create_thread_struct(lispobj initial_function) {
     union per_thread_data *per_thread;
     struct thread *th=0;        /* subdue gcc */
     void *spaces=0;
@@ -140,12 +157,7 @@ struct thread * create_thread_struct(lispobj initial_function) {
     /* may as well allocate all the spaces at once: it saves us from
      * having to decide what to do if only some of the allocations
      * succeed */
-    spaces=os_validate(0,
-                       THREAD_CONTROL_STACK_SIZE+
-                       BINDING_STACK_SIZE+
-                       ALIEN_STACK_SIZE+
-                       dynamic_values_bytes+
-                       32*SIGSTKSZ);
+    spaces=os_validate(0, THREAD_STRUCT_SIZE);
     if(!spaces) return NULL;
     per_thread=(union per_thread_data *)
@@ -161,13 +173,15 @@ struct thread * create_thread_struct(lispobj initial_function) {
 #ifdef LISP_FEATURE_SB_THREAD
     int i;
     for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
-        per_thread->dynamic_values[i]=UNBOUND_MARKER_WIDETAG;
-    if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG)
+        per_thread->dynamic_values[i]=NO_TLS_VALUE_MARKER_WIDETAG;
+    if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
         SetSymbolValue
             (FREE_TLS_INDEX,
              make_fixnum(MAX_INTERRUPTS+
                          sizeof(struct thread)/sizeof(lispobj)),
              0);
+        SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
+    }
 #define STATIC_TLS_INIT(sym,field) \
     ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
         make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))
@@ -244,15 +258,12 @@ struct thread * create_thread_struct(lispobj initial_function) {
 
     th->interrupt_data = (struct interrupt_data *)
         os_validate(0,(sizeof (struct interrupt_data)));
-    if(all_threads)
-        memcpy(th->interrupt_data,
-               arch_os_get_current_thread()->interrupt_data,
-               sizeof (struct interrupt_data));
-    else
-        memcpy(th->interrupt_data,global_interrupt_data,
-               sizeof (struct interrupt_data));
-
-    th->unbound_marker=initial_function;
+    if (!th->interrupt_data) {
+        free_thread_struct(th);
+        return 0;
+    }
+    th->interrupt_data->pending_handler = 0;
+    th->no_tls_value_marker=initial_function;
     return th;
 }
 
@@ -332,11 +343,7 @@ struct thread *create_thread(lispobj initial_function) {
     if (success)
         link_thread(th,kid_tid);
     else
-        os_invalidate((os_vm_address_t) th->control_stack_start,
-                      ((sizeof (lispobj))
-                       * (th->control_stack_end-th->control_stack_start)) +
-                      BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
-                      32*SIGSTKSZ);
+        free_thread_struct(th);
 
     RELEASE_ALL_THREADS_LOCK("create_thread")
 
@@ -358,7 +365,6 @@ void reap_dead_thread(struct thread *th)
         sigaddset_blockable(&newset);
         thread_sigmask(SIG_BLOCK, &newset, &oldset);
         gc_alloc_update_page_tables(0, &th->alloc_region);
-        release_spinlock(&all_threads_lock);
         thread_sigmask(SIG_SETMASK,&oldset,0);
     }
 #endif
@@ -372,11 +378,7 @@ void reap_dead_thread(struct thread *th)
     RELEASE_ALL_THREADS_LOCK("reap_dead_thread")
     if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
     gc_assert(pthread_join(th->os_thread,NULL)==0);
-    os_invalidate((os_vm_address_t) th->control_stack_start,
-                  ((sizeof (lispobj))
-                   * (th->control_stack_end-th->control_stack_start)) +
-                  BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
-                  32*SIGSTKSZ);
+    free_thread_struct(th);
 }
 
 /* Send the signo to os_thread, retry if the rt signal queue is
@@ -469,16 +471,16 @@ void gc_stop_the_world()
     FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
                   th->os_thread));
     /* keep threads from starting while the world is stopped. */
-    get_spinlock(&all_threads_lock,(long)th);
+    pthread_mutex_lock(&all_threads_lock);
     FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
                   th->os_thread));
     /* stop all other threads by sending them SIG_STOP_FOR_GC */
     for(p=all_threads; p; p=p->next) {
         while(p->state==STATE_STARTING) sched_yield();
         if((p!=th) && (p->state==STATE_RUNNING)) {
-            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
             FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %lu\n",
                           p->os_thread));
+            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
             if (status==ESRCH) {
                 /* This thread has exited. */
                 gc_assert(p->state==STATE_DEAD);
@@ -533,7 +535,7 @@ void gc_start_the_world()
      * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
      * performance implications, but does away with the 'rt signal
      * queue full' problem. */
-    release_spinlock(&all_threads_lock);
+    pthread_mutex_unlock(&all_threads_lock);
     FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
 }
 #endif
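
The deadlock the comment at the top of the patch warns about does not depend
on the lock being a spinlock or a mutex: if any thread can hold
all_threads_lock while SIG_STOP_FOR_GC is blocked, the GC may take the lock
first and then signal that thread to stop; the signal is never delivered, the
thread keeps waiting on the lock, and neither side makes progress. A minimal
standalone sketch of the acquire/release pattern that
GET_ALL_THREADS_LOCK/RELEASE_ALL_THREADS_LOCK expand into -- SIGUSR2 here is
only a stand-in for SBCL's SIG_STOP_FOR_GC, and the whole function is
illustrative rather than the project's code:

    #include <pthread.h>
    #include <signal.h>

    #define SIG_STOP_FOR_GC SIGUSR2   /* stand-in; SBCL picks its own */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void with_all_threads_lock(void (*body)(void))
    {
        sigset_t block, old;
        sigfillset(&block);
        sigdelset(&block, SIG_STOP_FOR_GC);       /* must stay deliverable */
        pthread_sigmask(SIG_BLOCK, &block, &old); /* block everything else */
        pthread_mutex_lock(&lock);
        body();
        pthread_mutex_unlock(&lock);
        pthread_sigmask(SIG_SETMASK, &old, 0);    /* restore caller's mask */
    }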
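THREAD_STRUCT_SIZE exists so that allocation and deallocation agree on a
single number: create_thread_struct maps one block and carves it up, and
free_thread_struct can return the whole thing with one os_invalidate starting
at control_stack_start. The two open-coded size computations this patch
deletes had to repeat the sum by hand, with a slightly different expression
each time. A hedged sketch of the carve-up, using mmap/munmap as stand-ins
for os_validate/os_invalidate and made-up values for the build-time
constants; the region order follows the order the sizes are summed, while
the real code derives each pointer from struct thread fields:

    #include <sys/mman.h>
    #include <signal.h>     /* SIGSTKSZ */

    /* Stand-in sizes; the real values are SBCL build-time constants. */
    #define CONTROL_STACK_SIZE   (2u<<20)
    #define BINDING_STACK_SIZE   (1u<<20)
    #define ALIEN_STACK_SIZE     (1u<<20)
    #define DYNAMIC_VALUES_BYTES (4096*sizeof(long))
    #define STRUCT_SIZE (CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
                         ALIEN_STACK_SIZE + DYNAMIC_VALUES_BYTES + \
                         32*SIGSTKSZ)

    int main(void)
    {
        /* one mapping for everything a thread needs */
        char *spaces = mmap(0, STRUCT_SIZE, PROT_READ|PROT_WRITE,
                            MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        if (spaces == MAP_FAILED) return 1;
        char *control_stack  = spaces;
        char *binding_stack  = control_stack + CONTROL_STACK_SIZE;
        char *alien_stack    = binding_stack + BINDING_STACK_SIZE;
        char *dynamic_values = alien_stack + ALIEN_STACK_SIZE;
        char *sig_stacks     = dynamic_values + DYNAMIC_VALUES_BYTES;
        (void)sig_stacks;
        /* freeing is one call over the same size, from the block's start */
        munmap(spaces, STRUCT_SIZE);
        return 0;
    }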
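In gc_stop_the_world/gc_start_the_world the mutex doubles as the "no new
threads" gate: the GC thread holds all_threads_lock across the entire stopped
window, so create_thread cannot link a fresh thread into all_threads behind
the collector's back. The patch also moves the FSHOW_SIGNAL trace ahead of
kill_thread_safely, so the "suspending" line is logged before the signal is
sent rather than after. A hedged sketch of the pairing, with the
thread-walking elided:

    /* Illustrative only: same shape as the functions above, details elided. */
    #include <pthread.h>

    static pthread_mutex_t all_threads_lock_sketch = PTHREAD_MUTEX_INITIALIZER;

    static void gc_stop_the_world_sketch(void)
    {
        pthread_mutex_lock(&all_threads_lock_sketch);  /* bar thread creation */
        /* ... send SIG_STOP_FOR_GC to every other running thread,
         *     then wait until each reports itself suspended ... */
    }

    static void gc_start_the_world_sketch(void)
    {
        /* ... mark each suspended thread runnable and wake it ... */
        pthread_mutex_unlock(&all_threads_lock_sketch); /* creation allowed */
    }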