X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2Fruntime%2Fthread.c;h=bcc470ca9a48a01d21c99b590ceef58631b4e2eb;hb=0d4c7a1323106c6e60511bef929048edcb040205;hp=63b39d766c7a6a905495b96fd42b50ff493043e8;hpb=b5b36522d5398715e53a3c6ca75cf16001ce46ac;p=sbcl.git

diff --git a/src/runtime/thread.c b/src/runtime/thread.c
index 63b39d7..bcc470c 100644
--- a/src/runtime/thread.c
+++ b/src/runtime/thread.c
@@ -83,7 +83,7 @@ static struct thread_post_mortem * volatile pending_thread_post_mortem = 0;
 #endif
 
 int dynamic_values_bytes=TLS_SIZE*sizeof(lispobj);  /* same for all threads */
-struct thread * volatile all_threads;
+struct thread *all_threads;
 extern struct interrupt_data * global_interrupt_data;
 
 #ifdef LISP_FEATURE_SB_THREAD
@@ -145,8 +145,17 @@ initial_thread_trampoline(struct thread *th)
 #endif
 }
 
+#ifdef LISP_FEATURE_SB_THREAD
+#define THREAD_STATE_LOCK_SIZE \
+    (sizeof(pthread_mutex_t))+(sizeof(pthread_cond_t))
+#else
+#define THREAD_STATE_LOCK_SIZE 0
+#endif
+
 #define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
-                            ALIEN_STACK_SIZE + dynamic_values_bytes + \
+                            ALIEN_STACK_SIZE + \
+                            THREAD_STATE_LOCK_SIZE + \
+                            dynamic_values_bytes + \
                             32 * SIGSTKSZ + \
                             THREAD_ALIGNMENT_BYTES)
 
@@ -249,6 +258,8 @@ new_thread_trampoline(struct thread *th)
     int result, lock_ret;
 
     FSHOW((stderr,"/creating thread %lu\n", thread_self()));
+    check_deferrables_blocked_or_lose();
+    check_gc_signals_unblocked_or_lose();
     function = th->no_tls_value_marker;
     th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
     if(arch_os_thread_init(th)==0) {
@@ -259,9 +270,9 @@ new_thread_trampoline(struct thread *th)
     th->os_thread=thread_self();
     protect_control_stack_guard_page(1);
     /* Since GC can only know about this thread from the all_threads
-     * list and we're just adding this thread to it there is no danger
-     * of deadlocking even with SIG_STOP_FOR_GC blocked (which it is
-     * not). */
+     * list and we're just adding this thread to it, there is no
+     * danger of deadlocking even with SIG_STOP_FOR_GC blocked (which
+     * it is not). */
     lock_ret = pthread_mutex_lock(&all_threads_lock);
     gc_assert(lock_ret == 0);
     link_thread(th);
@@ -272,7 +283,7 @@ new_thread_trampoline(struct thread *th)
 
     /* Block GC */
     block_blockable_signals();
-    th->state=STATE_DEAD;
+    set_thread_state(th, STATE_DEAD);
     /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
      * thread, but since we are already dead it won't wait long. */
 
@@ -285,6 +296,9 @@ new_thread_trampoline(struct thread *th)
     gc_assert(lock_ret == 0);
 
     if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
+    pthread_mutex_destroy(th->state_lock);
+    pthread_cond_destroy(th->state_cond);
+
     os_invalidate((os_vm_address_t)th->interrupt_data,
                   (sizeof (struct interrupt_data)));
 
@@ -351,7 +365,8 @@ create_thread_struct(lispobj initial_function) {
         (aligned_spaces+
          thread_control_stack_size+
          BINDING_STACK_SIZE+
-         ALIEN_STACK_SIZE);
+         ALIEN_STACK_SIZE +
+         THREAD_STATE_LOCK_SIZE);
 
 #ifdef LISP_FEATURE_SB_THREAD
     for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
@@ -395,6 +410,12 @@ create_thread_struct(lispobj initial_function) {
     th->os_thread=0;
 #ifdef LISP_FEATURE_SB_THREAD
     th->os_attr=malloc(sizeof(pthread_attr_t));
+    th->state_lock=(pthread_mutex_t *)((void *)th->alien_stack_start +
+                                       ALIEN_STACK_SIZE);
+    pthread_mutex_init(th->state_lock, NULL);
+    th->state_cond=(pthread_cond_t *)((void *)th->state_lock +
+                                      (sizeof(pthread_mutex_t)));
+    pthread_cond_init(th->state_cond, NULL);
 #endif
     th->state=STATE_RUNNING;
 #ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
@@ -448,6 +469,7 @@ create_thread_struct(lispobj initial_function) {
         return 0;
     }
     th->interrupt_data->pending_handler = 0;
+    th->interrupt_data->gc_blocked_deferrables = 0;
     th->no_tls_value_marker=initial_function;
 
     th->stepping = NIL;
@@ -481,23 +503,22 @@ boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
 {
     /* The new thread inherits the restrictive signal mask set here,
      * and enables signals again when it is set up properly. */
-    sigset_t newset,oldset;
+    sigset_t oldset;
     boolean r=1;
     int retcode = 0, initcode;
 
     FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));
 
+    /* Blocking deferrable signals is enough, no need to block
+     * SIG_STOP_FOR_GC because the child process is not linked onto
+     * all_threads until it's ready. */
+    thread_sigmask(SIG_BLOCK, &deferrable_sigset, &oldset);
+
 #ifdef LOCK_CREATE_THREAD
     retcode = pthread_mutex_lock(&create_thread_lock);
     gc_assert(retcode == 0);
     FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
 #endif
-    sigemptyset(&newset);
-    /* Blocking deferrable signals is enough, no need to block
-     * SIG_STOP_FOR_GC because the child process is not linked onto
-     * all_threads until it's ready. */
-    sigaddset_deferrable(&newset);
-    thread_sigmask(SIG_BLOCK, &newset, &oldset);
 
     if((initcode = pthread_attr_init(th->os_attr)) ||
        /* call_into_lisp_first_time switches the stack for the initial thread. For the
@@ -507,75 +528,39 @@ boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
         (kid_tid,th->os_attr,(void *(*)(void *))new_thread_trampoline,th))) {
         FSHOW_SIGNAL((stderr, "init = %d\n", initcode));
         FSHOW_SIGNAL((stderr, printf("pthread_create returned %d, errno %d\n",
                                      retcode, errno)));
-        FSHOW_SIGNAL((stderr, "wanted stack size %d, min stack size %d\n",
-                      cstack_size, PTHREAD_STACK_MIN));
         if(retcode < 0) {
             perror("create_os_thread");
         }
         r=0;
     }
-    thread_sigmask(SIG_SETMASK,&oldset,0);
 #ifdef LOCK_CREATE_THREAD
     retcode = pthread_mutex_unlock(&create_thread_lock);
     gc_assert(retcode == 0);
     FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
 #endif
+    thread_sigmask(SIG_SETMASK,&oldset,0);
     return r;
 }
 
 os_thread_t create_thread(lispobj initial_function) {
-    struct thread *th;
-    os_thread_t kid_tid;
+    struct thread *th, *thread = arch_os_get_current_thread();
+    os_thread_t kid_tid = 0;
 
+    /* Must defend against async unwinds. */
+    if (SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
+        lose("create_thread is not safe when interrupts are enabled.\n");
+
     /* Assuming that a fresh thread struct has no lisp objects in it,
      * linking it to all_threads can be left to the thread itself
      * without fear of gc lossage. initial_function violates this
      * assumption and must stay pinned until the child starts up. */
     th = create_thread_struct(initial_function);
-    if(th==0) return 0;
-
-    if (create_os_thread(th,&kid_tid)) {
-        return kid_tid;
-    } else {
+    if (th && !create_os_thread(th,&kid_tid)) {
         free_thread_struct(th);
-        return 0;
-    }
-}
-
-/* Send the signo to os_thread, retry if the rt signal queue is
- * full. */
-int
-kill_thread_safely(os_thread_t os_thread, int signo)
-{
-    int r;
-    /* The man page does not mention EAGAIN as a valid return value
-     * for either pthread_kill or kill. But that's theory, this is
-     * practice. By waiting here we assume that the delivery of this
-     * signal is not necessary for the delivery of the signals in the
-     * queue. In other words, we _assume_ there are no deadlocks. */
-    while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
-        /* wait a bit then try again in the hope of the rt signal
-         * queue not being full */
-        FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
-        /* FIXME: some kind of backoff (random, exponential) would be
-         * nice. */
-        sleep(1);
-    }
-    return r;
-}
-
-int signal_interrupt_thread(os_thread_t os_thread)
-{
-    int status = kill_thread_safely(os_thread, SIG_INTERRUPT_THREAD);
-    if (status == 0) {
-        return 0;
-    } else if (status == ESRCH) {
-        return -1;
-    } else {
-        lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s\n",
-             os_thread, status, strerror(status));
+        kid_tid = 0;
     }
+    return kid_tid;
 }
 
 /* stopping the world is a two-stage process. From this thread we signal
@@ -594,32 +579,31 @@ void gc_stop_the_world()
 #ifdef LOCK_CREATE_THREAD
     /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
      * on FreeBSD. */
-    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock, thread=%lu\n",
-                  th->os_thread));
+    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock\n"));
     lock_ret = pthread_mutex_lock(&create_thread_lock);
     gc_assert(lock_ret == 0);
-    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock, thread=%lu\n",
-                  th->os_thread));
+    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock\n"));
 #endif
-    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
-                  th->os_thread));
+    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock\n"));
     /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);      \
     gc_assert(lock_ret == 0);
 
-    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
-                  th->os_thread));
+    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock\n"));
     /* stop all other threads by sending them SIG_STOP_FOR_GC */
     for(p=all_threads; p; p=p->next) {
         gc_assert(p->os_thread != 0);
-        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: p->state: %x\n", p->state));
-        if((p!=th) && ((p->state==STATE_RUNNING))) {
-            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %x, os_thread %x\n",
-                          p, p->os_thread));
-            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
+        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: thread=%lu, state=%x\n",
+                      p->os_thread, thread_state(p)));
+        if((p!=th) && ((thread_state(p)==STATE_RUNNING))) {
+            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending thread %lu\n",
+                          p->os_thread));
+            /* We already hold all_thread_lock, P can become DEAD but
+             * cannot exit, ergo it's safe to use pthread_kill. */
+            status=pthread_kill(p->os_thread,SIG_STOP_FOR_GC);
             if (status==ESRCH) {
                 /* This thread has exited. */
-                gc_assert(p->state==STATE_DEAD);
+                gc_assert(thread_state(p)==STATE_DEAD);
             } else if (status) {
                 lose("cannot send suspend thread=%lu: %d, %s\n",
                      p->os_thread,status,strerror(status));
@@ -627,13 +611,15 @@ void gc_stop_the_world()
         }
     }
     FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
-    /* wait for the running threads to stop or finish */
-    for(p=all_threads;p;) {
-        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: th: %p, p: %p\n", th, p));
-        if((p!=th) && (p->state==STATE_RUNNING)) {
-            sched_yield();
-        } else {
-            p=p->next;
+    for(p=all_threads;p;p=p->next) {
+        if (p!=th) {
+            FSHOW_SIGNAL
+                ((stderr,
+                  "/gc_stop_the_world: waiting for thread=%lu: state=%x\n",
+                  p->os_thread, thread_state(p)));
+            wait_for_thread_state_change(p, STATE_RUNNING);
+            if (p->state == STATE_RUNNING)
+                lose("/gc_stop_the_world: unexpected state");
         }
     }
     FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
@@ -642,7 +628,7 @@ void gc_stop_the_world()
 void gc_start_the_world()
 {
     struct thread *p,*th=arch_os_get_current_thread();
-    int status, lock_ret;
+    int lock_ret;
     /* if a resumed thread creates a new thread before we're done with
      * this loop, the new thread will get consed on the front of
      * all_threads, but it won't have been stopped so won't need
@@ -650,30 +636,19 @@ void gc_start_the_world()
     FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
     for(p=all_threads;p;p=p->next) {
         gc_assert(p->os_thread!=0);
-        if((p!=th) && (p->state!=STATE_DEAD)) {
-            if(p->state!=STATE_SUSPENDED) {
-                lose("gc_start_the_world: wrong thread state is %d\n",
-                     fixnum_value(p->state));
-            }
-            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
-                          p->os_thread));
-            p->state=STATE_RUNNING;
-
-#if defined(SIG_RESUME_FROM_GC)
-            status=kill_thread_safely(p->os_thread,SIG_RESUME_FROM_GC);
-#else
-            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
-#endif
-            if (status) {
-                lose("cannot resume thread=%lu: %d, %s\n",
-                     p->os_thread,status,strerror(status));
+        if (p!=th) {
+            lispobj state = thread_state(p);
+            if (state != STATE_DEAD) {
+                if(state != STATE_SUSPENDED) {
+                    lose("gc_start_the_world: wrong thread state is %d\n",
+                         fixnum_value(state));
+                }
+                FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
+                              p->os_thread));
+                set_thread_state(p, STATE_RUNNING);
             }
         }
     }
-    /* If we waited here until all threads leave STATE_SUSPENDED, then
-     * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
-     * performance implications, but does away with the 'rt signal
-     * queue full' problem. */
     lock_ret = pthread_mutex_unlock(&all_threads_lock);
     gc_assert(lock_ret == 0);
 
@@ -695,3 +670,57 @@ thread_yield()
     return 0;
 #endif
 }
+
+/* If the thread id given does not belong to a running thread (it has
+ * exited or never even existed) pthread_kill _may_ fail with ESRCH,
+ * but it is also allowed to just segfault, see
+ * .
+ *
+ * Relying on thread ids can easily backfire since ids are recycled
+ * (NPTL recycles them extremely fast) so a signal can be sent to
+ * another process if the one it was sent to exited.
+ *
+ * We send signals in two places: signal_interrupt_thread sends a
+ * signal that's harmless if delivered to another thread, but
+ * SIG_STOP_FOR_GC is fatal.
+ *
+ * For these reasons, we must make sure that the thread is still alive
+ * when the pthread_kill is called and return if the thread is
+ * exiting. */
+int
+kill_safely(os_thread_t os_thread, int signal)
+{
+#ifdef LISP_FEATURE_SB_THREAD
+    sigset_t oldset;
+    struct thread *thread;
+    /* pthread_kill is not async signal safe and we don't want to be
+     * interrupted while holding the lock. */
+    thread_sigmask(SIG_BLOCK, &deferrable_sigset, &oldset);
+    pthread_mutex_lock(&all_threads_lock);
+    for (thread = all_threads; thread; thread = thread->next) {
+        if (thread->os_thread == os_thread) {
+            int status = pthread_kill(os_thread, signal);
+            if (status)
+                lose("kill_safely: pthread_kill failed with %d\n", status);
+            break;
+        }
+    }
+    pthread_mutex_unlock(&all_threads_lock);
+    thread_sigmask(SIG_SETMASK,&oldset,0);
+    if (thread)
+        return 0;
+    else
+        return -1;
+#else
+    int status;
+    if (os_thread != 0)
+        lose("kill_safely: who do you want to kill? %d?\n", os_thread);
+    status = raise(signal);
+    if (status == 0) {
+        return 0;
+    } else {
+        lose("cannot raise signal %d, %d %s\n",
+             signal, status, strerror(errno));
+    }
+#endif
+}