/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif

#include "runtime.h"
#include "validate.h"           /* for BINDING_STACK_SIZE etc */
#include "interrupt.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "alloc.h"
#include "gc-internal.h"
#ifdef LISP_FEATURE_WIN32
/*
 * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
 * so define it arbitrarily
 */
#define SIGSTKSZ 1024
#endif

#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_SB_THREAD)
#define DELAY_THREAD_POST_MORTEM 5
#define LOCK_CREATE_THREAD
#endif

#ifdef LISP_FEATURE_FREEBSD
#define CREATE_CLEANUP_THREAD
#define LOCK_CREATE_THREAD
#endif
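
/* As used below, DELAY_THREAD_POST_MORTEM queues that many dead
 * threads before reclaiming their stacks, and LOCK_CREATE_THREAD
 * serializes thread creation against stop-the-world (see
 * create_os_thread and gc_stop_the_world). */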
66 #ifdef LISP_FEATURE_SB_THREAD
67 struct thread_post_mortem {
68 #ifdef DELAY_THREAD_POST_MORTEM
69 struct thread_post_mortem *next;
71 os_thread_t os_thread;
72 pthread_attr_t *os_attr;
73 os_vm_address_t os_address;
76 #ifdef DELAY_THREAD_POST_MORTEM
77 static int pending_thread_post_mortem_count = 0;
78 pthread_mutex_t thread_post_mortem_lock = PTHREAD_MUTEX_INITIALIZER;
80 static struct thread_post_mortem * volatile pending_thread_post_mortem = 0;
int dynamic_values_bytes=TLS_SIZE*sizeof(lispobj);  /* same for all threads */
struct thread *all_threads;
extern struct interrupt_data * global_interrupt_data;
87 #ifdef LISP_FEATURE_SB_THREAD
88 pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
89 #ifdef LOCK_CREATE_THREAD
90 static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
92 #ifdef LISP_FEATURE_GCC_TLS
93 __thread struct thread *current_thread;
95 pthread_key_t lisp_thread = 0;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif
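
/* all_threads is a doubly-linked list of live threads; in threaded
 * builds callers must hold all_threads_lock around the two helpers
 * below (see new_thread_trampoline). */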
static void
link_thread(struct thread *th)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    th->prev=0;
    all_threads=th;
}
#ifdef LISP_FEATURE_SB_THREAD
static void
unlink_thread(struct thread *th)
{
    if (th->prev)
        th->prev->next = th->next;
    else
        all_threads = th->next;
    if (th->next)
        th->next->prev = th->prev;
}
#endif
static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
#ifdef LISP_FEATURE_SB_THREAD
    pthread_setspecific(lisp_thread, (void *)1);
#endif
#if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_PPC)
    /* SIG_STOP_FOR_GC defaults to blocked on PPC? */
    unblock_gc_signals(0,0);
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    link_thread(th);
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_hard_guard_page(1, NULL);
    protect_binding_stack_hard_guard_page(1, NULL);
    protect_alien_stack_hard_guard_page(1, NULL);
    protect_control_stack_guard_page(1, NULL);
    protect_binding_stack_guard_page(1, NULL);
    protect_alien_stack_guard_page(1, NULL);
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}
#ifdef LISP_FEATURE_SB_THREAD
#define THREAD_STATE_LOCK_SIZE \
    ((sizeof(pthread_mutex_t))+(sizeof(pthread_cond_t)))
#else
#define THREAD_STATE_LOCK_SIZE 0
#endif

#define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE +                          \
                            THREAD_STATE_LOCK_SIZE +                    \
                            dynamic_values_bytes +                      \
                            32 * SIGSTKSZ +                             \
                            THREAD_ALIGNMENT_BYTES)
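
/* One contiguous allocation per thread; create_thread_struct below
 * carves it up in this order: control stack, binding stack, alien
 * stack, state lock + condition variable, then the per_thread_data
 * (TLS) area. The 32 * SIGSTKSZ term is (apparently) slack for signal
 * handling, and THREAD_ALIGNMENT_BYTES is what makes aligning up
 * safe. */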
#ifdef LISP_FEATURE_SB_THREAD
/* THREAD POST MORTEM CLEANUP
 *
 * Memory allocated for the thread stacks cannot be reclaimed while
 * the thread is still alive, so we need a mechanism for post mortem
 * cleanups. FIXME: We actually have three, for historical reasons as
 * the saying goes. Do we really need three? Nikodemus guesses that
 * not anymore, now that we properly call pthread_attr_destroy before
 * freeing the stack. */
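/* (The three being: the DELAY_THREAD_POST_MORTEM queue, the
 * CREATE_POST_MORTEM_THREAD cleanup thread, and the default
 * single-slot handoff via swap_lispobjs -- all visible in
 * schedule_thread_post_mortem below.) */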
static struct thread_post_mortem *
plan_thread_post_mortem(struct thread *corpse)
{
    if (corpse) {
        struct thread_post_mortem *post_mortem = malloc(sizeof(struct thread_post_mortem));
        gc_assert(post_mortem);
        post_mortem->os_thread = corpse->os_thread;
        post_mortem->os_attr = corpse->os_attr;
        post_mortem->os_address = corpse->os_address;
#ifdef DELAY_THREAD_POST_MORTEM
        post_mortem->next = NULL;
#endif
        return post_mortem;
    } else {
        /* FIXME: When does this happen? */
        return NULL;
    }
}
static void
perform_thread_post_mortem(struct thread_post_mortem *post_mortem)
{
#ifdef CREATE_POST_MORTEM_THREAD
    pthread_detach(pthread_self());
#endif
    if (post_mortem) {
        gc_assert(!pthread_join(post_mortem->os_thread, NULL));
        gc_assert(!pthread_attr_destroy(post_mortem->os_attr));
        free(post_mortem->os_attr);
        os_invalidate(post_mortem->os_address, THREAD_STRUCT_SIZE);
        free(post_mortem);
    }
}
static void
schedule_thread_post_mortem(struct thread *corpse)
{
    struct thread_post_mortem *post_mortem = NULL;
    if (corpse) {
        post_mortem = plan_thread_post_mortem(corpse);

#ifdef DELAY_THREAD_POST_MORTEM
        pthread_mutex_lock(&thread_post_mortem_lock);
        /* First stick the new post mortem to the end of the queue. */
        if (pending_thread_post_mortem) {
            struct thread_post_mortem *next = pending_thread_post_mortem;
            while (next->next)
                next = next->next;
            next->next = post_mortem;
        } else {
            pending_thread_post_mortem = post_mortem;
        }
        /* Then, if there are enough things in the queue, clean up one
         * from the head -- or increment the count, and null out the
         * post_mortem we have. */
        if (pending_thread_post_mortem_count > DELAY_THREAD_POST_MORTEM) {
            post_mortem = pending_thread_post_mortem;
            pending_thread_post_mortem = post_mortem->next;
        } else {
            pending_thread_post_mortem_count++;
            post_mortem = NULL;
        }
        pthread_mutex_unlock(&thread_post_mortem_lock);
        /* Finally run the cleanup, if any. */
        perform_thread_post_mortem(post_mortem);
#elif defined(CREATE_POST_MORTEM_THREAD)
        pthread_t thread;
        gc_assert(!pthread_create(&thread, NULL,
                                  (void *(*)(void *))perform_thread_post_mortem,
                                  post_mortem));
#else
        post_mortem = (struct thread_post_mortem *)
            swap_lispobjs((lispobj *)(void *)&pending_thread_post_mortem,
                          (lispobj)post_mortem);
        perform_thread_post_mortem(post_mortem);
#endif
    }
}
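
/* The point of DELAY_THREAD_POST_MORTEM, presumably, is that the last
 * few corpses stay queued and are only reaped once enough newer
 * deaths have been enqueued, so a dying thread's stack is not
 * unmapped while it may still be running its own exit path. */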
/* this is the first thing that runs in the child (which is why the
 * silly calling convention). Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done
 */
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    int result, lock_ret;

    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    check_deferrables_blocked_or_lose(0);
    check_gc_signals_unblocked_or_lose(0);
    pthread_setspecific(lisp_thread, (void *)1);
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    th->os_thread=thread_self();
    protect_control_stack_guard_page(1, NULL);
    protect_binding_stack_guard_page(1, NULL);
    protect_alien_stack_guard_page(1, NULL);
    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it, there is no
     * danger of deadlocking even with SIG_STOP_FOR_GC blocked (which
     * it is not). */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    link_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    result = funcall0(function);

    /* Block GC */
    block_blockable_signals(0, 0);
    set_thread_state(th, STATE_DEAD);

    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
    pthread_mutex_destroy(th->state_lock);
    pthread_cond_destroy(th->state_cond);

    os_invalidate((os_vm_address_t)th->interrupt_data,
                  (sizeof (struct interrupt_data)));

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    FSHOW((stderr, "Deallocating mach port %x\n", THREAD_STRUCT_TO_EXCEPTION_PORT(th)));
    mach_port_move_member(current_mach_task,
                          THREAD_STRUCT_TO_EXCEPTION_PORT(th),
                          MACH_PORT_NULL);
    mach_port_deallocate(current_mach_task,
                         THREAD_STRUCT_TO_EXCEPTION_PORT(th));
    mach_port_destroy(current_mach_task,
                      THREAD_STRUCT_TO_EXCEPTION_PORT(th));
#endif

    schedule_thread_post_mortem(th);
    FSHOW((stderr,"/exiting thread %lu\n", thread_self()));
    return result;
}

#endif /* LISP_FEATURE_SB_THREAD */
static void
free_thread_struct(struct thread *th)
{
    if (th->interrupt_data)
        os_invalidate((os_vm_address_t) th->interrupt_data,
                      (sizeof (struct interrupt_data)));
    os_invalidate((os_vm_address_t) th->os_address,
                  THREAD_STRUCT_SIZE);
}
#ifdef LISP_FEATURE_SB_THREAD
/* FIXME: should be MAX_INTERRUPTS -1 ? */
const unsigned int tls_index_start =
    MAX_INTERRUPTS + sizeof(struct thread)/sizeof(lispobj);
#endif
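
/* TLS indices are handed out in word units; the first free index
 * apparently lies past the struct thread slots plus MAX_INTERRUPTS
 * worth of interrupt-context slots, since the TLS vector overlays
 * that same block (see STATIC_TLS_INIT in create_thread_struct). */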
/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;
    void *aligned_spaces=0;
#ifdef LISP_FEATURE_SB_THREAD
    unsigned int i;
#endif
    /* May as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed. SPACES must be appropriately aligned, since the GC
     * expects the control stack to start at a page boundary -- and
     * the OS may have even more rigorous requirements. We can't rely
     * on the alignment passed from os_validate, since that might
     * assume the current (e.g. 4k) pagesize, while we calculate with
     * the biggest (e.g. 64k) pagesize allowed by the ABI. */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces)
        return NULL;
    /* Aligning up is safe as THREAD_STRUCT_SIZE has
     * THREAD_ALIGNMENT_BYTES padding. */
    aligned_spaces = (void *)((((unsigned long)(char *)spaces)
                               + THREAD_ALIGNMENT_BYTES-1)
                              &~(unsigned long)(THREAD_ALIGNMENT_BYTES-1));
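    /* E.g. (made-up numbers) with THREAD_ALIGNMENT_BYTES == 0x8000,
     * spaces == 0x20001234 gives
     *   (0x20001234 + 0x7fff) & ~0x7fff == 0x20008000,
     * while an already aligned 0x20008000 maps to itself. */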
    per_thread=(union per_thread_data *)
        (aligned_spaces+
         thread_control_stack_size+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE +
         THREAD_STATE_LOCK_SIZE);
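
    /* In threaded builds the dynamic_values (TLS) area overlays struct
     * thread, so the loop below clears every slot to "no value", and
     * the first thread to exist wires up the static symbols'
     * tls_index fields via STATIC_TLS_INIT. */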
#ifdef LISP_FEATURE_SB_THREAD
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
    if (all_threads == 0) {
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
            SetSymbolValue(FREE_TLS_INDEX,tls_index_start << WORD_SHIFT,0);
            SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
        }
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  (THREAD_SLOT_OFFSET_WORDS(field) << WORD_SHIFT)

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
#ifdef BINDING_STACK_POINTER
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
#endif
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
#ifdef ALIEN_STACK
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
        STATIC_TLS_INIT(PSEUDO_ATOMIC_BITS,pseudo_atomic_bits);
#endif
#undef STATIC_TLS_INIT
    }
#endif
    th=&per_thread->thread;
    th->os_address = spaces;
    th->control_stack_start = aligned_spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+thread_control_stack_size);
    th->control_stack_end = th->binding_stack_start;
    th->control_stack_guard_page_protected = T;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    set_binding_stack_pointer(th,th->binding_stack_start);
    th->this=th;
    th->os_thread=0;
#ifdef LISP_FEATURE_SB_THREAD
    th->os_attr=malloc(sizeof(pthread_attr_t));
    th->state_lock=(pthread_mutex_t *)((void *)th->alien_stack_start +
                                       ALIEN_STACK_SIZE);
    pthread_mutex_init(th->state_lock, NULL);
    th->state_cond=(pthread_cond_t *)((void *)th->state_lock +
                                      (sizeof(pthread_mutex_t)));
    pthread_cond_init(th->state_cond, NULL);
#endif
    th->state=STATE_RUNNING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64) || defined(LISP_FEATURE_SB_THREAD)
    th->pseudo_atomic_bits=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif
#ifdef LISP_FEATURE_SB_THREAD
    /* This parallels the same logic in globals.c for the
     * single-threaded foreign_function_call_active, KLUDGE and
     * all. */
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    th->foreign_function_call_active = 0;
#else
    th->foreign_function_call_active = 1;
#endif
#endif
#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls. So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime. It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_BITS,(lispobj)th->pseudo_atomic_bits,th);
#endif
#endif
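    /* (In the unithread build SymbolValue/SetSymbolValue hit the
     * symbol's global value slot rather than TLS, which is why the
     * copies above are needed at all.) */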
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(ALLOW_WITH_INTERRUPTS,T,th);
    bind_variable(GC_PENDING,NIL,th);
    bind_variable(ALLOC_SIGNAL,NIL,th);
#ifdef PINNED_OBJECTS
    bind_variable(PINNED_OBJECTS,NIL,th);
#endif
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif
#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    access_control_stack_pointer(th)=th->control_stack_start;
#endif
    th->interrupt_data = (struct interrupt_data *)
        os_validate(0,(sizeof (struct interrupt_data)));
    if (!th->interrupt_data) {
        free_thread_struct(th);
        return 0;
    }
    th->interrupt_data->pending_handler = 0;
    th->interrupt_data->gc_blocked_deferrables = 0;
#ifdef LISP_FEATURE_PPC
    th->interrupt_data->allocation_trap_context = 0;
#endif
    th->no_tls_value_marker=initial_function;

    return th;
}
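
/* Note that no_tls_value_marker doubles as a mailbox: the initial
 * function is parked there until the thread's trampoline fetches it
 * and restores NO_TLS_VALUE_MARKER_WIDETAG (see the trampolines
 * above). */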
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
mach_port_t setup_mach_exception_handling_thread();
kern_return_t mach_thread_init(mach_port_t thread_exception_port);
#endif
void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
#ifdef LISP_FEATURE_SB_THREAD
    pthread_key_create(&lisp_thread, 0);
#endif
    if(th) {
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
        setup_mach_exception_handling_thread();
#endif
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
}
#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif
boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    sigset_t oldset;
    boolean r=1;
    int retcode = 0, initcode;

    FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));

    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child process is not linked onto
     * all_threads until it's ready. */
    block_deferrable_signals(0, &oldset);

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_lock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
#endif

    if((initcode = pthread_attr_init(th->os_attr)) ||
       /* call_into_lisp_first_time switches the stack for the initial
        * thread. For the others, we use this. */
       (pthread_attr_setstack(th->os_attr,th->control_stack_start,
                              thread_control_stack_size)) ||
       (retcode = pthread_create
        (kid_tid,th->os_attr,(void *(*)(void *))new_thread_trampoline,th))) {
        FSHOW_SIGNAL((stderr, "init = %d\n", initcode));
        FSHOW_SIGNAL((stderr, "pthread_create returned %d, errno %d\n",
                      retcode, errno));
        if(retcode < 0) {
            perror("create_os_thread");
        }
        r=0;
    }

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
#endif

    thread_sigmask(SIG_SETMASK,&oldset,0);
    return r;
}
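
/* POSIX guarantees that a new thread inherits its creator's signal
 * mask, so blocking deferrable signals around pthread_create above is
 * what keeps the child quiet until it is set up properly. */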
os_thread_t create_thread(lispobj initial_function) {
    struct thread *th, *thread = arch_os_get_current_thread();
    os_thread_t kid_tid = 0;

    /* Must defend against async unwinds. */
    if (SymbolValue(INTERRUPTS_ENABLED, thread) != NIL)
        lose("create_thread is not safe when interrupts are enabled.\n");

    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage. initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if (th && !create_os_thread(th,&kid_tid)) {
        free_thread_struct(th);
        kid_tid = 0;
    }
    return kid_tid;
}
/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 */

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
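
/* In outline, the handshake is: gc_stop_the_world takes
 * all_threads_lock, pthread_kills every RUNNING thread with
 * SIG_STOP_FOR_GC, and waits for each to leave STATE_RUNNING; the
 * handler (elsewhere in the runtime) parks the thread in
 * STATE_SUSPENDED until gc_start_the_world sets it back to
 * STATE_RUNNING. */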
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
#ifdef LOCK_CREATE_THREAD
    /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
     * on FreeBSD. */
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock\n"));
    lock_ret = pthread_mutex_lock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock\n"));
#endif
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock\n"));
    /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock\n"));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: thread=%lu, state=%x\n",
                      p->os_thread, thread_state(p)));
        if((p!=th) && ((thread_state(p)==STATE_RUNNING))) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending thread %lu\n",
                          p->os_thread));
            /* We already hold all_threads_lock, P can become DEAD but
             * cannot exit, ergo it's safe to use pthread_kill. */
            status=pthread_kill(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(thread_state(p)==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    for(p=all_threads;p;p=p->next) {
        if (p!=th) {
            FSHOW_SIGNAL
                ((stderr,
                  "/gc_stop_the_world: waiting for thread=%lu: state=%x\n",
                  p->os_thread, thread_state(p)));
            wait_for_thread_state_change(p, STATE_RUNNING);
            if (p->state == STATE_RUNNING)
                lose("/gc_stop_the_world: unexpected state");
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}
void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int lock_ret;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if (p!=th) {
            lispobj state = thread_state(p);
            if (state != STATE_DEAD) {
                if(state != STATE_SUSPENDED) {
                    lose("gc_start_the_world: wrong thread state is %d\n",
                         fixnum_value(state));
                }
                FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                              p->os_thread));
                set_thread_state(p, STATE_RUNNING);
            }
        }
    }

    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#ifdef LOCK_CREATE_THREAD
    lock_ret = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(lock_ret == 0);
#endif

    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}

#endif
int
thread_yield()
{
#ifdef LISP_FEATURE_SB_THREAD
    return sched_yield();
#else
    return 0;
#endif
}
/* If the thread id given does not belong to a running thread (it has
 * exited or never even existed) pthread_kill _may_ fail with ESRCH,
 * but it is also allowed to just segfault, see
 * <http://udrepper.livejournal.com/16844.html>.
 *
 * Relying on thread ids can easily backfire since ids are recycled
 * (NPTL recycles them extremely fast) so a signal can be sent to
 * another process if the one it was sent to exited.
 *
 * We send signals in two places: signal_interrupt_thread sends a
 * signal that's harmless if delivered to another thread, but
 * SIG_STOP_FOR_GC is fatal.
 *
 * For these reasons, we must make sure that the thread is still alive
 * when the pthread_kill is called and return if the thread is
 * dead. */
int
kill_safely(os_thread_t os_thread, int signal)
{
    FSHOW_SIGNAL((stderr,"/kill_safely: %lu, %d\n", os_thread, signal));
    {
#ifdef LISP_FEATURE_SB_THREAD
        sigset_t oldset;
        struct thread *thread;
        /* pthread_kill is not async signal safe and we don't want to be
         * interrupted while holding the lock. */
        block_deferrable_signals(0, &oldset);
        pthread_mutex_lock(&all_threads_lock);
        for (thread = all_threads; thread; thread = thread->next) {
            if (thread->os_thread == os_thread) {
                int status = pthread_kill(os_thread, signal);
                if (status)
                    lose("kill_safely: pthread_kill failed with %d\n", status);
                break;
            }
        }
        pthread_mutex_unlock(&all_threads_lock);
        thread_sigmask(SIG_SETMASK,&oldset,0);
        if (thread)
            return 0;
        else
            return -1;
#else
        int status;
        if (os_thread != 0)
            lose("kill_safely: who do you want to kill? %d?\n", os_thread);
        /* Dubious (as in don't know why it works) workaround for the
         * signal sometimes not being generated on darwin. */
#ifdef LISP_FEATURE_DARWIN
        {
            sigset_t oldset;
            sigprocmask(SIG_BLOCK, &deferrable_sigset, &oldset);
            status = raise(signal);
            sigprocmask(SIG_SETMASK,&oldset,0);
        }
#else
        status = raise(signal);
#endif
        if (status == 0) {
            return 0;
        } else {
            lose("cannot raise signal %d, %d %s\n",
                 signal, status, strerror(errno));
        }
#endif
    }
}