/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#include "sbcl.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif

#include "runtime.h"
#include "validate.h"           /* for CONTROL_STACK_SIZE etc */
#include "alloc.h"
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "gc-internal.h"
#ifdef LISP_FEATURE_WIN32
/*
 * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
 * so define it arbitrarily
 */
#define SIGSTKSZ 1024
#endif

#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_SB_THREAD)
#define QUEUE_FREEABLE_THREAD_STACKS
#endif

#ifdef LISP_FEATURE_FREEBSD
#define CREATE_CLEANUP_THREAD
#define LOCK_CREATE_THREAD
#endif

#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */
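
/* A dead thread's stack cannot be released until it is certain that
 * the OS thread has exited. Each platform picks a reclamation
 * strategy above: Darwin queues stacks and reaps them in batches
 * (QUEUE_FREEABLE_THREAD_STACKS), FreeBSD spawns a dedicated cleanup
 * thread per dead thread (CREATE_CLEANUP_THREAD), and everything else
 * hands the stack off through a single atomic slot so that each dying
 * thread frees its predecessor's stack. */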
struct freeable_stack {
#ifdef QUEUE_FREEABLE_THREAD_STACKS
    struct freeable_stack *next;
#endif
    os_thread_t os_thread;
    os_vm_address_t stack;
};

#ifdef QUEUE_FREEABLE_THREAD_STACKS
static struct freeable_stack * volatile freeable_stack_queue = 0;
static int freeable_stack_count = 0;
pthread_mutex_t freeable_stack_lock = PTHREAD_MUTEX_INITIALIZER;
#else
static struct freeable_stack * volatile freeable_stack = 0;
#endif

int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
struct thread * volatile all_threads;
extern struct interrupt_data * global_interrupt_data;

#ifdef LISP_FEATURE_SB_THREAD
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef LOCK_CREATE_THREAD
static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif
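
/* Threads are chained into the doubly-linked all_threads list through
 * their next/prev slots. In threaded builds all_threads_lock must be
 * held around these updates; the initial thread links itself in
 * before any other thread can exist. */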
static void
link_thread(struct thread *th)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    th->prev=0;
    all_threads=th;
}

#ifdef LISP_FEATURE_SB_THREAD
static void
unlink_thread(struct thread *th)
{
    if (th->prev)
        th->prev->next = th->next;
    else
        all_threads = th->next;
    if (th->next)
        th->next->prev = th->prev;
}
#endif
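
/* First code that runs in the initial thread: finish the arch- and
 * OS-specific setup, publish the thread on all_threads, arm the
 * control stack guard page, then call the initial Lisp function,
 * which arrives via the no_tls_value_marker slot (see
 * create_thread_struct below). */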
static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    link_thread(th);
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_guard_page(1);
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}
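
/* All the memory a thread needs is reserved as one block: the control
 * stack, the binding stack, the alien stack and the per-thread data
 * (struct thread plus its TLS vector), in that order, with
 * 32 * SIGSTKSZ of extra room. See create_thread_struct for how the
 * block is carved up. */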
#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE + dynamic_values_bytes + \
                            32 * SIGSTKSZ)

#ifdef LISP_FEATURE_SB_THREAD

#ifdef QUEUE_FREEABLE_THREAD_STACKS

static void
queue_freeable_thread_stack(struct thread *thread_to_be_cleaned_up)
{
    if (thread_to_be_cleaned_up) {
        pthread_mutex_lock(&freeable_stack_lock);
        if (freeable_stack_queue) {
            struct freeable_stack *new_freeable_stack = 0, *next;
            next = freeable_stack_queue;
            while (next->next) {
                next = next->next;
            }
            new_freeable_stack = (struct freeable_stack *)
                os_validate(0, sizeof(struct freeable_stack));
            new_freeable_stack->next = NULL;
            new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
            new_freeable_stack->stack = (os_vm_address_t)
                thread_to_be_cleaned_up->control_stack_start;
            next->next = new_freeable_stack;
            freeable_stack_count++;
        } else {
            struct freeable_stack *new_freeable_stack = 0;
            new_freeable_stack = (struct freeable_stack *)
                os_validate(0, sizeof(struct freeable_stack));
            new_freeable_stack->next = NULL;
            new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
            new_freeable_stack->stack = (os_vm_address_t)
                thread_to_be_cleaned_up->control_stack_start;
            freeable_stack_queue = new_freeable_stack;
            freeable_stack_count++;
        }
        pthread_mutex_unlock(&freeable_stack_lock);
    }
}

#define FREEABLE_STACK_QUEUE_SIZE 4
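
/* Once more than FREEABLE_STACK_QUEUE_SIZE stacks are queued, join
 * the oldest dead thread and release its memory. Called from
 * create_os_thread, so cleanup is amortized over thread creation. */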
static void
free_freeable_stacks() {
    if (freeable_stack_queue && (freeable_stack_count > FREEABLE_STACK_QUEUE_SIZE)) {
        struct freeable_stack* old;
        pthread_mutex_lock(&freeable_stack_lock);
        old = freeable_stack_queue;
        freeable_stack_queue = old->next;
        freeable_stack_count--;
        gc_assert(pthread_join(old->os_thread, NULL) == 0);
        FSHOW((stderr, "freeing thread %x stack\n", old->os_thread));
        os_invalidate(old->stack, THREAD_STRUCT_SIZE);
        os_invalidate((os_vm_address_t)old, sizeof(struct freeable_stack));
        pthread_mutex_unlock(&freeable_stack_lock);
    }
}

#elif defined(CREATE_CLEANUP_THREAD)
static void *
cleanup_thread(void *arg)
{
    struct freeable_stack *freeable = arg;
    pthread_t self = pthread_self();

    FSHOW((stderr, "/cleaner thread(%p): joining %p\n",
           self, freeable->os_thread));
    gc_assert(pthread_join(freeable->os_thread, NULL) == 0);
    FSHOW((stderr, "/cleaner thread(%p): free stack %p\n",
           self, freeable->stack));
    os_invalidate(freeable->stack, THREAD_STRUCT_SIZE);
    free(freeable);

    pthread_detach(self);

    return NULL;
}

static void
create_cleanup_thread(struct thread *thread_to_be_cleaned_up)
{
    pthread_t thread;
    int result;

    if (thread_to_be_cleaned_up) {
        struct freeable_stack *freeable =
            malloc(sizeof(struct freeable_stack));
        gc_assert(freeable != NULL);
        freeable->os_thread = thread_to_be_cleaned_up->os_thread;
        freeable->stack =
            (os_vm_address_t) thread_to_be_cleaned_up->control_stack_start;
        result = pthread_create(&thread, NULL, cleanup_thread, freeable);
        gc_assert(result == 0);
    }
}

#else
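
/* Default strategy: a single freeable_stack slot. Each dying thread
 * atomically swaps its own stack record into the slot with
 * swap_lispobjs and, if the swap returned a previous entry, joins
 * that thread and frees its memory. At most one dead thread's stack
 * is left outstanding at any time. */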
static void
free_thread_stack_later(struct thread *thread_to_be_cleaned_up)
{
    struct freeable_stack *new_freeable_stack = 0;
    if (thread_to_be_cleaned_up) {
        new_freeable_stack = (struct freeable_stack *)
            os_validate(0, sizeof(struct freeable_stack));
        new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
        new_freeable_stack->stack = (os_vm_address_t)
            thread_to_be_cleaned_up->control_stack_start;
    }
    new_freeable_stack = (struct freeable_stack *)
        swap_lispobjs((lispobj *)(void *)&freeable_stack,
                      (lispobj)new_freeable_stack);
    if (new_freeable_stack) {
        FSHOW((stderr,"/reaping %p\n", (void*) new_freeable_stack->os_thread));
        /* Under NPTL pthread_join really waits until the thread
         * exits and the stack can be safely freed. This is sadly not
         * mandated by the pthread spec. */
        gc_assert(pthread_join(new_freeable_stack->os_thread, NULL) == 0);
        os_invalidate(new_freeable_stack->stack, THREAD_STRUCT_SIZE);
        os_invalidate((os_vm_address_t) new_freeable_stack,
                      sizeof(struct freeable_stack));
    }
}
#endif

/* this is the first thing that runs in the child (which is why the
 * silly calling convention). Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done
 */
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    int result, lock_ret;

    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    th->os_thread=thread_self();
    protect_control_stack_guard_page(1);
    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it there is no danger
     * of deadlocking even with SIG_STOP_FOR_GC blocked (which it is
     * not). */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    link_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    result = funcall0(function);
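
    /* The Lisp function has returned, so this thread is dying.
     * Blockable signals are masked from here on so GC can no longer
     * suspend the thread mid-teardown; it is marked STATE_DEAD, and
     * its allocation region and its entry in all_threads are cleaned
     * up under all_threads_lock. */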
    block_blockable_signals();
    th->state=STATE_DEAD;

    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    gc_alloc_update_page_tables(0, &th->alloc_region);
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
    os_invalidate((os_vm_address_t)th->interrupt_data,
                  (sizeof (struct interrupt_data)));

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    FSHOW((stderr, "Deallocating mach port %x\n", THREAD_STRUCT_TO_EXCEPTION_PORT(th)));
    mach_port_move_member(mach_task_self(),
                          THREAD_STRUCT_TO_EXCEPTION_PORT(th),
                          MACH_PORT_NULL);
    mach_port_deallocate(mach_task_self(),
                         THREAD_STRUCT_TO_EXCEPTION_PORT(th));
    mach_port_destroy(mach_task_self(),
                      THREAD_STRUCT_TO_EXCEPTION_PORT(th));
#endif

#ifdef QUEUE_FREEABLE_THREAD_STACKS
    queue_freeable_thread_stack(th);
#elif defined(CREATE_CLEANUP_THREAD)
    create_cleanup_thread(th);
#else
    free_thread_stack_later(th);
#endif

    FSHOW((stderr,"/exiting thread %p\n", thread_self()));
    return result;
}

#endif /* LISP_FEATURE_SB_THREAD */
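
/* Release everything create_thread_struct reserved: the separately
 * allocated interrupt_data block and the single big block holding the
 * stacks and the thread structure itself. */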
static void
free_thread_struct(struct thread *th)
{
    if (th->interrupt_data)
        os_invalidate((os_vm_address_t) th->interrupt_data,
                      (sizeof (struct interrupt_data)));
    os_invalidate((os_vm_address_t) th->control_stack_start,
                  THREAD_STRUCT_SIZE);
}

/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;
#ifdef LISP_FEATURE_SB_THREAD
    int i;
#endif

#ifdef CREATE_CLEANUP_THREAD
    /* Give a chance for cleanup threads to run. */
    sched_yield();
#endif
    /* may as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces)
        return NULL;
    per_thread=(union per_thread_data *)
        ((void*)spaces+
         THREAD_CONTROL_STACK_SIZE+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);

#ifdef LISP_FEATURE_SB_THREAD
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
    if (all_threads == 0) {
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
            SetSymbolValue
                (FREE_TLS_INDEX,
                 /* FIXME: should be MAX_INTERRUPTS -1 ? */
                 make_fixnum(MAX_INTERRUPTS+
                             sizeof(struct thread)/sizeof(lispobj)),
                 0);
            SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
        }
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
        STATIC_TLS_INIT(PSEUDO_ATOMIC_BITS,pseudo_atomic_bits);
#endif
#undef STATIC_TLS_INIT
    }
#endif

    th=&per_thread->thread;
    th->control_stack_start = spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
    th->control_stack_end = th->binding_stack_start;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    th->binding_stack_pointer=th->binding_stack_start;
    th->this=th;
    th->os_thread=0;
    th->state=STATE_RUNNING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    th->pseudo_atomic_bits=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls. So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime. It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(BINDING_STACK_POINTER,(lispobj)th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_BITS,(lispobj)th->pseudo_atomic_bits,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(GC_PENDING,NIL,th);
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif

    th->interrupt_data = (struct interrupt_data *)
        os_validate(0,(sizeof (struct interrupt_data)));
    if (!th->interrupt_data) {
        free_thread_struct(th);
        return 0;
    }
    th->interrupt_data->pending_handler = 0;
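    /* A fresh thread has no TLS values yet, so the no_tls_value_marker
     * slot is free to double as the mailbox that carries the initial
     * function to the trampoline; the trampoline reads it and resets
     * the slot before running any Lisp code. */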
    th->no_tls_value_marker=initial_function;

    return th;
}

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
mach_port_t setup_mach_exception_handling_thread();
kern_return_t mach_thread_init(mach_port_t thread_exception_port);
#endif

void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    if(th) {
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
        setup_mach_exception_handling_thread();
#endif
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
}

#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif

boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    pthread_attr_t attr;
    sigset_t newset,oldset;
    boolean r=1;
    /* sizecode and addrcode are only reported in the debug output
     * below, so initialize them to keep that output well defined */
    int retcode, initcode, sizecode = 0, addrcode = 0;

    FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_lock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
#endif
    sigemptyset(&newset);
    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child process is not linked onto
     * all_threads until it's ready. */
    sigaddset_deferrable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);

#if defined(LISP_FEATURE_DARWIN)
#define CONTROL_STACK_ADJUST 8192 /* darwin wants page-aligned stacks */
#else
#define CONTROL_STACK_ADJUST 16
#endif

    if((initcode = pthread_attr_init(&attr)) ||
       /* FIXME: why do we even have this in the first place? */
       (pthread_attr_setstack(&attr,th->control_stack_start,
                              THREAD_CONTROL_STACK_SIZE-CONTROL_STACK_ADJUST)) ||
#undef CONTROL_STACK_ADJUST
       (retcode = pthread_create
        (kid_tid,&attr,(void *(*)(void *))new_thread_trampoline,th))) {
        FSHOW_SIGNAL((stderr, "init, size, addr = %d, %d, %d\n", initcode, sizecode, addrcode));
        FSHOW_SIGNAL((stderr, "pthread_create returned %d, errno %d\n", retcode, errno));
        FSHOW_SIGNAL((stderr, "wanted stack size %d, min stack size %d\n",
                      THREAD_CONTROL_STACK_SIZE-16, PTHREAD_STACK_MIN));
        if(retcode < 0) {
            perror("create_os_thread");
        }
        r=0;
    }

#ifdef QUEUE_FREEABLE_THREAD_STACKS
    free_freeable_stacks();
#endif
    thread_sigmask(SIG_SETMASK,&oldset,0);
#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
#endif
    return r;
}

os_thread_t create_thread(lispobj initial_function) {
    struct thread *th;
    os_thread_t kid_tid;

    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage. initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if(th==0) return 0;

    if (create_os_thread(th,&kid_tid)) {
        return kid_tid;
    } else {
        free_thread_struct(th);
        return 0;
    }
}

/* Send the signo to os_thread, retry if the rt signal queue is
 * full. */
int
kill_thread_safely(os_thread_t os_thread, int signo)
{
    int r;
    /* The man page does not mention EAGAIN as a valid return value
     * for either pthread_kill or kill. But that's theory, this is
     * practice. By waiting here we assume that the delivery of this
     * signal is not necessary for the delivery of the signals in the
     * queue. In other words, we _assume_ there are no deadlocks. */
    while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
        /* wait a bit then try again in the hope of the rt signal
         * queue not being full */
        FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
        /* FIXME: some kind of backoff (random, exponential) would be
         * nice. */
        sleep(1);
    }
    return r;
}

int signal_interrupt_thread(os_thread_t os_thread)
{
    int status = kill_thread_safely(os_thread, SIG_INTERRUPT_THREAD);
    if (status == 0) {
        return 0;
    } else if (status == ESRCH) {
        return -1;
    } else {
        lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s\n",
             os_thread, status, strerror(status));
    }
}

/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 */

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
#ifdef LOCK_CREATE_THREAD
    /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
     * on FreeBSD. */
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock, thread=%lu\n",
                  th->os_thread));
    lock_ret = pthread_mutex_lock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock, thread=%lu\n",
                  th->os_thread));
#endif
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
                  th->os_thread));
    /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
                  th->os_thread));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: p->state: %x\n", p->state));
        if((p!=th) && (p->state==STATE_RUNNING)) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %x, os_thread %x\n",
                          p, p->os_thread));
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(p->state==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    /* wait for the running threads to stop or finish */
    for(p=all_threads;p;) {
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: th: %p, p: %p\n", th, p));
        if((p!=th) && (p->state==STATE_RUNNING)) {
            sched_yield();
        } else {
            p=p->next;
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}

void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if((p!=th) && (p->state!=STATE_DEAD)) {
            if(p->state!=STATE_SUSPENDED) {
                lose("gc_start_the_world: wrong thread state is %d\n",
                     fixnum_value(p->state));
            }
            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                          p->os_thread));
            p->state=STATE_RUNNING;
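
            /* Platforms that define a separate SIG_RESUME_FROM_GC use
             * it to wake the suspended thread; the rest send a second
             * SIG_STOP_FOR_GC, which the handler of an already
             * suspended thread treats as the resume signal. */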
#if defined(SIG_RESUME_FROM_GC)
            status=kill_thread_safely(p->os_thread,SIG_RESUME_FROM_GC);
#else
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
#endif
            if (status) {
                lose("cannot resume thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    /* If we waited here until all threads leave STATE_SUSPENDED, then
     * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
     * performance implications, but does away with the 'rt signal
     * queue full' problem. */

    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#ifdef LOCK_CREATE_THREAD
    lock_ret = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(lock_ret == 0);
#endif

    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}