/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif

#include "validate.h"           /* for BINDING_STACK_SIZE etc */
#include "thread.h"
#include "target-arch-os.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "gc-internal.h"

#ifdef LISP_FEATURE_WIN32
/*
 * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
 * so define it arbitrarily
 */
#define SIGSTKSZ 1024
#endif

#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_SB_THREAD)
#define DELAY_THREAD_POST_MORTEM 5
#define LOCK_CREATE_THREAD
#endif

#ifdef LISP_FEATURE_FREEBSD
#define CREATE_CLEANUP_THREAD
#define LOCK_CREATE_THREAD
#endif

#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */

struct thread_post_mortem {
#ifdef DELAY_THREAD_POST_MORTEM
    struct thread_post_mortem *next;
#endif
    os_thread_t os_thread;
    pthread_attr_t *os_attr;
    os_vm_address_t os_address;
};

#ifdef DELAY_THREAD_POST_MORTEM
static int pending_thread_post_mortem_count = 0;
pthread_mutex_t thread_post_mortem_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
static struct thread_post_mortem * volatile pending_thread_post_mortem = 0;

int dynamic_values_bytes=TLS_SIZE*sizeof(lispobj);  /* same for all threads */
struct thread * volatile all_threads;
extern struct interrupt_data * global_interrupt_data;

#ifdef LISP_FEATURE_SB_THREAD
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef LOCK_CREATE_THREAD
static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
#ifdef LISP_FEATURE_GCC_TLS
__thread struct thread *current_thread;
#endif
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif

static void
link_thread(struct thread *th)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    th->prev=0;
    all_threads=th;
}

#ifdef LISP_FEATURE_SB_THREAD
static void
unlink_thread(struct thread *th)
{
    if (th->prev)
        th->prev->next = th->next;
    else
        all_threads = th->next;
    if (th->next)
        th->next->prev = th->prev;
}
#endif

static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    link_thread(th);
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_guard_page(1);
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}

#define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE + dynamic_values_bytes +        \
                            32 * SIGSTKSZ +                                  \
                            THREAD_ALIGNMENT_BYTES)

#ifdef LISP_FEATURE_SB_THREAD
/* THREAD POST MORTEM CLEANUP
 *
 * Memory allocated for the thread stacks cannot be reclaimed while
 * the thread is still alive, so we need a mechanism for post mortem
 * cleanups. FIXME: We actually have three, for historical reasons as
 * the saying goes. Do we really need three? Nikodemus guesses that
 * not anymore, now that we properly call pthread_attr_destroy before
 * freeing the stack. */
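/* A sketch of the three mechanisms alluded to above, as they appear in
 * schedule_thread_post_mortem() below (descriptive only):
 *
 *   1. default: the dying thread swaps its own post_mortem into the single
 *      pending_thread_post_mortem slot and immediately reaps whatever was
 *      there before, so each corpse is cleaned up by the next thread to die;
 *   2. DELAY_THREAD_POST_MORTEM: corpses are kept on a queue and only
 *      joined and unmapped once more than that many are pending;
 *   3. CREATE_POST_MORTEM_THREAD: a detached helper thread is spawned to
 *      join and free the corpse. */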

static struct thread_post_mortem *
plan_thread_post_mortem(struct thread *corpse)
{
    if (corpse) {
        struct thread_post_mortem *post_mortem = malloc(sizeof(struct thread_post_mortem));
        gc_assert(post_mortem);
        post_mortem->os_thread = corpse->os_thread;
        post_mortem->os_attr = corpse->os_attr;
        post_mortem->os_address = corpse->os_address;
#ifdef DELAY_THREAD_POST_MORTEM
        post_mortem->next = NULL;
#endif
        return post_mortem;
    } else {
        /* FIXME: When does this happen? */
        return NULL;
    }
}

static void
perform_thread_post_mortem(struct thread_post_mortem *post_mortem)
{
#ifdef CREATE_POST_MORTEM_THREAD
    pthread_detach(pthread_self());
#endif
    if (post_mortem) {
        gc_assert(!pthread_join(post_mortem->os_thread, NULL));
        gc_assert(!pthread_attr_destroy(post_mortem->os_attr));
        free(post_mortem->os_attr);
        os_invalidate(post_mortem->os_address, THREAD_STRUCT_SIZE);
        free(post_mortem);
    }
}

static void
schedule_thread_post_mortem(struct thread *corpse)
{
    struct thread_post_mortem *post_mortem = NULL;
    if (corpse) {
        post_mortem = plan_thread_post_mortem(corpse);

#ifdef DELAY_THREAD_POST_MORTEM
        pthread_mutex_lock(&thread_post_mortem_lock);
        /* First stick the new post mortem to the end of the queue. */
        if (pending_thread_post_mortem) {
            struct thread_post_mortem *next = pending_thread_post_mortem;
            while (next->next)
                next = next->next;
            next->next = post_mortem;
        } else {
            pending_thread_post_mortem = post_mortem;
        }
        /* Then, if there are enough things in the queue, clean up one
         * from the head -- or increment the count, and null out the
         * post_mortem we have. */
        if (pending_thread_post_mortem_count > DELAY_THREAD_POST_MORTEM) {
            post_mortem = pending_thread_post_mortem;
            pending_thread_post_mortem = post_mortem->next;
        } else {
            pending_thread_post_mortem_count++;
            post_mortem = NULL;
        }
        pthread_mutex_unlock(&thread_post_mortem_lock);
        /* Finally, run the cleanup, if any. */
        perform_thread_post_mortem(post_mortem);
#elif defined(CREATE_POST_MORTEM_THREAD)
        pthread_t thread;
        gc_assert(!pthread_create(&thread, NULL, perform_thread_post_mortem, post_mortem));
#else
        post_mortem = (struct thread_post_mortem *)
            swap_lispobjs((lispobj *)(void *)&pending_thread_post_mortem,
                          (lispobj)post_mortem);
        perform_thread_post_mortem(post_mortem);
#endif
    }
}
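/* For instance, with DELAY_THREAD_POST_MORTEM defined as 5 (see above), the
 * first several exits only lengthen the queue and bump the pending count;
 * once the count exceeds 5, each further exit enqueues its own corpse and
 * then joins and unmaps the oldest queued one. This is just a reading of
 * the code above, not a statement about the tuning's rationale. */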

/* this is the first thing that runs in the child (which is why the
 * silly calling convention). Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done.
 */
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    int result, lock_ret;

    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    th->os_thread=thread_self();
    protect_control_stack_guard_page(1);
    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it there is no danger
     * of deadlocking even with SIG_STOP_FOR_GC blocked (which it is
     * not). */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);
    link_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    result = funcall0(function);

    /* Block GC */
    block_blockable_signals();
    th->state=STATE_DEAD;

    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    gc_alloc_update_page_tables(0, &th->alloc_region);
    unlink_thread(th);
    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
    os_invalidate((os_vm_address_t)th->interrupt_data,
                  (sizeof (struct interrupt_data)));

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
    FSHOW((stderr, "Deallocating mach port %x\n", THREAD_STRUCT_TO_EXCEPTION_PORT(th)));
    mach_port_move_member(mach_task_self(),
                          THREAD_STRUCT_TO_EXCEPTION_PORT(th),
                          MACH_PORT_NULL);
    mach_port_deallocate(mach_task_self(),
                         THREAD_STRUCT_TO_EXCEPTION_PORT(th));
    mach_port_destroy(mach_task_self(),
                      THREAD_STRUCT_TO_EXCEPTION_PORT(th));
#endif

    schedule_thread_post_mortem(th);
    FSHOW((stderr,"/exiting thread %p\n", thread_self()));
    return result;
}
#endif /* LISP_FEATURE_SB_THREAD */

static void
free_thread_struct(struct thread *th)
{
    if (th->interrupt_data)
        os_invalidate((os_vm_address_t) th->interrupt_data,
                      (sizeof (struct interrupt_data)));
    os_invalidate((os_vm_address_t) th->os_address,
                  THREAD_STRUCT_SIZE);
}

/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;
    void *aligned_spaces=0;
#ifdef LISP_FEATURE_SB_THREAD
    unsigned int i;
#endif

    /* May as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed. SPACES must be appropriately aligned, since the GC
     * expects the control stack to start at a page boundary -- and
     * the OS may have even more rigorous requirements. We can't rely
     * on the alignment passed from os_validate, since that might
     * assume the current (e.g. 4k) pagesize, while we calculate with
     * the biggest (e.g. 64k) pagesize allowed by the ABI. */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces)
        return NULL;
    /* Aligning up is safe as THREAD_STRUCT_SIZE has
     * THREAD_ALIGNMENT_BYTES padding. */
    aligned_spaces = (void *)((((unsigned long)(char *)spaces)
                               + THREAD_ALIGNMENT_BYTES-1)
                              &~(unsigned long)(THREAD_ALIGNMENT_BYTES-1));
    per_thread=(union per_thread_data *)
        ((void*)aligned_spaces+
         thread_control_stack_size+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);
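/* The resulting layout of the single os_validate()d block, as implied by
 * the pointer arithmetic above (a sketch; low addresses first):
 *
 *   spaces ........... start of the raw, possibly unaligned allocation
 *   aligned_spaces ... control stack   (thread_control_stack_size bytes)
 *                      binding stack   (BINDING_STACK_SIZE bytes)
 *                      alien stack     (ALIEN_STACK_SIZE bytes)
 *                      per_thread ..... the union holding the TLS
 *                                       dynamic_values and struct thread
 */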

#ifdef LISP_FEATURE_SB_THREAD
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
    if (all_threads == 0) {
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
            SetSymbolValue(FREE_TLS_INDEX,
                           /* FIXME: should be MAX_INTERRUPTS -1 ? */
                           make_fixnum(MAX_INTERRUPTS+
                                       sizeof(struct thread)/sizeof(lispobj)),
                           0);
            SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
        }
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
        STATIC_TLS_INIT(PSEUDO_ATOMIC_BITS,pseudo_atomic_bits);
#endif
#undef STATIC_TLS_INIT
    }
#endif
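/* The STATIC_TLS_INIT calls above run only for the very first thread: they
 * store, in each listed static symbol's tls_index slot, the word offset of
 * the corresponding struct thread field, so that symbol's thread-local
 * value (e.g. BINDING_STACK_POINTER) is simply that slot of the current
 * struct thread. */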

    th=&per_thread->thread;
    th->os_address = spaces;
    th->control_stack_start = aligned_spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+thread_control_stack_size);
    th->control_stack_end = th->binding_stack_start;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    th->binding_stack_pointer=th->binding_stack_start;
    th->this=th;
    th->os_thread=0;
    th->os_attr=malloc(sizeof(pthread_attr_t));
    th->state=STATE_RUNNING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    th->pseudo_atomic_bits=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls.  So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime.  It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(BINDING_STACK_POINTER,(lispobj)th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_BITS,(lispobj)th->pseudo_atomic_bits,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(ALLOW_WITH_INTERRUPTS,T,th);
    bind_variable(GC_PENDING,NIL,th);
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif

    th->interrupt_data = (struct interrupt_data *)
        os_validate(0,(sizeof (struct interrupt_data)));
    if (!th->interrupt_data) {
        free_thread_struct(th);
        return NULL;
    }
    th->interrupt_data->pending_handler = 0;
    th->no_tls_value_marker=initial_function;

    return th;
}
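/* Note the double duty of no_tls_value_marker here: create_thread_struct()
 * stashes the initial function in it, the trampolines read it back out as
 * `function', and then reset the slot to NO_TLS_VALUE_MARKER_WIDETAG before
 * calling into Lisp (see initial_thread_trampoline and
 * new_thread_trampoline above). */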

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
mach_port_t setup_mach_exception_handling_thread();
kern_return_t mach_thread_init(mach_port_t thread_exception_port);
#endif

void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    if(th) {
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
        setup_mach_exception_handling_thread();
#endif
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
}

#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif

boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    sigset_t newset,oldset;
    boolean r=1;
    int retcode = 0, initcode;

    FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));

#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_lock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
#endif
    sigemptyset(&newset);
    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child process is not linked onto
     * all_threads until it's ready. */
    sigaddset_deferrable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);

    if((initcode = pthread_attr_init(th->os_attr)) ||
       /* call_into_lisp_first_time switches the stack for the initial
        * thread. For the others, we use this. */
       (pthread_attr_setstack(th->os_attr,th->control_stack_start,thread_control_stack_size)) ||
       (retcode = pthread_create
        (kid_tid,th->os_attr,(void *(*)(void *))new_thread_trampoline,th))) {
        FSHOW_SIGNAL((stderr, "init = %d\n", initcode));
        FSHOW_SIGNAL((stderr, "pthread_create returned %d, errno %d\n", retcode, errno));
        FSHOW_SIGNAL((stderr, "wanted stack size %d, min stack size %d\n",
                      cstack_size, PTHREAD_STACK_MIN));
        if(retcode < 0) {
            perror("create_os_thread");
        }
        r=0;
    }

    thread_sigmask(SIG_SETMASK,&oldset,0);
#ifdef LOCK_CREATE_THREAD
    retcode = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(retcode == 0);
    FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
#endif
    return r;
}
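/* A note on the signal choreography above (descriptive only): the creating
 * thread blocks the deferrable signals before pthread_create(), so the
 * child starts with that restrictive mask inherited; the child re-enables
 * signals only once it has finished setting itself up. SIG_STOP_FOR_GC is
 * not blocked here because the GC cannot see the child until
 * new_thread_trampoline() links it onto all_threads. */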

os_thread_t create_thread(lispobj initial_function) {
    struct thread *th;
    os_thread_t kid_tid;

    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage. initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if (th == 0) return 0;

    if (create_os_thread(th,&kid_tid)) {
        return kid_tid;
    } else {
        free_thread_struct(th);
        return 0;
    }
}

/* Send the signo to os_thread, retry if the rt signal queue is
 * full. */
int
kill_thread_safely(os_thread_t os_thread, int signo)
{
    int r;
    /* The man page does not mention EAGAIN as a valid return value
     * for either pthread_kill or kill. But that's theory, this is
     * practice. By waiting here we assume that the delivery of this
     * signal is not necessary for the delivery of the signals in the
     * queue. In other words, we _assume_ there are no deadlocks. */
    while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
        /* wait a bit then try again in the hope of the rt signal
         * queue not being full */
        FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
        /* FIXME: some kind of backoff (random, exponential) would be
         * nice. */
        sleep(1);
    }
    return r;
}

int signal_interrupt_thread(os_thread_t os_thread)
{
    int status = kill_thread_safely(os_thread, SIG_INTERRUPT_THREAD);
    if (status == 0) {
        return 0;
    } else if (status == ESRCH) {
        return -1;
    } else {
        lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s\n",
             os_thread, status, strerror(status));
    }
}

/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 */

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
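/* A minimal sketch of that discipline, for a hypothetical mutex `m' whose
 * users have all agreed to hold it only with SIG_STOP_FOR_GC blocked:
 *
 *     sigset_t set, old;
 *     sigemptyset(&set);
 *     sigaddset(&set, SIG_STOP_FOR_GC);
 *     thread_sigmask(SIG_BLOCK, &set, &old);
 *     pthread_mutex_lock(&m);
 *     ... critical section ...
 *     pthread_mutex_unlock(&m);
 *     thread_sigmask(SIG_SETMASK, &old, 0);
 *
 * If some users blocked the signal around `m' and others did not, a thread
 * could be suspended by gc_stop_the_world() while holding `m', deadlocking
 * anyone (including the GC) who then waits for that lock. */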
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
#ifdef LOCK_CREATE_THREAD
    /* KLUDGE: Stopping the thread during pthread_create() causes deadlock
     * on FreeBSD. */
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock, thread=%lu\n",
                  th->os_thread));
    lock_ret = pthread_mutex_lock(&create_thread_lock);
    gc_assert(lock_ret == 0);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock, thread=%lu\n",
                  th->os_thread));
#endif
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
                  th->os_thread));
    /* keep threads from starting while the world is stopped. */
    lock_ret = pthread_mutex_lock(&all_threads_lock);
    gc_assert(lock_ret == 0);

    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
                  th->os_thread));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: p->state: %x\n", p->state));
        if((p!=th) && ((p->state==STATE_RUNNING))) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %x, os_thread %x\n",
                          p, p->os_thread));
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(p->state==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    /* wait for the running threads to stop or finish */
    for(p=all_threads;p;) {
        FSHOW_SIGNAL((stderr,"/gc_stop_the_world: th: %p, p: %p\n", th, p));
        if((p!=th) && (p->state==STATE_RUNNING)) {
            sched_yield();
        } else {
            p=p->next;
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}

void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status, lock_ret;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if((p!=th) && (p->state!=STATE_DEAD)) {
            if(p->state!=STATE_SUSPENDED) {
                lose("gc_start_the_world: wrong thread state is %d\n",
                     fixnum_value(p->state));
            }
            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                          p->os_thread));
            p->state=STATE_RUNNING;

#if defined(SIG_RESUME_FROM_GC)
            status=kill_thread_safely(p->os_thread,SIG_RESUME_FROM_GC);
#else
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
#endif
            if (status) {
                lose("cannot resume thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }

    /* If we waited here until all threads leave STATE_SUSPENDED, then
     * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
     * performance implications, but does away with the 'rt signal
     * queue full' problem. */

    lock_ret = pthread_mutex_unlock(&all_threads_lock);
    gc_assert(lock_ret == 0);
#ifdef LOCK_CREATE_THREAD
    lock_ret = pthread_mutex_unlock(&create_thread_lock);
    gc_assert(lock_ret == 0);
#endif

    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}
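/* Note that, per the #if above, resuming reuses SIG_STOP_FOR_GC on ports
 * that do not define a separate SIG_RESUME_FROM_GC: the suspended threads
 * are parked in the stop-for-GC handler waiting for a second signal, and
 * which signal that is is a per-port choice. */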

int
thread_yield()
{
#ifdef LISP_FEATURE_SB_THREAD
    return sched_yield();
#else
    return 0;
#endif
}