#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <unistd.h>

#include "runtime.h"
#include "validate.h"           /* for CONTROL_STACK_SIZE etc */
#include "thread.h"
#include "target-arch-os.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "gc-internal.h"
#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */

int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
struct thread * volatile all_threads;
volatile lispobj all_threads_lock;
extern struct interrupt_data * global_interrupt_data;
extern int linux_no_threads_p;
#ifdef LISP_FEATURE_SB_THREAD
/* When trying to get all_threads_lock one should make sure that
 * sig_stop_for_gc is not blocked. Else there would be a possible
 * deadlock: gc locks it, other thread blocks signals, gc sends stop
 * request to other thread and waits, other thread blocks on lock. */
void check_sig_stop_for_gc_can_arrive_or_lose()
{
    /* Get the current sigmask, by blocking the empty set. */
    sigset_t empty,current;
    sigemptyset(&empty);
    thread_sigmask(SIG_BLOCK, &empty, &current);
    if (sigismember(&current,SIG_STOP_FOR_GC))
        lose("SIG_STOP_FOR_GC cannot arrive: it is blocked\n");
    if (SymbolValue(GC_INHIBIT,arch_os_get_current_thread()) != NIL)
        lose("SIG_STOP_FOR_GC cannot arrive: gc is inhibited\n");
    if (arch_pseudo_atomic_atomic(NULL))
        lose("SIG_STOP_FOR_GC cannot arrive: in pseudo atomic\n");
}
#define GET_ALL_THREADS_LOCK(name) \
    { \
        sigset_t _newset,_oldset; \
        sigemptyset(&_newset); \
        sigaddset_deferrable(&_newset); \
        thread_sigmask(SIG_BLOCK, &_newset, &_oldset); \
        check_sig_stop_for_gc_can_arrive_or_lose(); \
        FSHOW_SIGNAL((stderr,"/%s:waiting on lock=%ld, thread=%lu\n",name, \
               all_threads_lock,arch_os_get_current_thread()->os_thread)); \
        get_spinlock(&all_threads_lock,(long)arch_os_get_current_thread()); \
        FSHOW_SIGNAL((stderr,"/%s:got lock, thread=%lu\n", \
               name,arch_os_get_current_thread()->os_thread));
#define RELEASE_ALL_THREADS_LOCK(name) \
    FSHOW_SIGNAL((stderr,"/%s:released lock\n",name)); \
    release_spinlock(&all_threads_lock); \
    thread_sigmask(SIG_SETMASK,&_oldset,0); \
    }
#endif
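
/* Note that GET_ALL_THREADS_LOCK opens a brace that only
 * RELEASE_ALL_THREADS_LOCK closes, so the two must always appear as a
 * matched pair in the same function, roughly:
 *
 *     GET_ALL_THREADS_LOCK("example")
 *     ... walk or modify all_threads ...
 *     RELEASE_ALL_THREADS_LOCK("example")
 */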
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif
int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
    function = th->unbound_marker;
    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;

    if(th->os_thread < 1) lose("th->os_thread not set up right");
    th->state=STATE_RUNNING;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}
#ifdef LISP_FEATURE_SB_THREAD

/* this is the first thing that runs in the child (which is why the
 * silly calling convention).  Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done
 */
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    int result;
    function = th->unbound_marker;
    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }
    /* wait here until our thread is linked into all_threads: see below */
    {
        volatile os_thread_t *tid=&th->os_thread;
        while(*tid<1) sched_yield();
    }
    th->state=STATE_RUNNING;
    result = funcall0(function);
    th->state=STATE_DEAD;
    return result;
}
#endif /* LISP_FEATURE_SB_THREAD */
/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread */
struct thread * create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;
    /* may as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed */
    spaces=os_validate(0,
                       THREAD_CONTROL_STACK_SIZE+
                       BINDING_STACK_SIZE+
                       ALIEN_STACK_SIZE+
                       dynamic_values_bytes+
                       32*SIGSTKSZ);
    if(!spaces)
        return NULL;
    per_thread=(union per_thread_data *)
        (spaces+
         THREAD_CONTROL_STACK_SIZE+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);
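    /* The single mapping is carved up, from low addresses upward, as
     *
     *   control stack | binding stack | alien stack | dynamic values | 32*SIGSTKSZ
     *
     * so per_thread (and hence struct thread itself) lives in the
     * dynamic-values area just above the alien stack. */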
    if(all_threads) {
        memcpy(per_thread,arch_os_get_current_thread(),
               dynamic_values_bytes);
    } else {
#ifdef LISP_FEATURE_SB_THREAD
        int i;
        for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
            per_thread->dynamic_values[i]=UNBOUND_MARKER_WIDETAG;
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG)
            SetSymbolValue(FREE_TLS_INDEX,
                           make_fixnum(MAX_INTERRUPTS+
                                       sizeof(struct thread)/sizeof(lispobj)),
                           0);
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))
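
        /* i.e. each call below stores the word offset of a struct
         * thread slot into the static symbol's tls_index field, so a
         * TLS lookup of that symbol indexes straight into the
         * thread's own struct. */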
        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
        STATIC_TLS_INIT(PSEUDO_ATOMIC_ATOMIC,pseudo_atomic_atomic);
        STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
#endif
#undef STATIC_TLS_INIT
#endif
    }
    th=&per_thread->thread;
    th->control_stack_start = spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
    th->control_stack_end = th->binding_stack_start;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    th->binding_stack_pointer=th->binding_stack_start;
    th->this=th;
    th->os_thread=0;
    th->interrupt_fun=NIL;
    th->interrupt_fun_lock=0;
    th->state=STATE_STARTING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    th->pseudo_atomic_interrupted=0;
    th->pseudo_atomic_atomic=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif
#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls.  So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime.  It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(BINDING_STACK_POINTER,(lispobj)th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC,(lispobj)th->pseudo_atomic_atomic,th);
    SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED,th->pseudo_atomic_interrupted,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(GC_PENDING,NIL,th);
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif
    th->interrupt_data = (struct interrupt_data *)
        os_validate(0,(sizeof (struct interrupt_data)));
    if(all_threads)
        memcpy(th->interrupt_data,
               arch_os_get_current_thread()->interrupt_data,
               sizeof (struct interrupt_data));
    else
        memcpy(th->interrupt_data,global_interrupt_data,
               sizeof (struct interrupt_data));
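
    /* There is no separate slot for the initial function: it rides to
     * the new thread in unbound_marker, and the trampolines above read
     * it out and reset the slot to UNBOUND_MARKER_WIDETAG before
     * funcalling it. */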
    th->unbound_marker=initial_function;
    return th;
}
void link_thread(struct thread *th,os_thread_t kid_tid)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    th->prev=0;
    all_threads=th;
    /* note that th->os_thread is 0 at this time.  We rely on
     * all_threads_lock to ensure that we don't have >1 thread with
     * os_thread=0 on the list at once */
    protect_control_stack_guard_page(th,1);
    /* child will not start until this is set */
    th->os_thread=kid_tid;
    FSHOW((stderr,"/created thread %lu\n",kid_tid));
}
void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    os_thread_t kid_tid=thread_self();
    if(th && kid_tid>0) {
        link_thread(th,kid_tid);
        initial_thread_trampoline(all_threads); /* no return */
    } else lose("can't create initial thread");
}
#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif
boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    pthread_attr_t attr;
    sigset_t newset,oldset;
    boolean r=1;
    sigemptyset(&newset);
    /* Blocking deferrable signals is enough, since gc_stop_the_world
     * waits until the child leaves STATE_STARTING. And why not let gc
     * proceed as soon as possible? */
    sigaddset_deferrable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);
    if((pthread_attr_init(&attr)) ||
       (pthread_attr_setstack(&attr,th->control_stack_start,
                              THREAD_CONTROL_STACK_SIZE-16)) ||
       (pthread_create
        (kid_tid,&attr,(void *(*)(void *))new_thread_trampoline,th)))
        r=0;
    thread_sigmask(SIG_SETMASK,&oldset,0);
    return r;
}
struct thread *create_thread(lispobj initial_function) {
    struct thread *th;
    os_thread_t kid_tid=0;
    boolean success;

    if(linux_no_threads_p) return 0;

    th=create_thread_struct(initial_function);
    if(th==0) return 0;
    /* we must not be interrupted here after a successful
     * create_os_thread, because the kid will be waiting for its
     * thread struct to be linked */
    GET_ALL_THREADS_LOCK("create_thread")
    success=create_os_thread(th,&kid_tid);
    if (success)
        link_thread(th,kid_tid);
    else
        os_invalidate((os_vm_address_t) th->control_stack_start,
                      ((sizeof (lispobj))
                       * (th->control_stack_end-th->control_stack_start)) +
                      BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
                      32*SIGSTKSZ);

    RELEASE_ALL_THREADS_LOCK("create_thread")

    if (success)
        return th;
    else
        return 0;
}
/* called from lisp from the thread object finalizer */
void reap_dead_thread(struct thread *th)
{
    if(th->state!=STATE_DEAD)
        lose("thread %p is not joinable, state=%d\n",th,th->state);
#ifdef LISP_FEATURE_GENCGC
    {
        sigset_t newset,oldset;
        sigemptyset(&newset);
        sigaddset_blockable(&newset);
        thread_sigmask(SIG_BLOCK, &newset, &oldset);
        gc_alloc_update_page_tables(0, &th->alloc_region);
        release_spinlock(&all_threads_lock);
        thread_sigmask(SIG_SETMASK,&oldset,0);
    }
#endif
    GET_ALL_THREADS_LOCK("reap_dead_thread")
    FSHOW((stderr,"/reap_dead_thread: reaping %lu\n",th->os_thread));
    if(th->prev)
        th->prev->next=th->next;
    else all_threads=th->next;
    if(th->next)
        th->next->prev=th->prev;
    RELEASE_ALL_THREADS_LOCK("reap_dead_thread")
    if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
    gc_assert(pthread_join(th->os_thread,NULL)==0);
    os_invalidate((os_vm_address_t) th->control_stack_start,
                  ((sizeof (lispobj))
                   * (th->control_stack_end-th->control_stack_start)) +
                  BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
                  32*SIGSTKSZ);
}
/* Send the signo to os_thread, retry if the rt signal queue is
 * full. */
static int kill_thread_safely(os_thread_t os_thread, int signo)
{
    int r;
    /* The man page does not mention EAGAIN as a valid return value
     * for either pthread_kill or kill. But that's theory, this is
     * practice. By waiting here we assume that the delivery of this
     * signal is not necessary for the delivery of the signals in the
     * queue. In other words, we _assume_ there are no deadlocks. */
    while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
        /* wait a bit then try again in the hope of the rt signal
         * queue not being full */
        FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
        /* FIXME: some kind of backoff (random, exponential) would be
         * nice. */
        sleep(1);
    }
    return r;
}
int interrupt_thread(struct thread *th, lispobj function)
{
    /* In clone_threads, if A and B both interrupt C at approximately
     * the same time, it does not matter: the second signal will be
     * masked until the handler has returned from the first one.  In
     * pthreads though, we can't put the knowledge of what function to
     * call into the siginfo, so we have to store it in the
     * destination thread, and do it in such a way that A won't
     * clobber B's interrupt.  Hence, this stupid linked list.
     *
     * This does depend on SIG_INTERRUPT_THREAD being queued (as POSIX
     * RT signals are): we need to keep interrupt_fun data for exactly
     * as many signals as are going to be received by the destination
     * thread. */
    lispobj c=alloc_cons(function,NIL);
    sigset_t newset,oldset;
    sigemptyset(&newset);
    /* interrupt_thread_handler locks this spinlock with blockables
     * blocked (it does so for the sake of
     * arrange_return_to_lisp_function), so we must also block them or
     * else SIG_STOP_FOR_GC and all_threads_lock will find a way to
     * deadlock. */
    sigaddset_blockable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);
    if (th == arch_os_get_current_thread())
        lose("cannot interrupt current thread");
    get_spinlock(&th->interrupt_fun_lock,
                 (long)arch_os_get_current_thread());
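    /* Push function onto the front of th's pending-interrupt list:
     * the fresh cons's cdr becomes the old list head. */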
    ((struct cons *)native_pointer(c))->cdr=th->interrupt_fun;
    th->interrupt_fun=c;
    release_spinlock(&th->interrupt_fun_lock);
    thread_sigmask(SIG_SETMASK,&oldset,0);
    /* Called from lisp with the thread object as a parameter. Thus,
     * the object cannot be garbage collected and consequently reaped
     * and joined. Because it's not joined, kill should work (even if
     * the thread has died/exited). */
    {
        int status=kill_thread_safely(th->os_thread,SIG_INTERRUPT_THREAD);
        if (status==0) {
            return 0;
        } else if (status==ESRCH) {
            /* This thread has exited. */
            th->interrupt_fun=NIL;
            errno=ESRCH;
            return -1;
        } else {
            lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s",
                 th->os_thread,status,strerror(status));
        }
    }
}
/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC. */
/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
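
/* The thread states used below form a simple life cycle:
 * STATE_STARTING (struct created, not yet linked and running) ->
 * STATE_RUNNING -> STATE_SUSPENDED (stopped by SIG_STOP_FOR_GC) or
 * STATE_DEAD (the thread's lisp function has returned). */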
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status;
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
                  th->os_thread));
    /* keep threads from starting while the world is stopped. */
    get_spinlock(&all_threads_lock,(long)th);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
                  th->os_thread));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        while(p->state==STATE_STARTING) sched_yield();
        if((p!=th) && (p->state==STATE_RUNNING)) {
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %lu\n",
                          p->os_thread));
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(p->state==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    /* wait for the running threads to stop or finish */
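    /* (This busy-waits: p only advances past a thread once that
     * thread has suspended or died.) */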
    for(p=all_threads;p;) {
        gc_assert(p->os_thread!=0);
        gc_assert(p->state!=STATE_STARTING);
        if((p==th) || (p->state==STATE_SUSPENDED) ||
           (p->state==STATE_DEAD)) {
            p=p->next;
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}
void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if((p!=th) && (p->state!=STATE_DEAD)) {
            if(p->state!=STATE_SUSPENDED) {
                lose("gc_start_the_world: wrong thread state is %d\n",
                     fixnum_value(p->state));
            }
            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                          p->os_thread));
            p->state=STATE_RUNNING;
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status) {
                lose("cannot resume thread=%lu: %d, %s",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    /* If we waited here until all threads leave STATE_SUSPENDED, then
     * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
     * performance implications, but does away with the 'rt signal
     * queue full' problem. */
    release_spinlock(&all_threads_lock);
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}
#endif /* LISP_FEATURE_SB_THREAD */