#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>

#include "validate.h" /* for CONTROL_STACK_SIZE etc */
#include "target-arch-os.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h" /* for lose() */
#include "gc-internal.h"

#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */
int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
struct thread * volatile all_threads;
extern struct interrupt_data * global_interrupt_data;
extern int linux_no_threads_p;

#ifdef LISP_FEATURE_SB_THREAD

pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
/* When trying to get all_threads_lock one should make sure that
 * sig_stop_for_gc is not blocked. Else there would be a possible
 * deadlock: gc locks it, other thread blocks signals, gc sends stop
 * request to other thread and waits, other thread blocks on lock. */
void check_sig_stop_for_gc_can_arrive_or_lose()
{
    /* Get the current sigmask, by blocking the empty set. */
    sigset_t empty,current;
    sigemptyset(&empty);
    thread_sigmask(SIG_BLOCK, &empty, &current);
    if (sigismember(&current,SIG_STOP_FOR_GC))
        lose("SIG_STOP_FOR_GC cannot arrive: it is blocked\n");
    if (SymbolValue(GC_INHIBIT,arch_os_get_current_thread()) != NIL)
        lose("SIG_STOP_FOR_GC cannot arrive: gc is inhibited\n");
    if (arch_pseudo_atomic_atomic(NULL))
        lose("SIG_STOP_FOR_GC cannot arrive: in pseudo atomic\n");
}
#define GET_ALL_THREADS_LOCK(name) \
{ \
    sigset_t _newset,_oldset; \
    sigemptyset(&_newset); \
    sigaddset_deferrable(&_newset); \
    thread_sigmask(SIG_BLOCK, &_newset, &_oldset); \
    check_sig_stop_for_gc_can_arrive_or_lose(); \
    FSHOW_SIGNAL((stderr,"/%s:waiting on lock=%ld, thread=%lu\n",name, \
                  all_threads_lock,arch_os_get_current_thread()->os_thread)); \
    pthread_mutex_lock(&all_threads_lock); \
    FSHOW_SIGNAL((stderr,"/%s:got lock, thread=%lu\n", \
                  name,arch_os_get_current_thread()->os_thread));
#define RELEASE_ALL_THREADS_LOCK(name) \
    FSHOW_SIGNAL((stderr,"/%s:released lock\n",name)); \
    pthread_mutex_unlock(&all_threads_lock); \
    thread_sigmask(SIG_SETMASK,&_oldset,0); \
}
#endif
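
/* GET_ALL_THREADS_LOCK opens a block that RELEASE_ALL_THREADS_LOCK closes, so
 * the two macros must always appear as a pair in the same function, e.g. (as
 * create_thread() and reap_dead_thread() do below):
 *
 *     GET_ALL_THREADS_LOCK("caller")
 *     ... walk or modify all_threads ...
 *     RELEASE_ALL_THREADS_LOCK("caller")
 */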

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif

static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;

    if(th->os_thread < 1) lose("th->os_thread not set up right");
    th->state=STATE_RUNNING;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}
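
/* The no_tls_value_marker slot doubles as a mailbox: create_thread_struct()
 * below stores the initial function in it, and the trampolines (above and
 * below) read it out and reset the slot to NO_TLS_VALUE_MARKER_WIDETAG
 * before calling that function. */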

#ifdef LISP_FEATURE_SB_THREAD

/* this is the first thing that runs in the child (which is why the
 * silly calling convention). Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done
 */
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    int result;
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    /* wait here until our thread is linked into all_threads: see below */
    {
        volatile os_thread_t *tid=&th->os_thread;
        while(*tid<1) sched_yield();
    }

    th->state=STATE_RUNNING;
    result = funcall0(function);
    th->state=STATE_DEAD;
    return result;
}
#endif /* LISP_FEATURE_SB_THREAD */

#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE + dynamic_values_bytes +        \
                            32 * SIGSTKSZ)
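
/* create_thread_struct() below carves this single os_validate()'d block up in
 * address order: the control stack first, then the binding stack, then the
 * alien stack, and finally the per-thread data (the TLS dynamic_values array,
 * whose start doubles as the struct thread itself). */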

static void
free_thread_struct(struct thread *th)
{
    if (th->interrupt_data)
        os_invalidate((os_vm_address_t) th->interrupt_data,
                      (sizeof (struct interrupt_data)));
    os_invalidate((os_vm_address_t) th->control_stack_start,
                  THREAD_STRUCT_SIZE);
}

/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    os_vm_address_t spaces=0;

    /* may as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces) return NULL;
    per_thread=(union per_thread_data *)
        (spaces+
         THREAD_CONTROL_STACK_SIZE+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);
    if(arch_os_get_current_thread())
        memcpy(per_thread,arch_os_get_current_thread(),
               dynamic_values_bytes);
    else {
#ifdef LISP_FEATURE_SB_THREAD
        int i;
        for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
            per_thread->dynamic_values[i]=NO_TLS_VALUE_MARKER_WIDETAG;
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
            SetSymbolValue
                (FREE_TLS_INDEX,
                 make_fixnum(MAX_INTERRUPTS+
                             sizeof(struct thread)/sizeof(lispobj)),
                 0);
            SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
        }
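        /* STATIC_TLS_INIT wires the tls_index of a few static symbols
         * directly to slot offsets within struct thread, so that special
         * variable lookups for them land on the corresponding C-visible
         * thread-struct fields. */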
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
        STATIC_TLS_INIT(PSEUDO_ATOMIC_ATOMIC,pseudo_atomic_atomic);
        STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
#endif
#undef STATIC_TLS_INIT
#endif
    }

    th=&per_thread->thread;
    th->control_stack_start = spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
    th->control_stack_end = th->binding_stack_start;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    th->binding_stack_pointer=th->binding_stack_start;
    th->os_thread=0;
    th->interrupt_fun=NIL;
    th->interrupt_fun_lock=0;
    th->state=STATE_STARTING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    th->pseudo_atomic_interrupted=0;
    th->pseudo_atomic_atomic=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls. So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime. It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(BINDING_STACK_POINTER,(lispobj)th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC,(lispobj)th->pseudo_atomic_atomic,th);
    SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED,th->pseudo_atomic_interrupted,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif

    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(GC_PENDING,NIL,th);
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif

    th->interrupt_data = (struct interrupt_data *)
        os_validate(0,(sizeof (struct interrupt_data)));
    if (!th->interrupt_data) {
        free_thread_struct(th);
        return 0;
    }
    th->interrupt_data->pending_handler = 0;
    th->no_tls_value_marker=initial_function;

    return th;
}

static void
link_thread(struct thread *th,os_thread_t kid_tid)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    all_threads=th;
    /* note that th->os_thread is 0 at this time. We rely on
     * all_threads_lock to ensure that we don't have >1 thread with
     * os_thread=0 on the list at once
     */
    protect_control_stack_guard_page(th,1);
    /* child will not start until this is set */
    th->os_thread=kid_tid;
    FSHOW((stderr,"/created thread %lu\n",kid_tid));
}

void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    os_thread_t kid_tid=thread_self();
    if(th && kid_tid>0) {
        link_thread(th,kid_tid);
        initial_thread_trampoline(all_threads); /* no return */
    } else lose("can't create initial thread");
}
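
/* Note that the initial thread goes through the same setup path as
 * create_thread() below (create_thread_struct() + link_thread()), but instead
 * of spawning a new pthread it runs the trampoline directly on the current
 * OS thread and never returns. */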

#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif

boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    pthread_attr_t attr;
    sigset_t newset,oldset;
    boolean r=1;
    sigemptyset(&newset);
    /* Blocking deferrable signals is enough, since gc_stop_the_world
     * waits until the child leaves STATE_STARTING. And why not let gc
     * proceed as soon as possible? */
    sigaddset_deferrable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);

    if((pthread_attr_init(&attr)) ||
       (pthread_attr_setstack(&attr,th->control_stack_start,
                              THREAD_CONTROL_STACK_SIZE-16)) ||
       (pthread_create
        (kid_tid,&attr,(void *(*)(void *))new_thread_trampoline,th)))
        r=0;
    thread_sigmask(SIG_SETMASK,&oldset,0);
    return r;
}

struct thread *create_thread(lispobj initial_function) {
    struct thread *th;
    os_thread_t kid_tid=0;
    boolean success;

    if(linux_no_threads_p) return 0;

    th=create_thread_struct(initial_function);
    if(th==0) return 0;

    /* we must not be interrupted here after a successful
     * create_os_thread, because the kid will be waiting for its
     * thread struct to be linked */
    GET_ALL_THREADS_LOCK("create_thread")

    success=create_os_thread(th,&kid_tid);
    if (success)
        link_thread(th,kid_tid);
    else
        free_thread_struct(th);

    RELEASE_ALL_THREADS_LOCK("create_thread")

    return success ? th : 0;
}

/* called from lisp from the thread object finalizer */
void reap_dead_thread(struct thread *th)
{
    if(th->state!=STATE_DEAD)
        lose("thread %p is not joinable, state=%d\n",th,th->state);
#ifdef LISP_FEATURE_GENCGC
    {
        sigset_t newset,oldset;
        sigemptyset(&newset);
        sigaddset_blockable(&newset);
        thread_sigmask(SIG_BLOCK, &newset, &oldset);
        gc_alloc_update_page_tables(0, &th->alloc_region);
        thread_sigmask(SIG_SETMASK,&oldset,0);
    }
#endif
    GET_ALL_THREADS_LOCK("reap_dead_thread")
    FSHOW((stderr,"/reap_dead_thread: reaping %lu\n",th->os_thread));
    if(th->prev)
        th->prev->next=th->next;
    else all_threads=th->next;
    if(th->next)
        th->next->prev=th->prev;
    RELEASE_ALL_THREADS_LOCK("reap_dead_thread")
    if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
    gc_assert(pthread_join(th->os_thread,NULL)==0);
    free_thread_struct(th);
}
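
/* This is the only place the pthread is joined, so until the Lisp finalizer
 * calls reap_dead_thread() the os_thread id remains valid as a target for
 * pthread_kill() (see the comment in interrupt_thread() below). */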

/* Send the signo to os_thread, retry if the rt signal queue is
 * full. */
static int kill_thread_safely(os_thread_t os_thread, int signo)
{
    int r;
    /* The man page does not mention EAGAIN as a valid return value
     * for either pthread_kill or kill. But that's theory, this is
     * practice. By waiting here we assume that the delivery of this
     * signal is not necessary for the delivery of the signals in the
     * queue. In other words, we _assume_ there are no deadlocks. */
    while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
        /* wait a bit then try again in the hope of the rt signal
         * queue not being full */
        FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
        /* FIXME: some kind of backoff (random, exponential) would be
         * nice. */
        sleep(1);
    }
    return r;
}

int interrupt_thread(struct thread *th, lispobj function)
{
    /* In clone_threads, if A and B both interrupt C at approximately
     * the same time, it does not matter: the second signal will be
     * masked until the handler has returned from the first one. In
     * pthreads though, we can't put the knowledge of what function to
     * call into the siginfo, so we have to store it in the
     * destination thread, and do it in such a way that A won't
     * clobber B's interrupt. Hence, this stupid linked list.
     *
     * This does depend on SIG_INTERRUPT_THREAD being queued (as POSIX
     * RT signals are): we need to keep interrupt_fun data for exactly
     * as many signals as are going to be received by the destination
     * thread. */
    lispobj c=alloc_cons(function,NIL);
    sigset_t newset,oldset;
    sigemptyset(&newset);
    /* interrupt_thread_handler locks this spinlock with blockables
     * blocked (it does so for the sake of
     * arrange_return_to_lisp_function), so we must also block them or
     * else SIG_STOP_FOR_GC and all_threads_lock will find a way to
     * deadlock us. */
    sigaddset_blockable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);
    if (th == arch_os_get_current_thread())
        lose("cannot interrupt current thread");
    get_spinlock(&th->interrupt_fun_lock,
                 (long)arch_os_get_current_thread());
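    /* Cons the new interruption onto the head of th->interrupt_fun while
     * holding the target's interrupt_fun_lock, so that concurrent callers of
     * interrupt_thread() can't clobber each other's entries; each queued
     * SIG_INTERRUPT_THREAD then consumes one entry from this list. */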
    ((struct cons *)native_pointer(c))->cdr=th->interrupt_fun;
    th->interrupt_fun=c;
    release_spinlock(&th->interrupt_fun_lock);
    thread_sigmask(SIG_SETMASK,&oldset,0);
    /* Called from lisp with the thread object as a parameter. Thus,
     * the object cannot be garbage collected and consequently reaped
     * and joined. Because it's not joined, kill should work (even if
     * the thread has died/exited). */
    {
        int status=kill_thread_safely(th->os_thread,SIG_INTERRUPT_THREAD);
        if (status==0) {
            return 0;
        } else if (status==ESRCH) {
            /* This thread has exited. */
            th->interrupt_fun=NIL;
            errno=ESRCH;
            return -1;
        } else {
            lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s",
                 th->os_thread,status,strerror(status));
        }
    }
}

/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 */

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
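
/* A sketch of the state transitions driven by the two functions below:
 *
 *   STATE_RUNNING   --SIG_STOP_FOR_GC-->  STATE_SUSPENDED  (gc_stop_the_world)
 *   STATE_SUSPENDED --SIG_STOP_FOR_GC-->  STATE_RUNNING    (gc_start_the_world)
 *
 * Threads still in STATE_STARTING are waited for before being signalled, and
 * STATE_DEAD threads are skipped. */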
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status;
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
                  th->os_thread));
    /* keep threads from starting while the world is stopped. */
    pthread_mutex_lock(&all_threads_lock);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
                  th->os_thread));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        while(p->state==STATE_STARTING) sched_yield();
        if((p!=th) && (p->state==STATE_RUNNING)) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %lu\n",
                          p->os_thread));
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(p->state==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    /* wait for the running threads to stop or finish */
    for(p=all_threads;p;) {
        gc_assert(p->os_thread!=0);
        gc_assert(p->state!=STATE_STARTING);
        if((p==th) || (p->state==STATE_SUSPENDED) ||
           (p->state==STATE_DEAD)) {
            p=p->next;
        } else {
            sched_yield();
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}

void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if((p!=th) && (p->state!=STATE_DEAD)) {
            if(p->state!=STATE_SUSPENDED) {
                lose("gc_start_the_world: wrong thread state is %d\n",
                     fixnum_value(p->state));
            }
            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                          p->os_thread));
            p->state=STATE_RUNNING;
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status) {
                lose("cannot resume thread=%lu: %d, %s",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    /* If we waited here until all threads leave STATE_SUSPENDED, then
     * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
     * performance implications, but does away with the 'rt signal
     * queue full' problem. */
    pthread_mutex_unlock(&all_threads_lock);
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}