/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

#include "sbcl.h"

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif

#include "runtime.h"
#include "validate.h"           /* for CONTROL_STACK_SIZE etc */
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"             /* for lose() */
#include "gc-internal.h"

#ifdef LISP_FEATURE_WIN32
/*
 * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
 * so define it arbitrarily
 */
#define SIGSTKSZ 1024
#endif

#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */
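
/* A one-slot "mailbox" used to defer freeing thread stacks: a dying
 * thread cannot unmap the control stack it is still running on, so it
 * deposits its own stack here and instead reaps whatever the
 * previously deceased thread left behind (see
 * free_thread_stack_later() below). */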
struct freeable_stack {
    os_thread_t os_thread;
    os_vm_address_t stack;
};

static struct freeable_stack * volatile freeable_stack = 0;

int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
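
/* The doubly-linked list of all live threads. The GC walks it when
 * stopping and starting the world, so in the threaded build it must
 * only be mutated with all_threads_lock held (the initial thread,
 * running alone, links itself in without the lock). */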
struct thread * volatile all_threads;
extern struct interrupt_data * global_interrupt_data;
extern int linux_no_threads_p;

#ifdef LISP_FEATURE_SB_THREAD
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif
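
/* Push TH onto the front of all_threads. Callers other than the
 * initial thread must hold all_threads_lock. */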
static void
link_thread(struct thread *th)
{
    if (all_threads) all_threads->prev=th;
    th->next=all_threads;
    th->prev=0;
    all_threads=th;
}

#ifdef LISP_FEATURE_SB_THREAD
static void
unlink_thread(struct thread *th)
{
    if (th->prev)
        th->prev->next = th->next;
    else
        all_threads = th->next;
    if (th->next)
        th->next->prev = th->prev;
}
#endif

static int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;
    link_thread(th);
    th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
    protect_control_stack_guard_page(1);
#endif

#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}

#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE + dynamic_values_bytes + \
                            32 * SIGSTKSZ)
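
/* The single os_validate()d block of THREAD_STRUCT_SIZE bytes is
 * carved up by create_thread_struct() in this order: control stack,
 * binding stack, alien stack, then the per_thread_data (struct thread
 * plus the TLS slots). The extra 32 * SIGSTKSZ presumably leaves room
 * for signal handling; that reading is an assumption, not something
 * this file states. */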
#ifdef LISP_FEATURE_SB_THREAD

static void
free_thread_stack_later(struct thread *thread_to_be_cleaned_up)
{
    struct freeable_stack *new_freeable_stack = 0;
    if (thread_to_be_cleaned_up) {
        new_freeable_stack = (struct freeable_stack *)
            os_validate(0, sizeof(struct freeable_stack));
        new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
        new_freeable_stack->stack = (os_vm_address_t)
            thread_to_be_cleaned_up->control_stack_start;
    }
    new_freeable_stack = (struct freeable_stack *)
        swap_lispobjs((lispobj *)(void *)&freeable_stack,
                      (lispobj)new_freeable_stack);
    if (new_freeable_stack) {
        FSHOW((stderr,"/reaping %lu\n", new_freeable_stack->os_thread));
        /* Under NPTL pthread_join really waits until the thread
         * exits and the stack can be safely freed. This is sadly not
         * mandated by the pthread spec. */
        gc_assert(pthread_join(new_freeable_stack->os_thread, NULL) == 0);
        os_invalidate(new_freeable_stack->stack, THREAD_STRUCT_SIZE);
        os_invalidate((os_vm_address_t) new_freeable_stack,
                      sizeof(struct freeable_stack));
    }
}

/* this is the first thing that runs in the child (which is why the
 * silly calling convention). Basically it calls the user's requested
 * lisp function after doing arch_os_thread_init and whatever other
 * bookkeeping needs to be done.
 */
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    int result;
    FSHOW((stderr,"/creating thread %lu\n", thread_self()));
    function = th->no_tls_value_marker;
    th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) {
        /* FIXME: handle error */
        lose("arch_os_thread_init failed\n");
    }

    th->os_thread=thread_self();
    protect_control_stack_guard_page(1);
    /* Since GC can only know about this thread from the all_threads
     * list and we're just adding this thread to it there is no danger
     * of deadlocking even with SIG_STOP_FOR_GC blocked (which it is
     * not). */
    pthread_mutex_lock(&all_threads_lock);
    link_thread(th);
    pthread_mutex_unlock(&all_threads_lock);

    result = funcall0(function);
    th->state=STATE_DEAD;

    /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
     * thread, but since we are already dead it won't wait long. */
    pthread_mutex_lock(&all_threads_lock);
    gc_alloc_update_page_tables(0, &th->alloc_region);
    unlink_thread(th);
    pthread_mutex_unlock(&all_threads_lock);

    if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
    os_invalidate((os_vm_address_t)th->interrupt_data,
                  (sizeof (struct interrupt_data)));
    free_thread_stack_later(th);
    FSHOW((stderr,"/exiting thread %lu\n", thread_self()));
    return result;
}

#endif /* LISP_FEATURE_SB_THREAD */

static void
free_thread_struct(struct thread *th)
{
    if (th->interrupt_data)
        os_invalidate((os_vm_address_t) th->interrupt_data,
                      (sizeof (struct interrupt_data)));
    os_invalidate((os_vm_address_t) th->control_stack_start,
                  THREAD_STRUCT_SIZE);
}

/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */
static struct thread *
create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;
#ifdef LISP_FEATURE_SB_THREAD
    int i;
#endif

    /* may as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed */
    spaces=os_validate(0, THREAD_STRUCT_SIZE);
    if(!spaces)
        return NULL;
    per_thread=(union per_thread_data *)
        ((void*)spaces+
         THREAD_CONTROL_STACK_SIZE+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);

#ifdef LISP_FEATURE_SB_THREAD
    for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
        per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
    if (all_threads == 0) {
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
            SetSymbolValue
                (FREE_TLS_INDEX,
                 /* FIXME: should be MAX_INTERRUPTS -1 ? */
                 make_fixnum(MAX_INTERRUPTS+
                             sizeof(struct thread)/sizeof(lispobj)),
                 0);
            SetSymbolValue(TLS_INDEX_LOCK,make_fixnum(0),0);
        }
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
        STATIC_TLS_INIT(PSEUDO_ATOMIC_ATOMIC,pseudo_atomic_atomic);
        STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
#endif
#undef STATIC_TLS_INIT
    }
#endif

    th=&per_thread->thread;
    th->control_stack_start = spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
    th->control_stack_end = th->binding_stack_start;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    th->binding_stack_pointer=th->binding_stack_start;
    th->this=th;
    th->os_thread=0;
    th->state=STATE_RUNNING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    th->pseudo_atomic_interrupted=0;
    th->pseudo_atomic_atomic=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls. So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime. It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(BINDING_STACK_POINTER,(lispobj)th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC,(lispobj)th->pseudo_atomic_atomic,th);
    SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED,th->pseudo_atomic_interrupted,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);
    bind_variable(GC_PENDING,NIL,th);
#ifdef LISP_FEATURE_SB_THREAD
    bind_variable(STOP_FOR_GC_PENDING,NIL,th);
#endif

    th->interrupt_data = (struct interrupt_data *)
        os_validate(0,(sizeof (struct interrupt_data)));
    if (!th->interrupt_data) {
        free_thread_struct(th);
        return 0;
    }
    th->interrupt_data->pending_handler = 0;
    th->no_tls_value_marker=initial_function;
    return th;
}

void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    if(th) {
        initial_thread_trampoline(th); /* no return */
    } else lose("can't create initial thread\n");
}

#ifdef LISP_FEATURE_SB_THREAD

#ifndef __USE_XOPEN2K
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
                                  size_t __stacksize);
#endif

boolean create_os_thread(struct thread *th,os_thread_t *kid_tid)
{
    /* The new thread inherits the restrictive signal mask set here,
     * and enables signals again when it is set up properly. */
    pthread_attr_t attr;
    sigset_t newset,oldset;
    boolean r=1;
    sigemptyset(&newset);
    /* Blocking deferrable signals is enough, no need to block
     * SIG_STOP_FOR_GC because the child thread is not linked onto
     * all_threads until it's ready. */
    sigaddset_deferrable(&newset);
    thread_sigmask(SIG_BLOCK, &newset, &oldset);

    if((pthread_attr_init(&attr)) ||
       (pthread_attr_setstack(&attr,th->control_stack_start,
                              THREAD_CONTROL_STACK_SIZE-16)) ||
       (pthread_create
        (kid_tid,&attr,(void *(*)(void *))new_thread_trampoline,th)))
        r=0;
    thread_sigmask(SIG_SETMASK,&oldset,0);
    return r;
}

os_thread_t create_thread(lispobj initial_function) {
    struct thread *th;
    os_thread_t kid_tid;

    if(linux_no_threads_p) return 0;

    /* Assuming that a fresh thread struct has no lisp objects in it,
     * linking it to all_threads can be left to the thread itself
     * without fear of gc lossage. initial_function violates this
     * assumption and must stay pinned until the child starts up. */
    th = create_thread_struct(initial_function);
    if(th==0) return 0;

    if (create_os_thread(th,&kid_tid)) {
        return kid_tid;
    } else {
        free_thread_struct(th);
        return 0;
    }
}

/* Send the signo to os_thread, retry if the rt signal queue is
 * full. */
static int kill_thread_safely(os_thread_t os_thread, int signo)
{
    int r;
    /* The man page does not mention EAGAIN as a valid return value
     * for either pthread_kill or kill. But that's theory, this is
     * practice. By waiting here we assume that the delivery of this
     * signal is not necessary for the delivery of the signals in the
     * queue. In other words, we _assume_ there are no deadlocks. */
    while ((r=pthread_kill(os_thread,signo))==EAGAIN) {
        /* wait a bit then try again in the hope of the rt signal
         * queue not being full */
        FSHOW_SIGNAL((stderr,"/rt signal queue full\n"));
        /* FIXME: some kind of backoff (random, exponential) would be
         * nice. */
        sleep(1);
    }
    return r;
}
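
/* A minimal sketch of the backoff suggested by the FIXME above;
 * illustrative only, not part of the implementation (usleep() and the
 * one-second cap are assumptions):
 *
 *     useconds_t delay = 1000;                    // start at 1 ms
 *     while ((r = pthread_kill(os_thread, signo)) == EAGAIN) {
 *         usleep(delay);                          // wait, then retry
 *         if (delay < 1000000)
 *             delay *= 2;                         // double, cap at 1 s
 *     }
 */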

int signal_interrupt_thread(os_thread_t os_thread)
{
    int status = kill_thread_safely(os_thread, SIG_INTERRUPT_THREAD);
    if (status == 0) {
        return 0;
    } else if (status == ESRCH) {
        return -1;
    } else {
        lose("cannot send SIG_INTERRUPT_THREAD to thread=%lu: %d, %s\n",
             os_thread, status, strerror(status));
    }
}

/* stopping the world is a two-stage process. From this thread we signal
 * all the others with SIG_STOP_FOR_GC. The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 */

/* To avoid deadlocks when gc stops the world all clients of each
 * mutex must enable or disable SIG_STOP_FOR_GC for the duration of
 * holding the lock, but they must agree on which. */
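
/* A sketch of the discipline described above, for a lock whose
 * clients agree to block SIG_STOP_FOR_GC while holding it
 * (illustrative only; some_lock is hypothetical):
 *
 *     sigset_t set, old;
 *     sigemptyset(&set);
 *     sigaddset(&set, SIG_STOP_FOR_GC);
 *     thread_sigmask(SIG_BLOCK, &set, &old);  // gc cannot stop us now
 *     pthread_mutex_lock(&some_lock);
 *     ... critical section ...
 *     pthread_mutex_unlock(&some_lock);
 *     thread_sigmask(SIG_SETMASK, &old, 0);   // stoppable again
 */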
void gc_stop_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status;
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
                  th->os_thread));
    /* keep threads from starting while the world is stopped. */
    pthread_mutex_lock(&all_threads_lock);
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got lock, thread=%lu\n",
                  th->os_thread));
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        gc_assert(p->os_thread != 0);
        if((p!=th) && (p->state==STATE_RUNNING)) {
            FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %lu\n",
                          p->os_thread));
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status==ESRCH) {
                /* This thread has exited. */
                gc_assert(p->state==STATE_DEAD);
            } else if (status) {
                lose("cannot send suspend thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
    /* wait for the running threads to stop or finish */
    for(p=all_threads;p;) {
        if((p!=th) && (p->state==STATE_RUNNING)) {
            sched_yield();
        } else {
            p=p->next;
        }
    }
    FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));
}

void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    int status;
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
    for(p=all_threads;p;p=p->next) {
        gc_assert(p->os_thread!=0);
        if((p!=th) && (p->state!=STATE_DEAD)) {
            if(p->state!=STATE_SUSPENDED) {
                lose("gc_start_the_world: wrong thread state is %d\n",
                     fixnum_value(p->state));
            }
            FSHOW_SIGNAL((stderr, "/gc_start_the_world: resuming %lu\n",
                          p->os_thread));
            p->state=STATE_RUNNING;
            status=kill_thread_safely(p->os_thread,SIG_STOP_FOR_GC);
            if (status) {
                lose("cannot resume thread=%lu: %d, %s\n",
                     p->os_thread,status,strerror(status));
            }
        }
    }
    /* If we waited here until all threads leave STATE_SUSPENDED, then
     * SIG_STOP_FOR_GC wouldn't need to be a rt signal. That has some
     * performance implications, but does away with the 'rt signal
     * queue full' problem. */
    pthread_mutex_unlock(&all_threads_lock);
    FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}
#endif