13 #include "validate.h" /* for CONTROL_STACK_SIZE etc */
16 #include "target-arch-os.h"
20 #include "genesis/cons.h"
21 #include "genesis/fdefn.h"
22 #include "interr.h" /* for lose() */
23 #include "gc-internal.h"
#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */

int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
struct thread *all_threads;
volatile lispobj all_threads_lock;
volatile lispobj thread_start_lock;
extern struct interrupt_data * global_interrupt_data;
extern int linux_no_threads_p;

int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    lispobj *args = NULL;
#endif

    function = th->unbound_marker;
    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;

    if(th->pid < 1) lose("th->pid not set up right");
    th->state=STATE_RUNNING;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}

/* this is the first thing that clone() runs in the child (which is
 * why the silly calling convention).  Basically it calls the user's
 * requested lisp function after doing arch_os_thread_init and
 * whatever other bookkeeping needs to be done
 */

#ifdef LISP_FEATURE_SB_THREAD
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    function = th->unbound_marker;
    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;

    /* wait here until our thread is linked into all_threads: see below */
    while(th->pid<1) sched_yield();

    th->state=STATE_RUNNING;
    return funcall0(function);
}
#endif /* LISP_FEATURE_SB_THREAD */

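/* Startup handshake between creator and child, in timeline form (a
 * sketch of the code in link_thread/create_thread below, not an
 * additional mechanism):
 *
 *   creator (create_thread)              child (new_thread_trampoline)
 *   -----------------------              -----------------------------
 *   clone(...)                           while(th->pid<1) sched_yield();
 *   link_thread: th->pid=kid_pid  --->   sees pid, th->state=STATE_RUNNING
 *   while(th->state==STATE_STARTING)
 *       sched_yield();                   funcall0(function)
 */
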
/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */

struct thread * create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;

    /* may as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed */
    spaces=os_validate(0,
                       THREAD_CONTROL_STACK_SIZE+
                       BINDING_STACK_SIZE+
                       ALIEN_STACK_SIZE+
                       dynamic_values_bytes+
                       32*SIGSTKSZ);
    if(!spaces)
        return NULL;
    per_thread=(union per_thread_data *)
        (spaces+
         THREAD_CONTROL_STACK_SIZE+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);

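    /* The single block from os_validate() above is carved up like
     * this (a sketch; per_thread is a union overlaying struct thread
     * on the base of the dynamic values vector):
     *
     *   spaces ---> +--------------------------------------------+
     *               | control stack (THREAD_CONTROL_STACK_SIZE)  |
     *               +--------------------------------------------+
     *               | binding stack (BINDING_STACK_SIZE)         |
     *               +--------------------------------------------+
     *               | alien stack (ALIEN_STACK_SIZE)             |
     * per_thread -> +--------------------------------------------+
     *               | struct thread / dynamic values             |
     *               |   (dynamic_values_bytes)                   |
     *               +--------------------------------------------+
     *               | signal stack space (32*SIGSTKSZ)           |
     *               +--------------------------------------------+
     */
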
    if(all_threads) {
        memcpy(per_thread,arch_os_get_current_thread(),
               dynamic_values_bytes);
    } else {
#ifdef LISP_FEATURE_SB_THREAD
        int i;
        for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
            per_thread->dynamic_values[i]=UNBOUND_MARKER_WIDETAG;
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG)
            SetSymbolValue
                (FREE_TLS_INDEX,
                 make_fixnum(MAX_INTERRUPTS+
                             sizeof(struct thread)/sizeof(lispobj)),
                 0);

#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#ifdef LISP_FEATURE_X86
        STATIC_TLS_INIT(PSEUDO_ATOMIC_ATOMIC,pseudo_atomic_atomic);
        STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
#endif
#undef STATIC_TLS_INIT
#endif
    }

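    /* Under #+SB-THREAD a special variable's per-thread binding
     * lives in the dynamic_values vector, at the word offset given by
     * the symbol's tls_index (assigned by STATIC_TLS_INIT above, or
     * handed out later from FREE_TLS_INDEX).  A sketch of the lookup,
     * not the real SymbolValue definition:
     *
     *   lispobj v=per_thread->dynamic_values[fixnum_value(sym->tls_index)];
     *   if(v==UNBOUND_MARKER_WIDETAG) v=sym->value;   (global fallback)
     */
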
136 th=&per_thread->thread;
137 th->control_stack_start = spaces;
138 th->binding_stack_start=
139 (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
140 th->control_stack_end = th->binding_stack_start;
141 th->alien_stack_start=
142 (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
143 th->binding_stack_pointer=th->binding_stack_start;
146 th->state=STATE_STARTING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    th->pseudo_atomic_interrupted=0;
    th->pseudo_atomic_atomic=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls.  So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime.  It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,(lispobj)th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,(lispobj)th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,(lispobj)th->control_stack_end,th);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
    SetSymbolValue(BINDING_STACK_POINTER,(lispobj)th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,(lispobj)th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC,(lispobj)th->pseudo_atomic_atomic,th);
    SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED,th->pseudo_atomic_interrupted,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif

    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);

    th->interrupt_data=
        os_validate(0,(sizeof (struct interrupt_data)));
    if(all_threads)
        memcpy(th->interrupt_data,
               arch_os_get_current_thread()->interrupt_data,
               sizeof (struct interrupt_data));
    else
        memcpy(th->interrupt_data,global_interrupt_data,
               sizeof (struct interrupt_data));

    /* smuggle the function to call through the otherwise-unused
     * unbound_marker slot; the trampolines above fetch it and restore
     * the real unbound marker */
    th->unbound_marker=initial_function;
    return th;
}

void link_thread(struct thread *th,pid_t kid_pid)
{
    sigset_t newset,oldset;
    sigemptyset(&newset);
    sigaddset_blockable(&newset);
    sigprocmask(SIG_BLOCK, &newset, &oldset);

    get_spinlock(&all_threads_lock,kid_pid);
    th->next=all_threads;
    all_threads=th;
    /* note that th->pid is 0 at this time.  We rely on all_threads_lock
     * to ensure that we don't have >1 thread with pid=0 on the list at once
     */
    protect_control_stack_guard_page(th->pid,1);
    th->pid=kid_pid;            /* child will not start until this is set */
    release_spinlock(&all_threads_lock);

    sigprocmask(SIG_SETMASK,&oldset,0);
}

void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    pid_t kid_pid=getpid();
    if(th && kid_pid>0) {
        link_thread(th,kid_pid);
        initial_thread_trampoline(all_threads); /* no return */
    } else lose("can't create initial thread");
}

#ifdef LISP_FEATURE_SB_THREAD
pid_t create_thread(lispobj initial_function) {
    struct thread *th;
    pid_t kid_pid=0;

    if(linux_no_threads_p) return 0;
    th=create_thread_struct(initial_function);
    if(th==0) return 0;
#ifdef QSHOW_SIGNALS
    SHOW("create_thread:waiting on lock");
#endif
    get_spinlock(&thread_start_lock,arch_os_get_current_thread()->pid);
#ifdef QSHOW_SIGNALS
    SHOW("create_thread:got lock");
#endif
    kid_pid=clone(new_thread_trampoline,
                  (((void*)th->control_stack_start)+
                   THREAD_CONTROL_STACK_SIZE-16),
                  CLONE_FILES|SIG_THREAD_EXIT|CLONE_VM,th);

    if(kid_pid>0) {
        link_thread(th,kid_pid);
        /* wait here until our thread is started: see new_thread_trampoline */
        while(th->state==STATE_STARTING) sched_yield();
        /* it's started and initialized, it's safe to gc */
        release_spinlock(&thread_start_lock);
#ifdef QSHOW_SIGNALS
        SHOW("create_thread:released lock");
#endif
        return kid_pid;
    } else {
        release_spinlock(&thread_start_lock);
#ifdef QSHOW_SIGNALS
        SHOW("create_thread:released lock(failure)");
#endif
        /* creation failed: reclaim the spaces allocated by
         * create_thread_struct */
        os_invalidate((os_vm_address_t) th->control_stack_start,
                      ((sizeof (lispobj))
                       * (th->control_stack_end-th->control_stack_start)) +
                      BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
                      32*SIGSTKSZ);
        return 0;
    }
}
#endif

struct thread *find_thread_by_pid(pid_t pid)
{
    struct thread *th;
    for_each_thread(th)
        if(th->pid==pid) return th;
    return 0;
}

#if defined LISP_FEATURE_SB_THREAD
/* This is not needed unless #+SB-THREAD, as there's a trivial null
 * unithread definition. */

void mark_dead_threads()
{
    pid_t kid;
    int status;
    while(1) {
        kid=waitpid(-1,&status,__WALL|WNOHANG);
        if(kid<=0) break;
        if(WIFEXITED(status) || WIFSIGNALED(status)) {
            struct thread *th=find_thread_by_pid(kid);
            if(th) th->state=STATE_DEAD;
        }
    }
}

void reap_dead_threads()
{
    struct thread *th,*next,*prev=0;
    th=all_threads;
    while(th) {
        next=th->next;
        if(th->state==STATE_DEAD) {
            funcall1(SymbolFunction(HANDLE_THREAD_EXIT),make_fixnum(th->pid));
#ifdef LISP_FEATURE_GENCGC
            gc_alloc_update_page_tables(0, &th->alloc_region);
#endif
            get_spinlock(&all_threads_lock,th->pid);
            if(prev) prev->next=next;
            else all_threads=next;
            release_spinlock(&all_threads_lock);
            if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
            os_invalidate((os_vm_address_t) th->control_stack_start,
                          ((sizeof (lispobj))
                           * (th->control_stack_end-th->control_stack_start)) +
                          BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
                          32*SIGSTKSZ);
        } else
            prev=th;
        th=next;
    }
}

int interrupt_thread(pid_t pid, lispobj function)
{
    struct thread *th;
    union sigval sigval;
    sigval.sival_int=function;
    for_each_thread(th)
        if((th->pid==pid) && (th->state != STATE_DEAD))
            return sigqueue(pid, SIG_INTERRUPT_THREAD, sigval);
    errno=EPERM; return -1;
}

int signal_thread_to_dequeue (pid_t pid)
{
    return kill (pid, SIG_DEQUEUE);
}

/* stopping the world is a two-stage process.  From this thread we signal
 * all the others with SIG_STOP_FOR_GC.  The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then waits for another SIG_STOP_FOR_GC.
 */

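/* The other half of the handshake lives in the SIG_STOP_FOR_GC
 * handler (in interrupt.c).  In outline (an illustrative sketch, not
 * the handler's actual source):
 *
 *   handler: th->state=STATE_STOPPED;
 *            sigwait until the second SIG_STOP_FOR_GC arrives
 *                (sent by gc_start_the_world below);
 *            th->state=STATE_RUNNING;
 */
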
void gc_stop_the_world()
{
#ifdef QSHOW_SIGNALS
    SHOW("gc_stop_the_world:begin");
#endif
    struct thread *p,*th=arch_os_get_current_thread();
    /* keep threads from starting while the world is stopped. */
    get_spinlock(&thread_start_lock,th->pid);
#ifdef QSHOW_SIGNALS
    SHOW("gc_stop_the_world:locked");
#endif
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    for(p=all_threads; p; p=p->next) {
        if((p!=th) && (p->pid!=0) && (p->state==STATE_RUNNING)) {
            p->state=STATE_STOPPING;
            if(kill(p->pid,SIG_STOP_FOR_GC)==-1) {
                /* we can't kill the process; assume because it
                 * died already (and its parent is dead so never
                 * saw the SIGCHLD) */
                p->state=STATE_DEAD;
            }
        }
    }
#ifdef QSHOW_SIGNALS
    SHOW("gc_stop_the_world:signals sent");
#endif
    /* wait for the running threads to stop */
    for(p=all_threads;p;) {
        if((p==th) || (p->pid==0) || (p->state==STATE_STARTING) ||
           (p->state==STATE_DEAD) || (p->state==STATE_STOPPED)) {
            p=p->next;
        }
    }
#ifdef QSHOW_SIGNALS
    SHOW("gc_stop_the_world:end");
#endif
}

void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting; there can be threads just starting from before
     * gc_stop_the_world, though */
#ifdef QSHOW_SIGNALS
    SHOW("gc_start_the_world:begin");
#endif
    for(p=all_threads;p;p=p->next) {
        if((p!=th) && (p->pid!=0) && (p->state!=STATE_STARTING) &&
           (p->state!=STATE_DEAD)) {
            if(p->state!=STATE_STOPPED) {
                lose("gc_start_the_world: wrong thread state is %ld\n",
                     fixnum_value(p->state));
            }
            kill(p->pid,SIG_STOP_FOR_GC);
        }
    }
    /* we must wait for all threads to leave stopped state else we
     * risk signal accumulation and lose any meaning of
     * SIG_STOP_FOR_GC */
    for(p=all_threads;p;) {
        gc_assert(p->state!=STATE_STOPPING);
        if((p==th) || (p->pid==0) || (p->state!=STATE_STOPPED)) {
            p=p->next;
        }
    }
    release_spinlock(&thread_start_lock);
#ifdef QSHOW_SIGNALS
    SHOW("gc_start_the_world:end");
#endif
}
#endif
