#include "validate.h"           /* for CONTROL_STACK_SIZE etc */
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"

#define ALIEN_STACK_SIZE (1*1024*1024) /* 1MB size chosen at random */

int dynamic_values_bytes=4096*sizeof(lispobj);  /* same for all threads */
struct thread *all_threads;
volatile lispobj all_threads_lock;
extern struct interrupt_data * global_interrupt_data;

void get_spinlock(volatile lispobj *word,int value);
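
/* get_spinlock() is implemented elsewhere in the runtime (in
 * architecture-specific code); its intended semantics are roughly
 *
 *     while(!compare_and_swap(word,0,value)) ;  // spin until we own it
 *
 * i.e. busy-wait until VALUE (by convention, the locking thread's pid)
 * has been atomically installed in *WORD.  release_spinlock() stores 0
 * back into *WORD. */

/* The initial thread does not arrive via clone(), so it gets its own
 * trampoline: initialize per-thread OS state, then call the Lisp
 * function that create_thread_struct() stashed in th->unbound_marker. */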
int
initial_thread_trampoline(struct thread *th)
{
    lispobj function;
    lispobj *args = NULL;
    function = th->unbound_marker;
    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;

    if(th->pid < 1) lose("th->pid not set up right");
    th->state=STATE_RUNNING;
#if defined(LISP_FEATURE_X86)
    return call_into_lisp_first_time(function,args,0);
#else
    return funcall0(function);
#endif
}
/* this is the first thing that clone() runs in the child (which is
 * why the silly calling convention).  Basically it calls the user's
 * requested lisp function after doing arch_os_thread_init and
 * whatever other bookkeeping needs to be done
 */

#ifdef LISP_FEATURE_SB_THREAD
int
new_thread_trampoline(struct thread *th)
{
    lispobj function;
    function = th->unbound_marker;
    th->unbound_marker = UNBOUND_MARKER_WIDETAG;
    if(arch_os_thread_init(th)==0) return 1;

    /* wait here until our thread is linked into all_threads: see below */
    while(th->pid<1) sched_yield();

    th->state=STATE_RUNNING;
    return funcall0(function);
}
#endif /* LISP_FEATURE_SB_THREAD */
/* this is called from any other thread to create the new one, and
 * initialize all parts of it that can be initialized from another
 * thread
 */

struct thread * create_thread_struct(lispobj initial_function) {
    union per_thread_data *per_thread;
    struct thread *th=0;        /* subdue gcc */
    void *spaces=0;

    /* may as well allocate all the spaces at once: it saves us from
     * having to decide what to do if only some of the allocations
     * succeed */
    spaces=os_validate(0,
                       THREAD_CONTROL_STACK_SIZE+
                       BINDING_STACK_SIZE+
                       ALIEN_STACK_SIZE+
                       dynamic_values_bytes+
                       32*SIGSTKSZ);
    if(!spaces) goto cleanup;
    per_thread=(union per_thread_data *)
        (spaces+
         THREAD_CONTROL_STACK_SIZE+
         BINDING_STACK_SIZE+
         ALIEN_STACK_SIZE);

    th=&per_thread->thread;
    if(all_threads) {
        memcpy(per_thread,arch_os_get_current_thread(),
               dynamic_values_bytes);
    } else {
#ifdef LISP_FEATURE_SB_THREAD
        int i;
        for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
            per_thread->dynamic_values[i]=UNBOUND_MARKER_WIDETAG;
        if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG)
            SetSymbolValue(FREE_TLS_INDEX,
                           make_fixnum(MAX_INTERRUPTS+
                                       sizeof(struct thread)/sizeof(lispobj)),
                           0);
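        /* Point each static symbol's tls_index at the matching slot of
         * struct thread, so that SymbolValue() on one of these symbols
         * reads directly out of the thread structure. */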
#define STATIC_TLS_INIT(sym,field) \
  ((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
  make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))

        STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
        STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
        STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
        STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
        STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#ifdef LISP_FEATURE_X86
        STATIC_TLS_INIT(PSEUDO_ATOMIC_ATOMIC,pseudo_atomic_atomic);
        STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
#endif
#undef STATIC_TLS_INIT
#endif
    }
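
    /* Carve the single block allocated above into its pieces, which sit
     * at consecutive addresses:
     *
     *   spaces: | control stack | binding stack | alien stack | per-thread data |
     *
     * The per-thread data is the array of TLS "dynamic values"; since
     * per_thread_data is a union, the slots of struct thread double as
     * the first few dynamic values. */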
    th->control_stack_start = spaces;
    th->binding_stack_start=
        (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
    th->control_stack_end = th->binding_stack_start;
    th->alien_stack_start=
        (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
    th->binding_stack_pointer=th->binding_stack_start;
    th->pid=0;
    th->state=STATE_STOPPED;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
    th->alien_stack_pointer=((void *)th->alien_stack_start
                             + ALIEN_STACK_SIZE-4); /* naked 4.  FIXME */
#else
    th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
#ifdef LISP_FEATURE_X86
    th->pseudo_atomic_interrupted=0;
    th->pseudo_atomic_atomic=0;
#endif
#ifdef LISP_FEATURE_GENCGC
    gc_set_region_empty(&th->alloc_region);
#endif

#ifndef LISP_FEATURE_SB_THREAD
    /* the tls-points-into-struct-thread trick is only good for threaded
     * sbcl, because unithread sbcl doesn't have tls.  So, we copy the
     * appropriate values from struct thread here, and make sure that
     * we use the appropriate SymbolValue macros to access any of the
     * variable quantities from the C runtime.  It's not quite OAOOM,
     * it just feels like it */
    SetSymbolValue(BINDING_STACK_START,th->binding_stack_start,th);
    SetSymbolValue(CONTROL_STACK_START,th->control_stack_start,th);
    SetSymbolValue(CONTROL_STACK_END,th->control_stack_end,th);
#ifdef LISP_FEATURE_X86
    SetSymbolValue(BINDING_STACK_POINTER,th->binding_stack_pointer,th);
    SetSymbolValue(ALIEN_STACK,th->alien_stack_pointer,th);
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC,th->pseudo_atomic_atomic,th);
    SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED,th->pseudo_atomic_interrupted,th);
#else
    current_binding_stack_pointer=th->binding_stack_pointer;
    current_control_stack_pointer=th->control_stack_start;
#endif
#endif
    bind_variable(CURRENT_CATCH_BLOCK,make_fixnum(0),th);
    bind_variable(CURRENT_UNWIND_PROTECT_BLOCK,make_fixnum(0),th);
    bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,make_fixnum(0),th);
    bind_variable(INTERRUPT_PENDING, NIL,th);
    bind_variable(INTERRUPTS_ENABLED,T,th);

    th->interrupt_data=os_validate(0,(sizeof (struct interrupt_data)));
    if(all_threads)
        memcpy(th->interrupt_data,
               arch_os_get_current_thread()->interrupt_data,
               sizeof (struct interrupt_data));
    else
        memcpy(th->interrupt_data,global_interrupt_data,
               sizeof (struct interrupt_data));
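
    /* Smuggle the initial function to the trampoline in the (otherwise
     * still unused) unbound_marker slot: initial_thread_trampoline()
     * and new_thread_trampoline() fetch it from there and restore the
     * slot to UNBOUND_MARKER_WIDETAG before funcalling it. */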
    th->unbound_marker=initial_function;
    return th;
 cleanup:
    /* if(th && th->tls_cookie>=0) os_free_tls_pointer(th); */
    if(spaces) os_invalidate(spaces,
                             THREAD_CONTROL_STACK_SIZE+BINDING_STACK_SIZE+
                             ALIEN_STACK_SIZE+dynamic_values_bytes+
                             32*SIGSTKSZ);
    return 0;
}
void link_thread(struct thread *th,pid_t kid_pid)
{
    sigset_t newset,oldset;
    sigemptyset(&newset);
    sigaddset_blockable(&newset);
    sigprocmask(SIG_BLOCK, &newset, &oldset);

    get_spinlock(&all_threads_lock,kid_pid);
    th->next=all_threads;
    all_threads=th;
    /* note that th->pid is 0 at this time.  We rely on all_threads_lock
     * to ensure that we don't have >1 thread with pid=0 on the list at once
     */
    protect_control_stack_guard_page(th->pid,1);
    release_spinlock(&all_threads_lock);

    sigprocmask(SIG_SETMASK,&oldset,0);
    th->pid=kid_pid;            /* child will not start until this is set */
}
void create_initial_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    pid_t kid_pid=getpid();
    if(th && kid_pid>0) {
        link_thread(th,kid_pid);
        initial_thread_trampoline(all_threads); /* no return */
    } else lose("can't create initial thread");
}
#ifdef LISP_FEATURE_SB_THREAD
pid_t create_thread(lispobj initial_function) {
    struct thread *th=create_thread_struct(initial_function);
    pid_t kid_pid=0;

    if(th==0) return 0;
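    /* clone() starts the child in new_thread_trampoline(th), on a stack
     * growing down from the top of the new control stack (the -4 is an
     * x86ism, cf. the "naked 4" above).  CLONE_VM shares our address
     * space, CLONE_FILES our descriptor table; SIG_THREAD_EXIT (defined
     * elsewhere in the runtime) is the signal we get when the child
     * exits. */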
    kid_pid=clone(new_thread_trampoline,
                  (((void*)th->control_stack_start)+
                   THREAD_CONTROL_STACK_SIZE-4),
                  CLONE_FILES|SIG_THREAD_EXIT|CLONE_VM,th);

    if(kid_pid>0) {
        link_thread(th,kid_pid);
        return th->pid;
    } else {
        os_invalidate((os_vm_address_t) th->control_stack_start,
                      ((sizeof (lispobj))
                       * (th->control_stack_end-th->control_stack_start)) +
                      BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
                      32*SIGSTKSZ);
        return 0;
    }
}
#endif
void destroy_thread (struct thread *th)
{
    /* precondition: the unix task has already been killed and exited.
     * This is called by the parent or some other thread */
#ifdef LISP_FEATURE_GENCGC
    gc_alloc_update_page_tables(0, &th->alloc_region);
#endif
    get_spinlock(&all_threads_lock,th->pid);
    th->unbound_marker=0;       /* for debugging */
    if(th==all_threads)
        all_threads=th->next;
    else {
        struct thread *th1=all_threads;
        while(th1 && th1->next!=th) th1=th1->next;
        if(th1) th1->next=th->next;     /* unlink */
    }
    release_spinlock(&all_threads_lock);
    if(th && th->tls_cookie>=0) arch_os_thread_cleanup(th);
    os_invalidate((os_vm_address_t) th->control_stack_start,
                  ((sizeof (lispobj))
                   * (th->control_stack_end-th->control_stack_start)) +
                  BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
                  32*SIGSTKSZ);
}
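
/* Finish off any threads that have marked themselves STATE_DEAD: run
 * the Lisp-side HANDLE-THREAD-EXIT hook, unlink the thread from
 * all_threads, and release its stack spaces. */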
void reap_dead_threads()
{
    struct thread *th,*next,*prev=0;
    th=all_threads;
    while(th) {
        next=th->next;
        if(th->state==STATE_DEAD) {
            funcall1(SymbolFunction(HANDLE_THREAD_EXIT),make_fixnum(th->pid));
#ifdef LISP_FEATURE_GENCGC
            gc_alloc_update_page_tables(0, &th->alloc_region);
#endif
            get_spinlock(&all_threads_lock,th->pid);
            if(prev) prev->next=next;
            else all_threads=next;
            release_spinlock(&all_threads_lock);
            if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
            os_invalidate((os_vm_address_t) th->control_stack_start,
                          ((sizeof (lispobj))
                           * (th->control_stack_end-th->control_stack_start)) +
                          BINDING_STACK_SIZE+ALIEN_STACK_SIZE+
                          dynamic_values_bytes+32*SIGSTKSZ);
        } else
            prev=th;
        th=next;
    }
}
struct thread *find_thread_by_pid(pid_t pid)
{
    struct thread *th;
    for(th=all_threads;th;th=th->next)
        if(th->pid==pid) return th;
    return 0;
}
/* These are not needed unless #+SB-THREAD, and since sigwaitinfo()
 * doesn't seem to be easily available everywhere (OpenBSD...) it's
 * more trouble than it's worth to compile it when not needed. */
#if defined LISP_FEATURE_SB_THREAD
void block_sigcont(void)
{
    /* don't allow ourselves to receive SIGCONT while we're in the
     * "ambiguous" state of being on the queue but not actually stopped.
     */
    sigset_t newset;
    sigemptyset(&newset);
    sigaddset(&newset,SIG_DEQUEUE);
    sigprocmask(SIG_BLOCK, &newset, 0);
}
void unblock_sigcont_and_sleep(void)
{
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set,SIG_DEQUEUE);
    do {
        errno=0;
        sigwaitinfo(&set,0);
    }while(errno==EINTR);
    sigprocmask(SIG_UNBLOCK,&set,0);
}
int interrupt_thread(pid_t pid, lispobj function)
{
    union sigval sigval;
    sigval.sival_int=function;

    return sigqueue(pid, SIG_INTERRUPT_THREAD, sigval);
}
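
/* The Lisp function object travels in sigval.sival_int, which relies on
 * a lispobj fitting in an int (true on the 32-bit platforms this code
 * targets).  The SIG_INTERRUPT_THREAD handler, elsewhere in the
 * runtime, retrieves it from the siginfo_t and calls it in the target
 * thread. */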
int signal_thread_to_dequeue (pid_t pid)
{
    return kill (pid, SIG_DEQUEUE);
}
/* stopping the world is a two-stage process.  From this thread we signal
 * all the others with SIG_STOP_FOR_GC.  The handler for this signal does
 * the usual pseudo-atomic checks (we don't want to stop a thread while
 * it's in the middle of allocation) then kills _itself_ with SIGSTOP.
 */
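
/* Thread states as seen from here:
 *   STATE_RUNNING  -> STATE_STOPPING  (we have sent SIG_STOP_FOR_GC)
 *   STATE_STOPPING -> STATE_STOPPED   (its handler has parked the thread)
 * STATE_DEAD threads are already at rest and are skipped. */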
void gc_stop_the_world()
{
    /* stop all other threads by sending them SIG_STOP_FOR_GC */
    struct thread *p,*th=arch_os_get_current_thread();
    pid_t old_pid;
    int finished;
    do {
        finished=1;
        get_spinlock(&all_threads_lock,th->pid);
        for(p=all_threads,old_pid=p->pid; p; p=p->next) {
            if(p==th) continue;
            if(p->state==STATE_RUNNING) {
                p->state=STATE_STOPPING;
                kill(p->pid,SIG_STOP_FOR_GC);
            }
            if((p->state!=STATE_STOPPED) &&
               (p->state!=STATE_DEAD)) {
                finished=0;
            }
        }
        release_spinlock(&all_threads_lock);
        sched_yield();
        /* if the head of all_threads changed, a new thread sprang up
         * while we weren't holding the lock: go round again and signal
         * it too */
        if(old_pid!=all_threads->pid) {
            finished=0;
        }
    } while(!finished);
}
void gc_start_the_world()
{
    struct thread *p,*th=arch_os_get_current_thread();
    /* if a resumed thread creates a new thread before we're done with
     * this loop, the new thread will get consed on the front of
     * all_threads, but it won't have been stopped so won't need
     * restarting */
    for(p=all_threads;p;p=p->next) {
        if((p==th) || (p->state==STATE_DEAD)) continue;
        p->state=STATE_RUNNING;
        kill(p->pid,SIGCONT);
    }
}
#endif