#if !defined(_INCLUDE_THREAD_H_)
#define _INCLUDE_THREAD_H_

#include <stddef.h>

#include "sbcl.h"
#include "globals.h"
#include "runtime.h"
#include "os.h"
#ifdef LISP_FEATURE_GENCGC
#include "gencgc-alloc-region.h"
#endif
#include "genesis/symbol.h"
#include "genesis/static-symbols.h"

#include "genesis/thread.h"
#include "genesis/fdefn.h"
#include "interrupt.h"
#include "validate.h"           /* for BINDING_STACK_SIZE etc */
#define STATE_RUNNING MAKE_FIXNUM(1)
#define STATE_STOPPED MAKE_FIXNUM(2)
#define STATE_DEAD MAKE_FIXNUM(3)
#if defined(LISP_FEATURE_SB_SAFEPOINT)
# define STATE_SUSPENDED_BRIEFLY MAKE_FIXNUM(4)
# define STATE_GC_BLOCKER MAKE_FIXNUM(5)
# define STATE_PHASE1_BLOCKER MAKE_FIXNUM(5)
# define STATE_PHASE2_BLOCKER MAKE_FIXNUM(6)
# define STATE_INTERRUPT_BLOCKER MAKE_FIXNUM(7)
#endif
#ifdef LISP_FEATURE_SB_THREAD
lispobj thread_state(struct thread *thread);
void set_thread_state(struct thread *thread, lispobj state);
void wait_for_thread_state_change(struct thread *thread, lispobj state);
#if defined(LISP_FEATURE_SB_SAFEPOINT)
enum threads_suspend_reason {
    SUSPEND_REASON_INTERRUPT
};

struct threads_suspend_info {
    pthread_mutex_t world_lock;
    enum threads_suspend_reason reason;
    struct thread * gc_thread;
    struct thread * interrupted_thread;
};

struct suspend_phase {
    enum threads_suspend_reason reason;
    struct suspend_phase *next;
};

extern struct threads_suspend_info suspend_info;
struct gcing_safety {
    lispobj csp_around_foreign_call;
    lispobj* pc_around_foreign_call;
};

int handle_safepoint_violation(os_context_t *context, os_vm_address_t addr);
void** os_get_csp(struct thread* th);
void assert_on_stack(struct thread *th, void *esp);
#endif /* defined(LISP_FEATURE_SB_SAFEPOINT) */
extern pthread_key_t lisp_thread;
#endif

extern int kill_safely(os_thread_t os_thread, int signal);
#define THREAD_SLOT_OFFSET_WORDS(c) \
 (offsetof(struct thread,c)/(sizeof (struct thread *)))

union per_thread_data {
    struct thread thread;
    lispobj dynamic_values[1];  /* actually more like 4000 or so */
};
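
/* The per-thread memory block can be viewed either as the `struct thread'
 * laid out by genesis or as a vector of lispobjs: a symbol's tls_index is
 * a byte offset into that block, so dynamic_values[tls_index >> WORD_SHIFT]
 * names the symbol's thread-local value slot.  The accessors further down
 * rely on this overlay. */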
/* A helper structure for data local to a thread, which is not pointer-sized.
 *
 * Originally, the layout of these fields was done manually in C code
 * with pointer arithmetic.  We let the C compiler figure it out now.
 *
 * (Why is this not part of `struct thread'?  Because that structure is
 * declared using genesis, and we would run into issues with fields that
 * are of unknown length.)
 */
struct nonpointer_thread_data
{
#ifdef LISP_FEATURE_SB_THREAD
    os_sem_t state_not_running_sem;
    os_sem_t state_not_stopped_sem;
# ifdef LISP_FEATURE_SB_SAFEPOINT
    /* For safepoint-based builds, together with the thread's
     * csp_around_foreign_call pointer target, thread_qrl(thread) makes a
     * `quickly revokable lock'.  Unlike most mutexes, this one is
     * normally locked; by convention, another thread may read and use the
     * thread's FFI-CSP location _either_ when it holds the lock (mutex)
     * _or_ when page permissions for the FFI-CSP location have been set
     * to read-only.
     *
     * The combined semantics of the QRL are not the same as the semantics
     * of the mutex returned by thread_qrl(); rather, the mutex, when
     * released by the owning thread, provides an edge-triggered
     * notification of QRL release, which is represented by writing a
     * non-null csp_around_foreign_call.
     *
     * When the owner thread is `in Lisp' (i.e. a heap mutator), its
     * FFI-CSP contains null; otherwise it points to the top of the C
     * stack that should be preserved by GENCGC.  If another thread needs
     * to wait for a mutator state change in the `in Lisp => in C'
     * direction, it disables FFI-CSP overwrites using page protection
     * and takes the mutex returned by thread_qrl().  The page fault
     * handler normally ends up in a routine that releases this mutex and
     * waits for some appropriate event to take it back.
     *
     * This way, each thread may modify its own FFI-CSP content freely
     * without memory barriers (paying with exception handling overhead
     * whenever contention happens). */
    pthread_mutex_t qrl_lock;
# endif
#else
    /* An unused field follows, to ensure that the struct is non-empty
     * for non-GCC compilers. */
    int unused;
#endif
};
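
/* Illustrative sketch (not part of the runtime API): one way a thread
 * other than the owner might wait for an `in Lisp => in C' transition
 * under the QRL convention described above.  The names
 * example_wait_for_foreign_call() and protect_csp_page() are hypothetical
 * and exist only to make the protocol concrete.
 *
 *   static void example_wait_for_foreign_call(struct thread *th)
 *   {
 *       // Revoke the owner's right to overwrite its FFI-CSP word:
 *       protect_csp_page(th);          // hypothetical page-protection call
 *       // Block until the owner publishes its foreign-call state; its
 *       // page-fault handler releases the QRL mutex once FFI-CSP holds a
 *       // non-null value.
 *       pthread_mutex_lock(thread_qrl(th));
 *       // Here *th->csp_around_foreign_call is non-null: the owner is
 *       // known to be `in C', and its stack top is safe to examine.
 *   }
 */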
extern struct thread *all_threads;
extern int dynamic_values_bytes;
#if defined(LISP_FEATURE_DARWIN)
#define CONTROL_STACK_ALIGNMENT_BYTES 8192 /* Darwin wants page-aligned stacks */
#define THREAD_ALIGNMENT_BYTES CONTROL_STACK_ALIGNMENT_BYTES
#else
#define THREAD_ALIGNMENT_BYTES BACKEND_PAGE_BYTES
#define CONTROL_STACK_ALIGNMENT_BYTES 16
#endif
#ifdef LISP_FEATURE_SB_THREAD
#define for_each_thread(th) for(th=all_threads;th;th=th->next)
#else
/* there's some possibility an SSC could notice that this never actually
 * changes. */
#define for_each_thread(th) for(th=all_threads;th;th=0)
#endif
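
/* Symbol value accessors.  With LISP_FEATURE_SB_THREAD, a symbol whose
 * tls_index is nonzero may have a thread-local binding stored in the
 * given thread's dynamic_values[] slot; a slot holding
 * NO_TLS_VALUE_MARKER_WIDETAG means "no local binding", in which case
 * (or when no thread is supplied) the accessors fall back to the
 * symbol's global value cell. */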
static inline lispobj *
SymbolValueAddress(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym = (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj *r = &(((union per_thread_data *)thread)
                       ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]);
        if((*r)!=NO_TLS_VALUE_MARKER_WIDETAG) return r;
    }
#endif
    return &sym->value;
}
static inline lispobj
SymbolValue(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym = (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj r =
            ((union per_thread_data *)thread)
            ->dynamic_values[(sym->tls_index) >> WORD_SHIFT];
        if(r!=NO_TLS_VALUE_MARKER_WIDETAG) return r;
    }
#endif
    return sym->value;
}
static inline lispobj
SymbolTlValue(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym = (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    return ((union per_thread_data *)thread)
        ->dynamic_values[(sym->tls_index) >> WORD_SHIFT];
#else
    return sym->value;
#endif
}
static inline void
SetSymbolValue(u64 tagged_symbol_pointer, lispobj val, void *thread)
{
    struct symbol *sym = (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj *pr = &(((union per_thread_data *)thread)
                        ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]);
        if(*pr!=NO_TLS_VALUE_MARKER_WIDETAG) {
            *pr = val;
            return;
        }
    }
#endif
    sym->value = val;
}
static inline void
SetTlSymbolValue(u64 tagged_symbol_pointer, lispobj val, void *thread)
{
#ifdef LISP_FEATURE_SB_THREAD
    struct symbol *sym = (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
    ((union per_thread_data *)thread)
        ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]
        = val;
#else
    SetSymbolValue(tagged_symbol_pointer, val, thread);
#endif
}
/* This only works for static symbols. */
static inline lispobj
StaticSymbolFunction(lispobj sym)
{
    return ((struct fdefn *)native_pointer(SymbolValue(sym, 0)))->fun;
}
/* These are for use during GC, on the current thread, or on prenatal
 * threads only. */
#if defined(LISP_FEATURE_SB_THREAD)
#define get_binding_stack_pointer(thread)       \
    ((thread)->binding_stack_pointer)
#define set_binding_stack_pointer(thread,value) \
    ((thread)->binding_stack_pointer = (lispobj *)(value))
#define access_control_stack_pointer(thread) \
    ((thread)->control_stack_pointer)
# if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#define access_control_frame_pointer(thread) \
    ((thread)->control_frame_pointer)
# endif
#elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define get_binding_stack_pointer(thread)       \
    SymbolValue(BINDING_STACK_POINTER, thread)
#define set_binding_stack_pointer(thread,value) \
    SetSymbolValue(BINDING_STACK_POINTER, (lispobj)(value), thread)
#define access_control_stack_pointer(thread) \
    (current_control_stack_pointer)
#else
#define get_binding_stack_pointer(thread)       \
    (current_binding_stack_pointer)
#define set_binding_stack_pointer(thread,value) \
    (current_binding_stack_pointer = (lispobj *)(value))
#define access_control_stack_pointer(thread) \
    (current_control_stack_pointer)
#define access_control_frame_pointer(thread) \
    (current_control_frame_pointer)
#endif
#if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_GCC_TLS)
extern __thread struct thread *current_thread;
#endif

#ifdef LISP_FEATURE_SB_SAFEPOINT
# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
#else
# define THREAD_CSP_PAGE_SIZE 0
#endif
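
/* Size of the single block of memory allocated for each thread: its
 * control and binding stacks, the non-pointer data above, the TLS vector,
 * plus alignment slack and, on safepoint builds, one extra page for the
 * FFI-CSP word. */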
#define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
                            sizeof(struct nonpointer_thread_data) +          \
                            dynamic_values_bytes +                           \
                            THREAD_ALIGNMENT_BYTES +                         \
                            THREAD_CSP_PAGE_SIZE)
/* This is clearly per-arch and possibly even per-OS code, but we can't
 * put it somewhere sensible like x86-linux-os.c because it needs too
 * much stuff like struct thread and all_threads to be defined, which
 * usually aren't by that time.  So, it's here instead.  Sorry */

static inline struct thread *arch_os_get_current_thread(void)
{
#if defined(LISP_FEATURE_SB_THREAD)
#if defined(LISP_FEATURE_X86)
    register struct thread *me=0;
#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
    sel_t sel;
    struct thread *th = pthread_getspecific(specials);
    sel.index = th->tls_cookie;
    __asm__ __volatile__ ("movw %w0, %%fs" : : "r"(sel));
#elif defined(LISP_FEATURE_FREEBSD)
#ifdef LISP_FEATURE_GCC_TLS
    struct thread *th = current_thread;
#else
    struct thread *th = pthread_getspecific(specials);
#endif
#ifdef LISP_FEATURE_RESTORE_TLS_SEGMENT_REGISTER_FROM_TLS
    unsigned int sel = LSEL(th->tls_cookie, SEL_UPL);
    unsigned int fs = rfs();

    /* Load FS only if it's necessary.  Modifying a selector
     * causes privilege checking and it takes a long time. */
    if (fs != sel)
        load_fs(sel);
#endif
    return th;
#endif
    __asm__ __volatile__ ("movl %%fs:%c1,%0" : "=r" (me)
                          : "i" (offsetof (struct thread,this)));
    return me;
#else
#ifdef LISP_FEATURE_GCC_TLS
    return current_thread;
#else
    return pthread_getspecific(specials);
#endif
#endif
#else
    return all_threads;
#endif
}
#if defined(LISP_FEATURE_MACH_EXCEPTION_HANDLER)
extern kern_return_t mach_lisp_thread_init(struct thread *thread);
extern kern_return_t mach_lisp_thread_destroy(struct thread *thread);
#endif
#ifdef LISP_FEATURE_SB_SAFEPOINT
void thread_in_safety_transition(os_context_t *ctx);
void thread_in_lisp_raised(os_context_t *ctx);
void thread_interrupted(os_context_t *ctx);
void thread_pitstop(os_context_t *ctxptr);
extern void thread_register_gc_trigger();

# ifdef LISP_FEATURE_SB_THRUPTION
int wake_thread(os_thread_t os_thread);
int wake_thread_posix(os_thread_t os_thread);
# endif

#define thread_qrl(th) (&(th)->nonpointer_data->qrl_lock)
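
/* push_gcing_safety() saves and clears the current thread's published
 * foreign-call state (FFI-CSP and PC), so that for the extent of a
 * WITH_GC_AT_SAFEPOINTS_ONLY body below the thread counts as a mutator
 * (`in Lisp') and GC can only happen at safepoints; pop_gcing_safety()
 * restores whatever was saved. */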
static inline void
push_gcing_safety(struct gcing_safety *into)
{
    struct thread* th = arch_os_get_current_thread();
    if ((into->csp_around_foreign_call =
         *th->csp_around_foreign_call)) {
        *th->csp_around_foreign_call = 0;
        into->pc_around_foreign_call = th->pc_around_foreign_call;
        th->pc_around_foreign_call = 0;
    } else {
        into->pc_around_foreign_call = 0;
    }
}
static inline void
pop_gcing_safety(struct gcing_safety *from)
{
    struct thread* th = arch_os_get_current_thread();
    if (from->csp_around_foreign_call) {
        *th->csp_around_foreign_call = from->csp_around_foreign_call;
        th->pc_around_foreign_call = from->pc_around_foreign_call;
    }
}
/* Even with just -O1, gcc optimizes the jumps in this "loop" away
 * entirely, giving the ability to define WITH-FOO-style macros. */
#define RUN_BODY_ONCE(prefix, finally_do)               \
    int prefix##done = 0;                               \
    for (; !prefix##done; finally_do, prefix##done = 1)

#define WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(var)         \
    struct gcing_safety var;                            \
    push_gcing_safety(&var);                            \
    RUN_BODY_ONCE(var, pop_gcing_safety(&var))

#define WITH_GC_AT_SAFEPOINTS_ONLY()                    \
    WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(sbcl__gc_safety)

#define WITH_STATE_SEM_hygenic(var, thread)                              \
    os_sem_wait((thread)->state_sem, "thread_state");                    \
    RUN_BODY_ONCE(var, os_sem_post((thread)->state_sem, "thread_state"))

#define WITH_STATE_SEM(thread)                          \
    WITH_STATE_SEM_hygenic(sbcl__state_sem, thread)
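
/* Usage sketch (illustrative only): the WITH_* macros above expand into a
 * declaration plus a one-iteration for-loop, so they can wrap the statement
 * or block that follows them and run their cleanup form on the way out.
 * example_callback() is a hypothetical function, not part of the runtime:
 *
 *   static void example_callback(struct thread *self)
 *   {
 *       WITH_GC_AT_SAFEPOINTS_ONLY() {
 *           // body runs exactly once; pop_gcing_safety() runs afterwards
 *       }
 *       WITH_STATE_SEM(self) {
 *           // body runs exactly once while self->state_sem is held
 *       }
 *   }
 *
 * Note that a `break' inside such a body would leave the for-loop without
 * evaluating the cleanup expression. */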
int check_pending_thruptions(os_context_t *ctx);

#endif /* LISP_FEATURE_SB_SAFEPOINT */

extern boolean is_some_thread_local_addr(os_vm_address_t addr);
extern void create_initial_thread(lispobj);

#ifdef LISP_FEATURE_SB_THREAD
extern pthread_mutex_t all_threads_lock;
#endif

#endif /* _INCLUDE_THREAD_H_ */