Preliminary work towards threads on win32
[sbcl.git] / src / runtime / thread.h
#if !defined(_INCLUDE_THREAD_H_)
#define _INCLUDE_THREAD_H_

#include <sys/types.h>
#include <unistd.h>
#include <stddef.h>
#include "sbcl.h"
#include "globals.h"
#include "runtime.h"
#include "os.h"
#ifdef LISP_FEATURE_GENCGC
#include "gencgc-alloc-region.h"
#endif
#ifdef LISP_FEATURE_WIN32
#include "win32-thread-private-events.h"
#endif
#include "genesis/symbol.h"
#include "genesis/static-symbols.h"

#include "genesis/thread.h"
#include "genesis/fdefn.h"
#include "interrupt.h"
#include "validate.h"           /* for BINDING_STACK_SIZE etc */

#define STATE_RUNNING MAKE_FIXNUM(1)
#define STATE_STOPPED MAKE_FIXNUM(2)
#define STATE_DEAD MAKE_FIXNUM(3)
#if defined(LISP_FEATURE_SB_SAFEPOINT)
# define STATE_SUSPENDED_BRIEFLY MAKE_FIXNUM(4)
# define STATE_GC_BLOCKER MAKE_FIXNUM(5)
# define STATE_PHASE1_BLOCKER MAKE_FIXNUM(5)
# define STATE_PHASE2_BLOCKER MAKE_FIXNUM(6)
# define STATE_INTERRUPT_BLOCKER MAKE_FIXNUM(7)
#endif

#ifdef LISP_FEATURE_SB_THREAD
lispobj thread_state(struct thread *thread);
void set_thread_state(struct thread *thread, lispobj state);
void wait_for_thread_state_change(struct thread *thread, lispobj state);
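/* A hedged sketch of how these are used (the real logic is in
 * thread.c, not here): code stopping the world might wait for each
 * signalled thread to leave STATE_RUNNING roughly like
 *
 *     struct thread *p;
 *     for_each_thread(p) {
 *         if (p != self && thread_state(p) == STATE_RUNNING)
 *             wait_for_thread_state_change(p, STATE_RUNNING);
 *     }
 *
 * with `self' standing in for the current thread. */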

#if defined(LISP_FEATURE_SB_SAFEPOINT)
enum threads_suspend_reason {
    SUSPEND_REASON_NONE,
    SUSPEND_REASON_GC,
    SUSPEND_REASON_INTERRUPT,
    SUSPEND_REASON_GCING
};

struct threads_suspend_info {
    int suspend;
    pthread_mutex_t world_lock;
    pthread_mutex_t lock;
    enum threads_suspend_reason reason;
    int phase;
    struct thread * gc_thread;
    struct thread * interrupted_thread;
    int blockers;
    int used_gc_page;
};

struct suspend_phase {
    int suspend;
    enum threads_suspend_reason reason;
    int phase;
    struct suspend_phase *next;
};

extern struct threads_suspend_info suspend_info;

struct gcing_safety {
    lispobj csp_around_foreign_call;
    lispobj* pc_around_foreign_call;
};

int handle_safepoint_violation(os_context_t *context, os_vm_address_t addr);
void** os_get_csp(struct thread* th);
void alloc_gc_page();
void assert_on_stack(struct thread *th, void *esp);
#endif /* defined(LISP_FEATURE_SB_SAFEPOINT) */

extern pthread_key_t lisp_thread;
#endif

extern int kill_safely(os_thread_t os_thread, int signal);

#define THREAD_SLOT_OFFSET_WORDS(c) \
 (offsetof(struct thread,c)/(sizeof (struct thread *)))
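/* For example, THREAD_SLOT_OFFSET_WORDS(this) evaluates to the index,
 * in pointer-sized words, of the `this' slot within struct thread --
 * useful when treating a thread struct as an array of words.
 * (Illustrative note only; the macro itself is the API.) */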

union per_thread_data {
    struct thread thread;
    lispobj dynamic_values[1];  /* actually more like 4000 or so */
};

/* A helper structure for per-thread data that is not pointer-sized.
 *
 * Originally, the layout of these fields was done manually in C code
 * with pointer arithmetic.  Now we let the C compiler figure it out.
 *
 * (Why is this not part of `struct thread'?  Because that structure is
 * declared using genesis, and we would run into issues with fields of
 * unknown length.)
 */
struct nonpointer_thread_data
{
#ifdef LISP_FEATURE_SB_THREAD
    os_sem_t state_sem;
    os_sem_t state_not_running_sem;
    os_sem_t state_not_stopped_sem;
# ifdef LISP_FEATURE_SB_SAFEPOINT
   /* For safepoint-based builds, thread_qrl(thread), together with the
    * thread's csp_around_foreign_call pointer target, forms a `quickly
    * revokable lock' (QRL). Unlike most mutexes, this one is normally
    * locked; by convention, another thread may read and use the
    * thread's FFI-CSP location _either_ while it holds the lock (mutex)
    * _or_ while page permissions for the FFI-CSP location are set to
    * read-only.
    *
    * The combined semantics of the QRL are not the same as the
    * semantics of the mutex itself; rather, the mutex, when released by
    * the owning thread, provides an edge-triggered notification of QRL
    * release, which is represented by writing a non-null
    * csp_around_foreign_call.
    *
    * When the owning thread is `in Lisp' (i.e. a heap mutator), its
    * FFI-CSP contains null; otherwise it points to the top of the C
    * stack that should be preserved by GENCGC. If another thread needs
    * to wait for a mutator state change in the `in Lisp => in C'
    * direction, it disables FFI-CSP overwrites using page protection
    * and takes the mutex returned by thread_qrl(). The page fault
    * handler normally ends up in a routine that releases this mutex and
    * waits for some appropriate event to take it back.
    *
    * This way, each thread may modify its own FFI-CSP content freely
    * without memory barriers (paying with exception-handling overhead
    * whenever contention happens). */
    pthread_mutex_t qrl_lock;
# endif
#else
    /* An unused field follows, to ensure that the struct is non-empty
     * for non-GCC compilers. */
    int unused;
#endif
};
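/* A hedged sketch of the QRL convention described above (the real
 * protocol lives elsewhere in the runtime; only names visible in this
 * header are used, and `approximate_stack_top' is a stand-in name).
 * The owning thread publishes a non-null FFI-CSP and releases the
 * mutex when it leaves Lisp:
 *
 *     *self->csp_around_foreign_call = (lispobj)approximate_stack_top;
 *     pthread_mutex_unlock(thread_qrl(self));
 *
 * and a thread waiting for the `in Lisp => in C' transition blocks on
 * that same mutex:
 *
 *     pthread_mutex_lock(thread_qrl(other));
 *     ... other->csp_around_foreign_call may now be inspected ...
 *     pthread_mutex_unlock(thread_qrl(other));
 *
 * Details here are illustrative, not authoritative. */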

extern struct thread *all_threads;
extern int dynamic_values_bytes;

#if defined(LISP_FEATURE_DARWIN)
#define CONTROL_STACK_ALIGNMENT_BYTES 8192 /* darwin wants page-aligned stacks */
#define THREAD_ALIGNMENT_BYTES CONTROL_STACK_ALIGNMENT_BYTES
#else
#define THREAD_ALIGNMENT_BYTES BACKEND_PAGE_BYTES
#define CONTROL_STACK_ALIGNMENT_BYTES 16
#endif


#ifdef LISP_FEATURE_SB_THREAD
#define for_each_thread(th) for(th=all_threads;th;th=th->next)
#else
/* there's some possibility that a sufficiently smart compiler (SSC)
 * could notice this never actually loops */
#define for_each_thread(th) for(th=all_threads;th;th=0)
#endif
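/* Typical use (the iteration variable is supplied by the caller):
 *
 *     struct thread *th;
 *     for_each_thread(th) {
 *         printf("thread %p\n", (void*)th);
 *     }
 *
 * In threaded builds the list is presumably only stable while
 * all_threads_lock (declared at the end of this file) is held or the
 * world is stopped. */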

static inline lispobj *
SymbolValueAddress(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj *r = &(((union per_thread_data *)thread)
                       ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]);
        if((*r)!=NO_TLS_VALUE_MARKER_WIDETAG) return r;
    }
#endif
    return &sym->value;
}

static inline lispobj
SymbolValue(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj r=
            ((union per_thread_data *)thread)
            ->dynamic_values[(sym->tls_index) >> WORD_SHIFT];
        if(r!=NO_TLS_VALUE_MARKER_WIDETAG) return r;
    }
#endif
    return sym->value;
}

static inline lispobj
SymbolTlValue(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    return ((union per_thread_data *)thread)
        ->dynamic_values[(sym->tls_index) >> WORD_SHIFT];
#else
    return sym->value;
#endif
}

static inline void
SetSymbolValue(u64 tagged_symbol_pointer,lispobj val, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj *pr= &(((union per_thread_data *)thread)
                       ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]);
        if(*pr!=NO_TLS_VALUE_MARKER_WIDETAG) {
            *pr=val;
            return;
        }
    }
#endif
    sym->value = val;
}

static inline void
SetTlSymbolValue(u64 tagged_symbol_pointer,lispobj val, void *thread)
{
#ifdef LISP_FEATURE_SB_THREAD
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
    ((union per_thread_data *)thread)
        ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]
        =val;
#else
    SetSymbolValue(tagged_symbol_pointer,val,thread);
#endif
}
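/* Usage sketch for the accessors above: reading and writing a special
 * variable's thread-local binding from C, falling back to the global
 * value cell when no TLS binding exists.  GC_PENDING is one of the
 * static symbols from genesis/static-symbols.h; the snippet is purely
 * an illustration of the calling convention, not meaningful runtime
 * logic:
 *
 *     struct thread *th = arch_os_get_current_thread();
 *     lispobj pending = SymbolValue(GC_PENDING, th);
 *     if (pending == NIL)
 *         SetSymbolValue(GC_PENDING, T, th);
 */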

/* This only works for static symbols. */
static inline lispobj
StaticSymbolFunction(lispobj sym)
{
    return ((struct fdefn *)native_pointer(SymbolValue(sym, 0)))->fun;
}
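/* Illustrative only (an assumed call pattern, not taken from this
 * file): the runtime can call a Lisp function named by a static
 * symbol via one of the call_into_lisp wrappers, e.g.
 *
 *     funcall0(StaticSymbolFunction(SUB_GC));
 */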

/* These are for use during GC, on the current thread, or on prenatal
 * threads only. */
#if defined(LISP_FEATURE_SB_THREAD)
#define get_binding_stack_pointer(thread)       \
    ((thread)->binding_stack_pointer)
#define set_binding_stack_pointer(thread,value) \
    ((thread)->binding_stack_pointer = (lispobj *)(value))
#define access_control_stack_pointer(thread) \
    ((thread)->control_stack_pointer)
#  if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#define access_control_frame_pointer(thread) \
    ((thread)->control_frame_pointer)
#  endif
#elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define get_binding_stack_pointer(thread)       \
    SymbolValue(BINDING_STACK_POINTER, thread)
#define set_binding_stack_pointer(thread,value) \
    SetSymbolValue(BINDING_STACK_POINTER, (lispobj)(value), thread)
#define access_control_stack_pointer(thread)    \
    (current_control_stack_pointer)
#else
#define get_binding_stack_pointer(thread)       \
    (current_binding_stack_pointer)
#define set_binding_stack_pointer(thread,value) \
    (current_binding_stack_pointer = (lispobj *)(value))
#define access_control_stack_pointer(thread) \
    (current_control_stack_pointer)
#define access_control_frame_pointer(thread) \
    (current_control_frame_pointer)
#endif

#if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_GCC_TLS)
extern __thread struct thread *current_thread;
#endif

#ifdef LISP_FEATURE_SB_SAFEPOINT
# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
#else
# define THREAD_CSP_PAGE_SIZE 0
#endif

#ifdef LISP_FEATURE_WIN32
/*
 * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
 * so define it arbitrarily
 */
#define SIGSTKSZ 1024
#endif

#define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE +                          \
                            sizeof(struct nonpointer_thread_data) +     \
                            dynamic_values_bytes +                      \
                            32 * SIGSTKSZ +                             \
                            THREAD_ALIGNMENT_BYTES +                    \
                            THREAD_CSP_PAGE_SIZE)

#if defined(LISP_FEATURE_WIN32)
static inline struct thread* arch_os_get_current_thread()
    __attribute__((__const__));
#endif

/* This is clearly per-arch and possibly even per-OS code, but we can't
 * put it somewhere sensible like x86-linux-os.c because it needs too
 * much stuff like struct thread and all_threads to be defined, which
 * usually aren't by that time.  So, it's here instead.  Sorry */
static inline struct thread *arch_os_get_current_thread(void)
{
#if defined(LISP_FEATURE_SB_THREAD)
#if defined(LISP_FEATURE_X86)
    register struct thread *me=0;
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
    /* Read the thread pointer out of the TEB's TLS slot array (0xE10
     * is the TlsSlots offset in the 32-bit TEB; slot 63 is assumed to
     * be the one reserved for the Lisp thread struct). */
    __asm__ ("movl %%fs:0xE10+(4*63), %0" : "=r"(me) :);
    return me;
#endif
    if(all_threads) {
#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
        sel_t sel;
        struct thread *th = pthread_getspecific(specials);
        sel.index = th->tls_cookie;
        sel.rpl = USER_PRIV;
        sel.ti = SEL_LDT;
        __asm__ __volatile__ ("movw %w0, %%fs" : : "r"(sel));
#elif defined(LISP_FEATURE_FREEBSD)
#ifdef LISP_FEATURE_GCC_TLS
        struct thread *th = current_thread;
#else
        struct thread *th = pthread_getspecific(specials);
#endif
#ifdef LISP_FEATURE_RESTORE_TLS_SEGMENT_REGISTER_FROM_TLS
        unsigned int sel = LSEL(th->tls_cookie, SEL_UPL);
        unsigned int fs = rfs();

        /* Load FS only if it's necessary.  Modifying a selector
         * causes privilege checking and takes a long time. */
        if (fs != sel)
            load_fs(sel);
#endif
        return th;
#endif
        __asm__ ("movl %%fs:%c1,%0" : "=r" (me)
                 : "i" (offsetof (struct thread,this)));
    }
    return me;
#else
#ifdef LISP_FEATURE_GCC_TLS
    return current_thread;
#else
    return pthread_getspecific(specials);
#endif
#endif /* x86 */
#else
    return all_threads;
#endif
}

#if defined(LISP_FEATURE_MACH_EXCEPTION_HANDLER)
extern kern_return_t mach_lisp_thread_init(struct thread *thread);
extern kern_return_t mach_lisp_thread_destroy(struct thread *thread);
#endif

#ifdef LISP_FEATURE_SB_SAFEPOINT
void thread_in_safety_transition(os_context_t *ctx);
void thread_in_lisp_raised(os_context_t *ctx);
void thread_interrupted(os_context_t *ctx);
void thread_pitstop(os_context_t *ctxptr);
extern void thread_register_gc_trigger();

# ifdef LISP_FEATURE_SB_THRUPTION
int wake_thread(os_thread_t os_thread);
#  ifdef LISP_FEATURE_WIN32
void wake_thread_win32(struct thread *thread);
#  else
int wake_thread_posix(os_thread_t os_thread);
#  endif
# endif

#define thread_qrl(th) (&(th)->nonpointer_data->qrl_lock)

/* The empty "asm volatile" statements below are compiler barriers:
 * they prevent gcc from reordering the surrounding stores. */
static inline
void push_gcing_safety(struct gcing_safety *into)
{
    struct thread* th = arch_os_get_current_thread();
    asm volatile ("");
    if ((into->csp_around_foreign_call =
         *th->csp_around_foreign_call)) {
        *th->csp_around_foreign_call = 0;
        asm volatile ("");
        into->pc_around_foreign_call = th->pc_around_foreign_call;
        th->pc_around_foreign_call = 0;
        asm volatile ("");
    } else {
        into->pc_around_foreign_call = 0;
    }
}

static inline
void pop_gcing_safety(struct gcing_safety *from)
{
    struct thread* th = arch_os_get_current_thread();
    if (from->csp_around_foreign_call) {
        asm volatile ("");
        *th->csp_around_foreign_call = from->csp_around_foreign_call;
        asm volatile ("");
        th->pc_around_foreign_call = from->pc_around_foreign_call;
        asm volatile ("");
    }
}

/* Even with just -O1, gcc optimizes the jumps in this "loop" away
 * entirely, giving the ability to define WITH-FOO-style macros. */
#define RUN_BODY_ONCE(prefix, finally_do)               \
    int prefix##done = 0;                               \
    for (; !prefix##done; finally_do, prefix##done = 1)

#define WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(var)        \
    struct gcing_safety var;                    \
    push_gcing_safety(&var);                    \
    RUN_BODY_ONCE(var, pop_gcing_safety(&var))

#define WITH_GC_AT_SAFEPOINTS_ONLY()                           \
    WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(sbcl__gc_safety)
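/* Usage sketch for the WITH-FOO pattern above: the braces following
 * the macro become the RUN_BODY_ONCE loop body, and pop_gcing_safety()
 * runs once the body finishes.  (some_blocking_call() is a stand-in
 * name for illustration.)
 *
 *     WITH_GC_AT_SAFEPOINTS_ONLY() {
 *         result = some_blocking_call();
 *     }
 *
 * WITH_STATE_SEM(thread) below wraps a body in the thread-state
 * semaphore in the same way. */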

#define WITH_STATE_SEM_hygenic(var, thread)                             \
    os_sem_wait((thread)->state_sem, "thread_state");                   \
    RUN_BODY_ONCE(var, os_sem_post((thread)->state_sem, "thread_state"))

#define WITH_STATE_SEM(thread)                                     \
    WITH_STATE_SEM_hygenic(sbcl__state_sem, thread)

int check_pending_thruptions(os_context_t *ctx);

#endif

extern void create_initial_thread(lispobj);

#ifdef LISP_FEATURE_SB_THREAD
extern pthread_mutex_t all_threads_lock;
#endif

#endif /* _INCLUDE_THREAD_H_ */