Use safepoints for INTERRUPT-THREAD
[sbcl.git] / src / runtime / thread.h
#if !defined(_INCLUDE_THREAD_H_)
#define _INCLUDE_THREAD_H_

#include <sys/types.h>
#include <unistd.h>
#include <stddef.h>
#include "sbcl.h"
#include "globals.h"
#include "runtime.h"
#include "os.h"
#ifdef LISP_FEATURE_GENCGC
#include "gencgc-alloc-region.h"
#endif
#include "genesis/symbol.h"
#include "genesis/static-symbols.h"

#include "genesis/thread.h"
#include "genesis/fdefn.h"
#include "interrupt.h"
#include "validate.h"           /* for BINDING_STACK_SIZE etc */

#define STATE_RUNNING MAKE_FIXNUM(1)
#define STATE_STOPPED MAKE_FIXNUM(2)
#define STATE_DEAD MAKE_FIXNUM(3)
#if defined(LISP_FEATURE_SB_SAFEPOINT)
# define STATE_SUSPENDED_BRIEFLY MAKE_FIXNUM(4)
# define STATE_GC_BLOCKER MAKE_FIXNUM(5)
# define STATE_PHASE1_BLOCKER MAKE_FIXNUM(5)
# define STATE_PHASE2_BLOCKER MAKE_FIXNUM(6)
# define STATE_INTERRUPT_BLOCKER MAKE_FIXNUM(7)
#endif

#ifdef LISP_FEATURE_SB_THREAD
lispobj thread_state(struct thread *thread);
void set_thread_state(struct thread *thread, lispobj state);
void wait_for_thread_state_change(struct thread *thread, lispobj state);
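
/* A sketch of how these are used (illustrative only, not part of the
 * original header): to wait until another thread has left the running
 * state, one might write
 *
 *     if (thread_state(th) == STATE_RUNNING)
 *         wait_for_thread_state_change(th, STATE_RUNNING);
 *
 * i.e. wait_for_thread_state_change() returns once the thread's state
 * no longer equals the state passed in. */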

#if defined(LISP_FEATURE_SB_SAFEPOINT)
enum threads_suspend_reason {
    SUSPEND_REASON_NONE,
    SUSPEND_REASON_GC,
    SUSPEND_REASON_INTERRUPT,
    SUSPEND_REASON_GCING
};

struct threads_suspend_info {
    int suspend;
    pthread_mutex_t world_lock;
    pthread_mutex_t lock;
    enum threads_suspend_reason reason;
    int phase;
    struct thread * gc_thread;
    struct thread * interrupted_thread;
    int blockers;
    int used_gc_page;
};

struct suspend_phase {
    int suspend;
    enum threads_suspend_reason reason;
    int phase;
    struct suspend_phase *next;
};

extern struct threads_suspend_info suspend_info;

struct gcing_safety {
    lispobj csp_around_foreign_call;
    lispobj* pc_around_foreign_call;
};

int handle_safepoint_violation(os_context_t *context, os_vm_address_t addr);
void** os_get_csp(struct thread* th);
void alloc_gc_page();
void assert_on_stack(struct thread *th, void *esp);
#endif /* defined(LISP_FEATURE_SB_SAFEPOINT) */

extern pthread_key_t lisp_thread;
#endif

extern int kill_safely(os_thread_t os_thread, int signal);

#define THREAD_SLOT_OFFSET_WORDS(c) \
 (offsetof(struct thread,c)/(sizeof (struct thread *)))
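
/* For example (illustrative only),
 * THREAD_SLOT_OFFSET_WORDS(binding_stack_pointer) is the word index of
 * that slot, usable when the thread block is viewed as dynamic_values[]
 * via union per_thread_data below. */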

union per_thread_data {
    struct thread thread;
    lispobj dynamic_values[1];  /* actually more like 4000 or so */
};
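
/* The per-thread memory block is thus viewed two ways: `struct thread'
 * describes its beginning, while `dynamic_values' spans the block of
 * thread-local symbol values, indexed by (tls_index >> WORD_SHIFT) in
 * the accessors below. */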

/* A helper structure for thread-local data that is not pointer-sized.
 *
 * Originally, the layout of these fields was done manually in C code
 * with pointer arithmetic; we let the C compiler figure it out now.
 *
 * (Why is this not part of `struct thread'?  Because that structure is
 * declared using genesis, and we would run into issues with fields of
 * unknown length.)
 */
struct nonpointer_thread_data
{
#ifdef LISP_FEATURE_SB_THREAD
    os_sem_t state_sem;
    os_sem_t state_not_running_sem;
    os_sem_t state_not_stopped_sem;
# ifdef LISP_FEATURE_SB_SAFEPOINT
   /* For safepoint-based builds, this mutex (returned by
    * thread_qrl(thread)), together with the thread's
    * csp_around_foreign_call pointer target, makes up the `quickly
    * revokable lock' (QRL).  Unlike most mutexes, this one is normally
    * locked; by convention, another thread may read and use this
    * thread's FFI-CSP location _either_ while it holds the lock (mutex)
    * _or_ when the page permissions for the FFI-CSP location have been
    * set to read-only.
    *
    * The combined semantics of the QRL are not the same as the
    * semantics of the mutex returned by thread_qrl(); rather, the
    * mutex, when released by the owning thread, provides an
    * edge-triggered notification of QRL release, which is represented
    * by writing a non-null csp_around_foreign_call.
    *
    * When the owner thread is `in Lisp' (i.e. a heap mutator), its
    * FFI-CSP contains null; otherwise it points to the top of the C
    * stack that should be preserved by GENCGC.  If another thread needs
    * to wait for a mutator state change in the `in Lisp => in C'
    * direction, it disables FFI-CSP overwrites using page protection
    * and takes the mutex returned by thread_qrl().  The page fault
    * handler normally ends up in a routine that releases this mutex and
    * waits for some appropriate event to take it back.
    *
    * This way, each thread may modify its own FFI-CSP contents freely,
    * without memory barriers (paying with exception-handling overhead
    * whenever contention happens). */
    pthread_mutex_t qrl_lock;
# endif
#else
    /* An unused field follows, to ensure that the struct is non-empty
     * for non-GCC compilers. */
    int unused;
#endif
};

extern struct thread *all_threads;
extern int dynamic_values_bytes;

#if defined(LISP_FEATURE_DARWIN)
#define CONTROL_STACK_ALIGNMENT_BYTES 8192 /* darwin wants page-aligned stacks */
#define THREAD_ALIGNMENT_BYTES CONTROL_STACK_ALIGNMENT_BYTES
#else
#define THREAD_ALIGNMENT_BYTES BACKEND_PAGE_BYTES
#define CONTROL_STACK_ALIGNMENT_BYTES 16
#endif


#ifdef LISP_FEATURE_SB_THREAD
#define for_each_thread(th) for(th=all_threads;th;th=th->next)
#else
/* there's some possibility an SSC (sufficiently smart compiler) could
 * notice that this never actually loops */
#define for_each_thread(th) for(th=all_threads;th;th=0)
#endif
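
/* Typical use (illustrative only):
 *
 *     struct thread *th;
 *     for_each_thread(th) {
 *         ... inspect or signal th ...
 *     }
 */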

static inline lispobj *
SymbolValueAddress(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj *r = &(((union per_thread_data *)thread)
                       ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]);
        if((*r)!=NO_TLS_VALUE_MARKER_WIDETAG) return r;
    }
#endif
    return &sym->value;
}

static inline lispobj
SymbolValue(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj r=
            ((union per_thread_data *)thread)
            ->dynamic_values[(sym->tls_index) >> WORD_SHIFT];
        if(r!=NO_TLS_VALUE_MARKER_WIDETAG) return r;
    }
#endif
    return sym->value;
}

static inline lispobj
SymbolTlValue(u64 tagged_symbol_pointer, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    return ((union per_thread_data *)thread)
        ->dynamic_values[(sym->tls_index) >> WORD_SHIFT];
#else
    return sym->value;
#endif
}

static inline void
SetSymbolValue(u64 tagged_symbol_pointer,lispobj val, void *thread)
{
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
    if(thread && sym->tls_index) {
        lispobj *pr= &(((union per_thread_data *)thread)
                       ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]);
        if(*pr!=NO_TLS_VALUE_MARKER_WIDETAG) {
            *pr=val;
            return;
        }
    }
#endif
    sym->value = val;
}
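
/* Illustrative use of the accessors above (a sketch, not from the
 * original header): the thread argument may be NULL, in which case the
 * symbol's global value cell is used instead of a TLS slot.
 *
 *     struct thread *th = arch_os_get_current_thread();
 *     lispobj bsp = SymbolValue(BINDING_STACK_POINTER, th);
 *     SetSymbolValue(BINDING_STACK_POINTER, bsp, th);
 *
 * BINDING_STACK_POINTER is a static symbol from
 * genesis/static-symbols.h; the same idiom appears in the
 * get/set_binding_stack_pointer macros further below. */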

static inline void
SetTlSymbolValue(u64 tagged_symbol_pointer,lispobj val, void *thread)
{
#ifdef LISP_FEATURE_SB_THREAD
    struct symbol *sym= (struct symbol *)
        (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
    ((union per_thread_data *)thread)
        ->dynamic_values[(sym->tls_index) >> WORD_SHIFT]
        =val;
#else
    SetSymbolValue(tagged_symbol_pointer,val,thread) ;
#endif
}

/* This only works for static symbols. */
static inline lispobj
StaticSymbolFunction(lispobj sym)
{
    return ((struct fdefn *)native_pointer(SymbolValue(sym, 0)))->fun;
}
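
/* Illustrative use (a sketch; SUB_GC and funcall0 are assumptions from
 * elsewhere in the runtime, not defined in this header):
 *
 *     funcall0(StaticSymbolFunction(SUB_GC));
 *
 * i.e. the static symbol's value is taken to be an fdefn, and its
 * function is extracted for calling into Lisp. */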

/* These are for use during GC, on the current thread, or on prenatal
 * threads only. */
#if defined(LISP_FEATURE_SB_THREAD)
#define get_binding_stack_pointer(thread)       \
    ((thread)->binding_stack_pointer)
#define set_binding_stack_pointer(thread,value) \
    ((thread)->binding_stack_pointer = (lispobj *)(value))
#define access_control_stack_pointer(thread) \
    ((thread)->control_stack_pointer)
#  if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#define access_control_frame_pointer(thread) \
    ((thread)->control_frame_pointer)
#  endif
#elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define get_binding_stack_pointer(thread)       \
    SymbolValue(BINDING_STACK_POINTER, thread)
#define set_binding_stack_pointer(thread,value) \
    SetSymbolValue(BINDING_STACK_POINTER, (lispobj)(value), thread)
#define access_control_stack_pointer(thread)    \
    (current_control_stack_pointer)
#else
#define get_binding_stack_pointer(thread)       \
    (current_binding_stack_pointer)
#define set_binding_stack_pointer(thread,value) \
    (current_binding_stack_pointer = (lispobj *)(value))
#define access_control_stack_pointer(thread) \
    (current_control_stack_pointer)
#define access_control_frame_pointer(thread) \
    (current_control_frame_pointer)
#endif

#if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_GCC_TLS)
extern __thread struct thread *current_thread;
#endif

#ifdef LISP_FEATURE_SB_SAFEPOINT
# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
#else
# define THREAD_CSP_PAGE_SIZE 0
#endif

#define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE +                          \
                            sizeof(struct nonpointer_thread_data) +     \
                            dynamic_values_bytes +                      \
                            32 * SIGSTKSZ +                             \
                            THREAD_ALIGNMENT_BYTES +                    \
                            THREAD_CSP_PAGE_SIZE)

/* This is clearly per-arch and possibly even per-OS code, but we can't
 * put it somewhere sensible like x86-linux-os.c, because it needs too
 * much stuff (struct thread, all_threads, and so on) that usually isn't
 * defined by that point.  So it's here instead.  Sorry. */

static inline struct thread *arch_os_get_current_thread(void)
{
#if defined(LISP_FEATURE_SB_THREAD)
#if defined(LISP_FEATURE_X86)
    register struct thread *me=0;
    if(all_threads) {
#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
        sel_t sel;
        struct thread *th = pthread_getspecific(specials);
        sel.index = th->tls_cookie;
        sel.rpl = USER_PRIV;
        sel.ti = SEL_LDT;
        __asm__ __volatile__ ("movw %w0, %%fs" : : "r"(sel));
#elif defined(LISP_FEATURE_FREEBSD)
#ifdef LISP_FEATURE_GCC_TLS
        struct thread *th = current_thread;
#else
        struct thread *th = pthread_getspecific(specials);
#endif
#ifdef LISP_FEATURE_RESTORE_TLS_SEGMENT_REGISTER_FROM_TLS
        unsigned int sel = LSEL(th->tls_cookie, SEL_UPL);
        unsigned int fs = rfs();

        /* Load FS only if necessary.  Modifying a selector causes
         * privilege checking, which takes a long time. */
        if (fs != sel)
            load_fs(sel);
#endif
        return th;
#endif
        __asm__ __volatile__ ("movl %%fs:%c1,%0" : "=r" (me)
                 : "i" (offsetof (struct thread,this)));
    }
    return me;
#else
#ifdef LISP_FEATURE_GCC_TLS
    return current_thread;
#else
    return pthread_getspecific(specials);
#endif
#endif /* x86 */
#else
     return all_threads;
#endif
}

#if defined(LISP_FEATURE_MACH_EXCEPTION_HANDLER)
extern kern_return_t mach_lisp_thread_init(struct thread *thread);
extern kern_return_t mach_lisp_thread_destroy(struct thread *thread);
#endif

#ifdef LISP_FEATURE_SB_SAFEPOINT
void thread_in_safety_transition(os_context_t *ctx);
void thread_in_lisp_raised(os_context_t *ctx);
void thread_interrupted(os_context_t *ctx);
void thread_pitstop(os_context_t *ctxptr);
extern void thread_register_gc_trigger();

# ifdef LISP_FEATURE_SB_THRUPTION
int wake_thread(os_thread_t os_thread);
int wake_thread_posix(os_thread_t os_thread);
# endif

#define thread_qrl(th) (&(th)->nonpointer_data->qrl_lock)
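
/* Rough sketch of the waiting side of the QRL protocol described above
 * at struct nonpointer_thread_data (illustrative only; the real logic
 * lives elsewhere in the runtime):
 *
 *     pthread_mutex_lock(thread_qrl(th));
 *     ... *th->csp_around_foreign_call may now be read and used ...
 *     pthread_mutex_unlock(thread_qrl(th));
 */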

static inline
void push_gcing_safety(struct gcing_safety *into)
{
    struct thread* th = arch_os_get_current_thread();
    asm volatile ("");
    if ((into->csp_around_foreign_call =
         *th->csp_around_foreign_call)) {
        *th->csp_around_foreign_call = 0;
        asm volatile ("");
        into->pc_around_foreign_call = th->pc_around_foreign_call;
        th->pc_around_foreign_call = 0;
        asm volatile ("");
    } else {
        into->pc_around_foreign_call = 0;
    }
}

static inline
void pop_gcing_safety(struct gcing_safety *from)
{
    struct thread* th = arch_os_get_current_thread();
    if (from->csp_around_foreign_call) {
        asm volatile ("");
        *th->csp_around_foreign_call = from->csp_around_foreign_call;
        asm volatile ("");
        th->pc_around_foreign_call = from->pc_around_foreign_call;
        asm volatile ("");
    }
}

/* Even with just -O1, gcc optimizes the jumps in this "loop" away
 * entirely, giving the ability to define WITH-FOO-style macros. */
#define RUN_BODY_ONCE(prefix, finally_do)               \
    int prefix##done = 0;                               \
    for (; !prefix##done; finally_do, prefix##done = 1)

#define WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(var)        \
    struct gcing_safety var;                    \
    push_gcing_safety(&var);                    \
    RUN_BODY_ONCE(var, pop_gcing_safety(&var))

#define WITH_GC_AT_SAFEPOINTS_ONLY()                           \
    WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(sbcl__gc_safety)
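
/* Illustrative shape of a caller (a sketch, not a verbatim caller from
 * the runtime): C code about to work on the Lisp heap can arrange that,
 * for the duration of the body, the thread counts as a mutator again
 * and GC happens only at safepoints:
 *
 *     {
 *         WITH_GC_AT_SAFEPOINTS_ONLY() {
 *             ... allocate or modify Lisp objects ...
 *         }
 *     }
 *
 * push_gcing_safety() saves and clears csp_around_foreign_call (and the
 * saved pc) before the body and pop_gcing_safety() restores them
 * afterwards; the empty `asm volatile ("")' statements in those helpers
 * act as compiler barriers against reordering of the stores. */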

#define WITH_STATE_SEM_hygenic(var, thread)                             \
    os_sem_wait((thread)->state_sem, "thread_state");                   \
    RUN_BODY_ONCE(var, os_sem_post((thread)->state_sem, "thread_state"))

#define WITH_STATE_SEM(thread)                                     \
    WITH_STATE_SEM_hygenic(sbcl__state_sem, thread)
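
/* Illustrative use (sketch only; the `state' slot comes from
 * genesis/thread.h and is not visible in this header):
 *
 *     {
 *         WITH_STATE_SEM(th) {
 *             lispobj state = th->state;
 *             ...
 *         }
 *     }
 */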

int check_pending_thruptions(os_context_t *ctx);

#endif

extern boolean is_some_thread_local_addr(os_vm_address_t addr);
extern void create_initial_thread(lispobj);

#ifdef LISP_FEATURE_SB_THREAD
extern pthread_mutex_t all_threads_lock;
#endif

#endif /* _INCLUDE_THREAD_H_ */