417adc6182aee821048e4fd2e95c45e766ee0651
[sbcl.git] / src / runtime / safepoint.c
1 /*
2  * This software is part of the SBCL system. See the README file for
3  * more information.
4  *
5  * This software is derived from the CMU CL system, which was
6  * written at Carnegie Mellon University and released into the
7  * public domain. The software is in the public domain and is
8  * provided with absolutely no warranty. See the COPYING and CREDITS
9  * files for more information.
10  */
11 #include "sbcl.h"
12
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <string.h>
17 #ifndef LISP_FEATURE_WIN32
18 #include <sched.h>
19 #endif
20 #include <signal.h>
21 #include <stddef.h>
22 #include <errno.h>
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
25 #include <sys/wait.h>
26 #endif
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
31 #endif
32 #include "runtime.h"
33 #include "validate.h"
34 #include "thread.h"
35 #include "arch.h"
36 #include "target-arch-os.h"
37 #include "os.h"
38 #include "globals.h"
39 #include "dynbind.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
42 #include "interr.h"
43 #include "alloc.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
47 #include "lispregs.h"
48
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51  * definition goes here.  Fixme: (Why) don't these work for Windows?
52  */
/* Reserve the process-wide safepoint page at its fixed address.
 * Only the page's protection state carries information, so a length of
 * 4 bytes suffices (presumably rounded up to a full page by the OS
 * layer -- TODO confirm os_validate semantics). */
void
alloc_gc_page()
{
    os_validate(GC_SAFEPOINT_PAGE_ADDR, 4);
}
58
/* Disarm the global safepoint: make the safepoint page accessible so
 * that threads running Lisp code no longer trap when touching it. */
void
map_gc_page()
{
    odxprint(misc, "map_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
               4,
               OS_VM_PROT_READ | OS_VM_PROT_WRITE);
}
67
/* Arm the global safepoint: revoke all access to the safepoint page so
 * that every thread running Lisp code faults at its next safepoint. */
void
unmap_gc_page()
{
    odxprint(misc, "unmap_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
}
74 #endif /* !LISP_FEATURE_WIN32 */
75
76 /* Planned state progressions:
77  *
78  * none -> flight:
79  *
80  *     unmap_gc_page(). No blockers (GC_NONE can be left at any * moment).
81  *
82  * flight -> message:
83  *
84  *     happens when a master thread enters its trap.
85  *
86  *     The only blocker for flight mode is the master thread itself
87  *     (GC_FLIGHT can't be left until the master thread traps).
88  *
89  * message -> invoked:
90  *
91  *     happens after each (other) thread is notified, i.e. it will
92  *     eventually stop (already stopped). map_gc_page().
93  *
94  *     Each thread with empty CSP disagrees to leave GC_MESSAGE phase.
95  *
96  * invoked -> collect:
97  *
98  *     happens when every gc-inhibitor comes to completion (that's
99  *     normally pending interrupt trap).
100  *
101  *     NB gc_stop_the_world, if it happens in non-master thread, "takes
102  *     over" as a master, also deregistering itself as a blocker
103  *     (i.e. it's ready to leave GC_INVOKED, but now it objects to
104  *     leaving GC_COLLECT; this "usurpation" doesn't require any change
105  *     to GC_COLLECT counter: for the counter, it's immaterial _which_
106  *     thread is waiting).
107  *
108  * collect -> none:
109  *
110  *     happens at gc_start_the_world (that should always happen in the
111  *     master).
112  *
113  *     Any thread waiting until GC end now continues.
114  */
struct gc_state {
    /* Flag: conditions are initialized (they are created lazily on the
     * first gc_state_lock() call) */
    boolean initialized;

    /* Per-process lock for gc_state */
    pthread_mutex_t lock;

    /* Conditions: one per phase */
    pthread_cond_t phase_cond[GC_NPHASES];

    /* For each [current or future] phase, a number of threads not yet ready to
     * leave it */
    int phase_wait[GC_NPHASES];

    /* Master thread controlling the topmost stop/gc/start sequence */
    struct thread* master;
    /* Thread performing the collection proper: set in
     * gc_stop_the_world(), cleared in gc_start_the_world(); NULL when
     * no collection is under way. */
    struct thread* collector;

    /* Current GC phase */
    gc_phase_t phase;
};

/* The single, process-wide GC state.  The mutex is statically
 * initialized; the condition variables are set up on first lock. */
static struct gc_state gc_state = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .phase = GC_NONE,
};
141
142 void
143 gc_state_lock()
144 {
145     odxprint(safepoints,"GC state [%p] to be locked",gc_state.lock);
146     gc_assert(0==pthread_mutex_lock(&gc_state.lock));
147     if (gc_state.master) {
148         fprintf(stderr,"GC state lock glitch [%p] in thread %p phase %d\n",
149                 gc_state.master,arch_os_get_current_thread(),gc_state.phase);
150         odxprint(safepoints,"GC state lock glitch [%p]",gc_state.master);
151     }
152     gc_assert(!gc_state.master);
153     gc_state.master = arch_os_get_current_thread();
154     if (!gc_state.initialized) {
155         int i;
156         for (i=GC_NONE; i<GC_NPHASES; ++i)
157             pthread_cond_init(&gc_state.phase_cond[i],NULL);
158         gc_state.initialized = 1;
159     }
160     odxprint(safepoints,"GC state [%p] locked in phase %d",gc_state.lock, gc_state.phase);
161 }
162
/* Release the global GC state lock.  Only the registered master may
 * unlock; mastership is cleared first, while we still own the mutex. */
void
gc_state_unlock()
{
    odxprint(safepoints,"GC state to be unlocked in phase %d",gc_state.phase);
    gc_assert(arch_os_get_current_thread()==gc_state.master);
    gc_state.master = NULL;
    gc_assert(0==pthread_mutex_unlock(&gc_state.lock));
    odxprint(safepoints,"%s","GC state unlocked");
}
172
/* Block (under the state lock) until the global phase reaches `phase'.
 * Caller must be the master; mastership is relinquished for the
 * duration of the wait because pthread_cond_wait releases the mutex,
 * allowing other threads to lock the state meanwhile, and is
 * reacquired on return. */
void
gc_state_wait(gc_phase_t phase)
{
    struct thread* self = arch_os_get_current_thread();
    odxprint(safepoints,"Waiting for %d -> %d [%d holders]",
             gc_state.phase,phase,gc_state.phase_wait[gc_state.phase]);
    gc_assert(gc_state.master == self);
    gc_state.master = NULL;
    /* Special case: a wait for GC_QUIET is also satisfied by any later
     * phase (the state may roll past GC_QUIET before we wake up). */
    while(gc_state.phase != phase && !(phase == GC_QUIET && (gc_state.phase > GC_QUIET)))
        pthread_cond_wait(&gc_state.phase_cond[phase],&gc_state.lock);
    gc_assert(gc_state.master == NULL);
    gc_state.master = self;
}
186
/* Publish the thread's live C stack pointer (taken from the interrupt
 * context `ctx') in its csp_around_foreign_call word, so the stack can
 * be scanned conservatively while the thread is parked. */
static void
set_csp_from_context(struct thread *self, os_context_t *ctx)
{
    void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
    /* On POSIX platforms, it is sufficient to investigate only the part
     * of the stack that was live before the interrupt, because in
     * addition, we consider interrupt contexts explicitly.  On Windows,
     * however, we do not keep an explicit stack of exception contexts,
     * and instead arrange for the conservative stack scan to also cover
     * the context implicitly.  The obvious way to do that is to start
     * at the context itself: */
#ifdef LISP_FEATURE_WIN32
    gc_assert((void **) ctx < sp);
    sp = (void**) ctx;
#endif
    /* The published pointer must lie within this thread's control
     * stack. */
    gc_assert((void **)self->control_stack_start
              <= sp && sp
              < (void **)self->control_stack_end);
    *self->csp_around_foreign_call = (lispobj) sp;
}
207
208 \f
209 static inline gc_phase_t gc_phase_next(gc_phase_t old) {
210     return (old+1) % GC_NPHASES;
211 }
212
/* Compute the phase that thread `p' "holds" (i.e. refuses to let the
 * state machine advance past), judging by its thread-local symbols:
 *
 *  - GC in progress in `p' (GC_PENDING neither T nor NIL): hold
 *    GC_QUIET -- unless some other thread is already the collector, in
 *    which case hold nothing.
 *  - GC inhibited (WITHOUT-GCING): hold GC_INVOKED.
 *  - otherwise: hold nothing (GC_NONE). */
static inline gc_phase_t thread_gc_phase(struct thread* p)
{
    boolean inhibit = (SymbolTlValue(GC_INHIBIT,p)==T)||
        (SymbolTlValue(IN_WITHOUT_GCING,p)==IN_WITHOUT_GCING);

    boolean inprogress =
        (SymbolTlValue(GC_PENDING,p)!=T&& SymbolTlValue(GC_PENDING,p)!=NIL);

    return
        inprogress ? (gc_state.collector && (gc_state.collector != p)
                      ? GC_NONE : GC_QUIET)
        : (inhibit ? GC_INVOKED : GC_NONE);
}
226
227 static inline void thread_gc_promote(struct thread* p, gc_phase_t cur, gc_phase_t old) {
228     if (old != GC_NONE)
229         gc_state.phase_wait[old]--;
230     if (cur != GC_NONE) {
231         gc_state.phase_wait[cur]++;
232     }
233     if (cur != GC_NONE)
234         SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
235 }
236
/* set_thread_csp_access -- alter page permissions for not-in-Lisp
   flag (Lisp Stack Top) of the thread `p'. The flag may be modified
   if `writable' is true.

   Return true if there is a non-null value in the flag.

   When a thread enters C code or leaves it, a per-thread location is
   modified. That machine word serves as a not-in-Lisp flag; for
   convenience, when in C, it's filled with a topmost stack location
   that may contain Lisp data. When thread is in Lisp, the word
   contains NULL.

   GENCGC uses each thread's flag value for conservative garbage collection.

   There is a full VM page reserved for this word; page permissions
   are switched to read-only for race-free examine + wait + use
   scenarios. */
static inline boolean
set_thread_csp_access(struct thread* p, boolean writable)
{
    /* Note: read access is retained in both cases, so the owner thread
     * can always inspect its own flag. */
    os_protect((os_vm_address_t) p->csp_around_foreign_call,
               THREAD_CSP_PAGE_SIZE,
               writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
               : (OS_VM_PROT_READ));
    return !!*p->csp_around_foreign_call;
}
263
/* First notification pass, run on entry to GC_MESSAGE: make every
 * other thread's csp page read-only and register each one as a blocker
 * of the phase it holds.  A thread currently in Lisp (csp word is 0)
 * blocks the current phase; a thread in foreign code blocks whatever
 * its GC symbols dictate (see thread_gc_phase). */
static inline void gc_notify_early()
{
    struct thread *self = arch_os_get_current_thread(), *p;
    odxprint(safepoints,"%s","global notification");
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (p==self)
            continue;
        odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
        if (!set_thread_csp_access(p,0)) {
            thread_gc_promote(p, gc_state.phase, GC_NONE);
        } else {
            thread_gc_promote(p, thread_gc_phase(p), GC_NONE);
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
}
281
/* Second notification pass, run on entry to GC_SETTLED: reset the
 * current phase's wait count and re-register only the threads still in
 * Lisp (csp word is 0), skipping the collector itself. */
static inline void gc_notify_final()
{
    struct thread *p;
    odxprint(safepoints,"%s","global notification");
    gc_state.phase_wait[gc_state.phase]=0;
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (p == gc_state.collector)
            continue;
        odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
        if (!set_thread_csp_access(p,0)) {
            thread_gc_promote(p, gc_state.phase, GC_NONE);
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
}
298
/* Run on entry to GC_NONE: restore every thread's csp page to writable
 * and, if the thread driving this transition has GC_INHIBIT set,
 * retract any pending-GC flags so the deferred GC is not re-triggered
 * immediately. */
static inline void gc_done()
{
    struct thread *self = arch_os_get_current_thread(), *p;
    boolean inhibit = (SymbolTlValue(GC_INHIBIT,self)==T);

    odxprint(safepoints,"%s","global denotification");
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (inhibit && (SymbolTlValue(GC_PENDING,p)==T))
            SetTlSymbolValue(GC_PENDING,NIL,p);
        set_thread_csp_access(p,1);
    }
    pthread_mutex_unlock(&all_threads_lock);
}
313
/* Perform the side effects associated with entering gc_state.phase.
 * Called with the GC state lock held, right after the phase advances
 * (see gc_advance). */
static inline void gc_handle_phase()
{
    odxprint(safepoints,"Entering phase %d",gc_state.phase);
    switch (gc_state.phase) {
    case GC_FLIGHT:
        /* Arm the safepoint page: threads in Lisp will trap. */
        unmap_gc_page();
        break;
    case GC_MESSAGE:
        gc_notify_early();
        break;
    case GC_INVOKED:
        map_gc_page();
        break;
    case GC_SETTLED:
        gc_notify_final();
        unmap_gc_page();
        break;
    case GC_COLLECT:
        map_gc_page();
        break;
    case GC_NONE:
        gc_done();
        break;
    default:
        break;
    }
}
341
342
/* Become ready to leave the <old> phase, but unready to leave the
 * <cur> phase; `old' can be GC_NONE, meaning this thread wasn't
 * blocking any state.  `cur' can be GC_NONE, meaning this thread won't
 * block GC_NONE, but will still wait for it.  While no thread objects,
 * roll the state machine forward (running each phase's side effects
 * and waking its waiters), then wait for `cur'.  Caller must hold the
 * GC state lock as master. */
static inline void gc_advance(gc_phase_t cur, gc_phase_t old) {
    odxprint(safepoints,"GC advance request %d -> %d in phase %d",old,cur,gc_state.phase);
    if (cur == old)
        return;
    if (cur == gc_state.phase)
        return;
    /* If the state already rolled past `old', our hold on it is gone. */
    if (old < gc_state.phase)
        old = GC_NONE;
    if (old != GC_NONE) {
        gc_state.phase_wait[old]--;
        odxprint(safepoints,"%d holders of phase %d without me",gc_state.phase_wait[old],old);
    }
    if (cur != GC_NONE) {
        gc_state.phase_wait[cur]++;
        odxprint(safepoints,"%d holders of phase %d with me",gc_state.phase_wait[cur],cur);
    }
    /* roll forth as long as there's no waiters */
    while (gc_state.phase_wait[gc_state.phase]==0
           && gc_state.phase != cur) {
        gc_state.phase = gc_phase_next(gc_state.phase);
        odxprint(safepoints,"no blockers, direct advance to %d",gc_state.phase);
        gc_handle_phase();
        pthread_cond_broadcast(&gc_state.phase_cond[gc_state.phase]);
    }
    odxprint(safepoints,"going to wait for %d threads",gc_state.phase_wait[gc_state.phase]);
    gc_state_wait(cur);
}
374
/* Kick off a GC cycle: if the machinery is idle (GC_NONE) and this
 * thread is neither inside a safepoint nor holding a phase itself,
 * advance the global state to GC_FLIGHT (which unmaps the safepoint
 * page, eventually trapping every thread running Lisp). */
void
thread_register_gc_trigger()
{
    odxprint(misc, "/thread_register_gc_trigger");
    struct thread *self = arch_os_get_current_thread();
    gc_state_lock();
    if (gc_state.phase == GC_NONE &&
        SymbolTlValue(IN_SAFEPOINT,self)!=T &&
        thread_gc_phase(self)==GC_NONE) {
        gc_advance(GC_FLIGHT,GC_NONE);
    }
    gc_state_unlock();
}
388
389 static inline int
390 thread_may_gc()
391 {
392     /* Thread may gc if all of these are true:
393      * 1) GC_INHIBIT == NIL  (outside of protected part of without-gcing)
394      * 2) GC_PENDING != :in-progress    (outside of recursion protection)
395      * Note that we are in a safepoint here, which is always outside of PA. */
396
397     struct thread *self = arch_os_get_current_thread();
398     return (SymbolValue(GC_INHIBIT, self) == NIL
399             && (SymbolTlValue(GC_PENDING, self) == T ||
400                 SymbolTlValue(GC_PENDING, self) == NIL));
401 }
402
403 #ifdef LISP_FEATURE_SB_THRUPTION
/* May a thruption (asynchronous interruption) run in this thread right
 * now?  Returns non-zero when all the numbered conditions below are
 * met. */
static inline int
thread_may_thrupt(os_context_t *ctx)
{
    struct thread * self = arch_os_get_current_thread();
    /* Thread may be interrupted if all of these are true:
     * 1) Deferrables are unblocked in the context of the signal that
     *    went into the safepoint.  -- Otherwise the surrounding code
     *    didn't want to be interrupted by a signal, so presumably it didn't
     *    want to be INTERRUPT-THREADed either.
     *    (See interrupt_handle_pending for an exception.)
     * 2) On POSIX: There is no pending signal.  This is important even
     *    after checking the sigmask, since we could be in the
     *    handle_pending trap following re-enabling of interrupts.
     *    Signals are unblocked in that case, but the signal is still
     *    pending; we want to run GC before handling the signal and
     *    therefore entered this safepoint.  But the thruption would call
     *    ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
     *    trap, leading to recursion.
     * 3) INTERRUPTS_ENABLED is non-nil.
     * 4) No GC pending; it takes precedence.
     * Note that we are in a safepoint here, which is always outside of PA. */

    if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
        return 0;

    if (SymbolValue(GC_PENDING, self) != NIL)
        return 0;

    if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
        return 0;

#ifdef LISP_FEATURE_WIN32
    if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
        return 0;
#else
    /* ctx is NULL if the caller wants to ignore the sigmask. */
    if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
        return 0;
    if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
        return 0;
#endif

    if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
        /* This special case prevents TERMINATE-THREAD from hitting
         * during INITIAL-THREAD-FUNCTION before it's ready.  Curiously,
         * deferrables are already unblocked there.  Further
         * investigation may be in order. */
        return 0;

    return 1;
}
455
// returns 0 if skipped, 1 otherwise
/* If a thruption is pending and permitted (see thread_may_thrupt), run
 * RUN-INTERRUPTION with deferrable signals blocked, restoring the
 * original mask afterwards. */
int
check_pending_thruptions(os_context_t *ctx)
{
    struct thread *p = arch_os_get_current_thread();

#ifdef LISP_FEATURE_WIN32
    pthread_t pself = p->os_thread;
    sigset_t oldset;
    /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
     * in the self-kill case; instead we do it here while also clearing the
     * "signal". */
    if (pself->pending_signal_set)
        if (__sync_fetch_and_and(&pself->pending_signal_set,0))
            SetSymbolValue(THRUPTION_PENDING, T, p);
#endif

    if (!thread_may_thrupt(ctx))
        return 0;
    if (SymbolValue(THRUPTION_PENDING, p) == NIL)
        return 0;
    /* Consume the pending flag before running the interruption. */
    SetSymbolValue(THRUPTION_PENDING, NIL, p);

#ifdef LISP_FEATURE_WIN32
    oldset = pself->blocked_signal_set;
    pself->blocked_signal_set = deferrable_sigset;
    if (ctx) fake_foreign_function_call(ctx);
#else
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);
#endif

    funcall0(StaticSymbolFunction(RUN_INTERRUPTION));

#ifdef LISP_FEATURE_WIN32
    if (ctx) undo_fake_foreign_function_call(ctx);
    pself->blocked_signal_set = oldset;
    if (ctx) ctx->sigmask = oldset;
#else
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
#endif
    return 1;
}
499 #endif
500
501 int
502 on_stack_p(struct thread *th, void *esp)
503 {
504     return (void *)th->control_stack_start
505         <= esp && esp
506         < (void *)th->control_stack_end;
507 }
508
509 #ifndef LISP_FEATURE_WIN32
/* (Technically, we still allocate an altstack even on Windows.  Since
 * Windows has a contiguous stack with an automatic guard page of
 * user-configurable size instead of an alternative stack though, the
 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
/* Does `esp' point into this thread's SBCL-allocated signal altstack?
 * The altstack lives right after the thread's dynamic values; the
 * 32*SIGSTKSZ size presumably matches the amount reserved at thread
 * creation -- TODO confirm against the thread allocation code. */
int
on_altstack_p(struct thread *th, void *esp)
{
    void *start = (void *)th+dynamic_values_bytes;
    void *end = (char *)start + 32*SIGSTKSZ;
    return start <= esp && esp < end;
}
521 #endif
522
/* Die unless `esp' lies on `th's control stack; gives a more specific
 * error message when it turns out to be on the altstack instead. */
void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}
534
// returns 0 if skipped, 1 otherwise
/* If a GC is pending and this thread may run it, call SUB-GC (and
 * possibly POST-GC) here. */
int
check_pending_gc(os_context_t *ctx)
{
    odxprint(misc, "check_pending_gc");
    struct thread * self = arch_os_get_current_thread();
    int done = 0;
    sigset_t sigset;

    /* Leave the safepoint protocol once both the inhibit and pending
     * flags have been cleared while IN_SAFEPOINT was bound. */
    if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
        ((SymbolValue(GC_INHIBIT,self) == NIL) &&
         (SymbolValue(GC_PENDING,self) == NIL))) {
        SetSymbolValue(IN_SAFEPOINT,NIL,self);
    }
    if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
        if ((SymbolTlValue(GC_PENDING, self) == T)) {
            lispobj gc_happened = NIL;

            /* Bind IN_SAFEPOINT to T as recursion protection, and run
             * SUB-GC with deferrable signals blocked. */
            bind_variable(IN_SAFEPOINT,T,self);
            block_deferrable_signals(NULL,&sigset);
            /* Re-check GC_PENDING; NOTE(review): presumably it can be
             * retracted concurrently, which is why the test is
             * repeated after blocking signals -- confirm. */
            if(SymbolTlValue(GC_PENDING,self)==T)
                gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
            unbind_variable(IN_SAFEPOINT,self);
            thread_sigmask(SIG_SETMASK,&sigset,NULL);
            if (gc_happened == T) {
                /* POST_GC wants to enable interrupts */
                if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
                    SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
                    odxprint(misc, "going to call POST_GC");
                    funcall0(StaticSymbolFunction(POST_GC));
                }
                done = 1;
            }
        }
    }
    return done;
}
572
573 \f
/* Safepoint-trap entry for a thread that faulted while running Lisp
 * code.  Depending on the global phase and this thread's own GC
 * symbols, it either takes over the freshly-triggered GC (GC_FLIGHT),
 * parks with a published stack pointer until the GC completes, or
 * registers itself as a blocker of the phase it holds. */
void thread_in_lisp_raised(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();
    gc_phase_t phase;
    odxprint(safepoints,"%s","thread_in_lisp_raised");
    gc_state_lock();

    /* Case 1: a GC was just triggered (GC_FLIGHT) and this thread is
     * eligible to be the one that performs it. */
    if (gc_state.phase == GC_FLIGHT &&
        SymbolTlValue(GC_PENDING,self)==T &&
        thread_gc_phase(self)==GC_NONE &&
        thread_may_gc() && SymbolTlValue(IN_SAFEPOINT,self)!=T) {
        /* Publish our stack top for conservative scanning, then drive
         * the state machine forward to GC_QUIET. */
        set_csp_from_context(self, ctxptr);
        gc_advance(GC_QUIET,GC_FLIGHT);
        set_thread_csp_access(self,1);
        if (gc_state.collector) {
            /* Someone else is collecting; just wait the GC out. */
            gc_advance(GC_NONE,GC_QUIET);
        } else {
            /* We will collect: mark ourselves "in Lisp" again and keep
             * GC_PENDING set so check_pending_gc below runs SUB-GC. */
            *self->csp_around_foreign_call = 0;
            SetTlSymbolValue(GC_PENDING,T,self);
        }
        gc_state_unlock();
        check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr));
#endif
        return;
    }
    /* Case 2: GC_FLIGHT, but we can't take over; wait until the master
     * thread traps and moves the state to GC_MESSAGE. */
    if (gc_state.phase == GC_FLIGHT) {
        gc_state_wait(GC_MESSAGE);
    }
    phase = thread_gc_phase(self);
    if (phase == GC_NONE) {
        /* We hold nothing: publish our stack pointer and wait for the
         * cycle to come back around to GC_NONE, advancing the state
         * ourselves while it is unobstructed. */
        SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
        set_thread_csp_access(self,1);
        set_csp_from_context(self, ctxptr);
        if (gc_state.phase <= GC_SETTLED)
            gc_advance(phase,gc_state.phase);
        else
            gc_state_wait(phase);
        *self->csp_around_foreign_call = 0;
        gc_state_unlock();
        check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr));
#endif
    } else {
        /* We hold `phase' (GC_INVOKED or GC_QUIET): advance up to it
         * and note that a stop-for-GC remains pending. */
        gc_advance(phase,gc_state.phase);
        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        gc_state_unlock();
    }
}
625
/* Safepoint-trap entry for a thread crossing the Lisp/foreign
 * boundary.  A thread whose csp word is already non-null (in foreign
 * code) simply waits for the phase it holds; otherwise it behaves much
 * like thread_in_lisp_raised. */
void thread_in_safety_transition(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();

    odxprint(safepoints,"%s","GC safety transition");
    gc_state_lock();
    if (set_thread_csp_access(self,1)) {
        /* In foreign code: the published stack pointer already covers
         * us; just wait until our phase is reached. */
        gc_state_wait(thread_gc_phase(self));
        gc_state_unlock();
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr));
#endif
    } else {
        gc_phase_t phase = thread_gc_phase(self);
        if (phase == GC_NONE) {
            /* Holding nothing: publish our stack and wait the cycle
             * out (cf. the same pattern in thread_in_lisp_raised). */
            SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
            set_csp_from_context(self, ctxptr);
            if (gc_state.phase <= GC_SETTLED)
                gc_advance(phase,gc_state.phase);
            else
                gc_state_wait(phase);
            *self->csp_around_foreign_call = 0;
        } else {
            /* Holding `phase': advance to it and leave the
             * stop-for-GC request pending. */
            gc_advance(phase,gc_state.phase);
            SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        }
        gc_state_unlock();
    }
}
655
/* Handler for the pending-interrupt trap: if a GC is in progress,
 * route through the appropriate safepoint handler (safety transition
 * when we are in foreign code, lisp-raised otherwise), then run any
 * pending GC and thruptions. */
void thread_interrupted(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();

    odxprint(safepoints,"%s","pending interrupt trap");
    gc_state_lock();
    if (gc_state.phase != GC_NONE) {
        if (set_thread_csp_access(self,1)) {
            gc_state_unlock();
            thread_in_safety_transition(ctxptr);
        } else {
            gc_state_unlock();
            thread_in_lisp_raised(ctxptr);
        }
    } else {
        gc_state_unlock();
    }
    check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
    while(check_pending_thruptions(ctxptr));
#endif
}
678
/* Bring all other threads to a halt so a collection can run: register
 * this thread as the collector, drive/wait the state machine to
 * GC_QUIET, then advance it to GC_COLLECT.  Callable from any phase;
 * the switch below deliberately falls through from earlier phases to
 * later ones. */
void
gc_stop_the_world()
{
    struct thread* self = arch_os_get_current_thread();
    odxprint(safepoints, "stop the world");
    gc_state_lock();
    gc_state.collector = self;
    /* Hold GC_QUIET so the state cannot roll past it without us. */
    gc_state.phase_wait[GC_QUIET]++;

    switch(gc_state.phase) {
    case GC_NONE:
        gc_advance(GC_QUIET,gc_state.phase);
        /* fallthrough */
    case GC_FLIGHT:
    case GC_MESSAGE:
    case GC_INVOKED:
        gc_state_wait(GC_QUIET);
        /* fallthrough */
    case GC_QUIET:
        /* Exactly our own hold remains; move on to GC_COLLECT. */
        gc_state.phase_wait[GC_QUIET]=1;
        gc_advance(GC_COLLECT,GC_QUIET);
        break;
    case GC_COLLECT:
        break;
    default:
        lose("Stopping the world in unexpected state %d",gc_state.phase);
        break;
    }
    set_thread_csp_access(self,1);
    gc_state_unlock();
    SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
}
709
710 \f
/* Called by the collector when the collection has finished: releases
 * all waiting threads by advancing the state back to GC_NONE. */
void gc_start_the_world()
{
    odxprint(safepoints,"%s","start the world");
    gc_state_lock();
    gc_state.collector = NULL;
    /* NOTE(review): sets IN_WITHOUT_GCING to the symbol itself --
     * presumably the sentinel tested in thread_gc_phase
     * (SymbolTlValue(IN_WITHOUT_GCING,p)==IN_WITHOUT_GCING); confirm
     * intended semantics. */
    SetSymbolValue(IN_WITHOUT_GCING,IN_WITHOUT_GCING,
                     arch_os_get_current_thread());
    gc_advance(GC_NONE,GC_COLLECT);
    gc_state_unlock();
}
721
722 \f
723 #ifdef LISP_FEATURE_SB_THRUPTION
724 /* wake_thread(thread) -- ensure a thruption delivery to
725  * `thread'. */
726
727 # ifdef LISP_FEATURE_WIN32
728
/* Wake `thread' from any waitable-event or interruptible I/O wait
 * (Win32 only). */
void
wake_thread_io(struct thread * thread)
{
    SetEvent(thread->private_events.events[1]);
    win32_maybe_interrupt_io(thread);
}
735
736 void
737 wake_thread_win32(struct thread *thread)
738 {
739     struct thread *self = arch_os_get_current_thread();
740
741     wake_thread_io(thread);
742
743     if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
744         return;
745
746     SetTlSymbolValue(THRUPTION_PENDING,T,thread);
747
748     if ((SymbolTlValue(GC_PENDING,thread)==T)||
749         (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
750         return;
751
752     wake_thread_io(thread);
753     pthread_mutex_unlock(&all_threads_lock);
754
755     gc_state_lock();
756     if (gc_state.phase == GC_NONE) {
757         gc_advance(GC_INVOKED,GC_NONE);
758         gc_advance(GC_NONE,GC_INVOKED);
759     }
760     gc_state_unlock();
761
762     pthread_mutex_lock(&all_threads_lock);
763     return;
764 }
765 # else
/* Ensure a thruption delivery to the thread running on `os_thread'
 * (POSIX flavour).  Returns 0 on success, -1 when no live thread with
 * that os_thread is found. */
int
wake_thread_posix(os_thread_t os_thread)
{
    int found = 0;
    struct thread *thread;
    struct thread *self = arch_os_get_current_thread();

    /* Must not and need not attempt to signal ourselves while we're the
     * STW initiator. */
    if (self->os_thread == os_thread) {
        SetTlSymbolValue(THRUPTION_PENDING,T,self);
        WITH_GC_AT_SAFEPOINTS_ONLY()
            while (check_pending_thruptions(0 /* ignore the sigmask */))
                ;
        return 0;
    }

    /* We are not in a signal handler here, so need to block signals
     * manually. */
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);

    gc_state_lock();
    if (gc_state.phase == GC_NONE) {
        odxprint(safepoints, "wake_thread_posix: invoking");
        /* Cycle the state machine through GC_INVOKED so threads in
         * Lisp hit a safepoint; threads in foreign code get a signal
         * instead (below). */
        gc_advance(GC_INVOKED,GC_NONE);
        {
            /* only if in foreign code, notify using signal */
            pthread_mutex_lock(&all_threads_lock);
            for_each_thread (thread)
                if (thread->os_thread == os_thread) {
                    /* it's still alive... */
                    found = 1;

                    odxprint(safepoints, "wake_thread_posix: found");
                    SetTlSymbolValue(THRUPTION_PENDING,T,thread);
                    if (SymbolTlValue(GC_PENDING,thread) == T
                        || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
                        break;

                    if (os_get_csp(thread)) {
                        odxprint(safepoints, "wake_thread_posix: kill");
                        /* ... and in foreign code.  Push it into a safety
                         * transition. */
                        int status = pthread_kill(os_thread, SIGPIPE);
                        if (status)
                            lose("wake_thread_posix: pthread_kill failed with %d\n",
                                 status);
                    }
                    break;
                }
            pthread_mutex_unlock(&all_threads_lock);
        }
        gc_advance(GC_NONE,GC_INVOKED);
    } else {
        odxprint(safepoints, "wake_thread_posix: passive");
        /* We are not able to wake the thread up actively, but maybe
         * some other thread will take care of it.  Kludge: Unless it is
         * in foreign code.  Let's at least try to get our return value
         * right. */
        pthread_mutex_lock(&all_threads_lock);
        for_each_thread (thread)
            if (thread->os_thread == os_thread) {
                SetTlSymbolValue(THRUPTION_PENDING,T,thread);
                found = 1;
                break;
            }
        pthread_mutex_unlock(&all_threads_lock);
    }
    gc_state_unlock();

    odxprint(safepoints, "wake_thread_posix leaving, found=%d", found);
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
    return found ? 0 : -1;
}
841 #endif /* !LISP_FEATURE_WIN32 */
842 #endif /* LISP_FEATURE_SB_THRUPTION */
843
/* Return the contents of `th's csp_around_foreign_call word: non-NULL
 * (a stack pointer) means the thread is currently in foreign code,
 * NULL means it is running Lisp (see set_thread_csp_access). */
void**
os_get_csp(struct thread* th)
{
    FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
                  th,
                  th->csp_around_foreign_call,
                  *(void***)th->csp_around_foreign_call,
                  th->control_stack_start,
                  th->control_stack_end));
    return *(void***)th->csp_around_foreign_call;
}
855
856
857 #ifndef LISP_FEATURE_WIN32
858
859 # ifdef LISP_FEATURE_SB_THRUPTION
860 void
861 thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
862 {
863     struct thread *self = arch_os_get_current_thread();
864
865     void *transition_sp = os_get_csp(self);
866     if (!transition_sp)
867         /* In Lisp code.  Do not run thruptions asynchronously.  The
868          * next safepoint will take care of it. */
869         return;
870
871     /* In C code.  As a rule, we assume that running thruptions is OK. */
872     *self->csp_around_foreign_call = 0;
873     thread_in_lisp_raised(ctx);
874     *self->csp_around_foreign_call = transition_sp;
875 }
876 # endif
877
/* Designed to be of the same type as call_into_lisp.  Ignores its
 * arguments. */
lispobj
handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_GlobalSafepoint != 0x1a
# error trap_GlobalSafepoint mismatch
#endif
    /* int3 followed by the trap code byte; the #if above pins the byte
     * to trap_GlobalSafepoint, presumably decoded by the breakpoint
     * handler -- confirm against the sigtrap dispatch code. */
    asm("int3; .byte 0x1a;");
    return 0;
}
889
/* Same shape as handle_global_safepoint_violation, but raises the
 * per-thread csp-page safepoint trap instead. */
lispobj
handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_CspSafepoint != 0x1b
# error trap_CspSafepoint mismatch
#endif
    /* int3 followed by the trap code byte pinned to trap_CspSafepoint
     * by the #if above. */
    asm("int3; .byte 0x1b;");
    return 0;
}
899
/* Memory-fault hook: if `fault_address' is one of the two safepoint
 * traps -- the global safepoint page or this thread's csp page --
 * arrange for the faulting context to return into the corresponding
 * trap-raising C function and report 1; otherwise report 0 so the
 * caller treats the fault normally. */
int
handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
{
    FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
                  fault_address,
                  GC_SAFEPOINT_PAGE_ADDR,
                  arch_os_get_current_thread()->csp_around_foreign_call));

    struct thread *self = arch_os_get_current_thread();

    if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
        /* We're on the altstack and don't want to run Lisp code. */
        arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
        return 1;
    }

    if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
        arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
        return 1;
    }

    /* not a safepoint */
    return 0;
}
924 #endif /* LISP_FEATURE_WIN32 */
925
/* Entry point for alien callbacks: re-enters Lisp via
 * ENTER-ALIEN-CALLBACK under safepoint protection.  Must run on a
 * thread already known to Lisp. */
void
callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
{
    struct thread* th = arch_os_get_current_thread();
    if (!th)
        lose("callback invoked in non-lisp thread.  Sorry, that is not supported yet.");

    WITH_GC_AT_SAFEPOINTS_ONLY()
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
}
936
937 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */