953b8e9f5d4b038efa46ffd782fb3bde331ebb07
[sbcl.git] / src / runtime / safepoint.c
1 /*
2  * This software is part of the SBCL system. See the README file for
3  * more information.
4  *
5  * This software is derived from the CMU CL system, which was
6  * written at Carnegie Mellon University and released into the
7  * public domain. The software is in the public domain and is
8  * provided with absolutely no warranty. See the COPYING and CREDITS
9  * files for more information.
10  */
11 #include "sbcl.h"
12
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
14 #include <stdlib.h>
15 #include <stdio.h>
16 #include <string.h>
17 #ifndef LISP_FEATURE_WIN32
18 #include <sched.h>
19 #endif
20 #include <signal.h>
21 #include <stddef.h>
22 #include <errno.h>
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
25 #include <sys/wait.h>
26 #endif
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
31 #endif
32 #include "runtime.h"
33 #include "validate.h"
34 #include "thread.h"
35 #include "arch.h"
36 #include "target-arch-os.h"
37 #include "os.h"
38 #include "globals.h"
39 #include "dynbind.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
42 #include "interr.h"
43 #include "alloc.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
47 #include "lispregs.h"
48
49 /* Temporarily, this macro is a wrapper for FSHOW_SIGNAL.  Ultimately,
50  * it will be restored to its full win32 branch functionality, where it
51  * provides a very useful tracing mechanism that is configurable at
52  * runtime. */
/* odxprint_show(what, fmt, args...) -- debug tracing tagged with the
 * current thread, its OS thread handle, and the subsystem name `what',
 * routed through FSHOW_SIGNAL.  Uses the GNU named-variadic-macro
 * extension (`args...' with `##args') so the call compiles even when
 * no varargs are supplied. */
#define odxprint_show(what, fmt, args...)                       \
     do {                                                       \
         struct thread *__self = arch_os_get_current_thread();  \
         FSHOW_SIGNAL((stderr, "[%p/%p:%s] " fmt "\n",          \
                       __self,                                  \
                       __self->os_thread,                       \
                       #what,                                   \
                       ##args));                                \
     } while (0)

/* odxprint expands to nothing unless QSHOW_SIGNALS is enabled. */
#if QSHOW_SIGNALS
# define odxprint odxprint_show
#else
# define odxprint(what, fmt, args...) do {} while (0)
#endif
68
69 #if !defined(LISP_FEATURE_WIN32)
70 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
71  * definition goes here.  Fixme: (Why) don't these work for Windows?
72  */
73 void
74 map_gc_page()
75 {
76     odxprint(misc, "map_gc_page");
77     os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
78                4,
79                OS_VM_PROT_READ | OS_VM_PROT_WRITE);
80 }
81
82 void
83 unmap_gc_page()
84 {
85     odxprint(misc, "unmap_gc_page");
86     os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
87 }
88 #endif /* !LISP_FEATURE_WIN32 */
89
90 static inline int
91 thread_may_gc()
92 {
93     /* Thread may gc if all of these are true:
94      * 1) GC_INHIBIT == NIL  (outside of protected part of without-gcing)
95      * 2) GC_PENDING != :in-progress    (outside of recursion protection)
96      * Note that we are in a safepoint here, which is always outside of PA. */
97
98     struct thread *self = arch_os_get_current_thread();
99     return (SymbolValue(GC_INHIBIT, self) == NIL
100             && (SymbolTlValue(GC_PENDING, self) == T ||
101                 SymbolTlValue(GC_PENDING, self) == NIL));
102 }
103
104 int
105 on_stack_p(struct thread *th, void *esp)
106 {
107     return (void *)th->control_stack_start
108         <= esp && esp
109         < (void *)th->control_stack_end;
110 }
111
112 #ifndef LISP_FEATURE_WIN32
113 /* (Technically, we still allocate an altstack even on Windows.  Since
114  * Windows has a contiguous stack with an automatic guard page of
115  * user-configurable size instead of an alternative stack though, the
116  * SBCL-allocated altstack doesn't actually apply and won't be used.) */
117 int
118 on_altstack_p(struct thread *th, void *esp)
119 {
120     void *start = (void *)th+dynamic_values_bytes;
121     void *end = (char *)start + 32*SIGSTKSZ;
122     return start <= esp && esp < end;
123 }
124 #endif
125
/* Die loudly (with a diagnosis of where esp actually points) unless
 * `esp' lies within th's control stack. */
void
assert_on_stack(struct thread *th, void *esp)
{
    if (!on_stack_p(th, esp)) {
#ifndef LISP_FEATURE_WIN32
        if (on_altstack_p(th, esp))
            lose("thread %p: esp on altstack: %p", th, esp);
#endif
        lose("thread %p: bogus esp: %p", th, esp);
    }
}
137
// returns 0 if skipped, 1 otherwise
int
check_pending_gc(os_context_t *ctx)
{
    odxprint(misc, "check_pending_gc");
    struct thread * self = arch_os_get_current_thread();
    int done = 0;
    sigset_t sigset;

    /* A stale IN-SAFEPOINT left over from a GC that has fully
     * completed (nothing inhibited, nothing pending): clear it. */
    if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
        ((SymbolValue(GC_INHIBIT,self) == NIL) &&
         (SymbolValue(GC_PENDING,self) == NIL))) {
        SetSymbolValue(IN_SAFEPOINT,NIL,self);
    }
    if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
        if ((SymbolTlValue(GC_PENDING, self) == T)) {
            lispobj gc_happened = NIL;

            /* Bind IN-SAFEPOINT to T around SUB-GC as a recursion
             * guard, and block deferrable signals (saving the old
             * mask) for the duration. */
            bind_variable(IN_SAFEPOINT,T,self);
            block_deferrable_signals(NULL,&sigset);
            /* Re-check now that signals are blocked: GC-PENDING may
             * have changed in the meantime. */
            if(SymbolTlValue(GC_PENDING,self)==T)
                gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
            unbind_variable(IN_SAFEPOINT,self);
            thread_sigmask(SIG_SETMASK,&sigset,NULL);
            if (gc_happened == T) {
                /* POST_GC wants to enable interrupts */
                if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
                    SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
                    odxprint(misc, "going to call POST_GC");
                    funcall0(StaticSymbolFunction(POST_GC));
                }
                done = 1;
            }
        }
    }
    return done;
}
175
176 /* Several ideas on interthread signalling should be
177    tried. Implementation below was chosen for its moderate size and
178    relative simplicity.
179
180    Mutex is the only (conventional) system synchronization primitive
181    used by it. Some of the code below looks weird with this
182    limitation; rwlocks, Windows Event Objects, or perhaps pthread
183    barriers could be used to improve clarity.
184
185    No condvars here: our pthreads_win32 is great, but it doesn't
186    provide wait morphing optimization; let's avoid extra context
187    switches and extra contention. */
188
/* Shared state of the stop-the-world protocol.  A single global
   instance is defined below; all coordination goes through the
   mutexes it contains. */
struct gc_dispatcher {

    /* Held by the first thread that decides to signal all others, for
       the entire period while common GC safepoint page is
       unmapped. This thread is called `STW (stop-the-world)
       initiator' below. */
    pthread_mutex_t mx_gpunmapped;

    /* Held by STW initiator while it updates th_stw_initiator and
       takes other locks in this structure */
    pthread_mutex_t mx_gptransition;

    /* Held by STW initiator until the world should be started (GC
       complete, thruptions delivered). */
    pthread_mutex_t mx_gcing;

    /* Held by a SUB-GC's gc_stop_the_world() when thread in SUB-GC
       holds the GC Lisp-level mutex, but _couldn't_ become STW
       initiator (i.e. another thread is already stopping the
       world). */
    pthread_mutex_t mx_subgc;

    /* First thread (at this round) that decided to stop the world */
    struct thread *th_stw_initiator;

    /* Thread running SUB-GC under the `supervision' of STW
       initiator */
    struct thread *th_subgc;

    /* Stop counter. Nested gc-stop-the-world and gc-start-the-world
       work without thundering herd. */
    int stopped;

} gc_dispatcher = {
    /* mutexes lazy initialized, other data initially zeroed */
    .mx_gpunmapped = PTHREAD_MUTEX_INITIALIZER,
    .mx_gptransition = PTHREAD_MUTEX_INITIALIZER,
    .mx_gcing = PTHREAD_MUTEX_INITIALIZER,
    .mx_subgc = PTHREAD_MUTEX_INITIALIZER,
};
229
230 \f
231 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
232    flag (Lisp Stack Top) of the thread `p'. The flag may be modified
233    if `writable' is true.
234
235    Return true if there is a non-null value in the flag.
236
237    When a thread enters C code or leaves it, a per-thread location is
238    modified. That machine word serves as a not-in-Lisp flag; for
239    convenience, when in C, it's filled with a topmost stack location
240    that may contain Lisp data. When thread is in Lisp, the word
241    contains NULL.
242
243    GENCGC uses each thread's flag value for conservative garbage collection.
244
245    There is a full VM page reserved for this word; page permissions
246    are switched to read-only for race-free examine + wait + use
247    scenarios. */
248 static inline boolean
249 set_thread_csp_access(struct thread* p, boolean writable)
250 {
251     os_protect((os_vm_address_t) p->csp_around_foreign_call,
252                THREAD_CSP_PAGE_SIZE,
253                writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
254                : (OS_VM_PROT_READ));
255     return !!*p->csp_around_foreign_call;
256 }
257
258 \f
259 /* maybe_become_stw_initiator -- if there is no stop-the-world action
260    in progress, begin it by unmapping GC page, and record current
261    thread as STW initiator.
262
263    Return true if current thread becomes a GC initiator, or already
264    _is_ a STW initiator.
265
266    Unlike gc_stop_the_world and gc_start_the_world (that should be
267    used in matching pairs), maybe_become_stw_initiator is idempotent
268    within a stop-restart cycle. With this call, a thread may `reserve
269    the right' to stop the world as early as it wants. */
270
static inline boolean
maybe_become_stw_initiator()
{
    struct thread* self = arch_os_get_current_thread();

    /* Double-checked locking. Possible word tearing on some
       architectures, FIXME FIXME, but let's think of it when GENCGC
       and threaded SBCL is ported to them. */
    if (!gc_dispatcher.th_stw_initiator) {
        odxprint(misc,"NULL STW BEFORE GPTRANSITION");
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        /* We hold mx_gptransition. Is there no STW initiator yet? */
        if (!gc_dispatcher.th_stw_initiator) {
            odxprint(misc,"NULL STW IN GPTRANSITION, REPLACING");
            /* Then we are... */
            gc_dispatcher.th_stw_initiator = self;

            /* hold mx_gcing until we restart the world */
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);

            /* and mx_gpunmapped until we remap common GC page */
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);

            /* we unmap it; other threads running Lisp code will now
               trap. */
            unmap_gc_page();

            /* stop counter; the world is not stopped yet. */
            gc_dispatcher.stopped = 0;
        }
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
    }
    /* True iff this thread is now (or already was) the initiator. */
    return gc_dispatcher.th_stw_initiator == self;
}
305
306 \f
307 /* maybe_let_the_world_go -- if current thread is a STW initiator,
308    unlock internal GC structures, and return true. */
static inline boolean
maybe_let_the_world_go()
{
    struct thread* self = arch_os_get_current_thread();
    if (gc_dispatcher.th_stw_initiator == self) {
        /* Re-check under mx_gptransition, mirroring the double-checked
           locking in maybe_become_stw_initiator. */
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        if (gc_dispatcher.th_stw_initiator == self) {
            gc_dispatcher.th_stw_initiator = NULL;
        }
        /* Dropping mx_gcing releases threads parked on it. */
        pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
        return 1;
    } else {
        return 0;
    }
}
325
326 \f
327 /* gc_stop_the_world -- become STW initiator (waiting for other GCs to
328    complete if necessary), and make sure all other threads are either
329    stopped or gc-safe (i.e. running foreign calls).
330
331    If GC initiator already exists, gc_stop_the_world() either waits
332    for its completion, or cooperates with it: e.g. concurrent pending
333    thruption handler allows (SUB-GC) to complete under its
334    `supervision'.
335
336    Code sections bounded by gc_stop_the_world and gc_start_the_world
337    may be nested; inner calls don't stop or start threads,
338    decrementing or incrementing the stop counter instead. */
void
gc_stop_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    if (SymbolTlValue(GC_INHIBIT,self)!=T) {
        /* If GC is enabled, this thread may wait for current STW
           initiator without causing deadlock. */
        if (!maybe_become_stw_initiator()) {
            /* Block until the current initiator restarts the world
               (it drops mx_gcing then), and retry the takeover. */
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            maybe_become_stw_initiator();
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        }
        /* Now _this thread_ should be STW initiator */
        gc_assert(self == gc_dispatcher.th_stw_initiator);
    } else {
        /* GC inhibited; e.g. we are inside SUB-GC */
        if (!maybe_become_stw_initiator()) {
            /* Some trouble. Inside SUB-GC, holding the Lisp-side
               mutex, but some other thread is stopping the world. */
            {
                /* In SUB-GC, holding mutex; other thread wants to
                   GC. */
                if (gc_dispatcher.th_subgc == self) {
                    /* There is an outer gc_stop_the_world() by _this_
                       thread, running subordinately to initiator.
                       Just increase stop counter. */
                    ++gc_dispatcher.stopped;
                    return;
                }
                /* Register as subordinate collector thread: take
                   mx_subgc */
                pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                ++gc_dispatcher.stopped;

                /* Unlocking thread's own thread_qrl() designates
                   `time to examine me' to other threads. */
                pthread_mutex_unlock(thread_qrl(self));

                /* STW (GC) initiator thread will see our thread needs
                   to finish GC. It will stop the world and itself,
                   and unlock its qrl. */
                pthread_mutex_lock(thread_qrl(gc_dispatcher.th_stw_initiator));
                return;
            }
        }
    }
    if (!gc_dispatcher.stopped++) {
        /* Outermost stop: signal other threads */
        pthread_mutex_lock(&all_threads_lock);
        /* Phase 1: ensure all threads are aware of the need to stop,
           or locked in the foreign code. */
        for_each_thread(p) {
            pthread_mutex_t *p_qrl = thread_qrl(p);
            if (p==self)
                continue;

            /* Read-protect p's flag */
            if (!set_thread_csp_access(p,0)) {
                odxprint(safepoints,"taking qrl %p of %p", p_qrl, p);
                /* Thread is in Lisp, so it should trap (either in
                   Lisp or in Lisp->FFI transition). Trap handler
                   unlocks thread_qrl(p); when it happens, we're safe
                   to examine that thread. */
                pthread_mutex_lock(p_qrl);
                odxprint(safepoints,"taken qrl %p of %p", p_qrl, p);
                /* Mark thread for the future: should we collect, or
                   wait for its final permission? */
                if (SymbolTlValue(GC_INHIBIT,p)!=T) {
                    SetTlSymbolValue(GC_SAFE,T,p);
                } else {
                    SetTlSymbolValue(GC_SAFE,NIL,p);
                }
                pthread_mutex_unlock(p_qrl);
            } else {
                /* In C; we just disabled writing. */
                {
                    if (SymbolTlValue(GC_INHIBIT,p)==T) {
                        /* GC inhibited there */
                        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
                        /* Enable writing.  Such threads trap by
                           pending thruption when WITHOUT-GCING
                           section ends */
                        set_thread_csp_access(p,1);
                        SetTlSymbolValue(GC_SAFE,NIL,p);
                    } else {
                        /* Thread allows concurrent GC. It runs in C
                           (not a mutator), its in-Lisp flag is
                           read-only (so it traps on return). */
                        SetTlSymbolValue(GC_SAFE,T,p);
                    }
                }
            }
        }
        /* All threads are ready (GC_SAFE == T) or notified (GC_SAFE == NIL). */
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        /* Threads with GC inhibited -- continued */
        odxprint(safepoints,"after remapping GC page %p",self);

        SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
        {
            struct thread* priority_gc = NULL;
            /* Phase 2: wait for every not-yet-safe thread to park,
               noting at most one thread that runs GC manually with
               GC-INHIBIT set (`priority GC'). */
            for_each_thread(p) {
                if (p==self)
                    continue;
                if (SymbolTlValue(GC_SAFE,p)!=T) {
                    /* Wait for thread to `park'. NB it _always_ does
                       it with a pending interrupt trap, so CSP locking is
                       not needed */
                    odxprint(safepoints,"waiting final parking %p (qrl %p)",p, thread_qrl(p));
                    WITH_STATE_SEM(p) {
                        pthread_mutex_lock(thread_qrl(p));
                        if (SymbolTlValue(GC_INHIBIT,p)==T) {
                            /* Concurrent GC invoked manually */
                            gc_assert(!priority_gc); /* Should be at most one at a time */
                            priority_gc = p;
                        }
                        pthread_mutex_unlock(thread_qrl(p));
                    }
                }
                if (!os_get_csp(p))
                    lose("gc_stop_the_world: no SP in parked thread: %p", p);
            }
            if (priority_gc) {
                /* This thread is managing the entire process, so it
                   has to allow manually-invoked GC to complete */
                if (!set_thread_csp_access(self,1)) {
                    /* Create T.O.S. */
                    *self->csp_around_foreign_call = (lispobj)__builtin_frame_address(0);
                    /* Unlock myself */
                    pthread_mutex_unlock(thread_qrl(self));
                    /* Priority GC should take over, holding
                       mx_subgc until it's done. */
                    pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                    /* Lock myself */
                    pthread_mutex_lock(thread_qrl(self));
                    *self->csp_around_foreign_call = 0;
                    SetTlSymbolValue(GC_PENDING,NIL,self);
                    pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
                } else {
                    /* Unlock myself */
                    pthread_mutex_unlock(thread_qrl(self));
                    /* Priority GC should take over, holding
                       mx_subgc until it's done. */
                    pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                    /* Lock myself */
                    pthread_mutex_lock(thread_qrl(self));
                    /* Unlock sub-gc */
                    pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
                }
            }
        }
    }
}
493
494 \f
495 /* gc_start_the_world() -- restart all other threads if the call
496    matches the _outermost_ gc_stop_the_world(), or decrement the stop
497    counter. */
void
gc_start_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    if (gc_dispatcher.th_stw_initiator != self) {
        /* We are the subordinate SUB-GC thread, not the initiator. */
        odxprint(misc,"Unmapper %p self %p",gc_dispatcher.th_stw_initiator,self);
        gc_assert (gc_dispatcher.th_subgc == self);
        /* Counter reaches 1 (not 0): the initiator still accounts for
           one outstanding stop of its own. */
        if (--gc_dispatcher.stopped == 1) {
            gc_dispatcher.th_subgc = NULL;
            pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
            /* GC initiator may continue now */
            pthread_mutex_unlock(thread_qrl(gc_dispatcher.th_stw_initiator));
        }
        return;
    }

    gc_assert(gc_dispatcher.th_stw_initiator == self);

    if (!--gc_dispatcher.stopped) {
        /* Outermost start: clear per-thread GC flags, restore write
           access to each thread's csp word, and release the locks. */
        for_each_thread(p) {
            {
                SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,p);
                SetTlSymbolValue(GC_PENDING,NIL,p);
            }
            set_thread_csp_access(p,1);
        }
        pthread_mutex_unlock(&all_threads_lock);
        /* Release everyone */
        maybe_let_the_world_go();
    }
}
529
530 \f
/* in_race_p() -- return TRUE if some other thread is inside SUB-GC with
   GC-PENDING :IN-PROGRESS (in which case we back out of STW initiation).
   Used to prevent deadlock between manual SUB-GC, auto-gc and thruption. */
static inline boolean
in_race_p()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean result = 0;
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        /* GC-PENDING neither T nor NIL means :IN-PROGRESS: another
           thread is already inside SUB-GC. */
        if (p!=self &&
            SymbolTlValue(GC_PENDING,p)!=T &&
            SymbolTlValue(GC_PENDING,p)!=NIL) {
            result = 1;
            break;
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
    if (result) {
        /* We lost the race: undo our STW initiation (remap the GC
           page and release the dispatcher locks). */
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        maybe_let_the_world_go();
    }
    return result;
}
556 \f
557 static void
558 set_csp_from_context(struct thread *self, os_context_t *ctx)
559 {
560     void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
561     gc_assert((void **)self->control_stack_start
562               <= sp && sp
563               < (void **)self->control_stack_end);
564     *self->csp_around_foreign_call = (lispobj) sp;
565 }
566
/* Handle a safepoint trap taken while running Lisp code: depending on
   this thread's role, either run/initiate GC, park until GC finishes,
   or (with GC inhibited) wait only for the GC page to be remapped. */
void
thread_pitstop(os_context_t *ctxptr)
{
    struct thread* self = arch_os_get_current_thread();
    boolean inhibitor = (SymbolTlValue(GC_INHIBIT,self)==T);

    odxprint(safepoints,"pitstop [%p]", ctxptr);
    if (inhibitor) {
        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        /* Free qrl to let know we're ready... */
        WITH_STATE_SEM(self) {
            pthread_mutex_unlock(thread_qrl(self));
            /* ...then block until the initiator remaps the GC page. */
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
        /* Enable FF-CSP recording (not hurt: will gc at pit-stop, and
           pit-stop always waits for GC end) */
        set_thread_csp_access(self,1);
    } else {
        if (self == gc_dispatcher.th_stw_initiator && gc_dispatcher.stopped) {
            /* We are the initiator and the world is already stopped:
               just collect here. */
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            return;
        }
        if ((SymbolTlValue(GC_PENDING,self)!=NIL) &&
            maybe_become_stw_initiator() && !in_race_p()) {
            gc_stop_the_world();
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            gc_start_the_world();
        } else {
            /* An innocent thread which is not an initiator _and_ is
               not objecting. */
            odxprint(safepoints,"pitstop yielding [%p]", ctxptr);
            if (!set_thread_csp_access(self,1)) {
                if (os_get_csp(self))
                    lose("thread_pitstop: would lose csp");
                /* Park: publish our stack top, release our qrl, and
                   wait for GC completion by taking mx_gcing. */
                set_csp_from_context(self, ctxptr);
                pthread_mutex_unlock(thread_qrl(self));
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                *self->csp_around_foreign_call = 0;
                pthread_mutex_lock(thread_qrl(self));
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
            } else {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                set_thread_csp_access(self,1);
                WITH_GC_AT_SAFEPOINTS_ONLY() {
                    pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                }
                return;
            }
        }
    }
}
622
/* Handle a csp-page trap taken at a C<->Lisp transition edge: wait for
   any GC in progress before crossing the boundary. */
static inline void
thread_edge(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();
    set_thread_csp_access(self,1);
    if (os_get_csp(self)) {
        /* Non-null csp word: leaving foreign code. */
        if (!self->pc_around_foreign_call)
            return;             /* trivialize */
        odxprint(safepoints,"edge leaving [%p]", ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
            {
                /* Ride out any GC in progress by taking and dropping
                   mx_gcing before re-entering Lisp. */
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] took gcing", ctxptr);
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] released gcing", ctxptr);
            }
        }
    } else {
        /* Entering. */
        odxprint(safepoints,"edge entering [%p]", ctxptr);
        if (os_get_csp(self))
            lose("thread_edge: would lose csp");
        set_csp_from_context(self, ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
            /* Park on mx_gcing until the current GC completes. */
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        } else {
            /* GC inhibited: wait only for the GC page remap. */
            SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
    }
}
662
663 \f
664 /* thread_register_gc_trigger --
665
666    Called by GENCGC in each thread where GC_PENDING becomes T because
667    allocated memory size has crossed the threshold in
668    auto_gc_trigger. For the new collective GC sequence, its first call
669    marks a process-wide beginning of GC.
670 */
671 void
672 thread_register_gc_trigger()
673 {
674     odxprint(misc, "/thread_register_gc_trigger");
675     struct thread* self = arch_os_get_current_thread();
676     /* This function should be called instead of former
677        set_pseudo_atomic_interrupted(), e.g. never with true
678        GC_INHIBIT */
679     gc_assert(SymbolTlValue(GC_INHIBIT,self)!=T);
680
681     /* unmap GC page, signal other threads... */
682     maybe_become_stw_initiator();
683 }
684
685
686 \f
/* Entry point for a trap on the per-thread csp page (C<->Lisp
 * transition); delegates to thread_edge. */
void
thread_in_safety_transition(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_safety_transition\n"));
    thread_edge(ctx);
}
693
/* Entry point for a trap on the global safepoint page while running
 * Lisp code; delegates to thread_pitstop. */
void
thread_in_lisp_raised(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_lisp_raised\n"));
    thread_pitstop(ctx);
}
700
701 void**
702 os_get_csp(struct thread* th)
703 {
704     FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
705                   th,
706                   th->csp_around_foreign_call,
707                   *(void***)th->csp_around_foreign_call,
708                   th->control_stack_start,
709                   th->control_stack_end));
710     return *(void***)th->csp_around_foreign_call;
711 }
712
713
714 #ifndef LISP_FEATURE_WIN32
715
716 /* Designed to be of the same type as call_into_lisp.  Ignores its
717  * arguments. */
lispobj
handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
/* Compile-time check that the hardcoded trap byte below stays in sync
   with the trap_GlobalSafepoint constant. */
#if trap_GlobalSafepoint != 0x1a
# error trap_GlobalSafepoint mismatch
#endif
    /* Breakpoint instruction followed by the trap-code byte. */
    asm("int3; .byte 0x1a;");
    return 0;
}
727
lispobj
handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
/* Compile-time check that the hardcoded trap byte below stays in sync
   with the trap_CspSafepoint constant. */
#if trap_CspSafepoint != 0x1b
# error trap_CspSafepoint mismatch
#endif
    /* Breakpoint instruction followed by the trap-code byte. */
    asm("int3; .byte 0x1b;");
    return 0;
}
737
738 int
739 handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
740 {
741     FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
742                   fault_address,
743                   GC_SAFEPOINT_PAGE_ADDR,
744                   arch_os_get_current_thread()->csp_around_foreign_call));
745
746     struct thread *self = arch_os_get_current_thread();
747
748     if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
749         /* We're on the altstack and don't want to run Lisp code. */
750         arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
751         return 1;
752     }
753
754     if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
755         arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
756         return 1;
757     }
758
759     /* not a safepoint */
760     return 0;
761 }
#endif /* !LISP_FEATURE_WIN32 */
763
/* C-to-Lisp callback entry: invoke ENTER-ALIEN-CALLBACK with the three
 * callback arguments, with GC confined to safepoints for the duration. */
void
callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
{
    struct thread* th = arch_os_get_current_thread();
    /* arch_os_get_current_thread() returns NULL in threads not created
       by the Lisp runtime; such callbacks are unsupported. */
    if (!th)
        lose("callback invoked in non-lisp thread.  Sorry, that is not supported yet.");

    WITH_GC_AT_SAFEPOINTS_ONLY()
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
}
774
775 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */