Allow synchronous win32 I/O to be interrupted, too
src/runtime/safepoint.c
/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#include "sbcl.h"

#ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif
#include "runtime.h"
#include "validate.h"
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"
#include "alloc.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "interrupt.h"
#include "lispregs.h"

#if !defined(LISP_FEATURE_WIN32)
/* win32-os.c covers these, but there is no unixlike-os.c, so the normal
 * definition goes here.  Fixme: (Why) don't these work for Windows?
 */
void
map_gc_page()
{
    odxprint(misc, "map_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
               4,
               OS_VM_PROT_READ | OS_VM_PROT_WRITE);
}

void
unmap_gc_page()
{
    odxprint(misc, "unmap_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
}
#endif /* !LISP_FEATURE_WIN32 */

static inline int
thread_may_gc()
{
    /* Thread may gc if all of these are true:
     * 1) GC_INHIBIT == NIL  (outside of protected part of without-gcing)
     * 2) GC_PENDING != :in-progress    (outside of recursion protection)
     * Note that we are in a safepoint here, which is always outside of PA. */

    struct thread *self = arch_os_get_current_thread();
    return (SymbolValue(GC_INHIBIT, self) == NIL
            && (SymbolTlValue(GC_PENDING, self) == T ||
                SymbolTlValue(GC_PENDING, self) == NIL));
}
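
/* Note that GC_PENDING can take a third value, :IN-PROGRESS, while SUB-GC
 * itself is running (the "recursion protection" mentioned above); that is
 * the state that in_race_p() below looks for. */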

#ifdef LISP_FEATURE_SB_THRUPTION
static inline int
thread_may_thrupt(os_context_t *ctx)
{
    struct thread * self = arch_os_get_current_thread();
    /* Thread may be interrupted if all of these are true:
     * 1) Deferrables are unblocked in the context of the signal that
     *    went into the safepoint.  -- Otherwise the surrounding code
     *    didn't want to be interrupted by a signal, so presumably it didn't
     *    want to be INTERRUPT-THREADed either.
     *    (See interrupt_handle_pending for an exception.)
     * 2) On POSIX: There is no pending signal.  This is important even
     *    after checking the sigmask, since we could be in the
     *    handle_pending trap following re-enabling of interrupts.
     *    Signals are unblocked in that case, but the signal is still
     *    pending; we want to run GC before handling the signal and
     *    therefore entered this safepoint.  But the thruption would call
     *    ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
     *    trap, leading to recursion.
     * 3) INTERRUPTS_ENABLED is non-nil.
     * 4) No GC pending; it takes precedence.
     * Note that we are in a safepoint here, which is always outside of PA. */

    if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
        return 0;

    if (SymbolValue(GC_PENDING, self) != NIL)
        return 0;

    if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
        return 0;

#ifdef LISP_FEATURE_WIN32
    if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
        return 0;
#else
    /* ctx is NULL if the caller wants to ignore the sigmask. */
    if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
        return 0;
    if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
        return 0;
#endif

    if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
        /* This special case prevents TERMINATE-THREAD from hitting
         * during INITIAL-THREAD-FUNCTION before it's ready.  Curiously,
         * deferrables are already unblocked there.  Further
         * investigation may be in order. */
        return 0;

    return 1;
}

// returns 0 if skipped, 1 otherwise
int
check_pending_thruptions(os_context_t *ctx)
{
    struct thread *p = arch_os_get_current_thread();

#ifdef LISP_FEATURE_WIN32
    pthread_t pself = p->os_thread;
    sigset_t oldset;
    /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
     * in the self-kill case; instead we do it here while also clearing the
     * "signal". */
    if (pself->pending_signal_set)
        if (__sync_fetch_and_and(&pself->pending_signal_set,0))
            SetSymbolValue(THRUPTION_PENDING, T, p);
#endif

    if (!thread_may_thrupt(ctx))
        return 0;
    if (SymbolValue(THRUPTION_PENDING, p) == NIL)
        return 0;
    SetSymbolValue(THRUPTION_PENDING, NIL, p);

#ifdef LISP_FEATURE_WIN32
    oldset = pself->blocked_signal_set;
    pself->blocked_signal_set = deferrable_sigset;
    if (ctx) fake_foreign_function_call(ctx);
#else
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);
#endif

    funcall0(StaticSymbolFunction(RUN_INTERRUPTION));

#ifdef LISP_FEATURE_WIN32
    if (ctx) undo_fake_foreign_function_call(ctx);
    pself->blocked_signal_set = oldset;
    if (ctx) ctx->sigmask = oldset;
#else
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
#endif
    return 1;
}
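
/* Illustrative calling pattern (as in thread_pitstop and thread_edge
 * below): callers drain thruptions in a loop until nothing remains,
 *
 *     while (check_pending_thruptions(ctx))
 *         ;
 *
 * treating a zero return as "skipped, or nothing pending". */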
#endif

int
on_stack_p(struct thread *th, void *esp)
{
    return (void *)th->control_stack_start
        <= esp && esp
        < (void *)th->control_stack_end;
}

#ifndef LISP_FEATURE_WIN32
/* (Technically, we still allocate an altstack even on Windows.  Since
 * Windows has a contiguous stack with an automatic guard page of
 * user-configurable size instead of an alternative stack though, the
 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
int
on_altstack_p(struct thread *th, void *esp)
{
    void *start = (void *)th+dynamic_values_bytes;
    void *end = (char *)start + 32*SIGSTKSZ;
    return start <= esp && esp < end;
}
#endif

void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}

// returns 0 if skipped, 1 otherwise
int
check_pending_gc(os_context_t *ctx)
{
    odxprint(misc, "check_pending_gc");
    struct thread * self = arch_os_get_current_thread();
    int done = 0;
    sigset_t sigset;

    if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
        ((SymbolValue(GC_INHIBIT,self) == NIL) &&
         (SymbolValue(GC_PENDING,self) == NIL))) {
        SetSymbolValue(IN_SAFEPOINT,NIL,self);
    }
    if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
        if ((SymbolTlValue(GC_PENDING, self) == T)) {
            lispobj gc_happened = NIL;

            bind_variable(IN_SAFEPOINT,T,self);
            block_deferrable_signals(NULL,&sigset);
            if(SymbolTlValue(GC_PENDING,self)==T)
                gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
            unbind_variable(IN_SAFEPOINT,self);
            thread_sigmask(SIG_SETMASK,&sigset,NULL);
            if (gc_happened == T) {
                /* POST_GC wants to enable interrupts */
                if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
                    SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
                    odxprint(misc, "going to call POST_GC");
                    funcall0(StaticSymbolFunction(POST_GC));
                }
                done = 1;
            }
        }
    }
    return done;
}

/* Several ideas on interthread signalling could be tried; the
   implementation below was chosen for its moderate size and
   relative simplicity.

   Mutex is the only (conventional) system synchronization primitive
   used by it. Some of the code below looks weird with this
   limitation; rwlocks, Windows Event Objects, or perhaps pthread
   barriers could be used to improve clarity.

   No condvars here: our pthreads_win32 is great, but it doesn't
   provide wait morphing optimization; let's avoid extra context
   switches and extra contention. */

struct gc_dispatcher {

    /* Held by the first thread that decides to signal all others, for
       the entire period while common GC safepoint page is
       unmapped. This thread is called `STW (stop-the-world)
       initiator' below. */
    pthread_mutex_t mx_gpunmapped;

    /* Held by STW initiator while it updates th_stw_initiator and
       takes other locks in this structure */
    pthread_mutex_t mx_gptransition;

    /* Held by STW initiator until the world should be started (GC
       complete, thruptions delivered). */
    pthread_mutex_t mx_gcing;

    /* Held by a SUB-GC's gc_stop_the_world() when thread in SUB-GC
       holds the GC Lisp-level mutex, but _couldn't_ become STW
       initiator (i.e. another thread is already stopping the
       world). */
    pthread_mutex_t mx_subgc;

    /* First thread (at this round) that decided to stop the world */
    struct thread *th_stw_initiator;

    /* Thread running SUB-GC under the `supervision' of STW
       initiator */
    struct thread *th_subgc;

    /* Stop counter. Nested gc-stop-the-world and gc-start-the-world
       work without thundering herd. */
    int stopped;

    /* Thruption flag: Iff true, current STW initiator is delivering
       thruptions and not GCing. */
    boolean thruption;

} gc_dispatcher = {
    /* mutexes lazy initialized, other data initially zeroed */
    .mx_gpunmapped = PTHREAD_MUTEX_INITIALIZER,
    .mx_gptransition = PTHREAD_MUTEX_INITIALIZER,
    .mx_gcing = PTHREAD_MUTEX_INITIALIZER,
    .mx_subgc = PTHREAD_MUTEX_INITIALIZER,
};

\f
/* set_thread_csp_access -- alter page permissions for not-in-Lisp
   flag (Lisp Stack Top) of the thread `p'. The flag may be modified
   if `writable' is true.

   Return true if there is a non-null value in the flag.

   When a thread enters C code or leaves it, a per-thread location is
   modified. That machine word serves as a not-in-Lisp flag; for
   convenience, when in C, it's filled with a topmost stack location
   that may contain Lisp data. When thread is in Lisp, the word
   contains NULL.

   GENCGC uses each thread's flag value for conservative garbage collection.

   There is a full VM page reserved for this word; page permissions
   are switched to read-only for race-free examine + wait + use
   scenarios. */
static inline boolean
set_thread_csp_access(struct thread* p, boolean writable)
{
    os_protect((os_vm_address_t) p->csp_around_foreign_call,
               THREAD_CSP_PAGE_SIZE,
               writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
               : (OS_VM_PROT_READ));
    return !!*p->csp_around_foreign_call;
}
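
/* Sketch of how the flag is used around a foreign call (cf.
   set_csp_from_context and thread_edge below); illustrative, not a
   verbatim call site:

       *self->csp_around_foreign_call = (lispobj)approximate_sp;   <- entering C
       ... run foreign code; GC may treat this thread as parked ...
       *self->csp_around_foreign_call = 0;                         <- back in Lisp

   The STW initiator makes the page read-only, so the store on either
   transition traps and funnels the thread through thread_edge(). */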

\f
/* maybe_become_stw_initiator -- if there is no stop-the-world action
   in progress, begin it by unmapping GC page, and record current
   thread as STW initiator.

   `thruption' flag affects some subtleties of stop/start methods:
   waiting for other threads allowing GC; setting and clearing
   STOP_FOR_GC_PENDING, GC_PENDING, THRUPTION_PENDING, etc.

   Return true if current thread becomes a GC initiator, or already
   _is_ a STW initiator.

   Unlike gc_stop_the_world and gc_start_the_world (that should be
   used in matching pairs), maybe_become_stw_initiator is idempotent
   within a stop-restart cycle. With this call, a thread may `reserve
   the right' to stop the world as early as it wants. */

static inline boolean
maybe_become_stw_initiator(boolean thruption)
{
    struct thread* self = arch_os_get_current_thread();

    /* Double-checked locking. Possible word tearing on some
       architectures, FIXME FIXME, but let's think of it when GENCGC
       and threaded SBCL are ported to them. */
    if (!gc_dispatcher.th_stw_initiator) {
        odxprint(misc,"NULL STW BEFORE GPTRANSITION");
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        /* We hold mx_gptransition. Is there no STW initiator yet? */
        if (!gc_dispatcher.th_stw_initiator) {
            odxprint(misc,"NULL STW IN GPTRANSITION, REPLACING");
            /* Then we are... */
            gc_dispatcher.th_stw_initiator = self;
            gc_dispatcher.thruption = thruption;

            /* hold mx_gcing until we restart the world */
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);

            /* and mx_gpunmapped until we remap common GC page */
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);

            /* we unmap it; other threads running Lisp code will now
               trap. */
            unmap_gc_page();

            /* stop counter; the world is not stopped yet. */
            gc_dispatcher.stopped = 0;
        }
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
    }
    return gc_dispatcher.th_stw_initiator == self;
}

\f
/* maybe_let_the_world_go -- if current thread is a STW initiator,
   unlock internal GC structures, and return true. */
static inline boolean
maybe_let_the_world_go()
{
    struct thread* self = arch_os_get_current_thread();
    if (gc_dispatcher.th_stw_initiator == self) {
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        if (gc_dispatcher.th_stw_initiator == self) {
            gc_dispatcher.th_stw_initiator = NULL;
        }
        pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
        return 1;
    } else {
        return 0;
    }
}

\f
/* gc_stop_the_world -- become STW initiator (waiting for other GCs to
   complete if necessary), and make sure all other threads are either
   stopped or gc-safe (i.e. running foreign calls).

   If GC initiator already exists, gc_stop_the_world() either waits
   for its completion, or cooperates with it: e.g. concurrent pending
   thruption handler allows (SUB-GC) to complete under its
   `supervision'.

   Code sections bounded by gc_stop_the_world and gc_start_the_world
   may be nested; inner calls don't stop or start threads,
   decrementing or incrementing the stop counter instead. */
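
/* Typical pairing, mirroring thread_pitstop() below (illustrative, not
   an additional call site):

       gc_stop_the_world();
       set_thread_csp_access(self,1);
       check_pending_gc(ctxptr);
       gc_start_the_world();
*/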
void
gc_stop_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean thruption;
    if (SymbolTlValue(GC_INHIBIT,self)!=T) {
        /* If GC is enabled, this thread may wait for current STW
           initiator without causing deadlock. */
        if (!maybe_become_stw_initiator(0)) {
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            maybe_become_stw_initiator(0);
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        }
        /* Now _this thread_ should be STW initiator */
        gc_assert(self == gc_dispatcher.th_stw_initiator);
    } else {
        /* GC inhibited; e.g. we are inside SUB-GC */
        if (!maybe_become_stw_initiator(0)) {
            /* Some trouble. Inside SUB-GC, holding the Lisp-side
               mutex, but some other thread is stopping the world. */
            if (gc_dispatcher.thruption) {
                /* Thruption. Wait until it's delivered */
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                /* Warning: mx_gcing is held recursively. */
                gc_assert(maybe_become_stw_initiator(0));
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
            } else {
                /* In SUB-GC, holding mutex; other thread wants to
                   GC. */
                if (gc_dispatcher.th_subgc == self) {
                    /* There is an outer gc_stop_the_world() by _this_
                       thread, running subordinately to initiator.
                       Just increase stop counter. */
                    ++gc_dispatcher.stopped;
                    return;
                }
                /* Register as subordinate collector thread: take
                   mx_subgc */
                pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                ++gc_dispatcher.stopped;

                /* Unlocking this thread's own thread_qrl() signals
                   `time to examine me' to other threads. */
                pthread_mutex_unlock(thread_qrl(self));

                /* STW (GC) initiator thread will see our thread needs
                   to finish GC. It will stop the world and itself,
                   and unlock its qrl. */
                pthread_mutex_lock(thread_qrl(gc_dispatcher.th_stw_initiator));
                return;
            }
        }
    }
    thruption = gc_dispatcher.thruption; /* Thruption or GC? */
    if (!gc_dispatcher.stopped++) {
        /* Outermost stop: signal other threads */
        pthread_mutex_lock(&all_threads_lock);
        /* Phase 1: ensure all threads are aware of the need to stop,
           or locked in the foreign code. */
        for_each_thread(p) {
            pthread_mutex_t *p_qrl = thread_qrl(p);
            if (p==self)
                continue;

            /* Read-protect p's flag */
            if (!set_thread_csp_access(p,0)) {
                odxprint(safepoints,"taking qrl %p of %p", p_qrl, p);
                /* Thread is in Lisp, so it should trap (either in
                   Lisp or in Lisp->FFI transition). Trap handler
                   unlocks thread_qrl(p); when it happens, we're safe
                   to examine that thread. */
                pthread_mutex_lock(p_qrl);
                odxprint(safepoints,"taken qrl %p of %p", p_qrl, p);
                /* Mark thread for the future: should we collect, or
                   wait for its final permission? */
                if (SymbolTlValue(GC_INHIBIT,p)!=T) {
                    SetTlSymbolValue(GC_SAFE,T,p);
                } else {
                    SetTlSymbolValue(GC_SAFE,NIL,p);
                }
                pthread_mutex_unlock(p_qrl);
            } else {
                /* In C; we just disabled writing. */
                if (!thruption) {
                    if (SymbolTlValue(GC_INHIBIT,p)==T) {
                        /* GC inhibited there */
                        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
                        /* Enable writing.  Such threads trap via a
                           pending thruption when the WITHOUT-GCING
                           section ends */
                        set_thread_csp_access(p,1);
                        SetTlSymbolValue(GC_SAFE,NIL,p);
                    } else {
                        /* Thread allows concurrent GC. It runs in C
                           (not a mutator), its in-Lisp flag is
                           read-only (so it traps on return). */
                        SetTlSymbolValue(GC_SAFE,T,p);
                    }
                }
            }
        }
        /* All threads are ready (GC_SAFE == T) or notified (GC_SAFE == NIL). */
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        /* Threads with GC inhibited -- continued */
        odxprint(safepoints,"after remapping GC page %p",self);

        SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
        if (!thruption) {
            struct thread* priority_gc = NULL;
            for_each_thread(p) {
                if (p==self)
                    continue;
                if (SymbolTlValue(GC_SAFE,p)!=T) {
                    /* Wait for thread to `park'. NB it _always_ does
                       it with a pending interrupt trap, so CSP locking is
                       not needed */
                    odxprint(safepoints,"waiting final parking %p (qrl %p)",p, thread_qrl(p));
                    WITH_STATE_SEM(p) {
                        pthread_mutex_lock(thread_qrl(p));
                        if (SymbolTlValue(GC_INHIBIT,p)==T) {
                            /* Concurrent GC invoked manually */
                            gc_assert(!priority_gc); /* Should be at most one at a time */
                            priority_gc = p;
                        }
                        pthread_mutex_unlock(thread_qrl(p));
                    }
                }
                if (!os_get_csp(p))
                    lose("gc_stop_the_world: no SP in parked thread: %p", p);
            }
            if (priority_gc) {
                /* This thread is managing the entire process, so it
                   has to allow manually-invoked GC to complete */
                if (!set_thread_csp_access(self,1)) {
                    /* Create T.O.S. */
                    *self->csp_around_foreign_call = (lispobj)__builtin_frame_address(0);
                    /* Unlock myself */
                    pthread_mutex_unlock(thread_qrl(self));
                    /* Priority GC should take over, holding
                       mx_subgc until it's done. */
                    pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                    /* Lock myself */
                    pthread_mutex_lock(thread_qrl(self));
                    *self->csp_around_foreign_call = 0;
                    SetTlSymbolValue(GC_PENDING,NIL,self);
                    pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
                } else {
                    /* Unlock myself */
                    pthread_mutex_unlock(thread_qrl(self));
                    /* Priority GC should take over, holding
                       mx_subgc until it's done. */
                    pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                    /* Lock myself */
                    pthread_mutex_lock(thread_qrl(self));
                    /* Unlock sub-gc */
                    pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
                }
            }
        }
    }
}

\f
/* gc_start_the_world() -- restart all other threads if the call
   matches the _outermost_ gc_stop_the_world(), or decrement the stop
   counter. */
void
gc_start_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean thruption = gc_dispatcher.thruption;
    if (gc_dispatcher.th_stw_initiator != self) {
        odxprint(misc,"Unmapper %p self %p",gc_dispatcher.th_stw_initiator,self);
        gc_assert (gc_dispatcher.th_subgc == self);
        if (--gc_dispatcher.stopped == 1) {
            gc_dispatcher.th_subgc = NULL;
            pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
            /* GC initiator may continue now */
            pthread_mutex_unlock(thread_qrl(gc_dispatcher.th_stw_initiator));
        }
        return;
    }

    gc_assert(gc_dispatcher.th_stw_initiator == self);

    if (!--gc_dispatcher.stopped) {
        for_each_thread(p) {
            if (!thruption) {
                SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,p);
                SetTlSymbolValue(GC_PENDING,NIL,p);
            }
            if (
#ifdef LISP_FEATURE_SB_THRUPTION
                SymbolTlValue(THRUPTION_PENDING,p)!=T
#else
                1 /* trivially no thruption pending */
#endif
                || SymbolTlValue(INTERRUPTS_ENABLED,p)!=T)
                set_thread_csp_access(p,1);
        }
        pthread_mutex_unlock(&all_threads_lock);
        /* Release everyone */
        maybe_let_the_world_go();
    }
}

\f
/* in_race_p() -- return TRUE if some other thread is inside SUB-GC with
   GC-PENDING :IN-PROGRESS, i.e. we are racing with it. Used to prevent
   deadlock between manual SUB-GC, auto-gc and thruption. */
static inline boolean
in_race_p()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean result = 0;
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (p!=self &&
            SymbolTlValue(GC_PENDING,p)!=T &&
            SymbolTlValue(GC_PENDING,p)!=NIL) {
            result = 1;
            break;
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
    if (result) {
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        maybe_let_the_world_go();
    }
    return result;
}
\f
static void
set_csp_from_context(struct thread *self, os_context_t *ctx)
{
    void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
    /* On POSIX platforms, it is sufficient to investigate only the part
     * of the stack that was live before the interrupt, because in
     * addition, we consider interrupt contexts explicitly.  On Windows,
     * however, we do not keep an explicit stack of exception contexts,
     * and instead arrange for the conservative stack scan to also cover
     * the context implicitly.  The obvious way to do that is to start
     * at the context itself: */
#ifdef LISP_FEATURE_WIN32
    gc_assert((void **) ctx < sp);
    sp = (void**) ctx;
#endif
    gc_assert((void **)self->control_stack_start
              <= sp && sp
              < (void **)self->control_stack_end);
    *self->csp_around_foreign_call = (lispobj) sp;
}

void
thread_pitstop(os_context_t *ctxptr)
{
    struct thread* self = arch_os_get_current_thread();
    boolean inhibitor = (SymbolTlValue(GC_INHIBIT,self)==T);

    odxprint(safepoints,"pitstop [%p]", ctxptr);
    if (inhibitor) {
        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        /* Free qrl to let others know we're ready... */
        WITH_STATE_SEM(self) {
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
        /* Enable FF-CSP recording (harmless: we will gc at the pit stop,
           and the pit stop always waits for GC to end) */
        set_thread_csp_access(self,1);
    } else {
        if (self == gc_dispatcher.th_stw_initiator && gc_dispatcher.stopped) {
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            return;
        }
        if ((SymbolTlValue(GC_PENDING,self)!=NIL) &&
            maybe_become_stw_initiator(0) && !in_race_p()) {
            gc_stop_the_world();
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            gc_start_the_world();
        } else {
            /* An innocent thread which is not an initiator _and_ is
               not objecting. */
            odxprint(safepoints,"pitstop yielding [%p]", ctxptr);
            if (!set_thread_csp_access(self,1)) {
                if (os_get_csp(self))
                    lose("thread_pitstop: would lose csp");
                set_csp_from_context(self, ctxptr);
                pthread_mutex_unlock(thread_qrl(self));
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                *self->csp_around_foreign_call = 0;
                pthread_mutex_lock(thread_qrl(self));
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
            } else {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                set_thread_csp_access(self,1);
                WITH_GC_AT_SAFEPOINTS_ONLY() {
                    pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
#ifdef LISP_FEATURE_SB_THRUPTION
                    while (check_pending_thruptions(ctxptr))
                        ;
#endif
                }
                return;
            }
        }
    }
#ifdef LISP_FEATURE_SB_THRUPTION
    while(check_pending_thruptions(ctxptr));
#endif
}

static inline void
thread_edge(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();
    set_thread_csp_access(self,1);
    if (os_get_csp(self)) {
        if (!self->pc_around_foreign_call)
            return;             /* trivialize */
        odxprint(safepoints,"edge leaving [%p]", ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
#ifdef LISP_FEATURE_SB_THRUPTION
            if (SymbolTlValue(THRUPTION_PENDING,self)==T &&
                SymbolTlValue(INTERRUPTS_ENABLED,self)==T) {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                set_thread_csp_access(self,1);
                WITH_GC_AT_SAFEPOINTS_ONLY() {
                    pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                    while (check_pending_thruptions(ctxptr))
                        ;
                }
            } else
#endif
            {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] took gcing", ctxptr);
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] released gcing", ctxptr);
            }
        }
    } else {
        /* Entering. */
        odxprint(safepoints,"edge entering [%p]", ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr))
            ;
#endif
        if (os_get_csp(self))
            lose("thread_edge: would lose csp");
        set_csp_from_context(self, ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        } else {
            SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
    }
}

\f
/* thread_register_gc_trigger --

   Called by GENCGC in each thread when GC_PENDING becomes T because
   the amount of allocated memory has crossed the threshold in
   auto_gc_trigger. The first such call in a collective GC sequence
   marks the process-wide beginning of GC.
*/
void
thread_register_gc_trigger()
{
    odxprint(misc, "/thread_register_gc_trigger");
    struct thread* self = arch_os_get_current_thread();
    /* This function is called where set_pseudo_atomic_interrupted()
       used to be called, i.e. never with GC_INHIBIT true. */
    gc_assert(SymbolTlValue(GC_INHIBIT,self)!=T);

    /* unmap GC page, signal other threads... */
    maybe_become_stw_initiator(0);
}


\f
#ifdef LISP_FEATURE_SB_THRUPTION
/* wake_thread(thread) -- ensure a thruption delivery to
 * `thread'. */

# ifdef LISP_FEATURE_WIN32

void
wake_thread_io(struct thread * thread)
{
    SetEvent(thread->private_events.events[1]);
    win32_maybe_interrupt_io(thread);
}

void
wake_thread_win32(struct thread *thread)
{
    wake_thread_io(thread);

    if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
        return;

    SetTlSymbolValue(THRUPTION_PENDING,T,thread);

    if ((SymbolTlValue(GC_PENDING,thread)==T)||
        (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
        return;

    wake_thread_io(thread);
    pthread_mutex_unlock(&all_threads_lock);

    if (maybe_become_stw_initiator(1) && !in_race_p()) {
        gc_stop_the_world();
        gc_start_the_world();
    }
    pthread_mutex_lock(&all_threads_lock);
    return;
}
# else
int
wake_thread_posix(os_thread_t os_thread)
{
    int found = 0;
    struct thread *thread;
    struct thread *self = arch_os_get_current_thread();

    /* Must not and need not attempt to signal ourselves while we're the
     * STW initiator. */
    if (self->os_thread == os_thread) {
        SetTlSymbolValue(THRUPTION_PENDING,T,self);
        WITH_GC_AT_SAFEPOINTS_ONLY()
            while (check_pending_thruptions(0 /* ignore the sigmask */))
                ;
        return 0;
    }

    /* We are not in a signal handler here, so need to block signals
     * manually. */
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);

    if (!maybe_become_stw_initiator(1) || in_race_p()) {
        /* we are not able to wake the thread up, but the STW initiator
         * will take care of it (kludge: unless it is in foreign code).
         * Let's at least try to get our return value right. */
        pthread_mutex_lock(&all_threads_lock);
        for_each_thread (thread)
            if (thread->os_thread == os_thread) {
                found = 1;
                break;
            }
        pthread_mutex_unlock(&all_threads_lock);
        goto cleanup;
    }
    gc_stop_the_world();

    /* we hold the all_threads lock */
    for_each_thread (thread)
        if (thread->os_thread == os_thread) {
            /* it's still alive... */
            found = 1;

            SetTlSymbolValue(THRUPTION_PENDING,T,thread);
            if (SymbolTlValue(GC_PENDING,thread) == T
                || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
                break;

            if (os_get_csp(thread)) {
                /* ... and in foreign code.  Push it into a safety
                 * transition. */
                int status = pthread_kill(os_thread, SIGPIPE);
                if (status)
                    lose("wake_thread_posix: pthread_kill failed with %d\n",
                         status);
            }
            break;
        }

    /* If it was alive but in Lisp, the pit stop takes care of thruptions. */
    gc_start_the_world();

cleanup:
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
    return found ? 0 : -1;
}
#endif /* !LISP_FEATURE_WIN32 */
#endif /* LISP_FEATURE_SB_THRUPTION */

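/* The three functions below are the entry points that the trap and
 * signal handling code elsewhere in the runtime is expected to call:
 * thread_in_safety_transition handles the C<->Lisp boundary via
 * thread_edge(), while thread_in_lisp_raised and thread_interrupted
 * both go through the pit stop. */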
void
thread_in_safety_transition(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_safety_transition\n"));
    thread_edge(ctx);
}

void
thread_in_lisp_raised(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_lisp_raised\n"));
    thread_pitstop(ctx);
}

void
thread_interrupted(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_interrupted\n"));
    thread_pitstop(ctx);
}

void**
os_get_csp(struct thread* th)
{
    FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
                  th,
                  th->csp_around_foreign_call,
                  *(void***)th->csp_around_foreign_call,
                  th->control_stack_start,
                  th->control_stack_end));
    return *(void***)th->csp_around_foreign_call;
}


#ifndef LISP_FEATURE_WIN32

# ifdef LISP_FEATURE_SB_THRUPTION
void
thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
{
    struct thread *self = arch_os_get_current_thread();

    if (!os_get_csp(self))
        /* In Lisp code.  Do not run thruptions asynchronously.  The
         * next safepoint will take care of it. */
        return;

    /* In C code.  As a rule, we assume that running thruptions is OK. */
    fake_foreign_function_call(ctx);
    thread_in_safety_transition(ctx);
    undo_fake_foreign_function_call(ctx);
}
# endif

/* Designed to be of the same type as call_into_lisp.  Ignores its
 * arguments. */
lispobj
handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_GlobalSafepoint != 0x1a
# error trap_GlobalSafepoint mismatch
#endif
    asm("int3; .byte 0x1a;");
    return 0;
}

lispobj
handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_CspSafepoint != 0x1b
# error trap_CspSafepoint mismatch
#endif
    asm("int3; .byte 0x1b;");
    return 0;
}
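
/* A note on the two wrappers above: the byte following int3 encodes the
 * trap code, which is why the preprocessor checks insist that
 * trap_GlobalSafepoint and trap_CspSafepoint still have the values
 * hard-coded into the asm strings; the breakpoint handler is expected
 * to read that byte to tell the two safepoint kinds apart. */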

int
handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
{
    FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
                  fault_address,
                  GC_SAFEPOINT_PAGE_ADDR,
                  arch_os_get_current_thread()->csp_around_foreign_call));

    struct thread *self = arch_os_get_current_thread();

    if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
        /* We're on the altstack and don't want to run Lisp code. */
        arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
        return 1;
    }

    if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
        arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
        return 1;
    }

    /* not a safepoint */
    return 0;
}
#endif /* !LISP_FEATURE_WIN32 */

void
callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
{
    struct thread* th = arch_os_get_current_thread();
    if (!th)
        lose("callback invoked in non-lisp thread.  Sorry, that is not supported yet.");

    WITH_GC_AT_SAFEPOINTS_ONLY()
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
}

#endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */