2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
17 #ifndef LISP_FEATURE_WIN32
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
36 #include "target-arch-os.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51 * definition goes here. Fixme: (Why) don't these work for Windows?
/* NOTE(review): listing elided here — the enclosing function headers
 * (alloc/map/unmap of the global safepoint page) are not visible.
 * Visible fragments: reserve the page, then flip its protection
 * between READ|WRITE ("mapped") and PROT_NONE ("unmapped") —
 * presumably so threads touching the page trap at safepoints;
 * confirm against the full source. */
56 os_validate(GC_SAFEPOINT_PAGE_ADDR, 4);
/* map_gc_page fragment: restore access to the safepoint page. */
62 odxprint(misc, "map_gc_page");
63 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
65 OS_VM_PROT_READ | OS_VM_PROT_WRITE);
/* unmap_gc_page fragment: revoke all access to the safepoint page. */
71 odxprint(misc, "unmap_gc_page");
72 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
74 #endif /* !LISP_FEATURE_WIN32 */
/* thread_may_gc() body fragment — the signature and braces are elided
 * in this listing.  Returns nonzero iff GC_INHIBIT is NIL and
 * GC_PENDING is either T or NIL (i.e. not the :in-progress marker). */
79 /* Thread may gc if all of these are true:
80 * 1) GC_INHIBIT == NIL (outside of protected part of without-gcing)
81 * 2) GC_PENDING != :in-progress (outside of recursion protection)
82 * Note that we are in a safepoint here, which is always outside of PA. */
84 struct thread *self = arch_os_get_current_thread();
85 return (SymbolValue(GC_INHIBIT, self) == NIL
86 && (SymbolTlValue(GC_PENDING, self) == T ||
87 SymbolTlValue(GC_PENDING, self) == NIL));
90 #ifdef LISP_FEATURE_SB_THRUPTION
/* thread_may_thrupt(ctx) fragment — return type, braces, and the
 * `return' statements following each guard clause are elided in this
 * listing; each visible `if' presumably bails out with 0 (and the
 * fall-through returns 1) — confirm against the full source. */
92 thread_may_thrupt(os_context_t *ctx)
94 struct thread * self = arch_os_get_current_thread();
95 /* Thread may be interrupted if all of these are true:
96 * 1) Deferrables are unblocked in the context of the signal that
97 * went into the safepoint. -- Otherwise the surrounding code
98 * didn't want to be interrupted by a signal, so presumably it didn't
99 * want to be INTERRUPT-THREADed either.
100 * (See interrupt_handle_pending for an exception.)
101 * 2) On POSIX: There is no pending signal. This is important even
102 * after checking the sigmask, since we could be in the
103 * handle_pending trap following re-enabling of interrupts.
104 * Signals are unblocked in that case, but the signal is still
105 * pending; we want to run GC before handling the signal and
106 * therefore entered this safepoint. But the thruption would call
107 * ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
108 * trap, leading to recursion.
109 * 3) INTERRUPTS_ENABLED is non-nil.
110 * 4) No GC pending; it takes precedence.
111 * Note that we are in a safepoint here, which is always outside of PA. */
113 if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
116 if (SymbolValue(GC_PENDING, self) != NIL)
119 if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
122 #ifdef LISP_FEATURE_WIN32
123 if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
126 /* ctx is NULL if the caller wants to ignore the sigmask. */
127 if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
129 if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
133 if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
134 /* This special case prevents TERMINATE-THREAD from hitting
135 * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
136 * deferrables are already unblocked there. Further
137 * investigation may be in order. */
/* check_pending_thruptions(ctx) fragment — braces, early returns, and
 * the `sigset_t oldset' declaration are elided in this listing.
 * Visible flow: (Win32) atomically drain the self-kill signal set into
 * THRUPTION_PENDING; bail if the thread may not be thrupted or nothing
 * is pending; clear the flag, block deferrables, run Lisp
 * RUN-INTERRUPTION, then restore the saved mask. */
143 // returns 0 if skipped, 1 otherwise
145 check_pending_thruptions(os_context_t *ctx)
147 struct thread *p = arch_os_get_current_thread();
149 #ifdef LISP_FEATURE_WIN32
150 pthread_t pself = p->os_thread;
152 /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
153 * in the self-kill case; instead we do it here while also clearing the
155 if (pself->pending_signal_set)
156 if (__sync_fetch_and_and(&pself->pending_signal_set,0))
157 SetSymbolValue(THRUPTION_PENDING, T, p);
160 if (!thread_may_thrupt(ctx))
162 if (SymbolValue(THRUPTION_PENDING, p) == NIL)
164 SetSymbolValue(THRUPTION_PENDING, NIL, p);
166 #ifdef LISP_FEATURE_WIN32
167 oldset = pself->blocked_signal_set;
168 pself->blocked_signal_set = deferrable_sigset;
169 if (ctx) fake_foreign_function_call(ctx);
/* POSIX branch of the #ifdef (the #else line is elided): */
172 block_deferrable_signals(0, &oldset);
175 funcall0(StaticSymbolFunction(RUN_INTERRUPTION));
177 #ifdef LISP_FEATURE_WIN32
178 if (ctx) undo_fake_foreign_function_call(ctx);
179 pself->blocked_signal_set = oldset;
180 if (ctx) ctx->sigmask = oldset;
182 pthread_sigmask(SIG_SETMASK, &oldset, 0);
/* on_stack_p(th, esp) fragment — return type, braces, and the middle
 * comparison term (`<= esp &&' against esp, per the surrounding
 * comparisons) are elided in this listing.  Tests whether esp lies
 * within th's control stack bounds. */
189 on_stack_p(struct thread *th, void *esp)
191 return (void *)th->control_stack_start
193 < (void *)th->control_stack_end;
196 #ifndef LISP_FEATURE_WIN32
197 /* (Technically, we still allocate an altstack even on Windows. Since
198 * Windows has a contiguous stack with an automatic guard page of
199 * user-configurable size instead of an alternative stack though, the
200 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
/* on_altstack_p(th, esp) fragment — return type and braces elided.
 * The altstack is laid out immediately after the thread struct's
 * dynamic values area, 32*SIGSTKSZ bytes long. */
202 on_altstack_p(struct thread *th, void *esp)
204 void *start = (void *)th+dynamic_values_bytes;
205 void *end = (char *)start + 32*SIGSTKSZ;
206 return start <= esp && esp < end;
/* assert_on_stack(th, esp) fragment — return type, braces, and the
 * early `return' after the on_stack_p check are elided.  Loses with a
 * diagnostic if esp is on the altstack or outside the control stack. */
211 assert_on_stack(struct thread *th, void *esp)
213 if (on_stack_p(th, esp))
215 #ifndef LISP_FEATURE_WIN32
216 if (on_altstack_p(th, esp))
217 lose("thread %p: esp on altstack: %p", th, esp);
219 lose("thread %p: bogus esp: %p", th, esp);
/* check_pending_gc(ctx) fragment — braces, the `sigset_t sigset'
 * declaration, closing braces, and the return paths are elided.
 * Visible flow: clear a stale IN_SAFEPOINT binding; if GC is allowed
 * and pending, bind IN_SAFEPOINT, block deferrables, call Lisp SUB-GC,
 * unwind, and run POST_GC when interrupts permit. */
222 // returns 0 if skipped, 1 otherwise
224 check_pending_gc(os_context_t *ctx)
226 odxprint(misc, "check_pending_gc");
227 struct thread * self = arch_os_get_current_thread();
/* Clean up a leftover IN_SAFEPOINT=T binding once GC is neither
 * inhibited nor pending. */
231 if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
232 ((SymbolValue(GC_INHIBIT,self) == NIL) &&
233 (SymbolValue(GC_PENDING,self) == NIL))) {
234 SetSymbolValue(IN_SAFEPOINT,NIL,self);
236 if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
237 if ((SymbolTlValue(GC_PENDING, self) == T)) {
238 lispobj gc_happened = NIL;
240 bind_variable(IN_SAFEPOINT,T,self);
241 block_deferrable_signals(NULL,&sigset);
/* Re-check under blocked signals before actually collecting. */
242 if(SymbolTlValue(GC_PENDING,self)==T)
243 gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
244 unbind_variable(IN_SAFEPOINT,self);
245 thread_sigmask(SIG_SETMASK,&sigset,NULL);
246 if (gc_happened == T) {
247 /* POST_GC wants to enable interrupts */
248 if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
249 SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
250 odxprint(misc, "going to call POST_GC");
251 funcall0(StaticSymbolFunction(POST_GC));
260 /* Several ideas on interthread signalling should be
261 tried. Implementation below was chosen for its moderate size and
264 Mutex is the only (conventional) system synchronization primitive
265 used by it. Some of the code below looks weird with this
266 limitation; rwlocks, Windows Event Objects, or perhaps pthread
267 barriers could be used to improve clarity.
269 No condvars here: our pthreads_win32 is great, but it doesn't
270 provide wait morphing optimization; let's avoid extra context
271 switches and extra contention. */
/* NOTE(review): struct fragment — the declarations of the `stopped'
 * counter and the `thruption' flag (documented below), the closing
 * `} gc_dispatcher = {' line, and the initializer's terminator are
 * elided in this listing. */
273 struct gc_dispatcher {
275 /* Held by the first thread that decides to signal all others, for
276 the entire period while common GC safepoint page is
277 unmapped. This thread is called `STW (stop-the-world)
279 pthread_mutex_t mx_gpunmapped;
281 /* Held by STW initiator while it updates th_stw_initiator and
282 takes other locks in this structure */
283 pthread_mutex_t mx_gptransition;
285 /* Held by STW initiator until the world should be started (GC
286 complete, thruptions delivered). */
287 pthread_mutex_t mx_gcing;
289 /* Held by a SUB-GC's gc_stop_the_world() when thread in SUB-GC
290 holds the GC Lisp-level mutex, but _couldn't_ become STW
291 initiator (i.e. another thread is already stopping the
293 pthread_mutex_t mx_subgc;
295 /* First thread (at this round) that decided to stop the world */
296 struct thread *th_stw_initiator;
298 /* Thread running SUB-GC under the `supervision' of STW
300 struct thread *th_subgc;
302 /* Stop counter. Nested gc-stop-the-world and gc-start-the-world
303 work without thundering herd. */
306 /* Thruption flag: Iff true, current STW initiator is delivering
307 thruptions and not GCing. */
311 /* mutexes lazy initialized, other data initially zeroed */
312 .mx_gpunmapped = PTHREAD_MUTEX_INITIALIZER,
313 .mx_gptransition = PTHREAD_MUTEX_INITIALIZER,
314 .mx_gcing = PTHREAD_MUTEX_INITIALIZER,
315 .mx_subgc = PTHREAD_MUTEX_INITIALIZER,
319 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
320 flag (Lisp Stack Top) of the thread `p'. The flag may be modified
321 if `writable' is true.
323 Return true if there is a non-null value in the flag.
325 When a thread enters C code or leaves it, a per-thread location is
326 modified. That machine word serves as a not-in-Lisp flag; for
327 convenience, when in C, it's filled with a topmost stack location
328 that may contain Lisp data. When thread is in Lisp, the word
331 GENCGC uses each thread's flag value for conservative garbage collection.
333 There is a full VM page reserved for this word; page permissions
334 are switched to read-only for race-free examine + wait + use
/* NOTE(review): opening brace elided in this listing; the body is a
 * single os_protect on the CSP page followed by the flag test. */
336 static inline boolean
337 set_thread_csp_access(struct thread* p, boolean writable)
339 os_protect((os_vm_address_t) p->csp_around_foreign_call,
340 THREAD_CSP_PAGE_SIZE,
341 writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
342 : (OS_VM_PROT_READ));
343 return !!*p->csp_around_foreign_call;
347 /* maybe_become_stw_initiator -- if there is no stop-the-world action
348 in progress, begin it by unmapping GC page, and record current
349 thread as STW initiator.
351 `thruption' flag affects some subtleties of stop/start methods:
352 waiting for other threads allowing GC; setting and clearing
353 STOP_FOR_GC_PENDING, GC_PENDING, THRUPTION_PENDING, etc.
355 Return true if current thread becomes a GC initiator, or already
356 _is_ a STW initiator.
358 Unlike gc_stop_the_world and gc_start_the_world (that should be
359 used in matching pairs), maybe_become_stw_initiator is idempotent
360 within a stop-restart cycle. With this call, a thread may `reserve
361 the right' to stop the world as early as it wants. */
/* NOTE(review): braces, the unmap_gc_page() call under the comment at
 * original line 387, and the inner-if closer are elided in this
 * listing. */
363 static inline boolean
364 maybe_become_stw_initiator(boolean thruption)
366 struct thread* self = arch_os_get_current_thread();
368 /* Double-checked locking. Possible word tearing on some
369 architectures, FIXME FIXME, but let's think of it when GENCGC
370 and threaded SBCL is ported to them. */
371 if (!gc_dispatcher.th_stw_initiator) {
372 odxprint(misc,"NULL STW BEFORE GPTRANSITION");
373 pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
374 /* We hold mx_gptransition. Is there no STW initiator yet? */
375 if (!gc_dispatcher.th_stw_initiator) {
376 odxprint(misc,"NULL STW IN GPTRANSITION, REPLACING");
378 gc_dispatcher.th_stw_initiator = self;
379 gc_dispatcher.thruption = thruption;
381 /* hold mx_gcing until we restart the world */
382 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
384 /* and mx_gpunmapped until we remap common GC page */
385 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
387 /* we unmap it; other threads running Lisp code will now
391 /* stop counter; the world is not stopped yet. */
392 gc_dispatcher.stopped = 0;
394 pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
396 return gc_dispatcher.th_stw_initiator == self;
400 /* maybe_let_the_world_go -- if current thread is a STW initiator,
401 unlock internal GC structures, and return true. */
/* NOTE(review): braces, the inner-if closer, and the return
 * statements (true on the initiator path, presumably false otherwise)
 * are elided in this listing.  Same double-checked pattern as
 * maybe_become_stw_initiator, under mx_gptransition. */
402 static inline boolean
403 maybe_let_the_world_go()
405 struct thread* self = arch_os_get_current_thread();
406 if (gc_dispatcher.th_stw_initiator == self) {
407 pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
408 if (gc_dispatcher.th_stw_initiator == self) {
409 gc_dispatcher.th_stw_initiator = NULL;
411 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
412 pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
420 /* gc_stop_the_world -- become STW initiator (waiting for other GCs to
421 complete if necessary), and make sure all other threads are either
422 stopped or gc-safe (i.e. running foreign calls).
424 If GC initiator already exists, gc_stop_the_world() either waits
425 for its completion, or cooperates with it: e.g. concurrent pending
426 thruption handler allows (SUB-GC) to complete under its
429 Code sections bounded by gc_stop_the_world and gc_start_the_world
430 may be nested; inner calls don't stop or start threads,
431 decrementing or incrementing the stop counter instead. */
/* NOTE(review): this listing is heavily elided — the signature,
 * braces, several `else' lines, for_each_thread loop headers, early
 * returns, remap_gc_page(), and the final GC_PENDING bookkeeping are
 * missing from view.  Only the visible statements are documented. */
435 struct thread* self = arch_os_get_current_thread(), *p;
437 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
438 /* If GC is enabled, this thread may wait for current STW
439 initiator without causing deadlock. */
440 if (!maybe_become_stw_initiator(0)) {
441 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
442 maybe_become_stw_initiator(0);
443 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
445 /* Now _this thread_ should be STW initiator */
446 gc_assert(self == gc_dispatcher.th_stw_initiator);
448 /* GC inhibited; e.g. we are inside SUB-GC */
449 if (!maybe_become_stw_initiator(0)) {
450 /* Some trouble. Inside SUB-GC, holding the Lisp-side
451 mutex, but some other thread is stopping the world. */
452 if (gc_dispatcher.thruption) {
453 /* Thruption. Wait until it's delivered */
454 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
455 /* Warning: mx_gcing is held recursively. */
456 gc_assert(maybe_become_stw_initiator(0));
457 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
459 /* In SUB-GC, holding mutex; other thread wants to
461 if (gc_dispatcher.th_subgc == self) {
462 /* There is an outer gc_stop_the_world() by _this_
463 thread, running subordinately to initiator.
464 Just increase stop counter. */
465 ++gc_dispatcher.stopped;
468 /* Register as subordinate collector thread: take
470 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
471 ++gc_dispatcher.stopped;
473 /* Unlocking thread's own thread_qrl() designates
474 `time to examine me' to other threads. */
475 pthread_mutex_unlock(thread_qrl(self));
477 /* STW (GC) initiator thread will see our thread needs
478 to finish GC. It will stop the world and itself,
479 and unlock its qrl. */
480 pthread_mutex_lock(thread_qrl(gc_dispatcher.th_stw_initiator));
485 thruption = gc_dispatcher.thruption; /* Thruption or GC? */
486 if (!gc_dispatcher.stopped++) {
487 /* Outermost stop: signal other threads */
488 pthread_mutex_lock(&all_threads_lock);
489 /* Phase 1: ensure all threads are aware of the need to stop,
490 or locked in the foreign code. */
/* (for_each_thread loop header elided) */
492 pthread_mutex_t *p_qrl = thread_qrl(p);
496 /* Read-protect p's flag */
497 if (!set_thread_csp_access(p,0)) {
498 odxprint(safepoints,"taking qrl %p of %p", p_qrl, p);
499 /* Thread is in Lisp, so it should trap (either in
500 Lisp or in Lisp->FFI transition). Trap handler
501 unlocks thread_qrl(p); when it happens, we're safe
502 to examine that thread. */
503 pthread_mutex_lock(p_qrl);
504 odxprint(safepoints,"taken qrl %p of %p", p_qrl, p);
505 /* Mark thread for the future: should we collect, or
506 wait for its final permission? */
507 if (SymbolTlValue(GC_INHIBIT,p)!=T) {
508 SetTlSymbolValue(GC_SAFE,T,p);
510 SetTlSymbolValue(GC_SAFE,NIL,p);
512 pthread_mutex_unlock(p_qrl);
514 /* In C; we just disabled writing. */
516 if (SymbolTlValue(GC_INHIBIT,p)==T) {
517 /* GC inhibited there */
518 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
519 /* Enable writing. Such threads trap by
520 pending thruption when WITHOUT-GCING
522 set_thread_csp_access(p,1);
523 SetTlSymbolValue(GC_SAFE,NIL,p);
525 /* Thread allows concurrent GC. It runs in C
526 (not a mutator), its in-Lisp flag is
527 read-only (so it traps on return). */
528 SetTlSymbolValue(GC_SAFE,T,p);
533 /* All threads are ready (GC_SAFE == T) or notified (GC_SAFE == NIL). */
535 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
536 /* Threads with GC inhibited -- continued */
537 odxprint(safepoints,"after remapping GC page %p",self);
539 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
/* Phase 2 (loop header elided): wait for each non-GC_SAFE thread to
 * park, noting at most one thread doing a manual concurrent GC. */
541 struct thread* priority_gc = NULL;
545 if (SymbolTlValue(GC_SAFE,p)!=T) {
546 /* Wait for thread to `park'. NB it _always_ does
547 it with a pending interrupt trap, so CSP locking is
549 odxprint(safepoints,"waiting final parking %p (qrl %p)",p, thread_qrl(p));
551 pthread_mutex_lock(thread_qrl(p));
552 if (SymbolTlValue(GC_INHIBIT,p)==T) {
553 /* Concurrent GC invoked manually */
554 gc_assert(!priority_gc); /* Should be at most one at a time */
557 pthread_mutex_unlock(thread_qrl(p));
561 lose("gc_stop_the_world: no SP in parked thread: %p", p);
/* (if (priority_gc) branch header elided) */
564 /* This thread is managing the entire process, so it
565 has to allow manually-invoked GC to complete */
566 if (!set_thread_csp_access(self,1)) {
/* Was in Lisp: publish our own frame as the conservative root. */
568 *self->csp_around_foreign_call = (lispobj)__builtin_frame_address(0);
570 pthread_mutex_unlock(thread_qrl(self));
571 /* Priority GC should take over, holding
572 mx_subgc until it's done. */
573 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
575 pthread_mutex_lock(thread_qrl(self));
576 *self->csp_around_foreign_call = 0;
577 SetTlSymbolValue(GC_PENDING,NIL,self);
578 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
/* (else branch: already in C, CSP already set) */
581 pthread_mutex_unlock(thread_qrl(self));
582 /* Priority GC should take over, holding
583 mx_subgc until it's done. */
584 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
586 pthread_mutex_lock(thread_qrl(self));
588 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
596 /* gc_start_the_world() -- restart all other threads if the call
597 matches the _outermost_ gc_stop_the_world(), or decrement the stop
/* NOTE(review): signature, braces, a for_each_thread loop header, the
 * condition opener around the THRUPTION_PENDING test, and early
 * returns are elided in this listing. */
602 struct thread* self = arch_os_get_current_thread(), *p;
603 boolean thruption = gc_dispatcher.thruption;
604 if (gc_dispatcher.th_stw_initiator != self) {
605 odxprint(misc,"Unmapper %p self %p",gc_dispatcher.th_stw_initiator,self);
606 gc_assert (gc_dispatcher.th_subgc == self);
607 if (--gc_dispatcher.stopped == 1) {
608 gc_dispatcher.th_subgc = NULL;
609 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
610 /* GC initiator may continue now */
611 pthread_mutex_unlock(thread_qrl(gc_dispatcher.th_stw_initiator));
616 gc_assert(gc_dispatcher.th_stw_initiator == self);
618 if (!--gc_dispatcher.stopped) {
/* Outermost start: clear per-thread pending flags and re-enable
 * each thread's CSP page unless a thruption must still trap it. */
621 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,p);
622 SetTlSymbolValue(GC_PENDING,NIL,p);
625 #ifdef LISP_FEATURE_SB_THRUPTION
626 SymbolTlValue(THRUPTION_PENDING,p)!=T
628 1 /* trivially no thruption pending */
630 || SymbolTlValue(INTERRUPTS_ENABLED,p)!=T)
631 set_thread_csp_access(p,1);
633 pthread_mutex_unlock(&all_threads_lock);
634 /* Release everyone */
635 maybe_let_the_world_go();
640 /* in_race_p() -- return TRUE if no other thread is inside SUB-GC with
641 GC-PENDING :IN-PROGRESS. Used to prevent deadlock between manual
642 SUB-GC, auto-gc and thruption. */
/* NOTE(review): the function name line, braces, loop header, the
 * `race' accumulator, and the return are elided in this listing; the
 * visible test matches threads whose GC_PENDING is neither T nor NIL
 * (i.e. :IN-PROGRESS).  On detecting a race the fragment below backs
 * out of STW initiation. */
643 static inline boolean
646 struct thread* self = arch_os_get_current_thread(), *p;
648 pthread_mutex_lock(&all_threads_lock);
651 SymbolTlValue(GC_PENDING,p)!=T &&
652 SymbolTlValue(GC_PENDING,p)!=NIL) {
657 pthread_mutex_unlock(&all_threads_lock);
660 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
661 maybe_let_the_world_go();
/* set_csp_from_context(self, ctx) fragment — static qualifier/return
 * type, braces, and the middle comparison term of the bounds assert
 * are elided.  Publishes the interrupted stack pointer (or, on
 * Windows, a pointer at or below the context) into the thread's
 * csp_around_foreign_call word for conservative scanning. */
667 set_csp_from_context(struct thread *self, os_context_t *ctx)
669 void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
670 /* On POSIX platforms, it is sufficient to investigate only the part
671 * of the stack that was live before the interrupt, because in
672 * addition, we consider interrupt contexts explicitly. On Windows,
673 * however, we do not keep an explicit stack of exception contexts,
674 * and instead arrange for the conservative stack scan to also cover
675 * the context implicitly. The obvious way to do that is to start
676 * at the context itself: */
677 #ifdef LISP_FEATURE_WIN32
678 gc_assert((void **) ctx < sp);
681 gc_assert((void **)self->control_stack_start
683 < (void **)self->control_stack_end);
684 *self->csp_around_foreign_call = (lispobj) sp;
688 thread_pitstop(os_context_t *ctxptr)
690 struct thread* self = arch_os_get_current_thread();
691 boolean inhibitor = (SymbolTlValue(GC_INHIBIT,self)==T);
693 odxprint(safepoints,"pitstop [%p]", ctxptr);
695 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
696 /* Free qrl to let know we're ready... */
697 WITH_STATE_SEM(self) {
698 pthread_mutex_unlock(thread_qrl(self));
699 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
700 pthread_mutex_lock(thread_qrl(self));
701 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
703 /* Enable FF-CSP recording (not hurt: will gc at pit-stop, and
704 pit-stop always waits for GC end) */
705 set_thread_csp_access(self,1);
707 if (self == gc_dispatcher.th_stw_initiator && gc_dispatcher.stopped) {
708 set_thread_csp_access(self,1);
709 check_pending_gc(ctxptr);
712 if ((SymbolTlValue(GC_PENDING,self)!=NIL) &&
713 maybe_become_stw_initiator(0) && !in_race_p()) {
715 set_thread_csp_access(self,1);
716 check_pending_gc(ctxptr);
717 gc_start_the_world();
719 /* An innocent thread which is not an initiator _and_ is
721 odxprint(safepoints,"pitstop yielding [%p]", ctxptr);
722 if (!set_thread_csp_access(self,1)) {
723 if (os_get_csp(self))
724 lose("thread_pitstop: would lose csp");
725 set_csp_from_context(self, ctxptr);
726 pthread_mutex_unlock(thread_qrl(self));
727 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
728 *self->csp_around_foreign_call = 0;
729 pthread_mutex_lock(thread_qrl(self));
730 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
732 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
733 set_thread_csp_access(self,1);
734 WITH_GC_AT_SAFEPOINTS_ONLY() {
735 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
736 #ifdef LISP_FEATURE_SB_THRUPTION
737 while (check_pending_thruptions(ctxptr))
745 #ifdef LISP_FEATURE_SB_THRUPTION
746 while(check_pending_thruptions(ctxptr));
/* thread_edge(ctxptr) fragment — return type, braces, `else' lines,
 * and returns are elided in this listing.  Handles a CSP-page trap at
 * the Lisp<->C boundary: if the thread's CSP is already set it is
 * *leaving* C into Lisp (deliver thruptions, then wait on mx_gcing);
 * otherwise it is *entering* C (publish CSP from the context and
 * handshake with the initiator via qrl / mx_gcing / mx_gpunmapped). */
751 thread_edge(os_context_t *ctxptr)
753 struct thread *self = arch_os_get_current_thread();
754 set_thread_csp_access(self,1);
755 if (os_get_csp(self)) {
756 if (!self->pc_around_foreign_call)
757 return; /* trivialize */
758 odxprint(safepoints,"edge leaving [%p]", ctxptr);
759 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
760 #ifdef LISP_FEATURE_SB_THRUPTION
761 if (SymbolTlValue(THRUPTION_PENDING,self)==T &&
762 SymbolTlValue(INTERRUPTS_ENABLED,self)==T) {
763 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
764 set_thread_csp_access(self,1);
765 WITH_GC_AT_SAFEPOINTS_ONLY() {
766 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
767 while (check_pending_thruptions(ctxptr))
773 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
774 odxprint(safepoints,"edge leaving [%p] took gcing", ctxptr);
775 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
776 odxprint(safepoints,"edge leaving [%p] released gcing", ctxptr);
/* Entering-C path: */
781 odxprint(safepoints,"edge entering [%p]", ctxptr);
782 #ifdef LISP_FEATURE_SB_THRUPTION
783 while(check_pending_thruptions(ctxptr))
786 if (os_get_csp(self))
787 lose("thread_edge: would lose csp");
788 set_csp_from_context(self, ctxptr);
789 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
790 pthread_mutex_unlock(thread_qrl(self));
791 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
792 *self->csp_around_foreign_call = 0;
793 pthread_mutex_lock(thread_qrl(self));
794 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
796 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
797 pthread_mutex_unlock(thread_qrl(self));
798 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
799 *self->csp_around_foreign_call = 0;
800 pthread_mutex_lock(thread_qrl(self));
801 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
807 /* thread_register_gc_trigger --
809 Called by GENCGC in each thread where GC_PENDING becomes T because
810 allocated memory size has crossed the threshold in
811 auto_gc_trigger. For the new collective GC sequence, its first call
812 marks a process-wide beginning of GC.
/* NOTE(review): return type and braces elided in this listing.  Body:
 * assert GC is not inhibited, then (first caller wins) become the STW
 * initiator, which unmaps the GC page and signals other threads. */
815 thread_register_gc_trigger()
817 odxprint(misc, "/thread_register_gc_trigger");
818 struct thread* self = arch_os_get_current_thread();
819 /* This function should be called instead of former
820 set_pseudo_atomic_interrupted(), e.g. never with true
822 gc_assert(SymbolTlValue(GC_INHIBIT,self)!=T);
824 /* unmap GC page, signal other threads... */
825 maybe_become_stw_initiator(0);
830 #ifdef LISP_FEATURE_SB_THRUPTION
831 /* wake_thread(thread) -- ensure a thruption delivery to
/* NOTE(review): the Windows wake path below is heavily elided —
 * signatures' return types, braces, early returns, and the
 * stop-the-world call between the initiator check and
 * gc_start_the_world() are missing from view. */
834 # ifdef LISP_FEATURE_WIN32
/* wake_thread_io: nudge the target out of blocking I/O. */
837 wake_thread_io(struct thread * thread)
839 SetEvent(thread->private_events.events[1]);
840 win32_maybe_interrupt_io(thread);
844 wake_thread_win32(struct thread *thread)
846 wake_thread_io(thread);
/* Already pending?  The early return after this test is elided. */
848 if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
851 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
/* If the target will hit a safepoint for GC anyway, that safepoint
 * delivers the thruption; the early return here is elided. */
853 if ((SymbolTlValue(GC_PENDING,thread)==T)||
854 (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
857 wake_thread_io(thread);
858 pthread_mutex_unlock(&all_threads_lock);
860 if (maybe_become_stw_initiator(1) && !in_race_p()) {
862 gc_start_the_world();
864 pthread_mutex_lock(&all_threads_lock);
/* wake_thread_posix(os_thread) fragment — return type, braces, the
 * `found' flag and `sigset_t oldset' declarations, the
 * stop-the-world call, and several closers are elided in this
 * listing.  Self-wake is handled inline; otherwise the world is
 * stopped (thruption mode) and threads parked in foreign code are
 * kicked with SIGPIPE. */
869 wake_thread_posix(os_thread_t os_thread)
872 struct thread *thread;
873 struct thread *self = arch_os_get_current_thread();
875 /* Must not and need not attempt to signal ourselves while we're the
877 if (self->os_thread == os_thread) {
878 SetTlSymbolValue(THRUPTION_PENDING,T,self);
879 WITH_GC_AT_SAFEPOINTS_ONLY()
880 while (check_pending_thruptions(0 /* ignore the sigmask */))
885 /* We are not in a signal handler here, so need to block signals
888 block_deferrable_signals(0, &oldset);
890 if (!maybe_become_stw_initiator(1) || in_race_p()) {
891 /* we are not able to wake the thread up, but the STW initiator
892 * will take care of it (kludge: unless it is in foreign code).
893 * Let's at least try to get our return value right. */
894 pthread_mutex_lock(&all_threads_lock);
895 for_each_thread (thread)
896 if (thread->os_thread == os_thread) {
900 pthread_mutex_unlock(&all_threads_lock);
905 /* we hold the all_threads lock */
906 for_each_thread (thread)
907 if (thread->os_thread == os_thread) {
908 /* it's still alive... */
911 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
912 if (SymbolTlValue(GC_PENDING,thread) == T
913 || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
916 if (os_get_csp(thread)) {
917 /* ... and in foreign code. Push it into a safety
919 int status = pthread_kill(os_thread, SIGPIPE);
921 lose("wake_thread_posix: pthread_kill failed with %d\n",
927 /* If it was alive but in Lisp, the pit stop takes care of thruptions. */
928 gc_start_the_world();
931 pthread_sigmask(SIG_SETMASK, &oldset, 0);
932 return found ? 0 : -1;
934 #endif /* !LISP_FEATURE_WIN32 */
935 #endif /* LISP_FEATURE_SB_THRUPTION */
/* Trap-handler entry points — return types, braces, and bodies beyond
 * the FSHOW_SIGNAL trace lines are elided in this listing; the full
 * source presumably dispatches to thread_edge/thread_pitstop —
 * confirm against the complete file. */
938 thread_in_safety_transition(os_context_t *ctx)
940 FSHOW_SIGNAL((stderr, "thread_in_safety_transition\n"));
945 thread_in_lisp_raised(os_context_t *ctx)
947 FSHOW_SIGNAL((stderr, "thread_in_lisp_raised\n"));
952 thread_interrupted(os_context_t *ctx)
954 FSHOW_SIGNAL((stderr, "thread_interrupted\n"));
/* os_get_csp(th) fragment — return type and braces elided.  Reads the
 * thread's csp_around_foreign_call word: non-NULL means the thread is
 * in C with a published conservative stack pointer. */
959 os_get_csp(struct thread* th)
961 FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
963 th->csp_around_foreign_call,
964 *(void***)th->csp_around_foreign_call,
965 th->control_stack_start,
966 th->control_stack_end));
967 return *(void***)th->csp_around_foreign_call;
971 #ifndef LISP_FEATURE_WIN32
973 # ifdef LISP_FEATURE_SB_THRUPTION
/* thruption_handler — signal handler for the wake-up signal.  Braces
 * and the early `return' after the CSP test are elided in this
 * listing.  A NULL CSP means the thread is in Lisp: defer to the next
 * safepoint.  In C, deliver via a fake foreign-function call. */
975 thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
977 struct thread *self = arch_os_get_current_thread();
979 if (!os_get_csp(self))
980 /* In Lisp code. Do not run thruptions asynchronously. The
981 * next safepoint will take care of it. */
984 /* In C code. As a rule, we assume that running thruptions is OK. */
985 fake_foreign_function_call(ctx);
986 thread_in_safety_transition(ctx);
987 undo_fake_foreign_function_call(ctx);
991 /* Designed to be of the same type as call_into_lisp. Ignores its
/* NOTE(review): return types, braces, and the `#endif' lines closing
 * each trap-number check are elided.  Each stub emits an int3
 * breakpoint followed by the trap code byte; the #if guards keep the
 * hard-coded bytes in sync with the trap_* constants. */
994 handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
996 #if trap_GlobalSafepoint != 0x1a
997 # error trap_GlobalSafepoint mismatch
999 asm("int3; .byte 0x1a;");
1004 handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
1006 #if trap_CspSafepoint != 0x1b
1007 # error trap_CspSafepoint mismatch
1009 asm("int3; .byte 0x1b;");
/* handle_safepoint_violation(ctx, fault_address) fragment — return
 * type, braces, and the `return 1;' / `return 0;' lines after each
 * branch are elided in this listing.  Classifies a fault as a hit on
 * the global safepoint page or on this thread's CSP page and arranges
 * a return to the matching trap stub; anything else is not a
 * safepoint fault. */
1014 handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
1016 FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
1018 GC_SAFEPOINT_PAGE_ADDR,
1019 arch_os_get_current_thread()->csp_around_foreign_call));
1021 struct thread *self = arch_os_get_current_thread();
1023 if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
1024 /* We're on the altstack and don't want to run Lisp code. */
1025 arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
1029 if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
1030 arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
1034 /* not a safepoint */
1037 #endif /* LISP_FEATURE_WIN32 */
/* callback_wrapper_trampoline fragment — return type, braces, and the
 * `if (!th)' guard before the lose() call are elided in this listing.
 * Enters Lisp from a foreign callback via ENTER-ALIEN-CALLBACK,
 * allowing GC only at safepoints while doing so. */
1040 callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
1042 struct thread* th = arch_os_get_current_thread();
1044 lose("callback invoked in non-lisp thread. Sorry, that is not supported yet.");
1046 WITH_GC_AT_SAFEPOINTS_ONLY()
1047 funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
1050 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */