/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

#include "sbcl.h"
#ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */

#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif
#include "runtime.h"
#include "thread.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "interrupt.h"
#include "lispregs.h"
#if !defined(LISP_FEATURE_WIN32)
/* win32-os.c covers these, but there is no unixlike-os.c, so the normal
 * definition goes here.  Fixme: (Why) don't these work for Windows?
 */
void
map_gc_page()
{
    odxprint(misc, "map_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
               4,
               OS_VM_PROT_READ | OS_VM_PROT_WRITE);
}

void
unmap_gc_page()
{
    odxprint(misc, "unmap_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
}
#endif /* !LISP_FEATURE_WIN32 */
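
/* Usage sketch (illustrative only, not called from here): the STW
 * initiator below pairs these two calls so that every thread polling
 * the common safepoint page from Lisp code traps while the page is
 * unmapped, and stops trapping once it is remapped:
 *
 *     unmap_gc_page();   // pollers start faulting into the safepoint code
 *     ... examine / park the other threads ...
 *     map_gc_page();     // pollers run freely again
 */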

static inline int
thread_may_gc()
{
    /* Thread may gc if all of these are true:
     * 1) GC_INHIBIT == NIL  (outside of protected part of without-gcing)
     * 2) GC_PENDING != :in-progress    (outside of recursion protection)
     * Note that we are in a safepoint here, which is always outside of PA. */

    struct thread *self = arch_os_get_current_thread();
    return (SymbolValue(GC_INHIBIT, self) == NIL
            && (SymbolTlValue(GC_PENDING, self) == T ||
                SymbolTlValue(GC_PENDING, self) == NIL));
}

#ifdef LISP_FEATURE_SB_THRUPTION
static inline int
thread_may_thrupt(os_context_t *ctx)
{
    struct thread * self = arch_os_get_current_thread();
    /* Thread may be interrupted if all of these are true:
     * 1) Deferrables are unblocked in the context of the signal that
     *    went into the safepoint.  -- Otherwise the surrounding code
     *    didn't want to be interrupted by a signal, so presumably it didn't
     *    want to be INTERRUPT-THREADed either.
     *    (See interrupt_handle_pending for an exception.)
     * 2) On POSIX: There is no pending signal.  This is important even
     *    after checking the sigmask, since we could be in the
     *    handle_pending trap following re-enabling of interrupts.
     *    Signals are unblocked in that case, but the signal is still
     *    pending; we want to run GC before handling the signal and
     *    therefore entered this safepoint.  But the thruption would call
     *    ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
     *    trap, leading to recursion.
     * 3) INTERRUPTS_ENABLED is non-nil.
     * 4) No GC pending; it takes precedence.
     * Note that we are in a safepoint here, which is always outside of PA. */

    if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
        return 0;

    if (SymbolValue(GC_PENDING, self) != NIL)
        return 0;

    if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
        return 0;

#ifdef LISP_FEATURE_WIN32
    if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
        return 0;
#else
    /* ctx is NULL if the caller wants to ignore the sigmask. */
    if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
        return 0;
    if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
        return 0;
#endif

    if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
        /* This special case prevents TERMINATE-THREAD from hitting
         * during INITIAL-THREAD-FUNCTION before it's ready.  Curiously,
         * deferrables are already unblocked there.  Further
         * investigation may be in order. */
        return 0;

    return 1;
}

// returns 0 if skipped, 1 otherwise
int
check_pending_thruptions(os_context_t *ctx)
{
    struct thread *p = arch_os_get_current_thread();

#ifdef LISP_FEATURE_WIN32
    pthread_t pself = p->os_thread;
    sigset_t oldset;
    /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
     * in the self-kill case; instead we do it here while also clearing the
     * pending signal. */
    if (pself->pending_signal_set)
        if (__sync_fetch_and_and(&pself->pending_signal_set,0))
            SetSymbolValue(THRUPTION_PENDING, T, p);
#endif

    if (!thread_may_thrupt(ctx))
        return 0;
    if (SymbolValue(THRUPTION_PENDING, p) == NIL)
        return 0;
    SetSymbolValue(THRUPTION_PENDING, NIL, p);

#ifdef LISP_FEATURE_WIN32
    oldset = pself->blocked_signal_set;
    pself->blocked_signal_set = deferrable_sigset;
    if (ctx) fake_foreign_function_call(ctx);
#else
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);
#endif

    funcall0(StaticSymbolFunction(RUN_INTERRUPTION));

#ifdef LISP_FEATURE_WIN32
    if (ctx) undo_fake_foreign_function_call(ctx);
    pself->blocked_signal_set = oldset;
    if (ctx) ctx->sigmask = oldset;
#else
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
#endif

    return 1;
}
#endif /* LISP_FEATURE_SB_THRUPTION */
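
/* Call-pattern sketch (mirroring the uses further down in this file):
 * callers drain thruptions in a loop, since running one interruption
 * may queue another, and they pass a NULL context when the caller's
 * sigmask should be ignored:
 *
 *     while (check_pending_thruptions(ctx));   // at a safepoint, with a context
 *     while (check_pending_thruptions(0));     // NULL context: skip the sigmask check
 */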

int
on_stack_p(struct thread *th, void *esp)
{
    return (void *)th->control_stack_start <= esp
        && esp < (void *)th->control_stack_end;
}

#ifndef LISP_FEATURE_WIN32
/* (Technically, we still allocate an altstack even on Windows.  Since
 * Windows has a contiguous stack with an automatic guard page of
 * user-configurable size instead of an alternative stack though, the
 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
int
on_altstack_p(struct thread *th, void *esp)
{
    void *start = (void *)th + dynamic_values_bytes;
    void *end = (char *)start + 32*SIGSTKSZ;
    return start <= esp && esp < end;
}
#endif

void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}

// returns 0 if skipped, 1 otherwise
int
check_pending_gc(os_context_t *ctx)
{
    odxprint(misc, "check_pending_gc");
    struct thread * self = arch_os_get_current_thread();
    int done = 0;
    sigset_t sigset;

    if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
        ((SymbolValue(GC_INHIBIT,self) == NIL) &&
         (SymbolValue(GC_PENDING,self) == NIL))) {
        SetSymbolValue(IN_SAFEPOINT,NIL,self);
    }
    if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
        if ((SymbolTlValue(GC_PENDING, self) == T)) {
            lispobj gc_happened = NIL;

            bind_variable(IN_SAFEPOINT,T,self);
            block_deferrable_signals(NULL,&sigset);
            if (SymbolTlValue(GC_PENDING,self)==T)
                gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
            unbind_variable(IN_SAFEPOINT,self);
            thread_sigmask(SIG_SETMASK,&sigset,NULL);
            if (gc_happened == T) {
                /* POST_GC wants to enable interrupts */
                if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
                    SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
                    odxprint(misc, "going to call POST_GC");
                    funcall0(StaticSymbolFunction(POST_GC));
                }
                done = 1;
            }
        }
    }
    return done;
}

/* Several ideas for interthread signalling could be tried.  The
   implementation below was chosen for its moderate size and
   simplicity.

   A mutex is the only (conventional) system synchronization primitive
   used by it.  Some of the code below looks weird with this
   limitation; rwlocks, Windows Event Objects, or perhaps pthread
   barriers could be used to improve clarity.

   No condvars here: our pthreads_win32 is great, but it doesn't
   provide the wait-morphing optimization; let's avoid extra context
   switches and extra contention. */

struct gc_dispatcher {

    /* Held by the first thread that decides to signal all others, for
       the entire period while the common GC safepoint page is
       unmapped.  This thread is called the `STW (stop-the-world)
       initiator' below. */
    pthread_mutex_t mx_gpunmapped;

    /* Held by STW initiator while it updates th_stw_initiator and
       takes other locks in this structure */
    pthread_mutex_t mx_gptransition;

    /* Held by STW initiator until the world should be started (GC
       complete, thruptions delivered). */
    pthread_mutex_t mx_gcing;

    /* Held by a SUB-GC's gc_stop_the_world() when the thread in SUB-GC
       holds the GC Lisp-level mutex, but _couldn't_ become STW
       initiator (i.e. another thread is already stopping the
       world). */
    pthread_mutex_t mx_subgc;

    /* First thread (at this round) that decided to stop the world */
    struct thread *th_stw_initiator;

    /* Thread running SUB-GC under the `supervision' of the STW
       initiator */
    struct thread *th_subgc;

    /* Stop counter.  Nested gc-stop-the-world and gc-start-the-world
       work without thundering herd. */
    int stopped;

    /* Thruption flag: Iff true, current STW initiator is delivering
       thruptions and not GCing. */
    boolean thruption;

} gc_dispatcher = {
    /* mutexes lazy initialized, other data initially zeroed */
    .mx_gpunmapped = PTHREAD_MUTEX_INITIALIZER,
    .mx_gptransition = PTHREAD_MUTEX_INITIALIZER,
    .mx_gcing = PTHREAD_MUTEX_INITIALIZER,
    .mx_subgc = PTHREAD_MUTEX_INITIALIZER,
};
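
/* Lock-protocol sketch (a summary of maybe_become_stw_initiator below,
 * for orientation only):
 *
 *     pthread_mutex_lock(&gc_dispatcher.mx_gptransition);   // serialize initiator changes
 *     gc_dispatcher.th_stw_initiator = self;
 *     pthread_mutex_lock(&gc_dispatcher.mx_gcing);          // held until the world restarts
 *     pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);     // held while the GC page is unmapped
 *     unmap_gc_page();
 *     pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
 */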

/* set_thread_csp_access -- alter page permissions for the not-in-Lisp
   flag (Lisp Stack Top) of the thread `p'.  The flag may be modified
   if `writable' is true.

   Return true if there is a non-null value in the flag.

   When a thread enters C code or leaves it, a per-thread location is
   modified.  That machine word serves as a not-in-Lisp flag; for
   convenience, when in C, it's filled with a topmost stack location
   that may contain Lisp data.  When the thread is in Lisp, the word
   contains NULL.

   GENCGC uses each thread's flag value for conservative garbage collection.

   There is a full VM page reserved for this word; page permissions
   are switched to read-only for race-free examine + wait + use
   scenarios. */
static inline boolean
set_thread_csp_access(struct thread* p, boolean writable)
{
    os_protect((os_vm_address_t) p->csp_around_foreign_call,
               THREAD_CSP_PAGE_SIZE,
               writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
               : (OS_VM_PROT_READ));
    return !!*p->csp_around_foreign_call;
}
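
/* Sketch of the race-free "examine + wait + use" pattern this enables
 * (see gc_stop_the_world below): with writes to the csp page revoked,
 * a non-zero csp word cannot be cleared behind our back, so the thread
 * is pinned in C and its published stack top may be used; a zero word
 * means the thread is in Lisp and will trap at its next safepoint:
 *
 *     if (!set_thread_csp_access(p, 0))        // read-only now; zero => in Lisp
 *         pthread_mutex_lock(thread_qrl(p));   // wait for it to reach a safepoint
 *     else
 *         ... p runs in C; p->csp_around_foreign_call stays valid ...
 */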

/* maybe_become_stw_initiator -- if there is no stop-the-world action
   in progress, begin it by unmapping the GC page, and record the current
   thread as STW initiator.

   The `thruption' flag affects some subtleties of stop/start methods:
   waiting for other threads allowing GC; setting and clearing
   STOP_FOR_GC_PENDING, GC_PENDING, THRUPTION_PENDING, etc.

   Return true if the current thread becomes a GC initiator, or already
   _is_ the STW initiator.

   Unlike gc_stop_the_world and gc_start_the_world (that should be
   used in matching pairs), maybe_become_stw_initiator is idempotent
   within a stop-restart cycle.  With this call, a thread may `reserve
   the right' to stop the world as early as it wants. */
static inline boolean
maybe_become_stw_initiator(boolean thruption)
{
    struct thread* self = arch_os_get_current_thread();

    /* Double-checked locking.  Possible word tearing on some
       architectures, FIXME FIXME, but let's think of it when GENCGC
       and threaded SBCL is ported to them. */
    if (!gc_dispatcher.th_stw_initiator) {
        odxprint(misc,"NULL STW BEFORE GPTRANSITION");
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        /* We hold mx_gptransition.  Is there no STW initiator yet? */
        if (!gc_dispatcher.th_stw_initiator) {
            odxprint(misc,"NULL STW IN GPTRANSITION, REPLACING");
            gc_dispatcher.th_stw_initiator = self;
            gc_dispatcher.thruption = thruption;

            /* hold mx_gcing until we restart the world */
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);

            /* and mx_gpunmapped until we remap common GC page */
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);

            /* we unmap it; other threads running Lisp code will now
               trap at their safepoints. */
            unmap_gc_page();

            /* stop counter; the world is not stopped yet. */
            gc_dispatcher.stopped = 0;
        }
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
    }
    return gc_dispatcher.th_stw_initiator == self;
}
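
/* Usage sketch (cf. thread_register_gc_trigger below): a thread may
 * reserve the initiator role early and perform the actual stop later;
 * within one stop/restart cycle the call is idempotent:
 *
 *     maybe_become_stw_initiator(0);   // unmap the GC page, claim the role
 *     ...
 *     gc_stop_the_world();
 *     ... collect ...
 *     gc_start_the_world();            // releases the role via maybe_let_the_world_go()
 */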

/* maybe_let_the_world_go -- if the current thread is the STW initiator,
   unlock internal GC structures, and return true. */
static inline boolean
maybe_let_the_world_go()
{
    struct thread* self = arch_os_get_current_thread();
    if (gc_dispatcher.th_stw_initiator == self) {
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        if (gc_dispatcher.th_stw_initiator == self) {
            gc_dispatcher.th_stw_initiator = NULL;
        }
        pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
        return 1;
    } else {
        return 0;
    }
}

/* gc_stop_the_world -- become STW initiator (waiting for other GCs to
   complete if necessary), and make sure all other threads are either
   stopped or gc-safe (i.e. running foreign calls).

   If a GC initiator already exists, gc_stop_the_world() either waits
   for its completion, or cooperates with it: e.g. a concurrent pending
   thruption handler allows (SUB-GC) to complete under its
   supervision.

   Code sections bounded by gc_stop_the_world and gc_start_the_world
   may be nested; inner calls don't stop or start threads,
   decrementing or incrementing the stop counter instead. */
void
gc_stop_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean thruption;
    if (SymbolTlValue(GC_INHIBIT,self)!=T) {
        /* If GC is enabled, this thread may wait for the current STW
           initiator without causing deadlock. */
        if (!maybe_become_stw_initiator(0)) {
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            maybe_become_stw_initiator(0);
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        }
        /* Now _this thread_ should be STW initiator */
        gc_assert(self == gc_dispatcher.th_stw_initiator);
    } else {
        /* GC inhibited; e.g. we are inside SUB-GC */
        if (!maybe_become_stw_initiator(0)) {
            /* Some trouble.  Inside SUB-GC, holding the Lisp-side
               mutex, but some other thread is stopping the world. */
            if (gc_dispatcher.thruption) {
                /* Thruption.  Wait until it's delivered */
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                /* Warning: mx_gcing is held recursively. */
                gc_assert(maybe_become_stw_initiator(0));
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
            } else {
                /* In SUB-GC, holding the mutex; another thread wants
                   to GC. */
                if (gc_dispatcher.th_subgc == self) {
                    /* There is an outer gc_stop_the_world() by _this_
                       thread, running subordinately to the initiator.
                       Just increase the stop counter. */
                    ++gc_dispatcher.stopped;
                    return;
                }
                /* Register as subordinate collector thread: take
                   mx_subgc. */
                pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                ++gc_dispatcher.stopped;
                gc_dispatcher.th_subgc = self;

                /* Unlocking thread's own thread_qrl() designates
                   `time to examine me' to other threads. */
                pthread_mutex_unlock(thread_qrl(self));

                /* STW (GC) initiator thread will see our thread needs
                   to finish GC.  It will stop the world and itself,
                   and unlock its qrl. */
                pthread_mutex_lock(thread_qrl(gc_dispatcher.th_stw_initiator));
                return;
            }
        }
    }
    thruption = gc_dispatcher.thruption; /* Thruption or GC? */
    if (!gc_dispatcher.stopped++) {
        /* Outermost stop: signal other threads */
        pthread_mutex_lock(&all_threads_lock);
        /* Phase 1: ensure all threads are aware of the need to stop,
           or locked in the foreign code. */
        for_each_thread(p) {
            pthread_mutex_t *p_qrl = thread_qrl(p);
            if (p==self)
                continue;

            /* Read-protect p's flag */
            if (!set_thread_csp_access(p,0)) {
                odxprint(safepoints,"taking qrl %p of %p", p_qrl, p);
                /* Thread is in Lisp, so it should trap (either in
                   Lisp or in the Lisp->FFI transition).  The trap handler
                   unlocks thread_qrl(p); when that happens, we're safe
                   to examine that thread. */
                pthread_mutex_lock(p_qrl);
                odxprint(safepoints,"taken qrl %p of %p", p_qrl, p);
                /* Mark thread for the future: should we collect, or
                   wait for its final permission? */
                if (SymbolTlValue(GC_INHIBIT,p)!=T) {
                    SetTlSymbolValue(GC_SAFE,T,p);
                } else {
                    SetTlSymbolValue(GC_SAFE,NIL,p);
                }
                pthread_mutex_unlock(p_qrl);
            } else {
                /* In C; we just disabled writing. */
                if (!thruption) {
                    if (SymbolTlValue(GC_INHIBIT,p)==T) {
                        /* GC inhibited there */
                        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
                        /* Enable writing.  Such threads trap by
                           pending thruption when the WITHOUT-GCING
                           section ends. */
                        set_thread_csp_access(p,1);
                        SetTlSymbolValue(GC_SAFE,NIL,p);
                    } else {
                        /* Thread allows concurrent GC.  It runs in C
                           (not a mutator), its in-Lisp flag is
                           read-only (so it traps on return). */
                        SetTlSymbolValue(GC_SAFE,T,p);
                    }
                }
            }
        }
        /* All threads are ready (GC_SAFE == T) or notified (GC_SAFE == NIL). */
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        /* Threads with GC inhibited -- continued */
        odxprint(safepoints,"after remapping GC page %p",self);

        SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
        /* Phase 2: wait for the remaining threads to park (or to have
           registered a manually-invoked GC of their own). */
        struct thread* priority_gc = NULL;
        for_each_thread(p) {
            if (p==self)
                continue;
            if (SymbolTlValue(GC_SAFE,p)!=T) {
                /* Wait for thread to `park'.  NB it _always_ does
                   it with a pending interrupt trap, so CSP locking is
                   not needed here. */
                odxprint(safepoints,"waiting final parking %p (qrl %p)",p, thread_qrl(p));
                pthread_mutex_lock(thread_qrl(p));
                if (SymbolTlValue(GC_INHIBIT,p)==T) {
                    /* Concurrent GC invoked manually */
                    gc_assert(!priority_gc); /* Should be at most one at a time */
                    priority_gc = p;
                }
                pthread_mutex_unlock(thread_qrl(p));
            }
            if (!os_get_csp(p))
                lose("gc_stop_the_world: no SP in parked thread: %p", p);
        }
        if (priority_gc) {
            /* This thread is managing the entire process, so it
               has to allow the manually-invoked GC to complete */
            if (!set_thread_csp_access(self,1)) {
                /* Initiator is in Lisp: publish a stack top of its own. */
                *self->csp_around_foreign_call = (lispobj)__builtin_frame_address(0);
                /* Let the priority GC see us parked... */
                pthread_mutex_unlock(thread_qrl(self));
                /* Priority GC should take over, holding
                   mx_subgc until it's done. */
                pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                /* ...then take our qrl back. */
                pthread_mutex_lock(thread_qrl(self));
                *self->csp_around_foreign_call = 0;
                SetTlSymbolValue(GC_PENDING,NIL,self);
                pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
            } else {
                /* Initiator is in C; its stack top is already published. */
                pthread_mutex_unlock(thread_qrl(self));
                /* Priority GC should take over, holding
                   mx_subgc until it's done. */
                pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                /* Done; reclaim our qrl. */
                pthread_mutex_lock(thread_qrl(self));
                SetTlSymbolValue(GC_PENDING,NIL,self);
                pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
            }
        }
    }
}
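
/* Nesting sketch (cf. the comment above gc_stop_the_world): only the
 * outermost pair actually signals and later releases the other
 * threads; inner pairs merely adjust gc_dispatcher.stopped:
 *
 *     gc_stop_the_world();      // stopped: 0 -> 1, world stops
 *       gc_stop_the_world();    // stopped: 1 -> 2, no further signalling
 *       gc_start_the_world();   // stopped: 2 -> 1
 *     gc_start_the_world();     // stopped: 1 -> 0, world resumes
 */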

/* gc_start_the_world() -- restart all other threads if the call
   matches the _outermost_ gc_stop_the_world(), or decrement the stop
   counter otherwise. */
void
gc_start_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean thruption = gc_dispatcher.thruption;
    if (gc_dispatcher.th_stw_initiator != self) {
        odxprint(misc,"Unmapper %p self %p",gc_dispatcher.th_stw_initiator,self);
        gc_assert (gc_dispatcher.th_subgc == self);
        if (--gc_dispatcher.stopped == 1) {
            gc_dispatcher.th_subgc = NULL;
            pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
            /* GC initiator may continue now */
            pthread_mutex_unlock(thread_qrl(gc_dispatcher.th_stw_initiator));
        }
        return;
    }

    gc_assert(gc_dispatcher.th_stw_initiator == self);

    if (!--gc_dispatcher.stopped) {
        for_each_thread(p) {
            if (!thruption) {
                SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,p);
                SetTlSymbolValue(GC_PENDING,NIL,p);
            }
            if (
#ifdef LISP_FEATURE_SB_THRUPTION
                SymbolTlValue(THRUPTION_PENDING,p)!=T
#else
                1 /* trivially no thruption pending */
#endif
                || SymbolTlValue(INTERRUPTS_ENABLED,p)!=T)
                set_thread_csp_access(p,1);
        }
        pthread_mutex_unlock(&all_threads_lock);
        /* Release everyone */
        maybe_let_the_world_go();
    }
}

/* in_race_p() -- return TRUE if some other thread is inside SUB-GC with
   GC-PENDING :IN-PROGRESS.  Used to prevent deadlock between manual
   SUB-GC, auto-gc and thruption. */
static inline boolean
in_race_p()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean result = 0;
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (p != self &&
            SymbolTlValue(GC_PENDING,p)!=T &&
            SymbolTlValue(GC_PENDING,p)!=NIL) {
            result = 1;
            break;
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
    if (result) {
        /* Back out: remap the GC page and release the initiator role. */
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        maybe_let_the_world_go();
    }
    return result;
}

static inline void
set_csp_from_context(struct thread *self, os_context_t *ctx)
{
    void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
    /* On POSIX platforms, it is sufficient to investigate only the part
     * of the stack that was live before the interrupt, because in
     * addition, we consider interrupt contexts explicitly.  On Windows,
     * however, we do not keep an explicit stack of exception contexts,
     * and instead arrange for the conservative stack scan to also cover
     * the context implicitly.  The obvious way to do that is to start
     * at the context itself: */
#ifdef LISP_FEATURE_WIN32
    gc_assert((void **) ctx < sp);
    sp = (void **) ctx;
#endif
    gc_assert((void **)self->control_stack_start
              <= sp && sp
              < (void **)self->control_stack_end);
    *self->csp_around_foreign_call = (lispobj) sp;
}

static void
thread_pitstop(os_context_t *ctxptr)
{
    struct thread* self = arch_os_get_current_thread();
    boolean inhibitor = (SymbolTlValue(GC_INHIBIT,self)==T);

    odxprint(safepoints,"pitstop [%p]", ctxptr);
    if (inhibitor) {
        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        /* Free qrl to let know we're ready... */
        WITH_STATE_SEM(self) {
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
        /* Enable FF-CSP recording (doesn't hurt: will gc at pit-stop, and
           pit-stop always waits for GC end) */
        set_thread_csp_access(self,1);
    } else {
        if (self == gc_dispatcher.th_stw_initiator && gc_dispatcher.stopped) {
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            return;
        }
        if ((SymbolTlValue(GC_PENDING,self)!=NIL) &&
            maybe_become_stw_initiator(0) && !in_race_p()) {
            gc_stop_the_world();
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            gc_start_the_world();
        } else {
            /* An innocent thread which is not an initiator _and_ is
               not starting a GC of its own: just park and wait. */
            odxprint(safepoints,"pitstop yielding [%p]", ctxptr);
            if (!set_thread_csp_access(self,1)) {
                if (os_get_csp(self))
                    lose("thread_pitstop: would lose csp");
                set_csp_from_context(self, ctxptr);
                pthread_mutex_unlock(thread_qrl(self));
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                *self->csp_around_foreign_call = 0;
                pthread_mutex_lock(thread_qrl(self));
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
            } else {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                set_thread_csp_access(self,1);
                WITH_GC_AT_SAFEPOINTS_ONLY() {
                    pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
#ifdef LISP_FEATURE_SB_THRUPTION
                    while (check_pending_thruptions(ctxptr))
                        ;
#endif
                }
                return;
            }
        }
    }
#ifdef LISP_FEATURE_SB_THRUPTION
    while(check_pending_thruptions(ctxptr));
#endif
}
static void
thread_edge(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();
    set_thread_csp_access(self,1);
    if (os_get_csp(self)) {
        if (!self->pc_around_foreign_call)
            return;             /* trivialize */
        odxprint(safepoints,"edge leaving [%p]", ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
#ifdef LISP_FEATURE_SB_THRUPTION
            if (SymbolTlValue(THRUPTION_PENDING,self)==T &&
                SymbolTlValue(INTERRUPTS_ENABLED,self)==T) {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                set_thread_csp_access(self,1);
                WITH_GC_AT_SAFEPOINTS_ONLY() {
                    pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                    while (check_pending_thruptions(ctxptr))
                        ;
                }
            } else
#endif
            {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] took gcing", ctxptr);
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] released gcing", ctxptr);
            }
        }
    } else {
        odxprint(safepoints,"edge entering [%p]", ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr))
            ;
#endif
        if (os_get_csp(self))
            lose("thread_edge: would lose csp");
        set_csp_from_context(self, ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        } else {
            SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
    }
}

/* thread_register_gc_trigger --

   Called by GENCGC in each thread where GC_PENDING becomes T because
   allocated memory size has crossed the threshold in
   auto_gc_trigger.  For the new collective GC sequence, its first call
   marks a process-wide beginning of GC.
*/
void
thread_register_gc_trigger()
{
    odxprint(misc, "/thread_register_gc_trigger");
    struct thread* self = arch_os_get_current_thread();
    /* This function should be called instead of the former
       set_pseudo_atomic_interrupted(), i.e. never with true
       GC_INHIBIT */
    gc_assert(SymbolTlValue(GC_INHIBIT,self)!=T);

    /* unmap GC page, signal other threads... */
    maybe_become_stw_initiator(0);
}

#ifdef LISP_FEATURE_SB_THRUPTION
/* wake_thread(thread) -- ensure a thruption delivery to
 * `thread'. */

# ifdef LISP_FEATURE_WIN32

void
wake_thread_io(struct thread * thread)
{
    SetEvent(thread->private_events.events[1]);
}

void
wake_thread_win32(struct thread *thread)
{
    wake_thread_io(thread);

    if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
        return;

    SetTlSymbolValue(THRUPTION_PENDING,T,thread);

    if ((SymbolTlValue(GC_PENDING,thread)==T)||
        (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
        return;

    wake_thread_io(thread);
    pthread_mutex_unlock(&all_threads_lock);

    if (maybe_become_stw_initiator(1) && !in_race_p()) {
        gc_stop_the_world();
        gc_start_the_world();
    }
    pthread_mutex_lock(&all_threads_lock);
    return;
}
# else
int
wake_thread_posix(os_thread_t os_thread)
{
    int found = 0;
    struct thread *thread;
    struct thread *self = arch_os_get_current_thread();

    /* Must not and need not attempt to signal ourselves while we're the
     * thread to be woken. */
    if (self->os_thread == os_thread) {
        SetTlSymbolValue(THRUPTION_PENDING,T,self);
        WITH_GC_AT_SAFEPOINTS_ONLY()
            while (check_pending_thruptions(0 /* ignore the sigmask */))
                ;
        return 0;
    }

    /* We are not in a signal handler here, so need to block signals
     * manually. */
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);

    if (!maybe_become_stw_initiator(1) || in_race_p()) {
        /* we are not able to wake the thread up, but the STW initiator
         * will take care of it (kludge: unless it is in foreign code).
         * Let's at least try to get our return value right. */
        pthread_mutex_lock(&all_threads_lock);
        for_each_thread (thread)
            if (thread->os_thread == os_thread) {
                found = 1;
                break;
            }
        pthread_mutex_unlock(&all_threads_lock);
    } else {
        gc_stop_the_world();

        /* we hold the all_threads lock */
        for_each_thread (thread)
            if (thread->os_thread == os_thread) {
                /* it's still alive... */
                found = 1;

                SetTlSymbolValue(THRUPTION_PENDING,T,thread);
                if (SymbolTlValue(GC_PENDING,thread) == T
                    || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
                    break;

                if (os_get_csp(thread)) {
                    /* ... and in foreign code.  Push it into a safety
                     * transition. */
                    int status = pthread_kill(os_thread, SIGPIPE);
                    if (status)
                        lose("wake_thread_posix: pthread_kill failed with %d\n",
                             status);
                }
                break;
            }

        /* If it was alive but in Lisp, the pit stop takes care of thruptions. */
        gc_start_the_world();
    }

    pthread_sigmask(SIG_SETMASK, &oldset, 0);
    return found ? 0 : -1;
}
#endif /* !LISP_FEATURE_WIN32 */
#endif /* LISP_FEATURE_SB_THRUPTION */

void
thread_in_safety_transition(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_safety_transition\n"));
    thread_edge(ctx);
}

void
thread_in_lisp_raised(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_lisp_raised\n"));
    thread_pitstop(ctx);
}

void
thread_interrupted(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_interrupted\n"));
    thread_pitstop(ctx);
}

void**
os_get_csp(struct thread* th)
{
    FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
                  th,
                  th->csp_around_foreign_call,
                  *(void***)th->csp_around_foreign_call,
                  th->control_stack_start,
                  th->control_stack_end));
    return *(void***)th->csp_around_foreign_call;
}

#ifndef LISP_FEATURE_WIN32

# ifdef LISP_FEATURE_SB_THRUPTION
void
thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
{
    struct thread *self = arch_os_get_current_thread();

    if (!os_get_csp(self))
        /* In Lisp code.  Do not run thruptions asynchronously.  The
         * next safepoint will take care of it. */
        return;

    /* In C code.  As a rule, we assume that running thruptions is OK. */
    fake_foreign_function_call(ctx);
    thread_in_safety_transition(ctx);
    undo_fake_foreign_function_call(ctx);
}
# endif

/* Designed to be of the same type as call_into_lisp.  Ignores its
 * arguments. */
lispobj
handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_GlobalSafepoint != 0x1a
# error trap_GlobalSafepoint mismatch
#endif
    asm("int3; .byte 0x1a;");
    return 0;
}

lispobj
handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_CspSafepoint != 0x1b
# error trap_CspSafepoint mismatch
#endif
    asm("int3; .byte 0x1b;");
    return 0;
}

int
handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
{
    FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
                  fault_address,
                  GC_SAFEPOINT_PAGE_ADDR,
                  arch_os_get_current_thread()->csp_around_foreign_call));

    struct thread *self = arch_os_get_current_thread();

    if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
        /* We're on the altstack and don't want to run Lisp code. */
        arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
        return 1;
    }

    if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
        arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
        return 1;
    }

    /* not a safepoint */
    return 0;
}
#endif /* LISP_FEATURE_WIN32 */

void
callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
{
    struct thread* th = arch_os_get_current_thread();
    if (!th)
        lose("callback invoked in non-lisp thread.  Sorry, that is not supported yet.");

    WITH_GC_AT_SAFEPOINTS_ONLY()
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
}

#endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */