/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif
#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#endif
#include "target-arch-os.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "gc-internal.h"
#include "pseudo-atomic.h"
#include "interrupt.h"
/* Temporarily, this macro is a wrapper for FSHOW_SIGNAL. Ultimately,
 * it will be restored to its full win32 branch functionality, where it
 * provides a very useful tracing mechanism that is configurable at
 * runtime. */
#define odxprint_show(what, fmt, args...)                        \
    do {                                                          \
        struct thread *__self = arch_os_get_current_thread();    \
        FSHOW_SIGNAL((stderr, "[%p/%p:%s] " fmt "\n",             \
                      __self, __self->os_thread, #what, ##args)); \
    } while (0)

#if QSHOW_SIGNALS
# define odxprint odxprint_show
#else
# define odxprint(what, fmt, args...) do {} while (0)
#endif
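/* Example uses, as they appear later in this file: the first argument
 * names a tracing category, the rest is a printf-style format string
 * and its arguments, e.g.
 *     odxprint(misc, "check_pending_gc");
 *     odxprint(safepoints, "taking qrl %p of %p", p_qrl, p);
 */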
#if !defined(LISP_FEATURE_WIN32)
/* win32-os.c covers these, but there is no unixlike-os.c, so the normal
 * definition goes here.  Fixme: (Why) don't these work for Windows? */
void map_gc_page()
{
    odxprint(misc, "map_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4,
               OS_VM_PROT_READ | OS_VM_PROT_WRITE);
}

void unmap_gc_page()
{
    odxprint(misc, "unmap_gc_page");
    os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
}
#endif /* !LISP_FEATURE_WIN32 */
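/* The page at GC_SAFEPOINT_PAGE_ADDR is the global safepoint page.
 * While unmap_gc_page() keeps it at OS_VM_PROT_NONE, threads running
 * Lisp code fault on it at their next safepoint and end up in
 * handle_safepoint_violation() below; map_gc_page() restores access
 * so that safepoints become no-ops again. */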
static inline int
thread_may_gc()
{
    /* Thread may gc if all of these are true:
     * 1) GC_INHIBIT == NIL   (outside of protected part of without-gcing)
     * 2) GC_PENDING != :in-progress   (outside of recursion protection)
     * Note that we are in a safepoint here, which is always outside of PA. */
    struct thread *self = arch_os_get_current_thread();
    return (SymbolValue(GC_INHIBIT, self) == NIL
            && (SymbolTlValue(GC_PENDING, self) == T ||
                SymbolTlValue(GC_PENDING, self) == NIL));
}
#ifdef LISP_FEATURE_SB_THRUPTION
static inline int
thread_may_thrupt(os_context_t *ctx)
{
    struct thread * self = arch_os_get_current_thread();
    /* Thread may be interrupted if all of these are true:
     * 1) Deferrables are unblocked in the context of the signal that
     *    went into the safepoint. -- Otherwise the surrounding code
     *    didn't want to be interrupted by a signal, so presumably it didn't
     *    want to be INTERRUPT-THREADed either.
     *    (See interrupt_handle_pending for an exception.)
     * 2) On POSIX: There is no pending signal. This is important even
     *    after checking the sigmask, since we could be in the
     *    handle_pending trap following re-enabling of interrupts.
     *    Signals are unblocked in that case, but the signal is still
     *    pending; we want to run GC before handling the signal and
     *    therefore entered this safepoint. But the thruption would call
     *    ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
     *    trap, leading to recursion.
     * 3) INTERRUPTS_ENABLED is non-nil.
     * 4) No GC pending; it takes precedence.
     * Note that we are in a safepoint here, which is always outside of PA. */

    if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
        return 0;

    if (SymbolValue(GC_PENDING, self) != NIL)
        return 0;

    if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
        return 0;

#ifdef LISP_FEATURE_WIN32
    if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
        return 0;
#else
    /* ctx is NULL if the caller wants to ignore the sigmask. */
    if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
        return 0;
    if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
        return 0;
#endif

    if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
        /* This special case prevents TERMINATE-THREAD from hitting
         * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
         * deferrables are already unblocked there. Further
         * investigation may be in order. */
        return 0;

    return 1;
}
// returns 0 if skipped, 1 otherwise
int
check_pending_thruptions(os_context_t *ctx)
{
    struct thread *p = arch_os_get_current_thread();
    gc_assert(!os_get_csp(p));
    if (!thread_may_thrupt(ctx))
        return 0;
    if (SymbolValue(THRUPTION_PENDING, p) == NIL)
        return 0;
    SetSymbolValue(THRUPTION_PENDING, NIL, p);

    sigset_t oldset;
    block_deferrable_signals(0, &oldset);
    funcall0(StaticSymbolFunction(RUN_INTERRUPTION));
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
    return 1;
}
#endif
int
on_stack_p(struct thread *th, void *esp)
{
    return (void *)th->control_stack_start
        <= esp && esp < (void *)th->control_stack_end;
}
#ifndef LISP_FEATURE_WIN32
/* (Technically, we still allocate an altstack even on Windows.  Since
 * Windows has a contiguous stack with an automatic guard page of
 * user-configurable size instead of an alternative stack though, the
 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
int
on_altstack_p(struct thread *th, void *esp)
{
    void *start = (void *)th+dynamic_values_bytes;
    void *end = (char *)start + 32*SIGSTKSZ;
    return start <= esp && esp < end;
}
#endif
void
assert_on_stack(struct thread *th, void *esp)
{
    if (on_stack_p(th, esp))
        return;
#ifndef LISP_FEATURE_WIN32
    if (on_altstack_p(th, esp))
        lose("thread %p: esp on altstack: %p", th, esp);
#endif
    lose("thread %p: bogus esp: %p", th, esp);
}
// returns 0 if skipped, 1 otherwise
int
check_pending_gc(os_context_t *ctx)
{
    odxprint(misc, "check_pending_gc");
    struct thread * self = arch_os_get_current_thread();
    int done = 0;
    sigset_t sigset;

    if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
        ((SymbolValue(GC_INHIBIT,self) == NIL) &&
         (SymbolValue(GC_PENDING,self) == NIL))) {
        SetSymbolValue(IN_SAFEPOINT,NIL,self);
    }
    if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
        if ((SymbolTlValue(GC_PENDING, self) == T)) {
            lispobj gc_happened = NIL;

            bind_variable(IN_SAFEPOINT,T,self);
            block_deferrable_signals(NULL,&sigset);
            if(SymbolTlValue(GC_PENDING,self)==T)
                gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
            unbind_variable(IN_SAFEPOINT,self);
            thread_sigmask(SIG_SETMASK,&sigset,NULL);
            if (gc_happened == T) {
                /* POST_GC wants to enable interrupts */
                if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
                    SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
                    odxprint(misc, "going to call POST_GC");
                    funcall0(StaticSymbolFunction(POST_GC));
                }
                done = 1;
            }
        }
    }
    return done;
}
/* Several ideas on interthread signalling should be
   tried. The implementation below was chosen for its moderate size
   and relative simplicity.

   A mutex is the only (conventional) system synchronization primitive
   used by it. Some of the code below looks weird with this
   limitation; rwlocks, Windows Event Objects, or perhaps pthread
   barriers could be used to improve clarity.

   No condvars here: our pthreads_win32 is great, but it doesn't
   provide the wait morphing optimization; let's avoid extra context
   switches and extra contention. */
struct gc_dispatcher {

    /* Held by the first thread that decides to signal all others, for
       the entire period while common GC safepoint page is
       unmapped. This thread is called `STW (stop-the-world)
       initiator' below. */
    pthread_mutex_t mx_gpunmapped;

    /* Held by STW initiator while it updates th_stw_initiator and
       takes other locks in this structure. */
    pthread_mutex_t mx_gptransition;

    /* Held by STW initiator until the world should be started (GC
       complete, thruptions delivered). */
    pthread_mutex_t mx_gcing;

    /* Held by a SUB-GC's gc_stop_the_world() when thread in SUB-GC
       holds the GC Lisp-level mutex, but _couldn't_ become STW
       initiator (i.e. another thread is already stopping the
       world). */
    pthread_mutex_t mx_subgc;

    /* First thread (at this round) that decided to stop the world. */
    struct thread *th_stw_initiator;

    /* Thread running SUB-GC under the `supervision' of STW
       initiator. */
    struct thread *th_subgc;

    /* Stop counter. Nested gc-stop-the-world and gc-start-the-world
       work without thundering herd. */
    int stopped;

    /* Thruption flag: Iff true, current STW initiator is delivering
       thruptions and not GCing. */
    boolean thruption;

} gc_dispatcher = {
    /* mutexes lazy initialized, other data initially zeroed */
    .mx_gpunmapped = PTHREAD_MUTEX_INITIALIZER,
    .mx_gptransition = PTHREAD_MUTEX_INITIALIZER,
    .mx_gcing = PTHREAD_MUTEX_INITIALIZER,
    .mx_subgc = PTHREAD_MUTEX_INITIALIZER,
};
/* set_thread_csp_access -- alter page permissions for not-in-Lisp
   flag (Lisp Stack Top) of the thread `p'. The flag may be modified
   if `writable' is true.

   Return true if there is a non-null value in the flag.

   When a thread enters C code or leaves it, a per-thread location is
   modified. That machine word serves as a not-in-Lisp flag; for
   convenience, when in C, it's filled with a topmost stack location
   that may contain Lisp data. When the thread is in Lisp, the word
   contains NULL.

   GENCGC uses each thread's flag value for conservative garbage collection.

   There is a full VM page reserved for this word; page permissions
   are switched to read-only for race-free examine + wait + use
   scenarios. */
static inline boolean
set_thread_csp_access(struct thread* p, boolean writable)
{
    os_protect((os_vm_address_t) p->csp_around_foreign_call,
               THREAD_CSP_PAGE_SIZE,
               writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
               : (OS_VM_PROT_READ));
    return !!*p->csp_around_foreign_call;
}
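/* Typical uses below: the STW initiator calls set_thread_csp_access(p,0)
   to read-protect another thread's flag before examining it
   (gc_stop_the_world), and a thread reaching a safepoint calls
   set_thread_csp_access(self,1) to make its own flag writable again
   (thread_pitstop, thread_edge). */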
/* maybe_become_stw_initiator -- if there is no stop-the-world action
   in progress, begin it by unmapping the GC page, and record the
   current thread as STW initiator.

   The `thruption' flag affects some subtleties of stop/start methods:
   waiting for other threads allowing GC; setting and clearing
   STOP_FOR_GC_PENDING, GC_PENDING, THRUPTION_PENDING, etc.

   Return true if the current thread becomes a GC initiator, or
   already _is_ a STW initiator.

   Unlike gc_stop_the_world and gc_start_the_world (which should be
   used in matching pairs), maybe_become_stw_initiator is idempotent
   within a stop-restart cycle. With this call, a thread may `reserve
   the right' to stop the world as early as it wants. */
static inline boolean
maybe_become_stw_initiator(boolean thruption)
{
    struct thread* self = arch_os_get_current_thread();

    /* Double-checked locking. Possible word tearing on some
       architectures, FIXME FIXME, but let's think of it when GENCGC
       and threaded SBCL are ported to them. */
    if (!gc_dispatcher.th_stw_initiator) {
        odxprint(misc,"NULL STW BEFORE GPTRANSITION");
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        /* We hold mx_gptransition. Is there no STW initiator yet? */
        if (!gc_dispatcher.th_stw_initiator) {
            odxprint(misc,"NULL STW IN GPTRANSITION, REPLACING");
            gc_dispatcher.th_stw_initiator = self;
            gc_dispatcher.thruption = thruption;

            /* hold mx_gcing until we restart the world */
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);

            /* and mx_gpunmapped until we remap common GC page */
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);

            /* we unmap it; other threads running Lisp code will now
               trap. */
            unmap_gc_page();

            /* stop counter; the world is not stopped yet. */
            gc_dispatcher.stopped = 0;
        }
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
    }
    return gc_dispatcher.th_stw_initiator == self;
}
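/* Callers below rely on this idempotence: thread_register_gc_trigger()
   calls maybe_become_stw_initiator(0) as soon as GC_PENDING becomes T,
   and wake_thread_posix() calls maybe_become_stw_initiator(1) for
   thruption delivery; the subsequent gc_stop_the_world() then finds
   the calling thread already registered as initiator. */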
/* maybe_let_the_world_go -- if current thread is a STW initiator,
   unlock internal GC structures, and return true. */
static inline boolean
maybe_let_the_world_go()
{
    struct thread* self = arch_os_get_current_thread();
    if (gc_dispatcher.th_stw_initiator == self) {
        pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
        if (gc_dispatcher.th_stw_initiator == self) {
            gc_dispatcher.th_stw_initiator = NULL;
        }
        pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
        return 1;
    } else {
        return 0;
    }
}
/* gc_stop_the_world -- become STW initiator (waiting for other GCs to
   complete if necessary), and make sure all other threads are either
   stopped or gc-safe (i.e. running foreign calls).

   If a GC initiator already exists, gc_stop_the_world() either waits
   for its completion, or cooperates with it: e.g. a concurrent pending
   thruption handler allows (SUB-GC) to complete under its
   `supervision'.

   Code sections bounded by gc_stop_the_world and gc_start_the_world
   may be nested; inner calls don't stop or start threads,
   decrementing or incrementing the stop counter instead. */
void
gc_stop_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean thruption;
    if (SymbolTlValue(GC_INHIBIT,self)!=T) {
        /* If GC is enabled, this thread may wait for current STW
           initiator without causing deadlock. */
        if (!maybe_become_stw_initiator(0)) {
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            maybe_become_stw_initiator(0);
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        }
        /* Now _this thread_ should be STW initiator */
        gc_assert(self == gc_dispatcher.th_stw_initiator);
    } else {
        /* GC inhibited; e.g. we are inside SUB-GC */
        if (!maybe_become_stw_initiator(0)) {
            /* Some trouble. Inside SUB-GC, holding the Lisp-side
               mutex, but some other thread is stopping the world. */
            if (gc_dispatcher.thruption) {
                /* Thruption. Wait until it's delivered. */
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                /* Warning: mx_gcing is held recursively. */
                gc_assert(maybe_become_stw_initiator(0));
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
            } else {
                /* In SUB-GC, holding mutex; other thread wants to
                   GC. */
                if (gc_dispatcher.th_subgc == self) {
                    /* There is an outer gc_stop_the_world() by _this_
                       thread, running subordinately to initiator.
                       Just increase stop counter. */
                    ++gc_dispatcher.stopped;
                    return;
                }
                /* Register as subordinate collector thread: take
                   mx_subgc. */
                pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                ++gc_dispatcher.stopped;

                /* Unlocking thread's own thread_qrl() designates
                   `time to examine me' to other threads. */
                pthread_mutex_unlock(thread_qrl(self));

                /* STW (GC) initiator thread will see our thread needs
                   to finish GC. It will stop the world and itself,
                   and unlock its qrl. */
                pthread_mutex_lock(thread_qrl(gc_dispatcher.th_stw_initiator));
                return;
            }
        }
    }
    thruption = gc_dispatcher.thruption; /* Thruption or GC? */
    if (!gc_dispatcher.stopped++) {
        /* Outermost stop: signal other threads */
        pthread_mutex_lock(&all_threads_lock);
        /* Phase 1: ensure all threads are aware of the need to stop,
           or locked in the foreign code. */
        for_each_thread(p) {
            pthread_mutex_t *p_qrl = thread_qrl(p);
            if (p==self)
                continue;
            /* Read-protect p's flag */
            if (!set_thread_csp_access(p,0)) {
                odxprint(safepoints,"taking qrl %p of %p", p_qrl, p);
                /* Thread is in Lisp, so it should trap (either in
                   Lisp or in Lisp->FFI transition). Trap handler
                   unlocks thread_qrl(p); when it happens, we're safe
                   to examine that thread. */
                pthread_mutex_lock(p_qrl);
                odxprint(safepoints,"taken qrl %p of %p", p_qrl, p);
                /* Mark thread for the future: should we collect, or
                   wait for its final permission? */
                if (SymbolTlValue(GC_INHIBIT,p)!=T) {
                    SetTlSymbolValue(GC_SAFE,T,p);
                } else {
                    SetTlSymbolValue(GC_SAFE,NIL,p);
                }
                pthread_mutex_unlock(p_qrl);
            } else {
                /* In C; we just disabled writing. */
                if (!thruption) {
                    if (SymbolTlValue(GC_INHIBIT,p)==T) {
                        /* GC inhibited there */
                        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
                        /* Enable writing. Such threads trap by
                           pending thruption when the WITHOUT-GCING
                           section ends. */
                        set_thread_csp_access(p,1);
                        SetTlSymbolValue(GC_SAFE,NIL,p);
                    } else {
                        /* Thread allows concurrent GC. It runs in C
                           (not a mutator), its in-Lisp flag is
                           read-only (so it traps on return). */
                        SetTlSymbolValue(GC_SAFE,T,p);
                    }
                }
            }
        }
        /* All threads are ready (GC_SAFE == T) or notified (GC_SAFE == NIL). */

        /* Signalling done; remap the common GC safepoint page. */
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        /* Threads with GC inhibited -- continued */
        odxprint(safepoints,"after remapping GC page %p",self);

        SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
        if (!thruption) {
            struct thread* priority_gc = NULL;
            for_each_thread(p) {
                if (p==self)
                    continue;
                if (SymbolTlValue(GC_SAFE,p)!=T) {
                    /* Wait for thread to `park'. NB it _always_ does
                       it with a pending interrupt trap, so CSP locking is
                       not needed. */
                    odxprint(safepoints,"waiting final parking %p (qrl %p)",p, thread_qrl(p));
                    pthread_mutex_lock(thread_qrl(p));
                    if (SymbolTlValue(GC_INHIBIT,p)==T) {
                        /* Concurrent GC invoked manually */
                        gc_assert(!priority_gc); /* Should be at most one at a time */
                        priority_gc = p;
                    }
                    pthread_mutex_unlock(thread_qrl(p));
                }
                if (!os_get_csp(p))
                    lose("gc_stop_the_world: no SP in parked thread: %p", p);
            }
            if (priority_gc) {
                /* This thread is managing the entire process, so it
                   has to allow manually-invoked GC to complete */
                if (!set_thread_csp_access(self,1)) {
                    /* Record a top-of-stack marker for conservative scan. */
                    *self->csp_around_foreign_call = (lispobj)__builtin_frame_address(0);
                    pthread_mutex_unlock(thread_qrl(self));
                    /* Priority GC should take over, holding
                       mx_subgc until it's done. */
                    pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                    pthread_mutex_lock(thread_qrl(self));
                    *self->csp_around_foreign_call = 0;
                    SetTlSymbolValue(GC_PENDING,NIL,self);
                    pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
                } else {
                    pthread_mutex_unlock(thread_qrl(self));
                    /* Priority GC should take over, holding
                       mx_subgc until it's done. */
                    pthread_mutex_lock(&gc_dispatcher.mx_subgc);
                    pthread_mutex_lock(thread_qrl(self));
                    pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
                }
            }
        }
    }
}
/* gc_start_the_world() -- restart all other threads if the call
   matches the _outermost_ gc_stop_the_world(), or decrement the stop
   counter otherwise. */
void
gc_start_the_world()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean thruption = gc_dispatcher.thruption;
    if (gc_dispatcher.th_stw_initiator != self) {
        odxprint(misc,"Unmapper %p self %p",gc_dispatcher.th_stw_initiator,self);
        gc_assert (gc_dispatcher.th_subgc == self);
        if (--gc_dispatcher.stopped == 1) {
            gc_dispatcher.th_subgc = NULL;
            pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
            /* GC initiator may continue now */
            pthread_mutex_unlock(thread_qrl(gc_dispatcher.th_stw_initiator));
        }
        return;
    }

    gc_assert(gc_dispatcher.th_stw_initiator == self);

    if (!--gc_dispatcher.stopped) {
        for_each_thread(p) {
            if (!thruption) {
                SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,p);
                SetTlSymbolValue(GC_PENDING,NIL,p);
            }
            if (
#ifdef LISP_FEATURE_SB_THRUPTION
                SymbolTlValue(THRUPTION_PENDING,p)!=T
#else
                1 /* trivially no thruption pending */
#endif
                || SymbolTlValue(INTERRUPTS_ENABLED,p)!=T)
                set_thread_csp_access(p,1);
        }
        pthread_mutex_unlock(&all_threads_lock);
        /* Release everyone */
        maybe_let_the_world_go();
    }
}
/* in_race_p() -- return TRUE if another thread is inside SUB-GC with
   GC-PENDING :IN-PROGRESS. Used to prevent deadlock between manual
   SUB-GC, auto-gc and thruption. */
static inline boolean
in_race_p()
{
    struct thread* self = arch_os_get_current_thread(), *p;
    boolean result = 0;
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(p) {
        if (p!=self && SymbolTlValue(GC_PENDING,p)!=T &&
            SymbolTlValue(GC_PENDING,p)!=NIL)
            result = 1;
    }
    pthread_mutex_unlock(&all_threads_lock);
    if (result) {
        map_gc_page();
        pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        maybe_let_the_world_go();
    }
    return result;
}
static void
set_csp_from_context(struct thread *self, os_context_t *ctx)
{
    void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
    gc_assert((void **)self->control_stack_start
              <= sp && sp
              < (void **)self->control_stack_end);
    *self->csp_around_foreign_call = (lispobj) sp;
}
void
thread_pitstop(os_context_t *ctxptr)
{
    struct thread* self = arch_os_get_current_thread();
    boolean inhibitor = (SymbolTlValue(GC_INHIBIT,self)==T);

    odxprint(safepoints,"pitstop [%p]", ctxptr);
    if (inhibitor) {
        SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
        /* Free qrl to let others know we're ready... */
        WITH_STATE_SEM(self) {
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
        /* Enable FF-CSP recording (doesn't hurt: will gc at pit-stop, and
           pit-stop always waits for GC end) */
        set_thread_csp_access(self,1);
    } else {
        if (self == gc_dispatcher.th_stw_initiator && gc_dispatcher.stopped) {
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            return;
        }
        if ((SymbolTlValue(GC_PENDING,self)!=NIL) &&
            maybe_become_stw_initiator(0) && !in_race_p()) {
            gc_stop_the_world();
            set_thread_csp_access(self,1);
            check_pending_gc(ctxptr);
            gc_start_the_world();
        } else {
            /* An innocent thread which is not an initiator _and_ is
               not required to GC itself: just yield until the world
               is restarted. */
            odxprint(safepoints,"pitstop yielding [%p]", ctxptr);
            if (!set_thread_csp_access(self,1)) {
                if (os_get_csp(self))
                    lose("thread_pitstop: would lose csp");
                set_csp_from_context(self, ctxptr);
                pthread_mutex_unlock(thread_qrl(self));
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                *self->csp_around_foreign_call = 0;
                pthread_mutex_lock(thread_qrl(self));
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
            } else {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                set_thread_csp_access(self,1);
                WITH_GC_AT_SAFEPOINTS_ONLY() {
                    pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
#ifdef LISP_FEATURE_SB_THRUPTION
                    while (check_pending_thruptions(ctxptr))
                        ;
#endif
                }
                return;
            }
        }
    }
#ifdef LISP_FEATURE_SB_THRUPTION
    while(check_pending_thruptions(ctxptr));
#endif
}
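/* thread_pitstop() above services a safepoint taken while the thread
   was running Lisp code (see thread_in_lisp_raised() below);
   thread_edge() handles the Lisp<->foreign-call transition (see
   thread_in_safety_transition() below), branching on os_get_csp(self)
   to tell which side of the transition it is on -- cf. the
   "edge leaving" / "edge entering" trace messages. */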
void
thread_edge(os_context_t *ctxptr)
{
    struct thread *self = arch_os_get_current_thread();
    set_thread_csp_access(self,1);
    if (os_get_csp(self)) {
        if (!self->pc_around_foreign_call)
            return;             /* trivialize */
        odxprint(safepoints,"edge leaving [%p]", ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
#ifdef LISP_FEATURE_SB_THRUPTION
            if (SymbolTlValue(THRUPTION_PENDING,self)==T &&
                SymbolTlValue(INTERRUPTS_ENABLED,self)==T) {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                set_thread_csp_access(self,1);
                WITH_GC_AT_SAFEPOINTS_ONLY() {
                    pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                    while (check_pending_thruptions(ctxptr))
                        ;
                }
            } else
#endif
            {
                pthread_mutex_lock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] took gcing", ctxptr);
                pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
                odxprint(safepoints,"edge leaving [%p] released gcing", ctxptr);
            }
        }
    } else {
        odxprint(safepoints,"edge entering [%p]", ctxptr);
#ifdef LISP_FEATURE_SB_THRUPTION
        while(check_pending_thruptions(ctxptr))
            ;
#endif
        if (os_get_csp(self))
            lose("thread_edge: would lose csp");
        set_csp_from_context(self, ctxptr);
        if (SymbolTlValue(GC_INHIBIT,self)!=T) {
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gcing);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
        } else {
            SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
            pthread_mutex_unlock(thread_qrl(self));
            pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
            *self->csp_around_foreign_call = 0;
            pthread_mutex_lock(thread_qrl(self));
            pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
        }
    }
}
/* thread_register_gc_trigger --

   Called by GENCGC in each thread where GC_PENDING becomes T because
   allocated memory size has crossed the threshold in
   auto_gc_trigger. For the new collective GC sequence, its first call
   marks a process-wide beginning of GC.
*/
void
thread_register_gc_trigger()
{
    odxprint(misc, "/thread_register_gc_trigger");
    struct thread* self = arch_os_get_current_thread();
    /* This function should be called instead of the former
       set_pseudo_atomic_interrupted(), i.e. never with true
       GC_INHIBIT. */
    gc_assert(SymbolTlValue(GC_INHIBIT,self)!=T);

    /* unmap GC page, signal other threads... */
    maybe_become_stw_initiator(0);
}
#ifdef LISP_FEATURE_SB_THRUPTION
/* wake_thread(thread) -- ensure a thruption delivery to
 * `thread'. */
int
wake_thread_posix(os_thread_t os_thread)
{
    int found = 0;
    struct thread *thread;
    struct thread *self = arch_os_get_current_thread();

    /* Must not and need not attempt to signal ourselves while we're the
     * STW initiator. */
    if (self->os_thread == os_thread) {
        SetTlSymbolValue(THRUPTION_PENDING,T,self);
        WITH_GC_AT_SAFEPOINTS_ONLY()
            while (check_pending_thruptions(0 /* ignore the sigmask */))
                ;
        return 0;
    }

    /* We are not in a signal handler here, so need to block signals
     * manually. */
    sigset_t oldset;
    block_deferrable_signals(0, &oldset);

    if (!maybe_become_stw_initiator(1) || in_race_p()) {
        /* we are not able to wake the thread up, but the STW initiator
         * will take care of it (kludge: unless it is in foreign code).
         * Let's at least try to get our return value right. */
        pthread_mutex_lock(&all_threads_lock);
        for_each_thread (thread)
            if (thread->os_thread == os_thread) {
                found = 1;
                break;
            }
        pthread_mutex_unlock(&all_threads_lock);
        goto cleanup;
    }
    gc_stop_the_world();

    /* we hold the all_threads lock */
    for_each_thread (thread)
        if (thread->os_thread == os_thread) {
            /* it's still alive... */
            found = 1;

            SetTlSymbolValue(THRUPTION_PENDING,T,thread);
            if (SymbolTlValue(GC_PENDING,thread) == T
                || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
                break;

            if (os_get_csp(thread)) {
                /* ... and in foreign code. Push it into a safety
                 * transition. */
                int status = pthread_kill(os_thread, SIGPIPE);
                if (status)
                    lose("wake_thread_posix: pthread_kill failed with %d\n",
                         status);
            }
            break;
        }

    /* If it was alive but in Lisp, the pit stop takes care of thruptions. */
    gc_start_the_world();

cleanup:
    pthread_sigmask(SIG_SETMASK, &oldset, 0);
    return found ? 0 : -1;
}
#endif /* LISP_FEATURE_SB_THRUPTION */
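/* Summary of the wake protocol above: to wake another thread we briefly
   become a thruption-style STW initiator, stop the world, set
   THRUPTION_PENDING in the target, and, if the target is parked in
   foreign code, push it into a safety transition with SIGPIPE; a target
   running Lisp picks the thruption up at its next pit stop. */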
void thread_in_safety_transition(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_safety_transition\n"));
    thread_edge(ctx);
}

void thread_in_lisp_raised(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_in_lisp_raised\n"));
    thread_pitstop(ctx);
}

void thread_interrupted(os_context_t *ctx)
{
    FSHOW_SIGNAL((stderr, "thread_interrupted\n"));
    thread_pitstop(ctx);
}
void**
os_get_csp(struct thread* th)
{
    FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
                  th,
                  th->csp_around_foreign_call,
                  *(void***)th->csp_around_foreign_call,
                  th->control_stack_start,
                  th->control_stack_end));
    return *(void***)th->csp_around_foreign_call;
}
#ifndef LISP_FEATURE_WIN32

# ifdef LISP_FEATURE_SB_THRUPTION
void
thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
{
    struct thread *self = arch_os_get_current_thread();

    if (!os_get_csp(self))
        /* In Lisp code. Do not run thruptions asynchronously. The
         * next safepoint will take care of it. */
        return;

    /* In C code. As a rule, we assume that running thruptions is OK. */
    fake_foreign_function_call(ctx);
    thread_in_safety_transition(ctx);
    undo_fake_foreign_function_call(ctx);
}
# endif
/* Designed to be of the same type as call_into_lisp. Ignores its
 * arguments. */
lispobj
handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_GlobalSafepoint != 0x1a
# error trap_GlobalSafepoint mismatch
#endif
    asm("int3; .byte 0x1a;");
    return 0;
}

lispobj
handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
{
#if trap_CspSafepoint != 0x1b
# error trap_CspSafepoint mismatch
#endif
    asm("int3; .byte 0x1b;");
    return 0;
}
int
handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
{
    FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
                  fault_address,
                  GC_SAFEPOINT_PAGE_ADDR,
                  arch_os_get_current_thread()->csp_around_foreign_call));
    struct thread *self = arch_os_get_current_thread();

    if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
        /* We're on the altstack and don't want to run Lisp code. */
        arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
        return 1;
    }
    if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
        arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
        return 1;
    }
    /* not a safepoint */
    return 0;
}
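/* Note on the dispatch above: we never run Lisp directly from the signal
   handler. arrange_return_to_c_function() rewrites the interrupted
   context so that, once the handler returns, the thread executes
   handle_global_safepoint_violation() or handle_csp_safepoint_violation(),
   whose int3 + trap-code byte then raises the corresponding trap
   (trap_GlobalSafepoint / trap_CspSafepoint). */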
#endif /* LISP_FEATURE_WIN32 */
void
callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
{
    struct thread* th = arch_os_get_current_thread();
    if (!th)
        lose("callback invoked in non-lisp thread. Sorry, that is not supported yet.");

    WITH_GC_AT_SAFEPOINTS_ONLY()
        funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
}

#endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */