2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
17 #ifndef LISP_FEATURE_WIN32
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
36 #include "target-arch-os.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51 * definition goes here. Fixme: (Why) don't these work for Windows?
/* map_gc_page: restore read/write access to the global safepoint page
 * (GC_SAFEPOINT_PAGE_ADDR) so Lisp threads stop trapping on it.
 * NOTE(review): the function headers/braces are elided in this extract
 * (original line numbers jump 51->56); reconcile with upstream
 * safepoint.c before further edits. */
56 odxprint(misc, "map_gc_page");
57 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
59 OS_VM_PROT_READ | OS_VM_PROT_WRITE);
/* unmap_gc_page: revoke all access to the safepoint page; any thread
 * that touches it afterwards traps into the safepoint machinery.
 * The literal 4 is the protected size in bytes — presumably "one
 * word"; TODO confirm against the mapping size used above. */
65 odxprint(misc, "unmap_gc_page");
66 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
68 #endif /* !LISP_FEATURE_WIN32 */
/* thread_may_gc -- predicate consulted at a safepoint: true iff this
 * thread may start/continue a GC, i.e. GC_INHIBIT is NIL and
 * GC_PENDING is exactly T or NIL (not the :in-progress marker).
 * NOTE(review): signature and braces are elided in this extract. */
73 /* Thread may gc if all of these are true:
74 * 1) GC_INHIBIT == NIL (outside of protected part of without-gcing)
75 * 2) GC_PENDING != :in-progress (outside of recursion protection)
76 * Note that we are in a safepoint here, which is always outside of PA. */
78 struct thread *self = arch_os_get_current_thread();
79 return (SymbolValue(GC_INHIBIT, self) == NIL
80 && (SymbolTlValue(GC_PENDING, self) == T ||
81 SymbolTlValue(GC_PENDING, self) == NIL));
84 #ifdef LISP_FEATURE_SB_THRUPTION
/* thread_may_thrupt -- predicate: may a thruption (asynchronous
 * interruption) run in this thread right now?  `ctx' may be NULL to
 * skip the sigmask check.  NOTE(review): the return statements after
 * each guard below are elided in this extract; each `if' presumably
 * returns 0 (and the fall-through returns 1) — confirm upstream. */
86 thread_may_thrupt(os_context_t *ctx)
88 struct thread * self = arch_os_get_current_thread();
89 /* Thread may be interrupted if all of these are true:
90 * 1) Deferrables are unblocked in the context of the signal that
91 * went into the safepoint. -- Otherwise the surrounding code
92 * didn't want to be interrupted by a signal, so presumably it didn't
93 * want to be INTERRUPT-THREADed either.
94 * (See interrupt_handle_pending for an exception.)
95 * 2) On POSIX: There is no pending signal. This is important even
96 * after checking the sigmask, since we could be in the
97 * handle_pending trap following re-enabling of interrupts.
98 * Signals are unblocked in that case, but the signal is still
99 * pending; we want to run GC before handling the signal and
100 * therefore entered this safepoint. But the thruption would call
101 * ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
102 * trap, leading to recursion.
103 * 3) INTERRUPTS_ENABLED is non-nil.
104 * 4) No GC pending; it takes precedence.
105 * Note that we are in a safepoint here, which is always outside of PA. */
107 if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
110 if (SymbolValue(GC_PENDING, self) != NIL)
113 if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
/* On Windows the pending-deferrables check reads the thread's own
 * blocked-signal set; on POSIX it consults the interrupted context's
 * sigmask instead (ctx == NULL means "ignore the sigmask"). */
116 #ifdef LISP_FEATURE_WIN32
117 if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
120 /* ctx is NULL if the caller wants to ignore the sigmask. */
121 if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
123 if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
127 if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
128 /* This special case prevents TERMINATE-THREAD from hitting
129 * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
130 * deferrables are already unblocked there. Further
131 * investigation may be in order. */
/* check_pending_thruptions -- if a thruption may and should run here,
 * clear THRUPTION_PENDING and call the Lisp RUN_INTERRUPTION handler
 * with deferrable signals blocked around the call.
 * NOTE(review): signature line, early returns, the declaration of
 * `oldset', and the final return are elided in this extract. */
137 // returns 0 if skipped, 1 otherwise
139 check_pending_thruptions(os_context_t *ctx)
141 struct thread *p = arch_os_get_current_thread();
/* Must be called with the in-C flag (csp) clear, i.e. from Lisp side. */
143 gc_assert(!os_get_csp(p));
145 if (!thread_may_thrupt(ctx))
147 if (SymbolValue(THRUPTION_PENDING, p) == NIL)
149 SetSymbolValue(THRUPTION_PENDING, NIL, p);
/* Block deferrables so RUN_INTERRUPTION isn't itself interrupted;
 * the mask is restored below. */
152 block_deferrable_signals(0, &oldset);
154 funcall0(StaticSymbolFunction(RUN_INTERRUPTION));
156 pthread_sigmask(SIG_SETMASK, &oldset, 0);
/* on_stack_p -- true iff `esp' lies within th's control stack.
 * NOTE(review): the middle of the range expression (comparing esp
 * against the bounds, original line 165) and the braces are elided
 * in this extract. */
162 on_stack_p(struct thread *th, void *esp)
164 return (void *)th->control_stack_start
166 < (void *)th->control_stack_end;
169 #ifndef LISP_FEATURE_WIN32
170 /* (Technically, we still allocate an altstack even on Windows. Since
171 * Windows has a contiguous stack with an automatic guard page of
172 * user-configurable size instead of an alternative stack though, the
173 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
/* on_altstack_p -- true iff `esp' lies within the SBCL-allocated
 * signal altstack, which sits immediately after the thread struct's
 * dynamic values block and spans 32*SIGSTKSZ bytes. */
175 on_altstack_p(struct thread *th, void *esp)
177 void *start = (void *)th+dynamic_values_bytes;
178 void *end = (char *)start + 32*SIGSTKSZ;
179 return start <= esp && esp < end;
/* assert_on_stack -- sanity check: lose() unless `esp' is on th's
 * control stack, with a more specific message if it is on the
 * altstack.  NOTE(review): the `return' on the success path and the
 * braces are elided in this extract. */
184 assert_on_stack(struct thread *th, void *esp)
186 if (on_stack_p(th, esp))
188 #ifndef LISP_FEATURE_WIN32
189 if (on_altstack_p(th, esp))
190 lose("thread %p: esp on altstack: %p", th, esp);
192 lose("thread %p: bogus esp: %p", th, esp);
/* check_pending_gc -- run SUB_GC (and then POST_GC, if interrupts
 * allow) when a GC is pending and this thread may collect.
 * IN_SAFEPOINT is bound to T around SUB_GC as recursion protection,
 * with deferrable signals blocked for the duration.
 * NOTE(review): signature, `sigset' declaration, several closing
 * braces and the final return are elided in this extract. */
195 // returns 0 if skipped, 1 otherwise
197 check_pending_gc(os_context_t *ctx)
199 odxprint(misc, "check_pending_gc");
200 struct thread * self = arch_os_get_current_thread();
/* Clear a stale IN_SAFEPOINT left over from an earlier cycle once
 * both GC_INHIBIT and GC_PENDING are back to NIL. */
204 if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
205 ((SymbolValue(GC_INHIBIT,self) == NIL) &&
206 (SymbolValue(GC_PENDING,self) == NIL))) {
207 SetSymbolValue(IN_SAFEPOINT,NIL,self);
209 if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
210 if ((SymbolTlValue(GC_PENDING, self) == T)) {
211 lispobj gc_happened = NIL;
213 bind_variable(IN_SAFEPOINT,T,self);
214 block_deferrable_signals(NULL,&sigset);
/* Re-check GC_PENDING under the blocked mask before committing. */
215 if(SymbolTlValue(GC_PENDING,self)==T)
216 gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
217 unbind_variable(IN_SAFEPOINT,self);
218 thread_sigmask(SIG_SETMASK,&sigset,NULL);
219 if (gc_happened == T) {
220 /* POST_GC wants to enable interrupts */
221 if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
222 SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
223 odxprint(misc, "going to call POST_GC");
224 funcall0(StaticSymbolFunction(POST_GC));
233 /* Several ideas on interthread signalling should be
234 tried. Implementation below was chosen for its moderate size and
237 Mutex is the only (conventional) system synchronization primitive
238 used by it. Some of the code below looks weird with this
239 limitation; rwlocks, Windows Event Objects, or perhaps pthread
240 barriers could be used to improve clarity.
242 No condvars here: our pthreads_win32 is great, but it doesn't
243 provide wait morphing optimization; let's avoid extra context
244 switches and extra contention. */
/* gc_dispatcher -- process-global state coordinating stop-the-world
 * (STW) rounds between the initiator, subordinate SUB-GC threads, and
 * everyone else.  NOTE(review): the `stopped' counter and `thruption'
 * flag declarations, plus the closing brace / variable name of the
 * initializer, are elided in this extract. */
246 struct gc_dispatcher {
248 /* Held by the first thread that decides to signal all others, for
249 the entire period while common GC safepoint page is
250 unmapped. This thread is called `STW (stop-the-world)
252 pthread_mutex_t mx_gpunmapped;
254 /* Held by STW initiator while it updates th_stw_initiator and
255 takes other locks in this structure */
256 pthread_mutex_t mx_gptransition;
258 /* Held by STW initiator until the world should be started (GC
259 complete, thruptions delivered). */
260 pthread_mutex_t mx_gcing;
262 /* Held by a SUB-GC's gc_stop_the_world() when thread in SUB-GC
263 holds the GC Lisp-level mutex, but _couldn't_ become STW
264 initiator (i.e. another thread is already stopping the
266 pthread_mutex_t mx_subgc;
268 /* First thread (at this round) that decided to stop the world */
269 struct thread *th_stw_initiator;
271 /* Thread running SUB-GC under the `supervision' of STW
273 struct thread *th_subgc;
275 /* Stop counter. Nested gc-stop-the-world and gc-start-the-world
276 work without thundering herd. */
279 /* Thruption flag: Iff true, current STW initiator is delivering
280 thruptions and not GCing. */
284 /* mutexes lazy initialized, other data initially zeroed */
285 .mx_gpunmapped = PTHREAD_MUTEX_INITIALIZER,
286 .mx_gptransition = PTHREAD_MUTEX_INITIALIZER,
287 .mx_gcing = PTHREAD_MUTEX_INITIALIZER,
288 .mx_subgc = PTHREAD_MUTEX_INITIALIZER,
292 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
293 flag (Lisp Stack Top) of the thread `p'. The flag may be modified
294 if `writable' is true.
296 Return true if there is a non-null value in the flag.
298 When a thread enters C code or leaves it, a per-thread location is
299 modified. That machine word serves as a not-in-Lisp flag; for
300 convenience, when in C, it's filled with a topmost stack location
301 that may contain Lisp data. When thread is in Lisp, the word
304 GENCGC uses each thread's flag value for conservative garbage collection.
306 There is a full VM page reserved for this word; page permissions
307 are switched to read-only for race-free examine + wait + use
/* The page is always left at least readable, so the flag can be
 * inspected by other threads regardless of `writable'. */
309 static inline boolean
310 set_thread_csp_access(struct thread* p, boolean writable)
312 os_protect((os_vm_address_t) p->csp_around_foreign_call,
313 THREAD_CSP_PAGE_SIZE,
314 writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
315 : (OS_VM_PROT_READ));
316 return !!*p->csp_around_foreign_call;
320 /* maybe_become_stw_initiator -- if there is no stop-the-world action
321 in progress, begin it by unmapping GC page, and record current
322 thread as STW initiator.
324 `thruption' flag affects some subtleties of stop/start methods:
325 waiting for other threads allowing GC; setting and clearing
326 STOP_FOR_GC_PENDING, GC_PENDING, THRUPTION_PENDING, etc.
328 Return true if current thread becomes a GC initiator, or already
329 _is_ a STW initiator.
331 Unlike gc_stop_the_world and gc_start_the_world (that should be
332 used in matching pairs), maybe_become_stw_initiator is idempotent
333 within a stop-restart cycle. With this call, a thread may `reserve
334 the right' to stop the world as early as it wants. */
336 static inline boolean
337 maybe_become_stw_initiator(boolean thruption)
339 struct thread* self = arch_os_get_current_thread();
341 /* Double-checked locking. Possible word tearing on some
342 architectures, FIXME FIXME, but let's think of it when GENCGC
343 and threaded SBCL is ported to them. */
344 if (!gc_dispatcher.th_stw_initiator) {
345 odxprint(misc,"NULL STW BEFORE GPTRANSITION");
346 pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
347 /* We hold mx_gptransition. Is there no STW initiator yet? */
348 if (!gc_dispatcher.th_stw_initiator) {
349 odxprint(misc,"NULL STW IN GPTRANSITION, REPLACING");
351 gc_dispatcher.th_stw_initiator = self;
352 gc_dispatcher.thruption = thruption;
354 /* hold mx_gcing until we restart the world */
355 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
357 /* and mx_gpunmapped until we remap common GC page */
358 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
360 /* we unmap it; other threads running Lisp code will now
/* NOTE(review): the unmap_gc_page() call that this comment refers
 * to (original lines ~361-363) is elided in this extract. */
364 /* stop counter; the world is not stopped yet. */
365 gc_dispatcher.stopped = 0;
367 pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
369 return gc_dispatcher.th_stw_initiator == self;
373 /* maybe_let_the_world_go -- if current thread is a STW initiator,
374 unlock internal GC structures, and return true. */
/* Clears th_stw_initiator and releases mx_gcing under mx_gptransition;
 * the double check of th_stw_initiator mirrors the double-checked
 * locking in maybe_become_stw_initiator.  NOTE(review): the closing
 * braces and the return statement(s) are elided in this extract. */
375 static inline boolean
376 maybe_let_the_world_go()
378 struct thread* self = arch_os_get_current_thread();
379 if (gc_dispatcher.th_stw_initiator == self) {
380 pthread_mutex_lock(&gc_dispatcher.mx_gptransition);
381 if (gc_dispatcher.th_stw_initiator == self) {
382 gc_dispatcher.th_stw_initiator = NULL;
384 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
385 pthread_mutex_unlock(&gc_dispatcher.mx_gptransition);
393 /* gc_stop_the_world -- become STW initiator (waiting for other GCs to
394 complete if necessary), and make sure all other threads are either
395 stopped or gc-safe (i.e. running foreign calls).
397 If GC initiator already exists, gc_stop_the_world() either waits
398 for its completion, or cooperates with it: e.g. concurrent pending
399 thruption handler allows (SUB-GC) to complete under its
402 Code sections bounded by gc_stop_the_world and gc_start_the_world
403 may be nested; inner calls don't stop or start threads,
404 decrementing or incrementing the stop counter instead. */
/* NOTE(review): this extract elides the function signature, the
 * `thruption' local declaration, the for_each_thread loop headers,
 * several else-branches and closing braces, and some statements
 * (e.g. around original lines 405-409, 431-433, 503-505, 528-533,
 * 562-568).  The code below is kept byte-identical; consult upstream
 * safepoint.c for the full control flow. */
408 struct thread* self = arch_os_get_current_thread(), *p;
410 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
411 /* If GC is enabled, this thread may wait for current STW
412 initiator without causing deadlock. */
413 if (!maybe_become_stw_initiator(0)) {
414 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
415 maybe_become_stw_initiator(0);
416 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
418 /* Now _this thread_ should be STW initiator */
419 gc_assert(self == gc_dispatcher.th_stw_initiator);
421 /* GC inhibited; e.g. we are inside SUB-GC */
422 if (!maybe_become_stw_initiator(0)) {
423 /* Some trouble. Inside SUB-GC, holding the Lisp-side
424 mutex, but some other thread is stopping the world. */
425 if (gc_dispatcher.thruption) {
426 /* Thruption. Wait until it's delivered */
427 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
428 /* Warning: mx_gcing is held recursively. */
429 gc_assert(maybe_become_stw_initiator(0));
430 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
432 /* In SUB-GC, holding mutex; other thread wants to
434 if (gc_dispatcher.th_subgc == self) {
435 /* There is an outer gc_stop_the_world() by _this_
436 thread, running subordinately to initiator.
437 Just increase stop counter. */
438 ++gc_dispatcher.stopped;
441 /* Register as subordinate collector thread: take
443 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
444 ++gc_dispatcher.stopped;
446 /* Unlocking thread's own thread_qrl() designates
447 `time to examine me' to other threads. */
448 pthread_mutex_unlock(thread_qrl(self));
450 /* STW (GC) initiator thread will see our thread needs
451 to finish GC. It will stop the world and itself,
452 and unlock its qrl. */
453 pthread_mutex_lock(thread_qrl(gc_dispatcher.th_stw_initiator));
458 thruption = gc_dispatcher.thruption; /* Thruption or GC? */
459 if (!gc_dispatcher.stopped++) {
460 /* Outermost stop: signal other threads */
461 pthread_mutex_lock(&all_threads_lock);
462 /* Phase 1: ensure all threads are aware of the need to stop,
463 or locked in the foreign code. */
465 pthread_mutex_t *p_qrl = thread_qrl(p);
469 /* Read-protect p's flag */
470 if (!set_thread_csp_access(p,0)) {
471 odxprint(safepoints,"taking qrl %p of %p", p_qrl, p);
472 /* Thread is in Lisp, so it should trap (either in
473 Lisp or in Lisp->FFI transition). Trap handler
474 unlocks thread_qrl(p); when it happens, we're safe
475 to examine that thread. */
476 pthread_mutex_lock(p_qrl);
477 odxprint(safepoints,"taken qrl %p of %p", p_qrl, p);
478 /* Mark thread for the future: should we collect, or
479 wait for its final permission? */
480 if (SymbolTlValue(GC_INHIBIT,p)!=T) {
481 SetTlSymbolValue(GC_SAFE,T,p);
483 SetTlSymbolValue(GC_SAFE,NIL,p);
485 pthread_mutex_unlock(p_qrl);
487 /* In C; we just disabled writing. */
489 if (SymbolTlValue(GC_INHIBIT,p)==T) {
490 /* GC inhibited there */
491 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
492 /* Enable writing. Such threads trap by
493 pending thruption when WITHOUT-GCING
495 set_thread_csp_access(p,1);
496 SetTlSymbolValue(GC_SAFE,NIL,p);
498 /* Thread allows concurrent GC. It runs in C
499 (not a mutator), its in-Lisp flag is
500 read-only (so it traps on return). */
501 SetTlSymbolValue(GC_SAFE,T,p);
506 /* All threads are ready (GC_SAFE == T) or notified (GC_SAFE == NIL). */
508 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
509 /* Threads with GC inhibited -- continued */
510 odxprint(safepoints,"after remapping GC page %p",self);
512 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
514 struct thread* priority_gc = NULL;
/* Phase 2: wait for each not-yet-safe thread to park. */
518 if (SymbolTlValue(GC_SAFE,p)!=T) {
519 /* Wait for thread to `park'. NB it _always_ does
520 it with a pending interrupt trap, so CSP locking is
522 odxprint(safepoints,"waiting final parking %p (qrl %p)",p, thread_qrl(p));
524 pthread_mutex_lock(thread_qrl(p));
525 if (SymbolTlValue(GC_INHIBIT,p)==T) {
526 /* Concurrent GC invoked manually */
527 gc_assert(!priority_gc); /* Should be at most one at a time */
530 pthread_mutex_unlock(thread_qrl(p));
534 lose("gc_stop_the_world: no SP in parked thread: %p", p);
537 /* This thread is managing the entire process, so it
538 has to allow manually-invoked GC to complete */
539 if (!set_thread_csp_access(self,1)) {
/* In Lisp: publish a conservative stack pointer so the priority
 * GC can scan us while we wait on mx_subgc. */
541 *self->csp_around_foreign_call = (lispobj)__builtin_frame_address(0);
543 pthread_mutex_unlock(thread_qrl(self));
544 /* Priority GC should take over, holding
545 mx_subgc until it's done. */
546 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
548 pthread_mutex_lock(thread_qrl(self));
549 *self->csp_around_foreign_call = 0;
550 SetTlSymbolValue(GC_PENDING,NIL,self);
551 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
554 pthread_mutex_unlock(thread_qrl(self));
555 /* Priority GC should take over, holding
556 mx_subgc until it's done. */
557 pthread_mutex_lock(&gc_dispatcher.mx_subgc);
559 pthread_mutex_lock(thread_qrl(self));
561 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
569 /* gc_start_the_world() -- restart all other threads if the call
570 matches the _outermost_ gc_stop_the_world(), or decrement the stop
/* NOTE(review): signature, the for_each_thread loop header, the
 * thruption-conditional guard around the per-thread resets, and the
 * closing braces are elided in this extract. */
575 struct thread* self = arch_os_get_current_thread(), *p;
576 boolean thruption = gc_dispatcher.thruption;
577 if (gc_dispatcher.th_stw_initiator != self) {
578 odxprint(misc,"Unmapper %p self %p",gc_dispatcher.th_stw_initiator,self);
/* Not the initiator: we must be the subordinate SUB-GC thread
 * handing control back to the initiator. */
579 gc_assert (gc_dispatcher.th_subgc == self);
580 if (--gc_dispatcher.stopped == 1) {
581 gc_dispatcher.th_subgc = NULL;
582 pthread_mutex_unlock(&gc_dispatcher.mx_subgc);
583 /* GC initiator may continue now */
584 pthread_mutex_unlock(thread_qrl(gc_dispatcher.th_stw_initiator));
589 gc_assert(gc_dispatcher.th_stw_initiator == self);
591 if (!--gc_dispatcher.stopped) {
/* Outermost start: clear per-thread pending flags, re-enable each
 * thread's csp page, then release the world. */
594 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,p);
595 SetTlSymbolValue(GC_PENDING,NIL,p);
598 #ifdef LISP_FEATURE_SB_THRUPTION
599 SymbolTlValue(THRUPTION_PENDING,p)!=T
601 1 /* trivially no thruption pending */
603 || SymbolTlValue(INTERRUPTS_ENABLED,p)!=T)
604 set_thread_csp_access(p,1);
606 pthread_mutex_unlock(&all_threads_lock);
607 /* Release everyone */
608 maybe_let_the_world_go();
613 /* in_race_p() -- return TRUE if no other thread is inside SUB-GC with
614 GC-PENDING :IN-PROGRESS. Used to prevent deadlock between manual
615 SUB-GC, auto-gc and thruption. */
/* NOTE(review): the doc comment's sense looks inverted relative to
 * the test below (a GC_PENDING that is neither T nor NIL — i.e.
 * :IN-PROGRESS — in another thread sets the race flag); the
 * signature, loop header, `race' flag and return are elided in this
 * extract, so confirm against upstream before relying on either
 * reading. */
616 static inline boolean
619 struct thread* self = arch_os_get_current_thread(), *p;
621 pthread_mutex_lock(&all_threads_lock);
624 SymbolTlValue(GC_PENDING,p)!=T &&
625 SymbolTlValue(GC_PENDING,p)!=NIL) {
630 pthread_mutex_unlock(&all_threads_lock);
/* On detecting a race, back out of initiator duties. */
633 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
634 maybe_let_the_world_go();
/* set_csp_from_context -- publish the interrupted context's stack
 * pointer as this thread's conservative-stack-pointer ("in C") flag,
 * after asserting it lies within the control stack.
 * NOTE(review): return type, braces, and the middle of the gc_assert
 * range check are elided in this extract. */
640 set_csp_from_context(struct thread *self, os_context_t *ctx)
642 void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
643 gc_assert((void **)self->control_stack_start
645 < (void **)self->control_stack_end);
646 *self->csp_around_foreign_call = (lispobj) sp;
/* thread_pitstop -- called from the safepoint trap while the thread
 * is in Lisp: park the thread for the current STW round, then either
 * perform a pending GC (if we can become initiator) or wait for the
 * initiator to release the world; finally drain pending thruptions.
 * NOTE(review): this extract elides the function's return type,
 * several if/else branches and closing braces (original gaps at
 * 654-656, 664, 668, 672-673, 676, 680-682, 693, 700-706, 709-711);
 * code kept byte-identical. */
650 thread_pitstop(os_context_t *ctxptr)
652 struct thread* self = arch_os_get_current_thread();
653 boolean inhibitor = (SymbolTlValue(GC_INHIBIT,self)==T);
655 odxprint(safepoints,"pitstop [%p]", ctxptr);
657 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
658 /* Free qrl to let know we're ready... */
659 WITH_STATE_SEM(self) {
660 pthread_mutex_unlock(thread_qrl(self));
661 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
662 pthread_mutex_lock(thread_qrl(self));
663 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
665 /* Enable FF-CSP recording (not hurt: will gc at pit-stop, and
666 pit-stop always waits for GC end) */
667 set_thread_csp_access(self,1);
669 if (self == gc_dispatcher.th_stw_initiator && gc_dispatcher.stopped) {
670 set_thread_csp_access(self,1);
671 check_pending_gc(ctxptr);
674 if ((SymbolTlValue(GC_PENDING,self)!=NIL) &&
675 maybe_become_stw_initiator(0) && !in_race_p()) {
/* We won the race to initiate: stop, collect, restart. */
677 set_thread_csp_access(self,1);
678 check_pending_gc(ctxptr);
679 gc_start_the_world();
681 /* An innocent thread which is not an initiator _and_ is
683 odxprint(safepoints,"pitstop yielding [%p]", ctxptr);
684 if (!set_thread_csp_access(self,1)) {
685 if (os_get_csp(self))
686 lose("thread_pitstop: would lose csp");
687 set_csp_from_context(self, ctxptr);
/* Park: release our qrl, wait for the GC round to finish on
 * mx_gcing, then clear the csp flag and retake the qrl. */
688 pthread_mutex_unlock(thread_qrl(self));
689 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
690 *self->csp_around_foreign_call = 0;
691 pthread_mutex_lock(thread_qrl(self));
692 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
694 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
695 set_thread_csp_access(self,1);
696 WITH_GC_AT_SAFEPOINTS_ONLY() {
697 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
698 #ifdef LISP_FEATURE_SB_THRUPTION
699 while (check_pending_thruptions(ctxptr))
707 #ifdef LISP_FEATURE_SB_THRUPTION
708 while(check_pending_thruptions(ctxptr));
/* thread_edge -- called from the safepoint trap at a Lisp<->C
 * transition ("edge").  If the csp flag is set we are LEAVING C code:
 * possibly run thruptions, then synchronize with any GC in progress
 * via mx_gcing.  Otherwise we are ENTERING C code: record the csp
 * from the context and park on mx_gcing / mx_gpunmapped depending on
 * GC_INHIBIT.  NOTE(review): return type, some braces and elided
 * statements (original gaps at 714, 730-734, 739-742, 746-747,
 * 757, 764-766) are missing from this extract. */
713 thread_edge(os_context_t *ctxptr)
715 struct thread *self = arch_os_get_current_thread();
716 set_thread_csp_access(self,1);
717 if (os_get_csp(self)) {
718 if (!self->pc_around_foreign_call)
719 return; /* trivialize */
720 odxprint(safepoints,"edge leaving [%p]", ctxptr);
721 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
722 #ifdef LISP_FEATURE_SB_THRUPTION
723 if (SymbolTlValue(THRUPTION_PENDING,self)==T &&
724 SymbolTlValue(INTERRUPTS_ENABLED,self)==T) {
725 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
726 set_thread_csp_access(self,1);
727 WITH_GC_AT_SAFEPOINTS_ONLY() {
728 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
729 while (check_pending_thruptions(ctxptr))
/* Wait out any in-progress GC before re-entering Lisp. */
735 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
736 odxprint(safepoints,"edge leaving [%p] took gcing", ctxptr);
737 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
738 odxprint(safepoints,"edge leaving [%p] released gcing", ctxptr);
743 odxprint(safepoints,"edge entering [%p]", ctxptr);
744 #ifdef LISP_FEATURE_SB_THRUPTION
745 while(check_pending_thruptions(ctxptr))
748 if (os_get_csp(self))
749 lose("thread_edge: would lose csp");
750 set_csp_from_context(self, ctxptr);
751 if (SymbolTlValue(GC_INHIBIT,self)!=T) {
752 pthread_mutex_unlock(thread_qrl(self));
753 pthread_mutex_lock(&gc_dispatcher.mx_gcing);
754 *self->csp_around_foreign_call = 0;
755 pthread_mutex_lock(thread_qrl(self));
756 pthread_mutex_unlock(&gc_dispatcher.mx_gcing);
758 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
759 pthread_mutex_unlock(thread_qrl(self));
760 pthread_mutex_lock(&gc_dispatcher.mx_gpunmapped);
761 *self->csp_around_foreign_call = 0;
762 pthread_mutex_lock(thread_qrl(self));
763 pthread_mutex_unlock(&gc_dispatcher.mx_gpunmapped);
769 /* thread_register_gc_trigger --
771 Called by GENCGC in each thread where GC_PENDING becomes T because
772 allocated memory size has crossed the threshold in
773 auto_gc_trigger. For the new collective GC sequence, its first call
774 marks a process-wide beginning of GC.
/* NOTE(review): return type and closing brace are elided in this
 * extract. */
777 thread_register_gc_trigger()
779 odxprint(misc, "/thread_register_gc_trigger");
780 struct thread* self = arch_os_get_current_thread();
781 /* This function should be called instead of former
782 set_pseudo_atomic_interrupted(), e.g. never with true
784 gc_assert(SymbolTlValue(GC_INHIBIT,self)!=T);
786 /* unmap GC page, signal other threads... */
787 maybe_become_stw_initiator(0);
792 #ifdef LISP_FEATURE_SB_THRUPTION
793 /* wake_thread(thread) -- ensure a thruption delivery to
/* wake_thread_posix -- deliver a thruption to the thread identified
 * by `os_thread': self-delivery runs the thruption inline; otherwise
 * become STW initiator (thruption mode) and signal the target if it
 * is parked in foreign code.  Returns 0 on success, -1 if the target
 * thread was not found.  NOTE(review): the `found' declaration,
 * `goto cleanup'-style jumps, and several braces are elided in this
 * extract (gaps at 798-799, 802, 809-812, 825-832, 837-838, 842-843,
 * 846, 848, 850-854, 857-858, 861). */
797 wake_thread_posix(os_thread_t os_thread)
800 struct thread *thread;
801 struct thread *self = arch_os_get_current_thread();
803 /* Must not and need not attempt to signal ourselves while we're the
805 if (self->os_thread == os_thread) {
806 SetTlSymbolValue(THRUPTION_PENDING,T,self);
807 WITH_GC_AT_SAFEPOINTS_ONLY()
808 while (check_pending_thruptions(0 /* ignore the sigmask */))
813 /* We are not in a signal handler here, so need to block signals
816 block_deferrable_signals(0, &oldset);
818 if (!maybe_become_stw_initiator(1) || in_race_p()) {
819 /* we are not able to wake the thread up, but the STW initiator
820 * will take care of it (kludge: unless it is in foreign code).
821 * Let's at least try to get our return value right. */
822 pthread_mutex_lock(&all_threads_lock);
823 for_each_thread (thread)
824 if (thread->os_thread == os_thread) {
828 pthread_mutex_unlock(&all_threads_lock);
833 /* we hold the all_threads lock */
834 for_each_thread (thread)
835 if (thread->os_thread == os_thread) {
836 /* it's still alive... */
839 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
840 if (SymbolTlValue(GC_PENDING,thread) == T
841 || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
844 if (os_get_csp(thread)) {
845 /* ... and in foreign code. Push it into a safety
847 int status = pthread_kill(os_thread, SIGPIPE);
849 lose("wake_thread_posix: pthread_kill failed with %d\n",
855 /* If it was alive but in Lisp, the pit stop takes care of thruptions. */
856 gc_start_the_world();
859 pthread_sigmask(SIG_SETMASK, &oldset, 0);
860 return found ? 0 : -1;
862 #endif /* LISP_FEATURE_SB_THRUPTION */
/* Trap-entry reporters: each logs then (in the full source) dispatches
 * to thread_edge/thread_pitstop as appropriate.  NOTE(review): the
 * signatures' return types, bodies beyond the FSHOW_SIGNAL logging,
 * and braces are elided in this extract. */
865 thread_in_safety_transition(os_context_t *ctx)
867 FSHOW_SIGNAL((stderr, "thread_in_safety_transition\n"));
872 thread_in_lisp_raised(os_context_t *ctx)
874 FSHOW_SIGNAL((stderr, "thread_in_lisp_raised\n"));
879 thread_interrupted(os_context_t *ctx)
881 FSHOW_SIGNAL((stderr, "thread_interrupted\n"));
/* os_get_csp -- read th's conservative-stack-pointer flag word;
 * non-NULL means the thread is in C (foreign) code. */
886 os_get_csp(struct thread* th)
888 FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
890 th->csp_around_foreign_call,
891 *(void***)th->csp_around_foreign_call,
892 th->control_stack_start,
893 th->control_stack_end));
894 return *(void***)th->csp_around_foreign_call;
898 #ifndef LISP_FEATURE_WIN32
900 # ifdef LISP_FEATURE_SB_THRUPTION
/* thruption_handler -- signal handler for the wakeup signal (SIGPIPE,
 * per wake_thread_posix): run the thruption immediately only if we
 * are in C code; in Lisp, defer to the next safepoint.
 * NOTE(review): return type, braces and the early `return' after the
 * in-Lisp comment are elided in this extract. */
902 thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
904 struct thread *self = arch_os_get_current_thread();
906 if (!os_get_csp(self))
907 /* In Lisp code. Do not run thruptions asynchronously. The
908 * next safepoint will take care of it. */
911 /* In C code. As a rule, we assume that running thruptions is OK. */
912 fake_foreign_function_call(ctx);
913 thread_in_safety_transition(ctx);
914 undo_fake_foreign_function_call(ctx);
918 /* Designed to be of the same type as call_into_lisp. Ignores its
/* These two trampolines are installed by arrange_return_to_c_function
 * (see handle_safepoint_violation below); each raises a breakpoint
 * trap whose trailing byte encodes the trap kind, which the compile-
 * time checks pin to the values baked into the asm strings. */
921 handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
923 #if trap_GlobalSafepoint != 0x1a
924 # error trap_GlobalSafepoint mismatch
926 asm("int3; .byte 0x1a;");
931 handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
933 #if trap_CspSafepoint != 0x1b
934 # error trap_CspSafepoint mismatch
936 asm("int3; .byte 0x1b;");
/* handle_safepoint_violation -- fault handler hook: if the faulting
 * address is the global safepoint page or this thread's csp page,
 * arrange a return into the matching trampoline above (we are on the
 * altstack and must not run Lisp code directly) and report handled;
 * otherwise it is not a safepoint fault.  NOTE(review): return type
 * and the `return 1;`/`return 0;` statements are elided in this
 * extract. */
941 handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
943 FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
945 GC_SAFEPOINT_PAGE_ADDR,
946 arch_os_get_current_thread()->csp_around_foreign_call));
948 struct thread *self = arch_os_get_current_thread();
950 if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
951 /* We're on the altstack and don't want to run Lisp code. */
952 arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
956 if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
957 arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
961 /* not a safepoint */
964 #endif /* LISP_FEATURE_WIN32 */
/* callback_wrapper_trampoline -- entry point for alien callbacks:
 * refuses non-Lisp threads, then invokes the Lisp-side
 * ENTER_ALIEN_CALLBACK with GC confined to safepoints.
 * NOTE(review): return type, the `if (!th)'-style guard around the
 * lose() call, and braces are elided in this extract. */
967 callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
969 struct thread* th = arch_os_get_current_thread();
971 lose("callback invoked in non-lisp thread. Sorry, that is not supported yet.");
973 WITH_GC_AT_SAFEPOINTS_ONLY()
974 funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
977 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */