2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
17 #ifndef LISP_FEATURE_WIN32
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
36 #include "target-arch-os.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51 * definition goes here. Fixme: (Why) don't these work for Windows?
/* Fragments of the safepoint-page helpers (function headers are missing
 * from this extract).  The 4-byte GC safepoint page is allocated once,
 * then toggled between readable (GC not requested) and inaccessible
 * (every thread traps at its next safepoint poll). */
56 os_validate(GC_SAFEPOINT_PAGE_ADDR, 4);
/* map_gc_page: make the safepoint page readable+writable again. */
62 odxprint(misc, "map_gc_page");
63 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
65 OS_VM_PROT_READ | OS_VM_PROT_WRITE);
/* unmap_gc_page: revoke all access so safepoint polls fault. */
71 odxprint(misc, "unmap_gc_page");
72 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
74 #endif /* !LISP_FEATURE_WIN32 */
76 /* Planned state progressions:
80 * unmap_gc_page(). No blockers (GC_NONE can be left at any moment).
84 * happens when a master thread enters its trap.
86 * The only blocker for flight mode is the master thread itself
87 * (GC_FLIGHT can't be left until the master thread traps).
91 * happens after each (other) thread is notified, i.e. it will
92 * eventually stop (already stopped). map_gc_page().
94 * Each thread with empty CSP disagrees to leave GC_MESSAGE phase.
98 * happens when every gc-inhibitor comes to completion (that's
99 * normally pending interrupt trap).
101 * NB gc_stop_the_world, if it happens in non-master thread, "takes
102 * over" as a master, also deregistering itself as a blocker
103 * (i.e. it's ready to leave GC_INVOKED, but now it objects to
104 * leaving GC_COLLECT; this "usurpation" doesn't require any change
105 * to GC_COLLECT counter: for the counter, it's immaterial _which_
106 * thread is waiting).
110 * happens at gc_start_the_world (that should always happen in the
113 * Any thread waiting until GC end now continues.
/* Global GC coordination state, protected by `lock'.  All fields below are
 * only touched between gc_state_lock()/gc_state_unlock() as far as this
 * extract shows. */
116 /* Flag: conditions are initialized */
119 /* Per-process lock for gc_state */
120 pthread_mutex_t lock;
122 /* Conditions: one per phase */
123 pthread_cond_t phase_cond[GC_NPHASES];
125 /* For each [current or future] phase, a number of threads not yet ready to
/* NOTE(review): the comment above is truncated in this extract; it ends
 * "...ready to leave that phase" in context. */
127 int phase_wait[GC_NPHASES];
129 /* Master thread controlling the topmost stop/gc/start sequence */
130 struct thread* master;
/* Thread that currently performs the collection (see thread_gc_phase). */
131 struct thread* collector;
133 /* Current GC phase */
/* Static initializer: only the mutex needs non-zero initialization; the
 * condition variables are created lazily in gc_state_lock(). */
137 static struct gc_state gc_state = {
138 .lock = PTHREAD_MUTEX_INITIALIZER,
/* gc_state_lock (body fragment; header missing from extract): acquire the
 * global gc_state mutex, assert no master is already recorded, record the
 * calling thread as master, and lazily initialize the per-phase condition
 * variables on first use. */
/* NOTE(review): `gc_state.lock' (a pthread_mutex_t, not a pointer) is passed
 * to a %p format -- looks questionable; confirm odxprint's expectations. */
145 odxprint(safepoints,"GC state [%p] to be locked",gc_state.lock);
146 gc_assert(0==pthread_mutex_lock(&gc_state.lock));
/* A non-NULL master here means the lock was re-entered or leaked: report
 * loudly before the assertion below fires. */
147 if (gc_state.master) {
148 fprintf(stderr,"GC state lock glitch [%p] in thread %p phase %d\n",
149 gc_state.master,arch_os_get_current_thread(),gc_state.phase);
150 odxprint(safepoints,"GC state lock glitch [%p]",gc_state.master);
152 gc_assert(!gc_state.master);
153 gc_state.master = arch_os_get_current_thread();
/* One-time creation of the phase condition variables, done under the lock. */
154 if (!gc_state.initialized) {
156 for (i=GC_NONE; i<GC_NPHASES; ++i)
157 pthread_cond_init(&gc_state.phase_cond[i],NULL);
158 gc_state.initialized = 1;
160 odxprint(safepoints,"GC state [%p] locked in phase %d",gc_state.lock, gc_state.phase);
/* gc_state_unlock (body fragment): the inverse of gc_state_lock -- verify the
 * caller is the recorded master, clear the master slot, release the mutex. */
166 odxprint(safepoints,"GC state to be unlocked in phase %d",gc_state.phase);
167 gc_assert(arch_os_get_current_thread()==gc_state.master);
168 gc_state.master = NULL;
169 gc_assert(0==pthread_mutex_unlock(&gc_state.lock));
170 odxprint(safepoints,"%s","GC state unlocked");
/* gc_state_wait: block (with the gc_state lock held) until the global phase
 * reaches `phase'.  Mastership is dropped across pthread_cond_wait (which
 * releases the mutex) and re-acquired afterwards.  Special case: waiting for
 * GC_QUIET is also satisfied by any later phase (gc_state.phase > GC_QUIET). */
174 gc_state_wait(gc_phase_t phase)
176 struct thread* self = arch_os_get_current_thread();
177 odxprint(safepoints,"Waiting for %d -> %d [%d holders]",
178 gc_state.phase,phase,gc_state.phase_wait[gc_state.phase]);
179 gc_assert(gc_state.master == self);
180 gc_state.master = NULL;
181 while(gc_state.phase != phase && !(phase == GC_QUIET && (gc_state.phase > GC_QUIET)))
182 pthread_cond_wait(&gc_state.phase_cond[phase],&gc_state.lock);
/* No one else may have claimed mastership while we slept. */
183 gc_assert(gc_state.master == NULL);
184 gc_state.master = self;
/* set_csp_from_context: publish this thread's conservative-stack-pointer
 * (csp) word from an interrupt context.  On conservative-GC platforms the
 * live stack pointer is taken from the context; on precise-GC platforms a
 * dummy non-zero value suffices (csp is used only as a boolean there). */
188 set_csp_from_context(struct thread *self, os_context_t *ctx)
190 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
191 void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
192 /* On POSIX platforms, it is sufficient to investigate only the part
193 * of the stack that was live before the interrupt, because in
194 * addition, we consider interrupt contexts explicitly. On Windows,
195 * however, we do not keep an explicit stack of exception contexts,
196 * and instead arrange for the conservative stack scan to also cover
197 * the context implicitly. The obvious way to do that is to start
198 * at the context itself: */
199 #ifdef LISP_FEATURE_WIN32
200 gc_assert((void **) ctx < sp);
/* Sanity-check the thread's control stack bounds.  NOTE(review): the
 * middle of this assertion (comparison against sp) is missing from this
 * extract. */
203 gc_assert((void **)self->control_stack_start
205 < (void **)self->control_stack_end);
207 /* Note that the exact value doesn't matter much here, since
208 * platforms with precise GC use get_csp() only as a boolean -- the
209 * precise GC already keeps track of the stack pointer itself. */
210 void **sp = (void **) 0xEEEEEEEE;
212 *self->csp_around_foreign_call = (lispobj) sp;
/* gc_phase_next: successor in the circular phase progression
 * (wraps from the last phase back to GC_NONE). */
216 static inline gc_phase_t gc_phase_next(gc_phase_t old) {
217 return (old+1) % GC_NPHASES;
/* thread_gc_phase: the phase up to which thread `p' may be advanced without
 * its cooperation.  A thread inhibiting GC may go to GC_INVOKED; a thread
 * with GC already in progress goes to GC_QUIET (or GC_NONE if a different
 * thread is the collector); otherwise GC_NONE. */
220 static inline gc_phase_t thread_gc_phase(struct thread* p)
222 boolean inhibit = (SymbolTlValue(GC_INHIBIT,p)==T)||
223 (SymbolTlValue(IN_WITHOUT_GCING,p)==IN_WITHOUT_GCING);
/* NOTE(review): the declaration line of `inprogress' is missing from this
 * extract; the condition below is its continuation (GC_PENDING is neither
 * T nor NIL, i.e. :in-progress). */
226 (SymbolTlValue(GC_PENDING,p)!=T&& SymbolTlValue(GC_PENDING,p)!=NIL);
229 inprogress ? (gc_state.collector && (gc_state.collector != p)
230 ? GC_NONE : GC_QUIET)
231 : (inhibit ? GC_INVOKED : GC_NONE);
/* thread_gc_promote: move thread `p's blocking contribution from phase `old'
 * to phase `cur' by adjusting the phase_wait counters, and mark the thread
 * as having a stop-for-GC pending.  NOTE(review): lines guarding the
 * `old' decrement and closing braces are missing from this extract. */
234 static inline void thread_gc_promote(struct thread* p, gc_phase_t cur, gc_phase_t old) {
236 gc_state.phase_wait[old]--;
237 if (cur != GC_NONE) {
238 gc_state.phase_wait[cur]++;
241 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
244 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
245 flag (Lisp Stack Top) of the thread `p'. The flag may be modified
246 if `writable' is true.
248 Return true if there is a non-null value in the flag.
250 When a thread enters C code or leaves it, a per-thread location is
251 modified. That machine word serves as a not-in-Lisp flag; for
252 convenience, when in C, it's filled with a topmost stack location
253 that may contain Lisp data. When thread is in Lisp, the word
256 GENCGC uses each thread's flag value for conservative garbage collection.
258 There is a full VM page reserved for this word; page permissions
259 are switched to read-only for race-free examine + wait + use
261 static inline boolean
262 set_thread_csp_access(struct thread* p, boolean writable)
/* Read access is always retained; only write access is toggled. */
264 os_protect((os_vm_address_t) p->csp_around_foreign_call,
265 THREAD_CSP_PAGE_SIZE,
266 writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
267 : (OS_VM_PROT_READ))
268 return !!*p->csp_around_foreign_call;
/* gc_notify_early: under all_threads_lock, make every other thread's csp
 * page read-only and account for it in the phase_wait counters -- threads
 * currently in Lisp (csp == 0) block the current phase; threads in foreign
 * code are promoted per thread_gc_phase().  NOTE(review): the loop header
 * and the self-skip test are missing from this extract. */
271 static inline void gc_notify_early()
273 struct thread *self = arch_os_get_current_thread(), *p;
274 odxprint(safepoints,"%s","global notification");
275 pthread_mutex_lock(&all_threads_lock);
279 odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
280 if (!set_thread_csp_access(p,0)) {
281 thread_gc_promote(p, gc_state.phase, GC_NONE);
283 thread_gc_promote(p, thread_gc_phase(p), GC_NONE);
286 pthread_mutex_unlock(&all_threads_lock);
/* gc_notify_final: like gc_notify_early but for the final round -- resets
 * the current phase's wait count, then re-counts only threads still in Lisp
 * (csp == 0), skipping the collector itself.  NOTE(review): the loop header
 * is missing from this extract. */
289 static inline void gc_notify_final()
292 odxprint(safepoints,"%s","global notification");
293 gc_state.phase_wait[gc_state.phase]=0;
294 pthread_mutex_lock(&all_threads_lock);
296 if (p == gc_state.collector)
298 odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
299 if (!set_thread_csp_access(p,0)) {
300 thread_gc_promote(p, gc_state.phase, GC_NONE);
303 pthread_mutex_unlock(&all_threads_lock);
/* gc_done: after collection, restore write access to every thread's csp
 * page and, if this thread inhibits GC, clear other threads' GC_PENDING so
 * they don't immediately re-trigger. */
306 static inline void gc_done()
308 struct thread *self = arch_os_get_current_thread(), *p;
309 boolean inhibit = (SymbolTlValue(GC_INHIBIT,self)==T);
311 odxprint(safepoints,"%s","global denotification");
312 pthread_mutex_lock(&all_threads_lock);
314 if (inhibit && (SymbolTlValue(GC_PENDING,p)==T))
315 SetTlSymbolValue(GC_PENDING,NIL,p);
316 set_thread_csp_access(p,1);
318 pthread_mutex_unlock(&all_threads_lock);
/* gc_handle_phase: per-phase entry actions, dispatched on the phase just
 * entered.  NOTE(review): all of the switch's case arms are missing from
 * this extract; nothing beyond the dispatch itself can be documented here. */
321 static inline void gc_handle_phase()
323 odxprint(safepoints,"Entering phase %d",gc_state.phase);
324 switch (gc_state.phase) {
350 /* become ready to leave the <old> phase, but unready to leave the <new> phase;
351 * `old' can be GC_NONE, it means this thread wasn't blocking any state. `cur'
352 * can be GC_NONE, it means this thread wouldn't block GC_NONE, but still wait
354 static inline void gc_advance(gc_phase_t cur, gc_phase_t old) {
355 odxprint(safepoints,"GC advance request %d -> %d in phase %d",old,cur,gc_state.phase);
/* Already there / already past: nothing to do.  NOTE(review): the bodies
 * of these two early-out tests are missing from this extract. */
358 if (cur == gc_state.phase)
360 if (old < gc_state.phase)
/* Transfer our blocking contribution from `old' to `cur'. */
362 if (old != GC_NONE) {
363 gc_state.phase_wait[old]--;
364 odxprint(safepoints,"%d holders of phase %d without me",gc_state.phase_wait[old],old);
366 if (cur != GC_NONE) {
367 gc_state.phase_wait[cur]++;
368 odxprint(safepoints,"%d holders of phase %d with me",gc_state.phase_wait[cur],cur);
370 /* roll forth as long as there's no waiters */
371 while (gc_state.phase_wait[gc_state.phase]==0
372 && gc_state.phase != cur) {
373 gc_state.phase = gc_phase_next(gc_state.phase);
374 odxprint(safepoints,"no blockers, direct advance to %d",gc_state.phase);
/* Wake everyone waiting on the phase we just reached. */
376 pthread_cond_broadcast(&gc_state.phase_cond[gc_state.phase]);
378 odxprint(safepoints,"going to wait for %d threads",gc_state.phase_wait[gc_state.phase]);
/* thread_register_gc_trigger: called when this thread wants a GC.  If no GC
 * sequence is underway and this thread isn't already inside a safepoint or
 * otherwise blocking, kick the state machine into GC_FLIGHT. */
383 thread_register_gc_trigger()
385 odxprint(misc, "/thread_register_gc_trigger");
386 struct thread *self = arch_os_get_current_thread();
388 if (gc_state.phase == GC_NONE &&
389 SymbolTlValue(IN_SAFEPOINT,self)!=T &&
390 thread_gc_phase(self)==GC_NONE) {
391 gc_advance(GC_FLIGHT,GC_NONE);
399 /* Thread may gc if all of these are true:
400 * 1) GC_INHIBIT == NIL (outside of protected part of without-gcing)
401 * 2) GC_PENDING != :in-progress (outside of recursion protection)
402 * Note that we are in a safepoint here, which is always outside of PA. */
/* NOTE(review): the function header line is missing from this extract. */
404 struct thread *self = arch_os_get_current_thread();
405 return (SymbolValue(GC_INHIBIT, self) == NIL
406 && (SymbolTlValue(GC_PENDING, self) == T ||
407 SymbolTlValue(GC_PENDING, self) == NIL));
410 #ifdef LISP_FEATURE_SB_THRUPTION
/* thread_may_thrupt: true when it is safe to run an asynchronous thruption
 * in this thread right now; every test below corresponds to a numbered
 * condition in the comment.  NOTE(review): several early-return bodies and
 * the final return are missing from this extract. */
412 thread_may_thrupt(os_context_t *ctx)
414 struct thread * self = arch_os_get_current_thread();
415 /* Thread may be interrupted if all of these are true:
416 * 1) Deferrables are unblocked in the context of the signal that
417 * went into the safepoint. -- Otherwise the surrounding code
418 * didn't want to be interrupted by a signal, so presumably it didn't
419 * want to be INTERRUPT-THREADed either.
420 * (See interrupt_handle_pending for an exception.)
421 * 2) On POSIX: There is no pending signal. This is important even
422 * after checking the sigmask, since we could be in the
423 * handle_pending trap following re-enabling of interrupts.
424 * Signals are unblocked in that case, but the signal is still
425 * pending; we want to run GC before handling the signal and
426 * therefore entered this safepoint. But the thruption would call
427 * ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
428 * trap, leading to recursion.
429 * 3) INTERRUPTS_ENABLED is non-nil.
430 * 4) No GC pending; it takes precedence.
431 * Note that we are in a safepoint here, which is always outside of PA. */
433 if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
436 if (SymbolValue(GC_PENDING, self) != NIL)
439 if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
442 #ifdef LISP_FEATURE_WIN32
443 if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
446 /* ctx is NULL if the caller wants to ignore the sigmask. */
447 if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
449 if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
453 if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
454 /* This special case prevents TERMINATE-THREAD from hitting
455 * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
456 * deferrables are already unblocked there. Further
457 * investigation may be in order. */
463 // returns 0 if skipped, 1 otherwise
/* check_pending_thruptions: if a thruption may run (thread_may_thrupt) and
 * THRUPTION_PENDING is set, clear the flag and run RUN_INTERRUPTION with
 * deferrable signals blocked, faking a foreign-function-call frame when
 * needed.  NOTE(review): several early-return bodies and closing braces
 * are missing from this extract. */
465 check_pending_thruptions(os_context_t *ctx)
467 struct thread *p = arch_os_get_current_thread();
469 #ifdef LISP_FEATURE_WIN32
470 pthread_t pself = p->os_thread;
472 /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
473 * in the self-kill case; instead we do it here while also clearing the
/* Atomically consume the pending-signal bits. */
475 if (pself->pending_signal_set)
476 if (__sync_fetch_and_and(&pself->pending_signal_set,0))
477 SetSymbolValue(THRUPTION_PENDING, T, p);
480 if (!thread_may_thrupt(ctx))
482 if (SymbolValue(THRUPTION_PENDING, p) == NIL)
484 SetSymbolValue(THRUPTION_PENDING, NIL, p);
486 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
487 int was_in_lisp = !foreign_function_call_active_p(p);
490 lose("self-kill bug");
491 fake_foreign_function_call(ctx);
495 #ifdef LISP_FEATURE_WIN32
/* Windows: emulate signal blocking via the thread's blocked_signal_set. */
496 oldset = pself->blocked_signal_set;
497 pself->blocked_signal_set = deferrable_sigset;
498 if (ctx) fake_foreign_function_call(ctx);
501 block_deferrable_signals(0, &oldset);
504 funcall0(StaticSymbolFunction(RUN_INTERRUPTION));
506 #ifdef LISP_FEATURE_WIN32
507 if (ctx) undo_fake_foreign_function_call(ctx);
508 pself->blocked_signal_set = oldset;
509 if (ctx) ctx->sigmask = oldset;
511 pthread_sigmask(SIG_SETMASK, &oldset, 0);
514 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
516 undo_fake_foreign_function_call(ctx);
/* on_stack_p: is `esp' within [control_stack_start, control_stack_end)?
 * NOTE(review): the middle comparison line is missing from this extract. */
524 on_stack_p(struct thread *th, void *esp)
526 return (void *)th->control_stack_start
528 < (void *)th->control_stack_end;
531 #ifndef LISP_FEATURE_WIN32
532 /* (Technically, we still allocate an altstack even on Windows. Since
533 * Windows has a contiguous stack with an automatic guard page of
534 * user-configurable size instead of an alternative stack though, the
535 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
/* on_altstack_p: is `esp' within this thread's signal altstack, which
 * lives right after the thread's dynamic values block? */
537 on_altstack_p(struct thread *th, void *esp)
539 void *start = (void *)th+dynamic_values_bytes;
540 void *end = (char *)start + 32*SIGSTKSZ;
541 return start <= esp && esp < end;
/* assert_on_stack: die loudly unless `esp' points into the thread's control
 * stack, distinguishing altstack pointers from entirely bogus ones. */
546 assert_on_stack(struct thread *th, void *esp)
548 if (on_stack_p(th, esp))
550 #ifndef LISP_FEATURE_WIN32
551 if (on_altstack_p(th, esp))
552 lose("thread %p: esp on altstack: %p", th, esp);
554 lose("thread %p: bogus esp: %p", th, esp);
557 // returns 0 if skipped, 1 otherwise
/* check_pending_gc: at a safepoint, run SUB_GC if a GC is pending and
 * allowed.  IN_SAFEPOINT guards against recursive entry; deferrable
 * signals are blocked around the collection, and POST_GC runs afterwards
 * when interrupts are (or may be) enabled. */
559 check_pending_gc(os_context_t *ctx)
561 odxprint(misc, "check_pending_gc");
562 struct thread * self = arch_os_get_current_thread();
/* Clear a stale IN_SAFEPOINT left from a completed collection. */
566 if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
567 ((SymbolValue(GC_INHIBIT,self) == NIL) &&
568 (SymbolValue(GC_PENDING,self) == NIL))) {
569 SetSymbolValue(IN_SAFEPOINT,NIL,self);
571 if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
572 if ((SymbolTlValue(GC_PENDING, self) == T)) {
573 lispobj gc_happened = NIL;
575 bind_variable(IN_SAFEPOINT,T,self);
576 block_deferrable_signals(NULL,&sigset);
/* Re-check under blocked signals: another thread may have collected. */
577 if(SymbolTlValue(GC_PENDING,self)==T)
578 gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
579 unbind_variable(IN_SAFEPOINT,self);
580 thread_sigmask(SIG_SETMASK,&sigset,NULL);
581 if (gc_happened == T) {
582 /* POST_GC wants to enable interrupts */
583 if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
584 SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
585 odxprint(misc, "going to call POST_GC");
586 funcall0(StaticSymbolFunction(POST_GC));
/* thread_in_lisp_raised: safepoint trap handler for a thread that was
 * running Lisp code.  Either usurps the GC_FLIGHT phase to collect itself,
 * or publishes its csp and waits out the phase appropriate for it,
 * re-checking pending GC/thruptions on the way out.  NOTE(review): several
 * closing braces and else-arms are missing from this extract. */
596 void thread_in_lisp_raised(os_context_t *ctxptr)
598 struct thread *self = arch_os_get_current_thread();
600 odxprint(safepoints,"%s","thread_in_lisp_raised");
/* Fast path: we triggered the GC ourselves and nothing blocks us -- drive
 * GC_FLIGHT -> GC_QUIET directly and perform the collection here. */
603 if (gc_state.phase == GC_FLIGHT &&
604 SymbolTlValue(GC_PENDING,self)==T &&
605 thread_gc_phase(self)==GC_NONE &&
606 thread_may_gc() && SymbolTlValue(IN_SAFEPOINT,self)!=T) {
607 set_csp_from_context(self, ctxptr);
608 gc_advance(GC_QUIET,GC_FLIGHT);
609 set_thread_csp_access(self,1);
610 if (gc_state.collector) {
611 gc_advance(GC_NONE,GC_QUIET);
613 *self->csp_around_foreign_call = 0;
614 SetTlSymbolValue(GC_PENDING,T,self);
617 check_pending_gc(ctxptr);
618 #ifdef LISP_FEATURE_SB_THRUPTION
619 while(check_pending_thruptions(ctxptr));
/* Slow path: some other thread is master; wait for GC_MESSAGE, then either
 * cooperate (publish csp, wait for GC to finish) or block as an inhibitor. */
623 if (gc_state.phase == GC_FLIGHT) {
624 gc_state_wait(GC_MESSAGE);
626 phase = thread_gc_phase(self);
627 if (phase == GC_NONE) {
628 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
629 set_thread_csp_access(self,1);
630 set_csp_from_context(self, ctxptr);
631 if (gc_state.phase <= GC_SETTLED)
632 gc_advance(phase,gc_state.phase);
634 gc_state_wait(phase);
635 *self->csp_around_foreign_call = 0;
637 check_pending_gc(ctxptr);
638 #ifdef LISP_FEATURE_SB_THRUPTION
639 while(check_pending_thruptions(ctxptr));
642 gc_advance(phase,gc_state.phase);
643 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
/* thread_in_safety_transition: safepoint trap handler for a thread that was
 * in (or entering) foreign code.  If its csp is already published, just wait
 * for its phase; otherwise behave much like thread_in_lisp_raised's slow
 * path.  NOTE(review): else-arms and closing braces are missing here. */
648 void thread_in_safety_transition(os_context_t *ctxptr)
650 struct thread *self = arch_os_get_current_thread();
652 odxprint(safepoints,"%s","GC safety transition");
654 if (set_thread_csp_access(self,1)) {
655 gc_state_wait(thread_gc_phase(self));
657 #ifdef LISP_FEATURE_SB_THRUPTION
658 while(check_pending_thruptions(ctxptr));
661 gc_phase_t phase = thread_gc_phase(self);
662 if (phase == GC_NONE) {
663 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
664 set_csp_from_context(self, ctxptr);
665 if (gc_state.phase <= GC_SETTLED)
666 gc_advance(phase,gc_state.phase);
668 gc_state_wait(phase);
669 *self->csp_around_foreign_call = 0;
671 gc_advance(phase,gc_state.phase);
672 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
/* thread_interrupted: pending-interrupt trap entry.  Dispatches to the
 * appropriate handler depending on whether a GC sequence is underway and
 * whether this thread's csp is already published, then drains pending
 * GC and thruptions. */
678 void thread_interrupted(os_context_t *ctxptr)
680 struct thread *self = arch_os_get_current_thread();
682 odxprint(safepoints,"%s","pending interrupt trap");
684 if (gc_state.phase != GC_NONE) {
685 if (set_thread_csp_access(self,1)) {
687 thread_in_safety_transition(ctxptr);
690 thread_in_lisp_raised(ctxptr);
695 check_pending_gc(ctxptr);
696 #ifdef LISP_FEATURE_SB_THRUPTION
697 while(check_pending_thruptions(ctxptr));
/* gc_stop_the_world (body fragment; header missing from extract): become the
 * collector, hold GC_QUIET, drive/await the state machine to GC_QUIET, then
 * advance to GC_COLLECT while keeping a hold on GC_QUIET for the eventual
 * restart.  NOTE(review): the case labels of the switch are missing. */
704 struct thread* self = arch_os_get_current_thread();
705 odxprint(safepoints, "stop the world");
707 gc_state.collector = self;
708 gc_state.phase_wait[GC_QUIET]++;
710 switch(gc_state.phase) {
712 gc_advance(GC_QUIET,gc_state.phase);
716 gc_state_wait(GC_QUIET);
718 gc_state.phase_wait[GC_QUIET]=1;
719 gc_advance(GC_COLLECT,GC_QUIET);
724 lose("Stopping the world in unexpected state %d",gc_state.phase);
727 set_thread_csp_access(self,1);
729 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
/* gc_start_the_world: release collector status and advance the state
 * machine from GC_COLLECT back to GC_NONE, waking all waiting threads.
 * IN_WITHOUT_GCING is set to its marker value for the current thread. */
733 void gc_start_the_world()
735 odxprint(safepoints,"%s","start the world");
737 gc_state.collector = NULL;
738 SetSymbolValue(IN_WITHOUT_GCING,IN_WITHOUT_GCING,
739 arch_os_get_current_thread());
740 gc_advance(GC_NONE,GC_COLLECT);
745 #ifdef LISP_FEATURE_SB_THRUPTION
746 /* wake_thread(thread) -- ensure a thruption delivery to
749 # ifdef LISP_FEATURE_WIN32
/* wake_thread_io: nudge a thread that may be blocked in I/O -- signal its
 * private event and interrupt any interruptible Windows I/O. */
752 wake_thread_io(struct thread * thread)
754 SetEvent(thread->private_events.events[1]);
755 win32_maybe_interrupt_io(thread);
/* wake_thread_win32: deliver a thruption to `thread' on Windows.  Sets
 * THRUPTION_PENDING, wakes the target from I/O waits, and if no GC sequence
 * is active, pulses the state machine through GC_INVOKED so the target hits
 * a safepoint.  NOTE(review): early-return bodies and closing braces are
 * missing from this extract; locking structure cannot be fully verified. */
759 wake_thread_win32(struct thread *thread)
761 struct thread *self = arch_os_get_current_thread();
763 wake_thread_io(thread);
765 if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
768 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
/* A thread already due to stop for GC will reach a safepoint anyway. */
770 if ((SymbolTlValue(GC_PENDING,thread)==T)||
771 (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
774 wake_thread_io(thread);
775 pthread_mutex_unlock(&all_threads_lock);
778 if (gc_state.phase == GC_NONE) {
779 gc_advance(GC_INVOKED,GC_NONE);
780 gc_advance(GC_NONE,GC_INVOKED);
784 pthread_mutex_lock(&all_threads_lock);
/* wake_thread_posix: deliver a thruption to the thread identified by
 * `os_thread'.  Self-delivery is handled synchronously; otherwise, with
 * deferrable signals blocked, either actively interrupt a thread running
 * foreign code (via GC_INVOKED + SIGPIPE) or passively set the pending flag
 * for the next safepoint.  Returns 0 if the thread was found, -1 otherwise.
 * NOTE(review): some braces and the gc-state lock/unlock lines are missing
 * from this extract. */
789 wake_thread_posix(os_thread_t os_thread)
792 struct thread *thread;
793 struct thread *self = arch_os_get_current_thread();
795 /* Must not and need not attempt to signal ourselves while we're the
797 if (self->os_thread == os_thread) {
798 SetTlSymbolValue(THRUPTION_PENDING,T,self);
799 WITH_GC_AT_SAFEPOINTS_ONLY()
800 while (check_pending_thruptions(0 /* ignore the sigmask */))
805 /* We are not in a signal handler here, so need to block signals
808 block_deferrable_signals(0, &oldset);
811 if (gc_state.phase == GC_NONE) {
812 odxprint(safepoints, "wake_thread_posix: invoking");
813 gc_advance(GC_INVOKED,GC_NONE);
815 /* only if in foreign code, notify using signal */
816 pthread_mutex_lock(&all_threads_lock);
817 for_each_thread (thread)
818 if (thread->os_thread == os_thread) {
819 /* it's still alive... */
822 odxprint(safepoints, "wake_thread_posix: found");
823 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
/* Already stopping for GC: it will reach a safepoint by itself. */
824 if (SymbolTlValue(GC_PENDING,thread) == T
825 || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
/* Non-zero csp means the target is in foreign code: push it into a
 * safety transition with a signal. */
828 if (os_get_csp(thread)) {
829 odxprint(safepoints, "wake_thread_posix: kill");
830 /* ... and in foreign code. Push it into a safety
832 int status = pthread_kill(os_thread, SIGPIPE);
834 lose("wake_thread_posix: pthread_kill failed with %d\n",
839 pthread_mutex_unlock(&all_threads_lock);
841 gc_advance(GC_NONE,GC_INVOKED);
843 odxprint(safepoints, "wake_thread_posix: passive");
844 /* We are not able to wake the thread up actively, but maybe
845 * some other thread will take care of it. Kludge: Unless it is
846 * in foreign code. Let's at least try to get our return value
848 pthread_mutex_lock(&all_threads_lock);
849 for_each_thread (thread)
850 if (thread->os_thread == os_thread) {
851 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
855 pthread_mutex_unlock(&all_threads_lock);
859 odxprint(safepoints, "wake_thread_posix leaving, found=%d", found);
860 pthread_sigmask(SIG_SETMASK, &oldset, 0);
861 return found ? 0 : -1;
863 #endif /* !LISP_FEATURE_WIN32 */
864 #endif /* LISP_FEATURE_SB_THRUPTION */
/* os_get_csp: read a thread's published conservative stack pointer; NULL
 * (0) means the thread is currently running Lisp code. */
867 os_get_csp(struct thread* th)
869 FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
871 th->csp_around_foreign_call,
872 *(void***)th->csp_around_foreign_call,
873 th->control_stack_start,
874 th->control_stack_end));
875 return *(void***)th->csp_around_foreign_call;
879 #ifndef LISP_FEATURE_WIN32
881 # ifdef LISP_FEATURE_SB_THRUPTION
/* thruption_handler: SIGPIPE handler used to deliver thruptions.  Only acts
 * when the thread is in C code (csp published); temporarily clears the csp
 * word and reuses thread_in_lisp_raised to process the thruption, then
 * restores it.  NOTE(review): the in-Lisp early return is missing from this
 * extract. */
883 thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
885 struct thread *self = arch_os_get_current_thread();
887 void *transition_sp = os_get_csp(self);
889 /* In Lisp code. Do not run thruptions asynchronously. The
890 * next safepoint will take care of it. */
893 #ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
894 if (!foreign_function_call_active_p(self))
895 lose("csp && !ffca");
898 /* In C code. As a rule, we assume that running thruptions is OK. */
899 *self->csp_around_foreign_call = 0;
900 thread_in_lisp_raised(ctx);
901 *self->csp_around_foreign_call = transition_sp;
905 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
907 /* Designed to be of the same type as call_into_lisp. Ignores its
/* Emits the global-safepoint breakpoint trap inline; the trap byte must
 * match trap_GlobalSafepoint, enforced at compile time. */
910 handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
912 #if trap_GlobalSafepoint != 0x1a
913 # error trap_GlobalSafepoint mismatch
915 asm("int3; .byte 0x1a;");
/* Emits the per-thread csp-safepoint breakpoint trap inline; the trap byte
 * must match trap_CspSafepoint, enforced at compile time. */
920 handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
922 #if trap_CspSafepoint != 0x1b
923 # error trap_CspSafepoint mismatch
925 asm("int3; .byte 0x1b;");
929 #endif /* C_STACK_IS_CONTROL_STACK */
/* handle_safepoint_violation: page-fault dispatcher.  Distinguishes faults
 * on the global safepoint page from faults on this thread's csp page and
 * routes each to the appropriate handler (via arrange_return_to_c_function
 * on C-stack-is-control-stack platforms, directly otherwise).  Falls
 * through when the fault is not a safepoint at all.  NOTE(review): the
 * return statements and some braces are missing from this extract. */
932 handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
934 FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
936 GC_SAFEPOINT_PAGE_ADDR,
937 arch_os_get_current_thread()->csp_around_foreign_call));
939 struct thread *self = arch_os_get_current_thread();
941 if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
942 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
943 /* We're on the altstack and don't want to run Lisp code. */
944 arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
946 if (foreign_function_call_active_p(self)) lose("GSP trap in C?");
947 fake_foreign_function_call(ctx);
948 thread_in_lisp_raised(ctx);
949 undo_fake_foreign_function_call(ctx);
954 if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
955 #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
956 arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
958 if (!foreign_function_call_active_p(self)) lose("CSP trap in Lisp?");
959 thread_in_safety_transition(ctx);
964 /* not a safepoint */
967 #endif /* LISP_FEATURE_WIN32 */
/* callback_wrapper_trampoline: entry point for alien (foreign) callbacks
 * into Lisp.  Refuses to run in non-Lisp threads, records the carried base
 * pointer on Windows, and invokes ENTER_ALIEN_CALLBACK inside a
 * GC-at-safepoints-only region.  NOTE(review): the thread-check condition
 * line is missing from this extract. */
970 callback_wrapper_trampoline(
971 #if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
972 /* On the x86oid backends, the assembly wrapper happens to not pass
973 * in ENTER_ALIEN_CALLBACK explicitly for safepoints. However, the
974 * platforms with precise GC are tricky enough already, and I want
975 * to minimize the read-time conditionals. For those platforms, I'm
976 * only replacing funcall3 with callback_wrapper_trampoline while
977 * keeping the arguments unchanged. --DFL */
978 lispobj __attribute__((__unused__)) fun,
980 lispobj arg0, lispobj arg1, lispobj arg2)
982 struct thread* th = arch_os_get_current_thread();
984 lose("callback invoked in non-lisp thread. Sorry, that is not supported yet.");
986 #ifdef LISP_FEATURE_WIN32
987 /* arg2 is the pointer to a return value, which sits on the stack */
988 th->carried_base_pointer = (os_context_register_t) *(((void**)arg2)-1);
991 WITH_GC_AT_SAFEPOINTS_ONLY()
992 funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
995 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */