2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
13 #ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
17 #ifndef LISP_FEATURE_WIN32
23 #include <sys/types.h>
24 #ifndef LISP_FEATURE_WIN32
27 #ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
28 #include <mach/mach.h>
29 #include <mach/mach_error.h>
30 #include <mach/mach_types.h>
36 #include "target-arch-os.h"
40 #include "genesis/cons.h"
41 #include "genesis/fdefn.h"
44 #include "gc-internal.h"
45 #include "pseudo-atomic.h"
46 #include "interrupt.h"
49 #if !defined(LISP_FEATURE_WIN32)
50 /* win32-os.c covers these, but there is no unixlike-os.c, so the normal
51 * definition goes here. Fixme: (Why) don't these work for Windows?
/* NOTE(review): this listing interleaves original line numbers with the
 * code and elides several lines (the function headers and braces for
 * map_gc_page()/unmap_gc_page() are missing here). */
/* Reserve the global safepoint page at its fixed address. */
56 os_validate(GC_SAFEPOINT_PAGE_ADDR, 4);
/* map_gc_page(): grant read/write access to the safepoint page. */
62 odxprint(misc, "map_gc_page");
63 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
65 OS_VM_PROT_READ | OS_VM_PROT_WRITE);
/* unmap_gc_page(): revoke all access to the safepoint page, so that any
 * thread touching it takes a safepoint trap. */
71 odxprint(misc, "unmap_gc_page");
72 os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, 4, OS_VM_PROT_NONE);
74 #endif /* !LISP_FEATURE_WIN32 */
76 /* Planned state progressions:
80 * unmap_gc_page(). No blockers (GC_NONE can be left at any moment).
84 * happens when a master thread enters its trap.
86 * The only blocker for flight mode is the master thread itself
87 * (GC_FLIGHT can't be left until the master thread traps).
91 * happens after each (other) thread is notified, i.e. it will
92 * eventually stop (already stopped). map_gc_page().
94 * Each thread with an empty CSP refuses to leave the GC_MESSAGE phase.
98 * happens when every gc-inhibitor comes to completion (that's
99 * normally pending interrupt trap).
101 * NB gc_stop_the_world, if it happens in non-master thread, "takes
102 * over" as a master, also deregistering itself as a blocker
103 * (i.e. it's ready to leave GC_INVOKED, but now it objects to
104 * leaving GC_COLLECT; this "usurpation" doesn't require any change
105 * to GC_COLLECT counter: for the counter, it's immaterial _which_
106 * thread is waiting).
110 * happens at gc_start_the_world (that should always happen in the
113 * Any thread waiting until GC end now continues.
/* Global GC coordination state (the enclosing `struct gc_state {` line
 * and some fields are elided from this listing). Protected by `lock`. */
116 /* Flag: conditions are initialized */
119 /* Per-process lock for gc_state */
120 pthread_mutex_t lock;
122 /* Conditions: one per phase */
123 pthread_cond_t phase_cond[GC_NPHASES];
125 /* For each [current or future] phase, a number of threads not yet ready to
127 int phase_wait[GC_NPHASES];
129 /* Master thread controlling the topmost stop/gc/start sequence */
130 struct thread* master;
/* Thread that called gc_stop_the_world (cleared by gc_start_the_world). */
131 struct thread* collector;
133 /* Current GC phase */
/* Static initializer: only the mutex needs a non-zero initial value. */
137 static struct gc_state gc_state = {
138 .lock = PTHREAD_MUTEX_INITIALIZER,
/* gc_state_lock(): acquire the global gc_state mutex and record the
 * calling thread as gc_state.master; lazily create the per-phase
 * condition variables on first use.  (The function header, braces and
 * the loop-variable declaration are elided in this listing.) */
145 odxprint(safepoints,"GC state [%p] to be locked",gc_state.lock);
146 gc_assert(0==pthread_mutex_lock(&gc_state.lock));
/* A non-NULL master while we hold the lock means the locking discipline
 * was violated; print diagnostics before the assertion below fires. */
147 if (gc_state.master) {
148 fprintf(stderr,"GC state lock glitch [%p] in thread %p phase %d\n",
149 gc_state.master,arch_os_get_current_thread(),gc_state.phase);
150 odxprint(safepoints,"GC state lock glitch [%p]",gc_state.master);
152 gc_assert(!gc_state.master);
153 gc_state.master = arch_os_get_current_thread();
154 if (!gc_state.initialized) {
156 for (i=GC_NONE; i<GC_NPHASES; ++i)
157 pthread_cond_init(&gc_state.phase_cond[i],NULL);
158 gc_state.initialized = 1;
160 odxprint(safepoints,"GC state [%p] locked in phase %d",gc_state.lock, gc_state.phase);
/* gc_state_unlock(): relinquish mastership and release the gc_state
 * mutex; only the current master may call this.  (Header/braces elided
 * in this listing.) */
166 odxprint(safepoints,"GC state to be unlocked in phase %d",gc_state.phase);
167 gc_assert(arch_os_get_current_thread()==gc_state.master);
168 gc_state.master = NULL;
169 gc_assert(0==pthread_mutex_unlock(&gc_state.lock));
170 odxprint(safepoints,"%s","GC state unlocked");
/* gc_state_wait(phase): block until the global phase reaches `phase`,
 * releasing the gc_state lock while waiting (pthread_cond_wait).  The
 * caller must be master; mastership is dropped across the wait and
 * reclaimed afterwards.  A wait for GC_QUIET is also satisfied by any
 * phase beyond GC_QUIET.  (Return type line and braces elided.) */
174 gc_state_wait(gc_phase_t phase)
176 struct thread* self = arch_os_get_current_thread();
177 odxprint(safepoints,"Waiting for %d -> %d [%d holders]",
178 gc_state.phase,phase,gc_state.phase_wait[gc_state.phase]);
179 gc_assert(gc_state.master == self);
180 gc_state.master = NULL;
181 while(gc_state.phase != phase && !(phase == GC_QUIET && (gc_state.phase > GC_QUIET)))
182 pthread_cond_wait(&gc_state.phase_cond[phase],&gc_state.lock);
183 gc_assert(gc_state.master == NULL);
184 gc_state.master = self;
/* set_csp_from_context(): publish the interrupted context's stack
 * pointer as this thread's conservative-stack-pointer marker
 * (csp_around_foreign_call), bounds-checked against the control stack.
 * (Return type, braces, and the Windows `sp = (void**)ctx;` style line
 * appear to be elided from this listing — confirm against upstream.) */
188 set_csp_from_context(struct thread *self, os_context_t *ctx)
190 void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
191 /* On POSIX platforms, it is sufficient to investigate only the part
192 * of the stack that was live before the interrupt, because in
193 * addition, we consider interrupt contexts explicitly. On Windows,
194 * however, we do not keep an explicit stack of exception contexts,
195 * and instead arrange for the conservative stack scan to also cover
196 * the context implicitly. The obvious way to do that is to start
197 * at the context itself: */
198 #ifdef LISP_FEATURE_WIN32
199 gc_assert((void **) ctx < sp);
202 gc_assert((void **)self->control_stack_start
204 < (void **)self->control_stack_end);
205 *self->csp_around_foreign_call = (lispobj) sp;
/* gc_phase_next(): cyclic successor of a GC phase (wraps to GC_NONE
 * after the last phase).  (Closing brace elided in this listing.) */
209 static inline gc_phase_t gc_phase_next(gc_phase_t old) {
210 return (old+1) % GC_NPHASES;
/* thread_gc_phase(p): the phase that thread `p` would block leaving.
 * `inhibit`: p is inside WITHOUT-GCING.  `inprogress` (its first operand
 * is elided from this listing) involves GC_PENDING being neither T nor
 * NIL, i.e. :IN-PROGRESS.  A thread collecting gets GC_QUIET; other
 * threads during a collection get GC_NONE; inhibitors get GC_INVOKED. */
213 static inline gc_phase_t thread_gc_phase(struct thread* p)
215 boolean inhibit = (SymbolTlValue(GC_INHIBIT,p)==T)||
216 (SymbolTlValue(IN_WITHOUT_GCING,p)==IN_WITHOUT_GCING);
219 (SymbolTlValue(GC_PENDING,p)!=T&& SymbolTlValue(GC_PENDING,p)!=NIL);
222 inprogress ? (gc_state.collector && (gc_state.collector != p)
223 ? GC_NONE : GC_QUIET)
224 : (inhibit ? GC_INVOKED : GC_NONE);
/* thread_gc_promote(p, cur, old): move thread p's "not ready to leave"
 * vote from phase `old` to phase `cur` in gc_state.phase_wait, and mark
 * STOP_FOR_GC_PENDING on p.  (The guard around the decrement and around
 * the final SetTlSymbolValue are elided from this listing.) */
227 static inline void thread_gc_promote(struct thread* p, gc_phase_t cur, gc_phase_t old) {
229 gc_state.phase_wait[old]--;
230 if (cur != GC_NONE) {
231 gc_state.phase_wait[cur]++;
234 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,p);
237 /* set_thread_csp_access -- alter page permissions for not-in-Lisp
238 flag (Lisp Stack Top) of the thread `p'. The flag may be modified
239 if `writable' is true.
241 Return true if there is a non-null value in the flag.
243 When a thread enters C code or leaves it, a per-thread location is
244 modified. That machine word serves as a not-in-Lisp flag; for
245 convenience, when in C, it's filled with a topmost stack location
246 that may contain Lisp data. When thread is in Lisp, the word
249 GENCGC uses each thread's flag value for conservative garbage collection.
251 There is a full VM page reserved for this word; page permissions
252 are switched to read-only for race-free examine + wait + use
/* (Implementation: the flag page is always readable; `writable` only
 * decides whether PROT_WRITE is added.) */
254 static inline boolean
255 set_thread_csp_access(struct thread* p, boolean writable)
257 os_protect((os_vm_address_t) p->csp_around_foreign_call,
258 THREAD_CSP_PAGE_SIZE,
259 writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
260 : (OS_VM_PROT_READ));
261 return !!*p->csp_around_foreign_call;
/* gc_notify_early(): walk all threads (the for_each_thread line and the
 * skip-self / else branch are elided from this listing), make each
 * thread's csp page read-only, and register each as a blocker of the
 * appropriate phase: threads in Lisp (NULL csp) block the current
 * phase; threads in C block their own thread_gc_phase(). */
264 static inline void gc_notify_early()
266 struct thread *self = arch_os_get_current_thread(), *p;
267 odxprint(safepoints,"%s","global notification");
268 pthread_mutex_lock(&all_threads_lock);
272 odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
273 if (!set_thread_csp_access(p,0)) {
274 thread_gc_promote(p, gc_state.phase, GC_NONE);
276 thread_gc_promote(p, thread_gc_phase(p), GC_NONE);
279 pthread_mutex_unlock(&all_threads_lock);
/* gc_notify_final(): reset the current phase's wait count, then walk
 * all threads except the collector (for_each_thread and the `continue`
 * after the collector check are elided), counting only threads still in
 * Lisp (NULL csp) as blockers of the current phase. */
282 static inline void gc_notify_final()
285 odxprint(safepoints,"%s","global notification");
286 gc_state.phase_wait[gc_state.phase]=0;
287 pthread_mutex_lock(&all_threads_lock);
289 if (p == gc_state.collector)
291 odxprint(safepoints,"notifying thread %p csp %p",p,*p->csp_around_foreign_call);
292 if (!set_thread_csp_access(p,0)) {
293 thread_gc_promote(p, gc_state.phase, GC_NONE);
296 pthread_mutex_unlock(&all_threads_lock);
/* gc_done(): after a collection, restore write access to every
 * thread's csp page and, if we are inside WITHOUT-GCING, clear other
 * threads' GC_PENDING flags.  (for_each_thread line elided.) */
299 static inline void gc_done()
301 struct thread *self = arch_os_get_current_thread(), *p;
302 boolean inhibit = (SymbolTlValue(GC_INHIBIT,self)==T);
304 odxprint(safepoints,"%s","global denotification");
305 pthread_mutex_lock(&all_threads_lock);
307 if (inhibit && (SymbolTlValue(GC_PENDING,p)==T))
308 SetTlSymbolValue(GC_PENDING,NIL,p);
309 set_thread_csp_access(p,1);
311 pthread_mutex_unlock(&all_threads_lock);
/* gc_handle_phase(): perform the side effect associated with entering
 * the current phase.  NOTE(review): the entire switch body (original
 * lines ~318-342) is elided from this listing — do not infer the
 * per-phase actions from what is visible here. */
314 static inline void gc_handle_phase()
316 odxprint(safepoints,"Entering phase %d",gc_state.phase);
317 switch (gc_state.phase) {
343 /* become ready to leave the <old> phase, but unready to leave the <new> phase;
344 * `old' can be GC_NONE, it means this thread wasn't blocking any state. `cur'
345 * can be GC_NONE, it means this thread wouldn't block GC_NONE, but still wait
/* (Early-return / fixup statements after the two guards at original
 * lines 351-353 are elided from this listing.) */
347 static inline void gc_advance(gc_phase_t cur, gc_phase_t old) {
348 odxprint(safepoints,"GC advance request %d -> %d in phase %d",old,cur,gc_state.phase);
351 if (cur == gc_state.phase)
353 if (old < gc_state.phase)
/* Move our blocker vote from `old` to `cur`... */
355 if (old != GC_NONE) {
356 gc_state.phase_wait[old]--;
357 odxprint(safepoints,"%d holders of phase %d without me",gc_state.phase_wait[old],old);
359 if (cur != GC_NONE) {
360 gc_state.phase_wait[cur]++;
361 odxprint(safepoints,"%d holders of phase %d with me",gc_state.phase_wait[cur],cur);
363 /* roll forth as long as there's no waiters */
364 while (gc_state.phase_wait[gc_state.phase]==0
365 && gc_state.phase != cur) {
366 gc_state.phase = gc_phase_next(gc_state.phase);
367 odxprint(safepoints,"no blockers, direct advance to %d",gc_state.phase);
/* Wake everything waiting on the phase we landed in. */
369 pthread_cond_broadcast(&gc_state.phase_cond[gc_state.phase]);
371 odxprint(safepoints,"going to wait for %d threads",gc_state.phase_wait[gc_state.phase]);
/* thread_register_gc_trigger(): called when this thread wants a GC.
 * If no GC sequence is underway and this thread would not itself block
 * (not in a safepoint, thread_gc_phase == GC_NONE), kick the state
 * machine into GC_FLIGHT.  (Return type, braces, and the
 * gc_state_lock/unlock calls around the body are elided.) */
376 thread_register_gc_trigger()
378 odxprint(misc, "/thread_register_gc_trigger");
379 struct thread *self = arch_os_get_current_thread();
381 if (gc_state.phase == GC_NONE &&
382 SymbolTlValue(IN_SAFEPOINT,self)!=T &&
383 thread_gc_phase(self)==GC_NONE) {
384 gc_advance(GC_FLIGHT,GC_NONE);
392 /* Thread may gc if all of these are true:
393 * 1) GC_INHIBIT == NIL (outside of protected part of without-gcing)
394 * 2) GC_PENDING != :in-progress (outside of recursion protection)
395 * Note that we are in a safepoint here, which is always outside of PA. */
/* (The function header line itself is elided from this listing.) */
397 struct thread *self = arch_os_get_current_thread();
398 return (SymbolValue(GC_INHIBIT, self) == NIL
399 && (SymbolTlValue(GC_PENDING, self) == T ||
400 SymbolTlValue(GC_PENDING, self) == NIL));
403 #ifdef LISP_FEATURE_SB_THRUPTION
/* thread_may_thrupt(ctx): predicate — may a thruption (asynchronous
 * interruption) run right now?  The conditions are documented in the
 * original comment below.  NOTE(review): the function's return type
 * line, braces and the `return 0;`/`return 1;` statements after each
 * guard are elided from this listing. */
405 thread_may_thrupt(os_context_t *ctx)
407 struct thread * self = arch_os_get_current_thread();
408 /* Thread may be interrupted if all of these are true:
409 * 1) Deferrables are unblocked in the context of the signal that
410 * went into the safepoint. -- Otherwise the surrounding code
411 * didn't want to be interrupted by a signal, so presumably it didn't
412 * want to be INTERRUPT-THREADed either.
413 * (See interrupt_handle_pending for an exception.)
414 * 2) On POSIX: There is no pending signal. This is important even
415 * after checking the sigmask, since we could be in the
416 * handle_pending trap following re-enabling of interrupts.
417 * Signals are unblocked in that case, but the signal is still
418 * pending; we want to run GC before handling the signal and
419 * therefore entered this safepoint. But the thruption would call
420 * ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
421 * trap, leading to recursion.
422 * 3) INTERRUPTS_ENABLED is non-nil.
423 * 4) No GC pending; it takes precedence.
424 * Note that we are in a safepoint here, which is always outside of PA. */
426 if (SymbolValue(INTERRUPTS_ENABLED, self) == NIL)
429 if (SymbolValue(GC_PENDING, self) != NIL)
432 if (SymbolValue(STOP_FOR_GC_PENDING, self) != NIL)
435 #ifdef LISP_FEATURE_WIN32
436 if (deferrables_blocked_p(&self->os_thread->blocked_signal_set))
439 /* ctx is NULL if the caller wants to ignore the sigmask. */
440 if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
442 if (SymbolValue(INTERRUPT_PENDING, self) != NIL)
446 if (SymbolValue(RESTART_CLUSTERS, self) == NIL)
447 /* This special case prevents TERMINATE-THREAD from hitting
448 * during INITIAL-THREAD-FUNCTION before it's ready. Curiously,
449 * deferrables are already unblocked there. Further
450 * investigation may be in order. */
456 // returns 0 if skipped, 1 otherwise
/* check_pending_thruptions(ctx): if a thruption may run and
 * THRUPTION_PENDING is set, clear it, block deferrable signals, and
 * call RUN_INTERRUPTION in Lisp, restoring the old signal mask after.
 * (Braces, the `sigset_t oldset;` declaration, early `return 0;`
 * statements, and the trailing `return 1;` are elided.) */
458 check_pending_thruptions(os_context_t *ctx)
460 struct thread *p = arch_os_get_current_thread();
462 #ifdef LISP_FEATURE_WIN32
463 pthread_t pself = p->os_thread;
465 /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
466 * in the self-kill case; instead we do it here while also clearing the
468 if (pself->pending_signal_set)
469 if (__sync_fetch_and_and(&pself->pending_signal_set,0))
470 SetSymbolValue(THRUPTION_PENDING, T, p);
473 if (!thread_may_thrupt(ctx))
475 if (SymbolValue(THRUPTION_PENDING, p) == NIL)
477 SetSymbolValue(THRUPTION_PENDING, NIL, p);
479 #ifdef LISP_FEATURE_WIN32
480 oldset = pself->blocked_signal_set;
481 pself->blocked_signal_set = deferrable_sigset;
482 if (ctx) fake_foreign_function_call(ctx);
485 block_deferrable_signals(0, &oldset);
488 funcall0(StaticSymbolFunction(RUN_INTERRUPTION));
490 #ifdef LISP_FEATURE_WIN32
491 if (ctx) undo_fake_foreign_function_call(ctx);
492 pself->blocked_signal_set = oldset;
493 if (ctx) ctx->sigmask = oldset;
495 pthread_sigmask(SIG_SETMASK, &oldset, 0);
/* on_stack_p(th, esp): true when esp lies within th's control stack.
 * (Return type, braces and the `<= esp` middle operand line of the
 * range test are elided from this listing.) */
502 on_stack_p(struct thread *th, void *esp)
504 return (void *)th->control_stack_start
506 < (void *)th->control_stack_end;
509 #ifndef LISP_FEATURE_WIN32
510 /* (Technically, we still allocate an altstack even on Windows. Since
511 * Windows has a contiguous stack with an automatic guard page of
512 * user-configurable size instead of an alternative stack though, the
513 * SBCL-allocated altstack doesn't actually apply and won't be used.) */
/* on_altstack_p(th, esp): true when esp lies within the SBCL-allocated
 * signal altstack, which sits after the thread's dynamic values.
 * (Return type line and braces elided.) */
515 on_altstack_p(struct thread *th, void *esp)
517 void *start = (void *)th+dynamic_values_bytes;
518 void *end = (char *)start + 32*SIGSTKSZ;
519 return start <= esp && esp < end;
/* assert_on_stack(th, esp): lose() unless esp is on th's control stack;
 * distinguishes the altstack case on POSIX for a clearer message.
 * (Return type, braces and the `return;` after the first check are
 * elided from this listing.) */
524 assert_on_stack(struct thread *th, void *esp)
526 if (on_stack_p(th, esp))
528 #ifndef LISP_FEATURE_WIN32
529 if (on_altstack_p(th, esp))
530 lose("thread %p: esp on altstack: %p", th, esp);
532 lose("thread %p: bogus esp: %p", th, esp);
535 // returns 0 if skipped, 1 otherwise
/* check_pending_gc(ctx): run SUB_GC (and POST_GC when allowed) if a GC
 * is pending and permitted, guarding against recursion via the
 * IN_SAFEPOINT binding.  (Braces, the `sigset_t sigset;` and `done`
 * declarations, and the final return are elided from this listing.) */
537 check_pending_gc(os_context_t *ctx)
539 odxprint(misc, "check_pending_gc");
540 struct thread * self = arch_os_get_current_thread();
/* Clear a stale IN_SAFEPOINT left over from an earlier invocation once
 * neither GC_INHIBIT nor GC_PENDING is set. */
544 if ((SymbolValue(IN_SAFEPOINT,self) == T) &&
545 ((SymbolValue(GC_INHIBIT,self) == NIL) &&
546 (SymbolValue(GC_PENDING,self) == NIL))) {
547 SetSymbolValue(IN_SAFEPOINT,NIL,self);
549 if (thread_may_gc() && (SymbolValue(IN_SAFEPOINT, self) == NIL)) {
550 if ((SymbolTlValue(GC_PENDING, self) == T)) {
551 lispobj gc_happened = NIL;
553 bind_variable(IN_SAFEPOINT,T,self);
554 block_deferrable_signals(NULL,&sigset);
/* Re-check under blocked signals: GC_PENDING may have changed. */
555 if(SymbolTlValue(GC_PENDING,self)==T)
556 gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
557 unbind_variable(IN_SAFEPOINT,self);
558 thread_sigmask(SIG_SETMASK,&sigset,NULL);
559 if (gc_happened == T) {
560 /* POST_GC wants to enable interrupts */
561 if (SymbolValue(INTERRUPTS_ENABLED,self) == T ||
562 SymbolValue(ALLOW_WITH_INTERRUPTS,self) == T) {
563 odxprint(misc, "going to call POST_GC");
564 funcall0(StaticSymbolFunction(POST_GC));
/* thread_in_lisp_raised(ctxptr): safepoint trap taken while running
 * Lisp code.  Fast path: if we are the thread that triggered GC
 * (phase GC_FLIGHT, GC_PENDING==T, nothing blocking us), publish our
 * csp, drive the state machine to GC_QUIET and collect.  Otherwise
 * cooperate: wait out GC_MESSAGE, then either park as a stopped thread
 * (phase GC_NONE for us) or advance/block per thread_gc_phase.
 * (Braces, gc_state_lock/unlock calls and the `gc_phase_t phase;`
 * declaration are elided from this listing.) */
574 void thread_in_lisp_raised(os_context_t *ctxptr)
576 struct thread *self = arch_os_get_current_thread();
578 odxprint(safepoints,"%s","thread_in_lisp_raised");
581 if (gc_state.phase == GC_FLIGHT &&
582 SymbolTlValue(GC_PENDING,self)==T &&
583 thread_gc_phase(self)==GC_NONE &&
584 thread_may_gc() && SymbolTlValue(IN_SAFEPOINT,self)!=T) {
585 set_csp_from_context(self, ctxptr);
586 gc_advance(GC_QUIET,GC_FLIGHT);
587 set_thread_csp_access(self,1);
/* Someone else is already collecting: defer to them. */
588 if (gc_state.collector) {
589 gc_advance(GC_NONE,GC_QUIET);
591 *self->csp_around_foreign_call = 0;
592 SetTlSymbolValue(GC_PENDING,T,self);
595 check_pending_gc(ctxptr);
596 #ifdef LISP_FEATURE_SB_THRUPTION
597 while(check_pending_thruptions(ctxptr));
601 if (gc_state.phase == GC_FLIGHT) {
602 gc_state_wait(GC_MESSAGE);
604 phase = thread_gc_phase(self);
605 if (phase == GC_NONE) {
/* We are one of the threads being stopped: publish csp, wait for the
 * world to restart, then clear the marker and handle pending work. */
606 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
607 set_thread_csp_access(self,1);
608 set_csp_from_context(self, ctxptr);
609 if (gc_state.phase <= GC_SETTLED)
610 gc_advance(phase,gc_state.phase);
612 gc_state_wait(phase);
613 *self->csp_around_foreign_call = 0;
615 check_pending_gc(ctxptr);
616 #ifdef LISP_FEATURE_SB_THRUPTION
617 while(check_pending_thruptions(ctxptr));
620 gc_advance(phase,gc_state.phase);
621 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
/* thread_in_safety_transition(ctxptr): safepoint trap taken on the
 * csp page, i.e. while entering/leaving foreign code.  If our csp is
 * already published (returning from C), just wait for our phase;
 * otherwise behave like the cooperative path of
 * thread_in_lisp_raised.  (Braces, gc_state_lock/unlock and the else
 * branch introducing the `phase` path are elided.) */
626 void thread_in_safety_transition(os_context_t *ctxptr)
628 struct thread *self = arch_os_get_current_thread();
630 odxprint(safepoints,"%s","GC safety transition");
632 if (set_thread_csp_access(self,1)) {
633 gc_state_wait(thread_gc_phase(self));
635 #ifdef LISP_FEATURE_SB_THRUPTION
636 while(check_pending_thruptions(ctxptr));
639 gc_phase_t phase = thread_gc_phase(self);
640 if (phase == GC_NONE) {
641 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
642 set_csp_from_context(self, ctxptr);
643 if (gc_state.phase <= GC_SETTLED)
644 gc_advance(phase,gc_state.phase);
646 gc_state_wait(phase);
647 *self->csp_around_foreign_call = 0;
649 gc_advance(phase,gc_state.phase);
650 SetTlSymbolValue(STOP_FOR_GC_PENDING,T,self);
/* thread_interrupted(ctxptr): pending-interrupt trap.  Dispatch to the
 * safety-transition handler (csp already set → was in/near foreign
 * code) or the in-Lisp handler while a GC sequence is active, then
 * handle pending GC and thruptions.  (Braces and gc_state_lock/unlock
 * are elided from this listing.) */
656 void thread_interrupted(os_context_t *ctxptr)
658 struct thread *self = arch_os_get_current_thread();
660 odxprint(safepoints,"%s","pending interrupt trap");
662 if (gc_state.phase != GC_NONE) {
663 if (set_thread_csp_access(self,1)) {
665 thread_in_safety_transition(ctxptr);
668 thread_in_lisp_raised(ctxptr);
673 check_pending_gc(ctxptr);
674 #ifdef LISP_FEATURE_SB_THRUPTION
675 while(check_pending_thruptions(ctxptr));
/* gc_stop_the_world() body (the `void gc_stop_the_world()` header and
 * braces are elided from this listing): register self as collector,
 * vote to hold GC_QUIET, drive the state machine there, then advance
 * to GC_COLLECT with ourselves as the sole GC_QUIET holder.  The
 * switch cases between L363 and L368 are partially elided. */
682 struct thread* self = arch_os_get_current_thread();
683 odxprint(safepoints, "stop the world");
685 gc_state.collector = self;
686 gc_state.phase_wait[GC_QUIET]++;
688 switch(gc_state.phase) {
690 gc_advance(GC_QUIET,gc_state.phase);
694 gc_state_wait(GC_QUIET);
696 gc_state.phase_wait[GC_QUIET]=1;
697 gc_advance(GC_COLLECT,GC_QUIET);
702 lose("Stopping the world in unexpected state %d",gc_state.phase);
705 set_thread_csp_access(self,1);
707 SetTlSymbolValue(STOP_FOR_GC_PENDING,NIL,self);
/* gc_start_the_world(): clear the collector, mark ourselves as inside
 * WITHOUT-GCING, and roll the state machine from GC_COLLECT back to
 * GC_NONE, releasing all stopped threads.  (Braces and
 * gc_state_lock/unlock are elided from this listing.) */
711 void gc_start_the_world()
713 odxprint(safepoints,"%s","start the world");
715 gc_state.collector = NULL;
716 SetSymbolValue(IN_WITHOUT_GCING,IN_WITHOUT_GCING,
717 arch_os_get_current_thread());
718 gc_advance(GC_NONE,GC_COLLECT);
723 #ifdef LISP_FEATURE_SB_THRUPTION
724 /* wake_thread(thread) -- ensure a thruption delivery to
/* wake_thread_io(): poke the target thread's I/O wait machinery so a
 * blocking operation notices the thruption. */
727 # ifdef LISP_FEATURE_WIN32
730 wake_thread_io(struct thread * thread)
732 SetEvent(thread->private_events.events[1]);
733 win32_maybe_interrupt_io(thread);
/* wake_thread_win32(): set THRUPTION_PENDING on the target and, when
 * no GC-related stop is already queued for it, run a degenerate
 * GC_INVOKED→GC_NONE cycle so the target passes through a safepoint.
 * (Braces, early returns, and the lock acquisitions preceding several
 * of these lines are elided from this listing.) */
737 wake_thread_win32(struct thread *thread)
739 struct thread *self = arch_os_get_current_thread();
741 wake_thread_io(thread);
743 if (SymbolTlValue(THRUPTION_PENDING,thread)==T)
746 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
748 if ((SymbolTlValue(GC_PENDING,thread)==T)||
749 (SymbolTlValue(STOP_FOR_GC_PENDING,thread)==T))
752 wake_thread_io(thread);
753 pthread_mutex_unlock(&all_threads_lock);
756 if (gc_state.phase == GC_NONE) {
757 gc_advance(GC_INVOKED,GC_NONE);
758 gc_advance(GC_NONE,GC_INVOKED);
762 pthread_mutex_lock(&all_threads_lock);
/* wake_thread_posix(os_thread): deliver a thruption on POSIX.
 * Self-wake runs the thruption directly; otherwise, if the state
 * machine is idle, take it to GC_INVOKED, set THRUPTION_PENDING on the
 * target and SIGPIPE it when it is in foreign code, then return to
 * GC_NONE.  If a GC sequence is already running, just mark the target
 * and rely on its next safepoint.  Returns 0 when the target was
 * found, -1 otherwise.  (Return type line, braces, `int found = 0;`,
 * `sigset_t oldset;`, several `break`/`goto`-style lines and the
 * gc_state_lock/unlock calls are elided from this listing.) */
767 wake_thread_posix(os_thread_t os_thread)
770 struct thread *thread;
771 struct thread *self = arch_os_get_current_thread();
773 /* Must not and need not attempt to signal ourselves while we're the
775 if (self->os_thread == os_thread) {
776 SetTlSymbolValue(THRUPTION_PENDING,T,self);
777 WITH_GC_AT_SAFEPOINTS_ONLY()
778 while (check_pending_thruptions(0 /* ignore the sigmask */))
783 /* We are not in a signal handler here, so need to block signals
786 block_deferrable_signals(0, &oldset);
789 if (gc_state.phase == GC_NONE) {
790 odxprint(safepoints, "wake_thread_posix: invoking");
791 gc_advance(GC_INVOKED,GC_NONE);
793 /* only if in foreign code, notify using signal */
794 pthread_mutex_lock(&all_threads_lock);
795 for_each_thread (thread)
796 if (thread->os_thread == os_thread) {
797 /* it's still alive... */
800 odxprint(safepoints, "wake_thread_posix: found");
801 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
802 if (SymbolTlValue(GC_PENDING,thread) == T
803 || SymbolTlValue(STOP_FOR_GC_PENDING,thread) == T)
/* Non-NULL csp ⇒ target is in foreign code: signal it into a
 * safety transition. */
806 if (os_get_csp(thread)) {
807 odxprint(safepoints, "wake_thread_posix: kill");
808 /* ... and in foreign code. Push it into a safety
810 int status = pthread_kill(os_thread, SIGPIPE);
812 lose("wake_thread_posix: pthread_kill failed with %d\n",
817 pthread_mutex_unlock(&all_threads_lock);
819 gc_advance(GC_NONE,GC_INVOKED);
821 odxprint(safepoints, "wake_thread_posix: passive");
822 /* We are not able to wake the thread up actively, but maybe
823 * some other thread will take care of it. Kludge: Unless it is
824 * in foreign code. Let's at least try to get our return value
826 pthread_mutex_lock(&all_threads_lock);
827 for_each_thread (thread)
828 if (thread->os_thread == os_thread) {
829 SetTlSymbolValue(THRUPTION_PENDING,T,thread);
833 pthread_mutex_unlock(&all_threads_lock);
837 odxprint(safepoints, "wake_thread_posix leaving, found=%d", found);
838 pthread_sigmask(SIG_SETMASK, &oldset, 0);
839 return found ? 0 : -1;
841 #endif /* !LISP_FEATURE_WIN32 */
842 #endif /* LISP_FEATURE_SB_THRUPTION */
/* os_get_csp(th): return th's published conservative stack pointer —
 * non-NULL when the thread has entered foreign code (see the
 * set_thread_csp_access comment).  (Return type line and braces are
 * elided from this listing.) */
845 os_get_csp(struct thread* th)
847 FSHOW_SIGNAL((stderr, "Thread %p has CSP *(%p) == %p, stack [%p,%p]\n",
849 th->csp_around_foreign_call,
850 *(void***)th->csp_around_foreign_call,
851 th->control_stack_start,
852 th->control_stack_end));
853 return *(void***)th->csp_around_foreign_call;
857 #ifndef LISP_FEATURE_WIN32
859 # ifdef LISP_FEATURE_SB_THRUPTION
/* thruption_handler(): SIGPIPE handler used by wake_thread_posix.
 * If the thread is in Lisp it does nothing (the `if` guarding the
 * early return is elided here); if in C, it temporarily clears the csp
 * marker, runs the safepoint logic, then restores it. */
861 thruption_handler(int signal, siginfo_t *info, os_context_t *ctx)
863 struct thread *self = arch_os_get_current_thread();
865 void *transition_sp = os_get_csp(self);
867 /* In Lisp code. Do not run thruptions asynchronously. The
868 * next safepoint will take care of it. */
871 /* In C code. As a rule, we assume that running thruptions is OK. */
872 *self->csp_around_foreign_call = 0;
873 thread_in_lisp_raised(ctx);
874 *self->csp_around_foreign_call = transition_sp;
878 /* Designed to be of the same type as call_into_lisp. Ignores its
/* Each trampoline raises a breakpoint carrying the matching trap code
 * byte; the preprocessor checks pin the byte values to the trap_*
 * constants.  (Return type lines and braces are elided here.) */
881 handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs)
883 #if trap_GlobalSafepoint != 0x1a
884 # error trap_GlobalSafepoint mismatch
886 asm("int3; .byte 0x1a;");
891 handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs)
893 #if trap_CspSafepoint != 0x1b
894 # error trap_CspSafepoint mismatch
896 asm("int3; .byte 0x1b;");
/* handle_safepoint_violation(ctx, fault_address): fault dispatcher.
 * Faults on the global safepoint page or on this thread's csp page are
 * redirected (via arrange_return_to_c_function) to the matching
 * trampoline above; anything else is not a safepoint.  (Return type
 * line, braces and the `return 1;`/`return 0;` lines are elided.) */
901 handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
903 FSHOW_SIGNAL((stderr, "fault_address = %p, sp = %p, &csp = %p\n",
905 GC_SAFEPOINT_PAGE_ADDR,
906 arch_os_get_current_thread()->csp_around_foreign_call));
908 struct thread *self = arch_os_get_current_thread();
910 if (fault_address == (os_vm_address_t) GC_SAFEPOINT_PAGE_ADDR) {
911 /* We're on the altstack and don't want to run Lisp code. */
912 arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
916 if (fault_address == (os_vm_address_t) self->csp_around_foreign_call) {
917 arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
921 /* not a safepoint */
/* callback_wrapper_trampoline(): entry for alien callbacks — loses in
 * non-Lisp threads (the guarding `if` is elided here), otherwise calls
 * ENTER-ALIEN-CALLBACK under GC-at-safepoints-only protection.
 * (Return type line and braces are elided from this listing.) */
927 callback_wrapper_trampoline(lispobj arg0, lispobj arg1, lispobj arg2)
929 struct thread* th = arch_os_get_current_thread();
931 lose("callback invoked in non-lisp thread. Sorry, that is not supported yet.");
933 WITH_GC_AT_SAFEPOINTS_ONLY()
934 funcall3(SymbolValue(ENTER_ALIEN_CALLBACK, 0), arg0, arg1, arg2);
937 #endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */