+/* Return the calling thread's `struct thread`, or 0 when the caller is
+ * not a thread the Lisp runtime knows about. */
+static inline struct thread *arch_os_get_current_thread(void)
+{
+#if !defined(LISP_FEATURE_SB_THREAD)
+ /* Non-threaded build: there is exactly one thread structure. */
+ return all_threads;
+
+#elif defined(LISP_FEATURE_X86) && defined(LISP_FEATURE_WIN32)
+ /* Win32/x86: read the thread struct directly from the TEB via %fs.
+  * 0xE10 is the TlsSlots[] offset within the TEB and each slot is 4
+  * bytes, so this loads TLS slot 63. NOTE(review): assumes the
+  * runtime stores the thread pointer in TLS index 63 — confirm
+  * against the thread-creation code. */
+ register struct thread *me=0;
+ __asm__ ("movl %%fs:0xE10+(4*63), %0" : "=r"(me) :);
+ return me;
+
+#else
+ if (!all_threads)
+ /* No Lisp threads exist yet, so no lookup can succeed. */
+ return 0;
+
+ /* Otherwise, use pthreads to find the right value. We do not load
+ * directly from %fs:this even on x86 platforms (like Linux and
+ * Solaris) with dependable %fs, because we want to return NULL if
+ * called by a non-Lisp thread, and %fs would not be initialized
+ * suitably in that case. */
+ struct thread *th;
+# ifdef LISP_FEATURE_GCC_TLS
+ /* Compiler-supported thread-local variable: cheap direct read. */
+ th = current_thread;
+# else
+ /* Fall back to the pthread TLS key; returns NULL for non-Lisp
+  * threads that never set the key. */
+ th = pthread_getspecific(specials);
+# endif
+
+# if defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
+ /* If enabled by make-config (currently Darwin and FreeBSD only),
+ * re-setup %fs. This is an out-of-line call, and potentially
+ * expensive.*/
+ if (th)
+ arch_os_load_ldt(th);
+# endif
+
+ return th;
+#endif
+}
+
+#if defined(LISP_FEATURE_MACH_EXCEPTION_HANDLER)
+/* Per-thread setup/teardown of Mach (Darwin) exception handling; each
+ * returns a kern_return_t status code. */
+extern kern_return_t mach_lisp_thread_init(struct thread *thread);
+extern kern_return_t mach_lisp_thread_destroy(struct thread *thread);
+#endif
+
+/* Per-thread-startup scratch data passed through thread initialization. */
+typedef struct init_thread_data {
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+ /* Saved gcing-safety state for safepoint builds (see
+  * push_gcing_safety/pop_gcing_safety below). */
+ struct gcing_safety safety;
+#endif
+ /* Placeholder so the struct is never empty on non-safepoint builds. */
+ void *dummy;
+} init_thread_data;
+
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+/* Safepoint protocol entry points, each invoked with the interrupted
+ * thread's signal context. */
+void thread_in_safety_transition(os_context_t *ctx);
+void thread_in_lisp_raised(os_context_t *ctx);
+void thread_interrupted(os_context_t *ctx);
+void thread_pitstop(os_context_t *ctxptr);
+extern void thread_register_gc_trigger();
+
+# ifdef LISP_FEATURE_SB_THRUPTION
+/* Wake a thread so it notices a pending thruption; dispatches to the
+ * platform-specific variant below. */
+int wake_thread(os_thread_t os_thread);
+# ifdef LISP_FEATURE_WIN32
+void wake_thread_win32(struct thread *thread);
+# else
+int wake_thread_posix(os_thread_t os_thread);
+# endif
+# endif
+
+/* Save the current thread's around-foreign-call state into *INTO and
+ * clear it on the thread, announcing re-entry into Lisp. The empty
+ * asm volatile statements presumably act as compiler scheduling
+ * barriers so the stores are not reordered — NOTE(review): confirm
+ * they suffice without a "memory" clobber. */
+static inline
+void push_gcing_safety(struct gcing_safety *into)
+{
+ struct thread* th = arch_os_get_current_thread();
+ asm volatile ("");
+ /* Intentional assignment-in-condition: snapshot the saved csp and
+  * only clear/save further state when it was non-zero. */
+ if ((into->csp_around_foreign_call =
+ *th->csp_around_foreign_call)) {
+ *th->csp_around_foreign_call = 0;
+ asm volatile ("");
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+ into->pc_around_foreign_call = th->pc_around_foreign_call;
+ th->pc_around_foreign_call = 0;
+ asm volatile ("");
+#endif
+ } else {
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+ /* Nothing was saved; record that so pop_gcing_safety restores
+  * nothing. */
+ into->pc_around_foreign_call = 0;
+#endif
+ }
+}
+
+/* Inverse of push_gcing_safety: restore the around-foreign-call state
+ * previously saved in *FROM back onto the current thread. The empty
+ * asm volatile statements mirror the barriers in push_gcing_safety,
+ * presumably to pin store ordering — NOTE(review): confirm. Restores
+ * only when a non-zero csp was saved, matching the push side. */
+static inline
+void pop_gcing_safety(struct gcing_safety *from)
+{
+ struct thread* th = arch_os_get_current_thread();
+ if (from->csp_around_foreign_call) {
+ asm volatile ("");
+ *th->csp_around_foreign_call = from->csp_around_foreign_call;
+ asm volatile ("");
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+ th->pc_around_foreign_call = from->pc_around_foreign_call;
+ asm volatile ("");
+#endif
+ }
+}
+
+/* Even with just -O1, gcc optimizes the jumps in this "loop" away
+ * entirely, giving the ability to define WITH-FOO-style macros. */
+/* Runs the statement (or block) following the macro exactly once, then
+ * evaluates FINALLY_DO. PREFIX is pasted into the loop-flag name so
+ * nested uses with distinct prefixes do not collide. */
+#define RUN_BODY_ONCE(prefix, finally_do) \
+ int prefix##done = 0; \
+ for (; !prefix##done; finally_do, prefix##done = 1)
+
+/* Brackets the following block with push_gcing_safety/pop_gcing_safety
+ * on a local gcing_safety named VAR. (NB: "hygenic" [sic] is the
+ * established spelling of these macro names; renaming would break
+ * existing callers.) */
+#define WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(var) \
+ struct gcing_safety var; \
+ push_gcing_safety(&var); \
+ RUN_BODY_ONCE(var, pop_gcing_safety(&var))
+
+#define WITH_GC_AT_SAFEPOINTS_ONLY() \
+ WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(sbcl__gc_safety)
+
+/* Holds THREAD's state_sem around the following block, posting it on
+ * exit via RUN_BODY_ONCE. */
+#define WITH_STATE_SEM_hygenic(var, thread) \
+ os_sem_wait((thread)->state_sem, "thread_state"); \
+ RUN_BODY_ONCE(var, os_sem_post((thread)->state_sem, "thread_state"))
+
+#define WITH_STATE_SEM(thread) \
+ WITH_STATE_SEM_hygenic(sbcl__state_sem, thread)
+
+int check_pending_thruptions(os_context_t *ctx);
+
+#endif
+
+/* Spawn the first Lisp thread, running the given initial function. */
+extern void create_initial_thread(lispobj);
+
+#ifdef LISP_FEATURE_SB_THREAD
+/* Guards the global all_threads list on threaded builds. */
+extern pthread_mutex_t all_threads_lock;
+#endif