+/* Return the function bound to a Lisp symbol: the symbol's value is
+ * treated as a (tagged) pointer to its fdefn object, whose 'fun' slot
+ * is read.  This only works for static symbols, because SymbolValue()
+ * is called with a null thread argument (no thread-local value lookup
+ * is possible). */
+static inline lispobj
+StaticSymbolFunction(lispobj sym)
+{
+ return ((struct fdefn *)native_pointer(SymbolValue(sym, 0)))->fun;
+}
+
+/* These are for use during GC, on the current thread, or on prenatal
+ * threads only.
+ *
+ * Three configurations are covered below:
+ *   1. threaded builds: the pointers live in the thread structure;
+ *   2. unthreaded x86/x86-64: the binding stack pointer lives in the
+ *      Lisp static symbol BINDING-STACK-POINTER, the control stack
+ *      pointer in a C global;
+ *   3. all other unthreaded backends: plain C globals throughout.
+ * access_control_frame_pointer() is only defined where the backend
+ * maintains one -- i.e. never on x86/x86-64 (note the explicit
+ * !X86/!X86_64 guard in the threaded branch, and its absence from the
+ * x86 unthreaded branch). */
+#if defined(LISP_FEATURE_SB_THREAD)
+#define get_binding_stack_pointer(thread) \
+ ((thread)->binding_stack_pointer)
+#define set_binding_stack_pointer(thread,value) \
+ ((thread)->binding_stack_pointer = (lispobj *)(value))
+#define access_control_stack_pointer(thread) \
+ ((thread)->control_stack_pointer)
+# if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+#define access_control_frame_pointer(thread) \
+ ((thread)->control_frame_pointer)
+# endif
+#elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+#define get_binding_stack_pointer(thread) \
+ SymbolValue(BINDING_STACK_POINTER, thread)
+#define set_binding_stack_pointer(thread,value) \
+ SetSymbolValue(BINDING_STACK_POINTER, (lispobj)(value), thread)
+#define access_control_stack_pointer(thread) \
+ (current_control_stack_pointer)
+#else
+#define get_binding_stack_pointer(thread) \
+ (current_binding_stack_pointer)
+#define set_binding_stack_pointer(thread,value) \
+ (current_binding_stack_pointer = (lispobj *)(value))
+#define access_control_stack_pointer(thread) \
+ (current_control_stack_pointer)
+#define access_control_frame_pointer(thread) \
+ (current_control_frame_pointer)
+#endif
+
+#if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_GCC_TLS)
+/* Per-thread cache of the current 'struct thread', kept in
+ * compiler-supported thread-local storage; used as a faster
+ * alternative to pthread_getspecific() where GCC TLS is available
+ * (see arch_os_get_current_thread() below). */
+extern __thread struct thread *current_thread;
+#endif
+
+/* Bytes reserved in each thread's memory block for the safepoint
+ * "csp" page; zero when safepoints are not in use, so the term
+ * vanishes from THREAD_STRUCT_SIZE. */
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
+#else
+# define THREAD_CSP_PAGE_SIZE 0
+#endif
+
+#ifdef LISP_FEATURE_WIN32
+/*
+ * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
+ * so define it arbitrarily
+ */
+#define SIGSTKSZ 1024
+#endif
+
+/* Total bytes to reserve for one thread: control stack, binding
+ * stack, alien stack, the nonpointer data block, the thread-local
+ * (dynamic values) area, a generous 32*SIGSTKSZ allowance for the
+ * signal stack, plus slack for alignment and the optional safepoint
+ * csp page. */
+#define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
+ ALIEN_STACK_SIZE + \
+ sizeof(struct nonpointer_thread_data) + \
+ dynamic_values_bytes + \
+ 32 * SIGSTKSZ + \
+ THREAD_ALIGNMENT_BYTES + \
+ THREAD_CSP_PAGE_SIZE)
+
+#if defined(LISP_FEATURE_WIN32)
+/* Forward declaration carrying the 'const' function attribute,
+ * presumably so the compiler may CSE repeated calls -- the result
+ * is fixed for the lifetime of a thread.  TODO(review): confirm why
+ * this is Win32-only. */
+static inline struct thread* arch_os_get_current_thread()
+ __attribute__((__const__));
+#endif
+
+/* This is clearly per-arch and possibly even per-OS code, but we can't
+ * put it somewhere sensible like x86-linux-os.c because it needs too
+ * much stuff like struct thread and all_threads to be defined, which
+ * usually aren't by that time. So, it's here instead. Sorry */
+
+/* Return the 'struct thread' of the calling thread.
+ *
+ * Threaded x86 is the messy case: the thread pointer is fetched via
+ * the %fs segment register (or, on Win32, from the TEB), and on some
+ * platforms %fs itself must first be restored because it can get
+ * clobbered.  Threaded non-x86 uses GCC TLS or pthread TSD.
+ * Unthreaded builds just return the single thread on all_threads. */
+static inline struct thread *arch_os_get_current_thread(void)
+{
+#if defined(LISP_FEATURE_SB_THREAD)
+#if defined(LISP_FEATURE_X86)
+ register struct thread *me=0;
+#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
+ /* Win32: read the thread pointer straight out of the TEB's TlsSlots
+  * array through %fs (TlsSlots base is offset 0xE10; 4-byte slots;
+  * slot 63 here). */
+ __asm__ ("movl %%fs:0xE10+(4*63), %0" : "=r"(me) :);
+ return me;
+#endif
+ /* Until the first thread exists (all_threads still NULL), %fs does
+  * not address thread data yet; fall through and return NULL. */
+ if(all_threads) {
+#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
+ /* Darwin: rebuild the %fs selector from the LDT index stored in
+  * the thread's tls_cookie and reload it -- the feature name
+  * indicates %fs can be lost and must be restored from TLS. */
+ sel_t sel;
+ struct thread *th = pthread_getspecific(specials);
+ sel.index = th->tls_cookie;
+ sel.rpl = USER_PRIV;
+ sel.ti = SEL_LDT;
+ __asm__ __volatile__ ("movw %w0, %%fs" : : "r"(sel));
+#elif defined(LISP_FEATURE_FREEBSD)
+/* FreeBSD: get the thread from TLS (or pthread TSD), optionally
+ * restoring %fs, and return it directly without the %fs load below. */
+#ifdef LISP_FEATURE_GCC_TLS
+ struct thread *th = current_thread;
+#else
+ struct thread *th = pthread_getspecific(specials);
+#endif
+#ifdef LISP_FEATURE_RESTORE_TLS_SEGMENT_REGISTER_FROM_TLS
+ unsigned int sel = LSEL(th->tls_cookie, SEL_UPL);
+ unsigned int fs = rfs();
+
+ /* Load FS only if it's necessary. Modifying a selector
+ * causes privilege checking and it takes long time. */
+ if (fs != sel)
+ load_fs(sel);
+#endif
+ return th;
+#endif
+ /* Common threaded-x86 path: read the thread's self-pointer slot
+  * ('this') through the %fs segment. */
+ __asm__ ("movl %%fs:%c1,%0" : "=r" (me)
+ : "i" (offsetof (struct thread,this)));
+ }
+ return me;
+#else
+/* Threaded, non-x86: plain TLS or pthread thread-specific data. */
+#ifdef LISP_FEATURE_GCC_TLS
+ return current_thread;
+#else
+ return pthread_getspecific(specials);
+#endif
+#endif /* x86 */
+#else
+ /* Unthreaded: the one and only thread heads all_threads. */
+ return all_threads;
+#endif
+}
+
+#if defined(LISP_FEATURE_MACH_EXCEPTION_HANDLER)
+/* Attach/detach a thread to the Mach exception-handling machinery. */
+extern kern_return_t mach_lisp_thread_init(struct thread *thread);
+extern kern_return_t mach_lisp_thread_destroy(struct thread *thread);
+#endif
+
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+/* Safepoint protocol entry points, called when a thread traps on the
+ * safepoint page in the corresponding situations. */
+void thread_in_safety_transition(os_context_t *ctx);
+void thread_in_lisp_raised(os_context_t *ctx);
+void thread_interrupted(os_context_t *ctx);
+void thread_pitstop(os_context_t *ctxptr);
+extern void thread_register_gc_trigger();
+
+# ifdef LISP_FEATURE_SB_THRUPTION
+/* Wake a thread so it notices a pending thruption; dispatches to the
+ * platform-specific implementation below. */
+int wake_thread(os_thread_t os_thread);
+# ifdef LISP_FEATURE_WIN32
+void wake_thread_win32(struct thread *thread);
+# else
+int wake_thread_posix(os_thread_t os_thread);
+# endif
+# endif
+
+/* Accessor for a thread's qrl lock, stored in its nonpointer data. */
+#define thread_qrl(th) (&(th)->nonpointer_data->qrl_lock)
+
+/* Save the current thread's csp_around_foreign_call /
+ * pc_around_foreign_call slots into *into and clear them, so that a
+ * matching pop_gcing_safety() can restore them.  If no csp value was
+ * published (slot reads as 0), only into->pc_around_foreign_call is
+ * zeroed; the 0 csp is recorded by the assignment in the condition.
+ * The empty asm statements act as compiler barriers, keeping the
+ * stores from being reordered with respect to one another. */
+static inline
+void push_gcing_safety(struct gcing_safety *into)
+{
+ struct thread* th = arch_os_get_current_thread();
+ asm volatile ("");
+ if ((into->csp_around_foreign_call =
+ *th->csp_around_foreign_call)) {
+ *th->csp_around_foreign_call = 0;
+ asm volatile ("");
+ into->pc_around_foreign_call = th->pc_around_foreign_call;
+ th->pc_around_foreign_call = 0;
+ asm volatile ("");
+ } else {
+ into->pc_around_foreign_call = 0;
+ }
+}
+
+/* Undo push_gcing_safety(): restore the thread's
+ * csp_around_foreign_call / pc_around_foreign_call slots from *from,
+ * but only if a nonzero csp was saved (otherwise the push cleared
+ * nothing and there is nothing to restore).  The empty asm statements
+ * are compiler barriers enforcing the store order. */
+static inline
+void pop_gcing_safety(struct gcing_safety *from)
+{
+ struct thread* th = arch_os_get_current_thread();
+ if (from->csp_around_foreign_call) {
+ asm volatile ("");
+ *th->csp_around_foreign_call = from->csp_around_foreign_call;
+ asm volatile ("");
+ th->pc_around_foreign_call = from->pc_around_foreign_call;
+ asm volatile ("");
+ }
+}
+
+/* Even with just -O1, gcc optimizes the jumps in this "loop" away
+ * entirely, giving the ability to define WITH-FOO-style macros.
+ * The statement (or block) following a use of RUN_BODY_ONCE is the
+ * loop body: it runs exactly once, and 'finally_do' runs after it,
+ * even if the body uses 'continue' (but NOT if it uses break/goto/
+ * return).  'prefix' makes the flag variable name unique per macro. */
+#define RUN_BODY_ONCE(prefix, finally_do) \
+ int prefix##done = 0; \
+ for (; !prefix##done; finally_do, prefix##done = 1)
+
+/* Run the following block with GC deferred to safepoints only:
+ * push_gcing_safety() before, pop_gcing_safety() after.  'var' names
+ * the gcing_safety local so nested uses don't collide. */
+#define WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(var) \
+ struct gcing_safety var; \
+ push_gcing_safety(&var); \
+ RUN_BODY_ONCE(var, pop_gcing_safety(&var))
+
+#define WITH_GC_AT_SAFEPOINTS_ONLY() \
+ WITH_GC_AT_SAFEPOINTS_ONLY_hygenic(sbcl__gc_safety)
+
+/* Run the following block while holding a thread's state semaphore. */
+#define WITH_STATE_SEM_hygenic(var, thread) \
+ os_sem_wait((thread)->state_sem, "thread_state"); \
+ RUN_BODY_ONCE(var, os_sem_post((thread)->state_sem, "thread_state"))
+
+#define WITH_STATE_SEM(thread) \
+ WITH_STATE_SEM_hygenic(sbcl__state_sem, thread)
+
+int check_pending_thruptions(os_context_t *ctx);
+
+#endif
+
+/* Create the first Lisp thread; the argument is the function to run. */
+extern void create_initial_thread(lispobj);
+
+#ifdef LISP_FEATURE_SB_THREAD
+/* Protects the all_threads list. */
+extern pthread_mutex_t all_threads_lock;
+#endif