+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
+#endif
+
+/* Push TH onto the front of the global all_threads list, fixing up
+ * the back-link of the previous head if there was one. */
+static void
+link_thread(struct thread *th)
+{
+ struct thread *head = all_threads;
+ th->prev = 0;
+ th->next = head;
+ if (head)
+ head->prev = th;
+ all_threads = th;
+}
+
+#ifdef LISP_FEATURE_SB_THREAD
+/* Remove TH from the doubly-linked all_threads list.  When TH is the
+ * head, the list head itself is advanced. */
+static void
+unlink_thread(struct thread *th)
+{
+ struct thread *before = th->prev;
+ struct thread *after = th->next;
+ if (before)
+ before->next = after;
+ else
+ all_threads = after;
+ if (after)
+ after->prev = before;
+}
+#endif
+
+/* Entry point for the initial (primordial) thread.  The Lisp function
+ * to run is smuggled in through the thread struct's no_tls_value_marker
+ * slot; it is read out and the slot restored to its sentinel before any
+ * Lisp code runs.  Returns the function's result, or 1 if the per-arch
+ * thread setup fails. */
+static int
+initial_thread_trampoline(struct thread *th)
+{
+ lispobj function;
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+ lispobj *args = NULL;
+#endif
+ /* Recover the stashed function, then put the sentinel back so the
+ * slot behaves as an ordinary "no TLS value" marker afterwards. */
+ function = th->no_tls_value_marker;
+ th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
+ if(arch_os_thread_init(th)==0) return 1;
+ /* Make this thread visible on all_threads before entering Lisp. */
+ link_thread(th);
+ th->os_thread=thread_self();
+#ifndef LISP_FEATURE_WIN32
+ /* Arm the control-stack guard page (not done on win32). */
+ protect_control_stack_guard_page(1);
+#endif
+
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+ /* On x86/x86-64 the very first transition into Lisp uses a special
+ * entry path; args is unused here since nargs is 0. */
+ return call_into_lisp_first_time(function,args,0);
+#else
+ return funcall0(function);
+#endif
+}
+
+#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
+ ALIEN_STACK_SIZE + dynamic_values_bytes + \
+ 32 * SIGSTKSZ)
+
+#ifdef LISP_FEATURE_SB_THREAD
+
+#ifdef QUEUE_FREEABLE_THREAD_STACKS
+
+/* Record the dead thread's control stack on freeable_stack_queue so a
+ * later free_freeable_stacks() call can join the thread and release the
+ * memory.  No-op when passed NULL.
+ *
+ * The node is allocated and filled in before taking the queue lock, so
+ * the lock is held only for the list append.  (The two branches of the
+ * original duplicated the allocate-and-fill code verbatim; it is hoisted
+ * here.)
+ *
+ * NOTE(review): os_validate's result is used unchecked, as in the
+ * original — presumably it aborts or cannot fail here; confirm. */
+static void
+queue_freeable_thread_stack(struct thread *thread_to_be_cleaned_up)
+{
+ struct freeable_stack *new_freeable_stack;
+
+ if (!thread_to_be_cleaned_up)
+ return;
+
+ new_freeable_stack = (struct freeable_stack *)
+ os_validate(0, sizeof(struct freeable_stack));
+ new_freeable_stack->next = NULL;
+ new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
+ new_freeable_stack->stack = (os_vm_address_t)
+ thread_to_be_cleaned_up->control_stack_start;
+
+ pthread_mutex_lock(&freeable_stack_lock);
+ if (freeable_stack_queue) {
+ /* Append to the tail of the existing queue. */
+ struct freeable_stack *tail = freeable_stack_queue;
+ while (tail->next) {
+ tail = tail->next;
+ }
+ tail->next = new_freeable_stack;
+ } else {
+ freeable_stack_queue = new_freeable_stack;
+ }
+ freeable_stack_count++;
+ pthread_mutex_unlock(&freeable_stack_lock);
+}
+
+#define FREEABLE_STACK_QUEUE_SIZE 4
+
+/* When more than FREEABLE_STACK_QUEUE_SIZE dead-thread stacks are
+ * queued, pop the oldest one: join its thread, then release the stack
+ * and the queue node.
+ *
+ * Fixes: the original called pthread_join() inside gc_assert(), so the
+ * join — a required side effect — would vanish if assertions were
+ * compiled out.  The join now runs unconditionally and only the result
+ * check sits in the assert.  Also uses a C-standard (void) parameter
+ * list instead of the old-style (). */
+static void
+free_freeable_stacks(void) {
+ if (freeable_stack_queue && (freeable_stack_count > FREEABLE_STACK_QUEUE_SIZE)) {
+ struct freeable_stack* old;
+ int join_result;
+ pthread_mutex_lock(&freeable_stack_lock);
+ old = freeable_stack_queue;
+ freeable_stack_queue = old->next;
+ freeable_stack_count--;
+ join_result = pthread_join(old->os_thread, NULL);
+ gc_assert(join_result == 0);
+ (void)join_result; /* silence unused warning if gc_assert is a no-op */
+ FSHOW((stderr, "freeing thread %x stack\n", old->os_thread));
+ os_invalidate(old->stack, THREAD_STRUCT_SIZE);
+ os_invalidate((os_vm_address_t)old, sizeof(struct freeable_stack));
+ pthread_mutex_unlock(&freeable_stack_lock);
+ }
+}
+
+#elif defined(CREATE_CLEANUP_THREAD)
+static void *
+cleanup_thread(void *arg)
+{
+ struct freeable_stack *freeable = arg;
+ pthread_t self = pthread_self();
+
+ FSHOW((stderr, "/cleaner thread(%p): joining %p\n",
+ self, freeable->os_thread));
+ gc_assert(pthread_join(freeable->os_thread, NULL) == 0);
+ FSHOW((stderr, "/cleaner thread(%p): free stack %p\n",
+ self, freeable->stack));
+ os_invalidate(freeable->stack, THREAD_STRUCT_SIZE);
+ free(freeable);
+
+ pthread_detach(self);