/* Total size of the single memory block backing a thread: its control
 * stack, binding stack, alien stack, the dynamic-values area
 * (presumably the per-thread special-variable bindings — confirm
 * against the thread struct layout), plus 32 * SIGSTKSZ of room for
 * signal handling. free_thread_struct()/the stack reapers below
 * os_invalidate() exactly this many bytes starting at
 * control_stack_start. */
#define THREAD_STRUCT_SIZE (THREAD_CONTROL_STACK_SIZE + BINDING_STACK_SIZE + \
                            ALIEN_STACK_SIZE + dynamic_values_bytes + \
                            32 * SIGSTKSZ)
+
+#ifdef LISP_FEATURE_SB_THREAD
+
+#ifdef QUEUE_FREEABLE_THREAD_STACKS
+
+static void
+queue_freeable_thread_stack(struct thread *thread_to_be_cleaned_up)
+{
+ if (thread_to_be_cleaned_up) {
+ pthread_mutex_lock(&freeable_stack_lock);
+ if (freeable_stack_queue) {
+ struct freeable_stack *new_freeable_stack = 0, *next;
+ next = freeable_stack_queue;
+ while (next->next) {
+ next = next->next;
+ }
+ new_freeable_stack = (struct freeable_stack *)
+ os_validate(0, sizeof(struct freeable_stack));
+ new_freeable_stack->next = NULL;
+ new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
+ new_freeable_stack->stack = (os_vm_address_t)
+ thread_to_be_cleaned_up->control_stack_start;
+ next->next = new_freeable_stack;
+ freeable_stack_count++;
+ } else {
+ struct freeable_stack *new_freeable_stack = 0;
+ new_freeable_stack = (struct freeable_stack *)
+ os_validate(0, sizeof(struct freeable_stack));
+ new_freeable_stack->next = NULL;
+ new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
+ new_freeable_stack->stack = (os_vm_address_t)
+ thread_to_be_cleaned_up->control_stack_start;
+ freeable_stack_queue = new_freeable_stack;
+ freeable_stack_count++;
+ }
+ pthread_mutex_unlock(&freeable_stack_lock);
+ }
+}
+
+#define FREEABLE_STACK_QUEUE_SIZE 4
+
+static void
+free_freeable_stacks() {
+ if (freeable_stack_queue && (freeable_stack_count > FREEABLE_STACK_QUEUE_SIZE)) {
+ struct freeable_stack* old;
+ pthread_mutex_lock(&freeable_stack_lock);
+ old = freeable_stack_queue;
+ freeable_stack_queue = old->next;
+ freeable_stack_count--;
+ gc_assert(pthread_join(old->os_thread, NULL) == 0);
+ FSHOW((stderr, "freeing thread %x stack\n", old->os_thread));
+ os_invalidate(old->stack, THREAD_STRUCT_SIZE);
+ os_invalidate((os_vm_address_t)old, sizeof(struct freeable_stack));
+ pthread_mutex_unlock(&freeable_stack_lock);
+ }
+}
+
+#elif defined(CREATE_CLEANUP_THREAD)
+static void *
+cleanup_thread(void *arg)
+{
+ struct freeable_stack *freeable = arg;
+ pthread_t self = pthread_self();
+
+ FSHOW((stderr, "/cleaner thread(%p): joining %p\n",
+ self, freeable->os_thread));
+ gc_assert(pthread_join(freeable->os_thread, NULL) == 0);
+ FSHOW((stderr, "/cleaner thread(%p): free stack %p\n",
+ self, freeable->stack));
+ os_invalidate(freeable->stack, THREAD_STRUCT_SIZE);
+ free(freeable);
+
+ pthread_detach(self);
+
+ return NULL;
+}
+
+static void
+create_cleanup_thread(struct thread *thread_to_be_cleaned_up)
+{
+ pthread_t thread;
+ int result;
+
+ if (thread_to_be_cleaned_up) {
+ struct freeable_stack *freeable =
+ malloc(sizeof(struct freeable_stack));
+ gc_assert(freeable != NULL);
+ freeable->os_thread = thread_to_be_cleaned_up->os_thread;
+ freeable->stack =
+ (os_vm_address_t) thread_to_be_cleaned_up->control_stack_start;
+ result = pthread_create(&thread, NULL, cleanup_thread, freeable);
+ gc_assert(result == 0);
+ sched_yield();
+ }
+}
+
+#else
+static void
+free_thread_stack_later(struct thread *thread_to_be_cleaned_up)
+{
+ struct freeable_stack *new_freeable_stack = 0;
+ if (thread_to_be_cleaned_up) {
+ new_freeable_stack = (struct freeable_stack *)
+ os_validate(0, sizeof(struct freeable_stack));
+ new_freeable_stack->os_thread = thread_to_be_cleaned_up->os_thread;
+ new_freeable_stack->stack = (os_vm_address_t)
+ thread_to_be_cleaned_up->control_stack_start;
+ }
+ new_freeable_stack = (struct freeable_stack *)
+ swap_lispobjs((lispobj *)(void *)&freeable_stack,
+ (lispobj)new_freeable_stack);
+ if (new_freeable_stack) {
+ FSHOW((stderr,"/reaping %p\n", (void*) new_freeable_stack->os_thread));
+ /* Under NPTL pthread_join really waits until the thread
+ * exists and the stack can be safely freed. This is sadly not
+ * mandated by the pthread spec. */
+ gc_assert(pthread_join(new_freeable_stack->os_thread, NULL) == 0);
+ os_invalidate(new_freeable_stack->stack, THREAD_STRUCT_SIZE);
+ os_invalidate((os_vm_address_t) new_freeable_stack,
+ sizeof(struct freeable_stack));
+ }
+}
+#endif
+
+/* this is the first thing that runs in the child (which is why the
+ * silly calling convention). Basically it calls the user's requested
+ * lisp function after doing arch_os_thread_init and whatever other
+ * bookkeeping needs to be done
+ */
+int
+new_thread_trampoline(struct thread *th)
+{
+ lispobj function;
+ int result, lock_ret;
+
+ FSHOW((stderr,"/creating thread %lu\n", thread_self()));
+ function = th->no_tls_value_marker;
+ th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
+ if(arch_os_thread_init(th)==0) {
+ /* FIXME: handle error */
+ lose("arch_os_thread_init failed\n");
+ }
+
+ th->os_thread=thread_self();
+ protect_control_stack_guard_page(1);
+ /* Since GC can only know about this thread from the all_threads
+ * list and we're just adding this thread to it there is no danger
+ * of deadlocking even with SIG_STOP_FOR_GC blocked (which it is
+ * not). */
+ lock_ret = pthread_mutex_lock(&all_threads_lock);
+ gc_assert(lock_ret == 0);
+ link_thread(th);
+ lock_ret = pthread_mutex_unlock(&all_threads_lock);
+ gc_assert(lock_ret == 0);
+
+ result = funcall0(function);
+
+ /* Block GC */
+ block_blockable_signals();
+ th->state=STATE_DEAD;
+
+ /* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
+ * thread, but since we are already dead it won't wait long. */
+ lock_ret = pthread_mutex_lock(&all_threads_lock);
+ gc_assert(lock_ret == 0);
+
+ gc_alloc_update_page_tables(0, &th->alloc_region);
+ unlink_thread(th);
+ pthread_mutex_unlock(&all_threads_lock);
+ gc_assert(lock_ret == 0);
+
+ if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
+ os_invalidate((os_vm_address_t)th->interrupt_data,
+ (sizeof (struct interrupt_data)));
+
+#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER
+ FSHOW((stderr, "Deallocating mach port %x\n", THREAD_STRUCT_TO_EXCEPTION_PORT(th)));
+ mach_port_move_member(mach_task_self(),
+ THREAD_STRUCT_TO_EXCEPTION_PORT(th),
+ MACH_PORT_NULL);
+ mach_port_deallocate(mach_task_self(),
+ THREAD_STRUCT_TO_EXCEPTION_PORT(th));
+ mach_port_destroy(mach_task_self(),
+ THREAD_STRUCT_TO_EXCEPTION_PORT(th));
+#endif
+
+#ifdef QUEUE_FREEABLE_THREAD_STACKS
+ queue_freeable_thread_stack(th);
+#elif defined(CREATE_CLEANUP_THREAD)
+ create_cleanup_thread(th);
+#else
+ free_thread_stack_later(th);
+#endif
+
+ FSHOW((stderr,"/exiting thread %p\n", thread_self()));
+ return result;
+}
+
+#endif /* LISP_FEATURE_SB_THREAD */
+
+static void
+free_thread_struct(struct thread *th)
+{
+ if (th->interrupt_data)
+ os_invalidate((os_vm_address_t) th->interrupt_data,
+ (sizeof (struct interrupt_data)));
+ os_invalidate((os_vm_address_t) th->control_stack_start,
+ THREAD_STRUCT_SIZE);
+}
+