extern int linux_no_threads_p;
#ifdef LISP_FEATURE_SB_THREAD
-
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
extern lispobj call_into_lisp_first_time(lispobj fun, lispobj *args, int nargs);
#endif
-#endif
-
static void
link_thread(struct thread *th)
{
- th->os_thread=thread_self();
if (all_threads) all_threads->prev=th;
th->next=all_threads;
th->prev=0;
all_threads=th;
- protect_control_stack_guard_page(1);
}
#ifdef LISP_FEATURE_SB_THREAD
th->no_tls_value_marker = NO_TLS_VALUE_MARKER_WIDETAG;
if(arch_os_thread_init(th)==0) return 1;
link_thread(th);
+ th->os_thread=thread_self();
+ protect_control_stack_guard_page(1);
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
return call_into_lisp_first_time(function,args,0);
lose("arch_os_thread_init failed\n");
}
+ th->os_thread=thread_self();
+ protect_control_stack_guard_page(1);
/* Since GC can only know about this thread from the all_threads
* list and we're just adding this thread to it there is no danger
- * of deadlocking even with SIG_STOP_FOR_GC blocked. */
+ * of deadlocking even with SIG_STOP_FOR_GC blocked (which it is
+ * not). */
pthread_mutex_lock(&all_threads_lock);
link_thread(th);
pthread_mutex_unlock(&all_threads_lock);
/* SIG_STOP_FOR_GC is blocked and GC might be waiting for this
* thread, but since we are already dead it won't wait long. */
pthread_mutex_lock(&all_threads_lock);
+ gc_alloc_update_page_tables(0, &th->alloc_region);
unlink_thread(th);
pthread_mutex_unlock(&all_threads_lock);
- gc_alloc_update_page_tables(0, &th->alloc_region);
if(th->tls_cookie>=0) arch_os_thread_cleanup(th);
os_invalidate((os_vm_address_t)th->interrupt_data,
(sizeof (struct interrupt_data)));
union per_thread_data *per_thread;
struct thread *th=0; /* subdue gcc */
void *spaces=0;
+#ifdef LISP_FEATURE_SB_THREAD
+ int i;
+#endif
/* may as well allocate all the spaces at once: it saves us from
* having to decide what to do if only some of the allocations
BINDING_STACK_SIZE+
ALIEN_STACK_SIZE);
- if(all_threads) {
- memcpy(per_thread,arch_os_get_current_thread(),
- dynamic_values_bytes);
- } else {
#ifdef LISP_FEATURE_SB_THREAD
- int i;
- for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
- per_thread->dynamic_values[i]=NO_TLS_VALUE_MARKER_WIDETAG;
+ for(i = 0; i < (dynamic_values_bytes / sizeof(lispobj)); i++)
+ per_thread->dynamic_values[i] = NO_TLS_VALUE_MARKER_WIDETAG;
+ if (all_threads == 0) {
if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG) {
SetSymbolValue
(FREE_TLS_INDEX,
+ /* FIXME: should this be MAX_INTERRUPTS - 1? */
make_fixnum(MAX_INTERRUPTS+
sizeof(struct thread)/sizeof(lispobj)),
0);
STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
#endif
#undef STATIC_TLS_INIT
-#endif
}
+#endif
th=&per_thread->thread;
th->control_stack_start = spaces;
if(linux_no_threads_p) return 0;
- th=create_thread_struct(initial_function);
+ /* Assuming that a fresh thread struct has no lisp objects in it,
+ * linking it to all_threads can be left to the thread itself
+ * without fear of gc lossage. initial_function violates this
+ * assumption and must stay pinned until the child starts up. */
+ th = create_thread_struct(initial_function);
if(th==0) return 0;
if (create_os_thread(th,&kid_tid)) {
th->os_thread));
/* stop all other threads by sending them SIG_STOP_FOR_GC */
for(p=all_threads; p; p=p->next) {
+ gc_assert(p->os_thread != 0);
if((p!=th) && ((p->state==STATE_RUNNING))) {
FSHOW_SIGNAL((stderr,"/gc_stop_the_world: suspending %lu\n",
p->os_thread));
FSHOW_SIGNAL((stderr,"/gc_stop_the_world:signals sent\n"));
/* wait for the running threads to stop or finish */
for(p=all_threads;p;) {
- gc_assert(p->os_thread!=0);
- if((p==th) || (p->state==STATE_SUSPENDED) ||
- (p->state==STATE_DEAD)) {
- p=p->next;
- } else {
+ if((p!=th) && (p->state==STATE_RUNNING)) {
sched_yield();
+ } else {
+ p=p->next;
}
}
FSHOW_SIGNAL((stderr,"/gc_stop_the_world:end\n"));