+u32 local_ldt_copy[LDT_ENTRIES*LDT_ENTRY_SIZE/sizeof(u32)];
+
+/* XXX this could be conditionally compiled based on some
+ * "debug-friendly" flag. But it doesn't make anything slower; it
+ * only makes the runtime fractionally larger. */
+
+/* Debugging aid, meant to be called from gdb: snapshot the current
+ * LDT into local_ldt_copy, then inspect it with "print/x
+ * local_ldt_copy" as the message suggests. */
+void debug_get_ldt(void)
+{
+ int n=modify_ldt (0, local_ldt_copy, sizeof local_ldt_copy);
+ printf("%d bytes in ldt: print/x local_ldt_copy\n", n);
+}
+
+lispobj modify_ldt_lock; /* protect all calls to modify_ldt */
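+
+/* A minimal sketch of the spinlock protocol assumed here (the real
+ * get_spinlock() is defined elsewhere in the runtime; the builtin
+ * below is only illustrative):
+ *
+ *   static void get_spinlock(lispobj *word, long value)
+ *   {
+ *       while (__sync_val_compare_and_swap(word, 0, value) != 0)
+ *           ; // spin until we claim the word with our value
+ *   }
+ *
+ * Storing 0 back into the word, as arch_os_thread_init does below,
+ * is what releases the lock. */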
+
+int arch_os_thread_init(struct thread *thread) {
+ stack_t sigstack;
+#ifdef LISP_FEATURE_SB_THREAD
+ /* this must be called from a function that has an exclusive lock
+ * on all_threads
+ */
+ struct modify_ldt_ldt_s ldt_entry = {
+ 1, 0, 0, /* index, address, length filled in later */
+ 1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1
+ };
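+ /* For readability: the positional initializer above corresponds to
+ * this C99 designated-initializer form (a sketch assuming the field
+ * order of struct modify_ldt_ldt_s in <asm/ldt.h>):
+ *
+ *   struct modify_ldt_ldt_s ldt_entry = {
+ *       .entry_number    = 1,  // overwritten below
+ *       .base_addr       = 0,  // overwritten below
+ *       .limit           = 0,  // overwritten below
+ *       .seg_32bit       = 1,
+ *       .contents        = MODIFY_LDT_CONTENTS_DATA,
+ *       .read_exec_only  = 0,
+ *       .limit_in_pages  = 0,
+ *       .seg_not_present = 0,
+ *       .useable         = 1
+ *   };
+ */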
+ int n;
+ get_spinlock(&modify_ldt_lock,thread);
+ n=modify_ldt(0,local_ldt_copy,sizeof local_ldt_copy);
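+ /* modify_ldt(2) interface, for reference: func 0 reads the current
+ * LDT into the buffer and returns the number of bytes read; func 1
+ * (used below) installs a single entry and returns 0 on success. */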
+ /* get next free ldt entry */
+
+ if(n>0) { /* n<=0: empty LDT, or the read failed */
+ u32 *p;
+ int entries=n/LDT_ENTRY_SIZE; /* number of entries actually read */
+ for(n=0,p=local_ldt_copy; n<entries && *p; p+=LDT_ENTRY_SIZE/sizeof(u32))
+ n++;
+ }
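+ /* Why testing only the first u32 of each 8-byte descriptor is
+ * enough: unused LDT slots read back as all zeros, and the first
+ * u32 of a descriptor holds limit[15:0] and base[15:0], so every
+ * entry this function installs (nonzero limit) has a nonzero first
+ * word. This does assume nobody else installs LDT entries whose
+ * low word happens to be zero. */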
+ ldt_entry.entry_number=n;
+ ldt_entry.base_addr=(unsigned long) thread;
+ ldt_entry.limit=dynamic_values_bytes;
+ ldt_entry.limit_in_pages=0;
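+ /* The new segment covers exactly this thread's block of
+ * thread-local values: it starts at the thread structure itself
+ * and is dynamic_values_bytes long. limit_in_pages=0 means the
+ * limit is counted in bytes rather than 4k pages. */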
+ if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
+ modify_ldt_lock=0; /* release the spinlock before bailing */
+ /* modify_ldt call failed: something magical is not happening */
+ return -1;
+ }
+ __asm__ __volatile__ ("movw %w0, %%fs" : : "q"
+ ((n << 3) /* selector number */
+ + (1 << 2) /* TI set = LDT */
+ + 3)); /* privilege level */
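+ /* The selector value built above is a standard x86 segment
+ * selector: bits 15:3 hold the descriptor index, bit 2 is the
+ * table indicator (1 = LDT), bits 1:0 the requested privilege
+ * level (3 = user). E.g. for n=1, %fs is loaded with
+ * (1<<3)+(1<<2)+3 = 0x0f. */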
+ thread->tls_cookie=n; /* record which LDT entry this thread uses */
+ modify_ldt_lock=0; /* release the spinlock */
+
+ if(n<0) return 0; /* defensive: n can't be negative at this point */