#include <sys/stat.h>
#include <unistd.h>
#include <asm/ldt.h>
-#include <linux/unistd.h>
+#include <sys/syscall.h>
#include <sys/mman.h>
#include <linux/version.h>
#include "thread.h" /* dynamic_values_bytes */
#define user_desc modify_ldt_ldt_s
#endif
-_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount );
+#define modify_ldt sbcl_modify_ldt
+static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
+{
+ return syscall (SYS_modify_ldt, func, ptr, bytecount);
+}
#include "validate.h"
size_t os_vm_page_size;
printf("%d bytes in ldt: print/x local_ldt_copy\n", n);
}
-volatile lispobj modify_ldt_lock; /* protect all calls to modify_ldt */
+#ifdef LISP_FEATURE_SB_THREAD
+pthread_mutex_t modify_ldt_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
int arch_os_thread_init(struct thread *thread) {
stack_t sigstack;
#ifdef LISP_FEATURE_SB_THREAD
- /* FIXME Lock ordering rules: all_threads_lock must usually be
- * held when getting modify_ldt_lock
- */
struct user_desc ldt_entry = {
1, 0, 0, /* index, address, length filled in later */
1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1
};
int n;
- get_spinlock(&modify_ldt_lock,(long)thread);
+ thread_mutex_lock(&modify_ldt_lock);
n=modify_ldt(0,local_ldt_copy,sizeof local_ldt_copy);
/* get next free ldt entry */
ldt_entry.limit=dynamic_values_bytes;
ldt_entry.limit_in_pages=0;
if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
- modify_ldt_lock=0;
+ thread_mutex_unlock(&modify_ldt_lock);
/* modify_ldt call failed: something magical is not happening */
- return -1;
+ return 0;
}
__asm__ __volatile__ ("movw %w0, %%fs" : : "q"
((n << 3) /* selector number */
+ (1 << 2) /* TI set = LDT */
+ 3)); /* privilege level */
thread->tls_cookie=n;
- modify_ldt_lock=0;
+ thread_mutex_unlock(&modify_ldt_lock);
+
+ /* now %fs:0 refers to the current thread. Useful! Less usefully,
+ * Linux/x86 isn't capable of reporting a faulting si_addr on a
+ * segment as defined above (whereas faults on the segment that %gs
+ * usually points to are reported just fine...). As a special
+ * workaround, we store each thread structure's absolute address as
+ * a slot in itself, so that within the thread,
+ * movl %fs:SELFPTR_OFFSET,x
+ * stores the absolute address of %fs:0 into x.
+ */
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+ thread->selfptr = thread;
+#endif
if(n<0) return 0;
+#ifdef LISP_FEATURE_GCC_TLS
+ current_thread = thread;
+#else
pthread_setspecific(specials,thread);
#endif
+#endif
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Signal handlers are run on the control stack, so if it is exhausted
* we had better use an alternate stack for whatever signal tells us
sigstack.ss_sp=((void *) thread)+dynamic_values_bytes;
sigstack.ss_flags=0;
sigstack.ss_size = 32*SIGSTKSZ;
- sigaltstack(&sigstack,0);
- if(sigaltstack(&sigstack,0)<0) {
+ if(sigaltstack(&sigstack,0)<0)
lose("Cannot sigaltstack: %s\n",strerror(errno));
- }
#endif
return 1;
}
0, 0, 0,
0, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 0
};
+ int result;
ldt_entry.entry_number=thread->tls_cookie;
- get_spinlock(&modify_ldt_lock,(long)thread);
- if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
- modify_ldt_lock=0;
- /* modify_ldt call failed: something magical is not happening */
- return 0;
- }
- modify_ldt_lock=0;
- return 1;
+ thread_mutex_lock(&modify_ldt_lock);
+ result = modify_ldt(1, &ldt_entry, sizeof (ldt_entry));
+ thread_mutex_unlock(&modify_ldt_lock);
+ return result;
}
void
os_restore_fp_control(os_context_t *context)
{
- asm ("fldcw %0" : : "m" (context->uc_mcontext.fpregs->cw));
+ if (context->uc_mcontext.fpregs)
+ asm ("fldcw %0" : : "m" (context->uc_mcontext.fpregs->cw));
}
void