* seized before all accesses to generations[] or to parts of
* page_table[] that other threads may want to see */
-static lispobj free_pages_lock=0;
+#ifdef LISP_FEATURE_SB_THREAD
+static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
\f
/*
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- get_spinlock(&free_pages_lock,(long) alloc_region);
+ thread_mutex_lock(&free_pages_lock);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
(lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
0);
}
- release_spinlock(&free_pages_lock);
+ thread_mutex_unlock(&free_pages_lock);
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
next_page = first_page+1;
- get_spinlock(&free_pages_lock,(long) alloc_region);
+ thread_mutex_lock(&free_pages_lock);
if (alloc_region->free_pointer != alloc_region->start_addr) {
/* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
page_table[next_page].allocated = FREE_PAGE_FLAG;
next_page++;
}
- release_spinlock(&free_pages_lock);
+ thread_mutex_unlock(&free_pages_lock);
/* alloc_region is per-thread, we're ok to do this unlocked */
gc_set_region_empty(alloc_region);
}
long bytes_used;
long next_page;
- get_spinlock(&free_pages_lock,(long) alloc_region);
+ thread_mutex_lock(&free_pages_lock);
if (unboxed) {
first_page =
SetSymbolValue(ALLOCATION_POINTER,
(lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
}
- release_spinlock(&free_pages_lock);
+ thread_mutex_unlock(&free_pages_lock);
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
long bytes_found;
long num_pages;
long large_p=(nbytes>=large_object_size);
- gc_assert(free_pages_lock);
+ /* FIXME: assert(free_pages_lock is held); */
/* Search for a contiguous free space of at least nbytes. If it's
* a large object then align it on a page boundary by searching
static sigset_t deferrable_sigset;
static sigset_t blockable_sigset;
-inline static void check_blockables_blocked_or_lose()
+void
+check_blockables_blocked_or_lose()
{
/* Get the current sigmask, by blocking the empty set. */
sigset_t empty,current;
/* FIXME: do not rely on NSIG being a multiple of 8 */
#define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
+extern void check_blockables_blocked_or_lose();
+
static inline void
sigcopyset(sigset_t *new, sigset_t *old)
{
#define thread_self pthread_self
#define thread_kill pthread_kill
#define thread_sigmask pthread_sigmask
+#define thread_mutex_lock(l) pthread_mutex_lock(l)
+#define thread_mutex_unlock(l) pthread_mutex_unlock(l)
#else
#define thread_self getpid
#define thread_kill kill
#define thread_sigmask sigprocmask
+#define thread_mutex_lock(l)
+#define thread_mutex_unlock(l)
#endif
extern void create_initial_thread(lispobj);
printf("%d bytes in ldt: print/x local_ldt_copy\n", n);
}
-volatile lispobj modify_ldt_lock; /* protect all calls to modify_ldt */
+#ifdef LISP_FEATURE_SB_THREAD
+pthread_mutex_t modify_ldt_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
int arch_os_thread_init(struct thread *thread) {
stack_t sigstack;
#ifdef LISP_FEATURE_SB_THREAD
- /* FIXME Lock ordering rules: all_threads_lock must usually be
- * held when getting modify_ldt_lock
- */
struct user_desc ldt_entry = {
1, 0, 0, /* index, address, length filled in later */
1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1
};
int n;
- get_spinlock(&modify_ldt_lock,(long)thread);
+ check_blockables_blocked_or_lose();
+ thread_mutex_lock(&modify_ldt_lock);
n=modify_ldt(0,local_ldt_copy,sizeof local_ldt_copy);
/* get next free ldt entry */
ldt_entry.limit=dynamic_values_bytes;
ldt_entry.limit_in_pages=0;
if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
- modify_ldt_lock=0;
+ thread_mutex_unlock(&modify_ldt_lock);
/* modify_ldt call failed: something magical is not happening */
return 0;
}
+ (1 << 2) /* TI set = LDT */
+ 3)); /* privilege level */
thread->tls_cookie=n;
- modify_ldt_lock=0;
+    thread_mutex_unlock(&modify_ldt_lock);
if(n<0) return 0;
pthread_setspecific(specials,thread);
0, 0, 0,
0, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 0
};
+ int result;
+ check_blockables_blocked_or_lose();
ldt_entry.entry_number=thread->tls_cookie;
- get_spinlock(&modify_ldt_lock,(long)thread);
- if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
- modify_ldt_lock=0;
- /* modify_ldt call failed: something magical is not happening */
- return 0;
- }
- modify_ldt_lock=0;
- return 1;
+ thread_mutex_lock(&modify_ldt_lock);
+ result = modify_ldt(1, &ldt_entry, sizeof (ldt_entry));
+ thread_mutex_unlock(&modify_ldt_lock);
+ return result;
}
;;; checkins which aren't released. (And occasionally for internal
;;; versions, especially for internal versions off the main CVS
;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"0.9.5.34"
+"0.9.5.35"