(sb!alien:define-alien-routine ("lutex_lock" %lutex-lock)
int (lutex unsigned-long))
+ (sb!alien:define-alien-routine ("lutex_trylock" %lutex-trylock)
+ int (lutex unsigned-long))
+
(sb!alien:define-alien-routine ("lutex_unlock" %lutex-unlock)
int (lutex unsigned-long))
(format *debug-io* "Thread: ~A~%" *current-thread*)
(sb!debug:backtrace most-positive-fixnum *debug-io*)
(force-output *debug-io*))
- ;; FIXME: sb-lutex and (not wait-p)
#!+sb-lutex
- (when wait-p
- (with-lutex-address (lutex (mutex-lutex mutex))
- (%lutex-lock lutex))
+ (when (zerop (with-lutex-address (lutex (mutex-lutex mutex))
+ (if wait-p
+ (%lutex-lock lutex)
+ (%lutex-trylock lutex))))
(setf (mutex-value mutex) new-value))
#!-sb-lutex
(let (old)
(prev :c-type "struct lutex *" :length 1)
(mutex :c-type "pthread_mutex_t *"
:length 1)
+ (mutexattr :c-type "pthread_mutexattr_t *"
+ :length 1)
(condition-variable :c-type "pthread_cond_t *"
:length 1))
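For orientation (not part of the patch), and going only by the slots visible in
this hunk: genesis turns the :c-type declarations above into the C-side struct
that lutex.c manipulates, roughly:

    #include <pthread.h>

    struct lutex {
        /* ... object header and any slots not shown in this hunk ... */
        struct lutex *prev;
        pthread_mutex_t *mutex;
        pthread_mutexattr_t *mutexattr;          /* the slot added by this patch */
        pthread_cond_t *condition_variable;
    };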
# runtime.
LINKFLAGS += -dynamic -export-dynamic
+# Use libthr (1:1 threading); libpthread (m:n threading) does not work.
ifdef LISP_FEATURE_SB_THREAD
- OS_LIBS += -lpthread
+ #OS_LIBS += -lpthread
+ OS_LIBS += -lthr
endif
#if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_SB_LUTEX)
+#include <errno.h>
#include <stdlib.h>
#include "runtime.h"
int ret;
struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
+ lutex->mutexattr = malloc(sizeof(pthread_mutexattr_t));
+ lutex_assert(lutex->mutexattr != 0);
+
+ ret = pthread_mutexattr_init(lutex->mutexattr);
+ lutex_assert(ret == 0);
+
+ /* The default mutex type is implementation-dependent.  We use
+ * PTHREAD_MUTEX_ERRORCHECK so that locking a mutex the calling
+ * thread already holds returns EDEADLK instead of deadlocking. */
+ /* FIXME: pthread_mutexattr_settype is only guaranteed on SUSv2-level
+ * implementations.  Can it be used without a feature check? */
+ ret = pthread_mutexattr_settype(lutex->mutexattr,
+ PTHREAD_MUTEX_ERRORCHECK);
+ lutex_assert(ret == 0);
+
lutex->mutex = malloc(sizeof(pthread_mutex_t));
lutex_assert(lutex->mutex != 0);
- ret = pthread_mutex_init(lutex->mutex, NULL);
+ ret = pthread_mutex_init(lutex->mutex, lutex->mutexattr);
lutex_assert(ret == 0);
lutex->condition_variable = malloc(sizeof(pthread_cond_t));
struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
ret = thread_mutex_lock(lutex->mutex);
+ /* EDEADLK: the mutex is already locked by the calling thread. */
+ if (ret == EDEADLK)
+ return ret;
+ lutex_assert(ret == 0);
+
+ return ret;
+}
+
+int
+lutex_trylock (tagged_lutex_t tagged_lutex)
+{
+ int ret = 0;
+ struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
+
+ ret = pthread_mutex_trylock(lutex->mutex);
+ /* The mutex is already locked; report the failed trylock to the caller. */
+ if (ret == EDEADLK || ret == EBUSY)
+ return ret;
lutex_assert(ret == 0);
return ret;
struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
ret = thread_mutex_unlock(lutex->mutex);
+ /* Unlocking an unlocked mutex can happen legitimately, e.g. in
+ * (with-mutex (mutex) (cond-wait cond mutex)); report EPERM to the caller. */
+ if (ret == EPERM)
+ return ret;
lutex_assert(ret == 0);
return ret;
lutex->mutex = NULL;
}
+ if (lutex->mutexattr) {
+ pthread_mutexattr_destroy(lutex->mutexattr);
+ free(lutex->mutexattr);
+ lutex->mutexattr = NULL;
+ }
+
return 0;
}
#endif
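The ERRORCHECK choice above is what makes the EDEADLK/EBUSY/EPERM returns
meaningful. A minimal standalone sketch (not part of the patch; compile with
cc -pthread) of the behaviour being relied on:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t mutex;
        int ret;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&mutex, &attr);

        pthread_mutex_lock(&mutex);             /* first lock succeeds */
        ret = pthread_mutex_lock(&mutex);       /* would deadlock on a default mutex */
        printf("relock by owner:      %s\n", ret == EDEADLK ? "EDEADLK" : "other");

        ret = pthread_mutex_trylock(&mutex);    /* already held */
        printf("trylock while held:   %s\n",
               (ret == EBUSY || ret == EDEADLK) ? "busy" : "other");

        pthread_mutex_unlock(&mutex);           /* release the one real lock */
        ret = pthread_mutex_unlock(&mutex);     /* now unlocked: error-checked */
        printf("unlock when unlocked: %s\n", ret == EPERM ? "EPERM" : "other");

        pthread_mutex_destroy(&mutex);
        pthread_mutexattr_destroy(&attr);
        return 0;
    }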
#define QUEUE_FREEABLE_THREAD_STACKS
#endif
+#ifdef LISP_FEATURE_FREEBSD
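+/* FreeBSD: reclaim an exited thread's stack from a dedicated cleaner thread
+ * (CREATE_CLEANUP_THREAD) and keep pthread_create() from racing with
+ * stop-the-world (LOCK_CREATE_THREAD); see below. */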
+#define CREATE_CLEANUP_THREAD
+#define LOCK_CREATE_THREAD
+#endif
+
#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */
struct freeable_stack {
#ifdef LISP_FEATURE_SB_THREAD
pthread_mutex_t all_threads_lock = PTHREAD_MUTEX_INITIALIZER;
+#ifdef LOCK_CREATE_THREAD
+static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
#endif
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
}
}
+#elif defined(CREATE_CLEANUP_THREAD)
+static void *
+cleanup_thread(void *arg)
+{
+ struct freeable_stack *freeable = arg;
+ pthread_t self = pthread_self();
+
+ FSHOW((stderr, "/cleaner thread(%p): joining %p\n",
+ self, freeable->os_thread));
+ gc_assert(pthread_join(freeable->os_thread, NULL) == 0);
+ FSHOW((stderr, "/cleaner thread(%p): free stack %p\n",
+ self, freeable->stack));
+ os_invalidate(freeable->stack, THREAD_STRUCT_SIZE);
+ free(freeable);
+
+ pthread_detach(self);
+
+ return NULL;
+}
+
+static void
+create_cleanup_thread(struct thread *thread_to_be_cleaned_up)
+{
+ pthread_t thread;
+ int result;
+
+ if (thread_to_be_cleaned_up) {
+ struct freeable_stack *freeable =
+ malloc(sizeof(struct freeable_stack));
+ gc_assert(freeable != NULL);
+ freeable->os_thread = thread_to_be_cleaned_up->os_thread;
+ freeable->stack =
+ (os_vm_address_t) thread_to_be_cleaned_up->control_stack_start;
+ result = pthread_create(&thread, NULL, cleanup_thread, freeable);
+ gc_assert(result == 0);
+ sched_yield();
+ }
+}
+
#else
static void
free_thread_stack_later(struct thread *thread_to_be_cleaned_up)
#ifdef QUEUE_FREEABLE_THREAD_STACKS
queue_freeable_thread_stack(th);
+#elif defined(CREATE_CLEANUP_THREAD)
+ create_cleanup_thread(th);
#else
free_thread_stack_later(th);
#endif
FSHOW_SIGNAL((stderr,"/create_os_thread: creating new thread\n"));
+#ifdef LOCK_CREATE_THREAD
+ retcode = pthread_mutex_lock(&create_thread_lock);
+ gc_assert(retcode == 0);
+ FSHOW_SIGNAL((stderr,"/create_os_thread: got lock\n"));
+#endif
sigemptyset(&newset);
/* Blocking deferrable signals is enough, no need to block
* SIG_STOP_FOR_GC because the child process is not linked onto
free_freeable_stacks();
#endif
thread_sigmask(SIG_SETMASK,&oldset,0);
+#ifdef LOCK_CREATE_THREAD
+ retcode = pthread_mutex_unlock(&create_thread_lock);
+ gc_assert(retcode == 0);
+ FSHOW_SIGNAL((stderr,"/create_os_thread: released lock\n"));
+#endif
return r;
}
{
struct thread *p,*th=arch_os_get_current_thread();
int status, lock_ret;
+#ifdef LOCK_CREATE_THREAD
+ /* KLUDGE: Stopping a thread in the middle of pthread_create() deadlocks
+ * on FreeBSD, so wait until no thread creation is in progress. */
+ FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on create_thread_lock, thread=%lu\n",
+ th->os_thread));
+ lock_ret = pthread_mutex_lock(&create_thread_lock);
+ gc_assert(lock_ret == 0);
+ FSHOW_SIGNAL((stderr,"/gc_stop_the_world:got create_thread_lock, thread=%lu\n",
+ th->os_thread));
+#endif
FSHOW_SIGNAL((stderr,"/gc_stop_the_world:waiting on lock, thread=%lu\n",
th->os_thread));
/* keep threads from starting while the world is stopped. */
lock_ret = pthread_mutex_unlock(&all_threads_lock);
gc_assert(lock_ret == 0);
+#ifdef LOCK_CREATE_THREAD
+ lock_ret = pthread_mutex_unlock(&create_thread_lock);
+ gc_assert(lock_ret == 0);
+#endif
FSHOW_SIGNAL((stderr,"/gc_start_the_world:end\n"));
}
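The locking protocol added above, reduced to a standalone sketch (illustrative
only; the function names here are made up): the creator holds
create_thread_lock across pthread_create(), and stop-the-world acquires the
same lock first, so no thread can be caught half-created while the world is
being stopped.

    #include <pthread.h>

    static pthread_mutex_t create_thread_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Any thread that wants to spawn another one goes through this. */
    static int guarded_create(pthread_t *tid, void *(*fn)(void *), void *arg)
    {
        int ret;
        pthread_mutex_lock(&create_thread_lock);
        ret = pthread_create(tid, NULL, fn, arg); /* cannot overlap a stop-the-world */
        pthread_mutex_unlock(&create_thread_lock);
        return ret;
    }

    /* The GC takes the lock before suspending anyone ... */
    static void stop_the_world(void)
    {
        pthread_mutex_lock(&create_thread_lock);
        /* ... suspend every registered thread ... */
    }

    /* ... and releases it only after everyone is running again. */
    static void start_the_world(void)
    {
        /* ... resume every registered thread ... */
        pthread_mutex_unlock(&create_thread_lock);
    }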
sel.rpl = USER_PRIV;
sel.ti = SEL_LDT;
__asm__ __volatile__ ("movw %w0, %%fs" : : "r"(sel));
+#elif defined(LISP_FEATURE_FREEBSD) && defined(LISP_FEATURE_RESTORE_TLS_SEGMENT_REGISTER_FROM_TLS)
+ struct thread *th = pthread_getspecific(specials);
+ unsigned int sel = LSEL(th->tls_cookie, SEL_UPL);
+ unsigned int fs = rfs();
+
+ /* Reload FS only when necessary: writing a segment register
+ * triggers privilege checks and is comparatively slow. */
+ if (fs != sel)
+ load_fs(sel);
+ return th;
#endif
__asm__ __volatile__ ("movl %%fs:%c1,%0" : "=r" (me)
: "i" (offsetof (struct thread,this)));
}
FSHOW_SIGNAL((stderr, "/ TLS: Allocated LDT %x\n", n));
sel = LSEL(n, SEL_UPL);
- __asm__ __volatile__ ("mov %0, %%fs" : : "r"(sel));
+ load_fs(sel);
thread->tls_cookie=n;
pthread_setspecific(specials,thread);
#ifndef _X86_BSD_OS_H
#define _X86_BSD_OS_H
+#ifdef LISP_FEATURE_FREEBSD
+#include <machine/segments.h>
+#include <machine/cpufunc.h>
+#endif
+
static inline os_context_t *arch_os_get_context(void **void_context) {
return (os_context_t *) *void_context;
}
(force-output)
(sb-ext:quit :unix-status 1)))))))
-(let* ((nanosleep-errno (progn
+;; (nanosleep -1 0) does not fail on FreeBSD, so that case is skipped there.
+(let* (#-freebsd
+ (nanosleep-errno (progn
(sb-unix:nanosleep -1 0)
(sb-unix::get-errno)))
(open-errno (progn
(sb-unix::get-errno)))
(threads
(list
+ #-freebsd
(exercise-syscall (lambda () (sb-unix:nanosleep -1 0)) nanosleep-errno)
(exercise-syscall (lambda () (open "no-such-file"
:if-does-not-exist nil))
;;; checkins which aren't released. (And occasionally for internal
;;; versions, especially for internal versions off the main CVS
;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"0.9.18.61"
+"0.9.18.62"