Further work towards use of win32 file HANDLEs
diff --git a/src/runtime/thread.h b/src/runtime/thread.h
index c8c15e5..88b03bc 100644
--- a/src/runtime/thread.h
+++ b/src/runtime/thread.h
@@ -11,6 +11,9 @@
 #ifdef LISP_FEATURE_GENCGC
 #include "gencgc-alloc-region.h"
 #endif
+#ifdef LISP_FEATURE_WIN32
+#include "win32-thread-private-events.h"
+#endif
 #include "genesis/symbol.h"
 #include "genesis/static-symbols.h"
 
@@ -66,7 +69,9 @@ extern struct threads_suspend_info suspend_info;
 
 struct gcing_safety {
     lispobj csp_around_foreign_call;
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
     lispobj* pc_around_foreign_call;
+#endif
 };
 
 int handle_safepoint_violation(os_context_t *context, os_vm_address_t addr);
@@ -99,41 +104,12 @@ union per_thread_data {
  */
 struct nonpointer_thread_data
 {
-#ifdef LISP_FEATURE_SB_THREAD
+#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_SB_SAFEPOINT)
     os_sem_t state_sem;
     os_sem_t state_not_running_sem;
     os_sem_t state_not_stopped_sem;
-# ifdef LISP_FEATURE_SB_SAFEPOINT
-   /* For safepoint-based builds, together with thread's
-    * csp_around_foreign_call pointer target, thread_qrl(thread) makes
-    * `quickly revokable lock'. Unlike most mutexes, this one is
-    * normally locked; by convention, other thread may read and use the
-    * thread's FFI-CSP location _either_ when the former holds the
-    * lock(mutex) _or_ when page permissions for FFI-CSP location were
-    * set to read-only.
-    *
-    * Combined semantic of QRL is not the same as the semantic of mutex
-    * returned by this function; rather, the mutex, when released by the
-    * owning thread, provides an edge-triggered notification of QRL
-    * release, which is represented by writing non-null
-    * csp_around_foreign_call.
-    *
-    * When owner thread is `in Lisp' (i.e. a heap mutator), its FFI-CSP
-    * contains null, otherwise it points to the top of C stack that
-    * should be preserved by GENCGC. If another thread needs to wait for
-    * mutator state change with `in Lisp => in C' direction, it disables
-    * FFI-CSP overwrite using page protection, and takes the mutex
-    * returned by thread_qrl(). Page fault handler normally ends up in a
-    * routine releasing this mutex and waiting for some appropriate
-    * event to take it back.
-    *
-    * This way, each thread may modify its own FFI-CSP content freely
-    * without memory barriers (paying with exception handling overhead
-    * whenever a contention happens). */
-    pthread_mutex_t qrl_lock;
-# endif
 #else
-    /* An unused field follows, to ensure that the struct in non-empty
+    /* An unused field follows, to ensure that the struct is non-empty
      * for non-GCC compilers. */
     int unused;
 #endif
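
The comment deleted above documents the `quickly revokable lock' (QRL) protocol that this change retires from thread.h. A minimal sketch of the convention it describes, with illustrative names only (this is not SBCL's implementation, and the page-protection half of the protocol is only described in comments):

    /* Sketch of the QRL convention from the removed comment. */
    #include <pthread.h>
    #include <stdint.h>

    struct qrl_sketch {
        /* 0 while the owning thread is "in Lisp" (a heap mutator); otherwise
         * the top of the C stack that GENCGC must preserve. */
        volatile uintptr_t ffi_csp;
        /* Held by the owning thread by default.  Releasing it is the
         * edge-triggered notification that ffi_csp has become non-zero. */
        pthread_mutex_t qrl_lock;
    };

    /* Owner side: entering foreign code is a plain store with no memory
     * barrier; contention is paid for via exception handling instead. */
    static void qrl_sketch_enter_foreign(struct qrl_sketch *t, void *c_stack_top)
    {
        t->ffi_csp = (uintptr_t) c_stack_top;
    }

    /* Observer side (e.g. a thread triggering GC): write-protect the ffi_csp
     * location, then take the mutex.  The owner's next ffi_csp write faults,
     * and the fault handler releases qrl_lock on the owner's behalf, so this
     * acquisition completes once the owner is known to be "in C". */
    static void qrl_sketch_wait_until_in_c(struct qrl_sketch *t)
    {
        pthread_mutex_lock(&t->qrl_lock);
        /* ffi_csp may now be read and used while the lock is held. */
        pthread_mutex_unlock(&t->qrl_lock);
    }
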
@@ -255,32 +231,47 @@ StaticSymbolFunction(lispobj sym)
 #define access_control_frame_pointer(thread) \
     ((thread)->control_frame_pointer)
 #  endif
-#elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+#else
+#  if defined(BINDING_STACK_POINTER)
 #define get_binding_stack_pointer(thread)       \
     SymbolValue(BINDING_STACK_POINTER, thread)
 #define set_binding_stack_pointer(thread,value) \
     SetSymbolValue(BINDING_STACK_POINTER, (lispobj)(value), thread)
-#define access_control_stack_pointer(thread)    \
-    (current_control_stack_pointer)
-#else
+#  else
 #define get_binding_stack_pointer(thread)       \
     (current_binding_stack_pointer)
 #define set_binding_stack_pointer(thread,value) \
     (current_binding_stack_pointer = (lispobj *)(value))
-#define access_control_stack_pointer(thread) \
+#  endif
+#define access_control_stack_pointer(thread)    \
     (current_control_stack_pointer)
+#  if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
 #define access_control_frame_pointer(thread) \
     (current_control_frame_pointer)
+#  endif
 #endif
 
 #if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_GCC_TLS)
 extern __thread struct thread *current_thread;
 #endif
 
-#ifdef LISP_FEATURE_SB_SAFEPOINT
-# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
-#else
+#ifndef LISP_FEATURE_SB_SAFEPOINT
 # define THREAD_CSP_PAGE_SIZE 0
+#elif defined(LISP_FEATURE_PPC)
+  /* BACKEND_PAGE_BYTES is nice and large on this platform, but therefore
+   * does not fit into an immediate, making it awkward to access the page
+   * relative to the thread-tn... */
+# define THREAD_CSP_PAGE_SIZE 4096
+#else
+# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
+#endif
+
+#ifdef LISP_FEATURE_WIN32
+/*
+ * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
+ * so define it arbitrarily
+ */
+#define SIGSTKSZ 1024
 #endif
 
 #define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
@@ -291,6 +282,11 @@ extern __thread struct thread *current_thread;
                             THREAD_ALIGNMENT_BYTES +                    \
                             THREAD_CSP_PAGE_SIZE)
 
+#if defined(LISP_FEATURE_WIN32)
+static inline struct thread* arch_os_get_current_thread()
+    __attribute__((__const__));
+#endif
+
 /* This is clearly per-arch and possibly even per-OS code, but we can't
  * put it somewhere sensible like x86-linux-os.c because it needs too
  * much stuff like struct thread and all_threads to be defined, which
@@ -301,6 +297,10 @@ static inline struct thread *arch_os_get_current_thread(void)
 #if defined(LISP_FEATURE_SB_THREAD)
 #if defined(LISP_FEATURE_X86)
     register struct thread *me=0;
+#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
+    __asm__ ("movl %%fs:0xE10+(4*63), %0" : "=r"(me) :);
+    return me;
+#endif
     if(all_threads) {
 #if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
         sel_t sel;
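
On 32-bit Windows, %fs points at the TEB, whose TlsSlots[] array begins at offset 0xE10 with 4-byte entries, so the asm added above reads TlsSlots[63]. In Win32 API terms the same lookup looks roughly like this (assuming the runtime stores the current thread pointer in TLS index 63, as the hard-coded slot suggests):

    #include <windows.h>

    /* Equivalent of the %fs-relative load above, expressed via the TLS API;
     * the index 63 mirrors the 4*63 offset hard-coded in the asm. */
    static struct thread *current_thread_via_tls_api(void)
    {
        return (struct thread *) TlsGetValue(63);
    }
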
@@ -326,7 +326,7 @@ static inline struct thread *arch_os_get_current_thread(void)
 #endif
         return th;
 #endif
-        __asm__ __volatile__ ("movl %%fs:%c1,%0" : "=r" (me)
+        __asm__ ("movl %%fs:%c1,%0" : "=r" (me)
                  : "i" (offsetof (struct thread,this)));
     }
     return me;
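
Dropping __volatile__ here (and, for the Win32 build, declaring arch_os_get_current_thread() with __attribute__((__const__)) a few hunks above) tells GCC the lookup is a pure value computation, so repeated calls may be combined. Illustratively:

    /* Sketch: with a non-volatile asm and/or a __const__-attributed function,
     * GCC is free to reuse the first result instead of emitting a second
     * %fs-relative load. */
    static void thread_lookup_cse_example(void)
    {
        struct thread *a = arch_os_get_current_thread();
        struct thread *b = arch_os_get_current_thread();  /* may be folded into 'a' */
        (void) a; (void) b;
    }
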
@@ -350,10 +350,18 @@ extern kern_return_t mach_lisp_thread_destroy(struct thread *thread);
 #ifdef LISP_FEATURE_SB_SAFEPOINT
 void thread_in_safety_transition(os_context_t *ctx);
 void thread_in_lisp_raised(os_context_t *ctx);
+void thread_interrupted(os_context_t *ctx);
 void thread_pitstop(os_context_t *ctxptr);
 extern void thread_register_gc_trigger();
 
-#define thread_qrl(th) (&(th)->nonpointer_data->qrl_lock)
+# ifdef LISP_FEATURE_SB_THRUPTION
+int wake_thread(os_thread_t os_thread);
+#  ifdef LISP_FEATURE_WIN32
+void wake_thread_win32(struct thread *thread);
+#  else
+int wake_thread_posix(os_thread_t os_thread);
+#  endif
+# endif
 
 static inline
 void push_gcing_safety(struct gcing_safety *into)
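
Together with the win32-thread-private-events.h include added at the top of the file, the new wake_thread() family suggests that on Windows a thruption wakes its target by signalling a per-thread event object rather than by delivering a signal. A sketch under that assumption; the struct and field names below are hypothetical, not the ones defined in win32-thread-private-events.h:

    #include <windows.h>

    /* Hypothetical per-thread event another thread signals to interrupt an
     * alertable wait. */
    struct hypothetical_private_events {
        HANDLE interruption_event;  /* e.g. CreateEvent(NULL, FALSE, FALSE, NULL) */
    };

    static void wake_thread_win32_sketch(struct hypothetical_private_events *ev)
    {
        /* Wakes a thread blocked in WaitForSingleObject() on this event. */
        SetEvent(ev->interruption_event);
    }
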
@@ -364,11 +372,15 @@ void push_gcing_safety(struct gcing_safety *into)
          *th->csp_around_foreign_call)) {
         *th->csp_around_foreign_call = 0;
         asm volatile ("");
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
         into->pc_around_foreign_call = th->pc_around_foreign_call;
         th->pc_around_foreign_call = 0;
         asm volatile ("");
+#endif
     } else {
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
         into->pc_around_foreign_call = 0;
+#endif
     }
 }
 
@@ -380,8 +392,10 @@ void pop_gcing_safety(struct gcing_safety *from)
         asm volatile ("");
         *th->csp_around_foreign_call = from->csp_around_foreign_call;
         asm volatile ("");
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
         th->pc_around_foreign_call = from->pc_around_foreign_call;
         asm volatile ("");
+#endif
     }
 }
 
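
push_gcing_safety() and pop_gcing_safety() bracket regions where the thread's published FFI-CSP (and, when the C stack is the control stack, the saved PC) must be temporarily withdrawn and later restored. A typical pairing looks roughly like this sketch; the runtime's own wrapper macros may differ in detail:

    /* Sketch of the intended bracketing. */
    {
        struct gcing_safety safety;
        push_gcing_safety(&safety);
        /* ... code during which the saved csp/pc must not be visible ... */
        pop_gcing_safety(&safety);
    }
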
@@ -406,9 +420,10 @@ void pop_gcing_safety(struct gcing_safety *from)
 #define WITH_STATE_SEM(thread)                                     \
     WITH_STATE_SEM_hygenic(sbcl__state_sem, thread)
 
+int check_pending_thruptions(os_context_t *ctx);
+
 #endif
 
-extern boolean is_some_thread_local_addr(os_vm_address_t addr);
 extern void create_initial_thread(lispobj);
 
 #ifdef LISP_FEATURE_SB_THREAD