X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fthread.h;h=1a004c07e75f8e8cde0b85b5bbb044529250dff3;hb=bf40ae88bc289fd765a33861cc4bc0853ed483ba;hp=c8c15e5c2be9269f10123df2968ae8ddd2df38d8;hpb=e6f4c7523aa628ece995ee01879d3fb90eed6d9f;p=sbcl.git

diff --git a/src/runtime/thread.h b/src/runtime/thread.h
index c8c15e5..1a004c0 100644
--- a/src/runtime/thread.h
+++ b/src/runtime/thread.h
@@ -11,6 +11,9 @@
 #ifdef LISP_FEATURE_GENCGC
 #include "gencgc-alloc-region.h"
 #endif
+#ifdef LISP_FEATURE_WIN32
+#include "win32-thread-private-events.h"
+#endif
 #include "genesis/symbol.h"
 #include "genesis/static-symbols.h"
@@ -66,7 +69,9 @@ extern struct threads_suspend_info suspend_info;
 
 struct gcing_safety {
     lispobj csp_around_foreign_call;
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
     lispobj* pc_around_foreign_call;
+#endif
 };
 
 int handle_safepoint_violation(os_context_t *context, os_vm_address_t addr);
@@ -99,41 +104,12 @@ union per_thread_data {
  */
 struct nonpointer_thread_data
 {
-#ifdef LISP_FEATURE_SB_THREAD
+#if defined(LISP_FEATURE_SB_THREAD) && !defined(LISP_FEATURE_SB_SAFEPOINT)
     os_sem_t state_sem;
     os_sem_t state_not_running_sem;
     os_sem_t state_not_stopped_sem;
-# ifdef LISP_FEATURE_SB_SAFEPOINT
-   /* For safepoint-based builds, together with thread's
-    * csp_around_foreign_call pointer target, thread_qrl(thread) makes
-    * `quickly revokable lock'. Unlike most mutexes, this one is
-    * normally locked; by convention, other thread may read and use the
-    * thread's FFI-CSP location _either_ when the former holds the
-    * lock(mutex) _or_ when page permissions for FFI-CSP location were
-    * set to read-only.
-    *
-    * Combined semantic of QRL is not the same as the semantic of mutex
-    * returned by this function; rather, the mutex, when released by the
-    * owning thread, provides an edge-triggered notification of QRL
-    * release, which is represented by writing non-null
-    * csp_around_foreign_call.
-    *
-    * When owner thread is `in Lisp' (i.e. a heap mutator), its FFI-CSP
-    * contains null, otherwise it points to the top of C stack that
-    * should be preserved by GENCGC. If another thread needs to wait for
-    * mutator state change with `in Lisp => in C' direction, it disables
-    * FFI-CSP overwrite using page protection, and takes the mutex
-    * returned by thread_qrl(). Page fault handler normally ends up in a
-    * routine releasing this mutex and waiting for some appropriate
-    * event to take it back.
-    *
-    * This way, each thread may modify its own FFI-CSP content freely
-    * without memory barriers (paying with exception handling overhead
-    * whenever a contention happens). */
-    pthread_mutex_t qrl_lock;
-# endif
 #else
-    /* An unused field follows, to ensure that the struct in non-empty
+    /* An unused field follows, to ensure that the struct is non-empty
      * for non-GCC compilers. */
     int unused;
 #endif
@@ -255,32 +231,47 @@ StaticSymbolFunction(lispobj sym)
 #define access_control_frame_pointer(thread) \
     ((thread)->control_frame_pointer)
 # endif
-#elif defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+#else
+# if defined(BINDING_STACK_POINTER)
 #define get_binding_stack_pointer(thread) \
     SymbolValue(BINDING_STACK_POINTER, thread)
 #define set_binding_stack_pointer(thread,value) \
     SetSymbolValue(BINDING_STACK_POINTER, (lispobj)(value), thread)
-#define access_control_stack_pointer(thread) \
-    (current_control_stack_pointer)
-#else
+# else
 #define get_binding_stack_pointer(thread) \
     (current_binding_stack_pointer)
 #define set_binding_stack_pointer(thread,value) \
     (current_binding_stack_pointer = (lispobj *)(value))
-#define access_control_stack_pointer(thread) \
+# endif
+#define access_control_stack_pointer(thread) \
     (current_control_stack_pointer)
+# if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
 #define access_control_frame_pointer(thread) \
     (current_control_frame_pointer)
+# endif
 #endif
 
 #if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_GCC_TLS)
 extern __thread struct thread *current_thread;
 #endif
 
-#ifdef LISP_FEATURE_SB_SAFEPOINT
-# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
-#else
+#ifndef LISP_FEATURE_SB_SAFEPOINT
 # define THREAD_CSP_PAGE_SIZE 0
+#elif defined(LISP_FEATURE_PPC)
+  /* BACKEND_PAGE_BYTES is nice and large on this platform, but therefore
+   * does not fit into an immediate, making it awkward to access the page
+   * relative to the thread-tn... */
+# define THREAD_CSP_PAGE_SIZE 4096
+#else
+# define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES
+#endif
+
+#ifdef LISP_FEATURE_WIN32
+/*
+ * Win32 doesn't have SIGSTKSZ, and we're not switching stacks anyway,
+ * so define it arbitrarily
+ */
+#define SIGSTKSZ 1024
 #endif
 
 #define THREAD_STRUCT_SIZE (thread_control_stack_size + BINDING_STACK_SIZE + \
@@ -291,6 +282,11 @@ extern __thread struct thread *current_thread;
                            THREAD_ALIGNMENT_BYTES + \
                            THREAD_CSP_PAGE_SIZE)
 
+#if defined(LISP_FEATURE_WIN32)
+static inline struct thread* arch_os_get_current_thread()
+    __attribute__((__const__));
+#endif
+
 /* This is clearly per-arch and possibly even per-OS code, but we can't
  * put it somewhere sensible like x86-linux-os.c because it needs too
  * much stuff like struct thread and all_threads to be defined, which
@@ -298,47 +294,41 @@ extern __thread struct thread *current_thread;
 static inline struct thread *arch_os_get_current_thread(void)
 {
-#if defined(LISP_FEATURE_SB_THREAD)
-#if defined(LISP_FEATURE_X86)
+#if !defined(LISP_FEATURE_SB_THREAD)
+    return all_threads;
+
+#elif defined(LISP_FEATURE_X86) && defined(LISP_FEATURE_WIN32)
     register struct thread *me=0;
-    if(all_threads) {
-#if defined(LISP_FEATURE_DARWIN) && defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
-        sel_t sel;
-        struct thread *th = pthread_getspecific(specials);
-        sel.index = th->tls_cookie;
-        sel.rpl = USER_PRIV;
-        sel.ti = SEL_LDT;
-        __asm__ __volatile__ ("movw %w0, %%fs" : : "r"(sel));
-#elif defined(LISP_FEATURE_FREEBSD)
-#ifdef LISP_FEATURE_GCC_TLS
-        struct thread *th = current_thread;
-#else
-        struct thread *th = pthread_getspecific(specials);
-#endif
-#ifdef LISP_FEATURE_RESTORE_TLS_SEGMENT_REGISTER_FROM_TLS
-        unsigned int sel = LSEL(th->tls_cookie, SEL_UPL);
-        unsigned int fs = rfs();
-
-        /* Load FS only if it's necessary.  Modifying a selector
-         * causes privilege checking and it takes long time. */
-        if (fs != sel)
-            load_fs(sel);
-#endif
-        return th;
-#endif
-        __asm__ __volatile__ ("movl %%fs:%c1,%0" : "=r" (me)
-                              : "i" (offsetof (struct thread,this)));
-    }
+    __asm__ ("movl %%fs:0xE10+(4*63), %0" : "=r"(me) :);
     return me;
+
 #else
-#ifdef LISP_FEATURE_GCC_TLS
-    return current_thread;
-#else
-    return pthread_getspecific(specials);
+
+# if defined(LISP_FEATURE_X86)
+    if (!all_threads) return 0;
 #endif
-#endif /* x86 */
-#else
-    return all_threads;
+
+    /* Otherwise, use pthreads to find the right value.  We do not load
+     * directly from %fs:this even on x86 platforms (like Linux and
+     * Solaris) with dependable %fs, because we want to return NULL if
+     * called by a non-Lisp thread, and %fs would not be initialized
+     * suitably in that case. */
+    struct thread *th;
+# ifdef LISP_FEATURE_GCC_TLS
+    th = current_thread;
+# else
+    th = pthread_getspecific(specials);
+# endif
+
+# if defined(LISP_FEATURE_RESTORE_FS_SEGMENT_REGISTER_FROM_TLS)
+    /* If enabled by make-config (currently Darwin and FreeBSD only),
+     * re-setup %fs.  This is an out-of-line call, and potentially
+     * expensive.*/
+    if (th)
+        arch_os_load_ldt(th);
+# endif
+
+    return th;
 #endif
 }
 
@@ -347,13 +337,28 @@ extern kern_return_t mach_lisp_thread_init(struct thread *thread);
 extern kern_return_t mach_lisp_thread_destroy(struct thread *thread);
 #endif
 
+typedef struct init_thread_data {
+    sigset_t oldset;
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+    struct gcing_safety safety;
+#endif
+} init_thread_data;
+
 #ifdef LISP_FEATURE_SB_SAFEPOINT
 void thread_in_safety_transition(os_context_t *ctx);
 void thread_in_lisp_raised(os_context_t *ctx);
+void thread_interrupted(os_context_t *ctx);
 void thread_pitstop(os_context_t *ctxptr);
 extern void thread_register_gc_trigger();
 
-#define thread_qrl(th) (&(th)->nonpointer_data->qrl_lock)
+# ifdef LISP_FEATURE_SB_THRUPTION
+int wake_thread(os_thread_t os_thread);
+#  ifdef LISP_FEATURE_WIN32
+void wake_thread_win32(struct thread *thread);
+#  else
+int wake_thread_posix(os_thread_t os_thread);
+#  endif
+# endif
 
 static inline
 void push_gcing_safety(struct gcing_safety *into)
@@ -364,11 +369,15 @@ void push_gcing_safety(struct gcing_safety *into)
                 *th->csp_around_foreign_call)) {
         *th->csp_around_foreign_call = 0;
         asm volatile ("");
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
         into->pc_around_foreign_call = th->pc_around_foreign_call;
        th->pc_around_foreign_call = 0;
         asm volatile ("");
+#endif
     }
     else {
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
         into->pc_around_foreign_call = 0;
+#endif
     }
 }
@@ -380,8 +389,10 @@ void pop_gcing_safety(struct gcing_safety *from)
         asm volatile ("");
         *th->csp_around_foreign_call = from->csp_around_foreign_call;
         asm volatile ("");
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
         th->pc_around_foreign_call = from->pc_around_foreign_call;
         asm volatile ("");
+#endif
     }
 }
 
@@ -406,9 +417,18 @@ void pop_gcing_safety(struct gcing_safety *from)
 #define WITH_STATE_SEM(thread) \
     WITH_STATE_SEM_hygenic(sbcl__state_sem, thread)
 
+int check_pending_thruptions(os_context_t *ctx);
+
+void attach_os_thread(init_thread_data *);
+void detach_os_thread(init_thread_data *);
+
+# if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32)
+
+void signal_handler_callback(lispobj, int, void *, void *);
+# endif
+
 #endif
 
-extern boolean is_some_thread_local_addr(os_vm_address_t addr);
 extern void create_initial_thread(lispobj);
 
 #ifdef LISP_FEATURE_SB_THREAD
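
Note (editorial sketch, not part of the patch): the reworked arch_os_get_current_thread() above drops the inline %fs segment juggling on most platforms and instead resolves the current thread through thread-local storage (GCC __thread or a pthread key), deliberately returning NULL when called from a thread that never registered with the Lisp runtime, as the comment in that hunk explains. A minimal, self-contained illustration of that lookup pattern follows; lisp_thread_key, current_lisp_thread(), register_self() and the toy struct thread are hypothetical stand-ins for SBCL's `specials' key and real thread structure.

    /* sketch.c -- illustrative only; build with: cc sketch.c -pthread */
    #include <pthread.h>
    #include <stdio.h>

    struct thread { int id; };            /* stand-in for SBCL's struct thread */

    static pthread_key_t lisp_thread_key; /* plays the role of `specials' */

    static struct thread *current_lisp_thread(void)
    {
        /* Returns NULL for threads that never attached -- the property the
         * patch's comment cites for preferring pthread_getspecific() over a
         * direct %fs load. */
        return pthread_getspecific(lisp_thread_key);
    }

    static void register_self(struct thread *self)
    {
        pthread_setspecific(lisp_thread_key, self);
    }

    static void *lisp_thread(void *arg)
    {
        register_self(arg);
        printf("registered thread sees id %d\n", current_lisp_thread()->id);
        return NULL;
    }

    static void *foreign_thread(void *arg)
    {
        (void)arg;
        printf("foreign thread sees %p\n", (void *)current_lisp_thread());
        return NULL;
    }

    int main(void)
    {
        struct thread me = { 42 };
        pthread_t a, b;
        pthread_key_create(&lisp_thread_key, NULL);
        pthread_create(&a, NULL, lisp_thread, &me);
        pthread_create(&b, NULL, foreign_thread, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }

The registered thread finds its own structure while the foreign thread gets a null pointer back, which is the distinction the new non-Win32 branch of arch_os_get_current_thread() relies on.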