X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fwin32-os.c;h=b9b1ebed564c9b05b436a988998e291fc48d64ae;hb=eac461c1f1ca91cfe282c779291d582ed6b336cb;hp=8c85f7f07ec49e59d75ff07fb722475689e0f4f2;hpb=7fb597b585fc715537ea644f7d84440eca217ca1;p=sbcl.git diff --git a/src/runtime/win32-os.c b/src/runtime/win32-os.c index 8c85f7f..b9b1ebe 100644 --- a/src/runtime/win32-os.c +++ b/src/runtime/win32-os.c @@ -26,11 +26,13 @@ * yet. */ +#include #include +#include #include #include +#include #include "sbcl.h" -#include "./signal.h" #include "os.h" #include "arch.h" #include "globals.h" @@ -38,80 +40,390 @@ #include "interrupt.h" #include "interr.h" #include "lispregs.h" -#include "monitor.h" +#include "runtime.h" #include "alloc.h" #include "genesis/primitive-objects.h" +#include "dynbind.h" #include -#include #include #include #include +#include +#include + #include +#include #include "validate.h" #include "thread.h" -size_t os_vm_page_size; +#include "cpputil.h" + +#ifndef LISP_FEATURE_SB_THREAD +/* dummy definition to reduce ifdef clutter */ +#define WITH_GC_AT_SAFEPOINTS_ONLY() if (0) ; else +#endif +os_vm_size_t os_vm_page_size; #include "gc.h" #include "gencgc-internal.h" +#include #if 0 int linux_sparc_siginfo_bug = 0; int linux_supports_futex=0; #endif +#include +#include + +/* missing definitions for modern mingws */ +#ifndef EH_UNWINDING +#define EH_UNWINDING 0x02 +#endif +#ifndef EH_EXIT_UNWIND +#define EH_EXIT_UNWIND 0x04 +#endif + +/* Tired of writing arch_os_get_current_thread each time. */ +#define this_thread (arch_os_get_current_thread()) + +/* wrappers for winapi calls that must be successful (like SBCL's + * (aver ...) form). */ + +/* win_aver function: basic building block for miscellaneous + * ..AVER.. macrology (below) */ + +/* To do: These routines used to be "customizable" with dyndebug_init() + * and variables like dyndebug_survive_aver, dyndebug_skip_averlax based + * on environment variables. Those features got lost on the way, but + * ought to be reintroduced. */ + +static inline +intptr_t win_aver(intptr_t value, char* comment, char* file, int line, + int justwarn) +{ + if (!value) { + LPSTR errorMessage = ""; + DWORD errorCode = GetLastError(), allocated=0; + int posixerrno = errno; + const char* posixstrerror = strerror(errno); + char* report_template = + "Expression unexpectedly false: %s:%d\n" + " ... %s\n" + " ===> returned #X%p, \n" + " (in thread %p)" + " ... Win32 thinks:\n" + " ===> code %u, message => %s\n" + " ... CRT thinks:\n" + " ===> code %u, message => %s\n"; + + allocated = + FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER| + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + errorCode, + MAKELANGID(LANG_ENGLISH,SUBLANG_ENGLISH_US), + (LPSTR)&errorMessage, + 1024u, + NULL); + + if (justwarn) { + fprintf(stderr, report_template, + file, line, + comment, value, + this_thread, + (unsigned)errorCode, errorMessage, + posixerrno, posixstrerror); + } else { + lose(report_template, + file, line, + comment, value, + this_thread, + (unsigned)errorCode, errorMessage, + posixerrno, posixstrerror); + } + if (allocated) + LocalFree(errorMessage); + } + return value; +} + +/* sys_aver function: really tiny adaptor of win_aver for + * "POSIX-parody" CRT results ("lowio" and similar stuff): + * negative number means something... negative. 
*/ +static inline +intptr_t sys_aver(long value, char* comment, char* file, int line, + int justwarn) +{ + win_aver((intptr_t)(value>=0),comment,file,line,justwarn); + return value; +} + +/* Check for (call) result being boolean true. (call) may be arbitrary + * expression now; massive attack of gccisms ensures transparent type + * conversion back and forth, so the type of AVER(expression) is the + * type of expression. Value is the same _if_ it can be losslessly + * converted to (void*) and back. + * + * Failed AVER() is normally fatal. Well, unless dyndebug_survive_aver + * flag is set. */ + +#define AVER(call) \ + ({ __typeof__(call) __attribute__((unused)) me = \ + (__typeof__(call)) \ + win_aver((intptr_t)(call), #call, __FILE__, __LINE__, 0); \ + me;}) + +/* AVERLAX(call): do the same check as AVER did, but be mild on + * failure: print an annoying unrequested message to stderr, and + * continue. With dyndebug_skip_averlax flag, AVERLAX stop even to + * check and complain. */ + +#define AVERLAX(call) \ + ({ __typeof__(call) __attribute__((unused)) me = \ + (__typeof__(call)) \ + win_aver((intptr_t)(call), #call, __FILE__, __LINE__, 1); \ + me;}) + +/* Now, when failed AVER... prints both errno and GetLastError(), two + * variants of "POSIX/lowio" style checks below are almost useless + * (they build on sys_aver like the two above do on win_aver). */ + +#define CRT_AVER_NONNEGATIVE(call) \ + ({ __typeof__(call) __attribute__((unused)) me = \ + (__typeof__(call)) \ + sys_aver((call), #call, __FILE__, __LINE__, 0); \ + me;}) + +#define CRT_AVERLAX_NONNEGATIVE(call) \ + ({ __typeof__(call) __attribute__((unused)) me = \ + (__typeof__(call)) \ + sys_aver((call), #call, __FILE__, __LINE__, 1); \ + me;}) + +/* to be removed */ +#define CRT_AVER(booly) \ + ({ __typeof__(booly) __attribute__((unused)) me = (booly); \ + sys_aver((booly)?0:-1, #booly, __FILE__, __LINE__, 0); \ + me;}) + +const char * t_nil_s(lispobj symbol); + +/* + * The following signal-mask-related alien routines are called from Lisp: + */ + +/* As of win32, deferrables _do_ matter. gc_signal doesn't. */ +unsigned long block_deferrables_and_return_mask() +{ + sigset_t sset; + block_deferrable_signals(0, &sset); + return (unsigned long)sset; +} + +#if defined(LISP_FEATURE_SB_THREAD) +void apply_sigmask(unsigned long sigmask) +{ + sigset_t sset = (sigset_t)sigmask; + pthread_sigmask(SIG_SETMASK, &sset, 0); +} +#endif + /* The exception handling function looks like this: */ EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *, struct lisp_exception_frame *, CONTEXT *, void *); +/* handle_exception is defined further in this file, but since SBCL + * 1.0.1.24, it doesn't get registered as SEH handler directly anymore, + * not even by wos_install_interrupt_handlers. Instead, x86-assem.S + * provides exception_handler_wrapper; we install it here, and each + * exception frame on nested funcall()s also points to it. 
+ */ + void *base_seh_frame; static void *get_seh_frame(void) { void* retval; - asm volatile ("movl %%fs:0,%0": "=r" (retval)); +#ifdef LISP_FEATURE_X86 + asm volatile ("mov %%fs:0,%0": "=r" (retval)); +#else + asm volatile ("mov %%gs:0,%0": "=r" (retval)); +#endif return retval; } static void set_seh_frame(void *frame) { - asm volatile ("movl %0,%%fs:0": : "r" (frame)); +#ifdef LISP_FEATURE_X86 + asm volatile ("mov %0,%%fs:0": : "r" (frame)); +#else + asm volatile ("mov %0,%%gs:0": : "r" (frame)); +#endif } -static struct lisp_exception_frame *find_our_seh_frame(void) +#if defined(LISP_FEATURE_SB_THREAD) + +/* Permit loads from GC_SAFEPOINT_PAGE_ADDR (NB page state change is + * "synchronized" with the memory region content/availability -- + * e.g. you won't see other CPU flushing buffered writes after WP -- + * but there is some window when other thread _seem_ to trap AFTER + * access is granted. You may think of it something like "OS enters + * SEH handler too slowly" -- what's important is there's no implicit + * synchronization between VirtualProtect caller and other thread's + * SEH handler, hence no ordering of events. VirtualProtect is + * implicitly synchronized with protected memory contents (only). + * + * The last fact may be potentially used with many benefits e.g. for + * foreign call speed, but we don't use it for now: almost the only + * fact relevant to the current signalling protocol is "sooner or + * later everyone will trap [everyone will stop trapping]". + * + * An interesting source on page-protection-based inter-thread + * communication is a well-known paper by Dave Dice, Hui Huang, + * Mingyao Yang: ``Asymmetric Dekker Synchronization''. Last time + * I checked it was available at + * http://home.comcast.net/~pjbishop/Dave/Asymmetric-Dekker-Synchronization.txt + */ +void map_gc_page() { - struct lisp_exception_frame *frame = get_seh_frame(); + DWORD oldProt; + AVER(VirtualProtect((void*) GC_SAFEPOINT_PAGE_ADDR, sizeof(lispobj), + PAGE_READWRITE, &oldProt)); +} + +void unmap_gc_page() +{ + DWORD oldProt; + AVER(VirtualProtect((void*) GC_SAFEPOINT_PAGE_ADDR, sizeof(lispobj), + PAGE_NOACCESS, &oldProt)); +} - while (frame->handler != handle_exception) - frame = frame->next_frame; +#endif - return frame; +#if defined(LISP_FEATURE_SB_THREAD) +/* We want to get a slot in TIB that (1) is available at constant + offset, (2) is our private property, so libraries wouldn't legally + override it, (3) contains something predefined for threads created + out of our sight. + + Low 64 TLS slots are adressable directly, starting with + FS:[#xE10]. When SBCL runtime is initialized, some of the low slots + may be already in use by its prerequisite DLLs, as DllMain()s and + TLS callbacks have been called already. But slot 63 is unlikely to + be reached at this point: one slot per DLL that needs it is the + common practice, and many system DLLs use predefined TIB-based + areas outside conventional TLS storage and don't need TLS slots. + With our current dependencies, even slot 2 is observed to be free + (as of WinXP and wine). + + Now we'll call TlsAlloc() repeatedly until slot 63 is officially + assigned to us, then TlsFree() all other slots for normal use. TLS + slot 63, alias FS:[#.(+ #xE10 (* 4 63))], now belongs to us. + + To summarize, let's list the assumptions we make: + + - TIB, which is FS segment base, contains first 64 TLS slots at the + offset #xE10 (i.e. 
TIB layout compatibility); + - TLS slots are allocated from lower to higher ones; + - All libraries together with CRT startup have not requested 64 + slots yet. + + All these assumptions together don't seem to be less warranted than + the availability of TIB arbitrary data slot for our use. There are + some more reasons to prefer slot 63 over TIB arbitrary data: (1) if + our assumptions for slot 63 are violated, it will be detected at + startup instead of causing some system-specific unreproducible + problems afterwards, depending on OS and loaded foreign libraries; + (2) if getting slot 63 reliably with our current approach will + become impossible for some future Windows version, we can add TLS + callback directory to SBCL binary; main image TLS callback is + started before _any_ TLS slot is allocated by libraries, and + some C compiler vendors rely on this fact. */ + +void os_preinit() +{ +#ifdef LISP_FEATURE_X86 + DWORD slots[TLS_MINIMUM_AVAILABLE]; + DWORD key; + int n_slots = 0, i; + for (i=0; i BACKEND_PAGE_BYTES? + system_info.dwPageSize : BACKEND_PAGE_BYTES; +#if defined(LISP_FEATURE_X86) + fast_bzero_pointer = fast_bzero_detect; +#endif + os_number_of_processors = system_info.dwNumberOfProcessors; base_seh_frame = get_seh_frame(); + + resolve_optional_imports(); } +static inline boolean local_thread_stack_address_p(os_vm_address_t address) +{ + return this_thread && + (((((u64)address >= (u64)this_thread->os_address) && + ((u64)address < ((u64)this_thread)-THREAD_CSP_PAGE_SIZE))|| + (((u64)address >= (u64)this_thread->control_stack_start)&& + ((u64)address < (u64)this_thread->control_stack_end)))); +} /* * So we have three fun scenarios here. @@ -142,31 +454,40 @@ os_validate(os_vm_address_t addr, os_vm_size_t len) if (!addr) { /* the simple case first */ - os_vm_address_t real_addr; - if (!(real_addr = VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE))) { - perror("VirtualAlloc"); - return 0; - } - - return real_addr; + return + AVERLAX(VirtualAlloc(addr, len, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE)); } - if (!VirtualQuery(addr, &mem_info, sizeof mem_info)) { - perror("VirtualQuery"); + if (!AVERLAX(VirtualQuery(addr, &mem_info, sizeof mem_info))) return 0; - } - if ((mem_info.State == MEM_RESERVE) && (mem_info.RegionSize >=len)) return addr; + if ((mem_info.State == MEM_RESERVE) && (mem_info.RegionSize >=len)) { + /* It would be correct to return here. However, support for Wine + * is beneficial, and Wine has a strange behavior in this + * department. It reports all memory below KERNEL32.DLL as + * reserved, but disallows MEM_COMMIT. + * + * Let's work around it: reserve the region we need for a second + * time. The second reservation is documented to fail on normal NT + * family, but it will succeed on Wine if this region is + * actually free. + */ + VirtualAlloc(addr, len, MEM_RESERVE, PAGE_EXECUTE_READWRITE); + /* If it is wine, the second call has succeded, and now the region + * is really reserved. */ + return addr; + } if (mem_info.State == MEM_RESERVE) { fprintf(stderr, "validation of reserved space too short.\n"); fflush(stderr); + /* Oddly, we do not treat this assertion as fatal; hence also the + * provision for MEM_RESERVE in the following code, I suppose: */ } - if (!VirtualAlloc(addr, len, (mem_info.State == MEM_RESERVE)? MEM_COMMIT: MEM_RESERVE, PAGE_EXECUTE_READWRITE)) { - perror("VirtualAlloc"); + if (!AVERLAX(VirtualAlloc(addr, len, (mem_info.State == MEM_RESERVE)? 
+ MEM_COMMIT: MEM_RESERVE, PAGE_EXECUTE_READWRITE))) return 0; - } return addr; } @@ -174,26 +495,63 @@ os_validate(os_vm_address_t addr, os_vm_size_t len) /* * For os_invalidate(), we merely decommit the memory rather than * freeing the address space. This loses when freeing per-thread - * data and related memory since it leaks address space. It's not - * too lossy, however, since the two scenarios I'm aware of are - * fd-stream buffers, which are pooled rather than torched, and - * thread information, which I hope to pool (since windows creates - * threads at its own whim, and we probably want to be able to - * have them callback without funky magic on the part of the user, - * and full-on thread allocation is fairly heavyweight). Someone - * will probably shoot me down on this with some pithy comment on - * the use of (setf symbol-value) on a special variable. I'm happy - * for them. + * data and related memory since it leaks address space. + * + * So far the original comment (author unknown). It used to continue as + * follows: + * + * It's not too lossy, however, since the two scenarios I'm aware of + * are fd-stream buffers, which are pooled rather than torched, and + * thread information, which I hope to pool (since windows creates + * threads at its own whim, and we probably want to be able to have + * them callback without funky magic on the part of the user, and + * full-on thread allocation is fairly heavyweight). + * + * But: As it turns out, we are no longer content with decommitting + * without freeing, and have now grown a second function + * os_invalidate_free(), sort of a really_os_invalidate(). + * + * As discussed on #lisp, this is not a satisfactory solution, and probably + * ought to be rectified in the following way: + * + * - Any cases currently going through the non-freeing version of + * os_invalidate() are ultimately meant for zero-filling applications. + * Replace those use cases with an os_revalidate_bzero() or similarly + * named function, which explicitly takes care of that aspect of + * the semantics. + * + * - The remaining uses of os_invalidate should actually free, and once + * the above is implemented, we can rename os_invalidate_free back to + * just os_invalidate(). + * + * So far the new plan, as yet unimplemented. -- DFL */ void os_invalidate(os_vm_address_t addr, os_vm_size_t len) { - if (!VirtualFree(addr, len, MEM_DECOMMIT)) { - perror("VirtualFree"); - } + AVERLAX(VirtualFree(addr, len, MEM_DECOMMIT)); +} + +void +os_invalidate_free(os_vm_address_t addr, os_vm_size_t len) +{ + AVERLAX(VirtualFree(addr, 0, MEM_RELEASE)); } +void +os_invalidate_free_by_any_address(os_vm_address_t addr, os_vm_size_t len) +{ + MEMORY_BASIC_INFORMATION minfo; + AVERLAX(VirtualQuery(addr, &minfo, sizeof minfo)); + AVERLAX(minfo.AllocationBase); + AVERLAX(VirtualFree(minfo.AllocationBase, 0, MEM_RELEASE)); +} + +#define maybe_open_osfhandle _open_osfhandle +#define maybe_get_osfhandle _get_osfhandle +#define FDTYPE int + /* * os_map() is called to map a chunk of the core file into memory. 
* @@ -210,23 +568,14 @@ os_map(int fd, int offset, os_vm_address_t addr, os_vm_size_t len) { os_vm_size_t count; - fprintf(stderr, "os_map: %d, 0x%x, %p, 0x%x.\n", fd, offset, addr, len); - fflush(stderr); - - if (!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE)) { - perror("VirtualAlloc"); - lose("os_map: VirtualAlloc failure"); - } + AVER(VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE)|| + VirtualAlloc(addr, len, MEM_RESERVE|MEM_COMMIT, + PAGE_EXECUTE_READWRITE)); - if (lseek(fd, offset, SEEK_SET) == -1) { - lose("os_map: Seek failure."); - } + CRT_AVER_NONNEGATIVE(lseek(fd, offset, SEEK_SET)); count = read(fd, addr, len); - if (count != len) { - fprintf(stderr, "expected 0x%x, read 0x%x.\n", len, count); - lose("os_map: Failed to read enough bytes."); - } + CRT_AVER( count == len ); return addr; } @@ -247,10 +596,13 @@ os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot) { DWORD old_prot; - if (!VirtualProtect(address, length, os_protect_modes[prot], &old_prot)) { - fprintf(stderr, "VirtualProtect failed, code 0x%lx.\n", GetLastError()); - fflush(stderr); - } + DWORD new_prot = os_protect_modes[prot]; + AVER(VirtualProtect(address, length, new_prot, &old_prot)|| + (VirtualAlloc(address, length, MEM_COMMIT, new_prot) && + VirtualProtect(address, length, new_prot, &old_prot))); + odxprint(misc,"Protecting %p + %p vmaccess %d " + "newprot %08x oldprot %08x", + address,length,prot,new_prot,old_prot); } /* FIXME: Now that FOO_END, rather than FOO_SIZE, is the fundamental @@ -259,201 +611,288 @@ os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot) static boolean in_range_p(os_vm_address_t a, lispobj sbeg, size_t slen) { - char* beg = (char*)((long)sbeg); - char* end = (char*)((long)sbeg) + slen; + char* beg = (char*)((uword_t)sbeg); + char* end = (char*)((uword_t)sbeg) + slen; char* adr = (char*)a; return (adr >= beg && adr < end); } boolean +is_linkage_table_addr(os_vm_address_t addr) +{ + return in_range_p(addr, LINKAGE_TABLE_SPACE_START, LINKAGE_TABLE_SPACE_END); +} + +static boolean is_some_thread_local_addr(os_vm_address_t addr); + +boolean is_valid_lisp_addr(os_vm_address_t addr) { - struct thread *th; if(in_range_p(addr, READ_ONLY_SPACE_START, READ_ONLY_SPACE_SIZE) || in_range_p(addr, STATIC_SPACE_START , STATIC_SPACE_SIZE) || - in_range_p(addr, DYNAMIC_SPACE_START , DYNAMIC_SPACE_SIZE)) + in_range_p(addr, DYNAMIC_SPACE_START , dynamic_space_size) || + is_some_thread_local_addr(addr)) return 1; + return 0; +} + +/* test if an address is within thread-local space */ +static boolean +is_thread_local_addr(struct thread* th, os_vm_address_t addr) +{ + /* Assuming that this is correct, it would warrant further comment, + * I think. Based on what our call site is doing, we have been + * tasked to check for the address of a lisp object; not merely any + * foreign address within the thread's area. Indeed, this used to + * be a check for control and binding stack only, rather than the + * full thread "struct". So shouldn't the THREAD_STRUCT_SIZE rather + * be (thread_control_stack_size+BINDING_STACK_SIZE) instead? That + * would also do away with the LISP_FEATURE_SB_THREAD case. Or does + * it simply not matter? 
--DFL */ + ptrdiff_t diff = ((char*)th->os_address)-(char*)addr; + return diff > (ptrdiff_t)0 && diff < (ptrdiff_t)THREAD_STRUCT_SIZE +#ifdef LISP_FEATURE_SB_THREAD + && addr != (os_vm_address_t) th->csp_around_foreign_call +#endif + ; +} + +static boolean +is_some_thread_local_addr(os_vm_address_t addr) +{ + boolean result = 0; +#ifdef LISP_FEATURE_SB_THREAD + struct thread *th; + pthread_mutex_lock(&all_threads_lock); for_each_thread(th) { - if(((os_vm_address_t)th->control_stack_start <= addr) && (addr < (os_vm_address_t)th->control_stack_end)) - return 1; - if(in_range_p(addr, (unsigned long)th->binding_stack_start, BINDING_STACK_SIZE)) - return 1; + if(is_thread_local_addr(th,addr)) { + result = 1; + break; + } } - return 0; + pthread_mutex_unlock(&all_threads_lock); +#endif + return result; } -/* - * any OS-dependent special low-level handling for signals - */ /* A tiny bit of interrupt.c state we want our paws on. */ extern boolean internal_errors_enabled; -/* - * FIXME: There is a potential problem with foreign code here. - * If we are running foreign code instead of lisp code and an - * exception occurs we arrange a call into Lisp. If the - * foreign code has installed an exception handler, we run the - * very great risk of throwing through their exception handler - * without asking it to unwind. This is more a problem with - * non-sigtrap (EXCEPTION_BREAKPOINT) exceptions, as they could - * reasonably be expected to happen in foreign code. We need to - * figure out the exception handler unwind semantics and adhere - * to them (probably by abusing the Lisp unwind-protect system) - * if we are going to handle this scenario correctly. - * - * A good explanation of the exception handling semantics is - * http://win32assembly.online.fr/Exceptionhandling.html . - * We will also need to handle this ourselves when foreign - * code tries to unwind -us-. - * - * When unwinding through foreign code we should unwind the - * Lisp stack to the entry from foreign code, then unwind the - * foreign code stack to the entry from Lisp, then resume - * unwinding in Lisp. - */ +extern void exception_handler_wrapper(); -EXCEPTION_DISPOSITION sigtrap_emulator(CONTEXT *context, - struct lisp_exception_frame *exception_frame) +void +c_level_backtrace(const char* header, int depth) { - if (*((char *)context->Eip + 1) == trap_ContextRestore) { - /* - * This is the cleanup for what is immediately below, and - * for the generic exception handling further below. We - * have to memcpy() the original context (emulated sigtrap - * or normal exception) over our context and resume it. - */ - memcpy(context, &exception_frame->context, sizeof(CONTEXT)); - return ExceptionContinueExecution; + void* frame; + int n = 0; + void** lastseh; + + for (lastseh = get_seh_frame(); lastseh && (lastseh!=(void*)-1); + lastseh = *lastseh); + + fprintf(stderr, "Backtrace: %s (thread %p)\n", header, this_thread); + for (frame = __builtin_frame_address(0); frame; frame=*(void**)frame) + { + if ((n++)>depth) + return; + fprintf(stderr, "[#%02d]: ebp = 0x%p, ret = 0x%p\n",n, + frame, ((void**)frame)[1]); + } +} - } else { /* Not a trap_ContextRestore, must be a sigtrap. */ - /* sigtrap_trampoline is defined in x86-assem.S. */ - extern void sigtrap_trampoline; +#ifdef LISP_FEATURE_X86 +#define voidreg(ctxptr,name) ((void*)((ctxptr)->E##name)) +#else +#define voidreg(ctxptr,name) ((void*)((ctxptr)->R##name)) +#endif - /* - * Unlike some other operating systems, Win32 leaves EIP - * pointing to the breakpoint instruction. 
- */ - context->Eip++; - /* - * We're not on an alternate stack like we would be in some - * other operating systems, and we don't want to risk leaking - * any important resources if we throw out of the sigtrap - * handler, so we need to copy off our context to a "safe" - * place and then monkey with the return EIP to point to a - * trampoline which calls another function which copies the - * context out to a really-safe place and then calls the real - * sigtrap handler. When the real sigtrap handler returns, the - * trampoline then contains another breakpoint with a code of - * trap_ContextRestore (see above). Essentially the same - * mechanism is used by the generic exception path. There is - * a small window of opportunity between us copying the - * context to the "safe" place and the sigtrap wrapper copying - * it to the really-safe place (allocated in its stack frame) - * during which the context can be smashed. The only scenario - * I can come up with for this, however, involves a stack - * overflow occuring at just the wrong time (which makes one - * wonder how stack overflow exceptions even happen, given - * that we don't switch stacks for exception processing...) - */ - memcpy(&exception_frame->context, context, sizeof(CONTEXT)); - context->Eax = context->Eip; - context->Eip = (unsigned long)&sigtrap_trampoline; +#if defined(LISP_FEATURE_X86) +static int +handle_single_step(os_context_t *ctx) +{ + if (!single_stepping) + return -1; - /* and return */ - return ExceptionContinueExecution; - } -} + /* We are doing a displaced instruction. At least function + * end breakpoints use this. */ + WITH_GC_AT_SAFEPOINTS_ONLY () /* Todo: Is it really gc-unsafe? */ + restore_breakpoint_from_single_step(ctx); -void sigtrap_wrapper(void) -{ - /* - * This is the wrapper around the sigtrap handler called from - * the trampoline returned to from the function above. - * - * There actually is a point to some of the commented-out code - * in this function, although it really belongs to the callback - * wrappers. Once it is installed there, it can probably be - * removed from here. - */ + return 0; +} +#endif - extern void sigtrap_handler(int signal, siginfo_t *info, void *context); +#ifdef LISP_FEATURE_UD2_BREAKPOINTS +#define SBCL_EXCEPTION_BREAKPOINT EXCEPTION_ILLEGAL_INSTRUCTION +#define TRAP_CODE_WIDTH 2 +#else +#define SBCL_EXCEPTION_BREAKPOINT EXCEPTION_BREAKPOINT +#define TRAP_CODE_WIDTH 1 +#endif -/* volatile struct { */ -/* void *handler[2]; */ - CONTEXT context; -/* } handler; */ +static int +handle_breakpoint_trap(os_context_t *ctx, struct thread* self) +{ +#ifdef LISP_FEATURE_UD2_BREAKPOINTS + if (((unsigned short *)((ctx->win32_context)->Eip))[0] != 0x0b0f) + return -1; +#endif - struct lisp_exception_frame *frame = find_our_seh_frame(); + /* Unlike some other operating systems, Win32 leaves EIP + * pointing to the breakpoint instruction. */ + (*os_context_pc_addr(ctx)) += TRAP_CODE_WIDTH; + + /* Now EIP points just after the INT3 byte and aims at the + * 'kind' value (eg trap_Cerror). */ + unsigned trap = *(unsigned char *)(*os_context_pc_addr(ctx)); + +#ifdef LISP_FEATURE_SB_THREAD + /* Before any other trap handler: gc_safepoint ensures that + inner alloc_sap for passing the context won't trap on + pseudo-atomic. 
*/ + if (trap == trap_PendingInterrupt) { + /* Done everything needed for this trap, except EIP + adjustment */ + arch_skip_instruction(ctx); + thread_interrupted(ctx); + return 0; + } +#endif -/* wos_install_interrupt_handlers(handler); */ -/* handler.handler[0] = get_seh_frame(); */ -/* handler.handler[1] = &handle_exception; */ -/* set_seh_frame(&handler); */ + /* This is just for info in case the monitor wants to print an + * approximation. */ + access_control_stack_pointer(self) = + (lispobj *)*os_context_sp_addr(ctx); - memcpy(&context, &frame->context, sizeof(CONTEXT)); - sigtrap_handler(0, NULL, &context); - memcpy(&frame->context, &context, sizeof(CONTEXT)); + WITH_GC_AT_SAFEPOINTS_ONLY() { +#if defined(LISP_FEATURE_SB_THREAD) + block_blockable_signals(0,&ctx->sigmask); +#endif + handle_trap(ctx, trap); +#if defined(LISP_FEATURE_SB_THREAD) + thread_sigmask(SIG_SETMASK,&ctx->sigmask,NULL); +#endif + } -/* set_seh_frame(handler.handler[0]); */ + /* Done, we're good to go! */ + return 0; } -EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *exception_record, - struct lisp_exception_frame *exception_frame, - CONTEXT *context, - void *dc) /* FIXME: What's dc again? */ +static int +handle_access_violation(os_context_t *ctx, + EXCEPTION_RECORD *exception_record, + void *fault_address, + struct thread* self) { + CONTEXT *win32_context = ctx->win32_context; + +#if defined(LISP_FEATURE_X86) + odxprint(pagefaults, + "SEGV. ThSap %p, Eip %p, Esp %p, Esi %p, Edi %p, " + "Addr %p Access %d\n", + self, + win32_context->Eip, + win32_context->Esp, + win32_context->Esi, + win32_context->Edi, + fault_address, + exception_record->ExceptionInformation[0]); +#else + odxprint(pagefaults, + "SEGV. ThSap %p, Eip %p, Esp %p, Esi %p, Edi %p, " + "Addr %p Access %d\n", + self, + win32_context->Rip, + win32_context->Rsp, + win32_context->Rsi, + win32_context->Rdi, + fault_address, + exception_record->ExceptionInformation[0]); +#endif - /* For EXCEPTION_ACCESS_VIOLATION only. */ - void *fault_address = (void *)exception_record->ExceptionInformation[1]; + /* Stack: This case takes care of our various stack exhaustion + * protect pages (with the notable exception of the control stack!). */ + if (self && local_thread_stack_address_p(fault_address)) { + if (handle_guard_page_triggered(ctx, fault_address)) + return 0; /* gc safety? */ + goto try_recommit; + } - if (exception_record->ExceptionCode == EXCEPTION_BREAKPOINT) { - /* Pick off sigtrap case first. */ - return sigtrap_emulator(context, exception_frame); + /* Safepoint pages */ +#ifdef LISP_FEATURE_SB_THREAD + if (fault_address == (void *) GC_SAFEPOINT_PAGE_ADDR) { + thread_in_lisp_raised(ctx); + return 0; + } - } else if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && - is_valid_lisp_addr(fault_address)) { - /* Pick off GC-related memory fault next. */ - MEMORY_BASIC_INFORMATION mem_info; + if ((((u64)fault_address) == ((u64)self->csp_around_foreign_call))){ + thread_in_safety_transition(ctx); + return 0; + } +#endif - if (!VirtualQuery(fault_address, &mem_info, sizeof mem_info)) { - fprintf(stderr, "VirtualQuery: 0x%lx.\n", GetLastError()); - lose("handle_exception: VirtualQuery failure"); + /* dynamic space */ + page_index_t index = find_page_index(fault_address); + if (index != -1) { + /* + * Now, if the page is supposedly write-protected and this + * is a write, tell the gc that it's been hit. 
+ */ + if (page_table[index].write_protected) { + gencgc_handle_wp_violation(fault_address); + } else { + AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size), + os_vm_page_size, + MEM_COMMIT, PAGE_EXECUTE_READWRITE)); } + return 0; + } - if (mem_info.State == MEM_RESERVE) { - /* First use new page, lets get some memory for it. */ - if (!VirtualAlloc(mem_info.BaseAddress, os_vm_page_size, - MEM_COMMIT, PAGE_EXECUTE_READWRITE)) { - fprintf(stderr, "VirtualAlloc: 0x%lx.\n", GetLastError()); - lose("handle_exception: VirtualAlloc failure"); - - } else { - /* - * Now, if the page is supposedly write-protected and this - * is a write, tell the gc that it's been hit. - * - * FIXME: Are we supposed to fall-through to the Lisp - * exception handler if the gc doesn't take the wp violation? - */ - if (exception_record->ExceptionInformation[0]) { - int index = find_page_index(fault_address); - if ((index != -1) && (page_table[index].write_protected)) { - gencgc_handle_wp_violation(fault_address); - } - } - return ExceptionContinueExecution; - } - - } else if (gencgc_handle_wp_violation(fault_address)) { - /* gc accepts the wp violation, so resume where we left off. */ - return ExceptionContinueExecution; - } + if (fault_address == undefined_alien_address) + return -1; + + /* linkage table or a "valid_lisp_addr" outside of dynamic space (?) */ + if (is_linkage_table_addr(fault_address) + || is_valid_lisp_addr(fault_address)) + goto try_recommit; + + return -1; + +try_recommit: + /* First use of a new page, lets get some memory for it. */ + +#if defined(LISP_FEATURE_X86) + AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size), + os_vm_page_size, + MEM_COMMIT, PAGE_EXECUTE_READWRITE) + ||(fprintf(stderr,"Unable to recommit addr %p eip 0x%08lx\n", + fault_address, win32_context->Eip) && + (c_level_backtrace("BT",5), + fake_foreign_function_call(ctx), + lose("Lispy backtrace"), + 0))); +#else + AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size), + os_vm_page_size, + MEM_COMMIT, PAGE_EXECUTE_READWRITE) + ||(fprintf(stderr,"Unable to recommit addr %p eip 0x%p\n", + fault_address, (void*)win32_context->Rip) && + (c_level_backtrace("BT",5), + fake_foreign_function_call(ctx), + lose("Lispy backtrace"), + 0))); +#endif - /* All else failed, drop through to the lisp-side exception handler. */ - } + return 0; +} +static void +signal_internal_error_or_lose(os_context_t *ctx, + EXCEPTION_RECORD *exception_record, + void *fault_address) +{ /* * If we fall through to here then we need to either forward * the exception to the lisp-side exception handler if it's @@ -461,43 +900,51 @@ EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *exception_record, */ if (internal_errors_enabled) { - /* exception_trampoline is defined in x86-assem.S. */ - extern void exception_trampoline; - - /* - * We're making the somewhat arbitrary decision that - * having internal errors enabled means that lisp has - * sufficient marbles to be able to handle exceptions. - * - * Exceptions aren't supposed to happen during cold - * init or reinit anyway. - */ - - /* - * We use the same mechanism as the sigtrap emulator above - * with just a couple changes. We obviously use a different - * trampoline and wrapper function, we kill out any live - * floating point exceptions, and we save off the exception - * record as well as the context. 
- */ - - /* Save off context and exception information */ - memcpy(&exception_frame->context, context, sizeof(CONTEXT)); - memcpy(&exception_frame->exception, exception_record, sizeof(EXCEPTION_RECORD)); - - /* Set up to activate trampoline when we return */ - context->Eax = context->Eip; - context->Eip = (unsigned long)&exception_trampoline; - - /* Make sure a floating-point trap doesn't kill us */ - context->FloatSave.StatusWord &= ~0x3f; + lispobj context_sap; + lispobj exception_record_sap; + + asm("fnclex"); + /* We're making the somewhat arbitrary decision that having + * internal errors enabled means that lisp has sufficient + * marbles to be able to handle exceptions, but exceptions + * aren't supposed to happen during cold init or reinit + * anyway. */ + +#if defined(LISP_FEATURE_SB_THREAD) + block_blockable_signals(0,&ctx->sigmask); +#endif + fake_foreign_function_call(ctx); + + WITH_GC_AT_SAFEPOINTS_ONLY() { + /* Allocate the SAP objects while the "interrupts" are still + * disabled. */ + context_sap = alloc_sap(ctx); + exception_record_sap = alloc_sap(exception_record); +#if defined(LISP_FEATURE_SB_THREAD) + thread_sigmask(SIG_SETMASK, &ctx->sigmask, NULL); +#endif - /* And return */ - return ExceptionContinueExecution; + /* The exception system doesn't automatically clear pending + * exceptions, so we lose as soon as we execute any FP + * instruction unless we do this first. */ + /* Call into lisp to handle things. */ + funcall2(StaticSymbolFunction(HANDLE_WIN32_EXCEPTION), + context_sap, + exception_record_sap); + } + /* If Lisp doesn't nlx, we need to put things back. */ + undo_fake_foreign_function_call(ctx); +#if defined(LISP_FEATURE_SB_THREAD) + thread_sigmask(SIG_SETMASK, &ctx->sigmask, NULL); +#endif + /* FIXME: HANDLE-WIN32-EXCEPTION should be allowed to decline */ + return; } - fprintf(stderr, "Exception Code: 0x%lx.\n", exception_record->ExceptionCode); - fprintf(stderr, "Faulting IP: 0x%lx.\n", (DWORD)exception_record->ExceptionAddress); + fprintf(stderr, "Exception Code: 0x%p.\n", + (void*)(intptr_t)exception_record->ExceptionCode); + fprintf(stderr, "Faulting IP: 0x%p.\n", + (void*)(intptr_t)exception_record->ExceptionAddress); if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { MEMORY_BASIC_INFORMATION mem_info; @@ -505,56 +952,115 @@ EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *exception_record, fprintf(stderr, "page status: 0x%lx.\n", mem_info.State); } - fprintf(stderr, "Was writing: %ld, where: 0x%lx.\n", - exception_record->ExceptionInformation[0], - (DWORD)fault_address); + fprintf(stderr, "Was writing: %p, where: 0x%p.\n", + (void*)exception_record->ExceptionInformation[0], + fault_address); } fflush(stderr); - fake_foreign_function_call(context); - monitor_or_something(); - - return ExceptionContinueSearch; + fake_foreign_function_call(ctx); + lose("Exception too early in cold init, cannot continue."); } -void handle_win32_exception_wrapper(void) +/* + * A good explanation of the exception handling semantics is + * http://win32assembly.online.fr/Exceptionhandling.html (possibly defunct) + * or: + * http://www.microsoft.com/msj/0197/exception/exception.aspx + */ + +EXCEPTION_DISPOSITION +handle_exception(EXCEPTION_RECORD *exception_record, + struct lisp_exception_frame *exception_frame, + CONTEXT *win32_context, + void *dispatcher_context) { - struct lisp_exception_frame *frame = find_our_seh_frame(); - CONTEXT context; - EXCEPTION_RECORD exception_record; - lispobj context_sap; - lispobj exception_record_sap; + if 
(!win32_context) + /* Not certain why this should be possible, but let's be safe... */ + return ExceptionContinueSearch; + + if (exception_record->ExceptionFlags & (EH_UNWINDING | EH_EXIT_UNWIND)) { + /* If we're being unwound, be graceful about it. */ + + /* Undo any dynamic bindings. */ + unbind_to_here(exception_frame->bindstack_pointer, + arch_os_get_current_thread()); + return ExceptionContinueSearch; + } - memcpy(&context, &frame->context, sizeof(CONTEXT)); - memcpy(&exception_record, &frame->exception, sizeof(EXCEPTION_RECORD)); + DWORD lastError = GetLastError(); + DWORD lastErrno = errno; + DWORD code = exception_record->ExceptionCode; + struct thread* self = arch_os_get_current_thread(); - fake_foreign_function_call(&context); + os_context_t context, *ctx = &context; + context.win32_context = win32_context; +#if defined(LISP_FEATURE_SB_THREAD) + context.sigmask = self ? self->os_thread->blocked_signal_set : 0; +#endif - /* Allocate the SAP objects while the "interrupts" are still - * disabled. */ - context_sap = alloc_sap(&context); - exception_record_sap = alloc_sap(&exception_record); + /* For EXCEPTION_ACCESS_VIOLATION only. */ + void *fault_address = (void *)exception_record->ExceptionInformation[1]; - funcall2(SymbolFunction(HANDLE_WIN32_EXCEPTION), context_sap, - exception_record_sap); + odxprint(seh, + "SEH: rec %p, ctxptr %p, rip %p, fault %p\n" + "... code %p, rcx %p, fp-tags %p\n\n", + exception_record, + win32_context, + voidreg(win32_context,ip), + fault_address, + (void*)(intptr_t)code, + voidreg(win32_context,cx), + win32_context->FloatSave.TagWord); + + /* This function had become unwieldy. Let's cut it down into + * pieces based on the different exception codes. Each exception + * code handler gets the chance to decline by returning non-zero if it + * isn't happy: */ + + int rc; + switch (code) { + case EXCEPTION_ACCESS_VIOLATION: + rc = handle_access_violation( + ctx, exception_record, fault_address, self); + break; + + case SBCL_EXCEPTION_BREAKPOINT: + rc = handle_breakpoint_trap(ctx, self); + break; + +#if defined(LISP_FEATURE_X86) + case EXCEPTION_SINGLE_STEP: + rc = handle_single_step(ctx); + break; +#endif - undo_fake_foreign_function_call(&context); + default: + rc = -1; + } + + if (rc) + /* All else failed, drop through to the lisp-side exception handler. 
*/ + signal_internal_error_or_lose(ctx, exception_record, fault_address); - memcpy(&frame->context, &context, sizeof(CONTEXT)); + errno = lastErrno; + SetLastError(lastError); + return ExceptionContinueExecution; } void wos_install_interrupt_handlers(struct lisp_exception_frame *handler) { +#ifdef LISP_FEATURE_X86 handler->next_frame = get_seh_frame(); - handler->handler = &handle_exception; + handler->handler = (void*)exception_handler_wrapper; set_seh_frame(handler); -} - -void bcopy(const void *src, void *dest, size_t n) -{ - MoveMemory(dest, src, n); +#else + static int once = 0; + if (!once++) + AddVectoredExceptionHandler(1,veh); +#endif } /* @@ -581,24 +1087,748 @@ void *memcpy(void *dest, const void *src, size_t n) return dest; } +char *dirname(char *path) +{ + static char buf[PATH_MAX + 1]; + size_t pathlen = strlen(path); + int i; + + if (pathlen >= sizeof(buf)) { + lose("Pathname too long in dirname.\n"); + return NULL; + } + + strcpy(buf, path); + for (i = pathlen; i >= 0; --i) { + if (buf[i] == '/' || buf[i] == '\\') { + buf[i] = '\0'; + break; + } + } + + return buf; +} + +/* Unofficial but widely used property of console handles: they have + #b11 in two minor bits, opposed to other handles, that are + machine-word-aligned. Properly emulated even on wine. + + Console handles are special in many aspects, e.g. they aren't NTDLL + system handles: kernel32 redirects console operations to CSRSS + requests. Using the hack below to distinguish console handles is + justified, as it's the only method that won't hang during + outstanding reads, won't try to lock NT kernel object (if there is + one; console isn't), etc. */ +int +console_handle_p(HANDLE handle) +{ + return (handle != NULL)&& + (handle != INVALID_HANDLE_VALUE)&& + ((((int)(intptr_t)handle)&3)==3); +} + +/* Atomically mark current thread as (probably) doing synchronous I/O + * on handle, if no cancellation is requested yet (and return TRUE), + * otherwise clear thread's I/O cancellation flag and return false. + */ +static +boolean io_begin_interruptible(HANDLE handle) +{ + /* No point in doing it unless OS supports cancellation from other + * threads */ + if (!ptr_CancelIoEx) + return 1; + + if (!__sync_bool_compare_and_swap(&this_thread->synchronous_io_handle_and_flag, + 0, handle)) { + ResetEvent(this_thread->private_events.events[0]); + this_thread->synchronous_io_handle_and_flag = 0; + return 0; + } + return 1; +} + +/* Unmark current thread as (probably) doing synchronous I/O; if an + * I/O cancellation was requested, postpone it until next + * io_begin_interruptible */ +static void +io_end_interruptible(HANDLE handle) +{ + if (!ptr_CancelIoEx) + return; + __sync_bool_compare_and_swap(&this_thread->synchronous_io_handle_and_flag, + handle, 0); +} + +/* Documented limit for ReadConsole/WriteConsole is 64K bytes. + Real limit observed on W2K-SP3 is somewhere in between 32KiB and 64Kib... 
+*/ +#define MAX_CONSOLE_TCHARS 16384 + +int +win32_write_unicode_console(HANDLE handle, void * buf, int count) +{ + DWORD written = 0; + DWORD nchars; + BOOL result; + nchars = count>>1; + if (nchars>MAX_CONSOLE_TCHARS) nchars = MAX_CONSOLE_TCHARS; + + if (!io_begin_interruptible(handle)) { + errno = EINTR; + return -1; + } + result = WriteConsoleW(handle,buf,nchars,&written,NULL); + io_end_interruptible(handle); + + if (result) { + if (!written) { + errno = EINTR; + return -1; + } else { + return 2*written; + } + } else { + DWORD err = GetLastError(); + odxprint(io,"WriteConsole fails => %u\n", err); + errno = (err==ERROR_OPERATION_ABORTED ? EINTR : EIO); + return -1; + } +} + +/* + * (AK writes:) + * + * It may be unobvious, but (probably) the most straightforward way of + * providing some sane CL:LISTEN semantics for line-mode console + * channel requires _dedicated input thread_. + * + * LISTEN should return true iff the next (READ-CHAR) won't have to + * wait. As our console may be shared with another process, entirely + * out of our control, looking at the events in PeekConsoleEvent + * result (and searching for #\Return) doesn't cut it. + * + * We decided that console input thread must do something smarter than + * a bare loop of continuous ReadConsoleW(). On Unix, user experience + * with the terminal is entirely unaffected by the fact that some + * process does (or doesn't) call read(); the situation on MS Windows + * is different. + * + * Echo output and line editing present on MS Windows while some + * process is waiting in ReadConsole(); otherwise all input events are + * buffered. If our thread were calling ReadConsole() all the time, it + * would feel like Unix cooked mode. + * + * But we don't write a Unix emulator here, even if it sometimes feels + * like that; therefore preserving this aspect of console I/O seems a + * good thing to us. + * + * LISTEN itself becomes trivial with dedicated input thread, but the + * goal stated above -- provide `native' user experience with blocked + * console -- don't play well with this trivial implementation. + * + * What's currently implemented is a compromise, looking as something + * in between Unix cooked mode and Win32 line mode. + * + * 1. As long as no console I/O function is called (incl. CL:LISTEN), + * console looks `blocked': no echo, no line editing. + * + * 2. (READ-CHAR), (READ-SEQUENCE) and other functions doing real + * input result in the ReadConsole request (in a dedicated thread); + * + * 3. Once ReadConsole is called, it is not cancelled in the + * middle. In line mode, it returns when key is hit (or + * something like that happens). Therefore, if line editing and echo + * output had a chance to happen, console won't look `blocked' until + * the line is entered (even if line input was triggered by + * (READ-CHAR)). + * + * 4. LISTEN may request ReadConsole too (if no other thread is + * reading the console and no data are queued). It's the only case + * when the console becomes `unblocked' without any actual input + * requested by Lisp code. LISTEN check if there is at least one + * input event in PeekConsole queue; unless there is such an event, + * ReadConsole is not triggered by LISTEN. + * + * 5. Console-reading Lisp thread now may be interrupted immediately; + * ReadConsole call itself, however, continues until the line is + * entered. 
+ */ + +struct { + WCHAR buffer[MAX_CONSOLE_TCHARS]; + DWORD head, tail; + pthread_mutex_t lock; + pthread_cond_t cond_has_data; + pthread_cond_t cond_has_client; + pthread_t thread; + boolean initialized; + HANDLE handle; + boolean in_progress; +} ttyinput = {.lock = PTHREAD_MUTEX_INITIALIZER}; + +static void* +tty_read_line_server() +{ + pthread_mutex_lock(&ttyinput.lock); + while (ttyinput.handle) { + DWORD nchars; + BOOL ok; + + while (!ttyinput.in_progress) + pthread_cond_wait(&ttyinput.cond_has_client,&ttyinput.lock); + + pthread_mutex_unlock(&ttyinput.lock); + + ok = ReadConsoleW(ttyinput.handle, + &ttyinput.buffer[ttyinput.tail], + MAX_CONSOLE_TCHARS-ttyinput.tail, + &nchars,NULL); + + pthread_mutex_lock(&ttyinput.lock); + + if (ok) { + ttyinput.tail += nchars; + pthread_cond_broadcast(&ttyinput.cond_has_data); + } + ttyinput.in_progress = 0; + } + pthread_mutex_unlock(&ttyinput.lock); + return NULL; +} + +static boolean +tty_maybe_initialize_unlocked(HANDLE handle) +{ + if (!ttyinput.initialized) { + if (!DuplicateHandle(GetCurrentProcess(),handle, + GetCurrentProcess(),&ttyinput.handle, + 0,FALSE,DUPLICATE_SAME_ACCESS)) { + return 0; + } + pthread_cond_init(&ttyinput.cond_has_data,NULL); + pthread_cond_init(&ttyinput.cond_has_client,NULL); + pthread_create(&ttyinput.thread,NULL,tty_read_line_server,NULL); + ttyinput.initialized = 1; + } + return 1; +} + +boolean +win32_tty_listen(HANDLE handle) +{ + boolean result = 0; + INPUT_RECORD ir; + DWORD nevents; + pthread_mutex_lock(&ttyinput.lock); + if (!tty_maybe_initialize_unlocked(handle)) + result = 0; + + if (ttyinput.in_progress) { + result = 0; + } else { + if (ttyinput.head != ttyinput.tail) { + result = 1; + } else { + if (PeekConsoleInput(ttyinput.handle,&ir,1,&nevents) && nevents) { + ttyinput.in_progress = 1; + pthread_cond_broadcast(&ttyinput.cond_has_client); + } + } + } + pthread_mutex_unlock(&ttyinput.lock); + return result; +} + +static int +tty_read_line_client(HANDLE handle, void* buf, int count) +{ + int result = 0; + int nchars = count / sizeof(WCHAR); + sigset_t pendset; + + if (!nchars) + return 0; + if (nchars>MAX_CONSOLE_TCHARS) + nchars=MAX_CONSOLE_TCHARS; + + count = nchars*sizeof(WCHAR); + + pthread_mutex_lock(&ttyinput.lock); + + if (!tty_maybe_initialize_unlocked(handle)) { + result = -1; + errno = EIO; + goto unlock; + } + + while (!result) { + while (ttyinput.head == ttyinput.tail) { + if (!io_begin_interruptible(ttyinput.handle)) { + ttyinput.in_progress = 0; + result = -1; + errno = EINTR; + goto unlock; + } else { + if (!ttyinput.in_progress) { + /* We are to wait */ + ttyinput.in_progress=1; + /* wake console reader */ + pthread_cond_broadcast(&ttyinput.cond_has_client); + } + pthread_cond_wait(&ttyinput.cond_has_data, &ttyinput.lock); + io_end_interruptible(ttyinput.handle); + } + } + result = sizeof(WCHAR)*(ttyinput.tail-ttyinput.head); + if (result > count) { + result = count; + } + if (result) { + if (result > 0) { + DWORD nch,offset = 0; + LPWSTR ubuf = buf; + + memcpy(buf,&ttyinput.buffer[ttyinput.head],count); + ttyinput.head += (result / sizeof(WCHAR)); + if (ttyinput.head == ttyinput.tail) + ttyinput.head = ttyinput.tail = 0; + + for (nch=0;nch + * ... stuffs its handle into its structure. + * B.. + * ... calls us to wake the thread, finds the handle. 
+ * But just before we actually call CancelSynchronousIo/CancelIoEx, + * something weird happens in the scheduler and the system is + * so extremely busy that the interrupter doesn't get scheduled + * for a while, giving the interruptee lots of time to continue. + * A.. Didn't actually have to block, calls io_end_interruptible (in + * which the handle flag already invalid, but it doesn't care + * about that and still continues). + * ... Proceeds to do unrelated I/O, e.g. goes into FFI code + * (possible, because the CSP page hasn't been armed yet), which + * does I/O from a C library, completely unrelated to SBCL's + * routines. + * B.. The scheduler gives us time for the interrupter again. + * We call CancelSynchronousIo/CancelIoEx. + * A.. Interruptee gets an expected error in unrelated I/O during FFI. + * Interruptee's C code is unhappy and dies. + * + * Note that CancelSynchronousIo and CancelIoEx have a rather different + * effect here. In the normal (CancelIoEx) case, we only ever kill + * I/O on the file handle in question. I think we could ask users + * to please not both use Lisp streams (unix-read/write) _and_ FFI code + * on the same file handle in quick succession. + * + * CancelSynchronousIo seems more dangerous though. Here we interrupt + * I/O on any other handle, even ones we're not actually responsible for, + * because this functions deals with the thread handle, not the file + * handle. + * + * Options: + * - Use mutexes. Somewhere, somehow. Presumably one mutex per + * target thread, acquired around win32_maybe_interrupt_io and + * io_end_interruptible. (That's one mutex use per I/O + * operation, but I can't imagine that compared to our FFI overhead + * that's much of a problem.) + * - In io_end_interruptible, detect that the flag has been + * invalidated, and in that case, do something clever (what?) to + * wait for the imminent gc_stop_the_world, which implicitly tells + * us that win32_maybe_interrupt_io must have exited. Except if + * some _third_ thread is also beginning to call interrupt-thread + * and wake_thread at the same time...? + * - Revert the whole CancelSynchronousIo business after all. + * - I'm wrong and everything is OK already. 
+ */ + if (ptr_CancelIoEx) { + HANDLE h = (HANDLE) + InterlockedExchangePointer((volatile LPVOID *) + &th->synchronous_io_handle_and_flag, + (LPVOID)INVALID_HANDLE_VALUE); + if (h && (h!=INVALID_HANDLE_VALUE)) { + if (console_handle_p(h)) { + pthread_mutex_lock(&ttyinput.lock); + pthread_cond_broadcast(&ttyinput.cond_has_data); + pthread_mutex_unlock(&ttyinput.lock); + } + if (ptr_CancelSynchronousIo) { + pthread_mutex_lock(&th->os_thread->fiber_lock); + done = ptr_CancelSynchronousIo(th->os_thread->fiber_group->handle); + pthread_mutex_unlock(&th->os_thread->fiber_lock); + } + return (!!done)|(!!ptr_CancelIoEx(h,NULL)); + } + } + return 0; +} + +static const LARGE_INTEGER zero_large_offset = {.QuadPart = 0LL}; + +int +win32_unix_write(FDTYPE fd, void * buf, int count) +{ + HANDLE handle; + DWORD written_bytes; + OVERLAPPED overlapped; + struct thread * self = arch_os_get_current_thread(); + BOOL waitInGOR; + LARGE_INTEGER file_position; + BOOL seekable; + BOOL ok; + + handle =(HANDLE)maybe_get_osfhandle(fd); + if (console_handle_p(handle)) + return win32_write_unicode_console(handle,buf,count); + + overlapped.hEvent = self->private_events.events[0]; + seekable = SetFilePointerEx(handle, + zero_large_offset, + &file_position, + FILE_CURRENT); + if (seekable) { + overlapped.Offset = file_position.LowPart; + overlapped.OffsetHigh = file_position.HighPart; + } else { + overlapped.Offset = 0; + overlapped.OffsetHigh = 0; + } + if (!io_begin_interruptible(handle)) { + errno = EINTR; + return -1; + } + ok = WriteFile(handle, buf, count, &written_bytes, &overlapped); + io_end_interruptible(handle); + + if (ok) { + goto done_something; + } else { + DWORD errorCode = GetLastError(); + if (errorCode==ERROR_OPERATION_ABORTED) { + GetOverlappedResult(handle,&overlapped,&written_bytes,FALSE); + errno = EINTR; + return -1; + } + if (errorCode!=ERROR_IO_PENDING) { + errno = EIO; + return -1; + } else { + if(WaitForMultipleObjects(2,self->private_events.events, + FALSE,INFINITE) != WAIT_OBJECT_0) { + CancelIo(handle); + waitInGOR = TRUE; + } else { + waitInGOR = FALSE; + } + if (!GetOverlappedResult(handle,&overlapped,&written_bytes,waitInGOR)) { + if (GetLastError()==ERROR_OPERATION_ABORTED) { + errno = EINTR; + } else { + errno = EIO; + } + return -1; + } else { + goto done_something; + } + } + } + done_something: + if (seekable) { + file_position.QuadPart += written_bytes; + SetFilePointerEx(handle,file_position,NULL,FILE_BEGIN); + } + return written_bytes; +} + +int +win32_unix_read(FDTYPE fd, void * buf, int count) +{ + HANDLE handle; + OVERLAPPED overlapped = {.Internal=0}; + DWORD read_bytes = 0; + struct thread * self = arch_os_get_current_thread(); + DWORD errorCode = 0; + BOOL waitInGOR = FALSE; + BOOL ok = FALSE; + LARGE_INTEGER file_position; + BOOL seekable; + + handle = (HANDLE)maybe_get_osfhandle(fd); + + if (console_handle_p(handle)) + return win32_read_unicode_console(handle,buf,count); + + overlapped.hEvent = self->private_events.events[0]; + /* If it has a position, we won't try overlapped */ + seekable = SetFilePointerEx(handle, + zero_large_offset, + &file_position, + FILE_CURRENT); + if (seekable) { + overlapped.Offset = file_position.LowPart; + overlapped.OffsetHigh = file_position.HighPart; + } else { + overlapped.Offset = 0; + overlapped.OffsetHigh = 0; + } + if (!io_begin_interruptible(handle)) { + errno = EINTR; + return -1; + } + ok = ReadFile(handle,buf,count,&read_bytes, &overlapped); + io_end_interruptible(handle); + if (ok) { + /* immediately */ + goto done_something; + } 
else { + errorCode = GetLastError(); + if (errorCode == ERROR_HANDLE_EOF || + errorCode == ERROR_BROKEN_PIPE || + errorCode == ERROR_NETNAME_DELETED) { + read_bytes = 0; + goto done_something; + } + if (errorCode==ERROR_OPERATION_ABORTED) { + GetOverlappedResult(handle,&overlapped,&read_bytes,FALSE); + errno = EINTR; + return -1; + } + if (errorCode!=ERROR_IO_PENDING) { + /* is it some _real_ error? */ + errno = EIO; + return -1; + } else { + int ret; + if( (ret = WaitForMultipleObjects(2,self->private_events.events, + FALSE,INFINITE)) != WAIT_OBJECT_0) { + CancelIo(handle); + waitInGOR = TRUE; + /* Waiting for IO only */ + } else { + waitInGOR = FALSE; + } + ok = GetOverlappedResult(handle,&overlapped,&read_bytes,waitInGOR); + if (!ok) { + errorCode = GetLastError(); + if (errorCode == ERROR_HANDLE_EOF || + errorCode == ERROR_BROKEN_PIPE || + errorCode == ERROR_NETNAME_DELETED) { + read_bytes = 0; + goto done_something; + } else { + if (errorCode == ERROR_OPERATION_ABORTED) + errno = EINTR; /* that's it. */ + else + errno = EIO; /* something unspecific */ + return -1; + } + } else + goto done_something; + } + } + done_something: + if (seekable) { + file_position.QuadPart += read_bytes; + SetFilePointerEx(handle,file_position,NULL,FILE_BEGIN); + } + return read_bytes; +} + /* This is a manually-maintained version of ldso_stubs.S. */ +void __stdcall RtlUnwind(void *, void *, void *, void *); /* I don't have winternl.h */ + void scratch(void) { - strerror(42); - asin(0); - acos(0); - sinh(0); - cosh(0); - hypot(0, 0); - write(0, 0, 0); + LARGE_INTEGER la = {{0}}; + closesocket(0); + CloseHandle(0); + shutdown(0, 0); + SetHandleInformation(0, 0, 0); + GetHandleInformation(0, 0); + getsockopt(0, 0, 0, 0, 0); + FlushConsoleInputBuffer(0); + FormatMessageA(0, 0, 0, 0, 0, 0, 0); + FreeLibrary(0); + GetACP(); + GetConsoleCP(); + GetConsoleOutputCP(); + GetCurrentProcess(); + GetExitCodeProcess(0, 0); + GetLastError(); + GetOEMCP(); + GetProcAddress(0, 0); + GetProcessTimes(0, 0, 0, 0, 0); + GetSystemTimeAsFileTime(0); + LoadLibrary(0); + LocalFree(0); + PeekConsoleInput(0, 0, 0, 0); + PeekNamedPipe(0, 0, 0, 0, 0, 0); + ReadFile(0, 0, 0, 0, 0); + Sleep(0); + WriteFile(0, 0, 0, 0, 0); + _get_osfhandle(0); + _open_osfhandle(0, 0); + _rmdir(0); + _pipe(0,0,0); + access(0,0); close(0); - rename(0,0); - getcwd(0,0); dup(0); - LoadLibrary(0); - GetProcAddress(0, 0); - mkdir(0); + isatty(0); + strerror(42); + write(0, 0, 0); + RtlUnwind(0, 0, 0, 0); + MapViewOfFile(0,0,0,0,0); + UnmapViewOfFile(0); + FlushViewOfFile(0,0); + SetFilePointerEx(0, la, 0, 0); + DuplicateHandle(0, 0, 0, 0, 0, 0, 0); + #ifndef LISP_FEATURE_SB_UNICODE + CreateDirectoryA(0,0); + CreateFileMappingA(0,0,0,0,0,0); + CreateFileA(0,0,0,0,0,0,0); + GetComputerNameA(0, 0); + GetCurrentDirectoryA(0,0); + GetEnvironmentVariableA(0, 0, 0); + GetFileAttributesA(0); + GetVersionExA(0); + MoveFileA(0,0); + SHGetFolderPathA(0, 0, 0, 0, 0); + SetCurrentDirectoryA(0); + SetEnvironmentVariableA(0, 0); + #else + CreateDirectoryW(0,0); + CreateFileMappingW(0,0,0,0,0,0); + CreateFileW(0,0,0,0,0,0,0); + FormatMessageW(0, 0, 0, 0, 0, 0, 0); + GetComputerNameW(0, 0); + GetCurrentDirectoryW(0,0); + GetEnvironmentVariableW(0, 0, 0); + GetFileAttributesW(0); + GetVersionExW(0); + MoveFileW(0,0); + SHGetFolderPathW(0, 0, 0, 0, 0); + SetCurrentDirectoryW(0); + SetEnvironmentVariableW(0, 0); + #endif + _exit(0); +} + +char * +os_get_runtime_executable_path(int external) +{ + char path[MAX_PATH + 1]; + DWORD bufsize = sizeof(path); + DWORD size; + + if ((size 
= GetModuleFileNameA(NULL, path, bufsize)) == 0) + return NULL; + else if (size == bufsize && GetLastError() == ERROR_INSUFFICIENT_BUFFER) + return NULL; + + return copied_string(path); } +#ifdef LISP_FEATURE_SB_THREAD + +int +win32_wait_object_or_signal(HANDLE waitFor) +{ + struct thread * self = arch_os_get_current_thread(); + HANDLE handles[2]; + handles[0] = waitFor; + handles[1] = self->private_events.events[1]; + return + WaitForMultipleObjects(2,handles, FALSE,INFINITE); +} + +/* + * Portability glue for win32 waitable timers. + * + * One may ask: Why is there a wrapper in C when the calls are so + * obvious that Lisp could do them directly (as it did on Windows)? + * + * But the answer is that on POSIX platforms, we now emulate the win32 + * calls and hide that emulation behind this os_* abstraction. + */ +HANDLE +os_create_wtimer() +{ + return CreateWaitableTimer(0, 0, 0); +} + +int +os_wait_for_wtimer(HANDLE handle) +{ + return win32_wait_object_or_signal(handle); +} + +void +os_close_wtimer(HANDLE handle) +{ + CloseHandle(handle); +} + +void +os_set_wtimer(HANDLE handle, int sec, int nsec) +{ + /* in units of 100ns, i.e. 0.1us. Negative for relative semantics. */ + long long dueTime + = -(((long long) sec) * 10000000 + + ((long long) nsec + 99) / 100); + SetWaitableTimer(handle, (LARGE_INTEGER*) &dueTime, 0, 0, 0, 0); +} + +void +os_cancel_wtimer(HANDLE handle) +{ + CancelWaitableTimer(handle); +} +#endif + /* EOF */
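
The waitable-timer wrappers above (os_create_wtimer, os_set_wtimer, os_wait_for_wtimer, os_cancel_wtimer, os_close_wtimer) are normally driven from Lisp through alien calls, but their intended calling sequence can be sketched in C. The sketch below is illustrative only and not part of the patch: it assumes the declarations from this file are in scope and that it runs on a thread with an SBCL thread structure in a threaded build (win32_wait_object_or_signal consults the current thread's private_events); example_sleep_interruptibly and the 1.5-second interval are made up for the example.

/* Minimal sketch: arm a relative timer for 1.5 s and block until either it
 * fires or the thread's wakeup event is signalled.  os_wait_for_wtimer()
 * returns the WaitForMultipleObjects() result, so WAIT_OBJECT_0 means the
 * timer handle itself became signalled. */
static void example_sleep_interruptibly(void)
{
    HANDLE timer = os_create_wtimer();     /* CreateWaitableTimer(0, 0, 0) */
    os_set_wtimer(timer, 1, 500000000);    /* 1 s + 500000000 ns = 1.5 s, relative */
    if (os_wait_for_wtimer(timer) == WAIT_OBJECT_0) {
        /* timer expired normally */
    } else {
        /* woken early via private_events.events[1]; disarm the timer */
        os_cancel_wtimer(timer);
    }
    os_close_wtimer(timer);
}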