2 * the Win32 incarnation of OS-dependent routines. See also
3 * $(sbcl_arch)-win32-os.c
5 * This file (along with os.h) exports an OS-independent interface to
6 * the operating system VM facilities. Surprise surprise, this
7 * interface looks a lot like the Mach interface (but simpler in some
8 * places). For some operating systems, a subset of these functions
9 * will have to be emulated.
13 * This software is part of the SBCL system. See the README file for
16 * This software is derived from the CMU CL system, which was
17 * written at Carnegie Mellon University and released into the
18 * public domain. The software is in the public domain and is
19 * provided with absolutely no warranty. See the COPYING and CREDITS
20 * files for more information.
24 * This file was copied from the Linux version of the same, and
25 * likely still has some linuxisms in it that haven't been eliminated
32 #include <sys/param.h>
40 #include "interrupt.h"
45 #include "genesis/primitive-objects.h"
48 #include <sys/types.h>
62 #ifndef LISP_FEATURE_SB_THREAD
63 /* dummy definition to reduce ifdef clutter */
64 #define WITH_GC_AT_SAFEPOINTS_ONLY() if (0) ; else
67 os_vm_size_t os_vm_page_size;
70 #include "gencgc-internal.h"
74 int linux_sparc_siginfo_bug = 0;
75 int linux_supports_futex=0;
81 /* missing definitions for modern mingws */
83 #define EH_UNWINDING 0x02
85 #ifndef EH_EXIT_UNWIND
86 #define EH_EXIT_UNWIND 0x04
89 /* Tired of writing arch_os_get_current_thread each time. */
90 #define this_thread (arch_os_get_current_thread())
92 /* wrappers for winapi calls that must be successful (like SBCL's
93 * (aver ...) form). */
95 /* win_aver function: basic building block for miscellaneous
96 * ..AVER.. macrology (below) */
98 /* To do: These routines used to be "customizable" with dyndebug_init()
99 * and variables like dyndebug_survive_aver, dyndebug_skip_averlax based
100 * on environment variables. Those features got lost on the way, but
101 * ought to be reintroduced. */
104 intptr_t win_aver(intptr_t value, char* comment, char* file, int line,
108 LPSTR errorMessage = "<FormatMessage failed>";
109 DWORD errorCode = GetLastError(), allocated=0;
110 int posixerrno = errno;
111 const char* posixstrerror = strerror(errno);
112 char* report_template =
113 "Expression unexpectedly false: %s:%d\n"
115 " ===> returned #X%p, \n"
117 " ... Win32 thinks:\n"
118 " ===> code %u, message => %s\n"
120 " ===> code %u, message => %s\n";
123 FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER|
124 FORMAT_MESSAGE_FROM_SYSTEM,
127 MAKELANGID(LANG_ENGLISH,SUBLANG_ENGLISH_US),
128 (LPSTR)&errorMessage,
133 fprintf(stderr, report_template,
137 (unsigned)errorCode, errorMessage,
138 posixerrno, posixstrerror);
140 lose(report_template,
144 (unsigned)errorCode, errorMessage,
145 posixerrno, posixstrerror);
148 LocalFree(errorMessage);
153 /* sys_aver function: really tiny adaptor of win_aver for
154 * "POSIX-parody" CRT results ("lowio" and similar stuff):
155 * negative number means something... negative. */
157 intptr_t sys_aver(long value, char* comment, char* file, int line,
160 win_aver((intptr_t)(value>=0),comment,file,line,justwarn);
164 /* Check for (call) result being boolean true. (call) may be an arbitrary
165 * expression now; a massive attack of gccisms ensures transparent type
166 * conversion back and forth, so the type of AVER(expression) is the
167 * type of expression. Value is the same _if_ it can be losslessly
168 * converted to (void*) and back.
170 * Failed AVER() is normally fatal. Well, unless dyndebug_survive_aver
174 ({ __typeof__(call) __attribute__((unused)) me = \
176 win_aver((intptr_t)(call), #call, __FILE__, __LINE__, 0); \
179 /* AVERLAX(call): do the same check as AVER did, but be mild on
180 * failure: print an annoying unrequested message to stderr, and
181 * continue. With the dyndebug_skip_averlax flag, AVERLAX doesn't
182 * even check or complain. */
184 #define AVERLAX(call) \
185 ({ __typeof__(call) __attribute__((unused)) me = \
187 win_aver((intptr_t)(call), #call, __FILE__, __LINE__, 1); \
190 /* Now, when failed AVER... prints both errno and GetLastError(), two
191 * variants of "POSIX/lowio" style checks below are almost useless
192 * (they build on sys_aver like the two above do on win_aver). */
194 #define CRT_AVER_NONNEGATIVE(call) \
195 ({ __typeof__(call) __attribute__((unused)) me = \
197 sys_aver((call), #call, __FILE__, __LINE__, 0); \
200 #define CRT_AVERLAX_NONNEGATIVE(call) \
201 ({ __typeof__(call) __attribute__((unused)) me = \
203 sys_aver((call), #call, __FILE__, __LINE__, 1); \
207 #define CRT_AVER(booly) \
208 ({ __typeof__(booly) __attribute__((unused)) me = (booly); \
209 sys_aver((booly)?0:-1, #booly, __FILE__, __LINE__, 0); \
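/* Usage illustration (hedged, not exhaustive): the macros above wrap
 * calls whose failure we either cannot or do not want to silently
 * survive, e.g.
 *
 *   AVER(VirtualFree(addr, 0, MEM_RELEASE));            // fatal on failure
 *   AVERLAX(VirtualQuery(addr, &info, sizeof info));    // warn and continue
 *   CRT_AVER_NONNEGATIVE(lseek(fd, offset, SEEK_SET));  // CRT-style, <0 fails
 */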
212 const char * t_nil_s(lispobj symbol);
215 * The following signal-mask-related alien routines are called from Lisp:
218 /* As of win32, deferrables _do_ matter. gc_signal doesn't. */
219 unsigned long block_deferrables_and_return_mask()
222 block_deferrable_signals(0, &sset);
223 return (unsigned long)sset;
226 #if defined(LISP_FEATURE_SB_THREAD)
227 void apply_sigmask(unsigned long sigmask)
229 sigset_t sset = (sigset_t)sigmask;
230 pthread_sigmask(SIG_SETMASK, &sset, 0);
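/* Illustrative pairing (the actual callers live on the Lisp side):
 * save the mask returned by the first routine, run with deferrables
 * blocked, then hand it back to the second, e.g.
 *
 *   unsigned long mask = block_deferrables_and_return_mask();
 *   ... work with deferrable signals blocked ...
 *   apply_sigmask(mask);
 */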
234 /* The exception handling function looks like this: */
235 EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *,
236 struct lisp_exception_frame *,
239 /* handle_exception is defined further in this file, but since SBCL
240 * 1.0.1.24, it doesn't get registered as SEH handler directly anymore,
241 * not even by wos_install_interrupt_handlers. Instead, x86-assem.S
242 * provides exception_handler_wrapper; we install it here, and each
243 * exception frame on nested funcall()s also points to it.
247 void *base_seh_frame;
249 static void *get_seh_frame(void)
252 #ifdef LISP_FEATURE_X86
253 asm volatile ("mov %%fs:0,%0": "=r" (retval));
255 asm volatile ("mov %%gs:0,%0": "=r" (retval));
260 static void set_seh_frame(void *frame)
262 #ifdef LISP_FEATURE_X86
263 asm volatile ("mov %0,%%fs:0": : "r" (frame));
265 asm volatile ("mov %0,%%gs:0": : "r" (frame));
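/* (Background note: FS on 32-bit Windows and GS on 64-bit point at the
 * current thread's TEB, and offset 0 is the NtTib.ExceptionList slot --
 * the head of the per-thread SEH frame chain that the two helpers above
 * read and write.) */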
269 #if defined(LISP_FEATURE_SB_THREAD)
271 /* Permit loads from GC_SAFEPOINT_PAGE_ADDR (NB page state change is
272 * "synchronized" with the memory region content/availability --
273 * e.g. you won't see another CPU flushing buffered writes after WP --
274 * but there is some window when other threads _seem_ to trap AFTER
275 * access is granted. You may think of it as something like "OS enters
276 * SEH handler too slowly" -- what's important is there's no implicit
277 * synchronization between VirtualProtect caller and other thread's
278 * SEH handler, hence no ordering of events. VirtualProtect is
279 * implicitly synchronized with protected memory contents (only).
281 * This last fact could potentially be exploited to good effect, e.g. for
282 * foreign call speed, but we don't use it for now: almost the only
283 * fact relevant to the current signalling protocol is "sooner or
284 * later everyone will trap [everyone will stop trapping]".
286 * An interesting source on page-protection-based inter-thread
287 * communication is a well-known paper by Dave Dice, Hui Huang,
288 * Mingyao Yang: ``Asymmetric Dekker Synchronization''. Last time
289 * I checked it was available at
290 * http://home.comcast.net/~pjbishop/Dave/Asymmetric-Dekker-Synchronization.txt
295 AVER(VirtualProtect((void*) GC_SAFEPOINT_PAGE_ADDR, sizeof(lispobj),
296 PAGE_READWRITE, &oldProt));
302 AVER(VirtualProtect((void*) GC_SAFEPOINT_PAGE_ADDR, sizeof(lispobj),
303 PAGE_NOACCESS, &oldProt));
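/* Illustrative sketch only (the real poll sites are emitted by the
 * compiler): a mutator's safepoint check is conceptually just a load
 * from the page toggled above, e.g.
 *
 *   (void)*(volatile lispobj *)GC_SAFEPOINT_PAGE_ADDR;
 *
 * which proceeds freely while the page is PAGE_READWRITE and faults into
 * the access-violation handler below once it is PAGE_NOACCESS. */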
308 #if defined(LISP_FEATURE_SB_THREAD)
309 /* We want to get a slot in TIB that (1) is available at constant
310 offset, (2) is our private property, so libraries wouldn't legally
311 override it, (3) contains something predefined for threads created
314 The low 64 TLS slots are addressable directly, starting with
315 FS:[#xE10]. When SBCL runtime is initialized, some of the low slots
316 may be already in use by its prerequisite DLLs, as DllMain()s and
317 TLS callbacks have been called already. But slot 63 is unlikely to
318 be reached at this point: one slot per DLL that needs it is the
319 common practice, and many system DLLs use predefined TIB-based
320 areas outside conventional TLS storage and don't need TLS slots.
321 With our current dependencies, even slot 2 is observed to be free
322 (as of WinXP and wine).
324 Now we'll call TlsAlloc() repeatedly until slot 63 is officially
325 assigned to us, then TlsFree() all other slots for normal use. TLS
326 slot 63, alias FS:[#.(+ #xE10 (* 4 63))], now belongs to us.
328 To summarize, let's list the assumptions we make:
330 - TIB, which is FS segment base, contains first 64 TLS slots at the
331 offset #xE10 (i.e. TIB layout compatibility);
332 - TLS slots are allocated from lower to higher ones;
333 - All libraries together with CRT startup have not requested 64
336 Taken together, these assumptions don't seem any less warranted than
337 the availability of the TIB arbitrary data slot for our use. There are
338 some more reasons to prefer slot 63 over TIB arbitrary data: (1) if
339 our assumptions for slot 63 are violated, it will be detected at
340 startup instead of causing some system-specific unreproducible
341 problems afterwards, depending on OS and loaded foreign libraries;
342 (2) if getting slot 63 reliably with our current approach becomes
343 impossible for some future Windows version, we can add a TLS
344 callback directory to the SBCL binary; the main image's TLS callback
345 is started before _any_ TLS slot is allocated by libraries, and
346 some C compiler vendors rely on this fact. */
350 #ifdef LISP_FEATURE_X86
351 DWORD slots[TLS_MINIMUM_AVAILABLE];
354 for (i=0; i<TLS_MINIMUM_AVAILABLE; ++i) {
356 if (key == OUR_TLS_INDEX) {
357 if (TlsGetValue(key)!=NULL)
358 lose("TLS slot assertion failed: fresh slot value is not NULL");
359 TlsSetValue(OUR_TLS_INDEX, (void*)(intptr_t)0xFEEDBAC4);
360 if ((intptr_t)(void*)arch_os_get_current_thread()!=(intptr_t)0xFEEDBAC4)
361 lose("TLS slot assertion failed: TIB layout change detected");
362 TlsSetValue(OUR_TLS_INDEX, NULL);
365 slots[n_slots++]=key;
367 for (i=0; i<n_slots; ++i) {
370 if (key!=OUR_TLS_INDEX) {
371 lose("TLS slot assertion failed: slot 63 is unavailable "
372 "(last TlsAlloc() returned %u)",key);
376 #endif /* LISP_FEATURE_SB_THREAD */
378 int os_number_of_processors = 1;
380 void os_init(char *argv[], char *envp[])
382 SYSTEM_INFO system_info;
383 GetSystemInfo(&system_info);
384 os_vm_page_size = system_info.dwPageSize > BACKEND_PAGE_BYTES?
385 system_info.dwPageSize : BACKEND_PAGE_BYTES;
386 #if defined(LISP_FEATURE_X86)
387 fast_bzero_pointer = fast_bzero_detect;
389 os_number_of_processors = system_info.dwNumberOfProcessors;
391 base_seh_frame = get_seh_frame();
394 static inline boolean local_thread_stack_address_p(os_vm_address_t address)
396 return this_thread &&
397 (((((u64)address >= (u64)this_thread->os_address) &&
398 ((u64)address < ((u64)this_thread)-THREAD_CSP_PAGE_SIZE))||
399 (((u64)address >= (u64)this_thread->control_stack_start)&&
400 ((u64)address < (u64)this_thread->control_stack_end))));
404 * So we have three fun scenarios here.
406 * First, we could be being called to reserve the memory areas
407 * during initialization (prior to loading the core file).
409 * Second, we could be being called by the GC to commit a page
410 * that has just been decommitted (for easy zero-fill).
412 * Third, we could be being called by create_thread_struct()
413 * in order to create the sundry and various stacks.
415 * The third case is easy to pick out because it passes an
418 * The second case is easy to pick out because it will be for
419 * a range of memory that is MEM_RESERVE rather than MEM_FREE.
421 * The second case is also easy to implement, because we leave
422 * the memory as reserved (since we do lazy commits).
426 os_validate(os_vm_address_t addr, os_vm_size_t len)
428 MEMORY_BASIC_INFORMATION mem_info;
431 /* the simple case first */
433 AVERLAX(VirtualAlloc(addr, len, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE));
436 if (!AVERLAX(VirtualQuery(addr, &mem_info, sizeof mem_info)))
439 if ((mem_info.State == MEM_RESERVE) && (mem_info.RegionSize >=len)) {
440 /* It would be correct to return here. However, support for Wine
441 * is beneficial, and Wine has a strange behavior in this
442 * department. It reports all memory below KERNEL32.DLL as
443 * reserved, but disallows MEM_COMMIT.
445 * Let's work around it: reserve the region we need for a second
446 * time. The second reservation is documented to fail on normal NT
447 * family, but it will succeed on Wine if this region is
450 VirtualAlloc(addr, len, MEM_RESERVE, PAGE_EXECUTE_READWRITE);
451 /* If it is Wine, the second call has succeeded, and now the region
452 * is really reserved. */
456 if (mem_info.State == MEM_RESERVE) {
457 fprintf(stderr, "validation of reserved space too short.\n");
459 /* Oddly, we do not treat this assertion as fatal; hence also the
460 * provision for MEM_RESERVE in the following code, I suppose: */
463 if (!AVERLAX(VirtualAlloc(addr, len, (mem_info.State == MEM_RESERVE)?
464 MEM_COMMIT: MEM_RESERVE, PAGE_EXECUTE_READWRITE)))
471 * For os_invalidate(), we merely decommit the memory rather than
472 * freeing the address space. This loses when freeing per-thread
473 * data and related memory since it leaks address space.
475 * So far the original comment (author unknown). It used to continue as
478 * It's not too lossy, however, since the two scenarios I'm aware of
479 * are fd-stream buffers, which are pooled rather than torched, and
480 * thread information, which I hope to pool (since windows creates
481 * threads at its own whim, and we probably want to be able to have
482 * them callback without funky magic on the part of the user, and
483 * full-on thread allocation is fairly heavyweight).
485 * But: As it turns out, we are no longer content with decommitting
486 * without freeing, and have now grown a second function
487 * os_invalidate_free(), sort of a really_os_invalidate().
489 * As discussed on #lisp, this is not a satisfactory solution, and probably
490 * ought to be rectified in the following way:
492 * - Any cases currently going through the non-freeing version of
493 * os_invalidate() are ultimately meant for zero-filling applications.
494 * Replace those use cases with an os_revalidate_bzero() or similarly
495 * named function, which explicitly takes care of that aspect of
498 * - The remaining uses of os_invalidate should actually free, and once
499 * the above is implemented, we can rename os_invalidate_free back to
500 * just os_invalidate().
502 * So far the new plan, as yet unimplemented. -- DFL
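 *
 * A minimal sketch of what the proposed os_revalidate_bzero() could look
 * like under that plan (illustrative only, not part of the interface yet):
 *
 *   void os_revalidate_bzero(os_vm_address_t addr, os_vm_size_t len)
 *   {
 *       AVERLAX(VirtualFree(addr, len, MEM_DECOMMIT));
 *       AVERLAX(VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE));
 *   }
 *
 * Decommitting and then recommitting hands back zero-filled pages without
 * giving up the address reservation.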
506 os_invalidate(os_vm_address_t addr, os_vm_size_t len)
508 AVERLAX(VirtualFree(addr, len, MEM_DECOMMIT));
512 os_invalidate_free(os_vm_address_t addr, os_vm_size_t len)
514 AVERLAX(VirtualFree(addr, 0, MEM_RELEASE));
518 os_invalidate_free_by_any_address(os_vm_address_t addr, os_vm_size_t len)
520 MEMORY_BASIC_INFORMATION minfo;
521 AVERLAX(VirtualQuery(addr, &minfo, sizeof minfo));
522 AVERLAX(minfo.AllocationBase);
523 AVERLAX(VirtualFree(minfo.AllocationBase, 0, MEM_RELEASE));
527 * os_map() is called to map a chunk of the core file into memory.
529 * Unfortunately, Windows semantics completely screws this up, so
530 * we just add backing store from the swapfile to where the chunk
531 * goes and read it up like a normal file. We could consider using
532 * a lazy read (demand page) setup, but that would mean keeping an
533 * open file pointer for the core indefinitely (and be one more
534 * thing to maintain).
538 os_map(int fd, int offset, os_vm_address_t addr, os_vm_size_t len)
542 AVER(VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE)||
543 VirtualAlloc(addr, len, MEM_RESERVE|MEM_COMMIT,
544 PAGE_EXECUTE_READWRITE));
546 CRT_AVER_NONNEGATIVE(lseek(fd, offset, SEEK_SET));
548 count = read(fd, addr, len);
549 CRT_AVER( count == len );
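/* The table below maps an os_vm_prot_t bit mask straight to a Win32 page
 * protection; assuming the usual OS_VM_PROT_READ/WRITE/EXECUTE bit values
 * of 1/2/4, os_protect() simply indexes into it, e.g. (illustration only)
 *
 *   os_protect(addr, len, OS_VM_PROT_READ | OS_VM_PROT_WRITE);
 *
 * ends up using os_protect_modes[3] as the new protection. */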
554 static DWORD os_protect_modes[8] = {
561 PAGE_EXECUTE_READWRITE,
562 PAGE_EXECUTE_READWRITE,
566 os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot)
570 DWORD new_prot = os_protect_modes[prot];
571 AVER(VirtualProtect(address, length, new_prot, &old_prot)||
572 (VirtualAlloc(address, length, MEM_COMMIT, new_prot) &&
573 VirtualProtect(address, length, new_prot, &old_prot)));
574 odxprint(misc,"Protecting %p + %p vmaccess %d "
575 "newprot %08x oldprot %08x",
576 address,length,prot,new_prot,old_prot);
579 /* FIXME: Now that FOO_END, rather than FOO_SIZE, is the fundamental
580 * description of a space, we could probably punt this and just do
581 * (FOO_START <= x && x < FOO_END) everywhere it's called. */
583 in_range_p(os_vm_address_t a, lispobj sbeg, size_t slen)
585 char* beg = (char*)((uword_t)sbeg);
586 char* end = (char*)((uword_t)sbeg) + slen;
587 char* adr = (char*)a;
588 return (adr >= beg && adr < end);
592 is_linkage_table_addr(os_vm_address_t addr)
594 return in_range_p(addr, LINKAGE_TABLE_SPACE_START, LINKAGE_TABLE_SPACE_END);
597 static boolean is_some_thread_local_addr(os_vm_address_t addr);
600 is_valid_lisp_addr(os_vm_address_t addr)
602 if(in_range_p(addr, READ_ONLY_SPACE_START, READ_ONLY_SPACE_SIZE) ||
603 in_range_p(addr, STATIC_SPACE_START , STATIC_SPACE_SIZE) ||
604 in_range_p(addr, DYNAMIC_SPACE_START , dynamic_space_size) ||
605 is_some_thread_local_addr(addr))
610 /* test if an address is within thread-local space */
612 is_thread_local_addr(struct thread* th, os_vm_address_t addr)
614 /* Assuming that this is correct, it would warrant further comment,
615 * I think. Based on what our call site is doing, we have been
616 * tasked to check for the address of a lisp object; not merely any
617 * foreign address within the thread's area. Indeed, this used to
618 * be a check for control and binding stack only, rather than the
619 * full thread "struct". So shouldn't the THREAD_STRUCT_SIZE rather
620 * be (thread_control_stack_size+BINDING_STACK_SIZE) instead? That
621 * would also do away with the LISP_FEATURE_SB_THREAD case. Or does
622 * it simply not matter? --DFL */
623 ptrdiff_t diff = ((char*)th->os_address)-(char*)addr;
624 return diff > (ptrdiff_t)0 && diff < (ptrdiff_t)THREAD_STRUCT_SIZE
625 #ifdef LISP_FEATURE_SB_THREAD
626 && addr != (os_vm_address_t) th->csp_around_foreign_call
632 is_some_thread_local_addr(os_vm_address_t addr)
635 #ifdef LISP_FEATURE_SB_THREAD
637 pthread_mutex_lock(&all_threads_lock);
638 for_each_thread(th) {
639 if(is_thread_local_addr(th,addr)) {
644 pthread_mutex_unlock(&all_threads_lock);
650 /* A tiny bit of interrupt.c state we want our paws on. */
651 extern boolean internal_errors_enabled;
653 extern void exception_handler_wrapper();
656 c_level_backtrace(const char* header, int depth)
662 for (lastseh = get_seh_frame(); lastseh && (lastseh!=(void*)-1);
665 fprintf(stderr, "Backtrace: %s (thread %p)\n", header, this_thread);
666 for (frame = __builtin_frame_address(0); frame; frame=*(void**)frame)
670 fprintf(stderr, "[#%02d]: ebp = 0x%p, ret = 0x%p\n",n,
671 frame, ((void**)frame)[1]);
675 #ifdef LISP_FEATURE_X86
676 #define voidreg(ctxptr,name) ((void*)((ctxptr)->E##name))
678 #define voidreg(ctxptr,name) ((void*)((ctxptr)->R##name))
682 #if defined(LISP_FEATURE_X86)
684 handle_single_step(os_context_t *ctx)
686 if (!single_stepping)
689 /* We are doing a displaced instruction. At least function
690 * end breakpoints use this. */
691 WITH_GC_AT_SAFEPOINTS_ONLY () /* Todo: Is it really gc-unsafe? */
692 restore_breakpoint_from_single_step(ctx);
698 #ifdef LISP_FEATURE_UD2_BREAKPOINTS
699 #define SBCL_EXCEPTION_BREAKPOINT EXCEPTION_ILLEGAL_INSTRUCTION
700 #define TRAP_CODE_WIDTH 2
702 #define SBCL_EXCEPTION_BREAKPOINT EXCEPTION_BREAKPOINT
703 #define TRAP_CODE_WIDTH 1
707 handle_breakpoint_trap(os_context_t *ctx, struct thread* self)
709 #ifdef LISP_FEATURE_UD2_BREAKPOINTS
710 if (((unsigned short *)((ctx->win32_context)->Eip))[0] != 0x0b0f)
714 /* Unlike some other operating systems, Win32 leaves EIP
715 * pointing to the breakpoint instruction. */
716 (*os_context_pc_addr(ctx)) += TRAP_CODE_WIDTH;
718 /* Now EIP points just after the INT3 byte and aims at the
719 * 'kind' value (eg trap_Cerror). */
720 unsigned trap = *(unsigned char *)(*os_context_pc_addr(ctx));
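    /* For illustration, the two breakpoint encodings handled here are
     * roughly (kk being the 'kind' byte just read):
     *   INT3 form: CC kk       -- TRAP_CODE_WIDTH == 1
     *   UD2 form:  0F 0B kk    -- TRAP_CODE_WIDTH == 2, hence the 0x0b0f
     *                             check at the top of this function */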
722 #ifdef LISP_FEATURE_SB_THREAD
723 /* Before any other trap handler: gc_safepoint ensures that
724 inner alloc_sap for passing the context won't trap on
726 if (trap == trap_PendingInterrupt) {
727 /* Done everything needed for this trap, except EIP
729 arch_skip_instruction(ctx);
730 thread_interrupted(ctx);
735 /* This is just for info in case the monitor wants to print an
737 access_control_stack_pointer(self) =
738 (lispobj *)*os_context_sp_addr(ctx);
740 WITH_GC_AT_SAFEPOINTS_ONLY() {
741 #if defined(LISP_FEATURE_SB_THREAD)
742 block_blockable_signals(0,&ctx->sigmask);
744 handle_trap(ctx, trap);
745 #if defined(LISP_FEATURE_SB_THREAD)
746 thread_sigmask(SIG_SETMASK,&ctx->sigmask,NULL);
750 /* Done, we're good to go! */
755 handle_access_violation(os_context_t *ctx,
756 EXCEPTION_RECORD *exception_record,
760 CONTEXT *win32_context = ctx->win32_context;
762 #if defined(LISP_FEATURE_X86)
764 "SEGV. ThSap %p, Eip %p, Esp %p, Esi %p, Edi %p, "
765 "Addr %p Access %d\n",
772 exception_record->ExceptionInformation[0]);
775 "SEGV. ThSap %p, Eip %p, Esp %p, Esi %p, Edi %p, "
776 "Addr %p Access %d\n",
783 exception_record->ExceptionInformation[0]);
786 /* Stack: This case takes care of our various stack exhaustion
787 * protect pages (with the notable exception of the control stack!). */
788 if (self && local_thread_stack_address_p(fault_address)) {
789 if (handle_guard_page_triggered(ctx, fault_address))
790 return 0; /* gc safety? */
794 /* Safepoint pages */
795 #ifdef LISP_FEATURE_SB_THREAD
796 if (fault_address == (void *) GC_SAFEPOINT_PAGE_ADDR) {
797 thread_in_lisp_raised(ctx);
801 if ((((u64)fault_address) == ((u64)self->csp_around_foreign_call))){
802 thread_in_safety_transition(ctx);
808 page_index_t index = find_page_index(fault_address);
811 * Now, if the page is supposedly write-protected and this
812 * is a write, tell the gc that it's been hit.
814 if (page_table[index].write_protected) {
815 gencgc_handle_wp_violation(fault_address);
817 AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size),
819 MEM_COMMIT, PAGE_EXECUTE_READWRITE));
824 if (fault_address == undefined_alien_address)
827 /* linkage table or a "valid_lisp_addr" outside of dynamic space (?) */
828 if (is_linkage_table_addr(fault_address)
829 || is_valid_lisp_addr(fault_address))
835 /* First use of a new page, let's get some memory for it. */
837 #if defined(LISP_FEATURE_X86)
838 AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size),
840 MEM_COMMIT, PAGE_EXECUTE_READWRITE)
841 ||(fprintf(stderr,"Unable to recommit addr %p eip 0x%08lx\n",
842 fault_address, win32_context->Eip) &&
843 (c_level_backtrace("BT",5),
844 fake_foreign_function_call(ctx),
845 lose("Lispy backtrace"),
848 AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size),
850 MEM_COMMIT, PAGE_EXECUTE_READWRITE)
851 ||(fprintf(stderr,"Unable to recommit addr %p eip 0x%p\n",
852 fault_address, (void*)win32_context->Rip) &&
853 (c_level_backtrace("BT",5),
854 fake_foreign_function_call(ctx),
855 lose("Lispy backtrace"),
863 signal_internal_error_or_lose(os_context_t *ctx,
864 EXCEPTION_RECORD *exception_record,
868 * If we fall through to here then we need to either forward
869 * the exception to the lisp-side exception handler if it's
870 * set up, or drop to LDB.
873 if (internal_errors_enabled) {
875 lispobj exception_record_sap;
878 /* We're making the somewhat arbitrary decision that having
879 * internal errors enabled means that lisp has sufficient
880 * marbles to be able to handle exceptions, but exceptions
881 * aren't supposed to happen during cold init or reinit
884 #if defined(LISP_FEATURE_SB_THREAD)
885 block_blockable_signals(0,&ctx->sigmask);
887 fake_foreign_function_call(ctx);
889 WITH_GC_AT_SAFEPOINTS_ONLY() {
890 /* Allocate the SAP objects while the "interrupts" are still
892 context_sap = alloc_sap(ctx);
893 exception_record_sap = alloc_sap(exception_record);
894 #if defined(LISP_FEATURE_SB_THREAD)
895 thread_sigmask(SIG_SETMASK, &ctx->sigmask, NULL);
898 /* The exception system doesn't automatically clear pending
899 * exceptions, so we lose as soon as we execute any FP
900 * instruction unless we do this first. */
901 /* Call into lisp to handle things. */
902 funcall2(StaticSymbolFunction(HANDLE_WIN32_EXCEPTION),
904 exception_record_sap);
906 /* If Lisp doesn't nlx, we need to put things back. */
907 undo_fake_foreign_function_call(ctx);
908 #if defined(LISP_FEATURE_SB_THREAD)
909 thread_sigmask(SIG_SETMASK, &ctx->sigmask, NULL);
911 /* FIXME: HANDLE-WIN32-EXCEPTION should be allowed to decline */
915 fprintf(stderr, "Exception Code: 0x%p.\n",
916 (void*)(intptr_t)exception_record->ExceptionCode);
917 fprintf(stderr, "Faulting IP: 0x%p.\n",
918 (void*)(intptr_t)exception_record->ExceptionAddress);
919 if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
920 MEMORY_BASIC_INFORMATION mem_info;
922 if (VirtualQuery(fault_address, &mem_info, sizeof mem_info)) {
923 fprintf(stderr, "page status: 0x%lx.\n", mem_info.State);
926 fprintf(stderr, "Was writing: %p, where: 0x%p.\n",
927 (void*)exception_record->ExceptionInformation[0],
933 fake_foreign_function_call(ctx);
934 lose("Exception too early in cold init, cannot continue.");
938 * A good explanation of the exception handling semantics is
939 * http://win32assembly.online.fr/Exceptionhandling.html (possibly defunct)
941 * http://www.microsoft.com/msj/0197/exception/exception.aspx
944 EXCEPTION_DISPOSITION
945 handle_exception(EXCEPTION_RECORD *exception_record,
946 struct lisp_exception_frame *exception_frame,
947 CONTEXT *win32_context,
948 void *dispatcher_context)
951 /* Not certain why this should be possible, but let's be safe... */
952 return ExceptionContinueSearch;
954 if (exception_record->ExceptionFlags & (EH_UNWINDING | EH_EXIT_UNWIND)) {
955 /* If we're being unwound, be graceful about it. */
957 /* Undo any dynamic bindings. */
958 unbind_to_here(exception_frame->bindstack_pointer,
959 arch_os_get_current_thread());
960 return ExceptionContinueSearch;
963 DWORD lastError = GetLastError();
964 DWORD lastErrno = errno;
965 DWORD code = exception_record->ExceptionCode;
966 struct thread* self = arch_os_get_current_thread();
968 os_context_t context, *ctx = &context;
969 context.win32_context = win32_context;
970 #if defined(LISP_FEATURE_SB_THREAD)
971 context.sigmask = self ? self->os_thread->blocked_signal_set : 0;
974 /* For EXCEPTION_ACCESS_VIOLATION only. */
975 void *fault_address = (void *)exception_record->ExceptionInformation[1];
978 "SEH: rec %p, ctxptr %p, rip %p, fault %p\n"
979 "... code %p, rcx %p, fp-tags %p\n\n",
982 voidreg(win32_context,ip),
984 (void*)(intptr_t)code,
985 voidreg(win32_context,cx),
986 win32_context->FloatSave.TagWord);
988 /* This function had become unwieldy. Let's cut it down into
989 * pieces based on the different exception codes. Each exception
990 * code handler gets the chance to decline by returning non-zero if it
995 case EXCEPTION_ACCESS_VIOLATION:
996 rc = handle_access_violation(
997 ctx, exception_record, fault_address, self);
1000 case SBCL_EXCEPTION_BREAKPOINT:
1001 rc = handle_breakpoint_trap(ctx, self);
1004 #if defined(LISP_FEATURE_X86)
1005 case EXCEPTION_SINGLE_STEP:
1006 rc = handle_single_step(ctx);
1015 /* All else failed, drop through to the lisp-side exception handler. */
1016 signal_internal_error_or_lose(ctx, exception_record, fault_address);
1019 SetLastError(lastError);
1020 return ExceptionContinueExecution;
1024 wos_install_interrupt_handlers(struct lisp_exception_frame *handler)
1026 #ifdef LISP_FEATURE_X86
1027 handler->next_frame = get_seh_frame();
1028 handler->handler = (void*)exception_handler_wrapper;
1029 set_seh_frame(handler);
1031 static int once = 0;
1033 AddVectoredExceptionHandler(1,veh);
1038 * The stubs below are replacements for the windows versions,
1039 * which can -fail- when used in our memory spaces because they
1040 * validate the memory spaces they are passed in a way that
1041 * denies our exception handler a chance to run.
1044 void *memmove(void *dest, const void *src, size_t n)
1048 for (i = 0; i < n; i++) *(((char *)dest)+i) = *(((char *)src)+i);
1050 while (n--) *(((char *)dest)+n) = *(((char *)src)+n);
1055 void *memcpy(void *dest, const void *src, size_t n)
1057 while (n--) *(((char *)dest)+n) = *(((char *)src)+n);
1061 char *dirname(char *path)
1063 static char buf[PATH_MAX + 1];
1064 size_t pathlen = strlen(path);
1067 if (pathlen >= sizeof(buf)) {
1068 lose("Pathname too long in dirname.\n");
1073 for (i = pathlen; i >= 0; --i) {
1074 if (buf[i] == '/' || buf[i] == '\\') {
1083 /* This is a manually-maintained version of ldso_stubs.S. */
1085 void __stdcall RtlUnwind(void *, void *, void *, void *); /* I don't have winternl.h */
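/* (A hedged note on intent: the calls below are never meant to execute;
 * they merely force references to these Win32 entry points so that they
 * are linked into the runtime image and remain visible to the Lisp-side
 * foreign-symbol lookup, much as ldso_stubs.S does elsewhere.) */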
1090 FlushConsoleInputBuffer(0);
1091 FormatMessageA(0, 0, 0, 0, 0, 0, 0);
1095 GetConsoleOutputCP();
1096 GetCurrentProcess();
1097 GetExitCodeProcess(0, 0);
1100 GetProcAddress(0, 0);
1101 GetProcessTimes(0, 0, 0, 0, 0);
1102 GetSystemTimeAsFileTime(0);
1105 PeekConsoleInput(0, 0, 0, 0);
1106 PeekNamedPipe(0, 0, 0, 0, 0, 0);
1107 ReadFile(0, 0, 0, 0, 0);
1109 WriteFile(0, 0, 0, 0, 0);
1119 RtlUnwind(0, 0, 0, 0);
1120 MapViewOfFile(0,0,0,0,0);
1122 FlushViewOfFile(0,0);
1123 #ifndef LISP_FEATURE_SB_UNICODE
1124 CreateDirectoryA(0,0);
1125 CreateFileMappingA(0,0,0,0,0,0);
1126 CreateFileA(0,0,0,0,0,0,0);
1127 GetComputerNameA(0, 0);
1128 GetCurrentDirectoryA(0,0);
1129 GetEnvironmentVariableA(0, 0, 0);
1130 GetFileAttributesA(0);
1133 SHGetFolderPathA(0, 0, 0, 0, 0);
1134 SetCurrentDirectoryA(0);
1135 SetEnvironmentVariableA(0, 0);
1137 CreateDirectoryW(0,0);
1138 CreateFileMappingW(0,0,0,0,0,0);
1139 CreateFileW(0,0,0,0,0,0,0);
1140 FormatMessageW(0, 0, 0, 0, 0, 0, 0);
1141 GetComputerNameW(0, 0);
1142 GetCurrentDirectoryW(0,0);
1143 GetEnvironmentVariableW(0, 0, 0);
1144 GetFileAttributesW(0);
1147 SHGetFolderPathW(0, 0, 0, 0, 0);
1148 SetCurrentDirectoryW(0);
1149 SetEnvironmentVariableW(0, 0);
1155 os_get_runtime_executable_path(int external)
1157 char path[MAX_PATH + 1];
1158 DWORD bufsize = sizeof(path);
1161 if ((size = GetModuleFileNameA(NULL, path, bufsize)) == 0)
1163 else if (size == bufsize && GetLastError() == ERROR_INSUFFICIENT_BUFFER)
1166 return copied_string(path);
1169 #ifdef LISP_FEATURE_SB_THREAD
1172 win32_wait_object_or_signal(HANDLE waitFor)
1174 struct thread * self = arch_os_get_current_thread();
1176 handles[0] = waitFor;
1177 handles[1] = self->private_events.events[1];
1179 WaitForMultipleObjects(2,handles, FALSE,INFINITE);
1183 * Portability glue for win32 waitable timers.
1185 * One may ask: Why is there a wrapper in C when the calls are so
1186 * obvious that Lisp could do them directly (as it did on Windows)?
1188 * But the answer is that on POSIX platforms, we now emulate the win32
1189 * calls and hide that emulation behind this os_* abstraction.
1194 return CreateWaitableTimer(0, 0, 0);
1198 os_wait_for_wtimer(HANDLE handle)
1200 return win32_wait_object_or_signal(handle);
1204 os_close_wtimer(HANDLE handle)
1206 CloseHandle(handle);
1210 os_set_wtimer(HANDLE handle, int sec, int nsec)
1212 /* in units of 100ns, i.e. 0.1us. Negative for relative semantics. */
1214 = -(((long long) sec) * 10000000
1215 + ((long long) nsec + 99) / 100);
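    /* Worked example: sec = 2, nsec = 150 gives
     *   dueTime = -(2*10000000 + (150+99)/100) = -20000002,
     * i.e. a relative expiry of 2 seconds plus 200ns (nsec rounds up to
     * the next 100ns tick). */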
1216 SetWaitableTimer(handle, (LARGE_INTEGER*) &dueTime, 0, 0, 0, 0);
1220 os_cancel_wtimer(HANDLE handle)
1222 CancelWaitableTimer(handle);