 * very-low-level utilities for runtime support
 *
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
#define LANGUAGE_ASSEMBLY

#include "genesis/closure.h"
#include "genesis/funcallable-instance.h"
#include "genesis/fdefn.h"
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"
#include "genesis/thread.h"
/* Minimize conditionalization for different OS naming schemes.
 *
 * (As of sbcl-0.8.10, this seems no longer to be much of an issue,
 * since everyone has converged on ELF. If this generality really
 * turns out not to matter, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 *
 * (Except Win32, which is unlikely ever to be ELF, sorry. -- AB 2005-12-08)
 */
#if defined __linux__ || defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ || defined __sun
#define GNAME(var) var
#else
#define GNAME(var) _##var
#endif
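/* Illustrative expansion sketch (not part of the runtime proper): on
 * ELF platforms GNAME(alloc) is just the bare symbol `alloc`, while on
 * underscore-prefixing platforms (Win32, Darwin) it becomes `_alloc`,
 * so directives like
 *     .globl GNAME(alloc)
 *     call GNAME(alloc)
 * resolve to the right C symbol either way. */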
/* Get the right type of alignment. Linux, FreeBSD, NetBSD, OpenBSD,
 * Solaris, and Win32 want alignment in bytes; the remaining platforms
 * interpret the argument to .align as a power of two.
 *
 * (As in the GNAME() definitions above, as of sbcl-0.8.10, this seems
 * no longer to be much of an issue, since everyone has converged on
 * the same value. If this generality really turns out not to
 * matter any more, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 */
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__sun) || defined(LISP_FEATURE_WIN32)
#define align_16byte 16
#else
#define align_16byte 4 /* 2^4 = 16 under power-of-two semantics */
#endif
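/* Sketch of the two .align semantics this macro papers over:
 *     .align 16    # byte semantics: pad to a 16-byte boundary
 *     .align 4     # power-of-two semantics: pad to 2^4 = 16 bytes
 * so `.align align_16byte` requests the same boundary everywhere. */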
/*
 * The assembler used for win32 doesn't like .type or .size directives,
 * so we want to conditionally omit them. So let's wrap them in macros
 * that are defined to be no-ops on win32. Hopefully this still works on
 * other platforms.
 */
#if !defined(LISP_FEATURE_WIN32) && !defined(LISP_FEATURE_DARWIN)
#define TYPE(name) .type name,@function
#define SIZE(name) .size name,.-name
#else
#define TYPE(name)
#define SIZE(name)
#endif
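/* Typical usage sketch (`foo` is a hypothetical routine):
 *     .globl GNAME(foo)
 *     TYPE(GNAME(foo))       # ELF: .type foo,@function
 * GNAME(foo):
 *     ret
 *     SIZE(GNAME(foo))       # ELF: .size foo,.-foo
 * On Win32 and Darwin the TYPE/SIZE lines expand to nothing. */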
/*
 * x86/darwin (as of MacOS X 10.4.5) doesn't reliably fire signal
 * handlers (SIGTRAP or Mach exception handlers) for 0xCC, so we have
 * to use ud2 instead. ud2 is an undefined opcode encoded as the two
 * bytes 0F 0B (the word #x0b0f when read little-endian), and executing
 * it causes SIGILL to fire. We check for this instruction in the
 * SIGILL handler and if we see it, we advance the EIP by two bytes to
 * skip over the ud2 instruction and call sigtrap_handler. */
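/* A sketch of the handler-side logic just described (illustrative C,
 * not the runtime's actual signal code; the helper names and context
 * type are assumptions):
 *
 *     void sigill_handler(int sig, siginfo_t *info, os_context_t *ctx) {
 *         unsigned char *pc = (unsigned char *)*os_context_pc_addr(ctx);
 *         if (pc[0] == 0x0F && pc[1] == 0x0B) {    // the ud2 encoding
 *             *os_context_pc_addr(ctx) += 2;       // skip over ud2
 *             sigtrap_handler(sig, info, ctx);
 *         }
 *     }
 */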
#if defined(LISP_FEATURE_UD2_BREAKPOINTS)
#define TRAP ud2
#else
#define TRAP int3
#endif
.globl GNAME(all_threads)
/*
 * A call to call_into_c preserves esi, edi, and ebp.
 * (The C function will preserve ebx, esi, edi, and ebp across its
 * function call, but we trash ebx ourselves by using it to save the
 * return Lisp address.)
 *
 * Return values are in eax, and maybe edx for quads, or st(0) for
 * floats.
 *
 * This should work for Lisp calling C calling Lisp calling C, and so
 * on (arbitrary nesting).
 *
 * FIXME & OAOOM: This duplicates call-out in src/compiler/x86/c-call.lisp,
 * so if you tweak this, change that too!
 */
.align align_16byte,0x90
.globl GNAME(call_into_c)
TYPE(GNAME(call_into_c))
/* Save the return Lisp address in ebx. */

/* Set up the NPX for C. */

call *%eax # normal callout using Lisp stack
movl %eax,%ecx # remember integer return value

/* Check for a return FP value. */
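/* (How such a check typically works on x87, as a sketch: fxam
 * classifies st(0), fnstsw copies the condition codes into ax, and an
 * "empty" classification means no FP value is pending, i.e. the callee
 * returned its result in eax/edx instead.) */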
/* The integer return value is in eax, or in edx:eax for 64-bit results. */
/* Set up the NPX stack for Lisp. */
fldz # Ensure no regs are empty.

/* Restore the return value. */
movl %ecx,%eax # maybe return value

/* The return result is in st(0). */
/* Set up the NPX stack for Lisp, placing the result in st(0). */
fldz # Ensure no regs are empty.
fxch %st(7) # Move the result back to st(0).

/* We don't need to restore eax, because the result is in st(0). */

/* Return. FIXME: It would be nice to restructure this to use RET. */
SIZE(GNAME(call_into_c))
.globl GNAME(call_into_lisp_first_time)
TYPE(GNAME(call_into_lisp_first_time))

/* We don't worry too much about saving registers
 * here, because we never expect to return from the initial call to lisp
 * anyway. */

.align align_16byte,0x90
GNAME(call_into_lisp_first_time):
pushl %ebp # Save old frame pointer.
movl %esp,%ebp # Establish new frame.
#ifndef LISP_FEATURE_WIN32
movl GNAME(all_threads),%eax
/* pthread machinery takes care of this for other threads */
movl THREAD_CONTROL_STACK_END_OFFSET(%eax),%esp
#else
/* Win32 -really- doesn't like you switching stacks out from under it. */
movl GNAME(all_threads),%eax
.globl GNAME(call_into_lisp)
TYPE(GNAME(call_into_lisp))

/* The C conventions require that ebx, esi, edi, and ebp be preserved
 * across function calls. */

.align align_16byte,0x90
GNAME(call_into_lisp):
pushl %ebp # Save old frame pointer.
movl %esp,%ebp # Establish new frame.
/* Save the NPX state. */
fwait # Catch any pending NPX exceptions.
subl $108,%esp # Make room for the NPX state.
fnsave (%esp) # save and reset NPX

movl (%esp),%eax # Load NPX control word.
andl $0xfffff2ff,%eax # Set rounding mode to nearest.
orl $0x00000200,%eax # Set precision to double (53-bit mantissa).
pushl %eax
fldcw (%esp) # Recover modes.
popl %eax
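/* (Control-word sketch: bits 11:10 are rounding control, 00b = round
 * to nearest; bits 9:8 are precision control, 10b = 53-bit mantissa.
 * The and/or above clears RC and forces PC to 10b.) */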
fldz # Ensure no FP regs are empty.

/* Save C regs: ebx esi edi. */

/* Clear descriptor regs. */
xorl %eax,%eax # lexenv
xorl %ebx,%ebx # available
xorl %ecx,%ecx # arg count
xorl %edx,%edx # first arg
xorl %edi,%edi # second arg
xorl %esi,%esi # third arg

/* no longer in function call */
movl %esp,%ebx # remember current stack
pushl %ebx # Save entry stack on (maybe) new stack.
/* Establish Lisp args. */
movl 8(%ebp),%eax # lexenv?
movl 12(%ebp),%ebx # address of arg vec
movl 16(%ebp),%ecx # num args
shll $2,%ecx # Make num args into fixnum.
movl (%ebx),%edx # arg0
movl 4(%ebx),%edi # arg1
movl 8(%ebx),%esi # arg2

/* Registers eax, ecx, edx, edi, and esi are now live. */
#ifdef LISP_FEATURE_WIN32
/* Establish an SEH frame. */
#ifdef LISP_FEATURE_SB_THREAD
/* FIXME: need to save BSP here. */
#error "need to save BSP here, but don't know how yet."
#else
pushl BINDING_STACK_POINTER + SYMBOL_VALUE_OFFSET
#endif
pushl $GNAME(exception_handler_wrapper)
/* Allocate new frame. */
push %ebp # Dummy for return address
push %ebp # fp in save location S1
mov %esp,%ebp # The current sp marks start of new frame.
sub $4,%esp # Ensure 3 slots are allocated, two above.

call *CLOSURE_FUN_OFFSET(%eax)

/* If the function returned multiple values, it will return to
 * this point. Lose them. */

/* A single-value function returns here. */

#ifdef LISP_FEATURE_WIN32
/* Remove our SEH frame. */

/* Restore the stack, in case there was a stack change. */

/* Restore C regs: ebx esi edi. */

/* Restore the NPX state. */

movl %edx,%eax # c-val
SIZE(GNAME(call_into_lisp))
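/* From the C side this entry point is declared roughly as follows
 * (sketch, matching the three argument slots read off EBP above):
 *     lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
 */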
/* support for saving and restoring the NPX state from C */
.globl GNAME(fpu_save)
TYPE(GNAME(fpu_save))
fnsave (%eax) # Save the NPX state. (resets NPX)
SIZE(GNAME(fpu_save))

.globl GNAME(fpu_restore)
TYPE(GNAME(fpu_restore))
frstor (%eax) # Restore the NPX state.
SIZE(GNAME(fpu_restore))
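/* C-side usage sketch (the extern declarations are assumptions based
 * on the comment above; the buffer must hold a full 108-byte FNSAVE
 * image):
 *     extern void fpu_save(void *state);
 *     extern void fpu_restore(void *state);
 *     char state[108];
 *     fpu_save(state);
 *     ...
 *     fpu_restore(state);
 */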
/*
 * the undefined-function trampoline
 */
.align align_16byte,0x90
.globl GNAME(undefined_tramp)
TYPE(GNAME(undefined_tramp))
.byte 0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(undefined_tramp):
pop 4(%ebp) # Save return PC for backtrace.
.byte UNDEFINED_FUN_ERROR
.byte sc_DescriptorReg # eax in the Descriptor-reg SC
SIZE(GNAME(undefined_tramp))

/* KLUDGE: FIND-ESCAPED-FRAME (SYS:SRC;CODE;DEBUG-INT.LISP) needs
 * to know the name of the function immediately following the
 * undefined-function trampoline. */
/*
 * the closure trampoline
 */
.align align_16byte,0x90
.globl GNAME(closure_tramp)
TYPE(GNAME(closure_tramp))
.byte 0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(closure_tramp):
movl FDEFN_FUN_OFFSET(%eax),%eax
/* FIXME: The '*' after "jmp" in the next line is from PVE's
 * patch posted to the CMU CL mailing list Oct 6, 1999. It looks
 * reasonable, and it certainly seems as though if CMU CL needs it,
 * SBCL needs it too, but I haven't actually verified that it's
 * right. It would be good to find a way to force the flow of
 * control through here to test it. */
jmp *CLOSURE_FUN_OFFSET(%eax)
SIZE(GNAME(closure_tramp))
.align align_16byte,0x90
.globl GNAME(funcallable_instance_tramp)
TYPE(GNAME(funcallable_instance_tramp))
GNAME(funcallable_instance_tramp):
movl FUNCALLABLE_INSTANCE_FUNCTION_OFFSET(%eax),%eax
/* KLUDGE: on this platform, whatever kind of function is in %eax
 * now, the first word of it contains the address to jump to. */
jmp *CLOSURE_FUN_OFFSET(%eax)
SIZE(GNAME(funcallable_instance_tramp))
/*
 * fun-end breakpoint magic
 */

/*
 * For an explanation of the magic involved in function-end
 * breakpoints, see the implementation in ppc-assem.S.
 */

.globl GNAME(fun_end_breakpoint_guts)
GNAME(fun_end_breakpoint_guts):
/* Multiple Value return */
jc multiple_value_return
/* Single value return: The eventual return will now use the
 * multiple values return convention but with a return values
 * count of one. */
movl %esp,%ebx # Set up ebx, the ofp.
subl $4,%esp # Allocate one stack slot for the return value.
movl $4,%ecx # Set up ecx for one return value.
movl $(NIL),%edi # default second value
movl $(NIL),%esi # default third value

multiple_value_return:

.globl GNAME(fun_end_breakpoint_trap)
GNAME(fun_end_breakpoint_trap):
TRAP
.byte trap_FunEndBreakpoint
hlt # We should never return here.

.globl GNAME(fun_end_breakpoint_end)
GNAME(fun_end_breakpoint_end):
.globl GNAME(do_pending_interrupt)
TYPE(GNAME(do_pending_interrupt))
.align align_16byte,0x90
GNAME(do_pending_interrupt):
TRAP
.byte trap_PendingInterrupt
ret
SIZE(GNAME(do_pending_interrupt))
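/* (Trap protocol sketch: the byte emitted after the trap instruction
 * is never executed; the trap handler reads it from the saved PC to
 * decide which kind of trap occurred, then advances the PC past it.) */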
/* Allocate bytes and return the start of the allocated space
 * in the specified destination register.
 *
 * In the general case the size will be in the destination register.
 *
 * All registers must be preserved except the destination.
 * The C conventions will preserve ebx, esi, edi, and ebp.
 * So only eax, ecx, and edx need special care here.
 *
 * ALLOC factors out the logic of calling alloc(): stack alignment, etc.
 *
 * DEFINE_ALLOC_TO_FOO defines an allocation routine.
 */
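/* The C allocator these wrappers reach has roughly this shape (sketch;
 * the exact argument/return types are an assumption):
 *     lispobj *alloc(int nbytes);   // returns the start of the space
 */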
#ifdef LISP_FEATURE_DARWIN
#define ALLOC(size) \
pushl %ebp; /* Save EBP */ \
movl %esp,%ebp; /* Save ESP to EBP */ \
pushl $0; /* Reserve space for arg */ \
andl $0xfffffff0,%esp; /* Align stack to 16 bytes */ \
movl size, (%esp); /* Argument to alloc */ \
call GNAME(alloc); \
movl %ebp,%esp; /* Restore ESP from EBP */ \
popl %ebp; /* Restore EBP */
#else
#define ALLOC(size) \
pushl size; /* Argument to alloc */ \
call GNAME(alloc); \
addl $4,%esp; /* Pop argument */
#endif
#define DEFINE_ALLOC_TO_EAX(name,size) \
.globl GNAME(name); \
.align align_16byte,0x90; \
pushl %ecx; /* Save ECX and EDX */ \
popl %edx; /* Restore ECX and EDX */ \
#define DEFINE_ALLOC_TO_ECX(name,size) \
.globl GNAME(name); \
.align align_16byte,0x90; \
pushl %eax; /* Save EAX and EDX */ \
movl %eax,%ecx; /* Result to destination */ \
#define DEFINE_ALLOC_TO_EDX(name,size) \
.globl GNAME(name); \
.align align_16byte,0x90; \
pushl %eax; /* Save EAX and ECX */ \
movl %eax,%edx; /* Result to destination */ \
#define DEFINE_ALLOC_TO_REG(name,reg,size) \
.globl GNAME(name); \
.align align_16byte,0x90; \
pushl %eax; /* Save EAX, ECX, and EDX */ \
movl %eax,reg; /* Result to destination */ \
DEFINE_ALLOC_TO_EAX(alloc_to_eax,%eax)
DEFINE_ALLOC_TO_EAX(alloc_8_to_eax,$8)
DEFINE_ALLOC_TO_EAX(alloc_16_to_eax,$16)

DEFINE_ALLOC_TO_ECX(alloc_to_ecx,%ecx)
DEFINE_ALLOC_TO_ECX(alloc_8_to_ecx,$8)
DEFINE_ALLOC_TO_ECX(alloc_16_to_ecx,$16)

DEFINE_ALLOC_TO_EDX(alloc_to_edx,%edx)
DEFINE_ALLOC_TO_EDX(alloc_8_to_edx,$8)
DEFINE_ALLOC_TO_EDX(alloc_16_to_edx,$16)

DEFINE_ALLOC_TO_REG(alloc_to_ebx,%ebx,%ebx)
DEFINE_ALLOC_TO_REG(alloc_8_to_ebx,%ebx,$8)
DEFINE_ALLOC_TO_REG(alloc_16_to_ebx,%ebx,$16)

DEFINE_ALLOC_TO_REG(alloc_to_esi,%esi,%esi)
DEFINE_ALLOC_TO_REG(alloc_8_to_esi,%esi,$8)
DEFINE_ALLOC_TO_REG(alloc_16_to_esi,%esi,$16)

DEFINE_ALLOC_TO_REG(alloc_to_edi,%edi,%edi)
DEFINE_ALLOC_TO_REG(alloc_8_to_edi,%edi,$8)
DEFINE_ALLOC_TO_REG(alloc_16_to_edi,%edi,$16)
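/* Illustrative expansion sketch: DEFINE_ALLOC_TO_EAX(alloc_8_to_eax,$8)
 * defines a routine alloc_8_to_eax that saves ecx/edx, calls alloc(8)
 * via ALLOC, leaves the result in eax, restores the saved registers,
 * and returns. */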
/* Called from lisp when an inline allocation overflows.
 * Every register except the result needs to be preserved.
 * We depend on C to preserve ebx, esi, edi, and ebp,
 * but must save eax, ecx, and edx ourselves where necessary. */
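/* (Inline-allocation sketch: compiled Lisp code bumps the region's
 * free pointer inline; on overflow the destination register arrives
 * here holding free_pointer + size, so ALLOC_OVERFLOW below recovers
 * the size by subtracting the region's free pointer, the word at
 * START_REGION, before calling the allocator.) */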
#ifdef LISP_FEATURE_SB_THREAD
#define START_REGION %fs:THREAD_ALLOC_REGION_OFFSET
#else
#define START_REGION GNAME(boxed_region)
#endif

#define ALLOC_OVERFLOW(size) \
/* Calculate the size for the allocation. */ \
subl START_REGION,size; \
ALLOC(size)
/* This routine handles an overflow with eax=crfp+size. So the
 * size is eax-crfp. */
.globl GNAME(alloc_overflow_eax)
TYPE(GNAME(alloc_overflow_eax))
GNAME(alloc_overflow_eax):
pushl %ecx # Save ecx
pushl %edx # Save edx
ALLOC_OVERFLOW(%eax)
popl %edx # Restore edx.
popl %ecx # Restore ecx.
ret
SIZE(GNAME(alloc_overflow_eax))
.globl GNAME(alloc_overflow_ecx)
TYPE(GNAME(alloc_overflow_ecx))
GNAME(alloc_overflow_ecx):
pushl %eax # Save eax
pushl %edx # Save edx
ALLOC_OVERFLOW(%ecx)
movl %eax,%ecx # Set up the destination.
popl %edx # Restore edx.
popl %eax # Restore eax.
ret
SIZE(GNAME(alloc_overflow_ecx))
.globl GNAME(alloc_overflow_edx)
TYPE(GNAME(alloc_overflow_edx))
GNAME(alloc_overflow_edx):
pushl %eax # Save eax
pushl %ecx # Save ecx
ALLOC_OVERFLOW(%edx)
movl %eax,%edx # Set up the destination.
popl %ecx # Restore ecx.
popl %eax # Restore eax.
ret
SIZE(GNAME(alloc_overflow_edx))
/* This routine handles an overflow with ebx=crfp+size. So the
 * size is ebx-crfp. */
.globl GNAME(alloc_overflow_ebx)
TYPE(GNAME(alloc_overflow_ebx))
GNAME(alloc_overflow_ebx):
pushl %eax # Save eax
pushl %ecx # Save ecx
pushl %edx # Save edx
ALLOC_OVERFLOW(%ebx)
movl %eax,%ebx # Set up the destination.
popl %edx # Restore edx.
popl %ecx # Restore ecx.
popl %eax # Restore eax.
ret
SIZE(GNAME(alloc_overflow_ebx))
/* This routine handles an overflow with esi=crfp+size. So the
 * size is esi-crfp. */
.globl GNAME(alloc_overflow_esi)
TYPE(GNAME(alloc_overflow_esi))
GNAME(alloc_overflow_esi):
pushl %eax # Save eax
pushl %ecx # Save ecx
pushl %edx # Save edx
ALLOC_OVERFLOW(%esi)
movl %eax,%esi # Set up the destination.
popl %edx # Restore edx.
popl %ecx # Restore ecx.
popl %eax # Restore eax.
ret
SIZE(GNAME(alloc_overflow_esi))
.globl GNAME(alloc_overflow_edi)
TYPE(GNAME(alloc_overflow_edi))
GNAME(alloc_overflow_edi):
pushl %eax # Save eax
pushl %ecx # Save ecx
pushl %edx # Save edx
ALLOC_OVERFLOW(%edi)
movl %eax,%edi # Set up the destination.
popl %edx # Restore edx.
popl %ecx # Restore ecx.
popl %eax # Restore eax.
ret
SIZE(GNAME(alloc_overflow_edi))
#ifdef LISP_FEATURE_WIN32
/* The guts of the exception-handling system don't use
 * frame pointers, which manages to throw off backtraces
 * rather badly. So here we grab the (known-good) EBP
 * and EIP from the exception context and use them to fake
 * up a stack frame which will skip over the system SEH
 * code. */
.globl GNAME(exception_handler_wrapper)
TYPE(GNAME(exception_handler_wrapper))
GNAME(exception_handler_wrapper):
/* Context layout is: */
/* 7 dwords before FSA. (0x1c) */
/* 8 dwords and 0x50 bytes in the FSA. (0x70/0x8c) */
/* 4 dwords segregs. (0x10/0x9c) */
/* 6 dwords non-stack GPRs. (0x18/0xb4) */
#define CONTEXT_EBP_OFFSET 0xb4
#define CONTEXT_EIP_OFFSET 0xb8
/* some other stuff we don't care about. */
movl 0x10(%esp), %ebp /* context */
pushl CONTEXT_EIP_OFFSET(%ebp)
pushl CONTEXT_EBP_OFFSET(%ebp)
call GNAME(handle_exception)
SIZE(GNAME(exception_handler_wrapper))
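/* (Frame sketch: the two pushes above recreate a conventional
 * [return EIP][saved EBP] pair taken from the context, so a backtrace
 * walks from our fake frame straight into the interrupted code.) */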
#ifdef LISP_FEATURE_DARWIN

.globl GNAME(call_into_lisp_tramp)
TYPE(GNAME(call_into_lisp_tramp))
GNAME(call_into_lisp_tramp):
/* 1. build the stack frame from the block that's pointed to by ECX
 * 2. free the block
 * 3. set ECX to 0
 * 4. call the function via call_into_lisp */
pushl 0(%ecx) /* return address */

pushl 32(%ecx) /* eflags */
pushl 28(%ecx) /* EAX */
pushl 20(%ecx) /* ECX */
pushl 16(%ecx) /* EDX */
pushl 24(%ecx) /* EBX */
pushl $0 /* popal is going to ignore esp */
pushl %ebp /* is this right?? */
pushl 12(%ecx) /* ESI */
pushl 8(%ecx) /* EDI */
pushl $0 /* args for call_into_lisp */
pushl 4(%ecx) /* function to call */

/* free our save block */
pushl %ecx /* reserve sufficient space on stack for args */
andl $0xfffffff0, %esp /* align stack */
call GNAME(os_invalidate)

/* call call_into_lisp */
call GNAME(call_into_lisp)

/* Clean up our mess */
SIZE(GNAME(call_into_lisp_tramp))
.align align_16byte,0x90
.globl GNAME(post_signal_tramp)
TYPE(GNAME(post_signal_tramp))
GNAME(post_signal_tramp):
/* This is notionally the second half of a function whose first half
 * doesn't exist. This is where call_into_lisp returns when called
 * using return_to_lisp_function. */
addl $12,%esp /* clear call_into_lisp args from stack */
popal /* restore registers */
#ifdef LISP_FEATURE_DARWIN
/* skip two padding words */
addl $8,%esp
#endif
ret
SIZE(GNAME(post_signal_tramp))
/* fast_bzero implementations and code to detect which implementation
 * to use. */

.globl GNAME(fast_bzero_pointer)

GNAME(fast_bzero_pointer):
/* Variable containing a pointer to the bzero function to use.
 * Initially points to a basic function. Change this variable
 * to fast_bzero_detect if OS supports SSE. */
.long GNAME(fast_bzero_base)
.align align_16byte,0x90
.globl GNAME(fast_bzero)
TYPE(GNAME(fast_bzero))
GNAME(fast_bzero):
/* Indirect function call */
jmp *GNAME(fast_bzero_pointer)
SIZE(GNAME(fast_bzero))
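/* Dispatch sketch, as seen from C (the prototype is an assumption):
 *     extern void fast_bzero(void *ptr, size_t nbytes);
 * The first call through fast_bzero_pointer can be routed to
 * fast_bzero_detect, which runs CPUID once, stores the best
 * implementation back into fast_bzero_pointer, and jumps to it;
 * every later call dispatches through the patched pointer directly. */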
.align align_16byte,0x90
.globl GNAME(fast_bzero_detect)
TYPE(GNAME(fast_bzero_detect))
GNAME(fast_bzero_detect):
/* Decide whether to use SSE, MMX or REP version */
push %eax /* CPUID uses EAX-EDX */
test $0x04000000, %edx /* SSE2 needed for MOVNTDQ */
/* Originally there was another case here for using the
 * MOVNTQ instruction for processors that supported MMX but
 * not SSE2. This turned out to be a loss especially on
 * Athlons (where this instruction is apparently microcoded
 * somewhat slowly). So for simplicity revert to REP STOSL
 * for all non-SSE2 processors.
 */
movl $(GNAME(fast_bzero_base)), GNAME(fast_bzero_pointer)
movl $(GNAME(fast_bzero_sse)), GNAME(fast_bzero_pointer)
jmp *GNAME(fast_bzero_pointer)
SIZE(GNAME(fast_bzero_detect))
.align align_16byte,0x90
.globl GNAME(fast_bzero_sse)
TYPE(GNAME(fast_bzero_sse))

GNAME(fast_bzero_sse):
/* A fast routine for zero-filling blocks of memory that are
 * guaranteed to start and end at a 4096-byte aligned address. */
push %esi /* Save temporary registers */
push %edi
mov 16(%esp), %esi /* Parameter: number of bytes to fill */
mov 12(%esp), %edi /* Parameter: start address */
shr $6, %esi /* Number of 64-byte blocks to zero */
jz Lend_sse /* If none, stop */
movups %xmm7, -16(%esp) /* Save XMM register */
xorps %xmm7, %xmm7 /* Zero the XMM register */

Lloop_sse:
/* Store the 16 zero bytes in xmm7 to memory, 4 times. MOVNTDQ is the
 * non-caching double-quadword moving variant, i.e. the memory areas
 * we're touching are not fetched into the L1 cache, since we're just
 * going to overwrite the memory soon anyway. */
movntdq %xmm7, 0(%edi)
movntdq %xmm7, 16(%edi)
movntdq %xmm7, 32(%edi)
movntdq %xmm7, 48(%edi)

add $64, %edi /* Advance pointer */
dec %esi /* Decrement 64-byte block count */
jnz Lloop_sse
movups -16(%esp), %xmm7 /* Restore the XMM register */
sfence /* Ensure that weakly ordered writes are flushed. */
Lend_sse:
mov 12(%esp), %esi /* Parameter: start address */
prefetcht0 0(%esi) /* Prefetch the start of the block into cache,
 * since it's likely to be used immediately. */
pop %edi /* Restore temp registers */
pop %esi
ret
SIZE(GNAME(fast_bzero_sse))
.align align_16byte,0x90
.globl GNAME(fast_bzero_base)
TYPE(GNAME(fast_bzero_base))

GNAME(fast_bzero_base):
/* A fast routine for zero-filling blocks of memory that are
 * guaranteed to start and end at a 4096-byte aligned address. */
push %eax /* Save temporary registers */
push %ecx
push %edi
mov 20(%esp), %ecx /* Parameter: number of bytes to fill */
mov 16(%esp), %edi /* Parameter: start address */
xor %eax, %eax /* Zero EAX */
shr $2, %ecx /* Number of 4-byte words to fill */
rep
stosl /* Store EAX to *EDI, ECX times, incrementing
 * EDI by 4 after each store */
pop %edi /* Restore temp registers */
pop %ecx
pop %eax
ret
SIZE(GNAME(fast_bzero_base))
890 /* When LISP_FEATURE_C_STACK_IS_CONTROL_STACK, we cannot safely scrub
891 * the control stack from C, largely due to not knowing where the
892 * active stack frame ends. On such platforms, we reimplement the
893 * core scrubbing logic in assembly, in this case here:
896 .align align_16byte,0x90
897 .globl GNAME(arch_scrub_control_stack)
898 TYPE(GNAME(arch_scrub_control_stack))
899 GNAME(arch_scrub_control_stack):
900 /* We are passed three parameters:
901 * A (struct thread *) at [ESP+4],
902 * the address of the guard page at [ESP+8], and
903 * the address of the hard guard page at [ESP+12].
904 * We may trash EAX, ECX, and EDX with impunity.
905 * [ESP] is our return address, [ESP-4] is the first
906 * stack slot to scrub. */
908 /* We start by setting up our scrub pointer in EAX, our
909 * guard page upper bound in ECX, and our hard guard
910 * page upper bound in EDX. */
912 mov GNAME(os_vm_page_size),%edx
917 /* We need to do a memory operation relative to the
918 * thread pointer, so put it in %ecx and our guard
919 * page upper bound in 4(%esp). */
922 /* Now we begin our main scrub loop. */
925 /* If we're about to scrub the hard guard page, exit. */
927 jae ascs_check_guard_page
931 ascs_check_guard_page:
932 /* If we're about to scrub the guard page, and the guard
933 * page is protected, exit. */
938 cmpl $(NIL), THREAD_CONTROL_STACK_GUARD_PAGE_PROTECTED_OFFSET(%ecx)
941 /* Clear memory backwards to the start of the (4KiB) page */
948 /* If we're about to hit the hard guard page, exit. */
952 /* If the next (previous?) 4KiB page contains a non-zero
953 * word, continue scrubbing. */
963 SIZE(GNAME(arch_scrub_control_stack))