/*
 * very-low-level utilities for runtime support
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#define LANGUAGE_ASSEMBLY
#include "sbcl.h"
#include "validate.h"
#include "genesis/closure.h"
#include "genesis/fdefn.h"
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"
#include "genesis/thread.h"
/* Minimize conditionalization for different OS naming schemes.
 *
 * (As of sbcl-0.8.10, this seems no longer to be much of an issue,
 * since everyone has converged on ELF. If this generality really
 * turns out not to matter, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 *
 * (Except Win32, which is unlikely ever to be ELF, sorry. -- AB 2005-12-08)
 */
#if defined __linux__ || defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ || defined __sun
#define GNAME(var) var
#else
#define GNAME(var) _##var
#endif
/* Get the right type of alignment. Linux, FreeBSD and NetBSD (but not OpenBSD)
 * want alignment in bytes.
 *
 * (As in the GNAME() definitions above, as of sbcl-0.8.10, this seems
 * no longer to be much of an issue, since everyone has converged on
 * the same value. If this generality really turns out not to
 * matter any more, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 */
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__sun) || defined(LISP_FEATURE_WIN32)
#define align_4byte 4
#define align_8byte 8
#define align_16byte 16
#else
#define align_4byte 2
#define align_8byte 3
#define align_16byte 4
#endif
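/* Concretely: on the byte-count platforms ".align align_16byte" becomes
 * ".align 16", i.e. align to 16 bytes, while on the power-of-two
 * platforms it becomes ".align 4", i.e. 2^4 = 16 bytes. Same alignment,
 * different spelling. */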
/* The assembler used for win32 doesn't like .type or .size directives,
 * so we want to conditionally suppress them. So let's wrap them in
 * macros that are defined to be no-ops on win32. Hopefully this still
 * works on other platforms. */
#ifndef LISP_FEATURE_WIN32
#define TYPE(name) .type name,@function
#define SIZE(name) .size name,.-name
#else
#define TYPE(name)
#define SIZE(name)
#endif
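/* Illustration (not part of the build): with the ELF definitions above,
 *
 *     .global GNAME(fpu_save)
 *     TYPE(GNAME(fpu_save))
 *     SIZE(GNAME(fpu_save))
 *
 * assembles as
 *
 *     .global fpu_save
 *     .type fpu_save,@function
 *     .size fpu_save,.-fpu_save
 *
 * whereas on win32 GNAME() prepends an underscore and TYPE()/SIZE()
 * expand to nothing. */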
        .text
        .global GNAME(foreign_function_call_active)
        .global GNAME(all_threads)
/*
 * A call to call_into_c preserves esi, edi, and ebp.
 * (The C function will preserve ebx, esi, edi, and ebp across its
 * function call, but we trash ebx ourselves by using it to save the
 * return Lisp address.)
 *
 * Return values are in eax and maybe edx for quads, or st(0) for
 * floats.
 *
 * This should work for Lisp calls C calls Lisp calls C ...
 */
        .align  align_16byte,0x90
        .global GNAME(call_into_c)
        TYPE(GNAME(call_into_c))
GNAME(call_into_c):
        movl    $1,GNAME(foreign_function_call_active)

        /* Save the return Lisp address in ebx. */
        popl    %ebx

        /* Set up the NPX for C. */
        fstp    %st(0)
        fstp    %st(0)
        fstp    %st(0)
        fstp    %st(0)
        fstp    %st(0)
        fstp    %st(0)
        fstp    %st(0)
        fstp    %st(0)
#ifdef LISP_FEATURE_WIN32
        cld                     # Win32 expects the direction flag cleared.
#endif
        call    *%eax           # normal callout using Lisp stack

        movl    %eax,%ecx       # remember integer return value
        /* Check for a return FP value. */
        fxam
        fnstsw  %ax
        andl    $0x4500,%eax
        cmpl    $0x4100,%eax
        jne     Lfp_rtn_value

        /* The return value is in eax, or eax,edx? */
        /* Set up the NPX stack for Lisp. */
        fldz                    # Ensure no regs are empty.
        fldz
        fldz
        fldz
        fldz
        fldz
        fldz
        fldz

        /* Restore the return value. */
        movl    %ecx,%eax       # maybe return value
        movl    $0,GNAME(foreign_function_call_active)
        /* Return. */
        jmp     *%ebx

Lfp_rtn_value:
        /* The return result is in st(0). */
        /* Set up the NPX stack for Lisp, placing the result in st(0). */
        fldz                    # Ensure no regs are empty.
        fldz
        fldz
        fldz
        fldz
        fldz
        fldz
        fxch    %st(7)          # Move the result back to st(0).

        /* We don't need to restore eax, because the result is in st(0). */

        movl    $0,GNAME(foreign_function_call_active)
        /* Return. */
        jmp     *%ebx

        SIZE(GNAME(call_into_c))
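/* Viewed from the C side, a callout through call_into_c behaves like an
 * ordinary cdecl call; a sketch of the contract (names illustrative, not
 * the real VOP interface):
 *
 *     double (*cfun)(double) = ...;   // address arrived in %eax
 *     double d = cfun(x);             // args were already pushed on the
 *                                     // Lisp stack by the caller
 *
 * Integer results come back in eax (and edx for 64-bit values), FP
 * results in st(0); the fxam test above distinguishes the two cases. */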
        .global GNAME(call_into_lisp_first_time)
        TYPE(GNAME(call_into_lisp_first_time))

/* The *ALIEN-STACK* pointer is set up on the first call_into_lisp when
 * the stack changes. We don't worry too much about saving registers
 * here, because we never expect to return from the initial call to lisp
 * anyway. */

        .align  align_16byte,0x90
GNAME(call_into_lisp_first_time):
        pushl   %ebp            # Save old frame pointer.
        movl    %esp,%ebp       # Establish new frame.
#ifndef LISP_FEATURE_WIN32
        movl    %esp,ALIEN_STACK + SYMBOL_VALUE_OFFSET
        movl    GNAME(all_threads),%eax
        movl    THREAD_CONTROL_STACK_START_OFFSET(%eax),%esp
        /* don't think too hard about what happens if we get interrupted
         * here */
        addl    $THREAD_CONTROL_STACK_SIZE-4,%esp
#else
/* Win32 -really- doesn't like you switching stacks out from under it. */
        movl    GNAME(all_threads),%eax
#endif
        jmp     Lstack
        .global GNAME(call_into_lisp)
        TYPE(GNAME(call_into_lisp))

/* The C conventions require that ebx, esi, edi, and ebp be preserved
 * across function calls. */

        .align  align_16byte,0x90
GNAME(call_into_lisp):
        pushl   %ebp            # Save old frame pointer.
        movl    %esp,%ebp       # Establish new frame.
Lstack:
/* Save the NPX state. */
        fwait                   # Catch any pending NPX exceptions.
        subl    $108,%esp       # Make room for the NPX state.
        fnsave  (%esp)          # save and reset NPX

        movl    (%esp),%eax     # Load NPX control word.
        andl    $0xfffff2ff,%eax # Set rounding mode to nearest.
        orl     $0x00000200,%eax # Set precision to 64 bits. (53-bit mantissa)
        pushl   %eax
        fldcw   (%esp)          # Recover modes.
        popl    %eax

        fldz                    # Ensure no FP regs are empty.
        fldz
        fldz
        fldz
        fldz
        fldz
        fldz
        fldz
/* Save C regs: ebx esi edi. */
        pushl   %ebx
        pushl   %esi
        pushl   %edi
/* Clear descriptor regs. */
        xorl    %eax,%eax       # lexenv
        xorl    %ebx,%ebx       # available
        xorl    %ecx,%ecx       # arg count
        xorl    %edx,%edx       # first arg
        xorl    %edi,%edi       # second arg
        xorl    %esi,%esi       # third arg

/* no longer in function call */
        movl    %eax,GNAME(foreign_function_call_active)

        movl    %esp,%ebx       # remember current stack
        pushl   %ebx            # Save entry stack on (maybe) new stack.
        /* Establish Lisp args. */
        movl    8(%ebp),%eax    # lexenv?
        movl    12(%ebp),%ebx   # address of arg vec
        movl    16(%ebp),%ecx   # num args
        shll    $2,%ecx         # Make num args into fixnum.
        cmpl    $0,%ecx
        je      Ldone
        movl    (%ebx),%edx     # arg0
        cmpl    $4,%ecx
        je      Ldone
        movl    4(%ebx),%edi    # arg1
        cmpl    $8,%ecx
        je      Ldone
        movl    8(%ebx),%esi    # arg2
Ldone:
        /* Registers eax, ecx, edx, edi, and esi are now live. */

        /* Alloc new frame. */
        mov     %esp,%ebx       # The current sp marks start of new frame.
        push    %ebp            # fp in save location S0
        sub     $8,%esp         # Ensure 3 slots are allocated, one above.
        mov     %ebx,%ebp       # Switch to new frame.

        call    *CLOSURE_FUN_OFFSET(%eax)
        /* If the function returned multiple values, it will return to
         * this point. Lose them. */
        jnc     LsingleValue
        movl    %ebx,%esp
LsingleValue:
        /* A single value function returns here. */
/* Restore the stack, in case there was a stack change. */
        popl    %esp            # c-sp

/* Restore C regs: ebx esi edi. */
        popl    %edi
        popl    %esi
        popl    %ebx

/* Restore the NPX state. */
        frstor  (%esp)
        addl    $108,%esp

        popl    %ebp            # c-sp
        movl    %edx,%eax       # c-val
        ret
        SIZE(GNAME(call_into_lisp))
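/* The C-side declaration matching this entry point is essentially
 *
 *     lispobj call_into_lisp(lispobj fun, lispobj *args, int nargs);
 *
 * For example, the runtime's funcall helpers build a small argument
 * vector and call through here (sketch):
 *
 *     lispobj args[2] = { arg0, arg1 };
 *     lispobj result = call_into_lisp(function, args, 2);
 */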
/* Support for saving and restoring the NPX state from C. */
        .global GNAME(fpu_save)
        TYPE(GNAME(fpu_save))
        .align  align_4byte,0x90
GNAME(fpu_save):
        movl    4(%esp),%eax
        fnsave  (%eax)          # Save the NPX state. (resets NPX)
        ret
        SIZE(GNAME(fpu_save))

        .global GNAME(fpu_restore)
        TYPE(GNAME(fpu_restore))
        .align  align_4byte,0x90
GNAME(fpu_restore):
        movl    4(%esp),%eax
        frstor  (%eax)          # Restore the NPX state.
        ret
        SIZE(GNAME(fpu_restore))
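/* C-side usage sketch: the save area is the 108-byte FNSAVE image,
 * matching the "subl $108,%esp" in call_into_lisp above.
 *
 *     char npx_state[108];
 *     fpu_save(npx_state);     // saves and reinitializes the NPX
 *     ...                      // code that may clobber the FPU
 *     fpu_restore(npx_state);
 */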
/*
 * the undefined-function trampoline
 */
        .align  align_4byte,0x90
        .global GNAME(undefined_tramp)
        TYPE(GNAME(undefined_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(undefined_tramp):
        int3
        .byte   trap_Error
        .byte   2
        .byte   UNDEFINED_FUN_ERROR
        .byte   sc_DescriptorReg # eax in the Descriptor-reg SC
        ret
        SIZE(GNAME(undefined_tramp))
/*
 * the closure trampoline
 */
        .align  align_4byte,0x90
        .global GNAME(closure_tramp)
        TYPE(GNAME(closure_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(closure_tramp):
        movl    FDEFN_FUN_OFFSET(%eax),%eax
        /* FIXME: The '*' after "jmp" in the next line is from PVE's
         * patch posted to the CMU CL mailing list Oct 6, 1999. It looks
         * reasonable, and it certainly seems as though if CMU CL needs it,
         * SBCL needs it too, but I haven't actually verified that it's
         * right. It would be good to find a way to force the flow of
         * control through here to test it. */
        jmp     *CLOSURE_FUN_OFFSET(%eax)
        SIZE(GNAME(closure_tramp))
/*
 * fun-end breakpoint magic
 */
        .global GNAME(fun_end_breakpoint_guts)
        .align  align_4byte
GNAME(fun_end_breakpoint_guts):
        /* Multiple Value return */
        jc      multiple_value_return
        /* Single value return: The eventual return will now use the
         * multiple values return convention but with a return values
         * count of one. */
        movl    %esp,%ebx       # Set up ebx - the ofp.
        subl    $4,%esp         # Allocate one stack slot for the return value.
        movl    $4,%ecx         # Set up ecx for one return value.
        movl    $NIL,%edi       # default second value
        movl    $NIL,%esi       # default third value
multiple_value_return:

        .global GNAME(fun_end_breakpoint_trap)
GNAME(fun_end_breakpoint_trap):
        int3
        .byte   trap_FunEndBreakpoint
        hlt                     # We should never return here.

        .global GNAME(fun_end_breakpoint_end)
GNAME(fun_end_breakpoint_end):
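/* The debugger treats the code between fun_end_breakpoint_guts and
 * fun_end_breakpoint_end as a template that it copies elsewhere, so the
 * code above must be position-independent; roughly (C-side sketch,
 * destination name illustrative):
 *
 *     int length = (char *)fun_end_breakpoint_end
 *                - (char *)fun_end_breakpoint_guts;
 *     memcpy(breakpoint_code_area, fun_end_breakpoint_guts, length);
 */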
        .global GNAME(do_pending_interrupt)
        TYPE(GNAME(do_pending_interrupt))
        .align  align_4byte,0x90
GNAME(do_pending_interrupt):
        int3
        .byte   trap_PendingInterrupt
        ret
        SIZE(GNAME(do_pending_interrupt))
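/* The int3 raises SIGTRAP, and the handler dispatches on the byte that
 * follows the breakpoint; a sketch of the C side (the real logic lives
 * in the runtime's sigtrap handler):
 *
 *     unsigned char trap = *(unsigned char *)(*os_context_pc_addr(context));
 *     if (trap == trap_PendingInterrupt) {
 *         arch_skip_instruction(context);
 *         interrupt_handle_pending(context);
 *     }
 */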
/*
 * Allocate bytes and return the start of the allocated space
 * in the specified destination register.
 *
 * In the general case the size will be in the destination register.
 *
 * All registers must be preserved except the destination.
 * The C conventions will preserve ebx, esi, edi, and ebp.
 * So only eax, ecx, and edx need special care here.
 */
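/* All of the routines below funnel into the C allocator, whose contract
 * is essentially (sketch of the declaration)
 *
 *     lispobj *alloc(int nbytes);   // returns the start of the space
 *
 * called with cdecl conventions, which is why each routine pushes the
 * size itself, calls, and then pops the argument again. */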
        .globl  GNAME(alloc_to_eax)
        TYPE(GNAME(alloc_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   %edx
        pushl   %eax            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        popl    %ecx
        ret
        SIZE(GNAME(alloc_to_eax))
        .globl  GNAME(alloc_8_to_eax)
        TYPE(GNAME(alloc_8_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_8_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        popl    %ecx
        ret
        SIZE(GNAME(alloc_8_to_eax))
        .globl  GNAME(alloc_16_to_eax)
        TYPE(GNAME(alloc_16_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_16_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        popl    %ecx
        ret
        SIZE(GNAME(alloc_16_to_eax))
        .globl  GNAME(alloc_to_ecx)
        TYPE(GNAME(alloc_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   %edx
        pushl   %ecx            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_ecx))
        .globl  GNAME(alloc_8_to_ecx)
        TYPE(GNAME(alloc_8_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_ecx))
        .globl  GNAME(alloc_16_to_ecx)
        TYPE(GNAME(alloc_16_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_ecx))
        .globl  GNAME(alloc_to_edx)
        TYPE(GNAME(alloc_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   %ecx
        pushl   %edx            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_edx))
        .globl  GNAME(alloc_8_to_edx)
        TYPE(GNAME(alloc_8_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   %ecx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_edx))
        .globl  GNAME(alloc_16_to_edx)
        TYPE(GNAME(alloc_16_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   %ecx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_edx))
        .globl  GNAME(alloc_to_ebx)
        TYPE(GNAME(alloc_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   %ebx            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_ebx))
        .globl  GNAME(alloc_8_to_ebx)
        TYPE(GNAME(alloc_8_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_ebx))
        .globl  GNAME(alloc_16_to_ebx)
        TYPE(GNAME(alloc_16_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_ebx))
        .globl  GNAME(alloc_to_esi)
        TYPE(GNAME(alloc_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   %esi            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_esi))
        .globl  GNAME(alloc_8_to_esi)
        TYPE(GNAME(alloc_8_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_8_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_esi))
        .globl  GNAME(alloc_16_to_esi)
        TYPE(GNAME(alloc_16_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_16_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_esi))
        .globl  GNAME(alloc_to_edi)
        TYPE(GNAME(alloc_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   %edi            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_edi))
        .globl  GNAME(alloc_8_to_edi)
        TYPE(GNAME(alloc_8_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_8_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_edi))
        .globl  GNAME(alloc_16_to_edi)
        TYPE(GNAME(alloc_16_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_16_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_edi))
/* Called from lisp when an inline allocation overflows.
 * Every register except the result needs to be preserved.
 * We depend on C to preserve ebx, esi, edi, and ebp.
 * So, where necessary, eax, ecx, and edx must be saved here. */
#ifdef LISP_FEATURE_SB_THREAD
#define START_REGION %fs:THREAD_ALLOC_REGION_OFFSET
#else
#define START_REGION GNAME(boxed_region)
#endif
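/* Worked example: if the region's current free pointer (crfp) is
 * 0x10001000 and Lisp inline-allocated 32 bytes, the destination
 * register arrives here holding crfp+size = 0x10001020. The
 * "subl START_REGION,<reg>" below recovers the size,
 * 0x10001020 - 0x10001000 = 32, which is the argument alloc() wants. */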
/* This routine handles an overflow with eax=crfp+size. So the
 * size is eax-crfp. */
        .align  align_4byte
        .globl  GNAME(alloc_overflow_eax)
        TYPE(GNAME(alloc_overflow_eax))
GNAME(alloc_overflow_eax):
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%eax
        pushl   %eax            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        ret
        SIZE(GNAME(alloc_overflow_eax))
        .align  align_4byte
        .globl  GNAME(alloc_overflow_ecx)
        TYPE(GNAME(alloc_overflow_ecx))
GNAME(alloc_overflow_ecx):
        pushl   %eax            # Save eax
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%ecx
        pushl   %ecx            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_ecx))
        .align  align_4byte
        .globl  GNAME(alloc_overflow_edx)
        TYPE(GNAME(alloc_overflow_edx))
GNAME(alloc_overflow_edx):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%edx
        pushl   %edx            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_edx))
/* This routine handles an overflow with ebx=crfp+size. So the
 * size is ebx-crfp. */
        .align  align_4byte
        .globl  GNAME(alloc_overflow_ebx)
        TYPE(GNAME(alloc_overflow_ebx))
GNAME(alloc_overflow_ebx):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%ebx
        pushl   %ebx            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_ebx))
/* This routine handles an overflow with esi=crfp+size. So the
 * size is esi-crfp. */
        .align  align_4byte
        .globl  GNAME(alloc_overflow_esi)
        TYPE(GNAME(alloc_overflow_esi))
GNAME(alloc_overflow_esi):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%esi
        pushl   %esi            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_esi))
        .align  align_4byte
        .globl  GNAME(alloc_overflow_edi)
        TYPE(GNAME(alloc_overflow_edi))
GNAME(alloc_overflow_edi):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%edi
        pushl   %edi            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_edi))
        .align  align_4byte,0x90
        .globl  GNAME(post_signal_tramp)
        TYPE(GNAME(post_signal_tramp))
GNAME(post_signal_tramp):
        /* this is notionally the second half of a function whose first half
         * doesn't exist. This is where call_into_lisp returns when called
         * using return_to_lisp_function */
        addl    $12,%esp        /* clear call_into_lisp args from stack */
        popal                   /* restore registers */
        popfl
        leave
        ret
        SIZE(GNAME(post_signal_tramp))
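/* Sketch of the stack this code unwinds, as laid out by
 * arrange_return_to_lisp_function() in interrupt.c:
 *
 *     12 bytes   args for call_into_lisp     -- addl $12,%esp
 *     32 bytes   pushal-format register dump -- popal
 *      4 bytes   saved eflags                -- popfl
 *     saved ebp and return address           -- leave; ret
 */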
#ifdef LISP_FEATURE_WIN32
/*
 * This is part of the funky magic for exception handling on win32.
 * see sigtrap_emulator() in win32-os.c for details.
 */
        .global GNAME(sigtrap_trampoline)
GNAME(sigtrap_trampoline):
        pushl   %eax
        pushl   %ebp
        movl    %esp,%ebp
        call    GNAME(sigtrap_wrapper)
        pop     %eax
        pop     %eax
        int3
        .byte   trap_ContextRestore
        hlt                     # We should never return here.
/*
 * This is part of the funky magic for exception handling on win32.
 * see handle_exception() in win32-os.c for details.
 */
        .global GNAME(exception_trampoline)
GNAME(exception_trampoline):
        pushl   %ebp
        movl    %esp,%ebp
        call    GNAME(handle_win32_exception_wrapper)
        pop     %eax
        int3
        .byte   trap_ContextRestore
        hlt                     # We should never return here.
#endif
/* fast_bzero implementations and code to detect which implementation
 * to use. */
        .global GNAME(fast_bzero_pointer)
        .data
        .align  align_4byte
GNAME(fast_bzero_pointer):
        /* Variable containing a pointer to the bzero function to use.
         * Initially points to a basic function. Change this variable
         * to fast_bzero_detect if OS supports SSE. */
        .long   GNAME(fast_bzero_base)
        .text
        .align  align_8byte,0x90
        .global GNAME(fast_bzero)
        TYPE(GNAME(fast_bzero))
GNAME(fast_bzero):
        /* Indirect function call */
        jmp     *GNAME(fast_bzero_pointer)
        SIZE(GNAME(fast_bzero))
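/* C-side view (sketch; names illustrative): the GC zeroes whole,
 * page-aligned blocks through
 *
 *     void fast_bzero(void *start, size_t length);  // 4096-aligned
 *     fast_bzero(page_start, npages * 4096);
 */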
        .align  align_8byte,0x90
        .global GNAME(fast_bzero_detect)
        TYPE(GNAME(fast_bzero_detect))
GNAME(fast_bzero_detect):
        /* Decide whether to use SSE, MMX or REP version */
        push    %eax            /* CPUID uses EAX-EDX */
        push    %ebx
        push    %ecx
        push    %edx
        mov     $1,%eax
        cpuid
        test    $0x04000000,%edx /* SSE2 needed for MOVNTDQ */
        jnz     Lsse2
        /* Originally there was another case here for using the
         * MOVNTQ instruction for processors that supported MMX but
         * not SSE2. This turned out to be a loss especially on
         * Athlons (where this instruction is apparently microcoded
         * somewhat slowly). So for simplicity revert to REP STOSL
         * for all non-SSE2 processors.
         */
        movl    $GNAME(fast_bzero_base), GNAME(fast_bzero_pointer)
        jmp     Lrestore
Lsse2:
        movl    $GNAME(fast_bzero_sse), GNAME(fast_bzero_pointer)
Lrestore:
        pop     %edx
        pop     %ecx
        pop     %ebx
        pop     %eax
        jmp     *GNAME(fast_bzero_pointer)

        SIZE(GNAME(fast_bzero_detect))
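/* The detect-then-patch pattern above is the assembly analogue of this
 * C sketch: fast_bzero_pointer starts out aimed at the portable routine
 * (or at the detector, per the comment on the variable), the detector
 * probes cpuid once, repoints the pointer at the best implementation,
 * and tail-calls it; every later fast_bzero call dispatches through the
 * patched pointer directly:
 *
 *     static void (*fast_bzero_pointer)(void *, size_t) = fast_bzero_base;
 *     void fast_bzero(void *p, size_t n) { fast_bzero_pointer(p, n); }
 */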
        .align  align_8byte,0x90
        .global GNAME(fast_bzero_sse)
        TYPE(GNAME(fast_bzero_sse))

GNAME(fast_bzero_sse):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address.
         */
        push    %esi            /* Save temporary registers */
        push    %edi
        mov     16(%esp),%esi   /* Parameter: number of bytes to fill */
        mov     12(%esp),%edi   /* Parameter: start address */
        shr     $6,%esi         /* Number of 64-byte blocks to fill */
        jz      Lend_sse        /* If none, stop */
        movups  %xmm7,-16(%esp) /* Save XMM register */
        xorps   %xmm7,%xmm7     /* Zero the XMM register */

Lloop_sse:
        /* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
         * non-caching double-quadword moving variant, i.e. the memory areas
         * we're touching are not fetched into the L1 cache, since we're just
         * going to overwrite the memory soon anyway.
         */
        movntdq %xmm7,0(%edi)
        movntdq %xmm7,16(%edi)
        movntdq %xmm7,32(%edi)
        movntdq %xmm7,48(%edi)

        add     $64,%edi        /* Advance pointer */
        dec     %esi            /* Decrement 64-byte block count */
        jnz     Lloop_sse
        movups  -16(%esp),%xmm7 /* Restore the XMM register */
        sfence                  /* Ensure that weakly ordered writes are flushed. */
Lend_sse:
        mov     12(%esp),%esi   /* Parameter: start address */
        prefetcht0 0(%esi)      /* Prefetch the start of the block into cache,
                                 * since it's likely to be used immediately. */
        pop     %edi            /* Restore temp registers */
        pop     %esi
        ret
        SIZE(GNAME(fast_bzero_sse))
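/* C equivalent of the SSE loop above (sketch, minus the non-temporal
 * store hint and the sfence):
 *
 *     size_t i, blocks = length >> 6;        // 64-byte blocks
 *     char *p = start;
 *     for (i = 0; i < blocks; i++, p += 64)
 *         memset(p, 0, 64);                  // as 4 x 16-byte SSE stores
 */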
        .align  align_8byte,0x90
        .global GNAME(fast_bzero_base)
        TYPE(GNAME(fast_bzero_base))

GNAME(fast_bzero_base):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address.
         */
        push    %eax            /* Save temporary registers */
        push    %ecx
        push    %edi
        mov     20(%esp),%ecx   /* Parameter: number of bytes to fill */
        mov     16(%esp),%edi   /* Parameter: start address */
        xor     %eax,%eax       /* Zero EAX */
        shr     $2,%ecx         /* Number of 4-byte blocks to fill */
        jz      Lend_base
        cld                     /* Set direction of STOSL to increment */
        rep stosl               /* Store EAX to *EDI, ECX times, incrementing
                                 * EDI by 4 after each store */
Lend_base:
        pop     %edi            /* Restore temp registers */
        pop     %ecx
        pop     %eax
        ret
        SIZE(GNAME(fast_bzero_base))