/*
 * very-low-level utilities for runtime support
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#define LANGUAGE_ASSEMBLY

#include "genesis/closure.h"
#include "genesis/fdefn.h"
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"
#include "genesis/thread.h"
/* Minimize conditionalization for different OS naming schemes.
 *
 * (As of sbcl-0.8.10, this seems no longer to be much of an issue,
 * since everyone has converged on ELF. If this generality really
 * turns out not to matter, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 *
 * (Except Win32, which is unlikely ever to be ELF, sorry. -- AB 2005-12-08)
 */
#if defined __linux__ || defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ || defined __sun
#define GNAME(var) var
#else
#define GNAME(var) _##var
#endif
/* Get the right type of alignment. Linux, FreeBSD and NetBSD (but not OpenBSD)
 * want alignment in bytes.
 *
 * (As in the GNAME() definitions above, as of sbcl-0.8.10, this seems
 * no longer to be much of an issue, since everyone has converged on
 * the same value. If this generality really turns out not to
 * matter any more, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 */
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__sun) || defined(LISP_FEATURE_WIN32)
#define align_16byte    16
#else
#define align_16byte    4
#endif
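/* (In the second branch the .align operand is interpreted as a power of
 * two, so align_16byte = 4 there means 2^4 = 16-byte alignment.) */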
/* The assembler used for win32 doesn't like .type or .size directives,
 * so we wrap them in macros that are defined to be no-ops on win32.
 * Hopefully this still works on other platforms.
 */
#ifndef LISP_FEATURE_WIN32
#define TYPE(name) .type name,@function
#define SIZE(name) .size name,.-name
#else
#define TYPE(name)
#define SIZE(name)
#endif
        .global GNAME(foreign_function_call_active)
        .global GNAME(all_threads)

/*
 * A call to call_into_c preserves esi, edi, and ebp.
 * (The C function will preserve ebx, esi, edi, and ebp across its
 * function call, but we trash ebx ourselves by using it to save the
 * return Lisp address.)
 *
 * Return values are in eax, and maybe edx for quads, or st(0) for
 * floats.
 *
 * This should work for Lisp calls C calls Lisp calls C, and so on.
 */
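/* (On entry %eax holds the address of the C function to call -- note the
 * "call *%eax" below -- and the caller has already set up the outgoing
 * arguments on the Lisp stack, hence the "normal callout using Lisp
 * stack" remark.) */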
        .align  align_16byte,0x90
        .global GNAME(call_into_c)
        TYPE(GNAME(call_into_c))
GNAME(call_into_c):
        movl    $1,GNAME(foreign_function_call_active)

/* Save the return Lisp address in ebx. */
        popl    %ebx

/* Setup the NPX for C. */
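/* (Lisp's convention keeps all eight x87 registers filled, while the C
 * ABI expects an empty floating-point stack, so the x87 stack is emptied
 * before the callout and refilled with zeros afterwards -- see the
 * "Ensure no regs are empty" fldz sequences below.) */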
#ifdef LISP_FEATURE_WIN32
        cld                     # Win32 C code expects the direction flag cleared.
#endif
        call    *%eax           # normal callout using Lisp stack
        movl    %eax,%ecx       # remember integer return value

/* Check for a return FP value. */

/* The integer return value is in eax (or eax,edx for 64-bit results). */
/* Set up the NPX stack for Lisp. */
        fldz                    # Ensure no regs are empty.

/* Restore the return value. */
        movl    %ecx,%eax       # maybe return value

        movl    $0,GNAME(foreign_function_call_active)
/* The return result is in st(0). */
/* Set up the NPX stack for Lisp, placing the result in st(0). */
        fldz                    # Ensure no regs are empty.
        fxch    %st(7)          # Move the result back to st(0).

/* We don't need to restore eax, because the result is in st(0). */

        movl    $0,GNAME(foreign_function_call_active)

        SIZE(GNAME(call_into_c))
        .global GNAME(call_into_lisp_first_time)
        TYPE(GNAME(call_into_lisp_first_time))

/* The *ALIEN-STACK* pointer is set up on the first call_into_lisp when
 * the stack changes. We don't worry too much about saving registers
 * here, because we never expect to return from the initial call to Lisp
 * anyway. */

        .align  align_16byte,0x90
GNAME(call_into_lisp_first_time):
        pushl   %ebp            # Save old frame pointer.
        movl    %esp,%ebp       # Establish new frame.
#ifndef LISP_FEATURE_WIN32
        movl    %esp,ALIEN_STACK + SYMBOL_VALUE_OFFSET
        movl    GNAME(all_threads),%eax
        movl    THREAD_CONTROL_STACK_START_OFFSET(%eax),%esp
        /* don't think too hard about what happens if we get interrupted
         * here */
        addl    $THREAD_CONTROL_STACK_SIZE-4,%esp
#else
        /* Win32 -really- doesn't like you switching stacks out from under it. */
        movl    GNAME(all_threads),%eax
#endif
        .global GNAME(call_into_lisp)
        TYPE(GNAME(call_into_lisp))

/* The C conventions require that ebx, esi, edi, and ebp be preserved
 * across function calls. */

        .align  align_16byte,0x90
GNAME(call_into_lisp):
        pushl   %ebp            # Save old frame pointer.
        movl    %esp,%ebp       # Establish new frame.
/* Save the NPX state. */
        fwait                   # Catch any pending NPX exceptions.
        subl    $108,%esp       # Make room for the NPX state.
        fnsave  (%esp)          # Save and reset the NPX.

        movl    (%esp),%eax     # Load NPX control word.
        andl    $0xfffff2ff,%eax        # Set rounding mode to nearest.
        orl     $0x00000200,%eax        # Set precision to doubles (53-bit mantissa).
        pushl   %eax
        fldcw   (%esp)          # Recover modes.
        popl    %eax
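/* (x87 control-word layout: bits 8-9 are the precision-control field and
 * bits 10-11 the rounding-control field, so the and/or above leave
 * PC = 10 (53-bit significand) and RC = 00 (round to nearest).) */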
        fldz                    # Ensure no FP regs are empty.

/* Save C regs: ebx esi edi. */
        pushl   %ebx
        pushl   %esi
        pushl   %edi

/* Clear descriptor regs. */
        xorl    %eax,%eax       # lexenv
        xorl    %ebx,%ebx       # available
        xorl    %ecx,%ecx       # arg count
        xorl    %edx,%edx       # first arg
        xorl    %edi,%edi       # second arg
        xorl    %esi,%esi       # third arg
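/* (These are all boxed descriptor registers from Lisp's point of view;
 * clearing them means the GC never sees stale C values in registers it
 * would otherwise scavenge as Lisp pointers.) */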
/* no longer in function call */
        movl    %eax, GNAME(foreign_function_call_active)

        movl    %esp,%ebx       # remember current stack
        pushl   %ebx            # Save entry stack on (maybe) new stack.
        /* Establish Lisp args. */
        movl    8(%ebp),%eax    # lexenv?
        movl    12(%ebp),%ebx   # address of arg vec
        movl    16(%ebp),%ecx   # num args
        shll    $2,%ecx         # Make num args into fixnum.
        movl    (%ebx),%edx     # arg0
        movl    4(%ebx),%edi    # arg1
        movl    8(%ebx),%esi    # arg2

        /* Registers eax, ecx, edx, edi, and esi are now live. */
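        /* (This is the register part of the Lisp calling convention:
         * %eax carries the lexenv/closure, %ecx the argument count as a
         * fixnum -- hence the shll $2 above -- and %edx/%edi/%esi the
         * first three arguments.) */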
        /* Alloc new frame. */
        mov     %esp,%ebx       # The current sp marks start of new frame.
        push    %ebp            # fp in save location S0
        sub     $8,%esp         # Ensure 3 slots are allocated, one above.
        mov     %ebx,%ebp       # Switch to new frame.

        call    *CLOSURE_FUN_OFFSET(%eax)

        /* If the function returned multiple values, it will return to
           this point; lose them. */

        /* A single-value function returns here. */
/* Restore the stack, in case there was a stack change. */

/* Restore C regs: ebx esi edi. */
        popl    %edi
        popl    %esi
        popl    %ebx

/* Restore the NPX state. */

        movl    %edx,%eax       # c-val
        SIZE(GNAME(call_into_lisp))
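/* (For reference, the C side declares this entry point roughly as
 *     lispobj call_into_lisp(lispobj fn, lispobj *args, int nargs);
 * which matches the 8(%ebp)/12(%ebp)/16(%ebp) argument loads above.) */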
/* Support for saving and restoring the NPX state from C. */
        .global GNAME(fpu_save)
        TYPE(GNAME(fpu_save))
GNAME(fpu_save):
        movl    4(%esp),%eax
        fnsave  (%eax)          # Save the NPX state. (resets NPX)
        ret
        SIZE(GNAME(fpu_save))

        .global GNAME(fpu_restore)
        TYPE(GNAME(fpu_restore))
GNAME(fpu_restore):
        movl    4(%esp),%eax
        frstor  (%eax)          # Restore the NPX state.
        ret
        SIZE(GNAME(fpu_restore))
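/* (Presumably called from C as something like
 *     void fpu_save(void *state);  void fpu_restore(void *state);
 * where the buffer must be big enough for an FNSAVE image, i.e. the
 * same 108 bytes reserved by "subl $108,%esp" in call_into_lisp.) */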
/*
 * the undefined-function trampoline
 */
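/* (This code is installed as the raw function address of fdefns whose
 * function is undefined; reaching it signals an UNDEFINED_FUN_ERROR with
 * the offending fdefn in %eax, as the sc_DescriptorReg byte below
 * indicates.) */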
        .align  align_4byte,0x90
        .global GNAME(undefined_tramp)
        TYPE(GNAME(undefined_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(undefined_tramp):
        .byte   UNDEFINED_FUN_ERROR
        .byte   sc_DescriptorReg        # eax in the Descriptor-reg SC
        SIZE(GNAME(undefined_tramp))
/*
 * the closure trampoline
 */
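/* (Likewise installed as an fdefn's raw function address, for fdefns
 * whose function is a closure: on entry %eax holds the fdefn, so we
 * fetch its function slot and tail-jump to the closure's code.) */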
        .align  align_4byte,0x90
        .global GNAME(closure_tramp)
        TYPE(GNAME(closure_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(closure_tramp):
        movl    FDEFN_FUN_OFFSET(%eax),%eax
        /* FIXME: The '*' after "jmp" in the next line is from PVE's
         * patch posted to the CMU CL mailing list Oct 6, 1999. It looks
         * reasonable, and it certainly seems as though if CMU CL needs it,
         * SBCL needs it too, but I haven't actually verified that it's
         * right. It would be good to find a way to force the flow of
         * control through here to test it. */
        jmp     *CLOSURE_FUN_OFFSET(%eax)
        SIZE(GNAME(closure_tramp))
/*
 * fun-end breakpoint magic
 */
        .global GNAME(fun_end_breakpoint_guts)
GNAME(fun_end_breakpoint_guts):
        /* Multiple Value return */
        jmp     multiple_value_return
        /* Single value return: The eventual return will now use the
           multiple values return convention but with a return values
           count of one. */
        movl    %esp,%ebx       # Setup ebx - the ofp.
        subl    $4,%esp         # Allocate one stack slot for the return value.
        movl    $4,%ecx         # Setup ecx for one return value.
        movl    $NIL,%edi       # default second value
        movl    $NIL,%esi       # default third value

multiple_value_return:

        .global GNAME(fun_end_breakpoint_trap)
GNAME(fun_end_breakpoint_trap):
        .byte   trap_FunEndBreakpoint
        hlt                     # We should never return here.

        .global GNAME(fun_end_breakpoint_end)
GNAME(fun_end_breakpoint_end):
        .global GNAME(do_pending_interrupt)
        TYPE(GNAME(do_pending_interrupt))
        .align  align_4byte,0x90
GNAME(do_pending_interrupt):
        int3
        .byte   trap_PendingInterrupt
        ret
        SIZE(GNAME(do_pending_interrupt))
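/* (Lisp code calls this when a deferred interrupt needs servicing: the
 * int3/trap_PendingInterrupt pair raises SIGTRAP, and the runtime's trap
 * handler runs the pending interrupt before control returns here.) */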
/*
 * Allocate bytes and return the start of the allocated space
 * in the specified destination register.
 *
 * In the general case the size will be in the destination register.
 *
 * All registers must be preserved except the destination.
 * The C conventions will preserve ebx, esi, edi, and ebp.
 * So only eax, ecx, and edx need special care here.
 */
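/* (Each alloc_<size>_to_<reg> entry point below follows the same pattern:
 * save whichever of eax/ecx/edx are not the destination, push the request
 * size, call the C allocator -- GNAME(alloc) -- pop the size argument,
 * move the result into the destination register, restore the saved
 * registers, and return.) */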
        .globl  GNAME(alloc_to_eax)
        TYPE(GNAME(alloc_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   %eax            # Push the size.
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        SIZE(GNAME(alloc_to_eax))

        .globl  GNAME(alloc_8_to_eax)
        TYPE(GNAME(alloc_8_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_8_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   $8              # Push the size.
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        SIZE(GNAME(alloc_8_to_eax))
        .globl  GNAME(alloc_16_to_eax)
        TYPE(GNAME(alloc_16_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_16_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   $16             # Push the size.
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        SIZE(GNAME(alloc_16_to_eax))

        .globl  GNAME(alloc_to_ecx)
        TYPE(GNAME(alloc_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   %ecx            # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        SIZE(GNAME(alloc_to_ecx))
        .globl  GNAME(alloc_8_to_ecx)
        TYPE(GNAME(alloc_8_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   $8              # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        SIZE(GNAME(alloc_8_to_ecx))

        .globl  GNAME(alloc_16_to_ecx)
        TYPE(GNAME(alloc_16_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   $16             # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        SIZE(GNAME(alloc_16_to_ecx))
        .globl  GNAME(alloc_to_edx)
        TYPE(GNAME(alloc_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   %edx            # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        SIZE(GNAME(alloc_to_edx))

        .globl  GNAME(alloc_8_to_edx)
        TYPE(GNAME(alloc_8_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   $8              # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        SIZE(GNAME(alloc_8_to_edx))

        .globl  GNAME(alloc_16_to_edx)
        TYPE(GNAME(alloc_16_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   $16             # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        SIZE(GNAME(alloc_16_to_edx))
        .globl  GNAME(alloc_to_ebx)
        TYPE(GNAME(alloc_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ebx            # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_to_ebx))

        .globl  GNAME(alloc_8_to_ebx)
        TYPE(GNAME(alloc_8_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   $8              # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_8_to_ebx))

        .globl  GNAME(alloc_16_to_ebx)
        TYPE(GNAME(alloc_16_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   $16             # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_16_to_ebx))
        .globl  GNAME(alloc_to_esi)
        TYPE(GNAME(alloc_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %esi            # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_to_esi))

        .globl  GNAME(alloc_8_to_esi)
        TYPE(GNAME(alloc_8_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_8_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   $8              # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_8_to_esi))

        .globl  GNAME(alloc_16_to_esi)
        TYPE(GNAME(alloc_16_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_16_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   $16             # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_16_to_esi))
        .globl  GNAME(alloc_to_edi)
        TYPE(GNAME(alloc_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %edi            # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_to_edi))

        .globl  GNAME(alloc_8_to_edi)
        TYPE(GNAME(alloc_8_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_8_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   $8              # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_8_to_edi))

        .globl  GNAME(alloc_16_to_edi)
        TYPE(GNAME(alloc_16_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_16_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   $16             # Push the size.
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        SIZE(GNAME(alloc_16_to_edi))
/* Called from Lisp when an inline allocation overflows.
   Every register except the result needs to be preserved.
   We depend on C to preserve ebx, esi, edi, and ebp.
   But where necessary we must save eax, ecx, and edx ourselves. */

#ifdef LISP_FEATURE_SB_THREAD
#define START_REGION %fs:THREAD_ALLOC_REGION_OFFSET
#else
#define START_REGION GNAME(boxed_region)
#endif
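/* (START_REGION names the free pointer of the currently open allocation
 * region: the per-thread one, reached through the %fs-relative thread
 * structure, when threads are enabled, otherwise the global boxed_region.
 * The overflow handlers below recover the request size by subtracting it
 * from the register that arrived holding free-pointer + size.) */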
/* This routine handles an overflow with eax=crfp+size. So the
   size is recovered by subtracting the region's free pointer from eax. */
        .globl  GNAME(alloc_overflow_eax)
        TYPE(GNAME(alloc_overflow_eax))
GNAME(alloc_overflow_eax):
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%eax
        pushl   %eax            # Push the size
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        SIZE(GNAME(alloc_overflow_eax))
        .globl  GNAME(alloc_overflow_ecx)
        TYPE(GNAME(alloc_overflow_ecx))
GNAME(alloc_overflow_ecx):
        pushl   %eax            # Save eax
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%ecx
        pushl   %ecx            # Push the size
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %eax            # Restore eax.
        SIZE(GNAME(alloc_overflow_ecx))
        .globl  GNAME(alloc_overflow_edx)
        TYPE(GNAME(alloc_overflow_edx))
GNAME(alloc_overflow_edx):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%edx
        pushl   %edx            # Push the size
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        SIZE(GNAME(alloc_overflow_edx))
/* This routine handles an overflow with ebx=crfp+size. So the
   size is recovered by subtracting the region's free pointer from ebx. */
        .globl  GNAME(alloc_overflow_ebx)
        TYPE(GNAME(alloc_overflow_ebx))
GNAME(alloc_overflow_ebx):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%ebx
        pushl   %ebx            # Push the size
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        SIZE(GNAME(alloc_overflow_ebx))
/* This routine handles an overflow with esi=crfp+size. So the
   size is recovered by subtracting the region's free pointer from esi. */
        .globl  GNAME(alloc_overflow_esi)
        TYPE(GNAME(alloc_overflow_esi))
GNAME(alloc_overflow_esi):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%esi
        pushl   %esi            # Push the size
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        SIZE(GNAME(alloc_overflow_esi))
        .globl  GNAME(alloc_overflow_edi)
        TYPE(GNAME(alloc_overflow_edi))
GNAME(alloc_overflow_edi):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%edi
        pushl   %edi            # Push the size
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        SIZE(GNAME(alloc_overflow_edi))
        .align  align_4byte,0x90
        .globl  GNAME(post_signal_tramp)
        TYPE(GNAME(post_signal_tramp))
GNAME(post_signal_tramp):
        /* this is notionally the second half of a function whose first half
         * doesn't exist. This is where call_into_lisp returns when called
         * using return_to_lisp_function */
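        /* (The missing "first half" is fabricated by the runtime's
         * return_to_lisp_function machinery in interrupt.c, which rewrites
         * a signal context so that the interrupted thread calls
         * call_into_lisp with this trampoline as the return address; the
         * code below then discards the fabricated arguments and restores
         * the saved register state.) */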
        addl    $12,%esp        /* clear call_into_lisp args from stack */
        popal                   /* restore registers */
        SIZE(GNAME(post_signal_tramp))
#ifdef LISP_FEATURE_WIN32
/*
 * This is part of the funky magic for exception handling on win32.
 * See sigtrap_emulator() in win32-os.c for details.
 */
        .global GNAME(sigtrap_trampoline)
GNAME(sigtrap_trampoline):
        call    GNAME(sigtrap_wrapper)
        .byte   trap_ContextRestore
        hlt                     # We should never return here.

/*
 * This is part of the funky magic for exception handling on win32.
 * See handle_exception() in win32-os.c for details.
 */
        .global GNAME(exception_trampoline)
GNAME(exception_trampoline):
        call    GNAME(handle_win32_exception_wrapper)
        .byte   trap_ContextRestore
        hlt                     # We should never return here.
#endif
/* fast_bzero implementations and code to detect which implementation
 * to use.
 */

        .global GNAME(fast_bzero_pointer)
GNAME(fast_bzero_pointer):
        /* Variable containing a pointer to the bzero function to use.
         * Initially points to a basic function. Change this variable
         * to fast_bzero_detect if OS supports SSE. */
        .long   GNAME(fast_bzero_base)
        .align  align_8byte,0x90
        .global GNAME(fast_bzero)
        TYPE(GNAME(fast_bzero))
GNAME(fast_bzero):
        /* Indirect function call */
        jmp     *GNAME(fast_bzero_pointer)
        SIZE(GNAME(fast_bzero))
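/* (From C this looks roughly like
 *     void fast_bzero(void *start, size_t nbytes);
 * with page-aligned start and size, per the comments in the
 * implementations below. When the OS may support SSE, the runtime points
 * fast_bzero_pointer at fast_bzero_detect, whose first invocation then
 * installs either fast_bzero_sse or fast_bzero_base and jumps to it.) */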
        .align  align_8byte,0x90
        .global GNAME(fast_bzero_detect)
        TYPE(GNAME(fast_bzero_detect))
GNAME(fast_bzero_detect):
        /* Decide whether to use SSE, MMX or REP version */
        push    %eax            /* CPUID uses EAX-EDX */
        test    $0x04000000, %edx       /* SSE2 needed for MOVNTDQ */
        /* Originally there was another case here for using the
         * MOVNTQ instruction for processors that supported MMX but
         * not SSE2. This turned out to be a loss especially on
         * Athlons (where this instruction is apparently microcoded
         * somewhat slowly). So for simplicity revert to REP STOSL
         * for all non-SSE2 processors.
         */
        movl    $GNAME(fast_bzero_base), GNAME(fast_bzero_pointer)
        movl    $GNAME(fast_bzero_sse), GNAME(fast_bzero_pointer)
        jmp     *GNAME(fast_bzero_pointer)
        SIZE(GNAME(fast_bzero_detect))
        .align  align_8byte,0x90
        .global GNAME(fast_bzero_sse)
        TYPE(GNAME(fast_bzero_sse))

GNAME(fast_bzero_sse):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address.
         */
        push    %esi            /* Save temporary registers */
        push    %edi
        mov     16(%esp), %esi  /* Parameter: amount of bytes to fill */
        mov     12(%esp), %edi  /* Parameter: start address */
        shr     $6, %esi        /* Amount of 64-byte blocks to copy */
        jz      Lend_sse        /* If none, stop */
        movups  %xmm7, -16(%esp)        /* Save XMM register */
        xorps   %xmm7, %xmm7    /* Zero the XMM register */
Lloop_sse:
        /* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
         * non-caching double-quadword moving variant, i.e. the memory areas
         * we're touching are not fetched into the L1 cache, since we're just
         * going to overwrite the memory soon anyway.
         */
        movntdq %xmm7, 0(%edi)
        movntdq %xmm7, 16(%edi)
        movntdq %xmm7, 32(%edi)
        movntdq %xmm7, 48(%edi)

        add     $64, %edi       /* Advance pointer */
        dec     %esi            /* Decrement 64-byte block count */
        jnz     Lloop_sse
        movups  -16(%esp), %xmm7        /* Restore the XMM register */
        sfence                  /* Ensure that weakly ordered writes are flushed. */
Lend_sse:
        mov     12(%esp), %esi  /* Parameter: start address */
        prefetcht0 0(%esi)      /* Prefetch the start of the block into cache,
                                 * since it's likely to be used immediately. */
        pop     %edi            /* Restore temp registers */
        pop     %esi
        ret
        SIZE(GNAME(fast_bzero_sse))
        .align  align_8byte,0x90
        .global GNAME(fast_bzero_base)
        TYPE(GNAME(fast_bzero_base))

GNAME(fast_bzero_base):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address.
         */
        push    %eax            /* Save temporary registers */
        push    %ecx
        push    %edi
        mov     20(%esp), %ecx  /* Parameter: amount of bytes to fill */
        mov     16(%esp), %edi  /* Parameter: start address */
        xor     %eax, %eax      /* Zero EAX */
        shr     $2, %ecx        /* Amount of 4-byte blocks to copy */
        cld                     /* Set direction of STOSL to increment */
        rep stosl               /* Store EAX to *EDI, ECX times, incrementing
                                 * EDI by 4 after each store */
        pop     %edi            /* Restore temp registers */
        pop     %ecx
        pop     %eax
        ret
        SIZE(GNAME(fast_bzero_base))