/*
 * very-low-level utilities for runtime support
 *
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */
#define LANGUAGE_ASSEMBLY

#include "genesis/closure.h"
#include "genesis/fdefn.h"
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"
#include "genesis/thread.h"
/* Minimize conditionalization for different OS naming schemes.
 *
 * (As of sbcl-0.8.10, this seems no longer to be much of an issue,
 * since everyone has converged on ELF. If this generality really
 * turns out not to matter, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 *
 * (Except Win32, which is unlikely ever to be ELF, sorry. -- AB 2005-12-08)
 */
#if defined __linux__ || defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ || defined __sun
#define GNAME(var) var
#else
#define GNAME(var) _##var
#endif

#if defined __linux__ || defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ || defined __sun
#define GNAMEDOLLAR(var) $##var
#else
#define GNAMEDOLLAR(var) $_##var
#endif

#if defined __linux__ || defined __FreeBSD__ || defined __NetBSD__ || defined __OpenBSD__ || defined __sun
#define DOLLARLITERAL(var) $##var
#else
#define DOLLARLITERAL(var) $##(var)
#endif
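/* (For example, on the ELF platforms above GNAME(alloc) expands to plain
 * "alloc", while on platforms that prefix C symbols with an underscore it
 * expands to "_alloc"; GNAMEDOLLAR and DOLLARLITERAL do the same for
 * immediate operands.) */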
/* Get the right type of alignment. Linux, FreeBSD and NetBSD (but not OpenBSD)
 * want alignment in bytes.
 *
 * (As in the GNAME() definitions above, as of sbcl-0.8.10, this seems
 * no longer to be much of an issue, since everyone has converged on
 * the same value. If this generality really turns out not to
 * matter any more, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 */
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__sun) || defined(LISP_FEATURE_WIN32)
#define align_4byte     4
#define align_8byte     8
#define align_16byte    16
#else
#define align_4byte     2
#define align_8byte     3
#define align_16byte    4
#endif
/*
 * The assembler used for win32 doesn't like .type or .size directives,
 * so we want to conditionally kill them out. So let's wrap them in macros
 * that are defined to be no-ops on win32. Hopefully this still works on
 * other platforms.
 */
#if !defined(LISP_FEATURE_WIN32) && !defined(LISP_FEATURE_DARWIN)
#define TYPE(name) .type name,@function
#define SIZE(name) .size name,.-name
#else
#define TYPE(name)
#define SIZE(name)
#endif
#if defined(LISP_FEATURE_DARWIN)
        .globl  GNAME(foreign_function_call_active)
        .globl  GNAME(all_threads)
/*
 * A call to call_into_c preserves esi, edi, and ebp.
 * (The C function will preserve ebx, esi, edi, and ebp across its
 * function call, but we trash ebx ourselves by using it to save the
 * return Lisp address.)
 *
 * Return values are in eax and maybe edx for quads, or st(0) for
 * floats.
 *
 * This should work for Lisp calls C calls Lisp calls C ...
 */
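/* (Entry state, as inferred from the code below: the address of the C
 * function to call arrives in eax, and the Lisp return address is stashed
 * in ebx for the duration of the C call.) */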
        .align  align_16byte,0x90
        .globl  GNAME(call_into_c)
        TYPE(GNAME(call_into_c))
GNAME(call_into_c):
        movl    $1,GNAME(foreign_function_call_active)
        /* Save the return Lisp address in ebx. */

        /* Setup the NPX for C */
#ifdef LISP_FEATURE_WIN32
#endif
#ifdef LISP_FEATURE_DARWIN
        andl    $0xfffffff0,%esp        # align stack to 16-byte boundary before calling C
#endif
        call    *%eax                   # normal callout using Lisp stack
        movl    %eax,%ecx               # remember integer return value

        /* Check for a return FP value. */

        /* The return value is in eax, or eax,edx? */
        /* Set up the NPX stack for Lisp. */
        fldz                            # Ensure no regs are empty.

        /* Restore the return value. */
        movl    %ecx,%eax               # maybe return value
        movl    $0,GNAME(foreign_function_call_active)

        /* The return result is in st(0). */
        /* Set up the NPX stack for Lisp, placing the result in st(0). */
        fldz                            # Ensure no regs are empty.
        fxch    %st(7)                  # Move the result back to st(0).

        /* We don't need to restore eax, because the result is in st(0). */
        movl    $0,GNAME(foreign_function_call_active)

        SIZE(GNAME(call_into_c))
        .globl  GNAME(call_into_lisp_first_time)
        TYPE(GNAME(call_into_lisp_first_time))

/* The *ALIEN-STACK* pointer is set up on the first call_into_lisp when
 * the stack changes. We don't worry too much about saving registers
 * here, because we never expect to return from the initial call to lisp
 * anyway. */

        .align  align_16byte,0x90
GNAME(call_into_lisp_first_time):
        pushl   %ebp            # Save old frame pointer.
        movl    %esp,%ebp       # Establish new frame.
#ifndef LISP_FEATURE_WIN32
        movl    %esp,ALIEN_STACK + SYMBOL_VALUE_OFFSET
        movl    GNAME(all_threads),%eax
        movl    THREAD_CONTROL_STACK_START_OFFSET(%eax),%esp
        /* don't think too hard about what happens if we get interrupted
         * here */
        addl    DOLLARLITERAL(THREAD_CONTROL_STACK_SIZE),%esp
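        /* (The control stack grows downward on x86, so start + size is the
         * top of the stack; esp is pointed there before entering Lisp.) */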
#else
/* Win32 -really- doesn't like you switching stacks out from under it. */
        movl    GNAME(all_threads),%eax
#endif
        .globl  GNAME(call_into_lisp)
        TYPE(GNAME(call_into_lisp))

/* The C conventions require that ebx, esi, edi, and ebp be preserved
 * across function calls. */

        .align  align_16byte,0x90
GNAME(call_into_lisp):
        pushl   %ebp            # Save old frame pointer.
        movl    %esp,%ebp       # Establish new frame.
        /* Save the NPX state */
        fwait                   # Catch any pending NPX exceptions.
        subl    $108,%esp       # Make room for the NPX state.
        fnsave  (%esp)          # save and reset NPX

        movl    (%esp),%eax     # Load NPX control word.
        andl    $0xfffff2ff,%eax        # Set rounding mode to nearest.
        orl     $0x00000200,%eax        # Set precision to 64 bits. (53-bit mantissa)
        fldcw   (%esp)          # Recover modes.
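        /* (x87 control-word notes, for reference: bits 10-11 are the rounding
         * control, 00 = round to nearest; bits 8-9 are the precision control,
         * 10 = double precision, i.e. a 53-bit mantissa.) */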
        fldz                    # Ensure no FP regs are empty.

        /* Save C regs: ebx esi edi. */

        /* Clear descriptor regs. */
        xorl    %eax,%eax       # lexenv
        xorl    %ebx,%ebx       # available
        xorl    %ecx,%ecx       # arg count
        xorl    %edx,%edx       # first arg
        xorl    %edi,%edi       # second arg
        xorl    %esi,%esi       # third arg

        /* no longer in function call */
        movl    %eax, GNAME(foreign_function_call_active)
        movl    %esp,%ebx       # remember current stack
        pushl   %ebx            # Save entry stack on (maybe) new stack.

        /* Establish Lisp args. */
        movl    8(%ebp),%eax    # lexenv?
        movl    12(%ebp),%ebx   # address of arg vec
        movl    16(%ebp),%ecx   # num args
        shll    $2,%ecx         # Make num args into fixnum.
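        /* (On 32-bit x86 SBCL fixnums carry two zero tag bits, so shifting
         * the raw count left by 2 yields the count as a fixnum.) */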
        movl    (%ebx),%edx     # arg0
        movl    4(%ebx),%edi    # arg1
        movl    8(%ebx),%esi    # arg2

        /* Registers eax, ecx, edx, edi, and esi are now live. */

        /* Alloc new frame. */
        mov     %esp,%ebx       # The current sp marks start of new frame.
        push    %ebp            # fp in save location S0
        sub     $8,%esp         # Ensure 3 slots are allocated, one above.
        mov     %ebx,%ebp       # Switch to new frame.

        call    *CLOSURE_FUN_OFFSET(%eax)
        /* If the function returned multiple values, it will return to
           this point. Lose them. */

        /* A single value function returns here. */

        /* Restore the stack, in case there was a stack change. */

        /* Restore C regs: ebx esi edi. */

        /* Restore the NPX state. */

        movl    %edx,%eax       # c-val
        SIZE(GNAME(call_into_lisp))
/* support for saving and restoring the NPX state from C */
        .globl  GNAME(fpu_save)
        TYPE(GNAME(fpu_save))
GNAME(fpu_save):
        movl    4(%esp),%eax
        fnsave  (%eax)          # Save the NPX state. (resets NPX)
        ret
        SIZE(GNAME(fpu_save))

        .globl  GNAME(fpu_restore)
        TYPE(GNAME(fpu_restore))
GNAME(fpu_restore):
        movl    4(%esp),%eax
        frstor  (%eax)          # Restore the NPX state.
        ret
        SIZE(GNAME(fpu_restore))
/*
 * the undefined-function trampoline
 */
        .align  align_4byte,0x90
        .globl  GNAME(undefined_tramp)
        TYPE(GNAME(undefined_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(undefined_tramp):
        .byte   UNDEFINED_FUN_ERROR
        .byte   sc_DescriptorReg # eax in the Descriptor-reg SC
        SIZE(GNAME(undefined_tramp))
/*
 * the closure trampoline
 */
        .align  align_4byte,0x90
        .globl  GNAME(closure_tramp)
        TYPE(GNAME(closure_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(closure_tramp):
        movl    FDEFN_FUN_OFFSET(%eax),%eax
        /* FIXME: The '*' after "jmp" in the next line is from PVE's
         * patch posted to the CMU CL mailing list Oct 6, 1999. It looks
         * reasonable, and it certainly seems as though if CMU CL needs it,
         * SBCL needs it too, but I haven't actually verified that it's
         * right. It would be good to find a way to force the flow of
         * control through here to test it. */
        jmp     *CLOSURE_FUN_OFFSET(%eax)
        SIZE(GNAME(closure_tramp))
/*
 * fun-end breakpoint magic
 */
        .globl  GNAME(fun_end_breakpoint_guts)
GNAME(fun_end_breakpoint_guts):
        /* Multiple Value return */
        jc      multiple_value_return
        /* Single value return: The eventual return will now use the
           multiple values return convention but with a return values
           count of one. */
        movl    %esp,%ebx       # Setup ebx - the ofp.
        subl    $4,%esp         # Allocate one stack slot for the return value
        movl    $4,%ecx         # Setup ecx for one return value.
        movl    DOLLARLITERAL(NIL),%edi # default second value
        movl    DOLLARLITERAL(NIL),%esi # default third value
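        /* (The carry flag distinguishes the two return conventions above:
         * set means a multiple-value return, clear means a single value.
         * ecx carries the value count as a fixnum, so $4 is the fixnum 1.) */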
multiple_value_return:

        .globl  GNAME(fun_end_breakpoint_trap)
GNAME(fun_end_breakpoint_trap):
        .byte   trap_FunEndBreakpoint
        hlt                     # We should never return here.

        .globl  GNAME(fun_end_breakpoint_end)
GNAME(fun_end_breakpoint_end):
        .globl  GNAME(do_pending_interrupt)
        TYPE(GNAME(do_pending_interrupt))
        .align  align_4byte,0x90
GNAME(do_pending_interrupt):
        .byte   trap_PendingInterrupt
        SIZE(GNAME(do_pending_interrupt))
/*
 * Allocate bytes and return the start of the allocated space
 * in the specified destination register.
 *
 * In the general case the size will be in the destination register.
 *
 * All registers must be preserved except the destination.
 * The C conventions will preserve ebx, esi, edi, and ebp.
 * So only eax, ecx, and edx need special care here.
 */
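/* (Pattern of the stubs below: save the caller-saved registers that are not
 * the destination, push the request size, call the C allocator GNAME(alloc),
 * discard the argument, move the result from eax into the destination, and
 * restore the saved registers.) */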
        .globl  GNAME(alloc_to_eax)
        TYPE(GNAME(alloc_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   %edx
        pushl   %eax            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        popl    %ecx
        ret
        SIZE(GNAME(alloc_to_eax))
        .globl  GNAME(alloc_8_to_eax)
        TYPE(GNAME(alloc_8_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_8_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        popl    %ecx
        ret
        SIZE(GNAME(alloc_8_to_eax))
        .globl  GNAME(alloc_16_to_eax)
        TYPE(GNAME(alloc_16_to_eax))
        .align  align_4byte,0x90
GNAME(alloc_16_to_eax):
        pushl   %ecx            # Save ecx and edx as C could destroy them.
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore ecx and edx.
        popl    %ecx
        ret
        SIZE(GNAME(alloc_16_to_eax))
        .globl  GNAME(alloc_to_ecx)
        TYPE(GNAME(alloc_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   %edx
        pushl   %ecx            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_ecx))
        .globl  GNAME(alloc_8_to_ecx)
        TYPE(GNAME(alloc_8_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_ecx))
        .globl  GNAME(alloc_16_to_ecx)
        TYPE(GNAME(alloc_16_to_ecx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_ecx):
        pushl   %eax            # Save eax and edx as C could destroy them.
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore eax and edx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_ecx))
        .globl  GNAME(alloc_to_edx)
        TYPE(GNAME(alloc_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   %ecx
        pushl   %edx            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_edx))
        .globl  GNAME(alloc_8_to_edx)
        TYPE(GNAME(alloc_8_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   %ecx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_edx))
        .globl  GNAME(alloc_16_to_edx)
        TYPE(GNAME(alloc_16_to_edx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_edx):
        pushl   %eax            # Save eax and ecx as C could destroy them.
        pushl   %ecx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore eax and ecx.
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_edx))
        .globl  GNAME(alloc_to_ebx)
        TYPE(GNAME(alloc_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   %ebx            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_ebx))
        .globl  GNAME(alloc_8_to_ebx)
        TYPE(GNAME(alloc_8_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_8_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_ebx))
        .globl  GNAME(alloc_16_to_ebx)
        TYPE(GNAME(alloc_16_to_ebx))
        .align  align_4byte,0x90
GNAME(alloc_16_to_ebx):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_ebx))
        .globl  GNAME(alloc_to_esi)
        TYPE(GNAME(alloc_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   %esi            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_esi))
        .globl  GNAME(alloc_8_to_esi)
        TYPE(GNAME(alloc_8_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_8_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_esi))
        .globl  GNAME(alloc_16_to_esi)
        TYPE(GNAME(alloc_16_to_esi))
        .align  align_4byte,0x90
GNAME(alloc_16_to_esi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_esi))
        .globl  GNAME(alloc_to_edi)
        TYPE(GNAME(alloc_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   %edi            # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_to_edi))
        .globl  GNAME(alloc_8_to_edi)
        TYPE(GNAME(alloc_8_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_8_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $8              # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_8_to_edi))
        .globl  GNAME(alloc_16_to_edi)
        TYPE(GNAME(alloc_16_to_edi))
        .align  align_4byte,0x90
GNAME(alloc_16_to_edi):
        pushl   %eax            # Save eax, ecx, and edx as C could destroy them.
        pushl   %ecx
        pushl   %edx
        pushl   $16             # Push the size.
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore eax, ecx and edx.
        popl    %ecx
        popl    %eax
        ret
        SIZE(GNAME(alloc_16_to_edi))
/* Called from lisp when an inline allocation overflows.
   Every register except the result needs to be preserved.
   We depend on C to preserve ebx, esi, edi, and ebp.
   But where necessary must save eax, ecx, edx. */

#ifdef LISP_FEATURE_SB_THREAD
#define START_REGION %fs:THREAD_ALLOC_REGION_OFFSET
#else
#define START_REGION GNAME(boxed_region)
#endif
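/* (Reading of the code below: START_REGION names the current allocation
 * region, whose first word is the region's free pointer, so subtracting it
 * from the overflowing register recovers the requested size in bytes.) */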
/* This routine handles an overflow with eax=crfp+size. So the
   size can be recovered by subtracting the region's free pointer. */
        .globl  GNAME(alloc_overflow_eax)
        TYPE(GNAME(alloc_overflow_eax))
GNAME(alloc_overflow_eax):
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%eax
        pushl   %eax            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        ret
        SIZE(GNAME(alloc_overflow_eax))
        .globl  GNAME(alloc_overflow_ecx)
        TYPE(GNAME(alloc_overflow_ecx))
GNAME(alloc_overflow_ecx):
        pushl   %eax            # Save eax
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%ecx
        pushl   %ecx            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ecx       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_ecx))
        .globl  GNAME(alloc_overflow_edx)
        TYPE(GNAME(alloc_overflow_edx))
GNAME(alloc_overflow_edx):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%edx
        pushl   %edx            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edx       # Set up the destination.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_edx))
/* This routine handles an overflow with ebx=crfp+size. So the
   size can be recovered by subtracting the region's free pointer. */
        .globl  GNAME(alloc_overflow_ebx)
        TYPE(GNAME(alloc_overflow_ebx))
GNAME(alloc_overflow_ebx):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%ebx
        pushl   %ebx            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%ebx       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_ebx))
/* This routine handles an overflow with esi=crfp+size. So the
   size can be recovered by subtracting the region's free pointer. */
        .globl  GNAME(alloc_overflow_esi)
        TYPE(GNAME(alloc_overflow_esi))
GNAME(alloc_overflow_esi):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%esi
        pushl   %esi            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%esi       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_esi))
        .globl  GNAME(alloc_overflow_edi)
        TYPE(GNAME(alloc_overflow_edi))
GNAME(alloc_overflow_edi):
        pushl   %eax            # Save eax
        pushl   %ecx            # Save ecx
        pushl   %edx            # Save edx
        /* Calculate the size for the allocation. */
        subl    START_REGION,%edi
        pushl   %edi            # Push the size
        call    GNAME(alloc)
        addl    $4,%esp         # Pop the size arg.
        movl    %eax,%edi       # Set up the destination.
        popl    %edx            # Restore edx.
        popl    %ecx            # Restore ecx.
        popl    %eax            # Restore eax.
        ret
        SIZE(GNAME(alloc_overflow_edi))
        .align  align_4byte,0x90
        .globl  GNAME(post_signal_tramp)
        TYPE(GNAME(post_signal_tramp))
GNAME(post_signal_tramp):
        /* this is notionally the second half of a function whose first half
         * doesn't exist. This is where call_into_lisp returns when called
         * using return_to_lisp_function */
        addl    $12,%esp        /* clear call_into_lisp args from stack */
        popal                   /* restore registers */
        SIZE(GNAME(post_signal_tramp))
#ifdef LISP_FEATURE_WIN32
        /*
         * This is part of the funky magic for exception handling on win32.
         * see sigtrap_emulator() in win32-os.c for details.
         */
        .globl  GNAME(sigtrap_trampoline)
GNAME(sigtrap_trampoline):
        call    GNAME(sigtrap_wrapper)
        .byte   trap_ContextRestore
        hlt                     # We should never return here.
        /*
         * This is part of the funky magic for exception handling on win32.
         * see handle_exception() in win32-os.c for details.
         */
        .globl  GNAME(exception_trampoline)
GNAME(exception_trampoline):
        call    GNAME(handle_win32_exception_wrapper)
        .byte   trap_ContextRestore
        hlt                     # We should never return here.
#endif
/* fast_bzero implementations and code to detect which implementation
 * to use. */

        .globl  GNAME(fast_bzero_pointer)
GNAME(fast_bzero_pointer):
        /* Variable containing a pointer to the bzero function to use.
         * Initially points to a basic function. Change this variable
         * to fast_bzero_detect if OS supports SSE. */
        .long   GNAME(fast_bzero_base)
        .align  align_8byte,0x90
        .globl  GNAME(fast_bzero)
        TYPE(GNAME(fast_bzero))
GNAME(fast_bzero):
        /* Indirect function call */
        jmp     *GNAME(fast_bzero_pointer)
        SIZE(GNAME(fast_bzero))
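/* (Calling convention, inferred from the argument offsets used below: both
 * implementations take the start address and the number of bytes to clear
 * on the stack, i.e. roughly void fast_bzero(void *start, size_t nbytes)
 * when called from C.) */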
        .align  align_8byte,0x90
        .globl  GNAME(fast_bzero_detect)
        TYPE(GNAME(fast_bzero_detect))
GNAME(fast_bzero_detect):
        /* Decide whether to use SSE, MMX or REP version */
        push    %eax            /* CPUID uses EAX-EDX */
        test    $0x04000000, %edx       /* SSE2 needed for MOVNTDQ */
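        /* (0x04000000 is bit 26 of the CPUID leaf-1 EDX feature flags, which
         * indicates SSE2 support.) */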
        /* Originally there was another case here for using the
         * MOVNTQ instruction for processors that supported MMX but
         * not SSE2. This turned out to be a loss especially on
         * Athlons (where this instruction is apparently microcoded
         * somewhat slowly). So for simplicity revert to REP STOSL
         * for all non-SSE2 processors.
         */
        movl    GNAMEDOLLAR(fast_bzero_base), GNAME(fast_bzero_pointer)
        movl    GNAMEDOLLAR(fast_bzero_sse), GNAME(fast_bzero_pointer)
        jmp     *GNAME(fast_bzero_pointer)
        SIZE(GNAME(fast_bzero_detect))
        .align  align_8byte,0x90
        .globl  GNAME(fast_bzero_sse)
        TYPE(GNAME(fast_bzero_sse))

GNAME(fast_bzero_sse):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address.
         */
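        /* (Structure of the routine: the byte count is divided by 64 and each
         * pass of the store loop below writes 64 zero bytes with four 16-byte
         * MOVNTDQ stores; the 4096-byte alignment guarantee means there is no
         * partial block left over.) */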
        push    %esi            /* Save temporary registers */
        push    %edi
        mov     16(%esp), %esi  /* Parameter: amount of bytes to fill */
        mov     12(%esp), %edi  /* Parameter: start address */
        shr     $6, %esi        /* Amount of 64-byte blocks to copy */
        jz      Lend_sse        /* If none, stop */
        movups  %xmm7, -16(%esp) /* Save XMM register */
        xorps   %xmm7, %xmm7    /* Zero the XMM register */
        /* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
         * non-caching double-quadword moving variant, i.e. the memory areas
         * we're touching are not fetched into the L1 cache, since we're just
         * going to overwrite the memory soon anyway.
         */
        movntdq %xmm7, 0(%edi)
        movntdq %xmm7, 16(%edi)
        movntdq %xmm7, 32(%edi)
        movntdq %xmm7, 48(%edi)

        add     $64, %edi       /* Advance pointer */
        dec     %esi            /* Decrement 64-byte block count */
        movups  -16(%esp), %xmm7 /* Restore the XMM register */
        sfence                  /* Ensure that weakly ordered writes are flushed. */
        mov     12(%esp), %esi  /* Parameter: start address */
        prefetcht0 0(%esi)      /* Prefetch the start of the block into cache,
                                 * since it's likely to be used immediately. */
        pop     %edi            /* Restore temp registers */
        SIZE(GNAME(fast_bzero_sse))
        .align  align_8byte,0x90
        .globl  GNAME(fast_bzero_base)
        TYPE(GNAME(fast_bzero_base))

GNAME(fast_bzero_base):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address.
         */
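        /* (This fallback clears memory with REP STOSL: eax is zeroed, the byte
         * count is converted to a dword count, and STOSL stores eax to (%edi)
         * ecx times, advancing edi by 4 after each store.) */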
        push    %eax            /* Save temporary registers */
        push    %ecx
        push    %edi
        mov     20(%esp), %ecx  /* Parameter: amount of bytes to fill */
        mov     16(%esp), %edi  /* Parameter: start address */
        xor     %eax, %eax      /* Zero EAX */
        shr     $2, %ecx        /* Amount of 4-byte blocks to copy */
        cld                     /* Set direction of STOSL to increment */
        rep
        stosl                   /* Store EAX to *EDI, ECX times, incrementing
                                 * EDI by 4 after each store */
        pop     %edi            /* Restore temp registers */
        SIZE(GNAME(fast_bzero_base))