* files for more information.
*/
\f
-#include "x86-validate.h"
-
#define LANGUAGE_ASSEMBLY
+#include "validate.h"
#include "sbcl.h"
/* Minimize conditionalization for different OS naming schemes. */
/* NOTE(review): fragment of a larger routine — the owning label and the
 * surrounding instructions are elided by this diff hunk, so the routine
 * cannot be identified from here alone (presumably a call-into-lisp path;
 * verify against the full file). The -/+ pair below only renames
 * CLOSURE_FUNCTION_OFFSET to CLOSURE_FUN_OFFSET; the instruction itself
 * is unchanged. */
mov %ebx,%ebp # Switch to new frame.
/* Indirect the closure. */
/* %eax is treated as a closure object: call through its function slot. */
- call *CLOSURE_FUNCTION_OFFSET(%eax)
+ call *CLOSURE_FUN_OFFSET(%eax)
/* Multi-value return; blow off any extra values. */
/* Restore %esp from the frame pointer copy made above, discarding
 * anything the callee left on the stack. */
mov %ebx, %esp
/* closure_tramp: on entry %eax is treated as an fdefn object — its
 * function slot is loaded below — and control then tail-jumps through
 * the loaded object's closure-function slot. The -/+ pairs in this hunk
 * only rename the *_FUNCTION_* offset constants to the shorter *_FUN_*
 * spelling; no instruction changes. */
.global GNAME(closure_tramp)
.type GNAME(closure_tramp),@function
GNAME(closure_tramp):
/* Replace the fdefn in %eax with the function object it names. */
- movl FDEFN_FUNCTION_OFFSET(%eax),%eax
+ movl FDEFN_FUN_OFFSET(%eax),%eax
/* FIXME: The '*' after "jmp" in the next line is from PVE's
 * patch posted to the CMU CL mailing list Oct 6, 1999. It looks
 * reasonable, and it certainly seems as though if CMU CL needs it,
 * SBCL needs it too, but I haven't actually verified that it's
 * right. It would be good to find a way to force the flow of
 * control through here to test it. */
/* Tail-call (jmp, not call): the callee returns directly to
 * closure_tramp's caller. */
- jmp *CLOSURE_FUNCTION_OFFSET(%eax)
+ jmp *CLOSURE_FUN_OFFSET(%eax)
.size GNAME(closure_tramp), .-GNAME(closure_tramp)
/*
- * function-end breakpoint magic
+ * fun-end breakpoint magic
 */
/* NOTE(review): this hunk elides interior context lines (the comment a
 * few lines below is cut off mid-sentence by the diff, and code between
 * the jmp and the multiple_value_return label is not shown). The _guts /
 * _end label pair presumably brackets a code template that is copied
 * elsewhere for fun-end breakpoints — verify against the full file.
 * The -/+ pairs only rename function_end_* symbols to fun_end_* and
 * trap_FunctionEndBreakpoint to trap_FunEndBreakpoint. */
.text
- .global GNAME(function_end_breakpoint_guts)
+ .global GNAME(fun_end_breakpoint_guts)
.align align_4byte
-GNAME(function_end_breakpoint_guts):
+GNAME(fun_end_breakpoint_guts):
/* Multiple Value return */
jmp multiple_value_return
/* Single value return: The eventual return will now use the
multiple_value_return:
- .global GNAME(function_end_breakpoint_trap)
-GNAME(function_end_breakpoint_trap):
+ .global GNAME(fun_end_breakpoint_trap)
+GNAME(fun_end_breakpoint_trap):
/* Breakpoint trap: int3 raises the debug trap; the byte that follows
 * identifies the trap type (presumably read by the trap handler from
 * the faulting PC — confirm against the runtime's signal handler). */
int3
- .byte trap_FunctionEndBreakpoint
+ .byte trap_FunEndBreakpoint
/* Execution must not fall through past the trap. */
hlt # We should never return here.
- .global GNAME(function_end_breakpoint_end)
-GNAME(function_end_breakpoint_end):
+ .global GNAME(fun_end_breakpoint_end)
+GNAME(fun_end_breakpoint_end):
\f
/* NOTE(review): the diff elides the label and body of
 * do_pending_interrupt between the .global declaration and the ret —
 * only the symbol declaration, the final ret, and the .size directive
 * are visible in this hunk. */
.global GNAME(do_pending_interrupt)
ret
.size GNAME(do_pending_interrupt),.-GNAME(do_pending_interrupt)
\f
/* NOTE(review): this deletion hunk removes the WANT_CGC-only fastcopy16
 * helper in its entirety — no replacement appears in this hunk. As
 * deleted below, it copied `bytes` (16(%ebp), assumed a multiple of 8)
 * from src (12(%ebp)) to dst (8(%ebp)): it saved the callee-saved
 * registers, converted the byte count to 8-byte units (sarl $3), copied
 * one 8-byte unit first if that count was odd, then looped copying 16
 * bytes per iteration with %ebp repurposed as the loop counter, and
 * restored the saved registers before returning. */
-#ifdef WANT_CGC
-/* This is a copy function which is optimized for the Pentium and
- * works OK on 486 as well. This assumes (does not check) that the
- * input byte count is a multiple of 8 bytes (one Lisp object).
- * This code takes advantage of pairing in the Pentium as well
- * as the 128-bit cache line.
- */
- .global GNAME(fastcopy16)
- .type GNAME(fastcopy16),@function
- .align align_4byte,0x90
-GNAME(fastcopy16):
- pushl %ebp
- movl %esp,%ebp
- movl 8(%ebp), %edx # dst
- movl 12(%ebp),%eax # src
- movl 16(%ebp),%ecx # bytes
- pushl %ebx
- pushl %esi
- pushl %edi
- movl %edx,%edi
- movl %eax,%esi
- sarl $3,%ecx # number 8-byte units
- testl $1,%ecx # odd?
- jz Lquad
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- leal 8(%esi),%esi
- leal 8(%edi),%edi
-Lquad: sarl $1,%ecx # count 16-byte units
- jz Lend
- movl %ecx,%ebp # use ebp for loop counter
- .align align_16byte,0x90
-Ltop:
- movl (%edi),%eax # prefetch! MAJOR Pentium win..
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl %eax, (%edi)
- movl %ebx, 4(%edi)
- movl %ecx, 8(%edi)
- movl %edx,12(%edi)
- leal 16(%esi),%esi
- leal 16(%edi),%edi
- decl %ebp
- jnz Ltop # non-prefixed jump saves cycles
-Lend:
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- .size GNAME(fastcopy16),.-GNAME(fastcopy16)
-#endif
-\f
#ifdef GENCGC
/* This is a fast bzero using the FPU. The first argument is the start
* address which needs to be aligned on an 8 byte boundary, the second