* for this instruction in the SIGILL handler and if we see it, we
* advance the EIP by two bytes to skip over ud2 instruction and
* call sigtrap_handler. */
-#if defined(LISP_FEATURE_DARWIN)
+#if defined(LISP_FEATURE_UD2_BREAKPOINTS)
#define END()
#define TRAP ud2
#else
fstp %st(0)
fstp %st(0)
- cld # clear out DF: Darwin, Solaris and Win32 at
- # least need this, and it should not hurt others
-
call *%eax # normal callout using Lisp stack
movl %eax,%ecx # remember integer return value
/* Check for a return FP value. */
fxam
- fnstsw %eax
+ fnstsw %ax
andl $0x4500,%eax
cmpl $0x4100,%eax
jne Lfp_rtn_value
.globl GNAME(call_into_lisp_first_time)
TYPE(GNAME(call_into_lisp_first_time))
-/* The *ALIEN-STACK* pointer is set up on the first call_into_lisp when
- * the stack changes. We don't worry too much about saving registers
+/* We don't worry too much about saving registers
* here, because we never expect to return from the initial call to lisp
* anyway */
pushl %ebp # Save old frame pointer.
movl %esp,%ebp # Establish new frame.
#ifndef LISP_FEATURE_WIN32
- movl %esp,ALIEN_STACK + SYMBOL_VALUE_OFFSET
movl GNAME(all_threads),%eax
- movl THREAD_CONTROL_STACK_START_OFFSET(%eax) ,%esp
- /* don't think too hard about what happens if we get interrupted
- * here */
- addl $(THREAD_CONTROL_STACK_SIZE),%esp
+ /* pthread machinery takes care of this for other threads */
+ movl THREAD_CONTROL_STACK_END_OFFSET(%eax) ,%esp
#else
/* Win32 -really- doesn't like you switching stacks out from under it. */
movl GNAME(all_threads),%eax
#endif
/* Alloc new frame. */
- mov %esp,%ebx # The current sp marks start of new frame.
- push %ebp # fp in save location S0
- sub $8,%esp # Ensure 3 slots are allocated, one above.
- mov %ebx,%ebp # Switch to new frame.
+ push %ebp # Dummy for return address
+ push %ebp # fp in save location S1
+ mov %esp,%ebp # The current sp marks start of new frame.
+ sub $4,%esp # Ensure 3 slots are allocated, two above.
call *CLOSURE_FUN_OFFSET(%eax)
* the undefined-function trampoline
*/
.text
- .align align_4byte,0x90
+ .align align_16byte,0x90
.globl GNAME(undefined_tramp)
TYPE(GNAME(undefined_tramp))
.byte 0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(undefined_tramp):
+ pop 4(%ebp) # Save return PC for backtrace.
TRAP
.byte trap_Error
.byte 2
ret
SIZE(GNAME(undefined_tramp))
+/* KLUDGE: FIND-ESCAPED-FRAME (SYS:SRC;CODE;DEBUG-INT.LISP) needs
+ * to know the name of the function immediately following the
+ * undefined-function trampoline. */
+
/*
* the closure trampoline
*/
.text
- .align align_4byte,0x90
+ .align align_16byte,0x90
.globl GNAME(closure_tramp)
TYPE(GNAME(closure_tramp))
.byte 0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
SIZE(GNAME(closure_tramp))
.text
- .align align_4byte,0x90
+ .align align_16byte,0x90
.globl GNAME(funcallable_instance_tramp)
TYPE(GNAME(funcallable_instance_tramp))
GNAME(funcallable_instance_tramp):
/*
* fun-end breakpoint magic
*/
+
+/*
+ * For an explanation of the magic involved in function-end
+ * breakpoints, see the implementation in ppc-assem.S.
+ */
+
.text
.globl GNAME(fun_end_breakpoint_guts)
- .align align_4byte
+ .align align_16byte
GNAME(fun_end_breakpoint_guts):
/* Multiple Value return */
jc multiple_value_return
\f
.globl GNAME(do_pending_interrupt)
TYPE(GNAME(do_pending_interrupt))
- .align align_4byte,0x90
+ .align align_16byte,0x90
GNAME(do_pending_interrupt):
TRAP
.byte trap_PendingInterrupt
* So only eax, ecx, and edx need special care here.
*
* ALLOC factors out the logic of calling alloc(): stack alignment, etc.
- * The TMP argument must be a preserved register different from the the
- * SIZE argument (if it is a register.)
*
* DEFINE_ALLOC_TO_FOO defines an alloction routine.
*/
#ifdef LISP_FEATURE_DARWIN
-#define ALLOC(size,tmp) \
- pushl tmp; /* Save TMP */ \
- movl %esp,tmp; /* Save ESP to TMP */ \
- andl $0xfffffff0,%esp; /* Align stack */ \
- pushl $0; /* Padding */ \
- pushl size; /* Argument to alloc */ \
- cld; /* Clear DF */ \
+#define ALLOC(size) \
+ pushl %ebp; /* Save EBP */ \
+ movl %esp,%ebp; /* Save ESP to EBP */ \
+ pushl $0; /* Reserve space for arg */ \
+ andl $0xfffffff0,%esp; /* Align stack to 16 bytes */ \
+ movl size, (%esp); /* Argument to alloc */ \
call GNAME(alloc); \
- movl tmp,%esp; /* Restore ESP from TMP */ \
- popl tmp; /* Restore TMP */
+ movl %ebp,%esp; /* Restore ESP from EBP */ \
+ popl %ebp; /* Restore EBP */
#else
-#define ALLOC(size,tmp) \
+#define ALLOC(size) \
pushl size; /* Argument to alloc */ \
- cld; /* Clear DF */ \
call GNAME(alloc); \
addl $4,%esp; /* Pop argument */
#endif
#define DEFINE_ALLOC_TO_EAX(name,size) \
.globl GNAME(name); \
TYPE(GNAME(name)); \
- .align align_4byte,0x90; \
+ .align align_16byte,0x90; \
GNAME(name): \
pushl %ecx; /* Save ECX and EDX */ \
pushl %edx; \
- ALLOC(size,%esi) \
+ ALLOC(size) \
popl %edx; /* Restore ECX and EDX */ \
popl %ecx; \
ret; \
#define DEFINE_ALLOC_TO_ECX(name,size) \
.globl GNAME(name); \
TYPE(GNAME(name)); \
- .align align_4byte,0x90; \
+ .align align_16byte,0x90; \
GNAME(name): \
pushl %eax; /* Save EAX and EDX */ \
pushl %edx; \
- ALLOC(size,%esi) \
+ ALLOC(size) \
movl %eax,%ecx; /* Result to destination */ \
popl %edx; \
popl %eax; \
#define DEFINE_ALLOC_TO_EDX(name,size) \
.globl GNAME(name); \
TYPE(GNAME(name)); \
- .align align_4byte,0x90; \
+ .align align_16byte,0x90; \
GNAME(name): \
pushl %eax; /* Save EAX and ECX */ \
pushl %ecx; \
- ALLOC(size,%edi) \
+ ALLOC(size) \
movl %eax,%edx; /* Restore EAX and ECX */ \
popl %ecx; \
popl %eax; \
ret; \
SIZE(GNAME(name))
-#define DEFINE_ALLOC_TO_REG(name,reg,size,tmp) \
+#define DEFINE_ALLOC_TO_REG(name,reg,size) \
.globl GNAME(name); \
TYPE(GNAME(name)); \
- .align align_4byte,0x90; \
+ .align align_16byte,0x90; \
GNAME(name): \
pushl %eax; /* Save EAX, ECX, and EDX */ \
pushl %ecx; \
pushl %edx; \
- ALLOC(size,tmp) \
+ ALLOC(size) \
movl %eax,reg; /* Restore them */ \
popl %edx; \
popl %ecx; \
DEFINE_ALLOC_TO_EDX(alloc_8_to_edx,$8)
DEFINE_ALLOC_TO_EDX(alloc_16_to_edx,$16)
-DEFINE_ALLOC_TO_REG(alloc_to_ebx,%ebx,%ebx,%edi)
-DEFINE_ALLOC_TO_REG(alloc_8_to_ebx,%ebx,$8,%edi)
-DEFINE_ALLOC_TO_REG(alloc_16_to_ebx,%ebx,$16,%esi)
+DEFINE_ALLOC_TO_REG(alloc_to_ebx,%ebx,%ebx)
+DEFINE_ALLOC_TO_REG(alloc_8_to_ebx,%ebx,$8)
+DEFINE_ALLOC_TO_REG(alloc_16_to_ebx,%ebx,$16)
-DEFINE_ALLOC_TO_REG(alloc_to_esi,%esi,%esi,%edi)
-DEFINE_ALLOC_TO_REG(alloc_8_to_esi,%esi,$8,%edi)
-DEFINE_ALLOC_TO_REG(alloc_16_to_esi,%esi,$16,%edi)
+DEFINE_ALLOC_TO_REG(alloc_to_esi,%esi,%esi)
+DEFINE_ALLOC_TO_REG(alloc_8_to_esi,%esi,$8)
+DEFINE_ALLOC_TO_REG(alloc_16_to_esi,%esi,$16)
-DEFINE_ALLOC_TO_REG(alloc_to_edi,%edi,%edi,%esi)
-DEFINE_ALLOC_TO_REG(alloc_8_to_edi,%edi,$8,%esi)
-DEFINE_ALLOC_TO_REG(alloc_16_to_edi,%edi,$16,%esi)
+DEFINE_ALLOC_TO_REG(alloc_to_edi,%edi,%edi)
+DEFINE_ALLOC_TO_REG(alloc_8_to_edi,%edi,$8)
+DEFINE_ALLOC_TO_REG(alloc_16_to_edi,%edi,$16)
/* Called from lisp when an inline allocation overflows.
* Every register except the result needs to be preserved.
#define START_REGION GNAME(boxed_region)
#endif
-#define ALLOC_OVERFLOW(size,tmp) \
+#define ALLOC_OVERFLOW(size) \
/* Calculate the size for the allocation. */ \
subl START_REGION,size; \
- ALLOC(size,tmp)
+ ALLOC(size)
/* This routine handles an overflow with eax=crfp+size. So the
size=eax-crfp. */
- .align align_4byte
+ .align align_16byte
.globl GNAME(alloc_overflow_eax)
TYPE(GNAME(alloc_overflow_eax))
GNAME(alloc_overflow_eax):
pushl %ecx # Save ecx
pushl %edx # Save edx
- ALLOC_OVERFLOW(%eax,%esi)
+ ALLOC_OVERFLOW(%eax)
popl %edx # Restore edx.
popl %ecx # Restore ecx.
ret
SIZE(GNAME(alloc_overflow_eax))
- .align align_4byte
+ .align align_16byte
.globl GNAME(alloc_overflow_ecx)
TYPE(GNAME(alloc_overflow_ecx))
GNAME(alloc_overflow_ecx):
pushl %eax # Save eax
pushl %edx # Save edx
- ALLOC_OVERFLOW(%ecx,%esi)
+ ALLOC_OVERFLOW(%ecx)
movl %eax,%ecx # setup the destination.
popl %edx # Restore edx.
popl %eax # Restore eax.
ret
SIZE(GNAME(alloc_overflow_ecx))
- .align align_4byte
+ .align align_16byte
.globl GNAME(alloc_overflow_edx)
TYPE(GNAME(alloc_overflow_edx))
GNAME(alloc_overflow_edx):
pushl %eax # Save eax
pushl %ecx # Save ecx
- ALLOC_OVERFLOW(%edx,%esi)
+ ALLOC_OVERFLOW(%edx)
movl %eax,%edx # setup the destination.
popl %ecx # Restore ecx.
popl %eax # Restore eax.
/* This routine handles an overflow with ebx=crfp+size. So the
size=ebx-crfp. */
- .align align_4byte
+ .align align_16byte
.globl GNAME(alloc_overflow_ebx)
TYPE(GNAME(alloc_overflow_ebx))
GNAME(alloc_overflow_ebx):
pushl %eax # Save eax
pushl %ecx # Save ecx
pushl %edx # Save edx
- ALLOC_OVERFLOW(%ebx,%edi)
+ ALLOC_OVERFLOW(%ebx)
movl %eax,%ebx # setup the destination.
popl %edx # Restore edx.
popl %ecx # Restore ecx.
/* This routine handles an overflow with esi=crfp+size. So the
size=esi-crfp. */
- .align align_4byte
+ .align align_16byte
.globl GNAME(alloc_overflow_esi)
TYPE(GNAME(alloc_overflow_esi))
GNAME(alloc_overflow_esi):
pushl %eax # Save eax
pushl %ecx # Save ecx
pushl %edx # Save edx
- ALLOC_OVERFLOW(%esi,%edi)
+ ALLOC_OVERFLOW(%esi)
movl %eax,%esi # setup the destination.
popl %edx # Restore edx.
popl %ecx # Restore ecx.
ret
SIZE(GNAME(alloc_overflow_esi))
- .align align_4byte
+ .align align_16byte
.globl GNAME(alloc_overflow_edi)
TYPE(GNAME(alloc_overflow_edi))
GNAME(alloc_overflow_edi):
pushl %eax # Save eax
pushl %ecx # Save ecx
pushl %edx # Save edx
- ALLOC_OVERFLOW(%edi,%esi)
+ ALLOC_OVERFLOW(%edi)
movl %eax,%edi # setup the destination.
popl %edx # Restore edx.
popl %ecx # Restore ecx.
* and EIP from the exception context and use it to fake
* up a stack frame which will skip over the system SEH
* code. */
- .align align_4byte
+ .align align_16byte
.globl GNAME(exception_handler_wrapper)
TYPE(GNAME(exception_handler_wrapper))
GNAME(exception_handler_wrapper):
#endif
#ifdef LISP_FEATURE_DARWIN
- .align align_4byte
+ .align align_16byte
.globl GNAME(call_into_lisp_tramp)
TYPE(GNAME(call_into_lisp_tramp))
GNAME(call_into_lisp_tramp):
SIZE(call_into_lisp_tramp)
#endif
- .align align_4byte,0x90
+ .align align_16byte,0x90
.globl GNAME(post_signal_tramp)
TYPE(GNAME(post_signal_tramp))
GNAME(post_signal_tramp):
\f
.globl GNAME(fast_bzero_pointer)
.data
- .align align_4byte
+ .align align_16byte
GNAME(fast_bzero_pointer):
/* Variable containing a pointer to the bzero function to use.
* Initially points to a basic function. Change this variable
.long GNAME(fast_bzero_base)
\f
.text
- .align align_8byte,0x90
+ .align align_16byte,0x90
.globl GNAME(fast_bzero)
TYPE(GNAME(fast_bzero))
GNAME(fast_bzero):
\f
.text
- .align align_8byte,0x90
+ .align align_16byte,0x90
.globl GNAME(fast_bzero_detect)
TYPE(GNAME(fast_bzero_detect))
GNAME(fast_bzero_detect):
\f
.text
- .align align_8byte,0x90
+ .align align_16byte,0x90
.globl GNAME(fast_bzero_sse)
TYPE(GNAME(fast_bzero_sse))
\f
.text
- .align align_8byte,0x90
+ .align align_16byte,0x90
.globl GNAME(fast_bzero_base)
TYPE(GNAME(fast_bzero_base))
xor %eax, %eax /* Zero EAX */
shr $2, %ecx /* Amount of 4-byte blocks to copy */
jz Lend_base
- cld /* Set direction of STOSL to increment */
rep
stosl /* Store EAX to *EDI, ECX times, incrementing
pop %eax
ret
SIZE(GNAME(fast_bzero_base))
-
-\f
+
+\f
+/* When LISP_FEATURE_C_STACK_IS_CONTROL_STACK, we cannot safely scrub
+ * the control stack from C, largely due to not knowing where the
+ * active stack frame ends. On such platforms, we reimplement the
+ * core scrubbing logic in assembly, in this case here:
+ */
+ .text
+ .align align_16byte,0x90
+ .globl GNAME(arch_scrub_control_stack)
+ TYPE(GNAME(arch_scrub_control_stack))
+GNAME(arch_scrub_control_stack):
+ /* We are passed three parameters:
+ * A (struct thread *) at [ESP+4],
+ * the address of the guard page at [ESP+8], and
+ * the address of the hard guard page at [ESP+12].
+ * We may trash EAX, ECX, and EDX with impunity.
+ * [ESP] is our return address, [ESP-4] is the first
+ * stack slot to scrub. */
+
+ /* We start by setting up our scrub pointer in EAX, our
+ * guard page upper bound in ECX, and our hard guard
+ * page upper bound in EDX. */
+ lea -4(%esp), %eax
+ mov GNAME(os_vm_page_size),%edx
+ mov %edx, %ecx
+ add 8(%esp), %ecx
+ add 12(%esp), %edx
+
+ /* We need to do a memory operation relative to the
+ * thread pointer, so put it in %ecx and our guard
+ * page upper bound in 4(%esp). */
+ xchg 4(%esp), %ecx
+
+ /* Now we begin our main scrub loop. */
+ascs_outer_loop:
+
+ /* If we're about to scrub the hard guard page, exit. */
+ cmp %edx, %eax
+ jae ascs_check_guard_page
+ cmp 12(%esp), %eax
+ ja ascs_finished
+
+ascs_check_guard_page:
+ /* If we're about to scrub the guard page, and the guard
+ * page is protected, exit. */
+ cmp 4(%esp), %eax
+ jae ascs_clear_loop
+ cmp 8(%esp), %eax
+ jbe ascs_clear_loop
+ cmpl $(NIL), THREAD_CONTROL_STACK_GUARD_PAGE_PROTECTED_OFFSET(%ecx)
+ jne ascs_finished
+
+ /* Clear memory backwards to the start of the (4KiB) page */
+ascs_clear_loop:
+ movl $0, (%eax)
+ test $0xfff, %eax
+ lea -4(%eax), %eax
+ jnz ascs_clear_loop
+
+ /* If we're about to hit the hard guard page, exit. */
+ cmp %edx, %eax
+ jae ascs_finished
+
+ /* If the next 4KiB page below (the stack grows downward)
+ * contains a non-zero word, continue scrubbing. */
+ascs_check_loop:
+ testl $-1, (%eax)
+ jnz ascs_outer_loop
+ test $0xfff, %eax
+ lea -4(%eax), %eax
+ jnz ascs_check_loop
+
+ascs_finished:
+ ret
+ SIZE(GNAME(arch_scrub_control_stack))
+\f
END()
-
\ No newline at end of file