struct thread *th = arch_os_get_current_thread();
os_vm_address_t guard_page_address = CONTROL_STACK_GUARD_PAGE(th);
os_vm_address_t hard_guard_page_address = CONTROL_STACK_HARD_GUARD_PAGE(th);
- lispobj *sp;
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
- sp = (lispobj *)&sp - 1;
+ /* On these targets scrubbing from C is a bad idea, so we punt to
+ * a routine in $ARCH-assem.S. */
+ extern void arch_scrub_control_stack(struct thread *, os_vm_address_t, os_vm_address_t);
+ arch_scrub_control_stack(th, guard_page_address, hard_guard_page_address);
#else
- sp = access_control_stack_pointer(th);
-#endif
+ lispobj *sp = access_control_stack_pointer(th);
scrub:
if ((((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size)) &&
((os_vm_address_t)sp >= hard_guard_page_address)) ||
goto scrub;
} while (((unsigned long)++sp) & (BYTES_ZERO_BEFORE_END - 1));
#endif
+#endif /* LISP_FEATURE_C_STACK_IS_CONTROL_STACK */
}
\f
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
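For reference, both assembly routines added below implement the same page-at-a-time scheme as the generic C branch above: zero words downward to the current 4KiB page boundary, stop short of the hard guard page (and of a still-protected guard page), and keep going only while the next lower page still contains a non-zero word. A rough, self-contained C sketch of that logic (illustrative names; 4KiB pages and a downward-growing stack assumed, not the runtime's actual code):

#include <stdint.h>

static void
scrub_sketch(uintptr_t *sp, uintptr_t guard, uintptr_t hard_guard,
             uintptr_t page_size, int guard_protected)
{
    uintptr_t *p = sp - 1;          /* first slot below the stack pointer */
 scrub:
    /* Stop rather than touch the hard guard page or a protected guard page. */
    if (((uintptr_t)p >= hard_guard && (uintptr_t)p < hard_guard + page_size) ||
        ((uintptr_t)p >= guard && (uintptr_t)p < guard + page_size &&
         guard_protected))
        return;
    /* Zero words downward to the start of the current 4KiB page. */
    do {
        *p = 0;
    } while (((uintptr_t)p--) & 0xfff);
    /* Stop rather than run into the hard guard page. */
    if ((uintptr_t)p < hard_guard + page_size)
        return;
    /* If the next (lower) page contains a non-zero word, keep scrubbing;
     * an all-zero page means the rest of the stack is already clean. */
    do {
        if (*p)
            goto scrub;
    } while (((uintptr_t)p--) & 0xfff);
}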
ret
SIZE(GNAME(fast_bzero))
+\f
+/* When LISP_FEATURE_C_STACK_IS_CONTROL_STACK, we cannot safely scrub
+ * the control stack from C, largely due to not knowing where the
+ * active stack frame ends. On such platforms, we reimplement the
+ * core scrubbing logic in assembly, in this case here:
+ */
+ .text
+ .align align_16byte,0x90
+ .globl GNAME(arch_scrub_control_stack)
+ TYPE(GNAME(arch_scrub_control_stack))
+GNAME(arch_scrub_control_stack):
+ /* We are passed three parameters:
+ * A (struct thread *) in RDI,
+ * the address of the guard page in RSI, and
+ * the address of the hard guard page in RDX.
+ * We may trash RAX, RCX, and R8-R11 with impunity.
+ * [RSP] is our return address, [RSP-8] is the first
+ * stack slot to scrub. */
+
+ /* We start by setting up our scrub pointer in RAX, our
+ * guard page upper bound in R8, and our hard guard
+ * page upper bound in R9. */
+ lea -8(%rsp), %rax
+#ifdef LISP_FEATURE_DARWIN
+ mov GSYM(GNAME(os_vm_page_size)),%r9
+#else
+ mov os_vm_page_size,%r9
+#endif
+ lea (%rsi,%r9), %r8
+ lea (%rdx,%r9), %r9
+
+ /* Now we begin our main scrub loop. */
+ascs_outer_loop:
+
+ /* If we're about to scrub the hard guard page, exit. */
+ cmp %r9, %rax
+ jae ascs_check_guard_page
+ cmp %rax, %rdx
+ jbe ascs_finished
+
+ascs_check_guard_page:
+ /* If we're about to scrub the guard page, and the guard
+ * page is protected, exit. */
+ cmp %r8, %rax
+ jae ascs_clear_loop
+ cmp %rax, %rsi
+ ja ascs_clear_loop
+ cmpq $(NIL), THREAD_CONTROL_STACK_GUARD_PAGE_PROTECTED_OFFSET(%rdi)
+ jne ascs_finished
+
+ /* Clear memory backwards to the start of the (4KiB) page */
+ascs_clear_loop:
+ movq $0, (%rax)
+ test $0xfff, %rax
+ lea -8(%rax), %rax
+ jnz ascs_clear_loop
+
+ /* If we're about to hit the hard guard page, exit. */
+ cmp %r9, %rax
+ jb ascs_finished
+
+ /* If the next (lower) 4KiB page contains a non-zero
+ * word, continue scrubbing. */
+ascs_check_loop:
+ testq $-1, (%rax)
+ jnz ascs_outer_loop
+ test $0xfff, %rax
+ lea -8(%rax), %rax
+ jnz ascs_check_loop
+
+ascs_finished:
+ ret
+ SIZE(GNAME(arch_scrub_control_stack))
+\f
END()
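The paired cmp/jcc sequences in both routines are half-open range tests against the two guard pages: scrubbing stops at the hard guard page unconditionally, and at the guard page only while the thread's guard-page-protected slot (read via THREAD_CONTROL_STACK_GUARD_PAGE_PROTECTED_OFFSET) is not NIL. In rough C terms (an illustrative helper, not runtime code):

#include <stdint.h>

/* Half-open range test equivalent to the paired unsigned compares:
 * "is p somewhere inside [page_base, page_base + page_size)?" */
static inline int
in_page(uintptr_t p, uintptr_t page_base, uintptr_t page_size)
{
    return p >= page_base && p < page_base + page_size;
}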
pop %eax
ret
SIZE(GNAME(fast_bzero_base))
-
-\f
+
+\f
+/* When LISP_FEATURE_C_STACK_IS_CONTROL_STACK, we cannot safely scrub
+ * the control stack from C, largely due to not knowing where the
+ * active stack frame ends. On such platforms, we reimplement the
+ * core scrubbing logic in assembly, in this case here:
+ */
+ .text
+ .align align_16byte,0x90
+ .globl GNAME(arch_scrub_control_stack)
+ TYPE(GNAME(arch_scrub_control_stack))
+GNAME(arch_scrub_control_stack):
+ /* We are passed three parameters:
+ * A (struct thread *) at [ESP+4],
+ * the address of the guard page at [ESP+8], and
+ * the address of the hard guard page at [ESP+12].
+ * We may trash EAX, ECX, and EDX with impunity.
+ * [ESP] is our return address, [ESP-4] is the first
+ * stack slot to scrub. */
+
+ /* We start by setting up our scrub pointer in EAX, our
+ * guard page upper bound in ECX, and our hard guard
+ * page upper bound in EDX. */
+ lea -4(%esp), %eax
+ mov GNAME(os_vm_page_size),%edx
+ mov %edx, %ecx
+ add 8(%esp), %ecx
+ add 12(%esp), %edx
+
+ /* We need to do a memory operation relative to the
+ * thread pointer, so put it in %ecx and our guard
+ * page upper bound in 4(%esp). */
+ xchg 4(%esp), %ecx
+
+ /* Now we begin our main scrub loop. */
+ascs_outer_loop:
+
+ /* If we're about to scrub the hard guard page, exit. */
+ cmp %edx, %eax
+ jae ascs_check_guard_page
+ cmp 12(%esp), %eax
+ ja ascs_finished
+
+ascs_check_guard_page:
+ /* If we're about to scrub the guard page, and the guard
+ * page is protected, exit. */
+ cmp 4(%esp), %eax
+ jae ascs_clear_loop
+ cmp 8(%esp), %eax
+ jbe ascs_clear_loop
+ cmpl $(NIL), THREAD_CONTROL_STACK_GUARD_PAGE_PROTECTED_OFFSET(%ecx)
+ jne ascs_finished
+
+ /* Clear memory backwards to the start of the (4KiB) page */
+ascs_clear_loop:
+ movl $0, (%eax)
+ test $0xfff, %eax
+ lea -4(%eax), %eax
+ jnz ascs_clear_loop
+
+ /* If we're about to hit the hard guard page, exit. */
+ cmp %edx, %eax
+ jb ascs_finished
+
+ /* If the next (lower) 4KiB page contains a non-zero
+ * word, continue scrubbing. */
+ascs_check_loop:
+ testl $-1, (%eax)
+ jnz ascs_outer_loop
+ test $0xfff, %eax
+ lea -4(%eax), %eax
+ jnz ascs_check_loop
+
+ascs_finished:
+ ret
+ SIZE(GNAME(arch_scrub_control_stack))
+\f
END()