-
#ifdef LISP_FEATURE_SB_THREAD
#include <architecture/i386/table.h>
#include <i386/user_ldt.h>
kern_return_t mach_thread_init(mach_port_t thread_exception_port);
-void sigill_handler(int signal, siginfo_t *siginfo, void *void_context);
-void sigtrap_handler(int signal, siginfo_t *siginfo, void *void_context);
-void memory_fault_handler(int signal, siginfo_t *siginfo, void *void_context);
+void sigill_handler(int signal, siginfo_t *siginfo, os_context_t *context);
+void sigtrap_handler(int signal, siginfo_t *siginfo, os_context_t *context);
+void memory_fault_handler(int signal, siginfo_t *siginfo,
+ os_context_t *context);
/* exc_server handles mach exception messages from the kernel and
* calls catch exception raise. We use the system-provided
 * the old RSP value and other register state when activated. The
 * first part of this is the recovery trampoline, which points RSP at
 * the register save area below RBP, pops the six saved argument
 * registers and RBP, and returns. */
-asm(".globl _stack_allocation_recover; .align 4; _stack_allocation_recover: mov %rbp, %rsp; pop %rsi; pop %rdi; pop \
-%rdx; pop %rcx; pop %r8; pop %r9; pop %rbp; ret;");
+asm(".globl _stack_allocation_recover; \
+ .align 4; \
+ _stack_allocation_recover: \
+ lea -48(%rbp), %rsp; \
+ pop %rsi; \
+ pop %rdi; \
+ pop %rdx; \
+ pop %rcx; \
+ pop %r8; \
+ pop %r9; \
+ pop %rbp; \
+ ret;");
void open_stack_allocation(x86_thread_state64_t *context)
{
push_context(context->rip, context);
push_context(context->rbp, context);
+ context->rbp = context->rsp;
push_context(context->r9, context);
push_context(context->r8, context);
push_context(context->rsi, context);
push_context(context->rdi, context);
- context->rbp = context->rsp;
context->rip = (u64) stack_allocation_recover;
align_context_stack(context);
build_fake_signal_context(context, thread_state, float_state);
- block_blockable_signals();
+ block_blockable_signals(0, 0);
handler(signal, siginfo, context);
#endif
/* Signal handler run when the control stack is exhausted.  Unblocks
 * the signals recorded in the interrupted context (possibly warning),
 * then rewrites the context so that, on return from the handler,
 * execution resumes in the Lisp function bound to
 * CONTROL-STACK-EXHAUSTED-ERROR. */
void
-control_stack_exhausted_handler(int signal, siginfo_t *siginfo, void *void_context) {
-    os_context_t *context = arch_os_get_context(&void_context);
-
+control_stack_exhausted_handler(int signal, siginfo_t *siginfo,
+                                os_context_t *context) {
    unblock_signals_in_context_and_maybe_warn(context);
    arrange_return_to_lisp_function
        (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
}
/* Signal handler for faults on the undefined-alien guard page:
 * rewrites the context so that, on return from the handler, execution
 * resumes in the Lisp function bound to
 * UNDEFINED-ALIEN-VARIABLE-ERROR. */
void
-undefined_alien_handler(int signal, siginfo_t *siginfo, void *void_context) {
-    os_context_t *context = arch_os_get_context(&void_context);
-
+undefined_alien_handler(int signal, siginfo_t *siginfo, os_context_t *context) {
    arrange_return_to_lisp_function
        (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
}
* protection so the error handler has some headroom, protect the
* previous page so that we can catch returns from the guard page
* and restore it. */
- protect_control_stack_guard_page(0, th);
- protect_control_stack_return_guard_page(1, th);
+ lower_thread_control_stack_guard_page(th);
backup_thread_state = thread_state;
open_stack_allocation(&thread_state);
* unprotect this one. This works even if we somehow missed
* the return-guard-page, and hit it on our way to new
* exhaustion instead. */
- protect_control_stack_guard_page(1, th);
- protect_control_stack_return_guard_page(0, th);
+ reset_thread_control_stack_guard_page(th);
}
else if (addr >= undefined_alien_address &&
addr < undefined_alien_address + os_vm_page_size) {
/* mach_msg_server should never return, but it should dispatch mach
* exceptions to our catch_exception_raise function
*/
- abort();
+ lose("mach_msg_server returned");
}
/* Sets up the thread that will listen for mach exceptions. note that