*/
os_context_t *context;
-#if MAC_OS_X_VERSION_10_5
- struct __darwin_mcontext32 *regs;
-#else
- struct mcontext *regs;
-#endif
+ mcontext_t *regs;
context = (os_context_t*) os_validate(0, sizeof(os_context_t));
-#if MAC_OS_X_VERSION_10_5
- regs = (struct __darwin_mcontext32*) os_validate(0, sizeof(struct __darwin_mcontext32));
-#else
- regs = (struct mcontext*) os_validate(0, sizeof(struct mcontext));
-#endif
+ regs = (mcontext_t*) os_validate(0, sizeof(mcontext_t));
context->uc_mcontext = regs;
 /* when BSD signals are fired, they mask the signals in sa_mask
update_thread_state_from_context(thread_state, float_state, context);
os_invalidate((os_vm_address_t)context, sizeof(os_context_t));
-#if MAC_OS_X_VERSION_10_5
- os_invalidate((os_vm_address_t)regs, sizeof(struct __darwin_mcontext32));
-#else
- os_invalidate((os_vm_address_t)regs, sizeof(struct mcontext));
-#endif
+ os_invalidate((os_vm_address_t)regs, sizeof(mcontext_t));
/* Trap to restore the signal context. */
asm volatile ("movl %0, %%eax; movl %1, %%ebx; .long 0xffff0b0f"
/* Initialize the new state */
new_state = *thread_state;
open_stack_allocation(&new_state);
+ stack_allocate(&new_state, 256);
/* Save old state */
save_thread_state = (x86_thread_state32_t *)stack_allocate(&new_state, sizeof(*save_thread_state));
*save_thread_state = *thread_state;
control_stack_exhausted_handler(int signal, siginfo_t *siginfo, void *void_context) {
os_context_t *context = arch_os_get_context(&void_context);
+ unblock_signals_in_context_and_maybe_warn(context);
arrange_return_to_lisp_function
- (context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
+ (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
}
void
os_context_t *context = arch_os_get_context(&void_context);
arrange_return_to_lisp_function
- (context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
+ (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
}
kern_return_t
protect_control_stack_return_guard_page_thread(0, th);
break;
}
- /* Get vm_region info */
- region_addr = (vm_address_t)code_vector[1];
- info_count = VM_REGION_BASIC_INFO_COUNT;
- if ((ret = vm_region(mach_task_self(),
- ®ion_addr,
- ®ion_size,
- VM_REGION_BASIC_INFO,
- (vm_region_info_t)®ion_info,
- &info_count,
- ®ion_name)))
- lose("vm_region (VM_REGION_BASIC_INFO) failed failed %d\n", ret);
- /* Check if still protected */
- if ((region_info.protection & OS_VM_PROT_ALL) == 0) {
- /* KLUDGE:
- * If two threads fault on the same page, the protection
- * is cleared as the first thread runs memory_fault_handler.
- * Grep for "not marked as write-protected" in gencgc.c
- */
- ret = KERN_SUCCESS;
- break;
- }
/* Regular memory fault */
handler = memory_fault_handler;
break;