changes in sbcl-1.0.27 relative to 1.0.26:
* new port: support added for x86-64 OpenBSD. (thanks to Josh Elsasser)
* new port: support added for x86-64 Solaris. (thanks to Alex Viskovatoff)
+ * improvement: on stack exhaustion the system now either recovers or
+   dies cleanly, instead of leaving the user to wonder whether the
+   handler trampled on random memory next to the stack, or forcing
+   reliance on --lose-on-corruption (which is still a good idea in
+   production, because stack exhaustion in a signal handler is likely
+   to lead to a hang).
 * bug fix: a type error is signaled when the LOOP keyword ACROSS is
   used with a NIL value. (thanks to Daniel Lowe)
* bug fix: fix gc related interrupt handling bug on ppc (regression from
(initial-offset (logand csp (1- bytes-per-scrub-unit)))
(end-of-stack
(- (sap-int (sb!di::descriptor-sap sb!vm:*control-stack-end*))
- sb!c:*backend-page-bytes*)))
+ (* 2 sb!c:*backend-page-bytes*))))
(labels
((scrub (ptr offset count)
(declare (type system-area-pointer ptr)
#!+stack-grows-downward-not-upward
(let* ((csp (sap-int (sb!c::control-stack-pointer-sap)))
- (end-of-stack (+ (sap-int (sb!di::descriptor-sap sb!vm:*control-stack-start*))
- sb!c:*backend-page-bytes*))
+ (end-of-stack (+ (sap-int
+ (sb!di::descriptor-sap sb!vm:*control-stack-start*))
+ (* 2 sb!c:*backend-page-bytes*)))
(initial-offset (logand csp (1- bytes-per-scrub-unit))))
(labels
((scrub (ptr offset count)
{
struct thread *th=arch_os_get_current_thread();
- /* note the os_context hackery here. When the signal handler returns,
- * it won't go back to what it was doing ... */
- if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
- addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
+ if(addr >= CONTROL_STACK_HARD_GUARD_PAGE(th) &&
+ addr < CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
+ lose("Control stack exhausted");
+ }
+ else if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
+ addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
/* We hit the end of the control stack: disable guard page
* protection so the error handler has some headroom, protect the
* previous page so that we can catch returns from the guard page
* and restore it. */
- corruption_warning_and_maybe_lose("Control stack exhausted");
protect_control_stack_guard_page(0, NULL);
protect_control_stack_return_guard_page(1, NULL);
+ fprintf(stderr, "INFO: Control stack guard page unprotected\n");
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        /* For the unfortunate case, when the control stack is ... */
#endif
    }
    else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
            addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
        /* We're returning from the guard page: reprotect it, and
         * unprotect this one. This works even if we somehow missed
         * the return-guard-page, and hit it on our way to new
         * exhaustion instead. */
- fprintf(stderr, "INFO: Control stack guard page reprotected\n");
protect_control_stack_guard_page(1, NULL);
protect_control_stack_return_guard_page(0, NULL);
+ fprintf(stderr, "INFO: Control stack guard page reprotected\n");
return 1;
}
+ else if(addr >= BINDING_STACK_HARD_GUARD_PAGE(th) &&
+ addr < BINDING_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
+ lose("Binding stack exhausted");
+ }
else if(addr >= BINDING_STACK_GUARD_PAGE(th) &&
addr < BINDING_STACK_GUARD_PAGE(th) + os_vm_page_size) {
- corruption_warning_and_maybe_lose("Binding stack exhausted");
protect_binding_stack_guard_page(0, NULL);
protect_binding_stack_return_guard_page(1, NULL);
+ fprintf(stderr, "INFO: Binding stack guard page unprotected\n");
/* For the unfortunate case, when the binding stack is
* exhausted in a signal handler. */
}
else if(addr >= BINDING_STACK_RETURN_GUARD_PAGE(th) &&
addr < BINDING_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
- fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
protect_binding_stack_guard_page(1, NULL);
protect_binding_stack_return_guard_page(0, NULL);
+ fprintf(stderr, "INFO: Binding stack guard page reprotected\n");
return 1;
}
+ else if(addr >= ALIEN_STACK_HARD_GUARD_PAGE(th) &&
+ addr < ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size) {
+ lose("Alien stack exhausted");
+ }
else if(addr >= ALIEN_STACK_GUARD_PAGE(th) &&
addr < ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size) {
- corruption_warning_and_maybe_lose("Alien stack exhausted");
protect_alien_stack_guard_page(0, NULL);
protect_alien_stack_return_guard_page(1, NULL);
+ fprintf(stderr, "INFO: Alien stack guard page unprotected\n");
/* For the unfortunate case, when the alien stack is
* exhausted in a signal handler. */
}
else if(addr >= ALIEN_STACK_RETURN_GUARD_PAGE(th) &&
addr < ALIEN_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
- fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
protect_alien_stack_guard_page(1, NULL);
protect_alien_stack_return_guard_page(0, NULL);
+ fprintf(stderr, "INFO: Alien stack guard page reprotected\n");
return 1;
}
else if (addr >= undefined_alien_address &&
link_thread(th);
th->os_thread=thread_self();
#ifndef LISP_FEATURE_WIN32
+ protect_control_stack_hard_guard_page(1, NULL);
+ protect_binding_stack_hard_guard_page(1, NULL);
+ protect_alien_stack_hard_guard_page(1, NULL);
protect_control_stack_guard_page(1, NULL);
protect_binding_stack_guard_page(1, NULL);
protect_alien_stack_guard_page(1, NULL);
protect_page(page_name(thread), protect_p, flags); \
}
+DEF_PROTECT_PAGE(control_stack_hard_guard_page,
+ CONTROL_STACK_HARD_GUARD_PAGE,
+ OS_VM_PROT_NONE)
DEF_PROTECT_PAGE(control_stack_guard_page,
CONTROL_STACK_GUARD_PAGE,
OS_VM_PROT_READ|OS_VM_PROT_EXECUTE)
DEF_PROTECT_PAGE(control_stack_return_guard_page,
CONTROL_STACK_RETURN_GUARD_PAGE,
OS_VM_PROT_READ|OS_VM_PROT_EXECUTE)
+
+DEF_PROTECT_PAGE(binding_stack_hard_guard_page,
+ BINDING_STACK_HARD_GUARD_PAGE,
+ OS_VM_PROT_NONE)
DEF_PROTECT_PAGE(binding_stack_guard_page,
BINDING_STACK_GUARD_PAGE,
OS_VM_PROT_NONE)
DEF_PROTECT_PAGE(binding_stack_return_guard_page,
BINDING_STACK_RETURN_GUARD_PAGE,
OS_VM_PROT_NONE)
+
+DEF_PROTECT_PAGE(alien_stack_hard_guard_page,
+ ALIEN_STACK_HARD_GUARD_PAGE,
+ OS_VM_PROT_NONE)
DEF_PROTECT_PAGE(alien_stack_guard_page,
ALIEN_STACK_GUARD_PAGE,
OS_VM_PROT_NONE)
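
The head of the DEF_PROTECT_PAGE macro lies outside this hunk; judging
from the visible tail and from the callers above, which all pass NULL
for the thread argument, each use presumably expands into a small
wrapper of roughly this shape (a sketch, not code from the tree; the
NULL-defaulting and the protect_page argument order are assumptions
taken from the surrounding fragments):

/* Hypothetical expansion of
 *   DEF_PROTECT_PAGE(control_stack_hard_guard_page,
 *                    CONTROL_STACK_HARD_GUARD_PAGE,
 *                    OS_VM_PROT_NONE)
 * assuming the macro defaults a NULL thread to the current one before
 * delegating to protect_page(). */
void
protect_control_stack_hard_guard_page(int protect_p, struct thread *thread)
{
    if (thread == NULL)
        thread = arch_os_get_current_thread();
    protect_page(CONTROL_STACK_HARD_GUARD_PAGE(thread), protect_p,
                 OS_VM_PROT_NONE);
}

The signature matches the extern declarations further down, which is
why callers such as protect_control_stack_hard_guard_page(1, NULL) in
the thread-startup hunk above can simply pass NULL.
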
#if !defined(LANGUAGE_ASSEMBLY)
#include <thread.h>
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
-#define CONTROL_STACK_GUARD_PAGE(th) \
+
+#define CONTROL_STACK_HARD_GUARD_PAGE(th) \
((os_vm_address_t)(th->control_stack_start))
+#define CONTROL_STACK_GUARD_PAGE(th) \
+ (CONTROL_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size)
#define CONTROL_STACK_RETURN_GUARD_PAGE(th) \
(CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size)
-#define ALIEN_STACK_GUARD_PAGE(th) \
+
+#define ALIEN_STACK_HARD_GUARD_PAGE(th) \
((os_vm_address_t)(th->alien_stack_start))
+#define ALIEN_STACK_GUARD_PAGE(th) \
+ (ALIEN_STACK_HARD_GUARD_PAGE(th) + os_vm_page_size)
#define ALIEN_STACK_RETURN_GUARD_PAGE(th) \
(ALIEN_STACK_GUARD_PAGE(th) + os_vm_page_size)
+
#else
-#define CONTROL_STACK_GUARD_PAGE(th) \
+
+#define CONTROL_STACK_HARD_GUARD_PAGE(th) \
(((os_vm_address_t)(th->control_stack_end)) - os_vm_page_size)
+#define CONTROL_STACK_GUARD_PAGE(th) \
+ (CONTROL_STACK_HARD_GUARD_PAGE(th) - os_vm_page_size)
#define CONTROL_STACK_RETURN_GUARD_PAGE(th) \
(CONTROL_STACK_GUARD_PAGE(th) - os_vm_page_size)
-#define ALIEN_STACK_GUARD_PAGE(th) \
+
+#define ALIEN_STACK_HARD_GUARD_PAGE(th) \
(((os_vm_address_t)th->alien_stack_start) + ALIEN_STACK_SIZE - \
os_vm_page_size)
+#define ALIEN_STACK_GUARD_PAGE(th) \
+ (ALIEN_STACK_HARD_GUARD_PAGE(th) - os_vm_page_size)
#define ALIEN_STACK_RETURN_GUARD_PAGE(th) \
(ALIEN_STACK_GUARD_PAGE(th) - os_vm_page_size)
+
#endif
-#define BINDING_STACK_GUARD_PAGE(th) \
+#define BINDING_STACK_HARD_GUARD_PAGE(th) \
(((os_vm_address_t)th->binding_stack_start) + BINDING_STACK_SIZE - \
os_vm_page_size)
+#define BINDING_STACK_GUARD_PAGE(th) \
+ (BINDING_STACK_HARD_GUARD_PAGE(th) - os_vm_page_size)
#define BINDING_STACK_RETURN_GUARD_PAGE(th) \
(BINDING_STACK_GUARD_PAGE(th) - os_vm_page_size)
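
Taken together, the macros above place a hard guard page at the end
each stack grows toward, with the existing guard page and return guard
page pushed one page further in; the hard guard page is never
unprotected in the code above, and hitting it is treated as
unrecoverable (lose() in the handler). This is also why the Lisp stack
scrubber earlier in the changeset now stops (* 2
sb!c:*backend-page-bytes*) short of the control stack limit instead of
one page. A purely illustrative helper, not part of the changeset,
that prints the layout these macros yield for a thread:

#include <stdio.h>

/* Illustrative only: dump the hard/guard/return page addresses computed
 * by the macros above for the given thread. */
static void
print_guard_page_layout(struct thread *th)
{
    fprintf(stderr, "control: hard=%p guard=%p return=%p\n",
            (void*)CONTROL_STACK_HARD_GUARD_PAGE(th),
            (void*)CONTROL_STACK_GUARD_PAGE(th),
            (void*)CONTROL_STACK_RETURN_GUARD_PAGE(th));
    fprintf(stderr, "binding: hard=%p guard=%p return=%p\n",
            (void*)BINDING_STACK_HARD_GUARD_PAGE(th),
            (void*)BINDING_STACK_GUARD_PAGE(th),
            (void*)BINDING_STACK_RETURN_GUARD_PAGE(th));
    fprintf(stderr, "alien:   hard=%p guard=%p return=%p\n",
            (void*)ALIEN_STACK_HARD_GUARD_PAGE(th),
            (void*)ALIEN_STACK_GUARD_PAGE(th),
            (void*)ALIEN_STACK_RETURN_GUARD_PAGE(th));
}
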
extern void validate(void);
extern void
+protect_control_stack_hard_guard_page(int protect_p, struct thread *thread);
+extern void
protect_control_stack_guard_page(int protect_p, struct thread *thread);
extern void
protect_control_stack_return_guard_page(int protect_p, struct thread *thread);
extern void
+protect_binding_stack_hard_guard_page(int protect_p, struct thread *thread);
+extern void
protect_binding_stack_guard_page(int protect_p, struct thread *thread);
extern void
protect_binding_stack_return_guard_page(int protect_p, struct thread *thread);
extern void
+protect_alien_stack_hard_guard_page(int protect_p, struct thread *thread);
+extern void
protect_alien_stack_guard_page(int protect_p, struct thread *thread);
extern void
protect_alien_stack_return_guard_page(int protect_p, struct thread *thread);
;;; checkins which aren't released. (And occasionally for internal
;;; versions, especially for internal versions off the main CVS
;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"1.0.26.18"
+"1.0.26.19"