#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
+#include "alloc.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/fdefn.h"
boolean enable_page_protection = 1;
/* the minimum size (in bytes) for a large object*/
-unsigned long large_object_size = 4 * PAGE_BYTES;
+long large_object_size = 4 * PAGE_BYTES;
\f
/*
/* An array of page structures is allocated on gc initialization.
* This helps quickly map between an address its page structure.
* page_table_pages is set from the size of the dynamic space. */
-unsigned page_table_pages;
+page_index_t page_table_pages;
struct page *page_table;
/* To map addresses to page structures the address of the first page
ret = thread_mutex_unlock(&free_pages_lock);
gc_assert(ret == 0);
- /* we can do this after releasing free_pages_lock */
- if (gencgc_zero_check) {
- long *p;
- for (p = (long *)alloc_region->start_addr;
- p < (long *)alloc_region->end_addr; p++) {
- if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero.\n", p);
- }
- }
- }
-
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(first_page),
PAGE_BYTES*(1+last_page-first_page),
}
zero_dirty_pages(first_page, last_page);
+
+ /* we can do this after releasing free_pages_lock */
+ if (gencgc_zero_check) {
+ long *p;
+ for (p = (long *)alloc_region->start_addr;
+ p < (long *)alloc_region->end_addr; p++) {
+ if (*p != 0) {
+ /* KLUDGE: It would be nice to use %lx and explicit casts
+ * (long) in code like this, so that it is less likely to
+ * break randomly when running on a machine with different
+ * word sizes. -- WHN 19991129 */
+ lose("The new region at %x is not zero (start=%p, end=%p).\n",
+ p, alloc_region->start_addr, alloc_region->end_addr);
+ }
+ }
+ }
}
/* If the record_new_objects flag is 2 then all new regions created
}
else {
/* FIXME: assert free_pages_lock held */
- thread_mutex_unlock(&free_pages_lock);
- funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR),
+ (void)thread_mutex_unlock(&free_pages_lock);
+ funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
alloc_number(available), alloc_number(requested));
lose("HEAP-EXHAUSTED-ERROR fell through");
}
{
void *new_free_pointer;
- if(nbytes>=large_object_size)
+ if (nbytes>=large_object_size)
return gc_alloc_large(nbytes,unboxed_p,my_region);
/* Check whether there is room in the current alloc region. */
old_value - displacement;
}
} else {
- fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
+        /* This used to just print a note to stderr, but a bogus fixup seems to
+         * indicate real heap corruption, so a hard failure is in order. */
+ lose("fixup vector %p has a bad widetag: %d\n", fixups_vector, widetag_of(fixups_vector->header));
}
/* Check for possible errors. */
static long
scav_weak_pointer(lispobj *where, lispobj object)
{
- struct weak_pointer *wp = weak_pointers;
- /* Push the weak pointer onto the list of weak pointers.
- * Do I have to watch for duplicates? Originally this was
- * part of trans_weak_pointer but that didn't work in the
- * case where the WP was in a promoted region.
+ /* Since we overwrite the 'next' field, we have to make
+ * sure not to do so for pointers already in the list.
+ * Instead of searching the list of weak_pointers each
+ * time, we ensure that next is always NULL when the weak
+ * pointer isn't in the list, and not NULL otherwise.
+ * Since we can't use NULL to denote end of list, we
+ * use a pointer back to the same weak_pointer.
*/
+ struct weak_pointer * wp = (struct weak_pointer*)where;
- /* Check whether it's already in the list. */
- while (wp != NULL) {
- if (wp == (struct weak_pointer*)where) {
- break;
- }
- wp = wp->next;
- }
- if (wp == NULL) {
- /* Add it to the start of the list. */
- wp = (struct weak_pointer*)where;
- if (wp->next != weak_pointers) {
- wp->next = weak_pointers;
- } else {
- /*SHOW("avoided write to weak pointer");*/
- }
+ if (NULL == wp->next) {
+ wp->next = weak_pointers;
weak_pointers = wp;
+ if (NULL == wp->next)
+ wp->next = wp;
}
/* Do not let GC scavenge the value slot of the weak pointer.
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-/* Is there any possibility that pointer is a valid Lisp object
- * reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing?
- * This is called from preserve_pointers() */
+/* Helper for valid_lisp_pointer_p and
+ * possibly_valid_dynamic_space_pointer.
+ *
+ * pointer is the pointer to validate, and start_addr is the address
+ * of the enclosing object.
+ */
static int
-possibly_valid_dynamic_space_pointer(lispobj *pointer)
+looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
{
- lispobj *start_addr;
-
- /* Find the object start address. */
- if ((start_addr = search_dynamic_space(pointer)) == NULL) {
- return 0;
- }
-
/* We need to allow raw pointers into Code objects for return
* addresses. This will also pick up pointers to functions in code
* objects. */
- if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
+ if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)
/* XXX could do some further checks here */
return 1;
- }
- /* If it's not a return address then it needs to be a valid Lisp
- * pointer. */
if (!is_lisp_pointer((lispobj)pointer)) {
return 0;
}
/* Check that the object pointed to is consistent with the pointer
- * low tag.
- */
+ * low tag. */
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
/* Start_addr should be the enclosing code object, or a closure
return 1;
}
+/* Used by the debugger to validate possibly bogus pointers before
+ * calling MAKE-LISP-OBJ on them.
+ *
+ * FIXME: We would like to make this perfect, because if the debugger
+ * constructs a reference to a bogus lisp object, and it ends up in a
+ * location scavenged by the GC all hell breaks loose.
+ *
+ * Whereas possibly_valid_dynamic_space_pointer has to be conservative
+ * and return true for all valid pointers, this could actually be eager
+ * and lie about a few pointers without bad results... but that should
+ * be reflected in the name.
+ */
+int
+valid_lisp_pointer_p(lispobj *pointer)
+{
+ lispobj *start;
+ if (((start=search_dynamic_space(pointer))!=NULL) ||
+ ((start=search_static_space(pointer))!=NULL) ||
+ ((start=search_read_only_space(pointer))!=NULL))
+ return looks_like_valid_lisp_pointer_p(pointer, start);
+ else
+ return 0;
+}
+
+/* Is there any possibility that pointer is a valid Lisp object
+ * reference, and/or something else (e.g. subroutine call return
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointers() */
+static int
+possibly_valid_dynamic_space_pointer(lispobj *pointer)
+{
+ lispobj *start_addr;
+
+ /* Find the object start address. */
+ if ((start_addr = search_dynamic_space(pointer)) == NULL) {
+ return 0;
+ }
+
+ return looks_like_valid_lisp_pointer_p(pointer, start_addr);
+}
+
/* Adjust large bignum and vector objects. This will adjust the
* allocated region if the size has shrunk, and move unboxed objects
* into unboxed pages. The pages are not promoted here, and the
#else
esp = (void **)((void *)&raise);
#endif
- for (ptr = ((void **)th->control_stack_end)-1; ptr > esp; ptr--) {
+ for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
preserve_pointer(*ptr);
}
}
* The check for a GC trigger is only performed when the current
* region is full, so in most cases it's not needed. */
-char *
+lispobj *
alloc(long nbytes)
{
struct thread *thread=arch_os_get_current_thread();
* catch GENCGC-related write-protect violations
*/
-void unhandled_sigmemoryfault(void);
+void unhandled_sigmemoryfault(void* addr);
/* Depending on which OS we're running under, different signals might
* be raised for a violation of write protection in the heap. This
/* It can be helpful to be able to put a breakpoint on this
* case to help diagnose low-level problems. */
- unhandled_sigmemoryfault();
+ unhandled_sigmemoryfault(fault_addr);
/* not within the dynamic space -- not our responsibility */
return 0;
* are about to let Lisp deal with it. It's basically just a
* convenient place to set a gdb breakpoint. */
void
-unhandled_sigmemoryfault()
+unhandled_sigmemoryfault(void *addr)
{}
void gc_alloc_update_all_page_tables(void)