boolean check_code_fixups = 0;
/* Should we check that newly allocated regions are zero filled? */
-boolean gencgc_zero_check = 1;
+boolean gencgc_zero_check = 0;
/* Should we check that the free space is zero filled? */
-boolean gencgc_enable_verify_zero_fill = 1;
+boolean gencgc_enable_verify_zero_fill = 0;
/* Should we check that free pages are zero filled during gc_free_heap
* called after Lisp PURIFY? */
-boolean gencgc_zero_check_during_free_heap = 1;
+boolean gencgc_zero_check_during_free_heap = 0;
\f
/*
* GC structures and variables
static void *heap_base = NULL;
/* Calculate the start address for the given page number. */
-inline void *
-page_address(int page_num)
+inline void
+*page_address(int page_num)
{
return (heap_base + (page_num * 4096));
}
/* the first page that gc_alloc_unboxed checks on its next call */
int alloc_unboxed_start_page;
- /* the first page that we look at for boxed large allocations
- (Although we always allocate after the boxed_region.) */
+ /* the first page that gc_alloc_large (boxed) considers on its next
+ * call. (Although it always allocates after the boxed_region.) */
int alloc_large_start_page;
- /* the first page that we look at for unboxed large allocations
- * (Although we always allocate after the current_unboxed_region.) */
+ /* the first page that gc_alloc_large (unboxed) considers on its
+ * next call. (Although it always allocates after the
+ * current_unboxed_region.) */
int alloc_large_unboxed_start_page;
/* the bytes allocated to this generation */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
-/* Reset the alloc_region. This indicates that it's safe to call
- * gc_alloc_new_region() on it, and impossible to allocate space from
- * until gc_alloc_new_region() is called on it. (The reset values are
- * chosen so that attempts to allocate space from it will fail
- * (because free_pointer == end_addr) and cause gc_alloc_new_region()
- * to be called before retrying.) */
-void
-reset_alloc_region(struct alloc_region *alloc_region)
-{
- alloc_region->first_page = 0;
- alloc_region->last_page = -1;
- alloc_region->start_addr =
- alloc_region->free_pointer =
- alloc_region->end_addr =
- page_address(0);
- /* REMOVEME: last-ditch sanity check for postcondition */
- gc_assert(alloc_region_is_completely_reset(alloc_region));
-}
-
-/* Does *alloc_region look exactly like it does after
- * reset_alloc_region() has munged it? */
-int
-alloc_region_is_completely_reset(struct alloc_region *alloc_region)
-{
- return
- alloc_region->first_page == 0
- && alloc_region->last_page == -1
- && alloc_region->start_addr == alloc_region->free_pointer
- && alloc_region->free_pointer == alloc_region->end_addr;
-}
-
-/* Is *alloc_region in a state which it could only have gotten into by
- * having reset_alloc_region() munge it, as it does in preparation for
- * having gc_alloc_new_region() operate on it? I.e. are at least some
- * key fields distinctively munged, even if some others aren't?
- *
- * This test is different from alloc_region_is_completely_reset(). In
- * particular, if you reset the region, and then accidentally scribble
- * on some of its fields, this test will be true while the other test
- * is false. Around sbcl-0.6.12.8, merging the Alpha patches, this
- * difference became important because of some problems with the
- * global current_region_free_pointer being used to scribble on
- * alloc_region.free_pointer after the alloc_region had been reset and
- * before gc_alloc_new_region() was called. */
-int
-alloc_region_looks_reset(struct alloc_region *alloc_region)
-{
- return
- alloc_region->first_page == 0
- && alloc_region->last_page == -1;
-}
-
-/* (should only be needed for debugging or assertion failure reporting) */
-void
-fprint_alloc_region(FILE *file, struct alloc_region *alloc_region)
-{
- fprintf(file,
- "alloc_region *0x%0lx:
- first_page=0x%08lx, last_page=0x%08lx,
- start_addr=0x%08lx, free_pointer=0x%08lx, end_addr=0x%08lx\n",
- (unsigned long)alloc_region,
- (unsigned long)alloc_region->first_page,
- (unsigned long)alloc_region->last_page,
- (unsigned long)alloc_region->start_addr,
- (unsigned long)alloc_region->free_pointer,
- (unsigned long)alloc_region->end_addr);
-}
-
-
/* XX hack. Current Lisp code uses the following. Need copying in/out. */
void *current_region_free_pointer;
void *current_region_end_addr;
-/* the generation currently being allocated to */
+/* The generation currently being allocated to. */
static int gc_alloc_generation;
-/* Set *alloc_region to refer to a new region with room for at least
- * the given number of bytes.
- *
- * Before the call to this function, *alloc_region should have been
- * closed by a call to gc_alloc_update_page_tables(), and will thus be
- * in an empty "reset" state. Upon return from this function, it should
- * no longer be in a reset state.
+/* Find a new region with room for at least the given number of bytes.
*
- * We start by looking at the current generation's alloc_start_page. So
+ * It starts looking at the current generation's alloc_start_page. So
* may pick up from the previous region if there is enough space. This
* keeps the allocation contiguous when scavenging the newspace.
*
+ * The alloc_region should have been closed by a call to
+ * gc_alloc_update_page_tables, and will thus be in an empty state.
+ *
* To assist the scavenging functions write-protected pages are not
* used. Free pages should not be write-protected.
*
* from space can be recognized. Therefore the generation of pages in
* the region are set to gc_alloc_generation. To prevent another
* allocation call using the same pages, all the pages in the region
- * are allocated, although they will initially be empty. */
+ * are allocated, although they will initially be empty.
+ */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
int num_pages;
int i;
- /* Check invariant as per the interface definition comment above. */
- if (!alloc_region_is_completely_reset(alloc_region)) {
- fprintf(stderr,
- "Argh! alloc_region not reset in gc_alloc_new_region()\n");
- fprint_alloc_region(stderr, alloc_region);
- lose(0);
- }
+ /*
+ FSHOW((stderr,
+ "/alloc_new_region for %d bytes from gen %d\n",
+ nbytes, gc_alloc_generation));
+ */
+
+ /* Check that the region is in a reset state. */
+ gc_assert((alloc_region->first_page == 0)
+ && (alloc_region->last_page == -1)
+ && (alloc_region->free_pointer == alloc_region->end_addr));
if (unboxed) {
restart_page =
/* Check for a failure. */
if (first_page >= NUM_PAGES) {
fprintf(stderr,
- "Argh! gc_alloc_new_region() failed on first_page, "
- "nbytes=%d.\n",
+ "Argh! gc_alloc_new_region failed on first_page, nbytes=%d.\n",
nbytes);
print_generation_stats(1);
lose(NULL);
page_table[first_page].first_object_offset = 0;
}
- if (unboxed) {
+ if (unboxed)
gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
- } else {
+ else
gc_assert(page_table[first_page].allocated == BOXED_PAGE);
- }
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
if (last_page+1 > last_used_page)
last_used_page = last_page+1;
}
-
- /* postcondition sanity check*/
- gc_assert(!alloc_region_is_completely_reset(alloc_region));
}
/* If the record_new_objects flag is 2 then all new regions created
max_new_areas = new_areas_index;
}
-/* Update the tables for the alloc_region. The region may be added to
+/* Update the tables for the alloc_region. The region may be added to
* the new_areas.
*
- * When done the alloc_region is "reset", i.e. set up so that the next
- * quick alloc will fail safely and thus a new region will be
- * allocated. Further it is safe to try to re-update the page table of
- * this reset alloc_region. */
+ * When done the alloc_region is set up so that the next quick alloc
+ * will fail safely and thus a new region will be allocated. Further
+ * it is safe to try to re-update the page table of this reset
+ * alloc_region. */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
if ((first_page == 0) && (alloc_region->last_page == -1))
return;
- next_page = first_page + 1;
+ next_page = first_page+1;
- /* Skip if no bytes were allocated. */
+ /* Skip if no bytes were allocated */
if (alloc_region->free_pointer != alloc_region->start_addr) {
-
- /* hunting for invariant violations from the Alpha patches ca.
- * sbcl-0.6.12.8: It's OK -- I think -- for
- * gc_alloc_update_page_tables() to be called on a reset
- * alloc_region, but it's not OK in that case for the
- * alloc_region.free_pointer to have been modified since the
- * reset, i.e. the inequality tested just above.
- * -- WHN 2001-05-14 */
- gc_assert(!alloc_region_looks_reset(alloc_region));
-
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
- /* All the pages used need to be updated. */
+ /* All the pages used need to be updated */
/* Update the first page. */
if (page_table[first_page].bytes_used == 0)
gc_assert(page_table[first_page].first_object_offset == 0);
- if (unboxed) {
+ if (unboxed)
gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
- } else {
+ else
gc_assert(page_table[first_page].allocated == BOXED_PAGE);
- }
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
byte_cnt = 0;
- /* Calculate the number of bytes used in this page. This is
- not always the number of new bytes, unless it was free. */
+ /* Calc. the number of bytes used in this page. This is not always
+ the number of new bytes, unless it was free. */
more = 0;
- bytes_used =
- alloc_region->free_pointer - page_address(first_page);
- if (bytes_used > 4096) {
+ if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
bytes_used = 4096;
more = 1;
}
byte_cnt += bytes_used;
- /* All the rest of the pages should be free. We need to set their
+ /* All the rest of the pages should be free. Need to set their
first_object_offset pointer to the start of the region, and set
the bytes_used. */
while (more) {
alloc_region->start_addr - page_address(next_page));
/* Calculate the number of bytes used in this page. */
- /* FIXME: This code is duplicated about 20 lines above, in
- * order to be executed on the first pass. Isn't
- * there some way to move that duplicated block into the
- * while() loop, converting it into repeat..until? */
more = 0;
- bytes_used =
- alloc_region->free_pointer - page_address(next_page);
- if (bytes_used > 4096) {
+ if ((bytes_used = (alloc_region->free_pointer
+ - page_address(next_page)))>4096) {
bytes_used = 4096;
more = 1;
}
next_page++;
}
- region_size =
- alloc_region->free_pointer - alloc_region->start_addr;
+ region_size = alloc_region->free_pointer - alloc_region->start_addr;
bytes_allocated += region_size;
generations[gc_alloc_generation].bytes_allocated += region_size;
gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
/* Set the generations alloc restart page to the last page of
- * the region. */
- if (unboxed) {
+ the region. */
+ if (unboxed)
generations[gc_alloc_generation].alloc_unboxed_start_page =
next_page-1;
- } else {
+ else
generations[gc_alloc_generation].alloc_start_page = next_page-1;
- }
/* Add the region to the new_areas if requested. */
- if (!unboxed) {
+ if (!unboxed)
add_new_area(first_page,orig_first_page_bytes_used, region_size);
- }
/*
FSHOW((stderr,
region_size,
gc_alloc_generation));
*/
- } else {
- /* No bytes were allocated. Unallocate the first_page if there
- * are 0 bytes_used. */
+ }
+ else
+ /* No bytes allocated. Unallocate the first_page if there are 0
+ bytes_used. */
if (page_table[first_page].bytes_used == 0)
page_table[first_page].allocated = FREE_PAGE;
- }
/* Unallocate any unused pages. */
while (next_page <= alloc_region->last_page) {
next_page++;
}
- reset_alloc_region(alloc_region);
+ /* Reset the alloc_region. */
+ alloc_region->first_page = 0;
+ alloc_region->last_page = -1;
+ alloc_region->start_addr = page_address(0);
+ alloc_region->free_pointer = page_address(0);
+ alloc_region->end_addr = page_address(0);
}
static inline void *gc_quick_alloc(int nbytes);
/* Allocate a possibly large object. */
-static void *
-gc_alloc_possibly_large(int nbytes,
- int unboxed,
- struct alloc_region *alloc_region)
+static void
+*gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
int first_page;
int last_page;
/*
FSHOW((stderr,
- "/gc_alloc_possibly_large for %d bytes (large=%d) from gen %d\n",
- nbytes, large, gc_alloc_generation));
+ "/gc_alloc_large for %d bytes from gen %d\n",
+ nbytes, gc_alloc_generation));
*/
/* If the object is small, and there is room in the current region
then allocation it in the current region. */
if (!large
- && ((alloc_region->end_addr - alloc_region->free_pointer) >= nbytes))
+ && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
return gc_quick_alloc(nbytes);
/* Search for a contiguous free region of at least nbytes. If it's a
index ahead of the current region and bumped up here to save a
lot of re-scanning. */
if (unboxed)
- restart_page =
- generations[gc_alloc_generation].alloc_large_unboxed_start_page;
+ restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
else
restart_page = generations[gc_alloc_generation].alloc_large_start_page;
if (restart_page <= alloc_region->last_page)
if (first_page >= NUM_PAGES) {
fprintf(stderr,
- "Argh! gc_alloc_possibly_large failed (first_page), "
- "nbytes=%d.\n",
+ "Argh! gc_alloc_large failed (first_page), nbytes=%d.\n",
nbytes);
print_generation_stats(1);
lose(NULL);
/* Check for a failure */
if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
fprintf(stderr,
- "Argh! gc_alloc_possibly_large failed (restart_page), "
- "nbytes=%d.\n",
+ "Argh! gc_alloc_large failed (restart_page), nbytes=%d.\n",
nbytes);
print_generation_stats(1);
lose(NULL);
/*
if (large)
FSHOW((stderr,
- "/gc_alloc_possibly_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
+ "/gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
gc_alloc_generation,
nbytes,
bytes_found,
/* Allocate bytes from the boxed_region. It first checks if there is
* room, if not then it calls gc_alloc_new_region to find a new region
* with enough space. A pointer to the start of the region is returned. */
-static void *
-gc_alloc(int nbytes)
+static void
+*gc_alloc(int nbytes)
{
void *new_free_pointer;
* saving, then allocate a large object. */
/* FIXME: "32" should be a named parameter. */
if ((boxed_region.end_addr-boxed_region.free_pointer) > 32)
- return gc_alloc_possibly_large(nbytes, 0, &boxed_region);
+ return gc_alloc_large(nbytes, 0, &boxed_region);
/* Else find a new region. */
/* Allocate space from the boxed_region. If there is not enough free
* space then call gc_alloc to do the job. A pointer to the start of
* the region is returned. */
-static inline void *
-gc_quick_alloc(int nbytes)
+static inline void
+*gc_quick_alloc(int nbytes)
{
void *new_free_pointer;
return((void *)new_obj);
}
- /* Else call gc_alloc(). */
- return gc_alloc(nbytes);
+ /* Else call gc_alloc */
+ return (gc_alloc(nbytes));
}
/* Allocate space for the boxed object. If it is a large object then
* do a large alloc else allocate from the current region. If there is
* not enough free space then call gc_alloc to do the job. A pointer
* to the start of the region is returned. */
-static inline void *
-gc_quick_alloc_large(int nbytes)
+static inline void
+*gc_quick_alloc_large(int nbytes)
{
void *new_free_pointer;
if (nbytes >= large_object_size)
- return gc_alloc_possibly_large(nbytes, 0, &boxed_region);
+ return gc_alloc_large(nbytes, 0, &boxed_region);
/* Check whether there is room in the current region. */
new_free_pointer = boxed_region.free_pointer + nbytes;
return (gc_alloc(nbytes));
}
-static void *
-gc_alloc_unboxed(int nbytes)
+static void
+*gc_alloc_unboxed(int nbytes)
{
void *new_free_pointer;
/* If there is a bit of room left in the current region then
allocate a large object. */
if ((unboxed_region.end_addr-unboxed_region.free_pointer) > 32)
- return gc_alloc_possibly_large(nbytes,1,&unboxed_region);
+ return gc_alloc_large(nbytes,1,&unboxed_region);
/* Else find a new region. */
return((void *) NIL); /* dummy value: return something ... */
}
-static inline void *
-gc_quick_alloc_unboxed(int nbytes)
+static inline void
+*gc_quick_alloc_unboxed(int nbytes)
{
void *new_free_pointer;
* enough free space then call gc_alloc to do the job.
*
* A pointer to the start of the region is returned. */
-static inline void *
-gc_quick_alloc_unboxed_possibly_large(int nbytes)
+static inline void
+*gc_quick_alloc_large_unboxed(int nbytes)
{
void *new_free_pointer;
if (nbytes >= large_object_size)
- return gc_alloc_possibly_large(nbytes,1,&unboxed_region);
+ return gc_alloc_large(nbytes,1,&unboxed_region);
/* Check whether there is room in the current region. */
new_free_pointer = unboxed_region.free_pointer + nbytes;
tag = LowtagOf(object);
/* Allocate space. */
- new = gc_quick_alloc_unboxed_possibly_large(nwords*4);
+ new = gc_quick_alloc_large_unboxed(nwords*4);
dest = new;
source = (lispobj *) PTR(object);
object = *start;
-/* FSHOW((stderr, "/Scavenge: %p, %ld\n", start, nwords)); */
+/* FSHOW((stderr, "Scavenge: %p, %ld\n", start, nwords)); */
gc_assert(object != 0x01); /* not a forwarding pointer */
/* It's ok if it's byte compiled code. The trace table offset will
* be a fixnum if it's x86 compiled code - check. */
if (code->trace_table_offset & 0x3) {
- FSHOW((stderr, "/sniffing byte compiled code object at %x\n", code));
+ FSHOW((stderr, "/Sniffing byte compiled code object at %x.\n", code));
return;
}
(kv_vector[2*i] != empty_symbol))) {
/*FSHOW((stderr,
- "/EQ key %d moved from %x to %x; index %d to %d\n",
+ "* EQ key %d moved from %x to %x; index %d to %d\n",
i, old_key, new_key, old_index, new_index));*/
if (index_vector[old_index] != 0) {
gc_assert(Pointerp(object));
#if defined(DEBUG_WEAK)
- FSHOW((stderr, "/transporting weak pointer from 0x%08x\n", object));
+ FSHOW((stderr, "Transporting weak pointer from 0x%08x\n", object));
#endif
/* Need to remember where all the weak pointers are that have */
case type_BaseChar:
if (gencgc_verbose)
FSHOW((stderr,
- "/Wo3: %x %x %x\n",
+ "*Wo3: %x %x %x\n",
pointer, start_addr, *start_addr));
return 0;
case type_ByteCodeClosure:
if (gencgc_verbose)
FSHOW((stderr,
- "/Wo4: %x %x %x\n",
+ "*Wo4: %x %x %x\n",
pointer, start_addr, *start_addr));
return 0;
case type_InstanceHeader:
if (gencgc_verbose)
FSHOW((stderr,
- "/Wo5: %x %x %x\n",
+ "*Wo5: %x %x %x\n",
pointer, start_addr, *start_addr));
return 0;
default:
if (gencgc_verbose)
FSHOW((stderr,
- "/W?: %x %x %x\n",
+ "*W?: %x %x %x\n",
pointer, start_addr, *start_addr));
return 0;
}
|| (((unsigned)addr & 0xfff)
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
- "/weird? ignore ptr 0x%x to freed area of large object\n",
+ "weird? ignore ptr 0x%x to freed area of large object\n",
addr));
return;
}
}
if (gencgc_verbose > 1) {
FSHOW((stderr,
- "/scavenging %d words of control stack %d of length %d words.\n",
+ "scavenging %d words of control stack %d of length %d words.\n",
length, i, vector_length));
}
for (j = 0; j < length; j++) {
if ((all_wp != 0) && (a1 != bytes_allocated)) {
FSHOW((stderr,
- "/alloc'ed over %d to %d\n",
+ "alloc'ed over %d to %d\n",
i, last_page));
FSHOW((stderr,
"/page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
current_new_areas_index = new_areas_index;
/*FSHOW((stderr,
- "/The first scan is finished; current_new_areas_index=%d.\n",
+ "The first scan is finished; current_new_areas_index=%d.\n",
current_new_areas_index));*/
while (current_new_areas_index > 0) {
current_new_areas_index = new_areas_index;
/*FSHOW((stderr,
- "/The re-scan has finished; current_new_areas_index=%d.\n",
+ "The re-scan has finished; current_new_areas_index=%d.\n",
current_new_areas_index));*/
}
return bytes_freed;
}
\f
-#if 0 /* not used as of sbcl-0.6.12.8 */
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
*(addr+3),
*(addr+4));
}
-#endif
extern int undefined_tramp;
}
}
+/* External entry point for verify_zero_fill */
+void
+gencgc_verify_zero_fill(void)
+{
+ /* Flush the alloc regions updating the tables. */
+ boxed_region.free_pointer = current_region_free_pointer;
+ gc_alloc_update_page_tables(0, &boxed_region);
+ gc_alloc_update_page_tables(1, &unboxed_region);
+ SHOW("verifying zero fill");
+ verify_zero_fill();
+ current_region_free_pointer = boxed_region.free_pointer;
+ current_region_end_addr = boxed_region.end_addr;
+}
+
static void
verify_dynamic_space(void)
{
generations[generation].alloc_large_unboxed_start_page = 0;
if (generation >= verify_gens) {
- SHOW("verifying");
+ if (gencgc_verbose)
+ SHOW("verifying");
verify_gc();
verify_dynamic_space();
}
generations[generation].bytes_allocated
+ generations[generation].bytes_consed_between_gc;
- if (raise) {
+ if (raise)
generations[generation].num_gc = 0;
- } else {
+ else
++generations[generation].num_gc;
- }
}
/* Update last_free_page then ALLOCATION_POINTER */
int last_page = -1;
int i;
- FSHOW((stderr,
- "/entering update_x86_dynamic_space_free_pointer(), "
- "old value=0x%lx\n",
- (long)SymbolValue(ALLOCATION_POINTER)));
for (i = 0; i < NUM_PAGES; i++)
if ((page_table[i].allocated != FREE_PAGE)
&& (page_table[i].bytes_used != 0))
last_page = i;
- last_free_page = last_page + 1;
+ last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
(lispobj)(((char *)heap_base) + last_free_page*4096));
-
- FSHOW((stderr,
- "/leaving update_x86_dynamic_space_free_pointer(), "
- "new value=0x%lx\n",
- (long)SymbolValue(ALLOCATION_POINTER)));
-
return 0; /* dummy value: return something ... */
}
int gen_to_wp;
int i;
- /* We're about to modify boxed_region in a way which would mess up its
- * nice tidy reset state if it is currently reset, so make sure it
- * isn't currently reset: */
- gc_assert(!alloc_region_looks_reset(&boxed_region));
-
boxed_region.free_pointer = current_region_free_pointer;
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
/* Verify the new objects created by Lisp code. */
if (pre_verify_gen_0) {
- SHOW("pre-checking generation 0\n");
+        FSHOW((stderr, "pre-checking generation 0\n"));
verify_generation(0);
}
if (gencgc_verbose > 1) {
FSHOW((stderr,
- "/starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
+ "Starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
gen,
raise,
generations[gen].bytes_allocated,
generations[gen].cum_sum_bytes_allocated = 0;
if (gencgc_verbose > 1) {
- FSHOW((stderr, "/GC of generation %d finished:\n", gen));
+ FSHOW((stderr, "GC of generation %d finished:\n", gen));
print_generation_stats(0);
}
if (gencgc_verbose > 1)
print_generation_stats(0);
- /* Initialize gc_alloc(). */
+ /* Initialize gc_alloc */
gc_alloc_generation = 0;
- reset_alloc_region(&boxed_region);
- reset_alloc_region(&unboxed_region);
+ boxed_region.first_page = 0;
+ boxed_region.last_page = -1;
+ boxed_region.start_addr = page_address(0);
+ boxed_region.free_pointer = page_address(0);
+ boxed_region.end_addr = page_address(0);
+
+ unboxed_region.first_page = 0;
+ unboxed_region.last_page = -1;
+ unboxed_region.start_addr = page_address(0);
+ unboxed_region.free_pointer = page_address(0);
+ unboxed_region.end_addr = page_address(0);
#if 0 /* Lisp PURIFY is currently running on the C stack so don't do this. */
zero_stack();
if (verify_after_free_heap) {
/* Check whether purify has left any bad pointers. */
- SHOW("checking after free_heap\n");
+ if (gencgc_verbose)
+ SHOW("checking after free_heap\n");
verify_gc();
}
}
int addr = DYNAMIC_SPACE_START;
int alloc_ptr = SymbolValue(ALLOCATION_POINTER);
- SHOW("entering gencgc_pickup_dynamic()");
-
/* Initialize the first region. */
do {
page_table[page].allocated = BOXED_PAGE;
current_region_free_pointer = boxed_region.free_pointer;
current_region_end_addr = boxed_region.end_addr;
-
- SHOW("returning from gencgc_pickup_dynamic()");
}
\f
/* a counter for how deep we are in alloc(..) calls */
{
int page_index = find_page_index(fault_addr);
- /* (When the write barrier is working right, this message is just
- * a distraction; but when you're trying to get the write barrier
- * to work, or grok what it's doing, it can be very handy.) */
#if defined QSHOW_SIGNALS
- FSHOW((stderr, "/heap WP violation? fault_addr=0x%0lx, page_index=%d\n",
- (unsigned long)fault_addr, page_index));
+	FSHOW((stderr, "heap WP violation? fault_addr=%p, page_index=%d\n",
+	       fault_addr, page_index));
#endif
/* Check whether the fault is within the dynamic space. */