#if defined(LUTEX_WIDETAG)
#include "pthread-lutex.h"
#endif
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+#include "genesis/cons.h"
+#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes,
* scratch space by the collector, and should never get collected.
*/
enum {
- HIGHEST_NORMAL_GENERATION = 5,
- PSEUDO_STATIC_GENERATION,
- SCRATCH_GENERATION,
+ SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
NUM_GENERATIONS
};
boolean enable_page_protection = 1;
/* the minimum size (in bytes) for a large object */
+#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
+long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
+long large_object_size = 4 * GENCGC_CARD_BYTES;
+#else
long large_object_size = 4 * PAGE_BYTES;
+#endif
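The #if chain above just selects four times the coarsest of the three granularities. A minimal equivalent formulation, as a sketch only (assuming all three constants are plain integer macros; this is not part of the patch):

    /* Sketch: large_object_size is 4x the largest granularity in play. */
    #define MAX2(a, b) ((a) > (b) ? (a) : (b))
    long large_object_size =
        4 * MAX2(MAX2(PAGE_BYTES, GENCGC_CARD_BYTES), GENCGC_ALLOC_GRANULARITY);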
\f
/*
inline void *
page_address(page_index_t page_num)
{
- return (heap_base + (page_num * PAGE_BYTES));
+ return (heap_base + (page_num * GENCGC_CARD_BYTES));
}
/* Calculate the address where the allocation region associated with
{
if (addr >= heap_base) {
page_index_t index = ((pointer_sized_uint_t)addr -
- (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
+ (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
if (index < page_table_pages)
return (index);
}
npage_bytes(long npages)
{
gc_assert(npages>=0);
- return ((unsigned long)npages)*PAGE_BYTES;
+ return ((unsigned long)npages)*GENCGC_CARD_BYTES;
}
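Together, page_address(), find_page_index() and npage_bytes() make up the card-sized address arithmetic used throughout the file. A sketch of the round-trip invariant they maintain (illustrative only; check_card_arithmetic is not a function in the source):

    /* Sketch: index -> address -> index round trip, valid for
     * 0 <= i < page_table_pages. */
    static void
    check_card_arithmetic(page_index_t i)
    {
        void *addr = page_address(i);          /* heap_base + i*GENCGC_CARD_BYTES */
        gc_assert(find_page_index(addr) == i); /* find_page_index inverts it */
        gc_assert(npage_bytes(i) == (unsigned long)i * GENCGC_CARD_BYTES);
    }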
/* Check that X is a higher address than Y and return offset from Y to
return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
-/* a structure to hold the state of a generation */
+/* a structure to hold the state of a generation
+ *
+ * CAUTION: If you modify this, make sure to touch up the alien
+ * definition in src/code/gc.lisp accordingly. ...or better yet,
+ * deal with the FIXME there...
+ */
struct generation {
/* the first page that gc_alloc() checks on its next call */
/* the number of GCs since the last raise */
int num_gc;
- /* the average age after which a GC will raise objects to the
+    /* the number of GCs to run on this generation before raising objects to the
* next generation */
- int trigger_age;
+ int number_of_gcs_before_promotion;
/* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
/* a minimum average memory age before a GC will occur helps
* prevent a GC when a large number of new live objects have been
* added, in which case a GC could be a waste of time */
- double min_av_mem_age;
+ double minimum_age_before_gc;
/* A linked list of lutex structures in this generation, used for
* implementing lutex finalization. */
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
+extern unsigned long gencgc_release_granularity;
+unsigned long gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
+
+extern unsigned long gencgc_alloc_granularity;
+unsigned long gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
+
\f
/*
* miscellaneous heap functions
}
/* Return the average age of the memory in a generation. */
-static double
-gen_av_mem_age(generation_index_t gen)
+extern double
+generation_average_age(generation_index_t gen)
{
if (generations[gen].bytes_allocated == 0)
return 0.0;
/ ((double)generations[gen].bytes_allocated);
}
-/* The verbose argument controls how much to print: 0 for normal
- * level of detail; 1 for debugging. */
extern void
-print_generation_stats() /* FIXME: should take FILE argument, or construct a string */
+write_generation_stats(FILE *file)
{
generation_index_t i;
fpu_save(fpu_state);
/* Print the heap stats. */
- fprintf(stderr,
+ fprintf(file,
" Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
for (i = 0; i < SCRATCH_GENERATION; i++) {
gc_assert(generations[i].bytes_allocated
== count_generation_bytes_allocated(i));
- fprintf(stderr,
+ fprintf(file,
" %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
i,
generations[i].alloc_start_page,
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
- gen_av_mem_age(i));
+ generation_average_age(i));
}
- fprintf(stderr," Total bytes allocated = %lu\n", bytes_allocated);
- fprintf(stderr," Dynamic-space-size bytes = %u\n", dynamic_space_size);
+ fprintf(file," Total bytes allocated = %lu\n", bytes_allocated);
+ fprintf(file," Dynamic-space-size bytes = %lu\n", (unsigned long)dynamic_space_size);
fpu_restore(fpu_state);
}
+
+extern void
+write_heap_exhaustion_report(FILE *file, long available, long requested,
+ struct thread *thread)
+{
+ fprintf(file,
+ "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
+ gc_active_p ? "garbage collection" : "allocation",
+ available,
+ requested);
+ write_generation_stats(file);
+ fprintf(file, "GC control variables:\n");
+ fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
+ SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
+ (SymbolValue(GC_PENDING, thread) == T) ?
+ "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
+ "false" : "in progress"));
+#ifdef LISP_FEATURE_SB_THREAD
+ fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
+ SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
+#endif
+}
+
+extern void
+print_generation_stats(void)
+{
+ write_generation_stats(stderr);
+}
+
+extern char* gc_logfile;
+char * gc_logfile = NULL;
+
+extern void
+log_generation_stats(char *logfile, char *header)
+{
+ if (logfile) {
+ FILE * log = fopen(logfile, "a");
+ if (log) {
+ fprintf(log, "%s\n", header);
+ write_generation_stats(log);
+ fclose(log);
+ } else {
+ fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
+ fflush(stderr);
+ }
+ }
+}
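A usage note grounded in this patch: collect_garbage() below brackets each collection with log_generation_stats(gc_logfile, "=== GC Start ===") and log_generation_stats(gc_logfile, "=== GC End ==="), so pointing gc_logfile at a path enables append-mode GC logging without disturbing stderr.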
+
+extern void
+report_heap_exhaustion(long available, long requested, struct thread *th)
+{
+ if (gc_logfile) {
+ FILE * log = fopen(gc_logfile, "a");
+ if (log) {
+ write_heap_exhaustion_report(log, available, requested, th);
+ fclose(log);
+ } else {
+ fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
+ fflush(stderr);
+ }
+ }
+ /* Always to stderr as well. */
+ write_heap_exhaustion_report(stderr, available, requested, th);
+}
\f
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
if (start > end)
return;
+ gc_assert(length >= gencgc_release_granularity);
+ gc_assert((length % gencgc_release_granularity) == 0);
+
os_invalidate(addr, length);
new_addr = os_validate(addr, length);
if (new_addr == NULL || new_addr != addr) {
}
+static void
+zero_and_mark_pages(page_index_t start, page_index_t end) {
+ page_index_t i;
+
+ zero_pages(start, end);
+ for (i = start; i <= end; i++)
+ page_table[i].need_to_zero = 0;
+}
+
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * range as non-zeroed.
*/
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
- page_index_t i;
+ page_index_t i, j;
for (i = start; i <= end; i++) {
- if (page_table[i].need_to_zero == 1) {
- zero_pages(start, end);
- break;
- }
+ if (!page_table[i].need_to_zero) continue;
+ for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++);
+ zero_pages(i, j-1);
+ i = j;
}
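Worked example: with need_to_zero flags {1,1,0,1} over pages START..START+3, the rewritten loop issues zero_pages(START, START+1) and zero_pages(START+3, START+3); the old code zeroed the entire range as soon as it saw any dirty page.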
for (i = start; i <= end; i++) {
gc_assert(ret == 0);
first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
- bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
+ bytes_found=(GENCGC_CARD_BYTES - page_table[first_page].bytes_used)
+ npage_bytes(last_page-first_page);
/* Set up the alloc_region. */
more = 0;
if ((bytes_used = void_diff(alloc_region->free_pointer,
page_address(first_page)))
- >PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ >GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
/* Calculate the number of bytes used in this page. */
more = 0;
if ((bytes_used = void_diff(alloc_region->free_pointer,
- page_address(next_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ page_address(next_page)))>GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
/* Calc. the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
/* Calculate the number of bytes used in this page. */
more = 0;
bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
- if (bytes_used > PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ if (bytes_used > GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
* the danger that we bounce back here before the error has been
* handled, or indeed even printed.
*/
- fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
- gc_active_p ? "garbage collection" : "allocation",
- available, requested);
+ report_heap_exhaustion(available, requested, thread);
if (gc_active_p || (available == 0)) {
/* If we are in GC, or totally out of memory there is no way
* to sanely transfer control to the lisp-side of things.
*/
- print_generation_stats();
- fprintf(stderr, "GC control variables:\n");
- fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
- SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
- (SymbolValue(GC_PENDING, thread) == T) ?
- "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
- "false" : "in progress"));
-#ifdef LISP_FEATURE_SB_THREAD
- fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n",
- SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
-#endif
lose("Heap exhausted, game over.");
}
else {
{
page_index_t first_page, last_page;
page_index_t restart_page = *restart_page_ptr;
+ long nbytes_goal = nbytes;
long bytes_found = 0;
long most_bytes_found = 0;
+    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
+ int small_object = nbytes < GENCGC_CARD_BYTES;
/* FIXME: assert(free_pages_lock is held); */
+ if (nbytes_goal < gencgc_alloc_granularity)
+ nbytes_goal = gencgc_alloc_granularity;
+
/* Toggled by gc_and_save for heap compaction, normally -1. */
if (gencgc_alloc_start_page != -1) {
restart_page = gencgc_alloc_start_page;
}
gc_assert(nbytes>=0);
- if (((unsigned long)nbytes)>=PAGE_BYTES) {
- /* Search for a contiguous free space of at least nbytes,
- * aligned on a page boundary. The page-alignment is strictly
- * speaking needed only for objects at least large_object_size
- * bytes in size. */
- do {
- first_page = restart_page;
- while ((first_page < page_table_pages) &&
- page_allocated_p(first_page))
+ /* Search for a page with at least nbytes of space. We prefer
+ * not to split small objects on multiple pages, to reduce the
+     * number of contiguous allocation regions spanning multiple
+     * pages: this helps avoid excessive conservatism.
+ *
+ * For other objects, we guarantee that they start on their own
+ * page boundary.
+ */
+ first_page = restart_page;
+ while (first_page < page_table_pages) {
+ bytes_found = 0;
+ if (page_free_p(first_page)) {
+ gc_assert(0 == page_table[first_page].bytes_used);
+ bytes_found = GENCGC_CARD_BYTES;
+ } else if (small_object &&
+ (page_table[first_page].allocated == page_type_flag) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0)) {
+ bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used;
+ if (bytes_found < nbytes) {
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
first_page++;
-
- last_page = first_page;
- bytes_found = PAGE_BYTES;
- while ((bytes_found < nbytes) &&
- (last_page < (page_table_pages-1)) &&
- page_free_p(last_page+1)) {
- last_page++;
- bytes_found += PAGE_BYTES;
- gc_assert(0 == page_table[last_page].bytes_used);
- gc_assert(0 == page_table[last_page].write_protected);
+ continue;
}
- if (bytes_found > most_bytes_found)
- most_bytes_found = bytes_found;
- restart_page = last_page + 1;
- } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
-
- } else {
- /* Search for a page with at least nbytes of space. We prefer
- * not to split small objects on multiple pages, to reduce the
- * number of contiguous allocation regions spaning multiple
- * pages: this helps avoid excessive conservativism. */
- first_page = restart_page;
- while (first_page < page_table_pages) {
- if (page_free_p(first_page))
- {
- gc_assert(0 == page_table[first_page].bytes_used);
- bytes_found = PAGE_BYTES;
- break;
- }
- else if ((page_table[first_page].allocated == page_type_flag) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0))
- {
- bytes_found = PAGE_BYTES
- - page_table[first_page].bytes_used;
- if (bytes_found > most_bytes_found)
- most_bytes_found = bytes_found;
- if (bytes_found >= nbytes)
- break;
- }
+ } else {
first_page++;
+ continue;
}
- last_page = first_page;
- restart_page = first_page + 1;
+
+ gc_assert(page_table[first_page].write_protected == 0);
+ for (last_page = first_page+1;
+ ((last_page < page_table_pages) &&
+ page_free_p(last_page) &&
+ (bytes_found < nbytes_goal));
+ last_page++) {
+ bytes_found += GENCGC_CARD_BYTES;
+ gc_assert(0 == page_table[last_page].bytes_used);
+ gc_assert(0 == page_table[last_page].write_protected);
+ }
+
+ if (bytes_found > most_bytes_found) {
+ most_bytes_found = bytes_found;
+ most_bytes_found_from = first_page;
+ most_bytes_found_to = last_page;
+ }
+ if (bytes_found >= nbytes_goal)
+ break;
+
+ first_page = last_page;
}
+ bytes_found = most_bytes_found;
+ restart_page = first_page + 1;
+
/* Check for a failure */
if (bytes_found < nbytes) {
gc_assert(restart_page >= page_table_pages);
gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
}
- gc_assert(page_table[first_page].write_protected == 0);
-
- *restart_page_ptr = first_page;
- return last_page;
+ *restart_page_ptr = most_bytes_found_from;
+ return most_bytes_found_to-1;
}
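The net contract after the rewrite: the scan is first-fit with respect to nbytes_goal, but it remembers the largest run seen, so if no run reaches the goal the largest one is still used provided it covers the original nbytes; only otherwise is heap exhaustion signalled. *restart_page_ptr receives the first page of the chosen run and the return value is its last page, inclusive.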
/* Allocate bytes. All the rest of the special-purpose allocation
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
+ while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_boxed_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
+ /* Should have been unprotected by unprotect_oldspace(). */
+ gc_assert(page_table[next_page].write_protected == 0);
page_table[next_page].gen = new_space;
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[next_page].write_protected = 0;
- }
- remaining_bytes -= PAGE_BYTES;
+ remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
+ while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
page_boxed_p(next_page) &&
page_table[next_page].large_object &&
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
+ while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
page_table[next_page].gen = new_space;
page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- remaining_bytes -= PAGE_BYTES;
+ remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
+ while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
(lispobj *)pointer));
}
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-
/* Helper for valid_lisp_pointer_p and
* possibly_valid_dynamic_space_pointer.
*
* header. */
switch (widetag_of(*start_addr)) {
case CODE_HEADER_WIDETAG:
- /* This case is probably caught above. */
- break;
+ /* Make sure we actually point to a function in the code object,
+ * as opposed to a random point there. */
+        if (SIMPLE_FUN_HEADER_WIDETAG ==
+            widetag_of(*(lispobj *)((unsigned long)pointer - FUN_POINTER_LOWTAG)))
+ return 1;
+ else
+ return 0;
case CLOSURE_HEADER_WIDETAG:
case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
if ((unsigned long)pointer !=
}
break;
case OTHER_POINTER_LOWTAG:
+
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+ /* The all-architecture test below is good as far as it goes,
+ * but an LRA object is similar to a FUN-POINTER: It is
+ * embedded within a CODE-OBJECT pointed to by start_addr, and
+ * cannot be found by simply walking the heap, therefore we
+ * need to check for it. -- AB, 2010-Jun-04 */
+ if ((widetag_of(start_addr[0]) == CODE_HEADER_WIDETAG)) {
+ lispobj *potential_lra =
+ (lispobj *)(((unsigned long)pointer) - OTHER_POINTER_LOWTAG);
+ if ((widetag_of(potential_lra[0]) == RETURN_PC_HEADER_WIDETAG) &&
+ ((potential_lra - HeaderValue(potential_lra[0])) == start_addr)) {
+ return 1; /* It's as good as we can verify. */
+ }
+ }
+#endif
+
if ((unsigned long)pointer !=
((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) {
if (gencgc_verbose) {
return 0;
}
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+
/* Is there any possibility that pointer is a valid Lisp object
* reference, and/or something else (e.g. subroutine call return
* address) which should prevent us from moving the referred-to thing?
return looks_like_valid_lisp_pointer_p(pointer, start_addr);
}
+#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+
/* Adjust large bignum and vector objects. This will adjust the
* allocated region if the size has shrunk, and move unboxed objects
* into unboxed pages. The pages are not promoted here, and the
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
+ while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
page_table[next_page].allocated = boxed;
/* Shouldn't be write-protected at this stage. Essential that the
* pages aren't. */
gc_assert(!page_table[next_page].write_protected);
- remaining_bytes -= PAGE_BYTES;
+ remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
+ while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
/* quick check 2: Check the offset within the page.
*
*/
- if (((unsigned long)addr & (PAGE_BYTES - 1)) >
+ if (((unsigned long)addr & (GENCGC_CARD_BYTES - 1)) >
page_table[addr_page_index].bytes_used)
return;
* address referring to something in a CodeObject). This is
* expensive but important, since it vastly reduces the
* probability that random garbage will be bogusly interpreted as
- * a pointer which prevents a page from moving. */
+ * a pointer which prevents a page from moving.
+ *
+ * This only needs to happen on x86oids, where this is used for
+ * conservative roots. Non-x86oid systems only ever call this
+ * function on known-valid lisp objects. */
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
if (!(code_page_p(addr_page_index)
|| (is_lisp_pointer((lispobj)addr) &&
possibly_valid_dynamic_space_pointer(addr))))
return;
+#endif
/* Find the beginning of the region. Note that there may be
* objects in the region preceding the one that we were passed a
while (page_table[first_page].region_start_offset != 0) {
--first_page;
/* Do some checks. */
- gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
gc_assert(page_table[first_page].gen == from_space);
gc_assert(page_table[first_page].allocated == region_allocation);
}
if (page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
- || (((unsigned long)addr & (PAGE_BYTES - 1))
+ || (((unsigned long)addr & (GENCGC_CARD_BYTES - 1))
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
"weird? ignore ptr 0x%x to freed area of large object\n",
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < PAGE_BYTES)
- /* ..or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[i].bytes_used < GENCGC_CARD_BYTES)
+          /* ..or it is GENCGC_CARD_BYTES and is the last in the block */
|| page_free_p(i+1)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
/* Check that the page is now static. */
gc_assert(page_table[addr_page_index].dont_move != 0);
}
-
-#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-
\f
/* If the given page is not write-protected, then scan it for pointers
* to younger generations or the top temp. generation, if no
/*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
os_protect((void *)page_addr,
- PAGE_BYTES,
+ GENCGC_CARD_BYTES,
OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
for (last_page = i; ; last_page++) {
write_protected =
write_protected && page_table[last_page].write_protected;
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
+            /* Or it is GENCGC_CARD_BYTES and is the last in the block */
|| (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
/* Check whether this is the last page in this
* contiguous block */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
+            /* Or it is GENCGC_CARD_BYTES and is the last in the block */
|| (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
unprotect_oldspace(void)
{
page_index_t i;
+ void *region_addr = 0;
+ void *page_addr = 0;
+ unsigned long region_bytes = 0;
for (i = 0; i < last_free_page; i++) {
if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == from_space)) {
- void *page_start;
-
- page_start = (void *)page_address(i);
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[i].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[i].write_protected = 0;
+ page_addr = page_address(i);
+ if (!region_addr) {
+ /* First region. */
+ region_addr = page_addr;
+ region_bytes = GENCGC_CARD_BYTES;
+ } else if (region_addr + region_bytes == page_addr) {
+                    /* Region continues. */
+ region_bytes += GENCGC_CARD_BYTES;
+ } else {
+ /* Unprotect previous region. */
+ os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+ /* First page in new region. */
+ region_addr = page_addr;
+ region_bytes = GENCGC_CARD_BYTES;
+ }
}
}
}
+ if (region_addr) {
+ /* Unprotect last region. */
+ os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+ }
}
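The batching matters because each os_protect() is a system call (mprotect() on most ports). If, say, from_space pages 5..7 and 9 are write-protected, the rewritten loop makes two calls (one covering three cards starting at page 5, one covering the single card at page 9) where the old per-page code made four.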
/* Work through all the pages and free any in from_space. This
page_table[last_page].bytes_used;
page_table[last_page].allocated = FREE_PAGE_FLAG;
page_table[last_page].bytes_used = 0;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- {
- void *page_start = (void *)page_address(last_page);
-
- if (page_table[last_page].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[last_page].write_protected = 0;
- }
- }
+ /* Should already be unprotected by unprotect_oldspace(). */
+ gc_assert(!page_table[last_page].write_protected);
last_page++;
}
while ((last_page < last_free_page)
}
#endif
+static int
+is_in_stack_space(lispobj ptr)
+{
+ /* For space verification: Pointers can be valid if they point
+ * to a thread stack space. This would be faster if the thread
+ * structures had page-table entries as if they were part of
+ * the heap space. */
+ struct thread *th;
+ for_each_thread(th) {
+ if ((th->control_stack_start <= (lispobj *)ptr) &&
+ (th->control_stack_end >= (lispobj *)ptr)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
static void
verify_space(lispobj *start, size_t words)
{
* page. XX Could check the offset too. */
if (page_allocated_p(page_index)
&& (page_table[page_index].bytes_used == 0))
- lose ("Ptr %x @ %x sees free page.\n", thing, start);
+ lose ("Ptr %p @ %p sees free page.\n", thing, start);
/* Check that it doesn't point to a forwarding pointer! */
if (*((lispobj *)native_pointer(thing)) == 0x01) {
- lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start);
+ lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
}
            /* Check that it's not in the RO space as it would then be a
* pointer from the RO to the dynamic space. */
if (is_in_readonly_space) {
- lose("ptr to dynamic space %x from RO space %x\n",
+ lose("ptr to dynamic space %p from RO space %x\n",
thing, start);
}
/* Does it point to a plausible object? This check slows
* dynamically. */
/*
if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
- lose("ptr %x to invalid object %x\n", thing, start);
+ lose("ptr %p to invalid object %p\n", thing, start);
}
*/
} else {
+ extern void funcallable_instance_tramp;
/* Verify that it points to another valid space. */
- if (!to_readonly_space && !to_static_space) {
- lose("Ptr %x @ %x sees junk.\n", thing, start);
+ if (!to_readonly_space && !to_static_space
+ && (thing != (lispobj)&funcallable_instance_tramp)
+ && !is_in_stack_space(thing)) {
+ lose("Ptr %p @ %p sees junk.\n", thing, start);
}
}
} else {
/* Only when enabled */
&& verify_dynamic_code_check) {
FSHOW((stderr,
- "/code object at %x in the dynamic space\n",
+ "/code object at %p in the dynamic space\n",
start));
}
break;
default:
- lose("Unhandled widetag 0x%x at 0x%x\n",
+ lose("Unhandled widetag %p at %p\n",
widetag_of(*start), start);
}
}
for (last_page = i; ;last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
+            /* Or it is GENCGC_CARD_BYTES and is the last in the block */
|| (page_table[last_page+1].allocated != region_allocation)
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
}
}
} else {
- long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
+ long free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
long *start_addr = (long *)((unsigned long)page_address(page)
+ page_table[page].bytes_used);
}
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
-
static void
-scavenge_control_stack()
+scavenge_control_stack(struct thread *th)
{
- unsigned long control_stack_size;
-
- /* This is going to be a big problem when we try to port threads
- * to PPC... CLH */
- struct thread *th = arch_os_get_current_thread();
lispobj *control_stack =
(lispobj *)(th->control_stack_start);
+ unsigned long control_stack_size =
+ access_control_stack_pointer(th) - control_stack;
- control_stack_size = current_control_stack_pointer - control_stack;
scavenge(control_stack, control_stack_size);
}
-
-/* Scavenging Interrupt Contexts */
-
-static int boxed_registers[] = BOXED_REGISTERS;
-
-static void
-scavenge_interrupt_context(os_context_t * context)
-{
- int i;
-
-#ifdef reg_LIP
- unsigned long lip;
- unsigned long lip_offset;
- int lip_register_pair;
-#endif
- unsigned long pc_code_offset;
-
-#ifdef ARCH_HAS_LINK_REGISTER
- unsigned long lr_code_offset;
-#endif
-#ifdef ARCH_HAS_NPC_REGISTER
- unsigned long npc_code_offset;
#endif
-#ifdef reg_LIP
- /* Find the LIP's register pair and calculate it's offset */
- /* before we scavenge the context. */
-
- /*
- * I (RLT) think this is trying to find the boxed register that is
- * closest to the LIP address, without going past it. Usually, it's
- * reg_CODE or reg_LRA. But sometimes, nothing can be found.
- */
- lip = *os_context_register_addr(context, reg_LIP);
- lip_offset = 0x7FFFFFFF;
- lip_register_pair = -1;
- for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
- unsigned long reg;
- long offset;
- int index;
-
- index = boxed_registers[i];
- reg = *os_context_register_addr(context, index);
- if ((reg & ~((1L<<N_LOWTAG_BITS)-1)) <= lip) {
- offset = lip - reg;
- if (offset < lip_offset) {
- lip_offset = offset;
- lip_register_pair = index;
- }
- }
- }
-#endif /* reg_LIP */
-
- /* Compute the PC's offset from the start of the CODE */
- /* register. */
- pc_code_offset = *os_context_pc_addr(context)
- - *os_context_register_addr(context, reg_CODE);
-#ifdef ARCH_HAS_NPC_REGISTER
- npc_code_offset = *os_context_npc_addr(context)
- - *os_context_register_addr(context, reg_CODE);
-#endif /* ARCH_HAS_NPC_REGISTER */
-
-#ifdef ARCH_HAS_LINK_REGISTER
- lr_code_offset =
- *os_context_lr_addr(context) -
- *os_context_register_addr(context, reg_CODE);
-#endif
-
- /* Scanvenge all boxed registers in the context. */
- for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
- int index;
- lispobj foo;
-
- index = boxed_registers[i];
- foo = *os_context_register_addr(context, index);
- scavenge(&foo, 1);
- *os_context_register_addr(context, index) = foo;
-
- scavenge((lispobj*) &(*os_context_register_addr(context, index)), 1);
- }
-
-#ifdef reg_LIP
- /* Fix the LIP */
-
- /*
- * But what happens if lip_register_pair is -1?
- * *os_context_register_addr on Solaris (see
- * solaris_register_address in solaris-os.c) will return
- * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
- * that what we really want? My guess is that that is not what we
- * want, so if lip_register_pair is -1, we don't touch reg_LIP at
- * all. But maybe it doesn't really matter if LIP is trashed?
- */
- if (lip_register_pair >= 0) {
- *os_context_register_addr(context, reg_LIP) =
- *os_context_register_addr(context, lip_register_pair)
- + lip_offset;
- }
-#endif /* reg_LIP */
-
- /* Fix the PC if it was in from space */
- if (from_space_p(*os_context_pc_addr(context)))
- *os_context_pc_addr(context) =
- *os_context_register_addr(context, reg_CODE) + pc_code_offset;
-
-#ifdef ARCH_HAS_LINK_REGISTER
- /* Fix the LR ditto; important if we're being called from
- * an assembly routine that expects to return using blr, otherwise
- * harmless */
- if (from_space_p(*os_context_lr_addr(context)))
- *os_context_lr_addr(context) =
- *os_context_register_addr(context, reg_CODE) + lr_code_offset;
-#endif
-
-#ifdef ARCH_HAS_NPC_REGISTER
- if (from_space_p(*os_context_npc_addr(context)))
- *os_context_npc_addr(context) =
- *os_context_register_addr(context, reg_CODE) + npc_code_offset;
-#endif /* ARCH_HAS_NPC_REGISTER */
-}
-
-void
-scavenge_interrupt_contexts(void)
-{
- int i, index;
- os_context_t *context;
-
- struct thread *th=arch_os_get_current_thread();
-
- index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0));
-
-#if defined(DEBUG_PRINT_CONTEXT_INDEX)
- printf("Number of active contexts: %d\n", index);
-#endif
-
- for (i = 0; i < index; i++) {
- context = th->interrupt_contexts[i];
- scavenge_interrupt_context(context);
- }
-}
-
-#endif
-
-#if defined(LISP_FEATURE_SB_THREAD)
+#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
static void
preserve_context_registers (os_context_t *c)
{
unsigned long bytes_freed;
page_index_t i;
unsigned long static_space_size;
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
struct thread *th;
-#endif
+
gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
/* The oldest generation can't be raised. */
}
}
}
+#else
+ /* Non-x86oid systems don't have "conservative roots" as such, but
+ * the same mechanism is used for objects pinned for use by alien
+ * code. */
+ for_each_thread(th) {
+ lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
+ while (pin_list != NIL) {
+ struct cons *list_entry =
+ (struct cons *)native_pointer(pin_list);
+ preserve_pointer(list_entry->car);
+ pin_list = list_entry->cdr;
+ }
+ }
#endif
#if QSHOW
* If not x86, we need to scavenge the interrupt context(s) and the
* control stack.
*/
- scavenge_interrupt_contexts();
- scavenge_control_stack();
+ {
+ struct thread *th;
+ for_each_thread(th) {
+ scavenge_interrupt_contexts(th);
+ scavenge_control_stack(th);
+ }
+
+ /* Scrub the unscavenged control stack space, so that we can't run
+ * into any stale pointers in a later GC (this is done by the
+ * stop-for-gc handler in the other threads). */
+ scrub_control_stack();
+ }
#endif
/* Scavenge the Lisp functions of the interrupt handlers, taking
}
static void
-remap_free_pages (page_index_t from, page_index_t to)
+remap_page_range (page_index_t from, page_index_t to)
+{
+ /* There's a mysterious Solaris/x86 problem with using mmap
+ * tricks for memory zeroing. See sbcl-devel thread
+ * "Re: patch: standalone executable redux".
+ */
+#if defined(LISP_FEATURE_SUNOS)
+ zero_and_mark_pages(from, to);
+#else
+ const page_index_t
+ release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
+ release_mask = release_granularity-1,
+ end = to+1,
+ aligned_from = (from+release_mask)&~release_mask,
+ aligned_end = (end&~release_mask);
+
+ if (aligned_from < aligned_end) {
+ zero_pages_with_mmap(aligned_from, aligned_end-1);
+ if (aligned_from != from)
+ zero_and_mark_pages(from, aligned_from-1);
+ if (aligned_end != end)
+ zero_and_mark_pages(aligned_end, end-1);
+ } else {
+ zero_and_mark_pages(from, to);
+ }
+#endif
+}
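The mask arithmetic rounds the range inward to release-granularity boundaries: aligned_from rounds up, aligned_end rounds down. For instance, with a release granularity of 8 cards, from = 3 and to = 20 give aligned_from = 8 and aligned_end = 16, so cards 8..15 go back to the OS via zero_pages_with_mmap() while the ragged edges 3..7 and 16..20 are zeroed in place by zero_and_mark_pages().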
+
+static void
+remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
page_index_t first_page, last_page;
+    if (forcibly) {
+        remap_page_range(from, to);
+        return;
+    }
+
for (first_page = from; first_page <= to; first_page++) {
if (page_allocated_p(first_page) ||
- (page_table[first_page].need_to_zero == 0)) {
+ (page_table[first_page].need_to_zero == 0))
continue;
- }
last_page = first_page + 1;
while (page_free_p(last_page) &&
- (last_page < to) &&
- (page_table[last_page].need_to_zero == 1)) {
+ (last_page <= to) &&
+ (page_table[last_page].need_to_zero == 1))
last_page++;
- }
- /* There's a mysterious Solaris/x86 problem with using mmap
- * tricks for memory zeroing. See sbcl-devel thread
- * "Re: patch: standalone executable redux".
- */
-#if defined(LISP_FEATURE_SUNOS)
- zero_pages(first_page, last_page-1);
-#else
- zero_pages_with_mmap(first_page, last_page-1);
-#endif
+ remap_page_range(first_page, last_page-1);
first_page = last_page;
}
static page_index_t high_water_mark = 0;
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
+ log_generation_stats(gc_logfile, "=== GC Start ===");
gc_active_p = 1;
} else {
raise =
(gen < last_gen)
- || (generations[gen].num_gc >= generations[gen].trigger_age);
+ || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
}
if (gencgc_verbose > 1) {
&& raise
&& (generations[gen].bytes_allocated
> generations[gen].gc_trigger)
- && (gen_av_mem_age(gen)
- > generations[gen].min_av_mem_age))));
+ && (generation_average_age(gen)
+ > generations[gen].minimum_age_before_gc))));
/* Now if gen-1 was raised all generations before gen are empty.
* If it wasn't raised then all generations before gen-1 are empty.
if (gen > small_generation_limit) {
if (last_free_page > high_water_mark)
high_water_mark = last_free_page;
- remap_free_pages(0, high_water_mark);
+ remap_free_pages(0, high_water_mark, 0);
high_water_mark = 0;
}
gc_active_p = 0;
+ log_generation_stats(gc_logfile, "=== GC End ===");
SHOW("returning from collect_garbage");
}
void
gc_free_heap(void)
{
- page_index_t page;
+ page_index_t page, last_page;
if (gencgc_verbose > 1) {
SHOW("entering gc_free_heap");
/* Skip free pages which should already be zero filled. */
if (page_allocated_p(page)) {
-            void *page_start, *addr;
+            void *page_start;
-
- /* Mark the page free. The other slots are assumed invalid
- * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
- * should not be write-protected -- except that the
- * generation is used for the current region but it sets
- * that up. */
- page_table[page].allocated = FREE_PAGE_FLAG;
- page_table[page].bytes_used = 0;
+ for (last_page = page;
+ (last_page < page_table_pages) && page_allocated_p(last_page);
+ last_page++) {
+ /* Mark the page free. The other slots are assumed invalid
+ * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
+ * should not be write-protected -- except that the
+ * generation is used for the current region but it sets
+ * that up. */
+                page_table[last_page].allocated = FREE_PAGE_FLAG;
+                page_table[last_page].bytes_used = 0;
+                page_table[last_page].write_protected = 0;
+ }
#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
* about this change. */
- /* Zero the page. */
page_start = (void *)page_address(page);
-
- /* First, remove any write-protection. */
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[page].write_protected = 0;
-
- os_invalidate(page_start,PAGE_BYTES);
- addr = os_validate(page_start,PAGE_BYTES);
- if (addr == NULL || addr != page_start) {
- lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n",
- page_start,
- addr);
- }
-#else
- page_table[page].write_protected = 0;
+ os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
+ remap_free_pages(page, last_page-1, 1);
+ page = last_page-1;
#endif
} else if (gencgc_zero_check_during_free_heap) {
/* Double-check that the page is zero filled. */
gc_assert(page_free_p(page));
gc_assert(page_table[page].bytes_used == 0);
page_start = (long *)page_address(page);
- for (i=0; i<1024; i++) {
+ for (i=0; i<GENCGC_CARD_BYTES/sizeof(long); i++) {
if (page_start[i] != 0) {
lose("free region not zero at %x\n", page_start + i);
}
/* Compute the number of pages needed for the dynamic space.
* Dynamic space size should be aligned on page size. */
- page_table_pages = dynamic_space_size/PAGE_BYTES;
+ page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
+ /* The page_table must be allocated using "calloc" to initialize
+ * the page structures correctly. There used to be a separate
+ * initialization loop (now commented out; see below) but that was
+ * unnecessary and did hurt startup time. */
page_table = calloc(page_table_pages, sizeof(struct page));
gc_assert(page_table);
heap_base = (void*)DYNAMIC_SPACE_START;
- /* Initialize each page structure. */
- for (i = 0; i < page_table_pages; i++) {
- /* Initialize all pages as free. */
- page_table[i].allocated = FREE_PAGE_FLAG;
- page_table[i].bytes_used = 0;
-
- /* Pages are not write-protected at startup. */
- page_table[i].write_protected = 0;
+ /* The page structures are initialized implicitly when page_table
+ * is allocated with "calloc" above. Formerly we had the following
+ * explicit initialization here (comments converted to C99 style
+ * for readability as C's block comments don't nest):
+ *
+ * // Initialize each page structure.
+ * for (i = 0; i < page_table_pages; i++) {
+ * // Initialize all pages as free.
+ * page_table[i].allocated = FREE_PAGE_FLAG;
+ * page_table[i].bytes_used = 0;
+ *
+ * // Pages are not write-protected at startup.
+ * page_table[i].write_protected = 0;
+ * }
+ *
+ * Without this loop the image starts up much faster when dynamic
+ * space is large -- which it is on 64-bit platforms already by
+ * default -- and when "calloc" for large arrays is implemented
+ * using copy-on-write of a page of zeroes -- which it is at least
+ * on Linux. In this case the pages that page_table is stored in
+ * are not mapped and cleared until the corresponding part of
+ * dynamic space is used. For example, this saves clearing 16 MB of
+ * memory at startup if the page size is 4 KB and the size of
+ * dynamic space is 4 GB.
+ * FREE_PAGE_FLAG must be 0 for this to work correctly which is
+ * asserted below: */
+ {
+ /* Compile time assertion: If triggered, declares an array
+ * of dimension -1 forcing a syntax error. The intent of the
+ * assignment is to avoid an "unused variable" warning. */
+ char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
+ assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
}
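The same compile-time check is often written as a typedef, which fails identically when FREE_PAGE_FLAG is nonzero but needs no dummy self-assignment to silence the unused-variable warning (a sketch; the typedef name is illustrative):

    typedef char assert_free_page_flag_is_zero[(FREE_PAGE_FLAG) ? -1 : 1];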
bytes_allocated = 0;
generations[i].cum_sum_bytes_allocated = 0;
/* the tune-able parameters */
generations[i].bytes_consed_between_gc = 2000000;
- generations[i].trigger_age = 1;
- generations[i].min_av_mem_age = 0.75;
+ generations[i].number_of_gcs_before_promotion = 1;
+ generations[i].minimum_age_before_gc = 0.75;
generations[i].lutexes = NULL;
}
generation_index_t gen = PSEUDO_STATIC_GENERATION;
do {
lispobj *first,*ptr= (lispobj *)page_address(page);
- page_table[page].gen = gen;
- page_table[page].bytes_used = PAGE_BYTES;
- page_table[page].large_object = 0;
- page_table[page].write_protected = 0;
- page_table[page].write_protected_cleared = 0;
- page_table[page].dont_move = 0;
- page_table[page].need_to_zero = 1;
+
+ if (!gencgc_partial_pickup || page_allocated_p(page)) {
+ /* It is possible, though rare, for the saved page table
+ * to contain free pages below alloc_ptr. */
+ page_table[page].gen = gen;
+ page_table[page].bytes_used = GENCGC_CARD_BYTES;
+ page_table[page].large_object = 0;
+ page_table[page].write_protected = 0;
+ page_table[page].write_protected_cleared = 0;
+ page_table[page].dont_move = 0;
+ page_table[page].need_to_zero = 1;
+ }
if (!gencgc_partial_pickup) {
page_table[page].allocated = BOXED_PAGE_FLAG;
gc_assert(ret == 0);
if (page_table[page_index].write_protected) {
/* Unprotect the page. */
- os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
page_table[page_index].write_protected_cleared = 1;
page_table[page_index].write_protected = 0;
} else {
if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(i),
- PAGE_BYTES,
+ GENCGC_CARD_BYTES,
OS_VM_PROT_ALL);
#endif
zero_pages(i, i);