os_vm_size_t large_object_size = 4 * PAGE_BYTES;
#endif
+/* Largest single allocation seen since the last GC: set by the
+ * allocator, consulted and then reset by collect_garbage. */
+os_vm_size_t large_allocation = 0;
+
\f
/*
* debugging
size_t size;
};
static struct new_area (*new_areas)[];
-static long new_areas_index;
-long max_new_areas;
+static size_t new_areas_index;
+size_t max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
- unsigned long new_area_start,c;
- long i;
+    size_t new_area_start, c;
+    /* Must stay signed: the search loop below counts i down past zero. */
+    ssize_t i;
/* Ignore if full. */
if (new_areas_index >= NUM_NEW_AREAS)
/* Search backwards for a prior area that this follows from. If
found this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
- unsigned long area_end =
+ size_t area_end =
npage_bytes((*new_areas)[i].page)
+ (*new_areas)[i].offset
+ (*new_areas)[i].size;
}
static inline void *
-gc_quick_alloc_large(long nbytes)
-{
- return gc_general_alloc(nbytes, BOXED_PAGE_FLAG ,ALLOC_QUICK);
-}
-
-static inline void *
gc_alloc_unboxed(long nbytes)
{
return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
{
return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
-
-static inline void *
-gc_quick_alloc_large_unboxed(long nbytes)
-{
- return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
-}
\f
-
-/* Copy a large boxed object. If the object is in a large object
- * region then it is simply promoted, else it is copied. If it's large
- * enough then it's copied to a large object region.
+/* Copy a large object. If the object is in a large object region then
+ * it is simply promoted, else it is copied. If it's large enough then
+ * it's copied to a large object region.
*
- * Vectors may have shrunk. If the object is not copied the space
- * needs to be reclaimed, and the page_tables corrected. */
-lispobj
-copy_large_object(lispobj object, long nwords)
+ * Bignums and vectors may have shrunk. If the object is not copied
+ * the space needs to be reclaimed, and the page_tables corrected. */
+static lispobj
+general_copy_large_object(lispobj object, long nwords, boolean boxedp)
{
int tag;
lispobj *new;
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
+ if ((nwords > 1024*1024) && gencgc_verbose) {
+        FSHOW((stderr, "/general_copy_large_object: %ld bytes\n",
+               nwords*N_WORD_BYTES));
+ }
- /* Check whether it's in a large object region. */
+ /* Check whether it's a large object. */
first_page = find_page_index((void *)object);
gc_assert(first_page >= 0);
if (page_table[first_page].large_object) {
-
- /* Promote the object. */
-
- unsigned long remaining_bytes;
+ /* Promote the object. Note: Unboxed objects may have been
+ * allocated to a BOXED region so it may be necessary to
+ * change the region to UNBOXED. */
+ os_vm_size_t remaining_bytes;
+ os_vm_size_t bytes_freed;
page_index_t next_page;
- unsigned long bytes_freed;
- unsigned long old_bytes_used;
+ page_bytes_t old_bytes_used;
- /* Note: Any page write-protection must be removed, else a
+ /* FIXME: This comment is somewhat stale.
+ *
+ * Note: Any page write-protection must be removed, else a
* later scavenge_newspace may incorrectly not scavenge these
* pages. This would not be necessary if they are added to the
* new areas, but let's do it for them all (they'll probably
* be written anyway?). */
gc_assert(page_table[first_page].region_start_offset == 0);
-
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
+
while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_boxed_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
- /* Should have been unprotected by unprotect_oldspace(). */
- gc_assert(page_table[next_page].write_protected == 0);
-
+ /* Should have been unprotected by unprotect_oldspace()
+ * for boxed objects, and after promotion unboxed ones
+ * should not be on protected pages at all. */
+ gc_assert(!page_table[next_page].write_protected);
+
+ if (boxedp)
+ gc_assert(page_boxed_p(next_page));
+ else {
+ gc_assert(page_allocated_no_region_p(next_page));
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+ }
page_table[next_page].gen = new_space;
remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
- /* Now only one page remains, but the object may have shrunk
- * so there may be more unused pages which will be freed. */
+ /* Now only one page remains, but the object may have shrunk so
+ * there may be more unused pages which will be freed. */
- /* The object may have shrunk but shouldn't have grown. */
+ /* Object may have shrunk but shouldn't have grown - check. */
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- gc_assert(page_boxed_p(next_page));
+
+ if (boxedp)
+ gc_assert(page_boxed_p(next_page));
+ else
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
next_page++;
while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
- page_boxed_p(next_page) &&
+ /* FIXME: It is not obvious to me why this is necessary
+ * as a loop condition: it seems to me that the
+ * region_start_offset test should be sufficient, but
+ * experimentally that is not the case. --NS
+ * 2011-11-28 */
+ (boxedp ?
+ page_boxed_p(next_page) :
+ page_allocated_no_region_p(next_page)) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
-            * object. These pages shouldn't be write-protected as they
-            * should be zero filled. */
+            * object. These pages shouldn't be write-protected, even if
+            * boxed they should be zero filled. */
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
next_page++;
}
- generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
+ if ((bytes_freed > 0) && gencgc_verbose) {
+ FSHOW((stderr,
+ "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
+ bytes_freed));
+ }
+
+ generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
+ bytes_freed;
- generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
+ generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
bytes_allocated -= bytes_freed;
/* Add the region to the new_areas if requested. */
- add_new_area(first_page,0,nwords*N_WORD_BYTES);
+ if (boxedp)
+ add_new_area(first_page,0,nwords*N_WORD_BYTES);
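+        /* (Unboxed pages hold no pointers, so they need no new_areas
+         * entry for later scavenging.) */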
return(object);
+
} else {
/* Get tag of object. */
tag = lowtag_of(object);
/* Allocate space. */
- new = gc_quick_alloc_large(nwords*N_WORD_BYTES);
+ new = gc_general_alloc(nwords*N_WORD_BYTES,
+ (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
+ ALLOC_QUICK);
+ /* Copy the object. */
memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
/* Return Lisp pointer of new object. */
}
}
+lispobj
+copy_large_object(lispobj object, long nwords)
+{
+ return general_copy_large_object(object, nwords, 1);
+}
+
+lispobj
+copy_large_unboxed_object(lispobj object, long nwords)
+{
+ return general_copy_large_object(object, nwords, 0);
+}
+
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, long nwords)
/* Return Lisp pointer of new object. */
return ((lispobj) new) | tag;
}
-
-/* to copy large unboxed objects
- *
- * If the object is in a large object region then it is simply
- * promoted, else it is copied. If it's large enough then it's copied
- * to a large object region.
- *
- * Bignums and vectors may have shrunk. If the object is not copied
- * the space needs to be reclaimed, and the page_tables corrected.
- *
- * KLUDGE: There's a lot of cut-and-paste duplication between this
- * function and copy_large_object(..). -- WHN 20000619 */
-lispobj
-copy_large_unboxed_object(lispobj object, long nwords)
-{
- int tag;
- lispobj *new;
- page_index_t first_page;
-
- gc_assert(is_lisp_pointer(object));
- gc_assert(from_space_p(object));
- gc_assert((nwords & 0x01) == 0);
-
- if ((nwords > 1024*1024) && gencgc_verbose) {
- FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
- nwords*N_WORD_BYTES));
- }
-
- /* Check whether it's a large object. */
- first_page = find_page_index((void *)object);
- gc_assert(first_page >= 0);
-
- if (page_table[first_page].large_object) {
- /* Promote the object. Note: Unboxed objects may have been
- * allocated to a BOXED region so it may be necessary to
- * change the region to UNBOXED. */
- unsigned long remaining_bytes;
- page_index_t next_page;
- unsigned long bytes_freed;
- unsigned long old_bytes_used;
-
- gc_assert(page_table[first_page].region_start_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > GENCGC_CARD_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_allocated_no_region_p(next_page));
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].region_start_offset ==
- npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- remaining_bytes -= GENCGC_CARD_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk so
- * there may be more unused pages which will be freed. */
-
- /* Object may have shrunk but shouldn't have grown - check. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == GENCGC_CARD_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- page_allocated_no_region_p(next_page) &&
- page_table[next_page].large_object &&
- (page_table[next_page].region_start_offset ==
- npage_bytes(next_page - first_page))) {
- /* Checks out OK, free the page. Don't need to both zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected, even if
- * boxed they should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- if ((bytes_freed > 0) && gencgc_verbose) {
- FSHOW((stderr,
- "/copy_large_unboxed bytes_freed=%d\n",
- bytes_freed));
- }
-
- generations[from_space].bytes_allocated -=
- nwords*N_WORD_BYTES + bytes_freed;
- generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
- bytes_allocated -= bytes_freed;
-
- return(object);
- }
- else {
- /* Get tag of object. */
- tag = lowtag_of(object);
-
- /* Allocate space. */
- new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);
-
- /* Copy the object. */
- memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
-
- /* Return Lisp pointer of new object. */
- return ((lispobj) new) | tag;
- }
-}
-
-
-
\f
/*
static void
scavenge_newspace_generation(generation_index_t generation)
{
- long i;
+ size_t i;
/* the new_areas array currently being written to by gc_alloc() */
struct new_area (*current_new_areas)[] = &new_areas_1;
- long current_new_areas_index;
+ size_t current_new_areas_index;
/* the new_areas created by the previous scavenge cycle */
struct new_area (*previous_new_areas)[] = NULL;
- long previous_new_areas_index;
+ size_t previous_new_areas_index;
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
}
}
-#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
-static void
-scavenge_control_stack(struct thread *th)
-{
- lispobj *control_stack =
- (lispobj *)(th->control_stack_start);
- unsigned long control_stack_size =
- access_control_stack_pointer(th) - control_stack;
-
- scavenge(control_stack, control_stack_size);
-}
-#endif
-
#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
static void
preserve_context_registers (os_context_t *c)
collect_garbage(generation_index_t last_gen)
{
generation_index_t gen = 0, i;
- int raise;
+ int raise, more = 0;
int gen_to_wp;
/* The largest value of last_free_page seen since the time
* remap_free_pages was called. */
do {
/* Collect the generation. */
- if (gen >= gencgc_oldest_gen_to_gc) {
- /* Never raise the oldest generation. */
+ if (more || (gen >= gencgc_oldest_gen_to_gc)) {
+            /* Never raise the oldest generation, and never raise an
+             * extra generation collected because of the more flag. */
raise = 0;
+ more = 0;
} else {
raise =
(gen < last_gen)
|| (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
+ /* If we would not normally raise this one, but we're
+ * running low on space in comparison to the object-sizes
+ * we've been seeing, raise it and collect the next one
+ * too. */
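+            /* (A worked example: with a 512MB dynamic space, 480MB
+             * allocated, and a 20MB large_allocation, 2*20MB >= 32MB
+             * free, so this generation is raised and the next one is
+             * collected too.) */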
+ if (!raise && gen == last_gen) {
+ more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
+ raise = more;
+ }
}
if (gencgc_verbose > 1) {
gen++;
} while ((gen <= gencgc_oldest_gen_to_gc)
&& ((gen < last_gen)
- || ((gen <= gencgc_oldest_gen_to_gc)
- && raise
+ || more
+ || (raise
&& (generations[gen].bytes_allocated
> generations[gen].gc_trigger)
&& (generation_average_age(gen)
update_dynamic_space_free_pointer();
- auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ /* Update auto_gc_trigger. Make sure we trigger the next GC before
+ * running out of heap! */
+    if (bytes_consed_between_gcs <= dynamic_space_size - bytes_allocated)
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ else
+ auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
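+    /* (E.g. if bytes_consed_between_gcs is 50MB but only 40MB of
+     * dynamic space remains, the trigger lands 20MB into the
+     * remaining space rather than 10MB past the end of the heap.) */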
+
if(gencgc_verbose)
- fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+ fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
auto_gc_trigger);
/* If we did a big GC (arbitrarily defined as gen > 1), release memory
}
gc_active_p = 0;
+ large_allocation = 0;
log_generation_stats(gc_logfile, "=== GC End ===");
SHOW("returning from collect_garbage");
/* Must be inside a PA section. */
gc_assert(get_pseudo_atomic_atomic(thread));
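+    /* Remember the largest single allocation request seen since the
+     * last GC; collect_garbage uses it to decide whether to collect
+     * an extra generation, and then resets it. */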
+ if (nbytes > large_allocation)
+ large_allocation = nbytes;
+
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
if (new_free_pointer <= region->end_addr) {
/* we have to go the long way around, it seems. Check whether we
* should GC in the near future
*/
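+    /* Counting the pending nbytes towards the trigger keeps a single
+     * large request from quietly overshooting it. */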
- if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
+ if (auto_gc_trigger && bytes_allocated+nbytes > auto_gc_trigger) {
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */
*
* Return true if this signal is a normal generational GC thing that
* we were able to handle, or false if it was abnormal and control
- * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
+ * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
+ *
+ * We have two control flags for this: one causes us to ignore faults
+ * on unprotected pages completely, and the second complains to stderr
+ * but allows us to continue without losing.
+ */
+extern boolean ignore_memoryfaults_on_unprotected_pages;
+boolean ignore_memoryfaults_on_unprotected_pages = 0;
+
+extern boolean continue_after_memoryfault_on_unprotected_pages;
+boolean continue_after_memoryfault_on_unprotected_pages = 0;
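+
+/* A sketch of toggling these from Lisp via the alien interface
+ * (assumes the standard SB-ALIEN package; illustration only, not
+ * part of this change):
+ *
+ *   (setf (sb-alien:extern-alien
+ *          "ignore_memoryfaults_on_unprotected_pages"
+ *          sb-alien:boolean)
+ *         t)
+ */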
int
gencgc_handle_wp_violation(void* fault_addr)
os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
page_table[page_index].write_protected_cleared = 1;
page_table[page_index].write_protected = 0;
- } else {
+ } else if (!ignore_memoryfaults_on_unprotected_pages) {
/* The only acceptable reason for this signal on a heap
* access is that GENCGC write-protected the page.
* However, if two CPUs hit a wp page near-simultaneously,
* we had better not have the second one lose here if it
* does this test after the first one has already set wp=0
*/
- if(page_table[page_index].write_protected_cleared != 1)
- lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
- page_index, boxed_region.first_page,
- boxed_region.last_page);
+ if(page_table[page_index].write_protected_cleared != 1) {
+ void lisp_backtrace(int frames);
+ lisp_backtrace(10);
+ fprintf(stderr,
+ "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
+ " boxed_region.first_page: %"PAGE_INDEX_FMT","
+ " boxed_region.last_page %"PAGE_INDEX_FMT"\n"
+ " page.region_start_offset: %"OS_VM_SIZE_FMT"\n"
+ " page.bytes_used: %"PAGE_BYTES_FMT"\n"
+ " page.allocated: %d\n"
+ " page.write_protected: %d\n"
+ " page.write_protected_cleared: %d\n"
+ " page.generation: %d\n",
+ fault_addr,
+ page_index,
+ boxed_region.first_page,
+ boxed_region.last_page,
+ page_table[page_index].region_start_offset,
+ page_table[page_index].bytes_used,
+ page_table[page_index].allocated,
+ page_table[page_index].write_protected,
+ page_table[page_index].write_protected_cleared,
+ page_table[page_index].gen);
+ if (!continue_after_memoryfault_on_unprotected_pages)
+ lose("Feh.\n");
+ }
}
ret = thread_mutex_unlock(&free_pages_lock);
gc_assert(ret == 0);