static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
- * This helps quickly map between an address its page structure.
+ * This helps to quickly map between an address and its page structure.
* page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
- size_t new_area_start, c, i;
+ size_t new_area_start, c;
+ ssize_t i;
/* Ignore if full. */
if (new_areas_index >= NUM_NEW_AREAS)
page_index_t pi1 = find_page_index((void*)addr);
if (pi1 != -1)
- fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
- (unsigned long) addr,
+ fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
+ addr,
pi1,
page_table[pi1].allocated,
page_table[pi1].gen,
/* Update auto_gc_trigger. Make sure we trigger the next GC before
* running out of heap! */
- if (bytes_consed_between_gcs >= dynamic_space_size - bytes_allocated)
+ if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
else
auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
generations[i].num_gc = 0;
generations[i].cum_sum_bytes_allocated = 0;
/* the tune-able parameters */
- generations[i].bytes_consed_between_gc = bytes_consed_between_gcs;
+ generations[i].bytes_consed_between_gc
+ = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
generations[i].number_of_gcs_before_promotion = 1;
generations[i].minimum_age_before_gc = 0.75;
}
#endif
void *new_obj;
void *new_free_pointer;
+ os_vm_size_t trigger_bytes = 0;
gc_assert(nbytes>0);
return(new_obj); /* yup */
}
+ /* We don't want to count nbytes against auto_gc_trigger unless we
+ * have to: it speeds up the tenuring of objects and slows down
+ * allocation. However, unless we do so when allocating _very_
+ * large objects, we are in danger of exhausting the heap without
+ * running sufficient GCs.
+ */
+ if (nbytes >= bytes_consed_between_gcs)
+ trigger_bytes = nbytes;
+
/* we have to go the long way around, it seems. Check whether we
* should GC in the near future
*/
- if (auto_gc_trigger && bytes_allocated+nbytes > auto_gc_trigger) {
+ if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */