os_vm_size_t large_object_size = 4 * PAGE_BYTES;
#endif
+/* Largest allocation seen since last GC. */
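+/* (A high-water mark, not a running total: the allocator records the
+ * largest single request seen, collect_garbage() consults it when
+ * deciding whether to collect an extra generation, and it is reset to
+ * zero once each GC completes.) */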
+os_vm_size_t large_allocation = 0;
+
\f
/*
* debugging
size_t size;
};
static struct new_area (*new_areas)[];
-static long new_areas_index;
-long max_new_areas;
+static size_t new_areas_index;
+size_t max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
- unsigned long new_area_start,c;
- long i;
+ size_t new_area_start, c;
+ ssize_t i; /* signed: the search loop below tests i >= 0 */
/* Ignore if full. */
if (new_areas_index >= NUM_NEW_AREAS)
return;
/* Search backwards for a prior area that this follows from. If
found this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
- unsigned long area_end =
+ size_t area_end =
npage_bytes((*new_areas)[i].page)
+ (*new_areas)[i].offset
+ (*new_areas)[i].size;
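+ /* (E.g. if entry i covers bytes [S, S+size) and the new area starts
+ * exactly at S+size, the existing entry can simply be grown instead
+ * of consuming another slot.) */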
lispobj
copy_unboxed_object(lispobj object, long nwords)
{
- long tag;
- lispobj *new;
-
- gc_assert(is_lisp_pointer(object));
- gc_assert(from_space_p(object));
- gc_assert((nwords & 0x01) == 0);
-
- /* Get tag of object. */
- tag = lowtag_of(object);
-
- /* Allocate space. */
- new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);
-
- memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
-
- /* Return Lisp pointer of new object. */
- return ((lispobj) new) | tag;
+ return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
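+/* The open-coded version removed above documents the steps the generic
+ * helper is expected to perform: assert that the argument is a lisp
+ * pointer into from_space, allocate nwords*N_WORD_BYTES on a page of
+ * the requested type (UNBOXED_PAGE_FLAG here), memcpy() the words
+ * across, and return the new address with the original lowtag
+ * reapplied. */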
\f
static void
scavenge_newspace_generation(generation_index_t generation)
{
- long i;
+ size_t i;
/* the new_areas array currently being written to by gc_alloc() */
struct new_area (*current_new_areas)[] = &new_areas_1;
- long current_new_areas_index;
+ size_t current_new_areas_index;
/* the new_areas created by the previous scavenge cycle */
struct new_area (*previous_new_areas)[] = NULL;
- long previous_new_areas_index;
+ size_t previous_new_areas_index;
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
}
}
-#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
-static void
-scavenge_control_stack(struct thread *th)
-{
- lispobj *control_stack =
- (lispobj *)(th->control_stack_start);
- unsigned long control_stack_size =
- access_control_stack_pointer(th) - control_stack;
-
- scavenge(control_stack, control_stack_size);
-}
-#endif
-
#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
static void
preserve_context_registers (os_context_t *c)
collect_garbage(generation_index_t last_gen)
{
generation_index_t gen = 0, i;
- int raise;
+ int raise, more = 0;
int gen_to_wp;
/* The largest value of last_free_page seen since the time
* remap_free_pages was called. */
do {
/* Collect the generation. */
- if (gen >= gencgc_oldest_gen_to_gc) {
- /* Never raise the oldest generation. */
+ if (more || (gen >= gencgc_oldest_gen_to_gc)) {
+ /* Never raise the oldest generation, and never raise the extra
+ * generation that is being collected because the more flag was set. */
raise = 0;
+ more = 0;
} else {
raise =
(gen < last_gen)
|| (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
+ /* If we would not normally raise this one, but we're
+ * running low on space in comparison to the object-sizes
+ * we've been seeing, raise it and collect the next one
+ * too. */
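+ /* (Illustrative figures: with a 512MB dynamic space, 480MB already
+ * allocated and a 20MB large_allocation, 2*20MB is at least the 32MB
+ * still free, so this generation is raised and the next one is
+ * collected as well.) */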
+ if (!raise && gen == last_gen) {
+ more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
+ raise = more;
+ }
}
if (gencgc_verbose > 1) {
gen++;
} while ((gen <= gencgc_oldest_gen_to_gc)
&& ((gen < last_gen)
- || ((gen <= gencgc_oldest_gen_to_gc)
- && raise
+ || more
+ || (raise
&& (generations[gen].bytes_allocated
> generations[gen].gc_trigger)
&& (generation_average_age(gen)
update_dynamic_space_free_pointer();
- auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ /* Update auto_gc_trigger. Make sure we trigger the next GC before
+ * running out of heap! */
+ if (bytes_consed_between_gcs <= dynamic_space_size - bytes_allocated)
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ else
+ auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
+
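+ /* (Illustrative figures: with 100MB of a 512MB heap allocated and
+ * bytes_consed_between_gcs of 50MB, the next GC triggers at 150MB;
+ * with 500MB allocated, 50MB no longer fits in the remaining 12MB,
+ * so the trigger lands halfway into what is left, at 506MB.) */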
if(gencgc_verbose)
fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
auto_gc_trigger);
}
gc_active_p = 0;
+ large_allocation = 0;
log_generation_stats(gc_logfile, "=== GC End ===");
SHOW("returning from collect_garbage");
/* Must be inside a PA section. */
gc_assert(get_pseudo_atomic_atomic(thread));
+ if (nbytes > large_allocation)
+ large_allocation = nbytes;
+
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
if (new_free_pointer <= region->end_addr) {
/* we have to go the long way around, it seems. Check whether we
* should GC in the near future
*/
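+ /* Counting nbytes here makes the check fire before an allocation that
+ * would itself carry bytes_allocated past the trigger, rather than
+ * only on the allocation after it. */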
- if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
+ if (auto_gc_trigger && bytes_allocated+nbytes > auto_gc_trigger) {
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */