gencgc: scale generation.bytes_consed_between_gc to number of gens
diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 6e49fde..a73bdca 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -79,13 +79,16 @@ boolean enable_page_protection = 1;
 
 /* the minimum size (in bytes) for a large object*/
 #if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
-long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
 #elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
-long large_object_size = 4 * GENCGC_CARD_BYTES;
+os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
 #else
-long large_object_size = 4 * PAGE_BYTES;
+os_vm_size_t large_object_size = 4 * PAGE_BYTES;
 #endif
 
+/* Largest allocation seen since last GC. */
+os_vm_size_t large_allocation = 0;
+
 \f
 /*
  * debugging
@@ -165,7 +168,7 @@ boolean gc_active_p = 0;
 static boolean conservative_stack = 1;
 
 /* An array of page structures is allocated on gc initialization.
- * This helps quickly map between an address its page structure.
+ * This helps to quickly map between an address and its page structure.
  * page_table_pages is set from the size of the dynamic space. */
 page_index_t page_table_pages;
 struct page *page_table;
@@ -872,15 +875,11 @@ gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_
 
     /* we can do this after releasing free_pages_lock */
     if (gencgc_zero_check) {
-        long *p;
-        for (p = (long *)alloc_region->start_addr;
-             p < (long *)alloc_region->end_addr; p++) {
+        word_t *p;
+        for (p = (word_t *)alloc_region->start_addr;
+             p < (word_t *)alloc_region->end_addr; p++) {
             if (*p != 0) {
-                /* KLUDGE: It would be nice to use %lx and explicit casts
-                 * (long) in code like this, so that it is less likely to
-                 * break randomly when running on a machine with different
-                 * word sizes. -- WHN 19991129 */
-                lose("The new region at %x is not zero (start=%p, end=%p).\n",
+                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                      p, alloc_region->start_addr, alloc_region->end_addr);
             }
         }
@@ -911,15 +910,15 @@ struct new_area {
     size_t size;
 };
 static struct new_area (*new_areas)[];
-static long new_areas_index;
-long max_new_areas;
+static size_t new_areas_index;
+size_t max_new_areas;
 
 /* Add a new area to new_areas. */
 static void
 add_new_area(page_index_t first_page, size_t offset, size_t size)
 {
-    unsigned long new_area_start,c;
-    long i;
+    size_t new_area_start, c;
+    ssize_t i;
 
     /* Ignore if full. */
     if (new_areas_index >= NUM_NEW_AREAS)
@@ -943,7 +942,7 @@ add_new_area(page_index_t first_page, size_t offset, size_t size)
     /* Search backwards for a prior area that this follows from. If
        found this will save adding a new area. */
     for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
-        unsigned long area_end =
+        size_t area_end =
             npage_bytes((*new_areas)[i].page)
             + (*new_areas)[i].offset
             + (*new_areas)[i].size;
@@ -1120,13 +1119,11 @@ static inline void *gc_quick_alloc(long nbytes);
 void *
 gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
 {
-    page_index_t first_page;
-    page_index_t last_page;
-    int orig_first_page_bytes_used;
-    long byte_cnt;
-    int more;
-    unsigned long bytes_used;
-    page_index_t next_page;
+    boolean more;
+    page_index_t first_page, next_page, last_page;
+    page_bytes_t orig_first_page_bytes_used;
+    os_vm_size_t byte_cnt;
+    os_vm_size_t bytes_used;
     int ret;
 
     ret = thread_mutex_lock(&free_pages_lock);
@@ -1414,12 +1411,6 @@ gc_quick_alloc(long nbytes)
 }
 
 static inline void *
-gc_quick_alloc_large(long nbytes)
-{
-    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG ,ALLOC_QUICK);
-}
-
-static inline void *
 gc_alloc_unboxed(long nbytes)
 {
     return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
@@ -1430,166 +1421,15 @@ gc_quick_alloc_unboxed(long nbytes)
 {
     return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
 }
-
-static inline void *
-gc_quick_alloc_large_unboxed(long nbytes)
-{
-    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
-}
 \f
-
-/* Copy a large boxed object. If the object is in a large object
- * region then it is simply promoted, else it is copied. If it's large
- * enough then it's copied to a large object region.
- *
- * Vectors may have shrunk. If the object is not copied the space
- * needs to be reclaimed, and the page_tables corrected. */
-lispobj
-copy_large_object(lispobj object, long nwords)
-{
-    int tag;
-    lispobj *new;
-    page_index_t first_page;
-
-    gc_assert(is_lisp_pointer(object));
-    gc_assert(from_space_p(object));
-    gc_assert((nwords & 0x01) == 0);
-
-
-    /* Check whether it's in a large object region. */
-    first_page = find_page_index((void *)object);
-    gc_assert(first_page >= 0);
-
-    if (page_table[first_page].large_object) {
-
-        /* Promote the object. */
-
-        unsigned long remaining_bytes;
-        page_index_t next_page;
-        unsigned long bytes_freed;
-        unsigned long old_bytes_used;
-
-        /* Note: Any page write-protection must be removed, else a
-         * later scavenge_newspace may incorrectly not scavenge these
-         * pages. This would not be necessary if they are added to the
-         * new areas, but let's do it for them all (they'll probably
-         * be written anyway?). */
-
-        gc_assert(page_table[first_page].region_start_offset == 0);
-
-        next_page = first_page;
-        remaining_bytes = nwords*N_WORD_BYTES;
-        while (remaining_bytes > GENCGC_CARD_BYTES) {
-            gc_assert(page_table[next_page].gen == from_space);
-            gc_assert(page_boxed_p(next_page));
-            gc_assert(page_table[next_page].large_object);
-            gc_assert(page_table[next_page].region_start_offset ==
-                      npage_bytes(next_page-first_page));
-            gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
-            /* Should have been unprotected by unprotect_oldspace(). */
-            gc_assert(page_table[next_page].write_protected == 0);
-
-            page_table[next_page].gen = new_space;
-
-            remaining_bytes -= GENCGC_CARD_BYTES;
-            next_page++;
-        }
-
-        /* Now only one page remains, but the object may have shrunk
-         * so there may be more unused pages which will be freed. */
-
-        /* The object may have shrunk but shouldn't have grown. */
-        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
-        page_table[next_page].gen = new_space;
-        gc_assert(page_boxed_p(next_page));
-
-        /* Adjust the bytes_used. */
-        old_bytes_used = page_table[next_page].bytes_used;
-        page_table[next_page].bytes_used = remaining_bytes;
-
-        bytes_freed = old_bytes_used - remaining_bytes;
-
-        /* Free any remaining pages; needs care. */
-        next_page++;
-        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
-               (page_table[next_page].gen == from_space) &&
-               page_boxed_p(next_page) &&
-               page_table[next_page].large_object &&
-               (page_table[next_page].region_start_offset ==
-                npage_bytes(next_page - first_page))) {
-            /* Checks out OK, free the page. Don't need to bother zeroing
-             * pages as this should have been done before shrinking the
-             * object. These pages shouldn't be write-protected as they
-             * should be zero filled. */
-            gc_assert(page_table[next_page].write_protected == 0);
-
-            old_bytes_used = page_table[next_page].bytes_used;
-            page_table[next_page].allocated = FREE_PAGE_FLAG;
-            page_table[next_page].bytes_used = 0;
-            bytes_freed += old_bytes_used;
-            next_page++;
-        }
-
-        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
-            + bytes_freed;
-        generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
-        bytes_allocated -= bytes_freed;
-
-        /* Add the region to the new_areas if requested. */
-        add_new_area(first_page,0,nwords*N_WORD_BYTES);
-
-        return(object);
-    } else {
-        /* Get tag of object. */
-        tag = lowtag_of(object);
-
-        /* Allocate space. */
-        new = gc_quick_alloc_large(nwords*N_WORD_BYTES);
-
-        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
-
-        /* Return Lisp pointer of new object. */
-        return ((lispobj) new) | tag;
-    }
-}
-
-/* to copy unboxed objects */
-lispobj
-copy_unboxed_object(lispobj object, long nwords)
-{
-    long tag;
-    lispobj *new;
-
-    gc_assert(is_lisp_pointer(object));
-    gc_assert(from_space_p(object));
-    gc_assert((nwords & 0x01) == 0);
-
-    /* Get tag of object. */
-    tag = lowtag_of(object);
-
-    /* Allocate space. */
-    new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);
-
-    memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
-
-    /* Return Lisp pointer of new object. */
-    return ((lispobj) new) | tag;
-}
-
-/* to copy large unboxed objects
- *
- * If the object is in a large object region then it is simply
- * promoted, else it is copied. If it's large enough then it's copied
- * to a large object region.
+/* Copy a large object. If the object is in a large object region then
+ * it is simply promoted, else it is copied. If it's large enough then
+ * it's copied to a large object region.
  *
  * Bignums and vectors may have shrunk. If the object is not copied
- * the space needs to be reclaimed, and the page_tables corrected.
- *
- * KLUDGE: There's a lot of cut-and-paste duplication between this
- * function and copy_large_object(..). -- WHN 20000619 */
-lispobj
-copy_large_unboxed_object(lispobj object, long nwords)
+ * the space needs to be reclaimed, and the page_tables corrected. */
+static lispobj
+general_copy_large_object(lispobj object, long nwords, boolean boxedp)
 {
     int tag;
     lispobj *new;
@@ -1600,7 +1440,7 @@ copy_large_unboxed_object(lispobj object, long nwords)
     gc_assert((nwords & 0x01) == 0);
 
     if ((nwords > 1024*1024) && gencgc_verbose) {
-        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
+        FSHOW((stderr, "/general_copy_large_object: %d bytes\n",
                nwords*N_WORD_BYTES));
     }
 
@@ -1612,25 +1452,42 @@ copy_large_unboxed_object(lispobj object, long nwords)
         /* Promote the object. Note: Unboxed objects may have been
          * allocated to a BOXED region so it may be necessary to
          * change the region to UNBOXED. */
-        unsigned long remaining_bytes;
+        os_vm_size_t remaining_bytes;
+        os_vm_size_t bytes_freed;
         page_index_t next_page;
-        unsigned long bytes_freed;
-        unsigned long old_bytes_used;
+        page_bytes_t old_bytes_used;
 
-        gc_assert(page_table[first_page].region_start_offset == 0);
+        /* FIXME: This comment is somewhat stale.
+         *
+         * Note: Any page write-protection must be removed, else a
+         * later scavenge_newspace may incorrectly not scavenge these
+         * pages. This would not be necessary if they are added to the
+         * new areas, but let's do it for them all (they'll probably
+         * be written anyway?). */
 
+        gc_assert(page_table[first_page].region_start_offset == 0);
         next_page = first_page;
         remaining_bytes = nwords*N_WORD_BYTES;
+
         while (remaining_bytes > GENCGC_CARD_BYTES) {
             gc_assert(page_table[next_page].gen == from_space);
-            gc_assert(page_allocated_no_region_p(next_page));
             gc_assert(page_table[next_page].large_object);
             gc_assert(page_table[next_page].region_start_offset ==
                       npage_bytes(next_page-first_page));
             gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
-
+            /* Should have been unprotected by unprotect_oldspace()
+             * for boxed objects, and after promotion unboxed ones
+             * should not be on protected pages at all. */
+            gc_assert(!page_table[next_page].write_protected);
+
+            if (boxedp)
+                gc_assert(page_boxed_p(next_page));
+            else {
+                gc_assert(page_allocated_no_region_p(next_page));
+                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+            }
             page_table[next_page].gen = new_space;
-            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+
             remaining_bytes -= GENCGC_CARD_BYTES;
             next_page++;
         }
@@ -1642,7 +1499,11 @@ copy_large_unboxed_object(lispobj object, long nwords)
         gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
 
         page_table[next_page].gen = new_space;
-        page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+
+        if (boxedp)
+            gc_assert(page_boxed_p(next_page));
+        else
+            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
 
         /* Adjust the bytes_used. */
         old_bytes_used = page_table[next_page].bytes_used;
@@ -1654,7 +1515,14 @@ copy_large_unboxed_object(lispobj object, long nwords)
         next_page++;
         while ((old_bytes_used == GENCGC_CARD_BYTES) &&
                (page_table[next_page].gen == from_space) &&
-               page_allocated_no_region_p(next_page) &&
+               /* FIXME: It is not obvious to me why this is necessary
+                * as a loop condition: it seems to me that the
+                * region_start_offset test should be sufficient, but
+                * experimentally that is not the case. --NS
+                * 2011-11-28 */
+               (boxedp ?
+                page_boxed_p(next_page) :
+                page_allocated_no_region_p(next_page)) &&
                page_table[next_page].large_object &&
                (page_table[next_page].region_start_offset ==
                 npage_bytes(next_page - first_page))) {
@@ -1673,23 +1541,29 @@ copy_large_unboxed_object(lispobj object, long nwords)
 
         if ((bytes_freed > 0) && gencgc_verbose) {
             FSHOW((stderr,
-                   "/copy_large_unboxed bytes_freed=%d\n",
+                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
                    bytes_freed));
         }
 
-        generations[from_space].bytes_allocated -=
-            nwords*N_WORD_BYTES + bytes_freed;
+        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
+            + bytes_freed;
         generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
         bytes_allocated -= bytes_freed;
 
+        /* Add the region to the new_areas if requested. */
+        if (boxedp)
+            add_new_area(first_page,0,nwords*N_WORD_BYTES);
+
         return(object);
-    }
-    else {
+
+    } else {
         /* Get tag of object. */
         tag = lowtag_of(object);
 
         /* Allocate space. */
-        new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);
+        new = gc_general_alloc(nwords*N_WORD_BYTES,
+                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
+                               ALLOC_QUICK);
 
         /* Copy the object. */
         memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
@@ -1699,8 +1573,24 @@ copy_large_unboxed_object(lispobj object, long nwords)
     }
 }
 
+lispobj
+copy_large_object(lispobj object, long nwords)
+{
+    return general_copy_large_object(object, nwords, 1);
+}
 
+lispobj
+copy_large_unboxed_object(lispobj object, long nwords)
+{
+    return general_copy_large_object(object, nwords, 0);
+}
 
+/* to copy unboxed objects */
+lispobj
+copy_unboxed_object(lispobj object, long nwords)
+{
+    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
+}
 \f
 
 /*
@@ -1721,13 +1611,13 @@ static lispobj trans_boxed(lispobj object);
  * Currently only absolute fixups to the constant vector, or to the
  * code area are checked. */
 void
-sniff_code_object(struct code *code, unsigned long displacement)
+sniff_code_object(struct code *code, os_vm_size_t displacement)
 {
 #ifdef LISP_FEATURE_X86
     long nheader_words, ncode_words, nwords;
-    void *p;
-    void *constants_start_addr = NULL, *constants_end_addr;
-    void *code_start_addr, *code_end_addr;
+    os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
+    os_vm_address_t code_start_addr, code_end_addr;
+    os_vm_address_t code_addr = (os_vm_address_t)code;
     int fixup_found = 0;
 
     if (!check_code_fixups)
@@ -1739,10 +1629,10 @@ sniff_code_object(struct code *code, unsigned long displacement)
     nheader_words = HeaderValue(*(lispobj *)code);
     nwords = ncode_words + nheader_words;
 
-    constants_start_addr = (void *)code + 5*N_WORD_BYTES;
-    constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES;
-    code_start_addr = (void *)code + nheader_words*N_WORD_BYTES;
-    code_end_addr = (void *)code + nwords*N_WORD_BYTES;
+    constants_start_addr = code_addr + 5*N_WORD_BYTES;
+    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_end_addr = code_addr + nwords*N_WORD_BYTES;
 
     /* Work through the unboxed code. */
     for (p = code_start_addr; p < code_end_addr; p++) {
@@ -1759,8 +1649,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
         /* Check for code references. */
         /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
-        if ((data >= (code_start_addr-displacement))
-            && (data < (code_end_addr-displacement))) {
+        if ((data >= (void*)(code_start_addr-displacement))
+            && (data < (void*)(code_end_addr-displacement))) {
             /* function header */
             if ((d4 == 0x5e)
                 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
@@ -1802,8 +1692,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
         /* Check for a 32 bit word that looks like an absolute
            reference to within the constant vector. Constant references
            will be aligned. */
-        if ((data >= (constants_start_addr-displacement))
-            && (data < (constants_end_addr-displacement))
+        if ((data >= (void*)(constants_start_addr-displacement))
+            && (data < (void*)(constants_end_addr-displacement))
             && (((unsigned)data & 0x3) == 0)) {
             /*  Mov eax,m32 */
             if (d1 == 0xa1) {
@@ -1901,11 +1791,12 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
 /* x86-64 uses pc-relative addressing instead of this kludge */
 #ifndef LISP_FEATURE_X86_64
     long nheader_words, ncode_words, nwords;
-    void *constants_start_addr, *constants_end_addr;
-    void *code_start_addr, *code_end_addr;
+    os_vm_address_t constants_start_addr, constants_end_addr;
+    os_vm_address_t code_start_addr, code_end_addr;
+    os_vm_address_t code_addr = (os_vm_address_t)new_code;
+    os_vm_address_t old_addr = (os_vm_address_t)old_code;
+    os_vm_size_t displacement = code_addr - old_addr;
     lispobj fixups = NIL;
-    unsigned long displacement =
-        (unsigned long)new_code - (unsigned long)old_code;
     struct vector *fixups_vector;
 
     ncode_words = fixnum_value(new_code->code_size);
@@ -1914,10 +1805,10 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
     /* FSHOW((stderr,
              "/compiled code object at %x: header words = %d, code words = %d\n",
              new_code, nheader_words, ncode_words)); */
-    constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
-    constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
-    code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
-    code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
+    constants_start_addr = code_addr + 5*N_WORD_BYTES;
+    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_end_addr = code_addr + nwords*N_WORD_BYTES;
     /*
     FSHOW((stderr,
            "/const start = %x, end = %x\n",
@@ -1965,24 +1856,22 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
         long length = fixnum_value(fixups_vector->length);
         long i;
         for (i = 0; i < length; i++) {
-            unsigned long offset = fixups_vector->data[i];
+            long offset = fixups_vector->data[i];
             /* Now check the current value of offset. */
-            unsigned long old_value =
-                *(unsigned long *)((unsigned long)code_start_addr + offset);
+            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);
 
             /* If it's within the old_code object then it must be an
              * absolute fixup (relative ones are not saved) */
-            if ((old_value >= (unsigned long)old_code)
-                && (old_value < ((unsigned long)old_code
-                                 + nwords*N_WORD_BYTES)))
+            if ((old_value >= old_addr)
+                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
-                *(unsigned long *)((unsigned long)code_start_addr + offset) =
+                *(os_vm_address_t *)(code_start_addr + offset) =
                     old_value + displacement;
             else
                 /* It is outside the old code object so it must be a
                  * relative fixup (absolute fixups are not saved). So
                  * subtract the displacement. */
-                *(unsigned long *)((unsigned long)code_start_addr + offset) =
+                *(os_vm_address_t *)(code_start_addr + offset) =
                     old_value - displacement;
         }
     } else {
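
The sign convention in the fixup loop above is easiest to check with a
worked example (all addresses invented for illustration):

    /* Suppose old_code sat at 0x10000000 and new_code at 0x10400000,
     * so displacement = 0x00400000.
     *
     * Absolute fixup: a word in the code holds 0x10000040, a pointer
     * into the old object. It falls within [old_addr, old_addr +
     * nwords*N_WORD_BYTES), so the loop adds displacement, yielding
     * 0x10400040 -- the same offset into the new object.
     *
     * Relative fixup: the stored value is target - next_insn_addr.
     * The target stayed put while next_insn_addr grew by displacement,
     * so the stored value must shrink by displacement, which is why
     * the else branch subtracts it. */
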
@@ -2717,15 +2606,15 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
 static void
 scavenge_newspace_generation(generation_index_t generation)
 {
-    long i;
+    size_t i;
 
     /* the new_areas array currently being written to by gc_alloc() */
     struct new_area (*current_new_areas)[] = &new_areas_1;
-    long current_new_areas_index;
+    size_t current_new_areas_index;
 
     /* the new_areas created by the previous scavenge cycle */
     struct new_area (*previous_new_areas)[] = NULL;
-    long previous_new_areas_index;
+    size_t previous_new_areas_index;
 
     /* Flush the current regions updating the tables. */
     gc_alloc_update_all_page_tables();
@@ -2959,8 +2848,8 @@ print_ptr(lispobj *addr)
     page_index_t pi1 = find_page_index((void*)addr);
 
     if (pi1 != -1)
-        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
-                (unsigned long) addr,
+        fprintf(stderr,"  %p: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
+                addr,
                 pi1,
                 page_table[pi1].allocated,
                 page_table[pi1].gen,
@@ -3413,19 +3302,6 @@ write_protect_generation_pages(generation_index_t generation)
     }
 }
 
-#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
-static void
-scavenge_control_stack(struct thread *th)
-{
-    lispobj *control_stack =
-        (lispobj *)(th->control_stack_start);
-    unsigned long control_stack_size =
-        access_control_stack_pointer(th) - control_stack;
-
-    scavenge(control_stack, control_stack_size);
-}
-#endif
-
 #if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
 static void
 preserve_context_registers (os_context_t *c)
@@ -3845,7 +3721,7 @@ void
 collect_garbage(generation_index_t last_gen)
 {
     generation_index_t gen = 0, i;
-    int raise;
+    int raise, more = 0;
     int gen_to_wp;
     /* The largest value of last_free_page seen since the time
      * remap_free_pages was called. */
@@ -3878,13 +3754,23 @@ collect_garbage(generation_index_t last_gen)
     do {
         /* Collect the generation. */
 
-        if (gen >= gencgc_oldest_gen_to_gc) {
-            /* Never raise the oldest generation. */
+        if (more || (gen >= gencgc_oldest_gen_to_gc)) {
+            /* Never raise the oldest generation. Never raise the
+             * extra generation collected due to the more flag. */
             raise = 0;
+            more = 0;
         } else {
             raise =
                 (gen < last_gen)
                 || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
+            /* If we would not normally raise this one, but we're
+             * running low on space in comparison to the object-sizes
+             * we've been seeing, raise it and collect the next one
+             * too. */
+            if (!raise && gen == last_gen) {
+                more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
+                raise = more;
+            }
         }
 
         if (gencgc_verbose > 1) {
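
The raise-more heuristic above reads clearest as a predicate; a minimal
sketch (this helper is not in the source, it merely restates the test
using the same globals), followed by invented numbers:

    /* Hypothetical restatement of the test above. */
    static boolean
    running_low_on_space_p(void)
    {
        /* Raise early when twice the largest allocation seen since
         * the last GC no longer fits in the remaining heap. */
        return (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
    }

    /* Example: dynamic_space_size = 512 MiB, bytes_allocated = 400 MiB,
     * large_allocation = 60 MiB. Headroom is 112 MiB and 2*60 = 120 MiB
     * exceeds it, so this generation is raised and the next one is
     * collected too. */
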
@@ -3917,8 +3803,8 @@ collect_garbage(generation_index_t last_gen)
         gen++;
     } while ((gen <= gencgc_oldest_gen_to_gc)
              && ((gen < last_gen)
-                 || ((gen <= gencgc_oldest_gen_to_gc)
-                     && raise
+                 || more
+                 || (raise
                      && (generations[gen].bytes_allocated
                          > generations[gen].gc_trigger)
                      && (generation_average_age(gen)
@@ -3960,9 +3846,15 @@ collect_garbage(generation_index_t last_gen)
 
     update_dynamic_space_free_pointer();
 
-    auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+    /* Update auto_gc_trigger. Make sure we trigger the next GC before
+     * running out of heap! */
+    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
+        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+    else
+        auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
+
     if(gencgc_verbose)
-        fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+        fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
                 auto_gc_trigger);
 
     /* If we did a big GC (arbitrarily defined as gen > 1), release memory
@@ -3976,6 +3868,7 @@ collect_garbage(generation_index_t last_gen)
     }
 
     gc_active_p = 0;
+    large_allocation = 0;
 
     log_generation_stats(gc_logfile, "=== GC End ===");
     SHOW("returning from collect_garbage");
@@ -4145,7 +4038,8 @@ gc_init(void)
         generations[i].num_gc = 0;
         generations[i].cum_sum_bytes_allocated = 0;
         /* the tune-able parameters */
-        generations[i].bytes_consed_between_gc = bytes_consed_between_gcs;
+        generations[i].bytes_consed_between_gc
+            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
         generations[i].number_of_gcs_before_promotion = 1;
         generations[i].minimum_age_before_gc = 0.75;
     }
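
This division is the change the commit title refers to. A worked
example, assuming bytes_consed_between_gcs = 50 MiB and
HIGHEST_NORMAL_GENERATION = 5 (its usual value; check the headers of
the build at hand):

    /* Per-generation threshold after the change:
     *
     *   generations[i].bytes_consed_between_gc = 50 MiB / 5 = 10 MiB
     *
     * Previously every generation received the full 50 MiB, so the
     * per-generation thresholds summed across the normal generations
     * came to 5x the intended global budget. */
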
@@ -4233,6 +4127,7 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
 #endif
     void *new_obj;
     void *new_free_pointer;
+    os_vm_size_t trigger_bytes = 0;
 
     gc_assert(nbytes>0);
 
@@ -4243,6 +4138,9 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
     /* Must be inside a PA section. */
     gc_assert(get_pseudo_atomic_atomic(thread));
 
+    if (nbytes > large_allocation)
+        large_allocation = nbytes;
+
     /* maybe we can do this quickly ... */
     new_free_pointer = region->free_pointer + nbytes;
     if (new_free_pointer <= region->end_addr) {
@@ -4251,10 +4149,19 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
         return(new_obj);        /* yup */
     }
 
+    /* We don't want to count nbytes against auto_gc_trigger unless we
+     * have to: it speeds up the tenuring of objects and slows down
+     * allocation. However, unless we do so when allocating _very_
+     * large objects we are in danger of exhausting the heap without
+     * running sufficient GCs.
+     */
+    if (nbytes >= bytes_consed_between_gcs)
+        trigger_bytes = nbytes;
+
     /* we have to go the long way around, it seems. Check whether we
      * should GC in the near future
      */
-    if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
+    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
         /* Don't flood the system with interrupts if the need to gc is
          * already noted. This can happen for example when SUB-GC
          * allocates or after a gc triggered in a WITHOUT-GCING. */
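
Concretely, with an invented budget of 50 MiB for
bytes_consed_between_gcs, the trigger_bytes rule above works out as
follows:

    /* nbytes = 1 KiB  -> trigger_bytes = 0      (common fast case:
     *                     nothing extra counted against the trigger)
     * nbytes = 80 MiB -> trigger_bytes = 80 MiB (the test above fires
     *                     as if those bytes were already consed,
     *                     forcing a GC before a few huge allocations
     *                     can exhaust the heap) */
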
@@ -4344,7 +4251,17 @@ void unhandled_sigmemoryfault(void* addr);
  *
  * Return true if this signal is a normal generational GC thing that
  * we were able to handle, or false if it was abnormal and control
- * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
+ * should fall through to the general SIGSEGV/SIGBUS/whatever logic.
+ *
+ * We have two control flags for this: one causes us to ignore faults
+ * on unprotected pages completely, and the second complains to stderr
+ * but allows us to continue without losing.
+ */
+extern boolean ignore_memoryfaults_on_unprotected_pages;
+boolean ignore_memoryfaults_on_unprotected_pages = 0;
+
+extern boolean continue_after_memoryfault_on_unprotected_pages;
+boolean continue_after_memoryfault_on_unprotected_pages = 0;
 
 int
 gencgc_handle_wp_violation(void* fault_addr)
@@ -4375,17 +4292,39 @@ gencgc_handle_wp_violation(void* fault_addr)
             os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
             page_table[page_index].write_protected_cleared = 1;
             page_table[page_index].write_protected = 0;
-        } else {
+        } else if (!ignore_memoryfaults_on_unprotected_pages) {
             /* The only acceptable reason for this signal on a heap
              * access is that GENCGC write-protected the page.
              * However, if two CPUs hit a wp page near-simultaneously,
              * we had better not have the second one lose here if it
              * does this test after the first one has already set wp=0
              */
-            if(page_table[page_index].write_protected_cleared != 1)
-                lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
-                     page_index, boxed_region.first_page,
-                     boxed_region.last_page);
+            if(page_table[page_index].write_protected_cleared != 1) {
+                void lisp_backtrace(int frames);
+                lisp_backtrace(10);
+                fprintf(stderr,
+                        "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
+                        "  boxed_region.first_page: %"PAGE_INDEX_FMT","
+                        "  boxed_region.last_page %"PAGE_INDEX_FMT"\n"
+                        "  page.region_start_offset: %"OS_VM_SIZE_FMT"\n"
+                        "  page.bytes_used: %"PAGE_BYTES_FMT"\n"
+                        "  page.allocated: %d\n"
+                        "  page.write_protected: %d\n"
+                        "  page.write_protected_cleared: %d\n"
+                        "  page.generation: %d\n",
+                        fault_addr,
+                        page_index,
+                        boxed_region.first_page,
+                        boxed_region.last_page,
+                        page_table[page_index].region_start_offset,
+                        page_table[page_index].bytes_used,
+                        page_table[page_index].allocated,
+                        page_table[page_index].write_protected,
+                        page_table[page_index].write_protected_cleared,
+                        page_table[page_index].gen);
+                if (!continue_after_memoryfault_on_unprotected_pages)
+                    lose("Feh.\n");
+            }
         }
         ret = thread_mutex_unlock(&free_pages_lock);
         gc_assert(ret == 0);
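
Both of the new flags are ordinary C globals with external linkage, so
they can be flipped at runtime without rebuilding; for example, from a
debugger attached to a misbehaving image (hypothetical gdb session):

    (gdb) set variable ignore_memoryfaults_on_unprotected_pages = 1
    (gdb) set variable continue_after_memoryfault_on_unprotected_pages = 1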