gencgc: fix regression from 137ba2db2d362f03754ccd080ddbe96f7e3c5dc7
diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index a5f1dc0..acc79f5 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -79,11 +79,11 @@ boolean enable_page_protection = 1;
 
 /* the minimum size (in bytes) for a large object*/
 #if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
-long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
 #elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
-long large_object_size = 4 * GENCGC_CARD_BYTES;
+os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
 #else
-long large_object_size = 4 * PAGE_BYTES;
+os_vm_size_t large_object_size = 4 * PAGE_BYTES;
 #endif
 
 \f
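
The long → os_vm_size_t change matters on LLP64 targets such as 64-bit Windows, where long stays 32 bits while heap sizes are 64-bit quantities; os_vm_size_t is sized to the VM address space. A minimal standalone sketch of the truncation (unsigned long long stands in for the 64-bit size type):

    #include <stdio.h>

    int main(void) {
        unsigned long long five_gib = 5ULL << 30;   /* 5 GiB needs more than 32 bits */
        long truncated = (long)five_gib;            /* loses the high bits where long is 32-bit */
        printf("5 GiB as long: %ld\n", truncated);  /* prints 1 GiB on LLP64, 5 GiB on LP64 */
        return 0;
    }
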
@@ -245,17 +245,17 @@ find_page_index(void *addr)
     return (-1);
 }
 
-static size_t
+static os_vm_size_t
 npage_bytes(page_index_t npages)
 {
     gc_assert(npages>=0);
-    return ((unsigned long)npages)*GENCGC_CARD_BYTES;
+    return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
 }
 
 /* Check that X is a higher address than Y and return offset from Y to
  * X in bytes. */
-static inline
-size_t void_diff(void *x, void *y)
+static inline os_vm_size_t
+void_diff(void *x, void *y)
 {
     gc_assert(x >= y);
     return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
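
The cast inside npage_bytes is load-bearing: the multiplication must be performed in the wide type, not in page_index_t. A sketch of the failure mode, assuming a 32-bit page index and a hypothetical 32768-byte card (both stand-ins for the real typedefs):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t npages = 200000;                    /* hypothetical page count */
        uint64_t bad  = (uint64_t)(npages * 32768u); /* multiply wraps in 32 bits first */
        uint64_t good = (uint64_t)npages * 32768u;   /* widen, then multiply */
        printf("bad=%llu good=%llu\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }
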
@@ -350,11 +350,11 @@ static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif
 
-extern unsigned long gencgc_release_granularity;
-unsigned long gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
+extern os_vm_size_t gencgc_release_granularity;
+os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
 
-extern unsigned long gencgc_alloc_granularity;
-unsigned long gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
+extern os_vm_size_t gencgc_alloc_granularity;
+os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
 
 \f
 /*
@@ -408,11 +408,11 @@ count_dont_move_pages(void)
 
 /* Work through the pages and add up the number of bytes used for the
  * given generation. */
-static unsigned long
+static os_vm_size_t
 count_generation_bytes_allocated (generation_index_t gen)
 {
     page_index_t i;
-    unsigned long result = 0;
+    os_vm_size_t result = 0;
     for (i = 0; i < last_free_page; i++) {
         if (page_allocated_p(i)
             && (page_table[i].gen == gen))
@@ -498,18 +498,20 @@ write_generation_stats(FILE *file)
                 " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
                 boxed_cnt, unboxed_cnt, large_boxed_cnt,
                 large_unboxed_cnt, pinned_cnt);
-        fprintf(file
-                " %8ld %5ld %8ld %4ld %3d %7.4f\n",
+        fprintf(file,
+                " %8"OS_VM_SIZE_FMT
+                " %5"OS_VM_SIZE_FMT
+                " %8"OS_VM_SIZE_FMT
+                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                 generations[i].bytes_allocated,
-                (npage_bytes(count_generation_pages(i))
-                 - generations[i].bytes_allocated),
+                (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
                 generations[i].gc_trigger,
                 count_write_protect_generation_pages(i),
                 generations[i].num_gc,
                 generation_average_age(i));
     }
-    fprintf(file,"   Total bytes allocated    = %lu\n", (unsigned long)bytes_allocated);
-    fprintf(file,"   Dynamic-space-size bytes = %lu\n", (unsigned long)dynamic_space_size);
+    fprintf(file,"   Total bytes allocated    = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
+    fprintf(file,"   Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);
 
     fpu_restore(fpu_state);
 }
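
OS_VM_SIZE_FMT is the per-platform conversion specifier for os_vm_size_t, string-concatenated into the format the same way the <inttypes.h> PRI* macros are. A standalone sketch of the pattern, with stand-in names for the SBCL macros:

    #include <inttypes.h>
    #include <stdio.h>

    typedef uint64_t my_vm_size_t;     /* stand-in for os_vm_size_t */
    #define MY_VM_SIZE_FMT PRIu64      /* stand-in for OS_VM_SIZE_FMT */

    int main(void) {
        my_vm_size_t bytes_allocated = (my_vm_size_t)1 << 33;  /* exceeds 32 bits */
        printf("   Total bytes allocated    = %" MY_VM_SIZE_FMT "\n",
               bytes_allocated);
        return 0;
    }
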
@@ -590,7 +592,7 @@ void fast_bzero(void*, size_t); /* in <arch>-assem.S */
 void zero_pages_with_mmap(page_index_t start, page_index_t end) {
     page_index_t i;
     void *addr = page_address(start), *new_addr;
-    size_t length = npage_bytes(1+end-start);
+    os_vm_size_t length = npage_bytes(1+end-start);
 
     if (start > end)
       return;
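
For context, zero_pages_with_mmap gets zeros from the kernel instead of writing them: replacing the range with a fresh anonymous mapping yields demand-zero pages. A minimal sketch of that technique for Linux/BSD (error handling and the page bookkeeping of the real function elided):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Remap [addr, addr+length) as fresh anonymous memory; MAP_FIXED
     * reuses the address and the new pages read back as zero. */
    static void zero_range_with_mmap(void *addr, size_t length) {
        mmap(addr, length, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    }
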
@@ -787,7 +789,7 @@ gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_
 {
     page_index_t first_page;
     page_index_t last_page;
-    unsigned long bytes_found;
+    os_vm_size_t bytes_found;
     page_index_t i;
     int ret;
 
@@ -870,15 +872,11 @@ gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_
 
     /* we can do this after releasing free_pages_lock */
     if (gencgc_zero_check) {
-        long *p;
-        for (p = (long *)alloc_region->start_addr;
-             p < (long *)alloc_region->end_addr; p++) {
+        word_t *p;
+        for (p = (word_t *)alloc_region->start_addr;
+             p < (word_t *)alloc_region->end_addr; p++) {
             if (*p != 0) {
-                /* KLUDGE: It would be nice to use %lx and explicit casts
-                 * (long) in code like this, so that it is less likely to
-                 * break randomly when running on a machine with different
-                 * word sizes. -- WHN 19991129 */
-                lose("The new region at %x is not zero (start=%p, end=%p).\n",
+                lose("The new region is not zero at %p (start=%p, end=%p).\n",
                      p, alloc_region->start_addr, alloc_region->end_addr);
             }
         }
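
Besides the word_t change, the lose() call now prints the offending address with %p instead of %x, which is what makes the old KLUDGE comment obsolete: %p is correct at any word size, while %x reads the pointer as an unsigned int. A two-line reminder of the idiom:

    #include <stdio.h>

    int main(void) {
        int x = 0;
        int *p = &x;
        /* printf("%x\n", p) would truncate on 64-bit and is undefined
         * behavior; the portable spelling is %p with a void* cast: */
        printf("%p\n", (void *)p);
        return 0;
    }
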
@@ -986,13 +984,13 @@ add_new_area(page_index_t first_page, size_t offset, size_t size)
 void
 gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
 {
-    int more;
+    boolean more;
     page_index_t first_page;
     page_index_t next_page;
-    unsigned long bytes_used;
-    unsigned long orig_first_page_bytes_used;
-    unsigned long region_size;
-    unsigned long byte_cnt;
+    os_vm_size_t bytes_used;
+    os_vm_size_t region_size;
+    os_vm_size_t byte_cnt;
+    page_bytes_t orig_first_page_bytes_used;
     int ret;
 
 
@@ -1118,13 +1116,11 @@ static inline void *gc_quick_alloc(long nbytes);
 void *
 gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
 {
-    page_index_t first_page;
-    page_index_t last_page;
-    int orig_first_page_bytes_used;
-    long byte_cnt;
-    int more;
-    unsigned long bytes_used;
-    page_index_t next_page;
+    boolean more;
+    page_index_t first_page, next_page, last_page;
+    page_bytes_t orig_first_page_bytes_used;
+    os_vm_size_t byte_cnt;
+    os_vm_size_t bytes_used;
     int ret;
 
     ret = thread_mutex_lock(&free_pages_lock);
@@ -1266,27 +1262,29 @@ gc_heap_exhausted_error_or_lose (long available, long requested)
 }
 
 page_index_t
-gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
+gc_find_freeish_pages(page_index_t *restart_page_ptr, long bytes,
                       int page_type_flag)
 {
-    page_index_t first_page, last_page;
-    page_index_t restart_page = *restart_page_ptr;
-    long nbytes_goal = nbytes;
-    long bytes_found = 0;
-    long most_bytes_found = 0;
-    page_index_t most_bytes_found_from, most_bytes_found_to;
-    int small_object = nbytes < GENCGC_CARD_BYTES;
+    page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
+    page_index_t first_page, last_page, restart_page = *restart_page_ptr;
+    os_vm_size_t nbytes = bytes;
+    os_vm_size_t nbytes_goal = nbytes;
+    os_vm_size_t bytes_found = 0;
+    os_vm_size_t most_bytes_found = 0;
+    boolean small_object = nbytes < GENCGC_CARD_BYTES;
     /* FIXME: assert(free_pages_lock is held); */
 
     if (nbytes_goal < gencgc_alloc_granularity)
-            nbytes_goal = gencgc_alloc_granularity;
+        nbytes_goal = gencgc_alloc_granularity;
 
     /* Toggled by gc_and_save for heap compaction, normally -1. */
     if (gencgc_alloc_start_page != -1) {
         restart_page = gencgc_alloc_start_page;
     }
 
-    gc_assert(nbytes>=0);
+    /* FIXME: This assertion is on bytes instead of nbytes, pending
+     * cleanup of long in the interface. */
+    gc_assert(bytes>=0);
     /* Search for a page with at least nbytes of space. We prefer
      * not to split small objects on multiple pages, to reduce the
      * number of contiguous allocation regions spanning multiple
@@ -1350,6 +1348,7 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
         gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
     }
 
+    gc_assert(most_bytes_found_to);
     *restart_page_ptr = most_bytes_found_from;
     return most_bytes_found_to-1;
 }
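
The restructured gc_find_freeish_pages keeps the original strategy: scan runs of free pages, return the first run satisfying nbytes_goal, and remember the largest run seen so exhaustion can be reported accurately. A simplified standalone sketch (the toy page_free array and CARD_BYTES stand in for the page_table machinery):

    #include <stddef.h>
    #include <stdio.h>

    #define CARD_BYTES 32768   /* stand-in for GENCGC_CARD_BYTES */
    #define NPAGES 16
    typedef long page_index_t;

    static int page_free[NPAGES] =   /* toy page table: 1 = free */
        {0,1,1,0,1,1,1,1,0,0,1,0,0,0,0,0};

    /* Returns the last page of the first free run holding nbytes_goal, or
     * -1; *run_start and *most_found mirror restart_page_ptr and
     * most_bytes_found in the real function. */
    static page_index_t
    find_free_run(size_t nbytes_goal, page_index_t *run_start, size_t *most_found)
    {
        page_index_t i = 0;
        *most_found = 0;
        while (i < NPAGES) {
            if (!page_free[i]) { i++; continue; }
            page_index_t first = i;
            while (i < NPAGES && page_free[i]) i++;
            size_t bytes_found = (size_t)(i - first) * CARD_BYTES;
            if (bytes_found > *most_found) *most_found = bytes_found;
            if (bytes_found >= nbytes_goal) { *run_start = first; return i - 1; }
        }
        return -1;   /* caller reports heap exhaustion using *most_found */
    }

    int main(void) {
        page_index_t start = -1; size_t most;
        page_index_t last = find_free_run(3 * CARD_BYTES, &start, &most);
        printf("run %ld..%ld, largest seen %zu bytes\n", start, last, most);
        return 0;
    }
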
@@ -1409,12 +1408,6 @@ gc_quick_alloc(long nbytes)
 }
 
 static inline void *
-gc_quick_alloc_large(long nbytes)
-{
-    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG ,ALLOC_QUICK);
-}
-
-static inline void *
 gc_alloc_unboxed(long nbytes)
 {
     return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
@@ -1425,22 +1418,15 @@ gc_quick_alloc_unboxed(long nbytes)
 {
     return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
 }
-
-static inline void *
-gc_quick_alloc_large_unboxed(long nbytes)
-{
-    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
-}
 \f
-
-/* Copy a large boxed object. If the object is in a large object
- * region then it is simply promoted, else it is copied. If it's large
- * enough then it's copied to a large object region.
+/* Copy a large object. If the object is in a large object region then
+ * it is simply promoted, else it is copied. If it's large enough then
+ * it's copied to a large object region.
  *
- * Vectors may have shrunk. If the object is not copied the space
- * needs to be reclaimed, and the page_tables corrected. */
-lispobj
-copy_large_object(lispobj object, long nwords)
+ * Bignums and vectors may have shrunk. If the object is not copied
+ * the space needs to be reclaimed, and the page_tables corrected. */
+static lispobj
+general_copy_large_object(lispobj object, long nwords, boolean boxedp)
 {
     int tag;
     lispobj *new;
@@ -1450,54 +1436,71 @@ copy_large_object(lispobj object, long nwords)
     gc_assert(from_space_p(object));
     gc_assert((nwords & 0x01) == 0);
 
+    if ((nwords > 1024*1024) && gencgc_verbose) {
+        FSHOW((stderr, "/general_copy_large_object: %ld bytes\n",
+               nwords*N_WORD_BYTES));
+    }
 
-    /* Check whether it's in a large object region. */
+    /* Check whether it's a large object. */
     first_page = find_page_index((void *)object);
     gc_assert(first_page >= 0);
 
     if (page_table[first_page].large_object) {
-
-        /* Promote the object. */
-
-        unsigned long remaining_bytes;
+        /* Promote the object. Note: Unboxed objects may have been
+         * allocated to a BOXED region so it may be necessary to
+         * change the region to UNBOXED. */
+        os_vm_size_t remaining_bytes;
+        os_vm_size_t bytes_freed;
         page_index_t next_page;
-        unsigned long bytes_freed;
-        unsigned long old_bytes_used;
+        page_bytes_t old_bytes_used;
 
-        /* Note: Any page write-protection must be removed, else a
+        /* FIXME: This comment is somewhat stale.
+         *
+         * Note: Any page write-protection must be removed, else a
          * later scavenge_newspace may incorrectly not scavenge these
          * pages. This would not be necessary if they are added to the
          * new areas, but let's do it for them all (they'll probably
          * be written anyway?). */
 
         gc_assert(page_table[first_page].region_start_offset == 0);
-
         next_page = first_page;
         remaining_bytes = nwords*N_WORD_BYTES;
+
         while (remaining_bytes > GENCGC_CARD_BYTES) {
             gc_assert(page_table[next_page].gen == from_space);
-            gc_assert(page_boxed_p(next_page));
             gc_assert(page_table[next_page].large_object);
             gc_assert(page_table[next_page].region_start_offset ==
                       npage_bytes(next_page-first_page));
             gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
-            /* Should have been unprotected by unprotect_oldspace(). */
-            gc_assert(page_table[next_page].write_protected == 0);
-
+            /* Should have been unprotected by unprotect_oldspace()
+             * for boxed objects, and after promotion unboxed ones
+             * should not be on protected pages at all. */
+            gc_assert(!page_table[next_page].write_protected);
+
+            if (boxedp)
+                gc_assert(page_boxed_p(next_page));
+            else {
+                gc_assert(page_allocated_no_region_p(next_page));
+                page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+            }
             page_table[next_page].gen = new_space;
 
             remaining_bytes -= GENCGC_CARD_BYTES;
             next_page++;
         }
 
-        /* Now only one page remains, but the object may have shrunk
-         * so there may be more unused pages which will be freed. */
+        /* Now only one page remains, but the object may have shrunk so
+         * there may be more unused pages which will be freed. */
 
-        /* The object may have shrunk but shouldn't have grown. */
+        /* Object may have shrunk but shouldn't have grown - check. */
         gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
 
         page_table[next_page].gen = new_space;
-        gc_assert(page_boxed_p(next_page));
+
+        if (boxedp)
+            gc_assert(page_boxed_p(next_page));
+        else
+            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
 
         /* Adjust the bytes_used. */
         old_bytes_used = page_table[next_page].bytes_used;
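
A worked example of the promotion walk above, assuming a hypothetical 32768-byte card and a 100000-byte large object: three pages are promoted whole, and the loop exits with the remainder becoming the tail page's bytes_used:

    #include <stdio.h>

    #define CARD 32768   /* stand-in for GENCGC_CARD_BYTES */

    int main(void) {
        long remaining_bytes = 100000;    /* hypothetical object size */
        long next_page = 0;
        while (remaining_bytes > CARD) {  /* full pages promoted whole */
            remaining_bytes -= CARD;
            next_page++;
        }
        /* the tail page keeps only the remainder as bytes_used */
        printf("tail page %ld, bytes_used %ld\n", next_page, remaining_bytes);
        return 0;
    }
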
@@ -1509,14 +1512,21 @@ copy_large_object(lispobj object, long nwords)
         next_page++;
         while ((old_bytes_used == GENCGC_CARD_BYTES) &&
                (page_table[next_page].gen == from_space) &&
-               page_boxed_p(next_page) &&
+               /* FIXME: It is not obvious to me why this is necessary
+                * as a loop condition: it seems to me that the
+                * region_start_offset test should be sufficient, but
+                * experimentally that is not the case. --NS
+                * 2011-11-28 */
+               (boxedp ?
+                page_boxed_p(next_page) :
+                page_allocated_no_region_p(next_page)) &&
                page_table[next_page].large_object &&
                (page_table[next_page].region_start_offset ==
                 npage_bytes(next_page - first_page))) {
-            /* Checks out OK, free the page. Don't need to bother zeroing
+            /* Checks out OK, free the page. Don't need to bother zeroing
              * pages as this should have been done before shrinking the
-             * object. These pages shouldn't be write-protected as they
-             * should be zero filled. */
+             * object. These pages shouldn't be write-protected; even
+             * if boxed they should be zero filled. */
             gc_assert(page_table[next_page].write_protected == 0);
 
             old_bytes_used = page_table[next_page].bytes_used;
@@ -1526,22 +1536,33 @@ copy_large_object(lispobj object, long nwords)
             next_page++;
         }
 
-        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
+        if ((bytes_freed > 0) && gencgc_verbose) {
+            FSHOW((stderr,
+                   "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
+                   bytes_freed));
+        }
+
+        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
             + bytes_freed;
-        generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
+        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
         bytes_allocated -= bytes_freed;
 
         /* Add the region to the new_areas if requested. */
-        add_new_area(first_page,0,nwords*N_WORD_BYTES);
+        if (boxedp)
+            add_new_area(first_page,0,nwords*N_WORD_BYTES);
 
         return(object);
+
     } else {
         /* Get tag of object. */
         tag = lowtag_of(object);
 
         /* Allocate space. */
-        new = gc_quick_alloc_large(nwords*N_WORD_BYTES);
+        new = gc_general_alloc(nwords*N_WORD_BYTES,
+                               (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
+                               ALLOC_QUICK);
 
+        /* Copy the object. */
         memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
 
         /* Return Lisp pointer of new object. */
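
The copy path ends by reapplying the saved tag: SBCL stores the lowtag in the low bits of an aligned address, so lowtag_of, native_pointer and the final | round-trip it. A standalone sketch assuming a 3-bit lowtag (the actual width is platform-dependent):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t lispobj;
    #define LOWTAG_MASK ((lispobj)7)   /* assumed 3-bit lowtag for the sketch */

    int main(void) {
        lispobj old = 0x100007;                      /* hypothetical tagged pointer */
        int tag = (int)(old & LOWTAG_MASK);          /* lowtag_of(object) */
        void *native = (void *)(old & ~LOWTAG_MASK); /* native_pointer(object) */
        lispobj copied = (lispobj)native | (lispobj)tag;
        printf("tag=%d, round-trips: %d\n", tag, copied == old);
        return 0;
    }
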
@@ -1549,6 +1570,18 @@ copy_large_object(lispobj object, long nwords)
     }
 }
 
+lispobj
+copy_large_object(lispobj object, long nwords)
+{
+    return general_copy_large_object(object, nwords, 1);
+}
+
+lispobj
+copy_large_unboxed_object(lispobj object, long nwords)
+{
+    return general_copy_large_object(object, nwords, 0);
+}
+
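
Reduced to its shape, the refactoring in this hunk is one worker parameterized on boxedp with thin wrappers keeping the public entry points; only the boxed path registers the copy for re-scavenging. A toy sketch with hypothetical names:

    #include <stdio.h>

    typedef unsigned long lispobj;

    static lispobj general_copy(lispobj obj, long nwords, int boxedp) {
        if (boxedp)   /* mirrors the boxed-only add_new_area call */
            printf("add_new_area: %ld words\n", nwords);
        return obj;
    }

    static lispobj copy_boxed(lispobj o, long n)   { return general_copy(o, n, 1); }
    static lispobj copy_unboxed(lispobj o, long n) { return general_copy(o, n, 0); }

    int main(void) { copy_boxed(0, 8); copy_unboxed(0, 8); return 0; }
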
 /* to copy unboxed objects */
 lispobj
 copy_unboxed_object(lispobj object, long nwords)
@@ -1571,131 +1604,6 @@ copy_unboxed_object(lispobj object, long nwords)
     /* Return Lisp pointer of new object. */
     return ((lispobj) new) | tag;
 }
-
-/* to copy large unboxed objects
- *
- * If the object is in a large object region then it is simply
- * promoted, else it is copied. If it's large enough then it's copied
- * to a large object region.
- *
- * Bignums and vectors may have shrunk. If the object is not copied
- * the space needs to be reclaimed, and the page_tables corrected.
- *
- * KLUDGE: There's a lot of cut-and-paste duplication between this
- * function and copy_large_object(..). -- WHN 20000619 */
-lispobj
-copy_large_unboxed_object(lispobj object, long nwords)
-{
-    int tag;
-    lispobj *new;
-    page_index_t first_page;
-
-    gc_assert(is_lisp_pointer(object));
-    gc_assert(from_space_p(object));
-    gc_assert((nwords & 0x01) == 0);
-
-    if ((nwords > 1024*1024) && gencgc_verbose) {
-        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
-               nwords*N_WORD_BYTES));
-    }
-
-    /* Check whether it's a large object. */
-    first_page = find_page_index((void *)object);
-    gc_assert(first_page >= 0);
-
-    if (page_table[first_page].large_object) {
-        /* Promote the object. Note: Unboxed objects may have been
-         * allocated to a BOXED region so it may be necessary to
-         * change the region to UNBOXED. */
-        unsigned long remaining_bytes;
-        page_index_t next_page;
-        unsigned long bytes_freed;
-        unsigned long old_bytes_used;
-
-        gc_assert(page_table[first_page].region_start_offset == 0);
-
-        next_page = first_page;
-        remaining_bytes = nwords*N_WORD_BYTES;
-        while (remaining_bytes > GENCGC_CARD_BYTES) {
-            gc_assert(page_table[next_page].gen == from_space);
-            gc_assert(page_allocated_no_region_p(next_page));
-            gc_assert(page_table[next_page].large_object);
-            gc_assert(page_table[next_page].region_start_offset ==
-                      npage_bytes(next_page-first_page));
-            gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
-
-            page_table[next_page].gen = new_space;
-            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-            remaining_bytes -= GENCGC_CARD_BYTES;
-            next_page++;
-        }
-
-        /* Now only one page remains, but the object may have shrunk so
-         * there may be more unused pages which will be freed. */
-
-        /* Object may have shrunk but shouldn't have grown - check. */
-        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
-        page_table[next_page].gen = new_space;
-        page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-
-        /* Adjust the bytes_used. */
-        old_bytes_used = page_table[next_page].bytes_used;
-        page_table[next_page].bytes_used = remaining_bytes;
-
-        bytes_freed = old_bytes_used - remaining_bytes;
-
-        /* Free any remaining pages; needs care. */
-        next_page++;
-        while ((old_bytes_used == GENCGC_CARD_BYTES) &&
-               (page_table[next_page].gen == from_space) &&
-               page_allocated_no_region_p(next_page) &&
-               page_table[next_page].large_object &&
-               (page_table[next_page].region_start_offset ==
-                npage_bytes(next_page - first_page))) {
-            /* Checks out OK, free the page. Don't need to both zeroing
-             * pages as this should have been done before shrinking the
-             * object. These pages shouldn't be write-protected, even if
-             * boxed they should be zero filled. */
-            gc_assert(page_table[next_page].write_protected == 0);
-
-            old_bytes_used = page_table[next_page].bytes_used;
-            page_table[next_page].allocated = FREE_PAGE_FLAG;
-            page_table[next_page].bytes_used = 0;
-            bytes_freed += old_bytes_used;
-            next_page++;
-        }
-
-        if ((bytes_freed > 0) && gencgc_verbose) {
-            FSHOW((stderr,
-                   "/copy_large_unboxed bytes_freed=%d\n",
-                   bytes_freed));
-        }
-
-        generations[from_space].bytes_allocated -=
-            nwords*N_WORD_BYTES + bytes_freed;
-        generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
-        bytes_allocated -= bytes_freed;
-
-        return(object);
-    }
-    else {
-        /* Get tag of object. */
-        tag = lowtag_of(object);
-
-        /* Allocate space. */
-        new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);
-
-        /* Copy the object. */
-        memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
-
-        /* Return Lisp pointer of new object. */
-        return ((lispobj) new) | tag;
-    }
-}
-
-
-
 \f
 
 /*
@@ -3957,7 +3865,7 @@ collect_garbage(generation_index_t last_gen)
 
     auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
     if(gencgc_verbose)
-        fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+        fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
                 auto_gc_trigger);
 
     /* If we did a big GC (arbitrarily defined as gen > 1), release memory
@@ -3993,7 +3901,7 @@ gc_free_heap(void)
     for (page = 0; page < page_table_pages; page++) {
         /* Skip free pages which should already be zero filled. */
         if (page_allocated_p(page)) {
-            void *page_start, *addr;
+            void *page_start;
             for (last_page = page;
                  (last_page < page_table_pages) && page_allocated_p(last_page);
                  last_page++) {