1.0.20.6: smaller allocation regions & reduced pinning
diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 3161230..4f48966 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
 #include "validate.h"
 #include "lispregs.h"
 #include "arch.h"
-#include "fixnump.h"
 #include "gc.h"
 #include "gc-internal.h"
 #include "thread.h"
+#include "alloc.h"
 #include "genesis/vector.h"
 #include "genesis/weak-pointer.h"
 #include "genesis/fdefn.h"
@@ -79,7 +79,7 @@ enum {
 boolean enable_page_protection = 1;
 
 /* the minimum size (in bytes) for a large object */
-unsigned long large_object_size = 4 * PAGE_BYTES;
+long large_object_size = 4 * PAGE_BYTES;
 
 \f
 /*
@@ -162,7 +162,7 @@ static boolean conservative_stack = 1;
 /* An array of page structures is allocated on gc initialization.
  * This helps quickly map between an address and its page structure.
  * page_table_pages is set from the size of the dynamic space. */
-unsigned page_table_pages;
+page_index_t page_table_pages;
 struct page *page_table;
 
 /* To map addresses to page structures the address of the first page
@@ -176,6 +176,13 @@ page_address(page_index_t page_num)
     return (heap_base + (page_num * PAGE_BYTES));
 }
 
+/* Calculate the address where the allocation region associated with the page starts. */
+inline void *
+page_region_start(page_index_t page_index)
+{
+    return page_address(page_index)+page_table[page_index].first_object_offset;
+}
+
 /* Find the page index within the page_table for the given
  * address. Return -1 on failure. */
 inline page_index_t
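
A note on the offset convention the new helper leans on: for an allocation
region spanning several pages, the continuation pages evidently store a
negative first_object_offset that reaches back to the region's start, so the
same expression works from any page of the region. A minimal self-contained
model of that assumed convention (all model_* names are illustrative):

#include <assert.h>

#define MODEL_PAGE_BYTES 4096

struct model_page { long first_object_offset; };

static char model_heap[3 * MODEL_PAGE_BYTES];  /* stand-in dynamic space */
static struct model_page model_table[3];

static void *model_region_start(long i)
{
    return model_heap + i * MODEL_PAGE_BYTES
        + model_table[i].first_object_offset;
}

int main(void)
{
    /* One region starting at page 0 and spilling into pages 1 and 2. */
    model_table[0].first_object_offset = 0;
    model_table[1].first_object_offset = -1L * MODEL_PAGE_BYTES;
    model_table[2].first_object_offset = -2L * MODEL_PAGE_BYTES;

    /* Any page of the region resolves to the same start address. */
    assert(model_region_start(0) == (void *)model_heap);
    assert(model_region_start(1) == (void *)model_heap);
    assert(model_region_start(2) == (void *)model_heap);
    return 0;
}
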
@@ -672,21 +679,6 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
     ret = thread_mutex_unlock(&free_pages_lock);
     gc_assert(ret == 0);
 
-    /* we can do this after releasing free_pages_lock */
-    if (gencgc_zero_check) {
-        long *p;
-        for (p = (long *)alloc_region->start_addr;
-             p < (long *)alloc_region->end_addr; p++) {
-            if (*p != 0) {
-                /* KLUDGE: It would be nice to use %lx and explicit casts
-                 * (long) in code like this, so that it is less likely to
-                 * break randomly when running on a machine with different
-                 * word sizes. -- WHN 19991129 */
-                lose("The new region at %x is not zero.\n", p);
-            }
-        }
-    }
-
 #ifdef READ_PROTECT_FREE_PAGES
     os_protect(page_address(first_page),
                PAGE_BYTES*(1+last_page-first_page),
@@ -702,6 +694,22 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
     }
 
     zero_dirty_pages(first_page, last_page);
+
+    /* we can do this after releasing free_pages_lock */
+    if (gencgc_zero_check) {
+        long *p;
+        for (p = (long *)alloc_region->start_addr;
+             p < (long *)alloc_region->end_addr; p++) {
+            if (*p != 0) {
+                /* KLUDGE: It would be nice to use %lx and explicit casts
+                 * (long) in code like this, so that it is less likely to
+                 * break randomly when running on a machine with different
+                 * word sizes. -- WHN 19991129 */
+                lose("The new region at %x is not zero (start=%p, end=%p).\n",
+                     p, alloc_region->start_addr, alloc_region->end_addr);
+            }
+        }
+    }
 }
 
 /* If the record_new_objects flag is 2 then all new regions created
@@ -1095,8 +1103,8 @@ gc_heap_exhausted_error_or_lose (long available, long requested)
     }
     else {
         /* FIXME: assert free_pages_lock held */
-        thread_mutex_unlock(&free_pages_lock);
-        funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR),
+        (void)thread_mutex_unlock(&free_pages_lock);
+        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
                  alloc_number(available), alloc_number(requested));
         lose("HEAP-EXHAUSTED-ERROR fell through");
     }
@@ -1105,76 +1113,82 @@ gc_heap_exhausted_error_or_lose (long available, long requested)
 page_index_t
 gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
 {
-    page_index_t first_page;
-    page_index_t last_page;
-    long region_size;
-    page_index_t restart_page=*restart_page_ptr;
-    long bytes_found;
-    long num_pages;
-    int large_p=(nbytes>=large_object_size);
+    page_index_t first_page, last_page;
+    page_index_t restart_page = *restart_page_ptr;
+    long bytes_found = 0;
+    long most_bytes_found = 0;
     /* FIXME: assert(free_pages_lock is held); */
 
-    /* Search for a contiguous free space of at least nbytes. If it's
-     * a large object then align it on a page boundary by searching
-     * for a free page. */
-
+    /* Toggled by gc_and_save for heap compaction, normally -1. */
     if (gencgc_alloc_start_page != -1) {
         restart_page = gencgc_alloc_start_page;
     }
 
-    do {
-        first_page = restart_page;
-        if (large_p)
-            while ((first_page < page_table_pages)
-                   && (page_table[first_page].allocated != FREE_PAGE_FLAG))
-                first_page++;
-        else
-            while (first_page < page_table_pages) {
-                if(page_table[first_page].allocated == FREE_PAGE_FLAG)
-                    break;
-                if((page_table[first_page].allocated ==
-                    (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
-                   (page_table[first_page].large_object == 0) &&
-                   (page_table[first_page].gen == gc_alloc_generation) &&
-                   (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
-                   (page_table[first_page].write_protected == 0) &&
-                   (page_table[first_page].dont_move == 0)) {
-                    break;
-                }
+    if (nbytes>=PAGE_BYTES) {
+        /* Search for a contiguous free space of at least nbytes,
+         * aligned on a page boundary. The page-alignment is strictly
+         * speaking needed only for objects at least large_object_size
+         * bytes in size. */
+        do {
+            first_page = restart_page;
+            while ((first_page < page_table_pages) &&
+                   (page_table[first_page].allocated != FREE_PAGE_FLAG))
                 first_page++;
-            }
 
-        if (first_page >= page_table_pages)
-            gc_heap_exhausted_error_or_lose(0, nbytes);
-
-        gc_assert(page_table[first_page].write_protected == 0);
+            last_page = first_page;
+            bytes_found = PAGE_BYTES;
+            while ((bytes_found < nbytes) &&
+                   (last_page < (page_table_pages-1)) &&
+                   (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+                last_page++;
+                bytes_found += PAGE_BYTES;
+                gc_assert(page_table[last_page].write_protected == 0);
+            }
+            if (bytes_found > most_bytes_found)
+                most_bytes_found = bytes_found;
+            restart_page = last_page + 1;
+        } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
 
-        last_page = first_page;
-        bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
-        num_pages = 1;
-        while (((bytes_found < nbytes)
-                || (!large_p && (num_pages < 2)))
-               && (last_page < (page_table_pages-1))
-               && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
-            last_page++;
-            num_pages++;
-            bytes_found += PAGE_BYTES;
-            gc_assert(page_table[last_page].write_protected == 0);
+    } else {
+        /* Search for a page with at least nbytes of space. We prefer
+         * not to split small objects across multiple pages, to reduce
+         * the number of contiguous allocation regions spanning multiple
+         * pages: this helps avoid excessive conservatism. */
+        first_page = restart_page;
+        while (first_page < page_table_pages) {
+            if (page_table[first_page].allocated == FREE_PAGE_FLAG)
+                {
+                    bytes_found = PAGE_BYTES;
+                    break;
+                }
+            else if ((page_table[first_page].allocated ==
+                      (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+                     (page_table[first_page].large_object == 0) &&
+                     (page_table[first_page].gen == gc_alloc_generation) &&
+                     (page_table[first_page].write_protected == 0) &&
+                     (page_table[first_page].dont_move == 0))
+                {
+                    bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
+                    if (bytes_found > most_bytes_found)
+                        most_bytes_found = bytes_found;
+                    if (bytes_found >= nbytes)
+                        break;
+                }
+            first_page++;
         }
-
-        region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
-            + PAGE_BYTES*(last_page-first_page);
-
-        gc_assert(bytes_found == region_size);
-        restart_page = last_page + 1;
-    } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
+        last_page = first_page;
+        restart_page = first_page + 1;
+    }
 
     /* Check for a failure */
-    if ((restart_page >= page_table_pages) && (bytes_found < nbytes))
-        gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+    if (bytes_found < nbytes) {
+        gc_assert(restart_page >= page_table_pages);
+        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
+    }
 
-    *restart_page_ptr=first_page;
+    gc_assert(page_table[first_page].write_protected == 0);
 
+    *restart_page_ptr = first_page;
     return last_page;
 }
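
The rewritten search has two distinct policies: requests of a page or more
take runs of entirely free pages, so large regions always start page-aligned,
while smaller requests prefer a single free or compatible partially-filled
page, keeping small regions from spanning page boundaries. A self-contained
toy model of that policy (simplified page table, hypothetical toy_* names):

#include <stdio.h>

#define TOY_PAGE_BYTES 4096
#define TOY_NPAGES 8

enum { TOY_FREE_PAGE, TOY_BOXED_PAGE };
struct toy_page { int allocated; long bytes_used; };

static struct toy_page toy_table[TOY_NPAGES];

/* Returns the first page of a fit, or -1; mirrors the shape of the
 * search above without the GC bookkeeping. */
static long toy_find(long nbytes)
{
    long i;
    if (nbytes >= TOY_PAGE_BYTES) {
        long run = 0, start = 0;
        for (i = 0; i < TOY_NPAGES; i++) {
            if (toy_table[i].allocated == TOY_FREE_PAGE) {
                if (run == 0) start = i;
                run += TOY_PAGE_BYTES;
                if (run >= nbytes) return start;  /* page-aligned run */
            } else
                run = 0;
        }
    } else {
        for (i = 0; i < TOY_NPAGES; i++) {
            if (toy_table[i].allocated == TOY_FREE_PAGE)
                return i;
            if (toy_table[i].allocated == TOY_BOXED_PAGE &&
                TOY_PAGE_BYTES - toy_table[i].bytes_used >= nbytes)
                return i;  /* reuse the tail of a partial page */
        }
    }
    return -1;
}

int main(void)
{
    toy_table[0].allocated = TOY_BOXED_PAGE;
    toy_table[0].bytes_used = TOY_PAGE_BYTES - 512;    /* 512 bytes left */
    printf("%ld\n", toy_find(256));                /* 0: tail of page 0 */
    printf("%ld\n", toy_find(2 * TOY_PAGE_BYTES)); /* 1: free aligned run */
    return 0;
}
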
 
@@ -1187,7 +1201,7 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
 {
     void *new_free_pointer;
 
-    if(nbytes>=large_object_size)
+    if (nbytes>=large_object_size)
         return gc_alloc_large(nbytes,unboxed_p,my_region);
 
     /* Check whether there is room in the current alloc region. */
@@ -1569,6 +1583,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
     if (!check_code_fixups)
         return;
 
+    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
+
     ncode_words = fixnum_value(code->code_size);
     nheader_words = HeaderValue(*(lispobj *)code);
     nwords = ncode_words + nheader_words;
@@ -1816,7 +1832,9 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
                     old_value - displacement;
         }
     } else {
-        fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
+        /* This used to just print a note to stderr, but a bogus fixup seems to
+         * indicate real heap corruption, so a hard failure is in order. */
+        lose("fixup vector %p has a bad widetag: %d\n", fixups_vector, widetag_of(fixups_vector->header));
     }
 
     /* Check for possible errors. */
@@ -2050,29 +2068,21 @@ size_lutex(lispobj *where)
 static long
 scav_weak_pointer(lispobj *where, lispobj object)
 {
-    struct weak_pointer *wp = weak_pointers;
-    /* Push the weak pointer onto the list of weak pointers.
-     * Do I have to watch for duplicates? Originally this was
-     * part of trans_weak_pointer but that didn't work in the
-     * case where the WP was in a promoted region.
+    /* Since we overwrite the 'next' field, we have to make
+     * sure not to do so for pointers already in the list.
+     * Instead of searching the list of weak_pointers each
+     * time, we ensure that next is always NULL when the weak
+     * pointer isn't in the list, and not NULL otherwise.
+     * Since we can't use NULL to denote end of list, we
+     * use a pointer back to the same weak_pointer.
      */
+    struct weak_pointer * wp = (struct weak_pointer*)where;
 
-    /* Check whether it's already in the list. */
-    while (wp != NULL) {
-        if (wp == (struct weak_pointer*)where) {
-            break;
-        }
-        wp = wp->next;
-    }
-    if (wp == NULL) {
-        /* Add it to the start of the list. */
-        wp = (struct weak_pointer*)where;
-        if (wp->next != weak_pointers) {
-            wp->next = weak_pointers;
-        } else {
-            /*SHOW("avoided write to weak pointer");*/
-        }
+    if (NULL == wp->next) {
+        wp->next = weak_pointers;
         weak_pointers = wp;
+        if (NULL == wp->next)
+            wp->next = wp;
     }
 
     /* Do not let GC scavenge the value slot of the weak pointer.
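
The sentinel convention described above places an obligation on whatever code
later consumes the weak_pointers chain: it must recognize the terminating
self-pointer and reset each next field to NULL, so the "not on the list"
invariant holds again by the next collection. A hedged sketch of such a
consumer (hypothetical, not SBCL's actual scan_weak_pointers):

#include <stddef.h>

/* Sketch only: next == NULL means "not on the list", and a weak
 * pointer whose next points at itself is the last element. */
struct weak_pointer_model { struct weak_pointer_model *next; };

static void walk_and_unlink(struct weak_pointer_model *list)
{
    struct weak_pointer_model *wp = list, *next_wp;
    while (wp != NULL) {
        next_wp = wp->next;
        if (next_wp == wp)         /* self-pointer marks end of list */
            next_wp = NULL;
        wp->next = NULL;           /* restore the "not on list" state */
        /* ... process the weak pointer's value slot here ... */
        wp = next_wp;
    }
}
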
@@ -2118,8 +2128,7 @@ search_dynamic_space(void *pointer)
     if ((page_index == -1) ||
         (page_table[page_index].allocated == FREE_PAGE_FLAG))
         return NULL;
-    start = (lispobj *)((void *)page_address(page_index)
-                        + page_table[page_index].first_object_offset);
+    start = (lispobj *)page_region_start(page_index);
     return (gc_search_space(start,
                             (((lispobj *)pointer)+2)-start,
                             (lispobj *)pointer));
@@ -2186,20 +2195,8 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
             return 0;
         }
         /* Is it a plausible cons? */
-        if ((is_lisp_pointer(start_addr[0])
-            || (fixnump(start_addr[0]))
-            || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
-            || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
-#endif
-            || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
-           && (is_lisp_pointer(start_addr[1])
-               || (fixnump(start_addr[1]))
-               || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
-               || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
-#endif
-               || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
+        if ((is_lisp_pointer(start_addr[0]) || is_lisp_immediate(start_addr[0])) &&
+            (is_lisp_pointer(start_addr[1]) || is_lisp_immediate(start_addr[1])))
             break;
         else {
             if (gencgc_verbose)
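
The is_lisp_immediate() predicate that replaces the long inline test is not
shown in this diff; judging from the removed lines it presumably folds
together exactly the fixnum, character, unbound-marker, and (on 64-bit)
single-float cases. A plausible reconstruction, with the IMMEDIATE_FLOAT_CASE
helper invented here purely to express the conditional clause:

#if N_WORD_BITS == 64
# define IMMEDIATE_FLOAT_CASE(obj) \
    || (widetag_of(obj) == SINGLE_FLOAT_WIDETAG)
#else
# define IMMEDIATE_FLOAT_CASE(obj)
#endif

/* Reconstructed from the predicate removed above; the real definition
 * lives elsewhere in the SBCL tree and may differ. */
#define is_lisp_immediate(obj)                           \
    (fixnump(obj)                                        \
     || (widetag_of(obj) == CHARACTER_WIDETAG)           \
     IMMEDIATE_FLOAT_CASE(obj)                           \
     || (widetag_of(obj) == UNBOUND_MARKER_WIDETAG))
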
@@ -2662,9 +2659,7 @@ preserve_pointer(void *addr)
 #if 0
     /* I think this'd work just as well, but without the assertions.
      * -dan 2004.01.01 */
-    first_page=
-        find_page_index(page_address(addr_page_index)+
-                        page_table[addr_page_index].first_object_offset);
+    first_page = find_page_index(page_region_start(addr_page_index));
 #else
     first_page = addr_page_index;
     while (page_table[first_page].first_object_offset != 0) {
@@ -3001,9 +2996,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
                         - page_table[i].first_object_offset)/N_WORD_BYTES;
                 new_areas_ignore_page = last_page;
 
-                scavenge(page_address(i) +
-                         page_table[i].first_object_offset,
-                         size);
+                scavenge(page_region_start(i), size);
 
             }
             i = last_page;
@@ -3506,15 +3499,14 @@ verify_space(lispobj *start, size_t words)
 #ifdef LUTEX_WIDETAG
                 case LUTEX_WIDETAG:
 #endif
+#ifdef NO_TLS_VALUE_MARKER_WIDETAG
+                case NO_TLS_VALUE_MARKER_WIDETAG:
+#endif
                     count = (sizetab[widetag_of(*start)])(start);
                     break;
 
                 default:
-                    FSHOW((stderr,
-                           "/Unhandled widetag 0x%x at 0x%x\n",
-                           widetag_of(*start), start));
-                    fflush(stderr);
-                    gc_abort();
+                    lose("Unhandled widetag 0x%x at 0x%x\n", widetag_of(*start), start);
                 }
             }
         }
@@ -4449,8 +4441,7 @@ gc_free_heap(void)
 
     if (verify_after_free_heap) {
         /* Check whether purify has left any bad pointers. */
-        if (gencgc_verbose)
-            SHOW("checking after free_heap\n");
+        FSHOW((stderr, "checking after free_heap\n"));
         verify_gc();
     }
 }
@@ -4589,7 +4580,7 @@ gc_initialize_pointers(void)
  * The check for a GC trigger is only performed when the current
  * region is full, so in most cases it's not needed. */
 
-char *
+lispobj *
 alloc(long nbytes)
 {
     struct thread *thread=arch_os_get_current_thread();
@@ -4661,6 +4652,7 @@ alloc(long nbytes)
     alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
     if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
         if ((signed long) alloc_signal <= 0) {
+            SetSymbolValue(ALLOC_SIGNAL, T, thread);
 #ifdef LISP_FEATURE_SB_THREAD
             kill_thread_safely(thread->os_thread, SIGPROF);
 #else
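
For context: this branch fires when the ALLOC_SIGNAL symbol holds a fixnum
that has counted down to zero or below, and the newly added SetSymbolValue
call re-arms the symbol to T (disabling further signals) before SIGPROF is
delivered. A toy model of that protocol; the per-allocation decrement is an
assumption here, since it falls outside this hunk:

#include <signal.h>

#define MODEL_DISABLED -1  /* stands in for the symbol holding T */

static long model_alloc_signal = MODEL_DISABLED;

static void model_after_allocation(void)
{
    if (model_alloc_signal == MODEL_DISABLED)
        return;                              /* signalling switched off */
    if (model_alloc_signal <= 0) {
        model_alloc_signal = MODEL_DISABLED; /* like SetSymbolValue(..., T) */
        raise(SIGPROF);                      /* like kill_thread_safely()  */
    } else {
        model_alloc_signal--;                /* assumed countdown step */
    }
}
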
@@ -4682,7 +4674,7 @@ alloc(long nbytes)
  * catch GENCGC-related write-protect violations
  */
 
-void unhandled_sigmemoryfault(void);
+void unhandled_sigmemoryfault(void* addr);
 
 /* Depending on which OS we're running under, different signals might
  * be raised for a violation of write protection in the heap. This
@@ -4709,7 +4701,7 @@ gencgc_handle_wp_violation(void* fault_addr)
 
         /* It can be helpful to be able to put a breakpoint on this
          * case to help diagnose low-level problems. */
-        unhandled_sigmemoryfault();
+        unhandled_sigmemoryfault(fault_addr);
 
         /* not within the dynamic space -- not our responsibility */
         return 0;
@@ -4740,7 +4732,7 @@ gencgc_handle_wp_violation(void* fault_addr)
  * are about to let Lisp deal with it. It's basically just a
  * convenient place to set a gdb breakpoint. */
 void
-unhandled_sigmemoryfault()
+unhandled_sigmemoryfault(void *addr)
 {}
 
 void gc_alloc_update_all_page_tables(void)
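
With the fault address now threaded through, diagnosing stray write-protect
faults comes down to setting a gdb breakpoint on unhandled_sigmemoryfault and
inspecting addr to see which address, outside the dynamic space, triggered
the violation.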