1.0.23.40: export page sizes to C with LU suffix
diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 9fb8e44..a13bbeb 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -1,5 +1,5 @@
 /*
- * GENerational Conservative Garbage Collector for SBCL x86
+ * GENerational Conservative Garbage Collector for SBCL
  */
 
 /*
@@ -24,6 +24,7 @@
  *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
  */
 
+#include <stdlib.h>
 #include <stdio.h>
 #include <signal.h>
 #include <errno.h>
 #include "validate.h"
 #include "lispregs.h"
 #include "arch.h"
-#include "fixnump.h"
 #include "gc.h"
 #include "gc-internal.h"
 #include "thread.h"
+#include "alloc.h"
 #include "genesis/vector.h"
 #include "genesis/weak-pointer.h"
 #include "genesis/fdefn.h"
 #include "genesis/hash-table.h"
 #include "genesis/instance.h"
 #include "genesis/layout.h"
-
-#ifdef LUTEX_WIDETAG
-#include "genesis/lutex.h"
+#include "gencgc.h"
+#if defined(LUTEX_WIDETAG)
+#include "pthread-lutex.h"
 #endif
 
 /* forward declarations */
 page_index_t  gc_find_freeish_pages(long *restart_page_ptr, long nbytes,
-                                    int unboxed);
+                                    int page_type_flag);
 
 \f
 /*
@@ -78,7 +79,7 @@ enum {
 boolean enable_page_protection = 1;
 
 /* the minimum size (in bytes) for a large object */
-unsigned long large_object_size = 4 * PAGE_BYTES;
+long large_object_size = 4 * PAGE_BYTES;
 
 \f
 /*
@@ -144,7 +145,6 @@ boolean gencgc_partial_pickup = 0;
 
 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
 unsigned long bytes_allocated = 0;
-extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
 unsigned long auto_gc_trigger = 0;
 
 /* the source and destination generations. These are set before a GC starts
@@ -159,21 +159,58 @@ boolean gc_active_p = 0;
  * saving a core), don't scan the stack / mark pages dont_move. */
 static boolean conservative_stack = 1;
 
-/* An array of page structures is statically allocated.
+/* An array of page structures is allocated on gc initialization.
  * This helps quickly map between an address and its page structure.
- * NUM_PAGES is set from the size of the dynamic space. */
-struct page page_table[NUM_PAGES];
+ * page_table_pages is set from the size of the dynamic space. */
+page_index_t page_table_pages;
+struct page *page_table;
+
+static inline boolean page_allocated_p(page_index_t page) {
+    return (page_table[page].allocated != FREE_PAGE_FLAG);
+}
+
+static inline boolean page_no_region_p(page_index_t page) {
+    return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
+}
+
+static inline boolean page_allocated_no_region_p(page_index_t page) {
+    return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
+            && page_no_region_p(page));
+}
+
+static inline boolean page_free_p(page_index_t page) {
+    return (page_table[page].allocated == FREE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_p(page_index_t page) {
+    return (page_table[page].allocated & BOXED_PAGE_FLAG);
+}
+
+static inline boolean code_page_p(page_index_t page) {
+    return (page_table[page].allocated & CODE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_no_region_p(page_index_t page) {
+    return page_boxed_p(page) && page_no_region_p(page);
+}
+
+static inline boolean page_unboxed_p(page_index_t page) {
+    /* Both flags set == boxed code page */
+    return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
+            && !page_boxed_p(page));
+}
+
+static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
+    return (page_boxed_no_region_p(page)
+            && (page_table[page].bytes_used != 0)
+            && !page_table[page].dont_move
+            && (page_table[page].gen == generation));
+}
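
The predicate helpers above replace open-coded tests against page_table[page].allocated that were previously repeated at every call site. A self-contained sketch of the pattern (the flag values and the two-entry table below are illustrative stand-ins, not the real runtime definitions):

    #include <stdio.h>

    /* Stand-in flag values; the real ones live elsewhere in the runtime. */
    #define FREE_PAGE_FLAG        0
    #define BOXED_PAGE_FLAG       1
    #define OPEN_REGION_PAGE_FLAG 4

    struct page { unsigned char allocated; };
    static struct page page_table[2];

    static int page_boxed_p(int page) {
        return (page_table[page].allocated & BOXED_PAGE_FLAG) != 0;
    }
    static int page_no_region_p(int page) {
        return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
    }

    int main(void) {
        page_table[0].allocated = BOXED_PAGE_FLAG | OPEN_REGION_PAGE_FLAG;
        page_table[1].allocated = BOXED_PAGE_FLAG;
        /* page 0 is boxed but its region is still open; page 1 is closed */
        printf("%d %d\n",
               page_boxed_p(0) && page_no_region_p(0),  /* prints 0 */
               page_boxed_p(1) && page_no_region_p(1)); /* prints 1 */
        return 0;
    }
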
 
 /* To map addresses to page structures the address of the first page
  * is needed. */
 static void *heap_base = NULL;
 
-#if N_WORD_BITS == 32
- #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
-#elif N_WORD_BITS == 64
- #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
-#endif
-
 /* Calculate the start address for the given page number. */
 inline void *
 page_address(page_index_t page_num)
@@ -181,22 +218,44 @@ page_address(page_index_t page_num)
     return (heap_base + (page_num * PAGE_BYTES));
 }
 
+/* Calculate the address where the allocation region associated with
+ * the page starts. */
+static inline void *
+page_region_start(page_index_t page_index)
+{
+    return page_address(page_index)-page_table[page_index].region_start_offset;
+}
+
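
A worked example of the region_start_offset bookkeeping (PAGE_BYTES and the addresses below are invented for illustration): if a region is opened at the start of page 3 and extends through page 5, then page 5 records how many bytes before its own start the region began, and subtracting that offset recovers the region start, which is what page_region_start() computes.

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_BYTES 4096 /* illustrative value only */

    int main(void) {
        uintptr_t heap_base = 0x100000;
        uintptr_t page3 = heap_base + 3 * PAGE_BYTES;
        uintptr_t page5 = heap_base + 5 * PAGE_BYTES;
        /* page 5 lies two full pages into the region */
        uintptr_t region_start_offset = page5 - page3; /* 8192 */
        /* the page_region_start() computation, on plain integers */
        assert(page5 - region_start_offset == page3);
        return 0;
    }
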
 /* Find the page index within the page_table for the given
  * address. Return -1 on failure. */
 inline page_index_t
 find_page_index(void *addr)
 {
-    page_index_t index = addr-heap_base;
-
-    if (index >= 0) {
-        index = ((unsigned long)index)/PAGE_BYTES;
-        if (index < NUM_PAGES)
+    if (addr >= heap_base) {
+        page_index_t index = ((pointer_sized_uint_t)addr -
+                              (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
+        if (index < page_table_pages)
             return (index);
     }
-
     return (-1);
 }
 
+static size_t
+npage_bytes(long npages)
+{
+    gc_assert(npages>=0);
+    return ((unsigned long)npages)*PAGE_BYTES;
+}
+
+/* Check that X is a higher address than Y and return offset from Y to
+ * X in bytes. */
+static inline
+size_t void_diff(void *x, void *y)
+{
+    gc_assert(x >= y);
+    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
+}
+
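
These two helpers centralize the size arithmetic that the rest of the patch converts over from open-coded PAGE_BYTES*n multiplications and raw pointer subtractions. A standalone sketch of the same idea (uintptr_t stands in for pointer_sized_uint_t, an assumption about that type's intent):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_BYTES 4096 /* illustrative value only */

    static size_t npage_bytes(long npages) {
        assert(npages >= 0);
        return (size_t)npages * PAGE_BYTES;
    }

    /* x must not be below y; returns the byte distance from y up to x */
    static size_t void_diff(void *x, void *y) {
        assert(x >= y);
        return (uintptr_t)x - (uintptr_t)y;
    }

    int main(void) {
        static char heap[3 * PAGE_BYTES];
        assert(void_diff(heap + PAGE_BYTES, heap) == npage_bytes(1));
        return 0;
    }
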
 /* a structure to hold the state of a generation */
 struct generation {
 
@@ -216,13 +275,13 @@ struct generation {
     page_index_t alloc_large_unboxed_start_page;
 
     /* the bytes allocated to this generation */
-    long bytes_allocated;
+    unsigned long bytes_allocated;
 
     /* the number of bytes at which to trigger a GC */
-    long gc_trigger;
+    unsigned long gc_trigger;
 
     /* to calculate a new level for gc_trigger */
-    long bytes_consed_between_gc;
+    unsigned long bytes_consed_between_gc;
 
     /* the number of GCs since the last raise */
     int num_gc;
@@ -236,7 +295,7 @@ struct generation {
      * objects are added from a GC of a younger generation. Dividing by
      * the bytes_allocated will give the average age of the memory in
      * this generation since its last GC. */
-    long cum_sum_bytes_allocated;
+    unsigned long cum_sum_bytes_allocated;
 
     /* a minimum average memory age before a GC will occur helps
      * prevent a GC when a large number of new live objects have been
@@ -277,15 +336,16 @@ generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
  * integrated with the Lisp code. */
 page_index_t last_free_page;
 \f
+#ifdef LISP_FEATURE_SB_THREAD
 /* This lock is to prevent multiple threads from simultaneously
  * allocating new regions which overlap each other.  Note that the
  * majority of GC is single-threaded, but alloc() may be called from
  * >1 thread at a time and must be thread-safe.  This lock must be
  * seized before all accesses to generations[] or to parts of
  * page_table[] that other threads may want to see */
-
-#ifdef LISP_FEATURE_SB_THREAD
 static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
+/* This lock is used to protect non-thread-local allocation. */
+static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif
 
 \f
@@ -299,10 +359,10 @@ static long
 count_write_protect_generation_pages(generation_index_t generation)
 {
     page_index_t i;
-    long count = 0;
+    unsigned long count = 0;
 
     for (i = 0; i < last_free_page; i++)
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
+        if (page_allocated_p(i)
             && (page_table[i].gen == generation)
             && (page_table[i].write_protected == 1))
             count++;
@@ -317,7 +377,7 @@ count_generation_pages(generation_index_t generation)
     long count = 0;
 
     for (i = 0; i < last_free_page; i++)
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
+        if (page_allocated_p(i)
             && (page_table[i].gen == generation))
             count++;
     return count;
@@ -330,7 +390,7 @@ count_dont_move_pages(void)
     page_index_t i;
     long count = 0;
     for (i = 0; i < last_free_page; i++) {
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
+        if (page_allocated_p(i)
             && (page_table[i].dont_move != 0)) {
             ++count;
         }
@@ -341,13 +401,13 @@ count_dont_move_pages(void)
 
 /* Work through the pages and add up the number of bytes used for the
  * given generation. */
-static long
+static unsigned long
 count_generation_bytes_allocated (generation_index_t gen)
 {
     page_index_t i;
-    long result = 0;
+    unsigned long result = 0;
     for (i = 0; i < last_free_page; i++) {
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
+        if (page_allocated_p(i)
             && (page_table[i].gen == gen))
             result += page_table[i].bytes_used;
     }
@@ -408,7 +468,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
 
                 /* Count the number of boxed pages within the given
                  * generation. */
-                if (page_table[j].allocated & BOXED_PAGE_FLAG) {
+                if (page_boxed_p(j)) {
                     if (page_table[j].large_object)
                         large_boxed_cnt++;
                     else
@@ -417,7 +477,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
                 if(page_table[j].dont_move) pinned_cnt++;
                 /* Count the number of unboxed pages within the given
                  * generation. */
-                if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+                if (page_unboxed_p(j)) {
                     if (page_table[j].large_object)
                         large_unboxed_cnt++;
                     else
@@ -440,13 +500,15 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
                 large_unboxed_cnt,
                 pinned_cnt,
                 generations[i].bytes_allocated,
-                (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated),
+                (npage_bytes(count_generation_pages(i))
+                 - generations[i].bytes_allocated),
                 generations[i].gc_trigger,
                 count_write_protect_generation_pages(i),
                 generations[i].num_gc,
                 gen_av_mem_age(i));
     }
-    fprintf(stderr,"   Total bytes allocated=%ld\n", bytes_allocated);
+    fprintf(stderr,"   Total bytes allocated    = %lu\n", bytes_allocated);
+    fprintf(stderr,"   Dynamic-space-size bytes = %lu\n", dynamic_space_size);
 
     fpu_restore(fpu_state);
 }
@@ -462,8 +524,8 @@ void fast_bzero(void*, size_t); /* in <arch>-assem.S */
  */
 void zero_pages_with_mmap(page_index_t start, page_index_t end) {
     int i;
-    void *addr = (void *) page_address(start), *new_addr;
-    size_t length = PAGE_BYTES*(1+end-start);
+    void *addr = page_address(start), *new_addr;
+    size_t length = npage_bytes(1+end-start);
 
     if (start > end)
       return;
@@ -471,7 +533,8 @@ void zero_pages_with_mmap(page_index_t start, page_index_t end) {
     os_invalidate(addr, length);
     new_addr = os_validate(addr, length);
     if (new_addr == NULL || new_addr != addr) {
-        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", start, new_addr);
+        lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
+             start, new_addr);
     }
 
     for (i = start; i <= end; i++) {
@@ -488,9 +551,9 @@ zero_pages(page_index_t start, page_index_t end) {
       return;
 
 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-    fast_bzero(page_address(start), PAGE_BYTES*(1+end-start));
+    fast_bzero(page_address(start), npage_bytes(1+end-start));
 #else
-    bzero(page_address(start), PAGE_BYTES*(1+end-start));
+    bzero(page_address(start), npage_bytes(1+end-start));
 #endif
 
 }
@@ -570,6 +633,55 @@ struct alloc_region unboxed_region;
 /* The generation currently being allocated to. */
 static generation_index_t gc_alloc_generation;
 
+static inline page_index_t
+generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
+{
+    if (large) {
+        if (UNBOXED_PAGE_FLAG == page_type_flag) {
+            return generations[generation].alloc_large_unboxed_start_page;
+        } else if (BOXED_PAGE_FLAG & page_type_flag) {
+            /* Both code and data. */
+            return generations[generation].alloc_large_start_page;
+        } else {
+            lose("bad page type flag: %d", page_type_flag);
+        }
+    } else {
+        if (UNBOXED_PAGE_FLAG == page_type_flag) {
+            return generations[generation].alloc_unboxed_start_page;
+        } else if (BOXED_PAGE_FLAG & page_type_flag) {
+            /* Both code and data. */
+            return generations[generation].alloc_start_page;
+        } else {
+            lose("bad page_type_flag: %d", page_type_flag);
+        }
+    }
+}
+
+static inline void
+set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
+                                page_index_t page)
+{
+    if (large) {
+        if (UNBOXED_PAGE_FLAG == page_type_flag) {
+            generations[generation].alloc_large_unboxed_start_page = page;
+        } else if (BOXED_PAGE_FLAG & page_type_flag) {
+            /* Both code and data. */
+            generations[generation].alloc_large_start_page = page;
+        } else {
+            lose("bad page type flag: %d", page_type_flag);
+        }
+    } else {
+        if (UNBOXED_PAGE_FLAG == page_type_flag) {
+            generations[generation].alloc_unboxed_start_page = page;
+        } else if (BOXED_PAGE_FLAG & page_type_flag) {
+            /* Both code and data. */
+            generations[generation].alloc_start_page = page;
+        } else {
+            lose("bad page type flag: %d", page_type_flag);
+        }
+    }
+}
+
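
This getter/setter pair folds what used to be four separate if (unboxed) ... else ... chains into a single dispatch keyed on (page_type_flag, large). A minimal self-contained sketch of the getter's dispatch (flag values and the struct are simplified stand-ins for the gencgc originals):

    #include <stdio.h>
    #include <stdlib.h>

    #define BOXED_PAGE_FLAG   1
    #define UNBOXED_PAGE_FLAG 2

    struct gen {
        long alloc_start_page, alloc_unboxed_start_page;
        long alloc_large_start_page, alloc_large_unboxed_start_page;
    };
    static struct gen generations[7];

    static long generation_alloc_start_page(int g, int page_type_flag, int large) {
        if (UNBOXED_PAGE_FLAG == page_type_flag)
            return large ? generations[g].alloc_large_unboxed_start_page
                         : generations[g].alloc_unboxed_start_page;
        if (BOXED_PAGE_FLAG & page_type_flag) /* both code and data */
            return large ? generations[g].alloc_large_start_page
                         : generations[g].alloc_start_page;
        fprintf(stderr, "bad page type flag: %d\n", page_type_flag);
        abort();
    }

    int main(void) {
        generations[0].alloc_start_page = 42;
        printf("%ld\n", generation_alloc_start_page(0, BOXED_PAGE_FLAG, 0));
        return 0;
    }
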
 /* Find a new region with room for at least the given number of bytes.
  *
  * It starts looking at the current generation's alloc_start_page. So
@@ -594,11 +706,11 @@ static generation_index_t gc_alloc_generation;
  * are allocated, although they will initially be empty.
  */
 static void
-gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
 {
     page_index_t first_page;
     page_index_t last_page;
-    long bytes_found;
+    unsigned long bytes_found;
     page_index_t i;
     int ret;
 
@@ -614,16 +726,10 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
               && (alloc_region->free_pointer == alloc_region->end_addr));
     ret = thread_mutex_lock(&free_pages_lock);
     gc_assert(ret == 0);
-    if (unboxed) {
-        first_page =
-            generations[gc_alloc_generation].alloc_unboxed_start_page;
-    } else {
-        first_page =
-            generations[gc_alloc_generation].alloc_start_page;
-    }
-    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
+    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
+    last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
     bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
-            + PAGE_BYTES*(last_page-first_page);
+            + npage_bytes(last_page-first_page);
 
     /* Set up the alloc_region. */
     alloc_region->first_page = first_page;
@@ -637,64 +743,41 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
 
     /* The first page may have already been in use. */
     if (page_table[first_page].bytes_used == 0) {
-        if (unboxed)
-            page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
-        else
-            page_table[first_page].allocated = BOXED_PAGE_FLAG;
+        page_table[first_page].allocated = page_type_flag;
         page_table[first_page].gen = gc_alloc_generation;
         page_table[first_page].large_object = 0;
-        page_table[first_page].first_object_offset = 0;
+        page_table[first_page].region_start_offset = 0;
     }
 
-    if (unboxed)
-        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
-    else
-        gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+    gc_assert(page_table[first_page].allocated == page_type_flag);
     page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
 
     gc_assert(page_table[first_page].gen == gc_alloc_generation);
     gc_assert(page_table[first_page].large_object == 0);
 
     for (i = first_page+1; i <= last_page; i++) {
-        if (unboxed)
-            page_table[i].allocated = UNBOXED_PAGE_FLAG;
-        else
-            page_table[i].allocated = BOXED_PAGE_FLAG;
+        page_table[i].allocated = page_type_flag;
         page_table[i].gen = gc_alloc_generation;
         page_table[i].large_object = 0;
         /* This may not be necessary for unboxed regions (think it was
          * broken before!) */
-        page_table[i].first_object_offset =
-            alloc_region->start_addr - page_address(i);
+        page_table[i].region_start_offset =
+            void_diff(page_address(i),alloc_region->start_addr);
         page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
     }
     /* Bump up last_free_page. */
     if (last_page+1 > last_free_page) {
         last_free_page = last_page+1;
-        /* do we only want to call this on special occasions? like for boxed_region? */
-        set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+        /* do we only want to call this on special occasions? like for
+         * boxed_region? */
+        set_alloc_pointer((lispobj)page_address(last_free_page));
     }
     ret = thread_mutex_unlock(&free_pages_lock);
     gc_assert(ret == 0);
 
-    /* we can do this after releasing free_pages_lock */
-    if (gencgc_zero_check) {
-        long *p;
-        for (p = (long *)alloc_region->start_addr;
-             p < (long *)alloc_region->end_addr; p++) {
-            if (*p != 0) {
-                /* KLUDGE: It would be nice to use %lx and explicit casts
-                 * (long) in code like this, so that it is less likely to
-                 * break randomly when running on a machine with different
-                 * word sizes. -- WHN 19991129 */
-                lose("The new region at %x is not zero.\n", p);
-            }
-        }
-    }
-
 #ifdef READ_PROTECT_FREE_PAGES
     os_protect(page_address(first_page),
-               PAGE_BYTES*(1+last_page-first_page),
+               npage_bytes(1+last_page-first_page),
                OS_VM_PROT_ALL);
 #endif
 
@@ -707,6 +790,22 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
     }
 
     zero_dirty_pages(first_page, last_page);
+
+    /* we can do this after releasing free_pages_lock */
+    if (gencgc_zero_check) {
+        long *p;
+        for (p = (long *)alloc_region->start_addr;
+             p < (long *)alloc_region->end_addr; p++) {
+            if (*p != 0) {
+                /* KLUDGE: It would be nice to use %lx and explicit casts
+                 * (long) in code like this, so that it is less likely to
+                 * break randomly when running on a machine with different
+                 * word sizes. -- WHN 19991129 */
+                lose("The new region at %x is not zero (start=%p, end=%p).\n",
+                     p, alloc_region->start_addr, alloc_region->end_addr);
+            }
+        }
+    }
 }
 
 /* If the record_new_objects flag is 2 then all new regions created
@@ -729,8 +828,8 @@ static int record_new_objects = 0;
 static page_index_t new_areas_ignore_page;
 struct new_area {
     page_index_t page;
-    long  offset;
-    long  size;
+    size_t offset;
+    size_t size;
 };
 static struct new_area (*new_areas)[];
 static long new_areas_index;
@@ -738,7 +837,7 @@ long max_new_areas;
 
 /* Add a new area to new_areas. */
 static void
-add_new_area(page_index_t first_page, long offset, long size)
+add_new_area(page_index_t first_page, size_t offset, size_t size)
 {
     unsigned long new_area_start,c;
     long i;
@@ -760,13 +859,13 @@ add_new_area(page_index_t first_page, long offset, long size)
         gc_abort();
     }
 
-    new_area_start = PAGE_BYTES*first_page + offset;
+    new_area_start = npage_bytes(first_page) + offset;
 
     /* Search backwards for a prior area that this follows from. If
        found this will save adding a new area. */
     for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
         unsigned long area_end =
-            PAGE_BYTES*((*new_areas)[i].page)
+            npage_bytes((*new_areas)[i].page)
             + (*new_areas)[i].offset
             + (*new_areas)[i].size;
         /*FSHOW((stderr,
@@ -808,15 +907,15 @@ add_new_area(page_index_t first_page, long offset, long size)
  * it is safe to try to re-update the page table of this reset
  * alloc_region. */
 void
-gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
+gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
 {
     int more;
     page_index_t first_page;
     page_index_t next_page;
-    int bytes_used;
-    long orig_first_page_bytes_used;
-    long region_size;
-    long byte_cnt;
+    unsigned long bytes_used;
+    unsigned long orig_first_page_bytes_used;
+    unsigned long region_size;
+    unsigned long byte_cnt;
     int ret;
 
 
@@ -834,22 +933,21 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         /* some bytes were allocated in the region */
         orig_first_page_bytes_used = page_table[first_page].bytes_used;
 
-        gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
+        gc_assert(alloc_region->start_addr ==
+                  (page_address(first_page)
+                   + page_table[first_page].bytes_used));
 
         /* All the pages used need to be updated */
 
         /* Update the first page. */
 
         /* If the page was free then set up the gen, and
-         * first_object_offset. */
+         * region_start_offset. */
         if (page_table[first_page].bytes_used == 0)
-            gc_assert(page_table[first_page].first_object_offset == 0);
+            gc_assert(page_table[first_page].region_start_offset == 0);
         page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
 
-        if (unboxed)
-            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
-        else
-            gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+        gc_assert(page_table[first_page].allocated & page_type_flag);
         gc_assert(page_table[first_page].gen == gc_alloc_generation);
         gc_assert(page_table[first_page].large_object == 0);
 
@@ -858,7 +956,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         /* Calculate the number of bytes used in this page. This is not
          * always the number of new bytes, unless it was free. */
         more = 0;
-        if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+        if ((bytes_used = void_diff(alloc_region->free_pointer,
+                                    page_address(first_page)))
+            >PAGE_BYTES) {
             bytes_used = PAGE_BYTES;
             more = 1;
         }
@@ -866,26 +966,24 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         byte_cnt += bytes_used;
 
 
-        /* All the rest of the pages should be free. We need to set their
-         * first_object_offset pointer to the start of the region, and set
-         * the bytes_used. */
+        /* All the rest of the pages should be free. We need to set
+         * their region_start_offset pointer to the start of the
+         * region, and set the bytes_used. */
         while (more) {
             page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
-            if (unboxed)
-                gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
-            else
-                gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+            gc_assert(page_table[next_page].allocated & page_type_flag);
             gc_assert(page_table[next_page].bytes_used == 0);
             gc_assert(page_table[next_page].gen == gc_alloc_generation);
             gc_assert(page_table[next_page].large_object == 0);
 
-            gc_assert(page_table[next_page].first_object_offset ==
-                      alloc_region->start_addr - page_address(next_page));
+            gc_assert(page_table[next_page].region_start_offset ==
+                      void_diff(page_address(next_page),
+                                alloc_region->start_addr));
 
             /* Calculate the number of bytes used in this page. */
             more = 0;
-            if ((bytes_used = (alloc_region->free_pointer
-                               - page_address(next_page)))>PAGE_BYTES) {
+            if ((bytes_used = void_diff(alloc_region->free_pointer,
+                                        page_address(next_page)))>PAGE_BYTES) {
                 bytes_used = PAGE_BYTES;
                 more = 1;
             }
@@ -895,7 +993,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
             next_page++;
         }
 
-        region_size = alloc_region->free_pointer - alloc_region->start_addr;
+        region_size = void_diff(alloc_region->free_pointer,
+                                alloc_region->start_addr);
         bytes_allocated += region_size;
         generations[gc_alloc_generation].bytes_allocated += region_size;
 
@@ -903,14 +1002,10 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
 
         /* Set the generations alloc restart page to the last page of
          * the region. */
-        if (unboxed)
-            generations[gc_alloc_generation].alloc_unboxed_start_page =
-                next_page-1;
-        else
-            generations[gc_alloc_generation].alloc_start_page = next_page-1;
+        set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);
 
         /* Add the region to the new_areas if requested. */
-        if (!unboxed)
+        if (BOXED_PAGE_FLAG & page_type_flag)
             add_new_area(first_page,orig_first_page_bytes_used, region_size);
 
         /*
@@ -944,7 +1039,7 @@ static inline void *gc_quick_alloc(long nbytes);
 
 /* Allocate a possibly large object. */
 void *
-gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
 {
     page_index_t first_page;
     page_index_t last_page;
@@ -958,44 +1053,30 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
     ret = thread_mutex_lock(&free_pages_lock);
     gc_assert(ret == 0);
 
-    if (unboxed) {
-        first_page =
-            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
-    } else {
-        first_page = generations[gc_alloc_generation].alloc_large_start_page;
-    }
+    first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
     if (first_page <= alloc_region->last_page) {
         first_page = alloc_region->last_page+1;
     }
 
-    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
+    last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);
 
     gc_assert(first_page > alloc_region->last_page);
-    if (unboxed)
-        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
-            last_page;
-    else
-        generations[gc_alloc_generation].alloc_large_start_page = last_page;
+
+    set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);
 
     /* Set up the pages. */
     orig_first_page_bytes_used = page_table[first_page].bytes_used;
 
     /* If the first page was free then set up the gen, and
-     * first_object_offset. */
+     * region_start_offset. */
     if (page_table[first_page].bytes_used == 0) {
-        if (unboxed)
-            page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
-        else
-            page_table[first_page].allocated = BOXED_PAGE_FLAG;
+        page_table[first_page].allocated = page_type_flag;
         page_table[first_page].gen = gc_alloc_generation;
-        page_table[first_page].first_object_offset = 0;
+        page_table[first_page].region_start_offset = 0;
         page_table[first_page].large_object = 1;
     }
 
-    if (unboxed)
-        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
-    else
-        gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+    gc_assert(page_table[first_page].allocated == page_type_flag);
     gc_assert(page_table[first_page].gen == gc_alloc_generation);
     gc_assert(page_table[first_page].large_object == 1);
 
@@ -1014,24 +1095,22 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
     next_page = first_page+1;
 
     /* All the rest of the pages should be free. We need to set their
-     * first_object_offset pointer to the start of the region, and
-     * set the bytes_used. */
+     * region_start_offset pointer to the start of the region, and set
+     * the bytes_used. */
     while (more) {
-        gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
+        gc_assert(page_free_p(next_page));
         gc_assert(page_table[next_page].bytes_used == 0);
-        if (unboxed)
-            page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-        else
-            page_table[next_page].allocated = BOXED_PAGE_FLAG;
+        page_table[next_page].allocated = page_type_flag;
         page_table[next_page].gen = gc_alloc_generation;
         page_table[next_page].large_object = 1;
 
-        page_table[next_page].first_object_offset =
-            orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
+        page_table[next_page].region_start_offset =
+            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;
 
         /* Calculate the number of bytes used in this page. */
         more = 0;
-        if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+        bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
+        if (bytes_used > PAGE_BYTES) {
             bytes_used = PAGE_BYTES;
             more = 1;
         }
@@ -1048,20 +1127,20 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
     generations[gc_alloc_generation].bytes_allocated += nbytes;
 
     /* Add the region to the new_areas if requested. */
-    if (!unboxed)
+    if (BOXED_PAGE_FLAG & page_type_flag)
         add_new_area(first_page,orig_first_page_bytes_used,nbytes);
 
     /* Bump up last_free_page */
     if (last_page+1 > last_free_page) {
         last_free_page = last_page+1;
-        set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+        set_alloc_pointer((lispobj)(page_address(last_free_page)));
     }
     ret = thread_mutex_unlock(&free_pages_lock);
     gc_assert(ret == 0);
 
 #ifdef READ_PROTECT_FREE_PAGES
     os_protect(page_address(first_page),
-               PAGE_BYTES*(1+last_page-first_page),
+               npage_bytes(1+last_page-first_page),
                OS_VM_PROT_ALL);
 #endif
 
@@ -1076,101 +1155,119 @@ void
 gc_heap_exhausted_error_or_lose (long available, long requested)
 {
     /* Write basic information before doing anything else: if we don't
-     * call to lisp this is a must, and even if we do there is always the
-     * danger that we bounce back here before the error has been handled,
-     * or indeed even printed.
+     * call to lisp this is a must, and even if we do there is always
+     * the danger that we bounce back here before the error has been
+     * handled, or indeed even printed.
      */
     fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
-            gc_active_p ? "garbage collection" : "allocation", available, requested);
+            gc_active_p ? "garbage collection" : "allocation",
+            available, requested);
     if (gc_active_p || (available == 0)) {
         /* If we are in GC, or totally out of memory there is no way
          * to sanely transfer control to the lisp-side of things.
          */
+        struct thread *thread = arch_os_get_current_thread();
         print_generation_stats(1);
+        fprintf(stderr, "GC control variables:\n");
+        fprintf(stderr, "          *GC-INHIBIT* = %s\n          *GC-PENDING* = %s\n",
+                SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
+                SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true");
+#ifdef LISP_FEATURE_SB_THREAD
+        fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n",
+                SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
+#endif
         lose("Heap exhausted, game over.");
     }
     else {
         /* FIXME: assert free_pages_lock held */
-        thread_mutex_unlock(&free_pages_lock);
-        funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR),
-                 make_fixnum(available), make_fixnum(requested));
+        (void)thread_mutex_unlock(&free_pages_lock);
+        funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
+                 alloc_number(available), alloc_number(requested));
         lose("HEAP-EXHAUSTED-ERROR fell through");
     }
 }
 
 page_index_t
-gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
+gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag)
 {
-    page_index_t first_page;
-    page_index_t last_page;
-    long region_size;
-    page_index_t restart_page=*restart_page_ptr;
-    long bytes_found;
-    long num_pages;
-    int large_p=(nbytes>=large_object_size);
+    page_index_t first_page, last_page;
+    page_index_t restart_page = *restart_page_ptr;
+    long bytes_found = 0;
+    long most_bytes_found = 0;
     /* FIXME: assert(free_pages_lock is held); */
 
-    /* Search for a contiguous free space of at least nbytes. If it's
-     * a large object then align it on a page boundary by searching
-     * for a free page. */
-
+    /* Toggled by gc_and_save for heap compaction, normally -1. */
     if (gencgc_alloc_start_page != -1) {
         restart_page = gencgc_alloc_start_page;
     }
 
-    do {
-        first_page = restart_page;
-        if (large_p)
-            while ((first_page < NUM_PAGES)
-                   && (page_table[first_page].allocated != FREE_PAGE_FLAG))
-                first_page++;
-        else
-            while (first_page < NUM_PAGES) {
-                if(page_table[first_page].allocated == FREE_PAGE_FLAG)
-                    break;
-                if((page_table[first_page].allocated ==
-                    (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
-                   (page_table[first_page].large_object == 0) &&
-                   (page_table[first_page].gen == gc_alloc_generation) &&
-                   (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
-                   (page_table[first_page].write_protected == 0) &&
-                   (page_table[first_page].dont_move == 0)) {
-                    break;
-                }
+    if (nbytes>=PAGE_BYTES) {
+        /* Search for a contiguous free space of at least nbytes,
+         * aligned on a page boundary. The page-alignment is strictly
+         * speaking needed only for objects at least large_object_size
+         * bytes in size. */
+        do {
+            first_page = restart_page;
+            while ((first_page < page_table_pages) &&
+                   page_allocated_p(first_page))
                 first_page++;
-            }
 
-        if (first_page >= NUM_PAGES)
-            gc_heap_exhausted_error_or_lose(0, nbytes);
-
-        gc_assert(page_table[first_page].write_protected == 0);
+            last_page = first_page;
+            bytes_found = PAGE_BYTES;
+            while ((bytes_found < nbytes) &&
+                   (last_page < (page_table_pages-1)) &&
+                   page_free_p(last_page+1)) {
+                last_page++;
+                bytes_found += PAGE_BYTES;
+                gc_assert(0 == page_table[last_page].bytes_used);
+                gc_assert(0 == page_table[last_page].write_protected);
+            }
+            if (bytes_found > most_bytes_found)
+                most_bytes_found = bytes_found;
+            restart_page = last_page + 1;
+        } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
 
-        last_page = first_page;
-        bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
-        num_pages = 1;
-        while (((bytes_found < nbytes)
-                || (!large_p && (num_pages < 2)))
-               && (last_page < (NUM_PAGES-1))
-               && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
-            last_page++;
-            num_pages++;
-            bytes_found += PAGE_BYTES;
-            gc_assert(page_table[last_page].write_protected == 0);
+    } else {
+        /* Search for a page with at least nbytes of space. We prefer
+         * not to split small objects on multiple pages, to reduce the
+         * number of contiguous allocation regions spanning multiple
+         * pages: this helps avoid excessive conservatism. */
+        first_page = restart_page;
+        while (first_page < page_table_pages) {
+            if (page_free_p(first_page))
+                {
+                    gc_assert(0 == page_table[first_page].bytes_used);
+                    bytes_found = PAGE_BYTES;
+                    break;
+                }
+            else if ((page_table[first_page].allocated == page_type_flag) &&
+                     (page_table[first_page].large_object == 0) &&
+                     (page_table[first_page].gen == gc_alloc_generation) &&
+                     (page_table[first_page].write_protected == 0) &&
+                     (page_table[first_page].dont_move == 0))
+                {
+                    bytes_found = PAGE_BYTES
+                        - page_table[first_page].bytes_used;
+                    if (bytes_found > most_bytes_found)
+                        most_bytes_found = bytes_found;
+                    if (bytes_found >= nbytes)
+                        break;
+                }
+            first_page++;
         }
-
-        region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
-            + PAGE_BYTES*(last_page-first_page);
-
-        gc_assert(bytes_found == region_size);
-        restart_page = last_page + 1;
-    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
+        last_page = first_page;
+        restart_page = first_page + 1;
+    }
 
     /* Check for a failure */
-    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes))
-        gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+    if (bytes_found < nbytes) {
+        gc_assert(restart_page >= page_table_pages);
+        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
+    }
 
-    *restart_page_ptr=first_page;
+    gc_assert(page_table[first_page].write_protected == 0);
 
+    *restart_page_ptr = first_page;
     return last_page;
 }
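
The rewritten search has two modes: requests of PAGE_BYTES or more look for a page-aligned run of wholly free pages, while smaller requests prefer a single page with enough room left, so small objects don't start regions that span pages. A standalone sketch of the multi-page mode (the free-page bitmap is an invented stand-in for the real page_table):

    #include <stdio.h>

    #define PAGE_BYTES 4096 /* illustrative value only */
    #define NPAGES 8

    static const int page_free[NPAGES] = {0, 1, 1, 1, 0, 1, 0, 1};

    /* Return the first page of a free run holding nbytes, or -1. */
    static int find_contiguous(long nbytes) {
        int first = 0;
        while (first < NPAGES) {
            while (first < NPAGES && !page_free[first])
                first++;
            if (first == NPAGES)
                break;
            int last = first;
            long found = PAGE_BYTES;
            while (found < nbytes && last + 1 < NPAGES && page_free[last + 1]) {
                last++;
                found += PAGE_BYTES;
            }
            if (found >= nbytes)
                return first;
            first = last + 1;
        }
        return -1;
    }

    int main(void) {
        printf("%d\n", find_contiguous(3 * PAGE_BYTES)); /* run at pages 1..3 */
        return 0;
    }
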
 
@@ -1178,13 +1275,13 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
  * functions will eventually call this  */
 
 void *
-gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
+gc_alloc_with_region(long nbytes, int page_type_flag, struct alloc_region *my_region,
                      int quick_p)
 {
     void *new_free_pointer;
 
-    if(nbytes>=large_object_size)
-        return gc_alloc_large(nbytes,unboxed_p,my_region);
+    if (nbytes>=large_object_size)
+        return gc_alloc_large(nbytes, page_type_flag, my_region);
 
     /* Check whether there is room in the current alloc region. */
     new_free_pointer = my_region->free_pointer + nbytes;
@@ -1200,11 +1297,11 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
         /* Unless a `quick' alloc was requested, check whether the
            alloc region is almost empty. */
         if (!quick_p &&
-            (my_region->end_addr - my_region->free_pointer) <= 32) {
+            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
             /* If so, finished with the current region. */
-            gc_alloc_update_page_tables(unboxed_p, my_region);
+            gc_alloc_update_page_tables(page_type_flag, my_region);
             /* Set up a new region. */
-            gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
+            gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
         }
 
         return((void *)new_obj);
@@ -1213,60 +1310,45 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
     /* Else not enough free space in the current region: retry with a
      * new region. */
 
-    gc_alloc_update_page_tables(unboxed_p, my_region);
-    gc_alloc_new_region(nbytes, unboxed_p, my_region);
-    return gc_alloc_with_region(nbytes,unboxed_p,my_region,0);
+    gc_alloc_update_page_tables(page_type_flag, my_region);
+    gc_alloc_new_region(nbytes, page_type_flag, my_region);
+    return gc_alloc_with_region(nbytes, page_type_flag, my_region, 0);
 }
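
gc_alloc_with_region is the allocator's central loop: bump the region's free pointer on the fast path, and when the region is exhausted close it out and open a fresh one before retrying. A stripped-down illustration of that shape over a plain byte buffer (not the real alloc_region machinery):

    #include <stddef.h>
    #include <stdio.h>

    struct region { char *free_pointer, *end_addr; };

    static char backing[1024];

    static void open_region(struct region *r) {
        r->free_pointer = backing;
        r->end_addr = backing + sizeof backing;
    }

    /* Assumes nbytes <= sizeof backing, so the retry always succeeds. */
    static void *region_alloc(struct region *r, size_t nbytes) {
        char *new_free = r->free_pointer + nbytes;
        if (new_free <= r->end_addr) {  /* fast path: fits in the region */
            void *obj = r->free_pointer;
            r->free_pointer = new_free;
            return obj;
        }
        open_region(r);                 /* slow path: new region, then retry */
        return region_alloc(r, nbytes);
    }

    int main(void) {
        struct region r;
        open_region(&r);
        char *a = region_alloc(&r, 100);
        char *b = region_alloc(&r, 100);
        printf("%d\n", (int)(b - a)); /* prints 100 */
        return 0;
    }
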
 
 /* these are only used during GC: all allocation from the mutator calls
  * alloc() -> gc_alloc_with_region() with the appropriate per-thread
  * region */
 
-void *
-gc_general_alloc(long nbytes,int unboxed_p,int quick_p)
-{
-    struct alloc_region *my_region =
-      unboxed_p ? &unboxed_region : &boxed_region;
-    return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
-}
-
 static inline void *
 gc_quick_alloc(long nbytes)
 {
-    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
+    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
 }
 
 static inline void *
 gc_quick_alloc_large(long nbytes)
 {
-    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
+    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
 }
 
 static inline void *
 gc_alloc_unboxed(long nbytes)
 {
-    return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
+    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
 }
 
 static inline void *
 gc_quick_alloc_unboxed(long nbytes)
 {
-    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
+    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
 }
 
 static inline void *
 gc_quick_alloc_large_unboxed(long nbytes)
 {
-    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
+    return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
 }
 \f
-/*
- * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
- */
-
-extern long (*scavtab[256])(lispobj *where, lispobj object);
-extern lispobj (*transother[256])(lispobj object);
-extern long (*sizetab[256])(lispobj *where);
 
 /* Copy a large boxed object. If the object is in a large object
  * region then it is simply promoted, else it is copied. If it's large
@@ -1294,10 +1376,10 @@ copy_large_object(lispobj object, long nwords)
 
         /* Promote the object. */
 
-        long remaining_bytes;
+        unsigned long remaining_bytes;
         page_index_t next_page;
-        long bytes_freed;
-        long old_bytes_used;
+        unsigned long bytes_freed;
+        unsigned long old_bytes_used;
 
         /* Note: Any page write-protection must be removed, else a
          * later scavenge_newspace may incorrectly not scavenge these
@@ -1305,16 +1387,16 @@ copy_large_object(lispobj object, long nwords)
          * new areas, but let's do it for them all (they'll probably
          * be written anyway?). */
 
-        gc_assert(page_table[first_page].first_object_offset == 0);
+        gc_assert(page_table[first_page].region_start_offset == 0);
 
         next_page = first_page;
         remaining_bytes = nwords*N_WORD_BYTES;
         while (remaining_bytes > PAGE_BYTES) {
             gc_assert(page_table[next_page].gen == from_space);
-            gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+            gc_assert(page_boxed_p(next_page));
             gc_assert(page_table[next_page].large_object);
-            gc_assert(page_table[next_page].first_object_offset==
-                      -PAGE_BYTES*(next_page-first_page));
+            gc_assert(page_table[next_page].region_start_offset ==
+                      npage_bytes(next_page-first_page));
             gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
 
             page_table[next_page].gen = new_space;
@@ -1336,7 +1418,7 @@ copy_large_object(lispobj object, long nwords)
         gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
 
         page_table[next_page].gen = new_space;
-        gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+        gc_assert(page_boxed_p(next_page));
 
         /* Adjust the bytes_used. */
         old_bytes_used = page_table[next_page].bytes_used;
@@ -1348,10 +1430,10 @@ copy_large_object(lispobj object, long nwords)
         next_page++;
         while ((old_bytes_used == PAGE_BYTES) &&
                (page_table[next_page].gen == from_space) &&
-               (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
+               page_boxed_p(next_page) &&
                page_table[next_page].large_object &&
-               (page_table[next_page].first_object_offset ==
-                -(next_page - first_page)*PAGE_BYTES)) {
+               (page_table[next_page].region_start_offset ==
+                npage_bytes(next_page - first_page))) {
             /* Checks out OK, free the page. Don't need to bother zeroing
              * pages as this should have been done before shrinking the
              * object. These pages shouldn't be write-protected as they
@@ -1365,8 +1447,8 @@ copy_large_object(lispobj object, long nwords)
             next_page++;
         }
 
-        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
-          bytes_freed;
+        generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
+            + bytes_freed;
         generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
         bytes_allocated -= bytes_freed;
 
@@ -1434,7 +1516,8 @@ copy_large_unboxed_object(lispobj object, long nwords)
     gc_assert((nwords & 0x01) == 0);
 
     if ((nwords > 1024*1024) && gencgc_verbose)
-        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));
+        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
+               nwords*N_WORD_BYTES));
 
     /* Check whether it's a large object. */
     first_page = find_page_index((void *)object);
@@ -1444,22 +1527,21 @@ copy_large_unboxed_object(lispobj object, long nwords)
         /* Promote the object. Note: Unboxed objects may have been
          * allocated to a BOXED region so it may be necessary to
          * change the region to UNBOXED. */
-        long remaining_bytes;
+        unsigned long remaining_bytes;
         page_index_t next_page;
-        long bytes_freed;
-        long old_bytes_used;
+        unsigned long bytes_freed;
+        unsigned long old_bytes_used;
 
-        gc_assert(page_table[first_page].first_object_offset == 0);
+        gc_assert(page_table[first_page].region_start_offset == 0);
 
         next_page = first_page;
         remaining_bytes = nwords*N_WORD_BYTES;
         while (remaining_bytes > PAGE_BYTES) {
             gc_assert(page_table[next_page].gen == from_space);
-            gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
-                      || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
+            gc_assert(page_allocated_no_region_p(next_page));
             gc_assert(page_table[next_page].large_object);
-            gc_assert(page_table[next_page].first_object_offset==
-                      -PAGE_BYTES*(next_page-first_page));
+            gc_assert(page_table[next_page].region_start_offset ==
+                      npage_bytes(next_page-first_page));
             gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
 
             page_table[next_page].gen = new_space;
@@ -1487,11 +1569,10 @@ copy_large_unboxed_object(lispobj object, long nwords)
         next_page++;
         while ((old_bytes_used == PAGE_BYTES) &&
                (page_table[next_page].gen == from_space) &&
-               ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
-                || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+               page_allocated_no_region_p(next_page) &&
                page_table[next_page].large_object &&
-               (page_table[next_page].first_object_offset ==
-                -(next_page - first_page)*PAGE_BYTES)) {
+               (page_table[next_page].region_start_offset ==
+                npage_bytes(next_page - first_page))) {
             * Checks out OK, free the page. Don't need to bother zeroing
              * pages as this should have been done before shrinking the
              * object. These pages shouldn't be write-protected, even if
@@ -1510,7 +1591,8 @@ copy_large_unboxed_object(lispobj object, long nwords)
                    "/copy_large_unboxed bytes_freed=%d\n",
                    bytes_freed));
 
-        generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
+        generations[from_space].bytes_allocated -=
+            nwords*N_WORD_BYTES + bytes_freed;
         generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
         bytes_allocated -= bytes_freed;
 
@@ -1565,6 +1647,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
     if (!check_code_fixups)
         return;
 
+    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
+
     ncode_words = fixnum_value(code->code_size);
     nheader_words = HeaderValue(*(lispobj *)code);
     nwords = ncode_words + nheader_words;
@@ -1593,7 +1677,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
             && (data < (code_end_addr-displacement))) {
             /* function header */
             if ((d4 == 0x5e)
-                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
+                    (unsigned)code)) {
                 /* Skip the function header */
                 p += 6*4 - 4 - 1;
                 continue;
@@ -1733,7 +1818,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
     void *constants_start_addr, *constants_end_addr;
     void *code_start_addr, *code_end_addr;
     lispobj fixups = NIL;
-    unsigned long displacement = (unsigned long)new_code - (unsigned long)old_code;
+    unsigned long displacement =
+        (unsigned long)new_code - (unsigned long)old_code;
     struct vector *fixups_vector;
 
     ncode_words = fixnum_value(new_code->code_size);
@@ -1781,7 +1867,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
         (fixups_vector->header == 0x01)) {
         /* If so, then follow it. */
         /*SHOW("following pointer to a forwarding pointer");*/
-        fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
+        fixups_vector =
+            (struct vector *)native_pointer((lispobj)fixups_vector->length);
     }
 
     /*SHOW("got fixups");*/
@@ -1800,7 +1887,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
             /* If it's within the old_code object then it must be an
              * absolute fixup (relative ones are not saved) */
             if ((old_value >= (unsigned long)old_code)
-                && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES)))
+                && (old_value < ((unsigned long)old_code
+                                 + nwords*N_WORD_BYTES)))
                 /* So add the displacement. */
                 *(unsigned long *)((unsigned long)code_start_addr + offset) =
                     old_value + displacement;
@@ -1812,7 +1900,10 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
                     old_value - displacement;
         }
     } else {
-        fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
+        /* This used to just print a note to stderr, but a bogus fixup seems to
+         * indicate real heap corruption, so a hard failure is in order. */
+        lose("fixup vector %p has a bad widetag: %d\n",
+             fixups_vector, widetag_of(fixups_vector->header));
     }
 
     /* Check for possible errors. */
@@ -1858,237 +1949,6 @@ trans_unboxed_large(lispobj object)
 
 \f
 /*
- * vector-like objects
- */
-
-
-/* FIXME: What does this mean? */
-int gencgc_hash = 1;
-
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-
-static long
-scav_vector(lispobj *where, lispobj object)
-{
-    unsigned long kv_length;
-    lispobj *kv_vector;
-    unsigned long length = 0; /* (0 = dummy to stop GCC warning) */
-    struct hash_table *hash_table;
-    lispobj empty_symbol;
-    unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
-    unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
-    unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */
-    lispobj weak_p_obj;
-    unsigned long next_vector_length = 0;
-
-    /* FIXME: A comment explaining this would be nice. It looks as
-     * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
-     * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
-    if (HeaderValue(object) != subtype_VectorValidHashing)
-        return 1;
-
-    if (!gencgc_hash) {
-        /* This is set for backward compatibility. FIXME: Do we need
-         * this any more? */
-        *where =
-            (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
-        return 1;
-    }
-
-    kv_length = fixnum_value(where[1]);
-    kv_vector = where + 2;  /* Skip the header and length. */
-    /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
-
-    /* Scavenge element 0, which may be a hash-table structure. */
-    scavenge(where+2, 1);
-    if (!is_lisp_pointer(where[2])) {
-        lose("no pointer at %x in hash table\n", where[2]);
-    }
-    hash_table = (struct hash_table *)native_pointer(where[2]);
-    /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
-    if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
-        lose("hash table not instance (%x at %x)\n",
-             hash_table->header,
-             hash_table);
-    }
-
-    /* Scavenge element 1, which should be some internal symbol that
-     * the hash table code reserves for marking empty slots. */
-    scavenge(where+3, 1);
-    if (!is_lisp_pointer(where[3])) {
-        lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]);
-    }
-    empty_symbol = where[3];
-    /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
-    if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
-        SYMBOL_HEADER_WIDETAG) {
-        lose("not a symbol where empty-hash-table-slot symbol expected: %x\n",
-             *(lispobj *)native_pointer(empty_symbol));
-    }
-
-    /* Scavenge hash table, which will fix the positions of the other
-     * needed objects. */
-    scavenge((lispobj *)hash_table,
-             sizeof(struct hash_table) / sizeof(lispobj));
-
-    /* Cross-check the kv_vector. */
-    if (where != (lispobj *)native_pointer(hash_table->table)) {
-        lose("hash_table table!=this table %x\n", hash_table->table);
-    }
-
-    /* WEAK-P */
-    weak_p_obj = hash_table->weak_p;
-
-    /* index vector */
-    {
-        lispobj index_vector_obj = hash_table->index_vector;
-
-        if (is_lisp_pointer(index_vector_obj) &&
-            (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
-                 SIMPLE_ARRAY_WORD_WIDETAG)) {
-            index_vector =
-                ((unsigned long *)native_pointer(index_vector_obj)) + 2;
-            /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
-            length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
-            /*FSHOW((stderr, "/length = %d\n", length));*/
-        } else {
-            lose("invalid index_vector %x\n", index_vector_obj);
-        }
-    }
-
-    /* next vector */
-    {
-        lispobj next_vector_obj = hash_table->next_vector;
-
-        if (is_lisp_pointer(next_vector_obj) &&
-            (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
-             SIMPLE_ARRAY_WORD_WIDETAG)) {
-            next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2;
-            /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
-            next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
-            /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
-        } else {
-            lose("invalid next_vector %x\n", next_vector_obj);
-        }
-    }
-
-    /* maybe hash vector */
-    {
-        lispobj hash_vector_obj = hash_table->hash_vector;
-
-        if (is_lisp_pointer(hash_vector_obj) &&
-            (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
-             SIMPLE_ARRAY_WORD_WIDETAG)){
-            hash_vector =
-                ((unsigned long *)native_pointer(hash_vector_obj)) + 2;
-            /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
-            gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
-                      == next_vector_length);
-        } else {
-            hash_vector = NULL;
-            /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
-        }
-    }
-
-    /* These lengths could be different as the index_vector can be a
-     * different length from the others, a larger index_vector could help
-     * reduce collisions. */
-    gc_assert(next_vector_length*2 == kv_length);
-
-    /* now all set up.. */
-
-    /* Work through the KV vector. */
-    {
-        long i;
-        for (i = 1; i < next_vector_length; i++) {
-            lispobj old_key = kv_vector[2*i];
-
-#if N_WORD_BITS == 32
-            unsigned long old_index = (old_key & 0x1fffffff)%length;
-#elif N_WORD_BITS == 64
-            unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
-#endif
-
-            /* Scavenge the key and value. */
-            scavenge(&kv_vector[2*i],2);
-
-            /* Check whether the key has moved and is EQ based. */
-            {
-                lispobj new_key = kv_vector[2*i];
-#if N_WORD_BITS == 32
-                unsigned long new_index = (new_key & 0x1fffffff)%length;
-#elif N_WORD_BITS == 64
-                unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
-#endif
-
-                if ((old_index != new_index) &&
-                    ((!hash_vector) ||
-                     (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) &&
-                    ((new_key != empty_symbol) ||
-                     (kv_vector[2*i] != empty_symbol))) {
-
-                     /*FSHOW((stderr,
-                            "* EQ key %d moved from %x to %x; index %d to %d\n",
-                            i, old_key, new_key, old_index, new_index));*/
-
-                    if (index_vector[old_index] != 0) {
-                         /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
-
-                        /* Unlink the key from the old_index chain. */
-                        if (index_vector[old_index] == i) {
-                            /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
-                            index_vector[old_index] = next_vector[i];
-                            /* Link it into the needing rehash chain. */
-                            next_vector[i] = fixnum_value(hash_table->needing_rehash);
-                            hash_table->needing_rehash = make_fixnum(i);
-                            /*SHOW("P2");*/
-                        } else {
-                            unsigned long prior = index_vector[old_index];
-                            unsigned long next = next_vector[prior];
-
-                            /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
-
-                            while (next != 0) {
-                                 /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
-                                if (next == i) {
-                                    /* Unlink it. */
-                                    next_vector[prior] = next_vector[next];
-                                    /* Link it into the needing rehash
-                                     * chain. */
-                                    next_vector[next] =
-                                        fixnum_value(hash_table->needing_rehash);
-                                    hash_table->needing_rehash = make_fixnum(next);
-                                    /*SHOW("/P3");*/
-                                    break;
-                                }
-                                prior = next;
-                                next = next_vector[next];
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    }
-    return (CEILING(kv_length + 2, 2));
-}
-
-#else
-
-static long
-scav_vector(lispobj *where, lispobj object)
-{
-    if (HeaderValue(object) == subtype_VectorValidHashing) {
-        *where =
-            (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
-    }
-    return 1;
-}
-
-#endif
-
-\f
-/*
  * Lutexes. Using the normal finalization machinery for finalizing
  * lutexes is tricky, since the finalization depends on working lutexes.
  * So we track the lutexes in the GC and finalize them manually.
@@ -2174,7 +2034,7 @@ reap_lutexes (generation_index_t gen) {
     while (lutex) {
         struct lutex *next = lutex->next;
         if (!lutex->live) {
-            lutex_destroy(lutex);
+            lutex_destroy((tagged_lutex_t) lutex);
             gencgc_unregister_lutex(lutex);
         }
         lutex = next;
@@ -2233,7 +2093,7 @@ scav_lutex(lispobj *where, lispobj object)
 static lispobj
 trans_lutex(lispobj object)
 {
-    struct lutex *lutex = native_pointer(object);
+    struct lutex *lutex = (struct lutex *) native_pointer(object);
     lispobj copied;
     size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
     gc_assert(is_lisp_pointer(object));
@@ -2241,13 +2101,14 @@ trans_lutex(lispobj object)
 
     /* Update the links, since the lutex moved in memory. */
     if (lutex->next) {
-        lutex->next->prev = native_pointer(copied);
+        lutex->next->prev = (struct lutex *) native_pointer(copied);
     }
 
     if (lutex->prev) {
-        lutex->prev->next = native_pointer(copied);
+        lutex->prev->next = (struct lutex *) native_pointer(copied);
     } else {
-        generations[lutex->gen].lutexes = native_pointer(copied);
+        generations[lutex->gen].lutexes =
+          (struct lutex *) native_pointer(copied);
     }
 
     return copied;
@@ -2276,29 +2137,21 @@ size_lutex(lispobj *where)
 static long
 scav_weak_pointer(lispobj *where, lispobj object)
 {
-    struct weak_pointer *wp = weak_pointers;
-    /* Push the weak pointer onto the list of weak pointers.
-     * Do I have to watch for duplicates? Originally this was
-     * part of trans_weak_pointer but that didn't work in the
-     * case where the WP was in a promoted region.
+    /* Since we overwrite the 'next' field, we have to make
+     * sure not to do so for pointers already in the list.
+     * Instead of searching the list of weak_pointers each
+     * time, we ensure that next is always NULL when the weak
+     * pointer isn't in the list, and not NULL otherwise.
+     * Since we can't use NULL to denote end of list, we
+     * use a pointer back to the same weak_pointer.
      */
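+    /* The resulting invariant, spelled out:
+     *   not on the list:   wp->next == NULL
+     *   last on the list:  wp->next == wp (the self-pointer)
+     *   otherwise:         wp->next == the next weak pointer
+     */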
+    struct weak_pointer * wp = (struct weak_pointer*)where;
 
-    /* Check whether it's already in the list. */
-    while (wp != NULL) {
-        if (wp == (struct weak_pointer*)where) {
-            break;
-        }
-        wp = wp->next;
-    }
-    if (wp == NULL) {
-        /* Add it to the start of the list. */
-        wp = (struct weak_pointer*)where;
-        if (wp->next != weak_pointers) {
-            wp->next = weak_pointers;
-        } else {
-            /*SHOW("avoided write to weak pointer");*/
-        }
+    if (NULL == wp->next) {
+        wp->next = weak_pointers;
         weak_pointers = wp;
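+        /* If the list was empty, weak_pointers was NULL and wp->next
+         * is NULL again: mark end-of-list with the self-pointer. */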
+        if (NULL == wp->next)
+            wp->next = wp;
     }
 
     /* Do not let GC scavenge the value slot of the weak pointer.
@@ -2341,47 +2194,31 @@ search_dynamic_space(void *pointer)
     lispobj *start;
 
     /* The address may be invalid, so do some checks. */
-    if ((page_index == -1) ||
-        (page_table[page_index].allocated == FREE_PAGE_FLAG))
+    if ((page_index == -1) || page_free_p(page_index))
         return NULL;
-    start = (lispobj *)((void *)page_address(page_index)
-                        + page_table[page_index].first_object_offset);
+    start = (lispobj *)page_region_start(page_index);
     return (gc_search_space(start,
                             (((lispobj *)pointer)+2)-start,
                             (lispobj *)pointer));
 }
 
-/* Is there any possibility that pointer is a valid Lisp object
- * reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing?
- * This is called from preserve_pointers() */
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+
+/* Helper for valid_lisp_pointer_p and
+ * possibly_valid_dynamic_space_pointer.
+ *
+ * pointer is the pointer to validate, and start_addr is the address
+ * of the enclosing object.
+ */
 static int
-possibly_valid_dynamic_space_pointer(lispobj *pointer)
+looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
 {
-    lispobj *start_addr;
-
-    /* Find the object start address. */
-    if ((start_addr = search_dynamic_space(pointer)) == NULL) {
-        return 0;
-    }
-
-    /* We need to allow raw pointers into Code objects for return
-     * addresses. This will also pick up pointers to functions in code
-     * objects. */
-    if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
-        /* XXX could do some further checks here */
-        return 1;
-    }
-
-    /* If it's not a return address then it needs to be a valid Lisp
-     * pointer. */
     if (!is_lisp_pointer((lispobj)pointer)) {
         return 0;
     }
 
     /* Check that the object pointed to is consistent with the pointer
-     * low tag.
-     */
+     * low tag. */
     switch (lowtag_of((lispobj)pointer)) {
     case FUN_POINTER_LOWTAG:
         /* Start_addr should be the enclosing code object, or a closure
@@ -2419,20 +2256,10 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer)
             return 0;
         }
         /* Is it plausible cons? */
-        if ((is_lisp_pointer(start_addr[0])
-            || (fixnump(start_addr[0]))
-            || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
-            || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
-#endif
-            || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
-           && (is_lisp_pointer(start_addr[1])
-               || (fixnump(start_addr[1]))
-               || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
-               || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
-#endif
-               || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
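+        /* is_lisp_immediate() is assumed to cover exactly the cases
+         * spelled out above: fixnums, characters, unbound-marker, and
+         * (on 64-bit) single-floats. */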
+        if ((is_lisp_pointer(start_addr[0]) ||
+             is_lisp_immediate(start_addr[0])) &&
+            (is_lisp_pointer(start_addr[1]) ||
+             is_lisp_immediate(start_addr[1])))
             break;
         else {
             if (gencgc_verbose)
@@ -2623,7 +2450,46 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer)
     return 1;
 }
 
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+/* Used by the debugger to validate possibly bogus pointers before
+ * calling MAKE-LISP-OBJ on them.
+ *
+ * FIXME: We would like to make this perfect, because if the debugger
+ * constructs a reference to a bugs lisp object, and it ends up in a
+ * location scavenged by the GC all hell breaks loose.
+ *
+ * Whereas possibly_valid_dynamic_space_pointer has to be conservative
+ * and return true for all valid pointers, this could actually be eager
+ * and lie about a few pointers without bad results... but that should
+ * be reflected in the name.
+ */
+int
+valid_lisp_pointer_p(lispobj *pointer)
+{
+    lispobj *start;
+    if (((start=search_dynamic_space(pointer))!=NULL) ||
+        ((start=search_static_space(pointer))!=NULL) ||
+        ((start=search_read_only_space(pointer))!=NULL))
+        return looks_like_valid_lisp_pointer_p(pointer, start);
+    else
+        return 0;
+}
+
+/* Is there any possibility that pointer is a valid Lisp object
+ * reference, and/or something else (e.g. subroutine call return
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointers() */
+static int
+possibly_valid_dynamic_space_pointer(lispobj *pointer)
+{
+    lispobj *start_addr;
+
+    /* Find the object start address. */
+    if ((start_addr = search_dynamic_space(pointer)) == NULL) {
+        return 0;
+    }
+
+    return looks_like_valid_lisp_pointer_p(pointer, start_addr);
+}
 
 /* Adjust large bignum and vector objects. This will adjust the
  * allocated region if the size has shrunk, and move unboxed objects
@@ -2639,9 +2505,9 @@ maybe_adjust_large_object(lispobj *where)
     page_index_t next_page;
     long nwords;
 
-    long remaining_bytes;
-    long bytes_freed;
-    long old_bytes_used;
+    unsigned long remaining_bytes;
+    unsigned long bytes_freed;
+    unsigned long old_bytes_used;
 
     int boxed;
 
@@ -2727,17 +2593,16 @@ maybe_adjust_large_object(lispobj *where)
      * but let's do it for them all (they'll probably be written
      * anyway?). */
 
-    gc_assert(page_table[first_page].first_object_offset == 0);
+    gc_assert(page_table[first_page].region_start_offset == 0);
 
     next_page = first_page;
     remaining_bytes = nwords*N_WORD_BYTES;
     while (remaining_bytes > PAGE_BYTES) {
         gc_assert(page_table[next_page].gen == from_space);
-        gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
-                  || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
+        gc_assert(page_allocated_no_region_p(next_page));
         gc_assert(page_table[next_page].large_object);
-        gc_assert(page_table[next_page].first_object_offset ==
-                  -PAGE_BYTES*(next_page-first_page));
+        gc_assert(page_table[next_page].region_start_offset ==
+                  npage_bytes(next_page-first_page));
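+        /* Note the sign flip: first_object_offset stored a negative
+         * byte offset, while region_start_offset counts bytes back to
+         * the region start and is therefore positive here. */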
         gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
 
         page_table[next_page].allocated = boxed;
@@ -2769,11 +2634,10 @@ maybe_adjust_large_object(lispobj *where)
     next_page++;
     while ((old_bytes_used == PAGE_BYTES) &&
            (page_table[next_page].gen == from_space) &&
-           ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
-            || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+           page_allocated_no_region_p(next_page) &&
            page_table[next_page].large_object &&
-           (page_table[next_page].first_object_offset ==
-            -(next_page - first_page)*PAGE_BYTES)) {
+           (page_table[next_page].region_start_offset ==
+            npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page. We don't need to bother zeroing
          * pages as this should have been done before shrinking the
          * object. These pages shouldn't be write protected as they
@@ -2799,8 +2663,6 @@ maybe_adjust_large_object(lispobj *where)
     return;
 }
 
-#endif
-
 /* Take a possible pointer to a Lisp object and mark its page in the
  * page_table so that it will not be relocated during a GC.
  *
@@ -2814,8 +2676,6 @@ maybe_adjust_large_object(lispobj *where)
  * It is also assumed that the current gc_alloc() region has been
  * flushed and the tables updated. */
 
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-
 static void
 preserve_pointer(void *addr)
 {
@@ -2826,7 +2686,7 @@ preserve_pointer(void *addr)
 
     /* quick check 1: Address is quite likely to have been invalid. */
     if ((addr_page_index == -1)
-        || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+        || page_free_p(addr_page_index)
         || (page_table[addr_page_index].bytes_used == 0)
         || (page_table[addr_page_index].gen != from_space)
         /* Skip if already marked dont_move. */
@@ -2840,7 +2700,8 @@ preserve_pointer(void *addr)
     /* quick check 2: Check the offset within the page.
      *
      */
-    if (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
+    if (((unsigned long)addr & (PAGE_BYTES - 1)) >
+        page_table[addr_page_index].bytes_used)
         return;
 
     /* Filter out anything which can't be a pointer to a Lisp object
@@ -2849,7 +2710,9 @@ preserve_pointer(void *addr)
      * expensive but important, since it vastly reduces the
      * probability that random garbage will be bogusly interpreted as
      * a pointer which prevents a page from moving. */
-    if (!(possibly_valid_dynamic_space_pointer(addr)))
+    if (!(code_page_p(addr_page_index)
+          || (is_lisp_pointer(addr) &&
+              possibly_valid_dynamic_space_pointer(addr))))
         return;
 
     /* Find the beginning of the region.  Note that there may be
@@ -2860,12 +2723,10 @@ preserve_pointer(void *addr)
 #if 0
     /* I think this'd work just as well, but without the assertions.
      * -dan 2004.01.01 */
-    first_page=
-        find_page_index(page_address(addr_page_index)+
-                        page_table[addr_page_index].first_object_offset);
+    first_page = find_page_index(page_region_start(addr_page_index));
 #else
     first_page = addr_page_index;
-    while (page_table[first_page].first_object_offset != 0) {
+    while (page_table[first_page].region_start_offset != 0) {
         --first_page;
         /* Do some checks. */
         gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
@@ -2882,7 +2743,7 @@ preserve_pointer(void *addr)
          * free area in which case it's ignored here. Note it gets
          * through the valid pointer test above because the tail looks
          * like conses. */
-        if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+        if (page_free_p(addr_page_index)
             || (page_table[addr_page_index].bytes_used == 0)
             /* Check the offset within the page. */
             || (((unsigned long)addr & (PAGE_BYTES - 1))
@@ -2921,10 +2782,10 @@ preserve_pointer(void *addr)
         /* Check whether this is the last page in this contiguous block.. */
         if ((page_table[i].bytes_used < PAGE_BYTES)
             /* ..or it is PAGE_BYTES and is the last in the block */
-            || (page_table[i+1].allocated == FREE_PAGE_FLAG)
+            || page_free_p(i+1)
             || (page_table[i+1].bytes_used == 0) /* next page free */
             || (page_table[i+1].gen != from_space) /* diff. gen */
-            || (page_table[i+1].first_object_offset == 0))
+            || (page_table[i+1].region_start_offset == 0))
             break;
     }
 
@@ -2932,7 +2793,7 @@ preserve_pointer(void *addr)
     gc_assert(page_table[addr_page_index].dont_move != 0);
 }
 
-#endif
+#endif  /* defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) */
 
 \f
 /* If the given page is not write-protected, then scan it for pointers
@@ -2958,14 +2819,14 @@ update_page_write_prot(page_index_t page)
     long num_words = page_table[page].bytes_used / N_WORD_BYTES;
 
     /* Shouldn't be a free page. */
-    gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
+    gc_assert(page_allocated_p(page));
     gc_assert(page_table[page].bytes_used != 0);
 
     /* Skip if it's already write-protected, pinned, or unboxed */
     if (page_table[page].write_protected
         /* FIXME: What's the reason for not write-protecting pinned pages? */
         || page_table[page].dont_move
-        || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
+        || page_unboxed_p(page))
         return (0);
 
     /* Scan the page for pointers to younger generations or the
@@ -2978,7 +2839,7 @@ update_page_write_prot(page_index_t page)
         /* Check that it's in the dynamic space */
         if (index != -1)
             if (/* Does it point to a younger or the temp. generation? */
-                ((page_table[index].allocated != FREE_PAGE_FLAG)
+                (page_allocated_p(index)
                  && (page_table[index].bytes_used != 0)
                  && ((page_table[index].gen < gen)
                      || (page_table[index].gen == SCRATCH_GENERATION)))
@@ -3046,13 +2907,13 @@ scavenge_generations(generation_index_t from, generation_index_t to)
 #define SC_GEN_CK 0
 #if SC_GEN_CK
     /* Clear the write_protected_cleared flags on all pages. */
-    for (i = 0; i < NUM_PAGES; i++)
+    for (i = 0; i < page_table_pages; i++)
         page_table[i].write_protected_cleared = 0;
 #endif
 
     for (i = 0; i < last_free_page; i++) {
         generation_index_t generation = page_table[i].gen;
-        if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+        if (page_boxed_p(i)
             && (page_table[i].bytes_used != 0)
             && (generation != new_space)
             && (generation >= from)
@@ -3061,7 +2922,7 @@ scavenge_generations(generation_index_t from, generation_index_t to)
             int write_protected=1;
 
             /* This should be the start of a region */
-            gc_assert(page_table[i].first_object_offset == 0);
+            gc_assert(page_table[i].region_start_offset == 0);
 
             /* Now work forward until the end of the region */
             for (last_page = i; ; last_page++) {
@@ -3069,16 +2930,17 @@ scavenge_generations(generation_index_t from, generation_index_t to)
                     write_protected && page_table[last_page].write_protected;
                 if ((page_table[last_page].bytes_used < PAGE_BYTES)
                     /* Or it is PAGE_BYTES and is the last in the block */
-                    || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+                    || (!page_boxed_p(last_page+1))
                     || (page_table[last_page+1].bytes_used == 0)
                     || (page_table[last_page+1].gen != generation)
-                    || (page_table[last_page+1].first_object_offset == 0))
+                    || (page_table[last_page+1].region_start_offset == 0))
                     break;
             }
             if (!write_protected) {
                 scavenge(page_address(i),
-                         (page_table[last_page].bytes_used +
-                          (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+                         ((unsigned long)(page_table[last_page].bytes_used
+                                          + npage_bytes(last_page-i)))
+                         /N_WORD_BYTES);
 
                 /* Now scan the pages and write protect those that
                  * don't have pointers to younger generations. */
@@ -3100,16 +2962,16 @@ scavenge_generations(generation_index_t from, generation_index_t to)
 #if SC_GEN_CK
     /* Check that none of the write_protected pages in this generation
      * have been written to. */
-    for (i = 0; i < NUM_PAGES; i++) {
-        if ((page_table[i].allocation != FREE_PAGE_FLAG)
+    for (i = 0; i < page_table_pages; i++) {
+        if (page_allocated_p(i)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == generation)
             && (page_table[i].write_protected_cleared != 0)) {
             FSHOW((stderr, "/scavenge_generation() %d\n", generation));
             FSHOW((stderr,
-                   "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
+                   "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n",
                     page_table[i].bytes_used,
-                    page_table[i].first_object_offset,
+                    page_table[i].region_start_offset,
                     page_table[i].dont_move));
             lose("write to protected page %d in scavenge_generation()\n", i);
         }
@@ -3155,7 +3017,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
            generation));
     for (i = 0; i < last_free_page; i++) {
         /* Note that this skips over open regions when it encounters them. */
-        if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+        if (page_boxed_p(i)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == generation)
             && ((page_table[i].write_protected == 0)
@@ -3165,7 +3027,8 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
             page_index_t last_page;
             int all_wp=1;
 
-            /* The scavenge will start at the first_object_offset of page i.
+            /* The scavenge will start at the region_start_offset of
+             * page i.
              *
              * We need to find the full extent of this contiguous
              * block in case objects span pages.
@@ -3183,25 +3046,23 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
                  * contiguous block */
                 if ((page_table[last_page].bytes_used < PAGE_BYTES)
                     /* Or it is PAGE_BYTES and is the last in the block */
-                    || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+                    || (!page_boxed_p(last_page+1))
                     || (page_table[last_page+1].bytes_used == 0)
                     || (page_table[last_page+1].gen != generation)
-                    || (page_table[last_page+1].first_object_offset == 0))
+                    || (page_table[last_page+1].region_start_offset == 0))
                     break;
             }
 
             /* Do a limited check for write-protected pages.  */
             if (!all_wp) {
-                long size;
-
-                size = (page_table[last_page].bytes_used
-                        + (last_page-i)*PAGE_BYTES
-                        - page_table[i].first_object_offset)/N_WORD_BYTES;
+                long nwords = (((unsigned long)
+                               (page_table[last_page].bytes_used
+                                + npage_bytes(last_page-i)
+                                + page_table[i].region_start_offset))
+                               / N_WORD_BYTES);
                 new_areas_ignore_page = last_page;
 
-                scavenge(page_address(i) +
-                         page_table[i].first_object_offset,
-                         size);
+                scavenge(page_region_start(i), nwords);
 
             }
             i = last_page;
@@ -3243,6 +3104,13 @@ scavenge_newspace_generation(generation_index_t generation)
     /* Record all new areas now. */
     record_new_objects = 2;
 
+    /* Give weak hash tables a chance to make other objects live.
+     * FIXME: The algorithm implemented here for weak hash table gcing
+     * is O(W^2+N) as Bruno Haible warns in
+     * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
+     * see "Implementation 2". */
+    scav_weak_hash_tables();
+
     /* Flush the current regions updating the tables. */
     gc_alloc_update_all_page_tables();
 
@@ -3281,8 +3149,8 @@ scavenge_newspace_generation(generation_index_t generation)
             if (gencgc_verbose)
                 SHOW("new_areas overflow, doing full scavenge");
 
-            /* Don't need to record new areas that get scavenge anyway
-             * during scavenge_newspace_generation_one_scan. */
+            /* Don't need to record new areas that get scavenged
+             * anyway during scavenge_newspace_generation_one_scan. */
             record_new_objects = 1;
 
             scavenge_newspace_generation_one_scan(generation);
@@ -3290,6 +3158,8 @@ scavenge_newspace_generation(generation_index_t generation)
             /* Record all new areas now. */
             record_new_objects = 2;
 
+            scav_weak_hash_tables();
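+            /* (See the note at the first scav_weak_hash_tables() call
+             * above: each full scan can make more objects live.) */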
+
             /* Flush the current regions updating the tables. */
             gc_alloc_update_all_page_tables();
 
@@ -3297,13 +3167,15 @@ scavenge_newspace_generation(generation_index_t generation)
 
             /* Work through previous_new_areas. */
             for (i = 0; i < previous_new_areas_index; i++) {
-                long page = (*previous_new_areas)[i].page;
-                long offset = (*previous_new_areas)[i].offset;
-                long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+                page_index_t page = (*previous_new_areas)[i].page;
+                size_t offset = (*previous_new_areas)[i].offset;
+                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                 gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                 scavenge(page_address(page)+offset, size);
             }
 
+            scav_weak_hash_tables();
+
             /* Flush the current regions updating the tables. */
             gc_alloc_update_all_page_tables();
         }
@@ -3321,8 +3193,8 @@ scavenge_newspace_generation(generation_index_t generation)
 #if SC_NS_GEN_CK
     /* Check that none of the write_protected pages in this generation
      * have been written to. */
-    for (i = 0; i < NUM_PAGES; i++) {
-        if ((page_table[i].allocation != FREE_PAGE_FLAG)
+    for (i = 0; i < page_table_pages; i++) {
+        if (page_allocated_p(i)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == generation)
             && (page_table[i].write_protected_cleared != 0)
@@ -3345,7 +3217,7 @@ unprotect_oldspace(void)
     page_index_t i;
 
     for (i = 0; i < last_free_page; i++) {
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
+        if (page_allocated_p(i)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == from_space)) {
             void *page_start;
@@ -3366,10 +3238,10 @@ unprotect_oldspace(void)
  * assumes that all objects have been copied or promoted to an older
  * generation. Bytes_allocated and the generation bytes_allocated
  * counter are updated. The number of bytes freed is returned. */
-static long
+static unsigned long
 free_oldspace(void)
 {
-    long bytes_freed = 0;
+    unsigned long bytes_freed = 0;
     page_index_t first_page, last_page;
 
     first_page = 0;
@@ -3377,7 +3249,7 @@ free_oldspace(void)
     do {
         /* Find a first page for the next region of pages. */
         while ((first_page < last_free_page)
-               && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
+               && (page_free_p(first_page)
                    || (page_table[first_page].bytes_used == 0)
                    || (page_table[first_page].gen != from_space)))
             first_page++;
@@ -3409,13 +3281,13 @@ free_oldspace(void)
             last_page++;
         }
         while ((last_page < last_free_page)
-               && (page_table[last_page].allocated != FREE_PAGE_FLAG)
+               && page_allocated_p(last_page)
                && (page_table[last_page].bytes_used != 0)
                && (page_table[last_page].gen == from_space));
 
 #ifdef READ_PROTECT_FREE_PAGES
         os_protect(page_address(first_page),
-                   PAGE_BYTES*(last_page-first_page),
+                   npage_bytes(last_page-first_page),
                    OS_VM_PROT_NONE);
 #endif
         first_page = last_page;
@@ -3434,13 +3306,13 @@ print_ptr(lispobj *addr)
     page_index_t pi1 = find_page_index((void*)addr);
 
     if (pi1 != -1)
-        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %d  dont_move %d\n",
+        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
                 (unsigned long) addr,
                 pi1,
                 page_table[pi1].allocated,
                 page_table[pi1].gen,
                 page_table[pi1].bytes_used,
-                page_table[pi1].first_object_offset,
+                page_table[pi1].region_start_offset,
                 page_table[pi1].dont_move);
     fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
             *(addr-4),
@@ -3455,13 +3327,6 @@ print_ptr(lispobj *addr)
 }
 #endif
 
-#if defined(LISP_FEATURE_PPC)
-extern int closure_tramp;
-extern int undefined_tramp;
-#else
-extern int undefined_tramp;
-#endif
-
 static void
 verify_space(lispobj *start, size_t words)
 {
@@ -3487,7 +3352,7 @@ verify_space(lispobj *start, size_t words)
             if (page_index != -1) {
                 /* If it's within the dynamic space it should point to a used
                  * page. XX Could check the offset too. */
-                if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
+                if (page_allocated_p(page_index)
                     && (page_table[page_index].bytes_used == 0))
                     lose ("Ptr %x @ %x sees free page.\n", thing, start);
                 /* Check that it doesn't point to a forwarding pointer! */
@@ -3516,14 +3381,7 @@ verify_space(lispobj *start, size_t words)
                 */
             } else {
                 /* Verify that it points to another valid space. */
-                if (!to_readonly_space && !to_static_space &&
-#if defined(LISP_FEATURE_PPC)
-                    !((thing == &closure_tramp) ||
-                      (thing == &undefined_tramp))
-#else
-                    thing != (unsigned long)&undefined_tramp
-#endif
-                    ) {
+                if (!to_readonly_space && !to_static_space) {
                     lose("Ptr %x @ %x sees junk.\n", thing, start);
                 }
             }
@@ -3567,8 +3425,10 @@ verify_space(lispobj *start, size_t words)
                             count = 1;
                             break;
                         }
-                        nuntagged = ((struct layout *)native_pointer(layout))->n_untagged_slots;
-                        verify_space(start + 1, ntotal - fixnum_value(nuntagged));
+                        nuntagged = ((struct layout *)
+                                     native_pointer(layout))->n_untagged_slots;
+                        verify_space(start + 1,
+                                     ntotal - fixnum_value(nuntagged));
                         count = ntotal + 1;
                         break;
                     }
@@ -3616,7 +3476,8 @@ verify_space(lispobj *start, size_t words)
                         while (fheaderl != NIL) {
                             fheaderp =
                                 (struct simple_fun *) native_pointer(fheaderl);
-                            gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+                            gc_assert(widetag_of(fheaderp->header) ==
+                                      SIMPLE_FUN_HEADER_WIDETAG);
                             verify_space(&fheaderp->name, 1);
                             verify_space(&fheaderp->arglist, 1);
                             verify_space(&fheaderp->type, 1);
@@ -3707,15 +3568,15 @@ verify_space(lispobj *start, size_t words)
 #ifdef LUTEX_WIDETAG
                 case LUTEX_WIDETAG:
 #endif
+#ifdef NO_TLS_VALUE_MARKER_WIDETAG
+                case NO_TLS_VALUE_MARKER_WIDETAG:
+#endif
                     count = (sizetab[widetag_of(*start)])(start);
                     break;
 
                 default:
-                    FSHOW((stderr,
-                           "/Unhandled widetag 0x%x at 0x%x\n",
-                           widetag_of(*start), start));
-                    fflush(stderr);
-                    gc_abort();
+                    lose("Unhandled widetag 0x%x at 0x%x\n",
+                         widetag_of(*start), start);
                 }
             }
         }
@@ -3756,14 +3617,14 @@ verify_generation(generation_index_t generation)
     page_index_t i;
 
     for (i = 0; i < last_free_page; i++) {
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
+        if (page_allocated_p(i)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == generation)) {
             page_index_t last_page;
             int region_allocation = page_table[i].allocated;
 
             /* This should be the start of a contiguous block */
-            gc_assert(page_table[i].first_object_offset == 0);
+            gc_assert(page_table[i].region_start_offset == 0);
 
             /* Need to find the full extent of this contiguous block in case
                objects span pages. */
@@ -3778,11 +3639,14 @@ verify_generation(generation_index_t generation)
                     || (page_table[last_page+1].allocated != region_allocation)
                     || (page_table[last_page+1].bytes_used == 0)
                     || (page_table[last_page+1].gen != generation)
-                    || (page_table[last_page+1].first_object_offset == 0))
+                    || (page_table[last_page+1].region_start_offset == 0))
                     break;
 
-            verify_space(page_address(i), (page_table[last_page].bytes_used
-                                           + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+            verify_space(page_address(i),
+                         ((unsigned long)
+                          (page_table[last_page].bytes_used
+                           + npage_bytes(last_page-i)))
+                         / N_WORD_BYTES);
             i = last_page;
         }
     }
@@ -3795,7 +3659,7 @@ verify_zero_fill(void)
     page_index_t page;
 
     for (page = 0; page < last_free_page; page++) {
-        if (page_table[page].allocated == FREE_PAGE_FLAG) {
+        if (page_free_p(page)) {
             /* The whole page should be zero filled. */
             long *start_addr = (long *)page_address(page);
             long size = 1024;
@@ -3853,10 +3717,7 @@ write_protect_generation_pages(generation_index_t generation)
     gc_assert(generation < SCRATCH_GENERATION);
 
     for (start = 0; start < last_free_page; start++) {
-        if ((page_table[start].allocated == BOXED_PAGE_FLAG)
-            && (page_table[start].bytes_used != 0)
-            && !page_table[start].dont_move
-            && (page_table[start].gen == generation))  {
+        if (protect_page_p(start, generation)) {
             void *page_start;
             page_index_t last;
 
@@ -3864,10 +3725,7 @@ write_protect_generation_pages(generation_index_t generation)
             page_table[start].write_protected = 1;
 
             for (last = start + 1; last < last_free_page; last++) {
-                if ((page_table[last].allocated != BOXED_PAGE_FLAG)
-                    || (page_table[last].bytes_used == 0)
-                    || page_table[last].dont_move
-                    || (page_table[last].gen != generation))
+                if (!protect_page_p(last, generation))
                   break;
                 page_table[last].write_protected = 1;
             }
@@ -3875,7 +3733,7 @@ write_protect_generation_pages(generation_index_t generation)
             page_start = (void *)page_address(start);
 
             os_protect(page_start,
-                       PAGE_BYTES * (last - start),
+                       npage_bytes(last - start),
                        OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
 
             start = last;
@@ -3891,6 +3749,8 @@ write_protect_generation_pages(generation_index_t generation)
     }
 }
 
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+
 static void
 scavenge_control_stack()
 {
@@ -3906,7 +3766,6 @@ scavenge_control_stack()
     scavenge(control_stack, control_stack_size);
 }
 
-#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
 /* Scavenging Interrupt Contexts */
 
 static int boxed_registers[] = BOXED_REGISTERS;
@@ -3961,9 +3820,11 @@ scavenge_interrupt_context(os_context_t * context)
 
     /* Compute the PC's offset from the start of the CODE */
     /* register. */
-    pc_code_offset = *os_context_pc_addr(context) - *os_context_register_addr(context, reg_CODE);
+    pc_code_offset = *os_context_pc_addr(context)
+        - *os_context_register_addr(context, reg_CODE);
 #ifdef ARCH_HAS_NPC_REGISTER
-    npc_code_offset = *os_context_npc_addr(context) - *os_context_register_addr(context, reg_CODE);
+    npc_code_offset = *os_context_npc_addr(context)
+        - *os_context_register_addr(context, reg_CODE);
 #endif /* ARCH_HAS_NPC_REGISTER */
 
 #ifdef ARCH_HAS_LINK_REGISTER
@@ -3989,22 +3850,25 @@ scavenge_interrupt_context(os_context_t * context)
     /* Fix the LIP */
 
     /*
-     * But what happens if lip_register_pair is -1?  *os_context_register_addr on Solaris
-     * (see solaris_register_address in solaris-os.c) will return
-     * &context->uc_mcontext.gregs[2].  But gregs[2] is REG_nPC.  Is
-     * that what we really want?  My guess is that that is not what we
+     * But what happens if lip_register_pair is -1?
+     * *os_context_register_addr on Solaris (see
+     * solaris_register_address in solaris-os.c) will return
+     * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
+     * that what we really want? My guess is that that is not what we
      * want, so if lip_register_pair is -1, we don't touch reg_LIP at
-     * all.  But maybe it doesn't really matter if LIP is trashed?
+     * all. But maybe it doesn't really matter if LIP is trashed?
      */
     if (lip_register_pair >= 0) {
         *os_context_register_addr(context, reg_LIP) =
-            *os_context_register_addr(context, lip_register_pair) + lip_offset;
+            *os_context_register_addr(context, lip_register_pair)
+            + lip_offset;
     }
 #endif /* reg_LIP */
 
     /* Fix the PC if it was in from space */
     if (from_space_p(*os_context_pc_addr(context)))
-        *os_context_pc_addr(context) = *os_context_register_addr(context, reg_CODE) + pc_code_offset;
+        *os_context_pc_addr(context) =
+            *os_context_register_addr(context, reg_CODE) + pc_code_offset;
 
 #ifdef ARCH_HAS_LINK_REGISTER
     /* Fix the LR ditto; important if we're being called from
@@ -4017,7 +3881,8 @@ scavenge_interrupt_context(os_context_t * context)
 
 #ifdef ARCH_HAS_NPC_REGISTER
     if (from_space_p(*os_context_npc_addr(context)))
-        *os_context_npc_addr(context) = *os_context_register_addr(context, reg_CODE) + npc_code_offset;
+        *os_context_npc_addr(context) =
+            *os_context_register_addr(context, reg_CODE) + npc_code_offset;
 #endif /* ARCH_HAS_NPC_REGISTER */
 }
 
@@ -4060,11 +3925,27 @@ preserve_context_registers (os_context_t *c)
     preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
     preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
     preserve_pointer((void*)*os_context_pc_addr(c));
+#elif defined LISP_FEATURE_X86_64
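+    /* Pin anything reachable from the general-purpose registers of
+     * the interrupted context, same as the x86 case above. */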
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
+    preserve_pointer((void*)*os_context_pc_addr(c));
 #else
     #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
 #endif
 #endif
-    for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
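+    /* Scan the context itself word by word. (c+1) points one past its
+     * end, so start at the last word; the old loop read one word out
+     * of bounds. */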
+    for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
         preserve_pointer(*ptr);
     }
 }
@@ -4078,12 +3959,17 @@ garbage_collect_generation(generation_index_t generation, int raise)
     unsigned long bytes_freed;
     page_index_t i;
     unsigned long static_space_size;
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
     struct thread *th;
+#endif
     gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
 
     /* The oldest generation can't be raised. */
     gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));
 
+    /* Check if weak hash tables were processed in the previous GC. */
+    gc_assert(weak_hash_tables == NULL);
+
     /* Initialize the weak pointer list. */
     weak_pointers = NULL;
 
@@ -4170,7 +4056,7 @@ garbage_collect_generation(generation_index_t generation, int raise)
 #else
             esp = (void **)((void *)&raise);
 #endif
-            for (ptr = (void **)th->control_stack_end; ptr > esp;  ptr--) {
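+            /* control_stack_end points one past the last stack word;
+             * start at end-1, and include esp itself this time. */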
+            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp;  ptr--) {
                 preserve_pointer(*ptr);
             }
         }
@@ -4183,7 +4069,7 @@ garbage_collect_generation(generation_index_t generation, int raise)
         fprintf(stderr,
                 "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                 num_dont_move_pages,
-                num_dont_move_pages * PAGE_BYTES);
+                npage_bytes(num_dont_move_pages));
     }
 #endif
 
@@ -4291,6 +4177,7 @@ garbage_collect_generation(generation_index_t generation, int raise)
     }
 #endif
 
+    scan_weak_hash_tables();
     scan_weak_pointers();
 
     /* Flush the current regions, updating the tables. */
@@ -4349,13 +4236,12 @@ update_dynamic_space_free_pointer(void)
     page_index_t last_page = -1, i;
 
     for (i = 0; i < last_free_page; i++)
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
-            && (page_table[i].bytes_used != 0))
+        if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
             last_page = i;
 
     last_free_page = last_page+1;
 
-    set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+    set_alloc_pointer((lispobj)(page_address(last_free_page)));
     return 0; /* dummy value: return something ... */
 }
 
@@ -4365,15 +4251,15 @@ remap_free_pages (page_index_t from, page_index_t to)
     page_index_t first_page, last_page;
 
     for (first_page = from; first_page <= to; first_page++) {
-        if (page_table[first_page].allocated != FREE_PAGE_FLAG ||
-            page_table[first_page].need_to_zero == 0) {
+        if (page_allocated_p(first_page) ||
+            (page_table[first_page].need_to_zero == 0)) {
             continue;
         }
 
         last_page = first_page + 1;
-        while (page_table[last_page].allocated == FREE_PAGE_FLAG &&
-               last_page < to &&
-               page_table[last_page].need_to_zero == 1) {
+        while (page_free_p(last_page) &&
+               (last_page < to) &&
+               (page_table[last_page].need_to_zero == 1)) {
             last_page++;
         }
 
@@ -4552,9 +4438,9 @@ gc_free_heap(void)
     if (gencgc_verbose > 1)
         SHOW("entering gc_free_heap");
 
-    for (page = 0; page < NUM_PAGES; page++) {
+    for (page = 0; page < page_table_pages; page++) {
         /* Skip free pages which should already be zero filled. */
-        if (page_table[page].allocated != FREE_PAGE_FLAG) {
+        if (page_allocated_p(page)) {
             void *page_start, *addr;
 
             /* Mark the page free. The other slots are assumed invalid
@@ -4565,7 +4451,8 @@ gc_free_heap(void)
             page_table[page].allocated = FREE_PAGE_FLAG;
             page_table[page].bytes_used = 0;
 
-#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */
+#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
+                            * about this change. */
             /* Zero the page. */
             page_start = (void *)page_address(page);
 
@@ -4587,7 +4474,7 @@ gc_free_heap(void)
             /* Double-check that the page is zero filled. */
             long *page_start;
             page_index_t i;
-            gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+            gc_assert(page_free_p(page));
             gc_assert(page_table[page].bytes_used == 0);
             page_start = (long *)page_address(page);
             for (i=0; i<1024; i++) {
@@ -4627,8 +4514,7 @@ gc_free_heap(void)
 
     if (verify_after_free_heap) {
         /* Check whether purify has left any bad pointers. */
-        if (gencgc_verbose)
-            SHOW("checking after free_heap\n");
+        FSHOW((stderr, "checking after free_heap\n"));
         verify_gc();
     }
 }
@@ -4638,8 +4524,15 @@ gc_init(void)
 {
     page_index_t i;
 
+    /* Compute the number of pages needed for the dynamic space.
+     * Dynamic space size should be aligned on page size. */
+    page_table_pages = dynamic_space_size/PAGE_BYTES;
+    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
+
+    page_table = calloc(page_table_pages, sizeof(struct page));
+    gc_assert(page_table);
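+    /* calloc() both allocates and zero-fills the table; the per-page
+     * fields are still initialized explicitly below. */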
+
     gc_init_tables();
-    scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
     scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
     transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
 
@@ -4652,7 +4545,7 @@ gc_init(void)
     heap_base = (void*)DYNAMIC_SPACE_START;
 
     /* Initialize each page structure. */
-    for (i = 0; i < NUM_PAGES; i++) {
+    for (i = 0; i < page_table_pages; i++) {
         /* Initialize all pages as free. */
         page_table[i].allocated = FREE_PAGE_FLAG;
         page_table[i].bytes_used = 0;
@@ -4699,10 +4592,9 @@ static void
 gencgc_pickup_dynamic(void)
 {
     page_index_t page = 0;
-    long alloc_ptr = get_alloc_pointer();
+    void *alloc_ptr = (void *)get_alloc_pointer();
     lispobj *prev=(lispobj *)page_address(page);
     generation_index_t gen = PSEUDO_STATIC_GENERATION;
-
     do {
         lispobj *first,*ptr= (lispobj *)page_address(page);
         page_table[page].allocated = BOXED_PAGE_FLAG;
@@ -4717,11 +4609,11 @@ gencgc_pickup_dynamic(void)
         if (!gencgc_partial_pickup) {
             first=gc_search_space(prev,(ptr+2)-prev,ptr);
             if(ptr == first)  prev=ptr;
-            page_table[page].first_object_offset =
-                (void *)prev - page_address(page);
+            page_table[page].region_start_offset =
+                page_address(page) - (void *)prev;
         }
         page++;
-    } while ((long)page_address(page) < alloc_ptr);
+    } while (page_address(page) < alloc_ptr);
 
 #ifdef LUTEX_WIDETAG
     /* Lutexes have been registered in generation 0 by coreparse, and
@@ -4732,8 +4624,8 @@ gencgc_pickup_dynamic(void)
 
     last_free_page = page;
 
-    generations[gen].bytes_allocated = PAGE_BYTES*page;
-    bytes_allocated = PAGE_BYTES*page;
+    generations[gen].bytes_allocated = npage_bytes(page);
+    bytes_allocated = npage_bytes(page);
 
     gc_alloc_update_all_page_tables();
     write_protect_generation_pages(gen);
@@ -4744,8 +4636,6 @@ gc_initialize_pointers(void)
 {
     gencgc_pickup_dynamic();
 }
-
-
 \f
 
 /* alloc(..) is the external interface for memory allocation. It
@@ -4760,43 +4650,24 @@ gc_initialize_pointers(void)
  * The check for a GC trigger is only performed when the current
  * region is full, so in most cases it's not needed. */
 
-char *
-alloc(long nbytes)
+static inline lispobj *
+general_alloc_internal(long nbytes, int page_type_flag,
+                       struct alloc_region *region, struct thread *thread)
 {
-    struct thread *thread=arch_os_get_current_thread();
-    struct alloc_region *region=
-#ifdef LISP_FEATURE_SB_THREAD
-        thread ? &(thread->alloc_region) : &boxed_region;
-#else
-        &boxed_region;
+#ifndef LISP_FEATURE_WIN32
+    lispobj alloc_signal;
 #endif
     void *new_obj;
     void *new_free_pointer;
+
     gc_assert(nbytes>0);
 
     /* Check for alignment allocation problems. */
     gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
               && ((nbytes & LOWTAG_MASK) == 0));
 
-#if 0
-    if(all_threads)
-        /* there are a few places in the C code that allocate data in the
-         * heap before Lisp starts.  This is before interrupts are enabled,
-         * so we don't need to check for pseudo-atomic */
-#ifdef LISP_FEATURE_SB_THREAD
-        if(!get_psuedo_atomic_atomic(th)) {
-            register u32 fs;
-            fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
-                    th,th->os_thread);
-            __asm__("movl %fs,%0" : "=r" (fs)  : );
-            fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
-                    debug_get_fs(),th->tls_cookie);
-            lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
-        }
-#else
-    gc_assert(get_pseudo_atomic_atomic(th));
-#endif
-#endif
+    /* Must be inside a PA section. */
+    gc_assert(get_pseudo_atomic_atomic(thread));
 
     /* maybe we can do this quickly ... */
     new_free_pointer = region->free_pointer + nbytes;
@@ -4806,11 +4677,10 @@ alloc(long nbytes)
         return(new_obj);        /* yup */
     }
 
-    /* we have to go the long way around, it seems.  Check whether
-     * we should GC in the near future
+    /* we have to go the long way around, it seems. Check whether we
+     * should GC in the near future
      */
     if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
-        gc_assert(get_pseudo_atomic_atomic(thread));
         /* Don't flood the system with interrupts if the need to gc is
          * already noted. This can happen for example when SUB-GC
          * allocates or after a gc triggered in a WITHOUT-GCING. */
@@ -4822,16 +4692,65 @@ alloc(long nbytes)
               set_pseudo_atomic_interrupted(thread);
         }
     }
-    new_obj = gc_alloc_with_region(nbytes,0,region,0);
+    new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
+
+#ifndef LISP_FEATURE_WIN32
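+    /* If ALLOC_SIGNAL holds a fixnum, count this allocation down;
+     * once it reaches zero, reset it to T and send SIGPROF to the
+     * allocating thread (presumably to drive allocation profiling). */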
+    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
+    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
+        if ((signed long) alloc_signal <= 0) {
+            SetSymbolValue(ALLOC_SIGNAL, T, thread);
+#ifdef LISP_FEATURE_SB_THREAD
+            kill_thread_safely(thread->os_thread, SIGPROF);
+#else
+            raise(SIGPROF);
+#endif
+        } else {
+            SetSymbolValue(ALLOC_SIGNAL,
+                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
+                           thread);
+        }
+    }
+#endif
+
     return (new_obj);
 }
+
+lispobj *
+general_alloc(long nbytes, int page_type_flag)
+{
+    struct thread *thread = arch_os_get_current_thread();
+    /* Select the correct region, and call general_alloc_internal
+     * with it. For anything other than boxed allocation we must lock
+     * first, since the region is shared. */
+    if (BOXED_PAGE_FLAG & page_type_flag) {
+#ifdef LISP_FEATURE_SB_THREAD
+        struct alloc_region *region =
+            (thread ? &(thread->alloc_region) : &boxed_region);
+#else
+        struct alloc_region *region = &boxed_region;
+#endif
+        return general_alloc_internal(nbytes, page_type_flag, region, thread);
+    } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
+        lispobj * obj;
+        gc_assert(0 == thread_mutex_lock(&allocation_lock));
+        obj = general_alloc_internal(nbytes, page_type_flag,
+                                     &unboxed_region, thread);
+        gc_assert(0 == thread_mutex_unlock(&allocation_lock));
+        return obj;
+    } else {
+        lose("bad page type flag: %d", page_type_flag);
+    }
+}
+
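+/* The old entry point, kept for callers that always want boxed
+ * allocation. */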
+lispobj *
+alloc(long nbytes)
+{
+    return general_alloc(nbytes, BOXED_PAGE_FLAG);
+}
 \f
 /*
  * shared support for the OS-dependent signal handlers which
  * catch GENCGC-related write-protect violations
  */
-
-void unhandled_sigmemoryfault(void);
+void unhandled_sigmemoryfault(void* addr);
 
 /* Depending on which OS we're running under, different signals might
  * be raised for a violation of write protection in the heap. This
@@ -4858,7 +4777,7 @@ gencgc_handle_wp_violation(void* fault_addr)
 
         /* It can be helpful to be able to put a breakpoint on this
          * case to help diagnose low-level problems. */
-        unhandled_sigmemoryfault();
+        unhandled_sigmemoryfault(fault_addr);
 
         /* not within the dynamic space -- not our responsibility */
         return 0;
@@ -4878,7 +4797,8 @@ gencgc_handle_wp_violation(void* fault_addr)
              */
             if(page_table[page_index].write_protected_cleared != 1)
                 lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
-                     page_index, boxed_region.first_page, boxed_region.last_page);
+                     page_index, boxed_region.first_page,
+                     boxed_region.last_page);
         }
         /* Don't worry, we can handle it. */
         return 1;
@@ -4889,7 +4809,7 @@ gencgc_handle_wp_violation(void* fault_addr)
  * are about to let Lisp deal with it. It's basically just a
  * convenient place to set a gdb breakpoint. */
 void
-unhandled_sigmemoryfault()
+unhandled_sigmemoryfault(void *addr)
 {}
 
 void gc_alloc_update_all_page_tables(void)
@@ -4897,9 +4817,9 @@ void gc_alloc_update_all_page_tables(void)
     /* Flush the alloc regions updating the tables. */
     struct thread *th;
     for_each_thread(th)
-        gc_alloc_update_page_tables(0, &th->alloc_region);
-    gc_alloc_update_page_tables(1, &unboxed_region);
-    gc_alloc_update_page_tables(0, &boxed_region);
+        gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
+    gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
+    gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
 }
 
 void
@@ -4918,7 +4838,7 @@ zero_all_free_pages()
     page_index_t i;
 
     for (i = 0; i < last_free_page; i++) {
-        if (page_table[i].allocated == FREE_PAGE_FLAG) {
+        if (page_free_p(i)) {
 #ifdef READ_PROTECT_FREE_PAGES
             os_protect(page_address(i),
                        PAGE_BYTES,
@@ -4958,13 +4878,15 @@ prepare_for_final_gc ()
  * function being set to the value of the static symbol
  * SB!VM:RESTART-LISP-FUNCTION */
 void
-gc_and_save(char *filename, int prepend_runtime)
+gc_and_save(char *filename, boolean prepend_runtime,
+            boolean save_runtime_options)
 {
     FILE *file;
     void *runtime_bytes = NULL;
     size_t runtime_size;
 
-    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes, &runtime_size);
+    file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
+                           &runtime_size);
     if (file == NULL)
        return;
 
@@ -4992,7 +4914,7 @@ gc_and_save(char *filename, int prepend_runtime)
     /* The dumper doesn't know that pages need to be zeroed before use. */
     zero_all_free_pages();
     save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
-                       prepend_runtime);
+                       prepend_runtime, save_runtime_options);
     /* Oops. Save still managed to fail. Since we've mangled the stack
      * beyond hope, there's not much we can do.
      * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's