1.0.20.20: fix gencgc on 32-bit platforms with heaps larger than 2GB
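
On 32-bit platforms a dynamic space larger than 2GB overflows the signed
long values gencgc used for byte counts and page offsets: pointer
differences and page*PAGE_BYTES products go negative, so valid addresses
are rejected and the byte accounting wraps.  This patch moves that
arithmetic to unsigned, pointer-sized types, funnels it through two small
helpers (npage_bytes() and void_diff()), and renames first_object_offset
to region_start_offset, stored as a non-negative distance back to the
start of the region instead of a negative offset.  A minimal stand-alone
sketch of the failure mode follows (illustration only, not part of the
patch; it assumes an ILP32 build where long and pointers are 32 bits):

    /* Illustration only: why a signed 32-bit offset breaks
     * find_page_index() for addresses more than 2GB past heap_base. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_BYTES 4096

    int main(void)
    {
        /* A byte offset 3GB into the heap (fits in 32 unsigned bits). */
        uint32_t offset = UINT32_C(3221225472);

        /* Old code held the pointer difference in a signed type; past
         * 2GB the value is negative (implementation-defined wrap), so
         * the address is reported as not being in the heap at all. */
        int32_t as_signed = (int32_t)offset;
        printf("signed:   %ld -> %s\n", (long)as_signed,
               as_signed >= 0 ? "page found" : "page -1 (wrongly rejected)");

        /* New code divides as an unsigned, pointer-sized value first,
         * then range-checks the resulting page index. */
        printf("unsigned: page %lu\n", (unsigned long)(offset / PAGE_BYTES));
        return 0;
    }
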
diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 2f64812..c9d529c 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -178,10 +178,10 @@ page_address(page_index_t page_num)
 
 /* Calculate the address where the allocation region associated with
  * the page starts. */
-inline void *
+static inline void *
 page_region_start(page_index_t page_index)
 {
-    return page_address(page_index)+page_table[page_index].first_object_offset;
+    return page_address(page_index)-page_table[page_index].region_start_offset;
 }
 
 /* Find the page index within the page_table for the given
@@ -189,17 +189,31 @@ page_region_start(page_index_t page_index)
 inline page_index_t
 find_page_index(void *addr)
 {
-    page_index_t index = addr-heap_base;
-
-    if (index >= 0) {
-        index = ((unsigned long)index)/PAGE_BYTES;
+    if (addr >= heap_base) {
+        page_index_t index = ((pointer_sized_uint_t)addr -
+                              (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
         if (index < page_table_pages)
             return (index);
     }
-
     return (-1);
 }
 
+static size_t
+npage_bytes(long npages)
+{
+    gc_assert(npages>=0);
+    return ((unsigned long)npages)*PAGE_BYTES;
+}
+
+/* Check that X is a higher address than Y and return offset from Y to
+ * X in bytes. */
+static inline size_t
+void_diff(void *x, void *y)
+{
+    gc_assert(x >= y);
+    return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
+}
+
 /* a structure to hold the state of a generation */
 struct generation {
 
@@ -219,13 +233,13 @@ struct generation {
     page_index_t alloc_large_unboxed_start_page;
 
     /* the bytes allocated to this generation */
-    long bytes_allocated;
+    unsigned long bytes_allocated;
 
     /* the number of bytes at which to trigger a GC */
-    long gc_trigger;
+    unsigned long gc_trigger;
 
     /* to calculate a new level for gc_trigger */
-    long bytes_consed_between_gc;
+    unsigned long bytes_consed_between_gc;
 
     /* the number of GCs since the last raise */
     int num_gc;
@@ -239,7 +253,7 @@ struct generation {
      * objects are added from a GC of a younger generation. Dividing by
      * the bytes_allocated will give the average age of the memory in
      * this generation since its last GC. */
-    long cum_sum_bytes_allocated;
+    unsigned long cum_sum_bytes_allocated;
 
     /* a minimum average memory age before a GC will occur helps
      * prevent a GC when a large number of new live objects have been
@@ -302,7 +316,7 @@ static long
 count_write_protect_generation_pages(generation_index_t generation)
 {
     page_index_t i;
-    long count = 0;
+    unsigned long count = 0;
 
     for (i = 0; i < last_free_page; i++)
         if ((page_table[i].allocated != FREE_PAGE_FLAG)
@@ -344,11 +358,11 @@ count_dont_move_pages(void)
 
 /* Work through the pages and add up the number of bytes used for the
  * given generation. */
-static long
+static unsigned long
 count_generation_bytes_allocated (generation_index_t gen)
 {
     page_index_t i;
-    long result = 0;
+    unsigned long result = 0;
     for (i = 0; i < last_free_page; i++) {
         if ((page_table[i].allocated != FREE_PAGE_FLAG)
             && (page_table[i].gen == gen))
@@ -443,7 +457,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
                 large_unboxed_cnt,
                 pinned_cnt,
                 generations[i].bytes_allocated,
-                (count_generation_pages(i)*PAGE_BYTES
+                (npage_bytes(count_generation_pages(i))
                  - generations[i].bytes_allocated),
                 generations[i].gc_trigger,
                 count_write_protect_generation_pages(i),
@@ -466,8 +480,8 @@ void fast_bzero(void*, size_t); /* in <arch>-assem.S */
  */
 void zero_pages_with_mmap(page_index_t start, page_index_t end) {
     int i;
-    void *addr = (void *) page_address(start), *new_addr;
-    size_t length = PAGE_BYTES*(1+end-start);
+    void *addr = page_address(start), *new_addr;
+    size_t length = npage_bytes(1+end-start);
 
     if (start > end)
       return;
@@ -493,9 +507,9 @@ zero_pages(page_index_t start, page_index_t end) {
       return;
 
 #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-    fast_bzero(page_address(start), PAGE_BYTES*(1+end-start));
+    fast_bzero(page_address(start), npage_bytes(1+end-start));
 #else
-    bzero(page_address(start), PAGE_BYTES*(1+end-start));
+    bzero(page_address(start), npage_bytes(1+end-start));
 #endif
 
 }
@@ -603,7 +617,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
 {
     page_index_t first_page;
     page_index_t last_page;
-    long bytes_found;
+    unsigned long bytes_found;
     page_index_t i;
     int ret;
 
@@ -628,7 +642,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
     }
     last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
     bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
-            + PAGE_BYTES*(last_page-first_page);
+            + npage_bytes(last_page-first_page);
 
     /* Set up the alloc_region. */
     alloc_region->first_page = first_page;
@@ -648,7 +662,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
             page_table[first_page].allocated = BOXED_PAGE_FLAG;
         page_table[first_page].gen = gc_alloc_generation;
         page_table[first_page].large_object = 0;
-        page_table[first_page].first_object_offset = 0;
+        page_table[first_page].region_start_offset = 0;
     }
 
     if (unboxed)
@@ -669,8 +683,8 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
         page_table[i].large_object = 0;
         /* This may not be necessary for unboxed regions (think it was
          * broken before!) */
-        page_table[i].first_object_offset =
-            alloc_region->start_addr - page_address(i);
+        page_table[i].region_start_offset =
+            void_diff(page_address(i),alloc_region->start_addr);
         page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
     }
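
The invariant behind the new field: region_start_offset is always the
non-negative distance from a page back to the start of the region that
covers it, so page_region_start() recovers the start with a plain
subtraction and every intermediate value stays unsigned.  A
self-contained toy sketch of that invariant (illustration only; heap,
page2 and the helper bodies below are stand-ins for this example, not
SBCL code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BYTES 4096

    /* Same shape as the helpers added by this patch, restated here. */
    static size_t npage_bytes(long npages)
    {
        assert(npages >= 0);
        return (size_t)npages * PAGE_BYTES;
    }

    static size_t void_diff(void *x, void *y)
    {
        assert((uintptr_t)x >= (uintptr_t)y);
        return (uintptr_t)x - (uintptr_t)y;
    }

    int main(void)
    {
        static char heap[8 * PAGE_BYTES];     /* stand-in dynamic space */
        char *region_start = heap + 100;      /* region begins mid page 0 */
        char *page2 = heap + npage_bytes(2);  /* a continuation page */

        /* What gc_alloc_new_region() now stores for that page: a
         * positive distance back to the region start. */
        size_t region_start_offset = void_diff(page2, region_start);

        /* page_region_start(): subtract the offset instead of adding a
         * (formerly negative) first_object_offset. */
        char *recovered = page2 - region_start_offset;
        assert(recovered == region_start);

        printf("region_start_offset = %zu bytes\n", region_start_offset);
        return 0;
    }
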
     /* Bump up last_free_page. */
@@ -678,15 +692,14 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
         last_free_page = last_page+1;
         /* do we only want to call this on special occasions? like for
          * boxed_region? */
-        set_alloc_pointer((lispobj)(((char *)heap_base)
-                                    + last_free_page*PAGE_BYTES));
+        set_alloc_pointer((lispobj)page_address(last_free_page));
     }
     ret = thread_mutex_unlock(&free_pages_lock);
     gc_assert(ret == 0);
 
 #ifdef READ_PROTECT_FREE_PAGES
     os_protect(page_address(first_page),
-               PAGE_BYTES*(1+last_page-first_page),
+               npage_bytes(1+last_page-first_page),
                OS_VM_PROT_ALL);
 #endif
 
@@ -737,8 +750,8 @@ static int record_new_objects = 0;
 static page_index_t new_areas_ignore_page;
 struct new_area {
     page_index_t page;
-    long  offset;
-    long  size;
+    size_t offset;
+    size_t size;
 };
 static struct new_area (*new_areas)[];
 static long new_areas_index;
@@ -746,7 +759,7 @@ long max_new_areas;
 
 /* Add a new area to new_areas. */
 static void
-add_new_area(page_index_t first_page, long offset, long size)
+add_new_area(page_index_t first_page, size_t offset, size_t size)
 {
     unsigned long new_area_start,c;
     long i;
@@ -768,13 +781,13 @@ add_new_area(page_index_t first_page, long offset, long size)
         gc_abort();
     }
 
-    new_area_start = PAGE_BYTES*first_page + offset;
+    new_area_start = npage_bytes(first_page) + offset;
 
     /* Search backwards for a prior area that this follows from. If
        found this will save adding a new area. */
     for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
         unsigned long area_end =
-            PAGE_BYTES*((*new_areas)[i].page)
+            npage_bytes((*new_areas)[i].page)
             + (*new_areas)[i].offset
             + (*new_areas)[i].size;
         /*FSHOW((stderr,
@@ -821,10 +834,10 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
     int more;
     page_index_t first_page;
     page_index_t next_page;
-    int bytes_used;
-    long orig_first_page_bytes_used;
-    long region_size;
-    long byte_cnt;
+    unsigned long bytes_used;
+    unsigned long orig_first_page_bytes_used;
+    unsigned long region_size;
+    unsigned long byte_cnt;
     int ret;
 
 
@@ -851,9 +864,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         /* Update the first page. */
 
         /* If the page was free then set up the gen, and
-         * first_object_offset. */
+         * region_start_offset. */
         if (page_table[first_page].bytes_used == 0)
-            gc_assert(page_table[first_page].first_object_offset == 0);
+            gc_assert(page_table[first_page].region_start_offset == 0);
         page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
 
         if (unboxed)
@@ -868,8 +881,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         /* Calculate the number of bytes used in this page. This is not
          * always the number of new bytes, unless it was free. */
         more = 0;
-        if ((bytes_used = (alloc_region->free_pointer
-                           - page_address(first_page)))>PAGE_BYTES) {
+        if ((bytes_used = void_diff(alloc_region->free_pointer,
+                                    page_address(first_page)))
+            >PAGE_BYTES) {
             bytes_used = PAGE_BYTES;
             more = 1;
         }
@@ -877,9 +891,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         byte_cnt += bytes_used;
 
 
-        /* All the rest of the pages should be free. We need to set their
-         * first_object_offset pointer to the start of the region, and set
-         * the bytes_used. */
+        /* All the rest of the pages should be free. We need to set
+         * their region_start_offset pointer to the start of the
+         * region, and set the bytes_used. */
         while (more) {
             page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
             if (unboxed)
@@ -890,13 +904,14 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
             gc_assert(page_table[next_page].gen == gc_alloc_generation);
             gc_assert(page_table[next_page].large_object == 0);
 
-            gc_assert(page_table[next_page].first_object_offset ==
-                      alloc_region->start_addr - page_address(next_page));
+            gc_assert(page_table[next_page].region_start_offset ==
+                      void_diff(page_address(next_page),
+                                alloc_region->start_addr));
 
             /* Calculate the number of bytes used in this page. */
             more = 0;
-            if ((bytes_used = (alloc_region->free_pointer
-                               - page_address(next_page)))>PAGE_BYTES) {
+            if ((bytes_used = void_diff(alloc_region->free_pointer,
+                                        page_address(next_page)))>PAGE_BYTES) {
                 bytes_used = PAGE_BYTES;
                 more = 1;
             }
@@ -906,7 +921,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
             next_page++;
         }
 
-        region_size = alloc_region->free_pointer - alloc_region->start_addr;
+        region_size = void_diff(alloc_region->free_pointer,
+                                alloc_region->start_addr);
         bytes_allocated += region_size;
         generations[gc_alloc_generation].bytes_allocated += region_size;
 
@@ -992,14 +1008,14 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
     orig_first_page_bytes_used = page_table[first_page].bytes_used;
 
     /* If the first page was free then set up the gen, and
-     * first_object_offset. */
+     * region_start_offset. */
     if (page_table[first_page].bytes_used == 0) {
         if (unboxed)
             page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
         else
             page_table[first_page].allocated = BOXED_PAGE_FLAG;
         page_table[first_page].gen = gc_alloc_generation;
-        page_table[first_page].first_object_offset = 0;
+        page_table[first_page].region_start_offset = 0;
         page_table[first_page].large_object = 1;
     }
 
@@ -1025,8 +1041,8 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
     next_page = first_page+1;
 
     /* All the rest of the pages should be free. We need to set their
-     * first_object_offset pointer to the start of the region, and
-     * set the bytes_used. */
+     * region_start_offset pointer to the start of the region, and set
+     * the bytes_used. */
     while (more) {
         gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
         gc_assert(page_table[next_page].bytes_used == 0);
@@ -1037,8 +1053,8 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
         page_table[next_page].gen = gc_alloc_generation;
         page_table[next_page].large_object = 1;
 
-        page_table[next_page].first_object_offset =
-            orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
+        page_table[next_page].region_start_offset =
+            npage_bytes(next_page-first_page) - orig_first_page_bytes_used;
 
         /* Calculate the number of bytes used in this page. */
         more = 0;
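
For a large object the region may begin partway into first_page, after
the orig_first_page_bytes_used bytes that were already in use, so the
distance stored for a later page is the whole-page span minus that
initial usage.  A toy check with assumed numbers (illustration only, not
from the patch):

    /* Toy check: PAGE_BYTES = 4096, the object starts 1000 bytes into
     * first_page, and next_page is first_page + 3. */
    #include <assert.h>

    int main(void)
    {
        unsigned long page_bytes = 4096, orig_used = 1000, pages_in = 3;
        unsigned long region_start_offset = pages_in * page_bytes - orig_used;

        assert(region_start_offset == 11288);
        /* Walking back from next_page by the stored offset lands exactly
         * orig_used bytes into first_page, i.e. on the object itself. */
        assert(pages_in * page_bytes - region_start_offset == orig_used);
        return 0;
    }
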
@@ -1066,15 +1082,14 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
     /* Bump up last_free_page */
     if (last_page+1 > last_free_page) {
         last_free_page = last_page+1;
-        set_alloc_pointer((lispobj)(((char *)heap_base)
-                                    + last_free_page*PAGE_BYTES));
+        set_alloc_pointer((lispobj)(page_address(last_free_page)));
     }
     ret = thread_mutex_unlock(&free_pages_lock);
     gc_assert(ret == 0);
 
 #ifdef READ_PROTECT_FREE_PAGES
     os_protect(page_address(first_page),
-               PAGE_BYTES*(1+last_page-first_page),
+               npage_bytes(1+last_page-first_page),
                OS_VM_PROT_ALL);
 #endif
 
@@ -1230,7 +1245,7 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
         /* Unless a `quick' alloc was requested, check whether the
            alloc region is almost empty. */
         if (!quick_p &&
-            (my_region->end_addr - my_region->free_pointer) <= 32) {
+            void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
             /* If so, finished with the current region. */
             gc_alloc_update_page_tables(unboxed_p, my_region);
             /* Set up a new region. */
@@ -1317,10 +1332,10 @@ copy_large_object(lispobj object, long nwords)
 
         /* Promote the object. */
 
-        long remaining_bytes;
+        unsigned long remaining_bytes;
         page_index_t next_page;
-        long bytes_freed;
-        long old_bytes_used;
+        unsigned long bytes_freed;
+        unsigned long old_bytes_used;
 
         /* Note: Any page write-protection must be removed, else a
          * later scavenge_newspace may incorrectly not scavenge these
@@ -1328,7 +1343,7 @@ copy_large_object(lispobj object, long nwords)
          * new areas, but let's do it for them all (they'll probably
          * be written anyway?). */
 
-        gc_assert(page_table[first_page].first_object_offset == 0);
+        gc_assert(page_table[first_page].region_start_offset == 0);
 
         next_page = first_page;
         remaining_bytes = nwords*N_WORD_BYTES;
@@ -1336,8 +1351,8 @@ copy_large_object(lispobj object, long nwords)
             gc_assert(page_table[next_page].gen == from_space);
             gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
             gc_assert(page_table[next_page].large_object);
-            gc_assert(page_table[next_page].first_object_offset==
-                      -PAGE_BYTES*(next_page-first_page));
+            gc_assert(page_table[next_page].region_start_offset ==
+                      npage_bytes(next_page-first_page));
             gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
 
             page_table[next_page].gen = new_space;
@@ -1373,8 +1388,8 @@ copy_large_object(lispobj object, long nwords)
                (page_table[next_page].gen == from_space) &&
                (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
                page_table[next_page].large_object &&
-               (page_table[next_page].first_object_offset ==
-                -(next_page - first_page)*PAGE_BYTES)) {
+               (page_table[next_page].region_start_offset ==
+                npage_bytes(next_page - first_page))) {
             /* Checks out OK, free the page. Don't need to bother zeroing
              * pages as this should have been done before shrinking the
              * object. These pages shouldn't be write-protected as they
@@ -1468,12 +1483,12 @@ copy_large_unboxed_object(lispobj object, long nwords)
         /* Promote the object. Note: Unboxed objects may have been
          * allocated to a BOXED region so it may be necessary to
          * change the region to UNBOXED. */
-        long remaining_bytes;
+        unsigned long remaining_bytes;
         page_index_t next_page;
-        long bytes_freed;
-        long old_bytes_used;
+        unsigned long bytes_freed;
+        unsigned long old_bytes_used;
 
-        gc_assert(page_table[first_page].first_object_offset == 0);
+        gc_assert(page_table[first_page].region_start_offset == 0);
 
         next_page = first_page;
         remaining_bytes = nwords*N_WORD_BYTES;
@@ -1482,8 +1497,8 @@ copy_large_unboxed_object(lispobj object, long nwords)
             gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
                       || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
             gc_assert(page_table[next_page].large_object);
-            gc_assert(page_table[next_page].first_object_offset==
-                      -PAGE_BYTES*(next_page-first_page));
+            gc_assert(page_table[next_page].region_start_offset ==
+                      npage_bytes(next_page-first_page));
             gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
 
             page_table[next_page].gen = new_space;
@@ -1514,8 +1529,8 @@ copy_large_unboxed_object(lispobj object, long nwords)
                ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
                 || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
                page_table[next_page].large_object &&
-               (page_table[next_page].first_object_offset ==
-                -(next_page - first_page)*PAGE_BYTES)) {
+               (page_table[next_page].region_start_offset ==
+                npage_bytes(next_page - first_page))) {
             /* Checks out OK, free the page. Don't need to bother zeroing
              * pages as this should have been done before shrinking the
              * object. These pages shouldn't be write-protected, even if
@@ -2456,9 +2471,9 @@ maybe_adjust_large_object(lispobj *where)
     page_index_t next_page;
     long nwords;
 
-    long remaining_bytes;
-    long bytes_freed;
-    long old_bytes_used;
+    unsigned long remaining_bytes;
+    unsigned long bytes_freed;
+    unsigned long old_bytes_used;
 
     int boxed;
 
@@ -2544,7 +2559,7 @@ maybe_adjust_large_object(lispobj *where)
      * but let's do it for them all (they'll probably be written
      * anyway?). */
 
-    gc_assert(page_table[first_page].first_object_offset == 0);
+    gc_assert(page_table[first_page].region_start_offset == 0);
 
     next_page = first_page;
     remaining_bytes = nwords*N_WORD_BYTES;
@@ -2553,8 +2568,8 @@ maybe_adjust_large_object(lispobj *where)
         gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
                   || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
         gc_assert(page_table[next_page].large_object);
-        gc_assert(page_table[next_page].first_object_offset ==
-                  -PAGE_BYTES*(next_page-first_page));
+        gc_assert(page_table[next_page].region_start_offset ==
+                  npage_bytes(next_page-first_page));
         gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
 
         page_table[next_page].allocated = boxed;
@@ -2589,8 +2604,8 @@ maybe_adjust_large_object(lispobj *where)
            ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
             || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
            page_table[next_page].large_object &&
-           (page_table[next_page].first_object_offset ==
-            -(next_page - first_page)*PAGE_BYTES)) {
+           (page_table[next_page].region_start_offset ==
+            npage_bytes(next_page - first_page))) {
         /* It checks out OK, free the page. We don't need to bother zeroing
          * pages as this should have been done before shrinking the
          * object. These pages shouldn't be write protected as they
@@ -2677,7 +2692,7 @@ preserve_pointer(void *addr)
     first_page = find_page_index(page_region_start(addr_page_index))
 #else
     first_page = addr_page_index;
-    while (page_table[first_page].first_object_offset != 0) {
+    while (page_table[first_page].region_start_offset != 0) {
         --first_page;
         /* Do some checks. */
         gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
@@ -2736,7 +2751,7 @@ preserve_pointer(void *addr)
             || (page_table[i+1].allocated == FREE_PAGE_FLAG)
             || (page_table[i+1].bytes_used == 0) /* next page free */
             || (page_table[i+1].gen != from_space) /* diff. gen */
-            || (page_table[i+1].first_object_offset == 0))
+            || (page_table[i+1].region_start_offset == 0))
             break;
     }
 
@@ -2873,7 +2888,7 @@ scavenge_generations(generation_index_t from, generation_index_t to)
             int write_protected=1;
 
             /* This should be the start of a region */
-            gc_assert(page_table[i].first_object_offset == 0);
+            gc_assert(page_table[i].region_start_offset == 0);
 
             /* Now work forward until the end of the region */
             for (last_page = i; ; last_page++) {
@@ -2884,13 +2899,14 @@ scavenge_generations(generation_index_t from, generation_index_t to)
                     || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                     || (page_table[last_page+1].bytes_used == 0)
                     || (page_table[last_page+1].gen != generation)
-                    || (page_table[last_page+1].first_object_offset == 0))
+                    || (page_table[last_page+1].region_start_offset == 0))
                     break;
             }
             if (!write_protected) {
                 scavenge(page_address(i),
-                         (page_table[last_page].bytes_used
-                          + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+                         ((unsigned long)(page_table[last_page].bytes_used
+                                          + npage_bytes(last_page-i)))
+                         /N_WORD_BYTES);
 
                 /* Now scan the pages and write protect those that
                  * don't have pointers to younger generations. */
@@ -2919,9 +2935,9 @@ scavenge_generations(generation_index_t from, generation_index_t to)
             && (page_table[i].write_protected_cleared != 0)) {
             FSHOW((stderr, "/scavenge_generation() %d\n", generation));
             FSHOW((stderr,
-                   "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
+                   "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n",
                     page_table[i].bytes_used,
-                    page_table[i].first_object_offset,
+                    page_table[i].region_start_offset,
                     page_table[i].dont_move));
             lose("write to protected page %d in scavenge_generation()\n", i);
         }
@@ -2977,7 +2993,8 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
             page_index_t last_page;
             int all_wp=1;
 
-            /* The scavenge will start at the first_object_offset of page i.
+            /* The scavenge will start at the region_start_offset of
+             * page i.
              *
              * We need to find the full extent of this contiguous
              * block in case objects span pages.
@@ -2998,20 +3015,20 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
                     || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                     || (page_table[last_page+1].bytes_used == 0)
                     || (page_table[last_page+1].gen != generation)
-                    || (page_table[last_page+1].first_object_offset == 0))
+                    || (page_table[last_page+1].region_start_offset == 0))
                     break;
             }
 
             /* Do a limited check for write-protected pages.  */
             if (!all_wp) {
-                long size;
-
-                size = (page_table[last_page].bytes_used
-                        + (last_page-i)*PAGE_BYTES
-                        - page_table[i].first_object_offset)/N_WORD_BYTES;
+                long nwords = (((unsigned long)
+                               (page_table[last_page].bytes_used
+                                + npage_bytes(last_page-i)
+                                + page_table[i].region_start_offset))
+                               / N_WORD_BYTES);
                 new_areas_ignore_page = last_page;
 
-                scavenge(page_region_start(i), size);
+                scavenge(page_region_start(i), nwords);
 
             }
             i = last_page;
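
Because region_start_offset is now a positive distance back to the
region start, the scavenged span grows by it rather than shrinking by a
(negative) first_object_offset: the scan runs from page_region_start(i)
up to the used portion of last_page.  With assumed numbers (not from the
patch): a region starting 500 bytes before page i, two whole pages
between i and last_page, and 1000 bytes used on last_page gives
500 + 2*4096 + 1000 = 9692 bytes, i.e. 9692 / N_WORD_BYTES = 2423 words
on a 32-bit build.
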
@@ -3116,9 +3133,9 @@ scavenge_newspace_generation(generation_index_t generation)
 
             /* Work through previous_new_areas. */
             for (i = 0; i < previous_new_areas_index; i++) {
-                long page = (*previous_new_areas)[i].page;
-                long offset = (*previous_new_areas)[i].offset;
-                long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+                page_index_t page = (*previous_new_areas)[i].page;
+                size_t offset = (*previous_new_areas)[i].offset;
+                size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
                 gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
                 scavenge(page_address(page)+offset, size);
             }
@@ -3187,10 +3204,10 @@ unprotect_oldspace(void)
  * assumes that all objects have been copied or promoted to an older
  * generation. Bytes_allocated and the generation bytes_allocated
  * counter are updated. The number of bytes freed is returned. */
-static long
+static unsigned long
 free_oldspace(void)
 {
-    long bytes_freed = 0;
+    unsigned long bytes_freed = 0;
     page_index_t first_page, last_page;
 
     first_page = 0;
@@ -3236,7 +3253,7 @@ free_oldspace(void)
 
 #ifdef READ_PROTECT_FREE_PAGES
         os_protect(page_address(first_page),
-                   PAGE_BYTES*(last_page-first_page),
+                   npage_bytes(last_page-first_page),
                    OS_VM_PROT_NONE);
 #endif
         first_page = last_page;
@@ -3255,13 +3272,13 @@ print_ptr(lispobj *addr)
     page_index_t pi1 = find_page_index((void*)addr);
 
     if (pi1 != -1)
-        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %d  dont_move %d\n",
+        fprintf(stderr,"  %x: page %d  alloc %d  gen %d  bytes_used %d  offset %lu  dont_move %d\n",
                 (unsigned long) addr,
                 pi1,
                 page_table[pi1].allocated,
                 page_table[pi1].gen,
                 page_table[pi1].bytes_used,
-                page_table[pi1].first_object_offset,
+                page_table[pi1].region_start_offset,
                 page_table[pi1].dont_move);
     fprintf(stderr,"  %x %x %x %x (%x) %x %x %x %x\n",
             *(addr-4),
@@ -3573,7 +3590,7 @@ verify_generation(generation_index_t generation)
             int region_allocation = page_table[i].allocated;
 
             /* This should be the start of a contiguous block */
-            gc_assert(page_table[i].first_object_offset == 0);
+            gc_assert(page_table[i].region_start_offset == 0);
 
             /* Need to find the full extent of this contiguous block in case
                objects span pages. */
@@ -3588,12 +3605,14 @@ verify_generation(generation_index_t generation)
                     || (page_table[last_page+1].allocated != region_allocation)
                     || (page_table[last_page+1].bytes_used == 0)
                     || (page_table[last_page+1].gen != generation)
-                    || (page_table[last_page+1].first_object_offset == 0))
+                    || (page_table[last_page+1].region_start_offset == 0))
                     break;
 
             verify_space(page_address(i),
-                         (page_table[last_page].bytes_used
-                          + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+                         ((unsigned long)
+                          (page_table[last_page].bytes_used
+                           + npage_bytes(last_page-i)))
+                         / N_WORD_BYTES);
             i = last_page;
         }
     }
@@ -3686,7 +3705,7 @@ write_protect_generation_pages(generation_index_t generation)
             page_start = (void *)page_address(start);
 
             os_protect(page_start,
-                       PAGE_BYTES * (last - start),
+                       npage_bytes(last - start),
                        OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
 
             start = last;
@@ -4022,7 +4041,7 @@ garbage_collect_generation(generation_index_t generation, int raise)
         fprintf(stderr,
                 "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                 num_dont_move_pages,
-                num_dont_move_pages * PAGE_BYTES);
+                npage_bytes(num_dont_move_pages));
     }
 #endif
 
@@ -4195,8 +4214,7 @@ update_dynamic_space_free_pointer(void)
 
     last_free_page = last_page+1;
 
-    set_alloc_pointer((lispobj)(((char *)heap_base)
-                                + last_free_page*PAGE_BYTES));
+    set_alloc_pointer((lispobj)(page_address(last_free_page)));
     return 0; /* dummy value: return something ... */
 }
 
@@ -4482,7 +4500,7 @@ gc_init(void)
     /* Compute the number of pages needed for the dynamic space.
      * Dynamic space size should be aligned on page size. */
     page_table_pages = dynamic_space_size/PAGE_BYTES;
-    gc_assert(dynamic_space_size == (size_t) page_table_pages*PAGE_BYTES);
+    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
 
     page_table = calloc(page_table_pages, sizeof(struct page));
     gc_assert(page_table);
@@ -4547,7 +4565,7 @@ static void
 gencgc_pickup_dynamic(void)
 {
     page_index_t page = 0;
-    long alloc_ptr = get_alloc_pointer();
+    void *alloc_ptr = (void *)get_alloc_pointer();
     lispobj *prev=(lispobj *)page_address(page);
     generation_index_t gen = PSEUDO_STATIC_GENERATION;
 
@@ -4565,11 +4583,11 @@ gencgc_pickup_dynamic(void)
         if (!gencgc_partial_pickup) {
             first=gc_search_space(prev,(ptr+2)-prev,ptr);
             if(ptr == first)  prev=ptr;
-            page_table[page].first_object_offset =
-                (void *)prev - page_address(page);
+            page_table[page].region_start_offset =
+                page_address(page) - (void *)prev;
         }
         page++;
-    } while ((long)page_address(page) < alloc_ptr);
+    } while (page_address(page) < alloc_ptr);
 
 #ifdef LUTEX_WIDETAG
     /* Lutexes have been registered in generation 0 by coreparse, and
@@ -4580,8 +4598,8 @@ gencgc_pickup_dynamic(void)
 
     last_free_page = page;
 
-    generations[gen].bytes_allocated = PAGE_BYTES*page;
-    bytes_allocated = PAGE_BYTES*page;
+    generations[gen].bytes_allocated = npage_bytes(page);
+    bytes_allocated = npage_bytes(page);
 
     gc_alloc_update_all_page_tables();
     write_protect_generation_pages(gen);
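
The termination test in gencgc_pickup_dynamic() above is the same class
of bug: casting page_address() to a signed long made the comparison
unreliable once one of the addresses sits above 0x7fffffff, which is why
the loop now compares the pointers directly.  A stand-alone illustration
with made-up 32-bit addresses (not SBCL code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical addresses straddling the 2GB boundary. */
        uint32_t page_addr = UINT32_C(0x7FFFF000);  /* just below 2GB */
        uint32_t alloc_ptr = UINT32_C(0x80001000);  /* just above 2GB */

        /* Old test: reinterpreted as signed, the upper address looks
         * negative, so "page_addr < alloc_ptr" is false and the pickup
         * loop would stop before reaching the real allocation pointer. */
        printf("signed:   %d\n", (int32_t)page_addr < (int32_t)alloc_ptr);

        /* New test: compare the unsigned/pointer values unchanged. */
        printf("unsigned: %d\n", page_addr < alloc_ptr);
        return 0;
    }
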