0.8.7.32:
author    William Harold Newman <william.newman@airmail.net>
Fri, 30 Jan 2004 20:55:40 +0000 (20:55 +0000)
committer William Harold Newman <william.newman@airmail.net>
Fri, 30 Jan 2004 20:55:40 +0000 (20:55 +0000)
renamed FOO_PAGE flags to FOO_PAGE_FLAG in hopes of reducing
future confusion between wordwise equality and bitwise
flag testing (which seemed to be implicit in one of the
GC fixes in 0.8.7.whatever)
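
(Illustrative sketch only, not code from this commit: the distinction the
new names are meant to make obvious, using page_table and the flag values
defined in gc-internal.h.)

    /* wordwise equality: false for a page whose .allocated field holds
     * BOXED_PAGE_FLAG | OPEN_REGION_PAGE_FLAG, i.e. a boxed page that
     * currently belongs to an open allocation region */
    if (page_table[i].allocated == BOXED_PAGE_FLAG) { /* ... */ }

    /* bitwise flag test: true whenever the boxed bit is set, whether or
     * not OPEN_REGION_PAGE_FLAG (or any other bit) is also set */
    if (page_table[i].allocated & BOXED_PAGE_FLAG) { /* ... */ }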

src/code/late-type.lisp
src/runtime/gc-internal.h
src/runtime/gencgc.c
version.lisp-expr

src/code/late-type.lisp
index 799d6e9..d78c38b 100644
       (multiple-value-bind (equalp certainp)
          (type= (array-type-element-type type1)
                 (array-type-element-type type2))
-       ;; by its nature, the call to TYPE= should never return NIL,
+       ;; By its nature, the call to TYPE= should never return NIL,
        ;; T, as we don't know what the UNKNOWN-TYPE will grow up to
        ;; be.  -- CSR, 2002-08-19
        (aver (not (and (not equalp) certainp)))
 (!define-type-method (array :negate) (type)
   ;; FIXME (and hint to PFD): we're vulnerable here to attacks of the
   ;; form "are (AND ARRAY (NOT (ARRAY T))) and (OR (ARRAY BIT) (ARRAY
-  ;; NIL) (ARRAY CHAR) ...) equivalent?  -- CSR, 2003-12-10
+  ;; NIL) (ARRAY CHAR) ...) equivalent?" -- CSR, 2003-12-10
   (make-negation-type :type type))
 
 (!define-type-method (array :unparse) (type)
src/runtime/gc-internal.h
index d2b1878..28d0efc 100644
 #define FUN_RAW_ADDR_OFFSET (6*sizeof(lispobj) - FUN_POINTER_LOWTAG)
 
 /* values for the *_alloc_* parameters */
-#define FREE_PAGE 0
-#define BOXED_PAGE 1
-#define UNBOXED_PAGE 2
-#define OPEN_REGION_PAGE 4
+#define FREE_PAGE_FLAG 0
+#define BOXED_PAGE_FLAG 1
+#define UNBOXED_PAGE_FLAG 2
+#define OPEN_REGION_PAGE_FLAG 4
 
 #define ALLOC_BOXED 0
 #define ALLOC_UNBOXED 1
src/runtime/gencgc.c
index b31bc72..2b56009 100644
@@ -274,7 +274,7 @@ count_write_protect_generation_pages(int generation)
     int count = 0;
 
     for (i = 0; i < last_free_page; i++)
-       if ((page_table[i].allocated != FREE_PAGE)
+       if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
@@ -372,7 +372,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
 
                /* Count the number of boxed pages within the given
                 * generation. */
-               if (page_table[j].allocated & BOXED_PAGE) {
+               if (page_table[j].allocated & BOXED_PAGE_FLAG) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
@@ -381,7 +381,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
                if(page_table[j].dont_move) pinned_cnt++;
                /* Count the number of unboxed pages within the given
                 * generation. */
-               if (page_table[j].allocated & UNBOXED_PAGE) {
+               if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
@@ -533,35 +533,35 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
     /* The first page may have already been in use. */
     if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
-           page_table[first_page].allocated = UNBOXED_PAGE;
+           page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
        else
-           page_table[first_page].allocated = BOXED_PAGE;
+           page_table[first_page].allocated = BOXED_PAGE_FLAG;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].first_object_offset = 0;
     }
 
     if (unboxed)
-       gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
+       gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
     else
-       gc_assert(page_table[first_page].allocated == BOXED_PAGE);
-    page_table[first_page].allocated |= OPEN_REGION_PAGE; 
+       gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+    page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG; 
 
     gc_assert(page_table[first_page].gen == gc_alloc_generation);
     gc_assert(page_table[first_page].large_object == 0);
 
     for (i = first_page+1; i <= last_page; i++) {
        if (unboxed)
-           page_table[i].allocated = UNBOXED_PAGE;
+           page_table[i].allocated = UNBOXED_PAGE_FLAG;
        else
-           page_table[i].allocated = BOXED_PAGE;
+           page_table[i].allocated = BOXED_PAGE_FLAG;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
-       page_table[i].allocated |= OPEN_REGION_PAGE ;
+       page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
     }
     /* Bump up last_free_page. */
     if (last_page+1 > last_free_page) {
@@ -722,12 +722,12 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         * first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);
-       page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
+       page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
 
        if (unboxed)
-           gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
+           gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
        else
-           gc_assert(page_table[first_page].allocated == BOXED_PAGE);
+           gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);
 
@@ -748,11 +748,11 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used. */
        while (more) {
-           page_table[next_page].allocated &= ~(OPEN_REGION_PAGE);
+           page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
            if (unboxed)
-               gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
+               gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
            else
-               gc_assert(page_table[next_page].allocated == BOXED_PAGE);
+               gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);
@@ -800,15 +800,15 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
     } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
-       page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
+       page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
        if (page_table[first_page].bytes_used == 0)
-           page_table[first_page].allocated = FREE_PAGE;
+           page_table[first_page].allocated = FREE_PAGE_FLAG;
     }
 
     /* Unallocate any unused pages. */
     while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
-       page_table[next_page].allocated = FREE_PAGE;
+       page_table[next_page].allocated = FREE_PAGE_FLAG;
        next_page++;
     }
     release_spinlock(&free_pages_lock);
@@ -858,18 +858,18 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
      * first_object_offset. */
     if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
-           page_table[first_page].allocated = UNBOXED_PAGE;
+           page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
        else
-           page_table[first_page].allocated = BOXED_PAGE;
+           page_table[first_page].allocated = BOXED_PAGE_FLAG;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].first_object_offset = 0;
        page_table[first_page].large_object = 1;
     }
 
     if (unboxed)
-       gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
+       gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
     else
-       gc_assert(page_table[first_page].allocated == BOXED_PAGE);
+       gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
     gc_assert(page_table[first_page].gen == gc_alloc_generation);
     gc_assert(page_table[first_page].large_object == 1);
 
@@ -891,12 +891,12 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
      * first_object_offset pointer to the start of the region, and
      * set the bytes_used. */
     while (more) {
-       gc_assert(page_table[next_page].allocated == FREE_PAGE);
+       gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
        gc_assert(page_table[next_page].bytes_used == 0);
        if (unboxed)
-           page_table[next_page].allocated = UNBOXED_PAGE;
+           page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
        else
-           page_table[next_page].allocated = BOXED_PAGE;
+           page_table[next_page].allocated = BOXED_PAGE_FLAG;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = 1;
 
@@ -956,14 +956,14 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed)
        first_page = restart_page;
        if (large_p)            
            while ((first_page < NUM_PAGES)
-                  && (page_table[first_page].allocated != FREE_PAGE))
+                  && (page_table[first_page].allocated != FREE_PAGE_FLAG))
                first_page++;
        else
            while (first_page < NUM_PAGES) {
-               if(page_table[first_page].allocated == FREE_PAGE)
+               if(page_table[first_page].allocated == FREE_PAGE_FLAG)
                    break;
                if((page_table[first_page].allocated ==
-                   (unboxed ? UNBOXED_PAGE : BOXED_PAGE)) &&
+                   (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
                   (page_table[first_page].large_object == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
@@ -990,7 +990,7 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed)
        while (((bytes_found < nbytes) 
                || (!large_p && (num_pages < 2)))
               && (last_page < (NUM_PAGES-1))
-              && (page_table[last_page+1].allocated == FREE_PAGE)) {
+              && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
            last_page++;
            num_pages++;
            bytes_found += PAGE_BYTES;
@@ -1150,7 +1150,7 @@ copy_large_object(lispobj object, int nwords)
        remaining_bytes = nwords*4;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
-           gc_assert(page_table[next_page].allocated == BOXED_PAGE);
+           gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset==
                      -PAGE_BYTES*(next_page-first_page));
@@ -1175,7 +1175,7 @@ copy_large_object(lispobj object, int nwords)
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
 
        page_table[next_page].gen = new_space;
-       gc_assert(page_table[next_page].allocated == BOXED_PAGE);
+       gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
 
        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
@@ -1187,7 +1187,7 @@ copy_large_object(lispobj object, int nwords)
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
-              (page_table[next_page].allocated == BOXED_PAGE) &&
+              (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*PAGE_BYTES)) {
@@ -1198,7 +1198,7 @@ copy_large_object(lispobj object, int nwords)
            gc_assert(page_table[next_page].write_protected == 0);
 
            old_bytes_used = page_table[next_page].bytes_used;
-           page_table[next_page].allocated = FREE_PAGE;
+           page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
@@ -1294,15 +1294,15 @@ copy_large_unboxed_object(lispobj object, int nwords)
        remaining_bytes = nwords*4;
        while (remaining_bytes > PAGE_BYTES) {
            gc_assert(page_table[next_page].gen == from_space);
-           gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
-                     || (page_table[next_page].allocated == BOXED_PAGE));
+           gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+                     || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset==
                      -PAGE_BYTES*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
 
            page_table[next_page].gen = new_space;
-           page_table[next_page].allocated = UNBOXED_PAGE;
+           page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
            remaining_bytes -= PAGE_BYTES;
            next_page++;
        }
@@ -1314,7 +1314,7 @@ copy_large_unboxed_object(lispobj object, int nwords)
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
 
        page_table[next_page].gen = new_space;
-       page_table[next_page].allocated = UNBOXED_PAGE;
+       page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
 
        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
@@ -1326,8 +1326,8 @@ copy_large_unboxed_object(lispobj object, int nwords)
        next_page++;
        while ((old_bytes_used == PAGE_BYTES) &&
               (page_table[next_page].gen == from_space) &&
-              ((page_table[next_page].allocated == UNBOXED_PAGE)
-               || (page_table[next_page].allocated == BOXED_PAGE)) &&
+              ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+               || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*PAGE_BYTES)) {
@@ -1338,7 +1338,7 @@ copy_large_unboxed_object(lispobj object, int nwords)
            gc_assert(page_table[next_page].write_protected == 0);
 
            old_bytes_used = page_table[next_page].bytes_used;
-           page_table[next_page].allocated = FREE_PAGE;
+           page_table[next_page].allocated = FREE_PAGE_FLAG;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
@@ -2013,7 +2013,8 @@ search_dynamic_space(lispobj *pointer)
     lispobj *start;
 
     /* The address may be invalid, so do some checks. */
-    if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
+    if ((page_index == -1) ||
+       (page_table[page_index].allocated == FREE_PAGE_FLAG))
        return NULL;
     start = (lispobj *)((void *)page_address(page_index)
                        + page_table[page_index].first_object_offset);
@@ -2277,7 +2278,7 @@ maybe_adjust_large_object(lispobj *where)
     /* Check whether it's a vector or bignum object. */
     switch (widetag_of(where[0])) {
     case SIMPLE_VECTOR_WIDETAG:
-       boxed = BOXED_PAGE;
+       boxed = BOXED_PAGE_FLAG;
        break;
     case BIGNUM_WIDETAG:
     case SIMPLE_BASE_STRING_WIDETAG:
@@ -2318,7 +2319,7 @@ maybe_adjust_large_object(lispobj *where)
 #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
     case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
 #endif
-       boxed = UNBOXED_PAGE;
+       boxed = UNBOXED_PAGE_FLAG;
        break;
     default:
        return;
@@ -2342,8 +2343,8 @@ maybe_adjust_large_object(lispobj *where)
     remaining_bytes = nwords*4;
     while (remaining_bytes > PAGE_BYTES) {
        gc_assert(page_table[next_page].gen == from_space);
-       gc_assert((page_table[next_page].allocated == BOXED_PAGE)
-                 || (page_table[next_page].allocated == UNBOXED_PAGE));
+       gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
+                 || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].first_object_offset ==
                  -PAGE_BYTES*(next_page-first_page));
@@ -2378,8 +2379,8 @@ maybe_adjust_large_object(lispobj *where)
     next_page++;
     while ((old_bytes_used == PAGE_BYTES) &&
           (page_table[next_page].gen == from_space) &&
-          ((page_table[next_page].allocated == UNBOXED_PAGE)
-           || (page_table[next_page].allocated == BOXED_PAGE)) &&
+          ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+           || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
           page_table[next_page].large_object &&
           (page_table[next_page].first_object_offset ==
            -(next_page - first_page)*PAGE_BYTES)) {
@@ -2390,7 +2391,7 @@ maybe_adjust_large_object(lispobj *where)
        gc_assert(page_table[next_page].write_protected == 0);
 
        old_bytes_used = page_table[next_page].bytes_used;
-       page_table[next_page].allocated = FREE_PAGE;
+       page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
@@ -2430,13 +2431,13 @@ preserve_pointer(void *addr)
 
     /* quick check 1: Address is quite likely to have been invalid. */
     if ((addr_page_index == -1)
-       || (page_table[addr_page_index].allocated == FREE_PAGE)
+       || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
        || (page_table[addr_page_index].bytes_used == 0)
        || (page_table[addr_page_index].gen != from_space)
        /* Skip if already marked dont_move. */
        || (page_table[addr_page_index].dont_move != 0))
        return;
-    gc_assert(!(page_table[addr_page_index].allocated & OPEN_REGION_PAGE));
+    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
     /* (Now that we know that addr_page_index is in range, it's
      * safe to index into page_table[] with it.) */
     region_allocation = page_table[addr_page_index].allocated;
@@ -2486,7 +2487,7 @@ preserve_pointer(void *addr)
         * free area in which case it's ignored here. Note it gets
         * through the valid pointer test above because the tail looks
         * like conses. */
-       if ((page_table[addr_page_index].allocated == FREE_PAGE)
+       if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
            || (page_table[addr_page_index].bytes_used == 0)
            /* Check the offset within the page. */
            || (((unsigned)addr & (PAGE_BYTES - 1))
@@ -2525,7 +2526,7 @@ preserve_pointer(void *addr)
        /* Check whether this is the last page in this contiguous block.. */
        if ((page_table[i].bytes_used < PAGE_BYTES)
            /* ..or it is PAGE_BYTES and is the last in the block */
-           || (page_table[i+1].allocated == FREE_PAGE)
+           || (page_table[i+1].allocated == FREE_PAGE_FLAG)
            || (page_table[i+1].bytes_used == 0) /* next page free */
            || (page_table[i+1].gen != from_space) /* diff. gen */
            || (page_table[i+1].first_object_offset == 0))
@@ -2559,13 +2560,13 @@ update_page_write_prot(int page)
     int num_words = page_table[page].bytes_used / 4;
 
     /* Shouldn't be a free page. */
-    gc_assert(page_table[page].allocated != FREE_PAGE);
+    gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
     gc_assert(page_table[page].bytes_used != 0);
 
     /* Skip if it's already write-protected, pinned, or unboxed */
     if (page_table[page].write_protected
        || page_table[page].dont_move
-       || (page_table[page].allocated & UNBOXED_PAGE))
+       || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
        return (0);
 
     /* Scan the page for pointers to younger generations or the
@@ -2578,7 +2579,7 @@ update_page_write_prot(int page)
        /* Check that it's in the dynamic space */
        if (index != -1)
            if (/* Does it point to a younger or the temp. generation? */
-               ((page_table[index].allocated != FREE_PAGE)
+               ((page_table[index].allocated != FREE_PAGE_FLAG)
                 && (page_table[index].bytes_used != 0)
                 && ((page_table[index].gen < gen)
                     || (page_table[index].gen == NUM_GENERATIONS)))
@@ -2653,7 +2654,7 @@ scavenge_generation(int generation)
 #endif
 
     for (i = 0; i < last_free_page; i++) {
-       if ((page_table[i].allocated & BOXED_PAGE)
+       if ((page_table[i].allocated & BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            int last_page,j;
@@ -2668,7 +2669,7 @@ scavenge_generation(int generation)
                    write_protected && page_table[last_page].write_protected;
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
-                   || (!(page_table[last_page+1].allocated & BOXED_PAGE))
+                   || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
@@ -2699,7 +2700,7 @@ scavenge_generation(int generation)
     /* Check that none of the write_protected pages in this generation
      * have been written to. */
     for (i = 0; i < NUM_PAGES; i++) {
-       if ((page_table[i].allocation ! =FREE_PAGE)
+       if ((page_table[i].allocation != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)) {
@@ -2753,7 +2754,7 @@ scavenge_newspace_generation_one_scan(int generation)
           generation));
     for (i = 0; i < last_free_page; i++) {
        /* Note that this skips over open regions when it encounters them. */
-       if ((page_table[i].allocated & BOXED_PAGE)
+       if ((page_table[i].allocated & BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && ((page_table[i].write_protected == 0)
@@ -2781,7 +2782,7 @@ scavenge_newspace_generation_one_scan(int generation)
                 * contiguous block */
                if ((page_table[last_page].bytes_used < PAGE_BYTES)
                    /* Or it is PAGE_BYTES and is the last in the block */
-                   || (!(page_table[last_page+1].allocated & BOXED_PAGE))
+                   || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
                    || (page_table[last_page+1].bytes_used == 0)
                    || (page_table[last_page+1].gen != generation)
                    || (page_table[last_page+1].first_object_offset == 0))
@@ -2922,7 +2923,7 @@ scavenge_newspace_generation(int generation)
     /* Check that none of the write_protected pages in this generation
      * have been written to. */
     for (i = 0; i < NUM_PAGES; i++) {
-       if ((page_table[i].allocation != FREE_PAGE)
+       if ((page_table[i].allocation != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected_cleared != 0)
@@ -2945,7 +2946,7 @@ unprotect_oldspace(void)
     int i;
 
     for (i = 0; i < last_free_page; i++) {
-       if ((page_table[i].allocated != FREE_PAGE)
+       if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == from_space)) {
            void *page_start;
@@ -2978,7 +2979,7 @@ free_oldspace(void)
     do {
        /* Find a first page for the next region of pages. */
        while ((first_page < last_free_page)
-              && ((page_table[first_page].allocated == FREE_PAGE)
+              && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
                   || (page_table[first_page].bytes_used == 0)
                   || (page_table[first_page].gen != from_space)))
            first_page++;
@@ -2994,7 +2995,7 @@ free_oldspace(void)
            bytes_freed += page_table[last_page].bytes_used;
            generations[page_table[last_page].gen].bytes_allocated -=
                page_table[last_page].bytes_used;
-           page_table[last_page].allocated = FREE_PAGE;
+           page_table[last_page].allocated = FREE_PAGE_FLAG;
            page_table[last_page].bytes_used = 0;
 
            /* Remove any write-protection. We should be able to rely
@@ -3010,7 +3011,7 @@ free_oldspace(void)
            last_page++;
        }
        while ((last_page < last_free_page)
-              && (page_table[last_page].allocated != FREE_PAGE)
+              && (page_table[last_page].allocated != FREE_PAGE_FLAG)
               && (page_table[last_page].bytes_used != 0)
               && (page_table[last_page].gen == from_space));
 
@@ -3111,7 +3112,7 @@ verify_space(lispobj *start, size_t words)
            if (page_index != -1) {
                /* If it's within the dynamic space it should point to a used
                 * page. XX Could check the offset too. */
-               if ((page_table[page_index].allocated != FREE_PAGE)
+               if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
                    && (page_table[page_index].bytes_used == 0))
                    lose ("Ptr %x @ %x sees free page.", thing, start);
                /* Check that it doesn't point to a forwarding pointer! */
@@ -3326,7 +3327,7 @@ verify_generation(int  generation)
     int i;
 
     for (i = 0; i < last_free_page; i++) {
-       if ((page_table[i].allocated != FREE_PAGE)
+       if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && (page_table[i].gen == generation)) {
            int last_page;
@@ -3365,7 +3366,7 @@ verify_zero_fill(void)
     int page;
 
     for (page = 0; page < last_free_page; page++) {
-       if (page_table[page].allocated == FREE_PAGE) {
+       if (page_table[page].allocated == FREE_PAGE_FLAG) {
            /* The whole page should be zero filled. */
            int *start_addr = (int *)page_address(page);
            int size = 1024;
@@ -3423,7 +3424,7 @@ write_protect_generation_pages(int generation)
     gc_assert(generation < NUM_GENERATIONS);
 
     for (i = 0; i < last_free_page; i++)
-       if ((page_table[i].allocated == BOXED_PAGE)
+       if ((page_table[i].allocated == BOXED_PAGE_FLAG)
            && (page_table[i].bytes_used != 0)
            && !page_table[i].dont_move
            && (page_table[i].gen == generation))  {
@@ -3708,7 +3709,7 @@ update_x86_dynamic_space_free_pointer(void)
     int i;
 
     for (i = 0; i < NUM_PAGES; i++)
-       if ((page_table[i].allocated != FREE_PAGE)
+       if ((page_table[i].allocated != FREE_PAGE_FLAG)
            && (page_table[i].bytes_used != 0))
            last_page = i;
 
@@ -3859,15 +3860,15 @@ gc_free_heap(void)
 
     for (page = 0; page < NUM_PAGES; page++) {
        /* Skip free pages which should already be zero filled. */
-       if (page_table[page].allocated != FREE_PAGE) {
+       if (page_table[page].allocated != FREE_PAGE_FLAG) {
            void *page_start, *addr;
 
            /* Mark the page free. The other slots are assumed invalid
-            * when it is a FREE_PAGE and bytes_used is 0 and it
+            * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
             * should not be write-protected -- except that the
             * generation is used for the current region but it sets
             * that up. */
-           page_table[page].allocated = FREE_PAGE;
+           page_table[page].allocated = FREE_PAGE_FLAG;
            page_table[page].bytes_used = 0;
 
            /* Zero the page. */
@@ -3887,7 +3888,7 @@ gc_free_heap(void)
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            int *page_start, i;
-           gc_assert(page_table[page].allocated == FREE_PAGE);
+           gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (int *)page_address(page);
            for (i=0; i<1024; i++) {
@@ -3947,7 +3948,7 @@ gc_init(void)
     /* Initialize each page structure. */
     for (i = 0; i < NUM_PAGES; i++) {
        /* Initialize all pages as free. */
-       page_table[i].allocated = FREE_PAGE;
+       page_table[i].allocated = FREE_PAGE_FLAG;
        page_table[i].bytes_used = 0;
 
        /* Pages are not write-protected at startup. */
@@ -3997,7 +3998,7 @@ gencgc_pickup_dynamic(void)
 
     do {
        lispobj *first,*ptr= (lispobj *)page_address(page);
-       page_table[page].allocated = BOXED_PAGE;
+       page_table[page].allocated = BOXED_PAGE_FLAG;
        page_table[page].gen = 0;
        page_table[page].bytes_used = PAGE_BYTES;
        page_table[page].large_object = 0;
version.lisp-expr
index 2ef449c..08f7f72 100644
@@ -17,4 +17,4 @@
 ;;; checkins which aren't released. (And occasionally for internal
 ;;; versions, especially for internal versions off the main CVS
 ;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"0.8.7.31"
+"0.8.7.32"