- int j;
- int boxed_cnt = 0;
- int unboxed_cnt = 0;
- int large_boxed_cnt = 0;
- int large_unboxed_cnt = 0;
- int pinned_cnt=0;
-
- for (j = 0; j < last_free_page; j++)
- if (page_table[j].gen == i) {
-
- /* Count the number of boxed pages within the given
- * generation. */
- if (page_table[j].allocated & BOXED_PAGE_FLAG) {
- if (page_table[j].large_object)
- large_boxed_cnt++;
- else
- boxed_cnt++;
- }
- if(page_table[j].dont_move) pinned_cnt++;
- /* Count the number of unboxed pages within the given
- * generation. */
- if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
- if (page_table[j].large_object)
- large_unboxed_cnt++;
- else
- unboxed_cnt++;
- }
- }
-
- gc_assert(generations[i].bytes_allocated
- == count_generation_bytes_allocated(i));
- fprintf(stderr,
- " %1d: %5d %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
- i,
- boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
- pinned_cnt,
- generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES
- - generations[i].bytes_allocated),
- generations[i].gc_trigger,
- count_write_protect_generation_pages(i),
- generations[i].num_gc,
- gen_av_mem_age(i));
+ page_index_t j;
+ long boxed_cnt = 0;
+ long unboxed_cnt = 0;
+ long large_boxed_cnt = 0;
+ long large_unboxed_cnt = 0;
+ long pinned_cnt=0;
+
+ for (j = 0; j < last_free_page; j++)
+ if (page_table[j].gen == i) {
+
+ /* Count the number of boxed pages within the given
+ * generation. */
+ if (page_table[j].allocated & BOXED_PAGE_FLAG) {
+ if (page_table[j].large_object)
+ large_boxed_cnt++;
+ else
+ boxed_cnt++;
+ }
+ if(page_table[j].dont_move) pinned_cnt++;
+ /* Count the number of unboxed pages within the given
+ * generation. */
+ if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+ if (page_table[j].large_object)
+ large_unboxed_cnt++;
+ else
+ unboxed_cnt++;
+ }
+ }
+
+ gc_assert(generations[i].bytes_allocated
+ == count_generation_bytes_allocated(i));
+ fprintf(stderr,
+ " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ i,
+ generations[i].alloc_start_page,
+ generations[i].alloc_unboxed_start_page,
+ generations[i].alloc_large_start_page,
+ generations[i].alloc_large_unboxed_start_page,
+ boxed_cnt,
+ unboxed_cnt,
+ large_boxed_cnt,
+ large_unboxed_cnt,
+ pinned_cnt,
+ generations[i].bytes_allocated,
+ (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated),
+ generations[i].gc_trigger,
+ count_write_protect_generation_pages(i),
+ generations[i].num_gc,
+ gen_av_mem_age(i));
- /* some bytes were allocated in the region */
- orig_first_page_bytes_used = page_table[first_page].bytes_used;
-
- gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
-
- /* All the pages used need to be updated */
-
- /* Update the first page. */
-
- /* If the page was free then set up the gen, and
- * first_object_offset. */
- if (page_table[first_page].bytes_used == 0)
- gc_assert(page_table[first_page].first_object_offset == 0);
- page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
-
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[first_page].gen == gc_alloc_generation);
- gc_assert(page_table[first_page].large_object == 0);
-
- byte_cnt = 0;
-
- /* Calculate the number of bytes used in this page. This is not
- * always the number of new bytes, unless it was free. */
- more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
- }
- page_table[first_page].bytes_used = bytes_used;
- byte_cnt += bytes_used;
-
-
- /* All the rest of the pages should be free. We need to set their
- * first_object_offset pointer to the start of the region, and set
- * the bytes_used. */
- while (more) {
- page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (unboxed)
- gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[next_page].bytes_used == 0);
- gc_assert(page_table[next_page].gen == gc_alloc_generation);
- gc_assert(page_table[next_page].large_object == 0);
-
- gc_assert(page_table[next_page].first_object_offset ==
- alloc_region->start_addr - page_address(next_page));
-
- /* Calculate the number of bytes used in this page. */
- more = 0;
- if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
- }
- page_table[next_page].bytes_used = bytes_used;
- byte_cnt += bytes_used;
-
- next_page++;
- }
-
- region_size = alloc_region->free_pointer - alloc_region->start_addr;
- bytes_allocated += region_size;
- generations[gc_alloc_generation].bytes_allocated += region_size;
-
- gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
-
- /* Set the generations alloc restart page to the last page of
- * the region. */
- if (unboxed)
- generations[gc_alloc_generation].alloc_unboxed_start_page =
- next_page-1;
- else
- generations[gc_alloc_generation].alloc_start_page = next_page-1;
-
- /* Add the region to the new_areas if requested. */
- if (!unboxed)
- add_new_area(first_page,orig_first_page_bytes_used, region_size);
-
- /*
- FSHOW((stderr,
- "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
- region_size,
- gc_alloc_generation));
- */
+ /* some bytes were allocated in the region */
+ orig_first_page_bytes_used = page_table[first_page].bytes_used;
+
+ gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
+
+ /* All the pages used need to be updated */
+
+ /* Update the first page. */
+
+ /* If the page was free then set up the gen, and
+ * first_object_offset. */
+ if (page_table[first_page].bytes_used == 0)
+ gc_assert(page_table[first_page].first_object_offset == 0);
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
+
+ if (unboxed)
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
+ else
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].gen == gc_alloc_generation);
+ gc_assert(page_table[first_page].large_object == 0);
+
+ byte_cnt = 0;
+
+ /* Calculate the number of bytes used in this page. This is not
+ * always the number of new bytes, unless it was free. */
+ more = 0;
+ if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
+ more = 1;
+ }
+ page_table[first_page].bytes_used = bytes_used;
+ byte_cnt += bytes_used;
+
+
+ /* All the rest of the pages should be free. We need to set their
+ * first_object_offset pointer to the start of the region, and set
+ * the bytes_used. */
+ while (more) {
+ page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
+ if (unboxed)
+ gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
+ else
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[next_page].bytes_used == 0);
+ gc_assert(page_table[next_page].gen == gc_alloc_generation);
+ gc_assert(page_table[next_page].large_object == 0);
+
+ gc_assert(page_table[next_page].first_object_offset ==
+ alloc_region->start_addr - page_address(next_page));
+
+ /* Calculate the number of bytes used in this page. */
+ more = 0;
+ if ((bytes_used = (alloc_region->free_pointer
+ - page_address(next_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
+ more = 1;
+ }
+ page_table[next_page].bytes_used = bytes_used;
+ byte_cnt += bytes_used;
+
+ next_page++;
+ }
+
+ region_size = alloc_region->free_pointer - alloc_region->start_addr;
+ bytes_allocated += region_size;
+ generations[gc_alloc_generation].bytes_allocated += region_size;
+
+ gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
+
+ /* Set the generations alloc restart page to the last page of
+ * the region. */
+ if (unboxed)
+ generations[gc_alloc_generation].alloc_unboxed_start_page =
+ next_page-1;
+ else
+ generations[gc_alloc_generation].alloc_start_page = next_page-1;
+
+ /* Add the region to the new_areas if requested. */
+ if (!unboxed)
+ add_new_area(first_page,orig_first_page_bytes_used, region_size);
+
+ /*
+ FSHOW((stderr,
+ "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
+ region_size,
+ gc_alloc_generation));
+ */
- first_page = restart_page;
- if (large_p)
- while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE_FLAG))
- first_page++;
- else
- while (first_page < NUM_PAGES) {
- if(page_table[first_page].allocated == FREE_PAGE_FLAG)
- break;
- if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0)) {
- break;
- }
- first_page++;
- }
-
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_find_free_space failed (first_page), nbytes=%d.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
- }
-
- gc_assert(page_table[first_page].write_protected == 0);
-
- last_page = first_page;
- bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes)
- || (!large_p && (num_pages < 2)))
- && (last_page < (NUM_PAGES-1))
- && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
- last_page++;
- num_pages++;
- bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
- }
-
- region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
- restart_page = last_page + 1;
- } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
+ first_page = restart_page;
+ if (large_p)
+ while ((first_page < page_table_pages)
+ && (page_table[first_page].allocated != FREE_PAGE_FLAG))
+ first_page++;
+ else
+ while (first_page < page_table_pages) {
+ if(page_table[first_page].allocated == FREE_PAGE_FLAG)
+ break;
+ if((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0)) {
+ break;
+ }
+ first_page++;
+ }
+
+ if (first_page >= page_table_pages)
+ gc_heap_exhausted_error_or_lose(0, nbytes);
+
+ gc_assert(page_table[first_page].write_protected == 0);
+
+ last_page = first_page;
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
+ num_pages = 1;
+ while (((bytes_found < nbytes)
+ || (!large_p && (num_pages < 2)))
+ && (last_page < (page_table_pages-1))
+ && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ last_page++;
+ num_pages++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+
+ region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
+
+ gc_assert(bytes_found == region_size);
+ restart_page = last_page + 1;
+ } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
- /* Promote the object. */
-
- int remaining_bytes;
- int next_page;
- int bytes_freed;
- int old_bytes_used;
-
- /* Note: Any page write-protection must be removed, else a
- * later scavenge_newspace may incorrectly not scavenge these
- * pages. This would not be necessary if they are added to the
- * new areas, but let's do it for them all (they'll probably
- * be written anyway?). */
-
- gc_assert(page_table[first_page].first_object_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*4;
- while (remaining_bytes > PAGE_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
-
- page_table[next_page].gen = new_space;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[next_page].write_protected = 0;
- }
- remaining_bytes -= PAGE_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk
- * so there may be more unused pages which will be freed. */
-
- /* The object may have shrunk but shouldn't have grown. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
- page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
- /* Checks out OK, free the page. Don't need to bother zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected as they
- * should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
- generations[new_space].bytes_allocated += 4*nwords;
- bytes_allocated -= bytes_freed;
-
- /* Add the region to the new_areas if requested. */
- add_new_area(first_page,0,nwords*4);
-
- return(object);
+ /* Promote the object. */
+
+ long remaining_bytes;
+ page_index_t next_page;
+ long bytes_freed;
+ long old_bytes_used;
+
+ /* Note: Any page write-protection must be removed, else a
+ * later scavenge_newspace may incorrectly not scavenge these
+ * pages. This would not be necessary if they are added to the
+ * new areas, but let's do it for them all (they'll probably
+ * be written anyway?). */
+
+ gc_assert(page_table[first_page].first_object_offset == 0);
+
+ next_page = first_page;
+ remaining_bytes = nwords*N_WORD_BYTES;
+ while (remaining_bytes > PAGE_BYTES) {
+ gc_assert(page_table[next_page].gen == from_space);
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[next_page].large_object);
+ gc_assert(page_table[next_page].first_object_offset==
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+
+ page_table[next_page].gen = new_space;
+
+ /* Remove any write-protection. We should be able to rely
+ * on the write-protect flag to avoid redundant calls. */
+ if (page_table[next_page].write_protected) {
+ os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[next_page].write_protected = 0;
+ }
+ remaining_bytes -= PAGE_BYTES;
+ next_page++;
+ }
+
+ /* Now only one page remains, but the object may have shrunk
+ * so there may be more unused pages which will be freed. */
+
+ /* The object may have shrunk but shouldn't have grown. */
+ gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
+
+ page_table[next_page].gen = new_space;
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+
+ /* Adjust the bytes_used. */
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].bytes_used = remaining_bytes;
+
+ bytes_freed = old_bytes_used - remaining_bytes;
+
+ /* Free any remaining pages; needs care. */
+ next_page++;
+ while ((old_bytes_used == PAGE_BYTES) &&
+ (page_table[next_page].gen == from_space) &&
+ (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
+ page_table[next_page].large_object &&
+ (page_table[next_page].first_object_offset ==
+ -(next_page - first_page)*PAGE_BYTES)) {
+ /* Checks out OK, free the page. Don't need to bother zeroing
+ * pages as this should have been done before shrinking the
+ * object. These pages shouldn't be write-protected as they
+ * should be zero filled. */
+ gc_assert(page_table[next_page].write_protected == 0);
+
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ page_table[next_page].bytes_used = 0;
+ bytes_freed += old_bytes_used;
+ next_page++;
+ }
+
+ generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
+ bytes_freed;
+ generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
+ bytes_allocated -= bytes_freed;
+
+ /* Add the region to the new_areas if requested. */
+ add_new_area(first_page,0,nwords*N_WORD_BYTES);
+
+ return(object);
- /* Promote the object. Note: Unboxed objects may have been
- * allocated to a BOXED region so it may be necessary to
- * change the region to UNBOXED. */
- int remaining_bytes;
- int next_page;
- int bytes_freed;
- int old_bytes_used;
-
- gc_assert(page_table[first_page].first_object_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*4;
- while (remaining_bytes > PAGE_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- remaining_bytes -= PAGE_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk so
- * there may be more unused pages which will be freed. */
-
- /* Object may have shrunk but shouldn't have grown - check. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
- page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
- /* Checks out OK, free the page. Don't need to both zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected, even if
- * boxed they should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- if ((bytes_freed > 0) && gencgc_verbose)
- FSHOW((stderr,
- "/copy_large_unboxed bytes_freed=%d\n",
- bytes_freed));
-
- generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
- generations[new_space].bytes_allocated += 4*nwords;
- bytes_allocated -= bytes_freed;
-
- return(object);
+ /* Promote the object. Note: Unboxed objects may have been
+ * allocated to a BOXED region so it may be necessary to
+ * change the region to UNBOXED. */
+ long remaining_bytes;
+ page_index_t next_page;
+ long bytes_freed;
+ long old_bytes_used;
+
+ gc_assert(page_table[first_page].first_object_offset == 0);
+
+ next_page = first_page;
+ remaining_bytes = nwords*N_WORD_BYTES;
+ while (remaining_bytes > PAGE_BYTES) {
+ gc_assert(page_table[next_page].gen == from_space);
+ gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
+ gc_assert(page_table[next_page].large_object);
+ gc_assert(page_table[next_page].first_object_offset==
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+
+ page_table[next_page].gen = new_space;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+ remaining_bytes -= PAGE_BYTES;
+ next_page++;
+ }
+
+ /* Now only one page remains, but the object may have shrunk so
+ * there may be more unused pages which will be freed. */
+
+ /* Object may have shrunk but shouldn't have grown - check. */
+ gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
+
+ page_table[next_page].gen = new_space;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+
+ /* Adjust the bytes_used. */
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].bytes_used = remaining_bytes;
+
+ bytes_freed = old_bytes_used - remaining_bytes;
+
+ /* Free any remaining pages; needs care. */
+ next_page++;
+ while ((old_bytes_used == PAGE_BYTES) &&
+ (page_table[next_page].gen == from_space) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_table[next_page].large_object &&
+ (page_table[next_page].first_object_offset ==
+ -(next_page - first_page)*PAGE_BYTES)) {
+            /* Checks out OK, free the page. Don't need to bother zeroing
+ * pages as this should have been done before shrinking the
+ * object. These pages shouldn't be write-protected, even if
+ * boxed they should be zero filled. */
+ gc_assert(page_table[next_page].write_protected == 0);
+
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ page_table[next_page].bytes_used = 0;
+ bytes_freed += old_bytes_used;
+ next_page++;
+ }
+
+ if ((bytes_freed > 0) && gencgc_verbose)
+ FSHOW((stderr,
+ "/copy_large_unboxed bytes_freed=%d\n",
+ bytes_freed));
+
+ generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
+ generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
+ bytes_allocated -= bytes_freed;
+
+ return(object);
- /* Check for code references. */
- /* Check for a 32 bit word that looks like an absolute
- reference to within the code adea of the code object. */
- if ((data >= (code_start_addr-displacement))
- && (data < (code_end_addr-displacement))) {
- /* function header */
- if ((d4 == 0x5e)
- && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
- /* Skip the function header */
- p += 6*4 - 4 - 1;
- continue;
- }
- /* the case of PUSH imm32 */
- if (d1 == 0x68) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/PUSH $0x%.8x\n", data));
- }
- /* the case of MOV [reg-8],imm32 */
- if ((d3 == 0xc7)
- && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
- || d2==0x45 || d2==0x46 || d2==0x47)
- && (d1 == 0xf8)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
- }
- /* the case of LEA reg,[disp32] */
- if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
- }
- }
-
- /* Check for constant references. */
- /* Check for a 32 bit word that looks like an absolute
- reference to within the constant vector. Constant references
- will be aligned. */
- if ((data >= (constants_start_addr-displacement))
- && (data < (constants_end_addr-displacement))
- && (((unsigned)data & 0x3) == 0)) {
- /* Mov eax,m32 */
- if (d1 == 0xa1) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
- }
-
- /* the case of MOV m32,EAX */
- if (d1 == 0xa3) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
- }
-
- /* the case of CMP m32,imm32 */
- if ((d1 == 0x3d) && (d2 == 0x81)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- /* XX Check this */
- FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
- }
-
- /* Check for a mod=00, r/m=101 byte. */
- if ((d1 & 0xc7) == 5) {
- /* Cmp m32,reg */
- if (d2 == 0x39) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
- }
- /* the case of CMP reg32,m32 */
- if (d2 == 0x3b) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
- }
- /* the case of MOV m32,reg32 */
- if (d2 == 0x89) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
- }
- /* the case of MOV reg32,m32 */
- if (d2 == 0x8b) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
- }
- /* the case of LEA reg32,m32 */
- if (d2 == 0x8d) {
- fixup_found = 1;
- FSHOW((stderr,
- "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
- }
- }
- }
+ /* Check for code references. */
+ /* Check for a 32 bit word that looks like an absolute
+       reference to within the code area of the code object.
+ if ((data >= (code_start_addr-displacement))
+ && (data < (code_end_addr-displacement))) {
+ /* function header */
+ if ((d4 == 0x5e)
+ && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+ /* Skip the function header */
+ p += 6*4 - 4 - 1;
+ continue;
+ }
+ /* the case of PUSH imm32 */
+ if (d1 == 0x68) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/PUSH $0x%.8x\n", data));
+ }
+ /* the case of MOV [reg-8],imm32 */
+ if ((d3 == 0xc7)
+ && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
+ || d2==0x45 || d2==0x46 || d2==0x47)
+ && (d1 == 0xf8)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
+ }
+ /* the case of LEA reg,[disp32] */
+ if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
+ }
+ }
+
+ /* Check for constant references. */
+ /* Check for a 32 bit word that looks like an absolute
+ reference to within the constant vector. Constant references
+ will be aligned. */
+ if ((data >= (constants_start_addr-displacement))
+ && (data < (constants_end_addr-displacement))
+ && (((unsigned)data & 0x3) == 0)) {
+ /* Mov eax,m32 */
+ if (d1 == 0xa1) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
+ }
+
+ /* the case of MOV m32,EAX */
+ if (d1 == 0xa3) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
+ }
+
+ /* the case of CMP m32,imm32 */
+ if ((d1 == 0x3d) && (d2 == 0x81)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ /* XX Check this */
+ FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
+ }
+
+ /* Check for a mod=00, r/m=101 byte. */
+ if ((d1 & 0xc7) == 5) {
+ /* Cmp m32,reg */
+ if (d2 == 0x39) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
+ }
+ /* the case of CMP reg32,m32 */
+ if (d2 == 0x3b) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
+ }
+ /* the case of MOV m32,reg32 */
+ if (d2 == 0x89) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
+ }
+ /* the case of MOV reg32,m32 */
+ if (d2 == 0x8b) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
+ }
+ /* the case of LEA reg32,m32 */
+ if (d2 == 0x8d) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
+ }
+ }
+ }
- if (widetag_of(fixups_vector->header) ==
- SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG) {
- /* Got the fixups for the code block. Now work through the vector,
- and apply a fixup at each address. */
- int length = fixnum_value(fixups_vector->length);
- int i;
- for (i = 0; i < length; i++) {
- unsigned offset = fixups_vector->data[i];
- /* Now check the current value of offset. */
- unsigned old_value =
- *(unsigned *)((unsigned)code_start_addr + offset);
-
- /* If it's within the old_code object then it must be an
- * absolute fixup (relative ones are not saved) */
- if ((old_value >= (unsigned)old_code)
- && (old_value < ((unsigned)old_code + nwords*4)))
- /* So add the dispacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) =
- old_value + displacement;
- else
- /* It is outside the old code object so it must be a
- * relative fixup (absolute fixups are not saved). So
- * subtract the displacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) =
- old_value - displacement;
- }
+ if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
+ /* Got the fixups for the code block. Now work through the vector,
+ and apply a fixup at each address. */
+ long length = fixnum_value(fixups_vector->length);
+ long i;
+ for (i = 0; i < length; i++) {
+ unsigned long offset = fixups_vector->data[i];
+ /* Now check the current value of offset. */
+ unsigned long old_value =
+ *(unsigned long *)((unsigned long)code_start_addr + offset);
+
+ /* If it's within the old_code object then it must be an
+ * absolute fixup (relative ones are not saved) */
+ if ((old_value >= (unsigned long)old_code)
+ && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES)))
+                /* So add the displacement. */
+ *(unsigned long *)((unsigned long)code_start_addr + offset) =
+ old_value + displacement;
+ else
+ /* It is outside the old code object so it must be a
+ * relative fixup (absolute fixups are not saved). So
+ * subtract the displacement. */
+ *(unsigned long *)((unsigned long)code_start_addr + offset) =
+ old_value - displacement;
+ }
+ } else {
+ /* This used to just print a note to stderr, but a bogus fixup seems to
+         * indicate real heap corruption, so a hard failure is in order. */
+ lose("fixup vector %p has a bad widetag: %d\n", fixups_vector, widetag_of(fixups_vector->header));
- /* Work through the KV vector. */
- {
- int i;
- for (i = 1; i < next_vector_length; i++) {
- lispobj old_key = kv_vector[2*i];
- unsigned int old_index = (old_key & 0x1fffffff)%length;
-
- /* Scavenge the key and value. */
- scavenge(&kv_vector[2*i],2);
-
- /* Check whether the key has moved and is EQ based. */
- {
- lispobj new_key = kv_vector[2*i];
- unsigned int new_index = (new_key & 0x1fffffff)%length;
-
- if ((old_index != new_index) &&
- ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
- ((new_key != empty_symbol) ||
- (kv_vector[2*i] != empty_symbol))) {
-
- /*FSHOW((stderr,
- "* EQ key %d moved from %x to %x; index %d to %d\n",
- i, old_key, new_key, old_index, new_index));*/
-
- if (index_vector[old_index] != 0) {
- /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
-
- /* Unlink the key from the old_index chain. */
- if (index_vector[old_index] == i) {
- /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
- index_vector[old_index] = next_vector[i];
- /* Link it into the needing rehash chain. */
- next_vector[i] = fixnum_value(hash_table[11]);
- hash_table[11] = make_fixnum(i);
- /*SHOW("P2");*/
- } else {
- unsigned prior = index_vector[old_index];
- unsigned next = next_vector[prior];
-
- /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
-
- while (next != 0) {
- /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
- if (next == i) {
- /* Unlink it. */
- next_vector[prior] = next_vector[next];
- /* Link it into the needing rehash
- * chain. */
- next_vector[next] =
- fixnum_value(hash_table[11]);
- hash_table[11] = make_fixnum(next);
- /*SHOW("/P3");*/
- break;
- }
- prior = next;
- next = next_vector[next];
- }
- }
- }
- }
- }
- }
+ return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
+}
+
+static lispobj
+trans_lutex(lispobj object)
+{
+ struct lutex *lutex = (struct lutex *) native_pointer(object);
+ lispobj copied;
+ size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
+ gc_assert(is_lisp_pointer(object));
+ copied = copy_object(object, words);
+
+ /* Update the links, since the lutex moved in memory. */
+ if (lutex->next) {
+ lutex->next->prev = (struct lutex *) native_pointer(copied);
+ }
+
+ if (lutex->prev) {
+ lutex->prev->next = (struct lutex *) native_pointer(copied);
+ } else {
+ generations[lutex->gen].lutexes =
+ (struct lutex *) native_pointer(copied);
- if ((unsigned)pointer !=
- ((int)start_addr+OTHER_POINTER_LOWTAG)) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wo1: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- /* Is it plausible? Not a cons. XXX should check the headers. */
- if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wo2: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- switch (widetag_of(start_addr[0])) {
- case UNBOUND_MARKER_WIDETAG:
- case BASE_CHAR_WIDETAG:
- if (gencgc_verbose)
- FSHOW((stderr,
- "*Wo3: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
-
- /* only pointed to by function pointers? */
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
- FSHOW((stderr,
- "*Wo4: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
-
- case INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
- FSHOW((stderr,
- "*Wo5: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
-
- /* the valid other immediate pointer objects */
- case SIMPLE_VECTOR_WIDETAG:
- case RATIO_WIDETAG:
- case COMPLEX_WIDETAG:
+ if ((unsigned long)pointer !=
+ ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wo1: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ /* Is it plausible? Not a cons. XXX should check the headers. */
+ if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wo2: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ switch (widetag_of(start_addr[0])) {
+ case UNBOUND_MARKER_WIDETAG:
+ case NO_TLS_VALUE_MARKER_WIDETAG:
+ case CHARACTER_WIDETAG:
+#if N_WORD_BITS == 64
+ case SINGLE_FLOAT_WIDETAG:
+#endif
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "*Wo3: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+
+ /* only pointed to by function pointers? */
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "*Wo4: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+
+ case INSTANCE_HEADER_WIDETAG:
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "*Wo5: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+
+ /* the valid other immediate pointer objects */
+ case SIMPLE_VECTOR_WIDETAG:
+ case RATIO_WIDETAG:
+ case COMPLEX_WIDETAG:
- gc_assert(page_table[i].allocated == region_allocation);
-
- /* Mark the page static. */
- page_table[i].dont_move = 1;
-
- /* Move the page to the new_space. XX I'd rather not do this
- * but the GC logic is not quite able to copy with the static
- * pages remaining in the from space. This also requires the
- * generation bytes_allocated counters be updated. */
- page_table[i].gen = new_space;
- generations[new_space].bytes_allocated += page_table[i].bytes_used;
- generations[from_space].bytes_allocated -= page_table[i].bytes_used;
-
- /* It is essential that the pages are not write protected as
- * they may have pointers into the old-space which need
- * scavenging. They shouldn't be write protected at this
- * stage. */
- gc_assert(!page_table[i].write_protected);
-
- /* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < PAGE_BYTES)
- /* ..or it is PAGE_BYTES and is the last in the block */
- || (page_table[i+1].allocated == FREE_PAGE_FLAG)
- || (page_table[i+1].bytes_used == 0) /* next page free */
- || (page_table[i+1].gen != from_space) /* diff. gen */
- || (page_table[i+1].first_object_offset == 0))
- break;
+ gc_assert(page_table[i].allocated == region_allocation);
+
+ /* Mark the page static. */
+ page_table[i].dont_move = 1;
+
+ /* Move the page to the new_space. XX I'd rather not do this
+     * but the GC logic is not quite able to cope with the static
+ * pages remaining in the from space. This also requires the
+ * generation bytes_allocated counters be updated. */
+ page_table[i].gen = new_space;
+ generations[new_space].bytes_allocated += page_table[i].bytes_used;
+ generations[from_space].bytes_allocated -= page_table[i].bytes_used;
+
+ /* It is essential that the pages are not write protected as
+ * they may have pointers into the old-space which need
+ * scavenging. They shouldn't be write protected at this
+ * stage. */
+ gc_assert(!page_table[i].write_protected);
+
+ /* Check whether this is the last page in this contiguous block.. */
+ if ((page_table[i].bytes_used < PAGE_BYTES)
+ /* ..or it is PAGE_BYTES and is the last in the block */
+ || (page_table[i+1].allocated == FREE_PAGE_FLAG)
+ || (page_table[i+1].bytes_used == 0) /* next page free */
+ || (page_table[i+1].gen != from_space) /* diff. gen */
+ || (page_table[i+1].first_object_offset == 0))
+ break;
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)) {
- int last_page,j;
- int write_protected=1;
-
- /* This should be the start of a region */
- gc_assert(page_table[i].first_object_offset == 0);
-
- /* Now work forward until the end of the region */
- for (last_page = i; ; last_page++) {
- write_protected =
- write_protected && page_table[last_page].write_protected;
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
- break;
- }
- if (!write_protected) {
- scavenge(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES)/4);
-
- /* Now scan the pages and write protect those that
- * don't have pointers to younger generations. */
- if (enable_page_protection) {
- for (j = i; j <= last_page; j++) {
- num_wp += update_page_write_prot(j);
- }
- }
- }
- i = last_page;
- }
- }
- if ((gencgc_verbose > 1) && (num_wp != 0)) {
- FSHOW((stderr,
- "/write protected %d pages within generation %d\n",
- num_wp, generation));
+ generation_index_t generation = page_table[i].gen;
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (generation != new_space)
+ && (generation >= from)
+ && (generation <= to)) {
+ page_index_t last_page,j;
+ int write_protected=1;
+
+ /* This should be the start of a region */
+ gc_assert(page_table[i].first_object_offset == 0);
+
+ /* Now work forward until the end of the region */
+ for (last_page = i; ; last_page++) {
+ write_protected =
+ write_protected && page_table[last_page].write_protected;
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (page_table[last_page+1].bytes_used == 0)
+ || (page_table[last_page+1].gen != generation)
+ || (page_table[last_page+1].first_object_offset == 0))
+ break;
+ }
+ if (!write_protected) {
+ scavenge(page_address(i),
+ (page_table[last_page].bytes_used +
+ (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+
+ /* Now scan the pages and write protect those that
+ * don't have pointers to younger generations. */
+ if (enable_page_protection) {
+ for (j = i; j <= last_page; j++) {
+ num_wp += update_page_write_prot(j);
+ }
+ }
+ if ((gencgc_verbose > 1) && (num_wp != 0)) {
+ FSHOW((stderr,
+ "/write protected %d pages within generation %d\n",
+ num_wp, generation));
+ }
+ }
+ i = last_page;
+ }
- /* Note that this skips over open regions when it encounters them. */
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)
- && ((page_table[i].write_protected == 0)
- /* (This may be redundant as write_protected is now
- * cleared before promotion.) */
- || (page_table[i].dont_move == 1))) {
- int last_page;
- int all_wp=1;
-
- /* The scavenge will start at the first_object_offset of page i.
- *
- * We need to find the full extent of this contiguous
- * block in case objects span pages.
- *
- * Now work forward until the end of this contiguous area
- * is found. A small area is preferred as there is a
- * better chance of its pages being write-protected. */
- for (last_page = i; ;last_page++) {
- /* If all pages are write-protected and movable,
- * then no need to scavenge */
- all_wp=all_wp && page_table[last_page].write_protected &&
- !page_table[last_page].dont_move;
-
- /* Check whether this is the last page in this
- * contiguous block */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
- break;
- }
-
- /* Do a limited check for write-protected pages. */
- if (!all_wp) {
- int size;
-
- size = (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES
- - page_table[i].first_object_offset)/4;
- new_areas_ignore_page = last_page;
-
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
-
- }
- i = last_page;
- }
+ /* Note that this skips over open regions when it encounters them. */
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)
+ && ((page_table[i].write_protected == 0)
+ /* (This may be redundant as write_protected is now
+ * cleared before promotion.) */
+ || (page_table[i].dont_move == 1))) {
+ page_index_t last_page;
+ int all_wp=1;
+
+ /* The scavenge will start at the first_object_offset of page i.
+ *
+ * We need to find the full extent of this contiguous
+ * block in case objects span pages.
+ *
+ * Now work forward until the end of this contiguous area
+ * is found. A small area is preferred as there is a
+ * better chance of its pages being write-protected. */
+ for (last_page = i; ;last_page++) {
+ /* If all pages are write-protected and movable,
+ * then no need to scavenge */
+ all_wp=all_wp && page_table[last_page].write_protected &&
+ !page_table[last_page].dont_move;
+
+ /* Check whether this is the last page in this
+ * contiguous block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (page_table[last_page+1].bytes_used == 0)
+ || (page_table[last_page+1].gen != generation)
+ || (page_table[last_page+1].first_object_offset == 0))
+ break;
+ }
+
+ /* Do a limited check for write-protected pages. */
+ if (!all_wp) {
+ long size;
+
+ size = (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES
+ - page_table[i].first_object_offset)/N_WORD_BYTES;
+ new_areas_ignore_page = last_page;
+
+ scavenge(page_address(i) +
+ page_table[i].first_object_offset,
+ size);
+
+ }
+ i = last_page;
+ }
- /* Move the current to the previous new areas */
- previous_new_areas = current_new_areas;
- previous_new_areas_index = current_new_areas_index;
-
- /* Scavenge all the areas in previous new areas. Any new areas
- * allocated are saved in current_new_areas. */
-
- /* Allocate an array for current_new_areas; alternating between
- * new_areas_1 and 2 */
- if (previous_new_areas == &new_areas_1)
- current_new_areas = &new_areas_2;
- else
- current_new_areas = &new_areas_1;
-
- /* Set up for gc_alloc(). */
- new_areas = current_new_areas;
- new_areas_index = 0;
-
- /* Check whether previous_new_areas had overflowed. */
- if (previous_new_areas_index >= NUM_NEW_AREAS) {
-
- /* New areas of objects allocated have been lost so need to do a
- * full scan to be sure! If this becomes a problem try
- * increasing NUM_NEW_AREAS. */
- if (gencgc_verbose)
- SHOW("new_areas overflow, doing full scavenge");
-
- /* Don't need to record new areas that get scavenge anyway
- * during scavenge_newspace_generation_one_scan. */
- record_new_objects = 1;
-
- scavenge_newspace_generation_one_scan(generation);
-
- /* Record all new areas now. */
- record_new_objects = 2;
-
- /* Flush the current regions updating the tables. */
- gc_alloc_update_all_page_tables();
-
- } else {
-
- /* Work through previous_new_areas. */
- for (i = 0; i < previous_new_areas_index; i++) {
- /* FIXME: All these bare *4 and /4 should be something
- * like BYTES_PER_WORD or WBYTES. */
- int page = (*previous_new_areas)[i].page;
- int offset = (*previous_new_areas)[i].offset;
- int size = (*previous_new_areas)[i].size / 4;
- gc_assert((*previous_new_areas)[i].size % 4 == 0);
- scavenge(page_address(page)+offset, size);
- }
-
- /* Flush the current regions updating the tables. */
- gc_alloc_update_all_page_tables();
- }
-
- current_new_areas_index = new_areas_index;
-
- /*FSHOW((stderr,
- "The re-scan has finished; current_new_areas_index=%d.\n",
- current_new_areas_index));*/
+ /* Move the current to the previous new areas */
+ previous_new_areas = current_new_areas;
+ previous_new_areas_index = current_new_areas_index;
+
+ /* Scavenge all the areas in previous new areas. Any new areas
+ * allocated are saved in current_new_areas. */
+
+ /* Allocate an array for current_new_areas; alternating between
+ * new_areas_1 and 2 */
+ if (previous_new_areas == &new_areas_1)
+ current_new_areas = &new_areas_2;
+ else
+ current_new_areas = &new_areas_1;
+
+ /* Set up for gc_alloc(). */
+ new_areas = current_new_areas;
+ new_areas_index = 0;
+
+ /* Check whether previous_new_areas had overflowed. */
+ if (previous_new_areas_index >= NUM_NEW_AREAS) {
+
+ /* New areas of objects allocated have been lost so need to do a
+ * full scan to be sure! If this becomes a problem try
+ * increasing NUM_NEW_AREAS. */
+ if (gencgc_verbose)
+ SHOW("new_areas overflow, doing full scavenge");
+
+ /* Don't need to record new areas that get scavenged
+ * anyway during scavenge_newspace_generation_one_scan. */
+ record_new_objects = 1;
+
+ scavenge_newspace_generation_one_scan(generation);
+
+ /* Record all new areas now. */
+ record_new_objects = 2;
+
+ scav_weak_hash_tables();
+
+ /* Flush the current regions updating the tables. */
+ gc_alloc_update_all_page_tables();
+
+ } else {
+
+ /* Work through previous_new_areas. */
+ for (i = 0; i < previous_new_areas_index; i++) {
+ long page = (*previous_new_areas)[i].page;
+ long offset = (*previous_new_areas)[i].offset;
+ long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+ gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
+ scavenge(page_address(page)+offset, size);
+ }
+
+ scav_weak_hash_tables();
+
+ /* Flush the current regions updating the tables. */
+ gc_alloc_update_all_page_tables();
+ }
+
+ current_new_areas_index = new_areas_index;
+
+ /*FSHOW((stderr,
+ "The re-scan has finished; current_new_areas_index=%d.\n",
+ current_new_areas_index));*/
- /* Find a first page for the next region of pages. */
- while ((first_page < last_free_page)
- && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
- || (page_table[first_page].bytes_used == 0)
- || (page_table[first_page].gen != from_space)))
- first_page++;
-
- if (first_page >= last_free_page)
- break;
-
- /* Find the last page of this region. */
- last_page = first_page;
-
- do {
- /* Free the page. */
- bytes_freed += page_table[last_page].bytes_used;
- generations[page_table[last_page].gen].bytes_allocated -=
- page_table[last_page].bytes_used;
- page_table[last_page].allocated = FREE_PAGE_FLAG;
- page_table[last_page].bytes_used = 0;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- {
- void *page_start = (void *)page_address(last_page);
-
- if (page_table[last_page].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[last_page].write_protected = 0;
- }
- }
- last_page++;
- }
- while ((last_page < last_free_page)
- && (page_table[last_page].allocated != FREE_PAGE_FLAG)
- && (page_table[last_page].bytes_used != 0)
- && (page_table[last_page].gen == from_space));
-
- /* Zero pages from first_page to (last_page-1).
- *
- * FIXME: Why not use os_zero(..) function instead of
- * hand-coding this again? (Check other gencgc_unmap_zero
- * stuff too. */
- if (gencgc_unmap_zero) {
- void *page_start, *addr;
-
- page_start = (void *)page_address(first_page);
-
- os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
- addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
- if (addr == NULL || addr != page_start) {
- lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
- addr);
- }
- } else {
- int *page_start;
-
- page_start = (int *)page_address(first_page);
- memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
- }
-
- first_page = last_page;
-
+ /* Find a first page for the next region of pages. */
+ while ((first_page < last_free_page)
+ && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
+ || (page_table[first_page].bytes_used == 0)
+ || (page_table[first_page].gen != from_space)))
+ first_page++;
+
+ if (first_page >= last_free_page)
+ break;
+
+ /* Find the last page of this region. */
+ last_page = first_page;
+
+ do {
+ /* Free the page. */
+ bytes_freed += page_table[last_page].bytes_used;
+ generations[page_table[last_page].gen].bytes_allocated -=
+ page_table[last_page].bytes_used;
+ page_table[last_page].allocated = FREE_PAGE_FLAG;
+ page_table[last_page].bytes_used = 0;
+
+ /* Remove any write-protection. We should be able to rely
+ * on the write-protect flag to avoid redundant calls. */
+ {
+ void *page_start = (void *)page_address(last_page);
+
+ if (page_table[last_page].write_protected) {
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[last_page].write_protected = 0;
+ }
+ }
+ last_page++;
+ }
+ while ((last_page < last_free_page)
+ && (page_table[last_page].allocated != FREE_PAGE_FLAG)
+ && (page_table[last_page].bytes_used != 0)
+ && (page_table[last_page].gen == from_space));
+
+#ifdef READ_PROTECT_FREE_PAGES
+ os_protect(page_address(first_page),
+ PAGE_BYTES*(last_page-first_page),
+ OS_VM_PROT_NONE);
+#endif
+ first_page = last_page;
- size_t count = 1;
- lispobj thing = *(lispobj*)start;
-
- if (is_lisp_pointer(thing)) {
- int page_index = find_page_index((void*)thing);
- int to_readonly_space =
- (READ_ONLY_SPACE_START <= thing &&
- thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
- int to_static_space =
- (STATIC_SPACE_START <= thing &&
- thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
-
- /* Does it point to the dynamic space? */
- if (page_index != -1) {
- /* If it's within the dynamic space it should point to a used
- * page. XX Could check the offset too. */
- if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
- && (page_table[page_index].bytes_used == 0))
- lose ("Ptr %x @ %x sees free page.", thing, start);
- /* Check that it doesn't point to a forwarding pointer! */
- if (*((lispobj *)native_pointer(thing)) == 0x01) {
- lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
- }
- /* Check that its not in the RO space as it would then be a
- * pointer from the RO to the dynamic space. */
- if (is_in_readonly_space) {
- lose("ptr to dynamic space %x from RO space %x",
- thing, start);
- }
- /* Does it point to a plausible object? This check slows
- * it down a lot (so it's commented out).
- *
- * "a lot" is serious: it ate 50 minutes cpu time on
- * my duron 950 before I came back from lunch and
- * killed it.
- *
- * FIXME: Add a variable to enable this
- * dynamically. */
- /*
- if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
- lose("ptr %x to invalid object %x", thing, start);
- }
- */
- } else {
- /* Verify that it points to another valid space. */
- if (!to_readonly_space && !to_static_space
- && (thing != (unsigned)&undefined_tramp)) {
- lose("Ptr %x @ %x sees junk.", thing, start);
- }
- }
- } else {
- if (!(fixnump(thing))) {
- /* skip fixnums */
- switch(widetag_of(*start)) {
-
- /* boxed objects */
- case SIMPLE_VECTOR_WIDETAG:
- case RATIO_WIDETAG:
- case COMPLEX_WIDETAG:
- case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_BASE_STRING_WIDETAG:
- case COMPLEX_VECTOR_NIL_WIDETAG:
- case COMPLEX_BIT_VECTOR_WIDETAG:
- case COMPLEX_VECTOR_WIDETAG:
- case COMPLEX_ARRAY_WIDETAG:
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- case VALUE_CELL_HEADER_WIDETAG:
- case SYMBOL_HEADER_WIDETAG:
- case BASE_CHAR_WIDETAG:
- case UNBOUND_MARKER_WIDETAG:
- case INSTANCE_HEADER_WIDETAG:
- case FDEFN_WIDETAG:
- count = 1;
- break;
-
- case CODE_HEADER_WIDETAG:
- {
- lispobj object = *start;
- struct code *code;
- int nheader_words, ncode_words, nwords;
- lispobj fheaderl;
- struct simple_fun *fheaderp;
-
- code = (struct code *) start;
-
- /* Check that it's not in the dynamic space.
- * FIXME: Isn't is supposed to be OK for code
- * objects to be in the dynamic space these days? */
- if (is_in_dynamic_space
- /* It's ok if it's byte compiled code. The trace
- * table offset will be a fixnum if it's x86
- * compiled code - check.
- *
- * FIXME: #^#@@! lack of abstraction here..
- * This line can probably go away now that
- * there's no byte compiler, but I've got
- * too much to worry about right now to try
- * to make sure. -- WHN 2001-10-06 */
- && fixnump(code->trace_table_offset)
- /* Only when enabled */
- && verify_dynamic_code_check) {
- FSHOW((stderr,
- "/code object at %x in the dynamic space\n",
- start));
- }
-
- ncode_words = fixnum_value(code->code_size);
- nheader_words = HeaderValue(object);
- nwords = ncode_words + nheader_words;
- nwords = CEILING(nwords, 2);
- /* Scavenge the boxed section of the code data block */
- verify_space(start + 1, nheader_words - 1);
-
- /* Scavenge the boxed section of each function
- * object in the code data block. */
- fheaderl = code->entry_points;
- while (fheaderl != NIL) {
- fheaderp =
- (struct simple_fun *) native_pointer(fheaderl);
- gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
- verify_space(&fheaderp->name, 1);
- verify_space(&fheaderp->arglist, 1);
- verify_space(&fheaderp->type, 1);
- fheaderl = fheaderp->next;
- }
- count = nwords;
- break;
- }
-
- /* unboxed objects */
- case BIGNUM_WIDETAG:
- case SINGLE_FLOAT_WIDETAG:
- case DOUBLE_FLOAT_WIDETAG:
+ size_t count = 1;
+ lispobj thing = *(lispobj*)start;
+
+ if (is_lisp_pointer(thing)) {
+ page_index_t page_index = find_page_index((void*)thing);
+ long to_readonly_space =
+ (READ_ONLY_SPACE_START <= thing &&
+ thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
+ long to_static_space =
+ (STATIC_SPACE_START <= thing &&
+ thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
+
+ /* Does it point to the dynamic space? */
+ if (page_index != -1) {
+ /* If it's within the dynamic space it should point to a used
+ * page. XX Could check the offset too. */
+ if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
+ && (page_table[page_index].bytes_used == 0))
+ lose ("Ptr %x @ %x sees free page.\n", thing, start);
+ /* Check that it doesn't point to a forwarding pointer! */
+ if (*((lispobj *)native_pointer(thing)) == 0x01) {
+ lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start);
+ }
+ /* Check that its not in the RO space as it would then be a
+ * pointer from the RO to the dynamic space. */
+ if (is_in_readonly_space) {
+ lose("ptr to dynamic space %x from RO space %x\n",
+ thing, start);
+ }
+ /* Does it point to a plausible object? This check slows
+ * it down a lot (so it's commented out).
+ *
+ * "a lot" is serious: it ate 50 minutes cpu time on
+ * my duron 950 before I came back from lunch and
+ * killed it.
+ *
+ * FIXME: Add a variable to enable this
+ * dynamically. */
+ /*
+ if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
+ lose("ptr %x to invalid object %x\n", thing, start);
+ }
+ */
+ } else {
+ /* Verify that it points to another valid space. */
+ if (!to_readonly_space && !to_static_space) {
+ lose("Ptr %x @ %x sees junk.\n", thing, start);
+ }
+ }
+ } else {
+ if (!(fixnump(thing))) {
+ /* skip fixnums */
+ switch(widetag_of(*start)) {
+
+ /* boxed objects */
+ case SIMPLE_VECTOR_WIDETAG:
+ case RATIO_WIDETAG:
+ case COMPLEX_WIDETAG:
+ case SIMPLE_ARRAY_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
+#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
+ case COMPLEX_CHARACTER_STRING_WIDETAG:
+#endif
+ case COMPLEX_VECTOR_NIL_WIDETAG:
+ case COMPLEX_BIT_VECTOR_WIDETAG:
+ case COMPLEX_VECTOR_WIDETAG:
+ case COMPLEX_ARRAY_WIDETAG:
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ case VALUE_CELL_HEADER_WIDETAG:
+ case SYMBOL_HEADER_WIDETAG:
+ case CHARACTER_WIDETAG:
+#if N_WORD_BITS == 64
+ case SINGLE_FLOAT_WIDETAG:
+#endif
+ case UNBOUND_MARKER_WIDETAG:
+ case FDEFN_WIDETAG:
+ count = 1;
+ break;
+
+ case INSTANCE_HEADER_WIDETAG:
+ {
+ lispobj nuntagged;
+ long ntotal = HeaderValue(thing);
+ lispobj layout = ((struct instance *)start)->slots[0];
+ if (!layout) {
+ count = 1;
+ break;
+ }
+ nuntagged = ((struct layout *)native_pointer(layout))->n_untagged_slots;
+ verify_space(start + 1, ntotal - fixnum_value(nuntagged));
+ count = ntotal + 1;
+ break;
+ }
+ case CODE_HEADER_WIDETAG:
+ {
+ lispobj object = *start;
+ struct code *code;
+ long nheader_words, ncode_words, nwords;
+ lispobj fheaderl;
+ struct simple_fun *fheaderp;
+
+ code = (struct code *) start;
+
+ /* Check that it's not in the dynamic space.
+ * FIXME: Isn't is supposed to be OK for code
+ * objects to be in the dynamic space these days? */
+ if (is_in_dynamic_space
+ /* It's ok if it's byte compiled code. The trace
+ * table offset will be a fixnum if it's x86
+ * compiled code - check.
+ *
+ * FIXME: #^#@@! lack of abstraction here..
+ * This line can probably go away now that
+ * there's no byte compiler, but I've got
+ * too much to worry about right now to try
+ * to make sure. -- WHN 2001-10-06 */
+ && fixnump(code->trace_table_offset)
+ /* Only when enabled */
+ && verify_dynamic_code_check) {
+ FSHOW((stderr,
+ "/code object at %x in the dynamic space\n",
+ start));
+ }
+
+ ncode_words = fixnum_value(code->code_size);
+ nheader_words = HeaderValue(object);
+ nwords = ncode_words + nheader_words;
+ nwords = CEILING(nwords, 2);
+ /* Scavenge the boxed section of the code data block */
+ verify_space(start + 1, nheader_words - 1);
+
+ /* Scavenge the boxed section of each function
+ * object in the code data block. */
+ fheaderl = code->entry_points;
+ while (fheaderl != NIL) {
+ fheaderp =
+ (struct simple_fun *) native_pointer(fheaderl);
+ gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+ verify_space(&fheaderp->name, 1);
+ verify_space(&fheaderp->arglist, 1);
+ verify_space(&fheaderp->type, 1);
+ fheaderl = fheaderp->next;
+ }
+ count = nwords;
+ break;
+ }
+
+ /* unboxed objects */
+ case BIGNUM_WIDETAG:
+#if N_WORD_BITS != 64
+ case SINGLE_FLOAT_WIDETAG:
+#endif
+ case DOUBLE_FLOAT_WIDETAG:
- if (gencgc_verbose > 1) {
- FSHOW((stderr,
- "/write protected %d of %d pages in generation %d\n",
- count_write_protect_generation_pages(generation),
- count_generation_pages(generation),
- generation));
+ index = boxed_registers[i];
+ foo = *os_context_register_addr(context, index);
+ scavenge(&foo, 1);
+ *os_context_register_addr(context, index) = foo;
+
+ scavenge((lispobj*) &(*os_context_register_addr(context, index)), 1);
+ }
+
+#ifdef reg_LIP
+ /* Fix the LIP */
+
+ /*
+ * But what happens if lip_register_pair is -1? *os_context_register_addr on Solaris
+ * (see solaris_register_address in solaris-os.c) will return
+ * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
+ * that what we really want? My guess is that that is not what we
+ * want, so if lip_register_pair is -1, we don't touch reg_LIP at
+ * all. But maybe it doesn't really matter if LIP is trashed?
+ */
+ if (lip_register_pair >= 0) {
+ *os_context_register_addr(context, reg_LIP) =
+ *os_context_register_addr(context, lip_register_pair) + lip_offset;
+ }
+#endif /* reg_LIP */
+
+ /* Fix the PC if it was in from space */
+ if (from_space_p(*os_context_pc_addr(context)))
+ *os_context_pc_addr(context) = *os_context_register_addr(context, reg_CODE) + pc_code_offset;
+
+#ifdef ARCH_HAS_LINK_REGISTER
+ /* Fix the LR ditto; important if we're being called from
+ * an assembly routine that expects to return using blr, otherwise
+ * harmless */
+ if (from_space_p(*os_context_lr_addr(context)))
+ *os_context_lr_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + lr_code_offset;
+#endif
+
+#ifdef ARCH_HAS_NPC_REGISTER
+ if (from_space_p(*os_context_npc_addr(context)))
+ *os_context_npc_addr(context) = *os_context_register_addr(context, reg_CODE) + npc_code_offset;
+#endif /* ARCH_HAS_NPC_REGISTER */
+}
+
+/* Scavenge the lisp references held in every saved interrupt context
+ * of the current thread.  The count of live contexts is read from the
+ * FREE-INTERRUPT-CONTEXT-INDEX symbol; each context is handed to
+ * scavenge_interrupt_context() in turn. */
+void
+scavenge_interrupt_contexts(void)
+{
+    int i, index;
+    os_context_t *context;
+
+    struct thread *th=arch_os_get_current_thread();
+
+    /* Number of active (saved) contexts for this thread. */
+    index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0));
+
+#if defined(DEBUG_PRINT_CONTEXT_INDEX)
+    printf("Number of active contexts: %d\n", index);
+#endif
+
+    for (i = 0; i < index; i++) {
+        context = th->interrupt_contexts[i];
+        scavenge_interrupt_context(context);
+    }
+}
+
+#endif
+
+#if defined(LISP_FEATURE_SB_THREAD)
+static void
+preserve_context_registers (os_context_t *c)
+{
+ void **ptr;
+ /* On Darwin the signal context isn't a contiguous block of memory,
+     * so just calling preserve_pointer on its contents won't be sufficient.
+ */
+#if defined(LISP_FEATURE_DARWIN)
+#if defined LISP_FEATURE_X86
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
+ preserve_pointer((void*)*os_context_pc_addr(c));
+#elif defined LISP_FEATURE_X86_64
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
+ preserve_pointer((void*)*os_context_pc_addr(c));
+#else
+ #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
+#endif
+#endif
+ for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
+ preserve_pointer(*ptr);
- SHOW("entering gc_free_heap");
-
- for (page = 0; page < NUM_PAGES; page++) {
- /* Skip free pages which should already be zero filled. */
- if (page_table[page].allocated != FREE_PAGE_FLAG) {
- void *page_start, *addr;
-
- /* Mark the page free. The other slots are assumed invalid
- * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
- * should not be write-protected -- except that the
- * generation is used for the current region but it sets
- * that up. */
- page_table[page].allocated = FREE_PAGE_FLAG;
- page_table[page].bytes_used = 0;
-
- /* Zero the page. */
- page_start = (void *)page_address(page);
-
- /* First, remove any write-protection. */
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[page].write_protected = 0;
-
- os_invalidate(page_start,PAGE_BYTES);
- addr = os_validate(page_start,PAGE_BYTES);
- if (addr == NULL || addr != page_start) {
- lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
- page_start,
- addr);
- }
- } else if (gencgc_zero_check_during_free_heap) {
- /* Double-check that the page is zero filled. */
- int *page_start, i;
- gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
- gc_assert(page_table[page].bytes_used == 0);
- page_start = (int *)page_address(page);
- for (i=0; i<1024; i++) {
- if (page_start[i] != 0) {
- lose("free region not zero at %x", page_start + i);
- }
- }
- }
+ SHOW("entering gc_free_heap");
+
+ for (page = 0; page < page_table_pages; page++) {
+ /* Skip free pages which should already be zero filled. */
+ if (page_table[page].allocated != FREE_PAGE_FLAG) {
+ void *page_start, *addr;
+
+ /* Mark the page free. The other slots are assumed invalid
+ * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
+ * should not be write-protected -- except that the
+ * generation is used for the current region but it sets
+ * that up. */
+ page_table[page].allocated = FREE_PAGE_FLAG;
+ page_table[page].bytes_used = 0;
+
+#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */
+ /* Zero the page. */
+ page_start = (void *)page_address(page);
+
+ /* First, remove any write-protection. */
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page].write_protected = 0;
+
+ os_invalidate(page_start,PAGE_BYTES);
+ addr = os_validate(page_start,PAGE_BYTES);
+ if (addr == NULL || addr != page_start) {
+ lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n",
+ page_start,
+ addr);
+ }
+#else
+ page_table[page].write_protected = 0;
+#endif
+ } else if (gencgc_zero_check_during_free_heap) {
+ /* Double-check that the page is zero filled. */
+ long *page_start;
+ page_index_t i;
+ gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_table[page].bytes_used == 0);
+ page_start = (long *)page_address(page);
+ for (i=0; i<1024; i++) {
+ if (page_start[i] != 0) {
+ lose("free region not zero at %x\n", page_start + i);
+ }
+ }
+ }
+/* Zero-fill every page currently marked FREE_PAGE_FLAG below
+ * last_free_page.  Called before dumping a core, because the core
+ * dumper assumes free pages contain zeros (see the comment at the
+ * call site in gc_and_save).  If free pages are kept read-protected
+ * (READ_PROTECT_FREE_PAGES), protection is lifted first so the
+ * zeroing write succeeds. */
+static void
+zero_all_free_pages()
+{
+ page_index_t i;
+
+ for (i = 0; i < last_free_page; i++) {
+ if (page_table[i].allocated == FREE_PAGE_FLAG) {
+#ifdef READ_PROTECT_FREE_PAGES
+ os_protect(page_address(i),
+ PAGE_BYTES,
+ OS_VM_PROT_ALL);
+#endif
+ zero_pages(i, i);
+ }
+ }
+}
+
+/* Things to do before doing a final GC before saving a core (without
+ * purify).
+ *
+ * + Pages in large_object pages aren't moved by the GC, so we need to
+ * unset that flag from all pages.
+ * + The pseudo-static generation isn't normally collected, but it seems
+ * reasonable to collect it at least when saving a core. So move the
+ * pages to a normal generation.
+ */
+static void
+prepare_for_final_gc ()
+{
+ page_index_t i;
+ for (i = 0; i < last_free_page; i++) {
+ /* Clear the large-object flag so the final GC is free to move
+ * the data on this page and compact the heap. */
+ page_table[i].large_object = 0;
+ if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
+ /* Demote pseudo-static pages to the highest normal
+ * generation so the upcoming collect_garbage() call will
+ * actually collect them, keeping the per-generation byte
+ * accounting in sync.  NOTE(review): `used` is an int while
+ * bytes_used may be wider elsewhere in this file -- confirm
+ * no truncation is possible for a single page. */
+ int used = page_table[i].bytes_used;
+ page_table[i].gen = HIGHEST_NORMAL_GENERATION;
+ generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
+ generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
+ }
+ }
+}
+
+
+/* Do a non-conservative GC, and then save a core with the initial
+ * function being set to the value of the static symbol
+ * SB!VM:RESTART-LISP-FUNCTION */
+/* Perform a full, non-conservative GC and then write a core file to
+ * `filename`.  If `prepend_runtime` is non-zero, the runtime
+ * executable image is written ahead of the core data so the result is
+ * a standalone executable.  On success this function does not return
+ * normally: reaching the final lose() means the save failed after the
+ * stack had already been mangled. */
+void
+gc_and_save(char *filename, int prepend_runtime)
+{
+ FILE *file;
+ void *runtime_bytes = NULL;
+ size_t runtime_size;
+
+ file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
+ &runtime_size);
+ if (file == NULL)
+ return;
+
+ /* Disable conservative stack scanning so both collections below
+ * are fully precise and can relocate everything. */
+ conservative_stack = 0;
+
+ /* The filename might come from Lisp, and be moved by the now
+ * non-conservative GC.
+ * NOTE(review): the strdup() result is used without a NULL check --
+ * confirm that an out-of-memory failure here is acceptable. */
+ filename = strdup(filename);
+
+ /* Collect twice: once into relatively high memory, and then back
+ * into low memory. This compacts the retained data into the lower
+ * pages, minimizing the size of the core file.
+ */
+ prepare_for_final_gc();
+ gencgc_alloc_start_page = last_free_page;
+ collect_garbage(HIGHEST_NORMAL_GENERATION+1);
+
+ prepare_for_final_gc();
+ gencgc_alloc_start_page = -1;
+ collect_garbage(HIGHEST_NORMAL_GENERATION+1);
+
+ if (prepend_runtime)
+ save_runtime_to_filehandle(file, runtime_bytes, runtime_size);
+
+ /* The dumper doesn't know that pages need to be zeroed before use. */
+ zero_all_free_pages();
+ save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
+ prepend_runtime);
+ /* Oops. Save still managed to fail. Since we've mangled the stack
+ * beyond hope, there's not much we can do.
+ * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
+ * going to be rather unsatisfactory too... */
+ lose("Attempt to save core after non-conservative GC failed.\n");
+}