- int j;
- int boxed_cnt = 0;
- int unboxed_cnt = 0;
- int large_boxed_cnt = 0;
- int large_unboxed_cnt = 0;
- int pinned_cnt=0;
-
- for (j = 0; j < last_free_page; j++)
- if (page_table[j].gen == i) {
-
- /* Count the number of boxed pages within the given
- * generation. */
- if (page_table[j].allocated & BOXED_PAGE_FLAG) {
- if (page_table[j].large_object)
- large_boxed_cnt++;
- else
- boxed_cnt++;
- }
- if(page_table[j].dont_move) pinned_cnt++;
- /* Count the number of unboxed pages within the given
- * generation. */
- if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
- if (page_table[j].large_object)
- large_unboxed_cnt++;
- else
- unboxed_cnt++;
- }
- }
-
- gc_assert(generations[i].bytes_allocated
- == count_generation_bytes_allocated(i));
- fprintf(stderr,
- " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n",
- i,
- boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
- pinned_cnt,
- generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES
- - generations[i].bytes_allocated),
- generations[i].gc_trigger,
- count_write_protect_generation_pages(i),
- generations[i].num_gc,
- gen_av_mem_age(i));
+ int j;
+ int boxed_cnt = 0;
+ int unboxed_cnt = 0;
+ int large_boxed_cnt = 0;
+ int large_unboxed_cnt = 0;
+ int pinned_cnt=0;
+
+ for (j = 0; j < last_free_page; j++)
+ if (page_table[j].gen == i) {
+
+ /* Count the number of boxed pages within the given
+ * generation. */
+ if (page_table[j].allocated & BOXED_PAGE_FLAG) {
+ if (page_table[j].large_object)
+ large_boxed_cnt++;
+ else
+ boxed_cnt++;
+ }
+ if(page_table[j].dont_move) pinned_cnt++;
+ /* Count the number of unboxed pages within the given
+ * generation. */
+ if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+ if (page_table[j].large_object)
+ large_unboxed_cnt++;
+ else
+ unboxed_cnt++;
+ }
+ }
+
+ gc_assert(generations[i].bytes_allocated
+ == count_generation_bytes_allocated(i));
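+    /* One row of the statistics table: generation number, page counts
+     * (boxed, unboxed, large boxed, large unboxed, pinned), bytes
+     * allocated, bytes wasted, the GC trigger, the number of
+     * write-protected pages, the GC count, and the average memory
+     * age. */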
+ fprintf(stderr,
+ " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ i,
+ boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
+ pinned_cnt,
+ generations[i].bytes_allocated,
+ (count_generation_pages(i)*PAGE_BYTES
+ - generations[i].bytes_allocated),
+ generations[i].gc_trigger,
+ count_write_protect_generation_pages(i),
+ generations[i].num_gc,
+ gen_av_mem_age(i));
- /* some bytes were allocated in the region */
- orig_first_page_bytes_used = page_table[first_page].bytes_used;
-
- gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
-
- /* All the pages used need to be updated */
-
- /* Update the first page. */
-
- /* If the page was free then set up the gen, and
- * first_object_offset. */
- if (page_table[first_page].bytes_used == 0)
- gc_assert(page_table[first_page].first_object_offset == 0);
- page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
-
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[first_page].gen == gc_alloc_generation);
- gc_assert(page_table[first_page].large_object == 0);
-
- byte_cnt = 0;
-
- /* Calculate the number of bytes used in this page. This is not
- * always the number of new bytes, unless it was free. */
- more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
- }
- page_table[first_page].bytes_used = bytes_used;
- byte_cnt += bytes_used;
-
-
- /* All the rest of the pages should be free. We need to set their
- * first_object_offset pointer to the start of the region, and set
- * the bytes_used. */
- while (more) {
- page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (unboxed)
- gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[next_page].bytes_used == 0);
- gc_assert(page_table[next_page].gen == gc_alloc_generation);
- gc_assert(page_table[next_page].large_object == 0);
-
- gc_assert(page_table[next_page].first_object_offset ==
- alloc_region->start_addr - page_address(next_page));
-
- /* Calculate the number of bytes used in this page. */
- more = 0;
- if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
- }
- page_table[next_page].bytes_used = bytes_used;
- byte_cnt += bytes_used;
-
- next_page++;
- }
-
- region_size = alloc_region->free_pointer - alloc_region->start_addr;
- bytes_allocated += region_size;
- generations[gc_alloc_generation].bytes_allocated += region_size;
-
- gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
-
- /* Set the generations alloc restart page to the last page of
- * the region. */
- if (unboxed)
- generations[gc_alloc_generation].alloc_unboxed_start_page =
- next_page-1;
- else
- generations[gc_alloc_generation].alloc_start_page = next_page-1;
-
- /* Add the region to the new_areas if requested. */
- if (!unboxed)
- add_new_area(first_page,orig_first_page_bytes_used, region_size);
-
- /*
- FSHOW((stderr,
- "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
- region_size,
- gc_alloc_generation));
- */
+ /* some bytes were allocated in the region */
+ orig_first_page_bytes_used = page_table[first_page].bytes_used;
+
+    gc_assert(alloc_region->start_addr ==
+              (page_address(first_page)
+               + page_table[first_page].bytes_used));
+
+ /* All the pages used need to be updated */
+
+ /* Update the first page. */
+
+ /* If the page was free then set up the gen, and
+ * first_object_offset. */
+ if (page_table[first_page].bytes_used == 0)
+ gc_assert(page_table[first_page].first_object_offset == 0);
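+    /* The region is being closed, so clear the open-region flag. */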
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
+
+ if (unboxed)
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
+ else
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].gen == gc_alloc_generation);
+ gc_assert(page_table[first_page].large_object == 0);
+
+ byte_cnt = 0;
+
+    /* Calculate the number of bytes used in this page. This is not
+     * always the number of new bytes, unless the page was free. */
+ more = 0;
+    if ((bytes_used = (alloc_region->free_pointer
+                       - page_address(first_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
+ more = 1;
+ }
+ page_table[first_page].bytes_used = bytes_used;
+ byte_cnt += bytes_used;
+
+
+    /* All the rest of the pages should be free. Their
+     * first_object_offset should already point back to the start of
+     * the region; set their bytes_used. */
+ while (more) {
+ page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
+ if (unboxed)
+ gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
+ else
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[next_page].bytes_used == 0);
+ gc_assert(page_table[next_page].gen == gc_alloc_generation);
+ gc_assert(page_table[next_page].large_object == 0);
+
+ gc_assert(page_table[next_page].first_object_offset ==
+ alloc_region->start_addr - page_address(next_page));
+
+ /* Calculate the number of bytes used in this page. */
+ more = 0;
+ if ((bytes_used = (alloc_region->free_pointer
+ - page_address(next_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
+ more = 1;
+ }
+ page_table[next_page].bytes_used = bytes_used;
+ byte_cnt += bytes_used;
+
+ next_page++;
+ }
+
+ region_size = alloc_region->free_pointer - alloc_region->start_addr;
+ bytes_allocated += region_size;
+ generations[gc_alloc_generation].bytes_allocated += region_size;
+
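+    /* The per-page byte counts, beyond what the first page already
+     * held, must sum to the size of the region. */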
+ gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
+
+    /* Set the generation's allocation restart page to the last page
+     * of the region. */
+ if (unboxed)
+ generations[gc_alloc_generation].alloc_unboxed_start_page =
+ next_page-1;
+ else
+ generations[gc_alloc_generation].alloc_start_page = next_page-1;
+
+ /* Add the region to the new_areas if requested. */
+ if (!unboxed)
+ add_new_area(first_page,orig_first_page_bytes_used, region_size);
+
+ /*
+ FSHOW((stderr,
+ "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
+ region_size,
+ gc_alloc_generation));
+ */
- first_page = restart_page;
- if (large_p)
- while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE_FLAG))
- first_page++;
- else
- while (first_page < NUM_PAGES) {
- if(page_table[first_page].allocated == FREE_PAGE_FLAG)
- break;
- if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0)) {
- break;
- }
- first_page++;
- }
-
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
- }
-
- gc_assert(page_table[first_page].write_protected == 0);
-
- last_page = first_page;
- bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes)
- || (!large_p && (num_pages < 2)))
- && (last_page < (NUM_PAGES-1))
- && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
- last_page++;
- num_pages++;
- bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
- }
-
- region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
- restart_page = last_page + 1;
+ first_page = restart_page;
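+    /* A large object needs a completely free first page; a small
+     * object may also start on a partially-used page of the right
+     * type in the current generation. */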
+ if (large_p)
+ while ((first_page < NUM_PAGES)
+ && (page_table[first_page].allocated != FREE_PAGE_FLAG))
+ first_page++;
+ else
+ while (first_page < NUM_PAGES) {
+ if(page_table[first_page].allocated == FREE_PAGE_FLAG)
+ break;
+ if((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0)) {
+ break;
+ }
+ first_page++;
+ }
+
+ if (first_page >= NUM_PAGES) {
+ fprintf(stderr,
+ "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
+ nbytes);
+ print_generation_stats(1);
+ lose(NULL);
+ }
+
+ gc_assert(page_table[first_page].write_protected == 0);
+
+ last_page = first_page;
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
+ num_pages = 1;
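+    /* Extend the region over the following free pages until enough
+     * bytes have been found; small regions are grown to at least two
+     * pages when possible. */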
+ while (((bytes_found < nbytes)
+ || (!large_p && (num_pages < 2)))
+ && (last_page < (NUM_PAGES-1))
+ && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ last_page++;
+ num_pages++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+
+ region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
+
+ gc_assert(bytes_found == region_size);
+ restart_page = last_page + 1;
- /* Promote the object. */
-
- long remaining_bytes;
- long next_page;
- long bytes_freed;
- long old_bytes_used;
-
- /* Note: Any page write-protection must be removed, else a
- * later scavenge_newspace may incorrectly not scavenge these
- * pages. This would not be necessary if they are added to the
- * new areas, but let's do it for them all (they'll probably
- * be written anyway?). */
-
- gc_assert(page_table[first_page].first_object_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
-
- page_table[next_page].gen = new_space;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[next_page].write_protected = 0;
- }
- remaining_bytes -= PAGE_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk
- * so there may be more unused pages which will be freed. */
-
- /* The object may have shrunk but shouldn't have grown. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
- page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
- /* Checks out OK, free the page. Don't need to bother zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected as they
- * should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
- bytes_freed;
- generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
- bytes_allocated -= bytes_freed;
-
- /* Add the region to the new_areas if requested. */
- add_new_area(first_page,0,nwords*N_WORD_BYTES);
-
- return(object);
+ /* Promote the object. */
+
+ long remaining_bytes;
+ long next_page;
+ long bytes_freed;
+ long old_bytes_used;
+
+ /* Note: Any page write-protection must be removed, else a
+ * later scavenge_newspace may incorrectly not scavenge these
+     * pages. This would not be necessary if they were added to the
+ * new areas, but let's do it for them all (they'll probably
+ * be written anyway?). */
+
+ gc_assert(page_table[first_page].first_object_offset == 0);
+
+ next_page = first_page;
+ remaining_bytes = nwords*N_WORD_BYTES;
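+    /* Every page except the last is full: move each one to new_space
+     * and drop any write-protection. */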
+ while (remaining_bytes > PAGE_BYTES) {
+ gc_assert(page_table[next_page].gen == from_space);
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[next_page].large_object);
+ gc_assert(page_table[next_page].first_object_offset==
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+
+ page_table[next_page].gen = new_space;
+
+ /* Remove any write-protection. We should be able to rely
+ * on the write-protect flag to avoid redundant calls. */
+ if (page_table[next_page].write_protected) {
+ os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[next_page].write_protected = 0;
+ }
+ remaining_bytes -= PAGE_BYTES;
+ next_page++;
+ }
+
+ /* Now only one page remains, but the object may have shrunk
+ * so there may be more unused pages which will be freed. */
+
+ /* The object may have shrunk but shouldn't have grown. */
+ gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
+
+ page_table[next_page].gen = new_space;
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+
+ /* Adjust the bytes_used. */
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].bytes_used = remaining_bytes;
+
+ bytes_freed = old_bytes_used - remaining_bytes;
+
+ /* Free any remaining pages; needs care. */
+ next_page++;
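+    /* A trailing page is freed only while it still looks like part of
+     * this large object: full, in from_space, and offset back to
+     * first_page. */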
+ while ((old_bytes_used == PAGE_BYTES) &&
+ (page_table[next_page].gen == from_space) &&
+ (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
+ page_table[next_page].large_object &&
+ (page_table[next_page].first_object_offset ==
+ -(next_page - first_page)*PAGE_BYTES)) {
+ /* Checks out OK, free the page. Don't need to bother zeroing
+ * pages as this should have been done before shrinking the
+ * object. These pages shouldn't be write-protected as they
+ * should be zero filled. */
+ gc_assert(page_table[next_page].write_protected == 0);
+
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ page_table[next_page].bytes_used = 0;
+ bytes_freed += old_bytes_used;
+ next_page++;
+ }
+
+ generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
+ bytes_freed;
+ generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
+ bytes_allocated -= bytes_freed;
+
+ /* Add the region to the new_areas if requested. */
+ add_new_area(first_page,0,nwords*N_WORD_BYTES);
+
+ return(object);
- /* Promote the object. Note: Unboxed objects may have been
- * allocated to a BOXED region so it may be necessary to
- * change the region to UNBOXED. */
- long remaining_bytes;
- long next_page;
- long bytes_freed;
- long old_bytes_used;
-
- gc_assert(page_table[first_page].first_object_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- remaining_bytes -= PAGE_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk so
- * there may be more unused pages which will be freed. */
-
- /* Object may have shrunk but shouldn't have grown - check. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
- page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
- /* Checks out OK, free the page. Don't need to both zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected, even if
- * boxed they should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- if ((bytes_freed > 0) && gencgc_verbose)
- FSHOW((stderr,
- "/copy_large_unboxed bytes_freed=%d\n",
- bytes_freed));
-
- generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
- generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
- bytes_allocated -= bytes_freed;
-
- return(object);
+ /* Promote the object. Note: Unboxed objects may have been
+ * allocated to a BOXED region so it may be necessary to
+ * change the region to UNBOXED. */
+ long remaining_bytes;
+ long next_page;
+ long bytes_freed;
+ long old_bytes_used;
+
+ gc_assert(page_table[first_page].first_object_offset == 0);
+
+ next_page = first_page;
+ remaining_bytes = nwords*N_WORD_BYTES;
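+    /* Full pages are moved to new_space and retagged UNBOXED, since a
+     * large unboxed object may have been allocated on boxed pages. */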
+ while (remaining_bytes > PAGE_BYTES) {
+ gc_assert(page_table[next_page].gen == from_space);
+ gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
+ gc_assert(page_table[next_page].large_object);
+ gc_assert(page_table[next_page].first_object_offset==
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+
+ page_table[next_page].gen = new_space;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+ remaining_bytes -= PAGE_BYTES;
+ next_page++;
+ }
+
+ /* Now only one page remains, but the object may have shrunk so
+ * there may be more unused pages which will be freed. */
+
+ /* Object may have shrunk but shouldn't have grown - check. */
+ gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
+
+ page_table[next_page].gen = new_space;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+
+ /* Adjust the bytes_used. */
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].bytes_used = remaining_bytes;
+
+ bytes_freed = old_bytes_used - remaining_bytes;
+
+ /* Free any remaining pages; needs care. */
+ next_page++;
+ while ((old_bytes_used == PAGE_BYTES) &&
+ (page_table[next_page].gen == from_space) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_table[next_page].large_object &&
+ (page_table[next_page].first_object_offset ==
+ -(next_page - first_page)*PAGE_BYTES)) {
+        /* Checks out OK, free the page. Don't need to bother zeroing
+         * pages as this should have been done before shrinking the
+         * object. These pages shouldn't be write-protected; even if
+         * boxed they should be zero filled. */
+ gc_assert(page_table[next_page].write_protected == 0);
+
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ page_table[next_page].bytes_used = 0;
+ bytes_freed += old_bytes_used;
+ next_page++;
+ }
+
+ if ((bytes_freed > 0) && gencgc_verbose)
+ FSHOW((stderr,
+ "/copy_large_unboxed bytes_freed=%d\n",
+ bytes_freed));
+
+ generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
+ generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
+ bytes_allocated -= bytes_freed;
+
+ return(object);
- /* Check for code references. */
- /* Check for a 32 bit word that looks like an absolute
- reference to within the code adea of the code object. */
- if ((data >= (code_start_addr-displacement))
- && (data < (code_end_addr-displacement))) {
- /* function header */
- if ((d4 == 0x5e)
- && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
- /* Skip the function header */
- p += 6*4 - 4 - 1;
- continue;
- }
- /* the case of PUSH imm32 */
- if (d1 == 0x68) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/PUSH $0x%.8x\n", data));
- }
- /* the case of MOV [reg-8],imm32 */
- if ((d3 == 0xc7)
- && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
- || d2==0x45 || d2==0x46 || d2==0x47)
- && (d1 == 0xf8)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
- }
- /* the case of LEA reg,[disp32] */
- if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
- }
- }
-
- /* Check for constant references. */
- /* Check for a 32 bit word that looks like an absolute
- reference to within the constant vector. Constant references
- will be aligned. */
- if ((data >= (constants_start_addr-displacement))
- && (data < (constants_end_addr-displacement))
- && (((unsigned)data & 0x3) == 0)) {
- /* Mov eax,m32 */
- if (d1 == 0xa1) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
- }
-
- /* the case of MOV m32,EAX */
- if (d1 == 0xa3) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
- }
-
- /* the case of CMP m32,imm32 */
- if ((d1 == 0x3d) && (d2 == 0x81)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- /* XX Check this */
- FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
- }
-
- /* Check for a mod=00, r/m=101 byte. */
- if ((d1 & 0xc7) == 5) {
- /* Cmp m32,reg */
- if (d2 == 0x39) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
- }
- /* the case of CMP reg32,m32 */
- if (d2 == 0x3b) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
- }
- /* the case of MOV m32,reg32 */
- if (d2 == 0x89) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
- }
- /* the case of MOV reg32,m32 */
- if (d2 == 0x8b) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
- }
- /* the case of LEA reg32,m32 */
- if (d2 == 0x8d) {
- fixup_found = 1;
- FSHOW((stderr,
- "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
- }
- }
- }
+ /* Check for code references. */
+ /* Check for a 32 bit word that looks like an absolute
+       reference to within the code area of the code object. */
+ if ((data >= (code_start_addr-displacement))
+ && (data < (code_end_addr-displacement))) {
+ /* function header */
+ if ((d4 == 0x5e)
+ && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+ /* Skip the function header */
+ p += 6*4 - 4 - 1;
+ continue;
+ }
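+            /* The checks below match opcode patterns that can carry
+             * an absolute 32-bit operand; a hit is recorded and
+             * logged but not patched here. */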
+ /* the case of PUSH imm32 */
+ if (d1 == 0x68) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/PUSH $0x%.8x\n", data));
+ }
+ /* the case of MOV [reg-8],imm32 */
+ if ((d3 == 0xc7)
+ && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
+ || d2==0x45 || d2==0x46 || d2==0x47)
+ && (d1 == 0xf8)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
+ }
+ /* the case of LEA reg,[disp32] */
+ if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
+ }
+ }
+
+ /* Check for constant references. */
+ /* Check for a 32 bit word that looks like an absolute
+ reference to within the constant vector. Constant references
+ will be aligned. */
+ if ((data >= (constants_start_addr-displacement))
+ && (data < (constants_end_addr-displacement))
+ && (((unsigned)data & 0x3) == 0)) {
+ /* Mov eax,m32 */
+ if (d1 == 0xa1) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
+ }
+
+ /* the case of MOV m32,EAX */
+ if (d1 == 0xa3) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
+ }
+
+ /* the case of CMP m32,imm32 */
+ if ((d1 == 0x3d) && (d2 == 0x81)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ /* XX Check this */
+ FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
+ }
+
+ /* Check for a mod=00, r/m=101 byte. */
+ if ((d1 & 0xc7) == 5) {
+ /* Cmp m32,reg */
+ if (d2 == 0x39) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
+ }
+ /* the case of CMP reg32,m32 */
+ if (d2 == 0x3b) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
+ }
+ /* the case of MOV m32,reg32 */
+ if (d2 == 0x89) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
+ }
+ /* the case of MOV reg32,m32 */
+ if (d2 == 0x8b) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
+ }
+ /* the case of LEA reg32,m32 */
+ if (d2 == 0x8d) {
+ fixup_found = 1;
+ FSHOW((stderr,
+                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
+ }
+ }
+ }
- if ((old_index != new_index) &&
- ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
- ((new_key != empty_symbol) ||
- (kv_vector[2*i] != empty_symbol))) {
-
- /*FSHOW((stderr,
- "* EQ key %d moved from %x to %x; index %d to %d\n",
- i, old_key, new_key, old_index, new_index));*/
-
- if (index_vector[old_index] != 0) {
- /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
-
- /* Unlink the key from the old_index chain. */
- if (index_vector[old_index] == i) {
- /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
- index_vector[old_index] = next_vector[i];
- /* Link it into the needing rehash chain. */
- next_vector[i] = fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(i);
- /*SHOW("P2");*/
- } else {
- unsigned prior = index_vector[old_index];
- unsigned next = next_vector[prior];
-
- /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
-
- while (next != 0) {
- /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
- if (next == i) {
- /* Unlink it. */
- next_vector[prior] = next_vector[next];
- /* Link it into the needing rehash
- * chain. */
- next_vector[next] =
- fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(next);
- /*SHOW("/P3");*/
- break;
- }
- prior = next;
- next = next_vector[next];
- }
- }
- }
- }
- }
- }
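+            /* The key hashes to a different bucket after being moved,
+             * so unlink it from the old chain and queue it for
+             * rehashing. */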
+ if ((old_index != new_index) &&
+ ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
+ ((new_key != empty_symbol) ||
+ (kv_vector[2*i] != empty_symbol))) {
+
+ /*FSHOW((stderr,
+ "* EQ key %d moved from %x to %x; index %d to %d\n",
+ i, old_key, new_key, old_index, new_index));*/
+
+ if (index_vector[old_index] != 0) {
+ /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
+
+ /* Unlink the key from the old_index chain. */
+ if (index_vector[old_index] == i) {
+ /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
+ index_vector[old_index] = next_vector[i];
+ /* Link it into the needing rehash chain. */
+ next_vector[i] = fixnum_value(hash_table->needing_rehash);
+ hash_table->needing_rehash = make_fixnum(i);
+ /*SHOW("P2");*/
+ } else {
+ unsigned prior = index_vector[old_index];
+ unsigned next = next_vector[prior];
+
+ /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
+
+ while (next != 0) {
+ /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
+ if (next == i) {
+ /* Unlink it. */
+ next_vector[prior] = next_vector[next];
+ /* Link it into the needing rehash
+ * chain. */
+ next_vector[next] =
+ fixnum_value(hash_table->needing_rehash);
+ hash_table->needing_rehash = make_fixnum(next);
+ /*SHOW("/P3");*/
+ break;
+ }
+ prior = next;
+ next = next_vector[next];
+ }
+ }
+ }
+ }
+ }
+ }
- gc_assert(page_table[i].allocated == region_allocation);
-
- /* Mark the page static. */
- page_table[i].dont_move = 1;
-
- /* Move the page to the new_space. XX I'd rather not do this
- * but the GC logic is not quite able to copy with the static
- * pages remaining in the from space. This also requires the
- * generation bytes_allocated counters be updated. */
- page_table[i].gen = new_space;
- generations[new_space].bytes_allocated += page_table[i].bytes_used;
- generations[from_space].bytes_allocated -= page_table[i].bytes_used;
-
- /* It is essential that the pages are not write protected as
- * they may have pointers into the old-space which need
- * scavenging. They shouldn't be write protected at this
- * stage. */
- gc_assert(!page_table[i].write_protected);
-
- /* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < PAGE_BYTES)
- /* ..or it is PAGE_BYTES and is the last in the block */
- || (page_table[i+1].allocated == FREE_PAGE_FLAG)
- || (page_table[i+1].bytes_used == 0) /* next page free */
- || (page_table[i+1].gen != from_space) /* diff. gen */
- || (page_table[i+1].first_object_offset == 0))
- break;
+ gc_assert(page_table[i].allocated == region_allocation);
+
+ /* Mark the page static. */
+ page_table[i].dont_move = 1;
+
+    /* Move the page to the new_space. XX I'd rather not do this
+     * but the GC logic is not quite able to cope with the static
+     * pages remaining in the from space. This also requires that
+     * the generation bytes_allocated counters be updated. */
+ page_table[i].gen = new_space;
+ generations[new_space].bytes_allocated += page_table[i].bytes_used;
+ generations[from_space].bytes_allocated -= page_table[i].bytes_used;
+
+ /* It is essential that the pages are not write protected as
+ * they may have pointers into the old-space which need
+ * scavenging. They shouldn't be write protected at this
+ * stage. */
+ gc_assert(!page_table[i].write_protected);
+
+ /* Check whether this is the last page in this contiguous block.. */
+ if ((page_table[i].bytes_used < PAGE_BYTES)
+ /* ..or it is PAGE_BYTES and is the last in the block */
+ || (page_table[i+1].allocated == FREE_PAGE_FLAG)
+ || (page_table[i+1].bytes_used == 0) /* next page free */
+ || (page_table[i+1].gen != from_space) /* diff. gen */
+ || (page_table[i+1].first_object_offset == 0))
+ break;
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)) {
- long last_page,j;
- int write_protected=1;
-
- /* This should be the start of a region */
- gc_assert(page_table[i].first_object_offset == 0);
-
- /* Now work forward until the end of the region */
- for (last_page = i; ; last_page++) {
- write_protected =
- write_protected && page_table[last_page].write_protected;
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
- break;
- }
- if (!write_protected) {
- scavenge(page_address(i),
- (page_table[last_page].bytes_used +
- (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
-
- /* Now scan the pages and write protect those that
- * don't have pointers to younger generations. */
- if (enable_page_protection) {
- for (j = i; j <= last_page; j++) {
- num_wp += update_page_write_prot(j);
- }
- }
- }
- i = last_page;
- }
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)) {
+ long last_page,j;
+ int write_protected=1;
+
+ /* This should be the start of a region */
+ gc_assert(page_table[i].first_object_offset == 0);
+
+ /* Now work forward until the end of the region */
+ for (last_page = i; ; last_page++) {
+ write_protected =
+ write_protected && page_table[last_page].write_protected;
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (page_table[last_page+1].bytes_used == 0)
+ || (page_table[last_page+1].gen != generation)
+ || (page_table[last_page+1].first_object_offset == 0))
+ break;
+ }
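+            /* If every page in the block is still write-protected,
+             * nothing on them can have been mutated, so the scavenge
+             * can be skipped. */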
+ if (!write_protected) {
+ scavenge(page_address(i),
+ (page_table[last_page].bytes_used +
+ (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+
+ /* Now scan the pages and write protect those that
+ * don't have pointers to younger generations. */
+ if (enable_page_protection) {
+ for (j = i; j <= last_page; j++) {
+ num_wp += update_page_write_prot(j);
+ }
+ }
+ }
+ i = last_page;
+ }
- /* Note that this skips over open regions when it encounters them. */
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)
- && ((page_table[i].write_protected == 0)
- /* (This may be redundant as write_protected is now
- * cleared before promotion.) */
- || (page_table[i].dont_move == 1))) {
- long last_page;
- int all_wp=1;
-
- /* The scavenge will start at the first_object_offset of page i.
- *
- * We need to find the full extent of this contiguous
- * block in case objects span pages.
- *
- * Now work forward until the end of this contiguous area
- * is found. A small area is preferred as there is a
- * better chance of its pages being write-protected. */
- for (last_page = i; ;last_page++) {
- /* If all pages are write-protected and movable,
- * then no need to scavenge */
- all_wp=all_wp && page_table[last_page].write_protected &&
- !page_table[last_page].dont_move;
-
- /* Check whether this is the last page in this
- * contiguous block */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
- break;
- }
-
- /* Do a limited check for write-protected pages. */
- if (!all_wp) {
- long size;
-
- size = (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES
- - page_table[i].first_object_offset)/N_WORD_BYTES;
- new_areas_ignore_page = last_page;
-
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
-
- }
- i = last_page;
- }
+ /* Note that this skips over open regions when it encounters them. */
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)
+ && ((page_table[i].write_protected == 0)
+ /* (This may be redundant as write_protected is now
+ * cleared before promotion.) */
+ || (page_table[i].dont_move == 1))) {
+ long last_page;
+ int all_wp=1;
+
+ /* The scavenge will start at the first_object_offset of page i.
+ *
+ * We need to find the full extent of this contiguous
+ * block in case objects span pages.
+ *
+ * Now work forward until the end of this contiguous area
+ * is found. A small area is preferred as there is a
+ * better chance of its pages being write-protected. */
+ for (last_page = i; ;last_page++) {
+ /* If all pages are write-protected and movable,
+ * then no need to scavenge */
+ all_wp=all_wp && page_table[last_page].write_protected &&
+ !page_table[last_page].dont_move;
+
+ /* Check whether this is the last page in this
+ * contiguous block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (page_table[last_page+1].bytes_used == 0)
+ || (page_table[last_page+1].gen != generation)
+ || (page_table[last_page+1].first_object_offset == 0))
+ break;
+ }
+
+ /* Do a limited check for write-protected pages. */
+ if (!all_wp) {
+ long size;
+
+ size = (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES
+ - page_table[i].first_object_offset)/N_WORD_BYTES;
+ new_areas_ignore_page = last_page;
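+            /* Areas added on pages up to last_page are being scanned
+             * right here, so they need not be recorded again. */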
+
+ scavenge(page_address(i) +
+ page_table[i].first_object_offset,
+ size);
+
+ }
+ i = last_page;
+ }
- /* Move the current to the previous new areas */
- previous_new_areas = current_new_areas;
- previous_new_areas_index = current_new_areas_index;
-
- /* Scavenge all the areas in previous new areas. Any new areas
- * allocated are saved in current_new_areas. */
-
- /* Allocate an array for current_new_areas; alternating between
- * new_areas_1 and 2 */
- if (previous_new_areas == &new_areas_1)
- current_new_areas = &new_areas_2;
- else
- current_new_areas = &new_areas_1;
-
- /* Set up for gc_alloc(). */
- new_areas = current_new_areas;
- new_areas_index = 0;
-
- /* Check whether previous_new_areas had overflowed. */
- if (previous_new_areas_index >= NUM_NEW_AREAS) {
-
- /* New areas of objects allocated have been lost so need to do a
- * full scan to be sure! If this becomes a problem try
- * increasing NUM_NEW_AREAS. */
- if (gencgc_verbose)
- SHOW("new_areas overflow, doing full scavenge");
-
- /* Don't need to record new areas that get scavenge anyway
- * during scavenge_newspace_generation_one_scan. */
- record_new_objects = 1;
-
- scavenge_newspace_generation_one_scan(generation);
-
- /* Record all new areas now. */
- record_new_objects = 2;
-
- /* Flush the current regions updating the tables. */
- gc_alloc_update_all_page_tables();
-
- } else {
-
- /* Work through previous_new_areas. */
- for (i = 0; i < previous_new_areas_index; i++) {
- long page = (*previous_new_areas)[i].page;
- long offset = (*previous_new_areas)[i].offset;
- long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
- gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
- scavenge(page_address(page)+offset, size);
- }
+ /* Move the current to the previous new areas */
+ previous_new_areas = current_new_areas;
+ previous_new_areas_index = current_new_areas_index;
+
+ /* Scavenge all the areas in previous new areas. Any new areas
+ * allocated are saved in current_new_areas. */
+
+ /* Allocate an array for current_new_areas; alternating between
+ * new_areas_1 and 2 */
+ if (previous_new_areas == &new_areas_1)
+ current_new_areas = &new_areas_2;
+ else
+ current_new_areas = &new_areas_1;
+
+ /* Set up for gc_alloc(). */
+ new_areas = current_new_areas;
+ new_areas_index = 0;
+
+ /* Check whether previous_new_areas had overflowed. */
+ if (previous_new_areas_index >= NUM_NEW_AREAS) {
+
+        /* New areas of allocated objects have been lost, so we need
+         * to do a full scan to be sure! If this becomes a problem,
+         * try increasing NUM_NEW_AREAS. */
+ if (gencgc_verbose)
+ SHOW("new_areas overflow, doing full scavenge");
+
+        /* Don't need to record new areas that get scavenged anyway
+         * during scavenge_newspace_generation_one_scan. */
+ record_new_objects = 1;
+
+ scavenge_newspace_generation_one_scan(generation);
+
+ /* Record all new areas now. */
+ record_new_objects = 2;
+
+ /* Flush the current regions updating the tables. */
+ gc_alloc_update_all_page_tables();
+
+ } else {
+
+ /* Work through previous_new_areas. */
+ for (i = 0; i < previous_new_areas_index; i++) {
+ long page = (*previous_new_areas)[i].page;
+ long offset = (*previous_new_areas)[i].offset;
+ long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+ gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
+ scavenge(page_address(page)+offset, size);
+ }
- /* Find a first page for the next region of pages. */
- while ((first_page < last_free_page)
- && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
- || (page_table[first_page].bytes_used == 0)
- || (page_table[first_page].gen != from_space)))
- first_page++;
-
- if (first_page >= last_free_page)
- break;
-
- /* Find the last page of this region. */
- last_page = first_page;
-
- do {
- /* Free the page. */
- bytes_freed += page_table[last_page].bytes_used;
- generations[page_table[last_page].gen].bytes_allocated -=
- page_table[last_page].bytes_used;
- page_table[last_page].allocated = FREE_PAGE_FLAG;
- page_table[last_page].bytes_used = 0;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- {
- void *page_start = (void *)page_address(last_page);
-
- if (page_table[last_page].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[last_page].write_protected = 0;
- }
- }
- last_page++;
- }
- while ((last_page < last_free_page)
- && (page_table[last_page].allocated != FREE_PAGE_FLAG)
- && (page_table[last_page].bytes_used != 0)
- && (page_table[last_page].gen == from_space));
-
- /* Zero pages from first_page to (last_page-1).
- *
- * FIXME: Why not use os_zero(..) function instead of
- * hand-coding this again? (Check other gencgc_unmap_zero
- * stuff too. */
- if (gencgc_unmap_zero) {
- void *page_start, *addr;
-
- page_start = (void *)page_address(first_page);
-
- os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
- addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
- if (addr == NULL || addr != page_start) {
- lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
- addr);
- }
- } else {
- long *page_start;
-
- page_start = (long *)page_address(first_page);
- memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
- }
-
- first_page = last_page;
+ /* Find a first page for the next region of pages. */
+ while ((first_page < last_free_page)
+ && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
+ || (page_table[first_page].bytes_used == 0)
+ || (page_table[first_page].gen != from_space)))
+ first_page++;
+
+ if (first_page >= last_free_page)
+ break;
+
+ /* Find the last page of this region. */
+ last_page = first_page;
+
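+        /* Free each page in turn until a page outside this
+         * from_space run is reached. */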
+ do {
+ /* Free the page. */
+ bytes_freed += page_table[last_page].bytes_used;
+ generations[page_table[last_page].gen].bytes_allocated -=
+ page_table[last_page].bytes_used;
+ page_table[last_page].allocated = FREE_PAGE_FLAG;
+ page_table[last_page].bytes_used = 0;
+
+ /* Remove any write-protection. We should be able to rely
+ * on the write-protect flag to avoid redundant calls. */
+ {
+ void *page_start = (void *)page_address(last_page);
+
+ if (page_table[last_page].write_protected) {
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[last_page].write_protected = 0;
+ }
+ }
+ last_page++;
+ }
+ while ((last_page < last_free_page)
+ && (page_table[last_page].allocated != FREE_PAGE_FLAG)
+ && (page_table[last_page].bytes_used != 0)
+ && (page_table[last_page].gen == from_space));
+
+ /* Zero pages from first_page to (last_page-1).
+ *
+         * FIXME: Why not use the os_zero(..) function instead of
+         * hand-coding this again? (Check other gencgc_unmap_zero
+         * stuff too.) */
+ if (gencgc_unmap_zero) {
+ void *page_start, *addr;
+
+ page_start = (void *)page_address(first_page);
+
+ os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
+ addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
+ if (addr == NULL || addr != page_start) {
+ lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
+ addr);
+ }
+ } else {
+ long *page_start;
+
+ page_start = (long *)page_address(first_page);
+ memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
+ }
+
+ first_page = last_page;
- size_t count = 1;
- lispobj thing = *(lispobj*)start;
-
- if (is_lisp_pointer(thing)) {
- long page_index = find_page_index((void*)thing);
- long to_readonly_space =
- (READ_ONLY_SPACE_START <= thing &&
- thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
- long to_static_space =
- (STATIC_SPACE_START <= thing &&
- thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
-
- /* Does it point to the dynamic space? */
- if (page_index != -1) {
- /* If it's within the dynamic space it should point to a used
- * page. XX Could check the offset too. */
- if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
- && (page_table[page_index].bytes_used == 0))
- lose ("Ptr %x @ %x sees free page.", thing, start);
- /* Check that it doesn't point to a forwarding pointer! */
- if (*((lispobj *)native_pointer(thing)) == 0x01) {
- lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
- }
- /* Check that its not in the RO space as it would then be a
- * pointer from the RO to the dynamic space. */
- if (is_in_readonly_space) {
- lose("ptr to dynamic space %x from RO space %x",
- thing, start);
- }
- /* Does it point to a plausible object? This check slows
- * it down a lot (so it's commented out).
- *
- * "a lot" is serious: it ate 50 minutes cpu time on
- * my duron 950 before I came back from lunch and
- * killed it.
- *
- * FIXME: Add a variable to enable this
- * dynamically. */
- /*
- if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
- lose("ptr %x to invalid object %x", thing, start);
- }
- */
- } else {
- /* Verify that it points to another valid space. */
- if (!to_readonly_space && !to_static_space
- && (thing != (unsigned)&undefined_tramp)) {
- lose("Ptr %x @ %x sees junk.", thing, start);
- }
- }
- } else {
- if (!(fixnump(thing))) {
- /* skip fixnums */
- switch(widetag_of(*start)) {
-
- /* boxed objects */
- case SIMPLE_VECTOR_WIDETAG:
- case RATIO_WIDETAG:
- case COMPLEX_WIDETAG:
- case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_BASE_STRING_WIDETAG:
+ size_t count = 1;
+ lispobj thing = *(lispobj*)start;
+
+ if (is_lisp_pointer(thing)) {
+ long page_index = find_page_index((void*)thing);
+ long to_readonly_space =
+ (READ_ONLY_SPACE_START <= thing &&
+ thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
+ long to_static_space =
+ (STATIC_SPACE_START <= thing &&
+ thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
+
+ /* Does it point to the dynamic space? */
+ if (page_index != -1) {
+ /* If it's within the dynamic space it should point to a used
+ * page. XX Could check the offset too. */
+ if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
+ && (page_table[page_index].bytes_used == 0))
+ lose ("Ptr %x @ %x sees free page.", thing, start);
+ /* Check that it doesn't point to a forwarding pointer! */
+ if (*((lispobj *)native_pointer(thing)) == 0x01) {
+ lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
+ }
+            /* Check that it's not in the RO space, as it would then be a
+ * pointer from the RO to the dynamic space. */
+ if (is_in_readonly_space) {
+ lose("ptr to dynamic space %x from RO space %x",
+ thing, start);
+ }
+ /* Does it point to a plausible object? This check slows
+ * it down a lot (so it's commented out).
+ *
+ * "a lot" is serious: it ate 50 minutes cpu time on
+ * my duron 950 before I came back from lunch and
+ * killed it.
+ *
+ * FIXME: Add a variable to enable this
+ * dynamically. */
+ /*
+ if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
+ lose("ptr %x to invalid object %x", thing, start);
+ }
+ */
+ } else {
+ /* Verify that it points to another valid space. */
+ if (!to_readonly_space && !to_static_space
+ && (thing != (unsigned)&undefined_tramp)) {
+ lose("Ptr %x @ %x sees junk.", thing, start);
+ }
+ }
+ } else {
+ if (!(fixnump(thing))) {
+ /* skip fixnums */
+ switch(widetag_of(*start)) {
+
+ /* boxed objects */
+ case SIMPLE_VECTOR_WIDETAG:
+ case RATIO_WIDETAG:
+ case COMPLEX_WIDETAG:
+ case SIMPLE_ARRAY_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
- case UNBOUND_MARKER_WIDETAG:
- case INSTANCE_HEADER_WIDETAG:
- case FDEFN_WIDETAG:
- count = 1;
- break;
-
- case CODE_HEADER_WIDETAG:
- {
- lispobj object = *start;
- struct code *code;
- long nheader_words, ncode_words, nwords;
- lispobj fheaderl;
- struct simple_fun *fheaderp;
-
- code = (struct code *) start;
-
- /* Check that it's not in the dynamic space.
- * FIXME: Isn't is supposed to be OK for code
- * objects to be in the dynamic space these days? */
- if (is_in_dynamic_space
- /* It's ok if it's byte compiled code. The trace
- * table offset will be a fixnum if it's x86
- * compiled code - check.
- *
- * FIXME: #^#@@! lack of abstraction here..
- * This line can probably go away now that
- * there's no byte compiler, but I've got
- * too much to worry about right now to try
- * to make sure. -- WHN 2001-10-06 */
- && fixnump(code->trace_table_offset)
- /* Only when enabled */
- && verify_dynamic_code_check) {
- FSHOW((stderr,
- "/code object at %x in the dynamic space\n",
- start));
- }
-
- ncode_words = fixnum_value(code->code_size);
- nheader_words = HeaderValue(object);
- nwords = ncode_words + nheader_words;
- nwords = CEILING(nwords, 2);
- /* Scavenge the boxed section of the code data block */
- verify_space(start + 1, nheader_words - 1);
-
- /* Scavenge the boxed section of each function
- * object in the code data block. */
- fheaderl = code->entry_points;
- while (fheaderl != NIL) {
- fheaderp =
- (struct simple_fun *) native_pointer(fheaderl);
- gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
- verify_space(&fheaderp->name, 1);
- verify_space(&fheaderp->arglist, 1);
- verify_space(&fheaderp->type, 1);
- fheaderl = fheaderp->next;
- }
- count = nwords;
- break;
- }
-
- /* unboxed objects */
- case BIGNUM_WIDETAG:
+ case UNBOUND_MARKER_WIDETAG:
+ case INSTANCE_HEADER_WIDETAG:
+ case FDEFN_WIDETAG:
+ count = 1;
+ break;
+
+ case CODE_HEADER_WIDETAG:
+ {
+ lispobj object = *start;
+ struct code *code;
+ long nheader_words, ncode_words, nwords;
+ lispobj fheaderl;
+ struct simple_fun *fheaderp;
+
+ code = (struct code *) start;
+
+ /* Check that it's not in the dynamic space.
+                 * FIXME: Isn't it supposed to be OK for code
+ * objects to be in the dynamic space these days? */
+ if (is_in_dynamic_space
+ /* It's ok if it's byte compiled code. The trace
+ * table offset will be a fixnum if it's x86
+ * compiled code - check.
+ *
+ * FIXME: #^#@@! lack of abstraction here..
+ * This line can probably go away now that
+ * there's no byte compiler, but I've got
+ * too much to worry about right now to try
+ * to make sure. -- WHN 2001-10-06 */
+ && fixnump(code->trace_table_offset)
+ /* Only when enabled */
+ && verify_dynamic_code_check) {
+ FSHOW((stderr,
+ "/code object at %x in the dynamic space\n",
+ start));
+ }
+
+ ncode_words = fixnum_value(code->code_size);
+ nheader_words = HeaderValue(object);
+ nwords = ncode_words + nheader_words;
+ nwords = CEILING(nwords, 2);
+ /* Scavenge the boxed section of the code data block */
+ verify_space(start + 1, nheader_words - 1);
+
+ /* Scavenge the boxed section of each function
+ * object in the code data block. */
+ fheaderl = code->entry_points;
+ while (fheaderl != NIL) {
+ fheaderp =
+ (struct simple_fun *) native_pointer(fheaderl);
+ gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+ verify_space(&fheaderp->name, 1);
+ verify_space(&fheaderp->arglist, 1);
+ verify_space(&fheaderp->type, 1);
+ fheaderl = fheaderp->next;
+ }
+ count = nwords;
+ break;
+ }
+
+ /* unboxed objects */
+ case BIGNUM_WIDETAG:
- /* Skip free pages which should already be zero filled. */
- if (page_table[page].allocated != FREE_PAGE_FLAG) {
- void *page_start, *addr;
-
- /* Mark the page free. The other slots are assumed invalid
- * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
- * should not be write-protected -- except that the
- * generation is used for the current region but it sets
- * that up. */
- page_table[page].allocated = FREE_PAGE_FLAG;
- page_table[page].bytes_used = 0;
-
- /* Zero the page. */
- page_start = (void *)page_address(page);
-
- /* First, remove any write-protection. */
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[page].write_protected = 0;
-
- os_invalidate(page_start,PAGE_BYTES);
- addr = os_validate(page_start,PAGE_BYTES);
- if (addr == NULL || addr != page_start) {
- lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
- page_start,
- addr);
- }
- } else if (gencgc_zero_check_during_free_heap) {
- /* Double-check that the page is zero filled. */
- long *page_start, i;
- gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
- gc_assert(page_table[page].bytes_used == 0);
- page_start = (long *)page_address(page);
- for (i=0; i<1024; i++) {
- if (page_start[i] != 0) {
- lose("free region not zero at %x", page_start + i);
- }
- }
- }
+ /* Skip free pages which should already be zero filled. */
+ if (page_table[page].allocated != FREE_PAGE_FLAG) {
+ void *page_start, *addr;
+
+        /* Mark the page free. The other slots are assumed invalid
+         * when the allocation is FREE_PAGE_FLAG and bytes_used is 0,
+         * and the page should not be write-protected -- except that
+         * the generation slot is used for the current region, but
+         * that code sets it up itself. */
+ page_table[page].allocated = FREE_PAGE_FLAG;
+ page_table[page].bytes_used = 0;
+
+ /* Zero the page. */
+ page_start = (void *)page_address(page);
+
+ /* First, remove any write-protection. */
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page].write_protected = 0;
+
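+        /* Unmap and remap the page so the OS supplies fresh,
+         * zero-filled memory. */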
+ os_invalidate(page_start,PAGE_BYTES);
+ addr = os_validate(page_start,PAGE_BYTES);
+ if (addr == NULL || addr != page_start) {
+ lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
+ page_start,
+ addr);
+ }
+ } else if (gencgc_zero_check_during_free_heap) {
+ /* Double-check that the page is zero filled. */
+ long *page_start, i;
+ gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_table[page].bytes_used == 0);
+ page_start = (long *)page_address(page);
+ for (i=0; i<1024; i++) {
+ if (page_start[i] != 0) {
+ lose("free region not zero at %x", page_start + i);
+ }
+ }
+ }