page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].large_object = 0;
page_table[first_page].first_object_offset = 0;
}
if (unboxed)
- gc_assert(page_table[first_page].allocated == BOXED_PAGE);
- page_table[first_page].allocated |= OPEN_REGION_PAGE;
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
for (i = first_page+1; i <= last_page; i++) {
if (unboxed)
page_table[i].gen = gc_alloc_generation;
page_table[i].large_object = 0;
/* This may not be necessary for unboxed regions (think it was
* broken before!) */
page_table[i].first_object_offset =
alloc_region->start_addr - page_address(i);
* first_object_offset. */
if (page_table[first_page].bytes_used == 0)
gc_assert(page_table[first_page].first_object_offset == 0);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
gc_assert(page_table[next_page].bytes_used == 0);
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
}
/* Unallocate any unused pages. */
while (next_page <= alloc_region->last_page) {
gc_assert(page_table[next_page].bytes_used == 0);
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].first_object_offset = 0;
page_table[first_page].large_object = 1;
}
if (unboxed)
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 1);
(page_table[first_page].large_object == 0) &&
(page_table[first_page].gen == gc_alloc_generation) &&
(page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
while (((bytes_found < nbytes)
|| (!large_p && (num_pages < 2)))
&& (last_page < (NUM_PAGES-1))
remaining_bytes = nwords*4;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
-PAGE_BYTES*(next_page-first_page));
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
-(next_page - first_page)*PAGE_BYTES)) {
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
remaining_bytes = nwords*4;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
- || (page_table[next_page].allocated == BOXED_PAGE));
+ gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
-PAGE_BYTES*(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- ((page_table[next_page].allocated == UNBOXED_PAGE)
- || (page_table[next_page].allocated == BOXED_PAGE)) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
-(next_page - first_page)*PAGE_BYTES)) {
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
/* Check whether it's a vector or bignum object. */
switch (widetag_of(where[0])) {
case SIMPLE_VECTOR_WIDETAG:
remaining_bytes = nwords*4;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == BOXED_PAGE)
- || (page_table[next_page].allocated == UNBOXED_PAGE));
+ gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset ==
-PAGE_BYTES*(next_page-first_page));
- ((page_table[next_page].allocated == UNBOXED_PAGE)
- || (page_table[next_page].allocated == BOXED_PAGE)) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
-(next_page - first_page)*PAGE_BYTES)) {
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
|| (page_table[addr_page_index].bytes_used == 0)
|| (page_table[addr_page_index].gen != from_space)
/* Skip if already marked dont_move. */
|| (page_table[addr_page_index].dont_move != 0))
return;
/* (Now that we know that addr_page_index is in range, it's
* safe to index into page_table[] with it.) */
region_allocation = page_table[addr_page_index].allocated;
* free area in which case it's ignored here. Note it gets
* through the valid pointer test above because the tail looks
* like conses. */
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
|| (((unsigned)addr & (PAGE_BYTES - 1))
/* Check whether this is the last page in this contiguous block.. */
if ((page_table[i].bytes_used < PAGE_BYTES)
/* ..or it is PAGE_BYTES and is the last in the block */
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
|| (page_table[i+1].first_object_offset == 0))
gc_assert(page_table[page].bytes_used != 0);
/* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
|| page_table[page].dont_move
/* Check that it's in the dynamic space */
if (index != -1)
if (/* Does it point to a younger or the temp. generation? */
&& (page_table[index].bytes_used != 0)
&& ((page_table[index].gen < gen)
|| (page_table[index].gen == NUM_GENERATIONS)))
write_protected && page_table[last_page].write_protected;
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].first_object_offset == 0))
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < NUM_PAGES; i++) {
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)) {
"/starting one full scan of newspace generation %d\n",
generation));
for (i = 0; i < last_free_page; i++) {
- /* note that this skips over open regions when it encounters them */
- if ((page_table[i].allocated == BOXED_PAGE)
+ /* Note that this skips over open regions when it encounters them. */
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& ((page_table[i].write_protected == 0)
* contiguous block */
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].first_object_offset == 0))
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < NUM_PAGES; i++) {
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)
bytes_freed += page_table[last_page].bytes_used;
generations[page_table[last_page].gen].bytes_allocated -=
page_table[last_page].bytes_used;
if (page_index != -1) {
/* If it's within the dynamic space it should point to a used
* page. XX Could check the offset too. */
&& (page_table[page_index].bytes_used == 0))
lose ("Ptr %x @ %x sees free page.", thing, start);
/* Check that it doesn't point to a forwarding pointer! */
* there's no byte compiler, but I've got
* too much to worry about right now to try
* to make sure. -- WHN 2001-10-06 */
&& (page_table[i].bytes_used != 0)
&& !page_table[i].dont_move
&& (page_table[i].gen == generation)) {
for (page = 0; page < NUM_PAGES; page++) {
/* Skip free pages which should already be zero filled. */
* should not be write-protected -- except that the
* generation is used for the current region but it sets
* that up. */
/* Initialize each page structure. */
for (i = 0; i < NUM_PAGES; i++) {
/* Initialize all pages as free. */
page_table[page].gen = 0;
page_table[page].bytes_used = PAGE_BYTES;
page_table[page].large_object = 0;