page_index_t page_table_pages;
struct page *page_table;
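+/* Page-type predicates over page_table[page].allocated. These assume
+ * the flag layout from gencgc-internal.h: CODE_PAGE_FLAG is the union
+ * of BOXED_PAGE_FLAG and UNBOXED_PAGE_FLAG (so code pages satisfy both
+ * single-flag tests), and OPEN_REGION_PAGE_FLAG marks pages belonging
+ * to a still-open allocation region. */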
+static inline boolean page_allocated_p(page_index_t page) {
+ return (page_table[page].allocated != FREE_PAGE_FLAG);
+}
+
+static inline boolean page_no_region_p(page_index_t page) {
+ return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
+}
+
+static inline boolean page_allocated_no_region_p(page_index_t page) {
+ return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
+ && page_no_region_p(page));
+}
+
+static inline boolean page_free_p(page_index_t page) {
+ return (page_table[page].allocated == FREE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_p(page_index_t page) {
+ return (page_table[page].allocated & BOXED_PAGE_FLAG);
+}
+
+static inline boolean code_page_p(page_index_t page) {
+ return (page_table[page].allocated & CODE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_no_region_p(page_index_t page) {
+ return page_boxed_p(page) && page_no_region_p(page);
+}
+
+static inline boolean page_unboxed_p(page_index_t page) {
+ /* Both flags set == boxed code page */
+ return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
+ && !page_boxed_p(page));
+}
+
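+/* True when PAGE is a boxed, in-use, non-pinned page of GENERATION,
+ * i.e. a candidate for write-protection. */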
+static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
+ return (page_boxed_no_region_p(page)
+ && (page_table[page].bytes_used != 0)
+ && !page_table[page].dont_move
+ && (page_table[page].gen == generation));
+}
+
/* To map addresses to page structures the address of the first page
* is needed. */
static void *heap_base = NULL;
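+/* A sketch of that mapping (cf. find_page_index()), assuming ADDR
+ * lies within the heap:
+ *   page_index = ((char *)addr - (char *)heap_base) / PAGE_BYTES;
+ * addresses outside the heap map to -1. */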
unsigned long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected == 1))
count++;
long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].gen == generation))
count++;
return count;
page_index_t i;
long count = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].dont_move != 0)) {
++count;
}
page_index_t i;
unsigned long result = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].gen == gen))
result += page_table[i].bytes_used;
}
/* Count the number of boxed pages within the given
* generation. */
- if (page_table[j].allocated & BOXED_PAGE_FLAG) {
+ if (page_boxed_p(j)) {
if (page_table[j].large_object)
large_boxed_cnt++;
else
if(page_table[j].dont_move) pinned_cnt++;
/* Count the number of unboxed pages within the given
* generation. */
- if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+ if (page_unboxed_p(j)) {
if (page_table[j].large_object)
large_unboxed_cnt++;
else
if (large) {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
return generations[generation].alloc_large_unboxed_start_page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
return generations[generation].alloc_large_start_page;
} else {
lose("bad page type flag: %d", page_type_flag);
} else {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
return generations[generation].alloc_unboxed_start_page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
return generations[generation].alloc_start_page;
} else {
lose("bad page_type_flag: %d", page_type_flag);
if (large) {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
generations[generation].alloc_large_unboxed_start_page = page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
generations[generation].alloc_large_start_page = page;
} else {
lose("bad page type flag: %d", page_type_flag);
} else {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
generations[generation].alloc_unboxed_start_page = page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
generations[generation].alloc_start_page = page;
} else {
lose("bad page type flag: %d", page_type_flag);
gc_assert(page_table[first_page].region_start_offset == 0);
page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- gc_assert(page_table[first_page].allocated == page_type_flag);
+ gc_assert(page_table[first_page].allocated & page_type_flag);
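+ /* & rather than ==: code pages have both page-type flags set. */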
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
* region, and set the bytes_used. */
while (more) {
page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- gc_assert(page_table[next_page].allocated==page_type_flag);
+ gc_assert(page_table[next_page].allocated & page_type_flag);
gc_assert(page_table[next_page].bytes_used == 0);
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);
/* Add the region to the new_areas if requested. */
- if (BOXED_PAGE_FLAG == page_type_flag)
+ if (BOXED_PAGE_FLAG & page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used, region_size);
/*
* region_start_offset pointer to the start of the region, and set
* the bytes_used. */
while (more) {
- gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_free_p(next_page));
gc_assert(page_table[next_page].bytes_used == 0);
page_table[next_page].allocated = page_type_flag;
page_table[next_page].gen = gc_alloc_generation;
generations[gc_alloc_generation].bytes_allocated += nbytes;
/* Add the region to the new_areas if requested. */
- if (BOXED_PAGE_FLAG == page_type_flag)
+ if (BOXED_PAGE_FLAG & page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used,nbytes);
/* Bump up last_free_page */
do {
first_page = restart_page;
while ((first_page < page_table_pages) &&
- (page_table[first_page].allocated != FREE_PAGE_FLAG))
+ page_allocated_p(first_page))
first_page++;
last_page = first_page;
bytes_found = PAGE_BYTES;
while ((bytes_found < nbytes) &&
(last_page < (page_table_pages-1)) &&
- (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ page_free_p(last_page+1)) {
last_page++;
bytes_found += PAGE_BYTES;
gc_assert(0 == page_table[last_page].bytes_used);
* pages: this helps avoid excessive conservativism. */
first_page = restart_page;
while (first_page < page_table_pages) {
- if (page_table[first_page].allocated == FREE_PAGE_FLAG)
+ if (page_free_p(first_page))
{
gc_assert(0 == page_table[first_page].bytes_used);
bytes_found = PAGE_BYTES;
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_boxed_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_boxed_p(next_page));
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
+ page_boxed_p(next_page) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
+ gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
lispobj *start;
/* The address may be invalid, so do some checks. */
- if ((page_index == -1) ||
- (page_table[page_index].allocated == FREE_PAGE_FLAG))
+ if ((page_index == -1) || page_free_p(page_index))
return NULL;
start = (lispobj *)page_region_start(page_index);
return (gc_search_space(start,
static int
looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
{
- /* We need to allow raw pointers into Code objects for return
- * addresses. This will also pick up pointers to functions in code
- * objects. */
- if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)
- /* XXX could do some further checks here */
- return 1;
-
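+ /* Raw pointers into code objects (return addresses and the like)
+ * are now handled by pinning whole code pages in preserve_pointer(). */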
if (!is_lisp_pointer((lispobj)pointer)) {
return 0;
}
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
+ gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
/* quick check 1: Address is quite likely to have been invalid. */
if ((addr_page_index == -1)
- || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ || page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
|| (page_table[addr_page_index].gen != from_space)
/* Skip if already marked dont_move. */
* expensive but important, since it vastly reduces the
* probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
- if (!(possibly_valid_dynamic_space_pointer(addr)))
+ if (!(code_page_p(addr_page_index)
+ || (is_lisp_pointer((lispobj)addr) &&
+ possibly_valid_dynamic_space_pointer(addr))))
return;
/* Find the beginning of the region. Note that there may be
* free area in which case it's ignored here. Note it gets
* through the valid pointer test above because the tail looks
* like conses. */
- if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ if (page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
|| (((unsigned long)addr & (PAGE_BYTES - 1))
/* Check whether this is the last page in this contiguous block.. */
if ((page_table[i].bytes_used < PAGE_BYTES)
/* ..or it is PAGE_BYTES and is the last in the block */
- || (page_table[i+1].allocated == FREE_PAGE_FLAG)
+ || page_free_p(i+1)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
|| (page_table[i+1].region_start_offset == 0))
long num_words = page_table[page].bytes_used / N_WORD_BYTES;
/* Shouldn't be a free page. */
- gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
+ gc_assert(page_allocated_p(page));
gc_assert(page_table[page].bytes_used != 0);
/* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
/* FIXME: What's the reason for not write-protecting pinned pages? */
|| page_table[page].dont_move
- || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
+ || page_unboxed_p(page))
return (0);
/* Scan the page for pointers to younger generations or the
/* Check that it's in the dynamic space */
if (index != -1)
if (/* Does it point to a younger or the temp. generation? */
- ((page_table[index].allocated != FREE_PAGE_FLAG)
+ (page_allocated_p(index)
&& (page_table[index].bytes_used != 0)
&& ((page_table[index].gen < gen)
|| (page_table[index].gen == SCRATCH_GENERATION)))
for (i = 0; i < last_free_page; i++) {
generation_index_t generation = page_table[i].gen;
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ if (page_boxed_p(i)
&& (page_table[i].bytes_used != 0)
&& (generation != new_space)
&& (generation >= from)
write_protected && page_table[last_page].write_protected;
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].region_start_offset == 0))
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < page_table_pages; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)) {
generation));
for (i = 0; i < last_free_page; i++) {
/* Note that this skips over open regions when it encounters them. */
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ if (page_boxed_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& ((page_table[i].write_protected == 0)
* contiguous block */
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].region_start_offset == 0))
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < page_table_pages; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)
page_index_t i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == from_space)) {
void *page_start;
do {
/* Find a first page for the next region of pages. */
while ((first_page < last_free_page)
- && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
+ && (page_free_p(first_page)
|| (page_table[first_page].bytes_used == 0)
|| (page_table[first_page].gen != from_space)))
first_page++;
last_page++;
}
while ((last_page < last_free_page)
- && (page_table[last_page].allocated != FREE_PAGE_FLAG)
+ && page_allocated_p(last_page)
&& (page_table[last_page].bytes_used != 0)
&& (page_table[last_page].gen == from_space));
if (page_index != -1) {
/* If it's within the dynamic space it should point to a used
* page. XX Could check the offset too. */
- if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(page_index)
&& (page_table[page_index].bytes_used == 0))
lose ("Ptr %x @ %x sees free page.\n", thing, start);
/* Check that it doesn't point to a forwarding pointer! */
page_index_t i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
page_index_t last_page;
page_index_t page;
for (page = 0; page < last_free_page; page++) {
- if (page_table[page].allocated == FREE_PAGE_FLAG) {
+ if (page_free_p(page)) {
/* The whole page should be zero filled. */
long *start_addr = (long *)page_address(page);
long size = 1024;
gc_assert(generation < SCRATCH_GENERATION);
for (start = 0; start < last_free_page; start++) {
- if ((page_table[start].allocated == BOXED_PAGE_FLAG)
- && (page_table[start].bytes_used != 0)
- && !page_table[start].dont_move
- && (page_table[start].gen == generation)) {
+ if (protect_page_p(start, generation)) {
void *page_start;
page_index_t last;
page_table[start].write_protected = 1;
for (last = start + 1; last < last_free_page; last++) {
- if ((page_table[last].allocated != BOXED_PAGE_FLAG)
- || (page_table[last].bytes_used == 0)
- || page_table[last].dont_move
- || (page_table[last].gen != generation))
+ if (!protect_page_p(last, generation))
break;
page_table[last].write_protected = 1;
}
page_index_t last_page = -1, i;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0))
+ if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
last_page = i;
last_free_page = last_page+1;
page_index_t first_page, last_page;
for (first_page = from; first_page <= to; first_page++) {
- if (page_table[first_page].allocated != FREE_PAGE_FLAG ||
- page_table[first_page].need_to_zero == 0) {
+ if (page_allocated_p(first_page) ||
+ (page_table[first_page].need_to_zero == 0)) {
continue;
}
last_page = first_page + 1;
- while (page_table[last_page].allocated == FREE_PAGE_FLAG &&
- last_page < to &&
- page_table[last_page].need_to_zero == 1) {
+ while (page_free_p(last_page) &&
+ (last_page < to) &&
+ (page_table[last_page].need_to_zero == 1)) {
last_page++;
}
for (page = 0; page < page_table_pages; page++) {
/* Skip free pages which should already be zero filled. */
- if (page_table[page].allocated != FREE_PAGE_FLAG) {
+ if (page_allocated_p(page)) {
void *page_start, *addr;
/* Mark the page free. The other slots are assumed invalid
/* Double-check that the page is zero filled. */
long *page_start;
page_index_t i;
- gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_free_p(page));
gc_assert(page_table[page].bytes_used == 0);
page_start = (long *)page_address(page);
for (i=0; i<1024; i++) {
void *alloc_ptr = (void *)get_alloc_pointer();
lispobj *prev=(lispobj *)page_address(page);
generation_index_t gen = PSEUDO_STATIC_GENERATION;
-
do {
lispobj *first,*ptr= (lispobj *)page_address(page);
page_table[page].allocated = BOXED_PAGE_FLAG;
/* Select correct region, and call general_alloc_internal with it.
* For other than boxed allocation we must lock first, since the
* region is shared. */
- if (BOXED_PAGE_FLAG == page_type_flag) {
+ if (BOXED_PAGE_FLAG & page_type_flag) {
#ifdef LISP_FEATURE_SB_THREAD
struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
#else
lispobj *
alloc(long nbytes)
{
- general_alloc(nbytes, BOXED_PAGE_FLAG);
+ return general_alloc(nbytes, BOXED_PAGE_FLAG);
}
\f
/*
* shared support for the OS-dependent signal handlers which
* catch GENCGC-related write-protect violations
*/
-
void unhandled_sigmemoryfault(void* addr);
/* Depending on which OS we're running under, different signals might
page_index_t i;
for (i = 0; i < last_free_page; i++) {
- if (page_table[i].allocated == FREE_PAGE_FLAG) {
+ if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(i),
PAGE_BYTES,