#endif
/* the minimum size (in bytes) for a large object */
-unsigned large_object_size = 4 * 4096;
+/* FIXME: Should this really be PAGE_BYTES? */
+unsigned large_object_size = 4 * PAGE_BYTES;
+
\f
/*
* debugging
int new_space;
-/* FIXME: It would be nice to use this symbolic constant instead of
- * bare 4096 almost everywhere. We could also use an assertion that
- * it's equal to getpagesize(). */
-
-#define PAGE_BYTES 4096
-
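/* Sketch, not part of this patch: the deleted FIXME above suggested
 * asserting that PAGE_BYTES matches the OS page size.  Assuming
 * <unistd.h> and the runtime's lose() are in scope, a startup check
 * might look like the following (check_page_size is a hypothetical
 * helper name): */
#include <unistd.h>
static void
check_page_size(void)
{
    if (getpagesize() != PAGE_BYTES)
        lose("PAGE_BYTES (%d) != getpagesize() (%d)",
             PAGE_BYTES, getpagesize());
}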
/* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
* NUM_PAGES is set from the size of the dynamic space. */
inline void *
page_address(int page_num)
{
- return (heap_base + (page_num * 4096));
+ return (heap_base + (page_num * PAGE_BYTES));
}
/* Find the page index within the page_table for the given
int index = addr-heap_base;
if (index >= 0) {
- index = ((unsigned int)index)/4096;
+ index = ((unsigned int)index)/PAGE_BYTES;
if (index < NUM_PAGES)
return (index);
}
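/* Sketch, not part of this patch: page_address() and find_page_index()
 * are inverses for in-heap addresses, an invariant a self-test could
 * assert (check_page_mapping is a hypothetical helper name): */
static void
check_page_mapping(void)
{
    int n;
    for (n = 0; n < NUM_PAGES; n++)
        gc_assert(find_page_index(page_address(n)) == n);
}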
i,
boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*4096
+ (count_generation_pages(i)*PAGE_BYTES
- generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[gc_alloc_generation].alloc_start_page;
}
last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region);
- bytes_found=(4096 - page_table[first_page].bytes_used)
- + 4096*(last_page-first_page);
+ bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
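    /* Worked example, illustrative numbers only: with PAGE_BYTES =
     * 4096, if first_page already has 1000 bytes used and last_page =
     * first_page + 2, then bytes_found = (4096 - 1000) + 4096*2 =
     * 11288 bytes available for the region. */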
/* Set up the alloc_region. */
alloc_region->first_page = first_page;
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
0);
}
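    /* Note, not part of this patch: last_free_page is one past the
     * highest page in use, so heap_base + last_free_page*PAGE_BYTES is
     * the first byte beyond the occupied heap -- the value exported to
     * Lisp as ALLOCATION_POINTER. */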
release_spinlock(&free_pages_lock);
gc_abort();
}
- new_area_start = 4096*first_page + offset;
+ new_area_start = PAGE_BYTES*first_page + offset;
/* Search backwards for a prior area that this follows from. If
       found, this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
unsigned area_end =
- 4096*((*new_areas)[i].page)
+ PAGE_BYTES*((*new_areas)[i].page)
+ (*new_areas)[i].offset
+ (*new_areas)[i].size;
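        /* Worked example, illustrative numbers only: a prior area on
         * page 3 at offset 0 with size 2000 ends at 3*PAGE_BYTES +
         * 2000; a new area starting exactly there extends that record
         * in place instead of consuming another new_areas slot. */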
/*FSHOW((stderr,
/* Calculate the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
- bytes_used = 4096;
+ if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
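    /* Worked example, illustrative numbers only: if free_pointer lies
     * 5000 bytes past page_address(first_page) with PAGE_BYTES = 4096,
     * bytes_used is clamped to 4096 and more = 1, so the accounting
     * continues on the next page for the remaining 904 bytes. */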
page_table[first_page].bytes_used = bytes_used;
/* Calculate the number of bytes used in this page. */
more = 0;
if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>4096) {
- bytes_used = 4096;
+ - page_address(next_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
/* Calculate the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
- bytes_used = 4096;
+ if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
page_table[next_page].large_object = large;
page_table[next_page].first_object_offset =
- orig_first_page_bytes_used - 4096*(next_page-first_page);
+ orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
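    /* Worked example, illustrative numbers only: if the first page had
     * orig_first_page_bytes_used = 500, then two pages later
     * first_object_offset = 500 - 4096*2 = -7692, i.e. the object
     * starts 7692 bytes before that page's base address. */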
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
- bytes_used = 4096;
+ if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),0);
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
}
release_spinlock(&free_pages_lock);
(page_table[first_page].large_object == 0) &&
(gc_alloc_generation == 0) &&
(page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (4096-32)) &&
+ (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
(page_table[first_page].write_protected == 0) &&
(page_table[first_page].dont_move == 0))
break;
gc_assert(page_table[first_page].write_protected == 0);
last_page = first_page;
- bytes_found = 4096 - page_table[first_page].bytes_used;
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
num_pages = 1;
while (((bytes_found < nbytes)
|| (alloc_region && (num_pages < 2)))
&& (page_table[last_page+1].allocated == FREE_PAGE)) {
last_page++;
num_pages++;
- bytes_found += 4096;
+ bytes_found += PAGE_BYTES;
gc_assert(page_table[last_page].write_protected == 0);
}
- region_size = (4096 - page_table[first_page].bytes_used)
- + 4096*(last_page-first_page);
+ region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
gc_assert(bytes_found == region_size);
restart_page = last_page + 1;
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_table[next_page].allocated == BOXED_PAGE);
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
+ os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
page_table[next_page].write_protected = 0;
}
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
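    /* Worked trace, illustrative numbers only: with nwords*4 = 10000
     * bytes and PAGE_BYTES = 4096, the loop runs twice (remaining_bytes
     * 10000 -> 5904 -> 1808), leaving 1808 bytes for the final,
     * partially-used page handled after the loop. */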
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
(page_table[next_page].allocated == BOXED_PAGE) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
/* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected as they
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
|| (page_table[next_page].allocated == BOXED_PAGE));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
page_table[next_page].allocated = UNBOXED_PAGE;
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
((page_table[next_page].allocated == UNBOXED_PAGE)
|| (page_table[next_page].allocated == BOXED_PAGE)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
    /* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected, even if
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert((page_table[next_page].allocated == BOXED_PAGE)
|| (page_table[next_page].allocated == UNBOXED_PAGE));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset ==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].allocated = boxed;
/* Shouldn't be write-protected at this stage. Essential that the
* pages aren't. */
gc_assert(!page_table[next_page].write_protected);
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
((page_table[next_page].allocated == UNBOXED_PAGE)
|| (page_table[next_page].allocated == BOXED_PAGE)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
    /* It checks out OK, free the page. We don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write protected as they
/* quick check 2: Check the offset within the page.
*
- * FIXME: The mask should have a symbolic name, and ideally should
- * be derived from page size instead of hardwired to 0xfff.
- * (Also fix other uses of 0xfff, elsewhere.) */
- if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used)
+ */
+ if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
return;
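    /* Sketch, not part of this patch: masking with (PAGE_BYTES - 1) is
     * only equivalent to the old hardwired 0xfff when PAGE_BYTES is a
     * power of two; a compile-time guard could enforce that: */
#if (PAGE_BYTES & (PAGE_BYTES - 1)) != 0
#error "PAGE_BYTES must be a power of two for the offset-mask trick"
#endif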
/* Filter out anything which can't be a pointer to a Lisp object
while (page_table[first_page].first_object_offset != 0) {
--first_page;
/* Do some checks. */
- gc_assert(page_table[first_page].bytes_used == 4096);
+ gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
gc_assert(page_table[first_page].gen == from_space);
gc_assert(page_table[first_page].allocated == region_allocation);
}
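    /* Worked example, illustrative numbers only: if a large object
     * spans pages 10..13, pages 11..13 carry nonzero (negative)
     * first_object_offset values, so the loop steps first_page
     * 13 -> 12 -> 11 and stops at page 10, where the offset is 0. */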
if ((page_table[addr_page_index].allocated == FREE_PAGE)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
- || (((unsigned)addr & 0xfff)
+ || (((unsigned)addr & (PAGE_BYTES - 1))
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
"weird? ignore ptr 0x%x to freed area of large object\n",
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < 4096)
- /* ..or it is 4096 and is the last in the block */
+ if ((page_table[i].bytes_used < PAGE_BYTES)
+ /* ..or it is PAGE_BYTES and is the last in the block */
|| (page_table[i+1].allocated == FREE_PAGE)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
/*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
os_protect((void *)page_addr,
- 4096,
+ PAGE_BYTES,
OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
for (last_page = i; ; last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
|| (!(page_table[last_page+1].allocated & BOXED_PAGE))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
#endif
{
scavenge(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*4096)/4);
+ + (last_page-i)*PAGE_BYTES)/4);
/* Now scan the pages and write protect those
* that don't have pointers to younger
for (last_page = i; ;last_page++) {
/* Check whether this is the last page in this
* contiguous block */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
|| (!(page_table[last_page+1].allocated & BOXED_PAGE))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- page_table[i].first_object_offset)/4;
else
size = (page_table[last_page].bytes_used
- + (last_page-i)*4096
+ + (last_page-i)*PAGE_BYTES
- page_table[i].first_object_offset)/4;
{
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[i].write_protected) {
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[i].write_protected = 0;
}
}
void *page_start = (void *)page_address(last_page);
if (page_table[last_page].write_protected) {
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[last_page].write_protected = 0;
}
}
page_start = (void *)page_address(first_page);
- os_invalidate(page_start, 4096*(last_page-first_page));
- addr = os_validate(page_start, 4096*(last_page-first_page));
+ os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
+ addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
if (addr == NULL || addr != page_start) {
/* Is this an error condition? I couldn't really tell from
* the old CMU CL code, which fprintf'ed a message with
int *page_start;
page_start = (int *)page_address(first_page);
- i586_bzero(page_start, 4096*(last_page-first_page));
+ i586_bzero(page_start, PAGE_BYTES*(last_page-first_page));
}
first_page = last_page;
for (last_page = i; ;last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
|| (page_table[last_page+1].allocated != region_allocation)
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
break;
verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*4096)/4);
+ + (last_page-i)*PAGE_BYTES)/4);
i = last_page;
}
}
}
}
} else {
- int free_bytes = 4096 - page_table[page].bytes_used;
+ int free_bytes = PAGE_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
int *start_addr = (int *)((unsigned)page_address(page)
+ page_table[page].bytes_used);
page_start = (void *)page_address(i);
os_protect(page_start,
- 4096,
+ PAGE_BYTES,
OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
- /* FIXME: 4096 should be symbolic constant here and
- * prob'ly elsewhere too. */
- num_dont_move_pages * 4096);
+ num_dont_move_pages * PAGE_BYTES);
}
#endif
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),0);
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
return 0; /* dummy value: return something ... */
}
page_start = (void *)page_address(page);
/* First, remove any write-protection. */
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[page].write_protected = 0;
- os_invalidate(page_start,4096);
- addr = os_validate(page_start,4096);
+ os_invalidate(page_start,PAGE_BYTES);
+ addr = os_validate(page_start,PAGE_BYTES);
if (addr == NULL || addr != page_start) {
lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
page_start,
do {
page_table[page].allocated = BOXED_PAGE;
page_table[page].gen = 0;
- page_table[page].bytes_used = 4096;
+ page_table[page].bytes_used = PAGE_BYTES;
page_table[page].large_object = 0;
page_table[page].first_object_offset =
(void *)DYNAMIC_SPACE_START - page_address(page);
- addr += 4096;
+ addr += PAGE_BYTES;
page++;
} while (addr < alloc_ptr);
- generations[0].bytes_allocated = 4096*page;
- bytes_allocated = 4096*page;
+ generations[0].bytes_allocated = PAGE_BYTES*page;
+ bytes_allocated = PAGE_BYTES*page;
}
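/* Worked example, illustrative numbers only: if alloc_ptr lies 3 pages
 * plus 100 bytes past heap_base, the do-while above runs four times,
 * so bytes_allocated = 4*PAGE_BYTES; the final, partially-filled page
 * is deliberately accounted as fully used. */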