#include "validate.h"
#include "lispregs.h"
#include "arch.h"
-#include "fixnump.h"
#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
boolean enable_page_protection = 1;
/* the minimum size (in bytes) for a large object */
-unsigned long large_object_size = 4 * PAGE_BYTES;
+long large_object_size = 4 * PAGE_BYTES;
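+/* (For illustration: with a hypothetical PAGE_BYTES of 4096, any
+ * object of 16384 bytes or more is treated as large.) */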
\f
/*
/* An array of page structures is allocated on gc initialization.
 * This helps quickly map between an address and its page structure.
* page_table_pages is set from the size of the dynamic space. */
-unsigned page_table_pages;
+page_index_t page_table_pages;
struct page *page_table;
/* To map addresses to page structures the address of the first page
return (heap_base + (page_num * PAGE_BYTES));
}
+/* Calculate the address where the allocation region associated with
+ * the page starts. */
+static inline void *
+page_region_start(page_index_t page_index)
+{
+ return page_address(page_index)-page_table[page_index].region_start_offset;
+}
+
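+/* For illustration: if a region begins at the start of page 3 and
+ * runs through page 5, page 5 has region_start_offset == 2*PAGE_BYTES,
+ * so page_region_start(5) == page_address(3).  (The old
+ * first_object_offset stored this same distance negated.) */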
/* Find the page index within the page_table for the given
* address. Return -1 on failure. */
inline page_index_t
find_page_index(void *addr)
{
- page_index_t index = addr-heap_base;
-
- if (index >= 0) {
- index = ((unsigned long)index)/PAGE_BYTES;
+ if (addr >= heap_base) {
+ page_index_t index = ((pointer_sized_uint_t)addr -
+ (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
if (index < page_table_pages)
return (index);
}
-
return (-1);
}
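+/* e.g. (hypothetical values) with heap_base == 0x10000000 and
+ * PAGE_BYTES == 4096, find_page_index((void *)0x10001800) == 1;
+ * addresses below heap_base or past the last page yield -1. */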
+static size_t
+npage_bytes(long npages)
+{
+ gc_assert(npages>=0);
+ return ((unsigned long)npages)*PAGE_BYTES;
+}
+
+/* Check that X is a higher address than Y and return offset from Y to
+ * X in bytes. */
+static inline size_t
+void_diff(void *x, void *y)
+{
+ gc_assert(x >= y);
+ return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
+}
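+/* e.g. void_diff(page_address(5), page_address(4)) == PAGE_BYTES,
+ * whereas calling it with the arguments swapped trips the assertion
+ * instead of silently wrapping around to a huge unsigned value. */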
+
/* a structure to hold the state of a generation */
struct generation {
page_index_t alloc_large_unboxed_start_page;
/* the bytes allocated to this generation */
- long bytes_allocated;
+ unsigned long bytes_allocated;
/* the number of bytes at which to trigger a GC */
- long gc_trigger;
+ unsigned long gc_trigger;
/* to calculate a new level for gc_trigger */
- long bytes_consed_between_gc;
+ unsigned long bytes_consed_between_gc;
/* the number of GCs since the last raise */
int num_gc;
* objects are added from a GC of a younger generation. Dividing by
* the bytes_allocated will give the average age of the memory in
* this generation since its last GC. */
- long cum_sum_bytes_allocated;
+ unsigned long cum_sum_bytes_allocated;
/* a minimum average memory age before a GC will occur helps
* prevent a GC when a large number of new live objects have been
count_write_protect_generation_pages(generation_index_t generation)
{
page_index_t i;
- long count = 0;
+ unsigned long count = 0;
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated != FREE_PAGE_FLAG)
/* Work through the pages and add up the number of bytes used for the
* given generation. */
-static long
+static unsigned long
count_generation_bytes_allocated (generation_index_t gen)
{
page_index_t i;
- long result = 0;
+ unsigned long result = 0;
for (i = 0; i < last_free_page; i++) {
if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].gen == gen))
large_unboxed_cnt,
pinned_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated),
+ (npage_bytes(count_generation_pages(i))
+ - generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
*/
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
    page_index_t i;
- void *addr = (void *) page_address(start), *new_addr;
- size_t length = PAGE_BYTES*(1+end-start);
+ void *addr = page_address(start), *new_addr;
+ size_t length = npage_bytes(1+end-start);
if (start > end)
return;
os_invalidate(addr, length);
new_addr = os_validate(addr, length);
if (new_addr == NULL || new_addr != addr) {
- lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", start, new_addr);
+        lose("zero_pages_with_mmap: page moved, 0x%08x ==> 0x%08x",
+             addr, new_addr);
}
for (i = start; i <= end; i++) {
return;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- fast_bzero(page_address(start), PAGE_BYTES*(1+end-start));
+ fast_bzero(page_address(start), npage_bytes(1+end-start));
#else
- bzero(page_address(start), PAGE_BYTES*(1+end-start));
+ bzero(page_address(start), npage_bytes(1+end-start));
#endif
}
{
page_index_t first_page;
page_index_t last_page;
- long bytes_found;
+ unsigned long bytes_found;
page_index_t i;
int ret;
}
last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
+ + npage_bytes(last_page-first_page);
/* Set up the alloc_region. */
alloc_region->first_page = first_page;
page_table[first_page].allocated = BOXED_PAGE_FLAG;
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].large_object = 0;
- page_table[first_page].first_object_offset = 0;
+ page_table[first_page].region_start_offset = 0;
}
if (unboxed)
page_table[i].large_object = 0;
/* This may not be necessary for unboxed regions (think it was
* broken before!) */
- page_table[i].first_object_offset =
- alloc_region->start_addr - page_address(i);
+ page_table[i].region_start_offset =
+ void_diff(page_address(i),alloc_region->start_addr);
page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
}
/* Bump up last_free_page. */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
- /* do we only want to call this on special occasions? like for boxed_region? */
- set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+ /* do we only want to call this on special occasions? like for
+ * boxed_region? */
+ set_alloc_pointer((lispobj)page_address(last_free_page));
}
ret = thread_mutex_unlock(&free_pages_lock);
gc_assert(ret == 0);
- /* we can do this after releasing free_pages_lock */
- if (gencgc_zero_check) {
- long *p;
- for (p = (long *)alloc_region->start_addr;
- p < (long *)alloc_region->end_addr; p++) {
- if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero.\n", p);
- }
- }
- }
-
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(first_page),
- PAGE_BYTES*(1+last_page-first_page),
+ npage_bytes(1+last_page-first_page),
OS_VM_PROT_ALL);
#endif
}
zero_dirty_pages(first_page, last_page);
+
+    /* We can do this after releasing free_pages_lock; it must also
+     * come after the zero_dirty_pages call above, which is what
+     * ensures the fresh region reads back as zero. */
+ if (gencgc_zero_check) {
+ long *p;
+ for (p = (long *)alloc_region->start_addr;
+ p < (long *)alloc_region->end_addr; p++) {
+ if (*p != 0) {
+ /* KLUDGE: It would be nice to use %lx and explicit casts
+ * (long) in code like this, so that it is less likely to
+ * break randomly when running on a machine with different
+ * word sizes. -- WHN 19991129 */
+ lose("The new region at %x is not zero (start=%p, end=%p).\n",
+ p, alloc_region->start_addr, alloc_region->end_addr);
+ }
+ }
+ }
}
/* If the record_new_objects flag is 2 then all new regions created
static page_index_t new_areas_ignore_page;
struct new_area {
page_index_t page;
- long offset;
- long size;
+ size_t offset;
+ size_t size;
};
static struct new_area (*new_areas)[];
static long new_areas_index;
/* Add a new area to new_areas. */
static void
-add_new_area(page_index_t first_page, long offset, long size)
+add_new_area(page_index_t first_page, size_t offset, size_t size)
{
unsigned long new_area_start,c;
long i;
gc_abort();
}
- new_area_start = PAGE_BYTES*first_page + offset;
+ new_area_start = npage_bytes(first_page) + offset;
/* Search backwards for a prior area that this follows from. If
       found, this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
unsigned long area_end =
- PAGE_BYTES*((*new_areas)[i].page)
+ npage_bytes((*new_areas)[i].page)
+ (*new_areas)[i].offset
+ (*new_areas)[i].size;
/*FSHOW((stderr,
int more;
page_index_t first_page;
page_index_t next_page;
- int bytes_used;
- long orig_first_page_bytes_used;
- long region_size;
- long byte_cnt;
+ unsigned long bytes_used;
+ unsigned long orig_first_page_bytes_used;
+ unsigned long region_size;
+ unsigned long byte_cnt;
int ret;
/* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
- gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
+ gc_assert(alloc_region->start_addr ==
+ (page_address(first_page)
+ + page_table[first_page].bytes_used));
/* All the pages used need to be updated */
/* Update the first page. */
/* If the page was free then set up the gen, and
- * first_object_offset. */
+ * region_start_offset. */
if (page_table[first_page].bytes_used == 0)
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
if (unboxed)
/* Calculate the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ if ((bytes_used = void_diff(alloc_region->free_pointer,
+ page_address(first_page)))
+ >PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
byte_cnt += bytes_used;
- /* All the rest of the pages should be free. We need to set their
- * first_object_offset pointer to the start of the region, and set
- * the bytes_used. */
+ /* All the rest of the pages should be free. We need to set
+ * their region_start_offset pointer to the start of the
+ * region, and set the bytes_used. */
while (more) {
page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
if (unboxed)
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
- gc_assert(page_table[next_page].first_object_offset ==
- alloc_region->start_addr - page_address(next_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ void_diff(page_address(next_page),
+ alloc_region->start_addr));
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>PAGE_BYTES) {
+ if ((bytes_used = void_diff(alloc_region->free_pointer,
+ page_address(next_page)))>PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
next_page++;
}
- region_size = alloc_region->free_pointer - alloc_region->start_addr;
+ region_size = void_diff(alloc_region->free_pointer,
+ alloc_region->start_addr);
bytes_allocated += region_size;
generations[gc_alloc_generation].bytes_allocated += region_size;
orig_first_page_bytes_used = page_table[first_page].bytes_used;
/* If the first page was free then set up the gen, and
- * first_object_offset. */
+ * region_start_offset. */
if (page_table[first_page].bytes_used == 0) {
if (unboxed)
page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
else
page_table[first_page].allocated = BOXED_PAGE_FLAG;
page_table[first_page].gen = gc_alloc_generation;
- page_table[first_page].first_object_offset = 0;
+ page_table[first_page].region_start_offset = 0;
page_table[first_page].large_object = 1;
}
next_page = first_page+1;
/* All the rest of the pages should be free. We need to set their
- * first_object_offset pointer to the start of the region, and
- * set the bytes_used. */
+ * region_start_offset pointer to the start of the region, and set
+ * the bytes_used. */
while (more) {
gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
gc_assert(page_table[next_page].bytes_used == 0);
page_table[next_page].gen = gc_alloc_generation;
page_table[next_page].large_object = 1;
- page_table[next_page].first_object_offset =
- orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
+ page_table[next_page].region_start_offset =
+ npage_bytes(next_page-first_page) - orig_first_page_bytes_used;
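+        /* E.g. if the object begins 40 bytes into first_page, the
+         * following page stores PAGE_BYTES-40: the distance back to
+         * the object's start.  (The old first_object_offset held the
+         * negated value.) */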
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+ bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
+ if (bytes_used > PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
/* Bump up last_free_page */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
- set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+ set_alloc_pointer((lispobj)(page_address(last_free_page)));
}
ret = thread_mutex_unlock(&free_pages_lock);
gc_assert(ret == 0);
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(first_page),
- PAGE_BYTES*(1+last_page-first_page),
+ npage_bytes(1+last_page-first_page),
OS_VM_PROT_ALL);
#endif
* handled, or indeed even printed.
*/
fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
- gc_active_p ? "garbage collection" : "allocation", available, requested);
+ gc_active_p ? "garbage collection" : "allocation",
+ available, requested);
if (gc_active_p || (available == 0)) {
/* If we are in GC, or totally out of memory there is no way
* to sanely transfer control to the lisp-side of things.
else {
/* FIXME: assert free_pages_lock held */
(void)thread_mutex_unlock(&free_pages_lock);
- funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR),
+ funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
alloc_number(available), alloc_number(requested));
lose("HEAP-EXHAUSTED-ERROR fell through");
}
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
{
- page_index_t first_page;
- page_index_t last_page;
- long region_size;
- page_index_t restart_page=*restart_page_ptr;
- long bytes_found;
- long num_pages;
- int large_p=(nbytes>=large_object_size);
+ page_index_t first_page, last_page;
+ page_index_t restart_page = *restart_page_ptr;
+ long bytes_found = 0;
+ long most_bytes_found = 0;
/* FIXME: assert(free_pages_lock is held); */
- /* Search for a contiguous free space of at least nbytes. If it's
- * a large object then align it on a page boundary by searching
- * for a free page. */
-
+ /* Toggled by gc_and_save for heap compaction, normally -1. */
if (gencgc_alloc_start_page != -1) {
restart_page = gencgc_alloc_start_page;
}
- do {
- first_page = restart_page;
- if (large_p)
- while ((first_page < page_table_pages)
- && (page_table[first_page].allocated != FREE_PAGE_FLAG))
- first_page++;
- else
- while (first_page < page_table_pages) {
- if(page_table[first_page].allocated == FREE_PAGE_FLAG)
- break;
- if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0)) {
- break;
- }
+ if (nbytes>=PAGE_BYTES) {
+ /* Search for a contiguous free space of at least nbytes,
+ * aligned on a page boundary. The page-alignment is strictly
+ * speaking needed only for objects at least large_object_size
+ * bytes in size. */
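+        /* E.g. a request of 3*PAGE_BYTES+1 bytes is only satisfied
+         * by a run of four consecutive FREE pages. */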
+ do {
+ first_page = restart_page;
+ while ((first_page < page_table_pages) &&
+ (page_table[first_page].allocated != FREE_PAGE_FLAG))
first_page++;
- }
-
- if (first_page >= page_table_pages)
- gc_heap_exhausted_error_or_lose(0, nbytes);
- gc_assert(page_table[first_page].write_protected == 0);
+ last_page = first_page;
+ bytes_found = PAGE_BYTES;
+ while ((bytes_found < nbytes) &&
+ (last_page < (page_table_pages-1)) &&
+ (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ last_page++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ restart_page = last_page + 1;
+ } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
- last_page = first_page;
- bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes)
- || (!large_p && (num_pages < 2)))
- && (last_page < (page_table_pages-1))
- && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
- last_page++;
- num_pages++;
- bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
+ } else {
+ /* Search for a page with at least nbytes of space. We prefer
+         * not to split small objects across multiple pages, to reduce
+         * the number of contiguous allocation regions spanning
+         * multiple pages: this helps avoid excessive conservatism. */
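+        /* E.g. a 64-byte request may be satisfied from a boxed,
+         * current-generation, unprotected page that still has at
+         * least 64 bytes free, instead of opening a fresh page. */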
+ first_page = restart_page;
+ while (first_page < page_table_pages) {
+ if (page_table[first_page].allocated == FREE_PAGE_FLAG)
+ {
+ bytes_found = PAGE_BYTES;
+ break;
+ }
+ else if ((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0))
+ {
+ bytes_found = PAGE_BYTES
+ - page_table[first_page].bytes_used;
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ if (bytes_found >= nbytes)
+ break;
+ }
+ first_page++;
}
-
- region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
- restart_page = last_page + 1;
- } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
+ last_page = first_page;
+ restart_page = first_page + 1;
+ }
/* Check for a failure */
- if ((restart_page >= page_table_pages) && (bytes_found < nbytes))
- gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+ if (bytes_found < nbytes) {
+ gc_assert(restart_page >= page_table_pages);
+ gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
+ }
- *restart_page_ptr=first_page;
+ gc_assert(page_table[first_page].write_protected == 0);
+ *restart_page_ptr = first_page;
return last_page;
}
{
void *new_free_pointer;
- if(nbytes>=large_object_size)
+ if (nbytes>=large_object_size)
return gc_alloc_large(nbytes,unboxed_p,my_region);
/* Check whether there is room in the current alloc region. */
/* Unless a `quick' alloc was requested, check whether the
alloc region is almost empty. */
if (!quick_p &&
- (my_region->end_addr - my_region->free_pointer) <= 32) {
+ void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
/* If so, finished with the current region. */
gc_alloc_update_page_tables(unboxed_p, my_region);
/* Set up a new region. */
return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
\f
-/*
- * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
- */
-
-extern long (*scavtab[256])(lispobj *where, lispobj object);
-extern lispobj (*transother[256])(lispobj object);
-extern long (*sizetab[256])(lispobj *where);
/* Copy a large boxed object. If the object is in a large object
* region then it is simply promoted, else it is copied. If it's large
/* Promote the object. */
- long remaining_bytes;
+ unsigned long remaining_bytes;
page_index_t next_page;
- long bytes_freed;
- long old_bytes_used;
+ unsigned long bytes_freed;
+ unsigned long old_bytes_used;
/* Note: Any page write-protection must be removed, else a
* later scavenge_newspace may incorrectly not scavenge these
* new areas, but let's do it for them all (they'll probably
* be written anyway?). */
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
(page_table[next_page].gen == from_space) &&
(page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
+ (page_table[next_page].region_start_offset ==
+ npage_bytes(next_page - first_page))) {
/* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected as they
next_page++;
}
- generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
- bytes_freed;
+ generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
+ + bytes_freed;
generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
bytes_allocated -= bytes_freed;
gc_assert((nwords & 0x01) == 0);
if ((nwords > 1024*1024) && gencgc_verbose)
- FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));
+ FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
+ nwords*N_WORD_BYTES));
/* Check whether it's a large object. */
first_page = find_page_index((void *)object);
/* Promote the object. Note: Unboxed objects may have been
* allocated to a BOXED region so it may be necessary to
* change the region to UNBOXED. */
- long remaining_bytes;
+ unsigned long remaining_bytes;
page_index_t next_page;
- long bytes_freed;
- long old_bytes_used;
+ unsigned long bytes_freed;
+ unsigned long old_bytes_used;
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
|| (page_table[next_page].allocated == BOXED_PAGE_FLAG));
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
|| (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
+ (page_table[next_page].region_start_offset ==
+ npage_bytes(next_page - first_page))) {
            /* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected, even if
"/copy_large_unboxed bytes_freed=%d\n",
bytes_freed));
- generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
+ generations[from_space].bytes_allocated -=
+ nwords*N_WORD_BYTES + bytes_freed;
generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
bytes_allocated -= bytes_freed;
if (!check_code_fixups)
return;
+ FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
+
ncode_words = fixnum_value(code->code_size);
nheader_words = HeaderValue(*(lispobj *)code);
nwords = ncode_words + nheader_words;
&& (data < (code_end_addr-displacement))) {
/* function header */
if ((d4 == 0x5e)
- && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+ && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
+ (unsigned)code)) {
/* Skip the function header */
p += 6*4 - 4 - 1;
continue;
void *constants_start_addr, *constants_end_addr;
void *code_start_addr, *code_end_addr;
lispobj fixups = NIL;
- unsigned long displacement = (unsigned long)new_code - (unsigned long)old_code;
+ unsigned long displacement =
+ (unsigned long)new_code - (unsigned long)old_code;
struct vector *fixups_vector;
ncode_words = fixnum_value(new_code->code_size);
(fixups_vector->header == 0x01)) {
/* If so, then follow it. */
/*SHOW("following pointer to a forwarding pointer");*/
- fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
+ fixups_vector =
+ (struct vector *)native_pointer((lispobj)fixups_vector->length);
}
/*SHOW("got fixups");*/
/* If it's within the old_code object then it must be an
* absolute fixup (relative ones are not saved) */
if ((old_value >= (unsigned long)old_code)
- && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES)))
+ && (old_value < ((unsigned long)old_code
+ + nwords*N_WORD_BYTES)))
                /* So add the displacement. */
*(unsigned long *)((unsigned long)code_start_addr + offset) =
old_value + displacement;
} else {
/* This used to just print a note to stderr, but a bogus fixup seems to
         * indicate real heap corruption, so a hard failure is in order. */
- lose("fixup vector %p has a bad widetag: %d\n", fixups_vector, widetag_of(fixups_vector->header));
+ lose("fixup vector %p has a bad widetag: %d\n",
+ fixups_vector, widetag_of(fixups_vector->header));
}
/* Check for possible errors. */
if ((page_index == -1) ||
(page_table[page_index].allocated == FREE_PAGE_FLAG))
return NULL;
- start = (lispobj *)((void *)page_address(page_index)
- + page_table[page_index].first_object_offset);
+ start = (lispobj *)page_region_start(page_index);
return (gc_search_space(start,
(((lispobj *)pointer)+2)-start,
(lispobj *)pointer));
return 0;
}
    /* Is it a plausible cons? */
- if ((is_lisp_pointer(start_addr[0])
- || (fixnump(start_addr[0]))
- || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
- && (is_lisp_pointer(start_addr[1])
- || (fixnump(start_addr[1]))
- || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
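+    /* (is_lisp_immediate is assumed to cover the cases spelled out
+     * above: fixnums, characters, the unbound marker, and on 64-bit
+     * targets single-floats.) */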
+ if ((is_lisp_pointer(start_addr[0]) ||
+ is_lisp_immediate(start_addr[0])) &&
+ (is_lisp_pointer(start_addr[1]) ||
+ is_lisp_immediate(start_addr[1])))
break;
else {
if (gencgc_verbose)
page_index_t next_page;
long nwords;
- long remaining_bytes;
- long bytes_freed;
- long old_bytes_used;
+ unsigned long remaining_bytes;
+ unsigned long bytes_freed;
+ unsigned long old_bytes_used;
int boxed;
     * but let's do it for them all (they'll probably be written
* anyway?). */
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
|| (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset ==
- -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].allocated = boxed;
((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
|| (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
+ (page_table[next_page].region_start_offset ==
+ npage_bytes(next_page - first_page))) {
            /* It checks out OK, free the page. We don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write protected as they
/* quick check 2: Check the offset within the page.
*
*/
- if (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
+ if (((unsigned long)addr & (PAGE_BYTES - 1)) >
+ page_table[addr_page_index].bytes_used)
return;
/* Filter out anything which can't be a pointer to a Lisp object
#if 0
/* I think this'd work just as well, but without the assertions.
* -dan 2004.01.01 */
- first_page=
- find_page_index(page_address(addr_page_index)+
- page_table[addr_page_index].first_object_offset);
+        first_page = find_page_index(page_region_start(addr_page_index));
#else
first_page = addr_page_index;
- while (page_table[first_page].first_object_offset != 0) {
+ while (page_table[first_page].region_start_offset != 0) {
--first_page;
/* Do some checks. */
gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
|| (page_table[i+1].allocated == FREE_PAGE_FLAG)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
- || (page_table[i+1].first_object_offset == 0))
+ || (page_table[i+1].region_start_offset == 0))
break;
}
int write_protected=1;
/* This should be the start of a region */
- gc_assert(page_table[i].first_object_offset == 0);
+ gc_assert(page_table[i].region_start_offset == 0);
/* Now work forward until the end of the region */
for (last_page = i; ; last_page++) {
|| (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
+ || (page_table[last_page+1].region_start_offset == 0))
break;
}
if (!write_protected) {
scavenge(page_address(i),
- (page_table[last_page].bytes_used +
- (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+ ((unsigned long)(page_table[last_page].bytes_used
+ + npage_bytes(last_page-i)))
+ /N_WORD_BYTES);
/* Now scan the pages and write protect those that
* don't have pointers to younger generations. */
&& (page_table[i].write_protected_cleared != 0)) {
FSHOW((stderr, "/scavenge_generation() %d\n", generation));
FSHOW((stderr,
- "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
+ "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n",
page_table[i].bytes_used,
- page_table[i].first_object_offset,
+ page_table[i].region_start_offset,
page_table[i].dont_move));
lose("write to protected page %d in scavenge_generation()\n", i);
}
page_index_t last_page;
int all_wp=1;
- /* The scavenge will start at the first_object_offset of page i.
+ /* The scavenge will start at the region_start_offset of
+ * page i.
*
* We need to find the full extent of this contiguous
* block in case objects span pages.
|| (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
+ || (page_table[last_page+1].region_start_offset == 0))
break;
}
/* Do a limited check for write-protected pages. */
if (!all_wp) {
- long size;
-
- size = (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES
- - page_table[i].first_object_offset)/N_WORD_BYTES;
+ long nwords = (((unsigned long)
+ (page_table[last_page].bytes_used
+ + npage_bytes(last_page-i)
+ + page_table[i].region_start_offset))
+ / N_WORD_BYTES);
new_areas_ignore_page = last_page;
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
+ scavenge(page_region_start(i), nwords);
}
i = last_page;
/* Work through previous_new_areas. */
for (i = 0; i < previous_new_areas_index; i++) {
- long page = (*previous_new_areas)[i].page;
- long offset = (*previous_new_areas)[i].offset;
- long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+ page_index_t page = (*previous_new_areas)[i].page;
+ size_t offset = (*previous_new_areas)[i].offset;
+ size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
scavenge(page_address(page)+offset, size);
}
* assumes that all objects have been copied or promoted to an older
* generation. Bytes_allocated and the generation bytes_allocated
* counter are updated. The number of bytes freed is returned. */
-static long
+static unsigned long
free_oldspace(void)
{
- long bytes_freed = 0;
+ unsigned long bytes_freed = 0;
page_index_t first_page, last_page;
first_page = 0;
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(first_page),
- PAGE_BYTES*(last_page-first_page),
+ npage_bytes(last_page-first_page),
OS_VM_PROT_NONE);
#endif
first_page = last_page;
page_index_t pi1 = find_page_index((void*)addr);
if (pi1 != -1)
- fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
+        fprintf(stderr," %lx: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
(unsigned long) addr,
pi1,
page_table[pi1].allocated,
page_table[pi1].gen,
page_table[pi1].bytes_used,
- page_table[pi1].first_object_offset,
+ page_table[pi1].region_start_offset,
page_table[pi1].dont_move);
fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
*(addr-4),
count = 1;
break;
}
- nuntagged = ((struct layout *)native_pointer(layout))->n_untagged_slots;
- verify_space(start + 1, ntotal - fixnum_value(nuntagged));
+ nuntagged = ((struct layout *)
+ native_pointer(layout))->n_untagged_slots;
+ verify_space(start + 1,
+ ntotal - fixnum_value(nuntagged));
count = ntotal + 1;
break;
}
while (fheaderl != NIL) {
fheaderp =
(struct simple_fun *) native_pointer(fheaderl);
- gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+ gc_assert(widetag_of(fheaderp->header) ==
+ SIMPLE_FUN_HEADER_WIDETAG);
verify_space(&fheaderp->name, 1);
verify_space(&fheaderp->arglist, 1);
verify_space(&fheaderp->type, 1);
#ifdef LUTEX_WIDETAG
case LUTEX_WIDETAG:
#endif
+#ifdef NO_TLS_VALUE_MARKER_WIDETAG
+ case NO_TLS_VALUE_MARKER_WIDETAG:
+#endif
count = (sizetab[widetag_of(*start)])(start);
break;
default:
- FSHOW((stderr,
- "/Unhandled widetag 0x%x at 0x%x\n",
- widetag_of(*start), start));
- fflush(stderr);
- gc_abort();
+ lose("Unhandled widetag 0x%x at 0x%x\n",
+ widetag_of(*start), start);
}
}
}
int region_allocation = page_table[i].allocated;
/* This should be the start of a contiguous block */
- gc_assert(page_table[i].first_object_offset == 0);
+ gc_assert(page_table[i].region_start_offset == 0);
/* Need to find the full extent of this contiguous block in case
objects span pages. */
|| (page_table[last_page+1].allocated != region_allocation)
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
+ || (page_table[last_page+1].region_start_offset == 0))
break;
- verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+ verify_space(page_address(i),
+ ((unsigned long)
+ (page_table[last_page].bytes_used
+ + npage_bytes(last_page-i)))
+ / N_WORD_BYTES);
i = last_page;
}
}
page_start = (void *)page_address(start);
os_protect(page_start,
- PAGE_BYTES * (last - start),
+ npage_bytes(last - start),
OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
start = last;
/* Compute the PC's offset from the start of the CODE */
/* register. */
- pc_code_offset = *os_context_pc_addr(context) - *os_context_register_addr(context, reg_CODE);
+ pc_code_offset = *os_context_pc_addr(context)
+ - *os_context_register_addr(context, reg_CODE);
#ifdef ARCH_HAS_NPC_REGISTER
- npc_code_offset = *os_context_npc_addr(context) - *os_context_register_addr(context, reg_CODE);
+ npc_code_offset = *os_context_npc_addr(context)
+ - *os_context_register_addr(context, reg_CODE);
#endif /* ARCH_HAS_NPC_REGISTER */
#ifdef ARCH_HAS_LINK_REGISTER
/* Fix the LIP */
/*
- * But what happens if lip_register_pair is -1? *os_context_register_addr on Solaris
- * (see solaris_register_address in solaris-os.c) will return
- * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
- * that what we really want? My guess is that that is not what we
+ * But what happens if lip_register_pair is -1?
+ * *os_context_register_addr on Solaris (see
+ * solaris_register_address in solaris-os.c) will return
+ * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
+ * that what we really want? My guess is that that is not what we
* want, so if lip_register_pair is -1, we don't touch reg_LIP at
- * all. But maybe it doesn't really matter if LIP is trashed?
+ * all. But maybe it doesn't really matter if LIP is trashed?
*/
if (lip_register_pair >= 0) {
*os_context_register_addr(context, reg_LIP) =
- *os_context_register_addr(context, lip_register_pair) + lip_offset;
+ *os_context_register_addr(context, lip_register_pair)
+ + lip_offset;
}
#endif /* reg_LIP */
/* Fix the PC if it was in from space */
if (from_space_p(*os_context_pc_addr(context)))
- *os_context_pc_addr(context) = *os_context_register_addr(context, reg_CODE) + pc_code_offset;
+ *os_context_pc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + pc_code_offset;
#ifdef ARCH_HAS_LINK_REGISTER
/* Fix the LR ditto; important if we're being called from
#ifdef ARCH_HAS_NPC_REGISTER
if (from_space_p(*os_context_npc_addr(context)))
- *os_context_npc_addr(context) = *os_context_register_addr(context, reg_CODE) + npc_code_offset;
+ *os_context_npc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + npc_code_offset;
#endif /* ARCH_HAS_NPC_REGISTER */
}
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
- num_dont_move_pages * PAGE_BYTES);
+            npage_bytes(num_dont_move_pages));
}
#endif
last_free_page = last_page+1;
- set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+ set_alloc_pointer((lispobj)(page_address(last_free_page)));
return 0; /* dummy value: return something ... */
}
page_table[page].allocated = FREE_PAGE_FLAG;
page_table[page].bytes_used = 0;
-#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */
+#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
+ * about this change. */
/* Zero the page. */
page_start = (void *)page_address(page);
if (verify_after_free_heap) {
/* Check whether purify has left any bad pointers. */
- if (gencgc_verbose)
- SHOW("checking after free_heap\n");
+ FSHOW((stderr, "checking after free_heap\n"));
verify_gc();
}
}
/* Compute the number of pages needed for the dynamic space.
* Dynamic space size should be aligned on page size. */
page_table_pages = dynamic_space_size/PAGE_BYTES;
- gc_assert(dynamic_space_size == (size_t) page_table_pages*PAGE_BYTES);
+ gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
page_table = calloc(page_table_pages, sizeof(struct page));
gc_assert(page_table);
gencgc_pickup_dynamic(void)
{
page_index_t page = 0;
- long alloc_ptr = get_alloc_pointer();
+ void *alloc_ptr = (void *)get_alloc_pointer();
lispobj *prev=(lispobj *)page_address(page);
generation_index_t gen = PSEUDO_STATIC_GENERATION;
if (!gencgc_partial_pickup) {
first=gc_search_space(prev,(ptr+2)-prev,ptr);
if(ptr == first) prev=ptr;
- page_table[page].first_object_offset =
- (void *)prev - page_address(page);
+ page_table[page].region_start_offset =
+ page_address(page) - (void *)prev;
}
page++;
- } while ((long)page_address(page) < alloc_ptr);
+ } while (page_address(page) < alloc_ptr);
#ifdef LUTEX_WIDETAG
/* Lutexes have been registered in generation 0 by coreparse, and
last_free_page = page;
- generations[gen].bytes_allocated = PAGE_BYTES*page;
- bytes_allocated = PAGE_BYTES*page;
+ generations[gen].bytes_allocated = npage_bytes(page);
+ bytes_allocated = npage_bytes(page);
gc_alloc_update_all_page_tables();
write_protect_generation_pages(gen);
alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
if ((signed long) alloc_signal <= 0) {
+ SetSymbolValue(ALLOC_SIGNAL, T, thread);
#ifdef LISP_FEATURE_SB_THREAD
kill_thread_safely(thread->os_thread, SIGPROF);
#else
*/
if(page_table[page_index].write_protected_cleared != 1)
lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
- page_index, boxed_region.first_page, boxed_region.last_page);
+ page_index, boxed_region.first_page,
+ boxed_region.last_page);
}
/* Don't worry, we can handle it. */
return 1;