* during a heap verify? */
boolean verify_dynamic_code_check = 0;
+#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
+#endif
/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;
}
static inline boolean code_page_p(page_index_t page) {
- return (page_table[page].allocated & CODE_PAGE_FLAG);
+ /* This is used by the conservative pinning logic to determine if
+ * a page can contain code objects. Ideally, we'd be able to
+ * check the page allocation flag to see if it is CODE_PAGE_FLAG,
+ * but this turns out not to be reliable (in fact, badly
+ * unreliable) at the moment. On the upside, all code objects are
+ * boxed objects, so we can simply re-use the boxed_page_p() logic
+ * for a tighter result than merely "is this page allocated". */
+#if 0
+ return (page_table[page].allocated & CODE_PAGE_FLAG) == CODE_PAGE_FLAG;
+#else
+ /* NOTE(review): page_boxed_p() over-approximates -- every boxed
+ * page answers true here, not only pages actually holding code.
+ * That is safe for pinning (merely conservative), but this is not
+ * a precise "is this a code page" test. */
+ return page_boxed_p(page);
+#endif
}
static inline boolean page_boxed_no_region_p(page_index_t page) {
/* To map addresses to page structures the address of the first page
* is needed. */
-static void *heap_base = NULL;
+void *heap_base = NULL;
/* Calculate the start address for the given page number. */
inline void *
/* Calculate the address where the allocation region associated with
* the page starts. */
static inline void *
-page_region_start(page_index_t page_index)
+page_scan_start(page_index_t page_index)
{
- return page_address(page_index)-page_table[page_index].region_start_offset;
+ return page_address(page_index)-page_table[page_index].scan_start_offset;
+}
+
+/* True if the page starts a contiguous block.  A zero
+ * scan_start_offset means the page's scan start is the page's own
+ * start address (see page_scan_start), i.e. no object spills onto
+ * this page from an earlier one. */
+static inline boolean
+page_starts_contiguous_block_p(page_index_t page_index)
+{
+    return page_table[page_index].scan_start_offset == 0;
+}
+
+/* True if the page is the last page in a contiguous block.  GEN is
+ * the generation the block belongs to; a following page in a
+ * different generation also terminates the block.
+ *
+ * The disjuncts are ordered deliberately: the last_free_page bound
+ * check short-circuits before any page_table[page_index + 1]
+ * access, so the lookahead never indexes past the table's used
+ * portion. */
+static inline boolean
+page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
+{
+    return (/* page doesn't fill block */
+            (page_table[page_index].bytes_used < GENCGC_CARD_BYTES)
+            /* page is last allocated page */
+            || ((page_index + 1) >= last_free_page)
+            /* next page free */
+            || page_free_p(page_index + 1)
+            /* next page contains no data */
+            || (page_table[page_index + 1].bytes_used == 0)
+            /* next page is in different generation */
+            || (page_table[page_index + 1].gen != gen)
+            /* next page starts its own contiguous block */
+            || (page_starts_contiguous_block_p(page_index + 1)));
+}
/* Find the page index within the page_table for the given
page_table[first_page].allocated = page_type_flag;
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].large_object = 0;
- page_table[first_page].region_start_offset = 0;
+ page_table[first_page].scan_start_offset = 0;
}
gc_assert(page_table[first_page].allocated == page_type_flag);
page_table[i].large_object = 0;
/* This may not be necessary for unboxed regions (think it was
* broken before!) */
- page_table[i].region_start_offset =
+ page_table[i].scan_start_offset =
void_diff(page_address(i),alloc_region->start_addr);
page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
}
/* Update the first page. */
/* If the page was free then set up the gen, and
- * region_start_offset. */
+ * scan_start_offset. */
if (page_table[first_page].bytes_used == 0)
- gc_assert(page_table[first_page].region_start_offset == 0);
+ gc_assert(page_starts_contiguous_block_p(first_page));
page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
gc_assert(page_table[first_page].allocated & page_type_flag);
/* All the rest of the pages should be free. We need to set
- * their region_start_offset pointer to the start of the
+ * their scan_start_offset pointer to the start of the
* region, and set the bytes_used. */
while (more) {
page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
- gc_assert(page_table[next_page].region_start_offset ==
+ gc_assert(page_table[next_page].scan_start_offset ==
void_diff(page_address(next_page),
alloc_region->start_addr));
orig_first_page_bytes_used = page_table[first_page].bytes_used;
/* If the first page was free then set up the gen, and
- * region_start_offset. */
+ * scan_start_offset. */
if (page_table[first_page].bytes_used == 0) {
page_table[first_page].allocated = page_type_flag;
page_table[first_page].gen = gc_alloc_generation;
- page_table[first_page].region_start_offset = 0;
+ page_table[first_page].scan_start_offset = 0;
page_table[first_page].large_object = 1;
}
next_page = first_page+1;
/* All the rest of the pages should be free. We need to set their
- * region_start_offset pointer to the start of the region, and set
+ * scan_start_offset pointer to the start of the region, and set
* the bytes_used. */
while (more) {
gc_assert(page_free_p(next_page));
page_table[next_page].gen = gc_alloc_generation;
page_table[next_page].large_object = 1;
- page_table[next_page].region_start_offset =
+ page_table[next_page].scan_start_offset =
npage_bytes(next_page-first_page) - orig_first_page_bytes_used;
/* Calculate the number of bytes used in this page. */
* new areas, but let's do it for them all (they'll probably
* be written anyway?). */
- gc_assert(page_table[first_page].region_start_offset == 0);
+ gc_assert(page_starts_contiguous_block_p(first_page));
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].region_start_offset ==
+ gc_assert(page_table[next_page].scan_start_offset ==
npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
/* Should have been unprotected by unprotect_oldspace()
(page_table[next_page].gen == from_space) &&
/* FIXME: It is not obvious to me why this is necessary
* as a loop condition: it seems to me that the
- * region_start_offset test should be sufficient, but
+ * scan_start_offset test should be sufficient, but
* experimentally that is not the case. --NS
* 2011-11-28 */
(boxedp ?
page_boxed_p(next_page) :
page_allocated_no_region_p(next_page)) &&
page_table[next_page].large_object &&
- (page_table[next_page].region_start_offset ==
+ (page_table[next_page].scan_start_offset ==
npage_bytes(next_page - first_page))) {
/* Checks out OK, free the page. Don't need to both zeroing
* pages as this should have been done before shrinking the
*
* Currently only absolute fixups to the constant vector, or to the
* code area are checked. */
+#ifdef LISP_FEATURE_X86
void
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
-#ifdef LISP_FEATURE_X86
sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
os_vm_address_t code_start_addr, code_end_addr;
"/code start = %x, end = %x\n",
code_start_addr, code_end_addr));
}
-#endif
}
+#endif
+#ifdef LISP_FEATURE_X86
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
-/* x86-64 uses pc-relative addressing instead of this kludge */
-#ifndef LISP_FEATURE_X86_64
sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr, constants_end_addr;
os_vm_address_t code_start_addr, code_end_addr;
if (check_code_fixups) {
sniff_code_object(new_code,displacement);
}
-#endif
}
-
+#endif
static lispobj
trans_boxed_large(lispobj object)
/* The address may be invalid, so do some checks. */
if ((page_index == -1) || page_free_p(page_index))
return NULL;
- start = (lispobj *)page_region_start(page_index);
+ start = (lispobj *)page_scan_start(page_index);
return (gc_search_space(start,
(((lispobj *)pointer)+2)-start,
(lispobj *)pointer));
#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+/* Decide whether ADDR (whose page index is ADDR_PAGE_INDEX, or -1
+ * when it lies outside dynamic space) may legitimately pin a
+ * from-space page.  Returns 1 for a plausible conservative root,
+ * 0 otherwise. */
+static int
+valid_conservative_root_p(void *addr, page_index_t addr_page_index)
+{
+#ifdef GENCGC_IS_PRECISE
+    /* If we're in precise gencgc (non-x86oid as of this writing) then
+     * we are only called on valid object pointers in the first place,
+     * so we just have to do a bounds-check against the heap, a
+     * generation check, and the already-pinned check. */
+    if ((addr_page_index == -1)
+        || (page_table[addr_page_index].gen != from_space)
+        || (page_table[addr_page_index].dont_move != 0))
+        return 0;
+#else
+    /* quick check 1: Address is quite likely to have been invalid. */
+    if ((addr_page_index == -1)
+        || page_free_p(addr_page_index)
+        || (page_table[addr_page_index].bytes_used == 0)
+        || (page_table[addr_page_index].gen != from_space)
+        /* Skip if already marked dont_move. */
+        || (page_table[addr_page_index].dont_move != 0))
+        return 0;
+    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
+
+    /* quick check 2: Check the offset within the page.  An address
+     * whose offset within its card exceeds the page's bytes_used
+     * points past the allocated data on that page, so it cannot
+     * refer to a live object. */
+    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
+        page_table[addr_page_index].bytes_used)
+        return 0;
+
+    /* Filter out anything which can't be a pointer to a Lisp object
+     * (or, as a special case which also requires dont_move, a return
+     * address referring to something in a CodeObject). This is
+     * expensive but important, since it vastly reduces the
+     * probability that random garbage will be bogusly interpreted as
+     * a pointer which prevents a page from moving. */
+    if (!(code_page_p(addr_page_index)
+          || (is_lisp_pointer((lispobj)addr) &&
+              possibly_valid_dynamic_space_pointer(addr))))
+        return 0;
+#endif
+
+    return 1;
+}
+
/* Adjust large bignum and vector objects. This will adjust the
* allocated region if the size has shrunk, and move unboxed objects
* into unboxed pages. The pages are not promoted here, and the
* but lets do it for them all (they'll probably be written
* anyway?). */
- gc_assert(page_table[first_page].region_start_offset == 0);
+ gc_assert(page_starts_contiguous_block_p(first_page));
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].region_start_offset ==
+ gc_assert(page_table[next_page].scan_start_offset ==
npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
(page_table[next_page].gen == from_space) &&
page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
- (page_table[next_page].region_start_offset ==
+ (page_table[next_page].scan_start_offset ==
npage_bytes(next_page - first_page))) {
/* It checks out OK, free the page. We don't need to both zeroing
* pages as this should have been done before shrinking the
page_index_t i;
unsigned int region_allocation;
- /* quick check 1: Address is quite likely to have been invalid. */
- if ((addr_page_index == -1)
- || page_free_p(addr_page_index)
- || (page_table[addr_page_index].bytes_used == 0)
- || (page_table[addr_page_index].gen != from_space)
- /* Skip if already marked dont_move. */
- || (page_table[addr_page_index].dont_move != 0))
+ if (!valid_conservative_root_p(addr, addr_page_index))
return;
- gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
+
/* (Now that we know that addr_page_index is in range, it's
* safe to index into page_table[] with it.) */
region_allocation = page_table[addr_page_index].allocated;
- /* quick check 2: Check the offset within the page.
- *
- */
- if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
- page_table[addr_page_index].bytes_used)
- return;
-
- /* Filter out anything which can't be a pointer to a Lisp object
- * (or, as a special case which also requires dont_move, a return
- * address referring to something in a CodeObject). This is
- * expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreted as
- * a pointer which prevents a page from moving.
- *
- * This only needs to happen on x86oids, where this is used for
- * conservative roots. Non-x86oid systems only ever call this
- * function on known-valid lisp objects. */
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- if (!(code_page_p(addr_page_index)
- || (is_lisp_pointer((lispobj)addr) &&
- possibly_valid_dynamic_space_pointer(addr))))
- return;
-#endif
-
/* Find the beginning of the region. Note that there may be
* objects in the region preceding the one that we were passed a
* pointer to: if this is the case, we will write-protect all the
#if 0
/* I think this'd work just as well, but without the assertions.
* -dan 2004.01.01 */
- first_page = find_page_index(page_region_start(addr_page_index))
+ first_page = find_page_index(page_scan_start(addr_page_index))
#else
first_page = addr_page_index;
- while (page_table[first_page].region_start_offset != 0) {
+ while (!page_starts_contiguous_block_p(first_page)) {
--first_page;
/* Do some checks. */
gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
/* Adjust any large objects before promotion as they won't be
* copied after promotion. */
if (page_table[first_page].large_object) {
- maybe_adjust_large_object(page_address(first_page));
- /* If a large object has shrunk then addr may now point to a
- * free area in which case it's ignored here. Note it gets
- * through the valid pointer test above because the tail looks
- * like conses. */
- if (page_free_p(addr_page_index)
- || (page_table[addr_page_index].bytes_used == 0)
- /* Check the offset within the page. */
- || (((uword_t)addr & (GENCGC_CARD_BYTES - 1))
- > page_table[addr_page_index].bytes_used)) {
- FSHOW((stderr,
- "weird? ignore ptr 0x%x to freed area of large object\n",
- addr));
+ /* Large objects (specifically vectors and bignums) can
+ * shrink, leaving a "tail" of zeroed space, which appears to
+ * the filter above as a series of valid conses, both car and
+ * cdr of which contain the fixnum zero, but will be
+ * deallocated when the GC shrinks the large object region to
+ * fit the object within. We allow raw pointers within code
+ * space, but for boxed and unboxed space we do not, nor do
+ * pointers to within a non-code object appear valid above. A
+ * cons cell will never merit allocation to a large object
+ * page, so pick them off now, before we try to adjust the
+ * object. */
+ if ((lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) &&
+ !code_page_p(first_page)) {
return;
}
+ maybe_adjust_large_object(page_address(first_page));
/* It may have moved to unboxed pages. */
region_allocation = page_table[first_page].allocated;
}
/* Mark the page static. */
page_table[i].dont_move = 1;
- /* Move the page to the new_space. XX I'd rather not do this
- * but the GC logic is not quite able to copy with the static
- * pages remaining in the from space. This also requires the
- * generation bytes_allocated counters be updated. */
- page_table[i].gen = new_space;
- generations[new_space].bytes_allocated += page_table[i].bytes_used;
- generations[from_space].bytes_allocated -= page_table[i].bytes_used;
-
/* It is essential that the pages are not write protected as
* they may have pointers into the old-space which need
* scavenging. They shouldn't be write protected at this
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < GENCGC_CARD_BYTES)
- /* ..or it is CARD_BYTES and is the last in the block */
- || page_free_p(i+1)
- || (page_table[i+1].bytes_used == 0) /* next page free */
- || (page_table[i+1].gen != from_space) /* diff. gen */
- || (page_table[i+1].region_start_offset == 0))
+ if (page_ends_contiguous_block_p(i, from_space))
break;
}
int write_protected=1;
/* This should be the start of a region */
- gc_assert(page_table[i].region_start_offset == 0);
+ gc_assert(page_starts_contiguous_block_p(i));
/* Now work forward until the end of the region */
for (last_page = i; ; last_page++) {
write_protected =
write_protected && page_table[last_page].write_protected;
- if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
- /* Or it is CARD_BYTES and is the last in the block */
- || (!page_boxed_p(last_page+1))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].region_start_offset == 0))
+ if (page_ends_contiguous_block_p(last_page, generation))
break;
}
if (!write_protected) {
&& (page_table[i].write_protected_cleared != 0)) {
FSHOW((stderr, "/scavenge_generation() %d\n", generation));
FSHOW((stderr,
- "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n",
+ "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n",
page_table[i].bytes_used,
- page_table[i].region_start_offset,
+ page_table[i].scan_start_offset,
page_table[i].dont_move));
lose("write to protected page %d in scavenge_generation()\n", i);
}
page_index_t last_page;
int all_wp=1;
- /* The scavenge will start at the region_start_offset of
+ /* The scavenge will start at the scan_start_offset of
* page i.
*
* We need to find the full extent of this contiguous
/* Check whether this is the last page in this
* contiguous block */
- if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
- /* Or it is CARD_BYTES and is the last in the block */
- || (!page_boxed_p(last_page+1))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].region_start_offset == 0))
+ if (page_ends_contiguous_block_p(last_page, generation))
break;
}
sword_t nwords = (((uword_t)
(page_table[last_page].bytes_used
+ npage_bytes(last_page-i)
- + page_table[i].region_start_offset))
+ + page_table[i].scan_start_offset))
/ N_WORD_BYTES);
new_areas_ignore_page = last_page;
- scavenge(page_region_start(i), nwords);
+ scavenge(page_scan_start(i), nwords);
}
i = last_page;
page_table[pi1].allocated,
page_table[pi1].gen,
page_table[pi1].bytes_used,
- page_table[pi1].region_start_offset,
+ page_table[pi1].scan_start_offset,
page_table[pi1].dont_move);
fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
*(addr-4),
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
+#ifdef SIMD_PACK_WIDETAG
+ case SIMD_PACK_WIDETAG:
+#endif
case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
case SIMPLE_CHARACTER_STRING_WIDETAG:
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
page_index_t last_page;
- int region_allocation = page_table[i].allocated;
/* This should be the start of a contiguous block */
- gc_assert(page_table[i].region_start_offset == 0);
+ gc_assert(page_starts_contiguous_block_p(i));
/* Need to find the full extent of this contiguous block in case
objects span pages. */
for (last_page = i; ;last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
- /* Or it is CARD_BYTES and is the last in the block */
- || (page_table[last_page+1].allocated != region_allocation)
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].region_start_offset == 0))
+ if (page_ends_contiguous_block_p(last_page, generation))
break;
verify_space(page_address(i),
}
#endif
+/* Move every pinned (dont_move) from-space page directly into
+ * newspace, adjusting the per-generation allocation counters to
+ * match.  Called once per collection, after all pins are known and
+ * before scavenging begins, so the scavenger never attempts to
+ * relocate the contents of a pinned page. */
+static void
+move_pinned_pages_to_newspace(void)
+{
+    page_index_t i;
+
+    /* scavenge() will evacuate all oldspace pages, but no newspace
+     * pages. Pinned pages are precisely those pages which must not
+     * be evacuated, so move them to newspace directly. */
+
+    for (i = 0; i < last_free_page; i++) {
+        if (page_table[i].dont_move &&
+            /* dont_move is cleared lazily, so validate the space as well. */
+            page_table[i].gen == from_space) {
+            page_table[i].gen = new_space;
+            /* And since we're moving the pages wholesale, also adjust
+             * the generation allocation counters. */
+            generations[new_space].bytes_allocated += page_table[i].bytes_used;
+            generations[from_space].bytes_allocated -= page_table[i].bytes_used;
+        }
+    }
+}
+
/* Garbage collect a generation. If raise is 0 then the remains of the
* generation are not raised to the next generation. */
static void
}
#endif
+ /* Now that all of the pinned (dont_move) pages are known, and
+ * before we start to scavenge (and thus relocate) objects,
+ * relocate the pinned pages to newspace, so that the scavenger
+ * will not attempt to relocate their contents. */
+ move_pinned_pages_to_newspace();
+
/* Scavenge all the rest of the roots. */
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
first=gc_search_space(prev,(ptr+2)-prev,ptr);
if(ptr == first)
prev=ptr;
- page_table[page].region_start_offset =
+ page_table[page].scan_start_offset =
page_address(page) - (void *)prev;
}
page++;
"Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n"
" boxed_region.first_page: %"PAGE_INDEX_FMT","
" boxed_region.last_page %"PAGE_INDEX_FMT"\n"
- " page.region_start_offset: %"OS_VM_SIZE_FMT"\n"
+ " page.scan_start_offset: %"OS_VM_SIZE_FMT"\n"
" page.bytes_used: %"PAGE_BYTES_FMT"\n"
" page.allocated: %d\n"
" page.write_protected: %d\n"
page_index,
boxed_region.first_page,
boxed_region.last_page,
- page_table[page_index].region_start_offset,
+ page_table[page_index].scan_start_offset,
page_table[page_index].bytes_used,
page_table[page_index].allocated,
page_table[page_index].write_protected,
* SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
- boolean save_runtime_options,
- boolean compressed, int compression_level)
+ boolean save_runtime_options, boolean compressed,
+ int compression_level, int application_type)
{
FILE *file;
void *runtime_bytes = NULL;
collect_garbage(HIGHEST_NORMAL_GENERATION+1);
if (prepend_runtime)
- save_runtime_to_filehandle(file, runtime_bytes, runtime_size);
+ save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
+ application_type);
/* The dumper doesn't know that pages need to be zeroed before use. */
zero_all_free_pages();