* during a heap verify? */
boolean verify_dynamic_code_check = 0;
+#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
+#endif
/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;
}
static inline boolean code_page_p(page_index_t page) {
    /* Used by the conservative pinning logic to decide whether a page
     * may hold code objects.  Ideally we would simply test the page's
     * allocation flag against CODE_PAGE_FLAG, but at the moment that
     * flag is badly unreliable.  Since every code object is a boxed
     * object, the page_boxed_p() test gives a tighter answer than
     * merely "is this page allocated", so reuse it instead. */
#if 0
    return (page_table[page].allocated & CODE_PAGE_FLAG) == CODE_PAGE_FLAG;
#else
    return page_boxed_p(page);
#endif
}
/* True if the page holds boxed data and is not part of a currently
 * open allocation region.
 *
 * Fixes two defects in the previous body: it referenced `page_index`
 * while its parameter was named `page` (an undeclared identifier, so
 * it could not compile), and it tested
 * `page_table[...].scan_start_offset == 0`, which is the
 * page_starts_contiguous_block_p predicate, not a "boxed and outside
 * any open region" test as the function's name and its callers
 * require. */
static inline boolean page_boxed_no_region_p(page_index_t page_index) {
    return page_boxed_p(page_index)
        && !(page_table[page_index].allocated & OPEN_REGION_PAGE_FLAG);
}
+/* True if the page is the last page in a contiguous block. */
+static inline boolean
+page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen)
+{
+ return (/* page doesn't fill block */
+ (page_table[page_index].bytes_used < GENCGC_CARD_BYTES)
+ /* page is last allocated page */
+ || ((page_index + 1) >= last_free_page)
+ /* next page free */
+ || page_free_p(page_index + 1)
+ /* next page contains no data */
+ || (page_table[page_index + 1].bytes_used == 0)
+ /* next page is in different generation */
+ || (page_table[page_index + 1].gen != gen)
+ /* next page starts its own contiguous block */
+ || (page_starts_contiguous_block_p(page_index + 1)));
+}
+
/* Find the page index within the page_table for the given
* address. Return -1 on failure. */
inline page_index_t
*
* Currently only absolute fixups to the constant vector, or to the
* code area are checked. */
+#ifdef LISP_FEATURE_X86
void
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
-#ifdef LISP_FEATURE_X86
sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
os_vm_address_t code_start_addr, code_end_addr;
"/code start = %x, end = %x\n",
code_start_addr, code_end_addr));
}
-#endif
}
+#endif
+#ifdef LISP_FEATURE_X86
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
-/* x86-64 uses pc-relative addressing instead of this kludge */
-#ifndef LISP_FEATURE_X86_64
sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr, constants_end_addr;
os_vm_address_t code_start_addr, code_end_addr;
if (check_code_fixups) {
sniff_code_object(new_code,displacement);
}
-#endif
}
-
+#endif
static lispobj
trans_boxed_large(lispobj object)
#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+static int
+valid_conservative_root_p(void *addr, page_index_t addr_page_index)
+{
+#ifdef GENCGC_IS_PRECISE
+ /* If we're in precise gencgc (non-x86oid as of this writing) then
+ * we are only called on valid object pointers in the first place,
+ * so we just have to do a bounds-check against the heap, a
+ * generation check, and the already-pinned check. */
+ if ((addr_page_index == -1)
+ || (page_table[addr_page_index].gen != from_space)
+ || (page_table[addr_page_index].dont_move != 0))
+ return 0;
+#else
+ /* quick check 1: Address is quite likely to have been invalid. */
+ if ((addr_page_index == -1)
+ || page_free_p(addr_page_index)
+ || (page_table[addr_page_index].bytes_used == 0)
+ || (page_table[addr_page_index].gen != from_space)
+ /* Skip if already marked dont_move. */
+ || (page_table[addr_page_index].dont_move != 0))
+ return 0;
+ gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
+
+ /* quick check 2: Check the offset within the page.
+ *
+ */
+ if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
+ page_table[addr_page_index].bytes_used)
+ return 0;
+
+ /* Filter out anything which can't be a pointer to a Lisp object
+ * (or, as a special case which also requires dont_move, a return
+ * address referring to something in a CodeObject). This is
+ * expensive but important, since it vastly reduces the
+ * probability that random garbage will be bogusly interpreted as
+ * a pointer which prevents a page from moving. */
+ if (!(code_page_p(addr_page_index)
+ || (is_lisp_pointer((lispobj)addr) &&
+ possibly_valid_dynamic_space_pointer(addr))))
+ return 0;
+#endif
+
+ return 1;
+}
+
/* Adjust large bignum and vector objects. This will adjust the
* allocated region if the size has shrunk, and move unboxed objects
* into unboxed pages. The pages are not promoted here, and the
page_index_t i;
unsigned int region_allocation;
- /* quick check 1: Address is quite likely to have been invalid. */
- if ((addr_page_index == -1)
- || page_free_p(addr_page_index)
- || (page_table[addr_page_index].bytes_used == 0)
- || (page_table[addr_page_index].gen != from_space)
- /* Skip if already marked dont_move. */
- || (page_table[addr_page_index].dont_move != 0))
+ if (!valid_conservative_root_p(addr, addr_page_index))
return;
- gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
+
/* (Now that we know that addr_page_index is in range, it's
* safe to index into page_table[] with it.) */
region_allocation = page_table[addr_page_index].allocated;
- /* quick check 2: Check the offset within the page.
- *
- */
- if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
- page_table[addr_page_index].bytes_used)
- return;
-
- /* Filter out anything which can't be a pointer to a Lisp object
- * (or, as a special case which also requires dont_move, a return
- * address referring to something in a CodeObject). This is
- * expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreted as
- * a pointer which prevents a page from moving.
- *
- * This only needs to happen on x86oids, where this is used for
- * conservative roots. Non-x86oid systems only ever call this
- * function on known-valid lisp objects. */
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- if (!(code_page_p(addr_page_index)
- || (is_lisp_pointer((lispobj)addr) &&
- possibly_valid_dynamic_space_pointer(addr))))
- return;
-#endif
-
/* Find the beginning of the region. Note that there may be
* objects in the region preceding the one that we were passed a
* pointer to: if this is the case, we will write-protect all the
/* Adjust any large objects before promotion as they won't be
* copied after promotion. */
if (page_table[first_page].large_object) {
- maybe_adjust_large_object(page_address(first_page));
- /* If a large object has shrunk then addr may now point to a
- * free area in which case it's ignored here. Note it gets
- * through the valid pointer test above because the tail looks
- * like conses. */
- if (page_free_p(addr_page_index)
- || (page_table[addr_page_index].bytes_used == 0)
- /* Check the offset within the page. */
- || (((uword_t)addr & (GENCGC_CARD_BYTES - 1))
- > page_table[addr_page_index].bytes_used)) {
- FSHOW((stderr,
- "weird? ignore ptr 0x%x to freed area of large object\n",
- addr));
+ /* Large objects (specifically vectors and bignums) can
+ * shrink, leaving a "tail" of zeroed space, which appears to
+ * the filter above as a series of valid conses, both car and
+ * cdr of which contain the fixnum zero, but will be
+ * deallocated when the GC shrinks the large object region to
+ * fit the object within. We allow raw pointers within code
+ * space, but for boxed and unboxed space we do not, nor do
+ * pointers to within a non-code object appear valid above. A
+ * cons cell will never merit allocation to a large object
+ * page, so pick them off now, before we try to adjust the
+ * object. */
+ if ((lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) &&
+ !code_page_p(first_page)) {
return;
}
+ maybe_adjust_large_object(page_address(first_page));
/* It may have moved to unboxed pages. */
region_allocation = page_table[first_page].allocated;
}
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < GENCGC_CARD_BYTES)
- /* ..or it is CARD_BYTES and is the last in the block */
- || page_free_p(i+1)
- || (page_table[i+1].bytes_used == 0) /* next page free */
- || (page_table[i+1].gen != from_space) /* diff. gen */
- || (page_starts_contiguous_block_p(i+1)))
+ if (page_ends_contiguous_block_p(i, from_space))
break;
}
for (last_page = i; ; last_page++) {
write_protected =
write_protected && page_table[last_page].write_protected;
- if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
- /* Or it is CARD_BYTES and is the last in the block */
- || (!page_boxed_p(last_page+1))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_starts_contiguous_block_p(last_page+1)))
+ if (page_ends_contiguous_block_p(last_page, generation))
break;
}
if (!write_protected) {
/* Check whether this is the last page in this
* contiguous block */
- if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
- /* Or it is CARD_BYTES and is the last in the block */
- || (!page_boxed_p(last_page+1))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_starts_contiguous_block_p(last_page+1)))
+ if (page_ends_contiguous_block_p(last_page, generation))
break;
}
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
+#ifdef SIMD_PACK_WIDETAG
+ case SIMD_PACK_WIDETAG:
+#endif
case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
case SIMPLE_CHARACTER_STRING_WIDETAG:
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
page_index_t last_page;
- int region_allocation = page_table[i].allocated;
/* This should be the start of a contiguous block */
gc_assert(page_starts_contiguous_block_p(i));
for (last_page = i; ;last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
- /* Or it is CARD_BYTES and is the last in the block */
- || (page_table[last_page+1].allocated != region_allocation)
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_starts_contiguous_block_p(last_page+1)))
+ if (page_ends_contiguous_block_p(last_page, generation))
break;
verify_space(page_address(i),
* SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
- boolean save_runtime_options,
- boolean compressed, int compression_level)
+ boolean save_runtime_options, boolean compressed,
+ int compression_level, int application_type)
{
FILE *file;
void *runtime_bytes = NULL;
collect_garbage(HIGHEST_NORMAL_GENERATION+1);
if (prepend_runtime)
- save_runtime_to_filehandle(file, runtime_bytes, runtime_size);
+ save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
+ application_type);
/* The dumper doesn't know that pages need to be zeroed before use. */
zero_all_free_pages();