* during a heap verify? */
boolean verify_dynamic_code_check = 0;
+#ifdef LISP_FEATURE_X86
/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;
+#endif
/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;
}
+/* Predicate: may PAGE hold code objects?  Used by conservative
+ * pinning; returns nonzero for any page a code object could live on.
+ * Deliberately over-approximates (see below). */
static inline boolean code_page_p(page_index_t page) {
+    /* This is used by the conservative pinning logic to determine if
+     * a page can contain code objects. Ideally, we'd be able to
+     * check the page allocation flag to see if it is CODE_PAGE_FLAG,
+     * but this turns out not to be reliable (in fact, badly
+     * unreliable) at the moment. On the upside, all code objects are
+     * boxed objects, so we can simply re-use the boxed_page_p() logic
+     * for a tighter result than merely "is this page allocated". */
+#if 0
+    return (page_table[page].allocated & CODE_PAGE_FLAG) == CODE_PAGE_FLAG;
+#else
+    return page_boxed_p(page);
+#endif
}
static inline boolean page_boxed_no_region_p(page_index_t page) {
*
* Currently only absolute fixups to the constant vector, or to the
* code area are checked. */
+#ifdef LISP_FEATURE_X86
void
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
-#ifdef LISP_FEATURE_X86
sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
os_vm_address_t code_start_addr, code_end_addr;
"/code start = %x, end = %x\n",
code_start_addr, code_end_addr));
}
-#endif
}
+#endif
+#ifdef LISP_FEATURE_X86
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
-/* x86-64 uses pc-relative addressing instead of this kludge */
-#ifndef LISP_FEATURE_X86_64
sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr, constants_end_addr;
os_vm_address_t code_start_addr, code_end_addr;
if (check_code_fixups) {
sniff_code_object(new_code,displacement);
}
-#endif
}
-
+#endif
static lispobj
trans_boxed_large(lispobj object)
#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+/* Return 1 if ADDR plausibly points at (or into) a live, movable
+ * object in from_space, 0 otherwise.  ADDR_PAGE_INDEX is the page
+ * index corresponding to ADDR (-1 if ADDR is outside dynamic space).
+ * A zero result means the caller may safely ignore ADDR as a
+ * conservative root. */
+static int
+valid_conservative_root_p(void *addr, page_index_t addr_page_index)
+{
+    /* quick check 1: Address is quite likely to have been invalid. */
+    if ((addr_page_index == -1)
+        || page_free_p(addr_page_index)
+        || (page_table[addr_page_index].bytes_used == 0)
+        || (page_table[addr_page_index].gen != from_space)
+        /* Skip if already marked dont_move. */
+        || (page_table[addr_page_index].dont_move != 0))
+        return 0;
+    gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
+
+    /* quick check 2: Check the offset within the page.  An address
+     * whose offset lies beyond the page's bytes_used points into the
+     * unallocated tail of the page and so cannot reference any
+     * object. */
+    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
+        page_table[addr_page_index].bytes_used)
+        return 0;
+
+    /* Filter out anything which can't be a pointer to a Lisp object
+     * (or, as a special case which also requires dont_move, a return
+     * address referring to something in a CodeObject). This is
+     * expensive but important, since it vastly reduces the
+     * probability that random garbage will be bogusly interpreted as
+     * a pointer which prevents a page from moving.
+     *
+     * This only needs to happen on x86oids, where this is used for
+     * conservative roots. Non-x86oid systems only ever call this
+     * function on known-valid lisp objects. */
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+    if (!(code_page_p(addr_page_index)
+          || (is_lisp_pointer((lispobj)addr) &&
+              possibly_valid_dynamic_space_pointer(addr))))
+        return 0;
+#endif
+
+    return 1;
+}
+
/* Adjust large bignum and vector objects. This will adjust the
* allocated region if the size has shrunk, and move unboxed objects
* into unboxed pages. The pages are not promoted here, and the
page_index_t i;
unsigned int region_allocation;
- /* quick check 1: Address is quite likely to have been invalid. */
- if ((addr_page_index == -1)
- || page_free_p(addr_page_index)
- || (page_table[addr_page_index].bytes_used == 0)
- || (page_table[addr_page_index].gen != from_space)
- /* Skip if already marked dont_move. */
- || (page_table[addr_page_index].dont_move != 0))
+ if (!valid_conservative_root_p(addr, addr_page_index))
return;
- gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
+
/* (Now that we know that addr_page_index is in range, it's
* safe to index into page_table[] with it.) */
region_allocation = page_table[addr_page_index].allocated;
- /* quick check 2: Check the offset within the page.
- *
- */
- if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
- page_table[addr_page_index].bytes_used)
- return;
-
- /* Filter out anything which can't be a pointer to a Lisp object
- * (or, as a special case which also requires dont_move, a return
- * address referring to something in a CodeObject). This is
- * expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreted as
- * a pointer which prevents a page from moving.
- *
- * This only needs to happen on x86oids, where this is used for
- * conservative roots. Non-x86oid systems only ever call this
- * function on known-valid lisp objects. */
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- if (!(code_page_p(addr_page_index)
- || (is_lisp_pointer((lispobj)addr) &&
- possibly_valid_dynamic_space_pointer(addr))))
- return;
-#endif
-
/* Find the beginning of the region. Note that there may be
* objects in the region preceding the one that we were passed a
* pointer to: if this is the case, we will write-protect all the
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
+#ifdef SIMD_PACK_WIDETAG
+ case SIMD_PACK_WIDETAG:
+#endif
case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
case SIMPLE_CHARACTER_STRING_WIDETAG:
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
page_index_t last_page;
- int region_allocation = page_table[i].allocated;
/* This should be the start of a contiguous block */
gc_assert(page_starts_contiguous_block_p(i));
* SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
- boolean save_runtime_options,
- boolean compressed, int compression_level)
+ boolean save_runtime_options, boolean compressed,
+ int compression_level, int application_type)
{
FILE *file;
void *runtime_bytes = NULL;
collect_garbage(HIGHEST_NORMAL_GENERATION+1);
if (prepend_runtime)
- save_runtime_to_filehandle(file, runtime_bytes, runtime_size);
+ save_runtime_to_filehandle(file, runtime_bytes, runtime_size,
+ application_type);
/* The dumper doesn't know that pages need to be zeroed before use. */
zero_all_free_pages();