boolean enable_page_protection = 1;
/* The minimum size (in bytes) for a large object. */
+#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
+long large_object_size = 4 * GENCGC_CARD_BYTES;
+#else
+long large_object_size = 4 * PAGE_BYTES;
+#endif
\f
/*
}
+/* Zero pages START through END (inclusive) with zero_pages(), then
+ * clear each page's need_to_zero flag to record that the pages are
+ * known to be zeroed (need_to_zero == 0 means "already zero"). */
+static void
+zero_and_mark_pages(page_index_t start, page_index_t end) {
+ page_index_t i;
+
+ zero_pages(start, end);
+ for (i = start; i <= end; i++)
+ page_table[i].need_to_zero = 0;
+}
+
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * range as non-zeroed.
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
+
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
+
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
+
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
+
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
+
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
+
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
/* do the tls as well */
- len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
(sizeof (struct thread))/(sizeof (lispobj));
scavenge((lispobj *) (th+1),len);
#endif
}
static void
-remap_page_range (page_index_t from, page_index_t to, int forcibly)
+remap_page_range (page_index_t from, page_index_t to)
{
/* There's a mysterious Solaris/x86 problem with using mmap
* tricks for memory zeroing. See sbcl-devel thread
* "Re: patch: standalone executable redux".
- *
- * Since pages don't have to be zeroed ahead of time, only do
- * so when called from purify.
*/
#if defined(LISP_FEATURE_SUNOS)
- if (forcibly)
- zero_pages(from, to);
+ zero_and_mark_pages(from, to);
#else
- page_index_t aligned_from, aligned_end, end = to+1;
-
- const page_index_t
+ const page_index_t
release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
release_mask = release_granularity-1,
end = to+1,
if (aligned_from < aligned_end) {
zero_pages_with_mmap(aligned_from, aligned_end-1);
- if (forcibly) {
- if (aligned_from != from)
- zero_pages(from, aligned_from-1);
- if (aligned_end != end)
- zero_pages(aligned_end, end-1);
- }
- } else if (forcibly)
- zero_pages(from, to);
+ if (aligned_from != from)
+ zero_and_mark_pages(from, aligned_from-1);
+ if (aligned_end != end)
+ zero_and_mark_pages(aligned_end, end-1);
+ } else {
+ zero_and_mark_pages(from, to);
+ }
#endif
}
static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
- page_index_t first_page, last_page,
- first_aligned_page, last_aligned_page;
+ page_index_t first_page, last_page;
if (forcibly)
- return remap_page_range(from, to, 1);
+ return remap_page_range(from, to);
- /* See comment above about mysterious failures on Solaris/x86.
- */
-#if !defined(LISP_FEATURE_SUNOS)
for (first_page = from; first_page <= to; first_page++) {
if (page_allocated_p(first_page) ||
(page_table[first_page].need_to_zero == 0))
(page_table[last_page].need_to_zero == 1))
last_page++;
- remap_page_range(first_page, last_page-1, 0);
+ remap_page_range(first_page, last_page-1);
first_page = last_page;
}
-#endif
}
generation_index_t small_generation_limit = 1;
* SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
- boolean save_runtime_options)
+ boolean save_runtime_options,
+ boolean compressed, int compression_level)
{
FILE *file;
void *runtime_bytes = NULL;
/* The dumper doesn't know that pages need to be zeroed before use. */
zero_all_free_pages();
save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
- prepend_runtime, save_runtime_options);
+ prepend_runtime, save_runtime_options,
+ compressed ? compression_level : COMPRESSION_LEVEL_NONE);
/* Oops. Save still managed to fail. Since we've mangled the stack
* beyond hope, there's not much we can do.
* (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's