#include "genesis/instance.h"
#include "genesis/layout.h"
#include "gencgc.h"
-#if defined(LUTEX_WIDETAG)
-#include "pthread-lutex.h"
-#endif
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif
boolean enable_page_protection = 1;
/* the minimum size (in bytes) for a large object */
+#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
+long large_object_size = 4 * GENCGC_CARD_BYTES;
+#else
+long large_object_size = 4 * PAGE_BYTES;
+#endif
\f
/*
* prevent a GC when a large number of new live objects have been
* added, in which case a GC could be a waste of time */
double minimum_age_before_gc;
-
- /* A linked list of lutex structures in this generation, used for
- * implementing lutex finalization. */
-#ifdef LUTEX_WIDETAG
- struct lutex *lutexes;
-#else
- void *lutexes;
-#endif
};
/* an array of generation structures. There needs to be one more
}
+/* Zero pages START..END (inclusive) and clear their need_to_zero
+ * flags, recording in the page table that these pages are known to
+ * be zero-filled and need no further zeroing before reuse. */
+static void
+zero_and_mark_pages(page_index_t start, page_index_t end) {
+    page_index_t i;
+
+    zero_pages(start, end);
+    for (i = start; i <= end; i++)
+        page_table[i].need_to_zero = 0;
+}
+
/* Zero the pages from START to END (inclusive), except for those
 * pages that are known to be already zeroed. Mark all pages in the
 * range as non-zeroed.
return copy_large_unboxed_object(object, length);
}
#endif
-
-\f
-/*
- * Lutexes. Using the normal finalization machinery for finalizing
- * lutexes is tricky, since the finalization depends on working lutexes.
- * So we track the lutexes in the GC and finalize them manually.
- */
-
-#if defined(LUTEX_WIDETAG)
-
-/*
- * Start tracking LUTEX in the GC, by adding it to the linked list of
- * lutexes in the nursery generation. The caller is responsible for
- * locking, and GCs must be inhibited until the registration is
- * complete.
- */
-void
-gencgc_register_lutex (struct lutex *lutex) {
-    int index = find_page_index(lutex);
-    generation_index_t gen;
-    struct lutex *head;
-
-    /* This lutex is in static space, so we don't need to worry about
-     * finalizing it.
-     */
-    if (index == -1)
-        return;
-
-    /* Track the lutex in the generation of the page it lives on. */
-    gen = page_table[index].gen;
-
-    gc_assert(gen >= 0);
-    gc_assert(gen < NUM_GENERATIONS);
-
-    head = generations[gen].lutexes;
-
-    /* Push onto the front of that generation's doubly-linked list. */
-    lutex->gen = gen;
-    lutex->next = head;
-    lutex->prev = NULL;
-    if (head)
-        head->prev = lutex;
-    generations[gen].lutexes = lutex;
-}
-
-/*
- * Stop tracking LUTEX in the GC by removing it from the appropriate
- * linked lists. This will only be called during GC, so no locking is
- * needed.
- */
-void
-gencgc_unregister_lutex (struct lutex *lutex) {
-    /* Splice the lutex out of its generation's doubly-linked list. */
-    if (lutex->prev) {
-        lutex->prev->next = lutex->next;
-    } else {
-        /* No predecessor: this lutex was the list head. */
-        generations[lutex->gen].lutexes = lutex->next;
-    }
-
-    if (lutex->next) {
-        lutex->next->prev = lutex->prev;
-    }
-
-    /* Clear the links so the lutex is visibly untracked. */
-    lutex->next = NULL;
-    lutex->prev = NULL;
-    lutex->gen = -1;
-}
-
-/*
- * Mark all lutexes in generation GEN as not live.
- */
-static void
-unmark_lutexes (generation_index_t gen) {
-    struct lutex *lutex = generations[gen].lutexes;
-
-    /* Clear the live flag on every tracked lutex; scavenging will
-     * set it again for the ones that are still reachable. */
-    while (lutex) {
-        lutex->live = 0;
-        lutex = lutex->next;
-    }
-}
-
-/*
- * Finalize all lutexes in generation GEN that have not been marked live.
- */
-static void
-reap_lutexes (generation_index_t gen) {
-    struct lutex *lutex = generations[gen].lutexes;
-
-    while (lutex) {
-        /* Grab the successor first: unregistering clears lutex->next. */
-        struct lutex *next = lutex->next;
-        if (!lutex->live) {
-            lutex_destroy((tagged_lutex_t) lutex);
-            gencgc_unregister_lutex(lutex);
-        }
-        lutex = next;
-    }
-}
-
-/*
- * Mark LUTEX as live.
- */
-static void
-mark_lutex (lispobj tagged_lutex) {
-    struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
-
-    /* Mark as reachable so reap_lutexes() will not finalize it. */
-    lutex->live = 1;
-}
-
-/*
- * Move all lutexes in generation FROM to generation TO.
- */
-static void
-move_lutexes (generation_index_t from, generation_index_t to) {
-    struct lutex *tail = generations[from].lutexes;
-
-    /* Nothing to move */
-    if (!tail)
-        return;
-
-    /* Change the generation of the lutexes in FROM. */
-    while (tail->next) {
-        tail->gen = to;
-        tail = tail->next;
-    }
-    /* The loop above stops on the last node, so retag it here. */
-    tail->gen = to;
-
-    /* Link the last lutex in the FROM list to the start of the TO list */
-    tail->next = generations[to].lutexes;
-
-    /* And vice versa */
-    if (generations[to].lutexes) {
-        generations[to].lutexes->prev = tail;
-    }
-
-    /* And update the generations structures to match this */
-    generations[to].lutexes = generations[from].lutexes;
-    generations[from].lutexes = NULL;
-}
-
-static long
-scav_lutex(lispobj *where, lispobj object)
-{
-    /* Being scavenged proves the lutex is reachable: mark it live. */
-    mark_lutex((lispobj) where);
-
-    /* Return the object's size in words, rounded up to an even count. */
-    return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
-}
-
-static lispobj
-trans_lutex(lispobj object)
-{
-    struct lutex *lutex = (struct lutex *) native_pointer(object);
-    lispobj copied;
-    size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
-    gc_assert(is_lisp_pointer(object));
-    copied = copy_object(object, words);
-
-    /* Update the links, since the lutex moved in memory. */
-    if (lutex->next) {
-        lutex->next->prev = (struct lutex *) native_pointer(copied);
-    }
-
-    if (lutex->prev) {
-        lutex->prev->next = (struct lutex *) native_pointer(copied);
-    } else {
-        /* The old lutex was its generation's list head; point the
-         * head at the copy instead. */
-        generations[lutex->gen].lutexes =
-            (struct lutex *) native_pointer(copied);
-    }
-
-    return copied;
-}
-
-static long
-size_lutex(lispobj *where)
-{
-    /* Lutexes are fixed-size: round up to an even word count. */
-    return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
-}
-#endif /* LUTEX_WIDETAG */
-
\f
/*
* weak pointers
case CODE_HEADER_WIDETAG:
/* Make sure we actually point to a function in the code object,
* as opposed to a random point there. */
- if (SIMPLE_FUN_HEADER_WIDETAG==widetag_of(*(pointer-FUN_POINTER_LOWTAG)))
+ if (SIMPLE_FUN_HEADER_WIDETAG==widetag_of(*((lispobj *)(((unsigned long)pointer)-FUN_POINTER_LOWTAG))))
return 1;
else
return 0;
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
+
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
+
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
-#ifdef LUTEX_WIDETAG
- case LUTEX_WIDETAG:
-#endif
break;
default:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
+
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
+
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
+
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
+
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
-#ifdef LUTEX_WIDETAG
- case LUTEX_WIDETAG:
-#endif
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
case NO_TLS_VALUE_MARKER_WIDETAG:
#endif
/* Initialize the weak pointer list. */
weak_pointers = NULL;
-#ifdef LUTEX_WIDETAG
- unmark_lutexes(generation);
-#endif
-
/* When a generation is not being raised it is transported to a
* temporary generation (NUM_GENERATIONS), and lowered when
* done. Set up this new generation. There should be no pages
scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
/* do the tls as well */
- len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
(sizeof (struct thread))/(sizeof (lispobj));
scavenge((lispobj *) (th+1),len);
#endif
else
++generations[generation].num_gc;
-#ifdef LUTEX_WIDETAG
- reap_lutexes(generation);
- if (raise)
- move_lutexes(generation, generation+1);
-#endif
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
}
/* Hand pages FROM..TO (inclusive) back to the OS where the release
 * granularity allows, zeroing (and marking as zeroed) any unaligned
 * remainder at either end.
 * NOTE(review): this hunk is truncated mid-declaration in this view;
 * the aligned_from/aligned_end computations are not shown — confirm
 * against the full file. */
static void
-remap_page_range (page_index_t from, page_index_t to, int forcibly)
+remap_page_range (page_index_t from, page_index_t to)
{
    /* There's a mysterious Solaris/x86 problem with using mmap
     * tricks for memory zeroing. See sbcl-devel thread
     * "Re: patch: standalone executable redux".
-     *
-     * Since pages don't have to be zeroed ahead of time, only do
-     * so when called from purify.
     */
#if defined(LISP_FEATURE_SUNOS)
-    if (forcibly)
-        zero_pages(from, to);
+    zero_and_mark_pages(from, to);
#else
-    page_index_t aligned_from, aligned_end, end = to+1;
-
-    const page_index_t
+    const page_index_t
        release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
        release_mask = release_granularity-1,
        end = to+1,
    if (aligned_from < aligned_end) {
        zero_pages_with_mmap(aligned_from, aligned_end-1);
-        if (forcibly) {
-            if (aligned_from != from)
-                zero_pages(from, aligned_from-1);
-            if (aligned_end != end)
-                zero_pages(aligned_end, end-1);
-        }
-    } else if (forcibly)
-        zero_pages(from, to);
+        if (aligned_from != from)
+            zero_and_mark_pages(from, aligned_from-1);
+        if (aligned_end != end)
+            zero_and_mark_pages(aligned_end, end-1);
+    } else {
+        zero_and_mark_pages(from, to);
+    }
#endif
}
/* Return free pages in FROM..TO (inclusive) to the OS.  With FORCIBLY
 * set the whole range is remapped unconditionally; otherwise only runs
 * of free pages whose need_to_zero flag is set are remapped.
 * NOTE(review): this hunk is truncated between the two conditions of
 * the scan loop in this view — confirm against the full file. */
static void
remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
-    page_index_t first_page, last_page,
-        first_aligned_page, last_aligned_page;
+    page_index_t first_page, last_page;
    if (forcibly)
-        return remap_page_range(from, to, 1);
+        return remap_page_range(from, to);
-    /* See comment above about mysterious failures on Solaris/x86.
-     */
-#if !defined(LISP_FEATURE_SUNOS)
    for (first_page = from; first_page <= to; first_page++) {
        if (page_allocated_p(first_page) ||
            (page_table[first_page].need_to_zero == 0))
            (page_table[last_page].need_to_zero == 1))
            last_page++;
-        remap_page_range(first_page, last_page-1, 0);
+        remap_page_range(first_page, last_page-1);
        first_page = last_page;
    }
-#endif
}
generation_index_t small_generation_limit = 1;
generations[page].gc_trigger = 2000000;
generations[page].num_gc = 0;
generations[page].cum_sum_bytes_allocated = 0;
- generations[page].lutexes = NULL;
}
if (gencgc_verbose > 1)
scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
-#ifdef LUTEX_WIDETAG
- scavtab[LUTEX_WIDETAG] = scav_lutex;
- transother[LUTEX_WIDETAG] = trans_lutex;
- sizetab[LUTEX_WIDETAG] = size_lutex;
-#endif
-
heap_base = (void*)DYNAMIC_SPACE_START;
/* The page structures are initialized implicitly when page_table
generations[i].bytes_consed_between_gc = 2000000;
generations[i].number_of_gcs_before_promotion = 1;
generations[i].minimum_age_before_gc = 0.75;
- generations[i].lutexes = NULL;
}
/* Initialize gc_alloc. */
page++;
} while (page_address(page) < alloc_ptr);
-#ifdef LUTEX_WIDETAG
- /* Lutexes have been registered in generation 0 by coreparse, and
- * need to be moved to the right one manually.
- */
- move_lutexes(0, PSEUDO_STATIC_GENERATION);
-#endif
-
last_free_page = page;
generations[gen].bytes_allocated = npage_bytes(page);
* SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
- boolean save_runtime_options)
+ boolean save_runtime_options,
+ boolean compressed, int compression_level)
{
FILE *file;
void *runtime_bytes = NULL;
/* The dumper doesn't know that pages need to be zeroed before use. */
zero_all_free_pages();
save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
- prepend_runtime, save_runtime_options);
+ prepend_runtime, save_runtime_options,
+ compressed ? compression_level : COMPRESSION_LEVEL_NONE);
/* Oops. Save still managed to fail. Since we've mangled the stack
* beyond hope, there's not much we can do.
* (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's