#include "genesis/instance.h"
#include "genesis/layout.h"
#include "gencgc.h"
-#if defined(LUTEX_WIDETAG)
-#include "pthread-lutex.h"
-#endif
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
#include "genesis/cons.h"
#endif
boolean enable_page_protection = 1;
/* the minimum size (in bytes) for a large object */
+#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
+long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
+long large_object_size = 4 * GENCGC_CARD_BYTES;
+#else
long large_object_size = 4 * PAGE_BYTES;
+#endif
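+/* Net effect: large_object_size is four times the largest of
+ * PAGE_BYTES, GENCGC_CARD_BYTES and GENCGC_ALLOC_GRANULARITY; e.g. if
+ * all three are 4096 bytes it comes to 16384. */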
\f
/*
inline void *
page_address(page_index_t page_num)
{
- return (heap_base + (page_num * PAGE_BYTES));
+ return (heap_base + (page_num * GENCGC_CARD_BYTES));
}
/* Calculate the address where the allocation region associated with
{
if (addr >= heap_base) {
page_index_t index = ((pointer_sized_uint_t)addr -
- (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
+ (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES;
if (index < page_table_pages)
return (index);
}
npage_bytes(long npages)
{
gc_assert(npages>=0);
- return ((unsigned long)npages)*PAGE_BYTES;
+ return ((unsigned long)npages)*GENCGC_CARD_BYTES;
}
/* Check that X is a higher address than Y and return offset from Y to
* prevent a GC when a large number of new live objects have been
* added, in which case a GC could be a waste of time */
double minimum_age_before_gc;
-
- /* A linked list of lutex structures in this generation, used for
- * implementing lutex finalization. */
-#ifdef LUTEX_WIDETAG
- struct lutex *lutexes;
-#else
- void *lutexes;
-#endif
};
/* an array of generation structures. There needs to be one more
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
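+/* Runtime copies of the granularity constants: the release granularity
+ * is the unit in which free pages are handed back to the OS (see
+ * remap_page_range), the alloc granularity a lower bound on the size of
+ * a freshly opened allocation region (see gc_find_freeish_pages). */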
+extern unsigned long gencgc_release_granularity;
+unsigned long gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
+
+extern unsigned long gencgc_alloc_granularity;
+unsigned long gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
+
\f
/*
* miscellaneous heap functions
if (start > end)
return;
+ gc_assert(length >= gencgc_release_granularity);
+ gc_assert((length % gencgc_release_granularity) == 0);
+
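+ /* The range handed to os_invalidate()/os_validate() below must be a
+ * whole number of release-granularity units. */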
os_invalidate(addr, length);
new_addr = os_validate(addr, length);
if (new_addr == NULL || new_addr != addr) {
}
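+/* Unconditionally zero the pages from START to END (inclusive) and
+ * clear their need_to_zero flags so later allocations can skip them. */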
+static void
+zero_and_mark_pages(page_index_t start, page_index_t end) {
+ page_index_t i;
+
+ zero_pages(start, end);
+ for (i = start; i <= end; i++)
+ page_table[i].need_to_zero = 0;
+}
+
/* Zero the pages from START to END (inclusive), except for those
- * pages that are known to already zeroed. Mark all pages in the
- * ranges as non-zeroed.
+ * pages that are known to be already zeroed. Mark all pages in the
+ * range as non-zeroed.
gc_assert(ret == 0);
first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
- bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
+ bytes_found=(GENCGC_CARD_BYTES - page_table[first_page].bytes_used)
+ npage_bytes(last_page-first_page);
/* Set up the alloc_region. */
more = 0;
if ((bytes_used = void_diff(alloc_region->free_pointer,
page_address(first_page)))
- >PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ >GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
/* Calculate the number of bytes used in this page. */
more = 0;
if ((bytes_used = void_diff(alloc_region->free_pointer,
- page_address(next_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ page_address(next_page)))>GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
/* Calc. the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
/* Calculate the number of bytes used in this page. */
more = 0;
bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
- if (bytes_used > PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
+ if (bytes_used > GENCGC_CARD_BYTES) {
+ bytes_used = GENCGC_CARD_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
{
page_index_t first_page, last_page;
page_index_t restart_page = *restart_page_ptr;
+ long nbytes_goal = nbytes;
long bytes_found = 0;
long most_bytes_found = 0;
+ page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
+ int small_object = nbytes < GENCGC_CARD_BYTES;
/* FIXME: assert(free_pages_lock is held); */
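+ /* Pad small requests up to the allocation granularity, so that new
+ * allocation regions are opened in larger chunks. */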
+ if (nbytes_goal < gencgc_alloc_granularity)
+ nbytes_goal = gencgc_alloc_granularity;
+
/* Toggled by gc_and_save for heap compaction, normally -1. */
if (gencgc_alloc_start_page != -1) {
restart_page = gencgc_alloc_start_page;
}
gc_assert(nbytes>=0);
- if (((unsigned long)nbytes)>=PAGE_BYTES) {
- /* Search for a contiguous free space of at least nbytes,
- * aligned on a page boundary. The page-alignment is strictly
- * speaking needed only for objects at least large_object_size
- * bytes in size. */
- do {
- first_page = restart_page;
- while ((first_page < page_table_pages) &&
- page_allocated_p(first_page))
+ /* Search for a page with at least nbytes of space. We prefer
+ * not to split small objects on multiple pages, to reduce the
+ * number of contiguous allocation regions spanning multiple
+ * pages: this helps avoid excessive conservatism.
+ *
+ * For other objects, we guarantee that they start on their own
+ * page boundary.
+ */
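+ /* For example, a two-word cons may go in the unused tail of a
+ * partially filled page of the right type and generation, while a
+ * large vector always begins on a fresh page. */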
+ first_page = restart_page;
+ while (first_page < page_table_pages) {
+ bytes_found = 0;
+ if (page_free_p(first_page)) {
+ gc_assert(0 == page_table[first_page].bytes_used);
+ bytes_found = GENCGC_CARD_BYTES;
+ } else if (small_object &&
+ (page_table[first_page].allocated == page_type_flag) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0)) {
+ bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used;
+ if (bytes_found < nbytes) {
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
first_page++;
-
- last_page = first_page;
- bytes_found = PAGE_BYTES;
- while ((bytes_found < nbytes) &&
- (last_page < (page_table_pages-1)) &&
- page_free_p(last_page+1)) {
- last_page++;
- bytes_found += PAGE_BYTES;
- gc_assert(0 == page_table[last_page].bytes_used);
- gc_assert(0 == page_table[last_page].write_protected);
+ continue;
}
- if (bytes_found > most_bytes_found)
- most_bytes_found = bytes_found;
- restart_page = last_page + 1;
- } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
-
- } else {
- /* Search for a page with at least nbytes of space. We prefer
- * not to split small objects on multiple pages, to reduce the
- * number of contiguous allocation regions spaning multiple
- * pages: this helps avoid excessive conservativism. */
- first_page = restart_page;
- while (first_page < page_table_pages) {
- if (page_free_p(first_page))
- {
- gc_assert(0 == page_table[first_page].bytes_used);
- bytes_found = PAGE_BYTES;
- break;
- }
- else if ((page_table[first_page].allocated == page_type_flag) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0))
- {
- bytes_found = PAGE_BYTES
- - page_table[first_page].bytes_used;
- if (bytes_found > most_bytes_found)
- most_bytes_found = bytes_found;
- if (bytes_found >= nbytes)
- break;
- }
+ } else {
first_page++;
+ continue;
}
- last_page = first_page;
- restart_page = first_page + 1;
+
+ gc_assert(page_table[first_page].write_protected == 0);
+ for (last_page = first_page+1;
+ ((last_page < page_table_pages) &&
+ page_free_p(last_page) &&
+ (bytes_found < nbytes_goal));
+ last_page++) {
+ bytes_found += GENCGC_CARD_BYTES;
+ gc_assert(0 == page_table[last_page].bytes_used);
+ gc_assert(0 == page_table[last_page].write_protected);
+ }
+
+ if (bytes_found > most_bytes_found) {
+ most_bytes_found = bytes_found;
+ most_bytes_found_from = first_page;
+ most_bytes_found_to = last_page;
+ }
+ if (bytes_found >= nbytes_goal)
+ break;
+
+ first_page = last_page;
}
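+ /* Settle for the largest run seen: it may be smaller than
+ * nbytes_goal, but as long as it covers nbytes the allocation can
+ * still succeed. */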
+ bytes_found = most_bytes_found;
+ restart_page = first_page + 1;
+
/* Check for a failure */
if (bytes_found < nbytes) {
gc_assert(restart_page >= page_table_pages);
gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
}
- gc_assert(page_table[first_page].write_protected == 0);
-
- *restart_page_ptr = first_page;
- return last_page;
+ *restart_page_ptr = most_bytes_found_from;
+ return most_bytes_found_to-1;
}
/* Allocate bytes. All the rest of the special-purpose allocation
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
+ while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_boxed_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
/* Should have been unprotected by unprotect_oldspace(). */
gc_assert(page_table[next_page].write_protected == 0);
page_table[next_page].gen = new_space;
- remaining_bytes -= PAGE_BYTES;
+ remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
+ while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
page_boxed_p(next_page) &&
page_table[next_page].large_object &&
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
+ while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
page_table[next_page].gen = new_space;
page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- remaining_bytes -= PAGE_BYTES;
+ remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
+ while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
return copy_large_unboxed_object(object, length);
}
#endif
-
-\f
-/*
- * Lutexes. Using the normal finalization machinery for finalizing
- * lutexes is tricky, since the finalization depends on working lutexes.
- * So we track the lutexes in the GC and finalize them manually.
- */
-
-#if defined(LUTEX_WIDETAG)
-
-/*
- * Start tracking LUTEX in the GC, by adding it to the linked list of
- * lutexes in the nursery generation. The caller is responsible for
- * locking, and GCs must be inhibited until the registration is
- * complete.
- */
-void
-gencgc_register_lutex (struct lutex *lutex) {
- int index = find_page_index(lutex);
- generation_index_t gen;
- struct lutex *head;
-
- /* This lutex is in static space, so we don't need to worry about
- * finalizing it.
- */
- if (index == -1)
- return;
-
- gen = page_table[index].gen;
-
- gc_assert(gen >= 0);
- gc_assert(gen < NUM_GENERATIONS);
-
- head = generations[gen].lutexes;
-
- lutex->gen = gen;
- lutex->next = head;
- lutex->prev = NULL;
- if (head)
- head->prev = lutex;
- generations[gen].lutexes = lutex;
-}
-
-/*
- * Stop tracking LUTEX in the GC by removing it from the appropriate
- * linked lists. This will only be called during GC, so no locking is
- * needed.
- */
-void
-gencgc_unregister_lutex (struct lutex *lutex) {
- if (lutex->prev) {
- lutex->prev->next = lutex->next;
- } else {
- generations[lutex->gen].lutexes = lutex->next;
- }
-
- if (lutex->next) {
- lutex->next->prev = lutex->prev;
- }
-
- lutex->next = NULL;
- lutex->prev = NULL;
- lutex->gen = -1;
-}
-
-/*
- * Mark all lutexes in generation GEN as not live.
- */
-static void
-unmark_lutexes (generation_index_t gen) {
- struct lutex *lutex = generations[gen].lutexes;
-
- while (lutex) {
- lutex->live = 0;
- lutex = lutex->next;
- }
-}
-
-/*
- * Finalize all lutexes in generation GEN that have not been marked live.
- */
-static void
-reap_lutexes (generation_index_t gen) {
- struct lutex *lutex = generations[gen].lutexes;
-
- while (lutex) {
- struct lutex *next = lutex->next;
- if (!lutex->live) {
- lutex_destroy((tagged_lutex_t) lutex);
- gencgc_unregister_lutex(lutex);
- }
- lutex = next;
- }
-}
-
-/*
- * Mark LUTEX as live.
- */
-static void
-mark_lutex (lispobj tagged_lutex) {
- struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
-
- lutex->live = 1;
-}
-
-/*
- * Move all lutexes in generation FROM to generation TO.
- */
-static void
-move_lutexes (generation_index_t from, generation_index_t to) {
- struct lutex *tail = generations[from].lutexes;
-
- /* Nothing to move */
- if (!tail)
- return;
-
- /* Change the generation of the lutexes in FROM. */
- while (tail->next) {
- tail->gen = to;
- tail = tail->next;
- }
- tail->gen = to;
-
- /* Link the last lutex in the FROM list to the start of the TO list */
- tail->next = generations[to].lutexes;
-
- /* And vice versa */
- if (generations[to].lutexes) {
- generations[to].lutexes->prev = tail;
- }
-
- /* And update the generations structures to match this */
- generations[to].lutexes = generations[from].lutexes;
- generations[from].lutexes = NULL;
-}
-
-static long
-scav_lutex(lispobj *where, lispobj object)
-{
- mark_lutex((lispobj) where);
-
- return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
-}
-
-static lispobj
-trans_lutex(lispobj object)
-{
- struct lutex *lutex = (struct lutex *) native_pointer(object);
- lispobj copied;
- size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
- gc_assert(is_lisp_pointer(object));
- copied = copy_object(object, words);
-
- /* Update the links, since the lutex moved in memory. */
- if (lutex->next) {
- lutex->next->prev = (struct lutex *) native_pointer(copied);
- }
-
- if (lutex->prev) {
- lutex->prev->next = (struct lutex *) native_pointer(copied);
- } else {
- generations[lutex->gen].lutexes =
- (struct lutex *) native_pointer(copied);
- }
-
- return copied;
-}
-
-static long
-size_lutex(lispobj *where)
-{
- return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
-}
-#endif /* LUTEX_WIDETAG */
-
\f
/*
* weak pointers
case CODE_HEADER_WIDETAG:
/* Make sure we actually point to a function in the code object,
* as opposed to a random point there. */
- if (SIMPLE_FUN_HEADER_WIDETAG==widetag_of(*(pointer-FUN_POINTER_LOWTAG)))
+ if (SIMPLE_FUN_HEADER_WIDETAG==widetag_of(*((lispobj *)(((unsigned long)pointer)-FUN_POINTER_LOWTAG))))
return 1;
else
return 0;
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
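+ /* The word-size-specific fixnum-width widetags (unsigned-byte 29/60,
+ * signed-byte 30/61) are folded into the two SIMPLE_ARRAY_*FIXNUM
+ * cases. */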
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
-#ifdef LUTEX_WIDETAG
- case LUTEX_WIDETAG:
-#endif
break;
default:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
+ while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
page_table[next_page].allocated = boxed;
/* Shouldn't be write-protected at this stage. Essential that the
* pages aren't. */
gc_assert(!page_table[next_page].write_protected);
- remaining_bytes -= PAGE_BYTES;
+ remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
+ while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
/* quick check 2: Check the offset within the page.
*
*/
- if (((unsigned long)addr & (PAGE_BYTES - 1)) >
+ if (((unsigned long)addr & (GENCGC_CARD_BYTES - 1)) >
page_table[addr_page_index].bytes_used)
return;
while (page_table[first_page].region_start_offset != 0) {
--first_page;
/* Do some checks. */
- gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
gc_assert(page_table[first_page].gen == from_space);
gc_assert(page_table[first_page].allocated == region_allocation);
}
if (page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
- || (((unsigned long)addr & (PAGE_BYTES - 1))
+ || (((unsigned long)addr & (GENCGC_CARD_BYTES - 1))
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
"weird? ignore ptr 0x%x to freed area of large object\n",
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < PAGE_BYTES)
- /* ..or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[i].bytes_used < GENCGC_CARD_BYTES)
+ /* ..or it is GENCGC_CARD_BYTES and is the last in the block */
|| page_free_p(i+1)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
/*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
os_protect((void *)page_addr,
- PAGE_BYTES,
+ GENCGC_CARD_BYTES,
OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
for (last_page = i; ; last_page++) {
write_protected =
write_protected && page_table[last_page].write_protected;
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
+ /* Or it is GENCGC_CARD_BYTES and is the last in the block */
|| (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
/* Check whether this is the last page in this
* contiguous block */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
+ /* Or it is GENCGC_CARD_BYTES and is the last in the block */
|| (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
if (!region_addr) {
/* First region. */
region_addr = page_addr;
- region_bytes = PAGE_BYTES;
+ region_bytes = GENCGC_CARD_BYTES;
} else if (region_addr + region_bytes == page_addr) {
/* Region continue. */
- region_bytes += PAGE_BYTES;
+ region_bytes += GENCGC_CARD_BYTES;
} else {
/* Unprotect previous region. */
os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
/* First page in new region. */
region_addr = page_addr;
- region_bytes = PAGE_BYTES;
+ region_bytes = GENCGC_CARD_BYTES;
}
}
}
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+ case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+ case SIMPLE_ARRAY_FIXNUM_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
-#ifdef LUTEX_WIDETAG
- case LUTEX_WIDETAG:
-#endif
#ifdef NO_TLS_VALUE_MARKER_WIDETAG
case NO_TLS_VALUE_MARKER_WIDETAG:
#endif
for (last_page = i; ;last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
+ if ((page_table[last_page].bytes_used < GENCGC_CARD_BYTES)
+ /* Or it is GENCGC_CARD_BYTES and is the last in the block */
|| (page_table[last_page+1].allocated != region_allocation)
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
}
}
} else {
- long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
+ long free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
long *start_addr = (long *)((unsigned long)page_address(page)
+ page_table[page].bytes_used);
/* Initialize the weak pointer list. */
weak_pointers = NULL;
-#ifdef LUTEX_WIDETAG
- unmark_lutexes(generation);
-#endif
-
/* When a generation is not being raised it is transported to a
* temporary generation (NUM_GENERATIONS), and lowered when
* done. Set up this new generation. There should be no pages
scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
/* do the tls as well */
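+ /* FREE_TLS_INDEX is treated as an untagged byte offset here (hence
+ * the shift down to a word count) before subtracting the words
+ * occupied by the thread structure itself. */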
- len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) -
(sizeof (struct thread))/(sizeof (lispobj));
scavenge((lispobj *) (th+1),len);
#endif
else
++generations[generation].num_gc;
-#ifdef LUTEX_WIDETAG
- reap_lutexes(generation);
- if (raise)
- move_lutexes(generation, generation+1);
-#endif
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
}
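+/* Return the pages from FROM to TO (inclusive) to the OS where the
+ * range aligns with the release granularity, and zero the unaligned
+ * remainder by hand. */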
static void
-remap_free_pages (page_index_t from, page_index_t to)
+remap_page_range (page_index_t from, page_index_t to)
+{
+ /* There's a mysterious Solaris/x86 problem with using mmap
+ * tricks for memory zeroing. See sbcl-devel thread
+ * "Re: patch: standalone executable redux".
+ */
+#if defined(LISP_FEATURE_SUNOS)
+ zero_and_mark_pages(from, to);
+#else
+ const page_index_t
+ release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES,
+ release_mask = release_granularity-1,
+ end = to+1,
+ aligned_from = (from+release_mask)&~release_mask,
+ aligned_end = (end&~release_mask);
+
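+ /* E.g. with 4 KiB cards and a 64 KiB release granularity
+ * (release_granularity == 16), from == 5 and to == 40 give
+ * aligned_from == 16 and aligned_end == 32: pages 16..31 go back to
+ * the OS via mmap, pages 5..15 and 32..40 are zeroed in place. */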
+ if (aligned_from < aligned_end) {
+ zero_pages_with_mmap(aligned_from, aligned_end-1);
+ if (aligned_from != from)
+ zero_and_mark_pages(from, aligned_from-1);
+ if (aligned_end != end)
+ zero_and_mark_pages(aligned_end, end-1);
+ } else {
+ zero_and_mark_pages(from, to);
+ }
+#endif
+}
+
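+/* Remap the free pages between FROM and TO (inclusive). When FORCIBLY
+ * is set, remap the whole range unconditionally; otherwise only touch
+ * runs of free pages that are still marked as needing zeroing. */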
+static void
+remap_free_pages (page_index_t from, page_index_t to, int forcibly)
{
page_index_t first_page, last_page;
+ if (forcibly)
+ return remap_page_range(from, to);
+
for (first_page = from; first_page <= to; first_page++) {
if (page_allocated_p(first_page) ||
- (page_table[first_page].need_to_zero == 0)) {
+ (page_table[first_page].need_to_zero == 0))
continue;
- }
last_page = first_page + 1;
- while (page_free_p(last_page) &&
- (last_page < to) &&
- (page_table[last_page].need_to_zero == 1)) {
+ while ((last_page <= to) &&
+ page_free_p(last_page) &&
+ (page_table[last_page].need_to_zero == 1))
last_page++;
- }
- /* There's a mysterious Solaris/x86 problem with using mmap
- * tricks for memory zeroing. See sbcl-devel thread
- * "Re: patch: standalone executable redux".
- */
-#if defined(LISP_FEATURE_SUNOS)
- zero_pages(first_page, last_page-1);
-#else
- zero_pages_with_mmap(first_page, last_page-1);
-#endif
+ remap_page_range(first_page, last_page-1);
first_page = last_page;
}
if (gen > small_generation_limit) {
if (last_free_page > high_water_mark)
high_water_mark = last_free_page;
- remap_free_pages(0, high_water_mark);
+ remap_free_pages(0, high_water_mark, 0);
high_water_mark = 0;
}
void
gc_free_heap(void)
{
- page_index_t page;
+ page_index_t page, last_page;
if (gencgc_verbose > 1) {
SHOW("entering gc_free_heap");
/* Skip free pages which should already be zero filled. */
if (page_allocated_p(page)) {
- void *page_start, *addr;
+ void *page_start;
-
- /* Mark the page free. The other slots are assumed invalid
- * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
- * should not be write-protected -- except that the
- * generation is used for the current region but it sets
- * that up. */
- page_table[page].allocated = FREE_PAGE_FLAG;
- page_table[page].bytes_used = 0;
+ for (last_page = page;
+ (last_page < page_table_pages) && page_allocated_p(last_page);
+ last_page++) {
+ /* Mark the page free. The other slots are assumed invalid
+ * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
+ * should not be write-protected -- except that the
+ * generation is used for the current region but it sets
+ * that up. */
+ page_table[last_page].allocated = FREE_PAGE_FLAG;
+ page_table[last_page].bytes_used = 0;
+ page_table[last_page].write_protected = 0;
+ }
#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
* about this change. */
- /* Zero the page. */
page_start = (void *)page_address(page);
-
- /* First, remove any write-protection. */
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[page].write_protected = 0;
-
- os_invalidate(page_start,PAGE_BYTES);
- addr = os_validate(page_start,PAGE_BYTES);
- if (addr == NULL || addr != page_start) {
- lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n",
- page_start,
- addr);
- }
-#else
- page_table[page].write_protected = 0;
+ os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
+ remap_free_pages(page, last_page-1, 1);
+ page = last_page-1;
#endif
} else if (gencgc_zero_check_during_free_heap) {
/* Double-check that the page is zero filled. */
gc_assert(page_free_p(page));
gc_assert(page_table[page].bytes_used == 0);
page_start = (long *)page_address(page);
- for (i=0; i<1024; i++) {
+ for (i=0; i<GENCGC_CARD_BYTES/sizeof(long); i++) {
if (page_start[i] != 0) {
lose("free region not zero at %x\n", page_start + i);
}
generations[page].gc_trigger = 2000000;
generations[page].num_gc = 0;
generations[page].cum_sum_bytes_allocated = 0;
- generations[page].lutexes = NULL;
}
if (gencgc_verbose > 1)
/* Compute the number of pages needed for the dynamic space.
* Dynamic space size should be aligned on page size. */
- page_table_pages = dynamic_space_size/PAGE_BYTES;
+ page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
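+ /* E.g. a 512 MB dynamic space with 4 KiB cards yields 131072
+ * page-table entries. */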
gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
/* The page_table must be allocated using "calloc" to initialize
scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
-#ifdef LUTEX_WIDETAG
- scavtab[LUTEX_WIDETAG] = scav_lutex;
- transother[LUTEX_WIDETAG] = trans_lutex;
- sizetab[LUTEX_WIDETAG] = size_lutex;
-#endif
-
heap_base = (void*)DYNAMIC_SPACE_START;
/* The page structures are initialized implicitly when page_table
generations[i].bytes_consed_between_gc = 2000000;
generations[i].number_of_gcs_before_promotion = 1;
generations[i].minimum_age_before_gc = 0.75;
- generations[i].lutexes = NULL;
}
/* Initialize gc_alloc. */
/* It is possible, though rare, for the saved page table
* to contain free pages below alloc_ptr. */
page_table[page].gen = gen;
- page_table[page].bytes_used = PAGE_BYTES;
+ page_table[page].bytes_used = GENCGC_CARD_BYTES;
page_table[page].large_object = 0;
page_table[page].write_protected = 0;
page_table[page].write_protected_cleared = 0;
page++;
} while (page_address(page) < alloc_ptr);
-#ifdef LUTEX_WIDETAG
- /* Lutexes have been registered in generation 0 by coreparse, and
- * need to be moved to the right one manually.
- */
- move_lutexes(0, PSEUDO_STATIC_GENERATION);
-#endif
-
last_free_page = page;
generations[gen].bytes_allocated = npage_bytes(page);
gc_assert(ret == 0);
if (page_table[page_index].write_protected) {
/* Unprotect the page. */
- os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL);
page_table[page_index].write_protected_cleared = 1;
page_table[page_index].write_protected = 0;
} else {
if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(i),
- PAGE_BYTES,
+ GENCGC_CARD_BYTES,
OS_VM_PROT_ALL);
#endif
zero_pages(i, i);
* SB!VM:RESTART-LISP-FUNCTION */
void
gc_and_save(char *filename, boolean prepend_runtime,
- boolean save_runtime_options)
+ boolean save_runtime_options,
+ boolean compressed, int compression_level)
{
FILE *file;
void *runtime_bytes = NULL;
/* The dumper doesn't know that pages need to be zeroed before use. */
zero_all_free_pages();
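+ /* Pass the requested core-compression level through, or
+ * COMPRESSION_LEVEL_NONE when compression was not requested. */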
save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
- prepend_runtime, save_runtime_options);
+ prepend_runtime, save_runtime_options,
+ compressed ? compression_level : COMPRESSION_LEVEL_NONE);
/* Oops. Save still managed to fail. Since we've mangled the stack
* beyond hope, there's not much we can do.
* (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's