#include "thread.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
+#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "save.h"
#include "genesis/hash-table.h"
#include "genesis/instance.h"
#include "genesis/layout.h"
+#ifdef LUTEX_WIDETAG
+#include "genesis/lutex.h"
+#endif
+
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
                                   int unboxed);
generation_index_t from_space;
generation_index_t new_space;
+/* Set to 1 when in GC */
+boolean gc_active_p = 0;
+
/* Should the GC be conservative on the stack? If false (only right
 * before saving a core), don't scan the stack and don't mark pages
 * dont_move. */
static boolean conservative_stack = 1;
* prevent a GC when a large number of new live objects have been
* added, in which case a GC could be a waste of time */
double min_av_mem_age;
+
+ /* A linked list of lutex structures in this generation, used for
+ * implementing lutex finalization. */
+#ifdef LUTEX_WIDETAG
+ struct lutex *lutexes;
+#else
+ void *lutexes;
+#endif
};
/* an array of generation structures. There needs to be one more
long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != 0)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].gen == generation))
count++;
return count;
page_index_t i;
long count = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].dont_move != 0)) {
++count;
}
}
page_index_t i;
long result = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].gen == gen))
result += page_table[i].bytes_used;
}
return result;
gc_assert(generations[i].bytes_allocated
== count_generation_bytes_allocated(i));
fprintf(stderr,
- " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
i,
generations[i].alloc_start_page,
generations[i].alloc_unboxed_start_page,
generations[i].alloc_large_start_page,
generations[i].alloc_large_unboxed_start_page,
- boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
+ boxed_cnt,
+ unboxed_cnt,
+ large_boxed_cnt,
+ large_unboxed_cnt,
pinned_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES
- - generations[i].bytes_allocated),
+ (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
page_index_t last_page;
long bytes_found;
page_index_t i;
+ int ret;
/*
FSHOW((stderr,
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- thread_mutex_lock(&free_pages_lock);
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
/* do we only want to call this on special occasions? like for boxed_region? */
set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
}
- thread_mutex_unlock(&free_pages_lock);
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
long orig_first_page_bytes_used;
long region_size;
long byte_cnt;
+ int ret;
first_page = alloc_region->first_page;
next_page = first_page+1;
- thread_mutex_lock(&free_pages_lock);
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
if (alloc_region->free_pointer != alloc_region->start_addr) {
/* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
page_table[next_page].allocated = FREE_PAGE_FLAG;
next_page++;
}
- thread_mutex_unlock(&free_pages_lock);
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
+
    /* alloc_region is per-thread, so we're OK to do this unlocked */
gc_set_region_empty(alloc_region);
}
int more;
long bytes_used;
page_index_t next_page;
+ int ret;
- thread_mutex_lock(&free_pages_lock);
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
if (unboxed) {
first_page =
last_free_page = last_page+1;
set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
}
- thread_mutex_unlock(&free_pages_lock);
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(first_page),
static page_index_t gencgc_alloc_start_page = -1;
+void
+gc_heap_exhausted_error_or_lose (long available, long requested)
+{
+    /* Write basic information before doing anything else: if we don't
+     * call into Lisp this is a must, and even if we do there is always
+     * the danger that we bounce back here before the error has been
+     * handled, or indeed even printed.
+ */
+ fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
+ gc_active_p ? "garbage collection" : "allocation", available, requested);
+ if (gc_active_p || (available == 0)) {
+        /* If we are in GC, or totally out of memory, there is no way
+         * to sanely transfer control to the Lisp side of things.
+         */
+ print_generation_stats(1);
+ lose("Heap exhausted, game over.");
+ }
+ else {
+ /* FIXME: assert free_pages_lock held */
+ thread_mutex_unlock(&free_pages_lock);
+ funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR),
+ make_fixnum(available), make_fixnum(requested));
+ lose("HEAP-EXHAUSTED-ERROR fell through");
+ }
+}
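+
+/* Usage sketch: the two callers in gc_find_freeish_pages below hold
+ * free_pages_lock when they detect exhaustion; this function either
+ * loses outright or releases the lock and unwinds into the Lisp-side
+ * HEAP-EXHAUSTED-ERROR handler. */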
+
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
{
first_page++;
}
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose("\n");
- }
+ if (first_page >= NUM_PAGES)
+ gc_heap_exhausted_error_or_lose(0, nbytes);
gc_assert(page_table[first_page].write_protected == 0);
} while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
/* Check for a failure */
- if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
- fprintf(stderr,
- "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose("\n");
- }
+ if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes))
+ gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+
*restart_page_ptr=first_page;
return last_page;
\f
/*
+ * Lutexes. Using the normal finalization machinery for finalizing
+ * lutexes is tricky, since the finalization depends on working lutexes.
+ * So we track the lutexes in the GC and finalize them manually.
+ */
+
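+/* Lifecycle sketch, pieced together from the functions below: a lutex
+ * is registered in the nursery when created, unmarked at the start of
+ * a collection (unmark_lutexes), marked live again when its heap
+ * object is scavenged (scav_lutex), destroyed and unregistered if
+ * still unmarked at the end (reap_lutexes), and moved wholesale when
+ * its generation is raised (move_lutexes). */
+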
+#if defined(LUTEX_WIDETAG)
+
+/*
+ * Start tracking LUTEX in the GC, by adding it to the linked list of
+ * lutexes in the nursery generation. The caller is responsible for
+ * locking, and GCs must be inhibited until the registration is
+ * complete.
+ */
+void
+gencgc_register_lutex (struct lutex *lutex)
+{
+    page_index_t index = find_page_index(lutex);
+ generation_index_t gen;
+ struct lutex *head;
+
+ /* This lutex is in static space, so we don't need to worry about
+ * finalizing it.
+ */
+ if (index == -1)
+ return;
+
+ gen = page_table[index].gen;
+
+ gc_assert(gen >= 0);
+ gc_assert(gen < NUM_GENERATIONS);
+
+ head = generations[gen].lutexes;
+
+ lutex->gen = gen;
+ lutex->next = head;
+ lutex->prev = NULL;
+ if (head)
+ head->prev = lutex;
+ generations[gen].lutexes = lutex;
+}
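+
+/* A minimal caller sketch (hypothetical; the actual creation path and
+ * its GC-inhibition mechanism live elsewhere in the runtime):
+ *
+ *   struct lutex *lutex = <freshly allocated lutex>;
+ *   <inhibit GC>;
+ *   gencgc_register_lutex(lutex);
+ *   <allow GC>;
+ */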
+
+/*
+ * Stop tracking LUTEX in the GC by removing it from the appropriate
+ * linked lists. This will only be called during GC, so no locking is
+ * needed.
+ */
+void
+gencgc_unregister_lutex (struct lutex *lutex)
+{
+ if (lutex->prev) {
+ lutex->prev->next = lutex->next;
+ } else {
+ generations[lutex->gen].lutexes = lutex->next;
+ }
+
+ if (lutex->next) {
+ lutex->next->prev = lutex->prev;
+ }
+
+ lutex->next = NULL;
+ lutex->prev = NULL;
+ lutex->gen = -1;
+}
+
+/*
+ * Mark all lutexes in generation GEN as not live.
+ */
+static void
+unmark_lutexes (generation_index_t gen)
+{
+ struct lutex *lutex = generations[gen].lutexes;
+
+ while (lutex) {
+ lutex->live = 0;
+ lutex = lutex->next;
+ }
+}
+
+/*
+ * Finalize all lutexes in generation GEN that have not been marked live.
+ */
+static void
+reap_lutexes (generation_index_t gen)
+{
+ struct lutex *lutex = generations[gen].lutexes;
+
+ while (lutex) {
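+        /* Grab the next pointer up front: gencgc_unregister_lutex
+         * severs the dead lutex's links. */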
+ struct lutex *next = lutex->next;
+ if (!lutex->live) {
+ lutex_destroy(lutex);
+ gencgc_unregister_lutex(lutex);
+ }
+ lutex = next;
+ }
+}
+
+/*
+ * Mark LUTEX as live.
+ */
+static void
+mark_lutex (lispobj tagged_lutex)
+{
+ struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
+
+ lutex->live = 1;
+}
+
+/*
+ * Move all lutexes in generation FROM to generation TO.
+ */
+static void
+move_lutexes (generation_index_t from, generation_index_t to)
+{
+ struct lutex *tail = generations[from].lutexes;
+
+ /* Nothing to move */
+ if (!tail)
+ return;
+
+ /* Change the generation of the lutexes in FROM. */
+ while (tail->next) {
+ tail->gen = to;
+ tail = tail->next;
+ }
+ tail->gen = to;
+
+ /* Link the last lutex in the FROM list to the start of the TO list */
+ tail->next = generations[to].lutexes;
+
+ /* And vice versa */
+ if (generations[to].lutexes) {
+ generations[to].lutexes->prev = tail;
+ }
+
+ /* And update the generations structures to match this */
+ generations[to].lutexes = generations[from].lutexes;
+ generations[from].lutexes = NULL;
+}
+
+static long
+scav_lutex(lispobj *where, lispobj object)
+{
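+    /* WHERE is an untagged pointer to the lutex; since heap objects
+     * are dualword-aligned its lowtag bits are already clear, so the
+     * native_pointer inside mark_lutex is a no-op on it. */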
+ mark_lutex((lispobj) where);
+
+ return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
+}
+
+static lispobj
+trans_lutex(lispobj object)
+{
+    struct lutex *lutex = (struct lutex *) native_pointer(object);
+ lispobj copied;
+ size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
+ gc_assert(is_lisp_pointer(object));
+ copied = copy_object(object, words);
+
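+    /* copy_object overwrites only the old object's header with a
+     * forwarding pointer, so the old lutex's next/prev slots below are
+     * still readable. */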
+ /* Update the links, since the lutex moved in memory. */
+ if (lutex->next) {
+        lutex->next->prev = (struct lutex *) native_pointer(copied);
+ }
+
+ if (lutex->prev) {
+        lutex->prev->next = (struct lutex *) native_pointer(copied);
+ } else {
+        generations[lutex->gen].lutexes = (struct lutex *) native_pointer(copied);
+ }
+
+ return copied;
+}
+
+static long
+size_lutex(lispobj *where)
+{
+ return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
+}
+#endif /* LUTEX_WIDETAG */
+
+\f
+/*
* weak pointers
*/
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
+#ifdef LUTEX_WIDETAG
+ case LUTEX_WIDETAG:
+#endif
break;
default:
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
+#ifdef LUTEX_WIDETAG
+ case LUTEX_WIDETAG:
+#endif
count = (sizetab[widetag_of(*start)])(start);
break;
#endif
+#if defined(LISP_FEATURE_SB_THREAD)
+static void
+preserve_context_registers (os_context_t *c)
+{
+ void **ptr;
+ /* On Darwin the signal context isn't a contiguous block of memory,
+     * so just calling preserve_pointer on its contents won't be
+     * sufficient.
+ */
+#if defined(LISP_FEATURE_DARWIN)
+#if defined(LISP_FEATURE_X86)
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
+ preserve_pointer((void*)*os_context_pc_addr(c));
+#else
+ #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
+#endif
+#endif
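+    /* Conservatively preserve every word of the context structure
+     * itself, treating each as a potential Lisp pointer. */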
+    for (ptr = (void **)(c+1); ptr >= (void **)c; ptr--) {
+ preserve_pointer(*ptr);
+ }
+}
+#endif
+
/* Garbage collect a generation. If raise is 0 then the remains of the
* generation are not raised to the next generation. */
static void
/* Initialize the weak pointer list. */
weak_pointers = NULL;
+#ifdef LUTEX_WIDETAG
+ unmark_lutexes(generation);
+#endif
+
/* When a generation is not being raised it is transported to a
* temporary generation (NUM_GENERATIONS), and lowered when
* done. Set up this new generation. There should be no pages
if (esp1>=(void **)th->control_stack_start &&
esp1<(void **)th->control_stack_end) {
if(esp1<esp) esp=esp1;
- for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
- preserve_pointer(*ptr);
- }
+ preserve_context_registers(c);
}
}
}
generations[generation].num_gc = 0;
else
++generations[generation].num_gc;
+
+#ifdef LUTEX_WIDETAG
+ reap_lutexes(generation);
+ if (raise)
+ move_lutexes(generation, generation+1);
+#endif
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
last_page++;
}
+ /* There's a mysterious Solaris/x86 problem with using mmap
+ * tricks for memory zeroing. See sbcl-devel thread
+ * "Re: patch: standalone executable redux".
+ */
+#if defined(LISP_FEATURE_SUNOS)
+ zero_pages(first_page, last_page-1);
+#else
zero_pages_with_mmap(first_page, last_page-1);
+#endif
first_page = last_page;
}
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
+ gc_active_p = 1;
+
if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
FSHOW((stderr,
"/collect_garbage: last_gen = %d, doing a level 0 GC\n",
high_water_mark = 0;
}
+ gc_active_p = 0;
+
SHOW("returning from collect_garbage");
}
generations[page].gc_trigger = 2000000;
generations[page].num_gc = 0;
generations[page].cum_sum_bytes_allocated = 0;
+ generations[page].lutexes = NULL;
}
if (gencgc_verbose > 1)
scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
+#ifdef LUTEX_WIDETAG
+ scavtab[LUTEX_WIDETAG] = scav_lutex;
+ transother[LUTEX_WIDETAG] = trans_lutex;
+ sizetab[LUTEX_WIDETAG] = size_lutex;
+#endif
+
heap_base = (void*)DYNAMIC_SPACE_START;
/* Initialize each page structure. */
generations[i].bytes_consed_between_gc = 2000000;
generations[i].trigger_age = 1;
generations[i].min_av_mem_age = 0.75;
+ generations[i].lutexes = NULL;
}
/* Initialize gc_alloc. */
page++;
} while ((long)page_address(page) < alloc_ptr);
+#ifdef LUTEX_WIDETAG
+ /* Lutexes have been registered in generation 0 by coreparse, and
+ * need to be moved to the right one manually.
+ */
+ move_lutexes(0, PSEUDO_STATIC_GENERATION);
+#endif
+
last_free_page = page;
generations[gen].bytes_allocated = PAGE_BYTES*page;