static boolean conservative_stack = 1;
/* An array of page structures is allocated on gc initialization.
- * This helps quickly map between an address its page structure.
+ * This helps to quickly map between an address and its page structure.
* page_table_pages is set from the size of the dynamic space. */
page_index_t page_table_pages;
struct page *page_table;
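/* A minimal sketch of the address->page lookup this arrangement
 * enables, modeled on gencgc's find_page_index(). The constants
 * DYNAMIC_SPACE_START and GENCGC_CARD_BYTES (the per-page size) are
 * assumptions here; the exact names and types may differ: */
static inline page_index_t
find_page_index_sketch(void *addr)
{
    if (addr >= (void *)DYNAMIC_SPACE_START) {
        page_index_t index =
            ((uintptr_t)addr - (uintptr_t)DYNAMIC_SPACE_START)
            / GENCGC_CARD_BYTES;
        if (index < page_table_pages)
            return index;       /* page_table[index] describes addr */
    }
    return -1;                  /* outside the dynamic space */
}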
static void
add_new_area(page_index_t first_page, size_t offset, size_t size)
{
- size_t new_area_start, c, i;
+ size_t new_area_start, c;
+ ssize_t i;
/* Ignore if full. */
if (new_areas_index >= NUM_NEW_AREAS)
return;
for_each_thread(th) {
void **ptr;
void **esp=(void **)-1;
-#ifdef LISP_FEATURE_SB_THREAD
+ if (th->state == STATE_DEAD)
+ continue;
+# if defined(LISP_FEATURE_SB_SAFEPOINT)
+ /* Conservative collect_garbage is always invoked with a
+ * foreign C call or an interrupt handler on top of every
+ * existing thread, so the stored SP in each thread
+ * structure is valid, no matter which thread we are looking
+ * at. For threads that were running Lisp code, the pitstop
+ * and edge functions maintain this value within the
+ * interrupt or exception handler. */
+ esp = os_get_csp(th);
+ assert_on_stack(th, esp);
+
+ /* In addition to pointers on the stack, also preserve the
+ * return PC, the only value from the context that we need
+ * in addition to the SP. The return PC gets saved by the
+ * foreign call wrapper, and removed from the control stack
+ * into a register. */
+ preserve_pointer(th->pc_around_foreign_call);
+
+ /* And on platforms with interrupts: scavenge ctx registers. */
+
+ /* Disabled on Windows, because it does not have an explicit
+ * stack of `interrupt_contexts'. The reported CSP has been
+ * chosen so that the current context on the stack is
+ * covered by the stack scan. See also set_csp_from_context(). */
+# ifndef LISP_FEATURE_WIN32
+ if (th != arch_os_get_current_thread()) {
+ long k = fixnum_value(
+ SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+ while (k > 0)
+ preserve_context_registers(th->interrupt_contexts[--k]);
+ }
+# endif
+# elif defined(LISP_FEATURE_SB_THREAD)
long i,free;
if(th==arch_os_get_current_thread()) {
/* Somebody is going to burn in hell for this, but casting
 * it in two steps shuts gcc up about strict aliasing. */
esp = (void **)((void *)&raise);
}
}
}
-#else
+# else
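/* Without threads there is no stored SP, so approximate it with the
 * address of a local: `raise' is this function's argument, and hence
 * &raise lies on the current C stack. */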
esp = (void **)((void *)&raise);
-#endif
+# endif
+ if (!esp || esp == (void*) -1)
+ lose("garbage_collect: no SP known for thread %x (OS %x)",
+ th, th->os_thread);
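/* Conservatively scan the control stack: walk every word from the
 * stack end down to the known SP and preserve anything that might be
 * a pointer into the dynamic space. */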
for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
preserve_pointer(*ptr);
}
/* Update auto_gc_trigger. Make sure we trigger the next GC before
* running out of heap! */
- if (bytes_consed_between_gcs >= dynamic_space_size - bytes_allocated)
+ if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
else
auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
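/* Worked example with illustrative numbers: given a 512 MB dynamic
 * space, 500 MB already allocated and bytes_consed_between_gcs of
 * 50 MB, only 12 MB remain, so the else branch arms the trigger at
 * 500 + 12/2 = 506 MB rather than past the end of the heap. */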
generations[i].num_gc = 0;
generations[i].cum_sum_bytes_allocated = 0;
/* the tune-able parameters */
- generations[i].bytes_consed_between_gc = bytes_consed_between_gcs;
+ generations[i].bytes_consed_between_gc
+ = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
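+ /* Worked example with illustrative numbers: if bytes_consed_between_gcs
+ * is 12 MB and HIGHEST_NORMAL_GENERATION is, say, 6, each generation
+ * gets a 2 MB consing budget before it becomes a GC candidate. */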
generations[i].number_of_gcs_before_promotion = 1;
generations[i].minimum_age_before_gc = 0.75;
}
#endif
void *new_obj;
void *new_free_pointer;
+ os_vm_size_t trigger_bytes = 0;
gc_assert(nbytes>0);
return(new_obj); /* yup */
}
+ /* We don't want to count nbytes against auto_gc_trigger unless we
+ * have to: it speeds up the tenuring of objects and slows down
+ * allocation. However, unless we do so when allocating _very_
+ * large objects we are in danger of exhausting the heap without
+ * running sufficient GCs.
+ */
+ if (nbytes >= bytes_consed_between_gcs)
+ trigger_bytes = nbytes;
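+ /* E.g. with bytes_consed_between_gcs of 50 MB, a single 64 MB
+ * allocation counts fully toward the trigger check below, while a
+ * 1 MB request leaves trigger_bytes at 0. */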
+
/* we have to go the long way around, it seems. Check whether we
* should GC in the near future
*/
- if (auto_gc_trigger && bytes_allocated+nbytes > auto_gc_trigger) {
+ if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */
if (SymbolValue(GC_PENDING,thread) == NIL) {
/* set things up so that GC happens when we finish the PA
 * section */
SetSymbolValue(GC_PENDING,T,thread);
if (SymbolValue(GC_INHIBIT,thread) == NIL) {
+#ifdef LISP_FEATURE_SB_SAFEPOINT
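+ /* Under safepoints the GC is requested through the safepoint
+ * machinery; the thread stops at its next safepoint instead of
+ * being interrupted mid-pseudo-atomic. */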
+ thread_register_gc_trigger();
+#else
set_pseudo_atomic_interrupted(thread);
#ifdef LISP_FEATURE_PPC
/* PPC calls alloc() from a trap or from pa_alloc();
 * look up the most recent context if it's from a trap. */
#else
maybe_save_gc_mask_and_block_deferrables(NULL);
#endif
+#endif
}
}
}
new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
#ifndef LISP_FEATURE_WIN32
+ /* for sb-prof, and not supported on Windows yet */
alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
if ((signed long) alloc_signal <= 0) {