X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=a89d59c786dd3b7263aa85a0dcd203ee584461ff;hb=eac461c1f1ca91cfe282c779291d582ed6b336cb;hp=1d143c862ac30846a95d763ceb611f32a11623f3;hpb=5a942de4c3645c0c63a262eb7b825a9843e96372;p=sbcl.git

diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 1d143c8..a89d59c 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -26,10 +26,14 @@
 #include <stdlib.h>
 #include <stdio.h>
-#include <signal.h>
 #include <errno.h>
 #include <string.h>
 #include "sbcl.h"
+#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
+#include "pthreads_win32.h"
+#else
+#include <signal.h>
+#endif
 #include "runtime.h"
 #include "os.h"
 #include "interr.h"
@@ -96,7 +100,7 @@ os_vm_size_t large_allocation = 0;
 
 /* the verbosity level. All non-error messages are disabled at level 0;
  * and only a few rare messages are printed at level 1. */
-#if QSHOW
+#if QSHOW == 2
 boolean gencgc_verbose = 1;
 #else
 boolean gencgc_verbose = 0;
@@ -168,7 +172,7 @@ boolean gc_active_p = 0;
 static boolean conservative_stack = 1;
 
 /* An array of page structures is allocated on gc initialization.
- * This helps quickly map between an address its page structure.
+ * This helps to quickly map between an address and its page structure.
  * page_table_pages is set from the size of the dynamic space. */
 page_index_t page_table_pages;
 struct page *page_table;
@@ -447,6 +451,15 @@ write_generation_stats(FILE *file)
 #elif defined(LISP_FEATURE_PPC)
 #define FPU_STATE_SIZE 32
     long long fpu_state[FPU_STATE_SIZE];
+#elif defined(LISP_FEATURE_SPARC)
+    /*
+     * 32 (single-precision) FP registers, and the FP state register.
+     * But Sparc V9 has 32 double-precision registers (equivalent to 64
+     * single-precision, but not all of them can be accessed as singles),
+     * so we leave enough room for that.
+     */
+#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
+    long long fpu_state[FPU_STATE_SIZE];
 #endif
 
     /* This code uses the FP instructions which may be set up for Lisp
@@ -917,7 +930,8 @@ size_t max_new_areas;
 static void
 add_new_area(page_index_t first_page, size_t offset, size_t size)
 {
-    size_t new_area_start, c, i;
+    size_t new_area_start, c;
+    ssize_t i;
 
     /* Ignore if full. */
     if (new_areas_index >= NUM_NEW_AREAS)
@@ -1245,10 +1259,12 @@ gc_heap_exhausted_error_or_lose (long available, long requested)
     else {
         /* FIXME: assert free_pages_lock held */
         (void)thread_mutex_unlock(&free_pages_lock);
+#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
         gc_assert(get_pseudo_atomic_atomic(thread));
         clear_pseudo_atomic_atomic(thread);
         if (get_pseudo_atomic_interrupted(thread))
             do_pending_interrupt();
+#endif
         /* Another issue is that signalling HEAP-EXHAUSTED error leads
          * to running user code at arbitrary places, even in a
          * WITHOUT-INTERRUPTS which may lead to a deadlock without
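An aside on the SPARC FPU_STATE_SIZE arithmetic added earlier in this diff: 32 single-precision registers, plus 32 more words reserved for the V9 double-precision bank, plus the FP state register gives 65 32-bit words, and the trailing `+ 1)/2` rounds that up to 33 when converting to 64-bit `long long` slots. A quick compile-time check of that arithmetic (illustrative only, not part of the patch):

    /* Sketch: FPU_STATE_SIZE must cover 65 32-bit words (C11 static_assert). */
    #include <assert.h>
    #define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2)
    static_assert(FPU_STATE_SIZE == 33,
                  "65 32-bit words round up to 33 long longs");
    static_assert(FPU_STATE_SIZE * sizeof(long long) >= 65 * 4,
                  "room for 64 single-precision registers plus the FSR");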
@@ -3309,7 +3325,7 @@ preserve_context_registers (os_context_t *c)
     /* On Darwin the signal context isn't a contiguous block of memory,
      * so just preserve_pointering its contents won't be sufficient.
      */
-#if defined(LISP_FEATURE_DARWIN)
+#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32)
 #if defined LISP_FEATURE_X86
     preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
     preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
@@ -3338,9 +3354,11 @@ preserve_context_registers (os_context_t *c)
 #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
 #endif
 #endif
+#if !defined(LISP_FEATURE_WIN32)
     for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
         preserve_pointer(*ptr);
     }
+#endif
 }
 #endif
 
@@ -3422,7 +3440,41 @@ garbage_collect_generation(generation_index_t generation, int raise)
     for_each_thread(th) {
         void **ptr;
         void **esp=(void **)-1;
-#ifdef LISP_FEATURE_SB_THREAD
+        if (th->state == STATE_DEAD)
+            continue;
+# if defined(LISP_FEATURE_SB_SAFEPOINT)
+        /* Conservative collect_garbage is always invoked with a
+         * foreign C call or an interrupt handler on top of every
+         * existing thread, so the stored SP in each thread
+         * structure is valid, no matter which thread we are looking
+         * at. For threads that were running Lisp code, the pitstop
+         * and edge functions maintain this value within the
+         * interrupt or exception handler. */
+        esp = os_get_csp(th);
+        assert_on_stack(th, esp);
+
+        /* In addition to pointers on the stack, also preserve the
+         * return PC, the only value from the context that we need
+         * in addition to the SP. The return PC gets saved by the
+         * foreign call wrapper, and removed from the control stack
+         * into a register. */
+        preserve_pointer(th->pc_around_foreign_call);
+
+        /* And on platforms with interrupts: scavenge ctx registers. */
+
+        /* Disabled on Windows, because it does not have an explicit
+         * stack of `interrupt_contexts'. The reported CSP has been
+         * chosen so that the current context on the stack is
+         * covered by the stack scan. See also set_csp_from_context(). */
+# ifndef LISP_FEATURE_WIN32
+        if (th != arch_os_get_current_thread()) {
+            long k = fixnum_value(
+                SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+            while (k > 0)
+                preserve_context_registers(th->interrupt_contexts[--k]);
+        }
+# endif
+# elif defined(LISP_FEATURE_SB_THREAD)
         long i,free;
         if(th==arch_os_get_current_thread()) {
             /* Somebody is going to burn in hell for this, but casting
@@ -3441,9 +3493,12 @@ garbage_collect_generation(generation_index_t generation, int raise)
             }
         }
     }
-#else
+# else
         esp = (void **)((void *)&raise);
-#endif
+# endif
+        if (!esp || esp == (void*) -1)
+            lose("garbage_collect: no SP known for thread %x (OS %x)",
+                 th, th->os_thread);
         for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
             preserve_pointer(*ptr);
         }
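The loop at the end of the hunk above is the heart of the conservative root scan: every word between the thread's saved stack pointer and control_stack_end is treated as a potential heap pointer and pinned. A self-contained sketch of the same idea (the function and its parameters are stand-ins for illustration, not SBCL's API):

    /* Hedged sketch of a conservative stack scan, assuming a
     * downward-growing stack whose highest address is stack_end. */
    static void
    scan_stack_conservatively(void **sp, void **stack_end,
                              void (*preserve)(void *))
    {
        void **ptr;
        /* Walk from the top of the stack down to the saved SP,
         * treating every word as a possible object pointer. */
        for (ptr = stack_end - 1; ptr >= sp; ptr--)
            preserve(*ptr);
    }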
@@ -3847,7 +3902,7 @@ collect_garbage(generation_index_t last_gen)
     /* Update auto_gc_trigger. Make sure we trigger the next GC before
      * running out of heap!
      */
-    if (bytes_consed_between_gcs >= dynamic_space_size - bytes_allocated)
+    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
         auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
     else
         auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
@@ -4037,7 +4092,8 @@ gc_init(void)
         generations[i].num_gc = 0;
         generations[i].cum_sum_bytes_allocated = 0;
         /* the tune-able parameters */
-        generations[i].bytes_consed_between_gc = bytes_consed_between_gcs;
+        generations[i].bytes_consed_between_gc
+            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
         generations[i].number_of_gcs_before_promotion = 1;
         generations[i].minimum_age_before_gc = 0.75;
     }
@@ -4125,6 +4181,7 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
 #endif
     void *new_obj;
     void *new_free_pointer;
+    os_vm_size_t trigger_bytes = 0;
 
     gc_assert(nbytes>0);
 
@@ -4132,8 +4189,10 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
     gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
               && ((nbytes & LOWTAG_MASK) == 0));
 
+#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
     /* Must be inside a PA section. */
     gc_assert(get_pseudo_atomic_atomic(thread));
+#endif
 
     if (nbytes > large_allocation)
         large_allocation = nbytes;
@@ -4146,10 +4205,19 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
         return(new_obj);        /* yup */
     }
 
+    /* We don't want to count nbytes against auto_gc_trigger unless we
+     * have to: it speeds up the tenuring of objects and slows down
+     * allocation. However, unless we do so when allocating _very_
+     * large objects we are in danger of exhausting the heap without
+     * running sufficient GCs.
+     */
+    if (nbytes >= bytes_consed_between_gcs)
+        trigger_bytes = nbytes;
+
     /* we have to go the long way around, it seems. Check whether we
      * should GC in the near future
      */
-    if (auto_gc_trigger && bytes_allocated+nbytes > auto_gc_trigger) {
+    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
         /* Don't flood the system with interrupts if the need to gc is
          * already noted. This can happen for example when SUB-GC
          * allocates or after a gc triggered in a WITHOUT-GCING. */
@@ -4158,8 +4226,11 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
              * section */
             SetSymbolValue(GC_PENDING,T,thread);
             if (SymbolValue(GC_INHIBIT,thread) == NIL) {
+#ifdef LISP_FEATURE_SB_SAFEPOINT
+                thread_register_gc_trigger();
+#else
                 set_pseudo_atomic_interrupted(thread);
-#ifdef LISP_FEATURE_PPC
+#ifdef GENCGC_IS_PRECISE
                 /* PPC calls alloc() from a trap or from pa_alloc(),
                  * look up the most recent context if it's from a trap. */
                 {
@@ -4171,12 +4242,14 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
 #else
                 maybe_save_gc_mask_and_block_deferrables(NULL);
 #endif
+#endif
             }
         }
     }
     new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
 
 #ifndef LISP_FEATURE_WIN32
+    /* for sb-prof, and not supported on Windows yet */
     alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
     if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
         if ((signed long) alloc_signal <= 0) {
@@ -4221,7 +4294,9 @@ general_alloc(long nbytes, int page_type_flag)
 
 lispobj *
 alloc(long nbytes)
 {
+#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
     gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
+#endif
     return general_alloc(nbytes, BOXED_PAGE_FLAG);
 }
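The sign flip in the auto_gc_trigger update above is easy to miss: with the old `>=` test, a nearly-full heap set the trigger *past* the end of dynamic space, so allocation could exhaust the heap before the next GC ever fired. A standalone sketch of the corrected computation (variable names mirror gencgc.c, but the values are invented for illustration):

    /* Hedged sketch of the fixed trigger logic; not SBCL code. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long dynamic_space_size = 512UL << 20;  /* 512 MB heap */
        unsigned long bytes_allocated    = 500UL << 20;  /* nearly full */
        unsigned long bytes_consed_between_gcs = 50UL << 20;
        unsigned long remaining = dynamic_space_size - bytes_allocated;
        unsigned long auto_gc_trigger;

        if (bytes_consed_between_gcs <= remaining)
            auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
        else  /* taken here: only 12 MB left, trigger halfway into it */
            auto_gc_trigger = bytes_allocated + remaining / 2;

        /* Prints 506; the old >= test would have yielded 550, beyond
         * the 512 MB heap end. */
        printf("next GC at %lu MB\n", auto_gc_trigger >> 20);
        return 0;
    }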