X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=9fb8e4436dc09c94a8fe206ee26f61d9aed748ce;hb=ebc0f0ebf9efd39519ab86ba28c33abdb25443e0;hp=596ebba09028c76ba6d7078d4feafda682663566;hpb=3a0f3612dc2bbf3e4e8e7395bcbbf8cd1791b963;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index 596ebba..9fb8e44 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -43,12 +43,17 @@ #include "thread.h" #include "genesis/vector.h" #include "genesis/weak-pointer.h" +#include "genesis/fdefn.h" #include "genesis/simple-fun.h" #include "save.h" #include "genesis/hash-table.h" #include "genesis/instance.h" #include "genesis/layout.h" +#ifdef LUTEX_WIDETAG +#include "genesis/lutex.h" +#endif + /* forward declarations */ page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed); @@ -147,6 +152,9 @@ unsigned long auto_gc_trigger = 0; generation_index_t from_space; generation_index_t new_space; +/* Set to 1 when in GC */ +boolean gc_active_p = 0; + /* should the GC be conservative on stack. If false (only right before * saving a core), don't scan the stack / mark pages dont_move. */ static boolean conservative_stack = 1; @@ -234,6 +242,14 @@ struct generation { * prevent a GC when a large number of new live objects have been * added, in which case a GC could be a waste of time */ double min_av_mem_age; + + /* A linked list of lutex structures in this generation, used for + * implementing lutex finalization. */ +#ifdef LUTEX_WIDETAG + struct lutex *lutexes; +#else + void *lutexes; +#endif }; /* an array of generation structures. There needs to be one more @@ -301,7 +317,7 @@ count_generation_pages(generation_index_t generation) long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != 0) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].gen == generation)) count++; return count; @@ -314,7 +330,8 @@ count_dont_move_pages(void) page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) { + if ((page_table[i].allocated != FREE_PAGE_FLAG) + && (page_table[i].dont_move != 0)) { ++count; } } @@ -330,7 +347,8 @@ count_generation_bytes_allocated (generation_index_t gen) page_index_t i; long result = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].gen == gen)) + if ((page_table[i].allocated != FREE_PAGE_FLAG) + && (page_table[i].gen == gen)) result += page_table[i].bytes_used; } return result; @@ -410,17 +428,19 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ gc_assert(generations[i].bytes_allocated == count_generation_bytes_allocated(i)); fprintf(stderr, - " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n", + " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n", i, generations[i].alloc_start_page, generations[i].alloc_unboxed_start_page, generations[i].alloc_large_start_page, generations[i].alloc_large_unboxed_start_page, - boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt, + boxed_cnt, + unboxed_cnt, + large_boxed_cnt, + large_unboxed_cnt, pinned_cnt, generations[i].bytes_allocated, - (count_generation_pages(i)*PAGE_BYTES - - generations[i].bytes_allocated), + (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated), generations[i].gc_trigger, count_write_protect_generation_pages(i), generations[i].num_gc, @@ -580,6 +600,7 @@ 
gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) page_index_t last_page; long bytes_found; page_index_t i; + int ret; /* FSHOW((stderr, @@ -591,7 +612,8 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (unboxed) { first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; @@ -652,7 +674,8 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) /* do we only want to call this on special occasions? like for boxed_region? */ set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES)); } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); /* we can do this after releasing free_pages_lock */ if (gencgc_zero_check) { @@ -794,6 +817,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) long orig_first_page_bytes_used; long region_size; long byte_cnt; + int ret; first_page = alloc_region->first_page; @@ -804,7 +828,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (alloc_region->free_pointer != alloc_region->start_addr) { /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; @@ -908,7 +933,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); + /* alloc_region is per-thread, we're ok to do this unlocked */ gc_set_region_empty(alloc_region); } @@ -926,8 +953,10 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) int more; long bytes_used; page_index_t next_page; + int ret; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (unboxed) { first_page = @@ -1027,7 +1056,8 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) last_free_page = last_page+1; set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES)); } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), @@ -1042,6 +1072,32 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) static page_index_t gencgc_alloc_start_page = -1; +void +gc_heap_exhausted_error_or_lose (long available, long requested) +{ + /* Write basic information before doing anything else: if we don't + * call to lisp this is a must, and even if we do there is always the + * danger that we bounce back here before the error has been handled, + * or indeed even printed. + */ + fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n", + gc_active_p ? "garbage collection" : "allocation", available, requested); + if (gc_active_p || (available == 0)) { + /* If we are in GC, or totally out of memory there is no way + * to sanely transfer control to the lisp-side of things. 
+ */ + print_generation_stats(1); + lose("Heap exhausted, game over."); + } + else { + /* FIXME: assert free_pages_lock held */ + thread_mutex_unlock(&free_pages_lock); + funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR), + make_fixnum(available), make_fixnum(requested)); + lose("HEAP-EXHAUSTED-ERROR fell through"); + } +} + page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) { @@ -1084,13 +1140,8 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) first_page++; } - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose("\n"); - } + if (first_page >= NUM_PAGES) + gc_heap_exhausted_error_or_lose(0, nbytes); gc_assert(page_table[first_page].write_protected == 0); @@ -1115,13 +1166,9 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); /* Check for a failure */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose("\n"); - } + if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) + gc_heap_exhausted_error_or_lose(bytes_found, nbytes); + *restart_page_ptr=first_page; return last_page; @@ -2042,6 +2089,179 @@ scav_vector(lispobj *where, lispobj object) /* + * Lutexes. Using the normal finalization machinery for finalizing + * lutexes is tricky, since the finalization depends on working lutexes. + * So we track the lutexes in the GC and finalize them manually. + */ + +#if defined(LUTEX_WIDETAG) + +/* + * Start tracking LUTEX in the GC, by adding it to the linked list of + * lutexes in the nursery generation. The caller is responsible for + * locking, and GCs must be inhibited until the registration is + * complete. + */ +void +gencgc_register_lutex (struct lutex *lutex) { + int index = find_page_index(lutex); + generation_index_t gen; + struct lutex *head; + + /* This lutex is in static space, so we don't need to worry about + * finalizing it. + */ + if (index == -1) + return; + + gen = page_table[index].gen; + + gc_assert(gen >= 0); + gc_assert(gen < NUM_GENERATIONS); + + head = generations[gen].lutexes; + + lutex->gen = gen; + lutex->next = head; + lutex->prev = NULL; + if (head) + head->prev = lutex; + generations[gen].lutexes = lutex; +} + +/* + * Stop tracking LUTEX in the GC by removing it from the appropriate + * linked lists. This will only be called during GC, so no locking is + * needed. + */ +void +gencgc_unregister_lutex (struct lutex *lutex) { + if (lutex->prev) { + lutex->prev->next = lutex->next; + } else { + generations[lutex->gen].lutexes = lutex->next; + } + + if (lutex->next) { + lutex->next->prev = lutex->prev; + } + + lutex->next = NULL; + lutex->prev = NULL; + lutex->gen = -1; +} + +/* + * Mark all lutexes in generation GEN as not live. + */ +static void +unmark_lutexes (generation_index_t gen) { + struct lutex *lutex = generations[gen].lutexes; + + while (lutex) { + lutex->live = 0; + lutex = lutex->next; + } +} + +/* + * Finalize all lutexes in generation GEN that have not been marked live. 
+ */ +static void +reap_lutexes (generation_index_t gen) { + struct lutex *lutex = generations[gen].lutexes; + + while (lutex) { + struct lutex *next = lutex->next; + if (!lutex->live) { + lutex_destroy(lutex); + gencgc_unregister_lutex(lutex); + } + lutex = next; + } +} + +/* + * Mark LUTEX as live. + */ +static void +mark_lutex (lispobj tagged_lutex) { + struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex); + + lutex->live = 1; +} + +/* + * Move all lutexes in generation FROM to generation TO. + */ +static void +move_lutexes (generation_index_t from, generation_index_t to) { + struct lutex *tail = generations[from].lutexes; + + /* Nothing to move */ + if (!tail) + return; + + /* Change the generation of the lutexes in FROM. */ + while (tail->next) { + tail->gen = to; + tail = tail->next; + } + tail->gen = to; + + /* Link the last lutex in the FROM list to the start of the TO list */ + tail->next = generations[to].lutexes; + + /* And vice versa */ + if (generations[to].lutexes) { + generations[to].lutexes->prev = tail; + } + + /* And update the generations structures to match this */ + generations[to].lutexes = generations[from].lutexes; + generations[from].lutexes = NULL; +} + +static long +scav_lutex(lispobj *where, lispobj object) +{ + mark_lutex((lispobj) where); + + return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); +} + +static lispobj +trans_lutex(lispobj object) +{ + struct lutex *lutex = native_pointer(object); + lispobj copied; + size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); + gc_assert(is_lisp_pointer(object)); + copied = copy_object(object, words); + + /* Update the links, since the lutex moved in memory. */ + if (lutex->next) { + lutex->next->prev = native_pointer(copied); + } + + if (lutex->prev) { + lutex->prev->next = native_pointer(copied); + } else { + generations[lutex->gen].lutexes = native_pointer(copied); + } + + return copied; +} + +static long +size_lutex(lispobj *where) +{ + return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); +} +#endif /* LUTEX_WIDETAG */ + + +/* * weak pointers */ @@ -2378,6 +2598,9 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) #endif case SAP_WIDETAG: case WEAK_POINTER_WIDETAG: +#ifdef LUTEX_WIDETAG + case LUTEX_WIDETAG: +#endif break; default: @@ -3481,6 +3704,9 @@ verify_space(lispobj *start, size_t words) #endif case SAP_WIDETAG: case WEAK_POINTER_WIDETAG: +#ifdef LUTEX_WIDETAG + case LUTEX_WIDETAG: +#endif count = (sizetab[widetag_of(*start)])(start); break; @@ -3817,6 +4043,33 @@ scavenge_interrupt_contexts(void) #endif +#if defined(LISP_FEATURE_SB_THREAD) +static void +preserve_context_registers (os_context_t *c) +{ + void **ptr; + /* On Darwin the signal context isn't a contiguous block of memory, + * so just preserve_pointering its contents won't be sufficient. 
+     */
+#if defined(LISP_FEATURE_DARWIN)
+#if defined LISP_FEATURE_X86
+    preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
+    preserve_pointer((void*)*os_context_pc_addr(c));
+#else
+    #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
+#endif
+#endif
+    for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+        preserve_pointer(*ptr);
+    }
+}
+#endif
+
 /* Garbage collect a generation. If raise is 0 then the remains of the
  * generation are not raised to the next generation. */
 static void
@@ -3834,6 +4087,10 @@ garbage_collect_generation(generation_index_t generation, int raise)
     /* Initialize the weak pointer list. */
     weak_pointers = NULL;
 
+#ifdef LUTEX_WIDETAG
+    unmark_lutexes(generation);
+#endif
+
     /* When a generation is not being raised it is transported to a
      * temporary generation (NUM_GENERATIONS), and lowered when
      * done. Set up this new generation. There should be no pages
@@ -3906,9 +4163,7 @@ garbage_collect_generation(generation_index_t generation, int raise)
             if (esp1>=(void **)th->control_stack_start &&
                 esp1<(void **)th->control_stack_end) {
                 if(esp1<esp) esp=esp1;
-                for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
-                    preserve_pointer(*ptr);
-                }
+                preserve_context_registers(c);
             }
         }
     }
@@ -4079,6 +4334,12 @@ garbage_collect_generation(generation_index_t generation, int raise)
         generations[generation].num_gc = 0;
     else
         ++generations[generation].num_gc;
+
+#ifdef LUTEX_WIDETAG
+    reap_lutexes(generation);
+    if (raise)
+        move_lutexes(generation, generation+1);
+#endif
 }
 
 /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
@@ -4116,7 +4377,15 @@ remap_free_pages (page_index_t from, page_index_t to)
             last_page++;
         }
 
+        /* There's a mysterious Solaris/x86 problem with using mmap
+         * tricks for memory zeroing. See sbcl-devel thread
+         * "Re: patch: standalone executable redux".
+         */
+#if defined(LISP_FEATURE_SUNOS)
+        zero_pages(first_page, last_page-1);
+#else
         zero_pages_with_mmap(first_page, last_page-1);
+#endif
 
         first_page = last_page;
     }
@@ -4144,6 +4413,8 @@ collect_garbage(generation_index_t last_gen)
 
     FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
 
+    gc_active_p = 1;
+
     if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
         FSHOW((stderr,
                "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
@@ -4263,6 +4534,8 @@ collect_garbage(generation_index_t last_gen)
         high_water_mark = 0;
     }
 
+    gc_active_p = 0;
+
     SHOW("returning from collect_garbage");
 }
 
@@ -4337,6 +4610,7 @@ gc_free_heap(void)
         generations[page].gc_trigger = 2000000;
         generations[page].num_gc = 0;
         generations[page].cum_sum_bytes_allocated = 0;
+        generations[page].lutexes = NULL;
     }
 
     if (gencgc_verbose > 1)
@@ -4369,6 +4643,12 @@ gc_init(void)
     scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
 
     transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
 
+#ifdef LUTEX_WIDETAG
+    scavtab[LUTEX_WIDETAG] = scav_lutex;
+    transother[LUTEX_WIDETAG] = trans_lutex;
+    sizetab[LUTEX_WIDETAG] = size_lutex;
+#endif
+
     heap_base = (void*)DYNAMIC_SPACE_START;
 
     /* Initialize each page structure. */
@@ -4399,6 +4679,7 @@ gc_init(void)
         generations[i].bytes_consed_between_gc = 2000000;
        generations[i].trigger_age = 1;
        generations[i].min_av_mem_age = 0.75;
+       generations[i].lutexes = NULL;
    }
    /* Initialize gc_alloc. */
@@ -4442,6 +4723,13 @@ gencgc_pickup_dynamic(void)
         page++;
     } while ((long)page_address(page) < alloc_ptr);
 
+#ifdef LUTEX_WIDETAG
+    /* Lutexes have been registered in generation 0 by coreparse, and
+     * need to be moved to the right one manually.
+     */
+    move_lutexes(0, PSEUDO_STATIC_GENERATION);
+#endif
+
     last_free_page = page;
 
     generations[gen].bytes_allocated = PAGE_BYTES*page;
@@ -4531,7 +4819,7 @@ alloc(long nbytes)
              * section */
             SetSymbolValue(GC_PENDING,T,thread);
             if (SymbolValue(GC_INHIBIT,thread) == NIL)
-                set_pseudo_atomic_interrupted(0);
+                set_pseudo_atomic_interrupted(thread);
         }
     }
     new_obj = gc_alloc_with_region(nbytes,0,region,0);
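
The allocator hunks above (gc_alloc_new_region, gc_alloc_update_page_tables,
gc_alloc_large) all replace bare thread_mutex_lock/unlock calls with a
ret/gc_assert pair. Below is a minimal standalone sketch of the same pattern
against plain pthreads, assuming (as the runtime does on threaded builds) that
thread_mutex_lock is a thin wrapper over pthread_mutex_lock with the same
return convention; the lock name is borrowed from the patch for flavor only.

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
        int ret;

        /* pthread mutex calls return 0 on success and an errno-style
         * code on failure; asserting on the result turns a locking bug
         * (say, unlocking a mutex this thread does not hold) into an
         * immediate failure instead of silent page-table corruption
         * discovered much later. */
        ret = pthread_mutex_lock(&free_pages_lock);
        assert(ret == 0);

        /* ... critical section: find and claim free pages ... */

        ret = pthread_mutex_unlock(&free_pages_lock);
        assert(ret == 0);

        puts("lock protocol ok");
        return 0;
    }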
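
The lutex machinery the patch introduces is an intrusive doubly-linked list
per generation plus a live flag: unmark everything before the collection, let
the scavenger mark what it reaches, destroy the rest afterwards, and splice
survivors onto the next generation's list when the generation is raised. The
sketch below is a compilable model of that life cycle with simplified stand-in
types: struct lutex here carries only the GC bookkeeping fields, and free()
stands in for lutex_destroy().

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_GENERATIONS 7

    struct lutex {
        int live;                  /* cleared before GC, set when scavenged */
        int gen;                   /* generation whose list we are on */
        struct lutex *next, *prev;
    };

    /* One list head per generation, as in struct generation above. */
    static struct lutex *lutexes[NUM_GENERATIONS];

    /* Push LUTEX onto GEN's list (cf. gencgc_register_lutex). */
    static void register_lutex(struct lutex *lutex, int gen) {
        lutex->gen = gen;
        lutex->prev = NULL;
        lutex->next = lutexes[gen];
        if (lutex->next)
            lutex->next->prev = lutex;
        lutexes[gen] = lutex;
    }

    /* Unlink LUTEX from whatever list it is on (cf. gencgc_unregister_lutex). */
    static void unregister_lutex(struct lutex *lutex) {
        if (lutex->prev)
            lutex->prev->next = lutex->next;
        else
            lutexes[lutex->gen] = lutex->next;
        if (lutex->next)
            lutex->next->prev = lutex->prev;
        lutex->next = lutex->prev = NULL;
        lutex->gen = -1;
    }

    /* Before GC: presume everything in GEN dead (cf. unmark_lutexes). */
    static void unmark_lutexes(int gen) {
        struct lutex *l;
        for (l = lutexes[gen]; l; l = l->next)
            l->live = 0;
    }

    /* After GC: destroy whatever the scavenger never marked (cf. reap_lutexes). */
    static void reap_lutexes(int gen) {
        struct lutex *l = lutexes[gen];
        while (l) {
            struct lutex *next = l->next;   /* unregister clobbers l->next */
            if (!l->live) {
                unregister_lutex(l);
                free(l);                    /* stands in for lutex_destroy() */
            }
            l = next;
        }
    }

    /* Splice FROM's survivors onto TO's list (cf. move_lutexes). */
    static void move_lutexes(int from, int to) {
        struct lutex *tail = lutexes[from];
        if (!tail)
            return;
        while (tail->next) {
            tail->gen = to;
            tail = tail->next;
        }
        tail->gen = to;
        tail->next = lutexes[to];
        if (lutexes[to])
            lutexes[to]->prev = tail;
        lutexes[to] = lutexes[from];
        lutexes[from] = NULL;
    }

    int main(void) {
        struct lutex *a = calloc(1, sizeof *a), *b = calloc(1, sizeof *b);
        register_lutex(a, 0);
        register_lutex(b, 0);
        unmark_lutexes(0);
        a->live = 1;          /* the scavenger saw a, but not b */
        reap_lutexes(0);      /* b is destroyed here */
        move_lutexes(0, 1);   /* a survives into generation 1 */
        printf("a->gen = %d\n", a->gen);
        free(a);
        return 0;
    }

The intrusive list is the point of the design: the GC can register, reap, and
move lutexes without allocating any tracking cells of its own, which matters
because none of this code is allowed to cons while a collection is running.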
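
scav_lutex and size_lutex both report CEILING(sizeof(struct lutex)/sizeof(lispobj), 2)
words because gencgc keeps heap objects aligned to two-word boundaries, so the
scavenge and size functions must return the rounded-up figure. A small sketch
of what that expression computes, with lispobj modeled as the platform word
and CEILING assumed to be the usual round-up-to-a-power-of-two-multiple macro
(as defined in the runtime's gc-internal.h); lutex_like is an invented stand-in:

    #include <stdio.h>

    typedef unsigned long lispobj;

    /* Round X up to the next multiple of ALIGN, a power of two. */
    #define CEILING(x, align) (((x) + (align) - 1) & ~((align) - 1))

    struct lutex_like {
        lispobj header;
        int live, gen;
        void *next, *prev;
    };

    int main(void) {
        /* Every heap object occupies an even number of words, so the
         * per-widetag size must be rounded up to a multiple of 2. */
        size_t words = CEILING(sizeof(struct lutex_like)/sizeof(lispobj), 2);
        printf("%zu words\n", words);
        return 0;
    }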
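
Finally, preserve_context_registers treats a thread's saved signal context as
a block of potential Lisp pointers: every word-sized slot is handed to
preserve_pointer so the pages it might reference get pinned. On Darwin the
context isn't one contiguous block, which is why the patch pushes the
individual registers (and the PC) through preserve_pointer first. The toy
below illustrates just the conservative word-scan idea; fake_context and the
stub preserve_pointer are invented for the example, where the real function
pins the page containing the address if it plausibly points into dynamic space.

    #include <stdio.h>

    /* Stand-in for gencgc's preserve_pointer(). */
    static void preserve_pointer(void *addr) {
        printf("would consider pinning %p\n", addr);
    }

    struct fake_context {
        void *pc;
        void *regs[8];
    };

    /* Walk the context as an array of word-sized slots, treating every
     * slot as a potential pointer -- the conservative-root idea behind
     * preserve_context_registers(). */
    static void scan_context(struct fake_context *c) {
        void **ptr;
        for (ptr = (void **)(c + 1) - 1; ptr >= (void **)c; ptr--)
            preserve_pointer(*ptr);
    }

    int main(void) {
        struct fake_context c = {0};
        c.regs[0] = &c;   /* a value that happens to look like a pointer */
        scan_context(&c);
        return 0;
    }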