X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=2f648121ff0ec8ff6d03626c97f35650fe8ca215;hb=1eb303172df6650de51ad12b993a392681f50c50;hp=3721c88481835817b9363a3d1e2cac98277dca0d;hpb=baa0eaf21221dc564088c37b228c620c298aeaa1;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index 3721c88..2f64812 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -1,5 +1,5 @@ /* - * GENerational Conservative Garbage Collector for SBCL x86 + * GENerational Conservative Garbage Collector for SBCL */ /* @@ -24,6 +24,7 @@ * . */ +#include #include #include #include @@ -37,15 +38,22 @@ #include "validate.h" #include "lispregs.h" #include "arch.h" -#include "fixnump.h" #include "gc.h" #include "gc-internal.h" #include "thread.h" +#include "alloc.h" #include "genesis/vector.h" #include "genesis/weak-pointer.h" +#include "genesis/fdefn.h" #include "genesis/simple-fun.h" #include "save.h" #include "genesis/hash-table.h" +#include "genesis/instance.h" +#include "genesis/layout.h" +#include "gencgc.h" +#if defined(LUTEX_WIDETAG) +#include "pthread-lutex.h" +#endif /* forward declarations */ page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes, @@ -71,7 +79,7 @@ enum { boolean enable_page_protection = 1; /* the minimum size (in bytes) for a large object*/ -unsigned long large_object_size = 4 * PAGE_BYTES; +long large_object_size = 4 * PAGE_BYTES; /* @@ -137,7 +145,6 @@ boolean gencgc_partial_pickup = 0; /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */ unsigned long bytes_allocated = 0; -extern unsigned long bytes_consed_between_gcs; /* gc-common.c */ unsigned long auto_gc_trigger = 0; /* the source and destination generations. These are set before a GC starts @@ -145,25 +152,23 @@ unsigned long auto_gc_trigger = 0; generation_index_t from_space; generation_index_t new_space; +/* Set to 1 when in GC */ +boolean gc_active_p = 0; + /* should the GC be conservative on stack. If false (only right before * saving a core), don't scan the stack / mark pages dont_move. */ static boolean conservative_stack = 1; -/* An array of page structures is statically allocated. +/* An array of page structures is allocated on gc initialization. * This helps quickly map between an address its page structure. - * NUM_PAGES is set from the size of the dynamic space. */ -struct page page_table[NUM_PAGES]; + * page_table_pages is set from the size of the dynamic space. */ +page_index_t page_table_pages; +struct page *page_table; /* To map addresses to page structures the address of the first page * is needed. */ static void *heap_base = NULL; -#if N_WORD_BITS == 32 - #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG -#elif N_WORD_BITS == 64 - #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG -#endif - /* Calculate the start address for the given page number. */ inline void * page_address(page_index_t page_num) @@ -171,6 +176,14 @@ page_address(page_index_t page_num) return (heap_base + (page_num * PAGE_BYTES)); } +/* Calculate the address where the allocation region associated with + * the page starts. */ +inline void * +page_region_start(page_index_t page_index) +{ + return page_address(page_index)+page_table[page_index].first_object_offset; +} + /* Find the page index within the page_table for the given * address. Return -1 on failure. 
*/ inline page_index_t @@ -180,7 +193,7 @@ find_page_index(void *addr) if (index >= 0) { index = ((unsigned long)index)/PAGE_BYTES; - if (index < NUM_PAGES) + if (index < page_table_pages) return (index); } @@ -232,6 +245,14 @@ struct generation { * prevent a GC when a large number of new live objects have been * added, in which case a GC could be a waste of time */ double min_av_mem_age; + + /* A linked list of lutex structures in this generation, used for + * implementing lutex finalization. */ +#ifdef LUTEX_WIDETAG + struct lutex *lutexes; +#else + void *lutexes; +#endif }; /* an array of generation structures. There needs to be one more @@ -299,7 +320,7 @@ count_generation_pages(generation_index_t generation) long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != 0) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].gen == generation)) count++; return count; @@ -312,7 +333,8 @@ count_dont_move_pages(void) page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) { + if ((page_table[i].allocated != FREE_PAGE_FLAG) + && (page_table[i].dont_move != 0)) { ++count; } } @@ -328,7 +350,8 @@ count_generation_bytes_allocated (generation_index_t gen) page_index_t i; long result = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].gen == gen)) + if ((page_table[i].allocated != FREE_PAGE_FLAG) + && (page_table[i].gen == gen)) result += page_table[i].bytes_used; } return result; @@ -346,15 +369,20 @@ gen_av_mem_age(generation_index_t gen) / ((double)generations[gen].bytes_allocated); } -void fpu_save(int *); /* defined in x86-assem.S */ -void fpu_restore(int *); /* defined in x86-assem.S */ /* The verbose argument controls how much to print: 0 for normal * level of detail; 1 for debugging. */ static void print_generation_stats(int verbose) /* FIXME: should take FILE argument */ { generation_index_t i, gens; - int fpu_state[27]; + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) +#define FPU_STATE_SIZE 27 + int fpu_state[FPU_STATE_SIZE]; +#elif defined(LISP_FEATURE_PPC) +#define FPU_STATE_SIZE 32 + long long fpu_state[FPU_STATE_SIZE]; +#endif /* This code uses the FP instructions which may be set up for Lisp * so they need to be saved and reset for C. */ @@ -368,7 +396,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Print the heap stats. 
*/ fprintf(stderr, - " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); + " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); for (i = 0; i < gens; i++) { page_index_t j; @@ -403,9 +431,16 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ gc_assert(generations[i].bytes_allocated == count_generation_bytes_allocated(i)); fprintf(stderr, - " %1d: %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n", + " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n", i, - boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt, + generations[i].alloc_start_page, + generations[i].alloc_unboxed_start_page, + generations[i].alloc_large_start_page, + generations[i].alloc_large_unboxed_start_page, + boxed_cnt, + unboxed_cnt, + large_boxed_cnt, + large_unboxed_cnt, pinned_cnt, generations[i].bytes_allocated, (count_generation_pages(i)*PAGE_BYTES @@ -421,7 +456,9 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ } +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) void fast_bzero(void*, size_t); /* in -assem.S */ +#endif /* Zero the pages from START to END (inclusive), but use mmap/munmap instead * if zeroing it ourselves, i.e. in practice give the memory back to the @@ -438,7 +475,8 @@ void zero_pages_with_mmap(page_index_t start, page_index_t end) { os_invalidate(addr, length); new_addr = os_validate(addr, length); if (new_addr == NULL || new_addr != addr) { - lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", start, new_addr); + lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", + start, new_addr); } for (i = start; i <= end; i++) { @@ -454,7 +492,12 @@ zero_pages(page_index_t start, page_index_t end) { if (start > end) return; +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) fast_bzero(page_address(start), PAGE_BYTES*(1+end-start)); +#else + bzero(page_address(start), PAGE_BYTES*(1+end-start)); +#endif + } /* Zero the pages from START to END (inclusive), except for those @@ -562,6 +605,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) page_index_t last_page; long bytes_found; page_index_t i; + int ret; /* FSHOW((stderr, @@ -573,7 +617,8 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (unboxed) { first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; @@ -631,26 +676,13 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) /* Bump up last_free_page. */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), - 0); - } - thread_mutex_unlock(&free_pages_lock); - - /* we can do this after releasing free_pages_lock */ - if (gencgc_zero_check) { - long *p; - for (p = (long *)alloc_region->start_addr; - p < (long *)alloc_region->end_addr; p++) { - if (*p != 0) { - /* KLUDGE: It would be nice to use %lx and explicit casts - * (long) in code like this, so that it is less likely to - * break randomly when running on a machine with different - * word sizes. 
-- WHN 19991129 */ - lose("The new region at %x is not zero.\n", p); - } - } + /* do we only want to call this on special occasions? like for + * boxed_region? */ + set_alloc_pointer((lispobj)(((char *)heap_base) + + last_free_page*PAGE_BYTES)); } + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), @@ -667,6 +699,22 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) } zero_dirty_pages(first_page, last_page); + + /* we can do this after releasing free_pages_lock */ + if (gencgc_zero_check) { + long *p; + for (p = (long *)alloc_region->start_addr; + p < (long *)alloc_region->end_addr; p++) { + if (*p != 0) { + /* KLUDGE: It would be nice to use %lx and explicit casts + * (long) in code like this, so that it is less likely to + * break randomly when running on a machine with different + * word sizes. -- WHN 19991129 */ + lose("The new region at %x is not zero (start=%p, end=%p).\n", + p, alloc_region->start_addr, alloc_region->end_addr); + } + } + } } /* If the record_new_objects flag is 2 then all new regions created @@ -777,6 +825,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) long orig_first_page_bytes_used; long region_size; long byte_cnt; + int ret; first_page = alloc_region->first_page; @@ -787,12 +836,15 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (alloc_region->free_pointer != alloc_region->start_addr) { /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; - gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used)); + gc_assert(alloc_region->start_addr == + (page_address(first_page) + + page_table[first_page].bytes_used)); /* All the pages used need to be updated */ @@ -816,7 +868,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) { + if ((bytes_used = (alloc_region->free_pointer + - page_address(first_page)))>PAGE_BYTES) { bytes_used = PAGE_BYTES; more = 1; } @@ -891,7 +944,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); + /* alloc_region is per-thread, we're ok to do this unlocked */ gc_set_region_empty(alloc_region); } @@ -909,8 +964,10 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) int more; long bytes_used; page_index_t next_page; + int ret; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (unboxed) { first_page = @@ -985,7 +1042,8 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. 
*/ more = 0; - if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) { + bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt; + if (bytes_used > PAGE_BYTES) { bytes_used = PAGE_BYTES; more = 1; } @@ -1008,10 +1066,11 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) /* Bump up last_free_page */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); + set_alloc_pointer((lispobj)(((char *)heap_base) + + last_free_page*PAGE_BYTES)); } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), @@ -1026,88 +1085,122 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) static page_index_t gencgc_alloc_start_page = -1; +void +gc_heap_exhausted_error_or_lose (long available, long requested) +{ + /* Write basic information before doing anything else: if we don't + * call to lisp this is a must, and even if we do there is always + * the danger that we bounce back here before the error has been + * handled, or indeed even printed. + */ + fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n", + gc_active_p ? "garbage collection" : "allocation", + available, requested); + if (gc_active_p || (available == 0)) { + /* If we are in GC, or totally out of memory there is no way + * to sanely transfer control to the lisp-side of things. + */ + struct thread *thread = arch_os_get_current_thread(); + print_generation_stats(1); + fprintf(stderr, "GC control variables:\n"); + fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n", + SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true", + SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true"); +#ifdef LISP_FEATURE_SB_THREAD + fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n", + SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true"); +#endif + lose("Heap exhausted, game over."); + } + else { + /* FIXME: assert free_pages_lock held */ + (void)thread_mutex_unlock(&free_pages_lock); + funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), + alloc_number(available), alloc_number(requested)); + lose("HEAP-EXHAUSTED-ERROR fell through"); + } +} + page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) { - page_index_t first_page; - page_index_t last_page; - long region_size; - page_index_t restart_page=*restart_page_ptr; - long bytes_found; - long num_pages; - int large_p=(nbytes>=large_object_size); + page_index_t first_page, last_page; + page_index_t restart_page = *restart_page_ptr; + long bytes_found = 0; + long most_bytes_found = 0; /* FIXME: assert(free_pages_lock is held); */ - /* Search for a contiguous free space of at least nbytes. If it's - * a large object then align it on a page boundary by searching - * for a free page. */ - + /* Toggled by gc_and_save for heap compaction, normally -1. */ if (gencgc_alloc_start_page != -1) { restart_page = gencgc_alloc_start_page; } - do { - first_page = restart_page; - if (large_p) - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE_FLAG)) - first_page++; - else - while (first_page < NUM_PAGES) { - if(page_table[first_page].allocated == FREE_PAGE_FLAG) - break; - if((page_table[first_page].allocated == - (unboxed ? 
UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && - (page_table[first_page].large_object == 0) && - (page_table[first_page].gen == gc_alloc_generation) && - (page_table[first_page].bytes_used < (PAGE_BYTES-32)) && - (page_table[first_page].write_protected == 0) && - (page_table[first_page].dont_move == 0)) { - break; - } + if (nbytes>=PAGE_BYTES) { + /* Search for a contiguous free space of at least nbytes, + * aligned on a page boundary. The page-alignment is strictly + * speaking needed only for objects at least large_object_size + * bytes in size. */ + do { + first_page = restart_page; + while ((first_page < page_table_pages) && + (page_table[first_page].allocated != FREE_PAGE_FLAG)) first_page++; + + last_page = first_page; + bytes_found = PAGE_BYTES; + while ((bytes_found < nbytes) && + (last_page < (page_table_pages-1)) && + (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { + last_page++; + bytes_found += PAGE_BYTES; + gc_assert(page_table[last_page].write_protected == 0); } + if (bytes_found > most_bytes_found) + most_bytes_found = bytes_found; + restart_page = last_page + 1; + } while ((restart_page < page_table_pages) && (bytes_found < nbytes)); - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose("\n"); + } else { + /* Search for a page with at least nbytes of space. We prefer + * not to split small objects on multiple pages, to reduce the + * number of contiguous allocation regions spaning multiple + * pages: this helps avoid excessive conservativism. */ + first_page = restart_page; + while (first_page < page_table_pages) { + if (page_table[first_page].allocated == FREE_PAGE_FLAG) + { + bytes_found = PAGE_BYTES; + break; + } + else if ((page_table[first_page].allocated == + (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && + (page_table[first_page].large_object == 0) && + (page_table[first_page].gen == gc_alloc_generation) && + (page_table[first_page].write_protected == 0) && + (page_table[first_page].dont_move == 0)) + { + bytes_found = PAGE_BYTES + - page_table[first_page].bytes_used; + if (bytes_found > most_bytes_found) + most_bytes_found = bytes_found; + if (bytes_found >= nbytes) + break; + } + first_page++; } - - gc_assert(page_table[first_page].write_protected == 0); - last_page = first_page; - bytes_found = PAGE_BYTES - page_table[first_page].bytes_used; - num_pages = 1; - while (((bytes_found < nbytes) - || (!large_p && (num_pages < 2))) - && (last_page < (NUM_PAGES-1)) - && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { - last_page++; - num_pages++; - bytes_found += PAGE_BYTES; - gc_assert(page_table[last_page].write_protected == 0); - } - - region_size = (PAGE_BYTES - page_table[first_page].bytes_used) - + PAGE_BYTES*(last_page-first_page); - - gc_assert(bytes_found == region_size); - restart_page = last_page + 1; - } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); + restart_page = first_page + 1; + } /* Check for a failure */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! 
gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose("\n"); + if (bytes_found < nbytes) { + gc_assert(restart_page >= page_table_pages); + gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes); } - *restart_page_ptr=first_page; + gc_assert(page_table[first_page].write_protected == 0); + + *restart_page_ptr = first_page; return last_page; } @@ -1120,7 +1213,7 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, { void *new_free_pointer; - if(nbytes>=large_object_size) + if (nbytes>=large_object_size) return gc_alloc_large(nbytes,unboxed_p,my_region); /* Check whether there is room in the current alloc region. */ @@ -1197,13 +1290,6 @@ gc_quick_alloc_large_unboxed(long nbytes) return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); } -/* - * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b - */ - -extern long (*scavtab[256])(lispobj *where, lispobj object); -extern lispobj (*transother[256])(lispobj object); -extern long (*sizetab[256])(lispobj *where); /* Copy a large boxed object. If the object is in a large object * region then it is simply promoted, else it is copied. If it's large @@ -1302,8 +1388,8 @@ copy_large_object(lispobj object, long nwords) next_page++; } - generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords + - bytes_freed; + generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords + + bytes_freed; generations[new_space].bytes_allocated += N_WORD_BYTES*nwords; bytes_allocated -= bytes_freed; @@ -1371,7 +1457,8 @@ copy_large_unboxed_object(lispobj object, long nwords) gc_assert((nwords & 0x01) == 0); if ((nwords > 1024*1024) && gencgc_verbose) - FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); + FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", + nwords*N_WORD_BYTES)); /* Check whether it's a large object. 
*/ first_page = find_page_index((void *)object); @@ -1447,7 +1534,8 @@ copy_large_unboxed_object(lispobj object, long nwords) "/copy_large_unboxed bytes_freed=%d\n", bytes_freed)); - generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; + generations[from_space].bytes_allocated -= + nwords*N_WORD_BYTES + bytes_freed; generations[new_space].bytes_allocated += nwords*N_WORD_BYTES; bytes_allocated -= bytes_freed; @@ -1502,6 +1590,8 @@ sniff_code_object(struct code *code, unsigned long displacement) if (!check_code_fixups) return; + FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement)); + ncode_words = fixnum_value(code->code_size); nheader_words = HeaderValue(*(lispobj *)code); nwords = ncode_words + nheader_words; @@ -1530,7 +1620,8 @@ sniff_code_object(struct code *code, unsigned long displacement) && (data < (code_end_addr-displacement))) { /* function header */ if ((d4 == 0x5e) - && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) { + && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == + (unsigned)code)) { /* Skip the function header */ p += 6*4 - 4 - 1; continue; @@ -1670,7 +1761,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) void *constants_start_addr, *constants_end_addr; void *code_start_addr, *code_end_addr; lispobj fixups = NIL; - unsigned long displacement = (unsigned long)new_code - (unsigned long)old_code; + unsigned long displacement = + (unsigned long)new_code - (unsigned long)old_code; struct vector *fixups_vector; ncode_words = fixnum_value(new_code->code_size); @@ -1718,7 +1810,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) (fixups_vector->header == 0x01)) { /* If so, then follow it. */ /*SHOW("following pointer to a forwarding pointer");*/ - fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length); + fixups_vector = + (struct vector *)native_pointer((lispobj)fixups_vector->length); } /*SHOW("got fixups");*/ @@ -1737,7 +1830,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) /* If it's within the old_code object then it must be an * absolute fixup (relative ones are not saved) */ if ((old_value >= (unsigned long)old_code) - && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES))) + && (old_value < ((unsigned long)old_code + + nwords*N_WORD_BYTES))) /* So add the dispacement. */ *(unsigned long *)((unsigned long)code_start_addr + offset) = old_value + displacement; @@ -1749,7 +1843,10 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) old_value - displacement; } } else { - fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header)); + /* This used to just print a note to stderr, but a bogus fixup seems to + * indicate real heap corruption, so a hard hailure is in order. */ + lose("fixup vector %p has a bad widetag: %d\n", + fixups_vector, widetag_of(fixups_vector->header)); } /* Check for possible errors. */ @@ -1795,219 +1892,177 @@ trans_unboxed_large(lispobj object) /* - * vector-like objects + * Lutexes. Using the normal finalization machinery for finalizing + * lutexes is tricky, since the finalization depends on working lutexes. + * So we track the lutexes in the GC and finalize them manually. */ +#if defined(LUTEX_WIDETAG) -/* FIXME: What does this mean? */ -int gencgc_hash = 1; +/* + * Start tracking LUTEX in the GC, by adding it to the linked list of + * lutexes in the nursery generation. 
The caller is responsible for + * locking, and GCs must be inhibited until the registration is + * complete. + */ +void +gencgc_register_lutex (struct lutex *lutex) { + int index = find_page_index(lutex); + generation_index_t gen; + struct lutex *head; -static long -scav_vector(lispobj *where, lispobj object) -{ - unsigned long kv_length; - lispobj *kv_vector; - unsigned long length = 0; /* (0 = dummy to stop GCC warning) */ - struct hash_table *hash_table; - lispobj empty_symbol; - unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - lispobj weak_p_obj; - unsigned long next_vector_length = 0; - - /* FIXME: A comment explaining this would be nice. It looks as - * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based - * hash tables in the Lisp HASH-TABLE code, and nowhere else. */ - if (HeaderValue(object) != subtype_VectorValidHashing) - return 1; + /* This lutex is in static space, so we don't need to worry about + * finalizing it. + */ + if (index == -1) + return; - if (!gencgc_hash) { - /* This is set for backward compatibility. FIXME: Do we need - * this any more? */ - *where = - (subtype_VectorMustRehash<= 0); + gc_assert(gen < NUM_GENERATIONS); - /* Scavenge element 0, which may be a hash-table structure. */ - scavenge(where+2, 1); - if (!is_lisp_pointer(where[2])) { - lose("no pointer at %x in hash table\n", where[2]); - } - hash_table = (struct hash_table *)native_pointer(where[2]); - /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/ - if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) { - lose("hash table not instance (%x at %x)\n", - hash_table->header, - hash_table); - } + head = generations[gen].lutexes; - /* Scavenge element 1, which should be some internal symbol that - * the hash table code reserves for marking empty slots. */ - scavenge(where+3, 1); - if (!is_lisp_pointer(where[3])) { - lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]); - } - empty_symbol = where[3]; - /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/ - if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) != - SYMBOL_HEADER_WIDETAG) { - lose("not a symbol where empty-hash-table-slot symbol expected: %x\n", - *(lispobj *)native_pointer(empty_symbol)); - } + lutex->gen = gen; + lutex->next = head; + lutex->prev = NULL; + if (head) + head->prev = lutex; + generations[gen].lutexes = lutex; +} - /* Scavenge hash table, which will fix the positions of the other - * needed objects. */ - scavenge((lispobj *)hash_table, - sizeof(struct hash_table) / sizeof(lispobj)); +/* + * Stop tracking LUTEX in the GC by removing it from the appropriate + * linked lists. This will only be called during GC, so no locking is + * needed. + */ +void +gencgc_unregister_lutex (struct lutex *lutex) { + if (lutex->prev) { + lutex->prev->next = lutex->next; + } else { + generations[lutex->gen].lutexes = lutex->next; + } - /* Cross-check the kv_vector. 
*/ - if (where != (lispobj *)native_pointer(hash_table->table)) { - lose("hash_table table!=this table %x\n", hash_table->table); + if (lutex->next) { + lutex->next->prev = lutex->prev; } - /* WEAK-P */ - weak_p_obj = hash_table->weak_p; + lutex->next = NULL; + lutex->prev = NULL; + lutex->gen = -1; +} - /* index vector */ - { - lispobj index_vector_obj = hash_table->index_vector; - - if (is_lisp_pointer(index_vector_obj) && - (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) == - SIMPLE_ARRAY_WORD_WIDETAG)) { - index_vector = - ((unsigned long *)native_pointer(index_vector_obj)) + 2; - /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/ - length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]); - /*FSHOW((stderr, "/length = %d\n", length));*/ - } else { - lose("invalid index_vector %x\n", index_vector_obj); - } +/* + * Mark all lutexes in generation GEN as not live. + */ +static void +unmark_lutexes (generation_index_t gen) { + struct lutex *lutex = generations[gen].lutexes; + + while (lutex) { + lutex->live = 0; + lutex = lutex->next; } +} - /* next vector */ - { - lispobj next_vector_obj = hash_table->next_vector; - - if (is_lisp_pointer(next_vector_obj) && - (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) == - SIMPLE_ARRAY_WORD_WIDETAG)) { - next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2; - /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/ - next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]); - /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/ - } else { - lose("invalid next_vector %x\n", next_vector_obj); +/* + * Finalize all lutexes in generation GEN that have not been marked live. + */ +static void +reap_lutexes (generation_index_t gen) { + struct lutex *lutex = generations[gen].lutexes; + + while (lutex) { + struct lutex *next = lutex->next; + if (!lutex->live) { + lutex_destroy((tagged_lutex_t) lutex); + gencgc_unregister_lutex(lutex); } + lutex = next; } +} - /* maybe hash vector */ - { - lispobj hash_vector_obj = hash_table->hash_vector; - - if (is_lisp_pointer(hash_vector_obj) && - (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) == - SIMPLE_ARRAY_WORD_WIDETAG)){ - hash_vector = - ((unsigned long *)native_pointer(hash_vector_obj)) + 2; - /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/ - gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1]) - == next_vector_length); - } else { - hash_vector = NULL; - /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/ - } +/* + * Mark LUTEX as live. + */ +static void +mark_lutex (lispobj tagged_lutex) { + struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex); + + lutex->live = 1; +} + +/* + * Move all lutexes in generation FROM to generation TO. + */ +static void +move_lutexes (generation_index_t from, generation_index_t to) { + struct lutex *tail = generations[from].lutexes; + + /* Nothing to move */ + if (!tail) + return; + + /* Change the generation of the lutexes in FROM. */ + while (tail->next) { + tail->gen = to; + tail = tail->next; } + tail->gen = to; - /* These lengths could be different as the index_vector can be a - * different length from the others, a larger index_vector could help - * reduce collisions. */ - gc_assert(next_vector_length*2 == kv_length); + /* Link the last lutex in the FROM list to the start of the TO list */ + tail->next = generations[to].lutexes; - /* now all set up.. 
*/ + /* And vice versa */ + if (generations[to].lutexes) { + generations[to].lutexes->prev = tail; + } - /* Work through the KV vector. */ - { - long i; - for (i = 1; i < next_vector_length; i++) { - lispobj old_key = kv_vector[2*i]; + /* And update the generations structures to match this */ + generations[to].lutexes = generations[from].lutexes; + generations[from].lutexes = NULL; +} -#if N_WORD_BITS == 32 - unsigned long old_index = (old_key & 0x1fffffff)%length; -#elif N_WORD_BITS == 64 - unsigned long old_index = (old_key & 0x1fffffffffffffff)%length; -#endif +static long +scav_lutex(lispobj *where, lispobj object) +{ + mark_lutex((lispobj) where); - /* Scavenge the key and value. */ - scavenge(&kv_vector[2*i],2); + return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); +} - /* Check whether the key has moved and is EQ based. */ - { - lispobj new_key = kv_vector[2*i]; -#if N_WORD_BITS == 32 - unsigned long new_index = (new_key & 0x1fffffff)%length; -#elif N_WORD_BITS == 64 - unsigned long new_index = (new_key & 0x1fffffffffffffff)%length; -#endif +static lispobj +trans_lutex(lispobj object) +{ + struct lutex *lutex = (struct lutex *) native_pointer(object); + lispobj copied; + size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); + gc_assert(is_lisp_pointer(object)); + copied = copy_object(object, words); - if ((old_index != new_index) && - ((!hash_vector) || - (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) && - ((new_key != empty_symbol) || - (kv_vector[2*i] != empty_symbol))) { - - /*FSHOW((stderr, - "* EQ key %d moved from %x to %x; index %d to %d\n", - i, old_key, new_key, old_index, new_index));*/ - - if (index_vector[old_index] != 0) { - /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/ - - /* Unlink the key from the old_index chain. */ - if (index_vector[old_index] == i) { - /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/ - index_vector[old_index] = next_vector[i]; - /* Link it into the needing rehash chain. */ - next_vector[i] = fixnum_value(hash_table->needing_rehash); - hash_table->needing_rehash = make_fixnum(i); - /*SHOW("P2");*/ - } else { - unsigned long prior = index_vector[old_index]; - unsigned long next = next_vector[prior]; - - /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/ - - while (next != 0) { - /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/ - if (next == i) { - /* Unlink it. */ - next_vector[prior] = next_vector[next]; - /* Link it into the needing rehash - * chain. */ - next_vector[next] = - fixnum_value(hash_table->needing_rehash); - hash_table->needing_rehash = make_fixnum(next); - /*SHOW("/P3");*/ - break; - } - prior = next; - next = next_vector[next]; - } - } - } - } - } - } + /* Update the links, since the lutex moved in memory. */ + if (lutex->next) { + lutex->next->prev = (struct lutex *) native_pointer(copied); + } + + if (lutex->prev) { + lutex->prev->next = (struct lutex *) native_pointer(copied); + } else { + generations[lutex->gen].lutexes = + (struct lutex *) native_pointer(copied); } - return (CEILING(kv_length + 2, 2)); + + return copied; } +static long +size_lutex(lispobj *where) +{ + return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); +} +#endif /* LUTEX_WIDETAG */ /* @@ -2025,29 +2080,21 @@ scav_vector(lispobj *where, lispobj object) static long scav_weak_pointer(lispobj *where, lispobj object) { - struct weak_pointer *wp = weak_pointers; - /* Push the weak pointer onto the list of weak pointers. - * Do I have to watch for duplicates? 
Originally this was - * part of trans_weak_pointer but that didn't work in the - * case where the WP was in a promoted region. + /* Since we overwrite the 'next' field, we have to make + * sure not to do so for pointers already in the list. + * Instead of searching the list of weak_pointers each + * time, we ensure that next is always NULL when the weak + * pointer isn't in the list, and not NULL otherwise. + * Since we can't use NULL to denote end of list, we + * use a pointer back to the same weak_pointer. */ + struct weak_pointer * wp = (struct weak_pointer*)where; - /* Check whether it's already in the list. */ - while (wp != NULL) { - if (wp == (struct weak_pointer*)where) { - break; - } - wp = wp->next; - } - if (wp == NULL) { - /* Add it to the start of the list. */ - wp = (struct weak_pointer*)where; - if (wp->next != weak_pointers) { - wp->next = weak_pointers; - } else { - /*SHOW("avoided write to weak pointer");*/ - } + if (NULL == wp->next) { + wp->next = weak_pointers; weak_pointers = wp; + if (NULL == wp->next) + wp->next = wp; } /* Do not let GC scavenge the value slot of the weak pointer. @@ -2093,44 +2140,36 @@ search_dynamic_space(void *pointer) if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE_FLAG)) return NULL; - start = (lispobj *)((void *)page_address(page_index) - + page_table[page_index].first_object_offset); + start = (lispobj *)page_region_start(page_index); return (gc_search_space(start, (((lispobj *)pointer)+2)-start, (lispobj *)pointer)); } -/* Is there any possibility that pointer is a valid Lisp object - * reference, and/or something else (e.g. subroutine call return - * address) which should prevent us from moving the referred-to thing? - * This is called from preserve_pointers() */ +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + +/* Helper for valid_lisp_pointer_p and + * possibly_valid_dynamic_space_pointer. + * + * pointer is the pointer to validate, and start_addr is the address + * of the enclosing object. + */ static int -possibly_valid_dynamic_space_pointer(lispobj *pointer) +looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) { - lispobj *start_addr; - - /* Find the object start address. */ - if ((start_addr = search_dynamic_space(pointer)) == NULL) { - return 0; - } - /* We need to allow raw pointers into Code objects for return * addresses. This will also pick up pointers to functions in code * objects. */ - if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) { + if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) /* XXX could do some further checks here */ return 1; - } - /* If it's not a return address then it needs to be a valid Lisp - * pointer. */ if (!is_lisp_pointer((lispobj)pointer)) { return 0; } /* Check that the object pointed to is consistent with the pointer - * low tag. - */ + * low tag. */ switch (lowtag_of((lispobj)pointer)) { case FUN_POINTER_LOWTAG: /* Start_addr should be the enclosing code object, or a closure @@ -2168,20 +2207,10 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) return 0; } /* Is it plausible cons? 
*/ - if ((is_lisp_pointer(start_addr[0]) - || (fixnump(start_addr[0])) - || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG) -#if N_WORD_BITS == 64 - || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG) -#endif - || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG)) - && (is_lisp_pointer(start_addr[1]) - || (fixnump(start_addr[1])) - || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG) -#if N_WORD_BITS == 64 - || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG) -#endif - || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG))) + if ((is_lisp_pointer(start_addr[0]) || + is_lisp_immediate(start_addr[0])) && + (is_lisp_pointer(start_addr[1]) || + is_lisp_immediate(start_addr[1]))) break; else { if (gencgc_verbose) @@ -2347,6 +2376,9 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) #endif case SAP_WIDETAG: case WEAK_POINTER_WIDETAG: +#ifdef LUTEX_WIDETAG + case LUTEX_WIDETAG: +#endif break; default: @@ -2369,6 +2401,47 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) return 1; } +/* Used by the debugger to validate possibly bogus pointers before + * calling MAKE-LISP-OBJ on them. + * + * FIXME: We would like to make this perfect, because if the debugger + * constructs a reference to a bugs lisp object, and it ends up in a + * location scavenged by the GC all hell breaks loose. + * + * Whereas possibly_valid_dynamic_space_pointer has to be conservative + * and return true for all valid pointers, this could actually be eager + * and lie about a few pointers without bad results... but that should + * be reflected in the name. + */ +int +valid_lisp_pointer_p(lispobj *pointer) +{ + lispobj *start; + if (((start=search_dynamic_space(pointer))!=NULL) || + ((start=search_static_space(pointer))!=NULL) || + ((start=search_read_only_space(pointer))!=NULL)) + return looks_like_valid_lisp_pointer_p(pointer, start); + else + return 0; +} + +/* Is there any possibility that pointer is a valid Lisp object + * reference, and/or something else (e.g. subroutine call return + * address) which should prevent us from moving the referred-to thing? + * This is called from preserve_pointers() */ +static int +possibly_valid_dynamic_space_pointer(lispobj *pointer) +{ + lispobj *start_addr; + + /* Find the object start address. */ + if ((start_addr = search_dynamic_space(pointer)) == NULL) { + return 0; + } + + return looks_like_valid_lisp_pointer_p(pointer, start_addr); +} + /* Adjust large bignum and vector objects. This will adjust the * allocated region if the size has shrunk, and move unboxed objects * into unboxed pages. The pages are not promoted here, and the @@ -2555,6 +2628,7 @@ maybe_adjust_large_object(lispobj *where) * * It is also assumed that the current gc_alloc() region has been * flushed and the tables updated. */ + static void preserve_pointer(void *addr) { @@ -2579,7 +2653,8 @@ preserve_pointer(void *addr) /* quick check 2: Check the offset within the page. * */ - if (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used) + if (((unsigned long)addr & (PAGE_BYTES - 1)) > + page_table[addr_page_index].bytes_used) return; /* Filter out anything which can't be a pointer to a Lisp object @@ -2599,9 +2674,7 @@ preserve_pointer(void *addr) #if 0 /* I think this'd work just as well, but without the assertions. 
* -dan 2004.01.01 */ - first_page= - find_page_index(page_address(addr_page_index)+ - page_table[addr_page_index].first_object_offset); + first_page = find_page_index(page_region_start(addr_page_index)) #else first_page = addr_page_index; while (page_table[first_page].first_object_offset != 0) { @@ -2670,6 +2743,9 @@ preserve_pointer(void *addr) /* Check that the page is now static. */ gc_assert(page_table[addr_page_index].dont_move != 0); } + +#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + /* If the given page is not write-protected, then scan it for pointers * to younger generations or the top temp. generation, if no @@ -2782,7 +2858,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) #define SC_GEN_CK 0 #if SC_GEN_CK /* Clear the write_protected_cleared flags on all pages. */ - for (i = 0; i < NUM_PAGES; i++) + for (i = 0; i < page_table_pages; i++) page_table[i].write_protected_cleared = 0; #endif @@ -2813,8 +2889,8 @@ scavenge_generations(generation_index_t from, generation_index_t to) } if (!write_protected) { scavenge(page_address(i), - (page_table[last_page].bytes_used + - (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + (page_table[last_page].bytes_used + + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); /* Now scan the pages and write protect those that * don't have pointers to younger generations. */ @@ -2836,7 +2912,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) #if SC_GEN_CK /* Check that none of the write_protected pages in this generation * have been written to. */ - for (i = 0; i < NUM_PAGES; i++) { + for (i = 0; i < page_table_pages; i++) { if ((page_table[i].allocation != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) @@ -2935,9 +3011,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) - page_table[i].first_object_offset)/N_WORD_BYTES; new_areas_ignore_page = last_page; - scavenge(page_address(i) + - page_table[i].first_object_offset, - size); + scavenge(page_region_start(i), size); } i = last_page; @@ -2979,6 +3053,13 @@ scavenge_newspace_generation(generation_index_t generation) /* Record all new areas now. */ record_new_objects = 2; + /* Give a chance to weak hash tables to make other objects live. + * FIXME: The algorithm implemented here for weak hash table gcing + * is O(W^2+N) as Bruno Haible warns in + * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html + * see "Implementation 2". */ + scav_weak_hash_tables(); + /* Flush the current regions updating the tables. */ gc_alloc_update_all_page_tables(); @@ -3017,8 +3098,8 @@ scavenge_newspace_generation(generation_index_t generation) if (gencgc_verbose) SHOW("new_areas overflow, doing full scavenge"); - /* Don't need to record new areas that get scavenge anyway - * during scavenge_newspace_generation_one_scan. */ + /* Don't need to record new areas that get scavenged + * anyway during scavenge_newspace_generation_one_scan. */ record_new_objects = 1; scavenge_newspace_generation_one_scan(generation); @@ -3026,6 +3107,8 @@ scavenge_newspace_generation(generation_index_t generation) /* Record all new areas now. */ record_new_objects = 2; + scav_weak_hash_tables(); + /* Flush the current regions updating the tables. */ gc_alloc_update_all_page_tables(); @@ -3040,6 +3123,8 @@ scavenge_newspace_generation(generation_index_t generation) scavenge(page_address(page)+offset, size); } + scav_weak_hash_tables(); + /* Flush the current regions updating the tables. 
*/ gc_alloc_update_all_page_tables(); } @@ -3057,7 +3142,7 @@ scavenge_newspace_generation(generation_index_t generation) #if SC_NS_GEN_CK /* Check that none of the write_protected pages in this generation * have been written to. */ - for (i = 0; i < NUM_PAGES; i++) { + for (i = 0; i < page_table_pages; i++) { if ((page_table[i].allocation != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) @@ -3191,8 +3276,6 @@ print_ptr(lispobj *addr) } #endif -extern long undefined_tramp; - static void verify_space(lispobj *start, size_t words) { @@ -3247,8 +3330,7 @@ verify_space(lispobj *start, size_t words) */ } else { /* Verify that it points to another valid space. */ - if (!to_readonly_space && !to_static_space - && (thing != (unsigned long)&undefined_tramp)) { + if (!to_readonly_space && !to_static_space) { lose("Ptr %x @ %x sees junk.\n", thing, start); } } @@ -3279,11 +3361,26 @@ verify_space(lispobj *start, size_t words) case SINGLE_FLOAT_WIDETAG: #endif case UNBOUND_MARKER_WIDETAG: - case INSTANCE_HEADER_WIDETAG: case FDEFN_WIDETAG: count = 1; break; + case INSTANCE_HEADER_WIDETAG: + { + lispobj nuntagged; + long ntotal = HeaderValue(thing); + lispobj layout = ((struct instance *)start)->slots[0]; + if (!layout) { + count = 1; + break; + } + nuntagged = ((struct layout *) + native_pointer(layout))->n_untagged_slots; + verify_space(start + 1, + ntotal - fixnum_value(nuntagged)); + count = ntotal + 1; + break; + } case CODE_HEADER_WIDETAG: { lispobj object = *start; @@ -3328,7 +3425,8 @@ verify_space(lispobj *start, size_t words) while (fheaderl != NIL) { fheaderp = (struct simple_fun *) native_pointer(fheaderl); - gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG); + gc_assert(widetag_of(fheaderp->header) == + SIMPLE_FUN_HEADER_WIDETAG); verify_space(&fheaderp->name, 1); verify_space(&fheaderp->arglist, 1); verify_space(&fheaderp->type, 1); @@ -3416,11 +3514,18 @@ verify_space(lispobj *start, size_t words) #endif case SAP_WIDETAG: case WEAK_POINTER_WIDETAG: +#ifdef LUTEX_WIDETAG + case LUTEX_WIDETAG: +#endif +#ifdef NO_TLS_VALUE_MARKER_WIDETAG + case NO_TLS_VALUE_MARKER_WIDETAG: +#endif count = (sizetab[widetag_of(*start)])(start); break; default: - gc_abort(); + lose("Unhandled widetag 0x%x at 0x%x\n", + widetag_of(*start), start); } } } @@ -3447,7 +3552,7 @@ verify_gc(void) struct thread *th; for_each_thread(th) { long binding_stack_size = - (lispobj*)SymbolValue(BINDING_STACK_POINTER,th) + (lispobj*)get_binding_stack_pointer(th) - (lispobj*)th->binding_stack_start; verify_space(th->binding_stack_start, binding_stack_size); } @@ -3486,8 +3591,9 @@ verify_generation(generation_index_t generation) || (page_table[last_page+1].first_object_offset == 0)) break; - verify_space(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + verify_space(page_address(i), + (page_table[last_page].bytes_used + + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); i = last_page; } } @@ -3596,6 +3702,208 @@ write_protect_generation_pages(generation_index_t generation) } } +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + +static void +scavenge_control_stack() +{ + unsigned long control_stack_size; + + /* This is going to be a big problem when we try to port threads + * to PPC... 
CLH */ + struct thread *th = arch_os_get_current_thread(); + lispobj *control_stack = + (lispobj *)(th->control_stack_start); + + control_stack_size = current_control_stack_pointer - control_stack; + scavenge(control_stack, control_stack_size); +} + +/* Scavenging Interrupt Contexts */ + +static int boxed_registers[] = BOXED_REGISTERS; + +static void +scavenge_interrupt_context(os_context_t * context) +{ + int i; + +#ifdef reg_LIP + unsigned long lip; + unsigned long lip_offset; + int lip_register_pair; +#endif + unsigned long pc_code_offset; + +#ifdef ARCH_HAS_LINK_REGISTER + unsigned long lr_code_offset; +#endif +#ifdef ARCH_HAS_NPC_REGISTER + unsigned long npc_code_offset; +#endif + +#ifdef reg_LIP + /* Find the LIP's register pair and calculate it's offset */ + /* before we scavenge the context. */ + + /* + * I (RLT) think this is trying to find the boxed register that is + * closest to the LIP address, without going past it. Usually, it's + * reg_CODE or reg_LRA. But sometimes, nothing can be found. + */ + lip = *os_context_register_addr(context, reg_LIP); + lip_offset = 0x7FFFFFFF; + lip_register_pair = -1; + for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) { + unsigned long reg; + long offset; + int index; + + index = boxed_registers[i]; + reg = *os_context_register_addr(context, index); + if ((reg & ~((1L<uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is + * that what we really want? My guess is that that is not what we + * want, so if lip_register_pair is -1, we don't touch reg_LIP at + * all. But maybe it doesn't really matter if LIP is trashed? + */ + if (lip_register_pair >= 0) { + *os_context_register_addr(context, reg_LIP) = + *os_context_register_addr(context, lip_register_pair) + + lip_offset; + } +#endif /* reg_LIP */ + + /* Fix the PC if it was in from space */ + if (from_space_p(*os_context_pc_addr(context))) + *os_context_pc_addr(context) = + *os_context_register_addr(context, reg_CODE) + pc_code_offset; + +#ifdef ARCH_HAS_LINK_REGISTER + /* Fix the LR ditto; important if we're being called from + * an assembly routine that expects to return using blr, otherwise + * harmless */ + if (from_space_p(*os_context_lr_addr(context))) + *os_context_lr_addr(context) = + *os_context_register_addr(context, reg_CODE) + lr_code_offset; +#endif + +#ifdef ARCH_HAS_NPC_REGISTER + if (from_space_p(*os_context_npc_addr(context))) + *os_context_npc_addr(context) = + *os_context_register_addr(context, reg_CODE) + npc_code_offset; +#endif /* ARCH_HAS_NPC_REGISTER */ +} + +void +scavenge_interrupt_contexts(void) +{ + int i, index; + os_context_t *context; + + struct thread *th=arch_os_get_current_thread(); + + index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0)); + +#if defined(DEBUG_PRINT_CONTEXT_INDEX) + printf("Number of active contexts: %d\n", index); +#endif + + for (i = 0; i < index; i++) { + context = th->interrupt_contexts[i]; + scavenge_interrupt_context(context); + } +} + +#endif + +#if defined(LISP_FEATURE_SB_THREAD) +static void +preserve_context_registers (os_context_t *c) +{ + void **ptr; + /* On Darwin the signal context isn't a contiguous block of memory, + * so just preserve_pointering its contents won't be sufficient. 
+ */ +#if defined(LISP_FEATURE_DARWIN) +#if defined LISP_FEATURE_X86 + preserve_pointer((void*)*os_context_register_addr(c,reg_EAX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_ECX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EDX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EBX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_ESI)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EDI)); + preserve_pointer((void*)*os_context_pc_addr(c)); +#elif defined LISP_FEATURE_X86_64 + preserve_pointer((void*)*os_context_register_addr(c,reg_RAX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RCX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RDX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RBX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RSI)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RDI)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R8)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R9)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R10)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R11)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R12)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R13)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R14)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R15)); + preserve_pointer((void*)*os_context_pc_addr(c)); +#else + #error "preserve_context_registers needs to be tweaked for non-x86 Darwin" +#endif +#endif + for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) { + preserve_pointer(*ptr); + } +} +#endif + /* Garbage collect a generation. If raise is 0 then the remains of the * generation are not raised to the next generation. */ static void @@ -3604,15 +3912,24 @@ garbage_collect_generation(generation_index_t generation, int raise) unsigned long bytes_freed; page_index_t i; unsigned long static_space_size; +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) struct thread *th; +#endif gc_assert(generation <= HIGHEST_NORMAL_GENERATION); /* The oldest generation can't be raised. */ gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0)); + /* Check if weak hash tables were processed in the previous GC. */ + gc_assert(weak_hash_tables == NULL); + /* Initialize the weak pointer list. */ weak_pointers = NULL; +#ifdef LUTEX_WIDETAG + unmark_lutexes(generation); +#endif + /* When a generation is not being raised it is transported to a * temporary generation (NUM_GENERATIONS), and lowered when * done. Set up this new generation. There should be no pages @@ -3664,6 +3981,7 @@ garbage_collect_generation(generation_index_t generation, int raise) * initiates GC. If you ever call GC from inside an altstack * handler, you will lose. */ +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) /* And if we're saving a core, there's no point in being conservative. 
*/ if (conservative_stack) { for_each_thread(th) { @@ -3684,20 +4002,20 @@ garbage_collect_generation(generation_index_t generation, int raise) if (esp1>=(void **)th->control_stack_start && esp1<(void **)th->control_stack_end) { if(esp1=(void **)c; ptr--) { - preserve_pointer(*ptr); - } + preserve_context_registers(c); } } } #else esp = (void **)((void *)&raise); #endif - for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) { + for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) { preserve_pointer(*ptr); } } } +#endif + #ifdef QSHOW if (gencgc_verbose > 1) { long num_dont_move_pages = count_dont_move_pages(); @@ -3710,6 +4028,15 @@ garbage_collect_generation(generation_index_t generation, int raise) /* Scavenge all the rest of the roots. */ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + /* + * If not x86, we need to scavenge the interrupt context(s) and the + * control stack. + */ + scavenge_interrupt_contexts(); + scavenge_control_stack(); +#endif + /* Scavenge the Lisp functions of the interrupt handlers, taking * care to avoid SIG_DFL and SIG_IGN. */ for (i = 0; i < NSIG; i++) { @@ -3723,7 +4050,7 @@ garbage_collect_generation(generation_index_t generation, int raise) { struct thread *th; for_each_thread(th) { - long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) - + long len= (lispobj *)get_binding_stack_pointer(th) - th->binding_stack_start; scavenge((lispobj *) th->binding_stack_start,len); #ifdef LISP_FEATURE_SB_THREAD @@ -3803,6 +4130,7 @@ garbage_collect_generation(generation_index_t generation, int raise) } #endif + scan_weak_hash_tables(); scan_weak_pointers(); /* Flush the current regions, updating the tables. */ @@ -3846,6 +4174,12 @@ garbage_collect_generation(generation_index_t generation, int raise) generations[generation].num_gc = 0; else ++generations[generation].num_gc; + +#ifdef LUTEX_WIDETAG + reap_lutexes(generation); + if (raise) + move_lutexes(generation, generation+1); +#endif } /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */ @@ -3861,8 +4195,8 @@ update_dynamic_space_free_pointer(void) last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); + set_alloc_pointer((lispobj)(((char *)heap_base) + + last_free_page*PAGE_BYTES)); return 0; /* dummy value: return something ... */ } @@ -3884,7 +4218,15 @@ remap_free_pages (page_index_t from, page_index_t to) last_page++; } + /* There's a mysterious Solaris/x86 problem with using mmap + * tricks for memory zeroing. See sbcl-devel thread + * "Re: patch: standalone executable redux". 
+ */ +#if defined(LISP_FEATURE_SUNOS) + zero_pages(first_page, last_page-1); +#else zero_pages_with_mmap(first_page, last_page-1); +#endif first_page = last_page; } @@ -3912,6 +4254,8 @@ collect_garbage(generation_index_t last_gen) FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen)); + gc_active_p = 1; + if (last_gen > HIGHEST_NORMAL_GENERATION+1) { FSHOW((stderr, "/collect_garbage: last_gen = %d, doing a level 0 GC\n", @@ -4013,7 +4357,9 @@ collect_garbage(generation_index_t last_gen) /* Save the high-water mark before updating last_free_page */ if (last_free_page > high_water_mark) high_water_mark = last_free_page; + update_dynamic_space_free_pointer(); + auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs; if(gencgc_verbose) fprintf(stderr,"Next gc when %ld bytes have been consed\n", @@ -4029,6 +4375,8 @@ collect_garbage(generation_index_t last_gen) high_water_mark = 0; } + gc_active_p = 0; + SHOW("returning from collect_garbage"); } @@ -4045,7 +4393,7 @@ gc_free_heap(void) if (gencgc_verbose > 1) SHOW("entering gc_free_heap"); - for (page = 0; page < NUM_PAGES; page++) { + for (page = 0; page < page_table_pages; page++) { /* Skip free pages which should already be zero filled. */ if (page_table[page].allocated != FREE_PAGE_FLAG) { void *page_start, *addr; @@ -4058,7 +4406,8 @@ gc_free_heap(void) page_table[page].allocated = FREE_PAGE_FLAG; page_table[page].bytes_used = 0; -#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */ +#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure + * about this change. */ /* Zero the page. */ page_start = (void *)page_address(page); @@ -4103,6 +4452,7 @@ gc_free_heap(void) generations[page].gc_trigger = 2000000; generations[page].num_gc = 0; generations[page].cum_sum_bytes_allocated = 0; + generations[page].lutexes = NULL; } if (gencgc_verbose > 1) @@ -4115,12 +4465,11 @@ gc_free_heap(void) gc_set_region_empty(&unboxed_region); last_free_page = 0; - SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0); + set_alloc_pointer((lispobj)((char *)heap_base)); if (verify_after_free_heap) { /* Check whether purify has left any bad pointers. */ - if (gencgc_verbose) - SHOW("checking after free_heap\n"); + FSHOW((stderr, "checking after free_heap\n")); verify_gc(); } } @@ -4130,15 +4479,28 @@ gc_init(void) { page_index_t i; + /* Compute the number of pages needed for the dynamic space. + * Dynamic space size should be aligned on page size. */ + page_table_pages = dynamic_space_size/PAGE_BYTES; + gc_assert(dynamic_space_size == (size_t) page_table_pages*PAGE_BYTES); + + page_table = calloc(page_table_pages, sizeof(struct page)); + gc_assert(page_table); + gc_init_tables(); - scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector; scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer; transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large; +#ifdef LUTEX_WIDETAG + scavtab[LUTEX_WIDETAG] = scav_lutex; + transother[LUTEX_WIDETAG] = trans_lutex; + sizetab[LUTEX_WIDETAG] = size_lutex; +#endif + heap_base = (void*)DYNAMIC_SPACE_START; /* Initialize each page structure. */ - for (i = 0; i < NUM_PAGES; i++) { + for (i = 0; i < page_table_pages; i++) { /* Initialize all pages as free. */ page_table[i].allocated = FREE_PAGE_FLAG; page_table[i].bytes_used = 0; @@ -4165,6 +4527,7 @@ gc_init(void) generations[i].bytes_consed_between_gc = 2000000; generations[i].trigger_age = 1; generations[i].min_av_mem_age = 0.75; + generations[i].lutexes = NULL; } /* Initialize gc_alloc. 
*/ @@ -4184,7 +4547,7 @@ static void gencgc_pickup_dynamic(void) { page_index_t page = 0; - long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); + long alloc_ptr = get_alloc_pointer(); lispobj *prev=(lispobj *)page_address(page); generation_index_t gen = PSEUDO_STATIC_GENERATION; @@ -4208,6 +4571,13 @@ gencgc_pickup_dynamic(void) page++; } while ((long)page_address(page) < alloc_ptr); +#ifdef LUTEX_WIDETAG + /* Lutexes have been registered in generation 0 by coreparse, and + * need to be moved to the right one manually. + */ + move_lutexes(0, PSEUDO_STATIC_GENERATION); +#endif + last_free_page = page; generations[gen].bytes_allocated = PAGE_BYTES*page; @@ -4238,7 +4608,7 @@ gc_initialize_pointers(void) * The check for a GC trigger is only performed when the current * region is full, so in most cases it's not needed. */ -char * +lispobj * alloc(long nbytes) { struct thread *thread=arch_os_get_current_thread(); @@ -4248,19 +4618,25 @@ alloc(long nbytes) #else &boxed_region; #endif +#ifndef LISP_FEATURE_WIN32 + lispobj alloc_signal; +#endif void *new_obj; void *new_free_pointer; + gc_assert(nbytes>0); + /* Check for alignment allocation problems. */ gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0) && ((nbytes & LOWTAG_MASK) == 0)); + #if 0 if(all_threads) /* there are a few places in the C code that allocate data in the * heap before Lisp starts. This is before interrupts are enabled, * so we don't need to check for pseudo-atomic */ #ifdef LISP_FEATURE_SB_THREAD - if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) { + if(!get_psuedo_atomic_atomic(th)) { register u32 fs; fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n", th,th->os_thread); @@ -4270,7 +4646,7 @@ alloc(long nbytes) lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); } #else - gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)); + gc_assert(get_pseudo_atomic_atomic(th)); #endif #endif @@ -4286,7 +4662,7 @@ alloc(long nbytes) * we should GC in the near future */ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread))); + gc_assert(get_pseudo_atomic_atomic(thread)); /* Don't flood the system with interrupts if the need to gc is * already noted. This can happen for example when SUB-GC * allocates or after a gc triggered in a WITHOUT-GCING. */ @@ -4295,10 +4671,29 @@ alloc(long nbytes) * section */ SetSymbolValue(GC_PENDING,T,thread); if (SymbolValue(GC_INHIBIT,thread) == NIL) - arch_set_pseudo_atomic_interrupted(0); + set_pseudo_atomic_interrupted(thread); } } new_obj = gc_alloc_with_region(nbytes,0,region,0); + +#ifndef LISP_FEATURE_WIN32 + alloc_signal = SymbolValue(ALLOC_SIGNAL,thread); + if ((alloc_signal & FIXNUM_TAG_MASK) == 0) { + if ((signed long) alloc_signal <= 0) { + SetSymbolValue(ALLOC_SIGNAL, T, thread); +#ifdef LISP_FEATURE_SB_THREAD + kill_thread_safely(thread->os_thread, SIGPROF); +#else + raise(SIGPROF); +#endif + } else { + SetSymbolValue(ALLOC_SIGNAL, + alloc_signal - (1 << N_FIXNUM_TAG_BITS), + thread); + } + } +#endif + return (new_obj); } @@ -4307,7 +4702,7 @@ alloc(long nbytes) * catch GENCGC-related write-protect violations */ -void unhandled_sigmemoryfault(void); +void unhandled_sigmemoryfault(void* addr); /* Depending on which OS we're running under, different signals might * be raised for a violation of write protection in the heap. 
This @@ -4334,7 +4729,7 @@ gencgc_handle_wp_violation(void* fault_addr) /* It can be helpful to be able to put a breakpoint on this * case to help diagnose low-level problems. */ - unhandled_sigmemoryfault(); + unhandled_sigmemoryfault(fault_addr); /* not within the dynamic space -- not our responsibility */ return 0; @@ -4353,7 +4748,9 @@ gencgc_handle_wp_violation(void* fault_addr) * does this test after the first one has already set wp=0 */ if(page_table[page_index].write_protected_cleared != 1) - lose("fault in heap page not marked as write-protected\n"); + lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n", + page_index, boxed_region.first_page, + boxed_region.last_page); } /* Don't worry, we can handle it. */ return 1; @@ -4364,7 +4761,7 @@ gencgc_handle_wp_violation(void* fault_addr) * are about to let Lisp deal with it. It's basically just a * convenient place to set a gdb breakpoint. */ void -unhandled_sigmemoryfault() +unhandled_sigmemoryfault(void *addr) {} void gc_alloc_update_all_page_tables(void) @@ -4439,7 +4836,8 @@ gc_and_save(char *filename, int prepend_runtime) void *runtime_bytes = NULL; size_t runtime_size; - file = prepare_to_save(filename, prepend_runtime, &runtime_bytes, &runtime_size); + file = prepare_to_save(filename, prepend_runtime, &runtime_bytes, + &runtime_size); if (file == NULL) return;
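
The change above also makes the fault path easier to follow from the caller's side: gencgc_handle_wp_violation() is the OS-independent half of the write barrier, and given a faulting address it decides whether the fault was a GENCGC write-protection trap and, if so, un-protects the page so the write can be retried. The sketch below shows roughly how an OS-level memory-fault handler is expected to delegate to it, as described in the comment preceding the function. It is illustrative only: the handler and installer names are hypothetical, and the runtime's real per-OS handlers do additional bookkeeping around this call.

#include <signal.h>

extern int gencgc_handle_wp_violation(void *fault_addr);

/* Hypothetical SA_SIGINFO-style fault handler: ask the GC whether it
 * caused the fault before treating it as a genuine error. */
static void
example_memory_fault_handler(int sig, siginfo_t *info, void *context)
{
    /* gencgc_handle_wp_violation() returns 1 if the faulting address
     * lies on a dynamic-space page that GENCGC had write-protected;
     * the page has been un-protected, so simply returning from the
     * handler retries the faulting write. */
    if (gencgc_handle_wp_violation(info->si_addr))
        return;

    /* Otherwise the fault is not the GC's responsibility; a real
     * handler would pass it on to the runtime's error machinery. */
}

/* Hypothetical installer; some ports trap SIGBUS instead of SIGSEGV. */
static void
example_install_fault_handler(void)
{
    struct sigaction sa;
    sa.sa_sigaction = example_memory_fault_handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);
}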