diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index fd982f9..9fb8e44 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -43,9 +43,16 @@
 #include "thread.h"
 #include "genesis/vector.h"
 #include "genesis/weak-pointer.h"
+#include "genesis/fdefn.h"
 #include "genesis/simple-fun.h"
 #include "save.h"
 #include "genesis/hash-table.h"
+#include "genesis/instance.h"
+#include "genesis/layout.h"
+
+#ifdef LUTEX_WIDETAG
+#include "genesis/lutex.h"
+#endif
 
 /* forward declarations */
 page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes,
@@ -70,22 +77,6 @@ enum {
  * that don't have pointers to younger generations? */
 boolean enable_page_protection = 1;
 
-/* Should we unmap a page and re-mmap it to have it zero filled? */
-#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__sun)
-/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
- * so don't unmap there.
- *
- * The CMU CL comment didn't specify a version, but was probably an
- * old version of FreeBSD (pre-4.0), so this might no longer be true.
- * OTOH, if it is true, this behavior might exist on OpenBSD too, so
- * for now we don't unmap there either. -- WHN 2001-04-07 */
-/* Apparently this flag is required to be 0 for SunOS/x86, as there
- * are reports of heap corruption otherwise. */
-boolean gencgc_unmap_zero = 0;
-#else
-boolean gencgc_unmap_zero = 1;
-#endif
-
 /* the minimum size (in bytes) for a large object*/
 unsigned long large_object_size = 4 * PAGE_BYTES;
 
@@ -139,6 +130,13 @@ boolean gencgc_zero_check_during_free_heap = 0;
  * contained a pagetable entry). */
 boolean gencgc_partial_pickup = 0;
 
+/* If defined, free pages are read-protected to ensure that nothing
+ * accesses them.
+ */
+
+/* #define READ_PROTECT_FREE_PAGES */
+
 /*
  * GC structures and variables
@@ -154,6 +152,9 @@ unsigned long auto_gc_trigger = 0;
 generation_index_t from_space;
 generation_index_t new_space;
 
+/* Set to 1 when in GC */
+boolean gc_active_p = 0;
+
 /* should the GC be conservative on stack. If false (only right before
  * saving a core), don't scan the stack / mark pages dont_move. */
 static boolean conservative_stack = 1;
@@ -241,6 +242,14 @@ struct generation {
      * prevent a GC when a large number of new live objects have been
      * added, in which case a GC could be a waste of time */
     double min_av_mem_age;
+
+    /* A linked list of lutex structures in this generation, used for
+     * implementing lutex finalization. */
+#ifdef LUTEX_WIDETAG
+    struct lutex *lutexes;
+#else
+    void *lutexes;
+#endif
 };
 
 /* an array of generation structures.
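
An aside on the READ_PROTECT_FREE_PAGES debugging aid added above: os_protect is a thin wrapper over the usual POSIX protection call, and the trick is simply to revoke all access on pages the GC considers free, so a stray use of a dangling pointer faults immediately instead of silently corrupting the heap. A minimal standalone sketch of the idea (not SBCL's code; assumes Linux/BSD mmap):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        /* Take one anonymous page, as the GC does for its heap. */
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) return 1;
        p[0] = 42;
        /* "Free" the page: revoke access so any late use faults. */
        mprotect(p, page, PROT_NONE);
        /* Reading p[0] here would raise SIGSEGV. Restore access
         * before handing the page out again, as the allocator does. */
        mprotect(p, page, PROT_READ | PROT_WRITE);
        printf("%d\n", p[0]);
        return 0;
    }
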
There needs to be one more @@ -308,7 +317,7 @@ count_generation_pages(generation_index_t generation) long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != 0) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].gen == generation)) count++; return count; @@ -321,7 +330,8 @@ count_dont_move_pages(void) page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) { + if ((page_table[i].allocated != FREE_PAGE_FLAG) + && (page_table[i].dont_move != 0)) { ++count; } } @@ -337,7 +347,8 @@ count_generation_bytes_allocated (generation_index_t gen) page_index_t i; long result = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].gen == gen)) + if ((page_table[i].allocated != FREE_PAGE_FLAG) + && (page_table[i].gen == gen)) result += page_table[i].bytes_used; } return result; @@ -355,15 +366,20 @@ gen_av_mem_age(generation_index_t gen) / ((double)generations[gen].bytes_allocated); } -void fpu_save(int *); /* defined in x86-assem.S */ -void fpu_restore(int *); /* defined in x86-assem.S */ /* The verbose argument controls how much to print: 0 for normal * level of detail; 1 for debugging. */ static void print_generation_stats(int verbose) /* FIXME: should take FILE argument */ { generation_index_t i, gens; - int fpu_state[27]; + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) +#define FPU_STATE_SIZE 27 + int fpu_state[FPU_STATE_SIZE]; +#elif defined(LISP_FEATURE_PPC) +#define FPU_STATE_SIZE 32 + long long fpu_state[FPU_STATE_SIZE]; +#endif /* This code uses the FP instructions which may be set up for Lisp * so they need to be saved and reset for C. */ @@ -377,7 +393,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Print the heap stats. */ fprintf(stderr, - " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); + " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); for (i = 0; i < gens; i++) { page_index_t j; @@ -412,13 +428,19 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ gc_assert(generations[i].bytes_allocated == count_generation_bytes_allocated(i)); fprintf(stderr, - " %1d: %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n", + " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n", i, - boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt, + generations[i].alloc_start_page, + generations[i].alloc_unboxed_start_page, + generations[i].alloc_large_start_page, + generations[i].alloc_large_unboxed_start_page, + boxed_cnt, + unboxed_cnt, + large_boxed_cnt, + large_unboxed_cnt, pinned_cnt, generations[i].bytes_allocated, - (count_generation_pages(i)*PAGE_BYTES - - generations[i].bytes_allocated), + (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated), generations[i].gc_trigger, count_write_protect_generation_pages(i), generations[i].num_gc, @@ -429,9 +451,70 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ fpu_restore(fpu_state); } -/* - * allocation routines + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) +void fast_bzero(void*, size_t); /* in -assem.S */ +#endif + +/* Zero the pages from START to END (inclusive), but use mmap/munmap instead + * if zeroing it ourselves, i.e. in practice give the memory back to the + * OS. Generally done after a large GC. 
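
The counting hunks above replace bare != 0 tests on page_table[i].allocated with != FREE_PAGE_FLAG, which states the intent and keeps the loops correct even if the flag encoding is ever changed. The idiom in a toy model (flag values are illustrative, not the ones from gencgc-internal.h):

    #include <stdio.h>

    enum page_kind {                 /* illustrative values only */
        FREE_PAGE_FLAG    = 0,
        BOXED_PAGE_FLAG   = 1,
        UNBOXED_PAGE_FLAG = 2
    };

    struct page { enum page_kind allocated; int gen; };

    /* Count allocated pages in one generation, naming the sentinel
     * rather than relying on its numeric value. */
    static long count_generation_pages(const struct page *tab,
                                       long npages, int gen) {
        long i, n = 0;
        for (i = 0; i < npages; i++)
            if (tab[i].allocated != FREE_PAGE_FLAG && tab[i].gen == gen)
                n++;
        return n;
    }

    int main(void) {
        struct page tab[] = {{BOXED_PAGE_FLAG, 0}, {FREE_PAGE_FLAG, 0},
                             {UNBOXED_PAGE_FLAG, 1}};
        printf("%ld\n", count_generation_pages(tab, 3, 0)); /* prints 1 */
        return 0;
    }
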
*/ +void zero_pages_with_mmap(page_index_t start, page_index_t end) { + int i; + void *addr = (void *) page_address(start), *new_addr; + size_t length = PAGE_BYTES*(1+end-start); + + if (start > end) + return; + + os_invalidate(addr, length); + new_addr = os_validate(addr, length); + if (new_addr == NULL || new_addr != addr) { + lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", start, new_addr); + } + + for (i = start; i <= end; i++) { + page_table[i].need_to_zero = 0; + } +} + +/* Zero the pages from START to END (inclusive). Generally done just after + * a new region has been allocated. + */ +static void +zero_pages(page_index_t start, page_index_t end) { + if (start > end) + return; + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + fast_bzero(page_address(start), PAGE_BYTES*(1+end-start)); +#else + bzero(page_address(start), PAGE_BYTES*(1+end-start)); +#endif + +} + +/* Zero the pages from START to END (inclusive), except for those + * pages that are known to already zeroed. Mark all pages in the + * ranges as non-zeroed. + */ +static void +zero_dirty_pages(page_index_t start, page_index_t end) { + page_index_t i; + + for (i = start; i <= end; i++) { + if (page_table[i].need_to_zero == 1) { + zero_pages(start, end); + break; + } + } + + for (i = start; i <= end; i++) { + page_table[i].need_to_zero = 1; + } +} + /* * To support quick and inline allocation, regions of memory can be @@ -517,6 +600,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) page_index_t last_page; long bytes_found; page_index_t i; + int ret; /* FSHOW((stderr, @@ -528,7 +612,8 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (unboxed) { first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; @@ -586,11 +671,11 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) /* Bump up last_free_page. */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), - 0); + /* do we only want to call this on special occasions? like for boxed_region? */ + set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES)); } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); /* we can do this after releasing free_pages_lock */ if (gencgc_zero_check) { @@ -602,10 +687,26 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) * (long) in code like this, so that it is less likely to * break randomly when running on a machine with different * word sizes. -- WHN 19991129 */ - lose("The new region at %x is not zero.", p); + lose("The new region at %x is not zero.\n", p); } } } + +#ifdef READ_PROTECT_FREE_PAGES + os_protect(page_address(first_page), + PAGE_BYTES*(1+last_page-first_page), + OS_VM_PROT_ALL); +#endif + + /* If the first page was only partial, don't check whether it's + * zeroed (it won't be) and don't zero it (since the parts that + * we're interested in are guaranteed to be zeroed). 
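
The need_to_zero bit introduced here gives every page a simple life cycle: known zero, in use, or free-but-dirty. zero_dirty_pages pays the zeroing cost only when some page in the range was actually dirtied, and the whole range is then marked dirty because the allocator is about to write into it. A standalone model of the protocol (simplified page table, not the real struct):

    #include <string.h>

    #define PAGE_BYTES 4096
    #define NPAGES 16

    static struct { int need_to_zero; } page_table[NPAGES];
    static char heap[NPAGES * PAGE_BYTES];

    static void zero_dirty_pages(int start, int end) {
        int i;
        /* One bulk memset if anything in the range is dirty; cheaper
         * than per-page bookkeeping for the common contiguous case. */
        for (i = start; i <= end; i++) {
            if (page_table[i].need_to_zero) {
                memset(heap + (size_t)start * PAGE_BYTES, 0,
                       (size_t)(end - start + 1) * PAGE_BYTES);
                break;
            }
        }
        for (i = start; i <= end; i++)
            page_table[i].need_to_zero = 1;
    }
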
+ */ + if (page_table[first_page].bytes_used) { + first_page++; + } + + zero_dirty_pages(first_page, last_page); } /* If the record_new_objects flag is 2 then all new regions created @@ -716,6 +817,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) long orig_first_page_bytes_used; long region_size; long byte_cnt; + int ret; first_page = alloc_region->first_page; @@ -726,7 +828,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (alloc_region->free_pointer != alloc_region->start_addr) { /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; @@ -830,7 +933,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); + /* alloc_region is per-thread, we're ok to do this unlocked */ gc_set_region_empty(alloc_region); } @@ -848,8 +953,10 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) int more; long bytes_used; page_index_t next_page; + int ret; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (unboxed) { first_page = @@ -947,16 +1054,50 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) /* Bump up last_free_page */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); + set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES)); } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); - return((void *)(page_address(first_page)+orig_first_page_bytes_used)); +#ifdef READ_PROTECT_FREE_PAGES + os_protect(page_address(first_page), + PAGE_BYTES*(1+last_page-first_page), + OS_VM_PROT_ALL); +#endif + + zero_dirty_pages(first_page, last_page); + + return page_address(first_page); } static page_index_t gencgc_alloc_start_page = -1; +void +gc_heap_exhausted_error_or_lose (long available, long requested) +{ + /* Write basic information before doing anything else: if we don't + * call to lisp this is a must, and even if we do there is always the + * danger that we bounce back here before the error has been handled, + * or indeed even printed. + */ + fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n", + gc_active_p ? "garbage collection" : "allocation", available, requested); + if (gc_active_p || (available == 0)) { + /* If we are in GC, or totally out of memory there is no way + * to sanely transfer control to the lisp-side of things. + */ + print_generation_stats(1); + lose("Heap exhausted, game over."); + } + else { + /* FIXME: assert free_pages_lock held */ + thread_mutex_unlock(&free_pages_lock); + funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR), + make_fixnum(available), make_fixnum(requested)); + lose("HEAP-EXHAUSTED-ERROR fell through"); + } +} + page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) { @@ -999,13 +1140,8 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) first_page++; } - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! 
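
A recurring change in the hunks above: the pthread calls on free_pages_lock no longer have their return values discarded, because a lock or unlock that fails silently would let two threads mutate the page tables at once. The pattern in isolation (plain assert standing in for gc_assert):

    #include <assert.h>
    #include <pthread.h>

    static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
    static long last_free_page;

    void bump_last_free_page(long page) {
        int ret = pthread_mutex_lock(&free_pages_lock);
        assert(ret == 0);        /* EINVAL etc. must not pass unnoticed */
        if (page > last_free_page)
            last_free_page = page;
        ret = pthread_mutex_unlock(&free_pages_lock);
        assert(ret == 0);
    }
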
gc_find_free_space failed (first_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } + if (first_page >= NUM_PAGES) + gc_heap_exhausted_error_or_lose(0, nbytes); gc_assert(page_table[first_page].write_protected == 0); @@ -1030,13 +1166,9 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); /* Check for a failure */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } + if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) + gc_heap_exhausted_error_or_lose(bytes_found, nbytes); + *restart_page_ptr=first_page; return last_page; @@ -1733,6 +1865,8 @@ trans_unboxed_large(lispobj object) /* FIXME: What does this mean? */ int gencgc_hash = 1; +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + static long scav_vector(lispobj *where, lispobj object) { @@ -1768,12 +1902,12 @@ scav_vector(lispobj *where, lispobj object) /* Scavenge element 0, which may be a hash-table structure. */ scavenge(where+2, 1); if (!is_lisp_pointer(where[2])) { - lose("no pointer at %x in hash table", where[2]); + lose("no pointer at %x in hash table\n", where[2]); } hash_table = (struct hash_table *)native_pointer(where[2]); /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/ if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) { - lose("hash table not instance (%x at %x)", + lose("hash table not instance (%x at %x)\n", hash_table->header, hash_table); } @@ -1782,13 +1916,13 @@ scav_vector(lispobj *where, lispobj object) * the hash table code reserves for marking empty slots. */ scavenge(where+3, 1); if (!is_lisp_pointer(where[3])) { - lose("not empty-hash-table-slot symbol pointer: %x", where[3]); + lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]); } empty_symbol = where[3]; /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/ if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) != SYMBOL_HEADER_WIDETAG) { - lose("not a symbol where empty-hash-table-slot symbol expected: %x", + lose("not a symbol where empty-hash-table-slot symbol expected: %x\n", *(lispobj *)native_pointer(empty_symbol)); } @@ -1799,7 +1933,7 @@ scav_vector(lispobj *where, lispobj object) /* Cross-check the kv_vector. 
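
Both failure paths in gc_find_freeish_pages now funnel into gc_heap_exhausted_error_or_lose, whose logic splits on one question: is it still safe to unwind into Lisp? A reduced sketch of that decision (abort stands in for lose; the call back into Lisp is elided):

    #include <stdio.h>
    #include <stdlib.h>

    static int gc_active_p;          /* nonzero while the collector runs */

    static void heap_exhausted(long available, long requested) {
        /* Print first: if control ever bounces back here, the error
         * might otherwise never get reported at all. */
        fprintf(stderr,
                "Heap exhausted during %s: %ld available, %ld requested.\n",
                gc_active_p ? "garbage collection" : "allocation",
                available, requested);
        if (gc_active_p || available == 0)
            abort();                 /* no sane way back into Lisp */
        /* otherwise: drop free_pages_lock, then signal the Lisp-side
         * HEAP-EXHAUSTED-ERROR so user code can recover */
    }
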
*/ if (where != (lispobj *)native_pointer(hash_table->table)) { - lose("hash_table table!=this table %x", hash_table->table); + lose("hash_table table!=this table %x\n", hash_table->table); } /* WEAK-P */ @@ -1818,7 +1952,7 @@ scav_vector(lispobj *where, lispobj object) length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]); /*FSHOW((stderr, "/length = %d\n", length));*/ } else { - lose("invalid index_vector %x", index_vector_obj); + lose("invalid index_vector %x\n", index_vector_obj); } } @@ -1834,7 +1968,7 @@ scav_vector(lispobj *where, lispobj object) next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]); /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/ } else { - lose("invalid next_vector %x", next_vector_obj); + lose("invalid next_vector %x\n", next_vector_obj); } } @@ -1939,6 +2073,192 @@ scav_vector(lispobj *where, lispobj object) return (CEILING(kv_length + 2, 2)); } +#else + +static long +scav_vector(lispobj *where, lispobj object) +{ + if (HeaderValue(object) == subtype_VectorValidHashing) { + *where = + (subtype_VectorMustRehash<= 0); + gc_assert(gen < NUM_GENERATIONS); + + head = generations[gen].lutexes; + + lutex->gen = gen; + lutex->next = head; + lutex->prev = NULL; + if (head) + head->prev = lutex; + generations[gen].lutexes = lutex; +} + +/* + * Stop tracking LUTEX in the GC by removing it from the appropriate + * linked lists. This will only be called during GC, so no locking is + * needed. + */ +void +gencgc_unregister_lutex (struct lutex *lutex) { + if (lutex->prev) { + lutex->prev->next = lutex->next; + } else { + generations[lutex->gen].lutexes = lutex->next; + } + + if (lutex->next) { + lutex->next->prev = lutex->prev; + } + + lutex->next = NULL; + lutex->prev = NULL; + lutex->gen = -1; +} + +/* + * Mark all lutexes in generation GEN as not live. + */ +static void +unmark_lutexes (generation_index_t gen) { + struct lutex *lutex = generations[gen].lutexes; + + while (lutex) { + lutex->live = 0; + lutex = lutex->next; + } +} + +/* + * Finalize all lutexes in generation GEN that have not been marked live. + */ +static void +reap_lutexes (generation_index_t gen) { + struct lutex *lutex = generations[gen].lutexes; + + while (lutex) { + struct lutex *next = lutex->next; + if (!lutex->live) { + lutex_destroy(lutex); + gencgc_unregister_lutex(lutex); + } + lutex = next; + } +} + +/* + * Mark LUTEX as live. + */ +static void +mark_lutex (lispobj tagged_lutex) { + struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex); + + lutex->live = 1; +} + +/* + * Move all lutexes in generation FROM to generation TO. + */ +static void +move_lutexes (generation_index_t from, generation_index_t to) { + struct lutex *tail = generations[from].lutexes; + + /* Nothing to move */ + if (!tail) + return; + + /* Change the generation of the lutexes in FROM. 
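
unmark_lutexes, mark_lutex, and reap_lutexes above amount to a small mark-and-sweep over an intrusive doubly-linked list, run alongside the copying collector so that OS-level lutex resources get finalized. One cycle, modeled standalone (fields reduced to what the protocol needs; free stands in for lutex_destroy):

    #include <stdlib.h>

    struct lutex { struct lutex *next, *prev; int live; };

    static struct lutex *gen_lutexes;  /* one list per generation in SBCL */

    static void reap_lutexes(void) {
        struct lutex *l = gen_lutexes;
        while (l) {
            struct lutex *next = l->next;     /* save before freeing */
            if (!l->live) {
                if (l->prev) l->prev->next = l->next;
                else         gen_lutexes   = l->next;
                if (l->next) l->next->prev = l->prev;
                free(l);                      /* lutex_destroy() in SBCL */
            }
            l = next;
        }
    }

Scavenging sets live on every lutex reachable from Lisp; whatever is still unmarked once the generation has been collected is garbage and is destroyed.
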
*/ + while (tail->next) { + tail->gen = to; + tail = tail->next; + } + tail->gen = to; + + /* Link the last lutex in the FROM list to the start of the TO list */ + tail->next = generations[to].lutexes; + + /* And vice versa */ + if (generations[to].lutexes) { + generations[to].lutexes->prev = tail; + } + + /* And update the generations structures to match this */ + generations[to].lutexes = generations[from].lutexes; + generations[from].lutexes = NULL; +} + +static long +scav_lutex(lispobj *where, lispobj object) +{ + mark_lutex((lispobj) where); + + return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); +} + +static lispobj +trans_lutex(lispobj object) +{ + struct lutex *lutex = native_pointer(object); + lispobj copied; + size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); + gc_assert(is_lisp_pointer(object)); + copied = copy_object(object, words); + + /* Update the links, since the lutex moved in memory. */ + if (lutex->next) { + lutex->next->prev = native_pointer(copied); + } + + if (lutex->prev) { + lutex->prev->next = native_pointer(copied); + } else { + generations[lutex->gen].lutexes = native_pointer(copied); + } + + return copied; +} + +static long +size_lutex(lispobj *where) +{ + return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2); +} +#endif /* LUTEX_WIDETAG */ /* @@ -2278,6 +2598,9 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) #endif case SAP_WIDETAG: case WEAK_POINTER_WIDETAG: +#ifdef LUTEX_WIDETAG + case LUTEX_WIDETAG: +#endif break; default: @@ -2300,6 +2623,8 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) return 1; } +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + /* Adjust large bignum and vector objects. This will adjust the * allocated region if the size has shrunk, and move unboxed objects * into unboxed pages. The pages are not promoted here, and the @@ -2474,6 +2799,8 @@ maybe_adjust_large_object(lispobj *where) return; } +#endif + /* Take a possible pointer to a Lisp object and mark its page in the * page_table so that it will not be relocated during a GC. * @@ -2486,6 +2813,9 @@ maybe_adjust_large_object(lispobj *where) * * It is also assumed that the current gc_alloc() region has been * flushed and the tables updated. */ + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + static void preserve_pointer(void *addr) { @@ -2601,6 +2931,9 @@ preserve_pointer(void *addr) /* Check that the page is now static. */ gc_assert(page_table[addr_page_index].dont_move != 0); } + +#endif + /* If the given page is not write-protected, then scan it for pointers * to younger generations or the top temp. generation, if no @@ -2675,11 +3008,9 @@ update_page_write_prot(page_index_t page) return (wp_it); } -/* Scavenge a generation. - * - * This will not resolve all pointers when generation is the new - * space, as new objects may be added which are not checked here - use - * scavenge_newspace generation. +/* Scavenge all generations from FROM to TO, inclusive, except for + * new_space which needs special handling, as new objects may be + * added which are not checked here - use scavenge_newspace generation. * * Write-protected pages should not have any pointers to the * from_space so do need scavenging; thus write-protected pages are @@ -2707,7 +3038,7 @@ update_page_write_prot(page_index_t page) * pointers as the objects contain a link to the next and are written * if a weak pointer is scavenged. Still it's a useful check. 
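
update_page_write_prot, referenced above, implements the write-barrier half of the generational invariant: a page holding no pointers to younger generations can be write-protected, so a later protection fault, rather than a full rescan, is what tells the GC the page was dirtied. The core test, heavily simplified (gen_of is an assumed helper returning the generation a heap pointer lands in, or -1 for a non-pointer word):

    #include <stdint.h>
    #include <sys/mman.h>

    #define PAGE_BYTES 4096

    extern int gen_of(uintptr_t word);

    /* Returns 1 if PAGE (page-aligned, PAGE_BYTES long) was protected. */
    int maybe_write_protect(void *page, int page_gen) {
        uintptr_t *p = page, *end = p + PAGE_BYTES / sizeof *p;
        for (; p < end; p++) {
            int g = gen_of(*p);
            if (g >= 0 && g < page_gen)
                return 0;       /* young pointer: page must stay writable */
        }
        mprotect(page, PAGE_BYTES, PROT_READ | PROT_EXEC);
        return 1;
    }
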
*/ static void -scavenge_generation(generation_index_t generation) +scavenge_generations(generation_index_t from, generation_index_t to) { page_index_t i; int num_wp = 0; @@ -2720,9 +3051,12 @@ scavenge_generation(generation_index_t generation) #endif for (i = 0; i < last_free_page; i++) { + generation_index_t generation = page_table[i].gen; if ((page_table[i].allocated & BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) - && (page_table[i].gen == generation)) { + && (generation != new_space) + && (generation >= from) + && (generation <= to)) { page_index_t last_page,j; int write_protected=1; @@ -2753,15 +3087,15 @@ scavenge_generation(generation_index_t generation) num_wp += update_page_write_prot(j); } } + if ((gencgc_verbose > 1) && (num_wp != 0)) { + FSHOW((stderr, + "/write protected %d pages within generation %d\n", + num_wp, generation)); + } } i = last_page; } } - if ((gencgc_verbose > 1) && (num_wp != 0)) { - FSHOW((stderr, - "/write protected %d pages within generation %d\n", - num_wp, generation)); - } #if SC_GEN_CK /* Check that none of the write_protected pages in this generation @@ -2777,7 +3111,7 @@ scavenge_generation(generation_index_t generation) page_table[i].bytes_used, page_table[i].first_object_offset, page_table[i].dont_move)); - lose("write to protected page %d in scavenge_generation()", i); + lose("write to protected page %d in scavenge_generation()\n", i); } } #endif @@ -2993,7 +3327,7 @@ scavenge_newspace_generation(generation_index_t generation) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) && (page_table[i].dont_move == 0)) { - lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d", + lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n", i, generation, page_table[i].dont_move); } } @@ -3079,31 +3413,12 @@ free_oldspace(void) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); - /* Zero pages from first_page to (last_page-1). - * - * FIXME: Why not use os_zero(..) function instead of - * hand-coding this again? (Check other gencgc_unmap_zero - * stuff too. */ - if (gencgc_unmap_zero) { - void *page_start, *addr; - - page_start = (void *)page_address(first_page); - - os_invalidate(page_start, PAGE_BYTES*(last_page-first_page)); - addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page)); - if (addr == NULL || addr != page_start) { - lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start, - addr); - } - } else { - long *page_start; - - page_start = (long *)page_address(first_page); - memset(page_start, 0,PAGE_BYTES*(last_page-first_page)); - } - +#ifdef READ_PROTECT_FREE_PAGES + os_protect(page_address(first_page), + PAGE_BYTES*(last_page-first_page), + OS_VM_PROT_NONE); +#endif first_page = last_page; - } while (first_page < last_free_page); bytes_allocated -= bytes_freed; @@ -3140,7 +3455,12 @@ print_ptr(lispobj *addr) } #endif -extern long undefined_tramp; +#if defined(LISP_FEATURE_PPC) +extern int closure_tramp; +extern int undefined_tramp; +#else +extern int undefined_tramp; +#endif static void verify_space(lispobj *start, size_t words) @@ -3169,15 +3489,15 @@ verify_space(lispobj *start, size_t words) * page. XX Could check the offset too. 
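
verify_space, which begins here, vets each candidate pointer with a short ladder of checks before trusting it, as the hunks below show. The ladder in miniature (an untagged-pointer model; the predicates are assumed stand-ins for the page-table and space tests used in the real code):

    #include <stdint.h>

    typedef uintptr_t lispobj;

    extern int dynamic_page_allocated(lispobj thing);  /* assumed */
    extern int in_readonly_space(lispobj *where);      /* assumed */

    /* Returns NULL if THING (stored at WHERE) looks consistent,
     * otherwise a diagnostic suitable for lose(). */
    const char *check_pointer(lispobj *where, lispobj thing) {
        if (!dynamic_page_allocated(thing))
            return "Ptr sees free page";
        if (*(lispobj *)thing == 0x01)    /* forwarding-pointer marker */
            return "Ptr sees forwarding ptr";
        if (in_readonly_space(where))
            return "ptr to dynamic space from RO space";
        return 0;
    }
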
*/ if ((page_table[page_index].allocated != FREE_PAGE_FLAG) && (page_table[page_index].bytes_used == 0)) - lose ("Ptr %x @ %x sees free page.", thing, start); + lose ("Ptr %x @ %x sees free page.\n", thing, start); /* Check that it doesn't point to a forwarding pointer! */ if (*((lispobj *)native_pointer(thing)) == 0x01) { - lose("Ptr %x @ %x sees forwarding ptr.", thing, start); + lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start); } /* Check that its not in the RO space as it would then be a * pointer from the RO to the dynamic space. */ if (is_in_readonly_space) { - lose("ptr to dynamic space %x from RO space %x", + lose("ptr to dynamic space %x from RO space %x\n", thing, start); } /* Does it point to a plausible object? This check slows @@ -3191,14 +3511,20 @@ verify_space(lispobj *start, size_t words) * dynamically. */ /* if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) { - lose("ptr %x to invalid object %x", thing, start); + lose("ptr %x to invalid object %x\n", thing, start); } */ } else { /* Verify that it points to another valid space. */ - if (!to_readonly_space && !to_static_space - && (thing != (unsigned long)&undefined_tramp)) { - lose("Ptr %x @ %x sees junk.", thing, start); + if (!to_readonly_space && !to_static_space && +#if defined(LISP_FEATURE_PPC) + !((thing == &closure_tramp) || + (thing == &undefined_tramp)) +#else + thing != (unsigned long)&undefined_tramp +#endif + ) { + lose("Ptr %x @ %x sees junk.\n", thing, start); } } } else { @@ -3228,11 +3554,24 @@ verify_space(lispobj *start, size_t words) case SINGLE_FLOAT_WIDETAG: #endif case UNBOUND_MARKER_WIDETAG: - case INSTANCE_HEADER_WIDETAG: case FDEFN_WIDETAG: count = 1; break; + case INSTANCE_HEADER_WIDETAG: + { + lispobj nuntagged; + long ntotal = HeaderValue(thing); + lispobj layout = ((struct instance *)start)->slots[0]; + if (!layout) { + count = 1; + break; + } + nuntagged = ((struct layout *)native_pointer(layout))->n_untagged_slots; + verify_space(start + 1, ntotal - fixnum_value(nuntagged)); + count = ntotal + 1; + break; + } case CODE_HEADER_WIDETAG: { lispobj object = *start; @@ -3365,10 +3704,17 @@ verify_space(lispobj *start, size_t words) #endif case SAP_WIDETAG: case WEAK_POINTER_WIDETAG: +#ifdef LUTEX_WIDETAG + case LUTEX_WIDETAG: +#endif count = (sizetab[widetag_of(*start)])(start); break; default: + FSHOW((stderr, + "/Unhandled widetag 0x%x at 0x%x\n", + widetag_of(*start), start)); + fflush(stderr); gc_abort(); } } @@ -3396,7 +3742,7 @@ verify_gc(void) struct thread *th; for_each_thread(th) { long binding_stack_size = - (lispobj*)SymbolValue(BINDING_STACK_POINTER,th) + (lispobj*)get_binding_stack_pointer(th) - (lispobj*)th->binding_stack_start; verify_space(th->binding_stack_start, binding_stack_size); } @@ -3456,7 +3802,7 @@ verify_zero_fill(void) long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { - lose("free page not zero at %x", start_addr + i); + lose("free page not zero at %x\n", start_addr + i); } } } else { @@ -3468,7 +3814,7 @@ verify_zero_fill(void) long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { - lose("free region not zero at %x", start_addr + i); + lose("free region not zero at %x\n", start_addr + i); } } } @@ -3502,26 +3848,39 @@ verify_dynamic_space(void) static void write_protect_generation_pages(generation_index_t generation) { - page_index_t i; + page_index_t start; gc_assert(generation < SCRATCH_GENERATION); - for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated == BOXED_PAGE_FLAG) - && (page_table[i].bytes_used != 0) - 
&& !page_table[i].dont_move - && (page_table[i].gen == generation)) { + for (start = 0; start < last_free_page; start++) { + if ((page_table[start].allocated == BOXED_PAGE_FLAG) + && (page_table[start].bytes_used != 0) + && !page_table[start].dont_move + && (page_table[start].gen == generation)) { void *page_start; + page_index_t last; - page_start = (void *)page_address(i); + /* Note the page as protected in the page tables. */ + page_table[start].write_protected = 1; + + for (last = start + 1; last < last_free_page; last++) { + if ((page_table[last].allocated != BOXED_PAGE_FLAG) + || (page_table[last].bytes_used == 0) + || page_table[last].dont_move + || (page_table[last].gen != generation)) + break; + page_table[last].write_protected = 1; + } + + page_start = (void *)page_address(start); os_protect(page_start, - PAGE_BYTES, + PAGE_BYTES * (last - start), OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); - /* Note the page as protected in the page tables. */ - page_table[i].write_protected = 1; + start = last; } + } if (gencgc_verbose > 1) { FSHOW((stderr, @@ -3532,6 +3891,185 @@ write_protect_generation_pages(generation_index_t generation) } } +static void +scavenge_control_stack() +{ + unsigned long control_stack_size; + + /* This is going to be a big problem when we try to port threads + * to PPC... CLH */ + struct thread *th = arch_os_get_current_thread(); + lispobj *control_stack = + (lispobj *)(th->control_stack_start); + + control_stack_size = current_control_stack_pointer - control_stack; + scavenge(control_stack, control_stack_size); +} + +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) +/* Scavenging Interrupt Contexts */ + +static int boxed_registers[] = BOXED_REGISTERS; + +static void +scavenge_interrupt_context(os_context_t * context) +{ + int i; + +#ifdef reg_LIP + unsigned long lip; + unsigned long lip_offset; + int lip_register_pair; +#endif + unsigned long pc_code_offset; + +#ifdef ARCH_HAS_LINK_REGISTER + unsigned long lr_code_offset; +#endif +#ifdef ARCH_HAS_NPC_REGISTER + unsigned long npc_code_offset; +#endif + +#ifdef reg_LIP + /* Find the LIP's register pair and calculate it's offset */ + /* before we scavenge the context. */ + + /* + * I (RLT) think this is trying to find the boxed register that is + * closest to the LIP address, without going past it. Usually, it's + * reg_CODE or reg_LRA. But sometimes, nothing can be found. + */ + lip = *os_context_register_addr(context, reg_LIP); + lip_offset = 0x7FFFFFFF; + lip_register_pair = -1; + for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) { + unsigned long reg; + long offset; + int index; + + index = boxed_registers[i]; + reg = *os_context_register_addr(context, index); + if ((reg & ~((1L<uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is + * that what we really want? My guess is that that is not what we + * want, so if lip_register_pair is -1, we don't touch reg_LIP at + * all. But maybe it doesn't really matter if LIP is trashed? 
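
The write_protect_generation_pages rewrite above replaces one os_protect syscall per page with one per maximal run of protectable pages. The coalescing loop, abstracted away from the page table:

    /* Apply protect(first, count) once per maximal run of pages in
     * [0, npages) satisfying wants_protection(); mirrors the
     * start/last loop structure above. */
    void protect_runs(long npages,
                      int (*wants_protection)(long),
                      void (*protect)(long first, long count)) {
        long start, last;
        for (start = 0; start < npages; start++) {
            if (!wants_protection(start))
                continue;
            for (last = start + 1; last < npages; last++)
                if (!wants_protection(last))
                    break;
            protect(start, last - start);
            start = last;              /* resume after the run */
        }
    }
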
+ */ + if (lip_register_pair >= 0) { + *os_context_register_addr(context, reg_LIP) = + *os_context_register_addr(context, lip_register_pair) + lip_offset; + } +#endif /* reg_LIP */ + + /* Fix the PC if it was in from space */ + if (from_space_p(*os_context_pc_addr(context))) + *os_context_pc_addr(context) = *os_context_register_addr(context, reg_CODE) + pc_code_offset; + +#ifdef ARCH_HAS_LINK_REGISTER + /* Fix the LR ditto; important if we're being called from + * an assembly routine that expects to return using blr, otherwise + * harmless */ + if (from_space_p(*os_context_lr_addr(context))) + *os_context_lr_addr(context) = + *os_context_register_addr(context, reg_CODE) + lr_code_offset; +#endif + +#ifdef ARCH_HAS_NPC_REGISTER + if (from_space_p(*os_context_npc_addr(context))) + *os_context_npc_addr(context) = *os_context_register_addr(context, reg_CODE) + npc_code_offset; +#endif /* ARCH_HAS_NPC_REGISTER */ +} + +void +scavenge_interrupt_contexts(void) +{ + int i, index; + os_context_t *context; + + struct thread *th=arch_os_get_current_thread(); + + index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0)); + +#if defined(DEBUG_PRINT_CONTEXT_INDEX) + printf("Number of active contexts: %d\n", index); +#endif + + for (i = 0; i < index; i++) { + context = th->interrupt_contexts[i]; + scavenge_interrupt_context(context); + } +} + +#endif + +#if defined(LISP_FEATURE_SB_THREAD) +static void +preserve_context_registers (os_context_t *c) +{ + void **ptr; + /* On Darwin the signal context isn't a contiguous block of memory, + * so just preserve_pointering its contents won't be sufficient. + */ +#if defined(LISP_FEATURE_DARWIN) +#if defined LISP_FEATURE_X86 + preserve_pointer((void*)*os_context_register_addr(c,reg_EAX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_ECX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EDX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EBX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_ESI)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EDI)); + preserve_pointer((void*)*os_context_pc_addr(c)); +#else + #error "preserve_context_registers needs to be tweaked for non-x86 Darwin" +#endif +#endif + for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) { + preserve_pointer(*ptr); + } +} +#endif + /* Garbage collect a generation. If raise is 0 then the remains of the * generation are not raised to the next generation. */ static void @@ -3549,6 +4087,10 @@ garbage_collect_generation(generation_index_t generation, int raise) /* Initialize the weak pointer list. */ weak_pointers = NULL; +#ifdef LUTEX_WIDETAG + unmark_lutexes(generation); +#endif + /* When a generation is not being raised it is transported to a * temporary generation (NUM_GENERATIONS), and lowered when * done. Set up this new generation. There should be no pages @@ -3600,6 +4142,7 @@ garbage_collect_generation(generation_index_t generation, int raise) * initiates GC. If you ever call GC from inside an altstack * handler, you will lose. */ +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) /* And if we're saving a core, there's no point in being conservative. 
*/ if (conservative_stack) { for_each_thread(th) { @@ -3620,9 +4163,7 @@ garbage_collect_generation(generation_index_t generation, int raise) if (esp1>=(void **)th->control_stack_start && esp1<(void **)th->control_stack_end) { if(esp1=(void **)c; ptr--) { - preserve_pointer(*ptr); - } + preserve_context_registers(c); } } } @@ -3634,6 +4175,8 @@ garbage_collect_generation(generation_index_t generation, int raise) } } } +#endif + #ifdef QSHOW if (gencgc_verbose > 1) { long num_dont_move_pages = count_dont_move_pages(); @@ -3646,6 +4189,15 @@ garbage_collect_generation(generation_index_t generation, int raise) /* Scavenge all the rest of the roots. */ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + /* + * If not x86, we need to scavenge the interrupt context(s) and the + * control stack. + */ + scavenge_interrupt_contexts(); + scavenge_control_stack(); +#endif + /* Scavenge the Lisp functions of the interrupt handlers, taking * care to avoid SIG_DFL and SIG_IGN. */ for (i = 0; i < NSIG; i++) { @@ -3659,7 +4211,7 @@ garbage_collect_generation(generation_index_t generation, int raise) { struct thread *th; for_each_thread(th) { - long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) - + long len= (lispobj *)get_binding_stack_pointer(th) - th->binding_stack_start; scavenge((lispobj *) th->binding_stack_start,len); #ifdef LISP_FEATURE_SB_THREAD @@ -3704,11 +4256,7 @@ garbage_collect_generation(generation_index_t generation, int raise) /* All generations but the generation being GCed need to be * scavenged. The new_space generation needs special handling as * objects may be moved in - it is handled separately below. */ - for (i = 0; i <= PSEUDO_STATIC_GENERATION; i++) { - if ((i != generation) && (i != new_space)) { - scavenge_generation(i); - } - } + scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION); /* Finally scavenge the new_space generation. Keep going until no * more objects are moved into the new generation */ @@ -3737,7 +4285,7 @@ garbage_collect_generation(generation_index_t generation, int raise) bytes_allocated = bytes_allocated - old_bytes_allocated; if (bytes_allocated != 0) { - lose("Rescan of new_space allocated %d more bytes.", + lose("Rescan of new_space allocated %d more bytes.\n", bytes_allocated); } } @@ -3786,6 +4334,12 @@ garbage_collect_generation(generation_index_t generation, int raise) generations[generation].num_gc = 0; else ++generations[generation].num_gc; + +#ifdef LUTEX_WIDETAG + reap_lutexes(generation); + if (raise) + move_lutexes(generation, generation+1); +#endif } /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */ @@ -3801,11 +4355,44 @@ update_dynamic_space_free_pointer(void) last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); + set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES)); return 0; /* dummy value: return something ... */ } +static void +remap_free_pages (page_index_t from, page_index_t to) +{ + page_index_t first_page, last_page; + + for (first_page = from; first_page <= to; first_page++) { + if (page_table[first_page].allocated != FREE_PAGE_FLAG || + page_table[first_page].need_to_zero == 0) { + continue; + } + + last_page = first_page + 1; + while (page_table[last_page].allocated == FREE_PAGE_FLAG && + last_page < to && + page_table[last_page].need_to_zero == 1) { + last_page++; + } + + /* There's a mysterious Solaris/x86 problem with using mmap + * tricks for memory zeroing. 
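
The x86 branch guarded above pins pages conservatively: every word-sized slot on the control stack is treated as a possible heap pointer, because on x86 the compiler gives the GC no map of which slots are boxed. Stripped to its loop (preserve_pointer is SBCL's pinning routine, here only declared):

    extern void preserve_pointer(void *addr); /* pins addr's page if it
                                                 plausibly points into
                                                 dynamic space */

    /* Scan a stack from ESP (current, low end) up to its base. Any bit
     * pattern that resembles a heap address keeps its page alive; that
     * over-approximation is the price of map-free scanning. */
    void scan_stack_conservatively(void **esp, void **stack_base) {
        void **ptr;
        for (ptr = esp; ptr < stack_base; ptr++)
            preserve_pointer(*ptr);
    }
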
See sbcl-devel thread + * "Re: patch: standalone executable redux". + */ +#if defined(LISP_FEATURE_SUNOS) + zero_pages(first_page, last_page-1); +#else + zero_pages_with_mmap(first_page, last_page-1); +#endif + + first_page = last_page; + } +} + +generation_index_t small_generation_limit = 1; + /* GC all generations newer than last_gen, raising the objects in each * to the next older generation - we finish when all generations below * last_gen are empty. Then if last_gen is due for a GC, or if @@ -3814,16 +4401,20 @@ update_dynamic_space_free_pointer(void) * * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */ - void collect_garbage(generation_index_t last_gen) { generation_index_t gen = 0, i; int raise; int gen_to_wp; + /* The largest value of last_free_page seen since the time + * remap_free_pages was called. */ + static page_index_t high_water_mark = 0; FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen)); + gc_active_p = 1; + if (last_gen > HIGHEST_NORMAL_GENERATION+1) { FSHOW((stderr, "/collect_garbage: last_gen = %d, doing a level 0 GC\n", @@ -3911,7 +4502,7 @@ collect_garbage(generation_index_t last_gen) /* Check that they are all empty. */ for (i = 0; i < gen_to_wp; i++) { if (generations[i].bytes_allocated) - lose("trying to write-protect gen. %d when gen. %d nonempty", + lose("trying to write-protect gen. %d when gen. %d nonempty\n", gen_to_wp, i); } write_protect_generation_pages(gen_to_wp); @@ -3922,11 +4513,29 @@ collect_garbage(generation_index_t last_gen) gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0); gc_alloc_generation = 0; + /* Save the high-water mark before updating last_free_page */ + if (last_free_page > high_water_mark) + high_water_mark = last_free_page; + update_dynamic_space_free_pointer(); + auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs; if(gencgc_verbose) fprintf(stderr,"Next gc when %ld bytes have been consed\n", auto_gc_trigger); + + /* If we did a big GC (arbitrarily defined as gen > 1), release memory + * back to the OS. + */ + if (gen > small_generation_limit) { + if (last_free_page > high_water_mark) + high_water_mark = last_free_page; + remap_free_pages(0, high_water_mark); + high_water_mark = 0; + } + + gc_active_p = 0; + SHOW("returning from collect_garbage"); } @@ -3956,6 +4565,7 @@ gc_free_heap(void) page_table[page].allocated = FREE_PAGE_FLAG; page_table[page].bytes_used = 0; +#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */ /* Zero the page. */ page_start = (void *)page_address(page); @@ -3966,10 +4576,13 @@ gc_free_heap(void) os_invalidate(page_start,PAGE_BYTES); addr = os_validate(page_start,PAGE_BYTES); if (addr == NULL || addr != page_start) { - lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x", + lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n", page_start, addr); } +#else + page_table[page].write_protected = 0; +#endif } else if (gencgc_zero_check_during_free_heap) { /* Double-check that the page is zero filled. 
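
collect_garbage's new release policy, in miniature: track the deepest the heap has grown since memory was last returned, and after a sufficiently major collection hand everything below that mark back to the OS in one remap pass. A sketch using the names from the patch:

    static long last_free_page;      /* maintained by the allocator */
    static long high_water_mark;     /* deepest heap since last release */

    extern void remap_free_pages(long from, long to);

    void maybe_release_memory(int gen_collected, int small_generation_limit) {
        if (last_free_page > high_water_mark)
            high_water_mark = last_free_page;
        /* Only a big GC is likely to have freed enough pages to repay
         * the munmap/mmap traffic. */
        if (gen_collected > small_generation_limit) {
            remap_free_pages(0, high_water_mark);
            high_water_mark = 0;
        }
    }
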
*/ long *page_start; @@ -3979,7 +4592,7 @@ gc_free_heap(void) page_start = (long *)page_address(page); for (i=0; i<1024; i++) { if (page_start[i] != 0) { - lose("free region not zero at %x", page_start + i); + lose("free region not zero at %x\n", page_start + i); } } } @@ -3997,6 +4610,7 @@ gc_free_heap(void) generations[page].gc_trigger = 2000000; generations[page].num_gc = 0; generations[page].cum_sum_bytes_allocated = 0; + generations[page].lutexes = NULL; } if (gencgc_verbose > 1) @@ -4009,7 +4623,7 @@ gc_free_heap(void) gc_set_region_empty(&unboxed_region); last_free_page = 0; - SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0); + set_alloc_pointer((lispobj)((char *)heap_base)); if (verify_after_free_heap) { /* Check whether purify has left any bad pointers. */ @@ -4029,6 +4643,12 @@ gc_init(void) scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer; transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large; +#ifdef LUTEX_WIDETAG + scavtab[LUTEX_WIDETAG] = scav_lutex; + transother[LUTEX_WIDETAG] = trans_lutex; + sizetab[LUTEX_WIDETAG] = size_lutex; +#endif + heap_base = (void*)DYNAMIC_SPACE_START; /* Initialize each page structure. */ @@ -4059,6 +4679,7 @@ gc_init(void) generations[i].bytes_consed_between_gc = 2000000; generations[i].trigger_age = 1; generations[i].min_av_mem_age = 0.75; + generations[i].lutexes = NULL; } /* Initialize gc_alloc. */ @@ -4078,7 +4699,7 @@ static void gencgc_pickup_dynamic(void) { page_index_t page = 0; - long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); + long alloc_ptr = get_alloc_pointer(); lispobj *prev=(lispobj *)page_address(page); generation_index_t gen = PSEUDO_STATIC_GENERATION; @@ -4091,6 +4712,7 @@ gencgc_pickup_dynamic(void) page_table[page].write_protected = 0; page_table[page].write_protected_cleared = 0; page_table[page].dont_move = 0; + page_table[page].need_to_zero = 1; if (!gencgc_partial_pickup) { first=gc_search_space(prev,(ptr+2)-prev,ptr); @@ -4101,6 +4723,13 @@ gencgc_pickup_dynamic(void) page++; } while ((long)page_address(page) < alloc_ptr); +#ifdef LUTEX_WIDETAG + /* Lutexes have been registered in generation 0 by coreparse, and + * need to be moved to the right one manually. + */ + move_lutexes(0, PSEUDO_STATIC_GENERATION); +#endif + last_free_page = page; generations[gen].bytes_allocated = PAGE_BYTES*page; @@ -4144,16 +4773,18 @@ alloc(long nbytes) void *new_obj; void *new_free_pointer; gc_assert(nbytes>0); + /* Check for alignment allocation problems. */ gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0) && ((nbytes & LOWTAG_MASK) == 0)); + #if 0 if(all_threads) /* there are a few places in the C code that allocate data in the * heap before Lisp starts. 
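
The gc_init hunk below registers the lutex handlers in the same widetag-indexed dispatch tables used by every other heap object, so the scavenger needs no special cases. The mechanism, reduced to a sketch (the widetag value and word count are made up; the real ones come from genesis):

    typedef unsigned long lispobj;

    #define N_WIDETAGS    256
    #define LUTEX_WIDETAG 0x5e           /* illustrative value only */

    typedef long (*scav_fn)(lispobj *where, lispobj header);

    static scav_fn scavtab[N_WIDETAGS];

    static long scav_lutex(lispobj *where, lispobj header) {
        /* mark the lutex live; it occupies a fixed number of words */
        return 4;
    }

    void gc_init_tables(void) {
        scavtab[LUTEX_WIDETAG] = scav_lutex;
    }

    /* The scavenger's inner loop is then one indirect call, assuming
     * every widetag in use has been registered: */
    long scavenge_one(lispobj *where) {
        return scavtab[*where & 0xff](where, *where);
    }
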
This is before interrupts are enabled, * so we don't need to check for pseudo-atomic */ #ifdef LISP_FEATURE_SB_THREAD - if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) { + if(!get_psuedo_atomic_atomic(th)) { register u32 fs; fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n", th,th->os_thread); @@ -4163,7 +4794,7 @@ alloc(long nbytes) lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); } #else - gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)); + gc_assert(get_pseudo_atomic_atomic(th)); #endif #endif @@ -4179,7 +4810,7 @@ alloc(long nbytes) * we should GC in the near future */ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread))); + gc_assert(get_pseudo_atomic_atomic(thread)); /* Don't flood the system with interrupts if the need to gc is * already noted. This can happen for example when SUB-GC * allocates or after a gc triggered in a WITHOUT-GCING. */ @@ -4188,7 +4819,7 @@ alloc(long nbytes) * section */ SetSymbolValue(GC_PENDING,T,thread); if (SymbolValue(GC_INHIBIT,thread) == NIL) - arch_set_pseudo_atomic_interrupted(0); + set_pseudo_atomic_interrupted(thread); } } new_obj = gc_alloc_with_region(nbytes,0,region,0); @@ -4246,7 +4877,8 @@ gencgc_handle_wp_violation(void* fault_addr) * does this test after the first one has already set wp=0 */ if(page_table[page_index].write_protected_cleared != 1) - lose("fault in heap page not marked as write-protected"); + lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n", + page_index, boxed_region.first_page, boxed_region.last_page); } /* Don't worry, we can handle it. */ return 1; @@ -4280,6 +4912,23 @@ gc_set_region_empty(struct alloc_region *region) region->end_addr = page_address(0); } +static void +zero_all_free_pages() +{ + page_index_t i; + + for (i = 0; i < last_free_page; i++) { + if (page_table[i].allocated == FREE_PAGE_FLAG) { +#ifdef READ_PROTECT_FREE_PAGES + os_protect(page_address(i), + PAGE_BYTES, + OS_VM_PROT_ALL); +#endif + zero_pages(i, i); + } + } +} + /* Things to do before doing a final GC before saving a core (without * purify). * @@ -4309,13 +4958,16 @@ prepare_for_final_gc () * function being set to the value of the static symbol * SB!VM:RESTART-LISP-FUNCTION */ void -gc_and_save(char *filename) +gc_and_save(char *filename, int prepend_runtime) { - FILE *file = open_core_for_saving(filename); - if (!file) { - perror(filename); - return; - } + FILE *file; + void *runtime_bytes = NULL; + size_t runtime_size; + + file = prepare_to_save(filename, prepend_runtime, &runtime_bytes, &runtime_size); + if (file == NULL) + return; + conservative_stack = 0; /* The filename might come from Lisp, and be moved by the now @@ -4334,10 +4986,16 @@ gc_and_save(char *filename) gencgc_alloc_start_page = -1; collect_garbage(HIGHEST_NORMAL_GENERATION+1); - save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0)); + if (prepend_runtime) + save_runtime_to_filehandle(file, runtime_bytes, runtime_size); + + /* The dumper doesn't know that pages need to be zeroed before use. */ + zero_all_free_pages(); + save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0), + prepend_runtime); /* Oops. Save still managed to fail. Since we've mangled the stack * beyond hope, there's not much we can do. * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's * going to be rather unsatisfactory too... 
*/ - lose("Attempt to save core after non-conservative GC failed."); + lose("Attempt to save core after non-conservative GC failed.\n"); }
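
For reference, the allocation-side trigger in alloc() earlier in this patch reduces to a small handshake: crossing auto_gc_trigger inside a pseudo-atomic section only requests a collection, and the GC itself runs when the section is exited. Sketched with plain ints standing in for the Lisp-visible symbols:

    static long bytes_allocated;
    static long auto_gc_trigger = 8 * 1024 * 1024;  /* example threshold */
    static int  gc_pending, gc_inhibit, pseudo_atomic_interrupted;

    /* Slow allocation path; always entered pseudo-atomically. */
    void note_allocation(long nbytes) {
        bytes_allocated += nbytes;
        if (auto_gc_trigger && bytes_allocated > auto_gc_trigger
            && !gc_pending) {
            gc_pending = 1;                    /* GC_PENDING := T */
            if (!gc_inhibit)
                pseudo_atomic_interrupted = 1; /* trap on pa-exit, where
                                                  the GC actually runs */
        }
    }
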