X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=a6891d0149f3ff6a9df1fe9d8ac324373b46acc7;hb=7fb597b585fc715537ea644f7d84440eca217ca1;hp=a35586b0ea7bf3c9b556fe173b367d35a43ab3ac;hpb=79cc569a97e444389350ea3f5b1017374fe16bec;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index a35586b..a6891d0 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -44,28 +44,34 @@ #include "genesis/vector.h" #include "genesis/weak-pointer.h" #include "genesis/simple-fun.h" +#include "save.h" #include "genesis/hash-table.h" /* forward declarations */ -long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed); -static void gencgc_pickup_dynamic(void); +page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes, + int unboxed); /* * GC parameters */ -/* the number of actual generations. (The number of 'struct - * generation' objects is one more than this, because one object - * serves as scratch when GC'ing.) */ -#define NUM_GENERATIONS 6 +/* Generations 0-5 are normal collected generations, 6 is only used as + * scratch space by the collector, and should never get collected. + */ +enum { + HIGHEST_NORMAL_GENERATION = 5, + PSEUDO_STATIC_GENERATION, + SCRATCH_GENERATION, + NUM_GENERATIONS +}; /* Should we use page protection to help avoid the scavenging of pages * that don't have pointers to younger generations? */ boolean enable_page_protection = 1; /* Should we unmap a page and re-mmap it to have it zero filled? */ -#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__sun) /* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD * so don't unmap there. * @@ -73,35 +79,36 @@ boolean enable_page_protection = 1; * old version of FreeBSD (pre-4.0), so this might no longer be true. * OTOH, if it is true, this behavior might exist on OpenBSD too, so * for now we don't unmap there either. -- WHN 2001-04-07 */ +/* Apparently this flag is required to be 0 for SunOS/x86, as there + * are reports of heap corruption otherwise. */ boolean gencgc_unmap_zero = 0; #else boolean gencgc_unmap_zero = 1; #endif /* the minimum size (in bytes) for a large object*/ -unsigned large_object_size = 4 * PAGE_BYTES; +unsigned long large_object_size = 4 * PAGE_BYTES; /* * debugging */ - - /* the verbosity level. All non-error messages are disabled at level 0; * and only a few rare messages are printed at level 1. */ #ifdef QSHOW -unsigned gencgc_verbose = 1; +boolean gencgc_verbose = 1; #else -unsigned gencgc_verbose = 0; +boolean gencgc_verbose = 0; #endif /* FIXME: At some point enable the various error-checking things below * and see what they say. */ /* We hunt for pointers to old-space, when GCing generations >= verify_gen. - * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */ -int verify_gens = NUM_GENERATIONS; + * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of + * check. */ +generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1; /* Should we do a pre-scan verify of generation 0 before it's GCed? */ boolean pre_verify_gen_0 = 0; @@ -126,6 +133,12 @@ boolean gencgc_enable_verify_zero_fill = 0; /* Should we check that free pages are zero filled during gc_free_heap * called after Lisp PURIFY? */ boolean gencgc_zero_check_during_free_heap = 0; + +/* When loading a core, don't do a full scan of the memory for the + * memory region boundaries. 
(Set to true by coreparse.c if the core
+ * contained a pagetable entry).
+ */
+boolean gencgc_partial_pickup = 0;
 
 /*
  * GC structures and variables
  */
@@ -138,9 +151,12 @@ unsigned long auto_gc_trigger = 0;
 
 /* the source and destination generations. These are set before a GC starts
  * scavenging. */
-long from_space;
-long new_space;
+generation_index_t from_space;
+generation_index_t new_space;
 
+/* Should the GC be conservative on the stack? If false (set only
+ * right before saving a core), don't scan the stack and don't mark
+ * pages dont_move. */
+static boolean conservative_stack = 1;
 
 /* An array of page structures is statically allocated.
  * This helps quickly map between an address and its page structure.
@@ -159,17 +175,17 @@ static void *heap_base = NULL;
 
 /* Calculate the start address for the given page number. */
 inline void *
-page_address(long page_num)
+page_address(page_index_t page_num)
 {
     return (heap_base + (page_num * PAGE_BYTES));
 }
 
 /* Find the page index within the page_table for the given
  * address. Return -1 on failure. */
-inline long
+inline page_index_t
 find_page_index(void *addr)
 {
-    long index = addr-heap_base;
+    page_index_t index = addr-heap_base;
 
     if (index >= 0) {
         index = ((unsigned long)index)/PAGE_BYTES;
@@ -184,19 +200,19 @@ find_page_index(void *addr)
 
 struct generation {
 
     /* the first page that gc_alloc() checks on its next call */
-    long alloc_start_page;
+    page_index_t alloc_start_page;
 
     /* the first page that gc_alloc_unboxed() checks on its next call */
-    long alloc_unboxed_start_page;
+    page_index_t alloc_unboxed_start_page;
 
     /* the first page that gc_alloc_large (boxed) considers on its next
      * call. (Although it always allocates after the boxed_region.) */
-    long alloc_large_start_page;
+    page_index_t alloc_large_start_page;
 
     /* the first page that gc_alloc_large (unboxed) considers on its
      * next call. (Although it always allocates after the
      * current_unboxed_region.) */
-    long alloc_large_unboxed_start_page;
+    page_index_t alloc_large_unboxed_start_page;
 
     /* the bytes allocated to this generation */
     long bytes_allocated;
@@ -226,20 +242,16 @@ struct generation {
      * added, in which case a GC could be a waste of time */
     double min_av_mem_age;
 };
-/* the number of actual generations. (The number of 'struct
- * generation' objects is one more than this, because one object
- * serves as scratch when GC'ing.) */
-#define NUM_GENERATIONS 6
 
 /* an array of generation structures. There needs to be one more
  * generation structure than actual generations as the oldest
  * generation is temporarily raised then lowered. */
-struct generation generations[NUM_GENERATIONS+1];
+struct generation generations[NUM_GENERATIONS];
 
 /* the oldest generation that will currently be GCed by default.
- * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
+ * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
 *
- * The default of (NUM_GENERATIONS-1) enables GC on all generations.
+ * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
@@ -248,13 +260,13 @@ struct generation generations[NUM_GENERATIONS+1];
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. 
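 * (An illustrative call sequence is sketched just below.)
 */

/* EDITOR'S SKETCH -- not part of this patch. A hypothetical way a host
 * might use this knob: promote long-lived data with one full collection,
 * then keep routine collections confined to the nursery.
 * collect_garbage() and HIGHEST_NORMAL_GENERATION are real names from
 * this file; the wrapper itself is invented for illustration. */
static void
example_tenure_long_lived_data(void)
{
    /* Collect every normal generation once, so current survivors end
     * up in the older generations. */
    collect_garbage(HIGHEST_NORMAL_GENERATION);
    /* From here on, default collections touch only generation 0 and
     * leave the tenured data unexamined. */
    gencgc_oldest_gen_to_gc = 0;
}

/* (End of editor's sketch; the declaration described above follows.)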
*/ -unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1; +generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION; /* The maximum free page in the heap is maintained and used to update * ALLOCATION_POINTER which is used by the room function to limit its * search of the heap. XX Gencgc obviously needs to be better * integrated with the Lisp code. */ -static long last_free_page; +page_index_t last_free_page; /* This lock is to prevent multiple threads from simultaneously * allocating new regions which overlap each other. Note that the @@ -263,7 +275,9 @@ static long last_free_page; * seized before all accesses to generations[] or to parts of * page_table[] that other threads may want to see */ -static lispobj free_pages_lock=0; +#ifdef LISP_FEATURE_SB_THREAD +static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER; +#endif /* @@ -273,9 +287,9 @@ static lispobj free_pages_lock=0; /* Count the number of pages which are write-protected within the * given generation. */ static long -count_write_protect_generation_pages(int generation) +count_write_protect_generation_pages(generation_index_t generation) { - long i; + page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) @@ -288,9 +302,9 @@ count_write_protect_generation_pages(int generation) /* Count the number of pages within the given generation. */ static long -count_generation_pages(int generation) +count_generation_pages(generation_index_t generation) { - long i; + page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) @@ -304,7 +318,7 @@ count_generation_pages(int generation) static long count_dont_move_pages(void) { - long i; + page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) { @@ -318,9 +332,9 @@ count_dont_move_pages(void) /* Work through the pages and add up the number of bytes used for the * given generation. */ static long -count_generation_bytes_allocated (int gen) +count_generation_bytes_allocated (generation_index_t gen) { - long i; + page_index_t i; long result = 0; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != 0) && (page_table[i].gen == gen)) @@ -331,7 +345,7 @@ count_generation_bytes_allocated (int gen) /* Return the average age of the memory in a generation. */ static double -gen_av_mem_age(int gen) +gen_av_mem_age(generation_index_t gen) { if (generations[gen].bytes_allocated == 0) return 0.0; @@ -348,30 +362,30 @@ void fpu_restore(int *); /* defined in x86-assem.S */ static void print_generation_stats(int verbose) /* FIXME: should take FILE argument */ { - int i, gens; + generation_index_t i, gens; int fpu_state[27]; /* This code uses the FP instructions which may be set up for Lisp * so they need to be saved and reset for C. */ fpu_save(fpu_state); - /* number of generations to print */ + /* highest generation to print */ if (verbose) - gens = NUM_GENERATIONS+1; + gens = SCRATCH_GENERATION; else - gens = NUM_GENERATIONS; + gens = PSEUDO_STATIC_GENERATION; /* Print the heap stats. 
*/ fprintf(stderr, " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); for (i = 0; i < gens; i++) { - int j; - int boxed_cnt = 0; - int unboxed_cnt = 0; - int large_boxed_cnt = 0; - int large_unboxed_cnt = 0; - int pinned_cnt=0; + page_index_t j; + long boxed_cnt = 0; + long unboxed_cnt = 0; + long large_boxed_cnt = 0; + long large_unboxed_cnt = 0; + long pinned_cnt=0; for (j = 0; j < last_free_page; j++) if (page_table[j].gen == i) { @@ -398,7 +412,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ gc_assert(generations[i].bytes_allocated == count_generation_bytes_allocated(i)); fprintf(stderr, - " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n", + " %1d: %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n", i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt, pinned_cnt, @@ -471,7 +485,7 @@ struct alloc_region boxed_region; struct alloc_region unboxed_region; /* The generation currently being allocated to. */ -static int gc_alloc_generation; +static generation_index_t gc_alloc_generation; /* Find a new region with room for at least the given number of bytes. * @@ -499,10 +513,10 @@ static int gc_alloc_generation; static void gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) { - long first_page; - long last_page; + page_index_t first_page; + page_index_t last_page; long bytes_found; - long i; + page_index_t i; /* FSHOW((stderr, @@ -514,7 +528,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - get_spinlock(&free_pages_lock,(long) alloc_region); + thread_mutex_lock(&free_pages_lock); if (unboxed) { first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; @@ -576,7 +590,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), 0); } - release_spinlock(&free_pages_lock); + thread_mutex_unlock(&free_pages_lock); /* we can do this after releasing free_pages_lock */ if (gencgc_zero_check) { @@ -588,13 +602,12 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) * (long) in code like this, so that it is less likely to * break randomly when running on a machine with different * word sizes. -- WHN 19991129 */ - lose("The new region at %x is not zero.", p); + lose("The new region at %x is not zero.\n", p); } + } } } -} - /* If the record_new_objects flag is 2 then all new regions created * are recorded. * @@ -612,9 +625,9 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) * scavenge of a generation. */ #define NUM_NEW_AREAS 512 static int record_new_objects = 0; -static long new_areas_ignore_page; +static page_index_t new_areas_ignore_page; struct new_area { - long page; + page_index_t page; long offset; long size; }; @@ -624,9 +637,9 @@ long max_new_areas; /* Add a new area to new_areas. */ static void -add_new_area(long first_page, long offset, long size) +add_new_area(page_index_t first_page, long offset, long size) { - unsigned new_area_start,c; + unsigned long new_area_start,c; long i; /* Ignore if full. */ @@ -651,7 +664,7 @@ add_new_area(long first_page, long offset, long size) /* Search backwards for a prior area that this follows from. If found this will save adding a new area. 
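       (Editor's note, not part of this patch: "follows from" is plain
       address arithmetic. A recorded area (page, offset, size) covers
       the bytes [PAGE_BYTES*page + offset, PAGE_BYTES*page + offset +
       size); the candidate starts at PAGE_BYTES*first_page + offset.
       When the candidate's start equals a prior area's end, the prior
       entry's size is simply grown instead of consuming a new slot.
       Only the last eight entries are tried, as a cheap bound on the
       search; the loop below encodes that as c < 8.)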
*/ for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) { - unsigned area_end = + unsigned long area_end = PAGE_BYTES*((*new_areas)[i].page) + (*new_areas)[i].offset + (*new_areas)[i].size; @@ -696,10 +709,10 @@ add_new_area(long first_page, long offset, long size) void gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) { - long more; - long first_page; - long next_page; - long bytes_used; + int more; + page_index_t first_page; + page_index_t next_page; + int bytes_used; long orig_first_page_bytes_used; long region_size; long byte_cnt; @@ -713,7 +726,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - get_spinlock(&free_pages_lock,(long) alloc_region); + thread_mutex_lock(&free_pages_lock); if (alloc_region->free_pointer != alloc_region->start_addr) { /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; @@ -817,7 +830,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } - release_spinlock(&free_pages_lock); + thread_mutex_unlock(&free_pages_lock); /* alloc_region is per-thread, we're ok to do this unlocked */ gc_set_region_empty(alloc_region); } @@ -828,15 +841,15 @@ static inline void *gc_quick_alloc(long nbytes); void * gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) { - long first_page; - long last_page; - long orig_first_page_bytes_used; + page_index_t first_page; + page_index_t last_page; + int orig_first_page_bytes_used; long byte_cnt; - long more; + int more; long bytes_used; - long next_page; + page_index_t next_page; - get_spinlock(&free_pages_lock,(long) alloc_region); + thread_mutex_lock(&free_pages_lock); if (unboxed) { first_page = @@ -937,27 +950,33 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) SetSymbolValue(ALLOCATION_POINTER, (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); } - release_spinlock(&free_pages_lock); + thread_mutex_unlock(&free_pages_lock); return((void *)(page_address(first_page)+orig_first_page_bytes_used)); } -long -gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed) +static page_index_t gencgc_alloc_start_page = -1; + +page_index_t +gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) { - long first_page; - long last_page; + page_index_t first_page; + page_index_t last_page; long region_size; - long restart_page=*restart_page_ptr; + page_index_t restart_page=*restart_page_ptr; long bytes_found; long num_pages; - long large_p=(nbytes>=large_object_size); - gc_assert(free_pages_lock); + int large_p=(nbytes>=large_object_size); + /* FIXME: assert(free_pages_lock is held); */ /* Search for a contiguous free space of at least nbytes. If it's * a large object then align it on a page boundary by searching * for a free page. */ + if (gencgc_alloc_start_page != -1) { + restart_page = gencgc_alloc_start_page; + } + do { first_page = restart_page; if (large_p) @@ -985,7 +1004,7 @@ gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed) "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n", nbytes); print_generation_stats(1); - lose(NULL); + lose("\n"); } gc_assert(page_table[first_page].write_protected == 0); @@ -1016,9 +1035,10 @@ gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed) "Argh! 
gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n", nbytes); print_generation_stats(1); - lose(NULL); + lose("\n"); } *restart_page_ptr=first_page; + return last_page; } @@ -1127,7 +1147,7 @@ copy_large_object(lispobj object, long nwords) { int tag; lispobj *new; - long first_page; + page_index_t first_page; gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); @@ -1143,7 +1163,7 @@ copy_large_object(lispobj object, long nwords) /* Promote the object. */ long remaining_bytes; - long next_page; + page_index_t next_page; long bytes_freed; long old_bytes_used; @@ -1275,7 +1295,7 @@ copy_large_unboxed_object(lispobj object, long nwords) { int tag; lispobj *new; - long first_page; + page_index_t first_page; gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); @@ -1293,7 +1313,7 @@ copy_large_unboxed_object(lispobj object, long nwords) * allocated to a BOXED region so it may be necessary to * change the region to UNBOXED. */ long remaining_bytes; - long next_page; + page_index_t next_page; long bytes_freed; long old_bytes_used; @@ -1401,11 +1421,12 @@ static lispobj trans_boxed(lispobj object); * Currently only absolute fixups to the constant vector, or to the * code area are checked. */ void -sniff_code_object(struct code *code, unsigned displacement) +sniff_code_object(struct code *code, unsigned long displacement) { +#ifdef LISP_FEATURE_X86 long nheader_words, ncode_words, nwords; void *p; - void *constants_start_addr, *constants_end_addr; + void *constants_start_addr = NULL, *constants_end_addr; void *code_start_addr, *code_end_addr; int fixup_found = 0; @@ -1568,16 +1589,19 @@ sniff_code_object(struct code *code, unsigned displacement) "/code start = %x, end = %x\n", code_start_addr, code_end_addr)); } +#endif } void gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) { +/* x86-64 uses pc-relative addressing instead of this kludge */ +#ifndef LISP_FEATURE_X86_64 long nheader_words, ncode_words, nwords; void *constants_start_addr, *constants_end_addr; void *code_start_addr, *code_end_addr; lispobj fixups = NIL; - unsigned displacement = (unsigned)new_code - (unsigned)old_code; + unsigned long displacement = (unsigned long)new_code - (unsigned long)old_code; struct vector *fixups_vector; ncode_words = fixnum_value(new_code->code_size); @@ -1636,23 +1660,23 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) long length = fixnum_value(fixups_vector->length); long i; for (i = 0; i < length; i++) { - unsigned offset = fixups_vector->data[i]; + unsigned long offset = fixups_vector->data[i]; /* Now check the current value of offset. */ - unsigned old_value = - *(unsigned *)((unsigned)code_start_addr + offset); + unsigned long old_value = + *(unsigned long *)((unsigned long)code_start_addr + offset); /* If it's within the old_code object then it must be an * absolute fixup (relative ones are not saved) */ - if ((old_value >= (unsigned)old_code) - && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES))) + if ((old_value >= (unsigned long)old_code) + && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES))) /* So add the dispacement. */ - *(unsigned *)((unsigned)code_start_addr + offset) = + *(unsigned long *)((unsigned long)code_start_addr + offset) = old_value + displacement; else /* It is outside the old code object so it must be a * relative fixup (absolute fixups are not saved). So * subtract the displacement. 
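                 * (Editor's note, not part of this patch: a worked
                 * example with made-up addresses. Say old_code was at
                 * 0x10000000 and new_code is at 0x10040000, so
                 * displacement = 0x40000. An absolute fixup that stored
                 * 0x10000100, a pointer into the old object, must
                 * follow the move and become 0x10040100: add the
                 * displacement. A relative fixup stores target minus
                 * site; here the target stayed put while the site moved
                 * up by 0x40000, so the stored value must shrink by
                 * 0x40000: subtract it.)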
*/ - *(unsigned *)((unsigned)code_start_addr + offset) = + *(unsigned long *)((unsigned long)code_start_addr + offset) = old_value - displacement; } } else { @@ -1663,6 +1687,7 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) if (check_code_fixups) { sniff_code_object(new_code,displacement); } +#endif } @@ -1681,14 +1706,14 @@ trans_boxed_large(lispobj object) return copy_large_object(object, length); } - +/* Doesn't seem to be used, delete it after the grace period. */ +#if 0 static lispobj trans_unboxed_large(lispobj object) { lispobj header; unsigned long length; - gc_assert(is_lisp_pointer(object)); header = *((lispobj *) native_pointer(object)); @@ -1697,6 +1722,7 @@ trans_unboxed_large(lispobj object) return copy_large_unboxed_object(object, length); } +#endif /* @@ -1707,7 +1733,7 @@ trans_unboxed_large(lispobj object) /* FIXME: What does this mean? */ int gencgc_hash = 1; -static int +static long scav_vector(lispobj *where, lispobj object) { unsigned long kv_length; @@ -1719,7 +1745,7 @@ scav_vector(lispobj *where, lispobj object) unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */ unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */ lispobj weak_p_obj; - unsigned next_vector_length = 0; + unsigned long next_vector_length = 0; /* FIXME: A comment explaining this would be nice. It looks as * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based @@ -1742,12 +1768,12 @@ scav_vector(lispobj *where, lispobj object) /* Scavenge element 0, which may be a hash-table structure. */ scavenge(where+2, 1); if (!is_lisp_pointer(where[2])) { - lose("no pointer at %x in hash table", where[2]); + lose("no pointer at %x in hash table\n", where[2]); } - hash_table = (lispobj *)native_pointer(where[2]); + hash_table = (struct hash_table *)native_pointer(where[2]); /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/ if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) { - lose("hash table not instance (%x at %x)", + lose("hash table not instance (%x at %x)\n", hash_table->header, hash_table); } @@ -1756,23 +1782,24 @@ scav_vector(lispobj *where, lispobj object) * the hash table code reserves for marking empty slots. */ scavenge(where+3, 1); if (!is_lisp_pointer(where[3])) { - lose("not empty-hash-table-slot symbol pointer: %x", where[3]); + lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]); } empty_symbol = where[3]; /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/ if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) != SYMBOL_HEADER_WIDETAG) { - lose("not a symbol where empty-hash-table-slot symbol expected: %x", + lose("not a symbol where empty-hash-table-slot symbol expected: %x\n", *(lispobj *)native_pointer(empty_symbol)); } /* Scavenge hash table, which will fix the positions of the other * needed objects. */ - scavenge(hash_table, sizeof(struct hash_table) / sizeof(lispobj)); + scavenge((lispobj *)hash_table, + sizeof(struct hash_table) / sizeof(lispobj)); /* Cross-check the kv_vector. 
*/ if (where != (lispobj *)native_pointer(hash_table->table)) { - lose("hash_table table!=this table %x", hash_table->table); + lose("hash_table table!=this table %x\n", hash_table->table); } /* WEAK-P */ @@ -1785,12 +1812,13 @@ scav_vector(lispobj *where, lispobj object) if (is_lisp_pointer(index_vector_obj) && (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) == SIMPLE_ARRAY_WORD_WIDETAG)) { - index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2; + index_vector = + ((unsigned long *)native_pointer(index_vector_obj)) + 2; /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/ length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]); /*FSHOW((stderr, "/length = %d\n", length));*/ } else { - lose("invalid index_vector %x", index_vector_obj); + lose("invalid index_vector %x\n", index_vector_obj); } } @@ -1801,12 +1829,12 @@ scav_vector(lispobj *where, lispobj object) if (is_lisp_pointer(next_vector_obj) && (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) == SIMPLE_ARRAY_WORD_WIDETAG)) { - next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2; + next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2; /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/ next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]); /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/ } else { - lose("invalid next_vector %x", next_vector_obj); + lose("invalid next_vector %x\n", next_vector_obj); } } @@ -1817,7 +1845,8 @@ scav_vector(lispobj *where, lispobj object) if (is_lisp_pointer(hash_vector_obj) && (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) == SIMPLE_ARRAY_WORD_WIDETAG)){ - hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2; + hash_vector = + ((unsigned long *)native_pointer(hash_vector_obj)) + 2; /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/ gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1]) == next_vector_length); @@ -1859,7 +1888,8 @@ scav_vector(lispobj *where, lispobj object) #endif if ((old_index != new_index) && - ((!hash_vector) || (hash_vector[i] == 0x80000000)) && + ((!hash_vector) || + (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) && ((new_key != empty_symbol) || (kv_vector[2*i] != empty_symbol))) { @@ -1879,8 +1909,8 @@ scav_vector(lispobj *where, lispobj object) hash_table->needing_rehash = make_fixnum(i); /*SHOW("P2");*/ } else { - unsigned prior = index_vector[old_index]; - unsigned next = next_vector[prior]; + unsigned long prior = index_vector[old_index]; + unsigned long next = next_vector[prior]; /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/ @@ -1987,7 +2017,7 @@ search_static_space(void *pointer) lispobj * search_dynamic_space(void *pointer) { - long page_index = find_page_index(pointer); + page_index_t page_index = find_page_index(pointer); lispobj *start; /* The address may be invalid, so do some checks. 
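     * (Editor's note, not part of this patch: the remainder of this
     * function is unchanged and therefore elided from the diff. It
     * bails out if the page is free, then scans forward with
     * gc_search_space() from the page's first_object_offset, i.e. from
     * the first object whose data reaches into this page, walking
     * object headers until the pointer is bracketed or passed.)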
*/ @@ -2042,8 +2072,8 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) break; case CLOSURE_HEADER_WIDETAG: case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: - if ((unsigned)pointer != - ((unsigned)start_addr+FUN_POINTER_LOWTAG)) { + if ((unsigned long)pointer != + ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wf2: %x %x %x\n", @@ -2060,8 +2090,8 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) } break; case LIST_POINTER_LOWTAG: - if ((unsigned)pointer != - ((unsigned)start_addr+LIST_POINTER_LOWTAG)) { + if ((unsigned long)pointer != + ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wl1: %x %x %x\n", @@ -2092,8 +2122,8 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) return 0; } case INSTANCE_POINTER_LOWTAG: - if ((unsigned)pointer != - ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) { + if ((unsigned long)pointer != + ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wi1: %x %x %x\n", @@ -2109,8 +2139,8 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) } break; case OTHER_POINTER_LOWTAG: - if ((unsigned)pointer != - ((int)start_addr+OTHER_POINTER_LOWTAG)) { + if ((unsigned long)pointer != + ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wo1: %x %x %x\n", @@ -2127,6 +2157,7 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) } switch (widetag_of(start_addr[0])) { case UNBOUND_MARKER_WIDETAG: + case NO_TLS_VALUE_MARKER_WIDETAG: case CHARACTER_WIDETAG: #if N_WORD_BITS == 64 case SINGLE_FLOAT_WIDETAG: @@ -2279,11 +2310,11 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) static void maybe_adjust_large_object(lispobj *where) { - long first_page; + page_index_t first_page; + page_index_t next_page; long nwords; long remaining_bytes; - long next_page; long bytes_freed; long old_bytes_used; @@ -2458,10 +2489,10 @@ maybe_adjust_large_object(lispobj *where) static void preserve_pointer(void *addr) { - long addr_page_index = find_page_index(addr); - long first_page; - long i; - unsigned region_allocation; + page_index_t addr_page_index = find_page_index(addr); + page_index_t first_page; + page_index_t i; + unsigned int region_allocation; /* quick check 1: Address is quite likely to have been invalid. */ if ((addr_page_index == -1) @@ -2479,7 +2510,7 @@ preserve_pointer(void *addr) /* quick check 2: Check the offset within the page. * */ - if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used) + if (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used) return; /* Filter out anything which can't be a pointer to a Lisp object @@ -2524,7 +2555,7 @@ preserve_pointer(void *addr) if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ - || (((unsigned)addr & (PAGE_BYTES - 1)) + || (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)) { FSHOW((stderr, "weird? ignore ptr 0x%x to freed area of large object\n", @@ -2585,9 +2616,9 @@ preserve_pointer(void *addr) * * We return 1 if the page was write-protected, else 0. 
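 * (An editor's sketch of the matching fault-handler side follows.)
 */

/* EDITOR'S SKETCH -- not part of this patch. Write protection is only
 * half of the barrier; the other half runs when Lisp later writes to
 * such a page and faults. The runtime's signal machinery is expected
 * to call gencgc_handle_wp_violation(), defined near the end of this
 * file, roughly as below. The handler name and signature here are
 * schematic, not the runtime's actual entry point. */
static void
example_memory_fault_handler(void *fault_addr)
{
    if (!gencgc_handle_wp_violation(fault_addr)) {
        /* Not a GC write-barrier fault, so a genuine memory error. */
        lose("unhandled memory fault at %p\n", fault_addr);
    }
}

/* (End of editor's sketch.)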
*/ static int -update_page_write_prot(long page) +update_page_write_prot(page_index_t page) { - int gen = page_table[page].gen; + generation_index_t gen = page_table[page].gen; long j; int wp_it = 1; void **page_addr = (void **)page_address(page); @@ -2599,6 +2630,7 @@ update_page_write_prot(long page) /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected + /* FIXME: What's the reason for not write-protecting pinned pages? */ || page_table[page].dont_move || (page_table[page].allocated & UNBOXED_PAGE_FLAG)) return (0); @@ -2608,7 +2640,7 @@ update_page_write_prot(long page) for (j = 0; j < num_words; j++) { void *ptr = *(page_addr+j); - long index = find_page_index(ptr); + page_index_t index = find_page_index(ptr); /* Check that it's in the dynamic space */ if (index != -1) @@ -2616,7 +2648,7 @@ update_page_write_prot(long page) ((page_table[index].allocated != FREE_PAGE_FLAG) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) - || (page_table[index].gen == NUM_GENERATIONS))) + || (page_table[index].gen == SCRATCH_GENERATION))) /* Or does it point within a current gc_alloc() region? */ || ((boxed_region.start_addr <= ptr) @@ -2643,11 +2675,9 @@ update_page_write_prot(long page) return (wp_it); } -/* Scavenge a generation. - * - * This will not resolve all pointers when generation is the new - * space, as new objects may be added which are not checked here - use - * scavenge_newspace generation. +/* Scavenge all generations from FROM to TO, inclusive, except for + * new_space which needs special handling, as new objects may be + * added which are not checked here - use scavenge_newspace generation. * * Write-protected pages should not have any pointers to the * from_space so do need scavenging; thus write-protected pages are @@ -2675,9 +2705,9 @@ update_page_write_prot(long page) * pointers as the objects contain a link to the next and are written * if a weak pointer is scavenged. Still it's a useful check. 
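 *
 * (Editor's note, not part of this patch: the range interface lets
 * garbage_collect_generation() drop its old caller-side loop,
 *
 *   for (i = 0; i < NUM_GENERATIONS; i++)
 *       if ((i != generation) && (i != new_space))
 *           scavenge_generation(i);
 *
 * in favor of the single call seen later in this diff,
 *
 *   scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);
 *
 * which is equivalent in effect: generations younger than the one
 * being collected are already empty when it runs, and new_space is now
 * skipped inside the function itself.)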
*/ static void -scavenge_generation(int generation) +scavenge_generations(generation_index_t from, generation_index_t to) { - long i; + page_index_t i; int num_wp = 0; #define SC_GEN_CK 0 @@ -2688,10 +2718,13 @@ scavenge_generation(int generation) #endif for (i = 0; i < last_free_page; i++) { + generation_index_t generation = page_table[i].gen; if ((page_table[i].allocated & BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) - && (page_table[i].gen == generation)) { - long last_page,j; + && (generation != new_space) + && (generation >= from) + && (generation <= to)) { + page_index_t last_page,j; int write_protected=1; /* This should be the start of a region */ @@ -2721,15 +2754,15 @@ scavenge_generation(int generation) num_wp += update_page_write_prot(j); } } + if ((gencgc_verbose > 1) && (num_wp != 0)) { + FSHOW((stderr, + "/write protected %d pages within generation %d\n", + num_wp, generation)); + } } i = last_page; } } - if ((gencgc_verbose > 1) && (num_wp != 0)) { - FSHOW((stderr, - "/write protected %d pages within generation %d\n", - num_wp, generation)); - } #if SC_GEN_CK /* Check that none of the write_protected pages in this generation @@ -2745,7 +2778,7 @@ scavenge_generation(int generation) page_table[i].bytes_used, page_table[i].first_object_offset, page_table[i].dont_move)); - lose("write to protected page %d in scavenge_generation()", i); + lose("write to protected page %d in scavenge_generation()\n", i); } } #endif @@ -2780,9 +2813,9 @@ static struct new_area new_areas_2[NUM_NEW_AREAS]; * complete the job as new objects may be added to the generation in * the process which are not scavenged. */ static void -scavenge_newspace_generation_one_scan(int generation) +scavenge_newspace_generation_one_scan(generation_index_t generation) { - long i; + page_index_t i; FSHOW((stderr, "/starting one full scan of newspace generation %d\n", @@ -2796,7 +2829,7 @@ scavenge_newspace_generation_one_scan(int generation) /* (This may be redundant as write_protected is now * cleared before promotion.) */ || (page_table[i].dont_move == 1))) { - long last_page; + page_index_t last_page; int all_wp=1; /* The scavenge will start at the first_object_offset of page i. @@ -2848,7 +2881,7 @@ scavenge_newspace_generation_one_scan(int generation) /* Do a complete scavenge of the newspace generation. 
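 * (Editor's note, not part of this patch: "complete" is achieved by
 * ping-ponging between the two fixed-size buffers, new_areas_1 and
 * new_areas_2. Each pass scavenges the areas recorded by the previous
 * pass while recording where it copies new objects, and the loop stops
 * once a pass records nothing. If a pass overflows NUM_NEW_AREAS, the
 * code falls back to a full rescan via
 * scavenge_newspace_generation_one_scan().)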
*/ static void -scavenge_newspace_generation(int generation) +scavenge_newspace_generation(generation_index_t generation) { long i; @@ -2961,7 +2994,7 @@ scavenge_newspace_generation(int generation) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) && (page_table[i].dont_move == 0)) { - lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d", + lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n", i, generation, page_table[i].dont_move); } } @@ -2976,7 +3009,7 @@ scavenge_newspace_generation(int generation) static void unprotect_oldspace(void) { - long i; + page_index_t i; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != FREE_PAGE_FLAG) @@ -3004,7 +3037,7 @@ static long free_oldspace(void) { long bytes_freed = 0; - long first_page, last_page; + page_index_t first_page, last_page; first_page = 0; @@ -3060,8 +3093,8 @@ free_oldspace(void) os_invalidate(page_start, PAGE_BYTES*(last_page-first_page)); addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page)); if (addr == NULL || addr != page_start) { - lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start, - addr); + lose("free_oldspace: page moved, 0x%08x ==> 0x%08x\n", + page_start, addr); } } else { long *page_start; @@ -3084,7 +3117,7 @@ static void print_ptr(lispobj *addr) { /* If addr is in the dynamic space then out the page information. */ - long pi1 = find_page_index((void*)addr); + page_index_t pi1 = find_page_index((void*)addr); if (pi1 != -1) fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n", @@ -3115,15 +3148,15 @@ verify_space(lispobj *start, size_t words) { int is_in_dynamic_space = (find_page_index((void*)start) != -1); int is_in_readonly_space = - (READ_ONLY_SPACE_START <= (unsigned)start && - (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); + (READ_ONLY_SPACE_START <= (unsigned long)start && + (unsigned long)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); while (words > 0) { size_t count = 1; lispobj thing = *(lispobj*)start; if (is_lisp_pointer(thing)) { - long page_index = find_page_index((void*)thing); + page_index_t page_index = find_page_index((void*)thing); long to_readonly_space = (READ_ONLY_SPACE_START <= thing && thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); @@ -3137,15 +3170,15 @@ verify_space(lispobj *start, size_t words) * page. XX Could check the offset too. */ if ((page_table[page_index].allocated != FREE_PAGE_FLAG) && (page_table[page_index].bytes_used == 0)) - lose ("Ptr %x @ %x sees free page.", thing, start); + lose ("Ptr %x @ %x sees free page.\n", thing, start); /* Check that it doesn't point to a forwarding pointer! */ if (*((lispobj *)native_pointer(thing)) == 0x01) { - lose("Ptr %x @ %x sees forwarding ptr.", thing, start); + lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start); } /* Check that its not in the RO space as it would then be a * pointer from the RO to the dynamic space. */ if (is_in_readonly_space) { - lose("ptr to dynamic space %x from RO space %x", + lose("ptr to dynamic space %x from RO space %x\n", thing, start); } /* Does it point to a plausible object? This check slows @@ -3159,14 +3192,14 @@ verify_space(lispobj *start, size_t words) * dynamically. 
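             * An editor's sketch of that variable, not part of this
             * patch: a hypothetical file-scope flag
             *
             *   boolean verify_dynamic_space_pointers = 0;
             *
             * guarding the check as
             *
             *   if (verify_dynamic_space_pointers
             *       && !possibly_valid_dynamic_space_pointer((lispobj *)thing))
             *       lose("ptr %x to invalid object %x\n", thing, start);
             *
             * would let the cost be paid only when it is enabled at
             * run time.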
*/ /* if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) { - lose("ptr %x to invalid object %x", thing, start); + lose("ptr %x to invalid object %x\n", thing, start); } */ } else { /* Verify that it points to another valid space. */ if (!to_readonly_space && !to_static_space - && (thing != (unsigned)&undefined_tramp)) { - lose("Ptr %x @ %x sees junk.", thing, start); + && (thing != (unsigned long)&undefined_tramp)) { + lose("Ptr %x @ %x sees junk.\n", thing, start); } } } else { @@ -3373,15 +3406,15 @@ verify_gc(void) } static void -verify_generation(int generation) +verify_generation(generation_index_t generation) { - int i; + page_index_t i; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { - long last_page; + page_index_t last_page; int region_allocation = page_table[i].allocated; /* This should be the start of a contiguous block */ @@ -3414,7 +3447,7 @@ verify_generation(int generation) static void verify_zero_fill(void) { - long page; + page_index_t page; for (page = 0; page < last_free_page; page++) { if (page_table[page].allocated == FREE_PAGE_FLAG) { @@ -3424,19 +3457,19 @@ verify_zero_fill(void) long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { - lose("free page not zero at %x", start_addr + i); + lose("free page not zero at %x\n", start_addr + i); } } } else { long free_bytes = PAGE_BYTES - page_table[page].bytes_used; if (free_bytes > 0) { - long *start_addr = (long *)((unsigned)page_address(page) + long *start_addr = (long *)((unsigned long)page_address(page) + page_table[page].bytes_used); long size = free_bytes / N_WORD_BYTES; long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { - lose("free region not zero at %x", start_addr + i); + lose("free region not zero at %x\n", start_addr + i); } } } @@ -3457,9 +3490,9 @@ gencgc_verify_zero_fill(void) static void verify_dynamic_space(void) { - long i; + generation_index_t i; - for (i = 0; i < NUM_GENERATIONS; i++) + for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++) verify_generation(i); if (gencgc_enable_verify_zero_fill) @@ -3468,28 +3501,41 @@ verify_dynamic_space(void) /* Write-protect all the dynamic boxed pages in the given generation. */ static void -write_protect_generation_pages(int generation) +write_protect_generation_pages(generation_index_t generation) { - long i; + page_index_t start; - gc_assert(generation < NUM_GENERATIONS); + gc_assert(generation < SCRATCH_GENERATION); - for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated == BOXED_PAGE_FLAG) - && (page_table[i].bytes_used != 0) - && !page_table[i].dont_move - && (page_table[i].gen == generation)) { + for (start = 0; start < last_free_page; start++) { + if ((page_table[start].allocated == BOXED_PAGE_FLAG) + && (page_table[start].bytes_used != 0) + && !page_table[start].dont_move + && (page_table[start].gen == generation)) { void *page_start; + page_index_t last; - page_start = (void *)page_address(i); + /* Note the page as protected in the page tables. 
*/ + page_table[start].write_protected = 1; + + for (last = start + 1; last < last_free_page; last++) { + if ((page_table[last].allocated != BOXED_PAGE_FLAG) + || (page_table[last].bytes_used == 0) + || page_table[last].dont_move + || (page_table[last].gen != generation)) + break; + page_table[last].write_protected = 1; + } + + page_start = (void *)page_address(start); os_protect(page_start, - PAGE_BYTES, + PAGE_BYTES * (last - start), OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); - /* Note the page as protected in the page tables. */ - page_table[i].write_protected = 1; + start = last; } + } if (gencgc_verbose > 1) { FSHOW((stderr, @@ -3503,16 +3549,16 @@ write_protect_generation_pages(int generation) /* Garbage collect a generation. If raise is 0 then the remains of the * generation are not raised to the next generation. */ static void -garbage_collect_generation(int generation, int raise) +garbage_collect_generation(generation_index_t generation, int raise) { unsigned long bytes_freed; - unsigned long i; + page_index_t i; unsigned long static_space_size; struct thread *th; - gc_assert(generation <= (NUM_GENERATIONS-1)); + gc_assert(generation <= HIGHEST_NORMAL_GENERATION); /* The oldest generation can't be raised. */ - gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0)); + gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0)); /* Initialize the weak pointer list. */ weak_pointers = NULL; @@ -3522,7 +3568,7 @@ garbage_collect_generation(int generation, int raise) * done. Set up this new generation. There should be no pages * allocated to it yet. */ if (!raise) { - gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0); + gc_assert(generations[SCRATCH_GENERATION].bytes_allocated == 0); } /* Set the global src and dest. generations */ @@ -3530,7 +3576,7 @@ garbage_collect_generation(int generation, int raise) if (raise) new_space = generation+1; else - new_space = NUM_GENERATIONS; + new_space = SCRATCH_GENERATION; /* Change to a new space for allocation, resetting the alloc_start_page */ gc_alloc_generation = new_space; @@ -3567,35 +3613,41 @@ garbage_collect_generation(int generation, int raise) /* we assume that none of the preceding applies to the thread that * initiates GC. If you ever call GC from inside an altstack * handler, you will lose. */ - for_each_thread(th) { - void **ptr; - void **esp=(void **)-1; + + /* And if we're saving a core, there's no point in being conservative. */ + if (conservative_stack) { + for_each_thread(th) { + void **ptr; + void **esp=(void **)-1; #ifdef LISP_FEATURE_SB_THREAD - long i,free; - if(th==arch_os_get_current_thread()) { - esp = (void **) &raise; - } else { - void **esp1; - free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th)); - for(i=free-1;i>=0;i--) { - os_context_t *c=th->interrupt_contexts[i]; - esp1 = (void **) *os_context_register_addr(c,reg_SP); - if(esp1>=th->control_stack_start&& esp1control_stack_end){ - if(esp1=(void **)c; ptr--) { - preserve_pointer(*ptr); + long i,free; + if(th==arch_os_get_current_thread()) { + /* Somebody is going to burn in hell for this, but casting + * it in two steps shuts gcc up about strict aliasing. 
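                 * (Editor's note, not part of this patch: a one-step
                 * (void **)&raise converts int * to void ** directly,
                 * which gcc's strict-aliasing analysis flags because
                 * int and void * are incompatible effective types;
                 * routing the conversion through a plain (void *)
                 * first keeps it quiet. The address of the local
                 * "raise" is used only as an approximation of the
                 * current C stack pointer, the lowest address the
                 * conservative scan must cover.)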
*/ + esp = (void **)((void *)&raise); + } else { + void **esp1; + free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th)); + for(i=free-1;i>=0;i--) { + os_context_t *c=th->interrupt_contexts[i]; + esp1 = (void **) *os_context_register_addr(c,reg_SP); + if (esp1>=(void **)th->control_stack_start && + esp1<(void **)th->control_stack_end) { + if(esp1=(void **)c; ptr--) { + preserve_pointer(*ptr); + } } } } - } #else - esp = (void **) &raise; + esp = (void **)((void *)&raise); #endif - for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) { - preserve_pointer(*ptr); + for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) { + preserve_pointer(*ptr); + } } } - #ifdef QSHOW if (gencgc_verbose > 1) { long num_dont_move_pages = count_dont_move_pages(); @@ -3610,28 +3662,25 @@ garbage_collect_generation(int generation, int raise) /* Scavenge the Lisp functions of the interrupt handlers, taking * care to avoid SIG_DFL and SIG_IGN. */ - for_each_thread(th) { - struct interrupt_data *data=th->interrupt_data; for (i = 0; i < NSIG; i++) { - union interrupt_handler handler = data->interrupt_handlers[i]; + union interrupt_handler handler = interrupt_handlers[i]; if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) && !ARE_SAME_HANDLER(handler.c, SIG_DFL)) { - scavenge((lispobj *)(data->interrupt_handlers + i), 1); - } + scavenge((lispobj *)(interrupt_handlers + i), 1); } } /* Scavenge the binding stacks. */ - { - struct thread *th; - for_each_thread(th) { - long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) - - th->binding_stack_start; - scavenge((lispobj *) th->binding_stack_start,len); + { + struct thread *th; + for_each_thread(th) { + long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) - + th->binding_stack_start; + scavenge((lispobj *) th->binding_stack_start,len); #ifdef LISP_FEATURE_SB_THREAD - /* do the tls as well */ - len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) - - (sizeof (struct thread))/(sizeof (lispobj)); - scavenge((lispobj *) (th+1),len); + /* do the tls as well */ + len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) - + (sizeof (struct thread))/(sizeof (lispobj)); + scavenge((lispobj *) (th+1),len); #endif } } @@ -3669,11 +3718,7 @@ garbage_collect_generation(int generation, int raise) /* All generations but the generation being GCed need to be * scavenged. The new_space generation needs special handling as * objects may be moved in - it is handled separately below. */ - for (i = 0; i < NUM_GENERATIONS; i++) { - if ((i != generation) && (i != new_space)) { - scavenge_generation(i); - } - } + scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION); /* Finally scavenge the new_space generation. 
Keep going until no * more objects are moved into the new generation */ @@ -3702,7 +3747,7 @@ garbage_collect_generation(int generation, int raise) bytes_allocated = bytes_allocated - old_bytes_allocated; if (bytes_allocated != 0) { - lose("Rescan of new_space allocated %d more bytes.", + lose("Rescan of new_space allocated %d more bytes.\n", bytes_allocated); } } @@ -3721,12 +3766,12 @@ garbage_collect_generation(int generation, int raise) if (!raise) { for (i = 0; i < last_free_page; i++) if ((page_table[i].bytes_used != 0) - && (page_table[i].gen == NUM_GENERATIONS)) + && (page_table[i].gen == SCRATCH_GENERATION)) page_table[i].gen = generation; gc_assert(generations[generation].bytes_allocated == 0); generations[generation].bytes_allocated = - generations[NUM_GENERATIONS].bytes_allocated; - generations[NUM_GENERATIONS].bytes_allocated = 0; + generations[SCRATCH_GENERATION].bytes_allocated; + generations[SCRATCH_GENERATION].bytes_allocated = 0; } /* Reset the alloc_start_page for generation. */ @@ -3755,10 +3800,9 @@ garbage_collect_generation(int generation, int raise) /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */ long -update_x86_dynamic_space_free_pointer(void) +update_dynamic_space_free_pointer(void) { - long last_page = -1; - long i; + page_index_t last_page = -1, i; for (i = 0; i < last_free_page; i++) if ((page_table[i].allocated != FREE_PAGE_FLAG) @@ -3782,16 +3826,15 @@ update_x86_dynamic_space_free_pointer(void) * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */ void -collect_garbage(unsigned last_gen) +collect_garbage(generation_index_t last_gen) { - int gen = 0; + generation_index_t gen = 0, i; int raise; int gen_to_wp; - long i; FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen)); - if (last_gen > NUM_GENERATIONS) { + if (last_gen > HIGHEST_NORMAL_GENERATION+1) { FSHOW((stderr, "/collect_garbage: last_gen = %d, doing a level 0 GC\n", last_gen)); @@ -3878,7 +3921,7 @@ collect_garbage(unsigned last_gen) /* Check that they are all empty. */ for (i = 0; i < gen_to_wp; i++) { if (generations[i].bytes_allocated) - lose("trying to write-protect gen. %d when gen. %d nonempty", + lose("trying to write-protect gen. %d when gen. %d nonempty\n", gen_to_wp, i); } write_protect_generation_pages(gen_to_wp); @@ -3889,7 +3932,7 @@ collect_garbage(unsigned last_gen) gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0); gc_alloc_generation = 0; - update_x86_dynamic_space_free_pointer(); + update_dynamic_space_free_pointer(); auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs; if(gencgc_verbose) fprintf(stderr,"Next gc when %ld bytes have been consed\n", @@ -3905,7 +3948,7 @@ collect_garbage(unsigned last_gen) void gc_free_heap(void) { - long page; + page_index_t page; if (gencgc_verbose > 1) SHOW("entering gc_free_heap"); @@ -3923,6 +3966,7 @@ gc_free_heap(void) page_table[page].allocated = FREE_PAGE_FLAG; page_table[page].bytes_used = 0; +#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */ /* Zero the page. 
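             * (Editor's note, not part of this patch: "zeroing" here
             * is done by unmapping and remapping the page, via
             * os_invalidate() followed by os_validate(), so the kernel
             * supplies fresh demand-zero memory rather than the
             * runtime writing PAGE_BYTES of zeros itself. The
             * gencgc_zero_check_during_free_heap branch below
             * re-verifies the invariant word by word when enabled.)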
*/ page_start = (void *)page_address(page); @@ -3933,19 +3977,23 @@ gc_free_heap(void) os_invalidate(page_start,PAGE_BYTES); addr = os_validate(page_start,PAGE_BYTES); if (addr == NULL || addr != page_start) { - lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x", + lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n", page_start, addr); } +#else + page_table[page].write_protected = 0; +#endif } else if (gencgc_zero_check_during_free_heap) { /* Double-check that the page is zero filled. */ - long *page_start, i; + long *page_start; + page_index_t i; gc_assert(page_table[page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[page].bytes_used == 0); page_start = (long *)page_address(page); for (i=0; i<1024; i++) { if (page_start[i] != 0) { - lose("free region not zero at %x", page_start + i); + lose("free region not zero at %x\n", page_start + i); } } } @@ -3988,7 +4036,7 @@ gc_free_heap(void) void gc_init(void) { - long i; + page_index_t i; gc_init_tables(); scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector; @@ -4033,7 +4081,6 @@ gc_init(void) gc_set_region_empty(&unboxed_region); last_free_page = 0; - } /* Pick up the dynamic space from after a core load. @@ -4044,30 +4091,39 @@ gc_init(void) static void gencgc_pickup_dynamic(void) { - long page = 0; + page_index_t page = 0; long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); lispobj *prev=(lispobj *)page_address(page); + generation_index_t gen = PSEUDO_STATIC_GENERATION; do { lispobj *first,*ptr= (lispobj *)page_address(page); page_table[page].allocated = BOXED_PAGE_FLAG; - page_table[page].gen = 0; + page_table[page].gen = gen; page_table[page].bytes_used = PAGE_BYTES; page_table[page].large_object = 0; - - first=gc_search_space(prev,(ptr+2)-prev,ptr); - if(ptr == first) prev=ptr; - page_table[page].first_object_offset = - (void *)prev - page_address(page); + page_table[page].write_protected = 0; + page_table[page].write_protected_cleared = 0; + page_table[page].dont_move = 0; + + if (!gencgc_partial_pickup) { + first=gc_search_space(prev,(ptr+2)-prev,ptr); + if(ptr == first) prev=ptr; + page_table[page].first_object_offset = + (void *)prev - page_address(page); + } page++; - } while (page_address(page) < alloc_ptr); + } while ((long)page_address(page) < alloc_ptr); + + last_free_page = page; - generations[0].bytes_allocated = PAGE_BYTES*page; + generations[gen].bytes_allocated = PAGE_BYTES*page; bytes_allocated = PAGE_BYTES*page; + gc_alloc_update_all_page_tables(); + write_protect_generation_pages(gen); } - void gc_initialize_pointers(void) { @@ -4092,10 +4148,10 @@ gc_initialize_pointers(void) char * alloc(long nbytes) { - struct thread *th=arch_os_get_current_thread(); + struct thread *thread=arch_os_get_current_thread(); struct alloc_region *region= #ifdef LISP_FEATURE_SB_THREAD - th ? &(th->alloc_region) : &boxed_region; + thread ? &(thread->alloc_region) : &boxed_region; #else &boxed_region; #endif @@ -4103,7 +4159,7 @@ alloc(long nbytes) void *new_free_pointer; gc_assert(nbytes>0); /* Check for alignment allocation problems. 
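     * (Editor's note, not part of this patch: Lisp pointers carry
     * their type tag in the low bits, so objects must stay aligned to
     * LOWTAG_MASK + 1 bytes: 8 on 32-bit x86, 16 on x86-64. Both the
     * region's free pointer and the request size must therefore have
     * all LOWTAG_MASK bits clear; on x86, for instance, a 24-byte
     * request passes the assertion below while a 20-byte one would
     * indicate a caller bug.)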
*/ - gc_assert((((unsigned)region->free_pointer & LOWTAG_MASK) == 0) + gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0) && ((nbytes & LOWTAG_MASK) == 0)); #if 0 if(all_threads) @@ -4137,35 +4193,16 @@ alloc(long nbytes) * we should GC in the near future */ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - struct thread *thread=arch_os_get_current_thread(); + gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread))); /* Don't flood the system with interrupts if the need to gc is * already noted. This can happen for example when SUB-GC * allocates or after a gc triggered in a WITHOUT-GCING. */ - if (SymbolValue(NEED_TO_COLLECT_GARBAGE,thread) == NIL) { + if (SymbolValue(GC_PENDING,thread) == NIL) { /* set things up so that GC happens when we finish the PA - * section. We only do this if there wasn't a pending - * handler already, in case it was a gc. If it wasn't a - * GC, the next allocation will get us back to this point - * anyway, so no harm done - */ - struct interrupt_data *data=th->interrupt_data; - sigset_t new_mask,old_mask; - sigemptyset(&new_mask); - sigaddset_blockable(&new_mask); - thread_sigmask(SIG_BLOCK,&new_mask,&old_mask); - - if(!data->pending_handler) { - if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0)) - lose("Not in atomic: %d.\n", - SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread)); - /* Leave the signals blocked just as if it was - * deferred the normal way and set the - * pending_mask. */ - sigcopyset(&(data->pending_mask),&old_mask); - SetSymbolValue(NEED_TO_COLLECT_GARBAGE,T,thread); - } else { - thread_sigmask(SIG_SETMASK,&old_mask,0); - } + * section */ + SetSymbolValue(GC_PENDING,T,thread); + if (SymbolValue(GC_INHIBIT,thread) == NIL) + arch_set_pseudo_atomic_interrupted(0); } } new_obj = gc_alloc_with_region(nbytes,0,region,0); @@ -4192,7 +4229,7 @@ void unhandled_sigmemoryfault(void); int gencgc_handle_wp_violation(void* fault_addr) { - long page_index = find_page_index(fault_addr); + page_index_t page_index = find_page_index(fault_addr); #ifdef QSHOW_SIGNALS FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n", @@ -4223,7 +4260,7 @@ gencgc_handle_wp_violation(void* fault_addr) * does this test after the first one has already set wp=0 */ if(page_table[page_index].write_protected_cleared != 1) - lose("fault in heap page not marked as write-protected"); + lose("fault in heap page not marked as write-protected\n"); } /* Don't worry, we can handle it. */ return 1; @@ -4246,6 +4283,7 @@ void gc_alloc_update_all_page_tables(void) gc_alloc_update_page_tables(1, &unboxed_region); gc_alloc_update_page_tables(0, &boxed_region); } + void gc_set_region_empty(struct alloc_region *region) { @@ -4256,3 +4294,64 @@ gc_set_region_empty(struct alloc_region *region) region->end_addr = page_address(0); } +/* Things to do before doing a final GC before saving a core (without + * purify). + * + * + Pages in large_object pages aren't moved by the GC, so we need to + * unset that flag from all pages. + * + The pseudo-static generation isn't normally collected, but it seems + * reasonable to collect it at least when saving a core. So move the + * pages to a normal generation. 
+ */ +static void +prepare_for_final_gc () +{ + page_index_t i; + for (i = 0; i < last_free_page; i++) { + page_table[i].large_object = 0; + if (page_table[i].gen == PSEUDO_STATIC_GENERATION) { + int used = page_table[i].bytes_used; + page_table[i].gen = HIGHEST_NORMAL_GENERATION; + generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used; + generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used; + } + } +} + + +/* Do a non-conservative GC, and then save a core with the initial + * function being set to the value of the static symbol + * SB!VM:RESTART-LISP-FUNCTION */ +void +gc_and_save(char *filename) +{ + FILE *file = open_core_for_saving(filename); + if (!file) { + perror(filename); + return; + } + conservative_stack = 0; + + /* The filename might come from Lisp, and be moved by the now + * non-conservative GC. */ + filename = strdup(filename); + + /* Collect twice: once into relatively high memory, and then back + * into low memory. This compacts the retained data into the lower + * pages, minimizing the size of the core file. + */ + prepare_for_final_gc(); + gencgc_alloc_start_page = last_free_page; + collect_garbage(HIGHEST_NORMAL_GENERATION+1); + + prepare_for_final_gc(); + gencgc_alloc_start_page = -1; + collect_garbage(HIGHEST_NORMAL_GENERATION+1); + + save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0)); + /* Oops. Save still managed to fail. Since we've mangled the stack + * beyond hope, there's not much we can do. + * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's + * going to be rather unsatisfactory too... */ + lose("Attempt to save core after non-conservative GC failed.\n"); +}