X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=fe602ffbd10c59916c809a70d9aaf93af3c68a23;hb=f0cb0cf9c0fe1b6fce5d10dbd34a0b7b249c4ae8;hp=ca7fa4713f1e150858df7ee11f0df8a35999d7ac;hpb=6e6670a5c26b3594a0eaa8da59db75b48e0db878;p=sbcl.git

diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index ca7fa47..5d218b1 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -1,5 +1,5 @@
 /*
- * GENerational Conservative Garbage Collector for SBCL x86
+ * GENerational Conservative Garbage Collector for SBCL
  */
 
 /*
@@ -24,11 +24,16 @@
  *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
  */
 
+#include <stdlib.h>
 #include <stdio.h>
-#include <signal.h>
 #include <errno.h>
 #include <string.h>
 #include "sbcl.h"
+#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
+#include "pthreads_win32.h"
+#else
+#include <signal.h>
+#endif
 #include "runtime.h"
 #include "os.h"
 #include "interr.h"
@@ -37,19 +42,27 @@
 #include "validate.h"
 #include "lispregs.h"
 #include "arch.h"
-#include "fixnump.h"
 #include "gc.h"
 #include "gc-internal.h"
 #include "thread.h"
+#include "pseudo-atomic.h"
+#include "alloc.h"
 #include "genesis/vector.h"
 #include "genesis/weak-pointer.h"
+#include "genesis/fdefn.h"
 #include "genesis/simple-fun.h"
 #include "save.h"
 #include "genesis/hash-table.h"
+#include "genesis/instance.h"
+#include "genesis/layout.h"
+#include "gencgc.h"
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+#include "genesis/cons.h"
+#endif
 
 /* forward declarations */
-page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes,
-                                   int unboxed);
+page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
+                                   int page_type_flag);
 
 
 /*
@@ -60,9 +73,7 @@ page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes,
  * scratch space by the collector, and should never get collected.
  */
 enum {
-    HIGHEST_NORMAL_GENERATION = 5,
-    PSEUDO_STATIC_GENERATION,
-    SCRATCH_GENERATION,
+    SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
     NUM_GENERATIONS
 };
 
@@ -71,7 +82,16 @@ enum {
 boolean enable_page_protection = 1;
 
 /* the minimum size (in bytes) for a large object*/
-unsigned long large_object_size = 4 * PAGE_BYTES;
+#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
+os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
+os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
+#else
+os_vm_size_t large_object_size = 4 * PAGE_BYTES;
+#endif
+
+/* Largest allocation seen since last GC. */
+os_vm_size_t large_allocation = 0;
 
 
 /*
@@ -80,7 +100,7 @@ unsigned long large_object_size = 4 * PAGE_BYTES;
 
 /* the verbosity level. All non-error messages are disabled at level 0;
  * and only a few rare messages are printed at level 1. */
-#ifdef QSHOW
+#if QSHOW == 2
 boolean gencgc_verbose = 1;
 #else
 boolean gencgc_verbose = 0;
@@ -105,8 +125,10 @@ boolean verify_after_free_heap = 0;
  * during a heap verify? */
 boolean verify_dynamic_code_check = 0;
 
+#ifdef LISP_FEATURE_X86
 /* Should we check code objects for fixup errors after they are transported? */
 boolean check_code_fixups = 0;
+#endif
 
 /* Should we check that newly allocated regions are zero filled? */
 boolean gencgc_zero_check = 0;
@@ -136,39 +158,111 @@ boolean gencgc_partial_pickup = 0;
  */
 
 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE.
*/ -unsigned long bytes_allocated = 0; -extern unsigned long bytes_consed_between_gcs; /* gc-common.c */ -unsigned long auto_gc_trigger = 0; +os_vm_size_t bytes_allocated = 0; +os_vm_size_t auto_gc_trigger = 0; /* the source and destination generations. These are set before a GC starts * scavenging. */ generation_index_t from_space; generation_index_t new_space; +/* Set to 1 when in GC */ +boolean gc_active_p = 0; + /* should the GC be conservative on stack. If false (only right before * saving a core), don't scan the stack / mark pages dont_move. */ static boolean conservative_stack = 1; -/* An array of page structures is statically allocated. - * This helps quickly map between an address its page structure. - * NUM_PAGES is set from the size of the dynamic space. */ -struct page page_table[NUM_PAGES]; +/* An array of page structures is allocated on gc initialization. + * This helps to quickly map between an address and its page structure. + * page_table_pages is set from the size of the dynamic space. */ +page_index_t page_table_pages; +struct page *page_table; + +static inline boolean page_allocated_p(page_index_t page) { + return (page_table[page].allocated != FREE_PAGE_FLAG); +} + +static inline boolean page_no_region_p(page_index_t page) { + return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG); +} + +static inline boolean page_allocated_no_region_p(page_index_t page) { + return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG)) + && page_no_region_p(page)); +} + +static inline boolean page_free_p(page_index_t page) { + return (page_table[page].allocated == FREE_PAGE_FLAG); +} + +static inline boolean page_boxed_p(page_index_t page) { + return (page_table[page].allocated & BOXED_PAGE_FLAG); +} + +static inline boolean code_page_p(page_index_t page) { + return (page_table[page].allocated & CODE_PAGE_FLAG); +} + +static inline boolean page_boxed_no_region_p(page_index_t page) { + return page_boxed_p(page) && page_no_region_p(page); +} + +static inline boolean page_unboxed_p(page_index_t page) { + /* Both flags set == boxed code page */ + return ((page_table[page].allocated & UNBOXED_PAGE_FLAG) + && !page_boxed_p(page)); +} + +static inline boolean protect_page_p(page_index_t page, generation_index_t generation) { + return (page_boxed_no_region_p(page) + && (page_table[page].bytes_used != 0) + && !page_table[page].dont_move + && (page_table[page].gen == generation)); +} /* To map addresses to page structures the address of the first page * is needed. */ -static void *heap_base = NULL; - -#if N_WORD_BITS == 32 - #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG -#elif N_WORD_BITS == 64 - #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG -#endif +void *heap_base = NULL; /* Calculate the start address for the given page number. */ inline void * page_address(page_index_t page_num) { - return (heap_base + (page_num * PAGE_BYTES)); + return (heap_base + (page_num * GENCGC_CARD_BYTES)); +} + +/* Calculate the address where the allocation region associated with + * the page starts. */ +static inline void * +page_scan_start(page_index_t page_index) +{ + return page_address(page_index)-page_table[page_index].scan_start_offset; +} + +/* True if the page starts a contiguous block. */ +static inline boolean +page_starts_contiguous_block_p(page_index_t page_index) +{ + return page_table[page_index].scan_start_offset == 0; +} + +/* True if the page is the last page in a contiguous block. 
*/ +static inline boolean +page_ends_contiguous_block_p(page_index_t page_index, generation_index_t gen) +{ + return (/* page doesn't fill block */ + (page_table[page_index].bytes_used < GENCGC_CARD_BYTES) + /* page is last allocated page */ + || ((page_index + 1) >= last_free_page) + /* next page free */ + || page_free_p(page_index + 1) + /* next page contains no data */ + || (page_table[page_index + 1].bytes_used == 0) + /* next page is in different generation */ + || (page_table[page_index + 1].gen != gen) + /* next page starts its own contiguous block */ + || (page_starts_contiguous_block_p(page_index + 1))); } /* Find the page index within the page_table for the given @@ -176,18 +270,37 @@ page_address(page_index_t page_num) inline page_index_t find_page_index(void *addr) { - page_index_t index = addr-heap_base; - - if (index >= 0) { - index = ((unsigned long)index)/PAGE_BYTES; - if (index < NUM_PAGES) + if (addr >= heap_base) { + page_index_t index = ((pointer_sized_uint_t)addr - + (pointer_sized_uint_t)heap_base) / GENCGC_CARD_BYTES; + if (index < page_table_pages) return (index); } - return (-1); } -/* a structure to hold the state of a generation */ +static os_vm_size_t +npage_bytes(page_index_t npages) +{ + gc_assert(npages>=0); + return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES; +} + +/* Check that X is a higher address than Y and return offset from Y to + * X in bytes. */ +static inline os_vm_size_t +void_diff(void *x, void *y) +{ + gc_assert(x >= y); + return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y; +} + +/* a structure to hold the state of a generation + * + * CAUTION: If you modify this, make sure to touch up the alien + * definition in src/code/gc.lisp accordingly. ...or better yes, + * deal with the FIXME there... + */ struct generation { /* the first page that gc_alloc() checks on its next call */ @@ -206,32 +319,32 @@ struct generation { page_index_t alloc_large_unboxed_start_page; /* the bytes allocated to this generation */ - long bytes_allocated; + os_vm_size_t bytes_allocated; /* the number of bytes at which to trigger a GC */ - long gc_trigger; + os_vm_size_t gc_trigger; /* to calculate a new level for gc_trigger */ - long bytes_consed_between_gc; + os_vm_size_t bytes_consed_between_gc; /* the number of GCs since the last raise */ int num_gc; - /* the average age after which a GC will raise objects to the + /* the number of GCs to run on the generations before raising objects to the * next generation */ - int trigger_age; + int number_of_gcs_before_promotion; /* the cumulative sum of the bytes allocated to this generation. It is * cleared after a GC on this generations, and update before new * objects are added from a GC of a younger generation. Dividing by * the bytes_allocated will give the average age of the memory in * this generation since its last GC. */ - long cum_sum_bytes_allocated; + os_vm_size_t cum_sum_bytes_allocated; /* a minimum average memory age before a GC will occur helps * prevent a GC when a large number of new live objects have been * added, in which case a GC could be a waste of time */ - double min_av_mem_age; + double minimum_age_before_gc; }; /* an array of generation structures. There needs to be one more @@ -259,17 +372,24 @@ generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION; * integrated with the Lisp code. */ page_index_t last_free_page; +#ifdef LISP_FEATURE_SB_THREAD /* This lock is to prevent multiple threads from simultaneously * allocating new regions which overlap each other. 
Note that the * majority of GC is single-threaded, but alloc() may be called from * >1 thread at a time and must be thread-safe. This lock must be * seized before all accesses to generations[] or to parts of * page_table[] that other threads may want to see */ - -#ifdef LISP_FEATURE_SB_THREAD static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER; +/* This lock is used to protect non-thread-local allocation. */ +static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER; #endif +extern os_vm_size_t gencgc_release_granularity; +os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY; + +extern os_vm_size_t gencgc_alloc_granularity; +os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY; + /* * miscellaneous heap functions @@ -277,14 +397,13 @@ static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER; /* Count the number of pages which are write-protected within the * given generation. */ -static long +static page_index_t count_write_protect_generation_pages(generation_index_t generation) { - page_index_t i; - long count = 0; + page_index_t i, count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == generation) && (page_table[i].write_protected == 1)) count++; @@ -292,27 +411,28 @@ count_write_protect_generation_pages(generation_index_t generation) } /* Count the number of pages within the given generation. */ -static long +static page_index_t count_generation_pages(generation_index_t generation) { page_index_t i; - long count = 0; + page_index_t count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != 0) + if (page_allocated_p(i) && (page_table[i].gen == generation)) count++; return count; } -#ifdef QSHOW -static long +#if QSHOW +static page_index_t count_dont_move_pages(void) { page_index_t i; - long count = 0; + page_index_t count = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) { + if (page_allocated_p(i) + && (page_table[i].dont_move != 0)) { ++count; } } @@ -322,21 +442,22 @@ count_dont_move_pages(void) /* Work through the pages and add up the number of bytes used for the * given generation. */ -static long +static os_vm_size_t count_generation_bytes_allocated (generation_index_t gen) { page_index_t i; - long result = 0; + os_vm_size_t result = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != 0) && (page_table[i].gen == gen)) + if (page_allocated_p(i) + && (page_table[i].gen == gen)) result += page_table[i].bytes_used; } return result; } /* Return the average age of the memory in a generation. */ -static double -gen_av_mem_age(generation_index_t gen) +extern double +generation_average_age(generation_index_t gen) { if (generations[gen].bytes_allocated == 0) return 0.0; @@ -346,44 +467,50 @@ gen_av_mem_age(generation_index_t gen) / ((double)generations[gen].bytes_allocated); } -void fpu_save(int *); /* defined in x86-assem.S */ -void fpu_restore(int *); /* defined in x86-assem.S */ -/* The verbose argument controls how much to print: 0 for normal - * level of detail; 1 for debugging. 
*/ -static void -print_generation_stats(int verbose) /* FIXME: should take FILE argument */ +extern void +write_generation_stats(FILE *file) { - generation_index_t i, gens; - int fpu_state[27]; + generation_index_t i; + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) +#define FPU_STATE_SIZE 27 + int fpu_state[FPU_STATE_SIZE]; +#elif defined(LISP_FEATURE_PPC) +#define FPU_STATE_SIZE 32 + long long fpu_state[FPU_STATE_SIZE]; +#elif defined(LISP_FEATURE_SPARC) + /* + * 32 (single-precision) FP registers, and the FP state register. + * But Sparc V9 has 32 double-precision registers (equivalent to 64 + * single-precision, but can't be accessed), so we leave enough room + * for that. + */ +#define FPU_STATE_SIZE (((32 + 32 + 1) + 1)/2) + long long fpu_state[FPU_STATE_SIZE]; +#endif /* This code uses the FP instructions which may be set up for Lisp * so they need to be saved and reset for C. */ fpu_save(fpu_state); - /* highest generation to print */ - if (verbose) - gens = SCRATCH_GENERATION; - else - gens = PSEUDO_STATIC_GENERATION; - /* Print the heap stats. */ - fprintf(stderr, - " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); + fprintf(file, + " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); - for (i = 0; i < gens; i++) { + for (i = 0; i < SCRATCH_GENERATION; i++) { page_index_t j; - long boxed_cnt = 0; - long unboxed_cnt = 0; - long large_boxed_cnt = 0; - long large_unboxed_cnt = 0; - long pinned_cnt=0; + page_index_t boxed_cnt = 0; + page_index_t unboxed_cnt = 0; + page_index_t large_boxed_cnt = 0; + page_index_t large_unboxed_cnt = 0; + page_index_t pinned_cnt=0; for (j = 0; j < last_free_page; j++) if (page_table[j].gen == i) { /* Count the number of boxed pages within the given * generation. */ - if (page_table[j].allocated & BOXED_PAGE_FLAG) { + if (page_boxed_p(j)) { if (page_table[j].large_object) large_boxed_cnt++; else @@ -392,7 +519,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ if(page_table[j].dont_move) pinned_cnt++; /* Count the number of unboxed pages within the given * generation. 
 */
-            if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+            if (page_unboxed_p(j)) {
                 if (page_table[j].large_object)
                     large_unboxed_cnt++;
                 else
@@ -402,43 +529,125 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */
         gc_assert(generations[i].bytes_allocated
                   == count_generation_bytes_allocated(i));
-        fprintf(stderr,
-                "   %1d: %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
+        fprintf(file,
+                "   %1d: %5ld %5ld %5ld %5ld",
                 i,
-                boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
-                pinned_cnt,
+                generations[i].alloc_start_page,
+                generations[i].alloc_unboxed_start_page,
+                generations[i].alloc_large_start_page,
+                generations[i].alloc_large_unboxed_start_page);
+        fprintf(file,
+                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT
+                " %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
+                boxed_cnt, unboxed_cnt, large_boxed_cnt,
+                large_unboxed_cnt, pinned_cnt);
+        fprintf(file,
+                " %8"OS_VM_SIZE_FMT
+                " %5"OS_VM_SIZE_FMT
+                " %8"OS_VM_SIZE_FMT
+                " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
                 generations[i].bytes_allocated,
-                (count_generation_pages(i)*PAGE_BYTES
-                 - generations[i].bytes_allocated),
+                (npage_bytes(count_generation_pages(i))
+                 - generations[i].bytes_allocated),
                 generations[i].gc_trigger,
                 count_write_protect_generation_pages(i),
                 generations[i].num_gc,
-                gen_av_mem_age(i));
+                generation_average_age(i));
     }
-    fprintf(stderr,"   Total bytes allocated=%ld\n", bytes_allocated);
+    fprintf(file,"   Total bytes allocated    = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
+    fprintf(file,"   Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);
 
     fpu_restore(fpu_state);
 }
+
+extern void
+write_heap_exhaustion_report(FILE *file, long available, long requested,
+                             struct thread *thread)
+{
+    fprintf(file,
+            "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
+            gc_active_p ? "garbage collection" : "allocation",
+            available,
+            requested);
+    write_generation_stats(file);
+    fprintf(file, "GC control variables:\n");
+    fprintf(file, "   *GC-INHIBIT* = %s\n   *GC-PENDING* = %s\n",
+            SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
+            (SymbolValue(GC_PENDING, thread) == T) ?
+            "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
+                      "false" : "in progress"));
+#ifdef LISP_FEATURE_SB_THREAD
+    fprintf(file, "   *STOP-FOR-GC-PENDING* = %s\n",
+            SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
+#endif
+}
+
+extern void
+print_generation_stats(void)
+{
+    write_generation_stats(stderr);
+}
+
+extern char* gc_logfile;
+char * gc_logfile = NULL;
+
+extern void
+log_generation_stats(char *logfile, char *header)
+{
+    if (logfile) {
+        FILE * log = fopen(logfile, "a");
+        if (log) {
+            fprintf(log, "%s\n", header);
+            write_generation_stats(log);
+            fclose(log);
+        } else {
+            fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
+            fflush(stderr);
+        }
+    }
+}
+
+extern void
+report_heap_exhaustion(long available, long requested, struct thread *th)
+{
+    if (gc_logfile) {
+        FILE * log = fopen(gc_logfile, "a");
+        if (log) {
+            write_heap_exhaustion_report(log, available, requested, th);
+            fclose(log);
+        } else {
+            fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
+            fflush(stderr);
+        }
+    }
+    /* Always to stderr as well. */
+    write_heap_exhaustion_report(stderr, available, requested, th);
+}
+
+#if defined(LISP_FEATURE_X86)
 void fast_bzero(void*, size_t); /* in <arch>-assem.S */
+#endif
 
 /* Zero the pages from START to END (inclusive), but use mmap/munmap instead
  * if zeroing it ourselves, i.e. in practice give the memory back to the
  * OS. Generally done after a large GC.
*/ void zero_pages_with_mmap(page_index_t start, page_index_t end) { - int i; - void *addr = (void *) page_address(start), *new_addr; - size_t length = PAGE_BYTES*(1+end-start); + page_index_t i; + void *addr = page_address(start), *new_addr; + os_vm_size_t length = npage_bytes(1+end-start); if (start > end) return; + gc_assert(length >= gencgc_release_granularity); + gc_assert((length % gencgc_release_granularity) == 0); + os_invalidate(addr, length); new_addr = os_validate(addr, length); if (new_addr == NULL || new_addr != addr) { - lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", start, new_addr); + lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", + start, new_addr); } for (i = start; i <= end; i++) { @@ -454,7 +663,21 @@ zero_pages(page_index_t start, page_index_t end) { if (start > end) return; - fast_bzero(page_address(start), PAGE_BYTES*(1+end-start)); +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + fast_bzero(page_address(start), npage_bytes(1+end-start)); +#else + bzero(page_address(start), npage_bytes(1+end-start)); +#endif + +} + +static void +zero_and_mark_pages(page_index_t start, page_index_t end) { + page_index_t i; + + zero_pages(start, end); + for (i = start; i <= end; i++) + page_table[i].need_to_zero = 0; } /* Zero the pages from START to END (inclusive), except for those @@ -463,20 +686,19 @@ zero_pages(page_index_t start, page_index_t end) { */ static void zero_dirty_pages(page_index_t start, page_index_t end) { - page_index_t i; + page_index_t i, j; for (i = start; i <= end; i++) { - if (page_table[i].need_to_zero == 1) { - zero_pages(start, end); - break; - } + if (!page_table[i].need_to_zero) continue; + for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++); + zero_pages(i, j-1); + i = j; } for (i = start; i <= end; i++) { page_table[i].need_to_zero = 1; } } -' /* @@ -533,6 +755,55 @@ struct alloc_region unboxed_region; /* The generation currently being allocated to. */ static generation_index_t gc_alloc_generation; +static inline page_index_t +generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_large_unboxed_start_page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + return generations[generation].alloc_large_start_page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_unboxed_start_page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + return generations[generation].alloc_start_page; + } else { + lose("bad page_type_flag: %d", page_type_flag); + } + } +} + +static inline void +set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large, + page_index_t page) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_large_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + generations[generation].alloc_large_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. 
*/ + generations[generation].alloc_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } +} + /* Find a new region with room for at least the given number of bytes. * * It starts looking at the current generation's alloc_start_page. So @@ -557,12 +828,13 @@ static generation_index_t gc_alloc_generation; * are allocated, although they will initially be empty. */ static void -gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region) { page_index_t first_page; page_index_t last_page; - long bytes_found; + os_vm_size_t bytes_found; page_index_t i; + int ret; /* FSHOW((stderr, @@ -574,17 +846,12 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - thread_mutex_lock(&free_pages_lock); - if (unboxed) { - first_page = - generations[gc_alloc_generation].alloc_unboxed_start_page; - } else { - first_page = - generations[gc_alloc_generation].alloc_start_page; - } - last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); - bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) - + PAGE_BYTES*(last_page-first_page); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0); + last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag); + bytes_found=(GENCGC_CARD_BYTES - page_table[first_page].bytes_used) + + npage_bytes(last_page-first_page); /* Set up the alloc_region. */ alloc_region->first_page = first_page; @@ -598,64 +865,41 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) /* The first page may have already been in use. */ if (page_table[first_page].bytes_used == 0) { - if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[first_page].allocated = BOXED_PAGE_FLAG; + page_table[first_page].allocated = page_type_flag; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].large_object = 0; - page_table[first_page].first_object_offset = 0; + page_table[first_page].scan_start_offset = 0; } - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated == page_type_flag); page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG; gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); for (i = first_page+1; i <= last_page; i++) { - if (unboxed) - page_table[i].allocated = UNBOXED_PAGE_FLAG; - else - page_table[i].allocated = BOXED_PAGE_FLAG; + page_table[i].allocated = page_type_flag; page_table[i].gen = gc_alloc_generation; page_table[i].large_object = 0; /* This may not be necessary for unboxed regions (think it was * broken before!) */ - page_table[i].first_object_offset = - alloc_region->start_addr - page_address(i); + page_table[i].scan_start_offset = + void_diff(page_address(i),alloc_region->start_addr); page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ; } /* Bump up last_free_page. 
*/ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), - 0); - } - thread_mutex_unlock(&free_pages_lock); - - /* we can do this after releasing free_pages_lock */ - if (gencgc_zero_check) { - long *p; - for (p = (long *)alloc_region->start_addr; - p < (long *)alloc_region->end_addr; p++) { - if (*p != 0) { - /* KLUDGE: It would be nice to use %lx and explicit casts - * (long) in code like this, so that it is less likely to - * break randomly when running on a machine with different - * word sizes. -- WHN 19991129 */ - lose("The new region at %x is not zero.\n", p); - } - } + /* do we only want to call this on special occasions? like for + * boxed_region? */ + set_alloc_pointer((lispobj)page_address(last_free_page)); } + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(1+last_page-first_page), + npage_bytes(1+last_page-first_page), OS_VM_PROT_ALL); #endif @@ -668,6 +912,18 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) } zero_dirty_pages(first_page, last_page); + + /* we can do this after releasing free_pages_lock */ + if (gencgc_zero_check) { + word_t *p; + for (p = (word_t *)alloc_region->start_addr; + p < (word_t *)alloc_region->end_addr; p++) { + if (*p != 0) { + lose("The new region is not zero at %p (start=%p, end=%p).\n", + p, alloc_region->start_addr, alloc_region->end_addr); + } + } + } } /* If the record_new_objects flag is 2 then all new regions created @@ -690,19 +946,19 @@ static int record_new_objects = 0; static page_index_t new_areas_ignore_page; struct new_area { page_index_t page; - long offset; - long size; + size_t offset; + size_t size; }; static struct new_area (*new_areas)[]; -static long new_areas_index; -long max_new_areas; +static size_t new_areas_index; +size_t max_new_areas; /* Add a new area to new_areas. */ static void -add_new_area(page_index_t first_page, long offset, long size) +add_new_area(page_index_t first_page, size_t offset, size_t size) { - unsigned long new_area_start,c; - long i; + size_t new_area_start, c; + ssize_t i; /* Ignore if full. */ if (new_areas_index >= NUM_NEW_AREAS) @@ -721,13 +977,13 @@ add_new_area(page_index_t first_page, long offset, long size) gc_abort(); } - new_area_start = PAGE_BYTES*first_page + offset; + new_area_start = npage_bytes(first_page) + offset; /* Search backwards for a prior area that this follows from. If found this will save adding a new area. */ for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) { - unsigned long area_end = - PAGE_BYTES*((*new_areas)[i].page) + size_t area_end = + npage_bytes((*new_areas)[i].page) + (*new_areas)[i].offset + (*new_areas)[i].size; /*FSHOW((stderr, @@ -769,15 +1025,16 @@ add_new_area(page_index_t first_page, long offset, long size) * it is safe to try to re-update the page table of this reset * alloc_region. 
*/ void -gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) +gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region) { - int more; + boolean more; page_index_t first_page; page_index_t next_page; - int bytes_used; - long orig_first_page_bytes_used; - long region_size; - long byte_cnt; + os_vm_size_t bytes_used; + os_vm_size_t region_size; + os_vm_size_t byte_cnt; + page_bytes_t orig_first_page_bytes_used; + int ret; first_page = alloc_region->first_page; @@ -788,27 +1045,27 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (alloc_region->free_pointer != alloc_region->start_addr) { /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; - gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used)); + gc_assert(alloc_region->start_addr == + (page_address(first_page) + + page_table[first_page].bytes_used)); /* All the pages used need to be updated */ /* Update the first page. */ /* If the page was free then set up the gen, and - * first_object_offset. */ + * scan_start_offset. */ if (page_table[first_page].bytes_used == 0) - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_starts_contiguous_block_p(first_page)); page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated & page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); @@ -817,35 +1074,35 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) { - bytes_used = PAGE_BYTES; + if ((bytes_used = void_diff(alloc_region->free_pointer, + page_address(first_page))) + >GENCGC_CARD_BYTES) { + bytes_used = GENCGC_CARD_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; byte_cnt += bytes_used; - /* All the rest of the pages should be free. We need to set their - * first_object_offset pointer to the start of the region, and set - * the bytes_used. */ + /* All the rest of the pages should be free. We need to set + * their scan_start_offset pointer to the start of the + * region, and set the bytes_used. */ while (more) { page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - if (unboxed) - gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[next_page].allocated & page_type_flag); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); - gc_assert(page_table[next_page].first_object_offset == - alloc_region->start_addr - page_address(next_page)); + gc_assert(page_table[next_page].scan_start_offset == + void_diff(page_address(next_page), + alloc_region->start_addr)); /* Calculate the number of bytes used in this page. 
*/ more = 0; - if ((bytes_used = (alloc_region->free_pointer - - page_address(next_page)))>PAGE_BYTES) { - bytes_used = PAGE_BYTES; + if ((bytes_used = void_diff(alloc_region->free_pointer, + page_address(next_page)))>GENCGC_CARD_BYTES) { + bytes_used = GENCGC_CARD_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; @@ -854,7 +1111,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page++; } - region_size = alloc_region->free_pointer - alloc_region->start_addr; + region_size = void_diff(alloc_region->free_pointer, + alloc_region->start_addr); bytes_allocated += region_size; generations[gc_alloc_generation].bytes_allocated += region_size; @@ -862,14 +1120,10 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Set the generations alloc restart page to the last page of * the region. */ - if (unboxed) - generations[gc_alloc_generation].alloc_unboxed_start_page = - next_page-1; - else - generations[gc_alloc_generation].alloc_start_page = next_page-1; + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1); /* Add the region to the new_areas if requested. */ - if (!unboxed) + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used, region_size); /* @@ -892,65 +1146,53 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); + /* alloc_region is per-thread, we're ok to do this unlocked */ gc_set_region_empty(alloc_region); } -static inline void *gc_quick_alloc(long nbytes); +static inline void *gc_quick_alloc(word_t nbytes); /* Allocate a possibly large object. */ void * -gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region) { - page_index_t first_page; - page_index_t last_page; - int orig_first_page_bytes_used; - long byte_cnt; - int more; - long bytes_used; - page_index_t next_page; + boolean more; + page_index_t first_page, next_page, last_page; + page_bytes_t orig_first_page_bytes_used; + os_vm_size_t byte_cnt; + os_vm_size_t bytes_used; + int ret; - thread_mutex_lock(&free_pages_lock); + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); - if (unboxed) { - first_page = - generations[gc_alloc_generation].alloc_large_unboxed_start_page; - } else { - first_page = generations[gc_alloc_generation].alloc_large_start_page; - } + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1); if (first_page <= alloc_region->last_page) { first_page = alloc_region->last_page+1; } - last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); + last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag); gc_assert(first_page > alloc_region->last_page); - if (unboxed) - generations[gc_alloc_generation].alloc_large_unboxed_start_page = - last_page; - else - generations[gc_alloc_generation].alloc_large_start_page = last_page; + + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page); /* Set up the pages. */ orig_first_page_bytes_used = page_table[first_page].bytes_used; /* If the first page was free then set up the gen, and - * first_object_offset. */ + * scan_start_offset. 
*/ if (page_table[first_page].bytes_used == 0) { - if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[first_page].allocated = BOXED_PAGE_FLAG; + page_table[first_page].allocated = page_type_flag; page_table[first_page].gen = gc_alloc_generation; - page_table[first_page].first_object_offset = 0; + page_table[first_page].scan_start_offset = 0; page_table[first_page].large_object = 1; } - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated == page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 1); @@ -959,8 +1201,8 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) /* Calc. the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) { - bytes_used = PAGE_BYTES; + if ((bytes_used = nbytes+orig_first_page_bytes_used) > GENCGC_CARD_BYTES) { + bytes_used = GENCGC_CARD_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; @@ -969,25 +1211,23 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; /* All the rest of the pages should be free. We need to set their - * first_object_offset pointer to the start of the region, and - * set the bytes_used. */ + * scan_start_offset pointer to the start of the region, and set + * the bytes_used. */ while (more) { - gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); + gc_assert(page_free_p(next_page)); gc_assert(page_table[next_page].bytes_used == 0); - if (unboxed) - page_table[next_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[next_page].allocated = BOXED_PAGE_FLAG; + page_table[next_page].allocated = page_type_flag; page_table[next_page].gen = gc_alloc_generation; page_table[next_page].large_object = 1; - page_table[next_page].first_object_offset = - orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page); + page_table[next_page].scan_start_offset = + npage_bytes(next_page-first_page) - orig_first_page_bytes_used; /* Calculate the number of bytes used in this page. */ more = 0; - if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) { - bytes_used = PAGE_BYTES; + bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt; + if (bytes_used > GENCGC_CARD_BYTES) { + bytes_used = GENCGC_CARD_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; @@ -1003,20 +1243,20 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) generations[gc_alloc_generation].bytes_allocated += nbytes; /* Add the region to the new_areas if requested. 
*/ - if (!unboxed) + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used,nbytes); /* Bump up last_free_page */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); + set_alloc_pointer((lispobj)(page_address(last_free_page))); } - thread_mutex_unlock(&free_pages_lock); + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(1+last_page-first_page), + npage_bytes(1+last_page-first_page), OS_VM_PROT_ALL); #endif @@ -1027,102 +1267,148 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) static page_index_t gencgc_alloc_start_page = -1; +void +gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested) +{ + struct thread *thread = arch_os_get_current_thread(); + /* Write basic information before doing anything else: if we don't + * call to lisp this is a must, and even if we do there is always + * the danger that we bounce back here before the error has been + * handled, or indeed even printed. + */ + report_heap_exhaustion(available, requested, thread); + if (gc_active_p || (available == 0)) { + /* If we are in GC, or totally out of memory there is no way + * to sanely transfer control to the lisp-side of things. + */ + lose("Heap exhausted, game over."); + } + else { + /* FIXME: assert free_pages_lock held */ + (void)thread_mutex_unlock(&free_pages_lock); +#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)) + gc_assert(get_pseudo_atomic_atomic(thread)); + clear_pseudo_atomic_atomic(thread); + if (get_pseudo_atomic_interrupted(thread)) + do_pending_interrupt(); +#endif + /* Another issue is that signalling HEAP-EXHAUSTED error leads + * to running user code at arbitrary places, even in a + * WITHOUT-INTERRUPTS which may lead to a deadlock without + * running out of the heap. So at this point all bets are + * off. */ + if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) + corruption_warning_and_maybe_lose + ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS."); + funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), + alloc_number(available), alloc_number(requested)); + lose("HEAP-EXHAUSTED-ERROR fell through"); + } +} + page_index_t -gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) +gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes, + int page_type_flag) { - page_index_t first_page; - page_index_t last_page; - long region_size; - page_index_t restart_page=*restart_page_ptr; - long bytes_found; - long num_pages; - int large_p=(nbytes>=large_object_size); + page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0; + page_index_t first_page, last_page, restart_page = *restart_page_ptr; + os_vm_size_t nbytes = bytes; + os_vm_size_t nbytes_goal = nbytes; + os_vm_size_t bytes_found = 0; + os_vm_size_t most_bytes_found = 0; + boolean small_object = nbytes < GENCGC_CARD_BYTES; /* FIXME: assert(free_pages_lock is held); */ - /* Search for a contiguous free space of at least nbytes. If it's - * a large object then align it on a page boundary by searching - * for a free page. */ + if (nbytes_goal < gencgc_alloc_granularity) + nbytes_goal = gencgc_alloc_granularity; + /* Toggled by gc_and_save for heap compaction, normally -1. 
*/ if (gencgc_alloc_start_page != -1) { restart_page = gencgc_alloc_start_page; } - do { - first_page = restart_page; - if (large_p) - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE_FLAG)) - first_page++; - else - while (first_page < NUM_PAGES) { - if(page_table[first_page].allocated == FREE_PAGE_FLAG) - break; - if((page_table[first_page].allocated == - (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && + /* FIXME: This is on bytes instead of nbytes pending cleanup of + * long from the interface. */ + gc_assert(bytes>=0); + /* Search for a page with at least nbytes of space. We prefer + * not to split small objects on multiple pages, to reduce the + * number of contiguous allocation regions spaning multiple + * pages: this helps avoid excessive conservativism. + * + * For other objects, we guarantee that they start on their own + * page boundary. + */ + first_page = restart_page; + while (first_page < page_table_pages) { + bytes_found = 0; + if (page_free_p(first_page)) { + gc_assert(0 == page_table[first_page].bytes_used); + bytes_found = GENCGC_CARD_BYTES; + } else if (small_object && + (page_table[first_page].allocated == page_type_flag) && (page_table[first_page].large_object == 0) && (page_table[first_page].gen == gc_alloc_generation) && - (page_table[first_page].bytes_used < (PAGE_BYTES-32)) && (page_table[first_page].write_protected == 0) && (page_table[first_page].dont_move == 0)) { - break; - } + bytes_found = GENCGC_CARD_BYTES - page_table[first_page].bytes_used; + if (bytes_found < nbytes) { + if (bytes_found > most_bytes_found) + most_bytes_found = bytes_found; first_page++; + continue; } - - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose("\n"); + } else { + first_page++; + continue; } gc_assert(page_table[first_page].write_protected == 0); + for (last_page = first_page+1; + ((last_page < page_table_pages) && + page_free_p(last_page) && + (bytes_found < nbytes_goal)); + last_page++) { + bytes_found += GENCGC_CARD_BYTES; + gc_assert(0 == page_table[last_page].bytes_used); + gc_assert(0 == page_table[last_page].write_protected); + } - last_page = first_page; - bytes_found = PAGE_BYTES - page_table[first_page].bytes_used; - num_pages = 1; - while (((bytes_found < nbytes) - || (!large_p && (num_pages < 2))) - && (last_page < (NUM_PAGES-1)) - && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { - last_page++; - num_pages++; - bytes_found += PAGE_BYTES; - gc_assert(page_table[last_page].write_protected == 0); + if (bytes_found > most_bytes_found) { + most_bytes_found = bytes_found; + most_bytes_found_from = first_page; + most_bytes_found_to = last_page; } + if (bytes_found >= nbytes_goal) + break; - region_size = (PAGE_BYTES - page_table[first_page].bytes_used) - + PAGE_BYTES*(last_page-first_page); + first_page = last_page; + } - gc_assert(bytes_found == region_size); - restart_page = last_page + 1; - } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); + bytes_found = most_bytes_found; + restart_page = first_page + 1; /* Check for a failure */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! 
gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n", - nbytes); - print_generation_stats(1); - lose("\n"); + if (bytes_found < nbytes) { + gc_assert(restart_page >= page_table_pages); + gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes); } - *restart_page_ptr=first_page; - return last_page; + gc_assert(most_bytes_found_to); + *restart_page_ptr = most_bytes_found_from; + return most_bytes_found_to-1; } /* Allocate bytes. All the rest of the special-purpose allocation * functions will eventually call this */ void * -gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, +gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region, int quick_p) { void *new_free_pointer; - if(nbytes>=large_object_size) - return gc_alloc_large(nbytes,unboxed_p,my_region); + if (nbytes>=large_object_size) + return gc_alloc_large(nbytes, page_type_flag, my_region); /* Check whether there is room in the current alloc region. */ new_free_pointer = my_region->free_pointer + nbytes; @@ -1138,11 +1424,11 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, /* Unless a `quick' alloc was requested, check whether the alloc region is almost empty. */ if (!quick_p && - (my_region->end_addr - my_region->free_pointer) <= 32) { + void_diff(my_region->end_addr,my_region->free_pointer) <= 32) { /* If so, finished with the current region. */ - gc_alloc_update_page_tables(unboxed_p, my_region); + gc_alloc_update_page_tables(page_type_flag, my_region); /* Set up a new region. */ - gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region); + gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region); } return((void *)new_obj); @@ -1151,69 +1437,41 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, /* Else not enough free space in the current region: retry with a * new region. */ - gc_alloc_update_page_tables(unboxed_p, my_region); - gc_alloc_new_region(nbytes, unboxed_p, my_region); - return gc_alloc_with_region(nbytes,unboxed_p,my_region,0); + gc_alloc_update_page_tables(page_type_flag, my_region); + gc_alloc_new_region(nbytes, page_type_flag, my_region); + return gc_alloc_with_region(nbytes, page_type_flag, my_region,0); } /* these are only used during GC: all allocation from the mutator calls * alloc() -> gc_alloc_with_region() with the appropriate per-thread * region */ -void * -gc_general_alloc(long nbytes,int unboxed_p,int quick_p) -{ - struct alloc_region *my_region = - unboxed_p ? 
&unboxed_region : &boxed_region; - return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p); -} - -static inline void * -gc_quick_alloc(long nbytes) -{ - return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); -} - -static inline void * -gc_quick_alloc_large(long nbytes) -{ - return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); -} - static inline void * -gc_alloc_unboxed(long nbytes) +gc_quick_alloc(word_t nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,0); + return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK); } static inline void * -gc_quick_alloc_unboxed(long nbytes) +gc_alloc_unboxed(word_t nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0); } static inline void * -gc_quick_alloc_large_unboxed(long nbytes) +gc_quick_alloc_unboxed(word_t nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK); } -/* - * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b - */ - -extern long (*scavtab[256])(lispobj *where, lispobj object); -extern lispobj (*transother[256])(lispobj object); -extern long (*sizetab[256])(lispobj *where); - -/* Copy a large boxed object. If the object is in a large object - * region then it is simply promoted, else it is copied. If it's large - * enough then it's copied to a large object region. +/* Copy a large object. If the object is in a large object region then + * it is simply promoted, else it is copied. If it's large enough then + * it's copied to a large object region. * - * Vectors may have shrunk. If the object is not copied the space - * needs to be reclaimed, and the page_tables corrected. */ -lispobj -copy_large_object(lispobj object, long nwords) + * Bignums and vectors may have shrunk. If the object is not copied + * the space needs to be reclaimed, and the page_tables corrected. */ +static lispobj +general_copy_large_object(lispobj object, word_t nwords, boolean boxedp) { int tag; lispobj *new; @@ -1223,58 +1481,71 @@ copy_large_object(lispobj object, long nwords) gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); + if ((nwords > 1024*1024) && gencgc_verbose) { + FSHOW((stderr, "/general_copy_large_object: %d bytes\n", + nwords*N_WORD_BYTES)); + } - /* Check whether it's in a large object region. */ + /* Check whether it's a large object. */ first_page = find_page_index((void *)object); gc_assert(first_page >= 0); if (page_table[first_page].large_object) { - - /* Promote the object. */ - - long remaining_bytes; + /* Promote the object. Note: Unboxed objects may have been + * allocated to a BOXED region so it may be necessary to + * change the region to UNBOXED. */ + os_vm_size_t remaining_bytes; + os_vm_size_t bytes_freed; page_index_t next_page; - long bytes_freed; - long old_bytes_used; + page_bytes_t old_bytes_used; - /* Note: Any page write-protection must be removed, else a + /* FIXME: This comment is somewhat stale. + * + * Note: Any page write-protection must be removed, else a * later scavenge_newspace may incorrectly not scavenge these * pages. This would not be necessary if they are added to the * new areas, but let's do it for them all (they'll probably * be written anyway?). 
*/ - gc_assert(page_table[first_page].first_object_offset == 0); - + gc_assert(page_starts_contiguous_block_p(first_page)); next_page = first_page; remaining_bytes = nwords*N_WORD_BYTES; - while (remaining_bytes > PAGE_BYTES) { + + while (remaining_bytes > GENCGC_CARD_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset== - -PAGE_BYTES*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); - + gc_assert(page_table[next_page].scan_start_offset == + npage_bytes(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES); + /* Should have been unprotected by unprotect_oldspace() + * for boxed objects, and after promotion unboxed ones + * should not be on protected pages at all. */ + gc_assert(!page_table[next_page].write_protected); + + if (boxedp) + gc_assert(page_boxed_p(next_page)); + else { + gc_assert(page_allocated_no_region_p(next_page)); + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; + } page_table[next_page].gen = new_space; - /* Remove any write-protection. We should be able to rely - * on the write-protect flag to avoid redundant calls. */ - if (page_table[next_page].write_protected) { - os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL); - page_table[next_page].write_protected = 0; - } - remaining_bytes -= PAGE_BYTES; + remaining_bytes -= GENCGC_CARD_BYTES; next_page++; } - /* Now only one page remains, but the object may have shrunk - * so there may be more unused pages which will be freed. */ + /* Now only one page remains, but the object may have shrunk so + * there may be more unused pages which will be freed. */ - /* The object may have shrunk but shouldn't have grown. */ + /* Object may have shrunk but shouldn't have grown - check. */ gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + + if (boxedp) + gc_assert(page_boxed_p(next_page)); + else + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1284,16 +1555,23 @@ copy_large_object(lispobj object, long nwords) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == PAGE_BYTES) && + while ((old_bytes_used == GENCGC_CARD_BYTES) && (page_table[next_page].gen == from_space) && - (page_table[next_page].allocated == BOXED_PAGE_FLAG) && + /* FIXME: It is not obvious to me why this is necessary + * as a loop condition: it seems to me that the + * scan_start_offset test should be sufficient, but + * experimentally that is not the case. --NS + * 2011-11-28 */ + (boxedp ? + page_boxed_p(next_page) : + page_allocated_no_region_p(next_page)) && page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { - /* Checks out OK, free the page. Don't need to bother zeroing + (page_table[next_page].scan_start_offset == + npage_bytes(next_page - first_page))) { + /* Checks out OK, free the page. Don't need to both zeroing * pages as this should have been done before shrinking the - * object. These pages shouldn't be write-protected as they - * should be zero filled. */ + * object. These pages shouldn't be write-protected, even if + * boxed they should be zero filled. 
*/ gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; @@ -1303,22 +1581,33 @@ copy_large_object(lispobj object, long nwords) next_page++; } - generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords + - bytes_freed; - generations[new_space].bytes_allocated += N_WORD_BYTES*nwords; + if ((bytes_freed > 0) && gencgc_verbose) { + FSHOW((stderr, + "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n", + bytes_freed)); + } + + generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + + bytes_freed; + generations[new_space].bytes_allocated += nwords*N_WORD_BYTES; bytes_allocated -= bytes_freed; /* Add the region to the new_areas if requested. */ - add_new_area(first_page,0,nwords*N_WORD_BYTES); + if (boxedp) + add_new_area(first_page,0,nwords*N_WORD_BYTES); return(object); + } else { /* Get tag of object. */ tag = lowtag_of(object); /* Allocate space. */ - new = gc_quick_alloc_large(nwords*N_WORD_BYTES); + new = gc_general_alloc(nwords*N_WORD_BYTES, + (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG), + ALLOC_QUICK); + /* Copy the object. */ memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); /* Return Lisp pointer of new object. */ @@ -1326,152 +1615,25 @@ copy_large_object(lispobj object, long nwords) } } -/* to copy unboxed objects */ lispobj -copy_unboxed_object(lispobj object, long nwords) +copy_large_object(lispobj object, sword_t nwords) { - long tag; - lispobj *new; - - gc_assert(is_lisp_pointer(object)); - gc_assert(from_space_p(object)); - gc_assert((nwords & 0x01) == 0); - - /* Get tag of object. */ - tag = lowtag_of(object); - - /* Allocate space. */ - new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES); - - memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); + return general_copy_large_object(object, nwords, 1); +} - /* Return Lisp pointer of new object. */ - return ((lispobj) new) | tag; +lispobj +copy_large_unboxed_object(lispobj object, sword_t nwords) +{ + return general_copy_large_object(object, nwords, 0); } -/* to copy large unboxed objects - * - * If the object is in a large object region then it is simply - * promoted, else it is copied. If it's large enough then it's copied - * to a large object region. - * - * Bignums and vectors may have shrunk. If the object is not copied - * the space needs to be reclaimed, and the page_tables corrected. - * - * KLUDGE: There's a lot of cut-and-paste duplication between this - * function and copy_large_object(..). -- WHN 20000619 */ +/* to copy unboxed objects */ lispobj -copy_large_unboxed_object(lispobj object, long nwords) +copy_unboxed_object(lispobj object, sword_t nwords) { - int tag; - lispobj *new; - page_index_t first_page; - - gc_assert(is_lisp_pointer(object)); - gc_assert(from_space_p(object)); - gc_assert((nwords & 0x01) == 0); - - if ((nwords > 1024*1024) && gencgc_verbose) - FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); - - /* Check whether it's a large object. */ - first_page = find_page_index((void *)object); - gc_assert(first_page >= 0); - - if (page_table[first_page].large_object) { - /* Promote the object. Note: Unboxed objects may have been - * allocated to a BOXED region so it may be necessary to - * change the region to UNBOXED. 
*/ - long remaining_bytes; - page_index_t next_page; - long bytes_freed; - long old_bytes_used; - - gc_assert(page_table[first_page].first_object_offset == 0); - - next_page = first_page; - remaining_bytes = nwords*N_WORD_BYTES; - while (remaining_bytes > PAGE_BYTES) { - gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); - gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset== - -PAGE_BYTES*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); - - page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE_FLAG; - remaining_bytes -= PAGE_BYTES; - next_page++; - } - - /* Now only one page remains, but the object may have shrunk so - * there may be more unused pages which will be freed. */ - - /* Object may have shrunk but shouldn't have grown - check. */ - gc_assert(page_table[next_page].bytes_used >= remaining_bytes); - - page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE_FLAG; - - /* Adjust the bytes_used. */ - old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].bytes_used = remaining_bytes; - - bytes_freed = old_bytes_used - remaining_bytes; - - /* Free any remaining pages; needs care. */ - next_page++; - while ((old_bytes_used == PAGE_BYTES) && - (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && - page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { - /* Checks out OK, free the page. Don't need to both zeroing - * pages as this should have been done before shrinking the - * object. These pages shouldn't be write-protected, even if - * boxed they should be zero filled. */ - gc_assert(page_table[next_page].write_protected == 0); - - old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE_FLAG; - page_table[next_page].bytes_used = 0; - bytes_freed += old_bytes_used; - next_page++; - } - - if ((bytes_freed > 0) && gencgc_verbose) - FSHOW((stderr, - "/copy_large_unboxed bytes_freed=%d\n", - bytes_freed)); - - generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; - generations[new_space].bytes_allocated += nwords*N_WORD_BYTES; - bytes_allocated -= bytes_freed; - - return(object); - } - else { - /* Get tag of object. */ - tag = lowtag_of(object); - - /* Allocate space. */ - new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES); - - /* Copy the object. */ - memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); - - /* Return Lisp pointer of new object. */ - return ((lispobj) new) | tag; - } -} - - - - + return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG); +} + /* * code and code-related objects @@ -1490,27 +1652,29 @@ static lispobj trans_boxed(lispobj object); * * Currently only absolute fixups to the constant vector, or to the * code area are checked. 
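 */

/* Editorial sketch, not part of the patch: sniff_code_object below scans
 * the machine code for 32-bit immediates that fall inside the
 * (pre-move) code object's own address range -- candidate absolute
 * references that a fixup pass may have missed. The core candidate
 * test, isolated (bytewise copy to avoid unaligned reads; assumes
 * little-endian data): */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int count_suspicious_words(const unsigned char *code, size_t len,
                                  uint32_t range_start, uint32_t range_end)
{
    int hits = 0;
    size_t i;
    for (i = 0; i + 4 <= len; i++) {   /* every byte offset, as below */
        uint32_t word;
        memcpy(&word, code + i, 4);
        if (word >= range_start && word < range_end)
            hits++;
    }
    return hits;
}

/* A hit alone is only suspicious; the real routine then inspects the
 * preceding opcode bytes (d1..d6) before reporting a fixup error.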
 */
+#ifdef LISP_FEATURE_X86
 void
-sniff_code_object(struct code *code, unsigned long displacement)
+sniff_code_object(struct code *code, os_vm_size_t displacement)
 {
-#ifdef LISP_FEATURE_X86
-    long nheader_words, ncode_words, nwords;
-    void *p;
-    void *constants_start_addr = NULL, *constants_end_addr;
-    void *code_start_addr, *code_end_addr;
+    sword_t nheader_words, ncode_words, nwords;
+    os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
+    os_vm_address_t code_start_addr, code_end_addr;
+    os_vm_address_t code_addr = (os_vm_address_t)code;
     int fixup_found = 0;

     if (!check_code_fixups)
         return;

+    FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
+
     ncode_words = fixnum_value(code->code_size);
     nheader_words = HeaderValue(*(lispobj *)code);
     nwords = ncode_words + nheader_words;

-    constants_start_addr = (void *)code + 5*N_WORD_BYTES;
-    constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES;
-    code_start_addr = (void *)code + nheader_words*N_WORD_BYTES;
-    code_end_addr = (void *)code + nwords*N_WORD_BYTES;
+    constants_start_addr = code_addr + 5*N_WORD_BYTES;
+    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_end_addr = code_addr + nwords*N_WORD_BYTES;

     /* Work through the unboxed code. */
     for (p = code_start_addr; p < code_end_addr; p++) {
@@ -1519,7 +1683,7 @@ sniff_code_object(struct code *code, unsigned long displacement)
         unsigned d2 = *((unsigned char *)p - 2);
         unsigned d3 = *((unsigned char *)p - 3);
         unsigned d4 = *((unsigned char *)p - 4);
-#ifdef QSHOW
+#if QSHOW
         unsigned d5 = *((unsigned char *)p - 5);
         unsigned d6 = *((unsigned char *)p - 6);
 #endif

         /* Check for code references. */
         /* Check for a 32 bit word that looks like an absolute reference
          * to within the code area of the code object. */
-        if ((data >= (code_start_addr-displacement))
-            && (data < (code_end_addr-displacement))) {
+        if ((data >= (void*)(code_start_addr-displacement))
+            && (data < (void*)(code_end_addr-displacement))) {
             /* function header */
             if ((d4 == 0x5e)
-                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
+                    (unsigned)code)) {
                 /* Skip the function header */
                 p += 6*4 - 4 - 1;
                 continue;
             }
@@ -1569,8 +1734,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
         /* Check for a 32 bit word that looks like an absolute reference
          * to within the constant vector. Constant references will be
          * aligned.
 */
-        if ((data >= (constants_start_addr-displacement))
-            && (data < (constants_end_addr-displacement))
+        if ((data >= (void*)(constants_start_addr-displacement))
+            && (data < (void*)(constants_end_addr-displacement))
             && (((unsigned)data & 0x3) == 0)) {
             /* Mov eax,m32 */
             if (d1 == 0xa1) {
@@ -1659,19 +1824,20 @@ sniff_code_object(struct code *code, unsigned long displacement)
                "/code start = %x, end = %x\n",
                code_start_addr, code_end_addr));
     }
-#endif
 }
+#endif
+#ifdef LISP_FEATURE_X86
 void
 gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
 {
-/* x86-64 uses pc-relative addressing instead of this kludge */
-#ifndef LISP_FEATURE_X86_64
-    long nheader_words, ncode_words, nwords;
-    void *constants_start_addr, *constants_end_addr;
-    void *code_start_addr, *code_end_addr;
+    sword_t nheader_words, ncode_words, nwords;
+    os_vm_address_t constants_start_addr, constants_end_addr;
+    os_vm_address_t code_start_addr, code_end_addr;
+    os_vm_address_t code_addr = (os_vm_address_t)new_code;
+    os_vm_address_t old_addr = (os_vm_address_t)old_code;
+    os_vm_size_t displacement = code_addr - old_addr;
     lispobj fixups = NIL;
-    unsigned long displacement = (unsigned long)new_code - (unsigned long)old_code;
     struct vector *fixups_vector;

     ncode_words = fixnum_value(new_code->code_size);
@@ -1680,10 +1846,10 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
     /* FSHOW((stderr,
              "/compiled code object at %x: header words = %d, code words = %d\n",
              new_code, nheader_words, ncode_words)); */
-    constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
-    constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
-    code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
-    code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
+    constants_start_addr = code_addr + 5*N_WORD_BYTES;
+    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_end_addr = code_addr + nwords*N_WORD_BYTES;
     /* FSHOW((stderr,
              "/const start = %x, end = %x\n",
@@ -1719,7 +1885,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
         (fixups_vector->header == 0x01)) {
         /* If so, then follow it. */
         /*SHOW("following pointer to a forwarding pointer");*/
-        fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
+        fixups_vector =
+            (struct vector *)native_pointer((lispobj)fixups_vector->length);
     }
     /*SHOW("got fixups");*/
@@ -1727,45 +1894,46 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
     if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
         /* Got the fixups for the code block. Now work through the
          * vector, and apply a fixup at each address. */
-        long length = fixnum_value(fixups_vector->length);
-        long i;
+        sword_t length = fixnum_value(fixups_vector->length);
+        sword_t i;
         for (i = 0; i < length; i++) {
-            unsigned long offset = fixups_vector->data[i];
+            long offset = fixups_vector->data[i];
             /* Now check the current value of offset. */
-            unsigned long old_value =
-                *(unsigned long *)((unsigned long)code_start_addr + offset);
+            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);

             /* If it's within the old_code object then it must be an
              * absolute fixup (relative ones are not saved) */
-            if ((old_value >= (unsigned long)old_code)
-                && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES)))
+            if ((old_value >= old_addr)
+                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                 /* So add the displacement.
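 */

/* Editorial sketch, not part of the patch: the loop above classifies
 * each saved fixup by where its current value points. In isolation: */
#include <stdint.h>

static uintptr_t apply_fixup(uintptr_t old_value,
                             uintptr_t old_base, uintptr_t nbytes,
                             intptr_t displacement)
{
    if (old_value >= old_base && old_value < old_base + nbytes)
        return old_value + displacement;  /* absolute: target moved with us */
    else
        return old_value - displacement;  /* relative: cancel our own move */
}

/* With displacement +0x1000, a pointer into the old object gains 0x1000;
 * a PC-relative operand loses 0x1000 so it still reaches the unmoved
 * external target.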
 */
-                *(unsigned long *)((unsigned long)code_start_addr + offset) =
+                *(os_vm_address_t *)(code_start_addr + offset) =
                     old_value + displacement;
             else
                 /* It is outside the old code object so it must be a
                  * relative fixup (absolute fixups are not saved). So
                  * subtract the displacement. */
-                *(unsigned long *)((unsigned long)code_start_addr + offset) =
+                *(os_vm_address_t *)(code_start_addr + offset) =
                     old_value - displacement;
         }
     } else {
-        fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
+        /* This used to just print a note to stderr, but a bogus fixup seems to
+         * indicate real heap corruption, so a hard failure is in order. */
+        lose("fixup vector %p has a bad widetag: %d\n",
+             fixups_vector, widetag_of(fixups_vector->header));
     }

     /* Check for possible errors. */
     if (check_code_fixups) {
         sniff_code_object(new_code,displacement);
     }
-#endif
 }
-
+#endif

 static lispobj
 trans_boxed_large(lispobj object)
 {
     lispobj header;
-    unsigned long length;
+    uword_t length;

     gc_assert(is_lisp_pointer(object));
@@ -1782,7 +1950,7 @@ static lispobj
 trans_unboxed_large(lispobj object)
 {
     lispobj header;
-    unsigned long length;
+    uword_t length;

     gc_assert(is_lisp_pointer(object));
@@ -1793,223 +1961,6 @@ trans_unboxed_large(lispobj object)
     return copy_large_unboxed_object(object, length);
 }
 #endif
-
-
-/*
- * vector-like objects
- */
-
-
-/* FIXME: What does this mean? */
-int gencgc_hash = 1;
-
-static long
-scav_vector(lispobj *where, lispobj object)
-{
-    unsigned long kv_length;
-    lispobj *kv_vector;
-    unsigned long length = 0; /* (0 = dummy to stop GCC warning) */
-    struct hash_table *hash_table;
-    lispobj empty_symbol;
-    unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
-    unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
-    unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */
-    lispobj weak_p_obj;
-    unsigned long next_vector_length = 0;
-
-    /* FIXME: A comment explaining this would be nice. It looks as
-     * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
-     * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
-    if (HeaderValue(object) != subtype_VectorValidHashing)
-        return 1;
-
-    if (!gencgc_hash) {
-        /* This is set for backward compatibility. FIXME: Do we need
-         * this any more? */
-        *where =
-            (subtype_VectorMustRehash<<N_WIDETAG_BITS) |
-            SIMPLE_VECTOR_WIDETAG;
-        return 1;
-    }
-
-    kv_length = fixnum_value(where[1]);
-    kv_vector = where + 2;  /* Skip the header and length. */
-
-    /* Scavenge element 0, which may be a hash-table structure. */
-    scavenge(where+2, 1);
-    if (!is_lisp_pointer(where[2])) {
-        lose("no pointer at %x in hash table\n", where[2]);
-    }
-    hash_table = (struct hash_table *)native_pointer(where[2]);
-    /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
-    if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
-        lose("hash table not instance (%x at %x)\n",
-             hash_table->header,
-             hash_table);
-    }
-
-    /* Scavenge element 1, which should be some internal symbol that
-     * the hash table code reserves for marking empty slots. */
-    scavenge(where+3, 1);
-    if (!is_lisp_pointer(where[3])) {
-        lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]);
-    }
-    empty_symbol = where[3];
-    /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
-    if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
-        SYMBOL_HEADER_WIDETAG) {
-        lose("not a symbol where empty-hash-table-slot symbol expected: %x\n",
-             *(lispobj *)native_pointer(empty_symbol));
-    }
-
-    /* Scavenge hash table, which will fix the positions of the other
-     * needed objects. */
-    scavenge((lispobj *)hash_table,
-             sizeof(struct hash_table) / sizeof(lispobj));
-
-    /* Cross-check the kv_vector.
*/ - if (where != (lispobj *)native_pointer(hash_table->table)) { - lose("hash_table table!=this table %x\n", hash_table->table); - } - - /* WEAK-P */ - weak_p_obj = hash_table->weak_p; - - /* index vector */ - { - lispobj index_vector_obj = hash_table->index_vector; - - if (is_lisp_pointer(index_vector_obj) && - (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) == - SIMPLE_ARRAY_WORD_WIDETAG)) { - index_vector = - ((unsigned long *)native_pointer(index_vector_obj)) + 2; - /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/ - length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]); - /*FSHOW((stderr, "/length = %d\n", length));*/ - } else { - lose("invalid index_vector %x\n", index_vector_obj); - } - } - - /* next vector */ - { - lispobj next_vector_obj = hash_table->next_vector; - - if (is_lisp_pointer(next_vector_obj) && - (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) == - SIMPLE_ARRAY_WORD_WIDETAG)) { - next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2; - /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/ - next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]); - /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/ - } else { - lose("invalid next_vector %x\n", next_vector_obj); - } - } - - /* maybe hash vector */ - { - lispobj hash_vector_obj = hash_table->hash_vector; - - if (is_lisp_pointer(hash_vector_obj) && - (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) == - SIMPLE_ARRAY_WORD_WIDETAG)){ - hash_vector = - ((unsigned long *)native_pointer(hash_vector_obj)) + 2; - /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/ - gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1]) - == next_vector_length); - } else { - hash_vector = NULL; - /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/ - } - } - - /* These lengths could be different as the index_vector can be a - * different length from the others, a larger index_vector could help - * reduce collisions. */ - gc_assert(next_vector_length*2 == kv_length); - - /* now all set up.. */ - - /* Work through the KV vector. */ - { - long i; - for (i = 1; i < next_vector_length; i++) { - lispobj old_key = kv_vector[2*i]; - -#if N_WORD_BITS == 32 - unsigned long old_index = (old_key & 0x1fffffff)%length; -#elif N_WORD_BITS == 64 - unsigned long old_index = (old_key & 0x1fffffffffffffff)%length; -#endif - - /* Scavenge the key and value. */ - scavenge(&kv_vector[2*i],2); - - /* Check whether the key has moved and is EQ based. */ - { - lispobj new_key = kv_vector[2*i]; -#if N_WORD_BITS == 32 - unsigned long new_index = (new_key & 0x1fffffff)%length; -#elif N_WORD_BITS == 64 - unsigned long new_index = (new_key & 0x1fffffffffffffff)%length; -#endif - - if ((old_index != new_index) && - ((!hash_vector) || - (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) && - ((new_key != empty_symbol) || - (kv_vector[2*i] != empty_symbol))) { - - /*FSHOW((stderr, - "* EQ key %d moved from %x to %x; index %d to %d\n", - i, old_key, new_key, old_index, new_index));*/ - - if (index_vector[old_index] != 0) { - /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/ - - /* Unlink the key from the old_index chain. */ - if (index_vector[old_index] == i) { - /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/ - index_vector[old_index] = next_vector[i]; - /* Link it into the needing rehash chain. 
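 */

/* Editorial sketch, not part of the patch: the (removed) code above
 * unlinks entry i from a hash bucket whose chain lives in two parallel
 * arrays -- index_vector[bucket] holds the chain head, next_vector[e]
 * the entry after e, with 0 terminating. The same splice in isolation: */
#include <stddef.h>

static void unlink_entry(size_t *index_vector, size_t *next_vector,
                         size_t bucket, size_t i)
{
    if (index_vector[bucket] == i) {            /* i is the chain head */
        index_vector[bucket] = next_vector[i];
    } else {
        size_t prior = index_vector[bucket];
        size_t next = next_vector[prior];
        while (next != 0) {
            if (next == i) {                    /* found: splice i out */
                next_vector[prior] = next_vector[next];
                break;
            }
            prior = next;
            next = next_vector[next];
        }
    }
}

/* In this patch the per-key relinking leaves gencgc entirely; the shared
 * hash-table code takes over via the scav_weak_hash_tables() calls added
 * further down.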
*/ - next_vector[i] = fixnum_value(hash_table->needing_rehash); - hash_table->needing_rehash = make_fixnum(i); - /*SHOW("P2");*/ - } else { - unsigned long prior = index_vector[old_index]; - unsigned long next = next_vector[prior]; - - /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/ - - while (next != 0) { - /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/ - if (next == i) { - /* Unlink it. */ - next_vector[prior] = next_vector[next]; - /* Link it into the needing rehash - * chain. */ - next_vector[next] = - fixnum_value(hash_table->needing_rehash); - hash_table->needing_rehash = make_fixnum(next); - /*SHOW("/P3");*/ - break; - } - prior = next; - next = next_vector[next]; - } - } - } - } - } - } - } - return (CEILING(kv_length + 2, 2)); -} - - /* * weak pointers @@ -2023,32 +1974,24 @@ scav_vector(lispobj *where, lispobj object) #define WEAK_POINTER_NWORDS \ CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2) -static long +static sword_t scav_weak_pointer(lispobj *where, lispobj object) { - struct weak_pointer *wp = weak_pointers; - /* Push the weak pointer onto the list of weak pointers. - * Do I have to watch for duplicates? Originally this was - * part of trans_weak_pointer but that didn't work in the - * case where the WP was in a promoted region. + /* Since we overwrite the 'next' field, we have to make + * sure not to do so for pointers already in the list. + * Instead of searching the list of weak_pointers each + * time, we ensure that next is always NULL when the weak + * pointer isn't in the list, and not NULL otherwise. + * Since we can't use NULL to denote end of list, we + * use a pointer back to the same weak_pointer. */ + struct weak_pointer * wp = (struct weak_pointer*)where; - /* Check whether it's already in the list. */ - while (wp != NULL) { - if (wp == (struct weak_pointer*)where) { - break; - } - wp = wp->next; - } - if (wp == NULL) { - /* Add it to the start of the list. */ - wp = (struct weak_pointer*)where; - if (wp->next != weak_pointers) { - wp->next = weak_pointers; - } else { - /*SHOW("avoided write to weak pointer");*/ - } + if (NULL == wp->next) { + wp->next = weak_pointers; weak_pointers = wp; + if (NULL == wp->next) + wp->next = wp; } /* Do not let GC scavenge the value slot of the weak pointer. @@ -2071,305 +2014,55 @@ search_read_only_space(void *pointer) } lispobj * -search_static_space(void *pointer) -{ - lispobj *start = (lispobj *)STATIC_SPACE_START; - lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0); - if ((pointer < (void *)start) || (pointer >= (void *)end)) - return NULL; - return (gc_search_space(start, - (((lispobj *)pointer)+2)-start, - (lispobj *) pointer)); -} - -/* a faster version for searching the dynamic space. This will work even - * if the object is in a current allocation region. */ -lispobj * -search_dynamic_space(void *pointer) -{ - page_index_t page_index = find_page_index(pointer); - lispobj *start; - - /* The address may be invalid, so do some checks. */ - if ((page_index == -1) || - (page_table[page_index].allocated == FREE_PAGE_FLAG)) - return NULL; - start = (lispobj *)((void *)page_address(page_index) - + page_table[page_index].first_object_offset); - return (gc_search_space(start, - (((lispobj *)pointer)+2)-start, - (lispobj *)pointer)); -} - -/* Is there any possibility that pointer is a valid Lisp object - * reference, and/or something else (e.g. subroutine call return - * address) which should prevent us from moving the referred-to thing? 
- * This is called from preserve_pointers() */ -static int -possibly_valid_dynamic_space_pointer(lispobj *pointer) -{ - lispobj *start_addr; - - /* Find the object start address. */ - if ((start_addr = search_dynamic_space(pointer)) == NULL) { - return 0; - } - - /* We need to allow raw pointers into Code objects for return - * addresses. This will also pick up pointers to functions in code - * objects. */ - if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) { - /* XXX could do some further checks here */ - return 1; - } - - /* If it's not a return address then it needs to be a valid Lisp - * pointer. */ - if (!is_lisp_pointer((lispobj)pointer)) { - return 0; - } - - /* Check that the object pointed to is consistent with the pointer - * low tag. - */ - switch (lowtag_of((lispobj)pointer)) { - case FUN_POINTER_LOWTAG: - /* Start_addr should be the enclosing code object, or a closure - * header. */ - switch (widetag_of(*start_addr)) { - case CODE_HEADER_WIDETAG: - /* This case is probably caught above. */ - break; - case CLOSURE_HEADER_WIDETAG: - case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: - if ((unsigned long)pointer != - ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) { - if (gencgc_verbose) - FSHOW((stderr, - "/Wf2: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - break; - default: - if (gencgc_verbose) - FSHOW((stderr, - "/Wf3: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - break; - case LIST_POINTER_LOWTAG: - if ((unsigned long)pointer != - ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) { - if (gencgc_verbose) - FSHOW((stderr, - "/Wl1: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - /* Is it plausible cons? */ - if ((is_lisp_pointer(start_addr[0]) - || (fixnump(start_addr[0])) - || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG) -#if N_WORD_BITS == 64 - || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG) -#endif - || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG)) - && (is_lisp_pointer(start_addr[1]) - || (fixnump(start_addr[1])) - || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG) -#if N_WORD_BITS == 64 - || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG) -#endif - || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG))) - break; - else { - if (gencgc_verbose) - FSHOW((stderr, - "/Wl2: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - case INSTANCE_POINTER_LOWTAG: - if ((unsigned long)pointer != - ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) { - if (gencgc_verbose) - FSHOW((stderr, - "/Wi1: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) { - if (gencgc_verbose) - FSHOW((stderr, - "/Wi2: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - break; - case OTHER_POINTER_LOWTAG: - if ((unsigned long)pointer != - ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) { - if (gencgc_verbose) - FSHOW((stderr, - "/Wo1: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - /* Is it plausible? Not a cons. XXX should check the headers. 
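 */

/* Editorial sketch, not part of the patch: nearly every branch of the
 * validator being deleted here repeats one test -- the tagged pointer
 * must equal the found object's start address plus the lowtag its low
 * bits claim. Stand-in mask value for illustration (the real constants
 * are generated into the runtime's headers): */
#include <stdint.h>

#define LOWTAG_MASK 15u  /* assumption: 16 lowtags */

static int pointer_matches_start_p(uintptr_t pointer, uintptr_t start_addr)
{
    uintptr_t lowtag = pointer & LOWTAG_MASK;
    return pointer == start_addr + lowtag;
}

/* The patch drops the whole per-widetag cascade in favour of the shared
 * looks_like_valid_lisp_pointer_p(), keeping only the dynamic-space
 * object lookup in this file.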
*/ - if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) { - if (gencgc_verbose) - FSHOW((stderr, - "/Wo2: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - switch (widetag_of(start_addr[0])) { - case UNBOUND_MARKER_WIDETAG: - case NO_TLS_VALUE_MARKER_WIDETAG: - case CHARACTER_WIDETAG: -#if N_WORD_BITS == 64 - case SINGLE_FLOAT_WIDETAG: -#endif - if (gencgc_verbose) - FSHOW((stderr, - "*Wo3: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - - /* only pointed to by function pointers? */ - case CLOSURE_HEADER_WIDETAG: - case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) - FSHOW((stderr, - "*Wo4: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - - case INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) - FSHOW((stderr, - "*Wo5: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - - /* the valid other immediate pointer objects */ - case SIMPLE_VECTOR_WIDETAG: - case RATIO_WIDETAG: - case COMPLEX_WIDETAG: -#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG - case COMPLEX_SINGLE_FLOAT_WIDETAG: -#endif -#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG - case COMPLEX_DOUBLE_FLOAT_WIDETAG: -#endif -#ifdef COMPLEX_LONG_FLOAT_WIDETAG - case COMPLEX_LONG_FLOAT_WIDETAG: -#endif - case SIMPLE_ARRAY_WIDETAG: - case COMPLEX_BASE_STRING_WIDETAG: -#ifdef COMPLEX_CHARACTER_STRING_WIDETAG - case COMPLEX_CHARACTER_STRING_WIDETAG: -#endif - case COMPLEX_VECTOR_NIL_WIDETAG: - case COMPLEX_BIT_VECTOR_WIDETAG: - case COMPLEX_VECTOR_WIDETAG: - case COMPLEX_ARRAY_WIDETAG: - case VALUE_CELL_HEADER_WIDETAG: - case SYMBOL_HEADER_WIDETAG: - case FDEFN_WIDETAG: - case CODE_HEADER_WIDETAG: - case BIGNUM_WIDETAG: -#if N_WORD_BITS != 64 - case SINGLE_FLOAT_WIDETAG: -#endif - case DOUBLE_FLOAT_WIDETAG: -#ifdef LONG_FLOAT_WIDETAG - case LONG_FLOAT_WIDETAG: -#endif - case SIMPLE_BASE_STRING_WIDETAG: -#ifdef SIMPLE_CHARACTER_STRING_WIDETAG - case SIMPLE_CHARACTER_STRING_WIDETAG: -#endif - case SIMPLE_BIT_VECTOR_WIDETAG: - case SIMPLE_ARRAY_NIL_WIDETAG: - case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: - case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: - case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: - case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: - case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: - case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: -#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG - case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: -#endif - case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: - case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: -#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG - case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG - case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG - case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: -#endif - case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG: - case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG: -#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG - case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG: -#endif -#ifdef 
SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG - case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG - case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG: -#endif -#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG - case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG: -#endif - case SAP_WIDETAG: - case WEAK_POINTER_WIDETAG: - break; +search_static_space(void *pointer) +{ + lispobj *start = (lispobj *)STATIC_SPACE_START; + lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0); + if ((pointer < (void *)start) || (pointer >= (void *)end)) + return NULL; + return (gc_search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *) pointer)); +} - default: - if (gencgc_verbose) - FSHOW((stderr, - "/Wo6: %x %x %x\n", - pointer, start_addr, *start_addr)); - return 0; - } - break; - default: - if (gencgc_verbose) - FSHOW((stderr, - "*W?: %x %x %x\n", - pointer, start_addr, *start_addr)); +/* a faster version for searching the dynamic space. This will work even + * if the object is in a current allocation region. */ +lispobj * +search_dynamic_space(void *pointer) +{ + page_index_t page_index = find_page_index(pointer); + lispobj *start; + + /* The address may be invalid, so do some checks. */ + if ((page_index == -1) || page_free_p(page_index)) + return NULL; + start = (lispobj *)page_scan_start(page_index); + return (gc_search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *)pointer)); +} + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + +/* Is there any possibility that pointer is a valid Lisp object + * reference, and/or something else (e.g. subroutine call return + * address) which should prevent us from moving the referred-to thing? + * This is called from preserve_pointers() */ +static int +possibly_valid_dynamic_space_pointer(lispobj *pointer) +{ + lispobj *start_addr; + + /* Find the object start address. */ + if ((start_addr = search_dynamic_space(pointer)) == NULL) { return 0; } - /* looks good */ - return 1; + return looks_like_valid_lisp_pointer_p(pointer, start_addr); } +#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) + /* Adjust large bignum and vector objects. This will adjust the * allocated region if the size has shrunk, and move unboxed objects * into unboxed pages. 
 * The pages are not promoted here, and the
@@ -2382,11 +2075,11 @@ maybe_adjust_large_object(lispobj *where)
 {
     page_index_t first_page;
     page_index_t next_page;
-    long nwords;
+    sword_t nwords;

-    long remaining_bytes;
-    long bytes_freed;
-    long old_bytes_used;
+    uword_t remaining_bytes;
+    uword_t bytes_freed;
+    uword_t old_bytes_used;

     int boxed;

@@ -2408,14 +2101,11 @@ maybe_adjust_large_object(lispobj *where)
     case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
     case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
     case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
-    case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
-#endif
+
+    case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG:
+
     case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
     case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
-    case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
-#endif
 #ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
     case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
 #endif
@@ -2428,15 +2118,12 @@ maybe_adjust_large_object(lispobj *where)
 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
     case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
 #endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
-    case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
-#endif
+
+    case SIMPLE_ARRAY_FIXNUM_WIDETAG:
+
 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
     case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
 #endif
-#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
-    case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
-#endif
 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
     case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
 #endif
@@ -2472,25 +2159,24 @@ maybe_adjust_large_object(lispobj *where)
      * but let's do it for them all (they'll probably be written
      * anyway?). */

-    gc_assert(page_table[first_page].first_object_offset == 0);
+    gc_assert(page_starts_contiguous_block_p(first_page));

     next_page = first_page;
     remaining_bytes = nwords*N_WORD_BYTES;
-    while (remaining_bytes > PAGE_BYTES) {
+    while (remaining_bytes > GENCGC_CARD_BYTES) {
         gc_assert(page_table[next_page].gen == from_space);
-        gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
-                  || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
+        gc_assert(page_allocated_no_region_p(next_page));
         gc_assert(page_table[next_page].large_object);
-        gc_assert(page_table[next_page].first_object_offset ==
-                  -PAGE_BYTES*(next_page-first_page));
-        gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+        gc_assert(page_table[next_page].scan_start_offset ==
+                  npage_bytes(next_page-first_page));
+        gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);

         page_table[next_page].allocated = boxed;

         /* Shouldn't be write-protected at this stage. Essential that the
          * pages aren't. */
         gc_assert(!page_table[next_page].write_protected);
-        remaining_bytes -= PAGE_BYTES;
+        remaining_bytes -= GENCGC_CARD_BYTES;
         next_page++;
     }

@@ -2512,13 +2198,12 @@ maybe_adjust_large_object(lispobj *where)

     /* Free any remaining pages; needs care. */
     next_page++;
-    while ((old_bytes_used == PAGE_BYTES) &&
+    while ((old_bytes_used == GENCGC_CARD_BYTES) &&
            (page_table[next_page].gen == from_space) &&
-           ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
-            || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+           page_allocated_no_region_p(next_page) &&
           page_table[next_page].large_object &&
-          (page_table[next_page].first_object_offset ==
-           -(next_page - first_page)*PAGE_BYTES)) {
+          (page_table[next_page].scan_start_offset ==
+           npage_bytes(next_page - first_page))) {
        /* It checks out OK, free the page.
        * We don't need to bother zeroing
        * pages as this should have been done before shrinking the
        * object. These pages shouldn't be write protected as they
        * should be zero filled. */
        gc_assert(page_table[next_page].write_protected == 0);

        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].allocated = FREE_PAGE_FLAG;
        page_table[next_page].bytes_used = 0;
        bytes_freed += old_bytes_used;
        next_page++;
    }
@@ -2556,6 +2241,7 @@ maybe_adjust_large_object(lispobj *where)
 *
 * It is also assumed that the current gc_alloc() region has been
 * flushed and the tables updated. */
+
 static void
 preserve_pointer(void *addr)
 {
@@ -2566,7 +2252,7 @@ preserve_pointer(void *addr)

     /* quick check 1: Address is quite likely to have been invalid. */
     if ((addr_page_index == -1)
-        || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+        || page_free_p(addr_page_index)
         || (page_table[addr_page_index].bytes_used == 0)
         || (page_table[addr_page_index].gen != from_space)
         /* Skip if already marked dont_move. */
         || (page_table[addr_page_index].dont_move != 0))
         return;
@@ -2580,7 +2266,8 @@ preserve_pointer(void *addr)
     /* quick check 2: Check the offset within the page.
      *
      */
-    if (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
+    if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
+        page_table[addr_page_index].bytes_used)
         return;

     /* Filter out anything which can't be a pointer to a Lisp object
      * (or, as a special case which also requires dont_move, a return
      * address referring to something in a CodeObject). This is
      * expensive but important, since it vastly reduces the
      * probability that random garbage will be bogusly interpreted as
-     * a pointer which prevents a page from moving. */
-    if (!(possibly_valid_dynamic_space_pointer(addr)))
+     * a pointer which prevents a page from moving.
+     *
+     * This only needs to happen on x86oids, where this is used for
+     * conservative roots. Non-x86oid systems only ever call this
+     * function on known-valid lisp objects. */
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+    if (!(code_page_p(addr_page_index)
+          || (is_lisp_pointer((lispobj)addr) &&
+              possibly_valid_dynamic_space_pointer(addr))))
         return;
+#endif

     /* Find the beginning of the region. Note that there may be
      * objects in the region preceding the one that we were passed a
      * pointer to: if this is the case, we will write-protect all the
      * previous objects' pages too. */

 #if 0
     /* I think this'd work just as well, but without the assertions.
      * -dan 2004.01.01 */
-    first_page=
-        find_page_index(page_address(addr_page_index)+
-                        page_table[addr_page_index].first_object_offset);
+    first_page = find_page_index(page_scan_start(addr_page_index))
 #else
     first_page = addr_page_index;
-    while (page_table[first_page].first_object_offset != 0) {
+    while (!page_starts_contiguous_block_p(first_page)) {
         --first_page;
         /* Do some checks. */
-        gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
+        gc_assert(page_table[first_page].bytes_used == GENCGC_CARD_BYTES);
         gc_assert(page_table[first_page].gen == from_space);
         gc_assert(page_table[first_page].allocated == region_allocation);
     }
 #endif

     /* Adjust any large objects before promotion as they won't be
      * copied after promotion. */
     if (page_table[first_page].large_object) {
-        maybe_adjust_large_object(page_address(first_page));
-        /* If a large object has shrunk then addr may now point to a
-         * free area in which case it's ignored here. Note it gets
-         * through the valid pointer test above because the tail looks
-         * like conses. */
-        if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
-            || (page_table[addr_page_index].bytes_used == 0)
-            /* Check the offset within the page. */
-            || (((unsigned long)addr & (PAGE_BYTES - 1))
-                > page_table[addr_page_index].bytes_used)) {
-            FSHOW((stderr,
-                   "weird? ignore ptr 0x%x to freed area of large object\n",
-                   addr));
+        /* Large objects (specifically vectors and bignums) can
+         * shrink, leaving a "tail" of zeroed space, which appears to
+         * the filter above as a series of valid conses, both car and
+         * cdr of which contain the fixnum zero, but will be
+         * deallocated when the GC shrinks the large object region to
+         * fit the object within. We allow raw pointers within code
+         * space, but for boxed and unboxed space we do not, nor do
+         * pointers to within a non-code object appear valid above. A
+         * cons cell will never merit allocation to a large object
+         * page, so pick them off now, before we try to adjust the
+         * object. */
+        if ((lowtag_of((lispobj)addr) == LIST_POINTER_LOWTAG) &&
+            !code_page_p(first_page)) {
             return;
         }
+        maybe_adjust_large_object(page_address(first_page));
         /* It may have moved to unboxed pages. */
         region_allocation = page_table[first_page].allocated;
     }
@@ -2644,14 +2338,6 @@ preserve_pointer(void *addr)
         /* Mark the page static. */
         page_table[i].dont_move = 1;

-        /* Move the page to the new_space. XX I'd rather not do this
-         * but the GC logic is not quite able to copy with the static
-         * pages remaining in the from space. This also requires the
-         * generation bytes_allocated counters be updated. */
-        page_table[i].gen = new_space;
-        generations[new_space].bytes_allocated += page_table[i].bytes_used;
-        generations[from_space].bytes_allocated -= page_table[i].bytes_used;
-
         /* It is essential that the pages are not write protected as
          * they may have pointers into the old-space which need
          * scavenging. They shouldn't be write protected at this
          * stage. */
         gc_assert(!page_table[i].write_protected);

         /* Check whether this is the last page in this contiguous block. */
-        if ((page_table[i].bytes_used < PAGE_BYTES)
-            /* ..or it is PAGE_BYTES and is the last in the block */
-            || (page_table[i+1].allocated == FREE_PAGE_FLAG)
-            || (page_table[i+1].bytes_used == 0) /* next page free */
-            || (page_table[i+1].gen != from_space) /* diff. gen */
-            || (page_table[i+1].first_object_offset == 0))
+        if (page_ends_contiguous_block_p(i, from_space))
             break;
     }

@@ -2689,20 +2370,20 @@ static int
 update_page_write_prot(page_index_t page)
 {
     generation_index_t gen = page_table[page].gen;
-    long j;
+    sword_t j;
     int wp_it = 1;
     void **page_addr = (void **)page_address(page);
-    long num_words = page_table[page].bytes_used / N_WORD_BYTES;
+    sword_t num_words = page_table[page].bytes_used / N_WORD_BYTES;

     /* Shouldn't be a free page. */
-    gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
+    gc_assert(page_allocated_p(page));
     gc_assert(page_table[page].bytes_used != 0);

     /* Skip if it's already write-protected, pinned, or unboxed */
     if (page_table[page].write_protected
         /* FIXME: What's the reason for not write-protecting pinned pages? */
         || page_table[page].dont_move
-        || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
+        || page_unboxed_p(page))
         return (0);

     /* Scan the page for pointers to younger generations or the
@@ -2715,7 +2396,7 @@ update_page_write_prot(page_index_t page)

         /* Check that it's in the dynamic space */
         if (index != -1)
             if (/* Does it point to a younger or the temp. generation?
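 */

/* Editorial sketch, not part of the patch: update_page_write_prot may
 * protect a page only if no word on it references a younger generation
 * (or the scratch generation used during collection). The decision in
 * isolation; find_gen() stands in for find_page_index() plus the
 * page_table lookup, returning -1 for non-heap words: */
#include <stddef.h>

static int may_write_protect(void **words, size_t num_words, int page_gen,
                             int scratch_gen, int (*find_gen)(void *))
{
    size_t j;
    for (j = 0; j < num_words; j++) {
        int g = find_gen(words[j]);
        if (g >= 0 && (g < page_gen || g == scratch_gen))
            return 0;   /* an old->young pointer: must stay writable */
    }
    return 1;           /* safe: a write fault will clear the flag later */
}

/* A protected page can then be skipped by later scavenges until the OS
 * fault handler re-admits it on the first write.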
*/ - ((page_table[index].allocated != FREE_PAGE_FLAG) + (page_allocated_p(index) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) || (page_table[index].gen == SCRATCH_GENERATION))) @@ -2735,7 +2416,7 @@ update_page_write_prot(page_index_t page) /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/ os_protect((void *)page_addr, - PAGE_BYTES, + GENCGC_CARD_BYTES, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE); /* Note the page as protected in the page tables. */ @@ -2778,18 +2459,18 @@ static void scavenge_generations(generation_index_t from, generation_index_t to) { page_index_t i; - int num_wp = 0; + page_index_t num_wp = 0; #define SC_GEN_CK 0 #if SC_GEN_CK /* Clear the write_protected_cleared flags on all pages. */ - for (i = 0; i < NUM_PAGES; i++) + for (i = 0; i < page_table_pages; i++) page_table[i].write_protected_cleared = 0; #endif for (i = 0; i < last_free_page; i++) { generation_index_t generation = page_table[i].gen; - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (generation != new_space) && (generation >= from) @@ -2798,24 +2479,20 @@ scavenge_generations(generation_index_t from, generation_index_t to) int write_protected=1; /* This should be the start of a region */ - gc_assert(page_table[i].first_object_offset == 0); + gc_assert(page_starts_contiguous_block_p(i)); /* Now work forward until the end of the region */ for (last_page = i; ; last_page++) { write_protected = write_protected && page_table[last_page].write_protected; - if ((page_table[last_page].bytes_used < PAGE_BYTES) - /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) - || (page_table[last_page+1].bytes_used == 0) - || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + if (page_ends_contiguous_block_p(last_page, generation)) break; } if (!write_protected) { scavenge(page_address(i), - (page_table[last_page].bytes_used + - (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + ((uword_t)(page_table[last_page].bytes_used + + npage_bytes(last_page-i))) + /N_WORD_BYTES); /* Now scan the pages and write protect those that * don't have pointers to younger generations. */ @@ -2837,16 +2514,16 @@ scavenge_generations(generation_index_t from, generation_index_t to) #if SC_GEN_CK /* Check that none of the write_protected pages in this generation * have been written to. */ - for (i = 0; i < NUM_PAGES; i++) { - if ((page_table[i].allocation != FREE_PAGE_FLAG) + for (i = 0; i < page_table_pages; i++) { + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0)) { FSHOW((stderr, "/scavenge_generation() %d\n", generation)); FSHOW((stderr, - "/page bytes_used=%d first_object_offset=%d dont_move=%d\n", + "/page bytes_used=%d scan_start_offset=%lu dont_move=%d\n", page_table[i].bytes_used, - page_table[i].first_object_offset, + page_table[i].scan_start_offset, page_table[i].dont_move)); lose("write to protected page %d in scavenge_generation()\n", i); } @@ -2892,7 +2569,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) generation)); for (i = 0; i < last_free_page; i++) { /* Note that this skips over open regions when it encounters them. 
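 */

/* Editorial sketch, not part of the patch: the patch folds the old
 * four-way "is this the last page?" test into
 * page_ends_contiguous_block_p(). A stand-in for what that predicate
 * answers during region walks like the one above: */
#include <stddef.h>

#define CARD 4096  /* assumption: GENCGC_CARD_BYTES */

struct pg { size_t bytes_used; int gen; size_t scan_start_offset; };

static int ends_contiguous_block_p(const struct pg *pt, size_t i, int gen)
{
    return pt[i].bytes_used < CARD           /* page only partly used */
        || pt[i + 1].bytes_used == 0         /* or next page is empty */
        || pt[i + 1].gen != gen              /* or belongs elsewhere */
        || pt[i + 1].scan_start_offset == 0; /* or starts a new region */
}

/* Scanning a generation is then: find a region start, advance until this
 * predicate fires, scavenge [start, end], continue after the region.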
*/ - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && ((page_table[i].write_protected == 0) @@ -2902,7 +2579,8 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) page_index_t last_page; int all_wp=1; - /* The scavenge will start at the first_object_offset of page i. + /* The scavenge will start at the scan_start_offset of + * page i. * * We need to find the full extent of this contiguous * block in case objects span pages. @@ -2918,27 +2596,20 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) /* Check whether this is the last page in this * contiguous block */ - if ((page_table[last_page].bytes_used < PAGE_BYTES) - /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) - || (page_table[last_page+1].bytes_used == 0) - || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + if (page_ends_contiguous_block_p(last_page, generation)) break; } /* Do a limited check for write-protected pages. */ if (!all_wp) { - long size; - - size = (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES - - page_table[i].first_object_offset)/N_WORD_BYTES; + sword_t nwords = (((uword_t) + (page_table[last_page].bytes_used + + npage_bytes(last_page-i) + + page_table[i].scan_start_offset)) + / N_WORD_BYTES); new_areas_ignore_page = last_page; - scavenge(page_address(i) + - page_table[i].first_object_offset, - size); + scavenge(page_scan_start(i), nwords); } i = last_page; @@ -2953,15 +2624,15 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) static void scavenge_newspace_generation(generation_index_t generation) { - long i; + size_t i; /* the new_areas array currently being written to by gc_alloc() */ struct new_area (*current_new_areas)[] = &new_areas_1; - long current_new_areas_index; + size_t current_new_areas_index; /* the new_areas created by the previous scavenge cycle */ struct new_area (*previous_new_areas)[] = NULL; - long previous_new_areas_index; + size_t previous_new_areas_index; /* Flush the current regions updating the tables. */ gc_alloc_update_all_page_tables(); @@ -2980,6 +2651,13 @@ scavenge_newspace_generation(generation_index_t generation) /* Record all new areas now. */ record_new_objects = 2; + /* Give a chance to weak hash tables to make other objects live. + * FIXME: The algorithm implemented here for weak hash table gcing + * is O(W^2+N) as Bruno Haible warns in + * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html + * see "Implementation 2". */ + scav_weak_hash_tables(); + /* Flush the current regions updating the tables. */ gc_alloc_update_all_page_tables(); @@ -3015,11 +2693,12 @@ scavenge_newspace_generation(generation_index_t generation) /* New areas of objects allocated have been lost so need to do a * full scan to be sure! If this becomes a problem try * increasing NUM_NEW_AREAS. */ - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("new_areas overflow, doing full scavenge"); + } - /* Don't need to record new areas that get scavenge anyway - * during scavenge_newspace_generation_one_scan. */ + /* Don't need to record new areas that get scavenged + * anyway during scavenge_newspace_generation_one_scan. 
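 */

/* Editorial sketch, not part of the patch: scavenge_newspace_generation
 * alternates two new-area buffers and repeats until a pass records no
 * fresh areas -- a fixed-point iteration. Its skeleton, stripped of the
 * GC specifics (scan_pass() is a stand-in): */
#include <stddef.h>

typedef struct { size_t n; /* ...recorded areas... */ } area_buf;

/* Stand-in: scavenge the areas listed in 'in' (NULL means everything),
 * recording any freshly dirtied areas into 'out'. */
static void scan_pass(const area_buf *in, area_buf *out)
{
    (void)in;
    out->n = 0;
}

static void scavenge_to_fixed_point(void)
{
    area_buf a = {0}, b = {0};
    area_buf *prev = &a, *cur = &b;
    scan_pass(NULL, prev);            /* full first pass */
    while (prev->n != 0) {            /* fresh areas appeared... */
        scan_pass(prev, cur);         /* ...so scan just those */
        area_buf *tmp = prev; prev = cur; cur = tmp;
    }
}

/* The scav_weak_hash_tables() calls added in this hunk have to sit
 * inside that loop: an entry kept alive late can itself dirty new areas.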
 */
        record_new_objects = 1;

        scavenge_newspace_generation_one_scan(generation);

@@ -3027,6 +2706,8 @@ scavenge_newspace_generation(generation_index_t generation)
        /* Record all new areas now. */
        record_new_objects = 2;

+        scav_weak_hash_tables();
+
        /* Flush the current regions updating the tables. */
        gc_alloc_update_all_page_tables();

        /* Work through previous_new_areas. */
        for (i = 0; i < previous_new_areas_index; i++) {
-            long page = (*previous_new_areas)[i].page;
-            long offset = (*previous_new_areas)[i].offset;
-            long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+            page_index_t page = (*previous_new_areas)[i].page;
+            size_t offset = (*previous_new_areas)[i].offset;
+            size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
            gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
            scavenge(page_address(page)+offset, size);
        }

+        scav_weak_hash_tables();
+
        /* Flush the current regions updating the tables. */
        gc_alloc_update_all_page_tables();
    }
@@ -3056,16 +2739,19 @@ scavenge_newspace_generation(generation_index_t generation)
    record_new_objects = 0;

 #if SC_NS_GEN_CK
-    /* Check that none of the write_protected pages in this generation
-     * have been written to. */
-    for (i = 0; i < NUM_PAGES; i++) {
-        if ((page_table[i].allocation != FREE_PAGE_FLAG)
-            && (page_table[i].bytes_used != 0)
-            && (page_table[i].gen == generation)
-            && (page_table[i].write_protected_cleared != 0)
-            && (page_table[i].dont_move == 0)) {
-            lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
-                 i, generation, page_table[i].dont_move);
+    {
+        page_index_t i;
+        /* Check that none of the write_protected pages in this generation
+         * have been written to. */
+        for (i = 0; i < page_table_pages; i++) {
+            if (page_allocated_p(i)
+                && (page_table[i].bytes_used != 0)
+                && (page_table[i].gen == generation)
+                && (page_table[i].write_protected_cleared != 0)
+                && (page_table[i].dont_move == 0)) {
+                lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
+                     i, generation, page_table[i].dont_move);
+            }
        }
    }
 #endif
@@ -3080,33 +2766,51 @@ static void
 unprotect_oldspace(void)
 {
     page_index_t i;
+    void *region_addr = 0;
+    void *page_addr = 0;
+    uword_t region_bytes = 0;

     for (i = 0; i < last_free_page; i++) {
-        if ((page_table[i].allocated != FREE_PAGE_FLAG)
+        if (page_allocated_p(i)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == from_space)) {
-            void *page_start;
-
-            page_start = (void *)page_address(i);

             /* Remove any write-protection. We should be able to rely
              * on the write-protect flag to avoid redundant calls. */
             if (page_table[i].write_protected) {
-                os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
                 page_table[i].write_protected = 0;
+                page_addr = page_address(i);
+                if (!region_addr) {
+                    /* First region. */
+                    region_addr = page_addr;
+                    region_bytes = GENCGC_CARD_BYTES;
+                } else if (region_addr + region_bytes == page_addr) {
+                    /* Region continues. */
+                    region_bytes += GENCGC_CARD_BYTES;
+                } else {
+                    /* Unprotect previous region. */
+                    os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+                    /* First page in new region. */
+                    region_addr = page_addr;
+                    region_bytes = GENCGC_CARD_BYTES;
+                }
             }
         }
     }
+    if (region_addr) {
+        /* Unprotect last region. */
+        os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+    }
 }

 /* Work through all the pages and free any in from_space.
This * assumes that all objects have been copied or promoted to an older * generation. Bytes_allocated and the generation bytes_allocated * counter are updated. The number of bytes freed is returned. */ -static long +static uword_t free_oldspace(void) { - long bytes_freed = 0; + uword_t bytes_freed = 0; page_index_t first_page, last_page; first_page = 0; @@ -3114,7 +2818,7 @@ free_oldspace(void) do { /* Find a first page for the next region of pages. */ while ((first_page < last_free_page) - && ((page_table[first_page].allocated == FREE_PAGE_FLAG) + && (page_free_p(first_page) || (page_table[first_page].bytes_used == 0) || (page_table[first_page].gen != from_space))) first_page++; @@ -3132,27 +2836,18 @@ free_oldspace(void) page_table[last_page].bytes_used; page_table[last_page].allocated = FREE_PAGE_FLAG; page_table[last_page].bytes_used = 0; - - /* Remove any write-protection. We should be able to rely - * on the write-protect flag to avoid redundant calls. */ - { - void *page_start = (void *)page_address(last_page); - - if (page_table[last_page].write_protected) { - os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); - page_table[last_page].write_protected = 0; - } - } + /* Should already be unprotected by unprotect_oldspace(). */ + gc_assert(!page_table[last_page].write_protected); last_page++; } while ((last_page < last_free_page) - && (page_table[last_page].allocated != FREE_PAGE_FLAG) + && page_allocated_p(last_page) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(last_page-first_page), + npage_bytes(last_page-first_page), OS_VM_PROT_NONE); #endif first_page = last_page; @@ -3171,13 +2866,13 @@ print_ptr(lispobj *addr) page_index_t pi1 = find_page_index((void*)addr); if (pi1 != -1) - fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n", - (unsigned long) addr, + fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n", + addr, pi1, page_table[pi1].allocated, page_table[pi1].gen, page_table[pi1].bytes_used, - page_table[pi1].first_object_offset, + page_table[pi1].scan_start_offset, page_table[pi1].dont_move); fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n", *(addr-4), @@ -3192,15 +2887,30 @@ print_ptr(lispobj *addr) } #endif -extern long undefined_tramp; +static int +is_in_stack_space(lispobj ptr) +{ + /* For space verification: Pointers can be valid if they point + * to a thread stack space. This would be faster if the thread + * structures had page-table entries as if they were part of + * the heap space. 
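 */

/* Editorial sketch, not part of the patch: unprotect_oldspace (above)
 * now batches protection changes, coalescing runs of adjacent protected
 * pages into a single os_protect call rather than one call per page.
 * The run-coalescing pattern in isolation (unprotect_run is a no-op
 * stand-in for os_protect): */
#include <stddef.h>

#define CARD 4096  /* assumption: GENCGC_CARD_BYTES */

static void unprotect_run(char *addr, size_t bytes)  /* stand-in */
{
    (void)addr; (void)bytes;
}

static void unprotect_marked(char *base, const int *marked, size_t npages)
{
    char *run_addr = NULL;
    size_t run_bytes = 0;
    size_t i;
    for (i = 0; i < npages; i++) {
        if (!marked[i])
            continue;
        char *page_addr = base + i * CARD;
        if (run_addr != NULL && run_addr + run_bytes == page_addr) {
            run_bytes += CARD;                       /* extend current run */
        } else {
            if (run_addr != NULL)
                unprotect_run(run_addr, run_bytes);  /* flush previous run */
            run_addr = page_addr;                    /* start a new run */
            run_bytes = CARD;
        }
    }
    if (run_addr != NULL)
        unprotect_run(run_addr, run_bytes);          /* flush the tail run */
}

/* Each os_protect is a system call, so fewer, larger ranges mean fewer
 * kernel round trips during the pre-GC sweep.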
*/ + struct thread *th; + for_each_thread(th) { + if ((th->control_stack_start <= (lispobj *)ptr) && + (th->control_stack_end >= (lispobj *)ptr)) { + return 1; + } + } + return 0; +} static void verify_space(lispobj *start, size_t words) { int is_in_dynamic_space = (find_page_index((void*)start) != -1); int is_in_readonly_space = - (READ_ONLY_SPACE_START <= (unsigned long)start && - (unsigned long)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); + (READ_ONLY_SPACE_START <= (uword_t)start && + (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); while (words > 0) { size_t count = 1; @@ -3208,10 +2918,10 @@ verify_space(lispobj *start, size_t words) if (is_lisp_pointer(thing)) { page_index_t page_index = find_page_index((void*)thing); - long to_readonly_space = + sword_t to_readonly_space = (READ_ONLY_SPACE_START <= thing && thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); - long to_static_space = + sword_t to_static_space = (STATIC_SPACE_START <= thing && thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0)); @@ -3219,17 +2929,17 @@ verify_space(lispobj *start, size_t words) if (page_index != -1) { /* If it's within the dynamic space it should point to a used * page. XX Could check the offset too. */ - if ((page_table[page_index].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(page_index) && (page_table[page_index].bytes_used == 0)) - lose ("Ptr %x @ %x sees free page.\n", thing, start); + lose ("Ptr %p @ %p sees free page.\n", thing, start); /* Check that it doesn't point to a forwarding pointer! */ if (*((lispobj *)native_pointer(thing)) == 0x01) { - lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start); + lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start); } /* Check that its not in the RO space as it would then be a * pointer from the RO to the dynamic space. */ if (is_in_readonly_space) { - lose("ptr to dynamic space %x from RO space %x\n", + lose("ptr to dynamic space %p from RO space %x\n", thing, start); } /* Does it point to a plausible object? This check slows @@ -3243,14 +2953,16 @@ verify_space(lispobj *start, size_t words) * dynamically. */ /* if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) { - lose("ptr %x to invalid object %x\n", thing, start); + lose("ptr %p to invalid object %p\n", thing, start); } */ } else { + extern void funcallable_instance_tramp; /* Verify that it points to another valid space. 
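 */

/* Editorial sketch, not part of the patch: the verifier's pointer checks
 * above reduce to a small classification; 0x01 is the header value a
 * forwarding pointer leaves behind, and no live word may still contain
 * one after collection. Ranges are stand-ins: */
#include <stdint.h>

static const char *classify_word(uintptr_t thing, const uintptr_t *obj,
                                 uintptr_t dyn_lo, uintptr_t dyn_hi)
{
    if (thing >= dyn_lo && thing < dyn_hi) {
        if (*obj == 0x01)                  /* forwarding pointer header */
            return "broken: forwarding pointer escaped the GC";
        return "dynamic-space object";
    }
    return "must be read-only/static space, a stack, or junk";
}

/* The patch also widens the final test: a pointer into a thread's
 * control stack (is_in_stack_space, above) is now accepted instead of
 * being reported as junk.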
*/ if (!to_readonly_space && !to_static_space - && (thing != (unsigned long)&undefined_tramp)) { - lose("Ptr %x @ %x sees junk.\n", thing, start); + && (thing != (lispobj)&funcallable_instance_tramp) + && !is_in_stack_space(thing)) { + lose("Ptr %p @ %p sees junk.\n", thing, start); } } } else { @@ -3280,16 +2992,31 @@ verify_space(lispobj *start, size_t words) case SINGLE_FLOAT_WIDETAG: #endif case UNBOUND_MARKER_WIDETAG: - case INSTANCE_HEADER_WIDETAG: case FDEFN_WIDETAG: count = 1; break; + case INSTANCE_HEADER_WIDETAG: + { + lispobj nuntagged; + sword_t ntotal = HeaderValue(thing); + lispobj layout = ((struct instance *)start)->slots[0]; + if (!layout) { + count = 1; + break; + } + nuntagged = ((struct layout *) + native_pointer(layout))->n_untagged_slots; + verify_space(start + 1, + ntotal - fixnum_value(nuntagged)); + count = ntotal + 1; + break; + } case CODE_HEADER_WIDETAG: { lispobj object = *start; struct code *code; - long nheader_words, ncode_words, nwords; + sword_t nheader_words, ncode_words, nwords; lispobj fheaderl; struct simple_fun *fheaderp; @@ -3312,7 +3039,7 @@ verify_space(lispobj *start, size_t words) /* Only when enabled */ && verify_dynamic_code_check) { FSHOW((stderr, - "/code object at %x in the dynamic space\n", + "/code object at %p in the dynamic space\n", start)); } @@ -3329,7 +3056,8 @@ verify_space(lispobj *start, size_t words) while (fheaderl != NIL) { fheaderp = (struct simple_fun *) native_pointer(fheaderl); - gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG); + gc_assert(widetag_of(fheaderp->header) == + SIMPLE_FUN_HEADER_WIDETAG); verify_space(&fheaderp->name, 1); verify_space(&fheaderp->arglist, 1); verify_space(&fheaderp->type, 1); @@ -3357,6 +3085,9 @@ verify_space(lispobj *start, size_t words) #ifdef COMPLEX_LONG_FLOAT_WIDETAG case COMPLEX_LONG_FLOAT_WIDETAG: #endif +#ifdef SIMD_PACK_WIDETAG + case SIMD_PACK_WIDETAG: +#endif case SIMPLE_BASE_STRING_WIDETAG: #ifdef SIMPLE_CHARACTER_STRING_WIDETAG case SIMPLE_CHARACTER_STRING_WIDETAG: @@ -3369,14 +3100,11 @@ verify_space(lispobj *start, size_t words) case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: -#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG - case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: -#endif + + case SIMPLE_ARRAY_UNSIGNED_FIXNUM_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: -#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG - case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: -#endif #ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: #endif @@ -3389,15 +3117,12 @@ verify_space(lispobj *start, size_t words) #ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG: #endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG: -#endif + + case SIMPLE_ARRAY_FIXNUM_WIDETAG: + #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: #endif -#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG - case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: -#endif #ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: #endif @@ -3417,11 +3142,15 @@ verify_space(lispobj *start, size_t words) #endif case SAP_WIDETAG: case WEAK_POINTER_WIDETAG: +#ifdef NO_TLS_VALUE_MARKER_WIDETAG + case NO_TLS_VALUE_MARKER_WIDETAG: +#endif count = (sizetab[widetag_of(*start)])(start); break; default: - gc_abort(); + lose("Unhandled widetag %p at %p\n", + 
widetag_of(*start), start); } } } @@ -3439,16 +3168,16 @@ verify_gc(void) * Some counts of lispobjs are called foo_count; it might be good * to grep for all foo_size and rename the appropriate ones to * foo_count. */ - long read_only_space_size = + sword_t read_only_space_size = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0) - (lispobj*)READ_ONLY_SPACE_START; - long static_space_size = + sword_t static_space_size = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0) - (lispobj*)STATIC_SPACE_START; struct thread *th; for_each_thread(th) { - long binding_stack_size = - (lispobj*)SymbolValue(BINDING_STACK_POINTER,th) + sword_t binding_stack_size = + (lispobj*)get_binding_stack_pointer(th) - (lispobj*)th->binding_stack_start; verify_space(th->binding_stack_start, binding_stack_size); } @@ -3462,14 +3191,13 @@ verify_generation(generation_index_t generation) page_index_t i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { page_index_t last_page; - int region_allocation = page_table[i].allocated; /* This should be the start of a contiguous block */ - gc_assert(page_table[i].first_object_offset == 0); + gc_assert(page_starts_contiguous_block_p(i)); /* Need to find the full extent of this contiguous block in case objects span pages. */ @@ -3479,16 +3207,14 @@ verify_generation(generation_index_t generation) for (last_page = i; ;last_page++) /* Check whether this is the last page in this contiguous * block. */ - if ((page_table[last_page].bytes_used < PAGE_BYTES) - /* Or it is PAGE_BYTES and is the last in the block */ - || (page_table[last_page+1].allocated != region_allocation) - || (page_table[last_page+1].bytes_used == 0) - || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + if (page_ends_contiguous_block_p(last_page, generation)) break; - verify_space(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + verify_space(page_address(i), + ((uword_t) + (page_table[last_page].bytes_used + + npage_bytes(last_page-i))) + / N_WORD_BYTES); i = last_page; } } @@ -3501,23 +3227,23 @@ verify_zero_fill(void) page_index_t page; for (page = 0; page < last_free_page; page++) { - if (page_table[page].allocated == FREE_PAGE_FLAG) { + if (page_free_p(page)) { /* The whole page should be zero filled. 
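 *
 * (Aside, not part of the patch: verify_generation above now walks
 * the page table one contiguous block at a time, leaning on the new
 * predicates instead of spelling the block test out inline. The loop
 * shape, as a sketch:
 *
 *     for (i = 0; i < last_free_page; i++) {
 *         if (!page_allocated_p(i)
 *             || (page_table[i].bytes_used == 0)
 *             || (page_table[i].gen != generation))
 *             continue;
 *         gc_assert(page_starts_contiguous_block_p(i));
 *         last_page = i;
 *         while (!page_ends_contiguous_block_p(last_page, generation))
 *             last_page++;
 *         // verify_space() over page_address(i) .. end of last_page
 *         i = last_page;
 *     }
 *
 * page_ends_contiguous_block_p bundles the bytes_used, generation,
 * and scan-start tests that the removed lines checked by hand.)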
*/ - long *start_addr = (long *)page_address(page); - long size = 1024; - long i; + sword_t *start_addr = (sword_t *)page_address(page); + sword_t size = 1024; + sword_t i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { lose("free page not zero at %x\n", start_addr + i); } } } else { - long free_bytes = PAGE_BYTES - page_table[page].bytes_used; + sword_t free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used; if (free_bytes > 0) { - long *start_addr = (long *)((unsigned long)page_address(page) + sword_t *start_addr = (sword_t *)((uword_t)page_address(page) + page_table[page].bytes_used); - long size = free_bytes / N_WORD_BYTES; - long i; + sword_t size = free_bytes / N_WORD_BYTES; + sword_t i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { lose("free region not zero at %x\n", start_addr + i); @@ -3559,10 +3285,7 @@ write_protect_generation_pages(generation_index_t generation) gc_assert(generation < SCRATCH_GENERATION); for (start = 0; start < last_free_page; start++) { - if ((page_table[start].allocated == BOXED_PAGE_FLAG) - && (page_table[start].bytes_used != 0) - && !page_table[start].dont_move - && (page_table[start].gen == generation)) { + if (protect_page_p(start, generation)) { void *page_start; page_index_t last; @@ -3570,10 +3293,7 @@ write_protect_generation_pages(generation_index_t generation) page_table[start].write_protected = 1; for (last = start + 1; last < last_free_page; last++) { - if ((page_table[last].allocated != BOXED_PAGE_FLAG) - || (page_table[last].bytes_used == 0) - || page_table[last].dont_move - || (page_table[last].gen != generation)) + if (!protect_page_p(last, generation)) break; page_table[last].write_protected = 1; } @@ -3581,7 +3301,7 @@ write_protect_generation_pages(generation_index_t generation) page_start = (void *)page_address(start); os_protect(page_start, - PAGE_BYTES * (last - start), + npage_bytes(last - start), OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); start = last; @@ -3597,20 +3317,91 @@ write_protect_generation_pages(generation_index_t generation) } } +#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)) +static void +preserve_context_registers (os_context_t *c) +{ + void **ptr; + /* On Darwin the signal context isn't a contiguous block of memory, + * so just preserve_pointering its contents won't be sufficient. 
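 *
 * (Aside, not part of the patch: write_protect_generation_pages above
 * coalesces runs of protectable pages so that a single os_protect
 * call covers each run. The shape of that loop, as a sketch:
 *
 *     for (start = 0; start < last_free_page; start++) {
 *         if (!protect_page_p(start, generation))
 *             continue;
 *         page_table[start].write_protected = 1;
 *         for (last = start + 1;
 *              last < last_free_page && protect_page_p(last, generation);
 *              last++)
 *             page_table[last].write_protected = 1;
 *         os_protect(page_address(start),
 *                    npage_bytes(last - start),
 *                    OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
 *         start = last;
 *     }
 *
 * One system call per run rather than per page is the payoff of the
 * protect_page_p helper introduced near the top of the patch.)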
+ */ +#if defined(LISP_FEATURE_DARWIN)||defined(LISP_FEATURE_WIN32) +#if defined LISP_FEATURE_X86 + preserve_pointer((void*)*os_context_register_addr(c,reg_EAX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_ECX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EDX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EBX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_ESI)); + preserve_pointer((void*)*os_context_register_addr(c,reg_EDI)); + preserve_pointer((void*)*os_context_pc_addr(c)); +#elif defined LISP_FEATURE_X86_64 + preserve_pointer((void*)*os_context_register_addr(c,reg_RAX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RCX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RDX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RBX)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RSI)); + preserve_pointer((void*)*os_context_register_addr(c,reg_RDI)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R8)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R9)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R10)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R11)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R12)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R13)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R14)); + preserve_pointer((void*)*os_context_register_addr(c,reg_R15)); + preserve_pointer((void*)*os_context_pc_addr(c)); +#else + #error "preserve_context_registers needs to be tweaked for non-x86 Darwin" +#endif +#endif +#if !defined(LISP_FEATURE_WIN32) + for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) { + preserve_pointer(*ptr); + } +#endif +} +#endif + +static void +move_pinned_pages_to_newspace() +{ + page_index_t i; + + /* scavenge() will evacuate all oldspace pages, but no newspace + * pages. Pinned pages are precisely those pages which must not + * be evacuated, so move them to newspace directly. */ + + for (i = 0; i < last_free_page; i++) { + if (page_table[i].dont_move && + /* dont_move is cleared lazily, so validate the space as well. */ + page_table[i].gen == from_space) { + page_table[i].gen = new_space; + /* And since we're moving the pages wholesale, also adjust + * the generation allocation counters. */ + generations[new_space].bytes_allocated += page_table[i].bytes_used; + generations[from_space].bytes_allocated -= page_table[i].bytes_used; + } + } +} + /* Garbage collect a generation. If raise is 0 then the remains of the * generation are not raised to the next generation. */ static void garbage_collect_generation(generation_index_t generation, int raise) { - unsigned long bytes_freed; + uword_t bytes_freed; page_index_t i; - unsigned long static_space_size; + uword_t static_space_size; struct thread *th; + gc_assert(generation <= HIGHEST_NORMAL_GENERATION); /* The oldest generation can't be raised. */ gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0)); + /* Check if weak hash tables were processed in the previous GC. */ + gc_assert(weak_hash_tables == NULL); + /* Initialize the weak pointer list. */ weak_pointers = NULL; @@ -3665,13 +3456,48 @@ garbage_collect_generation(generation_index_t generation, int raise) * initiates GC. If you ever call GC from inside an altstack * handler, you will lose. */ +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) /* And if we're saving a core, there's no point in being conservative. 
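 *
 * (Aside, not part of the patch: preserve_context_registers above
 * treats every saved register as a potential Lisp pointer. Handing a
 * word to preserve_pointer is safe even when it is not a pointer at
 * all; the worst case is that an unrelated page stays pinned for one
 * collection. The contract, as a sketch with a hypothetical helper:
 *
 *     static void conservative_root(uword_t word)  // hypothetical
 *     {
 *         // preserve_pointer itself validates the address: it only
 *         // pins a page if word points into the dynamic space at
 *         // something that plausibly is, or is inside, an object.
 *         preserve_pointer((void *)word);
 *     }
 *
 * That is why the x86-64 branch can dump fourteen registers plus the
 * PC without inspecting their contents first.)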
     */
    if (conservative_stack) {
        for_each_thread(th) {
            void **ptr;
            void **esp=(void **)-1;
-#ifdef LISP_FEATURE_SB_THREAD
-            long i,free;
+            if (th->state == STATE_DEAD)
+                continue;
+# if defined(LISP_FEATURE_SB_SAFEPOINT)
+            /* Conservative collect_garbage is always invoked with a
+             * foreign C call or an interrupt handler on top of every
+             * existing thread, so the stored SP in each thread
+             * structure is valid, no matter which thread we are looking
+             * at. For threads that were running Lisp code, the pitstop
+             * and edge functions maintain this value within the
+             * interrupt or exception handler. */
+            esp = os_get_csp(th);
+            assert_on_stack(th, esp);
+
+            /* In addition to pointers on the stack, also preserve the
+             * return PC, the only value from the context that we need
+             * in addition to the SP. The return PC gets saved by the
+             * foreign call wrapper, and removed from the control stack
+             * into a register. */
+            preserve_pointer(th->pc_around_foreign_call);
+
+            /* And on platforms with interrupts: scavenge ctx registers. */
+
+            /* Disabled on Windows, because it does not have an explicit
+             * stack of `interrupt_contexts'. The reported CSP has been
+             * chosen so that the current context on the stack is
+             * covered by the stack scan. See also set_csp_from_context(). */
+# ifndef LISP_FEATURE_WIN32
+            if (th != arch_os_get_current_thread()) {
+                long k = fixnum_value(
+                    SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+                while (k > 0)
+                    preserve_context_registers(th->interrupt_contexts[--k]);
+            }
+# endif
+# elif defined(LISP_FEATURE_SB_THREAD)
+            sword_t i,free;
             if(th==arch_os_get_current_thread()) {
                 /* Somebody is going to burn in hell for this, but casting
                  * it in two steps shuts gcc up about strict aliasing. */
@@ -3685,32 +3511,81 @@ garbage_collect_generation(generation_index_t generation, int raise)
                     if (esp1>=(void **)th->control_stack_start &&
                         esp1<(void **)th->control_stack_end) {
                         if(esp1<esp) esp=esp1;
-                        for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
-                            preserve_pointer(*ptr);
-                        }
+                        preserve_context_registers(c);
                     }
                 }
             }
-#else
+# else
            esp = (void **)((void *)&raise);
-#endif
+# endif
+            if (!esp || esp == (void*) -1)
+                lose("garbage_collect: no SP known for thread %x (OS %x)",
+                     th, th->os_thread);
-            for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
+            for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
                preserve_pointer(*ptr);
            }
        }
    }
-#ifdef QSHOW
+#else
+    /* Non-x86oid systems don't have "conservative roots" as such, but
+     * the same mechanism is used for objects pinned for use by alien
+     * code. */
+    for_each_thread(th) {
+        lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
+        while (pin_list != NIL) {
+            struct cons *list_entry =
+                (struct cons *)native_pointer(pin_list);
+            preserve_pointer(list_entry->car);
+            pin_list = list_entry->cdr;
+        }
+    }
+#endif
+
+#if QSHOW
    if (gencgc_verbose > 1) {
-        long num_dont_move_pages = count_dont_move_pages();
+        sword_t num_dont_move_pages = count_dont_move_pages();
        fprintf(stderr,
                "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                num_dont_move_pages,
-                num_dont_move_pages * PAGE_BYTES);
+                npage_bytes(num_dont_move_pages));
    }
#endif

+    /* Now that all of the pinned (dont_move) pages are known, and
+     * before we start to scavenge (and thus relocate) objects,
+     * relocate the pinned pages to newspace, so that the scavenger
+     * will not attempt to relocate their contents. */
+    move_pinned_pages_to_newspace();
+
    /* Scavenge all the rest of the roots.
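 *
 * (Aside, not part of the patch: the stack walk above boils down to
 * scanning every word between the saved SP and the base of the
 * downward-growing control stack:
 *
 *     for (ptr = ((void **)th->control_stack_end) - 1;
 *          ptr >= esp;
 *          ptr--)
 *         preserve_pointer(*ptr);
 *
 * The new end-1 / >= bounds fix two off-by-ones in the removed
 * end / > form: the word at esp itself is now scanned, and the word
 * one past control_stack_end no longer is.)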
*/ +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + /* + * If not x86, we need to scavenge the interrupt context(s) and the + * control stack. + */ + { + struct thread *th; + for_each_thread(th) { + scavenge_interrupt_contexts(th); + scavenge_control_stack(th); + } + +# ifdef LISP_FEATURE_SB_SAFEPOINT + /* In this case, scrub all stacks right here from the GCing thread + * instead of doing what the comment below says. Suboptimal, but + * easier. */ + for_each_thread(th) + scrub_thread_control_stack(th); +# else + /* Scrub the unscavenged control stack space, so that we can't run + * into any stale pointers in a later GC (this is done by the + * stop-for-gc handler in the other threads). */ + scrub_control_stack(); +# endif + } +#endif + /* Scavenge the Lisp functions of the interrupt handlers, taking * care to avoid SIG_DFL and SIG_IGN. */ for (i = 0; i < NSIG; i++) { @@ -3724,12 +3599,12 @@ garbage_collect_generation(generation_index_t generation, int raise) { struct thread *th; for_each_thread(th) { - long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) - + sword_t len= (lispobj *)get_binding_stack_pointer(th) - th->binding_stack_start; scavenge((lispobj *) th->binding_stack_start,len); #ifdef LISP_FEATURE_SB_THREAD /* do the tls as well */ - len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) - + len=(SymbolValue(FREE_TLS_INDEX,0) >> WORD_SHIFT) - (sizeof (struct thread))/(sizeof (lispobj)); scavenge((lispobj *) (th+1),len); #endif @@ -3745,7 +3620,7 @@ garbage_collect_generation(generation_index_t generation, int raise) * please submit a patch. */ #if 0 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) { - unsigned long read_only_space_size = + uword_t read_only_space_size = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) - (lispobj*)READ_ONLY_SPACE_START; FSHOW((stderr, @@ -3786,8 +3661,8 @@ garbage_collect_generation(generation_index_t generation, int raise) /* As a check re-scavenge the newspace once; no new objects should * be found. */ { - long old_bytes_allocated = bytes_allocated; - long bytes_allocated; + os_vm_size_t old_bytes_allocated = bytes_allocated; + os_vm_size_t bytes_allocated; /* Start with a full scavenge. */ scavenge_newspace_generation_one_scan(new_space); @@ -3804,6 +3679,7 @@ garbage_collect_generation(generation_index_t generation, int raise) } #endif + scan_weak_hash_tables(); scan_weak_pointers(); /* Flush the current regions, updating the tables. */ @@ -3832,8 +3708,9 @@ garbage_collect_generation(generation_index_t generation, int raise) generations[generation].alloc_large_unboxed_start_page = 0; if (generation >= verify_gens) { - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("verifying"); + } verify_gc(); verify_dynamic_space(); } @@ -3847,45 +3724,74 @@ garbage_collect_generation(generation_index_t generation, int raise) generations[generation].num_gc = 0; else ++generations[generation].num_gc; + } /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */ -long +sword_t update_dynamic_space_free_pointer(void) { page_index_t last_page = -1, i; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) - && (page_table[i].bytes_used != 0)) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0)) last_page = i; last_free_page = last_page+1; - SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); + set_alloc_pointer((lispobj)(page_address(last_free_page))); return 0; /* dummy value: return something ... 
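 *
 * (Aside, not part of the patch: the alignment arithmetic in
 * remap_page_range just below is easiest to follow with numbers.
 * Assume, purely for illustration, GENCGC_CARD_BYTES == 4096 and
 * gencgc_release_granularity == 32768, so release_granularity is 8
 * cards and release_mask is 7. For from = 3, to = 20:
 *
 *     end          = to + 1        = 21
 *     aligned_from = (3 + 7) & ~7  =  8
 *     aligned_end  = 21 & ~7       = 16
 *
 * Cards 8..15 cover whole release units and go through
 * zero_pages_with_mmap; the ragged edges 3..7 and 16..20 fall back
 * to zero_and_mark_pages.)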
*/ } static void -remap_free_pages (page_index_t from, page_index_t to) +remap_page_range (page_index_t from, page_index_t to) +{ + /* There's a mysterious Solaris/x86 problem with using mmap + * tricks for memory zeroing. See sbcl-devel thread + * "Re: patch: standalone executable redux". + */ +#if defined(LISP_FEATURE_SUNOS) + zero_and_mark_pages(from, to); +#else + const page_index_t + release_granularity = gencgc_release_granularity/GENCGC_CARD_BYTES, + release_mask = release_granularity-1, + end = to+1, + aligned_from = (from+release_mask)&~release_mask, + aligned_end = (end&~release_mask); + + if (aligned_from < aligned_end) { + zero_pages_with_mmap(aligned_from, aligned_end-1); + if (aligned_from != from) + zero_and_mark_pages(from, aligned_from-1); + if (aligned_end != end) + zero_and_mark_pages(aligned_end, end-1); + } else { + zero_and_mark_pages(from, to); + } +#endif +} + +static void +remap_free_pages (page_index_t from, page_index_t to, int forcibly) { page_index_t first_page, last_page; + if (forcibly) + return remap_page_range(from, to); + for (first_page = from; first_page <= to; first_page++) { - if (page_table[first_page].allocated != FREE_PAGE_FLAG || - page_table[first_page].need_to_zero == 0) { + if (page_allocated_p(first_page) || + (page_table[first_page].need_to_zero == 0)) continue; - } last_page = first_page + 1; - while (page_table[last_page].allocated == FREE_PAGE_FLAG && - last_page < to && - page_table[last_page].need_to_zero == 1) { + while (page_free_p(last_page) && + (last_page <= to) && + (page_table[last_page].need_to_zero == 1)) last_page++; - } - zero_pages_with_mmap(first_page, last_page-1); + remap_page_range(first_page, last_page-1); first_page = last_page; } @@ -3905,13 +3811,16 @@ void collect_garbage(generation_index_t last_gen) { generation_index_t gen = 0, i; - int raise; + int raise, more = 0; int gen_to_wp; /* The largest value of last_free_page seen since the time * remap_free_pages was called. */ static page_index_t high_water_mark = 0; FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen)); + log_generation_stats(gc_logfile, "=== GC Start ==="); + + gc_active_p = 1; if (last_gen > HIGHEST_NORMAL_GENERATION+1) { FSHOW((stderr, @@ -3930,18 +3839,28 @@ collect_garbage(generation_index_t last_gen) } if (gencgc_verbose > 1) - print_generation_stats(0); + print_generation_stats(); do { /* Collect the generation. */ - if (gen >= gencgc_oldest_gen_to_gc) { - /* Never raise the oldest generation. */ + if (more || (gen >= gencgc_oldest_gen_to_gc)) { + /* Never raise the oldest generation. Never raise the extra generation + * collected due to more-flag. */ raise = 0; + more = 0; } else { raise = (gen < last_gen) - || (generations[gen].num_gc >= generations[gen].trigger_age); + || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion); + /* If we would not normally raise this one, but we're + * running low on space in comparison to the object-sizes + * we've been seeing, raise it and collect the next one + * too. 
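 *
 * (Aside, not part of the patch, with illustrative numbers only:
 * given a 512MB dynamic space with 460MB allocated and a largest
 * single allocation of 30MB since the last GC,
 *
 *     more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated);
 *            // 60MB >= 52MB  ->  more = 1
 *
 * so this generation is raised and the next one is collected as
 * well, even though its own trigger conditions would not have fired.)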
*/ + if (!raise && gen == last_gen) { + more = (2*large_allocation) >= (dynamic_space_size - bytes_allocated); + raise = more; + } } if (gencgc_verbose > 1) { @@ -3968,18 +3887,18 @@ collect_garbage(generation_index_t last_gen) if (gencgc_verbose > 1) { FSHOW((stderr, "GC of generation %d finished:\n", gen)); - print_generation_stats(0); + print_generation_stats(); } gen++; } while ((gen <= gencgc_oldest_gen_to_gc) && ((gen < last_gen) - || ((gen <= gencgc_oldest_gen_to_gc) - && raise + || more + || (raise && (generations[gen].bytes_allocated > generations[gen].gc_trigger) - && (gen_av_mem_age(gen) - > generations[gen].min_av_mem_age)))); + && (generation_average_age(gen) + > generations[gen].minimum_age_before_gc)))); /* Now if gen-1 was raised all generations before gen are empty. * If it wasn't raised then all generations before gen-1 are empty. @@ -4014,10 +3933,18 @@ collect_garbage(generation_index_t last_gen) /* Save the high-water mark before updating last_free_page */ if (last_free_page > high_water_mark) high_water_mark = last_free_page; + update_dynamic_space_free_pointer(); - auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs; + + /* Update auto_gc_trigger. Make sure we trigger the next GC before + * running out of heap! */ + if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated)) + auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs; + else + auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2; + if(gencgc_verbose) - fprintf(stderr,"Next gc when %ld bytes have been consed\n", + fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n", auto_gc_trigger); /* If we did a big GC (arbitrarily defined as gen > 1), release memory @@ -4026,10 +3953,14 @@ collect_garbage(generation_index_t last_gen) if (gen > small_generation_limit) { if (last_free_page > high_water_mark) high_water_mark = last_free_page; - remap_free_pages(0, high_water_mark); + remap_free_pages(0, high_water_mark, 0); high_water_mark = 0; } + gc_active_p = 0; + large_allocation = 0; + + log_generation_stats(gc_logfile, "=== GC End ==="); SHOW("returning from collect_garbage"); } @@ -4041,50 +3972,44 @@ collect_garbage(generation_index_t last_gen) void gc_free_heap(void) { - page_index_t page; + page_index_t page, last_page; - if (gencgc_verbose > 1) + if (gencgc_verbose > 1) { SHOW("entering gc_free_heap"); + } - for (page = 0; page < NUM_PAGES; page++) { + for (page = 0; page < page_table_pages; page++) { /* Skip free pages which should already be zero filled. */ - if (page_table[page].allocated != FREE_PAGE_FLAG) { - void *page_start, *addr; - - /* Mark the page free. The other slots are assumed invalid - * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it - * should not be write-protected -- except that the - * generation is used for the current region but it sets - * that up. */ - page_table[page].allocated = FREE_PAGE_FLAG; - page_table[page].bytes_used = 0; - -#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */ - /* Zero the page. */ - page_start = (void *)page_address(page); - - /* First, remove any write-protection. 
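 *
 * (Aside, not part of the patch: the auto_gc_trigger update earlier
 * in collect_garbage now clamps the trigger to the space actually
 * left. Purely for illustration: with a 512MB dynamic space, 500MB
 * allocated, and bytes_consed_between_gcs = 50MB, the plain rule
 * would place the trigger past the end of the heap, so the fallback
 *
 *     auto_gc_trigger = bytes_allocated
 *                       + (dynamic_space_size - bytes_allocated)/2;
 *
 * schedules the next GC after only 6MB of further consing instead.)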
 */
-            os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
-            page_table[page].write_protected = 0;
-
-            os_invalidate(page_start,PAGE_BYTES);
-            addr = os_validate(page_start,PAGE_BYTES);
-            if (addr == NULL || addr != page_start) {
-                lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n",
-                     page_start,
-                     addr);
+        if (page_allocated_p(page)) {
+            void *page_start;
+            for (last_page = page;
+                 (last_page < page_table_pages) && page_allocated_p(last_page);
+                 last_page++) {
+                /* Mark the page free. The other slots are assumed invalid
+                 * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
+                 * should not be write-protected -- except that the
+                 * generation is used for the current region but it sets
+                 * that up. */
+                page_table[page].allocated = FREE_PAGE_FLAG;
+                page_table[page].bytes_used = 0;
+                page_table[page].write_protected = 0;
            }
-#else
-            page_table[page].write_protected = 0;
+
+#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
+                            * about this change. */
+            page_start = (void *)page_address(page);
+            os_protect(page_start, npage_bytes(last_page-page), OS_VM_PROT_ALL);
+            remap_free_pages(page, last_page-1, 1);
+            page = last_page-1;
#endif
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
-            long *page_start;
+            sword_t *page_start;
            page_index_t i;
-            gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+            gc_assert(page_free_p(page));
            gc_assert(page_table[page].bytes_used == 0);
-            page_start = (long *)page_address(page);
-            for (i=0; i<1024; i++) {
+            page_start = (sword_t *)page_address(page);
+            for (i=0; i<(GENCGC_CARD_BYTES/sizeof(sword_t)); i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x\n", page_start + i);
                }
            }
        }
    }

    if (gencgc_verbose > 1)
-        print_generation_stats(0);
+        print_generation_stats();

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

@@ -4116,12 +4041,11 @@ gc_free_heap(void)
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
-    SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0);
+    set_alloc_pointer((lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
-        if (gencgc_verbose)
-            SHOW("checking after free_heap\n");
+        FSHOW((stderr, "checking after free_heap\n"));
        verify_gc();
    }
}

@@ -4131,21 +4055,66 @@
gc_init(void)
{
    page_index_t i;

+#if defined(LISP_FEATURE_SB_SAFEPOINT)
+    alloc_gc_page();
+#endif
+
+    /* Compute the number of pages needed for the dynamic space.
+     * Dynamic space size should be aligned on page size. */
+    page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
+    gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
+
+    /* Default nursery size to 5% of the total dynamic space size,
+     * min 1Mb. */
+    bytes_consed_between_gcs = dynamic_space_size/(os_vm_size_t)20;
+    if (bytes_consed_between_gcs < (1024*1024))
+        bytes_consed_between_gcs = 1024*1024;
+
+    /* The page_table must be allocated using "calloc" to initialize
+     * the page structures correctly. There used to be a separate
+     * initialization loop (now commented out; see below) but that was
+     * unnecessary and did hurt startup time. */
+    page_table = calloc(page_table_pages, sizeof(struct page));
+    gc_assert(page_table);
+
    gc_init_tables();
-    scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;

-    /* Initialize each page structure. */
-    for (i = 0; i < NUM_PAGES; i++) {
-        /* Initialize all pages as free. */
-        page_table[i].allocated = FREE_PAGE_FLAG;
-        page_table[i].bytes_used = 0;
-
-        /* Pages are not write-protected at startup.
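 *
 * (Aside, not part of the patch: the compile-time assertion just
 * below uses a C89-era trick: declaring an array with negative size
 * is ill-formed, so compilation fails exactly when the guarded
 * condition is violated. The idiom in general form, with
 * hypothetical names:
 *
 *     #define ct_assert(cond) \
 *         { char ct_assert_[(cond) ? 1 : -1]; (void)ct_assert_; }
 *
 *     ct_assert(FREE_PAGE_FLAG == 0);  // what gc_init relies on
 *
 * cond must be an integer constant expression; with a non-constant
 * condition a C99 compiler would silently build a VLA instead.)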
*/ - page_table[i].write_protected = 0; + /* The page structures are initialized implicitly when page_table + * is allocated with "calloc" above. Formerly we had the following + * explicit initialization here (comments converted to C99 style + * for readability as C's block comments don't nest): + * + * // Initialize each page structure. + * for (i = 0; i < page_table_pages; i++) { + * // Initialize all pages as free. + * page_table[i].allocated = FREE_PAGE_FLAG; + * page_table[i].bytes_used = 0; + * + * // Pages are not write-protected at startup. + * page_table[i].write_protected = 0; + * } + * + * Without this loop the image starts up much faster when dynamic + * space is large -- which it is on 64-bit platforms already by + * default -- and when "calloc" for large arrays is implemented + * using copy-on-write of a page of zeroes -- which it is at least + * on Linux. In this case the pages that page_table_pages is stored + * in are mapped and cleared not before the corresponding part of + * dynamic space is used. For example, this saves clearing 16 MB of + * memory at startup if the page size is 4 KB and the size of + * dynamic space is 4 GB. + * FREE_PAGE_FLAG must be 0 for this to work correctly which is + * asserted below: */ + { + /* Compile time assertion: If triggered, declares an array + * of dimension -1 forcing a syntax error. The intent of the + * assignment is to avoid an "unused variable" warning. */ + char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1]; + assert_free_page_flag_0[0] = assert_free_page_flag_0[0]; } bytes_allocated = 0; @@ -4163,9 +4132,10 @@ gc_init(void) generations[i].num_gc = 0; generations[i].cum_sum_bytes_allocated = 0; /* the tune-able parameters */ - generations[i].bytes_consed_between_gc = 2000000; - generations[i].trigger_age = 1; - generations[i].min_av_mem_age = 0.75; + generations[i].bytes_consed_between_gc + = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION; + generations[i].number_of_gcs_before_promotion = 1; + generations[i].minimum_age_before_gc = 0.75; } /* Initialize gc_alloc. */ @@ -4185,34 +4155,43 @@ static void gencgc_pickup_dynamic(void) { page_index_t page = 0; - long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); + void *alloc_ptr = (void *)get_alloc_pointer(); lispobj *prev=(lispobj *)page_address(page); generation_index_t gen = PSEUDO_STATIC_GENERATION; + bytes_allocated = 0; + do { lispobj *first,*ptr= (lispobj *)page_address(page); - page_table[page].allocated = BOXED_PAGE_FLAG; - page_table[page].gen = gen; - page_table[page].bytes_used = PAGE_BYTES; - page_table[page].large_object = 0; - page_table[page].write_protected = 0; - page_table[page].write_protected_cleared = 0; - page_table[page].dont_move = 0; - page_table[page].need_to_zero = 1; + + if (!gencgc_partial_pickup || page_allocated_p(page)) { + /* It is possible, though rare, for the saved page table + * to contain free pages below alloc_ptr. 
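 *
 * (Aside, not part of the patch: note the sign convention flip
 * visible below. The removed code stored
 * (void *)prev - page_address(page) in first_object_offset; the new
 * scan_start_offset stores page_address(page) - (void *)prev, i.e.
 * the non-negative number of bytes *before* the page at which its
 * allocation region begins, matching the page_scan_start helper:
 *
 *     return page_address(page_index)
 *            - page_table[page_index].scan_start_offset;
 *
 * so a page that starts its own region stores 0.)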
*/ + page_table[page].gen = gen; + page_table[page].bytes_used = GENCGC_CARD_BYTES; + page_table[page].large_object = 0; + page_table[page].write_protected = 0; + page_table[page].write_protected_cleared = 0; + page_table[page].dont_move = 0; + page_table[page].need_to_zero = 1; + + bytes_allocated += GENCGC_CARD_BYTES; + } if (!gencgc_partial_pickup) { + page_table[page].allocated = BOXED_PAGE_FLAG; first=gc_search_space(prev,(ptr+2)-prev,ptr); - if(ptr == first) prev=ptr; - page_table[page].first_object_offset = - (void *)prev - page_address(page); + if(ptr == first) + prev=ptr; + page_table[page].scan_start_offset = + page_address(page) - (void *)prev; } page++; - } while ((long)page_address(page) < alloc_ptr); + } while (page_address(page) < alloc_ptr); last_free_page = page; - generations[gen].bytes_allocated = PAGE_BYTES*page; - bytes_allocated = PAGE_BYTES*page; + generations[gen].bytes_allocated = bytes_allocated; gc_alloc_update_all_page_tables(); write_protect_generation_pages(gen); @@ -4223,8 +4202,6 @@ gc_initialize_pointers(void) { gencgc_pickup_dynamic(); } - - /* alloc(..) is the external interface for memory allocation. It @@ -4239,42 +4216,31 @@ gc_initialize_pointers(void) * The check for a GC trigger is only performed when the current * region is full, so in most cases it's not needed. */ -char * -alloc(long nbytes) +static inline lispobj * +general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region, + struct thread *thread) { - struct thread *thread=arch_os_get_current_thread(); - struct alloc_region *region= -#ifdef LISP_FEATURE_SB_THREAD - thread ? &(thread->alloc_region) : &boxed_region; -#else - &boxed_region; +#ifndef LISP_FEATURE_WIN32 + lispobj alloc_signal; #endif void *new_obj; void *new_free_pointer; + os_vm_size_t trigger_bytes = 0; + gc_assert(nbytes>0); + /* Check for alignment allocation problems. */ - gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0) + gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0) && ((nbytes & LOWTAG_MASK) == 0)); -#if 0 - if(all_threads) - /* there are a few places in the C code that allocate data in the - * heap before Lisp starts. This is before interrupts are enabled, - * so we don't need to check for pseudo-atomic */ -#ifdef LISP_FEATURE_SB_THREAD - if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) { - register u32 fs; - fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n", - th,th->os_thread); - __asm__("movl %fs,%0" : "=r" (fs) : ); - fprintf(stderr, "fs is %x, th->tls_cookie=%x \n", - debug_get_fs(),th->tls_cookie); - lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); - } -#else - gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)); -#endif + +#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)) + /* Must be inside a PA section. */ + gc_assert(get_pseudo_atomic_atomic(thread)); #endif + if (nbytes > large_allocation) + large_allocation = nbytes; + /* maybe we can do this quickly ... */ new_free_pointer = region->free_pointer + nbytes; if (new_free_pointer <= region->end_addr) { @@ -4283,11 +4249,19 @@ alloc(long nbytes) return(new_obj); /* yup */ } - /* we have to go the long way around, it seems. Check whether - * we should GC in the near future + /* We don't want to count nbytes against auto_gc_trigger unless we + * have to: it speeds up the tenuring of objects and slows down + * allocation. 
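 *
 * (Aside, not part of the patch: the fast path just above is classic
 * bump-pointer allocation; as a sketch:
 *
 *     new_free_pointer = region->free_pointer + nbytes;
 *     if (new_free_pointer <= region->end_addr) {
 *         new_obj = region->free_pointer;       // claim the space
 *         region->free_pointer = new_free_pointer;
 *         return new_obj;                       // no lock, no GC check
 *     }
 *     // otherwise fall through to the slow path
 *
 * Only when the region is exhausted does the code look at
 * auto_gc_trigger, which is the cost this comment is weighing.)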
However, unless we do so when allocating _very_ + * large objects we are in danger of exhausting the heap without + * running sufficient GCs. + */ + if (nbytes >= bytes_consed_between_gcs) + trigger_bytes = nbytes; + + /* we have to go the long way around, it seems. Check whether we + * should GC in the near future */ - if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread))); + if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) { /* Don't flood the system with interrupts if the need to gc is * already noted. This can happen for example when SUB-GC * allocates or after a gc triggered in a WITHOUT-GCING. */ @@ -4295,20 +4269,99 @@ alloc(long nbytes) /* set things up so that GC happens when we finish the PA * section */ SetSymbolValue(GC_PENDING,T,thread); - if (SymbolValue(GC_INHIBIT,thread) == NIL) - arch_set_pseudo_atomic_interrupted(0); + if (SymbolValue(GC_INHIBIT,thread) == NIL) { +#ifdef LISP_FEATURE_SB_SAFEPOINT + thread_register_gc_trigger(); +#else + set_pseudo_atomic_interrupted(thread); +#ifdef GENCGC_IS_PRECISE + /* PPC calls alloc() from a trap or from pa_alloc(), + * look up the most context if it's from a trap. */ + { + os_context_t *context = + thread->interrupt_data->allocation_trap_context; + maybe_save_gc_mask_and_block_deferrables + (context ? os_context_sigmask_addr(context) : NULL); + } +#else + maybe_save_gc_mask_and_block_deferrables(NULL); +#endif +#endif + } + } + } + new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0); + +#ifndef LISP_FEATURE_WIN32 + /* for sb-prof, and not supported on Windows yet */ + alloc_signal = SymbolValue(ALLOC_SIGNAL,thread); + if ((alloc_signal & FIXNUM_TAG_MASK) == 0) { + if ((sword_t) alloc_signal <= 0) { + SetSymbolValue(ALLOC_SIGNAL, T, thread); + raise(SIGPROF); + } else { + SetSymbolValue(ALLOC_SIGNAL, + alloc_signal - (1 << N_FIXNUM_TAG_BITS), + thread); } } - new_obj = gc_alloc_with_region(nbytes,0,region,0); +#endif + return (new_obj); } + +lispobj * +general_alloc(sword_t nbytes, int page_type_flag) +{ + struct thread *thread = arch_os_get_current_thread(); + /* Select correct region, and call general_alloc_internal with it. + * For other then boxed allocation we must lock first, since the + * region is shared. */ + if (BOXED_PAGE_FLAG & page_type_flag) { +#ifdef LISP_FEATURE_SB_THREAD + struct alloc_region *region = (thread ? 
&(thread->alloc_region) : &boxed_region); +#else + struct alloc_region *region = &boxed_region; +#endif + return general_alloc_internal(nbytes, page_type_flag, region, thread); + } else if (UNBOXED_PAGE_FLAG == page_type_flag) { + lispobj * obj; + gc_assert(0 == thread_mutex_lock(&allocation_lock)); + obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread); + gc_assert(0 == thread_mutex_unlock(&allocation_lock)); + return obj; + } else { + lose("bad page type flag: %d", page_type_flag); + } +} + +lispobj AMD64_SYSV_ABI * +alloc(long nbytes) +{ +#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY + struct thread *self = arch_os_get_current_thread(); + int was_pseudo_atomic = get_pseudo_atomic_atomic(self); + if (!was_pseudo_atomic) + set_pseudo_atomic_atomic(self); +#else + gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread())); +#endif + + lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG); + +#ifdef LISP_FEATURE_SB_SAFEPOINT_STRICTLY + if (!was_pseudo_atomic) + clear_pseudo_atomic_atomic(self); +#endif + + return result; +} /* * shared support for the OS-dependent signal handlers which * catch GENCGC-related write-protect violations */ - -void unhandled_sigmemoryfault(void); +void unhandled_sigmemoryfault(void* addr); /* Depending on which OS we're running under, different signals might * be raised for a violation of write protection in the heap. This @@ -4318,14 +4371,24 @@ void unhandled_sigmemoryfault(void); * * Return true if this signal is a normal generational GC thing that * we were able to handle, or false if it was abnormal and control - * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */ + * should fall through to the general SIGSEGV/SIGBUS/whatever logic. + * + * We have two control flags for this: one causes us to ignore faults + * on unprotected pages completely, and the second complains to stderr + * but allows us to continue without losing. + */ +extern boolean ignore_memoryfaults_on_unprotected_pages; +boolean ignore_memoryfaults_on_unprotected_pages = 0; + +extern boolean continue_after_memoryfault_on_unprotected_pages; +boolean continue_after_memoryfault_on_unprotected_pages = 0; int gencgc_handle_wp_violation(void* fault_addr) { page_index_t page_index = find_page_index(fault_addr); -#ifdef QSHOW_SIGNALS +#if QSHOW_SIGNALS FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n", fault_addr, page_index)); #endif @@ -4335,27 +4398,56 @@ gencgc_handle_wp_violation(void* fault_addr) /* It can be helpful to be able to put a breakpoint on this * case to help diagnose low-level problems. */ - unhandled_sigmemoryfault(); + unhandled_sigmemoryfault(fault_addr); /* not within the dynamic space -- not our responsibility */ return 0; } else { + int ret; + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (page_table[page_index].write_protected) { /* Unprotect the page. */ - os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL); + os_protect(page_address(page_index), GENCGC_CARD_BYTES, OS_VM_PROT_ALL); page_table[page_index].write_protected_cleared = 1; page_table[page_index].write_protected = 0; - } else { + } else if (!ignore_memoryfaults_on_unprotected_pages) { /* The only acceptable reason for this signal on a heap * access is that GENCGC write-protected the page. 
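 *
 * (Aside, not part of the patch: the write barrier here is entirely
 * page-protection based. The handler's protocol, mirroring the code
 * just above:
 *
 *     // mutator stores into a protected page -> SIGSEGV/SIGBUS
 *     if (page_table[page_index].write_protected) {
 *         os_protect(page_address(page_index), GENCGC_CARD_BYTES,
 *                    OS_VM_PROT_ALL);            // open the page
 *         page_table[page_index].write_protected_cleared = 1;
 *         page_table[page_index].write_protected = 0;
 *         // the faulting store is retried; the next GC rescans
 *         // this page for old->young pointers
 *     }
 *
 * write_protected_cleared is what lets the race check below tolerate
 * two CPUs faulting on the same page back to back.)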
* However, if two CPUs hit a wp page near-simultaneously, * we had better not have the second one lose here if it * does this test after the first one has already set wp=0 */ - if(page_table[page_index].write_protected_cleared != 1) - lose("fault in heap page not marked as write-protected\n"); + if(page_table[page_index].write_protected_cleared != 1) { + void lisp_backtrace(int frames); + lisp_backtrace(10); + fprintf(stderr, + "Fault @ %p, page %"PAGE_INDEX_FMT" not marked as write-protected:\n" + " boxed_region.first_page: %"PAGE_INDEX_FMT"," + " boxed_region.last_page %"PAGE_INDEX_FMT"\n" + " page.scan_start_offset: %"OS_VM_SIZE_FMT"\n" + " page.bytes_used: %"PAGE_BYTES_FMT"\n" + " page.allocated: %d\n" + " page.write_protected: %d\n" + " page.write_protected_cleared: %d\n" + " page.generation: %d\n", + fault_addr, + page_index, + boxed_region.first_page, + boxed_region.last_page, + page_table[page_index].scan_start_offset, + page_table[page_index].bytes_used, + page_table[page_index].allocated, + page_table[page_index].write_protected, + page_table[page_index].write_protected_cleared, + page_table[page_index].gen); + if (!continue_after_memoryfault_on_unprotected_pages) + lose("Feh.\n"); + } } + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); /* Don't worry, we can handle it. */ return 1; } @@ -4365,17 +4457,21 @@ gencgc_handle_wp_violation(void* fault_addr) * are about to let Lisp deal with it. It's basically just a * convenient place to set a gdb breakpoint. */ void -unhandled_sigmemoryfault() +unhandled_sigmemoryfault(void *addr) {} void gc_alloc_update_all_page_tables(void) { /* Flush the alloc regions updating the tables. */ struct thread *th; - for_each_thread(th) - gc_alloc_update_page_tables(0, &th->alloc_region); - gc_alloc_update_page_tables(1, &unboxed_region); - gc_alloc_update_page_tables(0, &boxed_region); + for_each_thread(th) { + gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region); +#if defined(LISP_FEATURE_SB_SAFEPOINT_STRICTLY) && !defined(LISP_FEATURE_WIN32) + gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->sprof_alloc_region); +#endif + } + gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region); + gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region); } void @@ -4394,10 +4490,10 @@ zero_all_free_pages() page_index_t i; for (i = 0; i < last_free_page; i++) { - if (page_table[i].allocated == FREE_PAGE_FLAG) { + if (page_free_p(i)) { #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(i), - PAGE_BYTES, + GENCGC_CARD_BYTES, OS_VM_PROT_ALL); #endif zero_pages(i, i); @@ -4434,13 +4530,19 @@ prepare_for_final_gc () * function being set to the value of the static symbol * SB!VM:RESTART-LISP-FUNCTION */ void -gc_and_save(char *filename) +gc_and_save(char *filename, boolean prepend_runtime, + boolean save_runtime_options, boolean compressed, + int compression_level, int application_type) { - FILE *file = open_core_for_saving(filename); - if (!file) { - perror(filename); - return; - } + FILE *file; + void *runtime_bytes = NULL; + size_t runtime_size; + + file = prepare_to_save(filename, prepend_runtime, &runtime_bytes, + &runtime_size); + if (file == NULL) + return; + conservative_stack = 0; /* The filename might come from Lisp, and be moved by the now @@ -4459,9 +4561,15 @@ gc_and_save(char *filename) gencgc_alloc_start_page = -1; collect_garbage(HIGHEST_NORMAL_GENERATION+1); + if (prepend_runtime) + save_runtime_to_filehandle(file, runtime_bytes, runtime_size, + application_type); + /* The dumper doesn't know that pages 
need to be zeroed before use. */ zero_all_free_pages(); - save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0)); + save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0), + prepend_runtime, save_runtime_options, + compressed ? compression_level : COMPRESSION_LEVEL_NONE); /* Oops. Save still managed to fail. Since we've mangled the stack * beyond hope, there's not much we can do. * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's