diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index aec65cd..3d14487 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -38,14 +38,26 @@
 #include "arch.h"
 #include "gc.h"
 #include "gc-internal.h"
+#include "thread.h"
 #include "genesis/vector.h"
 #include "genesis/weak-pointer.h"
 #include "genesis/simple-fun.h"
-#include "genesis/static-symbols.h"
-#include "genesis/symbol.h"
+
+#ifdef LISP_FEATURE_SB_THREAD
+#include <sys/ptrace.h>
+#include <linux/user.h> /* threading is presently linux-only */
+#endif
+
 /* assembly language stub that executes trap_PendingInterrupt */
 void do_pending_interrupt(void);
+/* forward declarations */
+int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct alloc_region *alloc_region);
+void gc_set_region_empty(struct alloc_region *region);
+void gc_alloc_update_all_page_tables(void);
+static void gencgc_pickup_dynamic(void);
+boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
+
 /*
 * GC parameters
 */
@@ -75,7 +87,9 @@ boolean gencgc_unmap_zero = 1;
 #endif
 /* the minimum size (in bytes) for a large object*/
-unsigned large_object_size = 4 * 4096;
+/* FIXME: Should this really be PAGE_BYTES? */
+unsigned large_object_size = 4 * PAGE_BYTES;
+
 /*
 * debugging
@@ -124,7 +138,8 @@ boolean gencgc_zero_check_during_free_heap = 0;
 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
 unsigned long bytes_allocated = 0;
-static unsigned long auto_gc_trigger = 0;
+extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
+unsigned long auto_gc_trigger = 0;
 /* the source and destination generations. These are set before a GC starts
 * scavenging. */
@@ -132,12 +147,6 @@
 int from_space;
 int new_space;
-/* FIXME: It would be nice to use this symbolic constant instead of
- * bare 4096 almost everywhere. We could also use an assertion that
- * it's equal to getpagesize(). */
-
-#define PAGE_BYTES 4096
-
 /* An array of page structures is statically allocated.
 * This helps quickly map between an address its page structure.
 * NUM_PAGES is set from the size of the dynamic space. */
@@ -152,7 +161,7 @@ static void *heap_base = NULL;
 inline void *
 page_address(int page_num)
 {
- return (heap_base + (page_num * 4096));
+ return (heap_base + (page_num * PAGE_BYTES));
 }
 /* Find the page index within the page_table for the given
@@ -163,7 +172,7 @@ find_page_index(void *addr)
 int index = addr-heap_base;
 if (index >= 0) {
- index = ((unsigned int)index)/4096;
+ index = ((unsigned int)index)/PAGE_BYTES;
 if (index < NUM_PAGES)
 return (index);
 }
@@ -247,6 +256,16 @@ unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
 * integrated with the Lisp code. */
 static int last_free_page;
+/* This lock is to prevent multiple threads from simultaneously
+ * allocating new regions which overlap each other. Note that the
+ * majority of GC is single-threaded, but alloc() may be called from
+ * >1 thread at a time and must be thread-safe.
This lock must be + * seized before all accesses to generations[] or to parts of + * page_table[] that other threads may want to see */ + +static lispobj free_pages_lock=0; + + /* * miscellaneous heap functions */ @@ -321,6 +340,8 @@ gen_av_mem_age(int gen) / ((double)generations[gen].bytes_allocated); } +void fpu_save(int *); /* defined in x86-assem.S */ +void fpu_restore(int *); /* defined in x86-assem.S */ /* The verbose argument controls how much to print: 0 for normal * level of detail; 1 for debugging. */ static void @@ -379,7 +400,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt, generations[i].bytes_allocated, - (count_generation_pages(i)*4096 + (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated), generations[i].gc_trigger, count_write_protect_generation_pages(i), @@ -490,7 +511,7 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - + get_spinlock(&free_pages_lock,(int) alloc_region); if (unboxed) { first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; @@ -499,8 +520,8 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) generations[gc_alloc_generation].alloc_start_page; } last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region); - bytes_found=(4096 - page_table[first_page].bytes_used) - + 4096*(last_page-first_page); + bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) + + PAGE_BYTES*(last_page-first_page); /* Set up the alloc_region. */ alloc_region->first_page = first_page; @@ -510,20 +531,6 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) alloc_region->free_pointer = alloc_region->start_addr; alloc_region->end_addr = alloc_region->start_addr + bytes_found; - if (gencgc_zero_check) { - int *p; - for (p = (int *)alloc_region->start_addr; - p < (int *)alloc_region->end_addr; p++) { - if (*p != 0) { - /* KLUDGE: It would be nice to use %lx and explicit casts - * (long) in code like this, so that it is less likely to - * break randomly when running on a machine with different - * word sizes. -- WHN 19991129 */ - lose("The new region at %x is not zero.", p); - } - } - } - /* Set up the pages. */ /* The first page may have already been in use. */ @@ -559,15 +566,32 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) alloc_region->start_addr - page_address(i); page_table[i].allocated |= OPEN_REGION_PAGE ; } - /* Bump up last_free_page. */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), + 0); + } + release_spinlock(&free_pages_lock); + + /* we can do this after releasing free_pages_lock */ + if (gencgc_zero_check) { + int *p; + for (p = (int *)alloc_region->start_addr; + p < (int *)alloc_region->end_addr; p++) { + if (*p != 0) { + /* KLUDGE: It would be nice to use %lx and explicit casts + * (long) in code like this, so that it is less likely to + * break randomly when running on a machine with different + * word sizes. -- WHN 19991129 */ + lose("The new region at %x is not zero.", p); + } } } +} + /* If the record_new_objects flag is 2 then all new regions created * are recorded. 
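
A note for readers tracing the new locking discipline: gc_alloc_new_region above seizes free_pages_lock around the page-table search and releases it before the optional zero check, since a freshly claimed region is visible only to the allocating thread. The get_spinlock/release_spinlock primitives themselves are defined elsewhere in the runtime; the following is only a minimal sketch of the idea, assuming x86 and GCC inline assembly (all sketch_-prefixed names are illustrative, not the runtime's):

    /* Busy-wait until the lock word, previously 0, holds our tag. */
    static inline void
    sketch_get_spinlock(volatile unsigned long *word, unsigned long holder)
    {
        unsigned long old;
        do {
            old = holder;
            /* xchg with a memory operand is implicitly LOCKed on x86 */
            __asm__ __volatile__("xchg %0, %1"
                                 : "+r" (old), "+m" (*word)
                                 :
                                 : "memory");
        } while (old != 0);
    }

    static inline void
    sketch_release_spinlock(volatile unsigned long *word)
    {
        __asm__ __volatile__("" ::: "memory");  /* compiler barrier */
        *word = 0;      /* an aligned word store is atomic on x86 */
    }
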
* @@ -619,13 +643,13 @@ add_new_area(int first_page, int offset, int size) gc_abort(); } - new_area_start = 4096*first_page + offset; + new_area_start = PAGE_BYTES*first_page + offset; /* Search backwards for a prior area that this follows from. If found this will save adding a new area. */ for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) { unsigned area_end = - 4096*((*new_areas)[i].page) + PAGE_BYTES*((*new_areas)[i].page) + (*new_areas)[i].offset + (*new_areas)[i].size; /*FSHOW((stderr, @@ -659,7 +683,7 @@ add_new_area(int first_page, int offset, int size) max_new_areas = new_areas_index; } -/* Update the tables for the alloc_region. The region maybe added to +/* Update the tables for the alloc_region. The region may be added to * the new_areas. * * When done the alloc_region is set up so that the next quick alloc @@ -691,8 +715,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - /* Skip if no bytes were allocated. */ + get_spinlock(&free_pages_lock,(int) alloc_region); if (alloc_region->free_pointer != alloc_region->start_addr) { + /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used)); @@ -719,8 +744,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) { - bytes_used = 4096; + if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; @@ -746,8 +771,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. */ more = 0; if ((bytes_used = (alloc_region->free_pointer - - page_address(next_page)))>4096) { - bytes_used = 4096; + - page_address(next_page)))>PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; @@ -794,7 +819,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) page_table[next_page].allocated = FREE_PAGE; next_page++; } - + release_spinlock(&free_pages_lock); + /* alloc_region is per-thread, we're ok to do this unlocked */ gc_set_region_empty(alloc_region); } @@ -836,6 +862,8 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) index ahead of the current region and bumped up here to save a lot of re-scanning. */ + get_spinlock(&free_pages_lock,(int) alloc_region); + if (unboxed) { first_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page; @@ -882,8 +910,8 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) /* Calc. the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. 
*/ more = 0; - if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) { - bytes_used = 4096; + if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; @@ -905,12 +933,12 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) page_table[next_page].large_object = large; page_table[next_page].first_object_offset = - orig_first_page_bytes_used - 4096*(next_page-first_page); + orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page); /* Calculate the number of bytes used in this page. */ more = 0; - if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) { - bytes_used = 4096; + if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; @@ -932,8 +960,9 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) if (last_page+1 > last_free_page) { last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); } + release_spinlock(&free_pages_lock); return((void *)(page_address(first_page)+orig_first_page_bytes_used)); } @@ -951,6 +980,7 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct all int num_pages; int large = !alloc_region && (nbytes >= large_object_size); + gc_assert(free_pages_lock); /* Search for a contiguous free space of at least nbytes. If it's a large object then align it on a page boundary by searching for a free page. */ @@ -979,7 +1009,7 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct all (page_table[first_page].large_object == 0) && (gc_alloc_generation == 0) && (page_table[first_page].gen == gc_alloc_generation) && - (page_table[first_page].bytes_used < (4096-32)) && + (page_table[first_page].bytes_used < (PAGE_BYTES-32)) && (page_table[first_page].write_protected == 0) && (page_table[first_page].dont_move == 0)) break; @@ -997,7 +1027,7 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct all gc_assert(page_table[first_page].write_protected == 0); last_page = first_page; - bytes_found = 4096 - page_table[first_page].bytes_used; + bytes_found = PAGE_BYTES - page_table[first_page].bytes_used; num_pages = 1; while (((bytes_found < nbytes) || (alloc_region && (num_pages < 2))) @@ -1005,12 +1035,12 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct all && (page_table[last_page+1].allocated == FREE_PAGE)) { last_page++; num_pages++; - bytes_found += 4096; + bytes_found += PAGE_BYTES; gc_assert(page_table[last_page].write_protected == 0); } - region_size = (4096 - page_table[first_page].bytes_used) - + 4096*(last_page-first_page); + region_size = (PAGE_BYTES - page_table[first_page].bytes_used) + + PAGE_BYTES*(last_page-first_page); gc_assert(bytes_found == region_size); restart_page = last_page + 1; @@ -1224,23 +1254,23 @@ copy_large_object(lispobj object, int nwords) next_page = first_page; remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); gc_assert(page_table[next_page].allocated == BOXED_PAGE); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used 
== 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; /* Remove any write-protection. We should be able to rely * on the write-protect flag to avoid redundant calls. */ if (page_table[next_page].write_protected) { - os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL); + os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL); page_table[next_page].write_protected = 0; } - remaining_bytes -= 4096; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -1261,12 +1291,12 @@ copy_large_object(lispobj object, int nwords) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && (page_table[next_page].allocated == BOXED_PAGE) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* Checks out OK, free the page. Don't need to bother zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected as they @@ -1389,18 +1419,18 @@ copy_large_unboxed_object(lispobj object, int nwords) next_page = first_page; remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); gc_assert((page_table[next_page].allocated == UNBOXED_PAGE) || (page_table[next_page].allocated == BOXED_PAGE)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; page_table[next_page].allocated = UNBOXED_PAGE; - remaining_bytes -= 4096; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -1421,13 +1451,13 @@ copy_large_unboxed_object(lispobj object, int nwords) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && ((page_table[next_page].allocated == UNBOXED_PAGE) || (page_table[next_page].allocated == BOXED_PAGE)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* Checks out OK, free the page. Don't need to both zeroing * pages as this should have been done before shrinking the * object. 
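
The asserts in copy_large_object above encode the invariant that page k of a contiguous large object stores first_object_offset == -PAGE_BYTES*k, which is what lets the collector recover the object's start from any of its pages. A sketch under that invariant, with stand-in types since the real struct page and page_table live in the runtime's headers:

    #define PAGE_BYTES 4096
    #define NUM_PAGES_SKETCH 1024

    struct page_sketch {            /* stand-in for the runtime's struct page */
        int first_object_offset;    /* -PAGE_BYTES*k on the object's page k */
    };

    static struct page_sketch page_table_sketch[NUM_PAGES_SKETCH];
    static char *heap_base_sketch;

    /* Recover the start of a large object from any one of its pages. */
    static void *
    object_start_sketch(int page)
    {
        return heap_base_sketch + page*PAGE_BYTES
            + page_table_sketch[page].first_object_offset;
    }
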
These pages shouldn't be write-protected, even if @@ -2084,21 +2114,21 @@ search_space(lispobj *start, size_t words, lispobj *pointer) return (NULL); } -static lispobj* +lispobj* search_read_only_space(lispobj *pointer) { lispobj* start = (lispobj*)READ_ONLY_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER); + lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0); if ((pointer < start) || (pointer >= end)) return NULL; return (search_space(start, (pointer+2)-start, pointer)); } -static lispobj * +lispobj * search_static_space(lispobj *pointer) { lispobj* start = (lispobj*)STATIC_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER); + lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0); if ((pointer < start) || (pointer >= end)) return NULL; return (search_space(start, (pointer+2)-start, pointer)); @@ -2122,7 +2152,8 @@ search_dynamic_space(lispobj *pointer) /* Is there any possibility that pointer is a valid Lisp object * reference, and/or something else (e.g. subroutine call return - * address) which should prevent us from moving the referred-to thing? */ + * address) which should prevent us from moving the referred-to thing? + * This is called from preserve_pointers() */ static int possibly_valid_dynamic_space_pointer(lispobj *pointer) { @@ -2149,21 +2180,7 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) /* Check that the object pointed to is consistent with the pointer * low tag. - * - * FIXME: It's not safe to rely on the result from this check - * before an object is initialized. Thus, if we were interrupted - * just as an object had been allocated but not initialized, the - * GC relying on this result could bogusly reclaim the memory. - * However, we can't really afford to do without this check. So - * we should make it safe somehow. - * (1) Perhaps just review the code to make sure - * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such - * thing is wrapped around critical sections where allocated - * memory type bits haven't been set. - * (2) Perhaps find some other hack to protect against this, e.g. - * recording the result of the last call to allocate-lisp-memory, - * and returning true from this function when *pointer is - * a reference to that result. 
*/ + */ switch (lowtag_of((lispobj)pointer)) { case FUN_POINTER_LOWTAG: /* Start_addr should be the enclosing code object, or a closure @@ -2290,7 +2307,8 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) case COMPLEX_LONG_FLOAT_WIDETAG: #endif case SIMPLE_ARRAY_WIDETAG: - case COMPLEX_STRING_WIDETAG: + case COMPLEX_BASE_STRING_WIDETAG: + case COMPLEX_VECTOR_NIL_WIDETAG: case COMPLEX_BIT_VECTOR_WIDETAG: case COMPLEX_VECTOR_WIDETAG: case COMPLEX_ARRAY_WIDETAG: @@ -2304,13 +2322,17 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) #ifdef LONG_FLOAT_WIDETAG case LONG_FLOAT_WIDETAG: #endif - case SIMPLE_STRING_WIDETAG: + case SIMPLE_BASE_STRING_WIDETAG: case SIMPLE_BIT_VECTOR_WIDETAG: case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: @@ -2388,13 +2410,17 @@ maybe_adjust_large_object(lispobj *where) boxed = BOXED_PAGE; break; case BIGNUM_WIDETAG: - case SIMPLE_STRING_WIDETAG: + case SIMPLE_BASE_STRING_WIDETAG: case SIMPLE_BIT_VECTOR_WIDETAG: case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: @@ -2444,21 +2470,21 @@ maybe_adjust_large_object(lispobj *where) next_page = first_page; remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); gc_assert((page_table[next_page].allocated == BOXED_PAGE) || (page_table[next_page].allocated == UNBOXED_PAGE)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset == - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].allocated = boxed; /* Shouldn't be write-protected at this stage. Essential that the * pages aren't. */ gc_assert(!page_table[next_page].write_protected); - remaining_bytes -= 4096; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -2480,13 +2506,13 @@ maybe_adjust_large_object(lispobj *where) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && ((page_table[next_page].allocated == UNBOXED_PAGE) || (page_table[next_page].allocated == BOXED_PAGE)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* It checks out OK, free the page. We don't need to both zeroing * pages as this should have been done before shrinking the * object. 
These pages shouldn't be write protected as they @@ -2550,17 +2576,15 @@ preserve_pointer(void *addr) /* quick check 2: Check the offset within the page. * - * FIXME: The mask should have a symbolic name, and ideally should - * be derived from page size instead of hardwired to 0xfff. - * (Also fix other uses of 0xfff, elsewhere.) */ - if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used) + */ + if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used) return; /* Filter out anything which can't be a pointer to a Lisp object * (or, as a special case which also requires dont_move, a return * address referring to something in a CodeObject). This is * expensive but important, since it vastly reduces the - * probability that random garbage will be bogusly interpreter as + * probability that random garbage will be bogusly interpreted as * a pointer which prevents a page from moving. */ if (!(possibly_valid_dynamic_space_pointer(addr))) return; @@ -2579,7 +2603,7 @@ preserve_pointer(void *addr) while (page_table[first_page].first_object_offset != 0) { --first_page; /* Do some checks. */ - gc_assert(page_table[first_page].bytes_used == 4096); + gc_assert(page_table[first_page].bytes_used == PAGE_BYTES); gc_assert(page_table[first_page].gen == from_space); gc_assert(page_table[first_page].allocated == region_allocation); } @@ -2595,7 +2619,7 @@ preserve_pointer(void *addr) if ((page_table[addr_page_index].allocated == FREE_PAGE) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ - || (((unsigned)addr & 0xfff) + || (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)) { FSHOW((stderr, "weird? ignore ptr 0x%x to freed area of large object\n", @@ -2629,8 +2653,8 @@ preserve_pointer(void *addr) gc_assert(!page_table[i].write_protected); /* Check whether this is the last page in this contiguous block.. */ - if ((page_table[i].bytes_used < 4096) - /* ..or it is 4096 and is the last in the block */ + if ((page_table[i].bytes_used < PAGE_BYTES) + /* ..or it is PAGE_BYTES and is the last in the block */ || (page_table[i+1].allocated == FREE_PAGE) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. gen */ @@ -2668,8 +2692,9 @@ update_page_write_prot(int page) gc_assert(page_table[page].allocated != FREE_PAGE); gc_assert(page_table[page].bytes_used != 0); - /* Skip if it's already write-protected or an unboxed page. */ + /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected + || page_table[page].dont_move || (page_table[page].allocated & UNBOXED_PAGE)) return (0); @@ -2703,7 +2728,7 @@ update_page_write_prot(int page) /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/ os_protect((void *)page_addr, - 4096, + PAGE_BYTES, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE); /* Note the page as protected in the page tables. */ @@ -2775,8 +2800,8 @@ scavenge_generation(int generation) for (last_page = i; ; last_page++) /* Check whether this is the last page in this contiguous * block. 
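
preserve_pointer's "quick check 2" above now masks with PAGE_BYTES - 1 rather than the old hardwired 0xfff; the two are equivalent only because PAGE_BYTES is a power of two. A sketch of the test, with hypothetical names:

    #define PAGE_BYTES 4096  /* must be a power of two for the mask to work */

    /* An ambiguous root can only pin a page if its offset within the page
     * falls below the bytes_used watermark; anything past that is known
     * free space and cannot be (part of) a live object. */
    static int
    offset_is_in_used_space_sketch(unsigned addr, unsigned bytes_used)
    {
        return (addr & (PAGE_BYTES - 1)) <= bytes_used;
    }
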
*/ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ || (!(page_table[last_page+1].allocated & BOXED_PAGE)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) @@ -2797,7 +2822,7 @@ scavenge_generation(int generation) #endif { scavenge(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*4096)/4); + + (last_page-i)*PAGE_BYTES)/4); /* Now scan the pages and write protect those * that don't have pointers to younger @@ -2897,8 +2922,8 @@ scavenge_newspace_generation_one_scan(int generation) for (last_page = i; ;last_page++) { /* Check whether this is the last page in this * contiguous block */ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ || (!(page_table[last_page+1].allocated & BOXED_PAGE)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) @@ -2927,7 +2952,7 @@ scavenge_newspace_generation_one_scan(int generation) - page_table[i].first_object_offset)/4; else size = (page_table[last_page].bytes_used - + (last_page-i)*4096 + + (last_page-i)*PAGE_BYTES - page_table[i].first_object_offset)/4; { @@ -3094,7 +3119,7 @@ unprotect_oldspace(void) /* Remove any write-protection. We should be able to rely * on the write-protect flag to avoid redundant calls. */ if (page_table[i].write_protected) { - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[i].write_protected = 0; } } @@ -3142,7 +3167,7 @@ free_oldspace(void) void *page_start = (void *)page_address(last_page); if (page_table[last_page].write_protected) { - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[last_page].write_protected = 0; } } @@ -3163,8 +3188,8 @@ free_oldspace(void) page_start = (void *)page_address(first_page); - os_invalidate(page_start, 4096*(last_page-first_page)); - addr = os_validate(page_start, 4096*(last_page-first_page)); + os_invalidate(page_start, PAGE_BYTES*(last_page-first_page)); + addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page)); if (addr == NULL || addr != page_start) { /* Is this an error condition? 
I couldn't really tell from * the old CMU CL code, which fprintf'ed a message with @@ -3182,7 +3207,7 @@ free_oldspace(void) int *page_start; page_start = (int *)page_address(first_page); - i586_bzero(page_start, 4096*(last_page-first_page)); + i586_bzero(page_start, PAGE_BYTES*(last_page-first_page)); } first_page = last_page; @@ -3231,7 +3256,7 @@ verify_space(lispobj *start, size_t words) int is_in_dynamic_space = (find_page_index((void*)start) != -1); int is_in_readonly_space = (READ_ONLY_SPACE_START <= (unsigned)start && - (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER)); + (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); while (words > 0) { size_t count = 1; @@ -3241,10 +3266,10 @@ verify_space(lispobj *start, size_t words) int page_index = find_page_index((void*)thing); int to_readonly_space = (READ_ONLY_SPACE_START <= thing && - thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER)); + thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); int to_static_space = (STATIC_SPACE_START <= thing && - thing < SymbolValue(STATIC_SPACE_FREE_POINTER)); + thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0)); /* Does it point to the dynamic space? */ if (page_index != -1) { @@ -3295,7 +3320,8 @@ verify_space(lispobj *start, size_t words) case RATIO_WIDETAG: case COMPLEX_WIDETAG: case SIMPLE_ARRAY_WIDETAG: - case COMPLEX_STRING_WIDETAG: + case COMPLEX_BASE_STRING_WIDETAG: + case COMPLEX_VECTOR_NIL_WIDETAG: case COMPLEX_BIT_VECTOR_WIDETAG: case COMPLEX_VECTOR_WIDETAG: case COMPLEX_ARRAY_WIDETAG: @@ -3380,13 +3406,17 @@ verify_space(lispobj *start, size_t words) #ifdef COMPLEX_LONG_FLOAT_WIDETAG case COMPLEX_LONG_FLOAT_WIDETAG: #endif - case SIMPLE_STRING_WIDETAG: + case SIMPLE_BASE_STRING_WIDETAG: case SIMPLE_BIT_VECTOR_WIDETAG: case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: @@ -3439,18 +3469,20 @@ verify_gc(void) * to grep for all foo_size and rename the appropriate ones to * foo_count. */ int read_only_space_size = - (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) + (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0) - (lispobj*)READ_ONLY_SPACE_START; int static_space_size = - (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER) + (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0) - (lispobj*)STATIC_SPACE_START; + struct thread *th; + for_each_thread(th) { int binding_stack_size = - (lispobj*)SymbolValue(BINDING_STACK_POINTER) - - (lispobj*)BINDING_STACK_START; - + (lispobj*)SymbolValue(BINDING_STACK_POINTER,th) + - (lispobj*)th->binding_stack_start; + verify_space(th->binding_stack_start, binding_stack_size); + } verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size); verify_space((lispobj*)STATIC_SPACE_START , static_space_size); - verify_space((lispobj*)BINDING_STACK_START , binding_stack_size); } static void @@ -3476,8 +3508,8 @@ verify_generation(int generation) for (last_page = i; ;last_page++) /* Check whether this is the last page in this contiguous * block. 
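
Throughout this patch, SymbolValue and SetSymbolValue gain a second argument: the thread whose binding should be consulted, with 0 meaning the global value. Roughly, a symbol carrying a TLS index is looked up in that thread's thread-local storage first, falling back to the global cell when unbound there. This sketch uses simplified stand-in types and an assumed "no value" marker, so treat it as an illustration of the semantics rather than the runtime's actual definitions:

    typedef unsigned long lispobj;

    struct symbol_sketch {          /* stand-in for the real struct symbol */
        unsigned long tls_index;    /* 0 if never thread-local */
        lispobj global_value;
    };

    struct thread_sketch {          /* stand-in for the real struct thread */
        lispobj *tls;               /* thread-local storage slots */
    };

    #define NO_TLS_VALUE_SKETCH ((lispobj)-1)

    static lispobj
    symbol_value_sketch(struct symbol_sketch *sym, struct thread_sketch *th)
    {
        if (th && sym->tls_index) {
            lispobj v = th->tls[sym->tls_index];
            if (v != NO_TLS_VALUE_SKETCH)   /* bound in this thread? */
                return v;
        }
        return sym->global_value;           /* fall back to the global cell */
    }
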
*/ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ || (page_table[last_page+1].allocated != region_allocation) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) @@ -3485,7 +3517,7 @@ verify_generation(int generation) break; verify_space(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*4096)/4); + + (last_page-i)*PAGE_BYTES)/4); i = last_page; } } @@ -3509,7 +3541,7 @@ verify_zero_fill(void) } } } else { - int free_bytes = 4096 - page_table[page].bytes_used; + int free_bytes = PAGE_BYTES - page_table[page].bytes_used; if (free_bytes > 0) { int *start_addr = (int *)((unsigned)page_address(page) + page_table[page].bytes_used); @@ -3558,13 +3590,14 @@ write_protect_generation_pages(int generation) for (i = 0; i < last_free_page; i++) if ((page_table[i].allocated == BOXED_PAGE) && (page_table[i].bytes_used != 0) + && !page_table[i].dont_move && (page_table[i].gen == generation)) { void *page_start; page_start = (void *)page_address(i); os_protect(page_start, - 4096, + PAGE_BYTES, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); /* Note the page as protected in the page tables. */ @@ -3588,7 +3621,7 @@ garbage_collect_generation(int generation, int raise) unsigned long bytes_freed; unsigned long i; unsigned long static_space_size; - + struct thread *th; gc_assert(generation <= (NUM_GENERATIONS-1)); /* The oldest generation can't be raised. */ @@ -3621,7 +3654,8 @@ garbage_collect_generation(int generation, int raise) /* Before any pointers are preserved, the dont_move flags on the * pages need to be cleared. */ for (i = 0; i < last_free_page; i++) - page_table[i].dont_move = 0; + if(page_table[i].gen==from_space) + page_table[i].dont_move = 0; /* Un-write-protect the old-space pages. This is essential for the * promoted pages as they may contain pointers into the old-space @@ -3630,12 +3664,46 @@ garbage_collect_generation(int generation, int raise) * be un-protected anyway before unmapping later. */ unprotect_oldspace(); - /* Scavenge the stack's conservative roots. */ - { + /* Scavenge the stacks' conservative roots. */ + + /* there are potentially two stacks for each thread: the main + * stack, which may contain Lisp pointers, and the alternate stack. + * We don't ever run Lisp code on the altstack, but it may + * host a sigcontext with lisp objects in it */ + + /* what we need to do: (1) find the stack pointer for the main + * stack; scavenge it (2) find the interrupt context on the + * alternate stack that might contain lisp values, and scavenge + * that */ + + /* we assume that none of the preceding applies to the thread that + * initiates GC. If you ever call GC from inside an altstack + * handler, you will lose. 
 */
+ for_each_thread(th) {
 void **ptr;
- for (ptr = (void **)CONTROL_STACK_END - 1;
- ptr > (void **)&raise;
- ptr--) {
+ void **esp=(void **)-1;
+ int i,free;
+#ifdef LISP_FEATURE_SB_THREAD
+ if(th==arch_os_get_current_thread()) {
+ esp = (void **) &raise;
+ } else {
+ void **esp1;
+ free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+ for(i=free-1;i>=0;i--) {
+ os_context_t *c=th->interrupt_contexts[i];
+ esp1 = (void **) *os_context_register_addr(c,reg_ESP);
+ if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
+ if(esp1<esp) esp=esp1;
+ for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+ preserve_pointer(*ptr);
+ }
+ }
+ }
+ }
+#else
+ esp = (void **) &raise;
+#endif
+ for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
 preserve_pointer(*ptr);
 }
 }
@@ -3646,9 +3714,7 @@
 fprintf(stderr,
 "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
 num_dont_move_pages,
- /* FIXME: 4096 should be symbolic constant here and
- * prob'ly elsewhere too. */
- num_dont_move_pages * 4096);
+ num_dont_move_pages * PAGE_BYTES);
 }
 #endif
@@ -3656,18 +3722,31 @@
 /* Scavenge the Lisp functions of the interrupt handlers, taking
 * care to avoid SIG_DFL and SIG_IGN. */
+ for_each_thread(th) {
+ struct interrupt_data *data=th->interrupt_data;
 for (i = 0; i < NSIG; i++) {
- union interrupt_handler handler = interrupt_handlers[i];
+ union interrupt_handler handler = data->interrupt_handlers[i];
 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- scavenge((lispobj *)(interrupt_handlers + i), 1);
+ scavenge((lispobj *)(data->interrupt_handlers + i), 1);
+ }
+ }
+ }
+ /* Scavenge the binding stacks. */
+ {
+ struct thread *th;
+ for_each_thread(th) {
+ long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+ th->binding_stack_start;
+ scavenge((lispobj *) th->binding_stack_start,len);
+#ifdef LISP_FEATURE_SB_THREAD
+ /* do the tls as well */
+ len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ (sizeof (struct thread))/(sizeof (lispobj));
+ scavenge((lispobj *) (th+1),len);
+#endif
 }
 }
-
- /* Scavenge the binding stack. */
- scavenge((lispobj *) BINDING_STACK_START,
- (lispobj *)SymbolValue(BINDING_STACK_POINTER) -
- (lispobj *)BINDING_STACK_START);
 /* The original CMU CL code had scavenge-read-only-space code
 * controlled by the Lisp-level variable
@@ -3690,7 +3769,7 @@
 /* Scavenge static space. */
 static_space_size =
- (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) -
+ (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
 (lispobj *)STATIC_SPACE_START;
 if (gencgc_verbose > 1) {
 FSHOW((stderr,
@@ -3801,7 +3880,7 @@ update_x86_dynamic_space_free_pointer(void)
 last_free_page = last_page+1;
 SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096));
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
 return 0; /* dummy value: return something ... */
 }
@@ -3923,7 +4002,10 @@ collect_garbage(unsigned last_gen)
 gc_alloc_generation = 0;
 update_x86_dynamic_space_free_pointer();
-
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ if(gencgc_verbose)
+ fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+ auto_gc_trigger);
 SHOW("returning from collect_garbage");
 }
@@ -3957,11 +4039,11 @@ gc_free_heap(void)
 page_start = (void *)page_address(page);
 /* First, remove any write-protection.
*/ - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[page].write_protected = 0; - os_invalidate(page_start,4096); - addr = os_validate(page_start,4096); + os_invalidate(page_start,PAGE_BYTES); + addr = os_validate(page_start,PAGE_BYTES); if (addr == NULL || addr != page_start) { lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x", page_start, @@ -4005,7 +4087,7 @@ gc_free_heap(void) gc_set_region_empty(&unboxed_region); last_free_page = 0; - SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base)); + SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0); if (verify_after_free_heap) { /* Check whether purify has left any bad pointers. */ @@ -4076,22 +4158,22 @@ gencgc_pickup_dynamic(void) { int page = 0; int addr = DYNAMIC_SPACE_START; - int alloc_ptr = SymbolValue(ALLOCATION_POINTER); + int alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); /* Initialize the first region. */ do { page_table[page].allocated = BOXED_PAGE; page_table[page].gen = 0; - page_table[page].bytes_used = 4096; + page_table[page].bytes_used = PAGE_BYTES; page_table[page].large_object = 0; page_table[page].first_object_offset = (void *)DYNAMIC_SPACE_START - page_address(page); - addr += 4096; + addr += PAGE_BYTES; page++; } while (addr < alloc_ptr); - generations[0].bytes_allocated = 4096*page; - bytes_allocated = 4096*page; + generations[0].bytes_allocated = PAGE_BYTES*page; + bytes_allocated = PAGE_BYTES*page; } @@ -4104,7 +4186,6 @@ gc_initialize_pointers(void) -extern boolean maybe_gc_pending ; /* alloc(..) is the external interface for memory allocation. It * allocates to generation 0. It is not called from within the garbage * collector as it is only external uses that need the check for heap @@ -4120,19 +4201,33 @@ extern boolean maybe_gc_pending ; char * alloc(int nbytes) { - struct alloc_region *region= &boxed_region; + struct thread *th=arch_os_get_current_thread(); + struct alloc_region *region= + th ? &(th->alloc_region) : &boxed_region; void *new_obj; void *new_free_pointer; /* Check for alignment allocation problems. */ gc_assert((((unsigned)region->free_pointer & 0x7) == 0) && ((nbytes & 0x7) == 0)); - /* At this point we should either be in pseudo-atomic, or early - * enough in cold initn that interrupts are not yet enabled anyway. - * It would be nice to assert same. - */ - gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC)); - + if(all_threads) + /* there are a few places in the C code that allocate data in the + * heap before Lisp starts. This is before interrupts are enabled, + * so we don't need to check for pseudo-atomic */ +#ifdef LISP_FEATURE_SB_THREAD + if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) { + register u32 fs; + fprintf(stderr, "fatal error in thread 0x%x, pid=%d\n", + th,getpid()); + __asm__("movl %fs,%0" : "=r" (fs) : ); + fprintf(stderr, "fs is %x, th->tls_cookie=%x (should be identical)\n", + debug_get_fs(),th->tls_cookie); + lose("If you see this message before 2003.12.01, mail details to sbcl-devel\n"); + } +#else + gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)); +#endif + /* maybe we can do this quickly ... */ new_free_pointer = region->free_pointer + nbytes; if (new_free_pointer <= region->end_addr) { @@ -4145,33 +4240,20 @@ alloc(int nbytes) * we should GC in the near future */ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - auto_gc_trigger *= 2; /* set things up so that GC happens when we finish the PA - * section. 
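
The "maybe we can do this quickly" path above is an unsynchronized pointer bump within the calling thread's own region; free_pages_lock is needed only when the region is exhausted and alloc() falls into gc_alloc_with_region. A self-contained sketch of the fast path, with a stand-in region type:

    struct region_sketch {          /* stand-in for struct alloc_region */
        char *free_pointer;
        char *end_addr;
    };

    /* Returns the new object, or 0 when the region is exhausted and the
     * caller must take the locked slow path. */
    static void *
    try_alloc_fast_sketch(struct region_sketch *r, int nbytes)
    {
        char *new_free_pointer = r->free_pointer + nbytes;
        if (new_free_pointer <= r->end_addr) {
            void *obj = r->free_pointer;   /* bump; no lock: region is ours */
            r->free_pointer = new_free_pointer;
            return obj;
        }
        return 0;
    }
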
*/ - maybe_gc_pending=1; - SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1)); + * section. We only do this if there wasn't a pending handler + * already, in case it was a gc. If it wasn't a GC, the next + * allocation will get us back to this point anyway, so no harm done + */ + struct interrupt_data *data=th->interrupt_data; + if(!data->pending_handler) + maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0); } new_obj = gc_alloc_with_region(nbytes,0,region,0); return (new_obj); } -/* - * noise to manipulate the gc trigger stuff - */ - -void -set_auto_gc_trigger(os_vm_size_t dynamic_usage) -{ - auto_gc_trigger += dynamic_usage; -} - -void -clear_auto_gc_trigger(void) -{ - auto_gc_trigger = 0; -} - /* Find the code object for the given pc, or return NULL on failure. * * FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */ @@ -4232,23 +4314,25 @@ gencgc_handle_wp_violation(void* fault_addr) return 0; } else { - - /* The only acceptable reason for an signal like this from the - * heap is that the generational GC write-protected the page. */ - if (page_table[page_index].write_protected != 1) { - lose("access failure in heap page not marked as write-protected"); + if (page_table[page_index].write_protected) { + /* Unprotect the page. */ + os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL); + page_table[page_index].write_protected_cleared = 1; + page_table[page_index].write_protected = 0; + } else { + /* The only acceptable reason for this signal on a heap + * access is that GENCGC write-protected the page. + * However, if two CPUs hit a wp page near-simultaneously, + * we had better not have the second one lose here if it + * does this test after the first one has already set wp=0 + */ + if(page_table[page_index].write_protected_cleared != 1) + lose("fault in heap page not marked as write-protected"); } - - /* Unprotect the page. */ - os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL); - page_table[page_index].write_protected = 0; - page_table[page_index].write_protected_cleared = 1; - /* Don't worry, we can handle it. */ return 1; } } - /* This is to be called when we catch a SIGSEGV/SIGBUS, determine that * it's not just a case of the program hitting the write barrier, and * are about to let Lisp deal with it. It's basically just a @@ -4257,9 +4341,12 @@ void unhandled_sigmemoryfault() {} -gc_alloc_update_all_page_tables(void) +void gc_alloc_update_all_page_tables(void) { /* Flush the alloc regions updating the tables. */ + struct thread *th; + for_each_thread(th) + gc_alloc_update_page_tables(0, &th->alloc_region); gc_alloc_update_page_tables(1, &unboxed_region); gc_alloc_update_page_tables(0, &boxed_region); }
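
To summarize the revised write-barrier recovery in gencgc_handle_wp_violation above: the first thread to fault on a protected heap page unprotects it and records write_protected_cleared, and a second thread racing on the same page then treats its fault as benign rather than fatal. A condensed sketch of that flow, using stand-in names and calling mprotect() directly where the runtime goes through os_protect():

    #include <sys/mman.h>

    #define PAGE_BYTES 4096
    #define NUM_PAGES_SKETCH 1024

    struct wp_page_sketch {
        unsigned write_protected : 1;
        unsigned write_protected_cleared : 1;
    };

    static struct wp_page_sketch wp_table_sketch[NUM_PAGES_SKETCH];
    static char *heap_base_sketch;

    /* Called on SIGSEGV with the faulting address; returns 1 if the fault
     * was just the write barrier firing and execution may resume. */
    static int
    handle_wp_fault_sketch(void *fault_addr)
    {
        long page = ((char *)fault_addr - heap_base_sketch) / PAGE_BYTES;

        if (page < 0 || page >= NUM_PAGES_SKETCH)
            return 0;                        /* not a dynamic-space page */
        if (wp_table_sketch[page].write_protected) {
            mprotect(heap_base_sketch + page*PAGE_BYTES, PAGE_BYTES,
                     PROT_READ | PROT_WRITE | PROT_EXEC);
            wp_table_sketch[page].write_protected_cleared = 1;
            wp_table_sketch[page].write_protected = 0;
            return 1;
        }
        /* Another CPU may have taken the same fault and already unprotected
         * the page; write_protected_cleared distinguishes that benign race
         * from a genuine stray memory fault. */
        return wp_table_sketch[page].write_protected_cleared;
    }
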