diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 6e394e6..2b56009 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -26,6 +26,8 @@
 #include <stdio.h>
 #include <signal.h>
+#include <errno.h>
+#include <string.h>
 #include "runtime.h"
 #include "sbcl.h"
 #include "os.h"
@@ -37,10 +39,21 @@
 #include "arch.h"
 #include "gc.h"
 #include "gc-internal.h"
+#include "thread.h"
+#include "genesis/vector.h"
+#include "genesis/weak-pointer.h"
+#include "genesis/simple-fun.h"
 
 /* assembly language stub that executes trap_PendingInterrupt */
 void do_pending_interrupt(void);
 
+/* forward declarations */
+int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed);
+void gc_set_region_empty(struct alloc_region *region);
+void gc_alloc_update_all_page_tables(void);
+static void gencgc_pickup_dynamic(void);
+boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
+
 /*
 * GC parameters
 */
@@ -70,7 +83,8 @@ boolean gencgc_unmap_zero = 1;
 #endif
 
 /* the minimum size (in bytes) for a large object */
-unsigned large_object_size = 4 * 4096;
+unsigned large_object_size = 4 * PAGE_BYTES;
+
 
 /*
 * debugging
 */
@@ -119,7 +133,8 @@ boolean gencgc_zero_check_during_free_heap = 0;
 
 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
 unsigned long bytes_allocated = 0;
-static unsigned long auto_gc_trigger = 0;
+extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
+unsigned long auto_gc_trigger = 0;
 
 /* the source and destination generations. These are set before a GC starts
 * scavenging. */
@@ -127,11 +142,6 @@ int from_space;
 int new_space;
 
-/* FIXME: It would be nice to use this symbolic constant instead of
- * bare 4096 almost everywhere. We could also use an assertion that
- * it's equal to getpagesize(). */
-#define PAGE_BYTES 4096
-
 /* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 * NUM_PAGES is set from the size of the dynamic space. */
@@ -146,7 +156,7 @@ static void *heap_base = NULL;
 inline void *
 page_address(int page_num)
 {
- return (heap_base + (page_num * 4096));
+ return (heap_base + (page_num * PAGE_BYTES));
 }
 
 /* Find the page index within the page_table for the given
@@ -157,7 +167,7 @@
 find_page_index(void *addr)
 int index = addr-heap_base;
 
 if (index >= 0) {
- index = ((unsigned int)index)/4096;
+ index = ((unsigned int)index)/PAGE_BYTES;
 if (index < NUM_PAGES)
 return (index);
 }
@@ -240,7 +250,16 @@ unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
 static int last_free_page;
-static int last_used_page = 0;
+
+/* This lock is to prevent multiple threads from simultaneously
+ * allocating new regions which overlap each other. Note that the
+ * majority of GC is single-threaded, but alloc() may be called from
+ * >1 thread at a time and must be thread-safe. 
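+ * A minimal sketch of the intended pattern, using only calls that
+ * appear later in this patch (cf. gc_alloc_new_region()):
+ *
+ *   get_spinlock(&free_pages_lock, (int)alloc_region);
+ *   ... search page_table[], claim pages, update generations[] ...
+ *   release_spinlock(&free_pages_lock);
+ *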
This lock must be + * seized before all accesses to generations[] or to parts of + * page_table[] that other threads may want to see */ + +static lispobj free_pages_lock=0; + /* * miscellaneous heap functions @@ -255,7 +274,7 @@ count_write_protect_generation_pages(int generation) int count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].gen == generation) && (page_table[i].write_protected == 1)) count++; @@ -316,6 +335,8 @@ gen_av_mem_age(int gen) / ((double)generations[gen].bytes_allocated); } +void fpu_save(int *); /* defined in x86-assem.S */ +void fpu_restore(int *); /* defined in x86-assem.S */ /* The verbose argument controls how much to print: 0 for normal * level of detail; 1 for debugging. */ static void @@ -336,7 +357,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Print the heap stats. */ fprintf(stderr, - " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n"); + " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); for (i = 0; i < gens; i++) { int j; @@ -344,22 +365,23 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ int unboxed_cnt = 0; int large_boxed_cnt = 0; int large_unboxed_cnt = 0; + int pinned_cnt=0; for (j = 0; j < last_free_page; j++) if (page_table[j].gen == i) { /* Count the number of boxed pages within the given * generation. */ - if (page_table[j].allocated == BOXED_PAGE) { + if (page_table[j].allocated & BOXED_PAGE_FLAG) { if (page_table[j].large_object) large_boxed_cnt++; else boxed_cnt++; } - + if(page_table[j].dont_move) pinned_cnt++; /* Count the number of unboxed pages within the given * generation. */ - if (page_table[j].allocated == UNBOXED_PAGE) { + if (page_table[j].allocated & UNBOXED_PAGE_FLAG) { if (page_table[j].large_object) large_unboxed_cnt++; else @@ -370,11 +392,12 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ gc_assert(generations[i].bytes_allocated == count_generation_bytes_allocated(i)); fprintf(stderr, - " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n", + " %1d: %5d %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n", i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt, + pinned_cnt, generations[i].bytes_allocated, - (count_generation_pages(i)*4096 + (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated), generations[i].gc_trigger, count_write_protect_generation_pages(i), @@ -441,10 +464,6 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ struct alloc_region boxed_region; struct alloc_region unboxed_region; -/* XX hack. Current Lisp code uses the following. Need copying in/out. */ -void *current_region_free_pointer; -void *current_region_end_addr; - /* The generation currently being allocated to. 
*/ static int gc_alloc_generation; @@ -476,10 +495,7 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) { int first_page; int last_page; - int region_size; - int restart_page; int bytes_found; - int num_pages; int i; /* @@ -492,102 +508,17 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - + get_spinlock(&free_pages_lock,(int) alloc_region); if (unboxed) { - restart_page = + first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; } else { - restart_page = + first_page = generations[gc_alloc_generation].alloc_start_page; } - - /* Search for a contiguous free region of at least nbytes with the - * given properties: boxed/unboxed, generation. */ - do { - first_page = restart_page; - - /* First search for a page with at least 32 bytes free, which is - * not write-protected, and which is not marked dont_move. - * - * FIXME: This looks extremely similar, perhaps identical, to - * code in gc_alloc_large(). It should be shared somehow. */ - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE) /* not free page */ - && ((unboxed && - (page_table[first_page].allocated != UNBOXED_PAGE)) - || (!unboxed && - (page_table[first_page].allocated != BOXED_PAGE)) - || (page_table[first_page].large_object != 0) - || (page_table[first_page].gen != gc_alloc_generation) - || (page_table[first_page].bytes_used >= (4096-32)) - || (page_table[first_page].write_protected != 0) - || (page_table[first_page].dont_move != 0))) - first_page++; - /* Check for a failure. */ - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! gc_alloc_new_region failed on first_page, nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } - - gc_assert(page_table[first_page].write_protected == 0); - - /* - FSHOW((stderr, - "/first_page=%d bytes_used=%d\n", - first_page, page_table[first_page].bytes_used)); - */ - - /* Now search forward to calculate the available region size. It - * tries to keeps going until nbytes are found and the number of - * pages is greater than some level. This helps keep down the - * number of pages in a region. */ - last_page = first_page; - bytes_found = 4096 - page_table[first_page].bytes_used; - num_pages = 1; - while (((bytes_found < nbytes) || (num_pages < 2)) - && (last_page < (NUM_PAGES-1)) - && (page_table[last_page+1].allocated == FREE_PAGE)) { - last_page++; - num_pages++; - bytes_found += 4096; - gc_assert(page_table[last_page].write_protected == 0); - } - - region_size = (4096 - page_table[first_page].bytes_used) - + 4096*(last_page-first_page); - - gc_assert(bytes_found == region_size); - - /* - FSHOW((stderr, - "/last_page=%d bytes_found=%d num_pages=%d\n", - last_page, bytes_found, num_pages)); - */ - - restart_page = last_page + 1; - } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); - - /* Check for a failure. */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! 
gc_alloc_new_region() failed on restart_page, nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } - - /* - FSHOW((stderr, - "/gc_alloc_new_region() gen %d: %d bytes: pages %d to %d: addr=%x\n", - gc_alloc_generation, - bytes_found, - first_page, - last_page, - page_address(first_page))); - */ + last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); + bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) + + PAGE_BYTES*(last_page-first_page); /* Set up the alloc_region. */ alloc_region->first_page = first_page; @@ -597,61 +528,65 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) alloc_region->free_pointer = alloc_region->start_addr; alloc_region->end_addr = alloc_region->start_addr + bytes_found; - if (gencgc_zero_check) { - int *p; - for (p = (int *)alloc_region->start_addr; - p < (int *)alloc_region->end_addr; p++) { - if (*p != 0) { - /* KLUDGE: It would be nice to use %lx and explicit casts - * (long) in code like this, so that it is less likely to - * break randomly when running on a machine with different - * word sizes. -- WHN 19991129 */ - lose("The new region at %x is not zero.", p); - } - } - } - /* Set up the pages. */ /* The first page may have already been in use. */ if (page_table[first_page].bytes_used == 0) { if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE; + page_table[first_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[first_page].allocated = BOXED_PAGE; + page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].large_object = 0; page_table[first_page].first_object_offset = 0; } if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG; + gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); for (i = first_page+1; i <= last_page; i++) { if (unboxed) - page_table[i].allocated = UNBOXED_PAGE; + page_table[i].allocated = UNBOXED_PAGE_FLAG; else - page_table[i].allocated = BOXED_PAGE; + page_table[i].allocated = BOXED_PAGE_FLAG; page_table[i].gen = gc_alloc_generation; page_table[i].large_object = 0; /* This may not be necessary for unboxed regions (think it was * broken before!) */ page_table[i].first_object_offset = alloc_region->start_addr - page_address(i); + page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ; } - /* Bump up last_free_page. */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); - if (last_page+1 > last_used_page) - last_used_page = last_page+1; + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), + 0); } + release_spinlock(&free_pages_lock); + + /* we can do this after releasing free_pages_lock */ + if (gencgc_zero_check) { + int *p; + for (p = (int *)alloc_region->start_addr; + p < (int *)alloc_region->end_addr; p++) { + if (*p != 0) { + /* KLUDGE: It would be nice to use %lx and explicit casts + * (long) in code like this, so that it is less likely to + * break randomly when running on a machine with different + * word sizes. 
-- WHN 19991129 */ + lose("The new region at %x is not zero.", p); + } + } +} + } /* If the record_new_objects flag is 2 then all new regions created @@ -705,13 +640,13 @@ add_new_area(int first_page, int offset, int size) gc_abort(); } - new_area_start = 4096*first_page + offset; + new_area_start = PAGE_BYTES*first_page + offset; /* Search backwards for a prior area that this follows from. If found this will save adding a new area. */ for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) { unsigned area_end = - 4096*((*new_areas)[i].page) + PAGE_BYTES*((*new_areas)[i].page) + (*new_areas)[i].offset + (*new_areas)[i].size; /*FSHOW((stderr, @@ -726,12 +661,11 @@ add_new_area(int first_page, int offset, int size) (*new_areas)[i].size, first_page, offset, - size));*/ + size);*/ (*new_areas)[i].size += size; return; } } - /*FSHOW((stderr, "/add_new_area S1 %d %d %d\n", i, c, new_area_start));*/ (*new_areas)[new_areas_index].page = first_page; (*new_areas)[new_areas_index].offset = offset; @@ -746,7 +680,7 @@ add_new_area(int first_page, int offset, int size) max_new_areas = new_areas_index; } -/* Update the tables for the alloc_region. The region maybe added to +/* Update the tables for the alloc_region. The region may be added to * the new_areas. * * When done the alloc_region is set up so that the next quick alloc @@ -764,11 +698,6 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) int region_size; int byte_cnt; - /* - FSHOW((stderr, - "/gc_alloc_update_page_tables() to gen %d:\n", - gc_alloc_generation)); - */ first_page = alloc_region->first_page; @@ -778,8 +707,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - /* Skip if no bytes were allocated. */ + get_spinlock(&free_pages_lock,(int) alloc_region); if (alloc_region->free_pointer != alloc_region->start_addr) { + /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used)); @@ -792,11 +722,12 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) * first_object_offset. */ if (page_table[first_page].bytes_used == 0) gc_assert(page_table[first_page].first_object_offset == 0); + page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); @@ -805,8 +736,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) { - bytes_used = 4096; + if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; @@ -817,10 +748,11 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) * first_object_offset pointer to the start of the region, and set * the bytes_used. 
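 * (Worked example: a region that begins exactly at the start of page
 * P and spills onto page P+1 ends up with
 * page_table[P+1].first_object_offset == -PAGE_BYTES, i.e. the
 * negative byte offset from page P+1 back to the region's start.)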
*/ while (more) { + page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) - gc_assert(page_table[next_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG); else - gc_assert(page_table[next_page].allocated == BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); @@ -831,8 +763,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. */ more = 0; if ((bytes_used = (alloc_region->free_pointer - - page_address(next_page)))>4096) { - bytes_used = 4096; + - page_address(next_page)))>PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; @@ -868,23 +800,20 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) } else { /* There are no bytes allocated. Unallocate the first_page if * there are 0 bytes_used. */ + page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (page_table[first_page].bytes_used == 0) - page_table[first_page].allocated = FREE_PAGE; + page_table[first_page].allocated = FREE_PAGE_FLAG; } /* Unallocate any unused pages. */ while (next_page <= alloc_region->last_page) { gc_assert(page_table[next_page].bytes_used == 0); - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } - - /* Reset the alloc_region. */ - alloc_region->first_page = 0; - alloc_region->last_page = -1; - alloc_region->start_addr = page_address(0); - alloc_region->free_pointer = page_address(0); - alloc_region->end_addr = page_address(0); + release_spinlock(&free_pages_lock); + /* alloc_region is per-thread, we're ok to do this unlocked */ + gc_set_region_empty(alloc_region); } static inline void *gc_quick_alloc(int nbytes); @@ -895,139 +824,25 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) { int first_page; int last_page; - int region_size; - int restart_page; - int bytes_found; - int num_pages; int orig_first_page_bytes_used; int byte_cnt; int more; int bytes_used; int next_page; - int large = (nbytes >= large_object_size); - - /* - if (nbytes > 200000) - FSHOW((stderr, "/alloc_large %d\n", nbytes)); - */ - /* - FSHOW((stderr, - "/gc_alloc_large() for %d bytes from gen %d\n", - nbytes, gc_alloc_generation)); - */ + get_spinlock(&free_pages_lock,(int) alloc_region); - /* If the object is small, and there is room in the current region - then allocation it in the current region. */ - if (!large - && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes)) - return gc_quick_alloc(nbytes); - - /* Search for a contiguous free region of at least nbytes. If it's a - large object then align it on a page boundary by searching for a - free page. */ - - /* To allow the allocation of small objects without the danger of - using a page in the current boxed region, the search starts after - the current boxed free region. XX could probably keep a page - index ahead of the current region and bumped up here to save a - lot of re-scanning. 
*/ if (unboxed) { - restart_page = + first_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page; } else { - restart_page = generations[gc_alloc_generation].alloc_large_start_page; + first_page = generations[gc_alloc_generation].alloc_large_start_page; } - if (restart_page <= alloc_region->last_page) { - restart_page = alloc_region->last_page+1; + if (first_page <= alloc_region->last_page) { + first_page = alloc_region->last_page+1; } - do { - first_page = restart_page; - - if (large) - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE)) - first_page++; - else - /* FIXME: This looks extremely similar, perhaps identical, - * to code in gc_alloc_new_region(). It should be shared - * somehow. */ - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE) - && ((unboxed && - (page_table[first_page].allocated != UNBOXED_PAGE)) - || (!unboxed && - (page_table[first_page].allocated != BOXED_PAGE)) - || (page_table[first_page].large_object != 0) - || (page_table[first_page].gen != gc_alloc_generation) - || (page_table[first_page].bytes_used >= (4096-32)) - || (page_table[first_page].write_protected != 0) - || (page_table[first_page].dont_move != 0))) - first_page++; - - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! gc_alloc_large failed (first_page), nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } - - gc_assert(page_table[first_page].write_protected == 0); - - /* - FSHOW((stderr, - "/first_page=%d bytes_used=%d\n", - first_page, page_table[first_page].bytes_used)); - */ - - last_page = first_page; - bytes_found = 4096 - page_table[first_page].bytes_used; - num_pages = 1; - while ((bytes_found < nbytes) - && (last_page < (NUM_PAGES-1)) - && (page_table[last_page+1].allocated == FREE_PAGE)) { - last_page++; - num_pages++; - bytes_found += 4096; - gc_assert(page_table[last_page].write_protected == 0); - } - - region_size = (4096 - page_table[first_page].bytes_used) - + 4096*(last_page-first_page); - - gc_assert(bytes_found == region_size); - - /* - FSHOW((stderr, - "/last_page=%d bytes_found=%d num_pages=%d\n", - last_page, bytes_found, num_pages)); - */ - - restart_page = last_page + 1; - } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); - - /* Check for a failure */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! gc_alloc_large failed (restart_page), nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } - - /* - if (large) - FSHOW((stderr, - "/gc_alloc_large() gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n", - gc_alloc_generation, - nbytes, - bytes_found, - first_page, - last_page, - page_address(first_page))); - */ + last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); gc_assert(first_page > alloc_region->last_page); if (unboxed) @@ -1043,28 +858,28 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) * first_object_offset. 
*/ if (page_table[first_page].bytes_used == 0) { if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE; + page_table[first_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[first_page].allocated = BOXED_PAGE; + page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].first_object_offset = 0; - page_table[first_page].large_object = large; + page_table[first_page].large_object = 1; } if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[first_page].gen == gc_alloc_generation); - gc_assert(page_table[first_page].large_object == large); + gc_assert(page_table[first_page].large_object == 1); byte_cnt = 0; /* Calc. the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) { - bytes_used = 4096; + if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; @@ -1076,27 +891,28 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) * first_object_offset pointer to the start of the region, and * set the bytes_used. */ while (more) { - gc_assert(page_table[next_page].allocated == FREE_PAGE); + gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); if (unboxed) - page_table[next_page].allocated = UNBOXED_PAGE; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[next_page].allocated = BOXED_PAGE; + page_table[next_page].allocated = BOXED_PAGE_FLAG; page_table[next_page].gen = gc_alloc_generation; - page_table[next_page].large_object = large; + page_table[next_page].large_object = 1; page_table[next_page].first_object_offset = - orig_first_page_bytes_used - 4096*(next_page-first_page); + orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page); /* Calculate the number of bytes used in this page. */ more = 0; - if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) { - bytes_used = 4096; + if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; + page_table[next_page].write_protected=0; + page_table[next_page].dont_move=0; byte_cnt += bytes_used; - next_page++; } @@ -1113,26 +929,104 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) if (last_page+1 > last_free_page) { last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); - if (last_page+1 > last_used_page) - last_used_page = last_page+1; + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); } + release_spinlock(&free_pages_lock); return((void *)(page_address(first_page)+orig_first_page_bytes_used)); } +int +gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed) +{ + int first_page; + int last_page; + int region_size; + int restart_page=*restart_page_ptr; + int bytes_found; + int num_pages; + int large_p=(nbytes>=large_object_size); + gc_assert(free_pages_lock); + + /* Search for a contiguous free space of at least nbytes. 
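+ * (Callers hold free_pages_lock and use this as gc_alloc_new_region()
+ * does above, roughly:
+ *
+ *   first_page = generations[gc_alloc_generation].alloc_start_page;
+ *   last_page = gc_find_freeish_pages(&first_page, nbytes, unboxed);
+ *
+ * with first_page read and updated through restart_page_ptr.)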
If it's
+ * a large object then align it on a page boundary by searching
+ * for a free page. */
+
+ do {
+ first_page = restart_page;
+ if (large_p)
+ while ((first_page < NUM_PAGES)
+ && (page_table[first_page].allocated != FREE_PAGE_FLAG))
+ first_page++;
+ else
+ while (first_page < NUM_PAGES) {
+ if(page_table[first_page].allocated == FREE_PAGE_FLAG)
+ break;
+ if((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0)) {
+ break;
+ }
+ first_page++;
+ }
+
+ if (first_page >= NUM_PAGES) {
+ fprintf(stderr,
+ "Argh! gc_find_freeish_pages failed (first_page), nbytes=%d.\n",
+ nbytes);
+ print_generation_stats(1);
+ lose(NULL);
+ }
+
+ gc_assert(page_table[first_page].write_protected == 0);
+
+ last_page = first_page;
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
+ num_pages = 1;
+ while (((bytes_found < nbytes)
+ || (!large_p && (num_pages < 2)))
+ && (last_page < (NUM_PAGES-1))
+ && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ last_page++;
+ num_pages++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+
+ region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
+
+ gc_assert(bytes_found == region_size);
+ restart_page = last_page + 1;
+ } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
+
+ /* Check for a failure */
+ if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
+ fprintf(stderr,
+ "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%d.\n",
+ nbytes);
+ print_generation_stats(1);
+ lose(NULL);
+ }
+ *restart_page_ptr=first_page;
+ return last_page;
+}
+
 /* Allocate bytes. All the rest of the special-purpose allocation
- * functions will eventually call this (instead of just duplicating
- * parts of its code) */
+ * functions will eventually call this */
 void *
-gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
+gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region,
+ int quick_p)
 {
 void *new_free_pointer;
- struct alloc_region *my_region =
- unboxed_p ? &unboxed_region : &boxed_region;
 
- /* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */
+ if(nbytes>=large_object_size)
+ return gc_alloc_large(nbytes,unboxed_p,my_region);
 
 /* Check whether there is room in the current alloc region. */
 new_free_pointer = my_region->free_pointer + nbytes;
 
@@ -1151,86 +1045,40 @@ gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
 /* Set up a new region. */
 gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
 }
+ return((void *)new_obj);
 }
 
- /* Else not enough free space in the current region. */
+ /* Else not enough free space in the current region: retry with a
+ * new region. */
 
- /* If there some room left in the current region, enough to be worth
- * saving, then allocate a large object. */
- /* FIXME: "32" should be a named parameter. */
- if ((my_region->end_addr-my_region->free_pointer) > 32)
- return gc_alloc_large(nbytes, unboxed_p, my_region);
-
- /* Else find a new region. */
-
- /* Finished with the current region. */
 gc_alloc_update_page_tables(unboxed_p, my_region);
-
- /* Set up a new region. */
 gc_alloc_new_region(nbytes, unboxed_p, my_region);
-
- /* Should now be enough room. 
*/ - - /* Check whether there is room in the current region. */ - new_free_pointer = my_region->free_pointer + nbytes; - - if (new_free_pointer <= my_region->end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = my_region->free_pointer; - my_region->free_pointer = new_free_pointer; - - /* Check whether the current region is almost empty. */ - if ((my_region->end_addr - my_region->free_pointer) <= 32) { - /* If so find, finished with the current region. */ - gc_alloc_update_page_tables(unboxed_p, my_region); - - /* Set up a new region. */ - gc_alloc_new_region(32, unboxed_p, my_region); - } - - return((void *)new_obj); - } - - /* shouldn't happen */ - gc_assert(0); - return((void *) NIL); /* dummy value: return something ... */ + return gc_alloc_with_region(nbytes,unboxed_p,my_region,0); } +/* these are only used during GC: all allocation from the mutator calls + * alloc() -> gc_alloc_with_region() with the appropriate per-thread + * region */ -static void * -gc_alloc(int nbytes,int unboxed_p) +void * +gc_general_alloc(int nbytes,int unboxed_p,int quick_p) { - /* this is the only function that the external interface to - * allocation presently knows how to call: Lisp code will never - * allocate large objects, or to unboxed space, or `quick'ly. - * Any of that stuff will only ever happen inside of GC */ - return gc_general_alloc(nbytes,unboxed_p,0); + struct alloc_region *my_region = + unboxed_p ? &unboxed_region : &boxed_region; + return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p); } -/* Allocate space from the boxed_region. If there is not enough free - * space then call gc_alloc to do the job. A pointer to the start of - * the object is returned. */ static inline void * gc_quick_alloc(int nbytes) { return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); } -/* Allocate space for the possibly large boxed object. If it is a - * large object then do a large alloc else use gc_quick_alloc. Note - * that gc_quick_alloc will eventually fall through to - * gc_general_alloc which may allocate the object in a large way - * anyway, but based on decisions about the free space in the current - * region, not the object size itself */ - static inline void * gc_quick_alloc_large(int nbytes) { - if (nbytes >= large_object_size) - return gc_alloc_large(nbytes, ALLOC_BOXED, &boxed_region); - else - return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); } static inline void * @@ -1245,18 +1093,10 @@ gc_quick_alloc_unboxed(int nbytes) return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); } -/* Allocate space for the object. If it is a large object then do a - * large alloc else allocate from the current region. If there is not - * enough free space then call general gc_alloc_unboxed() to do the job. - * - * A pointer to the start of the object is returned. */ static inline void * gc_quick_alloc_large_unboxed(int nbytes) { - if (nbytes >= large_object_size) - return gc_alloc_large(nbytes,ALLOC_UNBOXED,&unboxed_region); - else - return gc_quick_alloc_unboxed(nbytes); + return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); } /* @@ -1278,7 +1118,6 @@ copy_large_object(lispobj object, int nwords) { int tag; lispobj *new; - lispobj *source, *dest; int first_page; gc_assert(is_lisp_pointer(object)); @@ -1286,7 +1125,7 @@ copy_large_object(lispobj object, int nwords) gc_assert((nwords & 0x01) == 0); - /* Check whether it's a large object. */ + /* Check whether it's in a large object region. 
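+ * (Pages of such a region have page_table[..].large_object set, and
+ * the object is promoted below by retargeting its pages to new_space
+ * rather than by copying it.)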
*/ first_page = find_page_index((void *)object); gc_assert(first_page >= 0); @@ -1309,23 +1148,23 @@ copy_large_object(lispobj object, int nwords) next_page = first_page; remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert(page_table[next_page].allocated == BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; /* Remove any write-protection. We should be able to rely * on the write-protect flag to avoid redundant calls. */ if (page_table[next_page].write_protected) { - os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL); + os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL); page_table[next_page].write_protected = 0; } - remaining_bytes -= 4096; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -1336,7 +1175,7 @@ copy_large_object(lispobj object, int nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - gc_assert(page_table[next_page].allocated = BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1346,12 +1185,12 @@ copy_large_object(lispobj object, int nwords) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - (page_table[next_page].allocated == BOXED_PAGE) && + (page_table[next_page].allocated == BOXED_PAGE_FLAG) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* Checks out OK, free the page. Don't need to bother zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected as they @@ -1359,7 +1198,7 @@ copy_large_object(lispobj object, int nwords) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; @@ -1380,17 +1219,7 @@ copy_large_object(lispobj object, int nwords) /* Allocate space. */ new = gc_quick_alloc_large(nwords*4); - dest = new; - source = (lispobj *) native_pointer(object); - - /* Copy the object. */ - while (nwords > 0) { - dest[0] = source[0]; - dest[1] = source[1]; - dest += 2; - source += 2; - nwords -= 2; - } + memcpy(new,native_pointer(object),nwords*4); /* Return Lisp pointer of new object. */ return ((lispobj) new) | tag; @@ -1403,7 +1232,6 @@ copy_unboxed_object(lispobj object, int nwords) { int tag; lispobj *new; - lispobj *source, *dest; gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); @@ -1415,17 +1243,7 @@ copy_unboxed_object(lispobj object, int nwords) /* Allocate space. */ new = gc_quick_alloc_unboxed(nwords*4); - dest = new; - source = (lispobj *) native_pointer(object); - - /* Copy the object. 
*/ - while (nwords > 0) { - dest[0] = source[0]; - dest[1] = source[1]; - dest += 2; - source += 2; - nwords -= 2; - } + memcpy(new,native_pointer(object),nwords*4); /* Return Lisp pointer of new object. */ return ((lispobj) new) | tag; @@ -1474,18 +1292,18 @@ copy_large_unboxed_object(lispobj object, int nwords) next_page = first_page; remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)); + gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE; - remaining_bytes -= 4096; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -1496,7 +1314,7 @@ copy_large_unboxed_object(lispobj object, int nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1506,13 +1324,13 @@ copy_large_unboxed_object(lispobj object, int nwords) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)) && + ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* Checks out OK, free the page. Don't need to both zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected, even if @@ -1520,7 +1338,7 @@ copy_large_unboxed_object(lispobj object, int nwords) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; @@ -1785,26 +1603,23 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) code objects. Check. */ fixups = new_code->constants[0]; - /* It will be 0 or the unbound-marker if there are no fixups, and - * will be an other pointer if it is valid. */ + /* It will be 0 or the unbound-marker if there are no fixups (as + * will be the case if the code object has been purified, for + * example) and will be an other pointer if it is valid. */ if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) || !is_lisp_pointer(fixups)) { /* Check for possible errors. 
*/ if (check_code_fixups) sniff_code_object(new_code, displacement); - /*fprintf(stderr,"Fixups for code object not found!?\n"); - fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n", - new_code, nheader_words, ncode_words); - fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n", - constants_start_addr,constants_end_addr, - code_start_addr,code_end_addr);*/ return; } fixups_vector = (struct vector *)native_pointer(fixups); /* Could be pointing to a forwarding pointer. */ + /* FIXME is this always in from_space? if so, could replace this code with + * forwarding_pointer_p/forwarding_pointer_value */ if (is_lisp_pointer(fixups) && (find_page_index((void*)fixups_vector) != -1) && (fixups_vector->header == 0x01)) { @@ -2169,21 +1984,21 @@ search_space(lispobj *start, size_t words, lispobj *pointer) return (NULL); } -static lispobj* +lispobj* search_read_only_space(lispobj *pointer) { lispobj* start = (lispobj*)READ_ONLY_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER); + lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0); if ((pointer < start) || (pointer >= end)) return NULL; return (search_space(start, (pointer+2)-start, pointer)); } -static lispobj * +lispobj * search_static_space(lispobj *pointer) { lispobj* start = (lispobj*)STATIC_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER); + lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0); if ((pointer < start) || (pointer >= end)) return NULL; return (search_space(start, (pointer+2)-start, pointer)); @@ -2194,11 +2009,12 @@ search_static_space(lispobj *pointer) lispobj * search_dynamic_space(lispobj *pointer) { - int page_index = find_page_index(pointer); + int page_index = find_page_index(pointer); lispobj *start; /* The address may be invalid, so do some checks. */ - if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE)) + if ((page_index == -1) || + (page_table[page_index].allocated == FREE_PAGE_FLAG)) return NULL; start = (lispobj *)((void *)page_address(page_index) + page_table[page_index].first_object_offset); @@ -2207,7 +2023,8 @@ search_dynamic_space(lispobj *pointer) /* Is there any possibility that pointer is a valid Lisp object * reference, and/or something else (e.g. subroutine call return - * address) which should prevent us from moving the referred-to thing? */ + * address) which should prevent us from moving the referred-to thing? + * This is called from preserve_pointers() */ static int possibly_valid_dynamic_space_pointer(lispobj *pointer) { @@ -2234,21 +2051,7 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) /* Check that the object pointed to is consistent with the pointer * low tag. - * - * FIXME: It's not safe to rely on the result from this check - * before an object is initialized. Thus, if we were interrupted - * just as an object had been allocated but not initialized, the - * GC relying on this result could bogusly reclaim the memory. - * However, we can't really afford to do without this check. So - * we should make it safe somehow. - * (1) Perhaps just review the code to make sure - * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such - * thing is wrapped around critical sections where allocated - * memory type bits haven't been set. - * (2) Perhaps find some other hack to protect against this, e.g. 
- * recording the result of the last call to allocate-lisp-memory, - * and returning true from this function when *pointer is - * a reference to that result. */ + */ switch (lowtag_of((lispobj)pointer)) { case FUN_POINTER_LOWTAG: /* Start_addr should be the enclosing code object, or a closure @@ -2375,7 +2178,8 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) case COMPLEX_LONG_FLOAT_WIDETAG: #endif case SIMPLE_ARRAY_WIDETAG: - case COMPLEX_STRING_WIDETAG: + case COMPLEX_BASE_STRING_WIDETAG: + case COMPLEX_VECTOR_NIL_WIDETAG: case COMPLEX_BIT_VECTOR_WIDETAG: case COMPLEX_VECTOR_WIDETAG: case COMPLEX_ARRAY_WIDETAG: @@ -2389,12 +2193,17 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) #ifdef LONG_FLOAT_WIDETAG case LONG_FLOAT_WIDETAG: #endif - case SIMPLE_STRING_WIDETAG: + case SIMPLE_BASE_STRING_WIDETAG: case SIMPLE_BIT_VECTOR_WIDETAG: + case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: @@ -2469,15 +2278,20 @@ maybe_adjust_large_object(lispobj *where) /* Check whether it's a vector or bignum object. */ switch (widetag_of(where[0])) { case SIMPLE_VECTOR_WIDETAG: - boxed = BOXED_PAGE; + boxed = BOXED_PAGE_FLAG; break; case BIGNUM_WIDETAG: - case SIMPLE_STRING_WIDETAG: + case SIMPLE_BASE_STRING_WIDETAG: case SIMPLE_BIT_VECTOR_WIDETAG: + case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: @@ -2505,7 +2319,7 @@ maybe_adjust_large_object(lispobj *where) #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG: #endif - boxed = UNBOXED_PAGE; + boxed = UNBOXED_PAGE_FLAG; break; default: return; @@ -2527,21 +2341,21 @@ maybe_adjust_large_object(lispobj *where) next_page = first_page; remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == BOXED_PAGE) - || (page_table[next_page].allocated == UNBOXED_PAGE)); + gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG) + || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset == - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].allocated = boxed; /* Shouldn't be write-protected at this stage. Essential that the * pages aren't. 
*/ gc_assert(!page_table[next_page].write_protected); - remaining_bytes -= 4096; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -2563,13 +2377,13 @@ maybe_adjust_large_object(lispobj *where) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)) && + ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* It checks out OK, free the page. We don't need to both zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write protected as they @@ -2577,7 +2391,7 @@ maybe_adjust_large_object(lispobj *where) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; @@ -2599,11 +2413,8 @@ maybe_adjust_large_object(lispobj *where) * page_table so that it will not be relocated during a GC. * * This involves locating the page it points to, then backing up to - * the first page that has its first object start at offset 0, and - * then marking all pages dont_move from the first until a page that - * ends by being full, or having free gen. - * - * This ensures that objects spanning pages are not broken. + * the start of its region, then marking all pages dont_move from there + * up to the first page that's not full or has a different generation * * It is assumed that all the page static flags have been cleared at * the start of a GC. @@ -2620,45 +2431,53 @@ preserve_pointer(void *addr) /* quick check 1: Address is quite likely to have been invalid. */ if ((addr_page_index == -1) - || (page_table[addr_page_index].allocated == FREE_PAGE) + || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG) || (page_table[addr_page_index].bytes_used == 0) || (page_table[addr_page_index].gen != from_space) /* Skip if already marked dont_move. */ || (page_table[addr_page_index].dont_move != 0)) return; - + gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG)); /* (Now that we know that addr_page_index is in range, it's * safe to index into page_table[] with it.) */ region_allocation = page_table[addr_page_index].allocated; /* quick check 2: Check the offset within the page. * - * FIXME: The mask should have a symbolic name, and ideally should - * be derived from page size instead of hardwired to 0xfff. - * (Also fix other uses of 0xfff, elsewhere.) */ - if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used) + */ + if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used) return; /* Filter out anything which can't be a pointer to a Lisp object * (or, as a special case which also requires dont_move, a return * address referring to something in a CodeObject). This is * expensive but important, since it vastly reduces the - * probability that random garbage will be bogusly interpreter as + * probability that random garbage will be bogusly interpreted as * a pointer which prevents a page from moving. 
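 * (For example, a word on the C stack whose bit pattern happens to
 * equal an address inside the dynamic space would otherwise pin that
 * page for the whole collection.)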
*/ - if (!possibly_valid_dynamic_space_pointer(addr)) + if (!(possibly_valid_dynamic_space_pointer(addr))) return; - /* Work backwards to find a page with a first_object_offset of 0. - * The pages should be contiguous with all bytes used in the same - * gen. Assumes the first_object_offset is negative or zero. */ + /* Find the beginning of the region. Note that there may be + * objects in the region preceding the one that we were passed a + * pointer to: if this is the case, we will write-protect all the + * previous objects' pages too. */ + +#if 0 + /* I think this'd work just as well, but without the assertions. + * -dan 2004.01.01 */ + first_page= + find_page_index(page_address(addr_page_index)+ + page_table[addr_page_index].first_object_offset); +#else first_page = addr_page_index; while (page_table[first_page].first_object_offset != 0) { --first_page; /* Do some checks. */ - gc_assert(page_table[first_page].bytes_used == 4096); + gc_assert(page_table[first_page].bytes_used == PAGE_BYTES); gc_assert(page_table[first_page].gen == from_space); gc_assert(page_table[first_page].allocated == region_allocation); } +#endif /* Adjust any large objects before promotion as they won't be * copied after promotion. */ @@ -2668,10 +2487,10 @@ preserve_pointer(void *addr) * free area in which case it's ignored here. Note it gets * through the valid pointer test above because the tail looks * like conses. */ - if ((page_table[addr_page_index].allocated == FREE_PAGE) + if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ - || (((unsigned)addr & 0xfff) + || (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)) { FSHOW((stderr, "weird? ignore ptr 0x%x to freed area of large object\n", @@ -2705,9 +2524,9 @@ preserve_pointer(void *addr) gc_assert(!page_table[i].write_protected); /* Check whether this is the last page in this contiguous block.. */ - if ((page_table[i].bytes_used < 4096) - /* ..or it is 4096 and is the last in the block */ - || (page_table[i+1].allocated == FREE_PAGE) + if ((page_table[i].bytes_used < PAGE_BYTES) + /* ..or it is PAGE_BYTES and is the last in the block */ + || (page_table[i+1].allocated == FREE_PAGE_FLAG) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. gen */ || (page_table[i+1].first_object_offset == 0)) @@ -2741,12 +2560,13 @@ update_page_write_prot(int page) int num_words = page_table[page].bytes_used / 4; /* Shouldn't be a free page. */ - gc_assert(page_table[page].allocated != FREE_PAGE); + gc_assert(page_table[page].allocated != FREE_PAGE_FLAG); gc_assert(page_table[page].bytes_used != 0); - /* Skip if it's already write-protected or an unboxed page. */ + /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected - || (page_table[page].allocated == UNBOXED_PAGE)) + || page_table[page].dont_move + || (page_table[page].allocated & UNBOXED_PAGE_FLAG)) return (0); /* Scan the page for pointers to younger generations or the @@ -2759,7 +2579,7 @@ update_page_write_prot(int page) /* Check that it's in the dynamic space */ if (index != -1) if (/* Does it point to a younger or the temp. generation? 
*/ - ((page_table[index].allocated != FREE_PAGE) + ((page_table[index].allocated != FREE_PAGE_FLAG) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) || (page_table[index].gen == NUM_GENERATIONS))) @@ -2779,7 +2599,7 @@ update_page_write_prot(int page) /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/ os_protect((void *)page_addr, - 4096, + PAGE_BYTES, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE); /* Note the page as protected in the page tables. */ @@ -2792,7 +2612,7 @@ update_page_write_prot(int page) /* Scavenge a generation. * * This will not resolve all pointers when generation is the new - * space, as new objects may be added which are not check here - use + * space, as new objects may be added which are not checked here - use * scavenge_newspace generation. * * Write-protected pages should not have any pointers to the @@ -2834,61 +2654,42 @@ scavenge_generation(int generation) #endif for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated == BOXED_PAGE) + if ((page_table[i].allocated & BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { - int last_page; + int last_page,j; + int write_protected=1; - /* This should be the start of a contiguous block. */ + /* This should be the start of a region */ gc_assert(page_table[i].first_object_offset == 0); - /* We need to find the full extent of this contiguous - * block in case objects span pages. */ - - /* Now work forward until the end of this contiguous area - * is found. A small area is preferred as there is a - * better chance of its pages being write-protected. */ - for (last_page = i; ; last_page++) - /* Check whether this is the last page in this contiguous - * block. */ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ - || (page_table[last_page+1].allocated != BOXED_PAGE) + /* Now work forward until the end of the region */ + for (last_page = i; ; last_page++) { + write_protected = + write_protected && page_table[last_page].write_protected; + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ + || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].first_object_offset == 0)) break; - - /* Do a limited check for write_protected pages. If all pages - * are write_protected then there is no need to scavenge. */ - { - int j, all_wp = 1; - for (j = i; j <= last_page; j++) - if (page_table[j].write_protected == 0) { - all_wp = 0; - break; - } -#if !SC_GEN_CK - if (all_wp == 0) -#endif - { - scavenge(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*4096)/4); - - /* Now scan the pages and write protect those - * that don't have pointers to younger - * generations. */ - if (enable_page_protection) { - for (j = i; j <= last_page; j++) { - num_wp += update_page_write_prot(j); - } - } + } + if (!write_protected) { + scavenge(page_address(i), (page_table[last_page].bytes_used + + (last_page-i)*PAGE_BYTES)/4); + + /* Now scan the pages and write protect those that + * don't have pointers to younger generations. 
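+ * (update_page_write_prot() is assumed here to return 1 when it
+ * actually protects a page, so num_wp ends up counting the pages
+ * newly protected on this pass.)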
*/
+ if (enable_page_protection) {
+ for (j = i; j <= last_page; j++) {
+ num_wp += update_page_write_prot(j);
 }
+ }
 }
 i = last_page;
 }
 }
-
 if ((gencgc_verbose > 1) && (num_wp != 0)) {
 FSHOW((stderr,
 "/write protected %d pages within generation %d\n",
@@ -2899,7 +2700,7 @@
 /* Check that none of the write_protected pages in this generation
 * have been written to. */
 for (i = 0; i < NUM_PAGES; i++) {
- if ((page_table[i].allocation != FREE_PAGE)
+ if ((page_table[i].allocation != FREE_PAGE_FLAG)
 && (page_table[i].bytes_used != 0)
 && (page_table[i].gen == generation)
 && (page_table[i].write_protected_cleared != 0)) {
@@ -2951,9 +2752,9 @@ scavenge_newspace_generation_one_scan(int generation)
 FSHOW((stderr,
 "/starting one full scan of newspace generation %d\n",
 generation));
-
 for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated == BOXED_PAGE)
+ /* Note that this skips over open regions when it encounters them. */
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
 && (page_table[i].bytes_used != 0)
 && (page_table[i].gen == generation)
 && ((page_table[i].write_protected == 0)
 /* (This may be redundant as write_protected is now
 * cleared before promotion.) */
 || (page_table[i].dont_move == 1))) {
 int last_page;
+ int all_wp=1;
 
 /* The scavenge will start at the first_object_offset of page i.
 *
@@ -2971,52 +2773,36 @@
 * is found. A small area is preferred as there is a
 * better chance of its pages being write-protected. */
 for (last_page = i; ;last_page++) {
+ /* If all pages are write-protected and movable,
+ * then no need to scavenge */
+ all_wp=all_wp && page_table[last_page].write_protected &&
+ !page_table[last_page].dont_move;
+
 /* Check whether this is the last page in this
 * contiguous block */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
- || (page_table[last_page+1].allocated != BOXED_PAGE)
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
 || (page_table[last_page+1].bytes_used == 0)
 || (page_table[last_page+1].gen != generation)
 || (page_table[last_page+1].first_object_offset == 0))
 break;
 }
 
- /* Do a limited check for write-protected pages. If all
- * pages are write-protected then no need to scavenge,
- * except if the pages are marked dont_move. */
- {
- int j, all_wp = 1;
- for (j = i; j <= last_page; j++)
- if ((page_table[j].write_protected == 0)
- || (page_table[j].dont_move != 0)) {
- all_wp = 0;
- break;
- }
-
- if (!all_wp) {
- int size;
-
- /* Calculate the size. */
- if (last_page == i)
- size = (page_table[last_page].bytes_used
- - page_table[i].first_object_offset)/4;
- else
- size = (page_table[last_page].bytes_used
- + (last_page-i)*4096
- - page_table[i].first_object_offset)/4;
-
- {
- new_areas_ignore_page = last_page;
-
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
-
- }
- }
+ /* Do a limited check for write-protected pages. 
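+ * (all_wp was accumulated in the loop above; pages marked dont_move
+ * keep all_wp false, because pinned pages must be scavenged even
+ * when they are write-protected.)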
*/ + if (!all_wp) { + int size; + + size = (page_table[last_page].bytes_used + + (last_page-i)*PAGE_BYTES + - page_table[i].first_object_offset)/4; + new_areas_ignore_page = last_page; + + scavenge(page_address(i) + + page_table[i].first_object_offset, + size); + } - i = last_page; } } @@ -3035,13 +2821,12 @@ scavenge_newspace_generation(int generation) struct new_area (*current_new_areas)[] = &new_areas_1; int current_new_areas_index; - /* the new_areas created but the previous scavenge cycle */ + /* the new_areas created by the previous scavenge cycle */ struct new_area (*previous_new_areas)[] = NULL; int previous_new_areas_index; /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); /* Turn on the recording of new areas by gc_alloc(). */ new_areas = current_new_areas; @@ -3058,8 +2843,7 @@ scavenge_newspace_generation(int generation) record_new_objects = 2; /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); /* Grab new_areas_index. */ current_new_areas_index = new_areas_index; @@ -3106,8 +2890,7 @@ scavenge_newspace_generation(int generation) record_new_objects = 2; /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); } else { @@ -3119,13 +2902,11 @@ scavenge_newspace_generation(int generation) int offset = (*previous_new_areas)[i].offset; int size = (*previous_new_areas)[i].size / 4; gc_assert((*previous_new_areas)[i].size % 4 == 0); - scavenge(page_address(page)+offset, size); } /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); } current_new_areas_index = new_areas_index; @@ -3142,7 +2923,7 @@ scavenge_newspace_generation(int generation) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < NUM_PAGES; i++) { - if ((page_table[i].allocation != FREE_PAGE) + if ((page_table[i].allocation != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) @@ -3165,7 +2946,7 @@ unprotect_oldspace(void) int i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == from_space)) { void *page_start; @@ -3175,7 +2956,7 @@ unprotect_oldspace(void) /* Remove any write-protection. We should be able to rely * on the write-protect flag to avoid redundant calls. */ if (page_table[i].write_protected) { - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[i].write_protected = 0; } } @@ -3198,7 +2979,7 @@ free_oldspace(void) do { /* Find a first page for the next region of pages. 
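
(A sketch of the record/rescan cycle that scavenge_newspace_generation above implements; it assumes new_areas_2 exists as the partner buffer of new_areas_1 and leaves out the overflow path that falls back to a full newspace scan:)

    struct new_area (*current)[] = &new_areas_1;
    struct new_area (*previous)[];
    int i, previous_index;

    new_areas = current;                  /* gc_alloc() records here */
    new_areas_index = 0;
    scavenge_newspace_generation_one_scan(generation);
    gc_alloc_update_all_page_tables();

    while (new_areas_index > 0) {         /* repeat until quiescent */
        previous = current;
        previous_index = new_areas_index;
        current = (current == &new_areas_1) ? &new_areas_2 : &new_areas_1;
        new_areas = current;
        new_areas_index = 0;
        for (i = 0; i < previous_index; i++)
            scavenge(page_address((*previous)[i].page)
                         + (*previous)[i].offset,
                     (*previous)[i].size / 4);
        gc_alloc_update_all_page_tables();
    }

The double buffering is what lets the scavenge itself allocate: objects copied while one buffer is being replayed are recorded into the other.
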
*/ while ((first_page < last_free_page) - && ((page_table[first_page].allocated == FREE_PAGE) + && ((page_table[first_page].allocated == FREE_PAGE_FLAG) || (page_table[first_page].bytes_used == 0) || (page_table[first_page].gen != from_space))) first_page++; @@ -3214,7 +2995,7 @@ free_oldspace(void) bytes_freed += page_table[last_page].bytes_used; generations[page_table[last_page].gen].bytes_allocated -= page_table[last_page].bytes_used; - page_table[last_page].allocated = FREE_PAGE; + page_table[last_page].allocated = FREE_PAGE_FLAG; page_table[last_page].bytes_used = 0; /* Remove any write-protection. We should be able to rely @@ -3223,14 +3004,14 @@ free_oldspace(void) void *page_start = (void *)page_address(last_page); if (page_table[last_page].write_protected) { - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[last_page].write_protected = 0; } } last_page++; } while ((last_page < last_free_page) - && (page_table[last_page].allocated != FREE_PAGE) + && (page_table[last_page].allocated != FREE_PAGE_FLAG) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); @@ -3244,8 +3025,8 @@ free_oldspace(void) page_start = (void *)page_address(first_page); - os_invalidate(page_start, 4096*(last_page-first_page)); - addr = os_validate(page_start, 4096*(last_page-first_page)); + os_invalidate(page_start, PAGE_BYTES*(last_page-first_page)); + addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page)); if (addr == NULL || addr != page_start) { /* Is this an error condition? I couldn't really tell from * the old CMU CL code, which fprintf'ed a message with @@ -3263,7 +3044,7 @@ free_oldspace(void) int *page_start; page_start = (int *)page_address(first_page); - i586_bzero(page_start, 4096*(last_page-first_page)); + i586_bzero(page_start, PAGE_BYTES*(last_page-first_page)); } first_page = last_page; @@ -3312,7 +3093,7 @@ verify_space(lispobj *start, size_t words) int is_in_dynamic_space = (find_page_index((void*)start) != -1); int is_in_readonly_space = (READ_ONLY_SPACE_START <= (unsigned)start && - (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER)); + (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); while (words > 0) { size_t count = 1; @@ -3322,16 +3103,16 @@ verify_space(lispobj *start, size_t words) int page_index = find_page_index((void*)thing); int to_readonly_space = (READ_ONLY_SPACE_START <= thing && - thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER)); + thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); int to_static_space = (STATIC_SPACE_START <= thing && - thing < SymbolValue(STATIC_SPACE_FREE_POINTER)); + thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0)); /* Does it point to the dynamic space? */ if (page_index != -1) { /* If it's within the dynamic space it should point to a used * page. XX Could check the offset too. */ - if ((page_table[page_index].allocated != FREE_PAGE) + if ((page_table[page_index].allocated != FREE_PAGE_FLAG) && (page_table[page_index].bytes_used == 0)) lose ("Ptr %x @ %x sees free page.", thing, start); /* Check that it doesn't point to a forwarding pointer! */ @@ -3347,9 +3128,17 @@ verify_space(lispobj *start, size_t words) /* Does it point to a plausible object? This check slows * it down a lot (so it's commented out). * - * FIXME: Add a variable to enable this dynamically. 
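
(A sketch of the two recycling strategies free_oldspace() above picks between for a run of pages [first_page, last_page), assuming os_invalidate()/os_validate() behave like munmap()/mmap() over the same range:)

    int npages = last_page - first_page;
    void *start = page_address(first_page);

    if (gencgc_unmap_zero) {
        /* hand the pages back; the remap must land at the same
         * address and comes back zero-filled from the OS */
        os_invalidate(start, PAGE_BYTES * npages);
        if (os_validate(start, PAGE_BYTES * npages) != start)
            lose("free_oldspace: pages did not remap in place");
    } else {
        /* otherwise zero in place, avoiding the mmap traffic */
        i586_bzero(start, PAGE_BYTES * npages);
    }
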
*/ - /* if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) { - * lose("ptr %x to invalid object %x", thing, start); */ + * "a lot" is serious: it ate 50 minutes cpu time on + * my duron 950 before I came back from lunch and + * killed it. + * + * FIXME: Add a variable to enable this + * dynamically. */ + /* + if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) { + lose("ptr %x to invalid object %x", thing, start); + } + */ } else { /* Verify that it points to another valid space. */ if (!to_readonly_space && !to_static_space @@ -3358,9 +3147,8 @@ verify_space(lispobj *start, size_t words) } } } else { - if (thing & 0x3) { /* Skip fixnums. FIXME: There should be an - * is_fixnum for this. */ - + if (!(fixnump(thing))) { + /* skip fixnums */ switch(widetag_of(*start)) { /* boxed objects */ @@ -3368,7 +3156,8 @@ verify_space(lispobj *start, size_t words) case RATIO_WIDETAG: case COMPLEX_WIDETAG: case SIMPLE_ARRAY_WIDETAG: - case COMPLEX_STRING_WIDETAG: + case COMPLEX_BASE_STRING_WIDETAG: + case COMPLEX_VECTOR_NIL_WIDETAG: case COMPLEX_BIT_VECTOR_WIDETAG: case COMPLEX_VECTOR_WIDETAG: case COMPLEX_ARRAY_WIDETAG: @@ -3406,7 +3195,7 @@ verify_space(lispobj *start, size_t words) * there's no byte compiler, but I've got * too much to worry about right now to try * to make sure. -- WHN 2001-10-06 */ - && !(code->trace_table_offset & 0x3) + && fixnump(code->trace_table_offset) /* Only when enabled */ && verify_dynamic_code_check) { FSHOW((stderr, @@ -3453,12 +3242,17 @@ verify_space(lispobj *start, size_t words) #ifdef COMPLEX_LONG_FLOAT_WIDETAG case COMPLEX_LONG_FLOAT_WIDETAG: #endif - case SIMPLE_STRING_WIDETAG: + case SIMPLE_BASE_STRING_WIDETAG: case SIMPLE_BIT_VECTOR_WIDETAG: + case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: @@ -3511,18 +3305,20 @@ verify_gc(void) * to grep for all foo_size and rename the appropriate ones to * foo_count. 
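
(The fixnump() test adopted above, restated for illustration; the real macro lives in runtime.h. On 32-bit x86 a fixnum is any word whose two low tag bits are zero, the integer value being stored shifted left by two:)

    /* fixnump(thing) is equivalent to ((thing) & 3) == 0 */
    lispobj five = 5 << 2;      /* the Lisp integer 5, low bits 00 */
    gc_assert(fixnump(five));
    gc_assert(fixnum_value(five) == 5);
    /* a cons pointer carries list-pointer lowtag 3 (low bits 11),
     * so it correctly fails the test and is examined as a pointer */
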
*/ int read_only_space_size = - (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) + (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0) - (lispobj*)READ_ONLY_SPACE_START; int static_space_size = - (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER) + (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0) - (lispobj*)STATIC_SPACE_START; + struct thread *th; + for_each_thread(th) { int binding_stack_size = - (lispobj*)SymbolValue(BINDING_STACK_POINTER) - - (lispobj*)BINDING_STACK_START; - + (lispobj*)SymbolValue(BINDING_STACK_POINTER,th) + - (lispobj*)th->binding_stack_start; + verify_space(th->binding_stack_start, binding_stack_size); + } verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size); verify_space((lispobj*)STATIC_SPACE_START , static_space_size); - verify_space((lispobj*)BINDING_STACK_START , binding_stack_size); } static void @@ -3531,7 +3327,7 @@ verify_generation(int generation) int i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { int last_page; @@ -3548,8 +3344,8 @@ verify_generation(int generation) for (last_page = i; ;last_page++) /* Check whether this is the last page in this contiguous * block. */ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ || (page_table[last_page+1].allocated != region_allocation) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) @@ -3557,7 +3353,7 @@ verify_generation(int generation) break; verify_space(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*4096)/4); + + (last_page-i)*PAGE_BYTES)/4); i = last_page; } } @@ -3570,7 +3366,7 @@ verify_zero_fill(void) int page; for (page = 0; page < last_free_page; page++) { - if (page_table[page].allocated == FREE_PAGE) { + if (page_table[page].allocated == FREE_PAGE_FLAG) { /* The whole page should be zero filled. */ int *start_addr = (int *)page_address(page); int size = 1024; @@ -3581,7 +3377,7 @@ verify_zero_fill(void) } } } else { - int free_bytes = 4096 - page_table[page].bytes_used; + int free_bytes = PAGE_BYTES - page_table[page].bytes_used; if (free_bytes > 0) { int *start_addr = (int *)((unsigned)page_address(page) + page_table[page].bytes_used); @@ -3602,13 +3398,9 @@ void gencgc_verify_zero_fill(void) { /* Flush the alloc regions updating the tables. */ - boxed_region.free_pointer = current_region_free_pointer; - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); SHOW("verifying zero fill"); verify_zero_fill(); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; } static void @@ -3632,15 +3424,16 @@ write_protect_generation_pages(int generation) gc_assert(generation < NUM_GENERATIONS); for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated == BOXED_PAGE) + if ((page_table[i].allocated == BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) + && !page_table[i].dont_move && (page_table[i].gen == generation)) { void *page_start; page_start = (void *)page_address(i); os_protect(page_start, - 4096, + PAGE_BYTES, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); /* Note the page as protected in the page tables. 
*/ @@ -3664,7 +3457,7 @@ garbage_collect_generation(int generation, int raise)
 unsigned long bytes_freed;
 unsigned long i;
 unsigned long static_space_size;
- + struct thread *th;
 gc_assert(generation <= (NUM_GENERATIONS-1));
 /* The oldest generation can't be raised. */
@@ -3697,7 +3490,8 @@ garbage_collect_generation(int generation, int raise)
 /* Before any pointers are preserved, the dont_move flags on the
 * pages need to be cleared. */
 for (i = 0; i < last_free_page; i++)
- page_table[i].dont_move = 0;
+ if(page_table[i].gen==from_space)
+ page_table[i].dont_move = 0;
 /* Un-write-protect the old-space pages. This is essential for the
 * promoted pages as they may contain pointers into the old-space
 * which need to be scavenged. It also helps avoid unnecessary page
 * faults as forwarding pointers are written into them. They need to
 * be un-protected anyway before unmapping later. */
 unprotect_oldspace();
- /* Scavenge the stack's conservative roots. */
- {
+ /* Scavenge the stacks' conservative roots. */
+
+ /* there are potentially two stacks for each thread: the main
+ * stack, which may contain Lisp pointers, and the alternate stack.
+ * We don't ever run Lisp code on the altstack, but it may
+ * host a sigcontext with lisp objects in it */
+
+ /* what we need to do: (1) find the stack pointer for the main
+ * stack; scavenge it (2) find the interrupt context on the
+ * alternate stack that might contain lisp values, and scavenge
+ * that */
+
+ /* we assume that none of the preceding applies to the thread that
+ * initiates GC. If you ever call GC from inside an altstack
+ * handler, you will lose. */
+ for_each_thread(th) {
 void **ptr;
- for (ptr = (void **)CONTROL_STACK_END - 1;
- ptr > (void **)&raise;
- ptr--) {
+ void **esp=(void **)-1;
+ int i,free;
+#ifdef LISP_FEATURE_SB_THREAD
+ if(th==arch_os_get_current_thread()) {
+ esp = (void **) &raise;
+ } else {
+ void **esp1;
+ free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+ for(i=free-1;i>=0;i--) {
+ os_context_t *c=th->interrupt_contexts[i];
+ esp1 = (void **) *os_context_register_addr(c,reg_ESP);
+ if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
+ if(esp1<esp) esp=esp1;
+ for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+ preserve_pointer(*ptr);
+ }
+ }
+ }
+ }
+#else
+ esp = (void **) &raise;
+#endif
+ for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
 preserve_pointer(*ptr);
 }
 }
@@ -3722,9 +3550,7 @@ garbage_collect_generation(int generation, int raise)
 fprintf(stderr,
 "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
 num_dont_move_pages,
- /* FIXME: 4096 should be symbolic constant here and
- * prob'ly elsewhere too. */
- num_dont_move_pages * 4096);
+ num_dont_move_pages * PAGE_BYTES);
 }
 #endif
 /* Scavenge the Lisp functions of the interrupt handlers, taking
 * care to avoid SIG_DFL and SIG_IGN. */
+ for_each_thread(th) {
+ struct interrupt_data *data=th->interrupt_data;
 for (i = 0; i < NSIG; i++) {
- union interrupt_handler handler = interrupt_handlers[i];
+ union interrupt_handler handler = data->interrupt_handlers[i];
 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- scavenge((lispobj *)(interrupt_handlers + i), 1);
+ scavenge((lispobj *)(data->interrupt_handlers + i), 1);
+ }
+ }
+ }
+ /* Scavenge the binding stacks.
*/ + { + struct thread *th; + for_each_thread(th) { + long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) - + th->binding_stack_start; + scavenge((lispobj *) th->binding_stack_start,len); +#ifdef LISP_FEATURE_SB_THREAD + /* do the tls as well */ + len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) - + (sizeof (struct thread))/(sizeof (lispobj)); + scavenge((lispobj *) (th+1),len); +#endif } } - - /* Scavenge the binding stack. */ - scavenge((lispobj *) BINDING_STACK_START, - (lispobj *)SymbolValue(BINDING_STACK_POINTER) - - (lispobj *)BINDING_STACK_START); /* The original CMU CL code had scavenge-read-only-space code * controlled by the Lisp-level variable @@ -3766,7 +3605,7 @@ garbage_collect_generation(int generation, int raise) /* Scavenge static space. */ static_space_size = - (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) - + (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) - (lispobj *)STATIC_SPACE_START; if (gencgc_verbose > 1) { FSHOW((stderr, @@ -3806,8 +3645,7 @@ garbage_collect_generation(int generation, int raise) scavenge_newspace_generation_one_scan(new_space); /* Flush the current regions, updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); bytes_allocated = bytes_allocated - old_bytes_allocated; @@ -3821,8 +3659,7 @@ garbage_collect_generation(int generation, int raise) scan_weak_pointers(); /* Flush the current regions, updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); /* Free the pages in oldspace, but not those marked dont_move. */ bytes_freed = free_oldspace(); @@ -3872,14 +3709,14 @@ update_x86_dynamic_space_free_pointer(void) int i; for (i = 0; i < NUM_PAGES; i++) - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0)) last_page = i; last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); return 0; /* dummy value: return something ... */ } @@ -3900,8 +3737,6 @@ collect_garbage(unsigned last_gen) int gen_to_wp; int i; - boxed_region.free_pointer = current_region_free_pointer; - FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen)); if (last_gen > NUM_GENERATIONS) { @@ -3912,12 +3747,11 @@ collect_garbage(unsigned last_gen) } /* Flush the alloc regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); /* Verify the new objects created by Lisp code. 
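
(A sketch of the layout assumed by the binding-stack and TLS scavenging above, for a given struct thread *th: TLS slots are lispobj-sized and indexed from the start of the thread structure, so the live slots past the header begin at (th+1):)

    long binding_words = (lispobj *)SymbolValue(BINDING_STACK_POINTER,th)
                         - th->binding_stack_start;
    scavenge((lispobj *)th->binding_stack_start, binding_words);
#ifdef LISP_FEATURE_SB_THREAD
    long header_words = sizeof(struct thread) / sizeof(lispobj);
    long tls_words = fixnum_value(SymbolValue(FREE_TLS_INDEX,0))
                     - header_words;
    scavenge((lispobj *)(th + 1), tls_words);  /* TLS follows header */
#endif
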
*/ if (pre_verify_gen_0) { - SHOW((stderr, "pre-checking generation 0\n")); + FSHOW((stderr, "pre-checking generation 0\n")); verify_generation(0); } @@ -4004,14 +3838,10 @@ collect_garbage(unsigned last_gen) gc_alloc_generation = 0; update_x86_dynamic_space_free_pointer(); - - /* This is now done by Lisp SCRUB-CONTROL-STACK in Lisp SUB-GC, so - * we needn't do it here: */ - /* zero_stack();*/ - - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; - + auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs; + if(gencgc_verbose) + fprintf(stderr,"Next gc when %ld bytes have been consed\n", + auto_gc_trigger); SHOW("returning from collect_garbage"); } @@ -4030,26 +3860,26 @@ gc_free_heap(void) for (page = 0; page < NUM_PAGES; page++) { /* Skip free pages which should already be zero filled. */ - if (page_table[page].allocated != FREE_PAGE) { + if (page_table[page].allocated != FREE_PAGE_FLAG) { void *page_start, *addr; /* Mark the page free. The other slots are assumed invalid - * when it is a FREE_PAGE and bytes_used is 0 and it + * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it * should not be write-protected -- except that the * generation is used for the current region but it sets * that up. */ - page_table[page].allocated = FREE_PAGE; + page_table[page].allocated = FREE_PAGE_FLAG; page_table[page].bytes_used = 0; /* Zero the page. */ page_start = (void *)page_address(page); /* First, remove any write-protection. */ - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[page].write_protected = 0; - os_invalidate(page_start,4096); - addr = os_validate(page_start,4096); + os_invalidate(page_start,PAGE_BYTES); + addr = os_validate(page_start,PAGE_BYTES); if (addr == NULL || addr != page_start) { lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x", page_start, @@ -4058,7 +3888,7 @@ gc_free_heap(void) } else if (gencgc_zero_check_during_free_heap) { /* Double-check that the page is zero filled. */ int *page_start, i; - gc_assert(page_table[page].allocated == FREE_PAGE); + gc_assert(page_table[page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[page].bytes_used == 0); page_start = (int *)page_address(page); for (i=0; i<1024; i++) { @@ -4088,26 +3918,12 @@ gc_free_heap(void) /* Initialize gc_alloc(). */ gc_alloc_generation = 0; - boxed_region.first_page = 0; - boxed_region.last_page = -1; - boxed_region.start_addr = page_address(0); - boxed_region.free_pointer = page_address(0); - boxed_region.end_addr = page_address(0); - unboxed_region.first_page = 0; - unboxed_region.last_page = -1; - unboxed_region.start_addr = page_address(0); - unboxed_region.free_pointer = page_address(0); - unboxed_region.end_addr = page_address(0); - -#if 0 /* Lisp PURIFY is currently running on the C stack so don't do this. */ - zero_stack(); -#endif - last_free_page = 0; - SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base)); + gc_set_region_empty(&boxed_region); + gc_set_region_empty(&unboxed_region); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; + last_free_page = 0; + SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0); if (verify_after_free_heap) { /* Check whether purify has left any bad pointers. */ @@ -4132,7 +3948,7 @@ gc_init(void) /* Initialize each page structure. */ for (i = 0; i < NUM_PAGES; i++) { /* Initialize all pages as free. 
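
(The retrigger arithmetic from the end of collect_garbage above, with purely illustrative numbers; bytes_consed_between_gcs itself is defined in gc-common.c:)

    /* if, say, 12MB survive this GC and the consing budget is 50MB,
     * the next GC is armed once bytes_allocated crosses 62MB; alloc()
     * checks the threshold on its slow path and defers the collection
     * to the end of the current pseudo-atomic section */
    auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
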
*/ - page_table[i].allocated = FREE_PAGE; + page_table[i].allocated = FREE_PAGE_FLAG; page_table[i].bytes_used = 0; /* Pages are not write-protected at startup. */ @@ -4159,58 +3975,47 @@ gc_init(void) generations[i].min_av_mem_age = 0.75; } - /* Initialize gc_alloc. - * - * FIXME: identical with code in gc_free_heap(), should be shared */ + /* Initialize gc_alloc. */ gc_alloc_generation = 0; - boxed_region.first_page = 0; - boxed_region.last_page = -1; - boxed_region.start_addr = page_address(0); - boxed_region.free_pointer = page_address(0); - boxed_region.end_addr = page_address(0); - unboxed_region.first_page = 0; - unboxed_region.last_page = -1; - unboxed_region.start_addr = page_address(0); - unboxed_region.free_pointer = page_address(0); - unboxed_region.end_addr = page_address(0); + gc_set_region_empty(&boxed_region); + gc_set_region_empty(&unboxed_region); last_free_page = 0; - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; } /* Pick up the dynamic space from after a core load. * * The ALLOCATION_POINTER points to the end of the dynamic space. - * - * XX A scan is needed to identify the closest first objects for pages. */ + */ + static void gencgc_pickup_dynamic(void) { int page = 0; - int addr = DYNAMIC_SPACE_START; - int alloc_ptr = SymbolValue(ALLOCATION_POINTER); + int alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); + lispobj *prev=(lispobj *)page_address(page); - /* Initialize the first region. */ do { - page_table[page].allocated = BOXED_PAGE; + lispobj *first,*ptr= (lispobj *)page_address(page); + page_table[page].allocated = BOXED_PAGE_FLAG; page_table[page].gen = 0; - page_table[page].bytes_used = 4096; + page_table[page].bytes_used = PAGE_BYTES; page_table[page].large_object = 0; + + first=search_space(prev,(ptr+2)-prev,ptr); + if(ptr == first) prev=ptr; page_table[page].first_object_offset = - (void *)DYNAMIC_SPACE_START - page_address(page); - addr += 4096; + (void *)prev - page_address(page); page++; - } while (addr < alloc_ptr); + } while (page_address(page) < alloc_ptr); - generations[0].bytes_allocated = 4096*page; - bytes_allocated = 4096*page; + generations[0].bytes_allocated = PAGE_BYTES*page; + bytes_allocated = PAGE_BYTES*page; - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; } + void gc_initialize_pointers(void) { @@ -4219,8 +4024,6 @@ gc_initialize_pointers(void) -/* a counter for how deep we are in alloc(..) calls */ -int alloc_entered = 0; /* alloc(..) is the external interface for memory allocation. It * allocates to generation 0. It is not called from within the garbage @@ -4232,167 +4035,63 @@ int alloc_entered = 0; * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.) * * The check for a GC trigger is only performed when the current - * region is full, so in most cases it's not needed. Further MAYBE-GC - * is only called once because Lisp will remember "need to collect - * garbage" and get around to it when it can. */ + * region is full, so in most cases it's not needed. */ + char * alloc(int nbytes) { + struct thread *th=arch_os_get_current_thread(); + struct alloc_region *region= + th ? &(th->alloc_region) : &boxed_region; + void *new_obj; + void *new_free_pointer; + /* Check for alignment allocation problems. 
*/ - gc_assert((((unsigned)current_region_free_pointer & 0x7) == 0) + gc_assert((((unsigned)region->free_pointer & 0x7) == 0) && ((nbytes & 0x7) == 0)); - - if (SymbolValue(PSEUDO_ATOMIC_ATOMIC)) {/* if already in a pseudo atomic */ - - void *new_free_pointer; - - retry1: - if (alloc_entered) { - SHOW("alloc re-entered in already-pseudo-atomic case"); - } - ++alloc_entered; - - /* Check whether there is room in the current region. */ - new_free_pointer = current_region_free_pointer + nbytes; - - /* FIXME: Shouldn't we be doing some sort of lock here, to - * keep from getting screwed if an interrupt service routine - * allocates memory between the time we calculate new_free_pointer - * and the time we write it back to current_region_free_pointer? - * Perhaps I just don't understand pseudo-atomics.. - * - * Perhaps I don't. It looks as though what happens is if we - * were interrupted any time during the pseudo-atomic - * interval (which includes now) we discard the allocated - * memory and try again. So, at least we don't return - * a memory area that was allocated out from underneath us - * by code in an ISR. - * Still, that doesn't seem to prevent - * current_region_free_pointer from getting corrupted: - * We read current_region_free_pointer. - * They read current_region_free_pointer. - * They write current_region_free_pointer. - * We write current_region_free_pointer, scribbling over - * whatever they wrote. */ - - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = current_region_free_pointer; - current_region_free_pointer = new_free_pointer; - alloc_entered--; - return((void *)new_obj); - } - - if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - /* Double the trigger. */ - auto_gc_trigger *= 2; - alloc_entered--; - /* Exit the pseudo-atomic. */ - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) { - /* Handle any interrupts that occurred during - * gc_alloc(..). */ - do_pending_interrupt(); - } - funcall0(SymbolFunction(MAYBE_GC)); - /* Re-enter the pseudo-atomic. */ - SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0)); - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1)); - goto retry1; - } - /* Call gc_alloc(). */ - boxed_region.free_pointer = current_region_free_pointer; - { - void *new_obj = gc_alloc(nbytes,0); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; - alloc_entered--; - return (new_obj); - } - } else { - void *result; - void *new_free_pointer; - - retry2: - /* At least wrap this allocation in a pseudo atomic to prevent - * gc_alloc() from being re-entered. */ - SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0)); - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1)); - - if (alloc_entered) - SHOW("alloc re-entered in not-already-pseudo-atomic case"); - ++alloc_entered; - - /* Check whether there is room in the current region. */ - new_free_pointer = current_region_free_pointer + nbytes; - - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = current_region_free_pointer; - current_region_free_pointer = new_free_pointer; - alloc_entered--; - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)) { - /* Handle any interrupts that occurred during - * gc_alloc(..). 
*/ - do_pending_interrupt(); - goto retry2; - } - - return((void *)new_obj); - } - - /* KLUDGE: There's lots of code around here shared with the - * the other branch. Is there some way to factor out the - * duplicate code? -- WHN 19991129 */ - if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - /* Double the trigger. */ - auto_gc_trigger *= 2; - alloc_entered--; - /* Exit the pseudo atomic. */ - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) { - /* Handle any interrupts that occurred during - * gc_alloc(..); */ - do_pending_interrupt(); - } - funcall0(SymbolFunction(MAYBE_GC)); - goto retry2; + if(all_threads) + /* there are a few places in the C code that allocate data in the + * heap before Lisp starts. This is before interrupts are enabled, + * so we don't need to check for pseudo-atomic */ +#ifdef LISP_FEATURE_SB_THREAD + if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) { + register u32 fs; + fprintf(stderr, "fatal error in thread 0x%x, pid=%d\n", + th,getpid()); + __asm__("movl %fs,%0" : "=r" (fs) : ); + fprintf(stderr, "fs is %x, th->tls_cookie=%x \n", + debug_get_fs(),th->tls_cookie); + lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); } - - /* Else call gc_alloc(). */ - boxed_region.free_pointer = current_region_free_pointer; - result = gc_alloc(nbytes,0); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; - - alloc_entered--; - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) { - /* Handle any interrupts that occurred during gc_alloc(..). */ - do_pending_interrupt(); - goto retry2; - } - - return result; +#else + gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)); +#endif + + /* maybe we can do this quickly ... */ + new_free_pointer = region->free_pointer + nbytes; + if (new_free_pointer <= region->end_addr) { + new_obj = (void*)(region->free_pointer); + region->free_pointer = new_free_pointer; + return(new_obj); /* yup */ } -} - -/* - * noise to manipulate the gc trigger stuff - */ - -void -set_auto_gc_trigger(os_vm_size_t dynamic_usage) -{ - auto_gc_trigger += dynamic_usage; + + /* we have to go the long way around, it seems. Check whether + * we should GC in the near future + */ + if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { + /* set things up so that GC happens when we finish the PA + * section. We only do this if there wasn't a pending handler + * already, in case it was a gc. If it wasn't a GC, the next + * allocation will get us back to this point anyway, so no harm done + */ + struct interrupt_data *data=th->interrupt_data; + if(!data->pending_handler) + maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0); + } + new_obj = gc_alloc_with_region(nbytes,0,region,0); + return (new_obj); } -void -clear_auto_gc_trigger(void) -{ - auto_gc_trigger = 0; -} /* Find the code object for the given pc, or return NULL on failure. * @@ -4454,23 +4153,25 @@ gencgc_handle_wp_violation(void* fault_addr) return 0; } else { - - /* The only acceptable reason for an signal like this from the - * heap is that the generational GC write-protected the page. */ - if (page_table[page_index].write_protected != 1) { - lose("access failure in heap page not marked as write-protected"); + if (page_table[page_index].write_protected) { + /* Unprotect the page. 
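
(The bump-allocation fast path of alloc() above, reduced to its essentials; the caller is inside a pseudo-atomic section, and a NULL return here stands in for the gc_alloc_with_region() slow path:)

    static void *
    bump_alloc_sketch(struct alloc_region *region, int nbytes)
    {
        char *new_free = (char *)region->free_pointer + nbytes;
        if (new_free <= (char *)region->end_addr) {
            void *new_obj = region->free_pointer;  /* object starts here */
            region->free_pointer = new_free;       /* bump */
            return new_obj;
        }
        return NULL;   /* region full: refill via gc_alloc_with_region() */
    }

With threads, each thread bump-allocates from its own region (th->alloc_region), so this fast path needs no lock; free_pages_lock is only taken when a region has to be refilled.
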
*/
+ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page_index].write_protected_cleared = 1;
+ page_table[page_index].write_protected = 0;
+ } else {
+ /* The only acceptable reason for this signal on a heap
+ * access is that GENCGC write-protected the page.
+ * However, if two CPUs hit a wp page near-simultaneously,
+ * we had better not have the second one lose here if it
+ * does this test after the first one has already set wp=0
+ */
+ if(page_table[page_index].write_protected_cleared != 1)
+ lose("fault in heap page not marked as write-protected");
 }
-
- /* Unprotect the page. */
- os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
- page_table[page_index].write_protected = 0;
- page_table[page_index].write_protected_cleared = 1;
-
 /* Don't worry, we can handle it. */
 return 1;
 }
 }
-
 /* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a breakpoint. */
@@ -4478,3 +4179,23 @@
 void
 unhandled_sigmemoryfault()
 {}
+
+void gc_alloc_update_all_page_tables(void)
+{
+ /* Flush the alloc regions updating the tables. */
+ struct thread *th;
+ for_each_thread(th)
+ gc_alloc_update_page_tables(0, &th->alloc_region);
+ gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_page_tables(0, &boxed_region);
+}
+void
+gc_set_region_empty(struct alloc_region *region)
+{
+ region->first_page = 0;
+ region->last_page = -1;
+ region->start_addr = page_address(0);
+ region->free_pointer = page_address(0);
+ region->end_addr = page_address(0);
+}
+
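
(A sketch of how the OS-dependent signal glue is expected to consult gencgc_handle_wp_violation(); the real handler lives in the interrupt code and the handler name below is illustrative. A return of 1 means the write barrier was hit and the faulting instruction can simply be retried; 0 means the fault was not ours:)

    #include <signal.h>

    static void
    sigsegv_handler_sketch(int sig, siginfo_t *info, void *context)
    {
        if (gencgc_handle_wp_violation(info->si_addr))
            return;                 /* page unprotected; retry the write */
        unhandled_sigmemoryfault(); /* breakpoint hook, then hand the
                                     * fault to the Lisp-level error
                                     * machinery */
    }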