X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=0c0c98f4a00ce86b5741121981aa7614fe0dae2b;hb=d8e682fdfb7e8ba067e15aea0f3d1f8d37ca9eb1;hp=f2becf0d4e1fce55f651b6b24bfffe7f15417c82;hpb=30d3955b07af6b6b2e52699f213e3b87b11e0f2d;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index f2becf0..0c0c98f 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -24,17 +24,12 @@ * . */ -/* - * FIXME: GC :FULL T seems to be unable to recover a lot of unused - * space. After cold init is complete, GC :FULL T gets us down to - * about 44 Mb total used, but PURIFY gets us down to about 17 Mb - * total used. - */ - #include #include -#include "runtime.h" +#include +#include #include "sbcl.h" +#include "runtime.h" #include "os.h" #include "interr.h" #include "globals.h" @@ -42,20 +37,27 @@ #include "validate.h" #include "lispregs.h" #include "arch.h" +#include "fixnump.h" #include "gc.h" -#include "gencgc.h" +#include "gc-internal.h" +#include "thread.h" +#include "genesis/vector.h" +#include "genesis/weak-pointer.h" +#include "genesis/simple-fun.h" +#include "genesis/hash-table.h" + +/* forward declarations */ +long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed); +static void gencgc_pickup_dynamic(void); -/* a function defined externally in assembly language, called from - * this file */ -void do_pending_interrupt(void); /* * GC parameters */ /* the number of actual generations. (The number of 'struct - * generation' objects is one more than this, because one serves as - * scratch when GC'ing.) */ + * generation' objects is one more than this, because one object + * serves as scratch when GC'ing.) */ #define NUM_GENERATIONS 6 /* Should we use page protection to help avoid the scavenging of pages @@ -63,7 +65,7 @@ void do_pending_interrupt(void); boolean enable_page_protection = 1; /* Should we unmap a page and re-mmap it to have it zero filled? */ -#if defined(__FreeBSD__) || defined(__OpenBSD__) +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) /* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD * so don't unmap there. * @@ -77,34 +79,22 @@ boolean gencgc_unmap_zero = 1; #endif /* the minimum size (in bytes) for a large object*/ -unsigned large_object_size = 4 * 4096; +unsigned large_object_size = 4 * PAGE_BYTES; -/* Should we filter stack/register pointers? This could reduce the - * number of invalid pointers accepted. KLUDGE: It will probably - * degrades interrupt safety during object initialization. */ -boolean enable_pointer_filter = 1; /* * debugging */ -#define gc_abort() lose("GC invariant lost, file \"%s\", line %d", \ - __FILE__, __LINE__) -/* FIXME: In CMU CL, this was "#if 0" with no explanation. Find out - * how much it costs to make it "#if 1". If it's not too expensive, - * keep it. */ -#if 1 -#define gc_assert(ex) do { \ - if (!(ex)) gc_abort(); \ -} while (0) -#else -#define gc_assert(ex) -#endif /* the verbosity level. All non-error messages are disabled at level 0; * and only a few rare messages are printed at level 1. */ -unsigned gencgc_verbose = (QSHOW ? 1 : 0); +#ifdef QSHOW +unsigned gencgc_verbose = 1; +#else +unsigned gencgc_verbose = 0; +#endif /* FIXME: At some point enable the various error-checking things below * and see what they say. */ @@ -143,17 +133,14 @@ boolean gencgc_zero_check_during_free_heap = 0; /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. 
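 */

/* For illustration (not part of this patch): the scan implied by the
 * gencgc_zero_check* flags above. The same loop appears inline later
 * in this diff whenever gencgc_zero_check is set; the helper name
 * here is hypothetical, and lose() comes from interr.h. */
static void
check_range_is_zeroed(long *start, long *end)
{
    long *p;
    for (p = start; p < end; p++)
        if (*p != 0)
            lose("supposedly zeroed region is dirty at %x", p);
}

/* end of sketch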
*/ unsigned long bytes_allocated = 0; -static unsigned long auto_gc_trigger = 0; +extern unsigned long bytes_consed_between_gcs; /* gc-common.c */ +unsigned long auto_gc_trigger = 0; /* the source and destination generations. These are set before a GC starts * scavenging. */ -static int from_space; -static int new_space; +long from_space; +long new_space; -/* FIXME: It would be nice to use this symbolic constant instead of - * bare 4096 almost everywhere. We could also use an assertion that - * it's equal to getpagesize(). */ -#define PAGE_BYTES 4096 /* An array of page structures is statically allocated. * This helps quickly map between an address its page structure. @@ -164,22 +151,28 @@ struct page page_table[NUM_PAGES]; * is needed. */ static void *heap_base = NULL; +#if N_WORD_BITS == 32 + #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG +#elif N_WORD_BITS == 64 + #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG +#endif + /* Calculate the start address for the given page number. */ -inline void -*page_address(int page_num) +inline void * +page_address(long page_num) { - return (heap_base + (page_num * 4096)); + return (heap_base + (page_num * PAGE_BYTES)); } /* Find the page index within the page_table for the given * address. Return -1 on failure. */ -inline int +inline long find_page_index(void *addr) { - int index = addr-heap_base; + long index = addr-heap_base; if (index >= 0) { - index = ((unsigned int)index)/4096; + index = ((unsigned long)index)/PAGE_BYTES; if (index < NUM_PAGES) return (index); } @@ -190,29 +183,29 @@ find_page_index(void *addr) /* a structure to hold the state of a generation */ struct generation { - /* the first page that gc_alloc checks on its next call */ - int alloc_start_page; + /* the first page that gc_alloc() checks on its next call */ + long alloc_start_page; - /* the first page that gc_alloc_unboxed checks on its next call */ - int alloc_unboxed_start_page; + /* the first page that gc_alloc_unboxed() checks on its next call */ + long alloc_unboxed_start_page; /* the first page that gc_alloc_large (boxed) considers on its next * call. (Although it always allocates after the boxed_region.) */ - int alloc_large_start_page; + long alloc_large_start_page; /* the first page that gc_alloc_large (unboxed) considers on its * next call. (Although it always allocates after the * current_unboxed_region.) */ - int alloc_large_unboxed_start_page; + long alloc_large_unboxed_start_page; /* the bytes allocated to this generation */ - int bytes_allocated; + long bytes_allocated; /* the number of bytes at which to trigger a GC */ - int gc_trigger; + long gc_trigger; /* to calculate a new level for gc_trigger */ - int bytes_consed_between_gc; + long bytes_consed_between_gc; /* the number of GCs since the last raise */ int num_gc; @@ -226,18 +219,22 @@ struct generation { * objects are added from a GC of a younger generation. Dividing by * the bytes_allocated will give the average age of the memory in * this generation since its last GC. */ - int cum_sum_bytes_allocated; + long cum_sum_bytes_allocated; /* a minimum average memory age before a GC will occur helps * prevent a GC when a large number of new live objects have been * added, in which case a GC could be a waste of time */ double min_av_mem_age; }; +/* the number of actual generations. (The number of 'struct + * generation' objects is one more than this, because one object + * serves as scratch when GC'ing.) 
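 */

/* A minimal sketch, not from the patch, of how the fields above feed
 * collection policy: a generation is only worth collecting once it
 * has consed past its trigger and, via min_av_mem_age, once its
 * contents are on average old enough that copying them is unlikely
 * to be wasted on data that is about to die anyway. It leans on
 * gen_av_mem_age() and generations[], both defined a little further
 * down; the predicate itself is hypothetical. */
static boolean
generation_worth_collecting(int gen)
{
    struct generation *g = &generations[gen];
    return (g->bytes_allocated > g->gc_trigger)
        && (gen_av_mem_age(gen) >= g->min_av_mem_age);
}

/* end of sketch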
*/ +#define NUM_GENERATIONS 6 /* an array of generation structures. There needs to be one more * generation structure than actual generations as the oldest * generation is temporarily raised then lowered. */ -static struct generation generations[NUM_GENERATIONS+1]; +struct generation generations[NUM_GENERATIONS+1]; /* the oldest generation that is will currently be GCed by default. * Valid values are: 0, 1, ... (NUM_GENERATIONS-1) @@ -257,8 +254,17 @@ unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1; * ALLOCATION_POINTER which is used by the room function to limit its * search of the heap. XX Gencgc obviously needs to be better * integrated with the Lisp code. */ -static int last_free_page; -static int last_used_page = 0; +static long last_free_page; + +/* This lock is to prevent multiple threads from simultaneously + * allocating new regions which overlap each other. Note that the + * majority of GC is single-threaded, but alloc() may be called from + * >1 thread at a time and must be thread-safe. This lock must be + * seized before all accesses to generations[] or to parts of + * page_table[] that other threads may want to see */ + +static lispobj free_pages_lock=0; + /* * miscellaneous heap functions @@ -266,56 +272,56 @@ static int last_used_page = 0; /* Count the number of pages which are write-protected within the * given generation. */ -static int +static long count_write_protect_generation_pages(int generation) { - int i; - int cnt = 0; + long i; + long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].gen == generation) && (page_table[i].write_protected == 1)) - cnt++; - return(cnt); + count++; + return count; } -/* Count the number of pages within the given generation */ -static int +/* Count the number of pages within the given generation. */ +static long count_generation_pages(int generation) { - int i; - int cnt = 0; + long i; + long count = 0; for (i = 0; i < last_free_page; i++) if ((page_table[i].allocated != 0) && (page_table[i].gen == generation)) - cnt++; - return(cnt); + count++; + return count; } -/* Count the number of dont_move pages. */ -static int +#ifdef QSHOW +static long count_dont_move_pages(void) { - int i; - int cnt = 0; - - for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != 0) - && (page_table[i].dont_move != 0)) - cnt++; - return(cnt); + long i; + long count = 0; + for (i = 0; i < last_free_page; i++) { + if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) { + ++count; + } + } + return count; } +#endif /* QSHOW */ /* Work through the pages and add up the number of bytes used for the * given generation. */ -static int -generation_bytes_allocated (int gen) +static long +count_generation_bytes_allocated (int gen) { - int i; - int result = 0; - + long i; + long result = 0; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != 0) && (page_table[i].gen == gen)) result += page_table[i].bytes_used; @@ -335,6 +341,8 @@ gen_av_mem_age(int gen) / ((double)generations[gen].bytes_allocated); } +void fpu_save(int *); /* defined in x86-assem.S */ +void fpu_restore(int *); /* defined in x86-assem.S */ /* The verbose argument controls how much to print: 0 for normal * level of detail; 1 for debugging. */ static void @@ -355,7 +363,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Print the heap stats. 
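 */

/* A hedged sketch (not in the patch) of the free_pages_lock
 * discipline described earlier: the lock word is seized with a value
 * identifying the owner before generations[] or shared parts of
 * page_table[] are touched, exactly as gc_alloc_new_region() and
 * friends do later in this diff. get_spinlock()/release_spinlock()
 * come from the runtime; this particular helper is invented. */
static void
update_page_metadata_locked(long page, int gen)
{
    get_spinlock(&free_pages_lock, (long) &page_table[page]);
    page_table[page].gen = gen;
    release_spinlock(&free_pages_lock);
}

/* end of sketch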
*/ fprintf(stderr, - " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n"); + " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); for (i = 0; i < gens; i++) { int j; @@ -363,22 +371,23 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ int unboxed_cnt = 0; int large_boxed_cnt = 0; int large_unboxed_cnt = 0; + int pinned_cnt=0; for (j = 0; j < last_free_page; j++) if (page_table[j].gen == i) { /* Count the number of boxed pages within the given * generation. */ - if (page_table[j].allocated == BOXED_PAGE) { + if (page_table[j].allocated & BOXED_PAGE_FLAG) { if (page_table[j].large_object) large_boxed_cnt++; else boxed_cnt++; } - + if(page_table[j].dont_move) pinned_cnt++; /* Count the number of unboxed pages within the given * generation. */ - if (page_table[j].allocated == UNBOXED_PAGE) { + if (page_table[j].allocated & UNBOXED_PAGE_FLAG) { if (page_table[j].large_object) large_unboxed_cnt++; else @@ -387,13 +396,14 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ } gc_assert(generations[i].bytes_allocated - == generation_bytes_allocated(i)); + == count_generation_bytes_allocated(i)); fprintf(stderr, - " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n", + " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n", i, boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt, + pinned_cnt, generations[i].bytes_allocated, - (count_generation_pages(i)*4096 + (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated), generations[i].gc_trigger, count_write_protect_generation_pages(i), @@ -418,7 +428,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ * e.g. boxed/unboxed, generation, ages; there may need to be many * allocation regions. * - * Each allocation region may be start within a partly used page. Many + * Each allocation region may start within a partly used page. Many * features of memory use are noted on a page wise basis, e.g. the * generation; so if a region starts within an existing allocated page * it must be consistent with this page. @@ -460,10 +470,6 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ struct alloc_region boxed_region; struct alloc_region unboxed_region; -/* XX hack. Current Lisp code uses the following. Need copying in/out. */ -void *current_region_free_pointer; -void *current_region_end_addr; - /* The generation currently being allocated to. */ static int gc_alloc_generation; @@ -474,7 +480,7 @@ static int gc_alloc_generation; * keeps the allocation contiguous when scavenging the newspace. * * The alloc_region should have been closed by a call to - * gc_alloc_update_page_tables, and will thus be in an empty state. + * gc_alloc_update_page_tables(), and will thus be in an empty state. * * To assist the scavenging functions write-protected pages are not * used. Free pages should not be write-protected. @@ -491,15 +497,12 @@ static int gc_alloc_generation; * are allocated, although they will initially be empty. 
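 */

/* Illustrative only: the "closed" state this function insists on,
 * matching the gc_assert at its head. A closed region has its page
 * fields reset (0 and -1) and no space left between free_pointer and
 * end_addr, so the next allocation must come back through here. The
 * predicate name is made up for this sketch. */
static boolean
region_is_closed(struct alloc_region *r)
{
    return (r->first_page == 0)
        && (r->last_page == -1)
        && (r->free_pointer == r->end_addr);
}

/* end of sketch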
*/ static void -gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) { - int first_page; - int last_page; - int region_size; - int restart_page; - int bytes_found; - int num_pages; - int i; + long first_page; + long last_page; + long bytes_found; + long i; /* FSHOW((stderr, @@ -511,99 +514,17 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - + get_spinlock(&free_pages_lock,(long) alloc_region); if (unboxed) { - restart_page = + first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; } else { - restart_page = + first_page = generations[gc_alloc_generation].alloc_start_page; } - - /* Search for a contiguous free region of at least nbytes with the - * given properties: boxed/unboxed, generation. */ - do { - first_page = restart_page; - - /* First search for a page with at least 32 bytes free, which is - * not write-protected, and which is not marked dont_move. */ - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE) /* not free page */ - && ((unboxed && - (page_table[first_page].allocated != UNBOXED_PAGE)) - || (!unboxed && - (page_table[first_page].allocated != BOXED_PAGE)) - || (page_table[first_page].large_object != 0) - || (page_table[first_page].gen != gc_alloc_generation) - || (page_table[first_page].bytes_used >= (4096-32)) - || (page_table[first_page].write_protected != 0) - || (page_table[first_page].dont_move != 0))) - first_page++; - /* Check for a failure. */ - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! gc_alloc_new_region failed on first_page, nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } - - gc_assert(page_table[first_page].write_protected == 0); - - /* - FSHOW((stderr, - "/first_page=%d bytes_used=%d\n", - first_page, page_table[first_page].bytes_used)); - */ - - /* Now search forward to calculate the available region size. It - * tries to keeps going until nbytes are found and the number of - * pages is greater than some level. This helps keep down the - * number of pages in a region. */ - last_page = first_page; - bytes_found = 4096 - page_table[first_page].bytes_used; - num_pages = 1; - while (((bytes_found < nbytes) || (num_pages < 2)) - && (last_page < (NUM_PAGES-1)) - && (page_table[last_page+1].allocated == FREE_PAGE)) { - last_page++; - num_pages++; - bytes_found += 4096; - gc_assert(page_table[last_page].write_protected == 0); - } - - region_size = (4096 - page_table[first_page].bytes_used) - + 4096*(last_page-first_page); - - gc_assert(bytes_found == region_size); - - /* - FSHOW((stderr, - "/last_page=%d bytes_found=%d num_pages=%d\n", - last_page, bytes_found, num_pages)); - */ - - restart_page = last_page + 1; - } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); - - /* Check for a failure. */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! 
gc_alloc_new_region failed on restart_page, nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } - - /* - FSHOW((stderr, - "/gc_alloc_new_region gen %d: %d bytes: pages %d to %d: addr=%x\n", - gc_alloc_generation, - bytes_found, - first_page, - last_page, - page_address(first_page))); - */ + last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); + bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) + + PAGE_BYTES*(last_page-first_page); /* Set up the alloc_region. */ alloc_region->first_page = first_page; @@ -613,63 +534,67 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) alloc_region->free_pointer = alloc_region->start_addr; alloc_region->end_addr = alloc_region->start_addr + bytes_found; - if (gencgc_zero_check) { - int *p; - for (p = (int *)alloc_region->start_addr; - p < (int *)alloc_region->end_addr; p++) { - if (*p != 0) { - /* KLUDGE: It would be nice to use %lx and explicit casts - * (long) in code like this, so that it is less likely to - * break randomly when running on a machine with different - * word sizes. -- WHN 19991129 */ - lose("The new region at %x is not zero.", p); - } - } - } - /* Set up the pages. */ /* The first page may have already been in use. */ if (page_table[first_page].bytes_used == 0) { if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE; + page_table[first_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[first_page].allocated = BOXED_PAGE; + page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].large_object = 0; page_table[first_page].first_object_offset = 0; } if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG; + gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); for (i = first_page+1; i <= last_page; i++) { if (unboxed) - page_table[i].allocated = UNBOXED_PAGE; + page_table[i].allocated = UNBOXED_PAGE_FLAG; else - page_table[i].allocated = BOXED_PAGE; + page_table[i].allocated = BOXED_PAGE_FLAG; page_table[i].gen = gc_alloc_generation; page_table[i].large_object = 0; /* This may not be necessary for unboxed regions (think it was * broken before!) */ page_table[i].first_object_offset = alloc_region->start_addr - page_address(i); + page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ; } - /* Bump up last_free_page. */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); - if (last_page+1 > last_used_page) - last_used_page = last_page+1; + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), + 0); + } + release_spinlock(&free_pages_lock); + + /* we can do this after releasing free_pages_lock */ + if (gencgc_zero_check) { + long *p; + for (p = (long *)alloc_region->start_addr; + p < (long *)alloc_region->end_addr; p++) { + if (*p != 0) { + /* KLUDGE: It would be nice to use %lx and explicit casts + * (long) in code like this, so that it is less likely to + * break randomly when running on a machine with different + * word sizes. 
-- WHN 19991129 */ + lose("The new region at %x is not zero.", p); + } } } +} + /* If the record_new_objects flag is 2 then all new regions created * are recorded. * @@ -687,22 +612,22 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) * scavenge of a generation. */ #define NUM_NEW_AREAS 512 static int record_new_objects = 0; -static int new_areas_ignore_page; +static long new_areas_ignore_page; struct new_area { - int page; - int offset; - int size; + long page; + long offset; + long size; }; static struct new_area (*new_areas)[]; -static int new_areas_index; -int max_new_areas; +static long new_areas_index; +long max_new_areas; /* Add a new area to new_areas. */ static void -add_new_area(int first_page, int offset, int size) +add_new_area(long first_page, long offset, long size) { unsigned new_area_start,c; - int i; + long i; /* Ignore if full. */ if (new_areas_index >= NUM_NEW_AREAS) @@ -721,13 +646,13 @@ add_new_area(int first_page, int offset, int size) gc_abort(); } - new_area_start = 4096*first_page + offset; + new_area_start = PAGE_BYTES*first_page + offset; /* Search backwards for a prior area that this follows from. If found this will save adding a new area. */ for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) { unsigned area_end = - 4096*((*new_areas)[i].page) + PAGE_BYTES*((*new_areas)[i].page) + (*new_areas)[i].offset + (*new_areas)[i].size; /*FSHOW((stderr, @@ -742,12 +667,11 @@ add_new_area(int first_page, int offset, int size) (*new_areas)[i].size, first_page, offset, - size));*/ + size);*/ (*new_areas)[i].size += size; return; } } - /*FSHOW((stderr, "/add_new_area S1 %d %d %d\n", i, c, new_area_start));*/ (*new_areas)[new_areas_index].page = first_page; (*new_areas)[new_areas_index].offset = offset; @@ -762,7 +686,7 @@ add_new_area(int first_page, int offset, int size) max_new_areas = new_areas_index; } -/* Update the tables for the alloc_region. The region maybe added to +/* Update the tables for the alloc_region. The region may be added to * the new_areas. * * When done the alloc_region is set up so that the next quick alloc @@ -772,19 +696,14 @@ add_new_area(int first_page, int offset, int size) void gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) { - int more; - int first_page; - int next_page; - int bytes_used; - int orig_first_page_bytes_used; - int region_size; - int byte_cnt; + long more; + long first_page; + long next_page; + long bytes_used; + long orig_first_page_bytes_used; + long region_size; + long byte_cnt; - /* - FSHOW((stderr, - "/gc_alloc_update_page_tables to gen %d:\n", - gc_alloc_generation)); - */ first_page = alloc_region->first_page; @@ -794,8 +713,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - /* Skip if no bytes were allocated */ + get_spinlock(&free_pages_lock,(long) alloc_region); if (alloc_region->free_pointer != alloc_region->start_addr) { + /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used)); @@ -805,38 +725,40 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Update the first page. */ /* If the page was free then set up the gen, and - first_object_offset. */ + * first_object_offset. 
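 */

/* A hedged sketch (not in the patch) of the coalescing test
 * add_new_area() performs above: a recorded area whose byte extent
 * ends exactly where the new one starts can simply be widened, which
 * is why the function searches a few entries backwards before taking
 * a fresh slot. Quantities are byte offsets from the heap start. */
static int
new_area_extends_previous(struct new_area *prev, long page, long offset)
{
    unsigned long prev_end =
        PAGE_BYTES*prev->page + prev->offset + prev->size;
    return PAGE_BYTES*page + offset == prev_end;
}

/* end of sketch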
*/ if (page_table[first_page].bytes_used == 0) gc_assert(page_table[first_page].first_object_offset == 0); + page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); byte_cnt = 0; - /* Calc. the number of bytes used in this page. This is not always - the number of new bytes, unless it was free. */ + /* Calculate the number of bytes used in this page. This is not + * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) { - bytes_used = 4096; + if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; byte_cnt += bytes_used; - /* All the rest of the pages should be free. Need to set their - first_object_offset pointer to the start of the region, and set - the bytes_used. */ + /* All the rest of the pages should be free. We need to set their + * first_object_offset pointer to the start of the region, and set + * the bytes_used. */ while (more) { + page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) - gc_assert(page_table[next_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG); else - gc_assert(page_table[next_page].allocated == BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); @@ -847,8 +769,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. */ more = 0; if ((bytes_used = (alloc_region->free_pointer - - page_address(next_page)))>4096) { - bytes_used = 4096; + - page_address(next_page)))>PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; @@ -864,7 +786,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size); /* Set the generations alloc restart page to the last page of - the region. */ + * the region. */ if (unboxed) generations[gc_alloc_generation].alloc_unboxed_start_page = next_page-1; @@ -881,163 +803,52 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) region_size, gc_alloc_generation)); */ - } - else - /* No bytes allocated. Unallocate the first_page if there are 0 - bytes_used. */ + } else { + /* There are no bytes allocated. Unallocate the first_page if + * there are 0 bytes_used. */ + page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (page_table[first_page].bytes_used == 0) - page_table[first_page].allocated = FREE_PAGE; + page_table[first_page].allocated = FREE_PAGE_FLAG; + } /* Unallocate any unused pages. */ while (next_page <= alloc_region->last_page) { gc_assert(page_table[next_page].bytes_used == 0); - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } - - /* Reset the alloc_region. 
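 */

/* For reference, a stand-in (not from the patch) showing what the
 * reset amounts to; it mirrors the hand-rolled stores this hunk
 * replaces with a call to gc_set_region_empty(), which lives with
 * the other gc-internal helpers. Everything is pointed back at page
 * 0 with zero usable bytes, forcing the next allocation through
 * gc_alloc_new_region(). */
static void
reset_alloc_region_sketch(struct alloc_region *r)
{
    r->first_page = 0;
    r->last_page = -1;
    r->start_addr = page_address(0);
    r->free_pointer = page_address(0);
    r->end_addr = page_address(0);
}

/* end of sketch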
*/ - alloc_region->first_page = 0; - alloc_region->last_page = -1; - alloc_region->start_addr = page_address(0); - alloc_region->free_pointer = page_address(0); - alloc_region->end_addr = page_address(0); + release_spinlock(&free_pages_lock); + /* alloc_region is per-thread, we're ok to do this unlocked */ + gc_set_region_empty(alloc_region); } -static inline void *gc_quick_alloc(int nbytes); +static inline void *gc_quick_alloc(long nbytes); /* Allocate a possibly large object. */ -static void -*gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) +void * +gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) { - int first_page; - int last_page; - int region_size; - int restart_page; - int bytes_found; - int num_pages; - int orig_first_page_bytes_used; - int byte_cnt; - int more; - int bytes_used; - int next_page; - int large = (nbytes >= large_object_size); - - /* - if (nbytes > 200000) - FSHOW((stderr, "/alloc_large %d\n", nbytes)); - */ - - /* - FSHOW((stderr, - "/gc_alloc_large for %d bytes from gen %d\n", - nbytes, gc_alloc_generation)); - */ - - /* If the object is small, and there is room in the current region - then allocation it in the current region. */ - if (!large - && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes)) - return gc_quick_alloc(nbytes); - - /* Search for a contiguous free region of at least nbytes. If it's a - large object then align it on a page boundary by searching for a - free page. */ - - /* To allow the allocation of small objects without the danger of - using a page in the current boxed region, the search starts after - the current boxed free region. XX could probably keep a page - index ahead of the current region and bumped up here to save a - lot of re-scanning. */ - if (unboxed) - restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page; - else - restart_page = generations[gc_alloc_generation].alloc_large_start_page; - if (restart_page <= alloc_region->last_page) - restart_page = alloc_region->last_page+1; + long first_page; + long last_page; + long orig_first_page_bytes_used; + long byte_cnt; + long more; + long bytes_used; + long next_page; - do { - first_page = restart_page; - - if (large) - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE)) - first_page++; - else - while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE) - && ((unboxed && - (page_table[first_page].allocated != UNBOXED_PAGE)) - || (!unboxed && - (page_table[first_page].allocated != BOXED_PAGE)) - || (page_table[first_page].large_object != 0) - || (page_table[first_page].gen != gc_alloc_generation) - || (page_table[first_page].bytes_used >= (4096-32)) - || (page_table[first_page].write_protected != 0) - || (page_table[first_page].dont_move != 0))) - first_page++; - - if (first_page >= NUM_PAGES) { - fprintf(stderr, - "Argh! 
gc_alloc_large failed (first_page), nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); - } - - gc_assert(page_table[first_page].write_protected == 0); - - /* - FSHOW((stderr, - "/first_page=%d bytes_used=%d\n", - first_page, page_table[first_page].bytes_used)); - */ - - last_page = first_page; - bytes_found = 4096 - page_table[first_page].bytes_used; - num_pages = 1; - while ((bytes_found < nbytes) - && (last_page < (NUM_PAGES-1)) - && (page_table[last_page+1].allocated == FREE_PAGE)) { - last_page++; - num_pages++; - bytes_found += 4096; - gc_assert(page_table[last_page].write_protected == 0); - } - - region_size = (4096 - page_table[first_page].bytes_used) - + 4096*(last_page-first_page); - - gc_assert(bytes_found == region_size); + get_spinlock(&free_pages_lock,(long) alloc_region); - /* - FSHOW((stderr, - "/last_page=%d bytes_found=%d num_pages=%d\n", - last_page, bytes_found, num_pages)); - */ - - restart_page = last_page + 1; - } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); - - /* Check for a failure */ - if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { - fprintf(stderr, - "Argh! gc_alloc_large failed (restart_page), nbytes=%d.\n", - nbytes); - print_generation_stats(1); - lose(NULL); + if (unboxed) { + first_page = + generations[gc_alloc_generation].alloc_large_unboxed_start_page; + } else { + first_page = generations[gc_alloc_generation].alloc_large_start_page; + } + if (first_page <= alloc_region->last_page) { + first_page = alloc_region->last_page+1; } - /* - if (large) - FSHOW((stderr, - "/gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n", - gc_alloc_generation, - nbytes, - bytes_found, - first_page, - last_page, - page_address(first_page))); - */ + last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); gc_assert(first_page > alloc_region->last_page); if (unboxed) @@ -1053,28 +864,28 @@ static void * first_object_offset. */ if (page_table[first_page].bytes_used == 0) { if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE; + page_table[first_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[first_page].allocated = BOXED_PAGE; + page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].first_object_offset = 0; - page_table[first_page].large_object = large; + page_table[first_page].large_object = 1; } if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[first_page].gen == gc_alloc_generation); - gc_assert(page_table[first_page].large_object == large); + gc_assert(page_table[first_page].large_object == 1); byte_cnt = 0; /* Calc. the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) { - bytes_used = 4096; + if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[first_page].bytes_used = bytes_used; @@ -1086,27 +897,28 @@ static void * first_object_offset pointer to the start of the region, and * set the bytes_used. 
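 */

/* A small sketch (not part of the patch) of what the negative
 * first_object_offset values computed below buy us: from any page of
 * a multi-page object, adding the stored offset to the page address
 * walks back to where the object begins. The helper name is
 * invented. */
static void *
object_start_from_page(long page)
{
    /* For interior pages the offset is negative: the object started
     * that many bytes before this page. */
    return page_address(page) + page_table[page].first_object_offset;
}

/* end of sketch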
*/ while (more) { - gc_assert(page_table[next_page].allocated == FREE_PAGE); + gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); if (unboxed) - page_table[next_page].allocated = UNBOXED_PAGE; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[next_page].allocated = BOXED_PAGE; + page_table[next_page].allocated = BOXED_PAGE_FLAG; page_table[next_page].gen = gc_alloc_generation; - page_table[next_page].large_object = large; + page_table[next_page].large_object = 1; page_table[next_page].first_object_offset = - orig_first_page_bytes_used - 4096*(next_page-first_page); + orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page); /* Calculate the number of bytes used in this page. */ more = 0; - if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) { - bytes_used = 4096; + if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) { + bytes_used = PAGE_BYTES; more = 1; } page_table[next_page].bytes_used = bytes_used; + page_table[next_page].write_protected=0; + page_table[next_page].dont_move=0; byte_cnt += bytes_used; - next_page++; } @@ -1123,347 +935,206 @@ static void if (last_page+1 > last_free_page) { last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); - if (last_page+1 > last_used_page) - last_used_page = last_page+1; + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); } + release_spinlock(&free_pages_lock); return((void *)(page_address(first_page)+orig_first_page_bytes_used)); } -/* Allocate bytes from the boxed_region. It first checks if there is - * room, if not then it calls gc_alloc_new_region to find a new region - * with enough space. A pointer to the start of the region is returned. */ -static void -*gc_alloc(int nbytes) +long +gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed) { - void *new_free_pointer; + long first_page; + long last_page; + long region_size; + long restart_page=*restart_page_ptr; + long bytes_found; + long num_pages; + long large_p=(nbytes>=large_object_size); + gc_assert(free_pages_lock); - /* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */ + /* Search for a contiguous free space of at least nbytes. If it's + * a large object then align it on a page boundary by searching + * for a free page. */ - /* Check whether there is room in the current alloc region. */ - new_free_pointer = boxed_region.free_pointer + nbytes; + do { + first_page = restart_page; + if (large_p) + while ((first_page < NUM_PAGES) + && (page_table[first_page].allocated != FREE_PAGE_FLAG)) + first_page++; + else + while (first_page < NUM_PAGES) { + if(page_table[first_page].allocated == FREE_PAGE_FLAG) + break; + if((page_table[first_page].allocated == + (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && + (page_table[first_page].large_object == 0) && + (page_table[first_page].gen == gc_alloc_generation) && + (page_table[first_page].bytes_used < (PAGE_BYTES-32)) && + (page_table[first_page].write_protected == 0) && + (page_table[first_page].dont_move == 0)) { + break; + } + first_page++; + } + + if (first_page >= NUM_PAGES) { + fprintf(stderr, + "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n", + nbytes); + print_generation_stats(1); + lose(NULL); + } - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current alloc region. 
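 */

/* A hedged usage sketch (not in the patch) of
 * gc_find_freeish_pages(), defined above: free_pages_lock must
 * already be held, the search hint goes in by reference and comes
 * back as the first page of the run, and the return value is the
 * run's last page. On return the run holds at least nbytes, else
 * the search would have lose()d. The wrapper name is invented. */
static long
find_run_sketch(long hint, long nbytes, int unboxed)
{
    long first_page = hint;
    long last_page;

    gc_assert(free_pages_lock);   /* seized by our caller */
    last_page = gc_find_freeish_pages(&first_page, nbytes, unboxed);
    gc_assert((PAGE_BYTES - page_table[first_page].bytes_used)
              + PAGE_BYTES*(last_page - first_page) >= nbytes);
    return last_page;
}

/* end of sketch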
*/ - void *new_obj = boxed_region.free_pointer; - boxed_region.free_pointer = new_free_pointer; + gc_assert(page_table[first_page].write_protected == 0); - /* Check whether the alloc region is almost empty. */ - if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) { - /* If so finished with the current region. */ - gc_alloc_update_page_tables(0, &boxed_region); - /* Set up a new region. */ - gc_alloc_new_region(32, 0, &boxed_region); + last_page = first_page; + bytes_found = PAGE_BYTES - page_table[first_page].bytes_used; + num_pages = 1; + while (((bytes_found < nbytes) + || (!large_p && (num_pages < 2))) + && (last_page < (NUM_PAGES-1)) + && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { + last_page++; + num_pages++; + bytes_found += PAGE_BYTES; + gc_assert(page_table[last_page].write_protected == 0); } - return((void *)new_obj); - } - - /* Else not enough free space in the current region. */ - /* If there some room left in the current region, enough to be worth - * saving, then allocate a large object. */ - /* FIXME: "32" should be a named parameter. */ - if ((boxed_region.end_addr-boxed_region.free_pointer) > 32) - return gc_alloc_large(nbytes, 0, &boxed_region); + region_size = (PAGE_BYTES - page_table[first_page].bytes_used) + + PAGE_BYTES*(last_page-first_page); - /* Else find a new region. */ + gc_assert(bytes_found == region_size); + restart_page = last_page + 1; + } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes)); - /* Finished with the current region. */ - gc_alloc_update_page_tables(0, &boxed_region); + /* Check for a failure */ + if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) { + fprintf(stderr, + "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n", + nbytes); + print_generation_stats(1); + lose(NULL); + } + *restart_page_ptr=first_page; + return last_page; +} - /* Set up a new region. */ - gc_alloc_new_region(nbytes, 0, &boxed_region); +/* Allocate bytes. All the rest of the special-purpose allocation + * functions will eventually call this */ - /* Should now be enough room. */ +void * +gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, + int quick_p) +{ + void *new_free_pointer; - /* Check whether there is room in the current region. */ - new_free_pointer = boxed_region.free_pointer + nbytes; + if(nbytes>=large_object_size) + return gc_alloc_large(nbytes,unboxed_p,my_region); - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = boxed_region.free_pointer; - boxed_region.free_pointer = new_free_pointer; + /* Check whether there is room in the current alloc region. */ + new_free_pointer = my_region->free_pointer + nbytes; - /* Check whether the current region is almost empty. */ - if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) { - /* If so find, finished with the current region. */ - gc_alloc_update_page_tables(0, &boxed_region); + /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes, + my_region->free_pointer, new_free_pointer); */ + if (new_free_pointer <= my_region->end_addr) { + /* If so then allocate from the current alloc region. */ + void *new_obj = my_region->free_pointer; + my_region->free_pointer = new_free_pointer; + + /* Unless a `quick' alloc was requested, check whether the + alloc region is almost empty. */ + if (!quick_p && + (my_region->end_addr - my_region->free_pointer) <= 32) { + /* If so, finished with the current region. 
*/ + gc_alloc_update_page_tables(unboxed_p, my_region); /* Set up a new region. */ - gc_alloc_new_region(32, 0, &boxed_region); + gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region); } return((void *)new_obj); } - /* shouldn't happen */ - gc_assert(0); - return((void *) NIL); /* dummy value: return something ... */ -} - -/* Allocate space from the boxed_region. If there is not enough free - * space then call gc_alloc to do the job. A pointer to the start of - * the region is returned. */ -static inline void -*gc_quick_alloc(int nbytes) -{ - void *new_free_pointer; + /* Else not enough free space in the current region: retry with a + * new region. */ - /* Check whether there is room in the current region. */ - new_free_pointer = boxed_region.free_pointer + nbytes; + gc_alloc_update_page_tables(unboxed_p, my_region); + gc_alloc_new_region(nbytes, unboxed_p, my_region); + return gc_alloc_with_region(nbytes,unboxed_p,my_region,0); +} - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = boxed_region.free_pointer; - boxed_region.free_pointer = new_free_pointer; - return((void *)new_obj); - } +/* these are only used during GC: all allocation from the mutator calls + * alloc() -> gc_alloc_with_region() with the appropriate per-thread + * region */ - /* Else call gc_alloc */ - return (gc_alloc(nbytes)); +void * +gc_general_alloc(long nbytes,int unboxed_p,int quick_p) +{ + struct alloc_region *my_region = + unboxed_p ? &unboxed_region : &boxed_region; + return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p); } -/* Allocate space for the boxed object. If it is a large object then - * do a large alloc else allocate from the current region. If there is - * not enough free space then call gc_alloc to do the job. A pointer - * to the start of the region is returned. */ -static inline void -*gc_quick_alloc_large(int nbytes) +static inline void * +gc_quick_alloc(long nbytes) { - void *new_free_pointer; - - if (nbytes >= large_object_size) - return gc_alloc_large(nbytes, 0, &boxed_region); - - /* Check whether there is room in the current region. */ - new_free_pointer = boxed_region.free_pointer + nbytes; + return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); +} - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = boxed_region.free_pointer; - boxed_region.free_pointer = new_free_pointer; - return((void *)new_obj); - } +static inline void * +gc_quick_alloc_large(long nbytes) +{ + return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); +} - /* Else call gc_alloc */ - return (gc_alloc(nbytes)); +static inline void * +gc_alloc_unboxed(long nbytes) +{ + return gc_general_alloc(nbytes,ALLOC_UNBOXED,0); } -static void -*gc_alloc_unboxed(int nbytes) +static inline void * +gc_quick_alloc_unboxed(long nbytes) { - void *new_free_pointer; + return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); +} - /* - FSHOW((stderr, "/gc_alloc_unboxed %d\n", nbytes)); - */ +static inline void * +gc_quick_alloc_large_unboxed(long nbytes) +{ + return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); +} + +/* + * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b + */ - /* Check whether there is room in the current region. 
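 */

/* Illustrative caller (not from the patch) for the wrappers above:
 * transport code works in words, so counts are scaled by
 * N_WORD_BYTES, and the boxed/unboxed flag picks the matching
 * per-kind region. The function name is invented. */
static lispobj *
alloc_copy_space(long nwords, int unboxed_p)
{
    if (unboxed_p)
        return gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);
    else
        return gc_quick_alloc(nwords*N_WORD_BYTES);
}

/* end of sketch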
*/ - new_free_pointer = unboxed_region.free_pointer + nbytes; +extern long (*scavtab[256])(lispobj *where, lispobj object); +extern lispobj (*transother[256])(lispobj object); +extern long (*sizetab[256])(lispobj *where); - if (new_free_pointer <= unboxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = unboxed_region.free_pointer; - unboxed_region.free_pointer = new_free_pointer; - - /* Check whether the current region is almost empty. */ - if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) { - /* If so finished with the current region. */ - gc_alloc_update_page_tables(1, &unboxed_region); - - /* Set up a new region. */ - gc_alloc_new_region(32, 1, &unboxed_region); - } - - return((void *)new_obj); - } - - /* Else not enough free space in the current region. */ - - /* If there is a bit of room left in the current region then - allocate a large object. */ - if ((unboxed_region.end_addr-unboxed_region.free_pointer) > 32) - return gc_alloc_large(nbytes,1,&unboxed_region); - - /* Else find a new region. */ - - /* Finished with the current region. */ - gc_alloc_update_page_tables(1, &unboxed_region); - - /* Set up a new region. */ - gc_alloc_new_region(nbytes, 1, &unboxed_region); - - /* Should now be enough room. */ - - /* Check whether there is room in the current region. */ - new_free_pointer = unboxed_region.free_pointer + nbytes; - - if (new_free_pointer <= unboxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = unboxed_region.free_pointer; - unboxed_region.free_pointer = new_free_pointer; - - /* Check whether the current region is almost empty. */ - if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) { - /* If so find, finished with the current region. */ - gc_alloc_update_page_tables(1, &unboxed_region); - - /* Set up a new region. */ - gc_alloc_new_region(32, 1, &unboxed_region); - } - - return((void *)new_obj); - } - - /* shouldn't happen? */ - gc_assert(0); - return((void *) NIL); /* dummy value: return something ... */ -} - -static inline void -*gc_quick_alloc_unboxed(int nbytes) -{ - void *new_free_pointer; - - /* Check whether there is room in the current region. */ - new_free_pointer = unboxed_region.free_pointer + nbytes; - - if (new_free_pointer <= unboxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = unboxed_region.free_pointer; - unboxed_region.free_pointer = new_free_pointer; - - return((void *)new_obj); - } - - /* Else call gc_alloc */ - return (gc_alloc_unboxed(nbytes)); -} - -/* Allocate space for the object. If it is a large object then do a - * large alloc else allocate from the current region. If there is not - * enough free space then call gc_alloc to do the job. - * - * A pointer to the start of the region is returned. */ -static inline void -*gc_quick_alloc_large_unboxed(int nbytes) -{ - void *new_free_pointer; - - if (nbytes >= large_object_size) - return gc_alloc_large(nbytes,1,&unboxed_region); - - /* Check whether there is room in the current region. */ - new_free_pointer = unboxed_region.free_pointer + nbytes; - - if (new_free_pointer <= unboxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = unboxed_region.free_pointer; - unboxed_region.free_pointer = new_free_pointer; - - return((void *)new_obj); - } - - /* Else call gc_alloc. */ - return (gc_alloc_unboxed(nbytes)); -} - -/* - * scavenging/transporting routines derived from gc.c in CMU CL ca. 
18b - */ - -static int (*scavtab[256])(lispobj *where, lispobj object); -static lispobj (*transother[256])(lispobj object); -static int (*sizetab[256])(lispobj *where); - -static struct weak_pointer *weak_pointers; - -#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1))) - -/* - * predicates - */ - -static inline boolean -from_space_p(lispobj obj) -{ - int page_index=(void*)obj - heap_base; - return ((page_index >= 0) - && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES) - && (page_table[page_index].gen == from_space)); -} - -static inline boolean -new_space_p(lispobj obj) -{ - int page_index = (void*)obj - heap_base; - return ((page_index >= 0) - && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES) - && (page_table[page_index].gen == new_space)); -} - -/* - * copying objects - */ - -/* to copy a boxed object */ -static inline lispobj -copy_object(lispobj object, int nwords) -{ - int tag; - lispobj *new; - lispobj *source, *dest; - - gc_assert(Pointerp(object)); - gc_assert(from_space_p(object)); - gc_assert((nwords & 0x01) == 0); - - /* Get tag of object. */ - tag = LowtagOf(object); - - /* Allocate space. */ - new = gc_quick_alloc(nwords*4); - - dest = new; - source = (lispobj *) PTR(object); - - /* Copy the object. */ - while (nwords > 0) { - dest[0] = source[0]; - dest[1] = source[1]; - dest += 2; - source += 2; - nwords -= 2; - } - - /* Return Lisp pointer of new object. */ - return ((lispobj) new) | tag; -} - -/* to copy a large boxed object. If the object is in a large object +/* Copy a large boxed object. If the object is in a large object * region then it is simply promoted, else it is copied. If it's large * enough then it's copied to a large object region. * * Vectors may have shrunk. If the object is not copied the space * needs to be reclaimed, and the page_tables corrected. */ -static lispobj -copy_large_object(lispobj object, int nwords) +lispobj +copy_large_object(lispobj object, long nwords) { int tag; lispobj *new; - lispobj *source, *dest; - int first_page; + long first_page; - gc_assert(Pointerp(object)); + gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); - if ((nwords > 1024*1024) && gencgc_verbose) { - FSHOW((stderr, "/copy_large_object: %d bytes\n", nwords*4)); - } - /* Check whether it's a large object. */ + /* Check whether it's in a large object region. */ first_page = find_page_index((void *)object); gc_assert(first_page >= 0); @@ -1471,10 +1142,10 @@ copy_large_object(lispobj object, int nwords) /* Promote the object. 
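 */

/* A condensed sketch (not itself in the patch) of the copy idiom the
 * transport routines in this hunk share: strip the pointer's low-tag
 * bits to reach the object's words, copy them, and re-apply the same
 * tag to the new address. lowtag_of(), native_pointer() and
 * gc_quick_alloc() are the file's own primitives. */
static lispobj
copy_object_sketch(lispobj object, long nwords)
{
    long tag = lowtag_of(object);
    lispobj *new = gc_quick_alloc(nwords*N_WORD_BYTES);

    memcpy(new, native_pointer(object), nwords*N_WORD_BYTES);
    return ((lispobj) new) | tag;
}

/* end of sketch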
*/ - int remaining_bytes; - int next_page; - int bytes_freed; - int old_bytes_used; + long remaining_bytes; + long next_page; + long bytes_freed; + long old_bytes_used; /* Note: Any page write-protection must be removed, else a * later scavenge_newspace may incorrectly not scavenge these @@ -1485,24 +1156,24 @@ copy_large_object(lispobj object, int nwords) gc_assert(page_table[first_page].first_object_offset == 0); next_page = first_page; - remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + remaining_bytes = nwords*N_WORD_BYTES; + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert(page_table[next_page].allocated == BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; /* Remove any write-protection. We should be able to rely * on the write-protect flag to avoid redundant calls. */ if (page_table[next_page].write_protected) { - os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL); + os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL); page_table[next_page].write_protected = 0; } - remaining_bytes -= 4096; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -1513,7 +1184,7 @@ copy_large_object(lispobj object, int nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - gc_assert(page_table[next_page].allocated = BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1523,54 +1194,42 @@ copy_large_object(lispobj object, int nwords) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - (page_table[next_page].allocated == BOXED_PAGE) && + (page_table[next_page].allocated == BOXED_PAGE_FLAG) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { - /* Checks out OK, free the page. Don't need to both zeroing + -(next_page - first_page)*PAGE_BYTES)) { + /* Checks out OK, free the page. Don't need to bother zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected as they * should be zero filled. */ gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; } - if ((bytes_freed > 0) && gencgc_verbose) - FSHOW((stderr, "/copy_large_boxed bytes_freed=%d\n", bytes_freed)); - - generations[from_space].bytes_allocated -= 4*nwords + bytes_freed; - generations[new_space].bytes_allocated += 4*nwords; + generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords + + bytes_freed; + generations[new_space].bytes_allocated += N_WORD_BYTES*nwords; bytes_allocated -= bytes_freed; /* Add the region to the new_areas if requested. 
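 */

/* A hedged summary (not from the patch) of the in-place promotion
 * performed above: a large object is "moved" to new_space by
 * retagging its pages and dropping any write protection, so no bytes
 * are copied. The loop below compresses the bookkeeping; the name is
 * invented. */
static void
promote_pages_in_place(long first_page, long n_pages)
{
    long i;
    for (i = first_page; i < first_page + n_pages; i++) {
        page_table[i].gen = new_space;
        if (page_table[i].write_protected) {
            /* scavenging must be able to write these pages */
            os_protect(page_address(i), PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[i].write_protected = 0;
        }
    }
}

/* end of sketch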
*/ - add_new_area(first_page,0,nwords*4); + add_new_area(first_page,0,nwords*N_WORD_BYTES); return(object); } else { /* Get tag of object. */ - tag = LowtagOf(object); + tag = lowtag_of(object); /* Allocate space. */ - new = gc_quick_alloc_large(nwords*4); - - dest = new; - source = (lispobj *) PTR(object); - - /* Copy the object. */ - while (nwords > 0) { - dest[0] = source[0]; - dest[1] = source[1]; - dest += 2; - source += 2; - nwords -= 2; - } + new = gc_quick_alloc_large(nwords*N_WORD_BYTES); + + memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); /* Return Lisp pointer of new object. */ return ((lispobj) new) | tag; @@ -1578,34 +1237,23 @@ copy_large_object(lispobj object, int nwords) } /* to copy unboxed objects */ -static inline lispobj -copy_unboxed_object(lispobj object, int nwords) +lispobj +copy_unboxed_object(lispobj object, long nwords) { - int tag; + long tag; lispobj *new; - lispobj *source, *dest; - gc_assert(Pointerp(object)); + gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); /* Get tag of object. */ - tag = LowtagOf(object); + tag = lowtag_of(object); /* Allocate space. */ - new = gc_quick_alloc_unboxed(nwords*4); - - dest = new; - source = (lispobj *) PTR(object); - - /* Copy the object. */ - while (nwords > 0) { - dest[0] = source[0]; - dest[1] = source[1]; - dest += 2; - source += 2; - nwords -= 2; - } + new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES); + + memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); /* Return Lisp pointer of new object. */ return ((lispobj) new) | tag; @@ -1622,20 +1270,19 @@ copy_unboxed_object(lispobj object, int nwords) * * KLUDGE: There's a lot of cut-and-paste duplication between this * function and copy_large_object(..). -- WHN 20000619 */ -static lispobj -copy_large_unboxed_object(lispobj object, int nwords) +lispobj +copy_large_unboxed_object(lispobj object, long nwords) { int tag; lispobj *new; - lispobj *source, *dest; - int first_page; + long first_page; - gc_assert(Pointerp(object)); + gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); if ((nwords > 1024*1024) && gencgc_verbose) - FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*4)); + FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); /* Check whether it's a large object. */ first_page = find_page_index((void *)object); @@ -1645,27 +1292,27 @@ copy_large_unboxed_object(lispobj object, int nwords) /* Promote the object. Note: Unboxed objects may have been * allocated to a BOXED region so it may be necessary to * change the region to UNBOXED. 
*/ - int remaining_bytes; - int next_page; - int bytes_freed; - int old_bytes_used; + long remaining_bytes; + long next_page; + long bytes_freed; + long old_bytes_used; gc_assert(page_table[first_page].first_object_offset == 0); next_page = first_page; - remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + remaining_bytes = nwords*N_WORD_BYTES; + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)); + gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE; - remaining_bytes -= 4096; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -1676,7 +1323,7 @@ copy_large_unboxed_object(lispobj object, int nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1686,13 +1333,13 @@ copy_large_unboxed_object(lispobj object, int nwords) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)) && + ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* Checks out OK, free the page. Don't need to both zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected, even if @@ -1700,7 +1347,7 @@ copy_large_unboxed_object(lispobj object, int nwords) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; @@ -1711,136 +1358,38 @@ copy_large_unboxed_object(lispobj object, int nwords) "/copy_large_unboxed bytes_freed=%d\n", bytes_freed)); - generations[from_space].bytes_allocated -= 4*nwords + bytes_freed; - generations[new_space].bytes_allocated += 4*nwords; + generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; + generations[new_space].bytes_allocated += nwords*N_WORD_BYTES; bytes_allocated -= bytes_freed; return(object); } else { /* Get tag of object. */ - tag = LowtagOf(object); + tag = lowtag_of(object); /* Allocate space. */ - new = gc_quick_alloc_large_unboxed(nwords*4); - - dest = new; - source = (lispobj *) PTR(object); - - /* Copy the object. 
*/ - while (nwords > 0) { - dest[0] = source[0]; - dest[1] = source[1]; - dest += 2; - source += 2; - nwords -= 2; - } + new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES); + + /* Copy the object. */ + memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); /* Return Lisp pointer of new object. */ return ((lispobj) new) | tag; } } - -/* - * scavenging - */ -/* FIXME: Most calls end up going to some trouble to compute an - * 'n_words' value for this function. The system might be a little - * simpler if this function used an 'end' parameter instead. */ -static void -scavenge(lispobj *start, long n_words) -{ - lispobj *end = start + n_words; - lispobj *object_ptr; - int n_words_scavenged; - - for (object_ptr = start; - object_ptr < end; - object_ptr += n_words_scavenged) { - lispobj object = *object_ptr; - - gc_assert(object != 0x01); /* not a forwarding pointer */ - - if (Pointerp(object)) { - if (from_space_p(object)) { - /* It currently points to old space. Check for a - * forwarding pointer. */ - lispobj *ptr = (lispobj *)PTR(object); - lispobj first_word = *ptr; - if (first_word == 0x01) { - /* Yes, there's a forwarding pointer. */ - *object_ptr = ptr[1]; - n_words_scavenged = 1; - } else { - /* Scavenge that pointer. */ - n_words_scavenged = - (scavtab[TypeOf(object)])(object_ptr, object); - } - } else { - /* It points somewhere other than oldspace. Leave it - * alone. */ - n_words_scavenged = 1; - } - } else if ((object & 3) == 0) { - /* It's a fixnum: really easy.. */ - n_words_scavenged = 1; - } else { - /* It's some sort of header object or another. */ - n_words_scavenged = - (scavtab[TypeOf(object)])(object_ptr, object); - } - } - gc_assert(object_ptr == end); -} + + /* * code and code-related objects */ - -#define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer) - -static lispobj trans_function_header(lispobj object); +/* +static lispobj trans_fun_header(lispobj object); static lispobj trans_boxed(lispobj object); - -static int -scav_function_pointer(lispobj *where, lispobj object) -{ - lispobj *first_pointer; - lispobj copy; - - gc_assert(Pointerp(object)); - - /* Object is a pointer into from space - no a FP. */ - first_pointer = (lispobj *) PTR(object); - - /* must transport object -- object may point to either a function - * header, a closure function header, or to a closure header. */ - - switch (TypeOf(*first_pointer)) { - case type_FunctionHeader: - case type_ClosureFunctionHeader: - copy = trans_function_header(object); - break; - default: - copy = trans_boxed(object); - break; - } - - if (copy != object) { - /* Set forwarding pointer */ - first_pointer[0] = 0x01; - first_pointer[1] = copy; - } - - gc_assert(Pointerp(copy)); - gc_assert(!from_space_p(copy)); - - *where = copy; - - return 1; -} +*/ /* Scan a x86 compiled code object, looking for possible fixups that * have been missed after a move. @@ -1854,7 +1403,7 @@ scav_function_pointer(lispobj *where, lispobj object) void sniff_code_object(struct code *code, unsigned displacement) { - int nheader_words, ncode_words, nwords; + long nheader_words, ncode_words, nwords; void *p; void *constants_start_addr, *constants_end_addr; void *code_start_addr, *code_end_addr; @@ -1863,23 +1412,14 @@ sniff_code_object(struct code *code, unsigned displacement) if (!check_code_fixups) return; - /* It's ok if it's byte compiled code. The trace table offset will - * be a fixnum if it's x86 compiled code - check. 
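/* The scavenge() loop removed above (its replacement is shared GC code
 * outside this file) depends on the two-word forwarding protocol: once
 * an object has been transported, its first word is overwritten with
 * 0x01 and its second word with the tagged address of the copy.  A
 * self-contained sketch; the names are modeled on, but not identical
 * to, the forwarding_pointer_p/forwarding_pointer_value helpers
 * mentioned later in this diff: */
typedef unsigned long fwd_lispobj;

static int
sketch_forwarding_pointer_p(fwd_lispobj *header) { return header[0] == 0x01; }

static fwd_lispobj
sketch_forwarding_pointer_value(fwd_lispobj *header) { return header[1]; }

static void
sketch_set_forwarding_pointer(fwd_lispobj *header, fwd_lispobj copy)
{
    header[0] = 0x01;  /* 0x01 is neither a valid header word nor a fixnum */
    header[1] = copy;
}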
*/ - if (code->trace_table_offset & 0x3) { - FSHOW((stderr, "/Sniffing byte compiled code object at %x.\n", code)); - return; - } - - /* Else it's x86 machine code. */ - ncode_words = fixnum_value(code->code_size); nheader_words = HeaderValue(*(lispobj *)code); nwords = ncode_words + nheader_words; - constants_start_addr = (void *)code + 5*4; - constants_end_addr = (void *)code + nheader_words*4; - code_start_addr = (void *)code + nheader_words*4; - code_end_addr = (void *)code + nwords*4; + constants_start_addr = (void *)code + 5*N_WORD_BYTES; + constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES; + code_start_addr = (void *)code + nheader_words*N_WORD_BYTES; + code_end_addr = (void *)code + nwords*N_WORD_BYTES; /* Work through the unboxed code. */ for (p = code_start_addr; p < code_end_addr; p++) { @@ -1888,8 +1428,10 @@ sniff_code_object(struct code *code, unsigned displacement) unsigned d2 = *((unsigned char *)p - 2); unsigned d3 = *((unsigned char *)p - 3); unsigned d4 = *((unsigned char *)p - 4); +#ifdef QSHOW unsigned d5 = *((unsigned char *)p - 5); unsigned d6 = *((unsigned char *)p - 6); +#endif /* Check for code references. */ /* Check for a 32 bit word that looks like an absolute @@ -2028,34 +1570,26 @@ sniff_code_object(struct code *code, unsigned displacement) } } -static void -apply_code_fixups(struct code *old_code, struct code *new_code) +void +gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) { - int nheader_words, ncode_words, nwords; + long nheader_words, ncode_words, nwords; void *constants_start_addr, *constants_end_addr; void *code_start_addr, *code_end_addr; lispobj fixups = NIL; unsigned displacement = (unsigned)new_code - (unsigned)old_code; struct vector *fixups_vector; - /* It's OK if it's byte compiled code. The trace table offset will - * be a fixnum if it's x86 compiled code - check. */ - if (new_code->trace_table_offset & 0x3) { -/* FSHOW((stderr, "/byte compiled code object at %x\n", new_code)); */ - return; - } - - /* Else it's x86 machine code. */ ncode_words = fixnum_value(new_code->code_size); nheader_words = HeaderValue(*(lispobj *)new_code); nwords = ncode_words + nheader_words; /* FSHOW((stderr, "/compiled code object at %x: header words = %d, code words = %d\n", new_code, nheader_words, ncode_words)); */ - constants_start_addr = (void *)new_code + 5*4; - constants_end_addr = (void *)new_code + nheader_words*4; - code_start_addr = (void *)new_code + nheader_words*4; - code_end_addr = (void *)new_code + nwords*4; + constants_start_addr = (void *)new_code + 5*N_WORD_BYTES; + constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES; + code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES; + code_end_addr = (void *)new_code + nwords*N_WORD_BYTES; /* FSHOW((stderr, "/const start = %x, end = %x\n", @@ -2069,39 +1603,38 @@ apply_code_fixups(struct code *old_code, struct code *new_code) code objects. Check. */ fixups = new_code->constants[0]; - /* It will be 0 or the unbound-marker if there are no fixups, and - * will be an other pointer if it is valid. */ - if ((fixups == 0) || (fixups == type_UnboundMarker) || !Pointerp(fixups)) { + /* It will be 0 or the unbound-marker if there are no fixups (as + * will be the case if the code object has been purified, for + * example) and will be an other pointer if it is valid. */ + if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) || + !is_lisp_pointer(fixups)) { /* Check for possible errors. 
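/* sniff_code_object() above scans every byte offset of the code for
 * 32-bit values that look like stale absolute pointers into the old
 * location.  A self-contained sketch of that core test; memcpy() makes
 * the possibly-unaligned read portable, and the names are illustrative
 * rather than the runtime's: */
#include <stdint.h>
#include <string.h>

static int
looks_like_stale_pointer(const unsigned char *p,
                         uint32_t old_start, uint32_t old_end)
{
    uint32_t word;
    memcpy(&word, p, sizeof word);  /* x86 is little-endian; alignment-safe read */
    return word >= old_start && word < old_end;
}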
*/
     if (check_code_fixups)
         sniff_code_object(new_code, displacement);

-    /*fprintf(stderr,"Fixups for code object not found!?\n");
-    fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
-    new_code, nheader_words, ncode_words);
-    fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
-    constants_start_addr,constants_end_addr,
-    code_start_addr,code_end_addr);*/
     return;
    }

-    fixups_vector = (struct vector *)PTR(fixups);
+    fixups_vector = (struct vector *)native_pointer(fixups);

     /* Could be pointing to a forwarding pointer. */
-    if (Pointerp(fixups) && (find_page_index((void*)fixups_vector) != -1)
-        && (fixups_vector->header == 0x01)) {
+    /* FIXME is this always in from_space? if so, could replace this code with
+     * forwarding_pointer_p/forwarding_pointer_value */
+    if (is_lisp_pointer(fixups) &&
+        (find_page_index((void*)fixups_vector) != -1) &&
+        (fixups_vector->header == 0x01)) {
         /* If so, then follow it. */
         /*SHOW("following pointer to a forwarding pointer");*/
-        fixups_vector = (struct vector *)PTR((lispobj)fixups_vector->length);
+        fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
     }

     /*SHOW("got fixups");*/

-    if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
+    if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
         /* Got the fixups for the code block. Now work through the
            vector, and apply a fixup at each address. */
-        int length = fixnum_value(fixups_vector->length);
-        int i;
+        long length = fixnum_value(fixups_vector->length);
+        long i;
         for (i = 0; i < length; i++) {
             unsigned offset = fixups_vector->data[i];
             /* Now check the current value of offset. */
@@ -2111,7 +1644,7 @@ apply_code_fixups(struct code *old_code, struct code *new_code)
             /* If it's within the old_code object then it must be an
              * absolute fixup (relative ones are not saved) */
             if ((old_value >= (unsigned)old_code)
-                && (old_value < ((unsigned)old_code + nwords*4)))
+                && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES)))
                 /* So add the displacement. */
                 *(unsigned *)((unsigned)code_start_addr + offset) =
                     old_value + displacement;
@@ -2122,6 +1655,8 @@ apply_code_fixups(struct code *old_code, struct code *new_code)
                 *(unsigned *)((unsigned)code_start_addr + offset) =
                     old_value - displacement;
             }
+    } else {
+        fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
    }

     /* Check for possible errors. */
@@ -2130,1329 +1665,265 @@ apply_code_fixups(struct code *old_code, struct code *new_code)
    }
}

-static struct code *
-trans_code(struct code *code)
-{
-    struct code *new_code;
-    lispobj l_code, l_new_code;
-    int nheader_words, ncode_words, nwords;
-    unsigned long displacement;
-    lispobj fheaderl, *prev_pointer;
-
-    /* FSHOW((stderr,
-           "\n/transporting code object located at 0x%08x\n",
-           (unsigned long) code)); */
-
-    /* If object has already been transported, just return pointer. */
-    if (*((lispobj *)code) == 0x01)
-        return (struct code*)(((lispobj *)code)[1]);
-
-    gc_assert(TypeOf(code->header) == type_CodeHeader);
-
-    /* Prepare to transport the code vector. 
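/* The fixup loop above applies one simple rule, sketched
 * self-contained below: a saved 32-bit slot whose old value pointed
 * into the old code object is an absolute fixup and moves with the
 * object (+displacement); anything else recorded in the fixup vector
 * is a relative fixup out of the object and is adjusted the other way
 * (-displacement).  Names here are illustrative: */
#include <stdint.h>

static void
apply_one_fixup(uint32_t *slot, uint32_t old_code_start,
                uint32_t code_nbytes, int32_t displacement)
{
    uint32_t old_value = *slot;
    if (old_value - old_code_start < code_nbytes)  /* inside the old object */
        *slot = old_value + displacement;          /* absolute fixup */
    else
        *slot = old_value - displacement;          /* relative fixup */
}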
*/ - l_code = (lispobj) code | type_OtherPointer; +static lispobj +trans_boxed_large(lispobj object) +{ + lispobj header; + unsigned long length; - ncode_words = fixnum_value(code->code_size); - nheader_words = HeaderValue(code->header); - nwords = ncode_words + nheader_words; - nwords = CEILING(nwords, 2); + gc_assert(is_lisp_pointer(object)); - l_new_code = copy_large_object(l_code, nwords); - new_code = (struct code *) PTR(l_new_code); + header = *((lispobj *) native_pointer(object)); + length = HeaderValue(header) + 1; + length = CEILING(length, 2); - /* may not have been moved.. */ - if (new_code == code) - return new_code; + return copy_large_object(object, length); +} - displacement = l_new_code - l_code; - /* - FSHOW((stderr, - "/old code object at 0x%08x, new code object at 0x%08x\n", - (unsigned long) code, - (unsigned long) new_code)); - FSHOW((stderr, "/Code object is %d words long.\n", nwords)); - */ +static lispobj +trans_unboxed_large(lispobj object) +{ + lispobj header; + unsigned long length; - /* Set forwarding pointer. */ - ((lispobj *)code)[0] = 0x01; - ((lispobj *)code)[1] = l_new_code; - /* Set forwarding pointers for all the function headers in the - * code object. Also fix all self pointers. */ + gc_assert(is_lisp_pointer(object)); - fheaderl = code->entry_points; - prev_pointer = &new_code->entry_points; + header = *((lispobj *) native_pointer(object)); + length = HeaderValue(header) + 1; + length = CEILING(length, 2); - while (fheaderl != NIL) { - struct function *fheaderp, *nfheaderp; - lispobj nfheaderl; + return copy_large_unboxed_object(object, length); +} - fheaderp = (struct function *) PTR(fheaderl); - gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader); + +/* + * vector-like objects + */ - /* Calculate the new function pointer and the new */ - /* function header. */ - nfheaderl = fheaderl + displacement; - nfheaderp = (struct function *) PTR(nfheaderl); - /* Set forwarding pointer. */ - ((lispobj *)fheaderp)[0] = 0x01; - ((lispobj *)fheaderp)[1] = nfheaderl; +/* FIXME: What does this mean? */ +int gencgc_hash = 1; - /* Fix self pointer. */ - nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET; +static int +scav_vector(lispobj *where, lispobj object) +{ + unsigned long kv_length; + lispobj *kv_vector; + unsigned long length = 0; /* (0 = dummy to stop GCC warning) */ + struct hash_table *hash_table; + lispobj empty_symbol; + unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */ + unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */ + unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */ + lispobj weak_p_obj; + unsigned next_vector_length = 0; - *prev_pointer = nfheaderl; + /* FIXME: A comment explaining this would be nice. It looks as + * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based + * hash tables in the Lisp HASH-TABLE code, and nowhere else. */ + if (HeaderValue(object) != subtype_VectorValidHashing) + return 1; - fheaderl = fheaderp->next; - prev_pointer = &nfheaderp->next; + if (!gencgc_hash) { + /* This is set for backward compatibility. FIXME: Do we need + * this any more? */ + *where = + (subtype_VectorMustRehash<code_size); - n_header_words = HeaderValue(object); - n_words = n_code_words + n_header_words; - n_words = CEILING(n_words, 2); - - /* Scavenge the boxed section of the code data block. */ - scavenge(where + 1, n_header_words - 1); - - /* Scavenge the boxed section of each function object in the */ - /* code data block. 
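/* trans_boxed_large() and trans_unboxed_large() above share one length
 * computation: the header's payload count plus one for the header word
 * itself, rounded up to a dualword.  A sketch, assuming the 8-bit
 * widetag shift behind HeaderValue(): */
#define SKETCH_CEILING(x,y) (((x) + (y) - 1) / (y) * (y))

static unsigned long
large_object_nwords(unsigned long header)
{
    unsigned long length = (header >> 8) + 1;  /* HeaderValue() + header word */
    return SKETCH_CEILING(length, 2);          /* keep dualword alignment */
}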
*/
-    for (entry_point = code->entry_points;
-         entry_point != NIL;
-         entry_point = function_ptr->next) {
-
-        gc_assert(Pointerp(entry_point));
-
-        function_ptr = (struct function *) PTR(entry_point);
-        gc_assert(TypeOf(function_ptr->header) == type_FunctionHeader);
-
-        scavenge(&function_ptr->name, 1);
-        scavenge(&function_ptr->arglist, 1);
-        scavenge(&function_ptr->type, 1);
+    /* Scavenge element 0, which may be a hash-table structure. */
+    scavenge(where+2, 1);
+    if (!is_lisp_pointer(where[2])) {
+        lose("no pointer at %x in hash table", where[2]);
+    }
+    hash_table = (struct hash_table *)native_pointer(where[2]);
+    /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
+    if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
+        lose("hash table not instance (%x at %x)",
+             hash_table->header,
+             hash_table);
    }
-
-    return n_words;
-}
-
-static lispobj
-trans_code_header(lispobj object)
-{
-    struct code *ncode;
-    ncode = trans_code((struct code *) PTR(object));
-    return (lispobj) ncode | type_OtherPointer;
-}
+    /* Scavenge element 1, which should be some internal symbol that
+     * the hash table code reserves for marking empty slots. */
+    scavenge(where+3, 1);
+    if (!is_lisp_pointer(where[3])) {
+        lose("not empty-hash-table-slot symbol pointer: %x", where[3]);
+    }
+    empty_symbol = where[3];
+    /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
+    if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
+        SYMBOL_HEADER_WIDETAG) {
+        lose("not a symbol where empty-hash-table-slot symbol expected: %x",
+             *(lispobj *)native_pointer(empty_symbol));
+    }

-static int
-size_code_header(lispobj *where)
-{
-    struct code *code;
-    int nheader_words, ncode_words, nwords;
-
-    code = (struct code *) where;
-
-    ncode_words = fixnum_value(code->code_size);
-    nheader_words = HeaderValue(code->header);
-    nwords = ncode_words + nheader_words;
-    nwords = CEILING(nwords, 2);
+    /* Scavenge hash table, which will fix the positions of the other
+     * needed objects. */
+    scavenge((lispobj *)hash_table, sizeof(struct hash_table) / sizeof(lispobj));

-    return nwords;
-}
+    /* Cross-check the kv_vector. 
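/* The field accesses above replace the bare word offsets into the
 * hash-table instance that the old code used (hash_table[9], [10],
 * [11], [13], [14], [15] in the version removed further down in this
 * diff).  An illustrative partial layout only; the real, complete
 * struct is the generated one in genesis/hash-table.h: */
typedef unsigned long ht_lispobj;

struct sketch_hash_table {
    ht_lispobj header;          /* instance header word */
    /* ... earlier slots omitted in this sketch ... */
    ht_lispobj table;           /* was hash_table[9]: the k/v vector */
    ht_lispobj weak_p;          /* was hash_table[10] */
    ht_lispobj needing_rehash;  /* was hash_table[11] */
    /* ... */
    ht_lispobj index_vector;    /* was hash_table[13] */
    ht_lispobj next_vector;     /* was hash_table[14] */
    ht_lispobj hash_vector;     /* was hash_table[15] */
};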
*/ + if (where != (lispobj *)native_pointer(hash_table->table)) { + lose("hash_table table!=this table %x", hash_table->table); + } - return nwords; -} + /* WEAK-P */ + weak_p_obj = hash_table->weak_p; -static int -scav_return_pc_header(lispobj *where, lispobj object) -{ - lose("attempted to scavenge a return PC header where=0x%08x object=0x%08x", - (unsigned long) where, - (unsigned long) object); - return 0; /* bogus return value to satisfy static type checking */ -} + /* index vector */ + { + lispobj index_vector_obj = hash_table->index_vector; -static lispobj -trans_return_pc_header(lispobj object) -{ - struct function *return_pc; - unsigned long offset; - struct code *code, *ncode; + if (is_lisp_pointer(index_vector_obj) && + (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) == + SIMPLE_ARRAY_WORD_WIDETAG)) { + index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2; + /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/ + length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]); + /*FSHOW((stderr, "/length = %d\n", length));*/ + } else { + lose("invalid index_vector %x", index_vector_obj); + } + } - SHOW("/trans_return_pc_header: Will this work?"); + /* next vector */ + { + lispobj next_vector_obj = hash_table->next_vector; - return_pc = (struct function *) PTR(object); - offset = HeaderValue(return_pc->header) * 4; + if (is_lisp_pointer(next_vector_obj) && + (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) == + SIMPLE_ARRAY_WORD_WIDETAG)) { + next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2; + /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/ + next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]); + /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/ + } else { + lose("invalid next_vector %x", next_vector_obj); + } + } - /* Transport the whole code object. */ - code = (struct code *) ((unsigned long) return_pc - offset); - ncode = trans_code(code); + /* maybe hash vector */ + { + lispobj hash_vector_obj = hash_table->hash_vector; - return ((lispobj) ncode + offset) | type_OtherPointer; -} - -/* On the 386, closures hold a pointer to the raw address instead of the - * function object. */ -#ifdef __i386__ -static int -scav_closure_header(lispobj *where, lispobj object) -{ - struct closure *closure; - lispobj fun; - - closure = (struct closure *)where; - fun = closure->function - RAW_ADDR_OFFSET; - scavenge(&fun, 1); - /* The function may have moved so update the raw address. But - * don't write unnecessarily. */ - if (closure->function != fun + RAW_ADDR_OFFSET) - closure->function = fun + RAW_ADDR_OFFSET; - - return 2; -} -#endif - -static int -scav_function_header(lispobj *where, lispobj object) -{ - lose("attempted to scavenge a function header where=0x%08x object=0x%08x", - (unsigned long) where, - (unsigned long) object); - return 0; /* bogus return value to satisfy static type checking */ -} - -static lispobj -trans_function_header(lispobj object) -{ - struct function *fheader; - unsigned long offset; - struct code *code, *ncode; - - fheader = (struct function *) PTR(object); - offset = HeaderValue(fheader->header) * 4; - - /* Transport the whole code object. 
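/* The index-, next- and hash-vector extractions above all use the same
 * convention for reaching a specialized vector's data from C: word 0 is
 * the header, word 1 the length stored as a fixnum, and the data begins
 * at word 2.  Self-contained sketch; the 2-bit fixnum shift matches a
 * 32-bit port: */
typedef unsigned long vec_lispobj;

static unsigned long *
sketch_vector_data(vec_lispobj *v, unsigned long *length_out)
{
    *length_out = v[1] >> 2;          /* fixnum_value(): untag the length */
    return (unsigned long *)(v + 2);  /* first data word */
}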
*/ - code = (struct code *) ((unsigned long) fheader - offset); - ncode = trans_code(code); - - return ((lispobj) ncode + offset) | type_FunctionPointer; -} - -/* - * instances - */ - -static int -scav_instance_pointer(lispobj *where, lispobj object) -{ - lispobj copy, *first_pointer; - - /* Object is a pointer into from space - not a FP. */ - copy = trans_boxed(object); - - gc_assert(copy != object); - - first_pointer = (lispobj *) PTR(object); - - /* Set forwarding pointer. */ - first_pointer[0] = 0x01; - first_pointer[1] = copy; - *where = copy; - - return 1; -} - -/* - * lists and conses - */ - -static lispobj trans_list(lispobj object); - -static int -scav_list_pointer(lispobj *where, lispobj object) -{ - lispobj first, *first_pointer; - - gc_assert(Pointerp(object)); - - /* Object is a pointer into from space - not FP. */ - - first = trans_list(object); - gc_assert(first != object); - - first_pointer = (lispobj *) PTR(object); - - /* Set forwarding pointer */ - first_pointer[0] = 0x01; - first_pointer[1] = first; - - gc_assert(Pointerp(first)); - gc_assert(!from_space_p(first)); - *where = first; - return 1; -} - -static lispobj -trans_list(lispobj object) -{ - lispobj new_list_pointer; - struct cons *cons, *new_cons; - lispobj cdr; - - gc_assert(from_space_p(object)); - - cons = (struct cons *) PTR(object); - - /* Copy 'object'. */ - new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons)); - new_cons->car = cons->car; - new_cons->cdr = cons->cdr; /* updated later */ - new_list_pointer = (lispobj)new_cons | LowtagOf(object); - - /* Grab the cdr before it is clobbered. */ - cdr = cons->cdr; - - /* Set forwarding pointer (clobbers start of list). */ - cons->car = 0x01; - cons->cdr = new_list_pointer; - - /* Try to linearize the list in the cdr direction to help reduce - * paging. */ - while (1) { - lispobj new_cdr; - struct cons *cdr_cons, *new_cdr_cons; - - if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr) - || (*((lispobj *)PTR(cdr)) == 0x01)) - break; - - cdr_cons = (struct cons *) PTR(cdr); - - /* Copy 'cdr'. */ - new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons)); - new_cdr_cons->car = cdr_cons->car; - new_cdr_cons->cdr = cdr_cons->cdr; - new_cdr = (lispobj)new_cdr_cons | LowtagOf(cdr); - - /* Grab the cdr before it is clobbered. */ - cdr = cdr_cons->cdr; - - /* Set forwarding pointer. */ - cdr_cons->car = 0x01; - cdr_cons->cdr = new_cdr; - - /* Update the cdr of the last cons copied into new space to - * keep the newspace scavenge from having to do it. */ - new_cons->cdr = new_cdr; - - new_cons = new_cdr_cons; - } - - return new_list_pointer; -} - - -/* - * scavenging and transporting other pointers - */ - -static int -scav_other_pointer(lispobj *where, lispobj object) -{ - lispobj first, *first_pointer; - - gc_assert(Pointerp(object)); - - /* Object is a pointer into from space - not FP. */ - first_pointer = (lispobj *) PTR(object); - - first = (transother[TypeOf(*first_pointer)])(object); - - if (first != object) { - /* Set forwarding pointer. 
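/* The trans_list() above is more than a straight copy: it chases the
 * cdr chain and places successive conses in consecutive newspace cells,
 * so that a later walk of the list touches fewer pages.  A
 * self-contained sketch of that linearization over a plain C list, with
 * a bump pointer standing in for gc_quick_alloc(): */
#include <stddef.h>

struct sketch_cons { void *car; struct sketch_cons *cdr; };

static struct sketch_cons *
copy_list_linearized(struct sketch_cons *list, struct sketch_cons *newspace)
{
    struct sketch_cons *head = NULL, **tail = &head;
    while (list != NULL) {
        struct sketch_cons *n = newspace++;  /* consecutive allocation */
        n->car = list->car;
        n->cdr = NULL;
        *tail = n;        /* patch the previous cell's cdr */
        tail = &n->cdr;
        list = list->cdr;
    }
    return head;
}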
*/ - first_pointer[0] = 0x01; - first_pointer[1] = first; - *where = first; - } - - gc_assert(Pointerp(first)); - gc_assert(!from_space_p(first)); - - return 1; -} - -/* - * immediate, boxed, and unboxed objects - */ - -static int -size_pointer(lispobj *where) -{ - return 1; -} - -static int -scav_immediate(lispobj *where, lispobj object) -{ - return 1; -} - -static lispobj -trans_immediate(lispobj object) -{ - lose("trying to transport an immediate"); - return NIL; /* bogus return value to satisfy static type checking */ -} - -static int -size_immediate(lispobj *where) -{ - return 1; -} - - -static int -scav_boxed(lispobj *where, lispobj object) -{ - return 1; -} - -static lispobj -trans_boxed(lispobj object) -{ - lispobj header; - unsigned long length; - - gc_assert(Pointerp(object)); - - header = *((lispobj *) PTR(object)); - length = HeaderValue(header) + 1; - length = CEILING(length, 2); - - return copy_object(object, length); -} - -static lispobj -trans_boxed_large(lispobj object) -{ - lispobj header; - unsigned long length; - - gc_assert(Pointerp(object)); - - header = *((lispobj *) PTR(object)); - length = HeaderValue(header) + 1; - length = CEILING(length, 2); - - return copy_large_object(object, length); -} - -static int -size_boxed(lispobj *where) -{ - lispobj header; - unsigned long length; - - header = *where; - length = HeaderValue(header) + 1; - length = CEILING(length, 2); - - return length; -} - -static int -scav_fdefn(lispobj *where, lispobj object) -{ - struct fdefn *fdefn; - - fdefn = (struct fdefn *)where; - - /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n", - fdefn->function, fdefn->raw_addr)); */ - - if ((char *)(fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) { - scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1); - - /* Don't write unnecessarily. */ - if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET)) - fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET); - - return sizeof(struct fdefn) / sizeof(lispobj); - } else { - return 1; - } -} - -static int -scav_unboxed(lispobj *where, lispobj object) -{ - unsigned long length; - - length = HeaderValue(object) + 1; - length = CEILING(length, 2); - - return length; -} - -static lispobj -trans_unboxed(lispobj object) -{ - lispobj header; - unsigned long length; - - - gc_assert(Pointerp(object)); - - header = *((lispobj *) PTR(object)); - length = HeaderValue(header) + 1; - length = CEILING(length, 2); - - return copy_unboxed_object(object, length); -} - -static lispobj -trans_unboxed_large(lispobj object) -{ - lispobj header; - unsigned long length; - - - gc_assert(Pointerp(object)); - - header = *((lispobj *) PTR(object)); - length = HeaderValue(header) + 1; - length = CEILING(length, 2); - - return copy_large_unboxed_object(object, length); -} - -static int -size_unboxed(lispobj *where) -{ - lispobj header; - unsigned long length; - - header = *where; - length = HeaderValue(header) + 1; - length = CEILING(length, 2); - - return length; -} - -/* - * vector-like objects - */ - -#define NWORDS(x,y) (CEILING((x),(y)) / (y)) - -static int -scav_string(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - /* NOTE: Strings contain one more byte of data than the length */ - /* slot indicates. 
*/ - - vector = (struct vector *) where; - length = fixnum_value(vector->length) + 1; - nwords = CEILING(NWORDS(length, 4) + 2, 2); - - return nwords; -} - -static lispobj -trans_string(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - /* NOTE: A string contains one more byte of data (a terminating - * '\0' to help when interfacing with C functions) than indicated - * by the length slot. */ - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length) + 1; - nwords = CEILING(NWORDS(length, 4) + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_string(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - /* NOTE: A string contains one more byte of data (a terminating - * '\0' to help when interfacing with C functions) than indicated - * by the length slot. */ - - vector = (struct vector *) where; - length = fixnum_value(vector->length) + 1; - nwords = CEILING(NWORDS(length, 4) + 2, 2); - - return nwords; -} - -/* FIXME: What does this mean? */ -int gencgc_hash = 1; - -static int -scav_vector(lispobj *where, lispobj object) -{ - unsigned int kv_length; - lispobj *kv_vector; - unsigned int length = 0; /* (0 = dummy to stop GCC warning) */ - lispobj *hash_table; - lispobj empty_symbol; - unsigned int *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - unsigned int *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - unsigned int *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - lispobj weak_p_obj; - unsigned next_vector_length = 0; - - /* FIXME: A comment explaining this would be nice. It looks as - * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based - * hash tables in the Lisp HASH-TABLE code, and nowhere else. */ - if (HeaderValue(object) != subtype_VectorValidHashing) - return 1; - - if (!gencgc_hash) { - /* This is set for backward compatibility. FIXME: Do we need - * this any more? */ - *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector; - return 1; - } - - kv_length = fixnum_value(where[1]); - kv_vector = where + 2; /* Skip the header and length. */ - /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/ - - /* Scavenge element 0, which may be a hash-table structure. */ - scavenge(where+2, 1); - if (!Pointerp(where[2])) { - lose("no pointer at %x in hash table", where[2]); - } - hash_table = (lispobj *)PTR(where[2]); - /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/ - if (TypeOf(hash_table[0]) != type_InstanceHeader) { - lose("hash table not instance (%x at %x)", hash_table[0], hash_table); - } - - /* Scavenge element 1, which should be some internal symbol that - * the hash table code reserves for marking empty slots. */ - scavenge(where+3, 1); - if (!Pointerp(where[3])) { - lose("not empty-hash-table-slot symbol pointer: %x", where[3]); - } - empty_symbol = where[3]; - /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/ - if (TypeOf(*(lispobj *)PTR(empty_symbol)) != type_SymbolHeader) { - lose("not a symbol where empty-hash-table-slot symbol expected: %x", - *(lispobj *)PTR(empty_symbol)); - } - - /* Scavenge hash table, which will fix the positions of the other - * needed objects. */ - scavenge(hash_table, 16); - - /* Cross-check the kv_vector. 
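/* The string scavenge/transport/size trio above sizes a simple-string
 * as its character count plus one (the trailing '\0' kept for C
 * interop), packed four 8-bit characters per 32-bit word, plus the
 * header and length words, rounded up to a dualword.  A worked
 * sketch: */
#define STR_CEILING(x,y) (((x) + (y) - 1) / (y) * (y))
#define STR_NWORDS(x,y)  (STR_CEILING((x),(y)) / (y))

static long
sketch_string_nwords(long nchars)
{
    return STR_CEILING(STR_NWORDS(nchars + 1, 4) + 2, 2);
}
/* e.g. nchars = 5: 6 bytes -> 2 data words, + 2 overhead words = 4 words */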
*/ - if (where != (lispobj *)PTR(hash_table[9])) { - lose("hash_table table!=this table %x", hash_table[9]); - } - - /* WEAK-P */ - weak_p_obj = hash_table[10]; - - /* index vector */ - { - lispobj index_vector_obj = hash_table[13]; - - if (Pointerp(index_vector_obj) && - (TypeOf(*(lispobj *)PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) { - index_vector = ((unsigned int *)PTR(index_vector_obj)) + 2; - /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/ - length = fixnum_value(((unsigned int *)PTR(index_vector_obj))[1]); - /*FSHOW((stderr, "/length = %d\n", length));*/ - } else { - lose("invalid index_vector %x", index_vector_obj); - } - } - - /* next vector */ - { - lispobj next_vector_obj = hash_table[14]; - - if (Pointerp(next_vector_obj) && - (TypeOf(*(lispobj *)PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) { - next_vector = ((unsigned int *)PTR(next_vector_obj)) + 2; - /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/ - next_vector_length = fixnum_value(((unsigned int *)PTR(next_vector_obj))[1]); - /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/ - } else { - lose("invalid next_vector %x", next_vector_obj); - } - } - - /* maybe hash vector */ - { - /* FIXME: This bare "15" offset should become a symbolic - * expression of some sort. And all the other bare offsets - * too. And the bare "16" in scavenge(hash_table, 16). And - * probably other stuff too. Ugh.. */ - lispobj hash_vector_obj = hash_table[15]; - - if (Pointerp(hash_vector_obj) && - (TypeOf(*(lispobj *)PTR(hash_vector_obj)) - == type_SimpleArrayUnsignedByte32)) { - hash_vector = ((unsigned int *)PTR(hash_vector_obj)) + 2; - /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/ - gc_assert(fixnum_value(((unsigned int *)PTR(hash_vector_obj))[1]) - == next_vector_length); - } else { - hash_vector = NULL; - /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/ - } - } - - /* These lengths could be different as the index_vector can be a - * different length from the others, a larger index_vector could help - * reduce collisions. */ - gc_assert(next_vector_length*2 == kv_length); - - /* now all set up.. */ - - /* Work through the KV vector. */ - { - int i; - for (i = 1; i < next_vector_length; i++) { - lispobj old_key = kv_vector[2*i]; - unsigned int old_index = (old_key & 0x1fffffff)%length; - - /* Scavenge the key and value. */ - scavenge(&kv_vector[2*i],2); - - /* Check whether the key has moved and is EQ based. */ - { - lispobj new_key = kv_vector[2*i]; - unsigned int new_index = (new_key & 0x1fffffff)%length; - - if ((old_index != new_index) && - ((!hash_vector) || (hash_vector[i] == 0x80000000)) && - ((new_key != empty_symbol) || - (kv_vector[2*i] != empty_symbol))) { - - /*FSHOW((stderr, - "* EQ key %d moved from %x to %x; index %d to %d\n", - i, old_key, new_key, old_index, new_index));*/ - - if (index_vector[old_index] != 0) { - /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/ - - /* Unlink the key from the old_index chain. */ - if (index_vector[old_index] == i) { - /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/ - index_vector[old_index] = next_vector[i]; - /* Link it into the needing rehash chain. 
*/ - next_vector[i] = fixnum_value(hash_table[11]); - hash_table[11] = make_fixnum(i); - /*SHOW("P2");*/ - } else { - unsigned prior = index_vector[old_index]; - unsigned next = next_vector[prior]; - - /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/ - - while (next != 0) { - /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/ - if (next == i) { - /* Unlink it. */ - next_vector[prior] = next_vector[next]; - /* Link it into the needing rehash - * chain. */ - next_vector[next] = - fixnum_value(hash_table[11]); - hash_table[11] = make_fixnum(next); - /*SHOW("/P3");*/ - break; - } - prior = next; - next = next_vector[next]; - } - } - } - } - } - } - } - return (CEILING(kv_length + 2, 2)); -} - -static lispobj -trans_vector(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - - length = fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return copy_large_object(object, nwords); -} - -static int -size_vector(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return nwords; -} - - -static int -scav_vector_bit(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 32) + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_bit(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 32) + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_bit(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 32) + 2, 2); - - return nwords; -} - - -static int -scav_vector_unsigned_byte_2(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 16) + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_unsigned_byte_2(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 16) + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_unsigned_byte_2(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 16) + 2, 2); - - return nwords; -} - - -static int -scav_vector_unsigned_byte_4(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 8) + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_unsigned_byte_4(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 8) + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int 
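/* A self-contained sketch of the bucket surgery in the rehash logic
 * above: index_vector holds 1-based chain heads (0 means an empty
 * bucket), next_vector the per-entry links; an entry whose EQ-hashed
 * key moved is unlinked from its old bucket and pushed onto the table's
 * needing-rehash list.  Unlike the real code, this sketch keeps the
 * list head in a plain word rather than a fixnum slot, and assumes
 * entry i really is on the chain: */
static void
move_to_rehash_list(unsigned long *index_vector, unsigned long *next_vector,
                    unsigned long old_bucket, unsigned long i,
                    unsigned long *needing_rehash)
{
    if (index_vector[old_bucket] == i) {
        index_vector[old_bucket] = next_vector[i];  /* unlink the chain head */
    } else {
        unsigned long prior = index_vector[old_bucket];
        while (prior != 0 && next_vector[prior] != i)
            prior = next_vector[prior];             /* find the predecessor */
        if (prior != 0)
            next_vector[prior] = next_vector[i];    /* unlink mid-chain */
    }
    next_vector[i] = *needing_rehash;               /* push onto rehash list */
    *needing_rehash = i;
}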
-size_vector_unsigned_byte_4(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 8) + 2, 2); - - return nwords; -} - -static int -scav_vector_unsigned_byte_8(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 4) + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_unsigned_byte_8(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 4) + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_unsigned_byte_8(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 4) + 2, 2); - - return nwords; -} - - -static int -scav_vector_unsigned_byte_16(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 2) + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_unsigned_byte_16(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 2) + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_unsigned_byte_16(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(NWORDS(length, 2) + 2, 2); - - return nwords; -} - -static int -scav_vector_unsigned_byte_32(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_unsigned_byte_32(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_unsigned_byte_32(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return nwords; -} - -static int -scav_vector_single_float(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_single_float(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_single_float(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = 
fixnum_value(vector->length); - nwords = CEILING(length + 2, 2); - - return nwords; -} - -static int -scav_vector_double_float(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 2 + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_double_float(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(length * 2 + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_double_float(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 2 + 2, 2); - - return nwords; -} - -#ifdef type_SimpleArrayLongFloat -static int -scav_vector_long_float(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 3 + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_long_float(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(length * 3 + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_long_float(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 3 + 2, 2); - - return nwords; -} -#endif - - -#ifdef type_SimpleArrayComplexSingleFloat -static int -scav_vector_complex_single_float(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 2 + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_complex_single_float(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(length * 2 + 2, 2); + if (is_lisp_pointer(hash_vector_obj) && + (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) == + SIMPLE_ARRAY_WORD_WIDETAG)){ + hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2; + /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/ + gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1]) + == next_vector_length); + } else { + hash_vector = NULL; + /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/ + } + } - return copy_large_unboxed_object(object, nwords); -} + /* These lengths could be different as the index_vector can be a + * different length from the others, a larger index_vector could help + * reduce collisions. */ + gc_assert(next_vector_length*2 == kv_length); -static int -size_vector_complex_single_float(lispobj *where) -{ - struct vector *vector; - int length, nwords; + /* now all set up.. */ - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 2 + 2, 2); + /* Work through the KV vector. 
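/* Each removed scav/trans/size trio above computes the same quantity
 * for a different element width; one parameterized sketch covers them
 * all (a 32-bit word is assumed; widths are in bits, so a double float
 * is 64 and a complex long float 192): */
static long
sketch_vector_nwords(long length, long bits_per_element)
{
    long data_words = (length * bits_per_element + 31) / 32;  /* round up */
    return (data_words + 2 + 1) / 2 * 2;  /* + header/length, dualword-aligned */
}
/* e.g. a 100-element bit-vector: 4 data words + 2 -> 6 words;
 * a 3-element double-float vector: 6 data words + 2 -> 8 words */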
*/ + { + long i; + for (i = 1; i < next_vector_length; i++) { + lispobj old_key = kv_vector[2*i]; - return nwords; -} +#if N_WORD_BITS == 32 + unsigned long old_index = (old_key & 0x1fffffff)%length; +#elif N_WORD_BITS == 64 + unsigned long old_index = (old_key & 0x1fffffffffffffff)%length; #endif -#ifdef type_SimpleArrayComplexDoubleFloat -static int -scav_vector_complex_double_float(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 4 + 2, 2); - - return nwords; -} - -static lispobj -trans_vector_complex_double_float(lispobj object) -{ - struct vector *vector; - int length, nwords; - - gc_assert(Pointerp(object)); - - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(length * 4 + 2, 2); - - return copy_large_unboxed_object(object, nwords); -} - -static int -size_vector_complex_double_float(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 4 + 2, 2); + /* Scavenge the key and value. */ + scavenge(&kv_vector[2*i],2); - return nwords; -} + /* Check whether the key has moved and is EQ based. */ + { + lispobj new_key = kv_vector[2*i]; +#if N_WORD_BITS == 32 + unsigned long new_index = (new_key & 0x1fffffff)%length; +#elif N_WORD_BITS == 64 + unsigned long new_index = (new_key & 0x1fffffffffffffff)%length; #endif + if ((old_index != new_index) && + ((!hash_vector) || (hash_vector[i] == 0x80000000)) && + ((new_key != empty_symbol) || + (kv_vector[2*i] != empty_symbol))) { -#ifdef type_SimpleArrayComplexLongFloat -static int -scav_vector_complex_long_float(lispobj *where, lispobj object) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 6 + 2, 2); - - return nwords; -} + /*FSHOW((stderr, + "* EQ key %d moved from %x to %x; index %d to %d\n", + i, old_key, new_key, old_index, new_index));*/ -static lispobj -trans_vector_complex_long_float(lispobj object) -{ - struct vector *vector; - int length, nwords; + if (index_vector[old_index] != 0) { + /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/ - gc_assert(Pointerp(object)); + /* Unlink the key from the old_index chain. */ + if (index_vector[old_index] == i) { + /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/ + index_vector[old_index] = next_vector[i]; + /* Link it into the needing rehash chain. */ + next_vector[i] = fixnum_value(hash_table->needing_rehash); + hash_table->needing_rehash = make_fixnum(i); + /*SHOW("P2");*/ + } else { + unsigned prior = index_vector[old_index]; + unsigned next = next_vector[prior]; - vector = (struct vector *) PTR(object); - length = fixnum_value(vector->length); - nwords = CEILING(length * 6 + 2, 2); + /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/ - return copy_large_unboxed_object(object, nwords); + while (next != 0) { + /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/ + if (next == i) { + /* Unlink it. */ + next_vector[prior] = next_vector[next]; + /* Link it into the needing rehash + * chain. 
*/ + next_vector[next] = + fixnum_value(hash_table->needing_rehash); + hash_table->needing_rehash = make_fixnum(next); + /*SHOW("/P3");*/ + break; + } + prior = next; + next = next_vector[next]; + } + } + } + } + } + } + } + return (CEILING(kv_length + 2, 2)); } -static int -size_vector_complex_long_float(lispobj *where) -{ - struct vector *vector; - int length, nwords; - - vector = (struct vector *) where; - length = fixnum_value(vector->length); - nwords = CEILING(length * 6 + 2, 2); - - return nwords; -} -#endif /* * weak pointers */ -/* XX This is a hack adapted from cgc.c. These don't work too well with the - * gencgc as a list of the weak pointers is maintained within the - * objects which causes writes to the pages. A limited attempt is made - * to avoid unnecessary writes, but this needs a re-think. */ - +/* XX This is a hack adapted from cgc.c. These don't work too + * efficiently with the gencgc as a list of the weak pointers is + * maintained within the objects which causes writes to the pages. A + * limited attempt is made to avoid unnecessary writes, but this needs + * a re-think. */ #define WEAK_POINTER_NWORDS \ CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2) -static int +static long scav_weak_pointer(lispobj *where, lispobj object) { struct weak_pointer *wp = weak_pointers; @@ -3474,489 +1945,105 @@ scav_weak_pointer(lispobj *where, lispobj object) wp = (struct weak_pointer*)where; if (wp->next != weak_pointers) { wp->next = weak_pointers; - } else { - /*SHOW("avoided write to weak pointer");*/ - } - weak_pointers = wp; - } - - /* Do not let GC scavenge the value slot of the weak pointer. - * (That is why it is a weak pointer.) */ - - return WEAK_POINTER_NWORDS; -} - -static lispobj -trans_weak_pointer(lispobj object) -{ - lispobj copy; - /* struct weak_pointer *wp; */ - - gc_assert(Pointerp(object)); - -#if defined(DEBUG_WEAK) - FSHOW((stderr, "Transporting weak pointer from 0x%08x\n", object)); -#endif - - /* Need to remember where all the weak pointers are that have */ - /* been transported so they can be fixed up in a post-GC pass. */ - - copy = copy_object(object, WEAK_POINTER_NWORDS); - /* wp = (struct weak_pointer *) PTR(copy);*/ - - - /* Push the weak pointer onto the list of weak pointers. */ - /* wp->next = weak_pointers; - * weak_pointers = wp;*/ - - return copy; -} - -static int -size_weak_pointer(lispobj *where) -{ - return WEAK_POINTER_NWORDS; -} - -void scan_weak_pointers(void) -{ - struct weak_pointer *wp; - for (wp = weak_pointers; wp != NULL; wp = wp->next) { - lispobj value = wp->value; - lispobj *first_pointer; - - first_pointer = (lispobj *)PTR(value); - - /* - FSHOW((stderr, "/weak pointer at 0x%08x\n", (unsigned long) wp)); - FSHOW((stderr, "/value: 0x%08x\n", (unsigned long) value)); - */ - - if (Pointerp(value) && from_space_p(value)) { - /* Now, we need to check whether the object has been forwarded. If - * it has been, the weak pointer is still good and needs to be - * updated. Otherwise, the weak pointer needs to be nil'ed - * out. */ - if (first_pointer[0] == 0x01) { - wp->value = first_pointer[1]; - } else { - /* Break it. 
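/* scan_weak_pointers() (removed above; the shared post-GC pass now
 * lives outside this file) resolves each registered weak pointer once
 * copying finishes: a referent that was transported left a forwarding
 * pair behind, giving the new value; one that was not is garbage, so
 * the weak pointer is broken.  Self-contained sketch, with NIL and T
 * passed in rather than taken from the runtime: */
typedef unsigned long wp_lispobj;

struct sketch_weak_pointer { wp_lispobj value; wp_lispobj broken; };

static void
resolve_weak_pointer(struct sketch_weak_pointer *wp, wp_lispobj *referent,
                     wp_lispobj nil_value, wp_lispobj t_value)
{
    if (referent[0] == 0x01) {      /* forwarding pointer: still live */
        wp->value = referent[1];    /* point at the new copy */
    } else {                        /* dead: break the pointer */
        wp->value = nil_value;
        wp->broken = t_value;
    }
}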
*/ - SHOW("broken"); - wp->value = NIL; - wp->broken = T; - } - } - } -} - -/* - * initialization - */ - -static int -scav_lose(lispobj *where, lispobj object) -{ - lose("no scavenge function for object 0x%08x", (unsigned long) object); - return 0; /* bogus return value to satisfy static type checking */ -} - -static lispobj -trans_lose(lispobj object) -{ - lose("no transport function for object 0x%08x", (unsigned long) object); - return NIL; /* bogus return value to satisfy static type checking */ -} - -static int -size_lose(lispobj *where) -{ - lose("no size function for object at 0x%08x", (unsigned long) where); - return 1; /* bogus return value to satisfy static type checking */ -} - -static void -gc_init_tables(void) -{ - int i; - - /* Set default value in all slots of scavenge table. */ - for (i = 0; i < 256; i++) { /* FIXME: bare constant length, ick! */ - scavtab[i] = scav_lose; - } - - /* For each type which can be selected by the low 3 bits of the tag - * alone, set multiple entries in our 8-bit scavenge table (one for each - * possible value of the high 5 bits). */ - for (i = 0; i < 32; i++) { /* FIXME: bare constant length, ick! */ - scavtab[type_EvenFixnum|(i<<3)] = scav_immediate; - scavtab[type_FunctionPointer|(i<<3)] = scav_function_pointer; - /* OtherImmediate0 */ - scavtab[type_ListPointer|(i<<3)] = scav_list_pointer; - scavtab[type_OddFixnum|(i<<3)] = scav_immediate; - scavtab[type_InstancePointer|(i<<3)] = scav_instance_pointer; - /* OtherImmediate1 */ - scavtab[type_OtherPointer|(i<<3)] = scav_other_pointer; - } - - /* Other-pointer types (those selected by all eight bits of the tag) get - * one entry each in the scavenge table. */ - scavtab[type_Bignum] = scav_unboxed; - scavtab[type_Ratio] = scav_boxed; - scavtab[type_SingleFloat] = scav_unboxed; - scavtab[type_DoubleFloat] = scav_unboxed; -#ifdef type_LongFloat - scavtab[type_LongFloat] = scav_unboxed; -#endif - scavtab[type_Complex] = scav_boxed; -#ifdef type_ComplexSingleFloat - scavtab[type_ComplexSingleFloat] = scav_unboxed; -#endif -#ifdef type_ComplexDoubleFloat - scavtab[type_ComplexDoubleFloat] = scav_unboxed; -#endif -#ifdef type_ComplexLongFloat - scavtab[type_ComplexLongFloat] = scav_unboxed; -#endif - scavtab[type_SimpleArray] = scav_boxed; - scavtab[type_SimpleString] = scav_string; - scavtab[type_SimpleBitVector] = scav_vector_bit; - scavtab[type_SimpleVector] = scav_vector; - scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2; - scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4; - scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8; - scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16; - scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32; -#ifdef type_SimpleArraySignedByte8 - scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8; -#endif -#ifdef type_SimpleArraySignedByte16 - scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16; -#endif -#ifdef type_SimpleArraySignedByte30 - scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32; -#endif -#ifdef type_SimpleArraySignedByte32 - scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32; -#endif - scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float; - scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float; -#ifdef type_SimpleArrayLongFloat - scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float; -#endif -#ifdef type_SimpleArrayComplexSingleFloat - 
scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float; -#endif -#ifdef type_SimpleArrayComplexDoubleFloat - scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float; -#endif -#ifdef type_SimpleArrayComplexLongFloat - scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float; -#endif - scavtab[type_ComplexString] = scav_boxed; - scavtab[type_ComplexBitVector] = scav_boxed; - scavtab[type_ComplexVector] = scav_boxed; - scavtab[type_ComplexArray] = scav_boxed; - scavtab[type_CodeHeader] = scav_code_header; - /*scavtab[type_FunctionHeader] = scav_function_header;*/ - /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/ - /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/ -#ifdef __i386__ - scavtab[type_ClosureHeader] = scav_closure_header; - scavtab[type_FuncallableInstanceHeader] = scav_closure_header; - scavtab[type_ByteCodeFunction] = scav_closure_header; - scavtab[type_ByteCodeClosure] = scav_closure_header; -#else - scavtab[type_ClosureHeader] = scav_boxed; - scavtab[type_FuncallableInstanceHeader] = scav_boxed; - scavtab[type_ByteCodeFunction] = scav_boxed; - scavtab[type_ByteCodeClosure] = scav_boxed; -#endif - scavtab[type_ValueCellHeader] = scav_boxed; - scavtab[type_SymbolHeader] = scav_boxed; - scavtab[type_BaseChar] = scav_immediate; - scavtab[type_Sap] = scav_unboxed; - scavtab[type_UnboundMarker] = scav_immediate; - scavtab[type_WeakPointer] = scav_weak_pointer; - scavtab[type_InstanceHeader] = scav_boxed; - scavtab[type_Fdefn] = scav_fdefn; - - /* transport other table, initialized same way as scavtab */ - for (i = 0; i < 256; i++) - transother[i] = trans_lose; - transother[type_Bignum] = trans_unboxed; - transother[type_Ratio] = trans_boxed; - transother[type_SingleFloat] = trans_unboxed; - transother[type_DoubleFloat] = trans_unboxed; -#ifdef type_LongFloat - transother[type_LongFloat] = trans_unboxed; -#endif - transother[type_Complex] = trans_boxed; -#ifdef type_ComplexSingleFloat - transother[type_ComplexSingleFloat] = trans_unboxed; -#endif -#ifdef type_ComplexDoubleFloat - transother[type_ComplexDoubleFloat] = trans_unboxed; -#endif -#ifdef type_ComplexLongFloat - transother[type_ComplexLongFloat] = trans_unboxed; -#endif - transother[type_SimpleArray] = trans_boxed_large; - transother[type_SimpleString] = trans_string; - transother[type_SimpleBitVector] = trans_vector_bit; - transother[type_SimpleVector] = trans_vector; - transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2; - transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4; - transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8; - transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16; - transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32; -#ifdef type_SimpleArraySignedByte8 - transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8; -#endif -#ifdef type_SimpleArraySignedByte16 - transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16; -#endif -#ifdef type_SimpleArraySignedByte30 - transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32; -#endif -#ifdef type_SimpleArraySignedByte32 - transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32; -#endif - transother[type_SimpleArraySingleFloat] = trans_vector_single_float; - transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float; -#ifdef type_SimpleArrayLongFloat - transother[type_SimpleArrayLongFloat] = 
trans_vector_long_float; -#endif -#ifdef type_SimpleArrayComplexSingleFloat - transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float; -#endif -#ifdef type_SimpleArrayComplexDoubleFloat - transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float; -#endif -#ifdef type_SimpleArrayComplexLongFloat - transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float; -#endif - transother[type_ComplexString] = trans_boxed; - transother[type_ComplexBitVector] = trans_boxed; - transother[type_ComplexVector] = trans_boxed; - transother[type_ComplexArray] = trans_boxed; - transother[type_CodeHeader] = trans_code_header; - transother[type_FunctionHeader] = trans_function_header; - transother[type_ClosureFunctionHeader] = trans_function_header; - transother[type_ReturnPcHeader] = trans_return_pc_header; - transother[type_ClosureHeader] = trans_boxed; - transother[type_FuncallableInstanceHeader] = trans_boxed; - transother[type_ByteCodeFunction] = trans_boxed; - transother[type_ByteCodeClosure] = trans_boxed; - transother[type_ValueCellHeader] = trans_boxed; - transother[type_SymbolHeader] = trans_boxed; - transother[type_BaseChar] = trans_immediate; - transother[type_Sap] = trans_unboxed; - transother[type_UnboundMarker] = trans_immediate; - transother[type_WeakPointer] = trans_weak_pointer; - transother[type_InstanceHeader] = trans_boxed; - transother[type_Fdefn] = trans_boxed; - - /* size table, initialized the same way as scavtab */ - for (i = 0; i < 256; i++) - sizetab[i] = size_lose; - for (i = 0; i < 32; i++) { - sizetab[type_EvenFixnum|(i<<3)] = size_immediate; - sizetab[type_FunctionPointer|(i<<3)] = size_pointer; - /* OtherImmediate0 */ - sizetab[type_ListPointer|(i<<3)] = size_pointer; - sizetab[type_OddFixnum|(i<<3)] = size_immediate; - sizetab[type_InstancePointer|(i<<3)] = size_pointer; - /* OtherImmediate1 */ - sizetab[type_OtherPointer|(i<<3)] = size_pointer; - } - sizetab[type_Bignum] = size_unboxed; - sizetab[type_Ratio] = size_boxed; - sizetab[type_SingleFloat] = size_unboxed; - sizetab[type_DoubleFloat] = size_unboxed; -#ifdef type_LongFloat - sizetab[type_LongFloat] = size_unboxed; -#endif - sizetab[type_Complex] = size_boxed; -#ifdef type_ComplexSingleFloat - sizetab[type_ComplexSingleFloat] = size_unboxed; -#endif -#ifdef type_ComplexDoubleFloat - sizetab[type_ComplexDoubleFloat] = size_unboxed; -#endif -#ifdef type_ComplexLongFloat - sizetab[type_ComplexLongFloat] = size_unboxed; -#endif - sizetab[type_SimpleArray] = size_boxed; - sizetab[type_SimpleString] = size_string; - sizetab[type_SimpleBitVector] = size_vector_bit; - sizetab[type_SimpleVector] = size_vector; - sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2; - sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4; - sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8; - sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16; - sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32; -#ifdef type_SimpleArraySignedByte8 - sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8; -#endif -#ifdef type_SimpleArraySignedByte16 - sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16; -#endif -#ifdef type_SimpleArraySignedByte30 - sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32; -#endif -#ifdef type_SimpleArraySignedByte32 - sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32; -#endif - 
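/* All of the scavtab/transother/sizetab initialization being removed
 * above hangs off one dispatch scheme: 256 handler slots indexed by the
 * low byte of a header word or tagged pointer, with each 3-bit pointer
 * or fixnum tag replicated across all 32 values of the upper five bits.
 * Illustrative sketch: */
typedef unsigned long tab_lispobj;
typedef long (*sketch_scav_fn)(tab_lispobj *where, tab_lispobj object);

static sketch_scav_fn sketch_scavtab[256];

static void
set_lowtag_handlers(int lowtag, sketch_scav_fn fn)  /* lowtag: 0..7 */
{
    int high;
    for (high = 0; high < 32; high++)
        sketch_scavtab[lowtag | (high << 3)] = fn;
}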
sizetab[type_SimpleArraySingleFloat] = size_vector_single_float; - sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float; -#ifdef type_SimpleArrayLongFloat - sizetab[type_SimpleArrayLongFloat] = size_vector_long_float; -#endif -#ifdef type_SimpleArrayComplexSingleFloat - sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float; -#endif -#ifdef type_SimpleArrayComplexDoubleFloat - sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float; -#endif -#ifdef type_SimpleArrayComplexLongFloat - sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float; -#endif - sizetab[type_ComplexString] = size_boxed; - sizetab[type_ComplexBitVector] = size_boxed; - sizetab[type_ComplexVector] = size_boxed; - sizetab[type_ComplexArray] = size_boxed; - sizetab[type_CodeHeader] = size_code_header; -#if 0 - /* We shouldn't see these, so just lose if it happens. */ - sizetab[type_FunctionHeader] = size_function_header; - sizetab[type_ClosureFunctionHeader] = size_function_header; - sizetab[type_ReturnPcHeader] = size_return_pc_header; -#endif - sizetab[type_ClosureHeader] = size_boxed; - sizetab[type_FuncallableInstanceHeader] = size_boxed; - sizetab[type_ValueCellHeader] = size_boxed; - sizetab[type_SymbolHeader] = size_boxed; - sizetab[type_BaseChar] = size_immediate; - sizetab[type_Sap] = size_unboxed; - sizetab[type_UnboundMarker] = size_immediate; - sizetab[type_WeakPointer] = size_weak_pointer; - sizetab[type_InstanceHeader] = size_boxed; - sizetab[type_Fdefn] = size_boxed; -} - -/* Scan an area looking for an object which encloses the given pointer. - * Return the object start on success or NULL on failure. */ -static lispobj * -search_space(lispobj *start, size_t words, lispobj *pointer) -{ - while (words > 0) { - size_t count = 1; - lispobj thing = *start; - - /* If thing is an immediate then this is a cons */ - if (Pointerp(thing) - || ((thing & 3) == 0) /* fixnum */ - || (TypeOf(thing) == type_BaseChar) - || (TypeOf(thing) == type_UnboundMarker)) - count = 2; - else - count = (sizetab[TypeOf(thing)])(start); - - /* Check whether the pointer is within this object? */ - if ((pointer >= start) && (pointer < (start+count))) { - /* found it! */ - /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/ - return(start); + } else { + /*SHOW("avoided write to weak pointer");*/ } + weak_pointers = wp; + } - /* Round up the count */ - count = CEILING(count,2); + /* Do not let GC scavenge the value slot of the weak pointer. + * (That is why it is a weak pointer.) 
*/ - start += count; - words -= count; - } - return (NULL); + return WEAK_POINTER_NWORDS; } -static lispobj* -search_read_only_space(lispobj *pointer) + +lispobj * +search_read_only_space(void *pointer) { - lispobj* start = (lispobj*)READ_ONLY_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER); - if ((pointer < start) || (pointer >= end)) + lispobj *start = (lispobj *) READ_ONLY_SPACE_START; + lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0); + if ((pointer < (void *)start) || (pointer >= (void *)end)) return NULL; - return (search_space(start, (pointer+2)-start, pointer)); + return (gc_search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *) pointer)); } -static lispobj * -search_static_space(lispobj *pointer) +lispobj * +search_static_space(void *pointer) { - lispobj* start = (lispobj*)STATIC_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER); - if ((pointer < start) || (pointer >= end)) + lispobj *start = (lispobj *)STATIC_SPACE_START; + lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0); + if ((pointer < (void *)start) || (pointer >= (void *)end)) return NULL; - return (search_space(start, (pointer+2)-start, pointer)); + return (gc_search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *) pointer)); } /* a faster version for searching the dynamic space. This will work even * if the object is in a current allocation region. */ lispobj * -search_dynamic_space(lispobj *pointer) +search_dynamic_space(void *pointer) { - int page_index = find_page_index(pointer); + long page_index = find_page_index(pointer); lispobj *start; - /* Address may be invalid - do some checks. */ - if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE)) + /* The address may be invalid, so do some checks. */ + if ((page_index == -1) || + (page_table[page_index].allocated == FREE_PAGE_FLAG)) return NULL; start = (lispobj *)((void *)page_address(page_index) + page_table[page_index].first_object_offset); - return (search_space(start, (pointer+2)-start, pointer)); + return (gc_search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *)pointer)); } -/* FIXME: There is a strong family resemblance between this function - * and the function of the same name in purify.c. Would it be possible - * to implement them as exactly the same function? */ +/* Is there any possibility that pointer is a valid Lisp object + * reference, and/or something else (e.g. subroutine call return + * address) which should prevent us from moving the referred-to thing? + * This is called from preserve_pointer() */ static int -valid_dynamic_space_pointer(lispobj *pointer) +possibly_valid_dynamic_space_pointer(lispobj *pointer) { lispobj *start_addr; - /* Find the object start address */ + /* Find the object start address. */ if ((start_addr = search_dynamic_space(pointer)) == NULL) { return 0; } /* We need to allow raw pointers into Code objects for return - * addresses. This will also pickup pointers to functions in code + * addresses. This will also pick up pointers to functions in code * objects. */ - if (TypeOf(*start_addr) == type_CodeHeader) { - /* X Could do some further checks here. */ + if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) { + /* XXX could do some further checks here */ return 1; } /* If it's not a return address then it needs to be a valid Lisp * pointer. 
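For orientation: all three search_* wrappers above delegate to gc_search_space(), which now lives elsewhere in the runtime (its declaration arrives with gc-internal.h) and performs the same object-by-object walk as the search_space() deleted here. A sketch reconstructed from the deleted code, assuming the file's sizetab, CEILING macro, and tag predicates:

static lispobj *
search_space_sketch(lispobj *start, size_t words, lispobj *pointer)
{
    while (words > 0) {
        lispobj thing = *start;
        size_t count;

        /* A word that is not an object header must start a cons cell. */
        if (is_lisp_pointer(thing)
            || fixnump(thing)
            || (widetag_of(thing) == CHARACTER_WIDETAG)
            || (widetag_of(thing) == UNBOUND_MARKER_WIDETAG))
            count = 2;                        /* car + cdr */
        else
            count = (sizetab[widetag_of(thing)])(start);

        count = CEILING(count, 2);            /* objects are two-word aligned */

        if ((pointer >= start) && (pointer < (start + count)))
            return start;                     /* found the enclosing object */

        start += count;
        words -= count;
    }
    return NULL;
}

search_dynamic_space() is the fast variant because it can begin this walk at the page's first_object_offset rather than at the start of the whole space.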
*/ - if (!Pointerp((lispobj)pointer)) { + if (!is_lisp_pointer((lispobj)pointer)) { return 0; } /* Check that the object pointed to is consistent with the pointer - * low tag. */ - switch (LowtagOf((lispobj)pointer)) { - case type_FunctionPointer: + * low tag. + */ + switch (lowtag_of((lispobj)pointer)) { + case FUN_POINTER_LOWTAG: /* Start_addr should be the enclosing code object, or a closure - header. */ - switch (TypeOf(*start_addr)) { - case type_CodeHeader: + * header. */ + switch (widetag_of(*start_addr)) { + case CODE_HEADER_WIDETAG: /* This case is probably caught above. */ break; - case type_ClosureHeader: - case type_FuncallableInstanceHeader: - case type_ByteCodeFunction: - case type_ByteCodeClosure: + case CLOSURE_HEADER_WIDETAG: + case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: if ((unsigned)pointer != - ((unsigned)start_addr+type_FunctionPointer)) { + ((unsigned)start_addr+FUN_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wf2: %x %x %x\n", @@ -3972,9 +2059,9 @@ valid_dynamic_space_pointer(lispobj *pointer) return 0; } break; - case type_ListPointer: + case LIST_POINTER_LOWTAG: if ((unsigned)pointer != - ((unsigned)start_addr+type_ListPointer)) { + ((unsigned)start_addr+LIST_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wl1: %x %x %x\n", @@ -3982,14 +2069,20 @@ valid_dynamic_space_pointer(lispobj *pointer) return 0; } /* Is it plausible cons? */ - if ((Pointerp(start_addr[0]) - || ((start_addr[0] & 3) == 0) /* fixnum */ - || (TypeOf(start_addr[0]) == type_BaseChar) - || (TypeOf(start_addr[0]) == type_UnboundMarker)) - && (Pointerp(start_addr[1]) - || ((start_addr[1] & 3) == 0) /* fixnum */ - || (TypeOf(start_addr[1]) == type_BaseChar) - || (TypeOf(start_addr[1]) == type_UnboundMarker))) + if ((is_lisp_pointer(start_addr[0]) + || (fixnump(start_addr[0])) + || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG) +#if N_WORD_BITS == 64 + || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG) +#endif + || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG)) + && (is_lisp_pointer(start_addr[1]) + || (fixnump(start_addr[1])) + || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG) +#if N_WORD_BITS == 64 + || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG) +#endif + || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG))) break; else { if (gencgc_verbose) @@ -3998,16 +2091,16 @@ valid_dynamic_space_pointer(lispobj *pointer) pointer, start_addr, *start_addr)); return 0; } - case type_InstancePointer: + case INSTANCE_POINTER_LOWTAG: if ((unsigned)pointer != - ((unsigned)start_addr+type_InstancePointer)) { + ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wi1: %x %x %x\n", pointer, start_addr, *start_addr)); return 0; } - if (TypeOf(start_addr[0]) != type_InstanceHeader) { + if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) { if (gencgc_verbose) FSHOW((stderr, "/Wi2: %x %x %x\n", @@ -4015,26 +2108,29 @@ valid_dynamic_space_pointer(lispobj *pointer) return 0; } break; - case type_OtherPointer: + case OTHER_POINTER_LOWTAG: if ((unsigned)pointer != - ((int)start_addr+type_OtherPointer)) { + ((int)start_addr+OTHER_POINTER_LOWTAG)) { if (gencgc_verbose) FSHOW((stderr, "/Wo1: %x %x %x\n", pointer, start_addr, *start_addr)); return 0; } - /* Is it plausible? Not a cons. X should check the headers. */ - if (Pointerp(start_addr[0]) || ((start_addr[0] & 3) == 0)) { + /* Is it plausible? Not a cons. XXX should check the headers. 
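What this switch enforces can be restated compactly: the low tag bits of the candidate pointer must agree with the object actually found at start_addr. A hypothetical condensation, assuming the file's tag accessors; the real code additionally logs each rejection through FSHOW:

static int
lowtag_matches_header(lispobj candidate, lispobj *start_addr)
{
    /* The pointer must point exactly lowtag bytes past the object start. */
    if (native_pointer(candidate) != start_addr)
        return 0;

    switch (lowtag_of(candidate)) {
    case INSTANCE_POINTER_LOWTAG:
        return widetag_of(*start_addr) == INSTANCE_HEADER_WIDETAG;
    case FUN_POINTER_LOWTAG:
        return (widetag_of(*start_addr) == CLOSURE_HEADER_WIDETAG)
            || (widetag_of(*start_addr) == FUNCALLABLE_INSTANCE_HEADER_WIDETAG)
            || (widetag_of(*start_addr) == CODE_HEADER_WIDETAG);
    case OTHER_POINTER_LOWTAG:
        /* The first word must be a header, not cons-like data. */
        return !is_lisp_pointer(*start_addr) && !fixnump(*start_addr);
    case LIST_POINTER_LOWTAG:
        return 1;  /* conses have no header; both words get checked instead */
    default:
        return 0;
    }
}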
*/ + if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) { if (gencgc_verbose) FSHOW((stderr, "/Wo2: %x %x %x\n", pointer, start_addr, *start_addr)); return 0; } - switch (TypeOf(start_addr[0])) { - case type_UnboundMarker: - case type_BaseChar: + switch (widetag_of(start_addr[0])) { + case UNBOUND_MARKER_WIDETAG: + case CHARACTER_WIDETAG: +#if N_WORD_BITS == 64 + case SINGLE_FLOAT_WIDETAG: +#endif if (gencgc_verbose) FSHOW((stderr, "*Wo3: %x %x %x\n", @@ -4042,17 +2138,15 @@ valid_dynamic_space_pointer(lispobj *pointer) return 0; /* only pointed to by function pointers? */ - case type_ClosureHeader: - case type_FuncallableInstanceHeader: - case type_ByteCodeFunction: - case type_ByteCodeClosure: + case CLOSURE_HEADER_WIDETAG: + case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: if (gencgc_verbose) FSHOW((stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr)); return 0; - case type_InstanceHeader: + case INSTANCE_HEADER_WIDETAG: if (gencgc_verbose) FSHOW((stderr, "*Wo5: %x %x %x\n", @@ -4060,68 +2154,99 @@ valid_dynamic_space_pointer(lispobj *pointer) return 0; /* the valid other immediate pointer objects */ - case type_SimpleVector: - case type_Ratio: - case type_Complex: -#ifdef type_ComplexSingleFloat - case type_ComplexSingleFloat: + case SIMPLE_VECTOR_WIDETAG: + case RATIO_WIDETAG: + case COMPLEX_WIDETAG: +#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG + case COMPLEX_SINGLE_FLOAT_WIDETAG: +#endif +#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG + case COMPLEX_DOUBLE_FLOAT_WIDETAG: +#endif +#ifdef COMPLEX_LONG_FLOAT_WIDETAG + case COMPLEX_LONG_FLOAT_WIDETAG: +#endif + case SIMPLE_ARRAY_WIDETAG: + case COMPLEX_BASE_STRING_WIDETAG: +#ifdef COMPLEX_CHARACTER_STRING_WIDETAG + case COMPLEX_CHARACTER_STRING_WIDETAG: +#endif + case COMPLEX_VECTOR_NIL_WIDETAG: + case COMPLEX_BIT_VECTOR_WIDETAG: + case COMPLEX_VECTOR_WIDETAG: + case COMPLEX_ARRAY_WIDETAG: + case VALUE_CELL_HEADER_WIDETAG: + case SYMBOL_HEADER_WIDETAG: + case FDEFN_WIDETAG: + case CODE_HEADER_WIDETAG: + case BIGNUM_WIDETAG: +#if N_WORD_BITS != 64 + case SINGLE_FLOAT_WIDETAG: +#endif + case DOUBLE_FLOAT_WIDETAG: +#ifdef LONG_FLOAT_WIDETAG + case LONG_FLOAT_WIDETAG: +#endif + case SIMPLE_BASE_STRING_WIDETAG: +#ifdef SIMPLE_CHARACTER_STRING_WIDETAG + case SIMPLE_CHARACTER_STRING_WIDETAG: +#endif + case SIMPLE_BIT_VECTOR_WIDETAG: + case SIMPLE_ARRAY_NIL_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: #endif -#ifdef type_ComplexDoubleFloat - case type_ComplexDoubleFloat: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: #endif -#ifdef type_ComplexLongFloat - case type_ComplexLongFloat: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: #endif - case type_SimpleArray: - case type_ComplexString: - case type_ComplexBitVector: - case type_ComplexVector: - case type_ComplexArray: - case type_ValueCellHeader: - case type_SymbolHeader: - case type_Fdefn: - case type_CodeHeader: - case type_Bignum: - case type_SingleFloat: - case type_DoubleFloat: -#ifdef type_LongFloat - case type_LongFloat: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG + case 
SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: #endif - case type_SimpleString: - case type_SimpleBitVector: - case type_SimpleArrayUnsignedByte2: - case type_SimpleArrayUnsignedByte4: - case type_SimpleArrayUnsignedByte8: - case type_SimpleArrayUnsignedByte16: - case type_SimpleArrayUnsignedByte32: -#ifdef type_SimpleArraySignedByte8 - case type_SimpleArraySignedByte8: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte16 - case type_SimpleArraySignedByte16: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte30 - case type_SimpleArraySignedByte30: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte32 - case type_SimpleArraySignedByte32: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: #endif - case type_SimpleArraySingleFloat: - case type_SimpleArrayDoubleFloat: -#ifdef type_SimpleArrayLongFloat - case type_SimpleArrayLongFloat: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: #endif -#ifdef type_SimpleArrayComplexSingleFloat - case type_SimpleArrayComplexSingleFloat: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: #endif -#ifdef type_SimpleArrayComplexDoubleFloat - case type_SimpleArrayComplexDoubleFloat: + case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG: + case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG: +#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG + case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG: #endif -#ifdef type_SimpleArrayComplexLongFloat - case type_SimpleArrayComplexLongFloat: +#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG: #endif - case type_Sap: - case type_WeakPointer: +#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG: +#endif + case SAP_WIDETAG: + case WEAK_POINTER_WIDETAG: break; default: @@ -4144,72 +2269,98 @@ valid_dynamic_space_pointer(lispobj *pointer) return 1; } -/* Adjust large bignum and vector objects. This will adjust the allocated - * region if the size has shrunk, and move unboxed objects into unboxed - * pages. The pages are not promoted here, and the promoted region is not - * added to the new_regions; this is really only designed to be called from - * preserve_pointer. Shouldn't fail if this is missed, just may delay the - * moving of objects to unboxed pages, and the freeing of pages. */ +/* Adjust large bignum and vector objects. This will adjust the + * allocated region if the size has shrunk, and move unboxed objects + * into unboxed pages. The pages are not promoted here, and the + * promoted region is not added to the new_regions; this is really + * only designed to be called from preserve_pointer(). Shouldn't fail + * if this is missed, just may delay the moving of objects to unboxed + * pages, and the freeing of pages. */ static void maybe_adjust_large_object(lispobj *where) { - int first_page; - int nwords; + long first_page; + long nwords; - int remaining_bytes; - int next_page; - int bytes_freed; - int old_bytes_used; + long remaining_bytes; + long next_page; + long bytes_freed; + long old_bytes_used; int boxed; /* Check whether it's a vector or bignum object. 
*/ - switch (TypeOf(where[0])) { - case type_SimpleVector: - boxed = BOXED_PAGE; + switch (widetag_of(where[0])) { + case SIMPLE_VECTOR_WIDETAG: + boxed = BOXED_PAGE_FLAG; break; - case type_Bignum: - case type_SimpleString: - case type_SimpleBitVector: - case type_SimpleArrayUnsignedByte2: - case type_SimpleArrayUnsignedByte4: - case type_SimpleArrayUnsignedByte8: - case type_SimpleArrayUnsignedByte16: - case type_SimpleArrayUnsignedByte32: -#ifdef type_SimpleArraySignedByte8 - case type_SimpleArraySignedByte8: + case BIGNUM_WIDETAG: + case SIMPLE_BASE_STRING_WIDETAG: +#ifdef SIMPLE_CHARACTER_STRING_WIDETAG + case SIMPLE_CHARACTER_STRING_WIDETAG: +#endif + case SIMPLE_BIT_VECTOR_WIDETAG: + case SIMPLE_ARRAY_NIL_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: +#endif + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte16 - case type_SimpleArraySignedByte16: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte30 - case type_SimpleArraySignedByte30: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte32 - case type_SimpleArraySignedByte32: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: #endif - case type_SimpleArraySingleFloat: - case type_SimpleArrayDoubleFloat: -#ifdef type_SimpleArrayLongFloat - case type_SimpleArrayLongFloat: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: #endif -#ifdef type_SimpleArrayComplexSingleFloat - case type_SimpleArrayComplexSingleFloat: + case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG: + case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG: +#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG + case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG: #endif -#ifdef type_SimpleArrayComplexDoubleFloat - case type_SimpleArrayComplexDoubleFloat: +#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG: #endif -#ifdef type_SimpleArrayComplexLongFloat - case type_SimpleArrayComplexLongFloat: +#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG: #endif - boxed = UNBOXED_PAGE; +#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG: +#endif + boxed = UNBOXED_PAGE_FLAG; break; default: return; } /* Find its current size. 
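For the size and page bookkeeping that follows: an object of nwords words occupies nwords*N_WORD_BYTES bytes, every page before the last must show bytes_used == PAGE_BYTES, and the final page's bytes_used is the remainder. A back-of-envelope helper (illustrative only, not from the patch):

static long
large_object_pages(long nwords)
{
    long nbytes = nwords * N_WORD_BYTES;
    long full_pages = nbytes / PAGE_BYTES;  /* pages with bytes_used == PAGE_BYTES */
    long tail_bytes = nbytes % PAGE_BYTES;  /* bytes_used of the final page */

    return full_pages + (tail_bytes ? 1 : 0);
}

When the object has shrunk, every from_space page past that count is handed back, which is what the loop over next_page below does.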
*/ - nwords = (sizetab[TypeOf(where[0])])(where); + nwords = (sizetab[widetag_of(where[0])])(where); first_page = find_page_index((void *)where); gc_assert(first_page >= 0); @@ -4223,22 +2374,22 @@ maybe_adjust_large_object(lispobj *where) gc_assert(page_table[first_page].first_object_offset == 0); next_page = first_page; - remaining_bytes = nwords*4; - while (remaining_bytes > 4096) { + remaining_bytes = nwords*N_WORD_BYTES; + while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == BOXED_PAGE) - || (page_table[next_page].allocated == UNBOXED_PAGE)); + gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG) + || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset == - -4096*(next_page-first_page)); - gc_assert(page_table[next_page].bytes_used == 4096); + -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].allocated = boxed; /* Shouldn't be write-protected at this stage. Essential that the * pages aren't. */ gc_assert(!page_table[next_page].write_protected); - remaining_bytes -= 4096; + remaining_bytes -= PAGE_BYTES; next_page++; } @@ -4260,13 +2411,13 @@ maybe_adjust_large_object(lispobj *where) /* Free any remaining pages; needs care. */ next_page++; - while ((old_bytes_used == 4096) && + while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)) && + ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == - -(next_page - first_page)*4096)) { + -(next_page - first_page)*PAGE_BYTES)) { /* It checks out OK, free the page. We don't need to bother zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write protected as they @@ -4274,14 +2425,17 @@ maybe_adjust_large_object(lispobj *where) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; } - if ((bytes_freed > 0) && gencgc_verbose) - FSHOW((stderr, "/adjust_large_object freed %d\n", bytes_freed)); + if ((bytes_freed > 0) && gencgc_verbose) { + FSHOW((stderr, + "/maybe_adjust_large_object() freed %d\n", + bytes_freed)); + } generations[from_space].bytes_allocated -= bytes_freed; bytes_allocated -= bytes_freed; @@ -4289,74 +2443,88 @@ maybe_adjust_large_object(lispobj *where) return; } -/* Take a possible pointer to a list object and mark the page_table - * so that it will not need changing during a GC. +/* Take a possible pointer to a Lisp object and mark its page in the + * page_table so that it will not be relocated during a GC. * * This involves locating the page it points to, then backing up to - * the first page that has its first object start at offset 0, and - * then marking all pages dont_move from the first until a page that ends - * by being full, or having free gen. - * - * This ensures that objects spanning pages are not broken. 
+ * the start of its region, then marking all pages dont_move from there + * up to the first page that's not full or has a different generation * * It is assumed that all the page static flags have been cleared at * the start of a GC. * - * It is also assumed that the current gc_alloc region has been flushed and - * the tables updated. */ + * It is also assumed that the current gc_alloc() region has been + * flushed and the tables updated. */ static void preserve_pointer(void *addr) { - int addr_page_index = find_page_index(addr); - int first_page; - int i; + long addr_page_index = find_page_index(addr); + long first_page; + long i; unsigned region_allocation; - /* Address is quite likely to have been invalid - do some checks. */ + /* quick check 1: Address is quite likely to have been invalid. */ if ((addr_page_index == -1) - || (page_table[addr_page_index].allocated == FREE_PAGE) + || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG) || (page_table[addr_page_index].bytes_used == 0) || (page_table[addr_page_index].gen != from_space) - /* Skip if already marked dont_move */ + /* Skip if already marked dont_move. */ || (page_table[addr_page_index].dont_move != 0)) return; - + gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG)); + /* (Now that we know that addr_page_index is in range, it's + * safe to index into page_table[] with it.) */ region_allocation = page_table[addr_page_index].allocated; - /* Check the offset within the page. + /* quick check 2: Check the offset within the page. * - * FIXME: The mask should have a symbolic name, and ideally should - * be derived from page size instead of hardwired to 0xfff. - * (Also fix other uses of 0xfff, elsewhere.) */ - if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used) + */ + if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used) return; - if (enable_pointer_filter && !valid_dynamic_space_pointer(addr)) + /* Filter out anything which can't be a pointer to a Lisp object + * (or, as a special case which also requires dont_move, a return + * address referring to something in a CodeObject). This is + * expensive but important, since it vastly reduces the + * probability that random garbage will be bogusly interpreted as + * a pointer which prevents a page from moving. */ + if (!(possibly_valid_dynamic_space_pointer(addr))) return; - /* Work backwards to find a page with a first_object_offset of 0. - * The pages should be contiguous with all bytes used in the same - * gen. Assumes the first_object_offset is negative or zero. */ + /* Find the beginning of the region. Note that there may be + * objects in the region preceding the one that we were passed a + * pointer to: if this is the case, we will write-protect all the + * previous objects' pages too. */ + +#if 0 + /* I think this'd work just as well, but without the assertions. + * -dan 2004.01.01 */ + first_page= + find_page_index(page_address(addr_page_index)+ + page_table[addr_page_index].first_object_offset); +#else first_page = addr_page_index; while (page_table[first_page].first_object_offset != 0) { - first_page--; + --first_page; /* Do some checks. */ - gc_assert(page_table[first_page].bytes_used == 4096); + gc_assert(page_table[first_page].bytes_used == PAGE_BYTES); gc_assert(page_table[first_page].gen == from_space); gc_assert(page_table[first_page].allocated == region_allocation); } +#endif - /* Adjust any large objects before promotion as they won't be copied - * after promotion. 
*/ + /* Adjust any large objects before promotion as they won't be + * copied after promotion. */ if (page_table[first_page].large_object) { maybe_adjust_large_object(page_address(first_page)); - /* If a large object has shrunk then addr may now point to a free - * area in which case it's ignored here. Note it gets through the - * valid pointer test above because the tail looks like conses. */ - if ((page_table[addr_page_index].allocated == FREE_PAGE) + /* If a large object has shrunk then addr may now point to a + * free area in which case it's ignored here. Note it gets + * through the valid pointer test above because the tail looks + * like conses. */ + if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ - || (((unsigned)addr & 0xfff) + || (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)) { FSHOW((stderr, "weird? ignore ptr 0x%x to freed area of large object\n", @@ -4375,23 +2543,24 @@ preserve_pointer(void *addr) /* Mark the page static. */ page_table[i].dont_move = 1; - /* Move the page to the new_space. XX I'd rather not do this but - * the GC logic is not quite able to copy with the static pages - * remaining in the from space. This also requires the generation - * bytes_allocated counters be updated. */ + /* Move the page to the new_space. XX I'd rather not do this + * but the GC logic is not quite able to cope with the static + * pages remaining in the from space. This also requires the + * generation bytes_allocated counters be updated. */ page_table[i].gen = new_space; generations[new_space].bytes_allocated += page_table[i].bytes_used; generations[from_space].bytes_allocated -= page_table[i].bytes_used; - /* It is essential that the pages are not write protected as they - * may have pointers into the old-space which need scavenging. They - * shouldn't be write protected at this stage. */ + /* It is essential that the pages are not write protected as + * they may have pointers into the old-space which need + * scavenging. They shouldn't be write protected at this + * stage. */ gc_assert(!page_table[i].write_protected); /* Check whether this is the last page in this contiguous block.. */ - if ((page_table[i].bytes_used < 4096) - /* ..or it is 4096 and is the last in the block */ - || (page_table[i+1].allocated == FREE_PAGE) + if ((page_table[i].bytes_used < PAGE_BYTES) + /* ..or it is PAGE_BYTES and is the last in the block */ + || (page_table[i+1].allocated == FREE_PAGE_FLAG) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. gen */ || (page_table[i+1].first_object_offset == 0)) @@ -4400,107 +2569,38 @@ preserve_pointer(void *addr) break; /* Check that the page is now static. */ gc_assert(page_table[addr_page_index].dont_move != 0); - - return; -} - -#ifdef CONTROL_STACKS -/* Scavenge the thread stack conservative roots. 
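Boiled down, the pinning loop that just ended does the following; a sketch against the file's page_table and generations globals (the assertions shown above are interleaved in the real loop):

static void
pin_contiguous_block_sketch(long first_page)
{
    long i;

    for (i = first_page; ; i++) {
        page_table[i].dont_move = 1;

        /* Re-account the page to new_space so that it is not swept
         * along with the rest of from_space. */
        page_table[i].gen = new_space;
        generations[new_space].bytes_allocated += page_table[i].bytes_used;
        generations[from_space].bytes_allocated -= page_table[i].bytes_used;

        /* Stop at the last page of the contiguous block. */
        if ((page_table[i].bytes_used < PAGE_BYTES)
            || (page_table[i+1].allocated == FREE_PAGE_FLAG)
            || (page_table[i+1].bytes_used == 0)
            || (page_table[i+1].gen != from_space)
            || (page_table[i+1].first_object_offset == 0))
            break;
    }
}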
*/ -static void -scavenge_thread_stacks(void) -{ - lispobj thread_stacks = SymbolValue(CONTROL_STACKS); - int type = TypeOf(thread_stacks); - - if (LowtagOf(thread_stacks) == type_OtherPointer) { - struct vector *vector = (struct vector *) PTR(thread_stacks); - int length, i; - if (TypeOf(vector->header) != type_SimpleVector) - return; - length = fixnum_value(vector->length); - for (i = 0; i < length; i++) { - lispobj stack_obj = vector->data[i]; - if (LowtagOf(stack_obj) == type_OtherPointer) { - struct vector *stack = (struct vector *) PTR(stack_obj); - int vector_length; - if (TypeOf(stack->header) != - type_SimpleArrayUnsignedByte32) { - return; - } - vector_length = fixnum_value(stack->length); - if ((gencgc_verbose > 1) && (vector_length <= 0)) - FSHOW((stderr, - "/weird? control stack vector length %d\n", - vector_length)); - if (vector_length > 0) { - lispobj *stack_pointer = (lispobj*)stack->data[0]; - if ((stack_pointer < (lispobj *)CONTROL_STACK_START) || - (stack_pointer > (lispobj *)CONTROL_STACK_END)) - lose("invalid stack pointer %x", - (unsigned)stack_pointer); - if ((stack_pointer > (lispobj *)CONTROL_STACK_START) && - (stack_pointer < (lispobj *)CONTROL_STACK_END)) { - /* FIXME: Ick! - * (1) hardwired word length = 4; and as usual, - * when fixing this, check for other places - * with the same problem - * (2) calling it 'length' suggests bytes; - * perhaps 'size' instead? */ - unsigned int length = ((unsigned)CONTROL_STACK_END - - (unsigned)stack_pointer) / 4; - int j; - if (length >= vector_length) { - lose("invalid stack size %d >= vector length %d", - length, - vector_length); - } - if (gencgc_verbose > 1) { - FSHOW((stderr, - "scavenging %d words of control stack %d of length %d words.\n", - length, i, vector_length)); - } - for (j = 0; j < length; j++) { - preserve_pointer((void *)stack->data[1+j]); - } - } - } - } - } - } } -#endif - /* If the given page is not write-protected, then scan it for pointers * to younger generations or the top temp. generation, if no * suspicious pointers are found then the page is write-protected. * - * Care is taken to check for pointers to the current gc_alloc region - * if it is a younger generation or the temp. generation. This frees - * the caller from doing a gc_alloc_update_page_tables. Actually the - * gc_alloc_generation does not need to be checked as this is only - * called from scavenge_generation when the gc_alloc generation is + * Care is taken to check for pointers to the current gc_alloc() + * region if it is a younger generation or the temp. generation. This + * frees the caller from doing a gc_alloc_update_page_tables(). Actually + * the gc_alloc_generation does not need to be checked as this is only + * called from scavenge_generation() when the gc_alloc generation is * younger, so it just checks if there is a pointer to the current * region. * - * We return 1 if the page was write-protected, else 0. - */ + * We return 1 if the page was write-protected, else 0. */ static int -update_page_write_prot(int page) +update_page_write_prot(long page) { int gen = page_table[page].gen; - int j; + long j; int wp_it = 1; void **page_addr = (void **)page_address(page); - int num_words = page_table[page].bytes_used / 4; + long num_words = page_table[page].bytes_used / N_WORD_BYTES; /* Shouldn't be a free page. */ - gc_assert(page_table[page].allocated != FREE_PAGE); + gc_assert(page_table[page].allocated != FREE_PAGE_FLAG); gc_assert(page_table[page].bytes_used != 0); - /* Skip if it's already write-protected or an unboxed page. 
*/ + /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected - || (page_table[page].allocated == UNBOXED_PAGE)) + || page_table[page].dont_move + || (page_table[page].allocated & UNBOXED_PAGE_FLAG)) return (0); /* Scan the page for pointers to younger generations or the @@ -4508,17 +2608,17 @@ update_page_write_prot(int page) for (j = 0; j < num_words; j++) { void *ptr = *(page_addr+j); - int index = find_page_index(ptr); + long index = find_page_index(ptr); /* Check that it's in the dynamic space */ if (index != -1) if (/* Does it point to a younger or the temp. generation? */ - ((page_table[index].allocated != FREE_PAGE) + ((page_table[index].allocated != FREE_PAGE_FLAG) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) || (page_table[index].gen == NUM_GENERATIONS))) - /* Or does it point within a current gc_alloc region? */ + /* Or does it point within a current gc_alloc() region? */ || ((boxed_region.start_addr <= ptr) && (ptr <= boxed_region.free_pointer)) || ((unboxed_region.start_addr <= ptr) @@ -4533,7 +2633,7 @@ update_page_write_prot(int page) /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/ os_protect((void *)page_addr, - 4096, + PAGE_BYTES, OS_VM_PROT_READ|OS_VM_PROT_EXECUTE); /* Note the page as protected in the page tables. */ @@ -4546,7 +2646,7 @@ update_page_write_prot(int page) /* Scavenge a generation. * * This will not resolve all pointers when generation is the new - * space, as new objects may be added which are not check here - use + * space, as new objects may be added which are not checked here - use * scavenge_newspace generation. * * Write-protected pages should not have any pointers to the @@ -4577,7 +2677,7 @@ update_page_write_prot(int page) static void scavenge_generation(int generation) { - int i; + long i; int num_wp = 0; #define SC_GEN_CK 0 @@ -4588,61 +2688,43 @@ scavenge_generation(int generation) #endif for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated == BOXED_PAGE) + if ((page_table[i].allocated & BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { - int last_page; + long last_page,j; + int write_protected=1; - /* This should be the start of a contiguous block. */ + /* This should be the start of a region */ gc_assert(page_table[i].first_object_offset == 0); - /* We need to find the full extent of this contiguous - * block in case objects span pages. */ - - /* Now work forward until the end of this contiguous area - * is found. A small area is preferred as there is a - * better chance of its pages being write-protected. */ - for (last_page = i; ;last_page++) - /* Check whether this is the last page in this contiguous - * block. */ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ - || (page_table[last_page+1].allocated != BOXED_PAGE) + /* Now work forward until the end of the region */ + for (last_page = i; ; last_page++) { + write_protected = + write_protected && page_table[last_page].write_protected; + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ + || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].first_object_offset == 0)) break; - - /* Do a limited check for write_protected pages. If all pages - * are write_protected then there is no need to scavenge. 
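The test update_page_write_prot() applies above is worth isolating: a boxed page may be protected only if no word on it can be read as a pointer to a younger generation, to the scratch generation, or to an open allocation region. A hypothetical predicate, with the open-region checks omitted for brevity:

static int
page_may_be_protected_sketch(long page, int gen)
{
    long nwords = page_table[page].bytes_used / N_WORD_BYTES;
    void **words = (void **)page_address(page);
    long j;

    for (j = 0; j < nwords; j++) {
        long target = find_page_index(words[j]);

        if ((target != -1)
            && (page_table[target].bytes_used != 0)
            && ((page_table[target].gen < gen)
                || (page_table[target].gen == NUM_GENERATIONS)))
            return 0;  /* old->young pointer: page must stay writable */
    }
    return 1;          /* safe to os_protect() and skip in future scans */
}

Because every word is treated as a potential pointer, a raw bit pattern can at worst keep a page unprotected, never wrongly protect one, so the barrier errs on the safe side.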
*/ - { - int j, all_wp = 1; - for (j = i; j <= last_page; j++) - if (page_table[j].write_protected == 0) { - all_wp = 0; - break; - } -#if !SC_GEN_CK - if (all_wp == 0) -#endif - { - scavenge(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*4096)/4); - - /* Now scan the pages and write protect those - * that don't have pointers to younger - * generations. */ - if (enable_page_protection) { - for (j = i; j <= last_page; j++) { - num_wp += update_page_write_prot(j); - } - } + } + if (!write_protected) { + scavenge(page_address(i), + (page_table[last_page].bytes_used + + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + + /* Now scan the pages and write protect those that + * don't have pointers to younger generations. */ + if (enable_page_protection) { + for (j = i; j <= last_page; j++) { + num_wp += update_page_write_prot(j); } + } } i = last_page; } } - if ((gencgc_verbose > 1) && (num_wp != 0)) { FSHOW((stderr, "/write protected %d pages within generation %d\n", @@ -4653,18 +2735,17 @@ scavenge_generation(int generation) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < NUM_PAGES; i++) { - if ((page_table[i].allocation ! =FREE_PAGE) + if ((page_table[i].allocation != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0)) { - FSHOW((stderr, "/scavenge_generation %d\n", generation)); + FSHOW((stderr, "/scavenge_generation() %d\n", generation)); FSHOW((stderr, "/page bytes_used=%d first_object_offset=%d dont_move=%d\n", page_table[i].bytes_used, page_table[i].first_object_offset, page_table[i].dont_move)); - lose("write-protected page %d written to in scavenge_generation", - i); + lose("write to protected page %d in scavenge_generation()", i); } } #endif @@ -4677,7 +2758,7 @@ scavenge_generation(int generation) * newspace generation. * * To help improve the efficiency, areas written are recorded by - * gc_alloc and only these scavenged. Sometimes a little more will be + * gc_alloc() and only these scavenged. Sometimes a little more will be * scavenged, but this causes no harm. An easy check is done that the * scavenged bytes equals the number allocated in the previous * scavenge. @@ -4688,7 +2769,7 @@ scavenge_generation(int generation) * * Write-protected pages could potentially be written by alloc however * to avoid having to handle re-scavenging of write-protected pages - * gc_alloc does not write to write-protected pages. + * gc_alloc() does not write to write-protected pages. * * New areas of objects allocated are recorded alternatively in the two * new_areas arrays below. */ @@ -4701,21 +2782,22 @@ static struct new_area new_areas_2[NUM_NEW_AREAS]; static void scavenge_newspace_generation_one_scan(int generation) { - int i; + long i; FSHOW((stderr, "/starting one full scan of newspace generation %d\n", generation)); - for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated == BOXED_PAGE) + /* Note that this skips over open regions when it encounters them. */ + if ((page_table[i].allocated & BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && ((page_table[i].write_protected == 0) /* (This may be redundant as write_protected is now * cleared before promotion.) */ || (page_table[i].dont_move == 1))) { - int last_page; + long last_page; + int all_wp=1; /* The scavenge will start at the first_object_offset of page i. 
* @@ -4726,52 +2808,36 @@ scavenge_newspace_generation_one_scan(int generation) * is found. A small area is preferred as there is a * better chance of its pages being write-protected. */ for (last_page = i; ;last_page++) { + /* If all pages are write-protected and movable, + * then no need to scavenge */ + all_wp=all_wp && page_table[last_page].write_protected && + !page_table[last_page].dont_move; + /* Check whether this is the last page in this * contiguous block */ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ - || (page_table[last_page+1].allocated != BOXED_PAGE) + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ + || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].first_object_offset == 0)) break; } - /* Do a limited check for write-protected pages. If all - * pages are write-protected then no need to scavenge, - * except if the pages are marked dont_move. */ - { - int j, all_wp = 1; - for (j = i; j <= last_page; j++) - if ((page_table[j].write_protected == 0) - || (page_table[j].dont_move != 0)) { - all_wp = 0; - break; - } - - if (!all_wp) { - int size; - - /* Calculate the size. */ - if (last_page == i) - size = (page_table[last_page].bytes_used - - page_table[i].first_object_offset)/4; - else - size = (page_table[last_page].bytes_used - + (last_page-i)*4096 - - page_table[i].first_object_offset)/4; - - { - new_areas_ignore_page = last_page; - - scavenge(page_address(i) + - page_table[i].first_object_offset, - size); - - } - } + /* Do a limited check for write-protected pages. */ + if (!all_wp) { + long size; + + size = (page_table[last_page].bytes_used + + (last_page-i)*PAGE_BYTES + - page_table[i].first_object_offset)/N_WORD_BYTES; + new_areas_ignore_page = last_page; + + scavenge(page_address(i) + + page_table[i].first_object_offset, + size); + } - i = last_page; } } @@ -4784,21 +2850,20 @@ scavenge_newspace_generation_one_scan(int generation) static void scavenge_newspace_generation(int generation) { - int i; + long i; - /* the new_areas array currently being written to by gc_alloc */ + /* the new_areas array currently being written to by gc_alloc() */ struct new_area (*current_new_areas)[] = &new_areas_1; - int current_new_areas_index; + long current_new_areas_index; - /* the new_areas created but the previous scavenge cycle */ + /* the new_areas created by the previous scavenge cycle */ struct new_area (*previous_new_areas)[] = NULL; - int previous_new_areas_index; + long previous_new_areas_index; /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); - /* Turn on the recording of new areas by gc_alloc. */ + /* Turn on the recording of new areas by gc_alloc(). */ new_areas = current_new_areas; new_areas_index = 0; @@ -4813,8 +2878,7 @@ scavenge_newspace_generation(int generation) record_new_objects = 2; /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); /* Grab new_areas_index. */ current_new_areas_index = new_areas_index; @@ -4838,7 +2902,7 @@ scavenge_newspace_generation(int generation) else current_new_areas = &new_areas_1; - /* Set up for gc_alloc. 
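Stripped of the record_new_objects bookkeeping and of the overflow path (where more than NUM_NEW_AREAS entries force another full scan), the loop being built here is a double-buffered fixpoint. A condensed sketch using the file's globals:

static void
newspace_fixpoint_sketch(int generation)
{
    struct new_area (*current)[NUM_NEW_AREAS] = &new_areas_1;
    long n, i;

    /* Seed pass: one full scan of newspace, recording fresh allocations. */
    new_areas = current;
    new_areas_index = 0;
    scavenge_newspace_generation_one_scan(generation);
    gc_alloc_update_all_page_tables();
    n = new_areas_index;

    while (n > 0) {
        struct new_area (*previous)[NUM_NEW_AREAS] = current;
        long prev_n = n;

        /* Swap buffers: gc_alloc() records into one while the other is walked. */
        current = (current == &new_areas_1) ? &new_areas_2 : &new_areas_1;
        new_areas = current;
        new_areas_index = 0;

        for (i = 0; i < prev_n; i++)
            scavenge(page_address((*previous)[i].page) + (*previous)[i].offset,
                     (*previous)[i].size / N_WORD_BYTES);

        gc_alloc_update_all_page_tables();
        n = new_areas_index;  /* a cycle that records nothing ends the loop */
    }
}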
*/ + /* Set up for gc_alloc(). */ new_areas = current_new_areas; new_areas_index = 0; @@ -4861,26 +2925,21 @@ scavenge_newspace_generation(int generation) record_new_objects = 2; /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); } else { /* Work through previous_new_areas. */ for (i = 0; i < previous_new_areas_index; i++) { - /* FIXME: All these bare *4 and /4 should be something - * like BYTES_PER_WORD or WBYTES. */ - int page = (*previous_new_areas)[i].page; - int offset = (*previous_new_areas)[i].offset; - int size = (*previous_new_areas)[i].size / 4; - gc_assert((*previous_new_areas)[i].size % 4 == 0); - + long page = (*previous_new_areas)[i].page; + long offset = (*previous_new_areas)[i].offset; + long size = (*previous_new_areas)[i].size / N_WORD_BYTES; + gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0); scavenge(page_address(page)+offset, size); } /* Flush the current regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); } current_new_areas_index = new_areas_index; @@ -4890,14 +2949,14 @@ scavenge_newspace_generation(int generation) current_new_areas_index));*/ } - /* Turn off recording of areas allocated by gc_alloc. */ + /* Turn off recording of areas allocated by gc_alloc(). */ record_new_objects = 0; #if SC_NS_GEN_CK /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < NUM_PAGES; i++) { - if ((page_table[i].allocation != FREE_PAGE) + if ((page_table[i].allocation != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) @@ -4917,10 +2976,10 @@ scavenge_newspace_generation(int generation) static void unprotect_oldspace(void) { - int i; + long i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == from_space)) { void *page_start; @@ -4930,7 +2989,7 @@ unprotect_oldspace(void) /* Remove any write-protection. We should be able to rely * on the write-protect flag to avoid redundant calls. */ if (page_table[i].write_protected) { - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[i].write_protected = 0; } } @@ -4941,19 +3000,18 @@ unprotect_oldspace(void) * assumes that all objects have been copied or promoted to an older * generation. Bytes_allocated and the generation bytes_allocated * counter are updated. The number of bytes freed is returned. */ -extern void i586_bzero(void *addr, int nbytes); -static int +static long free_oldspace(void) { - int bytes_freed = 0; - int first_page, last_page; + long bytes_freed = 0; + long first_page, last_page; first_page = 0; do { /* Find a first page for the next region of pages. 
*/ while ((first_page < last_free_page) - && ((page_table[first_page].allocated == FREE_PAGE) + && ((page_table[first_page].allocated == FREE_PAGE_FLAG) || (page_table[first_page].bytes_used == 0) || (page_table[first_page].gen != from_space))) first_page++; @@ -4969,7 +3027,7 @@ free_oldspace(void) bytes_freed += page_table[last_page].bytes_used; generations[page_table[last_page].gen].bytes_allocated -= page_table[last_page].bytes_used; - page_table[last_page].allocated = FREE_PAGE; + page_table[last_page].allocated = FREE_PAGE_FLAG; page_table[last_page].bytes_used = 0; /* Remove any write-protection. We should be able to rely @@ -4978,14 +3036,14 @@ free_oldspace(void) void *page_start = (void *)page_address(last_page); if (page_table[last_page].write_protected) { - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[last_page].write_protected = 0; } } last_page++; } while ((last_page < last_free_page) - && (page_table[last_page].allocated != FREE_PAGE) + && (page_table[last_page].allocated != FREE_PAGE_FLAG) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); @@ -4999,26 +3057,17 @@ free_oldspace(void) page_start = (void *)page_address(first_page); - os_invalidate(page_start, 4096*(last_page-first_page)); - addr = os_validate(page_start, 4096*(last_page-first_page)); + os_invalidate(page_start, PAGE_BYTES*(last_page-first_page)); + addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page)); if (addr == NULL || addr != page_start) { - /* Is this an error condition? I couldn't really tell from - * the old CMU CL code, which fprintf'ed a message with - * an exclamation point at the end. But I've never seen the - * message, so it must at least be unusual.. - * - * (The same condition is also tested for in gc_free_heap.) - * - * -- WHN 19991129 */ - lose("i586_bzero: page moved, 0x%08x ==> 0x%08x", - page_start, + lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start, addr); } } else { - int *page_start; + long *page_start; - page_start = (int *)page_address(first_page); - i586_bzero(page_start, 4096*(last_page-first_page)); + page_start = (long *)page_address(first_page); + memset(page_start, 0,PAGE_BYTES*(last_page-first_page)); } first_page = last_page; @@ -5029,16 +3078,17 @@ free_oldspace(void) return bytes_freed; } +#if 0 /* Print some information about a pointer at the given address. */ static void print_ptr(lispobj *addr) { /* If addr is in the dynamic space then print out the page information. 
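The choice made above between unmapping and storing zeroes generalizes to a small helper; hypothetical, assuming the file's gencgc_unmap_zero flag and the os_invalidate()/os_validate() wrappers:

static void
zero_page_range_sketch(long first_page, long last_page)  /* [first, last) */
{
    void *start = page_address(first_page);
    long nbytes = PAGE_BYTES * (last_page - first_page);

    if (gencgc_unmap_zero) {
        /* Hand the memory back to the OS: it comes back zero-filled
         * and costs nothing until it is touched again. */
        os_invalidate(start, nbytes);
        if (os_validate(start, nbytes) != start)
            lose("zero_page_range_sketch: page moved");
    } else {
        /* A plain store; preferable where remapping thrashes swap
         * (see the *BSD note near the top of the file). */
        memset(start, 0, nbytes);
    }
}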
*/ - int pi1 = find_page_index((void*)addr); + long pi1 = find_page_index((void*)addr); if (pi1 != -1) fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n", - (unsigned int) addr, + (unsigned long) addr, pi1, page_table[pi1].allocated, page_table[pi1].gen, @@ -5056,8 +3106,9 @@ print_ptr(lispobj *addr) *(addr+3), *(addr+4)); } +#endif -extern int undefined_tramp; +extern long undefined_tramp; static void verify_space(lispobj *start, size_t words) @@ -5065,30 +3116,30 @@ verify_space(lispobj *start, size_t words) int is_in_dynamic_space = (find_page_index((void*)start) != -1); int is_in_readonly_space = (READ_ONLY_SPACE_START <= (unsigned)start && - (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER)); + (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); while (words > 0) { size_t count = 1; lispobj thing = *(lispobj*)start; - if (Pointerp(thing)) { - int page_index = find_page_index((void*)thing); - int to_readonly_space = + if (is_lisp_pointer(thing)) { + long page_index = find_page_index((void*)thing); + long to_readonly_space = (READ_ONLY_SPACE_START <= thing && - thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER)); - int to_static_space = + thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); + long to_static_space = (STATIC_SPACE_START <= thing && - thing < SymbolValue(STATIC_SPACE_FREE_POINTER)); + thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0)); /* Does it point to the dynamic space? */ if (page_index != -1) { /* If it's within the dynamic space it should point to a used * page. XX Could check the offset too. */ - if ((page_table[page_index].allocated != FREE_PAGE) + if ((page_table[page_index].allocated != FREE_PAGE_FLAG) && (page_table[page_index].bytes_used == 0)) lose ("Ptr %x @ %x sees free page.", thing, start); /* Check that it doesn't point to a forwarding pointer! */ - if (*((lispobj *)PTR(thing)) == 0x01) { + if (*((lispobj *)native_pointer(thing)) == 0x01) { lose("Ptr %x @ %x sees forwarding ptr.", thing, start); } /* Check that its not in the RO space as it would then be a @@ -5100,9 +3151,17 @@ verify_space(lispobj *start, size_t words) /* Does it point to a plausible object? This check slows * it down a lot (so it's commented out). * - * FIXME: Add a variable to enable this dynamically. */ - /* if (!valid_dynamic_space_pointer((lispobj *)thing)) { - * lose("ptr %x to invalid object %x", thing, start); */ + * "a lot" is serious: it ate 50 minutes cpu time on + * my duron 950 before I came back from lunch and + * killed it. + * + * FIXME: Add a variable to enable this + * dynamically. */ + /* + if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) { + lose("ptr %x to invalid object %x", thing, start); + } + */ } else { /* Verify that it points to another valid space. */ if (!to_readonly_space && !to_static_space @@ -5111,40 +3170,44 @@ verify_space(lispobj *start, size_t words) } } } else { - if (thing & 0x3) { /* Skip fixnums. FIXME: There should be an - * is_fixnum for this. 
*/ - - switch(TypeOf(*start)) { + if (!(fixnump(thing))) { + /* skip fixnums */ + switch(widetag_of(*start)) { /* boxed objects */ - case type_SimpleVector: - case type_Ratio: - case type_Complex: - case type_SimpleArray: - case type_ComplexString: - case type_ComplexBitVector: - case type_ComplexVector: - case type_ComplexArray: - case type_ClosureHeader: - case type_FuncallableInstanceHeader: - case type_ByteCodeFunction: - case type_ByteCodeClosure: - case type_ValueCellHeader: - case type_SymbolHeader: - case type_BaseChar: - case type_UnboundMarker: - case type_InstanceHeader: - case type_Fdefn: + case SIMPLE_VECTOR_WIDETAG: + case RATIO_WIDETAG: + case COMPLEX_WIDETAG: + case SIMPLE_ARRAY_WIDETAG: + case COMPLEX_BASE_STRING_WIDETAG: +#ifdef COMPLEX_CHARACTER_STRING_WIDETAG + case COMPLEX_CHARACTER_STRING_WIDETAG: +#endif + case COMPLEX_VECTOR_NIL_WIDETAG: + case COMPLEX_BIT_VECTOR_WIDETAG: + case COMPLEX_VECTOR_WIDETAG: + case COMPLEX_ARRAY_WIDETAG: + case CLOSURE_HEADER_WIDETAG: + case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: + case VALUE_CELL_HEADER_WIDETAG: + case SYMBOL_HEADER_WIDETAG: + case CHARACTER_WIDETAG: +#if N_WORD_BITS == 64 + case SINGLE_FLOAT_WIDETAG: +#endif + case UNBOUND_MARKER_WIDETAG: + case INSTANCE_HEADER_WIDETAG: + case FDEFN_WIDETAG: count = 1; break; - case type_CodeHeader: + case CODE_HEADER_WIDETAG: { lispobj object = *start; struct code *code; - int nheader_words, ncode_words, nwords; + long nheader_words, ncode_words, nwords; lispobj fheaderl; - struct function *fheaderp; + struct simple_fun *fheaderp; code = (struct code *) start; @@ -5154,8 +3217,14 @@ verify_space(lispobj *start, size_t words) if (is_in_dynamic_space /* It's ok if it's byte compiled code. The trace * table offset will be a fixnum if it's x86 - * compiled code - check. */ - && !(code->trace_table_offset & 0x3) + * compiled code - check. + * + * FIXME: #^#@@! lack of abstraction here.. + * This line can probably go away now that + * there's no byte compiler, but I've got + * too much to worry about right now to try + * to make sure. -- WHN 2001-10-06 */ + && fixnump(code->trace_table_offset) /* Only when enabled */ && verify_dynamic_code_check) { FSHOW((stderr, @@ -5170,12 +3239,13 @@ verify_space(lispobj *start, size_t words) /* Scavenge the boxed section of the code data block */ verify_space(start + 1, nheader_words - 1); - /* Scavenge the boxed section of each function object in - * the code data block. */ + /* Scavenge the boxed section of each function + * object in the code data block. 
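The walk referred to here: simple-funs hang off the code header as a singly linked list through entry_points/next, and only their boxed slots are checked. Sketched with the file's genesis/ structure layouts:

static void
verify_code_functions_sketch(struct code *code)
{
    lispobj fheaderl = code->entry_points;

    while (fheaderl != NIL) {
        struct simple_fun *fheaderp =
            (struct simple_fun *) native_pointer(fheaderl);
        gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);

        /* Boxed slots only; the machine code itself is not verified. */
        verify_space(&fheaderp->name, 1);
        verify_space(&fheaderp->arglist, 1);
        verify_space(&fheaderp->type, 1);

        fheaderl = fheaderp->next;
    }
}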
*/ fheaderl = code->entry_points; while (fheaderl != NIL) { - fheaderp = (struct function *) PTR(fheaderl); - gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader); + fheaderp = + (struct simple_fun *) native_pointer(fheaderl); + gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG); verify_space(&fheaderp->name, 1); verify_space(&fheaderp->arglist, 1); verify_space(&fheaderp->type, 1); @@ -5186,57 +3256,84 @@ verify_space(lispobj *start, size_t words) } /* unboxed objects */ - case type_Bignum: - case type_SingleFloat: - case type_DoubleFloat: -#ifdef type_ComplexLongFloat - case type_LongFloat: + case BIGNUM_WIDETAG: +#if N_WORD_BITS != 64 + case SINGLE_FLOAT_WIDETAG: +#endif + case DOUBLE_FLOAT_WIDETAG: +#ifdef COMPLEX_LONG_FLOAT_WIDETAG + case LONG_FLOAT_WIDETAG: +#endif +#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG + case COMPLEX_SINGLE_FLOAT_WIDETAG: +#endif +#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG + case COMPLEX_DOUBLE_FLOAT_WIDETAG: +#endif +#ifdef COMPLEX_LONG_FLOAT_WIDETAG + case COMPLEX_LONG_FLOAT_WIDETAG: +#endif + case SIMPLE_BASE_STRING_WIDETAG: +#ifdef SIMPLE_CHARACTER_STRING_WIDETAG + case SIMPLE_CHARACTER_STRING_WIDETAG: +#endif + case SIMPLE_BIT_VECTOR_WIDETAG: + case SIMPLE_ARRAY_NIL_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: #endif -#ifdef type_ComplexSingleFloat - case type_ComplexSingleFloat: + case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: #endif -#ifdef type_ComplexDoubleFloat - case type_ComplexDoubleFloat: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: #endif -#ifdef type_ComplexLongFloat - case type_ComplexLongFloat: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: #endif - case type_SimpleString: - case type_SimpleBitVector: - case type_SimpleArrayUnsignedByte2: - case type_SimpleArrayUnsignedByte4: - case type_SimpleArrayUnsignedByte8: - case type_SimpleArrayUnsignedByte16: - case type_SimpleArrayUnsignedByte32: -#ifdef type_SimpleArraySignedByte8 - case type_SimpleArraySignedByte8: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte16 - case type_SimpleArraySignedByte16: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte30 - case type_SimpleArraySignedByte30: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG: #endif -#ifdef type_SimpleArraySignedByte32 - case type_SimpleArraySignedByte32: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: #endif - case type_SimpleArraySingleFloat: - case type_SimpleArrayDoubleFloat: -#ifdef type_SimpleArrayComplexLongFloat - case type_SimpleArrayLongFloat: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: #endif -#ifdef type_SimpleArrayComplexSingleFloat - case type_SimpleArrayComplexSingleFloat: +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: #endif -#ifdef 
type_SimpleArrayComplexDoubleFloat - case type_SimpleArrayComplexDoubleFloat: + case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG: + case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG: +#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG + case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG: #endif -#ifdef type_SimpleArrayComplexLongFloat - case type_SimpleArrayComplexLongFloat: +#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG: #endif - case type_Sap: - case type_WeakPointer: - count = (sizetab[TypeOf(*start)])(start); +#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG + case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG: +#endif + case SAP_WIDETAG: + case WEAK_POINTER_WIDETAG: + count = (sizetab[widetag_of(*start)])(start); break; default: @@ -5258,19 +3355,21 @@ verify_gc(void) * Some counts of lispobjs are called foo_count; it might be good * to grep for all foo_size and rename the appropriate ones to * foo_count. */ - int read_only_space_size = - (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) + long read_only_space_size = + (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0) - (lispobj*)READ_ONLY_SPACE_START; - int static_space_size = - (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER) + long static_space_size = + (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0) - (lispobj*)STATIC_SPACE_START; - int binding_stack_size = - (lispobj*)SymbolValue(BINDING_STACK_POINTER) - - (lispobj*)BINDING_STACK_START; - + struct thread *th; + for_each_thread(th) { + long binding_stack_size = + (lispobj*)SymbolValue(BINDING_STACK_POINTER,th) + - (lispobj*)th->binding_stack_start; + verify_space(th->binding_stack_start, binding_stack_size); + } verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size); verify_space((lispobj*)STATIC_SPACE_START , static_space_size); - verify_space((lispobj*)BINDING_STACK_START , binding_stack_size); } static void @@ -5279,10 +3378,10 @@ verify_generation(int generation) int i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { - int last_page; + long last_page; int region_allocation = page_table[i].allocated; /* This should be the start of a contiguous block */ @@ -5296,8 +3395,8 @@ verify_generation(int generation) for (last_page = i; ;last_page++) /* Check whether this is the last page in this contiguous * block. */ - if ((page_table[last_page].bytes_used < 4096) - /* Or it is 4096 and is the last in the block */ + if ((page_table[last_page].bytes_used < PAGE_BYTES) + /* Or it is PAGE_BYTES and is the last in the block */ || (page_table[last_page+1].allocated != region_allocation) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) @@ -5305,36 +3404,36 @@ verify_generation(int generation) break; verify_space(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*4096)/4); + + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); i = last_page; } } } -/* Check the all the free space is zero filled. */ +/* Check that all the free space is zero filled. */ static void verify_zero_fill(void) { - int page; + long page; for (page = 0; page < last_free_page; page++) { - if (page_table[page].allocated == FREE_PAGE) { + if (page_table[page].allocated == FREE_PAGE_FLAG) { /* The whole page should be zero filled. 
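 */

verify_generation() above walks the page table one contiguous block at a time. A minimal sketch of that block detection over a toy page table (the field names and the 4096-byte page size are assumptions standing in for struct page and PAGE_BYTES):

    #include <stdio.h>

    #define SKETCH_PAGE_BYTES 4096   /* stands in for PAGE_BYTES */
    #define NPAGES 8

    /* toy stand-in for struct page */
    struct sketch_page { int bytes_used; int gen; };

    static struct sketch_page pt[NPAGES] = {
        {4096,0},{4096,0},{1200,0},          /* block: pages 0..2 */
        {0,0},{4096,1},{300,1},{0,0},{0,0}   /* block: pages 4..5 */
    };

    int main(void) {
        int i;
        for (i = 0; i < NPAGES; i++) {
            int last = i;
            if (pt[i].bytes_used == 0)
                continue;
            /* a block ends at a partial page, or where the next page
             * is empty or belongs to another generation */
            while (pt[last].bytes_used == SKETCH_PAGE_BYTES
                   && last + 1 < NPAGES
                   && pt[last+1].bytes_used != 0
                   && pt[last+1].gen == pt[i].gen)
                last++;
            printf("block: pages %d..%d, gen %d\n", i, last, pt[i].gen);
            i = last;
        }
        return 0;
    }

A block ends at the first partial page, or where the next page is empty or in a different generation, which is exactly the termination test in the loop above.

/* verify_zero_fill() below then checks every word of each free page: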
*/ - int *start_addr = (int *)page_address(page); - int size = 1024; - int i; + long *start_addr = (long *)page_address(page); + long size = 1024; + long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { lose("free page not zero at %x", start_addr + i); } } } else { - int free_bytes = 4096 - page_table[page].bytes_used; + long free_bytes = PAGE_BYTES - page_table[page].bytes_used; if (free_bytes > 0) { - int *start_addr = (int *)((unsigned)page_address(page) + long *start_addr = (long *)((unsigned)page_address(page) + page_table[page].bytes_used); - int size = free_bytes / 4; - int i; + long size = free_bytes / N_WORD_BYTES; + long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { lose("free region not zero at %x", start_addr + i); @@ -5350,19 +3449,15 @@ void gencgc_verify_zero_fill(void) { /* Flush the alloc regions updating the tables. */ - boxed_region.free_pointer = current_region_free_pointer; - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); SHOW("verifying zero fill"); verify_zero_fill(); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; } static void verify_dynamic_space(void) { - int i; + long i; for (i = 0; i < NUM_GENERATIONS; i++) verify_generation(i); @@ -5375,20 +3470,21 @@ verify_dynamic_space(void) static void write_protect_generation_pages(int generation) { - int i; + long i; gc_assert(generation < NUM_GENERATIONS); for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated == BOXED_PAGE) + if ((page_table[i].allocated == BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) + && !page_table[i].dont_move && (page_table[i].gen == generation)) { void *page_start; page_start = (void *)page_address(i); os_protect(page_start, - 4096, + PAGE_BYTES, OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); /* Note the page as protected in the page tables. */ @@ -5404,15 +3500,15 @@ write_protect_generation_pages(int generation) } } -/* Garbage collect a generation. If raise is 0 the remains of the +/* Garbage collect a generation. If raise is 0 then the remains of the * generation are not raised to the next generation. */ static void garbage_collect_generation(int generation, int raise) { unsigned long bytes_freed; unsigned long i; - unsigned long read_only_space_size, static_space_size; - + unsigned long static_space_size; + struct thread *th; gc_assert(generation <= (NUM_GENERATIONS-1)); /* The oldest generation can't be raised. */ @@ -5425,8 +3521,9 @@ garbage_collect_generation(int generation, int raise) * temporary generation (NUM_GENERATIONS), and lowered when * done. Set up this new generation. There should be no pages * allocated to it yet. */ - if (!raise) - gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0); + if (!raise) { + gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0); + } /* Set the global src and dest. generations */ from_space = generation; @@ -5445,57 +3542,110 @@ garbage_collect_generation(int generation, int raise) /* Before any pointers are preserved, the dont_move flags on the * pages need to be cleared. */ for (i = 0; i < last_free_page; i++) - page_table[i].dont_move = 0; + if(page_table[i].gen==from_space) + page_table[i].dont_move = 0; /* Un-write-protect the old-space pages. This is essential for the * promoted pages as they may contain pointers into the old-space * which need to be scavenged. 
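 */

write_protect_generation_pages() above uses OS page protection as the write barrier: a protected page is known to hold no new pointers to younger objects until something faults on it. A POSIX sketch of the same idea (os_protect in the runtime wraps mprotect; nothing here is SBCL's actual code):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        memset(p, 0, page);

        /* revoke writes: any later store to p will fault, telling the
         * collector the page may now be dirty */
        if (mprotect(p, page, PROT_READ) != 0) { perror("mprotect"); return 1; }
        printf("page %p is now read-only\n", (void *)p);

        /* re-enable writes before touching it again */
        mprotect(p, page, PROT_READ | PROT_WRITE);
        p[0] = 1;
        munmap(p, page);
        return 0;
    }

/* Un-write-protecting the old-space pages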
also helps avoid unnecessary page
-     * faults as forwarding pointer are written into them. They need to
+     * faults as forwarding pointers are written into them. They need to
      * be un-protected anyway before unmapping later. */
     unprotect_oldspace();
 
-    /* Scavenge the stack's conservative roots. */
-    {
-        lispobj **ptr;
-        for (ptr = (lispobj **)CONTROL_STACK_END - 1;
-             ptr > (lispobj **)&raise;
-             ptr--) {
+    /* Scavenge the stacks' conservative roots. */
+
+    /* there are potentially two stacks for each thread: the main
+     * stack, which may contain Lisp pointers, and the alternate stack.
+     * We don't ever run Lisp code on the altstack, but it may
+     * host a sigcontext with lisp objects in it */
+
+    /* what we need to do: (1) find the stack pointer for the main
+     * stack; scavenge it (2) find the interrupt context on the
+     * alternate stack that might contain lisp values, and scavenge
+     * that */
+
+    /* we assume that none of the preceding applies to the thread that
+     * initiates GC.  If you ever call GC from inside an altstack
+     * handler, you will lose. */
+    for_each_thread(th) {
+        void **ptr;
+        void **esp=(void **)-1;
+#ifdef LISP_FEATURE_SB_THREAD
+        long i,free;
+        if(th==arch_os_get_current_thread()) {
+            esp = (void **) &raise;
+        } else {
+            void **esp1;
+            free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+            for(i=free-1;i>=0;i--) {
+                os_context_t *c=th->interrupt_contexts[i];
+                esp1 = (void **) *os_context_register_addr(c,reg_SP);
+                if(esp1>=th->control_stack_start && esp1<th->control_stack_end) {
+                    if(esp1<esp) esp=esp1;
+                    for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+                        preserve_pointer(*ptr);
+                    }
+                }
+            }
+        }
+#else
+        esp = (void **) &raise;
+#endif
+        for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
             preserve_pointer(*ptr);
         }
     }
-#ifdef CONTROL_STACKS
-    scavenge_thread_stacks();
-#endif
 
+#ifdef QSHOW
     if (gencgc_verbose > 1) {
-        int num_dont_move_pages = count_dont_move_pages();
-        FSHOW((stderr,
-               "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
-               num_dont_move_pages,
-               /* FIXME: 4096 should be symbolic constant here and
-                * prob'ly elsewhere too. */
-               num_dont_move_pages * 4096));
+        long num_dont_move_pages = count_dont_move_pages();
+        fprintf(stderr,
+                "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
+                num_dont_move_pages,
+                num_dont_move_pages * PAGE_BYTES);
     }
+#endif
 
     /* Scavenge all the rest of the roots. */
 
     /* Scavenge the Lisp functions of the interrupt handlers, taking
-     * care to avoid SIG_DFL, SIG_IGN. */
+     * care to avoid SIG_DFL and SIG_IGN. */
+    for_each_thread(th) {
+    struct interrupt_data *data=th->interrupt_data;
     for (i = 0; i < NSIG; i++) {
-        union interrupt_handler handler = interrupt_handlers[i];
+        union interrupt_handler handler = data->interrupt_handlers[i];
         if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
             !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
-            scavenge((lispobj *)(interrupt_handlers + i), 1);
+            scavenge((lispobj *)(data->interrupt_handlers + i), 1);
+        }
+    }
+    }
+    /* Scavenge the binding stacks. */
+    {
+        struct thread *th;
+        for_each_thread(th) {
+            long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+                th->binding_stack_start;
+            scavenge((lispobj *) th->binding_stack_start,len);
+#ifdef LISP_FEATURE_SB_THREAD
+            /* do the tls as well */
+            len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+                (sizeof (struct thread))/(sizeof (lispobj));
+            scavenge((lispobj *) (th+1),len);
+#endif
         }
     }
 
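The conservative scan in this hunk treats every word between each thread's control stack end and its live stack pointer as a possible heap pointer. A self-contained sketch of that idea (the heap-range test is a toy stand-in for find_page_index()/preserve_pointer()):

    #include <stdio.h>
    #include <stdint.h>

    static char heap[1 << 16];           /* pretend dynamic space */

    static void preserve_pointer_sketch(void *p) {
        uintptr_t a = (uintptr_t)p;
        if (a >= (uintptr_t)heap && a < (uintptr_t)(heap + sizeof heap))
            printf("conservative root: %p\n", p);  /* real GC pins the page */
    }

    /* walk from the top of the stack down to the live stack pointer */
    static void scan_range(void **sp, void **stack_top) {
        void **ptr;
        for (ptr = stack_top - 1; ptr >= sp; ptr--)
            preserve_pointer_sketch(*ptr);
    }

    int main(void) {
        void *fake_stack[4] = { heap + 16, (void *)0x1, heap + 100, NULL };
        scan_range(fake_stack, fake_stack + 4);
        return 0;
    }

Anything that looks like a pointer pins its page (the dont_move flag), since a conservative collector cannot prove such a word is not a live reference.

-    /* Scavenge the binding stack.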
*/ - scavenge( (lispobj *) BINDING_STACK_START, - (lispobj *)SymbolValue(BINDING_STACK_POINTER) - - (lispobj *)BINDING_STACK_START); - + /* The original CMU CL code had scavenge-read-only-space code + * controlled by the Lisp-level variable + * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it + * wasn't documented under what circumstances it was useful or + * safe to turn it on, so it's been turned off in SBCL. If you + * want/need this functionality, and can test and document it, + * please submit a patch. */ +#if 0 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) { - read_only_space_size = + unsigned long read_only_space_size = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) - (lispobj*)READ_ONLY_SPACE_START; FSHOW((stderr, @@ -5503,41 +3653,51 @@ garbage_collect_generation(int generation, int raise) read_only_space_size * sizeof(lispobj))); scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size); } +#endif + /* Scavenge static space. */ static_space_size = - (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) - + (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) - (lispobj *)STATIC_SPACE_START; - if (gencgc_verbose > 1) + if (gencgc_verbose > 1) { FSHOW((stderr, "/scavenge static space: %d bytes\n", static_space_size * sizeof(lispobj))); + } scavenge( (lispobj *) STATIC_SPACE_START, static_space_size); /* All generations but the generation being GCed need to be * scavenged. The new_space generation needs special handling as * objects may be moved in - it is handled separately below. */ - for (i = 0; i < NUM_GENERATIONS; i++) - if ((i != generation) && (i != new_space)) + for (i = 0; i < NUM_GENERATIONS; i++) { + if ((i != generation) && (i != new_space)) { scavenge_generation(i); + } + } /* Finally scavenge the new_space generation. Keep going until no * more objects are moved into the new generation */ scavenge_newspace_generation(new_space); + /* FIXME: I tried reenabling this check when debugging unrelated + * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately. + * Since the current GC code seems to work well, I'm guessing that + * this debugging code is just stale, but I haven't tried to + * figure it out. It should be figured out and then either made to + * work or just deleted. */ #define RESCAN_CHECK 0 #if RESCAN_CHECK /* As a check re-scavenge the newspace once; no new objects should * be found. */ { - int old_bytes_allocated = bytes_allocated; - int bytes_allocated; + long old_bytes_allocated = bytes_allocated; + long bytes_allocated; /* Start with a full scavenge. */ scavenge_newspace_generation_one_scan(new_space); /* Flush the current regions, updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); bytes_allocated = bytes_allocated - old_bytes_allocated; @@ -5551,8 +3711,7 @@ garbage_collect_generation(int generation, int raise) scan_weak_pointers(); /* Flush the current regions, updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); /* Free the pages in oldspace, but not those marked dont_move. */ bytes_freed = free_oldspace(); @@ -5594,42 +3753,41 @@ garbage_collect_generation(int generation, int raise) ++generations[generation].num_gc; } -/* Update last_free_page then ALLOCATION_POINTER */ -int +/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). 
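 */

One detail from the hunk above: scan_weak_pointers() runs only after all reachable objects have been moved, so a weak pointer whose referent never acquired a forwarding pointer is known dead and gets broken. A toy sketch with a "forwarded" flag standing in for the real forwarding-pointer test:

    #include <stdio.h>
    #include <stddef.h>

    struct obj { int forwarded; };   /* stand-in for a real object header */
    struct weak_ptr { struct obj *value; struct weak_ptr *next; };

    /* after scavenging: referents without a forwarding mark are dead */
    static void scan_weak_pointers_sketch(struct weak_ptr *wp) {
        for (; wp; wp = wp->next)
            if (wp->value && !wp->value->forwarded) {
                wp->value = NULL;    /* break the weak reference */
                printf("broke a weak pointer\n");
            }
    }

    int main(void) {
        struct obj live = {1}, dead = {0};
        struct weak_ptr b = { &dead, NULL };
        struct weak_ptr a = { &live, &b };
        scan_weak_pointers_sketch(&a);
        return 0;
    }

/* Below, update_x86_dynamic_space_free_pointer() rescans the page
 * table for the highest page still in use: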
*/ +long update_x86_dynamic_space_free_pointer(void) { - int last_page = -1; - int i; + long last_page = -1; + long i; - for (i = 0; i < NUM_PAGES; i++) - if ((page_table[i].allocated != FREE_PAGE) + for (i = 0; i < last_free_page; i++) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0)) last_page = i; last_free_page = last_page+1; SetSymbolValue(ALLOCATION_POINTER, - (lispobj)(((char *)heap_base) + last_free_page*4096)); + (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0); return 0; /* dummy value: return something ... */ } -/* GC all generations below last_gen, raising their objects to the - * next generation until all generations below last_gen are empty. - * Then if last_gen is due for a GC then GC it. In the special case - * that last_gen==NUM_GENERATIONS, the last generation is always - * GC'ed. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS. +/* GC all generations newer than last_gen, raising the objects in each + * to the next older generation - we finish when all generations below + * last_gen are empty. Then if last_gen is due for a GC, or if + * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that + * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS. * - * The oldest generation to be GCed will always be - * gencgc_oldest_gen_to_gc, partly ignoring last_gen if necessary. */ + * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than + * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */ + void collect_garbage(unsigned last_gen) { int gen = 0; int raise; int gen_to_wp; - int i; - - boxed_region.free_pointer = current_region_free_pointer; + long i; FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen)); @@ -5641,12 +3799,11 @@ collect_garbage(unsigned last_gen) } /* Flush the alloc regions updating the tables. */ - gc_alloc_update_page_tables(0, &boxed_region); - gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_all_page_tables(); /* Verify the new objects created by Lisp code. */ if (pre_verify_gen_0) { - SHOW((stderr, "pre-checking generation 0\n")); + FSHOW((stderr, "pre-checking generation 0\n")); verify_generation(0); } @@ -5727,58 +3884,54 @@ collect_garbage(unsigned last_gen) write_protect_generation_pages(gen_to_wp); } - /* Set gc_alloc back to generation 0. The current regions should - * be flushed after the above GCs */ + /* Set gc_alloc() back to generation 0. The current regions should + * be flushed after the above GCs. */ gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0); gc_alloc_generation = 0; update_x86_dynamic_space_free_pointer(); - - /* This is now done by Lisp SCRUB-CONTROL-STACK in Lisp SUB-GC, so we - * needn't do it here: */ - /* zero_stack();*/ - - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; - + auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs; + if(gencgc_verbose) + fprintf(stderr,"Next gc when %ld bytes have been consed\n", + auto_gc_trigger); SHOW("returning from collect_garbage"); } /* This is called by Lisp PURIFY when it is finished. All live objects * will have been moved to the RO and Static heaps. The dynamic space * will need a full re-initialization. We don't bother having Lisp - * PURIFY flush the current gc_alloc region, as the page_tables are + * PURIFY flush the current gc_alloc() region, as the page_tables are * re-initialized, and every page is zeroed to be sure. 
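 */

The free-pointer update at the top of this hunk is just a scan for the last page with a nonzero byte count. A toy version over an array of per-page byte counts (the heap base and page size here are made up):

    #include <stdio.h>

    #define SKETCH_PAGE_BYTES 4096

    int main(void) {
        int bytes_used[6] = {4096, 4096, 0, 512, 0, 0};
        char *heap_base = (char *)0x10000000;   /* hypothetical base */
        int i, last_page = -1;

        for (i = 0; i < 6; i++)
            if (bytes_used[i] != 0)
                last_page = i;      /* page 3 wins despite the hole at 2 */

        /* the allocation pointer sits just past the last used page */
        printf("free pointer = %p\n",
               (void *)(heap_base + (last_page + 1) * SKETCH_PAGE_BYTES));
        return 0;
    }

/* gc_free_heap() below resets the whole dynamic space after PURIFY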
*/ void gc_free_heap(void) { - int page; + long page; if (gencgc_verbose > 1) SHOW("entering gc_free_heap"); for (page = 0; page < NUM_PAGES; page++) { /* Skip free pages which should already be zero filled. */ - if (page_table[page].allocated != FREE_PAGE) { + if (page_table[page].allocated != FREE_PAGE_FLAG) { void *page_start, *addr; /* Mark the page free. The other slots are assumed invalid - * when it is a FREE_PAGE and bytes_used is 0 and it + * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it * should not be write-protected -- except that the * generation is used for the current region but it sets * that up. */ - page_table[page].allocated = FREE_PAGE; + page_table[page].allocated = FREE_PAGE_FLAG; page_table[page].bytes_used = 0; /* Zero the page. */ page_start = (void *)page_address(page); /* First, remove any write-protection. */ - os_protect(page_start, 4096, OS_VM_PROT_ALL); + os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL); page_table[page].write_protected = 0; - os_invalidate(page_start,4096); - addr = os_validate(page_start,4096); + os_invalidate(page_start,PAGE_BYTES); + addr = os_validate(page_start,PAGE_BYTES); if (addr == NULL || addr != page_start) { lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x", page_start, @@ -5786,10 +3939,10 @@ gc_free_heap(void) } } else if (gencgc_zero_check_during_free_heap) { /* Double-check that the page is zero filled. */ - int *page_start, i; - gc_assert(page_table[page].allocated == FREE_PAGE); + long *page_start, i; + gc_assert(page_table[page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[page].bytes_used == 0); - page_start = (int *)page_address(page); + page_start = (long *)page_address(page); for (i=0; i<1024; i++) { if (page_start[i] != 0) { lose("free region not zero at %x", page_start + i); @@ -5815,29 +3968,14 @@ gc_free_heap(void) if (gencgc_verbose > 1) print_generation_stats(0); - /* Initialize gc_alloc */ + /* Initialize gc_alloc(). */ gc_alloc_generation = 0; - boxed_region.first_page = 0; - boxed_region.last_page = -1; - boxed_region.start_addr = page_address(0); - boxed_region.free_pointer = page_address(0); - boxed_region.end_addr = page_address(0); - - unboxed_region.first_page = 0; - unboxed_region.last_page = -1; - unboxed_region.start_addr = page_address(0); - unboxed_region.free_pointer = page_address(0); - unboxed_region.end_addr = page_address(0); - -#if 0 /* Lisp PURIFY is currently running on the C stack so don't do this. */ - zero_stack(); -#endif - last_free_page = 0; - SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base)); + gc_set_region_empty(&boxed_region); + gc_set_region_empty(&unboxed_region); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; + last_free_page = 0; + SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0); if (verify_after_free_heap) { /* Check whether purify has left any bad pointers. */ @@ -5850,16 +3988,19 @@ gc_free_heap(void) void gc_init(void) { - int i; + long i; gc_init_tables(); + scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector; + scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer; + transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large; heap_base = (void*)DYNAMIC_SPACE_START; /* Initialize each page structure. */ for (i = 0; i < NUM_PAGES; i++) { /* Initialize all pages as free. */ - page_table[i].allocated = FREE_PAGE; + page_table[i].allocated = FREE_PAGE_FLAG; page_table[i].bytes_used = 0; /* Pages are not write-protected at startup. 
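 */

gc_free_heap() zeroes a dirty page by unmapping it and mapping a fresh anonymous page at the same address (the os_invalidate/os_validate calls above). A POSIX sketch; MAP_FIXED pins the replacement mapping to the old address:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        memset(p, 0xaa, page);              /* dirty the page */

        /* throw the page away and get a zero-filled one back at the
         * same address */
        munmap(p, page);
        char *q = mmap(p, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (q != p) { perror("mmap MAP_FIXED"); return 1; }

        printf("re-mapped at %p, first byte now %d\n", (void *)q, q[0]);
        munmap(q, page);
        return 0;
    }

On some platforms this is cheaper than storing zeros; on others it wastes swap, which is why the runtime makes unmap-to-zero configurable per platform.

/* gc_init() continues: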
*/ @@ -5868,7 +4009,9 @@ gc_init(void) bytes_allocated = 0; - /* Initialize the generations. */ + /* Initialize the generations. + * + * FIXME: very similar to code in gc_free_heap(), should be shared */ for (i = 0; i < NUM_GENERATIONS; i++) { generations[i].alloc_start_page = 0; generations[i].alloc_unboxed_start_page = 0; @@ -5886,57 +4029,53 @@ gc_init(void) /* Initialize gc_alloc. */ gc_alloc_generation = 0; - boxed_region.first_page = 0; - boxed_region.last_page = -1; - boxed_region.start_addr = page_address(0); - boxed_region.free_pointer = page_address(0); - boxed_region.end_addr = page_address(0); - - unboxed_region.first_page = 0; - unboxed_region.last_page = -1; - unboxed_region.start_addr = page_address(0); - unboxed_region.free_pointer = page_address(0); - unboxed_region.end_addr = page_address(0); + gc_set_region_empty(&boxed_region); + gc_set_region_empty(&unboxed_region); last_free_page = 0; - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; } /* Pick up the dynamic space from after a core load. * * The ALLOCATION_POINTER points to the end of the dynamic space. - * - * XX A scan is needed to identify the closest first objects for pages. */ -void + */ + +static void gencgc_pickup_dynamic(void) { - int page = 0; - int addr = DYNAMIC_SPACE_START; - int alloc_ptr = SymbolValue(ALLOCATION_POINTER); + long page = 0; + long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); + lispobj *prev=(lispobj *)page_address(page); - /* Initialize the first region. */ do { - page_table[page].allocated = BOXED_PAGE; + lispobj *first,*ptr= (lispobj *)page_address(page); + page_table[page].allocated = BOXED_PAGE_FLAG; page_table[page].gen = 0; - page_table[page].bytes_used = 4096; + page_table[page].bytes_used = PAGE_BYTES; page_table[page].large_object = 0; + + first=gc_search_space(prev,(ptr+2)-prev,ptr); + if(ptr == first) prev=ptr; page_table[page].first_object_offset = - (void *)DYNAMIC_SPACE_START - page_address(page); - addr += 4096; + (void *)prev - page_address(page); page++; - } while (addr < alloc_ptr); + } while (page_address(page) < alloc_ptr); + + generations[0].bytes_allocated = PAGE_BYTES*page; + bytes_allocated = PAGE_BYTES*page; + +} - generations[0].bytes_allocated = 4096*page; - bytes_allocated = 4096*page; - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; +void +gc_initialize_pointers(void) +{ + gencgc_pickup_dynamic(); } + + -/* a counter for how deep we are in alloc(..) calls */ -int alloc_entered = 0; /* alloc(..) is the external interface for memory allocation. It * allocates to generation 0. It is not called from within the garbage @@ -5948,189 +4087,87 @@ int alloc_entered = 0; * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.) * * The check for a GC trigger is only performed when the current - * region is full, so in most cases it's not needed. Further MAYBE-GC - * is only called once because Lisp will remember "need to collect - * garbage" and get around to it when it can. */ + * region is full, so in most cases it's not needed. */ + char * -alloc(int nbytes) +alloc(long nbytes) { + struct thread *th=arch_os_get_current_thread(); + struct alloc_region *region= +#ifdef LISP_FEATURE_SB_THREAD + th ? &(th->alloc_region) : &boxed_region; +#else + &boxed_region; +#endif + void *new_obj; + void *new_free_pointer; + gc_assert(nbytes>0); /* Check for alignment allocation problems. 
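 */

gencgc_pickup_dynamic() above reconstructs, for every page of a freshly loaded core, where the object overlapping the page's start begins: first_object_offset is the (possibly negative) distance back to that object's first word. A toy version with given object starts instead of gc_search_space():

    #include <stdio.h>

    #define PB 4096L   /* assumed page size */

    int main(void) {
        /* hypothetical layout: objects starting at bytes 0 and 5000;
         * the second one spills from page 1 into page 2 */
        long object_starts[] = {0, 5000};
        long nobj = 2, heap_end = 3 * PB;
        long page, prev = 0, k = 0;

        for (page = 0; page * PB < heap_end; page++) {
            /* remember the last object starting at or before this page */
            while (k < nobj && object_starts[k] <= page * PB)
                prev = object_starts[k++];
            printf("page %ld: first_object_offset = %ld\n",
                   page, prev - page * PB);
        }
        return 0;
    }

/* the alignment check in alloc(), old and new: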
*/ - gc_assert((((unsigned)current_region_free_pointer & 0x7) == 0) - && ((nbytes & 0x7) == 0)); - - if (SymbolValue(PSEUDO_ATOMIC_ATOMIC)) {/* if already in a pseudo atomic */ - - void *new_free_pointer; - - retry1: - if (alloc_entered) { - SHOW("alloc re-entered in already-pseudo-atomic case"); - } - ++alloc_entered; - - /* Check whether there is room in the current region. */ - new_free_pointer = current_region_free_pointer + nbytes; - - /* FIXME: Shouldn't we be doing some sort of lock here, to - * keep from getting screwed if an interrupt service routine - * allocates memory between the time we calculate new_free_pointer - * and the time we write it back to current_region_free_pointer? - * Perhaps I just don't understand pseudo-atomics.. - * - * Perhaps I don't. It looks as though what happens is if we - * were interrupted any time during the pseudo-atomic - * interval (which includes now) we discard the allocated - * memory and try again. So, at least we don't return - * a memory area that was allocated out from underneath us - * by code in an ISR. - * Still, that doesn't seem to prevent - * current_region_free_pointer from getting corrupted: - * We read current_region_free_pointer. - * They read current_region_free_pointer. - * They write current_region_free_pointer. - * We write current_region_free_pointer, scribbling over - * whatever they wrote. */ - - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = current_region_free_pointer; - current_region_free_pointer = new_free_pointer; - alloc_entered--; - return((void *)new_obj); - } - - if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - /* Double the trigger. */ - auto_gc_trigger *= 2; - alloc_entered--; - /* Exit the pseudo-atomic. */ - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) { - /* Handle any interrupts that occurred during - * gc_alloc(..). */ - do_pending_interrupt(); - } - funcall0(SymbolFunction(MAYBE_GC)); - /* Re-enter the pseudo-atomic. */ - SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0)); - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1)); - goto retry1; - } - /* Call gc_alloc. */ - boxed_region.free_pointer = current_region_free_pointer; - { - void *new_obj = gc_alloc(nbytes); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; - alloc_entered--; - return (new_obj); - } - } else { - void *result; - void *new_free_pointer; - - retry2: - /* At least wrap this allocation in a pseudo atomic to prevent - * gc_alloc from being re-entered. */ - SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0)); - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1)); - - if (alloc_entered) - SHOW("alloc re-entered in not-already-pseudo-atomic case"); - ++alloc_entered; - - /* Check whether there is room in the current region. */ - new_free_pointer = current_region_free_pointer + nbytes; - - if (new_free_pointer <= boxed_region.end_addr) { - /* If so then allocate from the current region. */ - void *new_obj = current_region_free_pointer; - current_region_free_pointer = new_free_pointer; - alloc_entered--; - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)) { - /* Handle any interrupts that occurred during - * gc_alloc(..). 
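 */

The branch deleted above documents the old pseudo-atomic discipline: a flag marks the allocation as uninterruptible, and a signal arriving in that window only records itself, to be serviced once the flag drops. A toy single-threaded sketch of that handshake (the real flags are the Lisp symbols PSEUDO-ATOMIC-ATOMIC and PSEUDO-ATOMIC-INTERRUPTED):

    #include <stdio.h>

    static volatile int pa_atomic, pa_interrupted;

    static void signal_arrives(void) {
        if (pa_atomic)
            pa_interrupted = 1;     /* defer: just take a note */
        else
            printf("interrupt handled immediately\n");
    }

    static void allocate_something(void) {
        pa_atomic = 1;              /* begin pseudo-atomic section */
        signal_arrives();           /* simulate a mid-allocation signal */
        /* ... bump the free pointer, initialize the object ... */
        pa_atomic = 0;              /* end pseudo-atomic section */
        if (pa_interrupted) {
            pa_interrupted = 0;
            printf("running the deferred interrupt after allocation\n");
        }
    }

    int main(void) { allocate_something(); return 0; }

/* the rest of the removed retry path: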
*/ - do_pending_interrupt(); - goto retry2; - } - - return((void *)new_obj); - } - - /* KLUDGE: There's lots of code around here shared with the - * the other branch. Is there some way to factor out the - * duplicate code? -- WHN 19991129 */ - if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - /* Double the trigger. */ - auto_gc_trigger *= 2; - alloc_entered--; - /* Exit the pseudo atomic. */ - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) { - /* Handle any interrupts that occurred during - * gc_alloc(..); */ - do_pending_interrupt(); - } - funcall0(SymbolFunction(MAYBE_GC)); - goto retry2; - } - - /* Else call gc_alloc. */ - boxed_region.free_pointer = current_region_free_pointer; - result = gc_alloc(nbytes); - current_region_free_pointer = boxed_region.free_pointer; - current_region_end_addr = boxed_region.end_addr; - - alloc_entered--; - SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0)); - if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) { - /* Handle any interrupts that occurred during - * gc_alloc(..). */ - do_pending_interrupt(); - goto retry2; + gc_assert((((unsigned)region->free_pointer & LOWTAG_MASK) == 0) + && ((nbytes & LOWTAG_MASK) == 0)); +#if 0 + if(all_threads) + /* there are a few places in the C code that allocate data in the + * heap before Lisp starts. This is before interrupts are enabled, + * so we don't need to check for pseudo-atomic */ +#ifdef LISP_FEATURE_SB_THREAD + if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) { + register u32 fs; + fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n", + th,th->os_thread); + __asm__("movl %fs,%0" : "=r" (fs) : ); + fprintf(stderr, "fs is %x, th->tls_cookie=%x \n", + debug_get_fs(),th->tls_cookie); + lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); } - - return result; +#else + gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)); +#endif +#endif + + /* maybe we can do this quickly ... */ + new_free_pointer = region->free_pointer + nbytes; + if (new_free_pointer <= region->end_addr) { + new_obj = (void*)(region->free_pointer); + region->free_pointer = new_free_pointer; + return(new_obj); /* yup */ } -} - -/* - * noise to manipulate the gc trigger stuff - */ - -void -set_auto_gc_trigger(os_vm_size_t dynamic_usage) -{ - auto_gc_trigger += dynamic_usage; -} - -void -clear_auto_gc_trigger(void) -{ - auto_gc_trigger = 0; -} - -/* Find the code object for the given pc, or return NULL on failure. - * - * FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */ -lispobj * -component_ptr_from_pc(lispobj *pc) -{ - lispobj *object = NULL; - - if ( (object = search_read_only_space(pc)) ) - ; - else if ( (object = search_static_space(pc)) ) - ; - else - object = search_dynamic_space(pc); - - if (object) /* if we found something */ - if (TypeOf(*object) == type_CodeHeader) /* if it's a code object */ - return(object); - - return (NULL); + + /* we have to go the long way around, it seems. Check whether + * we should GC in the near future + */ + if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { + struct thread *thread=arch_os_get_current_thread(); + /* Don't flood the system with interrupts if the need to gc is + * already noted. This can happen for example when SUB-GC + * allocates or after a gc triggered in a WITHOUT-GCING. */ + if (SymbolValue(NEED_TO_COLLECT_GARBAGE,thread) == NIL) { + /* set things up so that GC happens when we finish the PA + * section. 
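 */

The surviving fast path above is plain bump allocation: if the request fits between the region's free_pointer and end_addr, advance the pointer and return; only on overflow does the collector machinery get involved. A self-contained sketch over a static buffer (the struct mirrors, but is not, the runtime's struct alloc_region):

    #include <stdio.h>
    #include <stddef.h>

    struct region_sketch { char *free_pointer, *end_addr; };

    static void *alloc_fast(struct region_sketch *r, size_t nbytes) {
        char *new_free = r->free_pointer + nbytes;
        if (new_free <= r->end_addr) {   /* fits: no GC machinery needed */
            void *obj = r->free_pointer;
            r->free_pointer = new_free;
            return obj;
        }
        return NULL;  /* slow path: refill the region, maybe trigger GC */
    }

    int main(void) {
        static char buf[64];
        struct region_sketch r = { buf, buf + sizeof buf };
        printf("%p %p %p\n", alloc_fast(&r, 32), alloc_fast(&r, 32),
               alloc_fast(&r, 32));   /* third is NULL: region exhausted */
        return 0;
    }

The GC-trigger check just after it defers the collection to the end of the pseudo-atomic section, as the continuing comment explains.

/* Why defer rather than collect immediately?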
We only do this if there wasn't a pending + * handler already, in case it was a gc. If it wasn't a + * GC, the next allocation will get us back to this point + * anyway, so no harm done + */ + struct interrupt_data *data=th->interrupt_data; + sigset_t new_mask,old_mask; + sigemptyset(&new_mask); + sigaddset_blockable(&new_mask); + thread_sigmask(SIG_BLOCK,&new_mask,&old_mask); + + if((!data->pending_handler) && + maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0)) { + /* Leave the signals blocked just as if it was + * deferred the normal way and set the + * pending_mask. */ + sigcopyset(&(data->pending_mask),&old_mask); + SetSymbolValue(NEED_TO_COLLECT_GARBAGE,T,thread); + } else { + thread_sigmask(SIG_SETMASK,&old_mask,0); + } + } + } + new_obj = gc_alloc_with_region(nbytes,0,region,0); + return (new_obj); } /* @@ -6149,12 +4186,13 @@ void unhandled_sigmemoryfault(void); * Return true if this signal is a normal generational GC thing that * we were able to handle, or false if it was abnormal and control * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */ + int gencgc_handle_wp_violation(void* fault_addr) { - int page_index = find_page_index(fault_addr); + long page_index = find_page_index(fault_addr); -#if defined QSHOW_SIGNALS +#ifdef QSHOW_SIGNALS FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n", fault_addr, page_index)); #endif @@ -6170,23 +4208,25 @@ gencgc_handle_wp_violation(void* fault_addr) return 0; } else { - - /* The only acceptable reason for an signal like this from the - * heap is that the generational GC write-protected the page. */ - if (page_table[page_index].write_protected != 1) { - lose("access failure in heap page not marked as write-protected"); + if (page_table[page_index].write_protected) { + /* Unprotect the page. */ + os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL); + page_table[page_index].write_protected_cleared = 1; + page_table[page_index].write_protected = 0; + } else { + /* The only acceptable reason for this signal on a heap + * access is that GENCGC write-protected the page. + * However, if two CPUs hit a wp page near-simultaneously, + * we had better not have the second one lose here if it + * does this test after the first one has already set wp=0 + */ + if(page_table[page_index].write_protected_cleared != 1) + lose("fault in heap page not marked as write-protected"); } - - /* Unprotect the page. */ - os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL); - page_table[page_index].write_protected = 0; - page_table[page_index].write_protected_cleared = 1; - /* Don't worry, we can handle it. */ return 1; } } - /* This is to be called when we catch a SIGSEGV/SIGBUS, determine that * it's not just a case of the program hitting the write barrier, and * are about to let Lisp deal with it. It's basically just a @@ -6194,3 +4234,23 @@ gencgc_handle_wp_violation(void* fault_addr) void unhandled_sigmemoryfault() {} + +void gc_alloc_update_all_page_tables(void) +{ + /* Flush the alloc regions updating the tables. */ + struct thread *th; + for_each_thread(th) + gc_alloc_update_page_tables(0, &th->alloc_region); + gc_alloc_update_page_tables(1, &unboxed_region); + gc_alloc_update_page_tables(0, &boxed_region); +} +void +gc_set_region_empty(struct alloc_region *region) +{ + region->first_page = 0; + region->last_page = -1; + region->start_addr = page_address(0); + region->free_pointer = page_address(0); + region->end_addr = page_address(0); +} +
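gencgc_handle_wp_violation() above is the receiving end of the write barrier set up in write_protect_generation_pages(): the SIGSEGV handler asks it whether the faulting address is a write-protected heap page, and if so the page is unprotected, marked dirty, and the store retried. A minimal POSIX sketch of that pattern (a real runtime must also handle races between threads, as the hunk's comment notes):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char *guarded;
    static long page_size;

    static void on_segv(int sig, siginfo_t *info, void *ctx) {
        char *addr = (char *)info->si_addr;
        (void)sig; (void)ctx;
        if (addr >= guarded && addr < guarded + page_size) {
            /* our barrier: lift the protection, note the page dirty */
            mprotect(guarded, page_size, PROT_READ | PROT_WRITE);
            return;               /* the faulting store is retried */
        }
        _exit(1);                 /* not ours: give up */
    }

    int main(void) {
        struct sigaction sa;
        page_size = sysconf(_SC_PAGESIZE);
        guarded = mmap(NULL, page_size, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        memset(&sa, 0, sizeof sa);
        sa.sa_sigaction = on_segv;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, NULL);
        guarded[0] = 42;          /* faults once, then succeeds */
        printf("wrote %d after a barrier fault\n", guarded[0]);
        return 0;
    }

After the handler returns, the kernel re-executes the faulting store, which now succeeds against the writable page.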