X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=f7f45a7ffa9d71a72ef3fa0d7e7c5dc3c1cd3f08;hb=78fa16bf55be44cc16845be84d98023e83fb14bc;hp=e350702396169f11b821701284a87cbad3488f57;hpb=af1ca7ec5eb13312e1ad0bfcca8a02329339f8e6;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index e350702..f7f45a7 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -28,8 +28,8 @@ #include #include #include -#include "runtime.h" #include "sbcl.h" +#include "runtime.h" #include "os.h" #include "interr.h" #include "globals.h" @@ -37,6 +37,7 @@ #include "validate.h" #include "lispregs.h" #include "arch.h" +#include "fixnump.h" #include "gc.h" #include "gc-internal.h" #include "thread.h" @@ -48,9 +49,7 @@ void do_pending_interrupt(void); /* forward declarations */ -int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed); -void gc_set_region_empty(struct alloc_region *region); -void gc_alloc_update_all_page_tables(void); +long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed); static void gencgc_pickup_dynamic(void); boolean interrupt_maybe_gc_int(int, siginfo_t *, void *); @@ -69,7 +68,7 @@ boolean interrupt_maybe_gc_int(int, siginfo_t *, void *); boolean enable_page_protection = 1; /* Should we unmap a page and re-mmap it to have it zero filled? */ -#if defined(__FreeBSD__) || defined(__OpenBSD__) +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) /* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD * so don't unmap there. * @@ -94,7 +93,11 @@ unsigned large_object_size = 4 * PAGE_BYTES; /* the verbosity level. All non-error messages are disabled at level 0; * and only a few rare messages are printed at level 1. */ -unsigned gencgc_verbose = (QSHOW ? 1 : 0); +#ifdef QSHOW +unsigned gencgc_verbose = 1; +#else +unsigned gencgc_verbose = 0; +#endif /* FIXME: At some point enable the various error-checking things below * and see what they say. */ @@ -138,8 +141,8 @@ unsigned long auto_gc_trigger = 0; /* the source and destination generations. These are set before a GC starts * scavenging. */ -int from_space; -int new_space; +long from_space; +long new_space; /* An array of page structures is statically allocated. @@ -151,23 +154,28 @@ struct page page_table[NUM_PAGES]; * is needed. */ static void *heap_base = NULL; +#if N_WORD_BITS == 32 + #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG +#elif N_WORD_BITS == 64 + #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG +#endif /* Calculate the start address for the given page number. */ inline void * -page_address(int page_num) +page_address(long page_num) { return (heap_base + (page_num * PAGE_BYTES)); } /* Find the page index within the page_table for the given * address. Return -1 on failure. */ -inline int +inline long find_page_index(void *addr) { - int index = addr-heap_base; + long index = addr-heap_base; if (index >= 0) { - index = ((unsigned int)index)/PAGE_BYTES; + index = ((unsigned long)index)/PAGE_BYTES; if (index < NUM_PAGES) return (index); } @@ -179,28 +187,28 @@ find_page_index(void *addr) struct generation { /* the first page that gc_alloc() checks on its next call */ - int alloc_start_page; + long alloc_start_page; /* the first page that gc_alloc_unboxed() checks on its next call */ - int alloc_unboxed_start_page; + long alloc_unboxed_start_page; /* the first page that gc_alloc_large (boxed) considers on its next * call. 
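 * (Usage sketch, going by gc_alloc_new_region and gc_alloc_large
 *  below: the search for free pages starts from this page, so pages
 *  already known to be full are skipped on later calls.)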
(Although it always allocates after the boxed_region.) */ - int alloc_large_start_page; + long alloc_large_start_page; /* the first page that gc_alloc_large (unboxed) considers on its * next call. (Although it always allocates after the * current_unboxed_region.) */ - int alloc_large_unboxed_start_page; + long alloc_large_unboxed_start_page; /* the bytes allocated to this generation */ - int bytes_allocated; + long bytes_allocated; /* the number of bytes at which to trigger a GC */ - int gc_trigger; + long gc_trigger; /* to calculate a new level for gc_trigger */ - int bytes_consed_between_gc; + long bytes_consed_between_gc; /* the number of GCs since the last raise */ int num_gc; @@ -214,7 +222,7 @@ struct generation { * objects are added from a GC of a younger generation. Dividing by * the bytes_allocated will give the average age of the memory in * this generation since its last GC. */ - int cum_sum_bytes_allocated; + long cum_sum_bytes_allocated; /* a minimum average memory age before a GC will occur helps * prevent a GC when a large number of new live objects have been @@ -249,7 +257,7 @@ unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1; * ALLOCATION_POINTER which is used by the room function to limit its * search of the heap. XX Gencgc obviously needs to be better * integrated with the Lisp code. */ -static int last_free_page; +static long last_free_page; /* This lock is to prevent multiple threads from simultaneously * allocating new regions which overlap each other. Note that the @@ -267,14 +275,14 @@ static lispobj free_pages_lock=0; /* Count the number of pages which are write-protected within the * given generation. */ -static int +static long count_write_protect_generation_pages(int generation) { - int i; - int count = 0; + long i; + long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].gen == generation) && (page_table[i].write_protected == 1)) count++; @@ -282,11 +290,11 @@ count_write_protect_generation_pages(int generation) } /* Count the number of pages within the given generation. */ -static int +static long count_generation_pages(int generation) { - int i; - int count = 0; + long i; + long count = 0; for (i = 0; i < last_free_page; i++) if ((page_table[i].allocated != 0) @@ -295,12 +303,12 @@ count_generation_pages(int generation) return count; } -/* Count the number of dont_move pages. */ -static int +#ifdef QSHOW +static long count_dont_move_pages(void) { - int i; - int count = 0; + long i; + long count = 0; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) { ++count; @@ -308,14 +316,15 @@ count_dont_move_pages(void) } return count; } +#endif /* QSHOW */ /* Work through the pages and add up the number of bytes used for the * given generation. */ -static int +static long count_generation_bytes_allocated (int gen) { - int i; - int result = 0; + long i; + long result = 0; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != 0) && (page_table[i].gen == gen)) result += page_table[i].bytes_used; @@ -372,7 +381,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Count the number of boxed pages within the given * generation. 
*/ - if (page_table[j].allocated & BOXED_PAGE) { + if (page_table[j].allocated & BOXED_PAGE_FLAG) { if (page_table[j].large_object) large_boxed_cnt++; else @@ -381,7 +390,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ if(page_table[j].dont_move) pinned_cnt++; /* Count the number of unboxed pages within the given * generation. */ - if (page_table[j].allocated & UNBOXED_PAGE) { + if (page_table[j].allocated & UNBOXED_PAGE_FLAG) { if (page_table[j].large_object) large_unboxed_cnt++; else @@ -491,12 +500,12 @@ static int gc_alloc_generation; * are allocated, although they will initially be empty. */ static void -gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) { - int first_page; - int last_page; - int bytes_found; - int i; + long first_page; + long last_page; + long bytes_found; + long i; /* FSHOW((stderr, @@ -508,7 +517,7 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) gc_assert((alloc_region->first_page == 0) && (alloc_region->last_page == -1) && (alloc_region->free_pointer == alloc_region->end_addr)); - get_spinlock(&free_pages_lock,(int) alloc_region); + get_spinlock(&free_pages_lock,(long) alloc_region); if (unboxed) { first_page = generations[gc_alloc_generation].alloc_unboxed_start_page; @@ -533,35 +542,35 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) /* The first page may have already been in use. */ if (page_table[first_page].bytes_used == 0) { if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE; + page_table[first_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[first_page].allocated = BOXED_PAGE; + page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].large_object = 0; page_table[first_page].first_object_offset = 0; } if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); - page_table[first_page].allocated |= OPEN_REGION_PAGE; + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG; gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); for (i = first_page+1; i <= last_page; i++) { if (unboxed) - page_table[i].allocated = UNBOXED_PAGE; + page_table[i].allocated = UNBOXED_PAGE_FLAG; else - page_table[i].allocated = BOXED_PAGE; + page_table[i].allocated = BOXED_PAGE_FLAG; page_table[i].gen = gc_alloc_generation; page_table[i].large_object = 0; /* This may not be necessary for unboxed regions (think it was * broken before!) */ page_table[i].first_object_offset = alloc_region->start_addr - page_address(i); - page_table[i].allocated |= OPEN_REGION_PAGE ; + page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ; } /* Bump up last_free_page. 
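 * (last_free_page is the heap's high-water mark: page_table scans
 *  stop there and, per the note above, it is mirrored into
 *  ALLOCATION_POINTER so the Lisp-side room function knows how far
 *  to search.)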
*/ if (last_page+1 > last_free_page) { @@ -574,9 +583,9 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) /* we can do this after releasing free_pages_lock */ if (gencgc_zero_check) { - int *p; - for (p = (int *)alloc_region->start_addr; - p < (int *)alloc_region->end_addr; p++) { + long *p; + for (p = (long *)alloc_region->start_addr; + p < (long *)alloc_region->end_addr; p++) { if (*p != 0) { /* KLUDGE: It would be nice to use %lx and explicit casts * (long) in code like this, so that it is less likely to @@ -606,22 +615,22 @@ gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region) * scavenge of a generation. */ #define NUM_NEW_AREAS 512 static int record_new_objects = 0; -static int new_areas_ignore_page; +static long new_areas_ignore_page; struct new_area { - int page; - int offset; - int size; + long page; + long offset; + long size; }; static struct new_area (*new_areas)[]; -static int new_areas_index; -int max_new_areas; +static long new_areas_index; +long max_new_areas; /* Add a new area to new_areas. */ static void -add_new_area(int first_page, int offset, int size) +add_new_area(long first_page, long offset, long size) { unsigned new_area_start,c; - int i; + long i; /* Ignore if full. */ if (new_areas_index >= NUM_NEW_AREAS) @@ -690,13 +699,13 @@ add_new_area(int first_page, int offset, int size) void gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) { - int more; - int first_page; - int next_page; - int bytes_used; - int orig_first_page_bytes_used; - int region_size; - int byte_cnt; + long more; + long first_page; + long next_page; + long bytes_used; + long orig_first_page_bytes_used; + long region_size; + long byte_cnt; first_page = alloc_region->first_page; @@ -707,7 +716,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; - get_spinlock(&free_pages_lock,(int) alloc_region); + get_spinlock(&free_pages_lock,(long) alloc_region); if (alloc_region->free_pointer != alloc_region->start_addr) { /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; @@ -722,12 +731,12 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) * first_object_offset. */ if (page_table[first_page].bytes_used == 0) gc_assert(page_table[first_page].first_object_offset == 0); - page_table[first_page].allocated &= ~(OPEN_REGION_PAGE); + page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); @@ -748,11 +757,11 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) * first_object_offset pointer to the start of the region, and set * the bytes_used. 
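 * (For example: if the region starts on page p and spills onto page
 *  p+1, then page_table[p+1].first_object_offset ends up as
 *  alloc_region->start_addr - page_address(p+1), a negative byte
 *  offset pointing back at the start of the region, while each full
 *  page's bytes_used is PAGE_BYTES.)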
*/ while (more) { - page_table[next_page].allocated &= ~(OPEN_REGION_PAGE); + page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) - gc_assert(page_table[next_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG); else - gc_assert(page_table[next_page].allocated == BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); @@ -800,15 +809,15 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) } else { /* There are no bytes allocated. Unallocate the first_page if * there are 0 bytes_used. */ - page_table[first_page].allocated &= ~(OPEN_REGION_PAGE); + page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (page_table[first_page].bytes_used == 0) - page_table[first_page].allocated = FREE_PAGE; + page_table[first_page].allocated = FREE_PAGE_FLAG; } /* Unallocate any unused pages. */ while (next_page <= alloc_region->last_page) { gc_assert(page_table[next_page].bytes_used == 0); - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; next_page++; } release_spinlock(&free_pages_lock); @@ -816,21 +825,21 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) gc_set_region_empty(alloc_region); } -static inline void *gc_quick_alloc(int nbytes); +static inline void *gc_quick_alloc(long nbytes); /* Allocate a possibly large object. */ void * -gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) { - int first_page; - int last_page; - int orig_first_page_bytes_used; - int byte_cnt; - int more; - int bytes_used; - int next_page; + long first_page; + long last_page; + long orig_first_page_bytes_used; + long byte_cnt; + long more; + long bytes_used; + long next_page; - get_spinlock(&free_pages_lock,(int) alloc_region); + get_spinlock(&free_pages_lock,(long) alloc_region); if (unboxed) { first_page = @@ -858,18 +867,18 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) * first_object_offset. */ if (page_table[first_page].bytes_used == 0) { if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE; + page_table[first_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[first_page].allocated = BOXED_PAGE; + page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].first_object_offset = 0; page_table[first_page].large_object = 1; } if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE); + gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); else - gc_assert(page_table[first_page].allocated == BOXED_PAGE); + gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 1); @@ -891,12 +900,12 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) * first_object_offset pointer to the start of the region, and * set the bytes_used. 
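 * (A sketch of how large objects differ from small regions: each
 *  claimed page is marked large_object = 1, and the loop below hands
 *  out PAGE_BYTES per full page until the final, partial page
 *  receives the remainder.)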
*/ while (more) { - gc_assert(page_table[next_page].allocated == FREE_PAGE); + gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); if (unboxed) - page_table[next_page].allocated = UNBOXED_PAGE; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; else - page_table[next_page].allocated = BOXED_PAGE; + page_table[next_page].allocated = BOXED_PAGE_FLAG; page_table[next_page].gen = gc_alloc_generation; page_table[next_page].large_object = 1; @@ -936,16 +945,16 @@ gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region) return((void *)(page_address(first_page)+orig_first_page_bytes_used)); } -int -gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed) +long +gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed) { - int first_page; - int last_page; - int region_size; - int restart_page=*restart_page_ptr; - int bytes_found; - int num_pages; - int large_p=(nbytes>=large_object_size); + long first_page; + long last_page; + long region_size; + long restart_page=*restart_page_ptr; + long bytes_found; + long num_pages; + long large_p=(nbytes>=large_object_size); gc_assert(free_pages_lock); /* Search for a contiguous free space of at least nbytes. If it's @@ -956,26 +965,16 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed) first_page = restart_page; if (large_p) while ((first_page < NUM_PAGES) - && (page_table[first_page].allocated != FREE_PAGE)) + && (page_table[first_page].allocated != FREE_PAGE_FLAG)) first_page++; else while (first_page < NUM_PAGES) { - if(page_table[first_page].allocated == FREE_PAGE) + if(page_table[first_page].allocated == FREE_PAGE_FLAG) break; if((page_table[first_page].allocated == - (unboxed ? UNBOXED_PAGE : BOXED_PAGE)) && + (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && (page_table[first_page].large_object == 0) && (page_table[first_page].gen == gc_alloc_generation) && - /* FIXME: Why? Please tell me why? Removal of - this test, which restricts opening an allocation - region on a partially-used page to the nursery - generation, causes more or less instant heap - corruption on forms such as - (loop repeat 2 - do (compile nil '(lambda (x) x)) - do (gc :full t)) - -- CSR, 2003-01-14 */ - (gc_alloc_generation == 0) && (page_table[first_page].bytes_used < (PAGE_BYTES-32)) && (page_table[first_page].write_protected == 0) && (page_table[first_page].dont_move == 0)) { @@ -1000,7 +999,7 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed) while (((bytes_found < nbytes) || (!large_p && (num_pages < 2))) && (last_page < (NUM_PAGES-1)) - && (page_table[last_page+1].allocated == FREE_PAGE)) { + && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { last_page++; num_pages++; bytes_found += PAGE_BYTES; @@ -1030,7 +1029,7 @@ gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed) * functions will eventually call this */ void * -gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region, +gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, int quick_p) { void *new_free_pointer; @@ -1041,6 +1040,9 @@ gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region, /* Check whether there is room in the current alloc region. 
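 * (Fast-path sketch: with, say, free_pointer = 0x1000, end_addr =
 *  0x2000 and nbytes = 64, new_free_pointer = 0x1040 fits, so the
 *  object is carved straight out of the open region; otherwise we
 *  fall through to closing this region and opening a new one, or to
 *  gc_alloc_large for big requests.)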
*/ new_free_pointer = my_region->free_pointer + nbytes; + /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes, + my_region->free_pointer, new_free_pointer); */ + if (new_free_pointer <= my_region->end_addr) { /* If so then allocate from the current alloc region. */ void *new_obj = my_region->free_pointer; @@ -1072,7 +1074,7 @@ gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region, * region */ void * -gc_general_alloc(int nbytes,int unboxed_p,int quick_p) +gc_general_alloc(long nbytes,int unboxed_p,int quick_p) { struct alloc_region *my_region = unboxed_p ? &unboxed_region : &boxed_region; @@ -1080,31 +1082,31 @@ gc_general_alloc(int nbytes,int unboxed_p,int quick_p) } static inline void * -gc_quick_alloc(int nbytes) +gc_quick_alloc(long nbytes) { return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); } static inline void * -gc_quick_alloc_large(int nbytes) +gc_quick_alloc_large(long nbytes) { return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); } static inline void * -gc_alloc_unboxed(int nbytes) +gc_alloc_unboxed(long nbytes) { return gc_general_alloc(nbytes,ALLOC_UNBOXED,0); } static inline void * -gc_quick_alloc_unboxed(int nbytes) +gc_quick_alloc_unboxed(long nbytes) { return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); } static inline void * -gc_quick_alloc_large_unboxed(int nbytes) +gc_quick_alloc_large_unboxed(long nbytes) { return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); } @@ -1113,9 +1115,9 @@ gc_quick_alloc_large_unboxed(int nbytes) * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b */ -extern int (*scavtab[256])(lispobj *where, lispobj object); +extern long (*scavtab[256])(lispobj *where, lispobj object); extern lispobj (*transother[256])(lispobj object); -extern int (*sizetab[256])(lispobj *where); +extern long (*sizetab[256])(lispobj *where); /* Copy a large boxed object. If the object is in a large object * region then it is simply promoted, else it is copied. If it's large @@ -1124,11 +1126,11 @@ extern int (*sizetab[256])(lispobj *where); * Vectors may have shrunk. If the object is not copied the space * needs to be reclaimed, and the page_tables corrected. */ lispobj -copy_large_object(lispobj object, int nwords) +copy_large_object(lispobj object, long nwords) { int tag; lispobj *new; - int first_page; + long first_page; gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); @@ -1143,10 +1145,10 @@ copy_large_object(lispobj object, int nwords) /* Promote the object. 
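 * (Promotion sketch: the object is not copied; only bookkeeping
 *  changes. Each page it covers flips page_table[p].gen from
 *  from_space to new_space, nwords*N_WORD_BYTES move between the two
 *  generations' bytes_allocated counters, and any tail pages left
 *  over because the vector shrank are returned to FREE_PAGE_FLAG.)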
*/ - int remaining_bytes; - int next_page; - int bytes_freed; - int old_bytes_used; + long remaining_bytes; + long next_page; + long bytes_freed; + long old_bytes_used; /* Note: Any page write-protection must be removed, else a * later scavenge_newspace may incorrectly not scavenge these @@ -1157,10 +1159,10 @@ copy_large_object(lispobj object, int nwords) gc_assert(page_table[first_page].first_object_offset == 0); next_page = first_page; - remaining_bytes = nwords*4; + remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert(page_table[next_page].allocated == BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== -PAGE_BYTES*(next_page-first_page)); @@ -1185,7 +1187,7 @@ copy_large_object(lispobj object, int nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - gc_assert(page_table[next_page].allocated == BOXED_PAGE); + gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1197,7 +1199,7 @@ copy_large_object(lispobj object, int nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - (page_table[next_page].allocated == BOXED_PAGE) && + (page_table[next_page].allocated == BOXED_PAGE_FLAG) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == -(next_page - first_page)*PAGE_BYTES)) { @@ -1208,18 +1210,19 @@ copy_large_object(lispobj object, int nwords) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; } - generations[from_space].bytes_allocated -= 4*nwords + bytes_freed; - generations[new_space].bytes_allocated += 4*nwords; + generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords + + bytes_freed; + generations[new_space].bytes_allocated += N_WORD_BYTES*nwords; bytes_allocated -= bytes_freed; /* Add the region to the new_areas if requested. */ - add_new_area(first_page,0,nwords*4); + add_new_area(first_page,0,nwords*N_WORD_BYTES); return(object); } else { @@ -1227,9 +1230,9 @@ copy_large_object(lispobj object, int nwords) tag = lowtag_of(object); /* Allocate space. */ - new = gc_quick_alloc_large(nwords*4); + new = gc_quick_alloc_large(nwords*N_WORD_BYTES); - memcpy(new,native_pointer(object),nwords*4); + memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); /* Return Lisp pointer of new object. */ return ((lispobj) new) | tag; @@ -1238,9 +1241,9 @@ copy_large_object(lispobj object, int nwords) /* to copy unboxed objects */ lispobj -copy_unboxed_object(lispobj object, int nwords) +copy_unboxed_object(lispobj object, long nwords) { - int tag; + long tag; lispobj *new; gc_assert(is_lisp_pointer(object)); @@ -1251,9 +1254,9 @@ copy_unboxed_object(lispobj object, int nwords) tag = lowtag_of(object); /* Allocate space. */ - new = gc_quick_alloc_unboxed(nwords*4); + new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES); - memcpy(new,native_pointer(object),nwords*4); + memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); /* Return Lisp pointer of new object. 
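 * (Tagging sketch: tag = lowtag_of(object) keeps the original low
 *  tag bits, e.g. OTHER_POINTER_LOWTAG for a bignum; since 'new' is
 *  an untagged native pointer, ((lispobj) new) | tag yields a
 *  correctly tagged Lisp pointer to the copy.)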
*/ return ((lispobj) new) | tag; @@ -1271,19 +1274,18 @@ copy_unboxed_object(lispobj object, int nwords) * KLUDGE: There's a lot of cut-and-paste duplication between this * function and copy_large_object(..). -- WHN 20000619 */ lispobj -copy_large_unboxed_object(lispobj object, int nwords) +copy_large_unboxed_object(lispobj object, long nwords) { int tag; lispobj *new; - lispobj *source, *dest; - int first_page; + long first_page; gc_assert(is_lisp_pointer(object)); gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); if ((nwords > 1024*1024) && gencgc_verbose) - FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*4)); + FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); /* Check whether it's a large object. */ first_page = find_page_index((void *)object); @@ -1293,26 +1295,26 @@ copy_large_unboxed_object(lispobj object, int nwords) /* Promote the object. Note: Unboxed objects may have been * allocated to a BOXED region so it may be necessary to * change the region to UNBOXED. */ - int remaining_bytes; - int next_page; - int bytes_freed; - int old_bytes_used; + long remaining_bytes; + long next_page; + long bytes_freed; + long old_bytes_used; gc_assert(page_table[first_page].first_object_offset == 0); next_page = first_page; - remaining_bytes = nwords*4; + remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)); + gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset== -PAGE_BYTES*(next_page-first_page)); gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; remaining_bytes -= PAGE_BYTES; next_page++; } @@ -1324,7 +1326,7 @@ copy_large_unboxed_object(lispobj object, int nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - page_table[next_page].allocated = UNBOXED_PAGE; + page_table[next_page].allocated = UNBOXED_PAGE_FLAG; /* Adjust the bytes_used. 
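 * (On this final page, old_bytes_used - remaining_bytes is
 *  reclaimed; e.g. a page that held a full PAGE_BYTES of the old
 *  vector but only 1000 bytes of the promoted one contributes
 *  PAGE_BYTES - 1000 to bytes_freed.)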
*/ old_bytes_used = page_table[next_page].bytes_used; @@ -1336,8 +1338,8 @@ copy_large_unboxed_object(lispobj object, int nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)) && + ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == -(next_page - first_page)*PAGE_BYTES)) { @@ -1348,7 +1350,7 @@ copy_large_unboxed_object(lispobj object, int nwords) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; @@ -1359,8 +1361,8 @@ copy_large_unboxed_object(lispobj object, int nwords) "/copy_large_unboxed bytes_freed=%d\n", bytes_freed)); - generations[from_space].bytes_allocated -= 4*nwords + bytes_freed; - generations[new_space].bytes_allocated += 4*nwords; + generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; + generations[new_space].bytes_allocated += nwords*N_WORD_BYTES; bytes_allocated -= bytes_freed; return(object); @@ -1370,19 +1372,10 @@ copy_large_unboxed_object(lispobj object, int nwords) tag = lowtag_of(object); /* Allocate space. */ - new = gc_quick_alloc_large_unboxed(nwords*4); - - dest = new; - source = (lispobj *) native_pointer(object); - - /* Copy the object. */ - while (nwords > 0) { - dest[0] = source[0]; - dest[1] = source[1]; - dest += 2; - source += 2; - nwords -= 2; - } + new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES); + + /* Copy the object. */ + memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); /* Return Lisp pointer of new object. */ return ((lispobj) new) | tag; @@ -1413,7 +1406,7 @@ static lispobj trans_boxed(lispobj object); void sniff_code_object(struct code *code, unsigned displacement) { - int nheader_words, ncode_words, nwords; + long nheader_words, ncode_words, nwords; void *p; void *constants_start_addr, *constants_end_addr; void *code_start_addr, *code_end_addr; @@ -1426,10 +1419,10 @@ sniff_code_object(struct code *code, unsigned displacement) nheader_words = HeaderValue(*(lispobj *)code); nwords = ncode_words + nheader_words; - constants_start_addr = (void *)code + 5*4; - constants_end_addr = (void *)code + nheader_words*4; - code_start_addr = (void *)code + nheader_words*4; - code_end_addr = (void *)code + nwords*4; + constants_start_addr = (void *)code + 5*N_WORD_BYTES; + constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES; + code_start_addr = (void *)code + nheader_words*N_WORD_BYTES; + code_end_addr = (void *)code + nwords*N_WORD_BYTES; /* Work through the unboxed code. 
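 * (The sniffer advances a byte at a time, reading the word at p and
 *  the preceding opcode bytes d1..d4 (d5 and d6 only under QSHOW),
 *  looking for 32-bit operands that still appear to reference the
 *  code object's pre-move location, i.e. fixups that were apparently
 *  missed.)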
*/ for (p = code_start_addr; p < code_end_addr; p++) { @@ -1438,7 +1431,7 @@ sniff_code_object(struct code *code, unsigned displacement) unsigned d2 = *((unsigned char *)p - 2); unsigned d3 = *((unsigned char *)p - 3); unsigned d4 = *((unsigned char *)p - 4); -#if QSHOW +#ifdef QSHOW unsigned d5 = *((unsigned char *)p - 5); unsigned d6 = *((unsigned char *)p - 6); #endif @@ -1583,7 +1576,7 @@ sniff_code_object(struct code *code, unsigned displacement) void gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) { - int nheader_words, ncode_words, nwords; + long nheader_words, ncode_words, nwords; void *constants_start_addr, *constants_end_addr; void *code_start_addr, *code_end_addr; lispobj fixups = NIL; @@ -1596,10 +1589,10 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) /* FSHOW((stderr, "/compiled code object at %x: header words = %d, code words = %d\n", new_code, nheader_words, ncode_words)); */ - constants_start_addr = (void *)new_code + 5*4; - constants_end_addr = (void *)new_code + nheader_words*4; - code_start_addr = (void *)new_code + nheader_words*4; - code_end_addr = (void *)new_code + nwords*4; + constants_start_addr = (void *)new_code + 5*N_WORD_BYTES; + constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES; + code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES; + code_end_addr = (void *)new_code + nwords*N_WORD_BYTES; /* FSHOW((stderr, "/const start = %x, end = %x\n", @@ -1640,12 +1633,11 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) /*SHOW("got fixups");*/ - if (widetag_of(fixups_vector->header) == - SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG) { + if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) { /* Got the fixups for the code block. Now work through the vector, and apply a fixup at each address. */ - int length = fixnum_value(fixups_vector->length); - int i; + long length = fixnum_value(fixups_vector->length); + long i; for (i = 0; i < length; i++) { unsigned offset = fixups_vector->data[i]; /* Now check the current value of offset. */ @@ -1655,7 +1647,7 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) /* If it's within the old_code object then it must be an * absolute fixup (relative ones are not saved) */ if ((old_value >= (unsigned)old_code) - && (old_value < ((unsigned)old_code + nwords*4))) + && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES))) /* So add the dispacement. */ *(unsigned *)((unsigned)code_start_addr + offset) = old_value + displacement; @@ -1666,6 +1658,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) *(unsigned *)((unsigned)code_start_addr + offset) = old_value - displacement; } + } else { + fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header)); } /* Check for possible errors. 
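 * (Fixup arithmetic recap: displacement = (new address of the code
 *  object) - (old address). An absolute fixup whose stored value
 *  pointed inside old_code is rebased by +displacement; a stored
 *  value that already lies inside the new code object is treated as
 *  a relative fixup and adjusted by -displacement instead.)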
*/ @@ -1719,14 +1713,14 @@ int gencgc_hash = 1; static int scav_vector(lispobj *where, lispobj object) { - unsigned int kv_length; + unsigned long kv_length; lispobj *kv_vector; - unsigned int length = 0; /* (0 = dummy to stop GCC warning) */ + unsigned long length = 0; /* (0 = dummy to stop GCC warning) */ lispobj *hash_table; lispobj empty_symbol; - unsigned int *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - unsigned int *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */ - unsigned int *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */ + unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */ + unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */ + unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */ lispobj weak_p_obj; unsigned next_vector_length = 0; @@ -1791,10 +1785,10 @@ scav_vector(lispobj *where, lispobj object) if (is_lisp_pointer(index_vector_obj) && (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) == - SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) { - index_vector = ((unsigned int *)native_pointer(index_vector_obj)) + 2; + SIMPLE_ARRAY_WORD_WIDETAG)) { + index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2; /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/ - length = fixnum_value(((unsigned int *)native_pointer(index_vector_obj))[1]); + length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]); /*FSHOW((stderr, "/length = %d\n", length));*/ } else { lose("invalid index_vector %x", index_vector_obj); @@ -1807,10 +1801,10 @@ scav_vector(lispobj *where, lispobj object) if (is_lisp_pointer(next_vector_obj) && (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) == - SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) { - next_vector = ((unsigned int *)native_pointer(next_vector_obj)) + 2; + SIMPLE_ARRAY_WORD_WIDETAG)) { + next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2; /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/ - next_vector_length = fixnum_value(((unsigned int *)native_pointer(next_vector_obj))[1]); + next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]); /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/ } else { lose("invalid next_vector %x", next_vector_obj); @@ -1826,11 +1820,11 @@ scav_vector(lispobj *where, lispobj object) lispobj hash_vector_obj = hash_table[15]; if (is_lisp_pointer(hash_vector_obj) && - (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) - == SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) { - hash_vector = ((unsigned int *)native_pointer(hash_vector_obj)) + 2; + (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) == + SIMPLE_ARRAY_WORD_WIDETAG)){ + hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2; /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/ - gc_assert(fixnum_value(((unsigned int *)native_pointer(hash_vector_obj))[1]) + gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1]) == next_vector_length); } else { hash_vector = NULL; @@ -1847,10 +1841,15 @@ scav_vector(lispobj *where, lispobj object) /* Work through the KV vector. */ { - int i; + long i; for (i = 1; i < next_vector_length; i++) { lispobj old_key = kv_vector[2*i]; - unsigned int old_index = (old_key & 0x1fffffff)%length; + +#if N_WORD_BITS == 32 + unsigned long old_index = (old_key & 0x1fffffff)%length; +#elif N_WORD_BITS == 64 + unsigned long old_index = (old_key & 0x1fffffffffffffff)%length; +#endif /* Scavenge the key and value. 
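 * (Rehash sketch for EQ-based keys: the bucket is derived from the
 *  key's address, old_index = (old_key & 0x1fffffff) % length on a
 *  32-bit build. If scavenging moved the key, new_index differs, so
 *  the code below unlinks entry i from the old_index chain via
 *  next_vector and relinks it under the bucket for new_index.)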
*/ scavenge(&kv_vector[2*i],2); @@ -1858,19 +1857,23 @@ scav_vector(lispobj *where, lispobj object) /* Check whether the key has moved and is EQ based. */ { lispobj new_key = kv_vector[2*i]; - unsigned int new_index = (new_key & 0x1fffffff)%length; +#if N_WORD_BITS == 32 + unsigned long new_index = (new_key & 0x1fffffff)%length; +#elif N_WORD_BITS == 64 + unsigned long new_index = (new_key & 0x1fffffffffffffff)%length; +#endif if ((old_index != new_index) && ((!hash_vector) || (hash_vector[i] == 0x80000000)) && ((new_key != empty_symbol) || (kv_vector[2*i] != empty_symbol))) { - /*FSHOW((stderr, - "* EQ key %d moved from %x to %x; index %d to %d\n", - i, old_key, new_key, old_index, new_index));*/ + /*FSHOW((stderr, + "* EQ key %d moved from %x to %x; index %d to %d\n", + i, old_key, new_key, old_index, new_index));*/ if (index_vector[old_index] != 0) { - /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/ + /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/ /* Unlink the key from the old_index chain. */ if (index_vector[old_index] == i) { @@ -1887,7 +1890,7 @@ scav_vector(lispobj *where, lispobj object) /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/ while (next != 0) { - /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/ + /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/ if (next == i) { /* Unlink it. */ next_vector[prior] = next_vector[next]; @@ -1925,7 +1928,7 @@ scav_vector(lispobj *where, lispobj object) #define WEAK_POINTER_NWORDS \ CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2) -static int +static long scav_weak_pointer(lispobj *where, lispobj object) { struct weak_pointer *wp = weak_pointers; @@ -1960,74 +1963,47 @@ scav_weak_pointer(lispobj *where, lispobj object) } -/* Scan an area looking for an object which encloses the given pointer. - * Return the object start on success or NULL on failure. */ -static lispobj * -search_space(lispobj *start, size_t words, lispobj *pointer) -{ - while (words > 0) { - size_t count = 1; - lispobj thing = *start; - - /* If thing is an immediate then this is a cons. */ - if (is_lisp_pointer(thing) - || ((thing & 3) == 0) /* fixnum */ - || (widetag_of(thing) == BASE_CHAR_WIDETAG) - || (widetag_of(thing) == UNBOUND_MARKER_WIDETAG)) - count = 2; - else - count = (sizetab[widetag_of(thing)])(start); - - /* Check whether the pointer is within this object. */ - if ((pointer >= start) && (pointer < (start+count))) { - /* found it! */ - /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/ - return(start); - } - - /* Round up the count. 
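 * (Heap objects start on two-word boundaries, so a raw size of, say,
 *  3 words is rounded to CEILING(3,2) = 4 before stepping to the
 *  next candidate object start.)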
*/ - count = CEILING(count,2); - - start += count; - words -= count; - } - return (NULL); -} - -lispobj* -search_read_only_space(lispobj *pointer) +lispobj * +search_read_only_space(void *pointer) { - lispobj* start = (lispobj*)READ_ONLY_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0); - if ((pointer < start) || (pointer >= end)) + lispobj *start = (lispobj *) READ_ONLY_SPACE_START; + lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0); + if ((pointer < (void *)start) || (pointer >= (void *)end)) return NULL; - return (search_space(start, (pointer+2)-start, pointer)); + return (search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *) pointer)); } lispobj * -search_static_space(lispobj *pointer) +search_static_space(void *pointer) { - lispobj* start = (lispobj*)STATIC_SPACE_START; - lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0); - if ((pointer < start) || (pointer >= end)) + lispobj *start = (lispobj *)STATIC_SPACE_START; + lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0); + if ((pointer < (void *)start) || (pointer >= (void *)end)) return NULL; - return (search_space(start, (pointer+2)-start, pointer)); + return (search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *) pointer)); } /* a faster version for searching the dynamic space. This will work even * if the object is in a current allocation region. */ lispobj * -search_dynamic_space(lispobj *pointer) +search_dynamic_space(void *pointer) { - int page_index = find_page_index(pointer); + long page_index = find_page_index(pointer); lispobj *start; /* The address may be invalid, so do some checks. */ - if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE)) + if ((page_index == -1) || + (page_table[page_index].allocated == FREE_PAGE_FLAG)) return NULL; start = (lispobj *)((void *)page_address(page_index) + page_table[page_index].first_object_offset); - return (search_space(start, (pointer+2)-start, pointer)); + return (search_space(start, + (((lispobj *)pointer)+2)-start, + (lispobj *)pointer)); } /* Is there any possibility that pointer is a valid Lisp object @@ -2099,12 +2075,12 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) } /* Is it plausible cons? 
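 * (Both words must themselves look like valid lispobjs: a tagged
 *  pointer, a fixnum (fixnump checks that the low tag bits are zero,
 *  so e.g. 0x10 passes as the fixnum 4 on a 32-bit build), a
 *  character, or the unbound marker.)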
*/ if ((is_lisp_pointer(start_addr[0]) - || ((start_addr[0] & 3) == 0) /* fixnum */ - || (widetag_of(start_addr[0]) == BASE_CHAR_WIDETAG) + || (fixnump(start_addr[0])) + || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG) || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG)) && (is_lisp_pointer(start_addr[1]) - || ((start_addr[1] & 3) == 0) /* fixnum */ - || (widetag_of(start_addr[1]) == BASE_CHAR_WIDETAG) + || (fixnump(start_addr[1])) + || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG) || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG))) break; else { @@ -2150,7 +2126,7 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) } switch (widetag_of(start_addr[0])) { case UNBOUND_MARKER_WIDETAG: - case BASE_CHAR_WIDETAG: + case CHARACTER_WIDETAG: if (gencgc_verbose) FSHOW((stderr, "*Wo3: %x %x %x\n", @@ -2188,6 +2164,9 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) #endif case SIMPLE_ARRAY_WIDETAG: case COMPLEX_BASE_STRING_WIDETAG: +#ifdef COMPLEX_CHARACTER_STRING_WIDETAG + case COMPLEX_CHARACTER_STRING_WIDETAG: +#endif case COMPLEX_VECTOR_NIL_WIDETAG: case COMPLEX_BIT_VECTOR_WIDETAG: case COMPLEX_VECTOR_WIDETAG: @@ -2203,6 +2182,9 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) case LONG_FLOAT_WIDETAG: #endif case SIMPLE_BASE_STRING_WIDETAG: +#ifdef SIMPLE_CHARACTER_STRING_WIDETAG + case SIMPLE_CHARACTER_STRING_WIDETAG: +#endif case SIMPLE_BIT_VECTOR_WIDETAG: case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: @@ -2211,9 +2193,20 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: +#endif case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: + case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: +#endif #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: #endif @@ -2226,6 +2219,12 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: #endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: +#endif case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG: case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG: #ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG @@ -2274,23 +2273,26 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer) static void maybe_adjust_large_object(lispobj *where) { - int first_page; - int nwords; + long first_page; + long nwords; - int remaining_bytes; - int next_page; - int bytes_freed; - int old_bytes_used; + long remaining_bytes; + long next_page; + long bytes_freed; + long old_bytes_used; int boxed; /* Check whether it's a vector or bignum object. 
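 * (The distinction matters because simple-vectors hold tagged words
 *  and must stay on BOXED pages, which the scavenger walks, while
 *  bignums and specialized arrays hold raw bits and may live on
 *  UNBOXED pages, which it skips.)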
*/ switch (widetag_of(where[0])) { case SIMPLE_VECTOR_WIDETAG: - boxed = BOXED_PAGE; + boxed = BOXED_PAGE_FLAG; break; case BIGNUM_WIDETAG: case SIMPLE_BASE_STRING_WIDETAG: +#ifdef SIMPLE_CHARACTER_STRING_WIDETAG + case SIMPLE_CHARACTER_STRING_WIDETAG: +#endif case SIMPLE_BIT_VECTOR_WIDETAG: case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: @@ -2299,9 +2301,20 @@ maybe_adjust_large_object(lispobj *where) case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: +#endif case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: +#endif #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: #endif @@ -2314,6 +2327,12 @@ maybe_adjust_large_object(lispobj *where) #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: #endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: +#endif case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG: case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG: #ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG @@ -2328,7 +2347,7 @@ maybe_adjust_large_object(lispobj *where) #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG: #endif - boxed = UNBOXED_PAGE; + boxed = UNBOXED_PAGE_FLAG; break; default: return; @@ -2349,11 +2368,11 @@ maybe_adjust_large_object(lispobj *where) gc_assert(page_table[first_page].first_object_offset == 0); next_page = first_page; - remaining_bytes = nwords*4; + remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == BOXED_PAGE) - || (page_table[next_page].allocated == UNBOXED_PAGE)); + gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG) + || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].first_object_offset == -PAGE_BYTES*(next_page-first_page)); @@ -2388,8 +2407,8 @@ maybe_adjust_large_object(lispobj *where) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE) - || (page_table[next_page].allocated == BOXED_PAGE)) && + ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) + || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && (page_table[next_page].first_object_offset == -(next_page - first_page)*PAGE_BYTES)) { @@ -2400,7 +2419,7 @@ maybe_adjust_large_object(lispobj *where) gc_assert(page_table[next_page].write_protected == 0); old_bytes_used = page_table[next_page].bytes_used; - page_table[next_page].allocated = FREE_PAGE; + page_table[next_page].allocated = FREE_PAGE_FLAG; page_table[next_page].bytes_used = 0; bytes_freed += old_bytes_used; next_page++; @@ -2433,20 +2452,20 @@ maybe_adjust_large_object(lispobj *where) static void preserve_pointer(void *addr) { - int addr_page_index = 
find_page_index(addr); - int first_page; - int i; + long addr_page_index = find_page_index(addr); + long first_page; + long i; unsigned region_allocation; /* quick check 1: Address is quite likely to have been invalid. */ if ((addr_page_index == -1) - || (page_table[addr_page_index].allocated == FREE_PAGE) + || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG) || (page_table[addr_page_index].bytes_used == 0) || (page_table[addr_page_index].gen != from_space) /* Skip if already marked dont_move. */ || (page_table[addr_page_index].dont_move != 0)) return; - gc_assert(!(page_table[addr_page_index].allocated & OPEN_REGION_PAGE)); + gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG)); /* (Now that we know that addr_page_index is in range, it's * safe to index into page_table[] with it.) */ region_allocation = page_table[addr_page_index].allocated; @@ -2496,7 +2515,7 @@ preserve_pointer(void *addr) * free area in which case it's ignored here. Note it gets * through the valid pointer test above because the tail looks * like conses. */ - if ((page_table[addr_page_index].allocated == FREE_PAGE) + if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ || (((unsigned)addr & (PAGE_BYTES - 1)) @@ -2535,7 +2554,7 @@ preserve_pointer(void *addr) /* Check whether this is the last page in this contiguous block.. */ if ((page_table[i].bytes_used < PAGE_BYTES) /* ..or it is PAGE_BYTES and is the last in the block */ - || (page_table[i+1].allocated == FREE_PAGE) + || (page_table[i+1].allocated == FREE_PAGE_FLAG) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. gen */ || (page_table[i+1].first_object_offset == 0)) @@ -2560,22 +2579,22 @@ preserve_pointer(void *addr) * * We return 1 if the page was write-protected, else 0. */ static int -update_page_write_prot(int page) +update_page_write_prot(long page) { int gen = page_table[page].gen; - int j; + long j; int wp_it = 1; void **page_addr = (void **)page_address(page); - int num_words = page_table[page].bytes_used / 4; + long num_words = page_table[page].bytes_used / N_WORD_BYTES; /* Shouldn't be a free page. */ - gc_assert(page_table[page].allocated != FREE_PAGE); + gc_assert(page_table[page].allocated != FREE_PAGE_FLAG); gc_assert(page_table[page].bytes_used != 0); /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected || page_table[page].dont_move - || (page_table[page].allocated & UNBOXED_PAGE)) + || (page_table[page].allocated & UNBOXED_PAGE_FLAG)) return (0); /* Scan the page for pointers to younger generations or the @@ -2583,12 +2602,12 @@ update_page_write_prot(int page) for (j = 0; j < num_words; j++) { void *ptr = *(page_addr+j); - int index = find_page_index(ptr); + long index = find_page_index(ptr); /* Check that it's in the dynamic space */ if (index != -1) if (/* Does it point to a younger or the temp. generation? 
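 * (A page of generation gen can only be write-protected if nothing
 *  on it points at a younger page, page_table[index].gen < gen, or
 *  at the temporary generation NUM_GENERATIONS that objects occupy
 *  mid-GC; a single such pointer clears wp_it below and the page
 *  stays writable.)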
*/ - ((page_table[index].allocated != FREE_PAGE) + ((page_table[index].allocated != FREE_PAGE_FLAG) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) || (page_table[index].gen == NUM_GENERATIONS))) @@ -2652,7 +2671,7 @@ update_page_write_prot(int page) static void scavenge_generation(int generation) { - int i; + long i; int num_wp = 0; #define SC_GEN_CK 0 @@ -2663,10 +2682,10 @@ scavenge_generation(int generation) #endif for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated & BOXED_PAGE) + if ((page_table[i].allocated & BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { - int last_page,j; + long last_page,j; int write_protected=1; /* This should be the start of a region */ @@ -2678,15 +2697,16 @@ scavenge_generation(int generation) write_protected && page_table[last_page].write_protected; if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE)) + || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].first_object_offset == 0)) break; } if (!write_protected) { - scavenge(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES)/4); + scavenge(page_address(i), + (page_table[last_page].bytes_used + + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); /* Now scan the pages and write protect those that * don't have pointers to younger generations. */ @@ -2709,7 +2729,7 @@ scavenge_generation(int generation) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < NUM_PAGES; i++) { - if ((page_table[i].allocation ! =FREE_PAGE) + if ((page_table[i].allocation != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0)) { @@ -2756,21 +2776,21 @@ static struct new_area new_areas_2[NUM_NEW_AREAS]; static void scavenge_newspace_generation_one_scan(int generation) { - int i; + long i; FSHOW((stderr, "/starting one full scan of newspace generation %d\n", generation)); for (i = 0; i < last_free_page; i++) { - /* note that this skips over open regions when it encounters them */ - if ((page_table[i].allocated == BOXED_PAGE) + /* Note that this skips over open regions when it encounters them. */ + if ((page_table[i].allocated & BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && ((page_table[i].write_protected == 0) /* (This may be redundant as write_protected is now * cleared before promotion.) */ || (page_table[i].dont_move == 1))) { - int last_page; + long last_page; int all_wp=1; /* The scavenge will start at the first_object_offset of page i. @@ -2791,7 +2811,7 @@ scavenge_newspace_generation_one_scan(int generation) * contiguous block */ if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE)) + || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].first_object_offset == 0)) @@ -2800,11 +2820,11 @@ scavenge_newspace_generation_one_scan(int generation) /* Do a limited check for write-protected pages. 
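 * (When any page of the block is unprotected, the whole contiguous
 *  block is scavenged in one call: size covers everything from the
 *  first page's first_object_offset through the last page's
 *  bytes_used, so e.g. two full pages give
 *  2*PAGE_BYTES/N_WORD_BYTES words.)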
*/ if (!all_wp) { - int size; + long size; size = (page_table[last_page].bytes_used + (last_page-i)*PAGE_BYTES - - page_table[i].first_object_offset)/4; + - page_table[i].first_object_offset)/N_WORD_BYTES; new_areas_ignore_page = last_page; scavenge(page_address(i) + @@ -2824,15 +2844,15 @@ scavenge_newspace_generation_one_scan(int generation) static void scavenge_newspace_generation(int generation) { - int i; + long i; /* the new_areas array currently being written to by gc_alloc() */ struct new_area (*current_new_areas)[] = &new_areas_1; - int current_new_areas_index; + long current_new_areas_index; /* the new_areas created by the previous scavenge cycle */ struct new_area (*previous_new_areas)[] = NULL; - int previous_new_areas_index; + long previous_new_areas_index; /* Flush the current regions updating the tables. */ gc_alloc_update_all_page_tables(); @@ -2905,12 +2925,10 @@ scavenge_newspace_generation(int generation) /* Work through previous_new_areas. */ for (i = 0; i < previous_new_areas_index; i++) { - /* FIXME: All these bare *4 and /4 should be something - * like BYTES_PER_WORD or WBYTES. */ - int page = (*previous_new_areas)[i].page; - int offset = (*previous_new_areas)[i].offset; - int size = (*previous_new_areas)[i].size / 4; - gc_assert((*previous_new_areas)[i].size % 4 == 0); + long page = (*previous_new_areas)[i].page; + long offset = (*previous_new_areas)[i].offset; + long size = (*previous_new_areas)[i].size / N_WORD_BYTES; + gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0); scavenge(page_address(page)+offset, size); } @@ -2932,7 +2950,7 @@ scavenge_newspace_generation(int generation) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < NUM_PAGES; i++) { - if ((page_table[i].allocation != FREE_PAGE) + if ((page_table[i].allocation != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) @@ -2952,10 +2970,10 @@ scavenge_newspace_generation(int generation) static void unprotect_oldspace(void) { - int i; + long i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == from_space)) { void *page_start; @@ -2976,19 +2994,18 @@ unprotect_oldspace(void) * assumes that all objects have been copied or promoted to an older * generation. Bytes_allocated and the generation bytes_allocated * counter are updated. The number of bytes freed is returned. */ -extern void i586_bzero(void *addr, int nbytes); -static int +static long free_oldspace(void) { - int bytes_freed = 0; - int first_page, last_page; + long bytes_freed = 0; + long first_page, last_page; first_page = 0; do { /* Find a first page for the next region of pages. */ while ((first_page < last_free_page) - && ((page_table[first_page].allocated == FREE_PAGE) + && ((page_table[first_page].allocated == FREE_PAGE_FLAG) || (page_table[first_page].bytes_used == 0) || (page_table[first_page].gen != from_space))) first_page++; @@ -3004,7 +3021,7 @@ free_oldspace(void) bytes_freed += page_table[last_page].bytes_used; generations[page_table[last_page].gen].bytes_allocated -= page_table[last_page].bytes_used; - page_table[last_page].allocated = FREE_PAGE; + page_table[last_page].allocated = FREE_PAGE_FLAG; page_table[last_page].bytes_used = 0; /* Remove any write-protection. 
We should be able to rely @@ -3020,7 +3037,7 @@ free_oldspace(void) last_page++; } while ((last_page < last_free_page) - && (page_table[last_page].allocated != FREE_PAGE) + && (page_table[last_page].allocated != FREE_PAGE_FLAG) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); @@ -3037,23 +3054,14 @@ free_oldspace(void) os_invalidate(page_start, PAGE_BYTES*(last_page-first_page)); addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page)); if (addr == NULL || addr != page_start) { - /* Is this an error condition? I couldn't really tell from - * the old CMU CL code, which fprintf'ed a message with - * an exclamation point at the end. But I've never seen the - * message, so it must at least be unusual.. - * - * (The same condition is also tested for in gc_free_heap.) - * - * -- WHN 19991129 */ - lose("i586_bzero: page moved, 0x%08x ==> 0x%08x", - page_start, + lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start, addr); } } else { - int *page_start; + long *page_start; - page_start = (int *)page_address(first_page); - i586_bzero(page_start, PAGE_BYTES*(last_page-first_page)); + page_start = (long *)page_address(first_page); + memset(page_start, 0,PAGE_BYTES*(last_page-first_page)); } first_page = last_page; @@ -3070,11 +3078,11 @@ static void print_ptr(lispobj *addr) { /* If addr is in the dynamic space then out the page information. */ - int pi1 = find_page_index((void*)addr); + long pi1 = find_page_index((void*)addr); if (pi1 != -1) fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n", - (unsigned int) addr, + (unsigned long) addr, pi1, page_table[pi1].allocated, page_table[pi1].gen, @@ -3094,7 +3102,7 @@ print_ptr(lispobj *addr) } #endif -extern int undefined_tramp; +extern long undefined_tramp; static void verify_space(lispobj *start, size_t words) @@ -3109,11 +3117,11 @@ verify_space(lispobj *start, size_t words) lispobj thing = *(lispobj*)start; if (is_lisp_pointer(thing)) { - int page_index = find_page_index((void*)thing); - int to_readonly_space = + long page_index = find_page_index((void*)thing); + long to_readonly_space = (READ_ONLY_SPACE_START <= thing && thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)); - int to_static_space = + long to_static_space = (STATIC_SPACE_START <= thing && thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0)); @@ -3121,7 +3129,7 @@ verify_space(lispobj *start, size_t words) if (page_index != -1) { /* If it's within the dynamic space it should point to a used * page. XX Could check the offset too. */ - if ((page_table[page_index].allocated != FREE_PAGE) + if ((page_table[page_index].allocated != FREE_PAGE_FLAG) && (page_table[page_index].bytes_used == 0)) lose ("Ptr %x @ %x sees free page.", thing, start); /* Check that it doesn't point to a forwarding pointer! 
*/ @@ -3166,6 +3174,9 @@ verify_space(lispobj *start, size_t words) case COMPLEX_WIDETAG: case SIMPLE_ARRAY_WIDETAG: case COMPLEX_BASE_STRING_WIDETAG: +#ifdef COMPLEX_CHARACTER_STRING_WIDETAG + case COMPLEX_CHARACTER_STRING_WIDETAG: +#endif case COMPLEX_VECTOR_NIL_WIDETAG: case COMPLEX_BIT_VECTOR_WIDETAG: case COMPLEX_VECTOR_WIDETAG: @@ -3174,7 +3185,7 @@ verify_space(lispobj *start, size_t words) case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: case VALUE_CELL_HEADER_WIDETAG: case SYMBOL_HEADER_WIDETAG: - case BASE_CHAR_WIDETAG: + case CHARACTER_WIDETAG: case UNBOUND_MARKER_WIDETAG: case INSTANCE_HEADER_WIDETAG: case FDEFN_WIDETAG: @@ -3185,7 +3196,7 @@ verify_space(lispobj *start, size_t words) { lispobj object = *start; struct code *code; - int nheader_words, ncode_words, nwords; + long nheader_words, ncode_words, nwords; lispobj fheaderl; struct simple_fun *fheaderp; @@ -3252,6 +3263,9 @@ verify_space(lispobj *start, size_t words) case COMPLEX_LONG_FLOAT_WIDETAG: #endif case SIMPLE_BASE_STRING_WIDETAG: +#ifdef SIMPLE_CHARACTER_STRING_WIDETAG + case SIMPLE_CHARACTER_STRING_WIDETAG: +#endif case SIMPLE_BIT_VECTOR_WIDETAG: case SIMPLE_ARRAY_NIL_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG: @@ -3260,9 +3274,20 @@ verify_space(lispobj *start, size_t words) case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG: +#endif case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG: case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG: +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG: +#endif #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG: #endif @@ -3275,6 +3300,12 @@ verify_space(lispobj *start, size_t words) #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG: #endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG: +#endif +#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG + case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG: +#endif case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG: case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG: #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG @@ -3313,15 +3344,15 @@ verify_gc(void) * Some counts of lispobjs are called foo_count; it might be good * to grep for all foo_size and rename the appropriate ones to * foo_count. 
*/ - int read_only_space_size = + long read_only_space_size = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0) - (lispobj*)READ_ONLY_SPACE_START; - int static_space_size = + long static_space_size = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0) - (lispobj*)STATIC_SPACE_START; struct thread *th; for_each_thread(th) { - int binding_stack_size = + long binding_stack_size = (lispobj*)SymbolValue(BINDING_STACK_POINTER,th) - (lispobj*)th->binding_stack_start; verify_space(th->binding_stack_start, binding_stack_size); @@ -3336,10 +3367,10 @@ verify_generation(int generation) int i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { - int last_page; + long last_page; int region_allocation = page_table[i].allocated; /* This should be the start of a contiguous block */ @@ -3362,7 +3393,7 @@ verify_generation(int generation) break; verify_space(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES)/4); + + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); i = last_page; } } @@ -3372,26 +3403,26 @@ verify_generation(int generation) static void verify_zero_fill(void) { - int page; + long page; for (page = 0; page < last_free_page; page++) { - if (page_table[page].allocated == FREE_PAGE) { + if (page_table[page].allocated == FREE_PAGE_FLAG) { /* The whole page should be zero filled. */ - int *start_addr = (int *)page_address(page); - int size = 1024; - int i; + long *start_addr = (long *)page_address(page); + long size = 1024; + long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { lose("free page not zero at %x", start_addr + i); } } } else { - int free_bytes = PAGE_BYTES - page_table[page].bytes_used; + long free_bytes = PAGE_BYTES - page_table[page].bytes_used; if (free_bytes > 0) { - int *start_addr = (int *)((unsigned)page_address(page) + long *start_addr = (long *)((unsigned)page_address(page) + page_table[page].bytes_used); - int size = free_bytes / 4; - int i; + long size = free_bytes / N_WORD_BYTES; + long i; for (i = 0; i < size; i++) { if (start_addr[i] != 0) { lose("free region not zero at %x", start_addr + i); @@ -3415,7 +3446,7 @@ gencgc_verify_zero_fill(void) static void verify_dynamic_space(void) { - int i; + long i; for (i = 0; i < NUM_GENERATIONS; i++) verify_generation(i); @@ -3428,12 +3459,12 @@ verify_dynamic_space(void) static void write_protect_generation_pages(int generation) { - int i; + long i; gc_assert(generation < NUM_GENERATIONS); for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated == BOXED_PAGE) + if ((page_table[i].allocated == BOXED_PAGE_FLAG) && (page_table[i].bytes_used != 0) && !page_table[i].dont_move && (page_table[i].gen == generation)) { @@ -3479,8 +3510,9 @@ garbage_collect_generation(int generation, int raise) * temporary generation (NUM_GENERATIONS), and lowered when * done. Set up this new generation. There should be no pages * allocated to it yet. */ - if (!raise) - gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0); + if (!raise) { + gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0); + } /* Set the global src and dest. 
generations */ from_space = generation; @@ -3527,8 +3559,8 @@ garbage_collect_generation(int generation, int raise) for_each_thread(th) { void **ptr; void **esp=(void **)-1; - int i,free; #ifdef LISP_FEATURE_SB_THREAD + long i,free; if(th==arch_os_get_current_thread()) { esp = (void **) &raise; } else { @@ -3553,9 +3585,9 @@ garbage_collect_generation(int generation, int raise) } } -#if QSHOW +#ifdef QSHOW if (gencgc_verbose > 1) { - int num_dont_move_pages = count_dont_move_pages(); + long num_dont_move_pages = count_dont_move_pages(); fprintf(stderr, "/non-movable pages due to conservative pointers = %d (%d bytes)\n", num_dont_move_pages, @@ -3647,8 +3679,8 @@ garbage_collect_generation(int generation, int raise) /* As a check re-scavenge the newspace once; no new objects should * be found. */ { - int old_bytes_allocated = bytes_allocated; - int bytes_allocated; + long old_bytes_allocated = bytes_allocated; + long bytes_allocated; /* Start with a full scavenge. */ scavenge_newspace_generation_one_scan(new_space); @@ -3711,14 +3743,14 @@ garbage_collect_generation(int generation, int raise) } /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */ -int +long update_x86_dynamic_space_free_pointer(void) { - int last_page = -1; - int i; + long last_page = -1; + long i; - for (i = 0; i < NUM_PAGES; i++) - if ((page_table[i].allocated != FREE_PAGE) + for (i = 0; i < last_free_page; i++) + if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].bytes_used != 0)) last_page = i; @@ -3744,7 +3776,7 @@ collect_garbage(unsigned last_gen) int gen = 0; int raise; int gen_to_wp; - int i; + long i; FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen)); @@ -3862,22 +3894,22 @@ collect_garbage(unsigned last_gen) void gc_free_heap(void) { - int page; + long page; if (gencgc_verbose > 1) SHOW("entering gc_free_heap"); for (page = 0; page < NUM_PAGES; page++) { /* Skip free pages which should already be zero filled. */ - if (page_table[page].allocated != FREE_PAGE) { + if (page_table[page].allocated != FREE_PAGE_FLAG) { void *page_start, *addr; /* Mark the page free. The other slots are assumed invalid - * when it is a FREE_PAGE and bytes_used is 0 and it + * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it * should not be write-protected -- except that the * generation is used for the current region but it sets * that up. */ - page_table[page].allocated = FREE_PAGE; + page_table[page].allocated = FREE_PAGE_FLAG; page_table[page].bytes_used = 0; /* Zero the page. */ @@ -3896,10 +3928,10 @@ gc_free_heap(void) } } else if (gencgc_zero_check_during_free_heap) { /* Double-check that the page is zero filled. */ - int *page_start, i; - gc_assert(page_table[page].allocated == FREE_PAGE); + long *page_start, i; + gc_assert(page_table[page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[page].bytes_used == 0); - page_start = (int *)page_address(page); + page_start = (long *)page_address(page); for (i=0; i<1024; i++) { if (page_start[i] != 0) { lose("free region not zero at %x", page_start + i); @@ -3945,7 +3977,7 @@ gc_free_heap(void) void gc_init(void) { - int i; + long i; gc_init_tables(); scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector; @@ -3957,7 +3989,7 @@ gc_init(void) /* Initialize each page structure. */ for (i = 0; i < NUM_PAGES; i++) { /* Initialize all pages as free. */ - page_table[i].allocated = FREE_PAGE; + page_table[i].allocated = FREE_PAGE_FLAG; page_table[i].bytes_used = 0; /* Pages are not write-protected at startup. 
*/ @@ -4001,13 +4033,13 @@ gc_init(void) static void gencgc_pickup_dynamic(void) { - int page = 0; - int alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); + long page = 0; + long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0); lispobj *prev=(lispobj *)page_address(page); do { lispobj *first,*ptr= (lispobj *)page_address(page); - page_table[page].allocated = BOXED_PAGE; + page_table[page].allocated = BOXED_PAGE_FLAG; page_table[page].gen = 0; page_table[page].bytes_used = PAGE_BYTES; page_table[page].large_object = 0; @@ -4047,17 +4079,22 @@ gc_initialize_pointers(void) * region is full, so in most cases it's not needed. */ char * -alloc(int nbytes) +alloc(long nbytes) { struct thread *th=arch_os_get_current_thread(); - struct alloc_region *region= + struct alloc_region *region= +#ifdef LISP_FEATURE_SB_THREAD th ? &(th->alloc_region) : &boxed_region; +#else + &boxed_region; +#endif void *new_obj; void *new_free_pointer; - + gc_assert(nbytes>0); /* Check for alignment allocation problems. */ - gc_assert((((unsigned)region->free_pointer & 0x7) == 0) - && ((nbytes & 0x7) == 0)); + gc_assert((((unsigned)region->free_pointer & LOWTAG_MASK) == 0) + && ((nbytes & LOWTAG_MASK) == 0)); +#if 0 if(all_threads) /* there are a few places in the C code that allocate data in the * heap before Lisp starts. This is before interrupts are enabled, @@ -4075,6 +4112,7 @@ alloc(int nbytes) #else gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)); #endif +#endif /* maybe we can do this quickly ... */ new_free_pointer = region->free_pointer + nbytes; @@ -4100,29 +4138,6 @@ alloc(int nbytes) new_obj = gc_alloc_with_region(nbytes,0,region,0); return (new_obj); } - - -/* Find the code object for the given pc, or return NULL on failure. - * - * FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */ -lispobj * -component_ptr_from_pc(lispobj *pc) -{ - lispobj *object = NULL; - - if ( (object = search_read_only_space(pc)) ) - ; - else if ( (object = search_static_space(pc)) ) - ; - else - object = search_dynamic_space(pc); - - if (object) /* if we found something */ - if (widetag_of(*object) == CODE_HEADER_WIDETAG) /* if it's a code object */ - return(object); - - return (NULL); -} /* * shared support for the OS-dependent signal handlers which @@ -4144,9 +4159,9 @@ void unhandled_sigmemoryfault(void); int gencgc_handle_wp_violation(void* fault_addr) { - int page_index = find_page_index(fault_addr); + long page_index = find_page_index(fault_addr); -#if defined QSHOW_SIGNALS +#ifdef QSHOW_SIGNALS FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n", fault_addr, page_index)); #endif
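
Many of the hunks above change int counters to long and replace hard-coded /4 with /N_WORD_BYTES, so the collector's byte-to-word arithmetic is correct on 64-bit targets as well as 32-bit ones. Below is a minimal standalone sketch of that arithmetic for a contiguous block of pages; PAGE_BYTES here and the helper name block_words are simplified stand-ins, not SBCL's actual definitions.

/* Minimal model of the word-width change in this diff: byte counts
 * are divided by N_WORD_BYTES instead of a hard-coded 4, so the same
 * code is right on both 32- and 64-bit builds. */
#include <stdio.h>

#define N_WORD_BYTES ((int)sizeof(long))
#define N_WORD_BITS  (8 * N_WORD_BYTES)
#define PAGE_BYTES   4096

/* words to scavenge in a contiguous block spanning pages
 * [first_page, last_page], whose last page holds
 * last_page_bytes_used bytes */
static long
block_words(long first_page, long last_page, long last_page_bytes_used)
{
    long nbytes = (last_page - first_page) * PAGE_BYTES + last_page_bytes_used;
    return nbytes / N_WORD_BYTES;   /* was nbytes / 4 on 32-bit only */
}

int main(void)
{
    /* a 3-page block whose last page holds 512 bytes */
    printf("words to scavenge: %ld\n", block_words(10, 12, 512));
    printf("word size: %d bits\n", N_WORD_BITS);
    return 0;
}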
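
free_oldspace above drops the x86-only i586_bzero in favor of portable memset, while keeping the alternative of invalidating and re-validating the freed pages so the OS hands them back zero-filled. A minimal sketch of the two strategies, assuming POSIX mmap/munmap in place of os_validate/os_invalidate; error handling is abbreviated.

/* Minimal model of the two page-zeroing strategies in free_oldspace:
 * either memset the freed range, or unmap and re-map it so the OS
 * returns zero-filled pages (cheaper for large ranges on some
 * platforms). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define PAGE_BYTES 4096

static void *
zero_pages(void *start, long npages, int remap)
{
    long len = npages * PAGE_BYTES;
    if (remap) {
        /* os_invalidate + os_validate in the real code */
        munmap(start, len);
        return mmap(start, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    }
    memset(start, 0, len);   /* the portable path that replaced i586_bzero */
    return start;
}

int main(void)
{
    void *p = mmap(NULL, 4 * PAGE_BYTES, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    p = zero_pages(p, 4, 1);
    printf("pages re-mapped at %p, first word = %ld\n", p, *(long *)p);
    return 0;
}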
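
The reworked alloc() above keeps its fast path: bump free_pointer inside the current region and fall into gc_alloc_with_region only when the request does not fit. A standalone sketch of that pattern follows; the struct layout and the slow path are simplified stand-ins for SBCL's region machinery, not the real thing.

/* Minimal model of the alloc() fast path in this diff: bump
 * free_pointer within the open region, fall back to a (stubbed)
 * slow path when the request does not fit.  LOWTAG_MASK stands in
 * for the double-word alignment the real collector enforces. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define LOWTAG_MASK 0x7

struct alloc_region {
    char *free_pointer;
    char *end_addr;
};

static void *
slow_path(struct alloc_region *region, long nbytes)
{
    /* The real collector closes the region and may trigger a GC
     * here; this sketch just grabs a fresh chunk from malloc. */
    region->free_pointer = malloc(64 * 1024);
    region->end_addr = region->free_pointer + 64 * 1024;
    void *new_obj = region->free_pointer;
    region->free_pointer += nbytes;
    return new_obj;
}

static void *
alloc(struct alloc_region *region, long nbytes)
{
    assert(nbytes > 0);
    /* check for alignment allocation problems, as in the diff */
    assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
           && ((nbytes & LOWTAG_MASK) == 0));

    char *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {   /* fast path: bump */
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return slow_path(region, nbytes);             /* region exhausted */
}

int main(void)
{
    char *chunk = malloc(4096);
    struct alloc_region r = { chunk, chunk + 4096 };
    void *a = alloc(&r, 16);
    void *b = alloc(&r, 32);
    printf("a=%p b=%p, b-a=%ld (expect 16)\n",
           a, b, (long)((char *)b - (char *)a));
    return 0;
}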
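
write_protect_generation_pages and gencgc_handle_wp_violation above cooperate as a page-granularity write barrier: pages with no pointers to younger generations are mprotect'ed read-only, and the fault handler clears the protection and remembers the write so the page gets rescanned. A minimal POSIX sketch of that protocol, assuming Linux-style SIGSEGV delivery with si_addr; the real handler consults page_table and takes locking that is omitted here.

/* Minimal model of the write-barrier protocol in this diff: protect a
 * page read-only, catch the write fault, record that the page was
 * written, and unprotect it so the faulting store can resume.
 * (mprotect from a signal handler is common GC practice but is not
 * guaranteed async-signal-safe by POSIX.) */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *page;
static long  page_size;
static volatile sig_atomic_t write_protected_cleared = 0;

static void
wp_violation_handler(int sig, siginfo_t *info, void *ctx)
{
    (void)sig; (void)ctx;
    /* The real handler maps fault_addr to a page_table index and
     * ignores faults outside the dynamic space. */
    if ((char *)info->si_addr >= page
        && (char *)info->si_addr < page + page_size) {
        write_protected_cleared = 1;   /* rescan this page at next GC */
        mprotect(page, page_size, PROT_READ | PROT_WRITE);
    }
}

int main(void)
{
    page_size = sysconf(_SC_PAGESIZE);
    page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    struct sigaction sa;
    memset(&sa, 0, sizeof sa);
    sa.sa_sigaction = wp_violation_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);

    mprotect(page, page_size, PROT_READ);   /* write-protect the page */
    page[0] = 42;                           /* faults; handler unprotects */

    printf("page written, wp cleared flag = %d\n",
           (int)write_protected_cleared);
    return 0;
}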