X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=81ef8b29537b5ffdece4d6966573e21a7ba0c9d5;hb=aa0ed5a420ea5295d586b3f323b5375d3b506860;hp=2f648121ff0ec8ff6d03626c97f35650fe8ca215;hpb=1eb303172df6650de51ad12b993a392681f50c50;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index 2f64812..81ef8b2 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -41,6 +41,7 @@ #include "gc.h" #include "gc-internal.h" #include "thread.h" +#include "pseudo-atomic.h" #include "alloc.h" #include "genesis/vector.h" #include "genesis/weak-pointer.h" @@ -57,7 +58,7 @@ /* forward declarations */ page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes, - int unboxed); + int page_type_flag); /* @@ -165,6 +166,48 @@ static boolean conservative_stack = 1; page_index_t page_table_pages; struct page *page_table; +static inline boolean page_allocated_p(page_index_t page) { + return (page_table[page].allocated != FREE_PAGE_FLAG); +} + +static inline boolean page_no_region_p(page_index_t page) { + return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG); +} + +static inline boolean page_allocated_no_region_p(page_index_t page) { + return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG)) + && page_no_region_p(page)); +} + +static inline boolean page_free_p(page_index_t page) { + return (page_table[page].allocated == FREE_PAGE_FLAG); +} + +static inline boolean page_boxed_p(page_index_t page) { + return (page_table[page].allocated & BOXED_PAGE_FLAG); +} + +static inline boolean code_page_p(page_index_t page) { + return (page_table[page].allocated & CODE_PAGE_FLAG); +} + +static inline boolean page_boxed_no_region_p(page_index_t page) { + return page_boxed_p(page) && page_no_region_p(page); +} + +static inline boolean page_unboxed_p(page_index_t page) { + /* Both flags set == boxed code page */ + return ((page_table[page].allocated & UNBOXED_PAGE_FLAG) + && !page_boxed_p(page)); +} + +static inline boolean protect_page_p(page_index_t page, generation_index_t generation) { + return (page_boxed_no_region_p(page) + && (page_table[page].bytes_used != 0) + && !page_table[page].dont_move + && (page_table[page].gen == generation)); +} + /* To map addresses to page structures the address of the first page * is needed. */ static void *heap_base = NULL; @@ -178,10 +221,10 @@ page_address(page_index_t page_num) /* Calculate the address where the allocation region associated with * the page starts. */ -inline void * +static inline void * page_region_start(page_index_t page_index) { - return page_address(page_index)+page_table[page_index].first_object_offset; + return page_address(page_index)-page_table[page_index].region_start_offset; } /* Find the page index within the page_table for the given @@ -189,17 +232,31 @@ page_region_start(page_index_t page_index) inline page_index_t find_page_index(void *addr) { - page_index_t index = addr-heap_base; - - if (index >= 0) { - index = ((unsigned long)index)/PAGE_BYTES; + if (addr >= heap_base) { + page_index_t index = ((pointer_sized_uint_t)addr - + (pointer_sized_uint_t)heap_base) / PAGE_BYTES; if (index < page_table_pages) return (index); } - return (-1); } +static size_t +npage_bytes(long npages) +{ + gc_assert(npages>=0); + return ((unsigned long)npages)*PAGE_BYTES; +} + +/* Check that X is a higher address than Y and return offset from Y to + * X in bytes. 
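+ * (Illustrative identity, assuming the usual page layout where
+ * page_address(n) == heap_base + npage_bytes(n):
+ * void_diff(page_address(n+2), page_address(n)) == npage_bytes(2).)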
*/ +static inline +size_t void_diff(void *x, void *y) +{ + gc_assert(x >= y); + return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y; +} + /* a structure to hold the state of a generation */ struct generation { @@ -219,13 +276,13 @@ struct generation { page_index_t alloc_large_unboxed_start_page; /* the bytes allocated to this generation */ - long bytes_allocated; + unsigned long bytes_allocated; /* the number of bytes at which to trigger a GC */ - long gc_trigger; + unsigned long gc_trigger; /* to calculate a new level for gc_trigger */ - long bytes_consed_between_gc; + unsigned long bytes_consed_between_gc; /* the number of GCs since the last raise */ int num_gc; @@ -239,7 +296,7 @@ struct generation { * objects are added from a GC of a younger generation. Dividing by * the bytes_allocated will give the average age of the memory in * this generation since its last GC. */ - long cum_sum_bytes_allocated; + unsigned long cum_sum_bytes_allocated; /* a minimum average memory age before a GC will occur helps * prevent a GC when a large number of new live objects have been @@ -280,15 +337,16 @@ generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION; * integrated with the Lisp code. */ page_index_t last_free_page; +#ifdef LISP_FEATURE_SB_THREAD /* This lock is to prevent multiple threads from simultaneously * allocating new regions which overlap each other. Note that the * majority of GC is single-threaded, but alloc() may be called from * >1 thread at a time and must be thread-safe. This lock must be * seized before all accesses to generations[] or to parts of * page_table[] that other threads may want to see */ - -#ifdef LISP_FEATURE_SB_THREAD static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER; +/* This lock is used to protect non-thread-local allocation. */ +static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER; #endif @@ -302,10 +360,10 @@ static long count_write_protect_generation_pages(generation_index_t generation) { page_index_t i; - long count = 0; + unsigned long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == generation) && (page_table[i].write_protected == 1)) count++; @@ -320,7 +378,7 @@ count_generation_pages(generation_index_t generation) long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == generation)) count++; return count; @@ -333,7 +391,7 @@ count_dont_move_pages(void) page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].dont_move != 0)) { ++count; } @@ -344,13 +402,13 @@ count_dont_move_pages(void) /* Work through the pages and add up the number of bytes used for the * given generation. */ -static long +static unsigned long count_generation_bytes_allocated (generation_index_t gen) { page_index_t i; - long result = 0; + unsigned long result = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == gen)) result += page_table[i].bytes_used; } @@ -411,7 +469,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Count the number of boxed pages within the given * generation. 
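      * (page_boxed_p(j), used below, is the new inline shorthand for
      * page_table[j].allocated & BOXED_PAGE_FLAG; see the predicates
      * added near the top of this file.)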
*/ - if (page_table[j].allocated & BOXED_PAGE_FLAG) { + if (page_boxed_p(j)) { if (page_table[j].large_object) large_boxed_cnt++; else @@ -420,7 +478,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ if(page_table[j].dont_move) pinned_cnt++; /* Count the number of unboxed pages within the given * generation. */ - if (page_table[j].allocated & UNBOXED_PAGE_FLAG) { + if (page_unboxed_p(j)) { if (page_table[j].large_object) large_unboxed_cnt++; else @@ -443,14 +501,15 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ large_unboxed_cnt, pinned_cnt, generations[i].bytes_allocated, - (count_generation_pages(i)*PAGE_BYTES + (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated), generations[i].gc_trigger, count_write_protect_generation_pages(i), generations[i].num_gc, gen_av_mem_age(i)); } - fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated); + fprintf(stderr," Total bytes allocated = %lu\n", bytes_allocated); + fprintf(stderr," Dynamic-space-size bytes = %u\n", dynamic_space_size); fpu_restore(fpu_state); } @@ -466,8 +525,8 @@ void fast_bzero(void*, size_t); /* in -assem.S */ */ void zero_pages_with_mmap(page_index_t start, page_index_t end) { int i; - void *addr = (void *) page_address(start), *new_addr; - size_t length = PAGE_BYTES*(1+end-start); + void *addr = page_address(start), *new_addr; + size_t length = npage_bytes(1+end-start); if (start > end) return; @@ -493,9 +552,9 @@ zero_pages(page_index_t start, page_index_t end) { return; #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) - fast_bzero(page_address(start), PAGE_BYTES*(1+end-start)); + fast_bzero(page_address(start), npage_bytes(1+end-start)); #else - bzero(page_address(start), PAGE_BYTES*(1+end-start)); + bzero(page_address(start), npage_bytes(1+end-start)); #endif } @@ -575,6 +634,55 @@ struct alloc_region unboxed_region; /* The generation currently being allocated to. */ static generation_index_t gc_alloc_generation; +static inline page_index_t +generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_large_unboxed_start_page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + return generations[generation].alloc_large_start_page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_unboxed_start_page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + return generations[generation].alloc_start_page; + } else { + lose("bad page_type_flag: %d", page_type_flag); + } + } +} + +static inline void +set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large, + page_index_t page) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_large_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + generations[generation].alloc_large_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. 
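+             * (CODE_PAGE_FLAG presumably shares the boxed bit, which is
+             * why the test above uses & rather than ==.)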
*/ + generations[generation].alloc_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } +} + /* Find a new region with room for at least the given number of bytes. * * It starts looking at the current generation's alloc_start_page. So @@ -599,11 +707,11 @@ static generation_index_t gc_alloc_generation; * are allocated, although they will initially be empty. */ static void -gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region) { page_index_t first_page; page_index_t last_page; - long bytes_found; + unsigned long bytes_found; page_index_t i; int ret; @@ -619,16 +727,10 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) && (alloc_region->free_pointer == alloc_region->end_addr)); ret = thread_mutex_lock(&free_pages_lock); gc_assert(ret == 0); - if (unboxed) { - first_page = - generations[gc_alloc_generation].alloc_unboxed_start_page; - } else { - first_page = - generations[gc_alloc_generation].alloc_start_page; - } - last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0); + last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag); bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) - + PAGE_BYTES*(last_page-first_page); + + npage_bytes(last_page-first_page); /* Set up the alloc_region. */ alloc_region->first_page = first_page; @@ -642,35 +744,26 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) /* The first page may have already been in use. */ if (page_table[first_page].bytes_used == 0) { - if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[first_page].allocated = BOXED_PAGE_FLAG; + page_table[first_page].allocated = page_type_flag; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].large_object = 0; - page_table[first_page].first_object_offset = 0; + page_table[first_page].region_start_offset = 0; } - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated == page_type_flag); page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG; gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); for (i = first_page+1; i <= last_page; i++) { - if (unboxed) - page_table[i].allocated = UNBOXED_PAGE_FLAG; - else - page_table[i].allocated = BOXED_PAGE_FLAG; + page_table[i].allocated = page_type_flag; page_table[i].gen = gc_alloc_generation; page_table[i].large_object = 0; /* This may not be necessary for unboxed regions (think it was * broken before!) */ - page_table[i].first_object_offset = - alloc_region->start_addr - page_address(i); + page_table[i].region_start_offset = + void_diff(page_address(i),alloc_region->start_addr); page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ; } /* Bump up last_free_page. */ @@ -678,15 +771,14 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) last_free_page = last_page+1; /* do we only want to call this on special occasions? like for * boxed_region? 
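      * (page_address(last_free_page) equals heap_base +
      * npage_bytes(last_free_page), so the call below is equivalent to
      * the open-coded arithmetic it replaces.)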
*/ - set_alloc_pointer((lispobj)(((char *)heap_base) - + last_free_page*PAGE_BYTES)); + set_alloc_pointer((lispobj)page_address(last_free_page)); } ret = thread_mutex_unlock(&free_pages_lock); gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(1+last_page-first_page), + npage_bytes(1+last_page-first_page), OS_VM_PROT_ALL); #endif @@ -737,8 +829,8 @@ static int record_new_objects = 0; static page_index_t new_areas_ignore_page; struct new_area { page_index_t page; - long offset; - long size; + size_t offset; + size_t size; }; static struct new_area (*new_areas)[]; static long new_areas_index; @@ -746,7 +838,7 @@ long max_new_areas; /* Add a new area to new_areas. */ static void -add_new_area(page_index_t first_page, long offset, long size) +add_new_area(page_index_t first_page, size_t offset, size_t size) { unsigned long new_area_start,c; long i; @@ -768,13 +860,13 @@ add_new_area(page_index_t first_page, long offset, long size) gc_abort(); } - new_area_start = PAGE_BYTES*first_page + offset; + new_area_start = npage_bytes(first_page) + offset; /* Search backwards for a prior area that this follows from. If found this will save adding a new area. */ for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) { unsigned long area_end = - PAGE_BYTES*((*new_areas)[i].page) + npage_bytes((*new_areas)[i].page) + (*new_areas)[i].offset + (*new_areas)[i].size; /*FSHOW((stderr, @@ -816,15 +908,15 @@ add_new_area(page_index_t first_page, long offset, long size) * it is safe to try to re-update the page table of this reset * alloc_region. */ void -gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) +gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region) { int more; page_index_t first_page; page_index_t next_page; - int bytes_used; - long orig_first_page_bytes_used; - long region_size; - long byte_cnt; + unsigned long bytes_used; + unsigned long orig_first_page_bytes_used; + unsigned long region_size; + unsigned long byte_cnt; int ret; @@ -851,15 +943,12 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Update the first page. */ /* If the page was free then set up the gen, and - * first_object_offset. */ + * region_start_offset. */ if (page_table[first_page].bytes_used == 0) - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated & page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); @@ -868,8 +957,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - - page_address(first_page)))>PAGE_BYTES) { + if ((bytes_used = void_diff(alloc_region->free_pointer, + page_address(first_page))) + >PAGE_BYTES) { bytes_used = PAGE_BYTES; more = 1; } @@ -877,26 +967,24 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) byte_cnt += bytes_used; - /* All the rest of the pages should be free. 
We need to set their - * first_object_offset pointer to the start of the region, and set - * the bytes_used. */ + /* All the rest of the pages should be free. We need to set + * their region_start_offset pointer to the start of the + * region, and set the bytes_used. */ while (more) { page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - if (unboxed) - gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[next_page].allocated & page_type_flag); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); - gc_assert(page_table[next_page].first_object_offset == - alloc_region->start_addr - page_address(next_page)); + gc_assert(page_table[next_page].region_start_offset == + void_diff(page_address(next_page), + alloc_region->start_addr)); /* Calculate the number of bytes used in this page. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - - page_address(next_page)))>PAGE_BYTES) { + if ((bytes_used = void_diff(alloc_region->free_pointer, + page_address(next_page)))>PAGE_BYTES) { bytes_used = PAGE_BYTES; more = 1; } @@ -906,7 +994,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page++; } - region_size = alloc_region->free_pointer - alloc_region->start_addr; + region_size = void_diff(alloc_region->free_pointer, + alloc_region->start_addr); bytes_allocated += region_size; generations[gc_alloc_generation].bytes_allocated += region_size; @@ -914,14 +1003,10 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Set the generations alloc restart page to the last page of * the region. */ - if (unboxed) - generations[gc_alloc_generation].alloc_unboxed_start_page = - next_page-1; - else - generations[gc_alloc_generation].alloc_start_page = next_page-1; + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1); /* Add the region to the new_areas if requested. */ - if (!unboxed) + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used, region_size); /* @@ -955,58 +1040,44 @@ static inline void *gc_quick_alloc(long nbytes); /* Allocate a possibly large object. 
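  * (Requests of at least large_object_size bytes are routed here by
  * gc_alloc_with_region, defined further below.)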
*/ void * -gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region) { page_index_t first_page; page_index_t last_page; int orig_first_page_bytes_used; long byte_cnt; int more; - long bytes_used; + unsigned long bytes_used; page_index_t next_page; int ret; ret = thread_mutex_lock(&free_pages_lock); gc_assert(ret == 0); - if (unboxed) { - first_page = - generations[gc_alloc_generation].alloc_large_unboxed_start_page; - } else { - first_page = generations[gc_alloc_generation].alloc_large_start_page; - } + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1); if (first_page <= alloc_region->last_page) { first_page = alloc_region->last_page+1; } - last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); + last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag); gc_assert(first_page > alloc_region->last_page); - if (unboxed) - generations[gc_alloc_generation].alloc_large_unboxed_start_page = - last_page; - else - generations[gc_alloc_generation].alloc_large_start_page = last_page; + + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page); /* Set up the pages. */ orig_first_page_bytes_used = page_table[first_page].bytes_used; /* If the first page was free then set up the gen, and - * first_object_offset. */ + * region_start_offset. */ if (page_table[first_page].bytes_used == 0) { - if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[first_page].allocated = BOXED_PAGE_FLAG; + page_table[first_page].allocated = page_type_flag; page_table[first_page].gen = gc_alloc_generation; - page_table[first_page].first_object_offset = 0; + page_table[first_page].region_start_offset = 0; page_table[first_page].large_object = 1; } - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated == page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 1); @@ -1025,20 +1096,17 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; /* All the rest of the pages should be free. We need to set their - * first_object_offset pointer to the start of the region, and - * set the bytes_used. */ + * region_start_offset pointer to the start of the region, and set + * the bytes_used. */ while (more) { - gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); + gc_assert(page_free_p(next_page)); gc_assert(page_table[next_page].bytes_used == 0); - if (unboxed) - page_table[next_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[next_page].allocated = BOXED_PAGE_FLAG; + page_table[next_page].allocated = page_type_flag; page_table[next_page].gen = gc_alloc_generation; page_table[next_page].large_object = 1; - page_table[next_page].first_object_offset = - orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page); + page_table[next_page].region_start_offset = + npage_bytes(next_page-first_page) - orig_first_page_bytes_used; /* Calculate the number of bytes used in this page. */ more = 0; @@ -1060,21 +1128,20 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) generations[gc_alloc_generation].bytes_allocated += nbytes; /* Add the region to the new_areas if requested. 
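     * (With the unboxed boolean replaced by page_type_flag, the test
     * below records boxed and code regions and, as before, skips
     * unboxed ones.)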
*/ - if (!unboxed) + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used,nbytes); /* Bump up last_free_page */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - set_alloc_pointer((lispobj)(((char *)heap_base) - + last_free_page*PAGE_BYTES)); + set_alloc_pointer((lispobj)(page_address(last_free_page))); } ret = thread_mutex_unlock(&free_pages_lock); gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(1+last_page-first_page), + npage_bytes(1+last_page-first_page), OS_VM_PROT_ALL); #endif @@ -1088,6 +1155,7 @@ static page_index_t gencgc_alloc_start_page = -1; void gc_heap_exhausted_error_or_lose (long available, long requested) { + struct thread *thread = arch_os_get_current_thread(); /* Write basic information before doing anything else: if we don't * call to lisp this is a must, and even if we do there is always * the danger that we bounce back here before the error has been @@ -1100,12 +1168,13 @@ gc_heap_exhausted_error_or_lose (long available, long requested) /* If we are in GC, or totally out of memory there is no way * to sanely transfer control to the lisp-side of things. */ - struct thread *thread = arch_os_get_current_thread(); print_generation_stats(1); fprintf(stderr, "GC control variables:\n"); fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n", SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true", - SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true"); + (SymbolValue(GC_PENDING, thread) == T) ? + "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ? + "false" : "in progress")); #ifdef LISP_FEATURE_SB_THREAD fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n", SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true"); @@ -1115,6 +1184,18 @@ gc_heap_exhausted_error_or_lose (long available, long requested) else { /* FIXME: assert free_pages_lock held */ (void)thread_mutex_unlock(&free_pages_lock); + gc_assert(get_pseudo_atomic_atomic(thread)); + clear_pseudo_atomic_atomic(thread); + if (get_pseudo_atomic_interrupted(thread)) + do_pending_interrupt(); + /* Another issue is that signalling HEAP-EXHAUSTED error leads + * to running user code at arbitrary places, even in a + * WITHOUT-INTERRUPTS which may lead to a deadlock without + * running out of the heap. So at this point all bets are + * off. */ + if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) + corruption_warning_and_maybe_lose + ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS."); funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), alloc_number(available), alloc_number(requested)); lose("HEAP-EXHAUSTED-ERROR fell through"); @@ -1122,7 +1203,8 @@ gc_heap_exhausted_error_or_lose (long available, long requested) } page_index_t -gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) +gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, + int page_type_flag) { page_index_t first_page, last_page; page_index_t restart_page = *restart_page_ptr; @@ -1135,7 +1217,8 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) restart_page = gencgc_alloc_start_page; } - if (nbytes>=PAGE_BYTES) { + gc_assert(nbytes>=0); + if (((unsigned long)nbytes)>=PAGE_BYTES) { /* Search for a contiguous free space of at least nbytes, * aligned on a page boundary. 
The page-alignment is strictly * speaking needed only for objects at least large_object_size @@ -1143,17 +1226,18 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) do { first_page = restart_page; while ((first_page < page_table_pages) && - (page_table[first_page].allocated != FREE_PAGE_FLAG)) + page_allocated_p(first_page)) first_page++; last_page = first_page; bytes_found = PAGE_BYTES; while ((bytes_found < nbytes) && (last_page < (page_table_pages-1)) && - (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { + page_free_p(last_page+1)) { last_page++; bytes_found += PAGE_BYTES; - gc_assert(page_table[last_page].write_protected == 0); + gc_assert(0 == page_table[last_page].bytes_used); + gc_assert(0 == page_table[last_page].write_protected); } if (bytes_found > most_bytes_found) most_bytes_found = bytes_found; @@ -1167,13 +1251,13 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) * pages: this helps avoid excessive conservativism. */ first_page = restart_page; while (first_page < page_table_pages) { - if (page_table[first_page].allocated == FREE_PAGE_FLAG) + if (page_free_p(first_page)) { + gc_assert(0 == page_table[first_page].bytes_used); bytes_found = PAGE_BYTES; break; } - else if ((page_table[first_page].allocated == - (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && + else if ((page_table[first_page].allocated == page_type_flag) && (page_table[first_page].large_object == 0) && (page_table[first_page].gen == gc_alloc_generation) && (page_table[first_page].write_protected == 0) && @@ -1208,13 +1292,13 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) * functions will eventually call this */ void * -gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, +gc_alloc_with_region(long nbytes,int page_type_flag, struct alloc_region *my_region, int quick_p) { void *new_free_pointer; if (nbytes>=large_object_size) - return gc_alloc_large(nbytes,unboxed_p,my_region); + return gc_alloc_large(nbytes, page_type_flag, my_region); /* Check whether there is room in the current alloc region. */ new_free_pointer = my_region->free_pointer + nbytes; @@ -1230,11 +1314,11 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, /* Unless a `quick' alloc was requested, check whether the alloc region is almost empty. */ if (!quick_p && - (my_region->end_addr - my_region->free_pointer) <= 32) { + void_diff(my_region->end_addr,my_region->free_pointer) <= 32) { /* If so, finished with the current region. */ - gc_alloc_update_page_tables(unboxed_p, my_region); + gc_alloc_update_page_tables(page_type_flag, my_region); /* Set up a new region. */ - gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region); + gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region); } return((void *)new_obj); @@ -1243,51 +1327,43 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, /* Else not enough free space in the current region: retry with a * new region. 
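      * (gc_alloc_update_page_tables closes the current region,
      * gc_alloc_new_region opens a fresh one, and the request is then
      * retried through the recursive call below.)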
*/ - gc_alloc_update_page_tables(unboxed_p, my_region); - gc_alloc_new_region(nbytes, unboxed_p, my_region); - return gc_alloc_with_region(nbytes,unboxed_p,my_region,0); + gc_alloc_update_page_tables(page_type_flag, my_region); + gc_alloc_new_region(nbytes, page_type_flag, my_region); + return gc_alloc_with_region(nbytes, page_type_flag, my_region,0); } /* these are only used during GC: all allocation from the mutator calls * alloc() -> gc_alloc_with_region() with the appropriate per-thread * region */ -void * -gc_general_alloc(long nbytes,int unboxed_p,int quick_p) -{ - struct alloc_region *my_region = - unboxed_p ? &unboxed_region : &boxed_region; - return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p); -} - static inline void * gc_quick_alloc(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK); } static inline void * gc_quick_alloc_large(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, BOXED_PAGE_FLAG ,ALLOC_QUICK); } static inline void * gc_alloc_unboxed(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,0); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0); } static inline void * gc_quick_alloc_unboxed(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK); } static inline void * gc_quick_alloc_large_unboxed(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK); } @@ -1317,10 +1393,10 @@ copy_large_object(lispobj object, long nwords) /* Promote the object. */ - long remaining_bytes; + unsigned long remaining_bytes; page_index_t next_page; - long bytes_freed; - long old_bytes_used; + unsigned long bytes_freed; + unsigned long old_bytes_used; /* Note: Any page write-protection must be removed, else a * later scavenge_newspace may incorrectly not scavenge these @@ -1328,16 +1404,16 @@ copy_large_object(lispobj object, long nwords) * new areas, but let's do it for them all (they'll probably * be written anyway?). */ - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); next_page = first_page; remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_boxed_p(next_page)); gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset== - -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].region_start_offset == + npage_bytes(next_page-first_page)); gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; @@ -1359,7 +1435,7 @@ copy_large_object(lispobj object, long nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_boxed_p(next_page)); /* Adjust the bytes_used. 
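         * (At this point remaining_bytes <= PAGE_BYTES: it is whatever
         * is left of the promoted object on this, its final, page.)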
*/ old_bytes_used = page_table[next_page].bytes_used; @@ -1371,10 +1447,10 @@ copy_large_object(lispobj object, long nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - (page_table[next_page].allocated == BOXED_PAGE_FLAG) && + page_boxed_p(next_page) && page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { + (page_table[next_page].region_start_offset == + npage_bytes(next_page - first_page))) { /* Checks out OK, free the page. Don't need to bother zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected as they @@ -1456,9 +1532,10 @@ copy_large_unboxed_object(lispobj object, long nwords) gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); - if ((nwords > 1024*1024) && gencgc_verbose) + if ((nwords > 1024*1024) && gencgc_verbose) { FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); + } /* Check whether it's a large object. */ first_page = find_page_index((void *)object); @@ -1468,22 +1545,21 @@ copy_large_unboxed_object(lispobj object, long nwords) /* Promote the object. Note: Unboxed objects may have been * allocated to a BOXED region so it may be necessary to * change the region to UNBOXED. */ - long remaining_bytes; + unsigned long remaining_bytes; page_index_t next_page; - long bytes_freed; - long old_bytes_used; + unsigned long bytes_freed; + unsigned long old_bytes_used; - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); next_page = first_page; remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); + gc_assert(page_allocated_no_region_p(next_page)); gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset== - -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].region_start_offset == + npage_bytes(next_page-first_page)); gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; @@ -1511,11 +1587,10 @@ copy_large_unboxed_object(lispobj object, long nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && + page_allocated_no_region_p(next_page) && page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { + (page_table[next_page].region_start_offset == + npage_bytes(next_page - first_page))) { /* Checks out OK, free the page. Don't need to both zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected, even if @@ -1529,10 +1604,11 @@ copy_large_unboxed_object(lispobj object, long nwords) next_page++; } - if ((bytes_freed > 0) && gencgc_verbose) + if ((bytes_freed > 0) && gencgc_verbose) { FSHOW((stderr, "/copy_large_unboxed bytes_freed=%d\n", bytes_freed)); + } generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; @@ -2137,8 +2213,7 @@ search_dynamic_space(void *pointer) lispobj *start; /* The address may be invalid, so do some checks. 
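     * (find_page_index returns -1 for addresses outside the dynamic
     * space, and a free page cannot hold a live object, so either case
     * yields NULL.)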
*/ - if ((page_index == -1) || - (page_table[page_index].allocated == FREE_PAGE_FLAG)) + if ((page_index == -1) || page_free_p(page_index)) return NULL; start = (lispobj *)page_region_start(page_index); return (gc_search_space(start, @@ -2157,13 +2232,6 @@ search_dynamic_space(void *pointer) static int looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) { - /* We need to allow raw pointers into Code objects for return - * addresses. This will also pick up pointers to functions in code - * objects. */ - if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) - /* XXX could do some further checks here */ - return 1; - if (!is_lisp_pointer((lispobj)pointer)) { return 0; } @@ -2182,28 +2250,31 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: if ((unsigned long)pointer != ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wf2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wf3: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; case LIST_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wl1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } /* Is it plausible cons? */ @@ -2213,44 +2284,49 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) is_lisp_immediate(start_addr[1]))) break; else { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wl2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } case INSTANCE_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wi1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wi2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; case OTHER_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } /* Is it plausible? Not a cons. XXX should check the headers. */ if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } switch (widetag_of(start_addr[0])) { @@ -2260,26 +2336,29 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) #if N_WORD_BITS == 64 case SINGLE_FLOAT_WIDETAG: #endif - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; /* only pointed to by function pointers? 
*/ case CLOSURE_HEADER_WIDETAG: case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; case INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; /* the valid other immediate pointer objects */ @@ -2382,18 +2461,20 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo6: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } @@ -2456,9 +2537,9 @@ maybe_adjust_large_object(lispobj *where) page_index_t next_page; long nwords; - long remaining_bytes; - long bytes_freed; - long old_bytes_used; + unsigned long remaining_bytes; + unsigned long bytes_freed; + unsigned long old_bytes_used; int boxed; @@ -2544,17 +2625,16 @@ maybe_adjust_large_object(lispobj *where) * but lets do it for them all (they'll probably be written * anyway?). */ - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); next_page = first_page; remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG) - || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG)); + gc_assert(page_allocated_no_region_p(next_page)); gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset == - -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].region_start_offset == + npage_bytes(next_page-first_page)); gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].allocated = boxed; @@ -2586,11 +2666,10 @@ maybe_adjust_large_object(lispobj *where) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && + page_allocated_no_region_p(next_page) && page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { + (page_table[next_page].region_start_offset == + npage_bytes(next_page - first_page))) { /* It checks out OK, free the page. We don't need to both zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write protected as they @@ -2639,7 +2718,7 @@ preserve_pointer(void *addr) /* quick check 1: Address is quite likely to have been invalid. */ if ((addr_page_index == -1) - || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG) + || page_free_p(addr_page_index) || (page_table[addr_page_index].bytes_used == 0) || (page_table[addr_page_index].gen != from_space) /* Skip if already marked dont_move. */ @@ -2663,7 +2742,9 @@ preserve_pointer(void *addr) * expensive but important, since it vastly reduces the * probability that random garbage will be bogusly interpreted as * a pointer which prevents a page from moving. 
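     * (The special case for raw pointers into code objects, formerly an
     * early return in looks_like_valid_lisp_pointer_p, now lives in the
     * code_page_p test below, so return addresses are still accepted.)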
*/ - if (!(possibly_valid_dynamic_space_pointer(addr))) + if (!(code_page_p(addr_page_index) + || (is_lisp_pointer((lispobj)addr) && + possibly_valid_dynamic_space_pointer(addr)))) return; /* Find the beginning of the region. Note that there may be @@ -2677,7 +2758,7 @@ preserve_pointer(void *addr) first_page = find_page_index(page_region_start(addr_page_index)) #else first_page = addr_page_index; - while (page_table[first_page].first_object_offset != 0) { + while (page_table[first_page].region_start_offset != 0) { --first_page; /* Do some checks. */ gc_assert(page_table[first_page].bytes_used == PAGE_BYTES); @@ -2694,7 +2775,7 @@ preserve_pointer(void *addr) * free area in which case it's ignored here. Note it gets * through the valid pointer test above because the tail looks * like conses. */ - if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG) + if (page_free_p(addr_page_index) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ || (((unsigned long)addr & (PAGE_BYTES - 1)) @@ -2733,10 +2814,10 @@ preserve_pointer(void *addr) /* Check whether this is the last page in this contiguous block.. */ if ((page_table[i].bytes_used < PAGE_BYTES) /* ..or it is PAGE_BYTES and is the last in the block */ - || (page_table[i+1].allocated == FREE_PAGE_FLAG) + || page_free_p(i+1) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. gen */ - || (page_table[i+1].first_object_offset == 0)) + || (page_table[i+1].region_start_offset == 0)) break; } @@ -2770,14 +2851,14 @@ update_page_write_prot(page_index_t page) long num_words = page_table[page].bytes_used / N_WORD_BYTES; /* Shouldn't be a free page. */ - gc_assert(page_table[page].allocated != FREE_PAGE_FLAG); + gc_assert(page_allocated_p(page)); gc_assert(page_table[page].bytes_used != 0); /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected /* FIXME: What's the reason for not write-protecting pinned pages? */ || page_table[page].dont_move - || (page_table[page].allocated & UNBOXED_PAGE_FLAG)) + || page_unboxed_p(page)) return (0); /* Scan the page for pointers to younger generations or the @@ -2790,7 +2871,7 @@ update_page_write_prot(page_index_t page) /* Check that it's in the dynamic space */ if (index != -1) if (/* Does it point to a younger or the temp. generation? 
*/ - ((page_table[index].allocated != FREE_PAGE_FLAG) + (page_allocated_p(index) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) || (page_table[index].gen == SCRATCH_GENERATION))) @@ -2864,7 +2945,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) for (i = 0; i < last_free_page; i++) { generation_index_t generation = page_table[i].gen; - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (generation != new_space) && (generation >= from) @@ -2873,7 +2954,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) int write_protected=1; /* This should be the start of a region */ - gc_assert(page_table[i].first_object_offset == 0); + gc_assert(page_table[i].region_start_offset == 0); /* Now work forward until the end of the region */ for (last_page = i; ; last_page++) { @@ -2881,16 +2962,17 @@ scavenge_generations(generation_index_t from, generation_index_t to) write_protected && page_table[last_page].write_protected; if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) + || (!page_boxed_p(last_page+1)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + || (page_table[last_page+1].region_start_offset == 0)) break; } if (!write_protected) { scavenge(page_address(i), - (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + ((unsigned long)(page_table[last_page].bytes_used + + npage_bytes(last_page-i))) + /N_WORD_BYTES); /* Now scan the pages and write protect those that * don't have pointers to younger generations. */ @@ -2913,15 +2995,15 @@ scavenge_generations(generation_index_t from, generation_index_t to) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < page_table_pages; i++) { - if ((page_table[i].allocation != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0)) { FSHOW((stderr, "/scavenge_generation() %d\n", generation)); FSHOW((stderr, - "/page bytes_used=%d first_object_offset=%d dont_move=%d\n", + "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n", page_table[i].bytes_used, - page_table[i].first_object_offset, + page_table[i].region_start_offset, page_table[i].dont_move)); lose("write to protected page %d in scavenge_generation()\n", i); } @@ -2967,7 +3049,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) generation)); for (i = 0; i < last_free_page; i++) { /* Note that this skips over open regions when it encounters them. */ - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && ((page_table[i].write_protected == 0) @@ -2977,7 +3059,8 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) page_index_t last_page; int all_wp=1; - /* The scavenge will start at the first_object_offset of page i. + /* The scavenge will start at the region_start_offset of + * page i. * * We need to find the full extent of this contiguous * block in case objects span pages. 
@@ -2995,23 +3078,23 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) * contiguous block */ if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) + || (!page_boxed_p(last_page+1)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + || (page_table[last_page+1].region_start_offset == 0)) break; } /* Do a limited check for write-protected pages. */ if (!all_wp) { - long size; - - size = (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES - - page_table[i].first_object_offset)/N_WORD_BYTES; + long nwords = (((unsigned long) + (page_table[last_page].bytes_used + + npage_bytes(last_page-i) + + page_table[i].region_start_offset)) + / N_WORD_BYTES); new_areas_ignore_page = last_page; - scavenge(page_region_start(i), size); + scavenge(page_region_start(i), nwords); } i = last_page; @@ -3095,8 +3178,9 @@ scavenge_newspace_generation(generation_index_t generation) /* New areas of objects allocated have been lost so need to do a * full scan to be sure! If this becomes a problem try * increasing NUM_NEW_AREAS. */ - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("new_areas overflow, doing full scavenge"); + } /* Don't need to record new areas that get scavenged * anyway during scavenge_newspace_generation_one_scan. */ @@ -3116,9 +3200,9 @@ scavenge_newspace_generation(generation_index_t generation) /* Work through previous_new_areas. */ for (i = 0; i < previous_new_areas_index; i++) { - long page = (*previous_new_areas)[i].page; - long offset = (*previous_new_areas)[i].offset; - long size = (*previous_new_areas)[i].size / N_WORD_BYTES; + page_index_t page = (*previous_new_areas)[i].page; + size_t offset = (*previous_new_areas)[i].offset; + size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES; gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0); scavenge(page_address(page)+offset, size); } @@ -3143,7 +3227,7 @@ scavenge_newspace_generation(generation_index_t generation) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < page_table_pages; i++) { - if ((page_table[i].allocation != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) @@ -3166,7 +3250,7 @@ unprotect_oldspace(void) page_index_t i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == from_space)) { void *page_start; @@ -3187,10 +3271,10 @@ unprotect_oldspace(void) * assumes that all objects have been copied or promoted to an older * generation. Bytes_allocated and the generation bytes_allocated * counter are updated. The number of bytes freed is returned. */ -static long +static unsigned long free_oldspace(void) { - long bytes_freed = 0; + unsigned long bytes_freed = 0; page_index_t first_page, last_page; first_page = 0; @@ -3198,7 +3282,7 @@ free_oldspace(void) do { /* Find a first page for the next region of pages. 
*/ while ((first_page < last_free_page) - && ((page_table[first_page].allocated == FREE_PAGE_FLAG) + && (page_free_p(first_page) || (page_table[first_page].bytes_used == 0) || (page_table[first_page].gen != from_space))) first_page++; @@ -3230,13 +3314,13 @@ free_oldspace(void) last_page++; } while ((last_page < last_free_page) - && (page_table[last_page].allocated != FREE_PAGE_FLAG) + && page_allocated_p(last_page) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(last_page-first_page), + npage_bytes(last_page-first_page), OS_VM_PROT_NONE); #endif first_page = last_page; @@ -3255,13 +3339,13 @@ print_ptr(lispobj *addr) page_index_t pi1 = find_page_index((void*)addr); if (pi1 != -1) - fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n", + fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n", (unsigned long) addr, pi1, page_table[pi1].allocated, page_table[pi1].gen, page_table[pi1].bytes_used, - page_table[pi1].first_object_offset, + page_table[pi1].region_start_offset, page_table[pi1].dont_move); fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n", *(addr-4), @@ -3301,7 +3385,7 @@ verify_space(lispobj *start, size_t words) if (page_index != -1) { /* If it's within the dynamic space it should point to a used * page. XX Could check the offset too. */ - if ((page_table[page_index].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(page_index) && (page_table[page_index].bytes_used == 0)) lose ("Ptr %x @ %x sees free page.\n", thing, start); /* Check that it doesn't point to a forwarding pointer! */ @@ -3566,14 +3650,14 @@ verify_generation(generation_index_t generation) page_index_t i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { page_index_t last_page; int region_allocation = page_table[i].allocated; /* This should be the start of a contiguous block */ - gc_assert(page_table[i].first_object_offset == 0); + gc_assert(page_table[i].region_start_offset == 0); /* Need to find the full extent of this contiguous block in case objects span pages. */ @@ -3588,12 +3672,14 @@ verify_generation(generation_index_t generation) || (page_table[last_page+1].allocated != region_allocation) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + || (page_table[last_page+1].region_start_offset == 0)) break; verify_space(page_address(i), - (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + ((unsigned long) + (page_table[last_page].bytes_used + + npage_bytes(last_page-i))) + / N_WORD_BYTES); i = last_page; } } @@ -3606,7 +3692,7 @@ verify_zero_fill(void) page_index_t page; for (page = 0; page < last_free_page; page++) { - if (page_table[page].allocated == FREE_PAGE_FLAG) { + if (page_free_p(page)) { /* The whole page should be zero filled. 
*/ long *start_addr = (long *)page_address(page); long size = 1024; @@ -3664,10 +3750,7 @@ write_protect_generation_pages(generation_index_t generation) gc_assert(generation < SCRATCH_GENERATION); for (start = 0; start < last_free_page; start++) { - if ((page_table[start].allocated == BOXED_PAGE_FLAG) - && (page_table[start].bytes_used != 0) - && !page_table[start].dont_move - && (page_table[start].gen == generation)) { + if (protect_page_p(start, generation)) { void *page_start; page_index_t last; @@ -3675,10 +3758,7 @@ write_protect_generation_pages(generation_index_t generation) page_table[start].write_protected = 1; for (last = start + 1; last < last_free_page; last++) { - if ((page_table[last].allocated != BOXED_PAGE_FLAG) - || (page_table[last].bytes_used == 0) - || page_table[last].dont_move - || (page_table[last].gen != generation)) + if (!protect_page_p(last, generation)) break; page_table[last].write_protected = 1; } @@ -3686,7 +3766,7 @@ write_protect_generation_pages(generation_index_t generation) page_start = (void *)page_address(start); os_protect(page_start, - PAGE_BYTES * (last - start), + npage_bytes(last - start), OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); start = last; @@ -4022,7 +4102,7 @@ garbage_collect_generation(generation_index_t generation, int raise) fprintf(stderr, "/non-movable pages due to conservative pointers = %d (%d bytes)\n", num_dont_move_pages, - num_dont_move_pages * PAGE_BYTES); + npage_bytes(num_dont_move_pages)); } #endif @@ -4159,8 +4239,9 @@ garbage_collect_generation(generation_index_t generation, int raise) generations[generation].alloc_large_unboxed_start_page = 0; if (generation >= verify_gens) { - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("verifying"); + } verify_gc(); verify_dynamic_space(); } @@ -4189,14 +4270,12 @@ update_dynamic_space_free_pointer(void) page_index_t last_page = -1, i; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) - && (page_table[i].bytes_used != 0)) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0)) last_page = i; last_free_page = last_page+1; - set_alloc_pointer((lispobj)(((char *)heap_base) - + last_free_page*PAGE_BYTES)); + set_alloc_pointer((lispobj)(page_address(last_free_page))); return 0; /* dummy value: return something ... */ } @@ -4206,15 +4285,15 @@ remap_free_pages (page_index_t from, page_index_t to) page_index_t first_page, last_page; for (first_page = from; first_page <= to; first_page++) { - if (page_table[first_page].allocated != FREE_PAGE_FLAG || - page_table[first_page].need_to_zero == 0) { + if (page_allocated_p(first_page) || + (page_table[first_page].need_to_zero == 0)) { continue; } last_page = first_page + 1; - while (page_table[last_page].allocated == FREE_PAGE_FLAG && - last_page < to && - page_table[last_page].need_to_zero == 1) { + while (page_free_p(last_page) && + (last_page < to) && + (page_table[last_page].need_to_zero == 1)) { last_page++; } @@ -4390,12 +4469,13 @@ gc_free_heap(void) { page_index_t page; - if (gencgc_verbose > 1) + if (gencgc_verbose > 1) { SHOW("entering gc_free_heap"); + } for (page = 0; page < page_table_pages; page++) { /* Skip free pages which should already be zero filled. */ - if (page_table[page].allocated != FREE_PAGE_FLAG) { + if (page_allocated_p(page)) { void *page_start, *addr; /* Mark the page free. The other slots are assumed invalid @@ -4429,7 +4509,7 @@ gc_free_heap(void) /* Double-check that the page is zero filled. 
*/ long *page_start; page_index_t i; - gc_assert(page_table[page].allocated == FREE_PAGE_FLAG); + gc_assert(page_free_p(page)); gc_assert(page_table[page].bytes_used == 0); page_start = (long *)page_address(page); for (i=0; i<1024; i++) { @@ -4482,7 +4562,7 @@ gc_init(void) /* Compute the number of pages needed for the dynamic space. * Dynamic space size should be aligned on page size. */ page_table_pages = dynamic_space_size/PAGE_BYTES; - gc_assert(dynamic_space_size == (size_t) page_table_pages*PAGE_BYTES); + gc_assert(dynamic_space_size == npage_bytes(page_table_pages)); page_table = calloc(page_table_pages, sizeof(struct page)); gc_assert(page_table); @@ -4547,10 +4627,9 @@ static void gencgc_pickup_dynamic(void) { page_index_t page = 0; - long alloc_ptr = get_alloc_pointer(); + void *alloc_ptr = (void *)get_alloc_pointer(); lispobj *prev=(lispobj *)page_address(page); generation_index_t gen = PSEUDO_STATIC_GENERATION; - do { lispobj *first,*ptr= (lispobj *)page_address(page); page_table[page].allocated = BOXED_PAGE_FLAG; @@ -4565,11 +4644,11 @@ gencgc_pickup_dynamic(void) if (!gencgc_partial_pickup) { first=gc_search_space(prev,(ptr+2)-prev,ptr); if(ptr == first) prev=ptr; - page_table[page].first_object_offset = - (void *)prev - page_address(page); + page_table[page].region_start_offset = + page_address(page) - (void *)prev; } page++; - } while ((long)page_address(page) < alloc_ptr); + } while (page_address(page) < alloc_ptr); #ifdef LUTEX_WIDETAG /* Lutexes have been registered in generation 0 by coreparse, and @@ -4580,8 +4659,8 @@ gencgc_pickup_dynamic(void) last_free_page = page; - generations[gen].bytes_allocated = PAGE_BYTES*page; - bytes_allocated = PAGE_BYTES*page; + generations[gen].bytes_allocated = npage_bytes(page); + bytes_allocated = npage_bytes(page); gc_alloc_update_all_page_tables(); write_protect_generation_pages(gen); @@ -4592,8 +4671,6 @@ gc_initialize_pointers(void) { gencgc_pickup_dynamic(); } - - /* alloc(..) is the external interface for memory allocation. It @@ -4608,16 +4685,10 @@ gc_initialize_pointers(void) * The check for a GC trigger is only performed when the current * region is full, so in most cases it's not needed. */ -lispobj * -alloc(long nbytes) +static inline lispobj * +general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region, + struct thread *thread) { - struct thread *thread=arch_os_get_current_thread(); - struct alloc_region *region= -#ifdef LISP_FEATURE_SB_THREAD - thread ? &(thread->alloc_region) : &boxed_region; -#else - &boxed_region; -#endif #ifndef LISP_FEATURE_WIN32 lispobj alloc_signal; #endif @@ -4630,25 +4701,8 @@ alloc(long nbytes) gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0) && ((nbytes & LOWTAG_MASK) == 0)); -#if 0 - if(all_threads) - /* there are a few places in the C code that allocate data in the - * heap before Lisp starts. This is before interrupts are enabled, - * so we don't need to check for pseudo-atomic */ -#ifdef LISP_FEATURE_SB_THREAD - if(!get_psuedo_atomic_atomic(th)) { - register u32 fs; - fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n", - th,th->os_thread); - __asm__("movl %fs,%0" : "=r" (fs) : ); - fprintf(stderr, "fs is %x, th->tls_cookie=%x \n", - debug_get_fs(),th->tls_cookie); - lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); - } -#else - gc_assert(get_pseudo_atomic_atomic(th)); -#endif -#endif + /* Must be inside a PA section. */ + gc_assert(get_pseudo_atomic_atomic(thread)); /* maybe we can do this quickly ... 
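
The renaming from first_object_offset to region_start_offset also flips the sign convention: gencgc_pickup_dynamic now stores page_address(page) - prev, i.e. how far the page's base address lies past the start of the contiguous block the page belongs to, so the block start is recovered by subtracting the offset again. A small hypothetical check, assuming the block begins exactly at the page boundary of its first page (which is what verify_generation's region_start_offset == 0 assertion expects):

    static void
    check_region_start_offset(page_index_t first, page_index_t page)
    {
        /* 'first' is the first page of a contiguous block, 'page' is any
         * page of the same block; both arguments are hypothetical. */
        gc_assert(page_table[first].region_start_offset == 0);
        gc_assert(page_table[page].region_start_offset
                  == npage_bytes(page - first));
    }
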
*/ new_free_pointer = region->free_pointer + nbytes; @@ -4658,11 +4712,10 @@ alloc(long nbytes) return(new_obj); /* yup */ } - /* we have to go the long way around, it seems. Check whether - * we should GC in the near future + /* we have to go the long way around, it seems. Check whether we + * should GC in the near future */ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - gc_assert(get_pseudo_atomic_atomic(thread)); /* Don't flood the system with interrupts if the need to gc is * already noted. This can happen for example when SUB-GC * allocates or after a gc triggered in a WITHOUT-GCING. */ @@ -4670,22 +4723,33 @@ alloc(long nbytes) /* set things up so that GC happens when we finish the PA * section */ SetSymbolValue(GC_PENDING,T,thread); - if (SymbolValue(GC_INHIBIT,thread) == NIL) - set_pseudo_atomic_interrupted(thread); + if (SymbolValue(GC_INHIBIT,thread) == NIL) { + set_pseudo_atomic_interrupted(thread); +#ifdef LISP_FEATURE_PPC + /* PPC calls alloc() from a trap, look up the most + * recent one and frob that. */ + { + int context_index = + fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX, + thread)); + os_context_t *context = + thread->interrupt_contexts[context_index - 1]; + maybe_save_gc_mask_and_block_deferrables(context); + } +#else + maybe_save_gc_mask_and_block_deferrables(NULL); +#endif + } } } - new_obj = gc_alloc_with_region(nbytes,0,region,0); + new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0); #ifndef LISP_FEATURE_WIN32 alloc_signal = SymbolValue(ALLOC_SIGNAL,thread); if ((alloc_signal & FIXNUM_TAG_MASK) == 0) { if ((signed long) alloc_signal <= 0) { SetSymbolValue(ALLOC_SIGNAL, T, thread); -#ifdef LISP_FEATURE_SB_THREAD - kill_thread_safely(thread->os_thread, SIGPROF); -#else - raise(SIGPROF); -#endif + thread_kill(thread->os_thread, SIGPROF); } else { SetSymbolValue(ALLOC_SIGNAL, alloc_signal - (1 << N_FIXNUM_TAG_BITS), @@ -4696,12 +4760,43 @@ alloc(long nbytes) return (new_obj); } + +lispobj * +general_alloc(long nbytes, int page_type_flag) +{ + struct thread *thread = arch_os_get_current_thread(); + /* Select correct region, and call general_alloc_internal with it. + * For other then boxed allocation we must lock first, since the + * region is shared. */ + if (BOXED_PAGE_FLAG & page_type_flag) { +#ifdef LISP_FEATURE_SB_THREAD + struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region); +#else + struct alloc_region *region = &boxed_region; +#endif + return general_alloc_internal(nbytes, page_type_flag, region, thread); + } else if (UNBOXED_PAGE_FLAG == page_type_flag) { + lispobj * obj; + gc_assert(0 == thread_mutex_lock(&allocation_lock)); + obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread); + gc_assert(0 == thread_mutex_unlock(&allocation_lock)); + return obj; + } else { + lose("bad page type flag: %d", page_type_flag); + } +} + +lispobj * +alloc(long nbytes) +{ + gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread())); + return general_alloc(nbytes, BOXED_PAGE_FLAG); +} /* * shared support for the OS-dependent signal handlers which * catch GENCGC-related write-protect violations */ - void unhandled_sigmemoryfault(void* addr); /* Depending on which OS we're running under, different signals might @@ -4769,9 +4864,9 @@ void gc_alloc_update_all_page_tables(void) /* Flush the alloc regions updating the tables. 
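
With the refactoring above, alloc() is reduced to a thin wrapper: boxed requests go through general_alloc() and the per-thread (or global boxed) region with no extra locking, while unboxed requests serialize on allocation_lock inside general_alloc() because unboxed_region is shared. A hypothetical runtime-internal caller might look like the sketch below; both entry points still assume the caller is already inside a pseudo-atomic section, and the request size is illustrative (it only needs to satisfy the LOWTAG_MASK alignment check).

    static void
    allocation_example(void)
    {
        /* boxed allocation keeps the old external interface ... */
        lispobj *boxed_mem = alloc(4 * N_WORD_BYTES);
        /* ... while other page types go through general_alloc(), which
         * takes the allocation_lock for the shared unboxed region. */
        lispobj *unboxed_mem = general_alloc(4 * N_WORD_BYTES, UNBOXED_PAGE_FLAG);

        (void)boxed_mem;
        (void)unboxed_mem;
    }
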
*/ struct thread *th; for_each_thread(th) - gc_alloc_update_page_tables(0, &th->alloc_region); - gc_alloc_update_page_tables(1, &unboxed_region); - gc_alloc_update_page_tables(0, &boxed_region); + gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region); + gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region); + gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region); } void @@ -4790,7 +4885,7 @@ zero_all_free_pages() page_index_t i; for (i = 0; i < last_free_page; i++) { - if (page_table[i].allocated == FREE_PAGE_FLAG) { + if (page_free_p(i)) { #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(i), PAGE_BYTES, @@ -4830,7 +4925,8 @@ prepare_for_final_gc () * function being set to the value of the static symbol * SB!VM:RESTART-LISP-FUNCTION */ void -gc_and_save(char *filename, int prepend_runtime) +gc_and_save(char *filename, boolean prepend_runtime, + boolean save_runtime_options) { FILE *file; void *runtime_bytes = NULL; @@ -4865,7 +4961,7 @@ gc_and_save(char *filename, int prepend_runtime) /* The dumper doesn't know that pages need to be zeroed before use. */ zero_all_free_pages(); save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0), - prepend_runtime); + prepend_runtime, save_runtime_options); /* Oops. Save still managed to fail. Since we've mangled the stack * beyond hope, there's not much we can do. * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
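
gc_and_save() now takes a third argument, save_runtime_options, which is threaded through to save_to_filehandle() alongside prepend_runtime; presumably it controls whether the current runtime options are recorded in the saved core. A hypothetical call site (the filename is illustrative, not taken from the patch):

    gc_and_save("sbcl.core",
                1,   /* prepend_runtime: write the runtime in front of the core */
                0);  /* save_runtime_options: leave runtime options unsaved */
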