X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=c9d529cb2ac9c611b2e45d5c7d4e85a3b37d5304;hb=b3f99a6af5bfdaa3090ec244bd3348e279ebbbaf;hp=7091554fcfc980afd7092235b1d8abc38c30aa3c;hpb=afb56ab2865fdb72102a9bb6b2c846b7b5a6ad7e;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index 7091554..c9d529c 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -38,10 +38,10 @@ #include "validate.h" #include "lispregs.h" #include "arch.h" -#include "fixnump.h" #include "gc.h" #include "gc-internal.h" #include "thread.h" +#include "alloc.h" #include "genesis/vector.h" #include "genesis/weak-pointer.h" #include "genesis/fdefn.h" @@ -79,7 +79,7 @@ enum { boolean enable_page_protection = 1; /* the minimum size (in bytes) for a large object*/ -unsigned long large_object_size = 4 * PAGE_BYTES; +long large_object_size = 4 * PAGE_BYTES; /* @@ -162,7 +162,7 @@ static boolean conservative_stack = 1; /* An array of page structures is allocated on gc initialization. * This helps quickly map between an address its page structure. * page_table_pages is set from the size of the dynamic space. */ -unsigned page_table_pages; +page_index_t page_table_pages; struct page *page_table; /* To map addresses to page structures the address of the first page @@ -176,22 +176,44 @@ page_address(page_index_t page_num) return (heap_base + (page_num * PAGE_BYTES)); } +/* Calculate the address where the allocation region associated with + * the page starts. */ +static inline void * +page_region_start(page_index_t page_index) +{ + return page_address(page_index)-page_table[page_index].region_start_offset; +} + /* Find the page index within the page_table for the given * address. Return -1 on failure. */ inline page_index_t find_page_index(void *addr) { - page_index_t index = addr-heap_base; - - if (index >= 0) { - index = ((unsigned long)index)/PAGE_BYTES; + if (addr >= heap_base) { + page_index_t index = ((pointer_sized_uint_t)addr - + (pointer_sized_uint_t)heap_base) / PAGE_BYTES; if (index < page_table_pages) return (index); } - return (-1); } +static size_t +npage_bytes(long npages) +{ + gc_assert(npages>=0); + return ((unsigned long)npages)*PAGE_BYTES; +} + +/* Check that X is a higher address than Y and return offset from Y to + * X in bytes. */ +static inline +size_t void_diff(void *x, void *y) +{ + gc_assert(x >= y); + return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y; +} + /* a structure to hold the state of a generation */ struct generation { @@ -211,13 +233,13 @@ struct generation { page_index_t alloc_large_unboxed_start_page; /* the bytes allocated to this generation */ - long bytes_allocated; + unsigned long bytes_allocated; /* the number of bytes at which to trigger a GC */ - long gc_trigger; + unsigned long gc_trigger; /* to calculate a new level for gc_trigger */ - long bytes_consed_between_gc; + unsigned long bytes_consed_between_gc; /* the number of GCs since the last raise */ int num_gc; @@ -231,7 +253,7 @@ struct generation { * objects are added from a GC of a younger generation. Dividing by * the bytes_allocated will give the average age of the memory in * this generation since its last GC. 
*/ - long cum_sum_bytes_allocated; + unsigned long cum_sum_bytes_allocated; /* a minimum average memory age before a GC will occur helps * prevent a GC when a large number of new live objects have been @@ -294,7 +316,7 @@ static long count_write_protect_generation_pages(generation_index_t generation) { page_index_t i; - long count = 0; + unsigned long count = 0; for (i = 0; i < last_free_page; i++) if ((page_table[i].allocated != FREE_PAGE_FLAG) @@ -336,11 +358,11 @@ count_dont_move_pages(void) /* Work through the pages and add up the number of bytes used for the * given generation. */ -static long +static unsigned long count_generation_bytes_allocated (generation_index_t gen) { page_index_t i; - long result = 0; + unsigned long result = 0; for (i = 0; i < last_free_page; i++) { if ((page_table[i].allocated != FREE_PAGE_FLAG) && (page_table[i].gen == gen)) @@ -435,7 +457,8 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ large_unboxed_cnt, pinned_cnt, generations[i].bytes_allocated, - (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated), + (npage_bytes(count_generation_pages(i)) + - generations[i].bytes_allocated), generations[i].gc_trigger, count_write_protect_generation_pages(i), generations[i].num_gc, @@ -457,8 +480,8 @@ void fast_bzero(void*, size_t); /* in -assem.S */ */ void zero_pages_with_mmap(page_index_t start, page_index_t end) { int i; - void *addr = (void *) page_address(start), *new_addr; - size_t length = PAGE_BYTES*(1+end-start); + void *addr = page_address(start), *new_addr; + size_t length = npage_bytes(1+end-start); if (start > end) return; @@ -466,7 +489,8 @@ void zero_pages_with_mmap(page_index_t start, page_index_t end) { os_invalidate(addr, length); new_addr = os_validate(addr, length); if (new_addr == NULL || new_addr != addr) { - lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", start, new_addr); + lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", + start, new_addr); } for (i = start; i <= end; i++) { @@ -483,9 +507,9 @@ zero_pages(page_index_t start, page_index_t end) { return; #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) - fast_bzero(page_address(start), PAGE_BYTES*(1+end-start)); + fast_bzero(page_address(start), npage_bytes(1+end-start)); #else - bzero(page_address(start), PAGE_BYTES*(1+end-start)); + bzero(page_address(start), npage_bytes(1+end-start)); #endif } @@ -593,7 +617,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) { page_index_t first_page; page_index_t last_page; - long bytes_found; + unsigned long bytes_found; page_index_t i; int ret; @@ -618,7 +642,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) } last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) - + PAGE_BYTES*(last_page-first_page); + + npage_bytes(last_page-first_page); /* Set up the alloc_region. 
*/ alloc_region->first_page = first_page; @@ -638,7 +662,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].large_object = 0; - page_table[first_page].first_object_offset = 0; + page_table[first_page].region_start_offset = 0; } if (unboxed) @@ -659,37 +683,23 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) page_table[i].large_object = 0; /* This may not be necessary for unboxed regions (think it was * broken before!) */ - page_table[i].first_object_offset = - alloc_region->start_addr - page_address(i); + page_table[i].region_start_offset = + void_diff(page_address(i),alloc_region->start_addr); page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ; } /* Bump up last_free_page. */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - /* do we only want to call this on special occasions? like for boxed_region? */ - set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES)); + /* do we only want to call this on special occasions? like for + * boxed_region? */ + set_alloc_pointer((lispobj)page_address(last_free_page)); } ret = thread_mutex_unlock(&free_pages_lock); gc_assert(ret == 0); - /* we can do this after releasing free_pages_lock */ - if (gencgc_zero_check) { - long *p; - for (p = (long *)alloc_region->start_addr; - p < (long *)alloc_region->end_addr; p++) { - if (*p != 0) { - /* KLUDGE: It would be nice to use %lx and explicit casts - * (long) in code like this, so that it is less likely to - * break randomly when running on a machine with different - * word sizes. -- WHN 19991129 */ - lose("The new region at %x is not zero.\n", p); - } - } - } - #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(1+last_page-first_page), + npage_bytes(1+last_page-first_page), OS_VM_PROT_ALL); #endif @@ -702,6 +712,22 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) } zero_dirty_pages(first_page, last_page); + + /* we can do this after releasing free_pages_lock */ + if (gencgc_zero_check) { + long *p; + for (p = (long *)alloc_region->start_addr; + p < (long *)alloc_region->end_addr; p++) { + if (*p != 0) { + /* KLUDGE: It would be nice to use %lx and explicit casts + * (long) in code like this, so that it is less likely to + * break randomly when running on a machine with different + * word sizes. -- WHN 19991129 */ + lose("The new region at %x is not zero (start=%p, end=%p).\n", + p, alloc_region->start_addr, alloc_region->end_addr); + } + } + } } /* If the record_new_objects flag is 2 then all new regions created @@ -724,8 +750,8 @@ static int record_new_objects = 0; static page_index_t new_areas_ignore_page; struct new_area { page_index_t page; - long offset; - long size; + size_t offset; + size_t size; }; static struct new_area (*new_areas)[]; static long new_areas_index; @@ -733,7 +759,7 @@ long max_new_areas; /* Add a new area to new_areas. */ static void -add_new_area(page_index_t first_page, long offset, long size) +add_new_area(page_index_t first_page, size_t offset, size_t size) { unsigned long new_area_start,c; long i; @@ -755,13 +781,13 @@ add_new_area(page_index_t first_page, long offset, long size) gc_abort(); } - new_area_start = PAGE_BYTES*first_page + offset; + new_area_start = npage_bytes(first_page) + offset; /* Search backwards for a prior area that this follows from. 
If found this will save adding a new area. */ for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) { unsigned long area_end = - PAGE_BYTES*((*new_areas)[i].page) + npage_bytes((*new_areas)[i].page) + (*new_areas)[i].offset + (*new_areas)[i].size; /*FSHOW((stderr, @@ -808,10 +834,10 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) int more; page_index_t first_page; page_index_t next_page; - int bytes_used; - long orig_first_page_bytes_used; - long region_size; - long byte_cnt; + unsigned long bytes_used; + unsigned long orig_first_page_bytes_used; + unsigned long region_size; + unsigned long byte_cnt; int ret; @@ -829,16 +855,18 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* some bytes were allocated in the region */ orig_first_page_bytes_used = page_table[first_page].bytes_used; - gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used)); + gc_assert(alloc_region->start_addr == + (page_address(first_page) + + page_table[first_page].bytes_used)); /* All the pages used need to be updated */ /* Update the first page. */ /* If the page was free then set up the gen, and - * first_object_offset. */ + * region_start_offset. */ if (page_table[first_page].bytes_used == 0) - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) @@ -853,7 +881,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Calculate the number of bytes used in this page. This is not * always the number of new bytes, unless it was free. */ more = 0; - if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) { + if ((bytes_used = void_diff(alloc_region->free_pointer, + page_address(first_page))) + >PAGE_BYTES) { bytes_used = PAGE_BYTES; more = 1; } @@ -861,9 +891,9 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) byte_cnt += bytes_used; - /* All the rest of the pages should be free. We need to set their - * first_object_offset pointer to the start of the region, and set - * the bytes_used. */ + /* All the rest of the pages should be free. We need to set + * their region_start_offset pointer to the start of the + * region, and set the bytes_used. */ while (more) { page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); if (unboxed) @@ -874,13 +904,14 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); - gc_assert(page_table[next_page].first_object_offset == - alloc_region->start_addr - page_address(next_page)); + gc_assert(page_table[next_page].region_start_offset == + void_diff(page_address(next_page), + alloc_region->start_addr)); /* Calculate the number of bytes used in this page. 
*/ more = 0; - if ((bytes_used = (alloc_region->free_pointer - - page_address(next_page)))>PAGE_BYTES) { + if ((bytes_used = void_diff(alloc_region->free_pointer, + page_address(next_page)))>PAGE_BYTES) { bytes_used = PAGE_BYTES; more = 1; } @@ -890,7 +921,8 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) next_page++; } - region_size = alloc_region->free_pointer - alloc_region->start_addr; + region_size = void_diff(alloc_region->free_pointer, + alloc_region->start_addr); bytes_allocated += region_size; generations[gc_alloc_generation].bytes_allocated += region_size; @@ -976,14 +1008,14 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) orig_first_page_bytes_used = page_table[first_page].bytes_used; /* If the first page was free then set up the gen, and - * first_object_offset. */ + * region_start_offset. */ if (page_table[first_page].bytes_used == 0) { if (unboxed) page_table[first_page].allocated = UNBOXED_PAGE_FLAG; else page_table[first_page].allocated = BOXED_PAGE_FLAG; page_table[first_page].gen = gc_alloc_generation; - page_table[first_page].first_object_offset = 0; + page_table[first_page].region_start_offset = 0; page_table[first_page].large_object = 1; } @@ -1009,8 +1041,8 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) next_page = first_page+1; /* All the rest of the pages should be free. We need to set their - * first_object_offset pointer to the start of the region, and - * set the bytes_used. */ + * region_start_offset pointer to the start of the region, and set + * the bytes_used. */ while (more) { gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); @@ -1021,12 +1053,13 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) page_table[next_page].gen = gc_alloc_generation; page_table[next_page].large_object = 1; - page_table[next_page].first_object_offset = - orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page); + page_table[next_page].region_start_offset = + npage_bytes(next_page-first_page) - orig_first_page_bytes_used; /* Calculate the number of bytes used in this page. */ more = 0; - if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) { + bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt; + if (bytes_used > PAGE_BYTES) { bytes_used = PAGE_BYTES; more = 1; } @@ -1049,14 +1082,14 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) /* Bump up last_free_page */ if (last_page+1 > last_free_page) { last_free_page = last_page+1; - set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES)); + set_alloc_pointer((lispobj)(page_address(last_free_page))); } ret = thread_mutex_unlock(&free_pages_lock); gc_assert(ret == 0); #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(1+last_page-first_page), + npage_bytes(1+last_page-first_page), OS_VM_PROT_ALL); #endif @@ -1076,7 +1109,8 @@ gc_heap_exhausted_error_or_lose (long available, long requested) * handled, or indeed even printed. */ fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n", - gc_active_p ? "garbage collection" : "allocation", available, requested); + gc_active_p ? "garbage collection" : "allocation", + available, requested); if (gc_active_p || (available == 0)) { /* If we are in GC, or totally out of memory there is no way * to sanely transfer control to the lisp-side of things. 
@@ -1095,8 +1129,8 @@ gc_heap_exhausted_error_or_lose (long available, long requested) } else { /* FIXME: assert free_pages_lock held */ - thread_mutex_unlock(&free_pages_lock); - funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR), + (void)thread_mutex_unlock(&free_pages_lock); + funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), alloc_number(available), alloc_number(requested)); lose("HEAP-EXHAUSTED-ERROR fell through"); } @@ -1105,76 +1139,83 @@ gc_heap_exhausted_error_or_lose (long available, long requested) page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) { - page_index_t first_page; - page_index_t last_page; - long region_size; - page_index_t restart_page=*restart_page_ptr; - long bytes_found; - long num_pages; - int large_p=(nbytes>=large_object_size); + page_index_t first_page, last_page; + page_index_t restart_page = *restart_page_ptr; + long bytes_found = 0; + long most_bytes_found = 0; /* FIXME: assert(free_pages_lock is held); */ - /* Search for a contiguous free space of at least nbytes. If it's - * a large object then align it on a page boundary by searching - * for a free page. */ - + /* Toggled by gc_and_save for heap compaction, normally -1. */ if (gencgc_alloc_start_page != -1) { restart_page = gencgc_alloc_start_page; } - do { - first_page = restart_page; - if (large_p) - while ((first_page < page_table_pages) - && (page_table[first_page].allocated != FREE_PAGE_FLAG)) - first_page++; - else - while (first_page < page_table_pages) { - if(page_table[first_page].allocated == FREE_PAGE_FLAG) - break; - if((page_table[first_page].allocated == - (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && - (page_table[first_page].large_object == 0) && - (page_table[first_page].gen == gc_alloc_generation) && - (page_table[first_page].bytes_used < (PAGE_BYTES-32)) && - (page_table[first_page].write_protected == 0) && - (page_table[first_page].dont_move == 0)) { - break; - } + if (nbytes>=PAGE_BYTES) { + /* Search for a contiguous free space of at least nbytes, + * aligned on a page boundary. The page-alignment is strictly + * speaking needed only for objects at least large_object_size + * bytes in size. */ + do { + first_page = restart_page; + while ((first_page < page_table_pages) && + (page_table[first_page].allocated != FREE_PAGE_FLAG)) first_page++; - } - - if (first_page >= page_table_pages) - gc_heap_exhausted_error_or_lose(0, nbytes); - gc_assert(page_table[first_page].write_protected == 0); + last_page = first_page; + bytes_found = PAGE_BYTES; + while ((bytes_found < nbytes) && + (last_page < (page_table_pages-1)) && + (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { + last_page++; + bytes_found += PAGE_BYTES; + gc_assert(page_table[last_page].write_protected == 0); + } + if (bytes_found > most_bytes_found) + most_bytes_found = bytes_found; + restart_page = last_page + 1; + } while ((restart_page < page_table_pages) && (bytes_found < nbytes)); - last_page = first_page; - bytes_found = PAGE_BYTES - page_table[first_page].bytes_used; - num_pages = 1; - while (((bytes_found < nbytes) - || (!large_p && (num_pages < 2))) - && (last_page < (page_table_pages-1)) - && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { - last_page++; - num_pages++; - bytes_found += PAGE_BYTES; - gc_assert(page_table[last_page].write_protected == 0); + } else { + /* Search for a page with at least nbytes of space. 
We prefer
+         * not to split small objects on multiple pages, to reduce the
+         * number of contiguous allocation regions spanning multiple
+         * pages: this helps avoid excessive conservativism. */
+        first_page = restart_page;
+        while (first_page < page_table_pages) {
+            if (page_table[first_page].allocated == FREE_PAGE_FLAG)
+                {
+                    bytes_found = PAGE_BYTES;
+                    break;
+                }
+            else if ((page_table[first_page].allocated ==
+                      (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+                     (page_table[first_page].large_object == 0) &&
+                     (page_table[first_page].gen == gc_alloc_generation) &&
+                     (page_table[first_page].write_protected == 0) &&
+                     (page_table[first_page].dont_move == 0))
+                {
+                    bytes_found = PAGE_BYTES
+                        - page_table[first_page].bytes_used;
+                    if (bytes_found > most_bytes_found)
+                        most_bytes_found = bytes_found;
+                    if (bytes_found >= nbytes)
+                        break;
+                }
+            first_page++;
         }
-
-        region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
-            + PAGE_BYTES*(last_page-first_page);
-
-        gc_assert(bytes_found == region_size);
-        restart_page = last_page + 1;
-    } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
+        last_page = first_page;
+        restart_page = first_page + 1;
+    }
 
     /* Check for a failure */
-    if ((restart_page >= page_table_pages) && (bytes_found < nbytes))
-        gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+    if (bytes_found < nbytes) {
+        gc_assert(restart_page >= page_table_pages);
+        gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
+    }
 
-    *restart_page_ptr=first_page;
+    gc_assert(page_table[first_page].write_protected == 0);
+    *restart_page_ptr = first_page;
     return last_page;
 }
 
@@ -1187,7 +1228,7 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
 {
     void *new_free_pointer;
 
-    if(nbytes>=large_object_size)
+    if (nbytes>=large_object_size)
         return gc_alloc_large(nbytes,unboxed_p,my_region);
 
     /* Check whether there is room in the current alloc region. */
@@ -1204,7 +1245,7 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
     /* Unless a `quick' alloc was requested, check whether the alloc
        region is almost empty. */
     if (!quick_p &&
-        (my_region->end_addr - my_region->free_pointer) <= 32) {
+        void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
         /* If so, finished with the current region. */
         gc_alloc_update_page_tables(unboxed_p, my_region);
         /* Set up a new region. */
@@ -1264,13 +1305,6 @@ gc_quick_alloc_large_unboxed(long nbytes)
     return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
 }
 
-/*
- * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
- */
-
-extern long (*scavtab[256])(lispobj *where, lispobj object);
-extern lispobj (*transother[256])(lispobj object);
-extern long (*sizetab[256])(lispobj *where);
 
 /* Copy a large boxed object. If the object is in a large object
  * region then it is simply promoted, else it is copied. If it's large
@@ -1298,10 +1332,10 @@ copy_large_object(lispobj object, long nwords)
 
         /* Promote the object. */
 
-        long remaining_bytes;
+        unsigned long remaining_bytes;
         page_index_t next_page;
-        long bytes_freed;
-        long old_bytes_used;
+        unsigned long bytes_freed;
+        unsigned long old_bytes_used;
 
         /* Note: Any page write-protection must be removed, else a
          * later scavenge_newspace may incorrectly not scavenge these
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?).
*/ - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); next_page = first_page; remaining_bytes = nwords*N_WORD_BYTES; @@ -1317,8 +1351,8 @@ copy_large_object(lispobj object, long nwords) gc_assert(page_table[next_page].gen == from_space); gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset== - -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].region_start_offset == + npage_bytes(next_page-first_page)); gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; @@ -1354,8 +1388,8 @@ copy_large_object(lispobj object, long nwords) (page_table[next_page].gen == from_space) && (page_table[next_page].allocated == BOXED_PAGE_FLAG) && page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { + (page_table[next_page].region_start_offset == + npage_bytes(next_page - first_page))) { /* Checks out OK, free the page. Don't need to bother zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected as they @@ -1369,8 +1403,8 @@ copy_large_object(lispobj object, long nwords) next_page++; } - generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords + - bytes_freed; + generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords + + bytes_freed; generations[new_space].bytes_allocated += N_WORD_BYTES*nwords; bytes_allocated -= bytes_freed; @@ -1438,7 +1472,8 @@ copy_large_unboxed_object(lispobj object, long nwords) gc_assert((nwords & 0x01) == 0); if ((nwords > 1024*1024) && gencgc_verbose) - FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); + FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", + nwords*N_WORD_BYTES)); /* Check whether it's a large object. */ first_page = find_page_index((void *)object); @@ -1448,12 +1483,12 @@ copy_large_unboxed_object(lispobj object, long nwords) /* Promote the object. Note: Unboxed objects may have been * allocated to a BOXED region so it may be necessary to * change the region to UNBOXED. 
*/ - long remaining_bytes; + unsigned long remaining_bytes; page_index_t next_page; - long bytes_freed; - long old_bytes_used; + unsigned long bytes_freed; + unsigned long old_bytes_used; - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); next_page = first_page; remaining_bytes = nwords*N_WORD_BYTES; @@ -1462,8 +1497,8 @@ copy_large_unboxed_object(lispobj object, long nwords) gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset== - -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].region_start_offset == + npage_bytes(next_page-first_page)); gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].gen = new_space; @@ -1494,8 +1529,8 @@ copy_large_unboxed_object(lispobj object, long nwords) ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { + (page_table[next_page].region_start_offset == + npage_bytes(next_page - first_page))) { /* Checks out OK, free the page. Don't need to both zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write-protected, even if @@ -1514,7 +1549,8 @@ copy_large_unboxed_object(lispobj object, long nwords) "/copy_large_unboxed bytes_freed=%d\n", bytes_freed)); - generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; + generations[from_space].bytes_allocated -= + nwords*N_WORD_BYTES + bytes_freed; generations[new_space].bytes_allocated += nwords*N_WORD_BYTES; bytes_allocated -= bytes_freed; @@ -1569,6 +1605,8 @@ sniff_code_object(struct code *code, unsigned long displacement) if (!check_code_fixups) return; + FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement)); + ncode_words = fixnum_value(code->code_size); nheader_words = HeaderValue(*(lispobj *)code); nwords = ncode_words + nheader_words; @@ -1597,7 +1635,8 @@ sniff_code_object(struct code *code, unsigned long displacement) && (data < (code_end_addr-displacement))) { /* function header */ if ((d4 == 0x5e) - && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) { + && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == + (unsigned)code)) { /* Skip the function header */ p += 6*4 - 4 - 1; continue; @@ -1737,7 +1776,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) void *constants_start_addr, *constants_end_addr; void *code_start_addr, *code_end_addr; lispobj fixups = NIL; - unsigned long displacement = (unsigned long)new_code - (unsigned long)old_code; + unsigned long displacement = + (unsigned long)new_code - (unsigned long)old_code; struct vector *fixups_vector; ncode_words = fixnum_value(new_code->code_size); @@ -1785,7 +1825,8 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code) (fixups_vector->header == 0x01)) { /* If so, then follow it. 
*/
         /*SHOW("following pointer to a forwarding pointer");*/
-        fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
+        fixups_vector =
+            (struct vector *)native_pointer((lispobj)fixups_vector->length);
     }
 
     /*SHOW("got fixups");*/
@@ -1804,7 +1845,8 @@
             /* If it's within the old_code object then it must be an
              * absolute fixup (relative ones are not saved) */
             if ((old_value >= (unsigned long)old_code)
-                && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES)))
+                && (old_value < ((unsigned long)old_code
+                                 + nwords*N_WORD_BYTES)))
                 /* So add the dispacement. */
                 *(unsigned long *)((unsigned long)code_start_addr + offset) =
                     old_value + displacement;
@@ -1816,7 +1858,10 @@
                     old_value - displacement;
             }
         } else {
-        fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
+        /* This used to just print a note to stderr, but a bogus fixup seems to
+         * indicate real heap corruption, so a hard failure is in order. */
+        lose("fixup vector %p has a bad widetag: %d\n",
+             fixups_vector, widetag_of(fixups_vector->header));
     }
 
     /* Check for possible errors. */
@@ -2050,29 +2095,21 @@ size_lutex(lispobj *where)
 static long
 scav_weak_pointer(lispobj *where, lispobj object)
 {
-    struct weak_pointer *wp = weak_pointers;
-    /* Push the weak pointer onto the list of weak pointers.
-     * Do I have to watch for duplicates? Originally this was
-     * part of trans_weak_pointer but that didn't work in the
-     * case where the WP was in a promoted region.
+    /* Since we overwrite the 'next' field, we have to make
+     * sure not to do so for pointers already in the list.
+     * Instead of searching the list of weak_pointers each
+     * time, we ensure that next is always NULL when the weak
+     * pointer isn't in the list, and not NULL otherwise.
+     * Since we can't use NULL to denote end of list, we
+     * use a pointer back to the same weak_pointer.
      */
+    struct weak_pointer * wp = (struct weak_pointer*)where;
 
-    /* Check whether it's already in the list. */
-    while (wp != NULL) {
-        if (wp == (struct weak_pointer*)where) {
-            break;
-        }
-        wp = wp->next;
-    }
-    if (wp == NULL) {
-        /* Add it to the start of the list. */
-        wp = (struct weak_pointer*)where;
-        if (wp->next != weak_pointers) {
-            wp->next = weak_pointers;
-        } else {
-            /*SHOW("avoided write to weak pointer");*/
-        }
+    if (NULL == wp->next) {
+        wp->next = weak_pointers;
         weak_pointers = wp;
+        if (NULL == wp->next)
+            wp->next = wp;
     }
 
     /* Do not let GC scavenge the value slot of the weak pointer.
@@ -2118,8 +2155,7 @@
     if ((page_index == -1) ||
         (page_table[page_index].allocated == FREE_PAGE_FLAG))
         return NULL;
-    start = (lispobj *)((void *)page_address(page_index)
-                        + page_table[page_index].first_object_offset);
+    start = (lispobj *)page_region_start(page_index);
     return (gc_search_space(start,
                             (((lispobj *)pointer)+2)-start,
                             (lispobj *)pointer));
@@ -2186,20 +2222,10 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
             return 0;
         }
         /* Is it plausible cons?
*/ - if ((is_lisp_pointer(start_addr[0]) - || (fixnump(start_addr[0])) - || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG) -#if N_WORD_BITS == 64 - || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG) -#endif - || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG)) - && (is_lisp_pointer(start_addr[1]) - || (fixnump(start_addr[1])) - || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG) -#if N_WORD_BITS == 64 - || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG) -#endif - || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG))) + if ((is_lisp_pointer(start_addr[0]) || + is_lisp_immediate(start_addr[0])) && + (is_lisp_pointer(start_addr[1]) || + is_lisp_immediate(start_addr[1]))) break; else { if (gencgc_verbose) @@ -2445,9 +2471,9 @@ maybe_adjust_large_object(lispobj *where) page_index_t next_page; long nwords; - long remaining_bytes; - long bytes_freed; - long old_bytes_used; + unsigned long remaining_bytes; + unsigned long bytes_freed; + unsigned long old_bytes_used; int boxed; @@ -2533,7 +2559,7 @@ maybe_adjust_large_object(lispobj *where) * but lets do it for them all (they'll probably be written * anyway?). */ - gc_assert(page_table[first_page].first_object_offset == 0); + gc_assert(page_table[first_page].region_start_offset == 0); next_page = first_page; remaining_bytes = nwords*N_WORD_BYTES; @@ -2542,8 +2568,8 @@ maybe_adjust_large_object(lispobj *where) gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG) || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG)); gc_assert(page_table[next_page].large_object); - gc_assert(page_table[next_page].first_object_offset == - -PAGE_BYTES*(next_page-first_page)); + gc_assert(page_table[next_page].region_start_offset == + npage_bytes(next_page-first_page)); gc_assert(page_table[next_page].bytes_used == PAGE_BYTES); page_table[next_page].allocated = boxed; @@ -2578,8 +2604,8 @@ maybe_adjust_large_object(lispobj *where) ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && page_table[next_page].large_object && - (page_table[next_page].first_object_offset == - -(next_page - first_page)*PAGE_BYTES)) { + (page_table[next_page].region_start_offset == + npage_bytes(next_page - first_page))) { /* It checks out OK, free the page. We don't need to both zeroing * pages as this should have been done before shrinking the * object. These pages shouldn't be write protected as they @@ -2642,7 +2668,8 @@ preserve_pointer(void *addr) /* quick check 2: Check the offset within the page. * */ - if (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used) + if (((unsigned long)addr & (PAGE_BYTES - 1)) > + page_table[addr_page_index].bytes_used) return; /* Filter out anything which can't be a pointer to a Lisp object @@ -2662,12 +2689,10 @@ preserve_pointer(void *addr) #if 0 /* I think this'd work just as well, but without the assertions. * -dan 2004.01.01 */ - first_page= - find_page_index(page_address(addr_page_index)+ - page_table[addr_page_index].first_object_offset); + first_page = find_page_index(page_region_start(addr_page_index)) #else first_page = addr_page_index; - while (page_table[first_page].first_object_offset != 0) { + while (page_table[first_page].region_start_offset != 0) { --first_page; /* Do some checks. 
*/ gc_assert(page_table[first_page].bytes_used == PAGE_BYTES); @@ -2726,7 +2751,7 @@ preserve_pointer(void *addr) || (page_table[i+1].allocated == FREE_PAGE_FLAG) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. gen */ - || (page_table[i+1].first_object_offset == 0)) + || (page_table[i+1].region_start_offset == 0)) break; } @@ -2863,7 +2888,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) int write_protected=1; /* This should be the start of a region */ - gc_assert(page_table[i].first_object_offset == 0); + gc_assert(page_table[i].region_start_offset == 0); /* Now work forward until the end of the region */ for (last_page = i; ; last_page++) { @@ -2874,13 +2899,14 @@ scavenge_generations(generation_index_t from, generation_index_t to) || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + || (page_table[last_page+1].region_start_offset == 0)) break; } if (!write_protected) { scavenge(page_address(i), - (page_table[last_page].bytes_used + - (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + ((unsigned long)(page_table[last_page].bytes_used + + npage_bytes(last_page-i))) + /N_WORD_BYTES); /* Now scan the pages and write protect those that * don't have pointers to younger generations. */ @@ -2909,9 +2935,9 @@ scavenge_generations(generation_index_t from, generation_index_t to) && (page_table[i].write_protected_cleared != 0)) { FSHOW((stderr, "/scavenge_generation() %d\n", generation)); FSHOW((stderr, - "/page bytes_used=%d first_object_offset=%d dont_move=%d\n", + "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n", page_table[i].bytes_used, - page_table[i].first_object_offset, + page_table[i].region_start_offset, page_table[i].dont_move)); lose("write to protected page %d in scavenge_generation()\n", i); } @@ -2967,7 +2993,8 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) page_index_t last_page; int all_wp=1; - /* The scavenge will start at the first_object_offset of page i. + /* The scavenge will start at the region_start_offset of + * page i. * * We need to find the full extent of this contiguous * block in case objects span pages. @@ -2988,22 +3015,20 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + || (page_table[last_page+1].region_start_offset == 0)) break; } /* Do a limited check for write-protected pages. */ if (!all_wp) { - long size; - - size = (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES - - page_table[i].first_object_offset)/N_WORD_BYTES; + long nwords = (((unsigned long) + (page_table[last_page].bytes_used + + npage_bytes(last_page-i) + + page_table[i].region_start_offset)) + / N_WORD_BYTES); new_areas_ignore_page = last_page; - scavenge(page_address(i) + - page_table[i].first_object_offset, - size); + scavenge(page_region_start(i), nwords); } i = last_page; @@ -3108,9 +3133,9 @@ scavenge_newspace_generation(generation_index_t generation) /* Work through previous_new_areas. 
*/ for (i = 0; i < previous_new_areas_index; i++) { - long page = (*previous_new_areas)[i].page; - long offset = (*previous_new_areas)[i].offset; - long size = (*previous_new_areas)[i].size / N_WORD_BYTES; + page_index_t page = (*previous_new_areas)[i].page; + size_t offset = (*previous_new_areas)[i].offset; + size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES; gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0); scavenge(page_address(page)+offset, size); } @@ -3179,10 +3204,10 @@ unprotect_oldspace(void) * assumes that all objects have been copied or promoted to an older * generation. Bytes_allocated and the generation bytes_allocated * counter are updated. The number of bytes freed is returned. */ -static long +static unsigned long free_oldspace(void) { - long bytes_freed = 0; + unsigned long bytes_freed = 0; page_index_t first_page, last_page; first_page = 0; @@ -3228,7 +3253,7 @@ free_oldspace(void) #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(first_page), - PAGE_BYTES*(last_page-first_page), + npage_bytes(last_page-first_page), OS_VM_PROT_NONE); #endif first_page = last_page; @@ -3247,13 +3272,13 @@ print_ptr(lispobj *addr) page_index_t pi1 = find_page_index((void*)addr); if (pi1 != -1) - fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n", + fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n", (unsigned long) addr, pi1, page_table[pi1].allocated, page_table[pi1].gen, page_table[pi1].bytes_used, - page_table[pi1].first_object_offset, + page_table[pi1].region_start_offset, page_table[pi1].dont_move); fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n", *(addr-4), @@ -3366,8 +3391,10 @@ verify_space(lispobj *start, size_t words) count = 1; break; } - nuntagged = ((struct layout *)native_pointer(layout))->n_untagged_slots; - verify_space(start + 1, ntotal - fixnum_value(nuntagged)); + nuntagged = ((struct layout *) + native_pointer(layout))->n_untagged_slots; + verify_space(start + 1, + ntotal - fixnum_value(nuntagged)); count = ntotal + 1; break; } @@ -3415,7 +3442,8 @@ verify_space(lispobj *start, size_t words) while (fheaderl != NIL) { fheaderp = (struct simple_fun *) native_pointer(fheaderl); - gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG); + gc_assert(widetag_of(fheaderp->header) == + SIMPLE_FUN_HEADER_WIDETAG); verify_space(&fheaderp->name, 1); verify_space(&fheaderp->arglist, 1); verify_space(&fheaderp->type, 1); @@ -3506,15 +3534,15 @@ verify_space(lispobj *start, size_t words) #ifdef LUTEX_WIDETAG case LUTEX_WIDETAG: #endif +#ifdef NO_TLS_VALUE_MARKER_WIDETAG + case NO_TLS_VALUE_MARKER_WIDETAG: +#endif count = (sizetab[widetag_of(*start)])(start); break; default: - FSHOW((stderr, - "/Unhandled widetag 0x%x at 0x%x\n", - widetag_of(*start), start)); - fflush(stderr); - gc_abort(); + lose("Unhandled widetag 0x%x at 0x%x\n", + widetag_of(*start), start); } } } @@ -3562,7 +3590,7 @@ verify_generation(generation_index_t generation) int region_allocation = page_table[i].allocated; /* This should be the start of a contiguous block */ - gc_assert(page_table[i].first_object_offset == 0); + gc_assert(page_table[i].region_start_offset == 0); /* Need to find the full extent of this contiguous block in case objects span pages. 
*/ @@ -3577,11 +3605,14 @@ verify_generation(generation_index_t generation) || (page_table[last_page+1].allocated != region_allocation) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) - || (page_table[last_page+1].first_object_offset == 0)) + || (page_table[last_page+1].region_start_offset == 0)) break; - verify_space(page_address(i), (page_table[last_page].bytes_used - + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES); + verify_space(page_address(i), + ((unsigned long) + (page_table[last_page].bytes_used + + npage_bytes(last_page-i))) + / N_WORD_BYTES); i = last_page; } } @@ -3674,7 +3705,7 @@ write_protect_generation_pages(generation_index_t generation) page_start = (void *)page_address(start); os_protect(page_start, - PAGE_BYTES * (last - start), + npage_bytes(last - start), OS_VM_PROT_READ | OS_VM_PROT_EXECUTE); start = last; @@ -3761,9 +3792,11 @@ scavenge_interrupt_context(os_context_t * context) /* Compute the PC's offset from the start of the CODE */ /* register. */ - pc_code_offset = *os_context_pc_addr(context) - *os_context_register_addr(context, reg_CODE); + pc_code_offset = *os_context_pc_addr(context) + - *os_context_register_addr(context, reg_CODE); #ifdef ARCH_HAS_NPC_REGISTER - npc_code_offset = *os_context_npc_addr(context) - *os_context_register_addr(context, reg_CODE); + npc_code_offset = *os_context_npc_addr(context) + - *os_context_register_addr(context, reg_CODE); #endif /* ARCH_HAS_NPC_REGISTER */ #ifdef ARCH_HAS_LINK_REGISTER @@ -3789,22 +3822,25 @@ scavenge_interrupt_context(os_context_t * context) /* Fix the LIP */ /* - * But what happens if lip_register_pair is -1? *os_context_register_addr on Solaris - * (see solaris_register_address in solaris-os.c) will return - * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is - * that what we really want? My guess is that that is not what we + * But what happens if lip_register_pair is -1? + * *os_context_register_addr on Solaris (see + * solaris_register_address in solaris-os.c) will return + * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is + * that what we really want? My guess is that that is not what we * want, so if lip_register_pair is -1, we don't touch reg_LIP at - * all. But maybe it doesn't really matter if LIP is trashed? + * all. But maybe it doesn't really matter if LIP is trashed? 
*/
     if (lip_register_pair >= 0) {
         *os_context_register_addr(context, reg_LIP) =
-            *os_context_register_addr(context, lip_register_pair) + lip_offset;
+            *os_context_register_addr(context, lip_register_pair)
+            + lip_offset;
     }
 #endif /* reg_LIP */
 
     /* Fix the PC if it was in from space */
     if (from_space_p(*os_context_pc_addr(context)))
-        *os_context_pc_addr(context) = *os_context_register_addr(context, reg_CODE) + pc_code_offset;
+        *os_context_pc_addr(context) =
+            *os_context_register_addr(context, reg_CODE) + pc_code_offset;
 
 #ifdef ARCH_HAS_LINK_REGISTER
     /* Fix the LR ditto; important if we're being called from
@@ -3817,7 +3853,8 @@
 
 #ifdef ARCH_HAS_NPC_REGISTER
     if (from_space_p(*os_context_npc_addr(context)))
-        *os_context_npc_addr(context) = *os_context_register_addr(context, reg_CODE) + npc_code_offset;
+        *os_context_npc_addr(context) =
+            *os_context_register_addr(context, reg_CODE) + npc_code_offset;
 #endif /* ARCH_HAS_NPC_REGISTER */
 }
 
@@ -4004,7 +4041,7 @@ garbage_collect_generation(generation_index_t generation, int raise)
             fprintf(stderr,
                     "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
                     num_dont_move_pages,
-                    num_dont_move_pages * PAGE_BYTES);
+                    npage_bytes(num_dont_move_pages));
         }
 #endif
 
@@ -4177,7 +4214,7 @@ update_dynamic_space_free_pointer(void)
 
     last_free_page = last_page+1;
 
-    set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+    set_alloc_pointer((lispobj)(page_address(last_free_page)));
     return 0; /* dummy value: return something ... */
 }
 
@@ -4387,7 +4424,8 @@ gc_free_heap(void)
             page_table[page].allocated = FREE_PAGE_FLAG;
             page_table[page].bytes_used = 0;
 
-#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */
+#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
+                            * about this change. */
             /* Zero the page. */
             page_start = (void *)page_address(page);
 
@@ -4449,8 +4487,7 @@ gc_free_heap(void)
 
     if (verify_after_free_heap) {
         /* Check whether purify has left any bad pointers. */
-        if (gencgc_verbose)
-            SHOW("checking after free_heap\n");
+        FSHOW((stderr, "checking after free_heap\n"));
         verify_gc();
     }
 }
@@ -4463,7 +4500,7 @@ gc_init(void)
 
     /* Compute the number of pages needed for the dynamic space.
      * Dynamic space size should be aligned on page size.
*/ page_table_pages = dynamic_space_size/PAGE_BYTES; - gc_assert(dynamic_space_size == (size_t) page_table_pages*PAGE_BYTES); + gc_assert(dynamic_space_size == npage_bytes(page_table_pages)); page_table = calloc(page_table_pages, sizeof(struct page)); gc_assert(page_table); @@ -4528,7 +4565,7 @@ static void gencgc_pickup_dynamic(void) { page_index_t page = 0; - long alloc_ptr = get_alloc_pointer(); + void *alloc_ptr = (void *)get_alloc_pointer(); lispobj *prev=(lispobj *)page_address(page); generation_index_t gen = PSEUDO_STATIC_GENERATION; @@ -4546,11 +4583,11 @@ gencgc_pickup_dynamic(void) if (!gencgc_partial_pickup) { first=gc_search_space(prev,(ptr+2)-prev,ptr); if(ptr == first) prev=ptr; - page_table[page].first_object_offset = - (void *)prev - page_address(page); + page_table[page].region_start_offset = + page_address(page) - (void *)prev; } page++; - } while ((long)page_address(page) < alloc_ptr); + } while (page_address(page) < alloc_ptr); #ifdef LUTEX_WIDETAG /* Lutexes have been registered in generation 0 by coreparse, and @@ -4561,8 +4598,8 @@ gencgc_pickup_dynamic(void) last_free_page = page; - generations[gen].bytes_allocated = PAGE_BYTES*page; - bytes_allocated = PAGE_BYTES*page; + generations[gen].bytes_allocated = npage_bytes(page); + bytes_allocated = npage_bytes(page); gc_alloc_update_all_page_tables(); write_protect_generation_pages(gen); @@ -4589,7 +4626,7 @@ gc_initialize_pointers(void) * The check for a GC trigger is only performed when the current * region is full, so in most cases it's not needed. */ -char * +lispobj * alloc(long nbytes) { struct thread *thread=arch_os_get_current_thread(); @@ -4661,6 +4698,7 @@ alloc(long nbytes) alloc_signal = SymbolValue(ALLOC_SIGNAL,thread); if ((alloc_signal & FIXNUM_TAG_MASK) == 0) { if ((signed long) alloc_signal <= 0) { + SetSymbolValue(ALLOC_SIGNAL, T, thread); #ifdef LISP_FEATURE_SB_THREAD kill_thread_safely(thread->os_thread, SIGPROF); #else @@ -4729,7 +4767,8 @@ gencgc_handle_wp_violation(void* fault_addr) */ if(page_table[page_index].write_protected_cleared != 1) lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n", - page_index, boxed_region.first_page, boxed_region.last_page); + page_index, boxed_region.first_page, + boxed_region.last_page); } /* Don't worry, we can handle it. */ return 1;
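
The bulk of this patch replaces the signed first_object_offset (stored as
start_addr - page_address(i), i.e. zero or negative) with an unsigned
region_start_offset that counts bytes back from a page to the start of its
allocation region, recovered via the new page_region_start(). The following
is a minimal standalone C sketch of that invariant, not SBCL source: the
names mirror gencgc.c, but PAGE_BYTES, the page struct, and the helper
signatures are simplified assumptions for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BYTES 4096
#define NPAGES 8

struct page { size_t region_start_offset; };   /* unsigned, unlike before */

static char heap[NPAGES * PAGE_BYTES];
static char *heap_base = heap;
static struct page page_table[NPAGES];

static char *page_address(ptrdiff_t i) { return heap_base + i * PAGE_BYTES; }

/* void_diff(): checked unsigned distance between two addresses,
 * as in the patch. */
static size_t void_diff(void *x, void *y)
{
    assert((uintptr_t)x >= (uintptr_t)y);
    return (uintptr_t)x - (uintptr_t)y;
}

/* page_region_start(): subtract the offset to get back to the region. */
static char *page_region_start(ptrdiff_t i)
{
    return page_address(i) - page_table[i].region_start_offset;
}

int main(void)
{
    /* Open a region on a page boundary spanning pages 2..4. */
    char *start = page_address(2);
    for (ptrdiff_t i = 2; i <= 4; i++)
        page_table[i].region_start_offset = void_diff(page_address(i), start);

    /* Every page of the region maps back to the same start address. */
    for (ptrdiff_t i = 2; i <= 4; i++)
        assert(page_region_start(i) == start);
    printf("offset on page 4 = %zu bytes\n",
           page_table[4].region_start_offset);
    return 0;
}

Because the offset is now an unsigned distance rather than a negative
delta, the asserts in copy_large_object and friends lose their old
negations, which is why so many hunks above turn
-PAGE_BYTES*(next_page-first_page) into npage_bytes(next_page-first_page).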
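The rewritten scav_weak_pointer drops the O(n) duplicate search over the
weak_pointers list in favor of a sentinel discipline: next == NULL now means
"not yet on the list", and the last element points back to itself so that
NULL never doubles as a list terminator. A runnable sketch of just that
discipline (illustrative only, not SBCL source):

#include <stdio.h>
#include <stddef.h>

struct weak_pointer { struct weak_pointer *next; };

static struct weak_pointer *weak_pointers;   /* list head, NULL if empty */

static void push_once(struct weak_pointer *wp)
{
    if (wp->next == NULL) {          /* not on the list yet */
        wp->next = weak_pointers;
        weak_pointers = wp;
        if (wp->next == NULL)        /* first element: terminate with a */
            wp->next = wp;           /* self-pointer instead of NULL    */
    }
}

int main(void)
{
    struct weak_pointer a = {NULL}, b = {NULL};
    push_once(&a);
    push_once(&a);                   /* duplicate push is a no-op */
    push_once(&b);
    /* Walk the list; the self-pointer marks the end. */
    for (struct weak_pointer *wp = weak_pointers; ; wp = wp->next) {
        printf("visit %p\n", (void *)wp);
        if (wp->next == wp)
            break;
    }
    return 0;
}

Whatever consumes the list afterwards (scan_weak_pointers in SBCL, not shown
in this diff) must reset next to NULL as it unlinks each entry, or the next
collection would treat every weak pointer as already pushed.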