X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=5bd67c80c2df5f05268bb663e445be6cb8bf46a0;hb=4ec0d70e08ea4b512d45ddbd6c82e8f6a91a914f;hp=efecbd3673c107c697d0c39a3a1cbf8ba131e4b9;hpb=60a3ae22e275749044e255948320397fe96c254e;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index efecbd3..5bd67c8 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -41,6 +41,7 @@ #include "gc.h" #include "gc-internal.h" #include "thread.h" +#include "pseudo-atomic.h" #include "alloc.h" #include "genesis/vector.h" #include "genesis/weak-pointer.h" @@ -68,9 +69,7 @@ page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes, * scratch space by the collector, and should never get collected. */ enum { - HIGHEST_NORMAL_GENERATION = 5, - PSEUDO_STATIC_GENERATION, - SCRATCH_GENERATION, + SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1, NUM_GENERATIONS }; @@ -88,7 +87,7 @@ long large_object_size = 4 * PAGE_BYTES; /* the verbosity level. All non-error messages are disabled at level 0; * and only a few rare messages are printed at level 1. */ -#ifdef QSHOW +#if QSHOW boolean gencgc_verbose = 1; #else boolean gencgc_verbose = 0; @@ -165,6 +164,48 @@ static boolean conservative_stack = 1; page_index_t page_table_pages; struct page *page_table; +static inline boolean page_allocated_p(page_index_t page) { + return (page_table[page].allocated != FREE_PAGE_FLAG); +} + +static inline boolean page_no_region_p(page_index_t page) { + return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG); +} + +static inline boolean page_allocated_no_region_p(page_index_t page) { + return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG)) + && page_no_region_p(page)); +} + +static inline boolean page_free_p(page_index_t page) { + return (page_table[page].allocated == FREE_PAGE_FLAG); +} + +static inline boolean page_boxed_p(page_index_t page) { + return (page_table[page].allocated & BOXED_PAGE_FLAG); +} + +static inline boolean code_page_p(page_index_t page) { + return (page_table[page].allocated & CODE_PAGE_FLAG); +} + +static inline boolean page_boxed_no_region_p(page_index_t page) { + return page_boxed_p(page) && page_no_region_p(page); +} + +static inline boolean page_unboxed_p(page_index_t page) { + /* Both flags set == boxed code page */ + return ((page_table[page].allocated & UNBOXED_PAGE_FLAG) + && !page_boxed_p(page)); +} + +static inline boolean protect_page_p(page_index_t page, generation_index_t generation) { + return (page_boxed_no_region_p(page) + && (page_table[page].bytes_used != 0) + && !page_table[page].dont_move + && (page_table[page].gen == generation)); +} + /* To map addresses to page structures the address of the first page * is needed. */ static void *heap_base = NULL; @@ -214,7 +255,12 @@ size_t void_diff(void *x, void *y) return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y; } -/* a structure to hold the state of a generation */ +/* a structure to hold the state of a generation + * + * CAUTION: If you modify this, make sure to touch up the alien + * definition in src/code/gc.lisp accordingly. ...or better yes, + * deal with the FIXME there... 
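
The inline predicates introduced above exist because page_table[].allocated is a bitmask, not an enumeration: open-region and code pages carry extra bits, so an equality test against BOXED_PAGE_FLAG quietly misses them (which is why several gc_asserts later in this patch change from == to &). A minimal standalone model of the idea; the flag values and the trimmed-down page struct are assumptions for illustration, the real definitions live in the gencgc headers:

#include <stdio.h>

/* Assumed flag values; the real ones are defined by the GC headers. */
enum {
    FREE_PAGE_FLAG        = 0,
    BOXED_PAGE_FLAG       = 1,
    UNBOXED_PAGE_FLAG     = 2,
    OPEN_REGION_PAGE_FLAG = 4,
    CODE_PAGE_FLAG        = BOXED_PAGE_FLAG | UNBOXED_PAGE_FLAG
};

struct demo_page { unsigned char allocated; };

/* Bitmask test: also true for open-region and code pages that carry
 * the boxed bit, which an equality test would miss. */
static int demo_page_boxed_p(const struct demo_page *p)
{
    return (p->allocated & BOXED_PAGE_FLAG) != 0;
}

int main(void)
{
    struct demo_page open_boxed = { BOXED_PAGE_FLAG | OPEN_REGION_PAGE_FLAG };
    printf("mask test: %d, equality test: %d\n",
           demo_page_boxed_p(&open_boxed),
           open_boxed.allocated == BOXED_PAGE_FLAG);   /* 1 vs. 0 */
    return 0;
}
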
+ */ struct generation { /* the first page that gc_alloc() checks on its next call */ @@ -244,9 +290,9 @@ struct generation { /* the number of GCs since the last raise */ int num_gc; - /* the average age after which a GC will raise objects to the + /* the number of GCs to run on the generations before raising objects to the * next generation */ - int trigger_age; + int number_of_gcs_before_promotion; /* the cumulative sum of the bytes allocated to this generation. It is * cleared after a GC on this generations, and update before new @@ -258,7 +304,7 @@ struct generation { /* a minimum average memory age before a GC will occur helps * prevent a GC when a large number of new live objects have been * added, in which case a GC could be a waste of time */ - double min_av_mem_age; + double minimum_age_before_gc; /* A linked list of lutex structures in this generation, used for * implementing lutex finalization. */ @@ -294,15 +340,16 @@ generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION; * integrated with the Lisp code. */ page_index_t last_free_page; +#ifdef LISP_FEATURE_SB_THREAD /* This lock is to prevent multiple threads from simultaneously * allocating new regions which overlap each other. Note that the * majority of GC is single-threaded, but alloc() may be called from * >1 thread at a time and must be thread-safe. This lock must be * seized before all accesses to generations[] or to parts of * page_table[] that other threads may want to see */ - -#ifdef LISP_FEATURE_SB_THREAD static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER; +/* This lock is used to protect non-thread-local allocation. */ +static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER; #endif @@ -319,7 +366,7 @@ count_write_protect_generation_pages(generation_index_t generation) unsigned long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == generation) && (page_table[i].write_protected == 1)) count++; @@ -334,20 +381,20 @@ count_generation_pages(generation_index_t generation) long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == generation)) count++; return count; } -#ifdef QSHOW +#if QSHOW static long count_dont_move_pages(void) { page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].dont_move != 0)) { ++count; } @@ -364,7 +411,7 @@ count_generation_bytes_allocated (generation_index_t gen) page_index_t i; unsigned long result = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == gen)) result += page_table[i].bytes_used; } @@ -372,8 +419,8 @@ count_generation_bytes_allocated (generation_index_t gen) } /* Return the average age of the memory in a generation. */ -static double -gen_av_mem_age(generation_index_t gen) +extern double +generation_average_age(generation_index_t gen) { if (generations[gen].bytes_allocated == 0) return 0.0; @@ -385,10 +432,10 @@ gen_av_mem_age(generation_index_t gen) /* The verbose argument controls how much to print: 0 for normal * level of detail; 1 for debugging. 
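
The two renamed tuning fields read much better at their use sites in collect_garbage: survivors are raised after number_of_gcs_before_promotion collections, and a generation is only collected once its average age (cumulative allocation-weighted sum divided by bytes allocated, as computed by generation_average_age above) exceeds minimum_age_before_gc. A self-contained sketch of that policy, with field types simplified:

#include <stdio.h>

struct demo_generation {
    unsigned long bytes_allocated;
    /* allocation-weighted age sum; divided by bytes_allocated it
     * yields the average number of GCs the data has survived */
    unsigned long cum_sum_bytes_allocated;
    unsigned long gc_trigger;
    int num_gc;                          /* GCs since the last raise */
    int number_of_gcs_before_promotion;
    double minimum_age_before_gc;
};

static double demo_average_age(const struct demo_generation *g)
{
    return g->bytes_allocated
        ? (double)g->cum_sum_bytes_allocated / (double)g->bytes_allocated
        : 0.0;
}

/* Raise survivors once the generation has been collected often enough. */
static int demo_should_raise(const struct demo_generation *g)
{
    return g->num_gc >= g->number_of_gcs_before_promotion;
}

/* Collect only when enough has been allocated and it is old enough on
 * average; fresh data is likely to die without help. */
static int demo_should_collect(const struct demo_generation *g)
{
    return g->bytes_allocated > g->gc_trigger
        && demo_average_age(g) > g->minimum_age_before_gc;
}

int main(void)
{
    struct demo_generation g = { 3000000, 2700000, 2000000, 1, 1, 0.75 };
    printf("age=%.2f raise=%d collect=%d\n",
           demo_average_age(&g), demo_should_raise(&g),
           demo_should_collect(&g));
    return 0;
}
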
*/ -static void -print_generation_stats(int verbose) /* FIXME: should take FILE argument */ +extern void +print_generation_stats() /* FIXME: should take FILE argument, or construct a string */ { - generation_index_t i, gens; + generation_index_t i; #if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) #define FPU_STATE_SIZE 27 @@ -402,17 +449,11 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ * so they need to be saved and reset for C. */ fpu_save(fpu_state); - /* highest generation to print */ - if (verbose) - gens = SCRATCH_GENERATION; - else - gens = PSEUDO_STATIC_GENERATION; - /* Print the heap stats. */ fprintf(stderr, " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n"); - for (i = 0; i < gens; i++) { + for (i = 0; i < SCRATCH_GENERATION; i++) { page_index_t j; long boxed_cnt = 0; long unboxed_cnt = 0; @@ -425,7 +466,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Count the number of boxed pages within the given * generation. */ - if (page_table[j].allocated & BOXED_PAGE_FLAG) { + if (page_boxed_p(j)) { if (page_table[j].large_object) large_boxed_cnt++; else @@ -434,7 +475,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ if(page_table[j].dont_move) pinned_cnt++; /* Count the number of unboxed pages within the given * generation. */ - if (page_table[j].allocated & UNBOXED_PAGE_FLAG) { + if (page_unboxed_p(j)) { if (page_table[j].large_object) large_unboxed_cnt++; else @@ -462,9 +503,10 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ generations[i].gc_trigger, count_write_protect_generation_pages(i), generations[i].num_gc, - gen_av_mem_age(i)); + generation_average_age(i)); } - fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated); + fprintf(stderr," Total bytes allocated = %lu\n", bytes_allocated); + fprintf(stderr," Dynamic-space-size bytes = %u\n", dynamic_space_size); fpu_restore(fpu_state); } @@ -589,6 +631,55 @@ struct alloc_region unboxed_region; /* The generation currently being allocated to. */ static generation_index_t gc_alloc_generation; +static inline page_index_t +generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_large_unboxed_start_page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + return generations[generation].alloc_large_start_page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_unboxed_start_page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + return generations[generation].alloc_start_page; + } else { + lose("bad page_type_flag: %d", page_type_flag); + } + } +} + +static inline void +set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large, + page_index_t page) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_large_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. 
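
generation_alloc_start_page and the setter being defined here fold four near-identical per-generation fields behind one (page_type_flag, large) dispatch, replacing duplicated if/else chains at the call sites below. A hypothetical table-driven equivalent makes the 2x2 structure explicit; the array layout is purely an assumption for illustration, not how struct generation is actually declared:

#include <stdio.h>

typedef long demo_page_index_t;

/* Hypothetical layout: one 2x2 array instead of four named fields. */
struct demo_generation_pages {
    demo_page_index_t start_page[2][2];   /* [large][unboxed] */
};

static demo_page_index_t *
demo_start_page_slot(struct demo_generation_pages *g, int unboxed, int large)
{
    return &g->start_page[!!large][!!unboxed];
}

int main(void)
{
    struct demo_generation_pages g = { { { 0, 0 }, { 0, 0 } } };
    *demo_start_page_slot(&g, 1, 0) = 42;    /* small unboxed start page */
    printf("%ld\n", *demo_start_page_slot(&g, 1, 0));
    return 0;
}
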
*/ + generations[generation].alloc_large_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ + generations[generation].alloc_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } +} + /* Find a new region with room for at least the given number of bytes. * * It starts looking at the current generation's alloc_start_page. So @@ -633,15 +724,7 @@ gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_ && (alloc_region->free_pointer == alloc_region->end_addr)); ret = thread_mutex_lock(&free_pages_lock); gc_assert(ret == 0); - if (UNBOXED_PAGE_FLAG == page_type_flag) { - first_page = - generations[gc_alloc_generation].alloc_unboxed_start_page; - } else if (BOXED_PAGE_FLAG == page_type_flag) { - first_page = - generations[gc_alloc_generation].alloc_start_page; - } else { - lose("bad page_type_flag: %d", page_type_flag); - } + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0); last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag); bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) + npage_bytes(last_page-first_page); @@ -862,7 +945,7 @@ gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_regio gc_assert(page_table[first_page].region_start_offset == 0); page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - gc_assert(page_table[first_page].allocated == page_type_flag); + gc_assert(page_table[first_page].allocated & page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); @@ -886,7 +969,7 @@ gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_regio * region, and set the bytes_used. */ while (more) { page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - gc_assert(page_table[next_page].allocated==page_type_flag); + gc_assert(page_table[next_page].allocated & page_type_flag); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); @@ -917,16 +1000,11 @@ gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_regio /* Set the generations alloc restart page to the last page of * the region. */ - if (UNBOXED_PAGE_FLAG == page_type_flag) { - generations[gc_alloc_generation].alloc_unboxed_start_page = - next_page-1; - } else if (BOXED_PAGE_FLAG == page_type_flag) { - generations[gc_alloc_generation].alloc_start_page = next_page-1; - /* Add the region to the new_areas if requested. */ + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1); + + /* Add the region to the new_areas if requested. 
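
When gc_alloc_update_page_tables closes a region it walks the region's pages charging bytes_used: the first page may already be partly used, interior pages become full, and the final page takes the remainder. A standalone sketch of that accounting, with an illustrative PAGE_BYTES value:

#include <stdio.h>

#define DEMO_PAGE_BYTES 4096UL

static void demo_account_region(unsigned long region_bytes,
                                unsigned long first_page_used)
{
    unsigned long page = 0, charged = 0;
    unsigned long remaining = first_page_used + region_bytes;
    while (remaining > DEMO_PAGE_BYTES) {       /* full pages */
        printf("page %lu: %lu bytes\n", page, DEMO_PAGE_BYTES);
        charged += DEMO_PAGE_BYTES;
        remaining -= DEMO_PAGE_BYTES;
        page++;
    }
    printf("page %lu: %lu bytes (tail)\n", page, remaining);
    charged += remaining;
    /* only the newly used part is charged to the generation */
    printf("charged: %lu (== region_bytes)\n", charged - first_page_used);
}

int main(void)
{
    demo_account_region(10000, 100);
    return 0;
}
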
*/ + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used, region_size); - } else { - lose("bad page type flag: %d", page_type_flag); - } /* FSHOW((stderr, @@ -966,22 +1044,14 @@ gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_regio int orig_first_page_bytes_used; long byte_cnt; int more; - long bytes_used; + unsigned long bytes_used; page_index_t next_page; int ret; ret = thread_mutex_lock(&free_pages_lock); gc_assert(ret == 0); - if (UNBOXED_PAGE_FLAG == page_type_flag) { - first_page = - generations[gc_alloc_generation].alloc_large_unboxed_start_page; - } else if (BOXED_PAGE_FLAG == page_type_flag) { - first_page = - generations[gc_alloc_generation].alloc_large_start_page; - } else { - lose("bad page type flag: %d", page_type_flag); - } + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1); if (first_page <= alloc_region->last_page) { first_page = alloc_region->last_page+1; } @@ -989,11 +1059,8 @@ gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_regio last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag); gc_assert(first_page > alloc_region->last_page); - if (UNBOXED_PAGE_FLAG == page_type_flag) - generations[gc_alloc_generation].alloc_large_unboxed_start_page = - last_page; - else - generations[gc_alloc_generation].alloc_large_start_page = last_page; + + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page); /* Set up the pages. */ orig_first_page_bytes_used = page_table[first_page].bytes_used; @@ -1029,7 +1096,7 @@ gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_regio * region_start_offset pointer to the start of the region, and set * the bytes_used. */ while (more) { - gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); + gc_assert(page_free_p(next_page)); gc_assert(page_table[next_page].bytes_used == 0); page_table[next_page].allocated = page_type_flag; page_table[next_page].gen = gc_alloc_generation; @@ -1058,7 +1125,7 @@ gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_regio generations[gc_alloc_generation].bytes_allocated += nbytes; /* Add the region to the new_areas if requested. */ - if (BOXED_PAGE_FLAG == page_type_flag) + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used,nbytes); /* Bump up last_free_page */ @@ -1085,6 +1152,7 @@ static page_index_t gencgc_alloc_start_page = -1; void gc_heap_exhausted_error_or_lose (long available, long requested) { + struct thread *thread = arch_os_get_current_thread(); /* Write basic information before doing anything else: if we don't * call to lisp this is a must, and even if we do there is always * the danger that we bounce back here before the error has been @@ -1097,12 +1165,13 @@ gc_heap_exhausted_error_or_lose (long available, long requested) /* If we are in GC, or totally out of memory there is no way * to sanely transfer control to the lisp-side of things. */ - struct thread *thread = arch_os_get_current_thread(); - print_generation_stats(1); + print_generation_stats(); fprintf(stderr, "GC control variables:\n"); fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n", SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true", - SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true"); + (SymbolValue(GC_PENDING, thread) == T) ? + "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ? 
+ "false" : "in progress")); #ifdef LISP_FEATURE_SB_THREAD fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n", SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true"); @@ -1112,6 +1181,18 @@ gc_heap_exhausted_error_or_lose (long available, long requested) else { /* FIXME: assert free_pages_lock held */ (void)thread_mutex_unlock(&free_pages_lock); + gc_assert(get_pseudo_atomic_atomic(thread)); + clear_pseudo_atomic_atomic(thread); + if (get_pseudo_atomic_interrupted(thread)) + do_pending_interrupt(); + /* Another issue is that signalling HEAP-EXHAUSTED error leads + * to running user code at arbitrary places, even in a + * WITHOUT-INTERRUPTS which may lead to a deadlock without + * running out of the heap. So at this point all bets are + * off. */ + if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) + corruption_warning_and_maybe_lose + ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS."); funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), alloc_number(available), alloc_number(requested)); lose("HEAP-EXHAUSTED-ERROR fell through"); @@ -1119,7 +1200,8 @@ gc_heap_exhausted_error_or_lose (long available, long requested) } page_index_t -gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag) +gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, + int page_type_flag) { page_index_t first_page, last_page; page_index_t restart_page = *restart_page_ptr; @@ -1132,7 +1214,8 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type restart_page = gencgc_alloc_start_page; } - if (nbytes>=PAGE_BYTES) { + gc_assert(nbytes>=0); + if (((unsigned long)nbytes)>=PAGE_BYTES) { /* Search for a contiguous free space of at least nbytes, * aligned on a page boundary. The page-alignment is strictly * speaking needed only for objects at least large_object_size @@ -1140,14 +1223,14 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type do { first_page = restart_page; while ((first_page < page_table_pages) && - (page_table[first_page].allocated != FREE_PAGE_FLAG)) + page_allocated_p(first_page)) first_page++; last_page = first_page; bytes_found = PAGE_BYTES; while ((bytes_found < nbytes) && (last_page < (page_table_pages-1)) && - (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { + page_free_p(last_page+1)) { last_page++; bytes_found += PAGE_BYTES; gc_assert(0 == page_table[last_page].bytes_used); @@ -1165,7 +1248,7 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type * pages: this helps avoid excessive conservativism. 
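
For allocations of PAGE_BYTES or more, gc_find_freeish_pages above performs a first-fit scan for a run of contiguous free pages, restarting after each too-short run and signalling heap exhaustion if none is found. A standalone sketch of that search over a toy free/used array:

#include <stdio.h>

static long demo_find_contiguous(const int *free_p, long npages,
                                 long pages_needed, long restart)
{
    long first = restart;
    while (first < npages) {
        while (first < npages && !free_p[first])   /* skip used pages */
            first++;
        if (first >= npages)
            break;
        long last = first;
        while (last + 1 < npages && free_p[last + 1]
               && (last - first + 1) < pages_needed)
            last++;                                /* grow the free run */
        if (last - first + 1 >= pages_needed)
            return first;                          /* big enough: done */
        first = last + 1;                          /* restart past the gap */
    }
    return -1;                  /* caller reports heap exhaustion */
}

int main(void)
{
    int free_p[] = { 0, 1, 1, 0, 1, 1, 1, 0 };
    printf("run starts at page %ld\n",
           demo_find_contiguous(free_p, 8, 3, 0)); /* prints 4 */
    return 0;
}
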
*/ first_page = restart_page; while (first_page < page_table_pages) { - if (page_table[first_page].allocated == FREE_PAGE_FLAG) + if (page_free_p(first_page)) { gc_assert(0 == page_table[first_page].bytes_used); bytes_found = PAGE_BYTES; @@ -1324,7 +1407,7 @@ copy_large_object(lispobj object, long nwords) remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_boxed_p(next_page)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].region_start_offset == npage_bytes(next_page-first_page)); @@ -1349,7 +1432,7 @@ copy_large_object(lispobj object, long nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_boxed_p(next_page)); /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1361,7 +1444,7 @@ copy_large_object(lispobj object, long nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - (page_table[next_page].allocated == BOXED_PAGE_FLAG) && + page_boxed_p(next_page) && page_table[next_page].large_object && (page_table[next_page].region_start_offset == npage_bytes(next_page - first_page))) { @@ -1446,9 +1529,10 @@ copy_large_unboxed_object(lispobj object, long nwords) gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); - if ((nwords > 1024*1024) && gencgc_verbose) + if ((nwords > 1024*1024) && gencgc_verbose) { FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); + } /* Check whether it's a large object. */ first_page = find_page_index((void *)object); @@ -1469,8 +1553,7 @@ copy_large_unboxed_object(lispobj object, long nwords) remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); + gc_assert(page_allocated_no_region_p(next_page)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].region_start_offset == npage_bytes(next_page-first_page)); @@ -1501,8 +1584,7 @@ copy_large_unboxed_object(lispobj object, long nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && + page_allocated_no_region_p(next_page) && page_table[next_page].large_object && (page_table[next_page].region_start_offset == npage_bytes(next_page - first_page))) { @@ -1519,10 +1601,11 @@ copy_large_unboxed_object(lispobj object, long nwords) next_page++; } - if ((bytes_freed > 0) && gencgc_verbose) + if ((bytes_freed > 0) && gencgc_verbose) { FSHOW((stderr, "/copy_large_unboxed bytes_freed=%d\n", bytes_freed)); + } generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; @@ -1598,7 +1681,7 @@ sniff_code_object(struct code *code, unsigned long displacement) unsigned d2 = *((unsigned char *)p - 2); unsigned d3 = *((unsigned char *)p - 3); unsigned d4 = *((unsigned char *)p - 4); -#ifdef QSHOW +#if QSHOW unsigned d5 = *((unsigned char *)p - 5); unsigned d6 = *((unsigned char *)p - 6); #endif @@ -2127,8 +2210,7 @@ search_dynamic_space(void *pointer) lispobj *start; /* The address 
may be invalid, so do some checks. */ - if ((page_index == -1) || - (page_table[page_index].allocated == FREE_PAGE_FLAG)) + if ((page_index == -1) || page_free_p(page_index)) return NULL; start = (lispobj *)page_region_start(page_index); return (gc_search_space(start, @@ -2147,13 +2229,6 @@ search_dynamic_space(void *pointer) static int looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) { - /* We need to allow raw pointers into Code objects for return - * addresses. This will also pick up pointers to functions in code - * objects. */ - if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) - /* XXX could do some further checks here */ - return 1; - if (!is_lisp_pointer((lispobj)pointer)) { return 0; } @@ -2172,28 +2247,31 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: if ((unsigned long)pointer != ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wf2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wf3: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; case LIST_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wl1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } /* Is it plausible cons? */ @@ -2203,44 +2281,49 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) is_lisp_immediate(start_addr[1]))) break; else { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wl2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } case INSTANCE_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wi1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wi2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; case OTHER_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } /* Is it plausible? Not a cons. XXX should check the headers. */ if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } switch (widetag_of(start_addr[0])) { @@ -2250,26 +2333,29 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) #if N_WORD_BITS == 64 case SINGLE_FLOAT_WIDETAG: #endif - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; /* only pointed to by function pointers? 
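
Each arm of the switch above applies the same basic test: a conservative root is only plausible if its low tag bits match the lowtag implied by the object found at start_addr, i.e. pointer == start_addr + lowtag. A minimal sketch; the tag values follow the 32-bit x86 layout and should be treated as illustrative:

#include <stdio.h>

/* 32-bit x86 values, for illustration only */
#define DEMO_LOWTAG_MASK         7UL
#define DEMO_LIST_POINTER_LOWTAG 3UL

/* Plausible only if the tag bits match and the untagged address is
 * exactly the object start the space search returned. */
static int demo_plausible_tagged_pointer(unsigned long pointer,
                                         unsigned long start_addr,
                                         unsigned long expected_lowtag)
{
    return (pointer & DEMO_LOWTAG_MASK) == expected_lowtag
        && pointer == start_addr + expected_lowtag;
}

int main(void)
{
    unsigned long obj = 0x100000;   /* 8-byte aligned object start */
    printf("%d %d\n",
           demo_plausible_tagged_pointer(obj + 3, obj,
                                         DEMO_LIST_POINTER_LOWTAG),  /* 1 */
           demo_plausible_tagged_pointer(obj + 4, obj,
                                         DEMO_LIST_POINTER_LOWTAG)); /* 0 */
    return 0;
}
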
*/ case CLOSURE_HEADER_WIDETAG: case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; case INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; /* the valid other immediate pointer objects */ @@ -2372,18 +2458,20 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo6: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } @@ -2540,8 +2628,7 @@ maybe_adjust_large_object(lispobj *where) remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG) - || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG)); + gc_assert(page_allocated_no_region_p(next_page)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].region_start_offset == npage_bytes(next_page-first_page)); @@ -2576,8 +2663,7 @@ maybe_adjust_large_object(lispobj *where) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && + page_allocated_no_region_p(next_page) && page_table[next_page].large_object && (page_table[next_page].region_start_offset == npage_bytes(next_page - first_page))) { @@ -2629,7 +2715,7 @@ preserve_pointer(void *addr) /* quick check 1: Address is quite likely to have been invalid. */ if ((addr_page_index == -1) - || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG) + || page_free_p(addr_page_index) || (page_table[addr_page_index].bytes_used == 0) || (page_table[addr_page_index].gen != from_space) /* Skip if already marked dont_move. */ @@ -2653,7 +2739,9 @@ preserve_pointer(void *addr) * expensive but important, since it vastly reduces the * probability that random garbage will be bogusly interpreted as * a pointer which prevents a page from moving. */ - if (!(possibly_valid_dynamic_space_pointer(addr))) + if (!(code_page_p(addr_page_index) + || (is_lisp_pointer((lispobj)addr) && + possibly_valid_dynamic_space_pointer(addr)))) return; /* Find the beginning of the region. Note that there may be @@ -2684,7 +2772,7 @@ preserve_pointer(void *addr) * free area in which case it's ignored here. Note it gets * through the valid pointer test above because the tail looks * like conses. */ - if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG) + if (page_free_p(addr_page_index) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ || (((unsigned long)addr & (PAGE_BYTES - 1)) @@ -2723,7 +2811,7 @@ preserve_pointer(void *addr) /* Check whether this is the last page in this contiguous block.. */ if ((page_table[i].bytes_used < PAGE_BYTES) /* ..or it is PAGE_BYTES and is the last in the block */ - || (page_table[i+1].allocated == FREE_PAGE_FLAG) + || page_free_p(i+1) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. 
gen */ || (page_table[i+1].region_start_offset == 0)) @@ -2760,14 +2848,14 @@ update_page_write_prot(page_index_t page) long num_words = page_table[page].bytes_used / N_WORD_BYTES; /* Shouldn't be a free page. */ - gc_assert(page_table[page].allocated != FREE_PAGE_FLAG); + gc_assert(page_allocated_p(page)); gc_assert(page_table[page].bytes_used != 0); /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected /* FIXME: What's the reason for not write-protecting pinned pages? */ || page_table[page].dont_move - || (page_table[page].allocated & UNBOXED_PAGE_FLAG)) + || page_unboxed_p(page)) return (0); /* Scan the page for pointers to younger generations or the @@ -2780,7 +2868,7 @@ update_page_write_prot(page_index_t page) /* Check that it's in the dynamic space */ if (index != -1) if (/* Does it point to a younger or the temp. generation? */ - ((page_table[index].allocated != FREE_PAGE_FLAG) + (page_allocated_p(index) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) || (page_table[index].gen == SCRATCH_GENERATION))) @@ -2854,7 +2942,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) for (i = 0; i < last_free_page; i++) { generation_index_t generation = page_table[i].gen; - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (generation != new_space) && (generation >= from) @@ -2871,7 +2959,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) write_protected && page_table[last_page].write_protected; if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) + || (!page_boxed_p(last_page+1)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].region_start_offset == 0)) @@ -2904,7 +2992,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < page_table_pages; i++) { - if ((page_table[i].allocation != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0)) { @@ -2958,7 +3046,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) generation)); for (i = 0; i < last_free_page; i++) { /* Note that this skips over open regions when it encounters them. */ - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && ((page_table[i].write_protected == 0) @@ -2987,7 +3075,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) * contiguous block */ if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) + || (!page_boxed_p(last_page+1)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].region_start_offset == 0)) @@ -3087,8 +3175,9 @@ scavenge_newspace_generation(generation_index_t generation) /* New areas of objects allocated have been lost so need to do a * full scan to be sure! If this becomes a problem try * increasing NUM_NEW_AREAS. 
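
update_page_write_prot above implements the generational remembered set: a boxed page that contains no pointers to younger generations (or to the scratch generation) can be hardware write-protected, letting later scavenges skip it until a protection fault proves it was written. A simplified sketch of that page scan, with the generation lookup and page geometry stubbed:

#include <stdio.h>

#define DEMO_WORDS_PER_PAGE 4

/* Returns the generation a word points into, or -1 if it is not a
 * heap pointer at all (grossly simplified). */
static int demo_gen_of(unsigned long word)
{
    return (word >= 0x1000 && word < 0x2000) ? 0 : -1;  /* toy gen-0 window */
}

static int demo_page_protectable_p(const unsigned long *words, int page_gen)
{
    int i;
    for (i = 0; i < DEMO_WORDS_PER_PAGE; i++) {
        int target = demo_gen_of(words[i]);
        if (target >= 0 && target < page_gen)
            return 0;    /* young pointer: page must stay scannable */
    }
    return 1;            /* no young pointers: safe to write-protect */
}

int main(void)
{
    unsigned long old_only[]  = { 42, 0x9000, 0x9008, 7 };
    unsigned long has_young[] = { 42, 0x1008, 0x9008, 7 };
    printf("%d %d\n",
           demo_page_protectable_p(old_only, 2),    /* 1 */
           demo_page_protectable_p(has_young, 2));  /* 0 */
    return 0;
}
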
*/ - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("new_areas overflow, doing full scavenge"); + } /* Don't need to record new areas that get scavenged * anyway during scavenge_newspace_generation_one_scan. */ @@ -3135,7 +3224,7 @@ scavenge_newspace_generation(generation_index_t generation) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < page_table_pages; i++) { - if ((page_table[i].allocation != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) @@ -3158,7 +3247,7 @@ unprotect_oldspace(void) page_index_t i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == from_space)) { void *page_start; @@ -3190,7 +3279,7 @@ free_oldspace(void) do { /* Find a first page for the next region of pages. */ while ((first_page < last_free_page) - && ((page_table[first_page].allocated == FREE_PAGE_FLAG) + && (page_free_p(first_page) || (page_table[first_page].bytes_used == 0) || (page_table[first_page].gen != from_space))) first_page++; @@ -3222,7 +3311,7 @@ free_oldspace(void) last_page++; } while ((last_page < last_free_page) - && (page_table[last_page].allocated != FREE_PAGE_FLAG) + && page_allocated_p(last_page) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); @@ -3293,7 +3382,7 @@ verify_space(lispobj *start, size_t words) if (page_index != -1) { /* If it's within the dynamic space it should point to a used * page. XX Could check the offset too. */ - if ((page_table[page_index].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(page_index) && (page_table[page_index].bytes_used == 0)) lose ("Ptr %x @ %x sees free page.\n", thing, start); /* Check that it doesn't point to a forwarding pointer! */ @@ -3558,7 +3647,7 @@ verify_generation(generation_index_t generation) page_index_t i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { page_index_t last_page; @@ -3600,7 +3689,7 @@ verify_zero_fill(void) page_index_t page; for (page = 0; page < last_free_page; page++) { - if (page_table[page].allocated == FREE_PAGE_FLAG) { + if (page_free_p(page)) { /* The whole page should be zero filled. 
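
The overflow fallback above works because area recording keeps incrementing the index past the table's capacity while storing nothing; the caller compares the final index against the capacity to decide whether the incremental new_areas list is still trustworthy or a full rescan is needed. A toy version of that bookkeeping, with illustrative sizes:

#include <stdio.h>

#define DEMO_NUM_NEW_AREAS 4

struct demo_new_area { long page, offset, size; };

static struct demo_new_area demo_new_areas[DEMO_NUM_NEW_AREAS];
static long demo_new_areas_index;

static void demo_add_new_area(long page, long offset, long size)
{
    if (demo_new_areas_index < DEMO_NUM_NEW_AREAS) {
        demo_new_areas[demo_new_areas_index].page   = page;
        demo_new_areas[demo_new_areas_index].offset = offset;
        demo_new_areas[demo_new_areas_index].size   = size;
    }
    /* keep counting past the end so overflow is detectable */
    demo_new_areas_index++;
}

int main(void)
{
    long i;
    for (i = 0; i < 6; i++)
        demo_add_new_area(i, 0, 128);
    if (demo_new_areas_index >= DEMO_NUM_NEW_AREAS)
        printf("new_areas overflow, doing full scavenge\n");
    return 0;
}
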
*/ long *start_addr = (long *)page_address(page); long size = 1024; @@ -3658,10 +3747,7 @@ write_protect_generation_pages(generation_index_t generation) gc_assert(generation < SCRATCH_GENERATION); for (start = 0; start < last_free_page; start++) { - if ((page_table[start].allocated == BOXED_PAGE_FLAG) - && (page_table[start].bytes_used != 0) - && !page_table[start].dont_move - && (page_table[start].gen == generation)) { + if (protect_page_p(start, generation)) { void *page_start; page_index_t last; @@ -3669,10 +3755,7 @@ write_protect_generation_pages(generation_index_t generation) page_table[start].write_protected = 1; for (last = start + 1; last < last_free_page; last++) { - if ((page_table[last].allocated != BOXED_PAGE_FLAG) - || (page_table[last].bytes_used == 0) - || page_table[last].dont_move - || (page_table[last].gen != generation)) + if (!protect_page_p(last, generation)) break; page_table[last].write_protected = 1; } @@ -4010,13 +4093,13 @@ garbage_collect_generation(generation_index_t generation, int raise) } #endif -#ifdef QSHOW +#if QSHOW if (gencgc_verbose > 1) { long num_dont_move_pages = count_dont_move_pages(); fprintf(stderr, "/non-movable pages due to conservative pointers = %d (%d bytes)\n", num_dont_move_pages, - npage_bytes(num_dont_move_pages); + npage_bytes(num_dont_move_pages)); } #endif @@ -4153,8 +4236,9 @@ garbage_collect_generation(generation_index_t generation, int raise) generations[generation].alloc_large_unboxed_start_page = 0; if (generation >= verify_gens) { - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("verifying"); + } verify_gc(); verify_dynamic_space(); } @@ -4183,8 +4267,7 @@ update_dynamic_space_free_pointer(void) page_index_t last_page = -1, i; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) - && (page_table[i].bytes_used != 0)) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0)) last_page = i; last_free_page = last_page+1; @@ -4199,15 +4282,15 @@ remap_free_pages (page_index_t from, page_index_t to) page_index_t first_page, last_page; for (first_page = from; first_page <= to; first_page++) { - if (page_table[first_page].allocated != FREE_PAGE_FLAG || - page_table[first_page].need_to_zero == 0) { + if (page_allocated_p(first_page) || + (page_table[first_page].need_to_zero == 0)) { continue; } last_page = first_page + 1; - while (page_table[last_page].allocated == FREE_PAGE_FLAG && - last_page < to && - page_table[last_page].need_to_zero == 1) { + while (page_free_p(last_page) && + (last_page < to) && + (page_table[last_page].need_to_zero == 1)) { last_page++; } @@ -4266,7 +4349,7 @@ collect_garbage(generation_index_t last_gen) } if (gencgc_verbose > 1) - print_generation_stats(0); + print_generation_stats(); do { /* Collect the generation. 
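
With protect_page_p factored out, write_protect_generation_pages can mark a whole run of consecutive protectable pages and then issue a single protection call for the run instead of one per page. A sketch of that batching pattern, with the predicate and page size stubbed:

#include <stdio.h>

#define DEMO_NPAGES 10

static const int demo_protectable[DEMO_NPAGES] =
    { 1, 1, 1, 0, 1, 0, 1, 1, 0, 0 };

static void demo_protect_runs(void)
{
    long start;
    for (start = 0; start < DEMO_NPAGES; start++) {
        if (!demo_protectable[start])
            continue;
        long last = start;
        while (last + 1 < DEMO_NPAGES && demo_protectable[last + 1])
            last++;
        /* one protection call covers the whole run [start, last] */
        printf("os_protect(pages %ld..%ld)\n", start, last);
        start = last;              /* resume scanning after the run */
    }
}

int main(void)
{
    demo_protect_runs();           /* pages 0..2, 4..4, 6..7 */
    return 0;
}
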
*/ @@ -4277,7 +4360,7 @@ collect_garbage(generation_index_t last_gen) } else { raise = (gen < last_gen) - || (generations[gen].num_gc >= generations[gen].trigger_age); + || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion); } if (gencgc_verbose > 1) { @@ -4304,7 +4387,7 @@ collect_garbage(generation_index_t last_gen) if (gencgc_verbose > 1) { FSHOW((stderr, "GC of generation %d finished:\n", gen)); - print_generation_stats(0); + print_generation_stats(); } gen++; @@ -4314,8 +4397,8 @@ collect_garbage(generation_index_t last_gen) && raise && (generations[gen].bytes_allocated > generations[gen].gc_trigger) - && (gen_av_mem_age(gen) - > generations[gen].min_av_mem_age)))); + && (generation_average_age(gen) + > generations[gen].minimum_age_before_gc)))); /* Now if gen-1 was raised all generations before gen are empty. * If it wasn't raised then all generations before gen-1 are empty. @@ -4383,12 +4466,13 @@ gc_free_heap(void) { page_index_t page; - if (gencgc_verbose > 1) + if (gencgc_verbose > 1) { SHOW("entering gc_free_heap"); + } for (page = 0; page < page_table_pages; page++) { /* Skip free pages which should already be zero filled. */ - if (page_table[page].allocated != FREE_PAGE_FLAG) { + if (page_allocated_p(page)) { void *page_start, *addr; /* Mark the page free. The other slots are assumed invalid @@ -4422,7 +4506,7 @@ gc_free_heap(void) /* Double-check that the page is zero filled. */ long *page_start; page_index_t i; - gc_assert(page_table[page].allocated == FREE_PAGE_FLAG); + gc_assert(page_free_p(page)); gc_assert(page_table[page].bytes_used == 0); page_start = (long *)page_address(page); for (i=0; i<1024; i++) { @@ -4449,7 +4533,7 @@ gc_free_heap(void) } if (gencgc_verbose > 1) - print_generation_stats(0); + print_generation_stats(); /* Initialize gc_alloc(). */ gc_alloc_generation = 0; @@ -4518,8 +4602,8 @@ gc_init(void) generations[i].cum_sum_bytes_allocated = 0; /* the tune-able parameters */ generations[i].bytes_consed_between_gc = 2000000; - generations[i].trigger_age = 1; - generations[i].min_av_mem_age = 0.75; + generations[i].number_of_gcs_before_promotion = 1; + generations[i].minimum_age_before_gc = 0.75; generations[i].lutexes = NULL; } @@ -4543,10 +4627,8 @@ gencgc_pickup_dynamic(void) void *alloc_ptr = (void *)get_alloc_pointer(); lispobj *prev=(lispobj *)page_address(page); generation_index_t gen = PSEUDO_STATIC_GENERATION; - do { lispobj *first,*ptr= (lispobj *)page_address(page); - page_table[page].allocated = BOXED_PAGE_FLAG; page_table[page].gen = gen; page_table[page].bytes_used = PAGE_BYTES; page_table[page].large_object = 0; @@ -4556,8 +4638,10 @@ gencgc_pickup_dynamic(void) page_table[page].need_to_zero = 1; if (!gencgc_partial_pickup) { + page_table[page].allocated = BOXED_PAGE_FLAG; first=gc_search_space(prev,(ptr+2)-prev,ptr); - if(ptr == first) prev=ptr; + if(ptr == first) + prev=ptr; page_table[page].region_start_offset = page_address(page) - (void *)prev; } @@ -4585,8 +4669,6 @@ gc_initialize_pointers(void) { gencgc_pickup_dynamic(); } - - /* alloc(..) is the external interface for memory allocation. It @@ -4601,16 +4683,10 @@ gc_initialize_pointers(void) * The check for a GC trigger is only performed when the current * region is full, so in most cases it's not needed. 
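
The fast path this comment refers to is a plain bump allocation within the current region; only when the region is exhausted does control reach the slow path that opens a new region and may note a pending GC. A simplified sketch, with types and the slow path stubbed:

#include <stdio.h>

struct demo_alloc_region { char *free_pointer, *end_addr; };

/* Stand-in for gc_alloc_with_region(): would open a fresh region and
 * possibly note a pending GC. */
static void *demo_slow_path(struct demo_alloc_region *r, long nbytes)
{
    (void)r; (void)nbytes;
    return NULL;
}

static void *demo_alloc(struct demo_alloc_region *r, long nbytes)
{
    char *new_free = r->free_pointer + nbytes;
    if (new_free <= r->end_addr) {     /* common case: room in region */
        void *obj = r->free_pointer;
        r->free_pointer = new_free;
        return obj;
    }
    return demo_slow_path(r, nbytes);  /* region exhausted */
}

int main(void)
{
    static char buf[64];
    struct demo_alloc_region r = { buf, buf + sizeof buf };
    printf("%p %p\n", demo_alloc(&r, 16), demo_alloc(&r, 16));
    return 0;
}
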
*/ -lispobj * -alloc(long nbytes) +static inline lispobj * +general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region, + struct thread *thread) { - struct thread *thread=arch_os_get_current_thread(); - struct alloc_region *region= -#ifdef LISP_FEATURE_SB_THREAD - thread ? &(thread->alloc_region) : &boxed_region; -#else - &boxed_region; -#endif #ifndef LISP_FEATURE_WIN32 lispobj alloc_signal; #endif @@ -4623,25 +4699,8 @@ alloc(long nbytes) gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0) && ((nbytes & LOWTAG_MASK) == 0)); -#if 0 - if(all_threads) - /* there are a few places in the C code that allocate data in the - * heap before Lisp starts. This is before interrupts are enabled, - * so we don't need to check for pseudo-atomic */ -#ifdef LISP_FEATURE_SB_THREAD - if(!get_psuedo_atomic_atomic(th)) { - register u32 fs; - fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n", - th,th->os_thread); - __asm__("movl %fs,%0" : "=r" (fs) : ); - fprintf(stderr, "fs is %x, th->tls_cookie=%x \n", - debug_get_fs(),th->tls_cookie); - lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); - } -#else - gc_assert(get_pseudo_atomic_atomic(th)); -#endif -#endif + /* Must be inside a PA section. */ + gc_assert(get_pseudo_atomic_atomic(thread)); /* maybe we can do this quickly ... */ new_free_pointer = region->free_pointer + nbytes; @@ -4651,11 +4710,10 @@ alloc(long nbytes) return(new_obj); /* yup */ } - /* we have to go the long way around, it seems. Check whether - * we should GC in the near future + /* we have to go the long way around, it seems. Check whether we + * should GC in the near future */ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - gc_assert(get_pseudo_atomic_atomic(thread)); /* Don't flood the system with interrupts if the need to gc is * already noted. This can happen for example when SUB-GC * allocates or after a gc triggered in a WITHOUT-GCING. */ @@ -4663,22 +4721,31 @@ alloc(long nbytes) /* set things up so that GC happens when we finish the PA * section */ SetSymbolValue(GC_PENDING,T,thread); - if (SymbolValue(GC_INHIBIT,thread) == NIL) - set_pseudo_atomic_interrupted(thread); + if (SymbolValue(GC_INHIBIT,thread) == NIL) { + set_pseudo_atomic_interrupted(thread); +#ifdef LISP_FEATURE_PPC + /* PPC calls alloc() from a trap or from pa_alloc(), + * look up the most context if it's from a trap. */ + { + os_context_t *context = + thread->interrupt_data->allocation_trap_context; + maybe_save_gc_mask_and_block_deferrables + (context ? os_context_sigmask_addr(context) : NULL); + } +#else + maybe_save_gc_mask_and_block_deferrables(NULL); +#endif + } } } - new_obj = gc_alloc_with_region(nbytes, BOXED_PAGE_FLAG, region, 0); + new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0); #ifndef LISP_FEATURE_WIN32 alloc_signal = SymbolValue(ALLOC_SIGNAL,thread); if ((alloc_signal & FIXNUM_TAG_MASK) == 0) { if ((signed long) alloc_signal <= 0) { SetSymbolValue(ALLOC_SIGNAL, T, thread); -#ifdef LISP_FEATURE_SB_THREAD - kill_thread_safely(thread->os_thread, SIGPROF); -#else raise(SIGPROF); -#endif } else { SetSymbolValue(ALLOC_SIGNAL, alloc_signal - (1 << N_FIXNUM_TAG_BITS), @@ -4689,12 +4756,43 @@ alloc(long nbytes) return (new_obj); } + +lispobj * +general_alloc(long nbytes, int page_type_flag) +{ + struct thread *thread = arch_os_get_current_thread(); + /* Select correct region, and call general_alloc_internal with it. + * For other then boxed allocation we must lock first, since the + * region is shared. 
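
The GC_PENDING/pseudo-atomic dance above defers collection rather than performing it mid-allocation: the allocator sets GC_PENDING and, unless GC is inhibited, marks the pseudo-atomic section as interrupted so that leaving the section traps and runs the GC. A schematic with all of the state modeled as plain flags:

#include <stdio.h>

struct demo_thread {
    int pa_atomic;        /* inside a pseudo-atomic section */
    int pa_interrupted;   /* leaving the section should trap */
    int gc_pending;
    int gc_inhibit;
};

static void demo_note_gc_needed(struct demo_thread *th)
{
    if (!th->gc_pending) {
        th->gc_pending = 1;
        if (!th->gc_inhibit)
            th->pa_interrupted = 1;   /* arrange a trap at PA exit */
    }
}

static void demo_end_pseudo_atomic(struct demo_thread *th)
{
    th->pa_atomic = 0;
    if (th->pa_interrupted) {
        th->pa_interrupted = 0;
        if (th->gc_pending)
            printf("running deferred GC\n");
    }
}

int main(void)
{
    struct demo_thread th = { 1, 0, 0, 0 };
    demo_note_gc_needed(&th);      /* allocator hit the GC trigger */
    demo_end_pseudo_atomic(&th);   /* the GC runs here, not earlier */
    return 0;
}
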
*/ + if (BOXED_PAGE_FLAG & page_type_flag) { +#ifdef LISP_FEATURE_SB_THREAD + struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region); +#else + struct alloc_region *region = &boxed_region; +#endif + return general_alloc_internal(nbytes, page_type_flag, region, thread); + } else if (UNBOXED_PAGE_FLAG == page_type_flag) { + lispobj * obj; + gc_assert(0 == thread_mutex_lock(&allocation_lock)); + obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread); + gc_assert(0 == thread_mutex_unlock(&allocation_lock)); + return obj; + } else { + lose("bad page type flag: %d", page_type_flag); + } +} + +lispobj * +alloc(long nbytes) +{ + gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread())); + return general_alloc(nbytes, BOXED_PAGE_FLAG); +} /* * shared support for the OS-dependent signal handlers which * catch GENCGC-related write-protect violations */ - void unhandled_sigmemoryfault(void* addr); /* Depending on which OS we're running under, different signals might @@ -4712,7 +4810,7 @@ gencgc_handle_wp_violation(void* fault_addr) { page_index_t page_index = find_page_index(fault_addr); -#ifdef QSHOW_SIGNALS +#if QSHOW_SIGNALS FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n", fault_addr, page_index)); #endif @@ -4728,6 +4826,9 @@ gencgc_handle_wp_violation(void* fault_addr) return 0; } else { + int ret; + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (page_table[page_index].write_protected) { /* Unprotect the page. */ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL); @@ -4745,6 +4846,8 @@ gencgc_handle_wp_violation(void* fault_addr) page_index, boxed_region.first_page, boxed_region.last_page); } + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); /* Don't worry, we can handle it. */ return 1; } @@ -4783,7 +4886,7 @@ zero_all_free_pages() page_index_t i; for (i = 0; i < last_free_page; i++) { - if (page_table[i].allocated == FREE_PAGE_FLAG) { + if (page_free_p(i)) { #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(i), PAGE_BYTES,
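
gencgc_handle_wp_violation above now takes free_pages_lock around the page-table inspection, since another thread may be updating the same entries. Its core logic: map the faulting address to a page index; if the page is one the GC protected, lift the protection and record write_protected_cleared so the next collection rescans the page. A standalone sketch with the OS and locking details stubbed out:

#include <stdio.h>

#define DEMO_NPAGES     8
#define DEMO_PAGE_BYTES 4096L

struct demo_page { int write_protected, write_protected_cleared; };
static struct demo_page demo_pages[DEMO_NPAGES];

static long demo_find_page_index(void *addr)
{
    long i = (long)addr / DEMO_PAGE_BYTES;
    return (i >= 0 && i < DEMO_NPAGES) ? i : -1;
}

static int demo_handle_wp_violation(void *fault_addr)
{
    long page = demo_find_page_index(fault_addr);
    if (page < 0)
        return 0;                  /* not a heap page: let it escalate */
    /* the real code holds free_pages_lock around this inspection */
    if (demo_pages[page].write_protected) {
        demo_pages[page].write_protected = 0;          /* os_protect(...) */
        demo_pages[page].write_protected_cleared = 1;  /* rescan next GC */
    }
    return 1;                      /* handled */
}

int main(void)
{
    demo_pages[2].write_protected = 1;
    printf("handled=%d cleared=%d\n",
           demo_handle_wp_violation((void *)(2 * DEMO_PAGE_BYTES + 40)),
           demo_pages[2].write_protected_cleared);
    return 0;
}
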