X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=c87669e37fc67876aab0a708202b852ab4d5db1b;hb=cf507f95509a855a752b6f1771aa06877b8a3b30;hp=7c85b13512d862801d899c2c0e41ae103b3ff59b;hpb=fee931bde89778322557461356580752bc819cbf;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index 7c85b13..c87669e 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -41,6 +41,7 @@ #include "gc.h" #include "gc-internal.h" #include "thread.h" +#include "pseudo-atomic.h" #include "alloc.h" #include "genesis/vector.h" #include "genesis/weak-pointer.h" @@ -88,7 +89,7 @@ long large_object_size = 4 * PAGE_BYTES; /* the verbosity level. All non-error messages are disabled at level 0; * and only a few rare messages are printed at level 1. */ -#ifdef QSHOW +#if QSHOW boolean gencgc_verbose = 1; #else boolean gencgc_verbose = 0; @@ -165,6 +166,48 @@ static boolean conservative_stack = 1; page_index_t page_table_pages; struct page *page_table; +static inline boolean page_allocated_p(page_index_t page) { + return (page_table[page].allocated != FREE_PAGE_FLAG); +} + +static inline boolean page_no_region_p(page_index_t page) { + return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG); +} + +static inline boolean page_allocated_no_region_p(page_index_t page) { + return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG)) + && page_no_region_p(page)); +} + +static inline boolean page_free_p(page_index_t page) { + return (page_table[page].allocated == FREE_PAGE_FLAG); +} + +static inline boolean page_boxed_p(page_index_t page) { + return (page_table[page].allocated & BOXED_PAGE_FLAG); +} + +static inline boolean code_page_p(page_index_t page) { + return (page_table[page].allocated & CODE_PAGE_FLAG); +} + +static inline boolean page_boxed_no_region_p(page_index_t page) { + return page_boxed_p(page) && page_no_region_p(page); +} + +static inline boolean page_unboxed_p(page_index_t page) { + /* Both flags set == boxed code page */ + return ((page_table[page].allocated & UNBOXED_PAGE_FLAG) + && !page_boxed_p(page)); +} + +static inline boolean protect_page_p(page_index_t page, generation_index_t generation) { + return (page_boxed_no_region_p(page) + && (page_table[page].bytes_used != 0) + && !page_table[page].dont_move + && (page_table[page].gen == generation)); +} + /* To map addresses to page structures the address of the first page * is needed. 
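*/

/* A minimal sketch of that mapping, assuming the usual gencgc layout
 * in which the dynamic space is one contiguous run of PAGE_BYTES-sized
 * pages starting at heap_base.  The helper below is illustrative only
 * (find_page_index in this file is the real thing); it just shows the
 * arithmetic: subtract the base, divide by the page size, and
 * bounds-check against the page-table length. */
static inline page_index_t
find_page_index_sketch(void *addr, void *base, page_index_t n_pages)
{
    if ((char *)addr >= (char *)base) {
        page_index_t index =
            ((char *)addr - (char *)base) / PAGE_BYTES;
        if (index < n_pages)
            return index;
    }
    return -1;  /* not in the dynamic space */
}

/* The address that anchors the mapping: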
*/ static void *heap_base = NULL; @@ -320,7 +363,7 @@ count_write_protect_generation_pages(generation_index_t generation) unsigned long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == generation) && (page_table[i].write_protected == 1)) count++; @@ -335,20 +378,20 @@ count_generation_pages(generation_index_t generation) long count = 0; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == generation)) count++; return count; } -#ifdef QSHOW +#if QSHOW static long count_dont_move_pages(void) { page_index_t i; long count = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].dont_move != 0)) { ++count; } @@ -365,7 +408,7 @@ count_generation_bytes_allocated (generation_index_t gen) page_index_t i; unsigned long result = 0; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].gen == gen)) result += page_table[i].bytes_used; } @@ -426,7 +469,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ /* Count the number of boxed pages within the given * generation. */ - if (page_table[j].allocated & BOXED_PAGE_FLAG) { + if (page_boxed_p(j)) { if (page_table[j].large_object) large_boxed_cnt++; else @@ -435,7 +478,7 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ if(page_table[j].dont_move) pinned_cnt++; /* Count the number of unboxed pages within the given * generation. */ - if (page_table[j].allocated & UNBOXED_PAGE_FLAG) { + if (page_unboxed_p(j)) { if (page_table[j].large_object) large_unboxed_cnt++; else @@ -465,7 +508,8 @@ print_generation_stats(int verbose) /* FIXME: should take FILE argument */ generations[i].num_gc, gen_av_mem_age(i)); } - fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated); + fprintf(stderr," Total bytes allocated = %lu\n", bytes_allocated); + fprintf(stderr," Dynamic-space-size bytes = %u\n", dynamic_space_size); fpu_restore(fpu_state); } @@ -596,7 +640,8 @@ generation_alloc_start_page(generation_index_t generation, int page_type_flag, i if (large) { if (UNBOXED_PAGE_FLAG == page_type_flag) { return generations[generation].alloc_large_unboxed_start_page; - } else if (BOXED_PAGE_FLAG == page_type_flag) { + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ return generations[generation].alloc_large_start_page; } else { lose("bad page type flag: %d", page_type_flag); @@ -604,7 +649,8 @@ generation_alloc_start_page(generation_index_t generation, int page_type_flag, i } else { if (UNBOXED_PAGE_FLAG == page_type_flag) { return generations[generation].alloc_unboxed_start_page; - } else if (BOXED_PAGE_FLAG == page_type_flag) { + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ return generations[generation].alloc_start_page; } else { lose("bad page_type_flag: %d", page_type_flag); @@ -619,7 +665,8 @@ set_generation_alloc_start_page(generation_index_t generation, int page_type_fla if (large) { if (UNBOXED_PAGE_FLAG == page_type_flag) { generations[generation].alloc_large_unboxed_start_page = page; - } else if (BOXED_PAGE_FLAG == page_type_flag) { + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. 
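 *
 * Why `&` rather than `==`: code pages evidently carry the boxed bit
 * as well as their own (page_unboxed_p above notes that "both flags
 * set == boxed code page"), so an equality test against
 * BOXED_PAGE_FLAG would wrongly reject CODE_PAGE_FLAG requests.
 * Assuming a bit layout along these lines (the values are
 * illustrative, not quoted from this file):
 *
 *     BOXED_PAGE_FLAG   = 1
 *     UNBOXED_PAGE_FLAG = 2
 *     CODE_PAGE_FLAG    = BOXED_PAGE_FLAG | UNBOXED_PAGE_FLAG
 *
 * the masked test accepts plain boxed pages and code pages alike.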
*/ generations[generation].alloc_large_start_page = page; } else { lose("bad page type flag: %d", page_type_flag); @@ -627,7 +674,8 @@ set_generation_alloc_start_page(generation_index_t generation, int page_type_fla } else { if (UNBOXED_PAGE_FLAG == page_type_flag) { generations[generation].alloc_unboxed_start_page = page; - } else if (BOXED_PAGE_FLAG == page_type_flag) { + } else if (BOXED_PAGE_FLAG & page_type_flag) { + /* Both code and data. */ generations[generation].alloc_start_page = page; } else { lose("bad page type flag: %d", page_type_flag); @@ -900,7 +948,7 @@ gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_regio gc_assert(page_table[first_page].region_start_offset == 0); page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - gc_assert(page_table[first_page].allocated == page_type_flag); + gc_assert(page_table[first_page].allocated & page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); @@ -924,7 +972,7 @@ gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_regio * region, and set the bytes_used. */ while (more) { page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - gc_assert(page_table[next_page].allocated==page_type_flag); + gc_assert(page_table[next_page].allocated & page_type_flag); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); @@ -958,7 +1006,7 @@ gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_regio set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1); /* Add the region to the new_areas if requested. */ - if (BOXED_PAGE_FLAG == page_type_flag) + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used, region_size); /* @@ -999,7 +1047,7 @@ gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_regio int orig_first_page_bytes_used; long byte_cnt; int more; - long bytes_used; + unsigned long bytes_used; page_index_t next_page; int ret; @@ -1051,7 +1099,7 @@ gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_regio * region_start_offset pointer to the start of the region, and set * the bytes_used. */ while (more) { - gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); + gc_assert(page_free_p(next_page)); gc_assert(page_table[next_page].bytes_used == 0); page_table[next_page].allocated = page_type_flag; page_table[next_page].gen = gc_alloc_generation; @@ -1080,7 +1128,7 @@ gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_regio generations[gc_alloc_generation].bytes_allocated += nbytes; /* Add the region to the new_areas if requested. 
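 *
 * (new_areas is the scavenger's record of freshly allocated memory
 * that must still be scanned.  The masked test below deliberately
 * matches code regions as well as plain boxed ones: a code object is
 * not just machine code, it also holds boxed words such as its
 * constants, so it has to be visited.)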
*/ - if (BOXED_PAGE_FLAG == page_type_flag) + if (BOXED_PAGE_FLAG & page_type_flag) add_new_area(first_page,orig_first_page_bytes_used,nbytes); /* Bump up last_free_page */ @@ -1107,6 +1155,7 @@ static page_index_t gencgc_alloc_start_page = -1; void gc_heap_exhausted_error_or_lose (long available, long requested) { + struct thread *thread = arch_os_get_current_thread(); /* Write basic information before doing anything else: if we don't * call to lisp this is a must, and even if we do there is always * the danger that we bounce back here before the error has been @@ -1119,12 +1168,13 @@ gc_heap_exhausted_error_or_lose (long available, long requested) /* If we are in GC, or totally out of memory there is no way * to sanely transfer control to the lisp-side of things. */ - struct thread *thread = arch_os_get_current_thread(); print_generation_stats(1); fprintf(stderr, "GC control variables:\n"); fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n", SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true", - SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true"); + (SymbolValue(GC_PENDING, thread) == T) ? + "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ? + "false" : "in progress")); #ifdef LISP_FEATURE_SB_THREAD fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n", SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true"); @@ -1134,6 +1184,18 @@ gc_heap_exhausted_error_or_lose (long available, long requested) else { /* FIXME: assert free_pages_lock held */ (void)thread_mutex_unlock(&free_pages_lock); + gc_assert(get_pseudo_atomic_atomic(thread)); + clear_pseudo_atomic_atomic(thread); + if (get_pseudo_atomic_interrupted(thread)) + do_pending_interrupt(); + /* Another issue is that signalling HEAP-EXHAUSTED error leads + * to running user code at arbitrary places, even in a + * WITHOUT-INTERRUPTS which may lead to a deadlock without + * running out of the heap. So at this point all bets are + * off. */ + if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) + corruption_warning_and_maybe_lose + ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS."); funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR), alloc_number(available), alloc_number(requested)); lose("HEAP-EXHAUSTED-ERROR fell through"); @@ -1141,7 +1203,8 @@ gc_heap_exhausted_error_or_lose (long available, long requested) } page_index_t -gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag) +gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, + int page_type_flag) { page_index_t first_page, last_page; page_index_t restart_page = *restart_page_ptr; @@ -1154,7 +1217,8 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type restart_page = gencgc_alloc_start_page; } - if (nbytes>=PAGE_BYTES) { + gc_assert(nbytes>=0); + if (((unsigned long)nbytes)>=PAGE_BYTES) { /* Search for a contiguous free space of at least nbytes, * aligned on a page boundary. 
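 * (The gc_assert(nbytes>=0) above is what makes the unsigned
 * comparison safe: a negative request cast to unsigned long would
 * look enormous and be sent down this large-object search path.)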
The page-alignment is strictly * speaking needed only for objects at least large_object_size @@ -1162,14 +1226,14 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type do { first_page = restart_page; while ((first_page < page_table_pages) && - (page_table[first_page].allocated != FREE_PAGE_FLAG)) + page_allocated_p(first_page)) first_page++; last_page = first_page; bytes_found = PAGE_BYTES; while ((bytes_found < nbytes) && (last_page < (page_table_pages-1)) && - (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { + page_free_p(last_page+1)) { last_page++; bytes_found += PAGE_BYTES; gc_assert(0 == page_table[last_page].bytes_used); @@ -1187,7 +1251,7 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type * pages: this helps avoid excessive conservativism. */ first_page = restart_page; while (first_page < page_table_pages) { - if (page_table[first_page].allocated == FREE_PAGE_FLAG) + if (page_free_p(first_page)) { gc_assert(0 == page_table[first_page].bytes_used); bytes_found = PAGE_BYTES; @@ -1346,7 +1410,7 @@ copy_large_object(lispobj object, long nwords) remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_boxed_p(next_page)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].region_start_offset == npage_bytes(next_page-first_page)); @@ -1371,7 +1435,7 @@ copy_large_object(lispobj object, long nwords) gc_assert(page_table[next_page].bytes_used >= remaining_bytes); page_table[next_page].gen = new_space; - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_boxed_p(next_page)); /* Adjust the bytes_used. */ old_bytes_used = page_table[next_page].bytes_used; @@ -1383,7 +1447,7 @@ copy_large_object(lispobj object, long nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - (page_table[next_page].allocated == BOXED_PAGE_FLAG) && + page_boxed_p(next_page) && page_table[next_page].large_object && (page_table[next_page].region_start_offset == npage_bytes(next_page - first_page))) { @@ -1468,9 +1532,10 @@ copy_large_unboxed_object(lispobj object, long nwords) gc_assert(from_space_p(object)); gc_assert((nwords & 0x01) == 0); - if ((nwords > 1024*1024) && gencgc_verbose) + if ((nwords > 1024*1024) && gencgc_verbose) { FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES)); + } /* Check whether it's a large object. 
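 *
 * (The page_allocated_no_region_p assertions below replace an older
 * pair of equality tests against UNBOXED_PAGE_FLAG and
 * BOXED_PAGE_FLAG.  Being mask-based, the helper also tolerates code
 * pages, and it additionally insists that no allocation region is
 * still open on the page.)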
*/ first_page = find_page_index((void *)object); @@ -1491,8 +1556,7 @@ copy_large_unboxed_object(lispobj object, long nwords) remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)); + gc_assert(page_allocated_no_region_p(next_page)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].region_start_offset == npage_bytes(next_page-first_page)); @@ -1523,8 +1587,7 @@ copy_large_unboxed_object(lispobj object, long nwords) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && + page_allocated_no_region_p(next_page) && page_table[next_page].large_object && (page_table[next_page].region_start_offset == npage_bytes(next_page - first_page))) { @@ -1541,10 +1604,11 @@ copy_large_unboxed_object(lispobj object, long nwords) next_page++; } - if ((bytes_freed > 0) && gencgc_verbose) + if ((bytes_freed > 0) && gencgc_verbose) { FSHOW((stderr, "/copy_large_unboxed bytes_freed=%d\n", bytes_freed)); + } generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed; @@ -1620,7 +1684,7 @@ sniff_code_object(struct code *code, unsigned long displacement) unsigned d2 = *((unsigned char *)p - 2); unsigned d3 = *((unsigned char *)p - 3); unsigned d4 = *((unsigned char *)p - 4); -#ifdef QSHOW +#if QSHOW unsigned d5 = *((unsigned char *)p - 5); unsigned d6 = *((unsigned char *)p - 6); #endif @@ -2149,8 +2213,7 @@ search_dynamic_space(void *pointer) lispobj *start; /* The address may be invalid, so do some checks. */ - if ((page_index == -1) || - (page_table[page_index].allocated == FREE_PAGE_FLAG)) + if ((page_index == -1) || page_free_p(page_index)) return NULL; start = (lispobj *)page_region_start(page_index); return (gc_search_space(start, @@ -2169,13 +2232,6 @@ search_dynamic_space(void *pointer) static int looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) { - /* We need to allow raw pointers into Code objects for return - * addresses. This will also pick up pointers to functions in code - * objects. */ - if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) - /* XXX could do some further checks here */ - return 1; - if (!is_lisp_pointer((lispobj)pointer)) { return 0; } @@ -2194,28 +2250,31 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: if ((unsigned long)pointer != ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wf2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wf3: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; case LIST_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wl1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } /* Is it plausible cons? 
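*/

/* A sketch of the plausibility test that follows, pulled out as a
 * function for readability.  is_lisp_pointer and is_lisp_immediate
 * are the predicates this file already uses; the wrapper itself is
 * illustrative, not part of the real code.  A pair of words only
 * passes for a cons if both car and cdr look like Lisp data rather
 * than raw machine words: */
static boolean
plausible_cons_p(lispobj *start_addr)
{
    return (is_lisp_pointer(start_addr[0])
            || is_lisp_immediate(start_addr[0]))
        && (is_lisp_pointer(start_addr[1])
            || is_lisp_immediate(start_addr[1]));
}

/* (the in-line form of the same check: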
*/ @@ -2225,44 +2284,49 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) is_lisp_immediate(start_addr[1]))) break; else { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wl2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } case INSTANCE_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wi1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wi2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; case OTHER_POINTER_LOWTAG: if ((unsigned long)pointer != ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo1: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } /* Is it plausible? Not a cons. XXX should check the headers. */ if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) { - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo2: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } switch (widetag_of(start_addr[0])) { @@ -2272,26 +2336,29 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) #if N_WORD_BITS == 64 case SINGLE_FLOAT_WIDETAG: #endif - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo3: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; /* only pointed to by function pointers? */ case CLOSURE_HEADER_WIDETAG: case FUNCALLABLE_INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo4: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; case INSTANCE_HEADER_WIDETAG: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*Wo5: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; /* the valid other immediate pointer objects */ @@ -2394,18 +2461,20 @@ looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr) break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "/Wo6: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } break; default: - if (gencgc_verbose) + if (gencgc_verbose) { FSHOW((stderr, "*W?: %x %x %x\n", pointer, start_addr, *start_addr)); + } return 0; } @@ -2562,8 +2631,7 @@ maybe_adjust_large_object(lispobj *where) remaining_bytes = nwords*N_WORD_BYTES; while (remaining_bytes > PAGE_BYTES) { gc_assert(page_table[next_page].gen == from_space); - gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG) - || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG)); + gc_assert(page_allocated_no_region_p(next_page)); gc_assert(page_table[next_page].large_object); gc_assert(page_table[next_page].region_start_offset == npage_bytes(next_page-first_page)); @@ -2598,8 +2666,7 @@ maybe_adjust_large_object(lispobj *where) next_page++; while ((old_bytes_used == PAGE_BYTES) && (page_table[next_page].gen == from_space) && - ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG) - || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) && + page_allocated_no_region_p(next_page) && page_table[next_page].large_object && (page_table[next_page].region_start_offset == npage_bytes(next_page - first_page))) { @@ -2651,7 +2718,7 @@ preserve_pointer(void *addr) /* quick check 1: Address is quite likely to have been invalid. 
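*/

/* preserve_pointer is the heart of conservative root scanning: each
 * word found on a thread's control stack is fed to it, and any word
 * that might point into the dynamic space pins (dont_move) the pages
 * of the object it appears to reference.  A sketch of the caller's
 * side, with start/end standing in for the bounds of the stack words
 * to scan (illustrative; the real loop lives in
 * garbage_collect_generation): */
static void
scan_stack_conservatively_sketch(void **start, void **end)
{
    void **ptr;
    for (ptr = start; ptr < end; ptr++)
        preserve_pointer(*ptr);
}

/* First, cheap rejections that need only the page table: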
*/ if ((addr_page_index == -1) - || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG) + || page_free_p(addr_page_index) || (page_table[addr_page_index].bytes_used == 0) || (page_table[addr_page_index].gen != from_space) /* Skip if already marked dont_move. */ @@ -2675,7 +2742,9 @@ preserve_pointer(void *addr) * expensive but important, since it vastly reduces the * probability that random garbage will be bogusly interpreted as * a pointer which prevents a page from moving. */ - if (!(possibly_valid_dynamic_space_pointer(addr))) + if (!(code_page_p(addr_page_index) + || (is_lisp_pointer((lispobj)addr) && + possibly_valid_dynamic_space_pointer(addr)))) return; /* Find the beginning of the region. Note that there may be @@ -2706,7 +2775,7 @@ preserve_pointer(void *addr) * free area in which case it's ignored here. Note it gets * through the valid pointer test above because the tail looks * like conses. */ - if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG) + if (page_free_p(addr_page_index) || (page_table[addr_page_index].bytes_used == 0) /* Check the offset within the page. */ || (((unsigned long)addr & (PAGE_BYTES - 1)) @@ -2745,7 +2814,7 @@ preserve_pointer(void *addr) /* Check whether this is the last page in this contiguous block.. */ if ((page_table[i].bytes_used < PAGE_BYTES) /* ..or it is PAGE_BYTES and is the last in the block */ - || (page_table[i+1].allocated == FREE_PAGE_FLAG) + || page_free_p(i+1) || (page_table[i+1].bytes_used == 0) /* next page free */ || (page_table[i+1].gen != from_space) /* diff. gen */ || (page_table[i+1].region_start_offset == 0)) @@ -2782,14 +2851,14 @@ update_page_write_prot(page_index_t page) long num_words = page_table[page].bytes_used / N_WORD_BYTES; /* Shouldn't be a free page. */ - gc_assert(page_table[page].allocated != FREE_PAGE_FLAG); + gc_assert(page_allocated_p(page)); gc_assert(page_table[page].bytes_used != 0); /* Skip if it's already write-protected, pinned, or unboxed */ if (page_table[page].write_protected /* FIXME: What's the reason for not write-protecting pinned pages? */ || page_table[page].dont_move - || (page_table[page].allocated & UNBOXED_PAGE_FLAG)) + || page_unboxed_p(page)) return (0); /* Scan the page for pointers to younger generations or the @@ -2802,7 +2871,7 @@ update_page_write_prot(page_index_t page) /* Check that it's in the dynamic space */ if (index != -1) if (/* Does it point to a younger or the temp. generation? 
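 * That is: does this page hold an old-to-young pointer?  If it does,
 * it must stay unprotected, since write protection is how the GC
 * marks pages it may skip when scanning for such pointers.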
*/ - ((page_table[index].allocated != FREE_PAGE_FLAG) + (page_allocated_p(index) && (page_table[index].bytes_used != 0) && ((page_table[index].gen < gen) || (page_table[index].gen == SCRATCH_GENERATION))) @@ -2876,7 +2945,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) for (i = 0; i < last_free_page; i++) { generation_index_t generation = page_table[i].gen; - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (generation != new_space) && (generation >= from) @@ -2893,7 +2962,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) write_protected && page_table[last_page].write_protected; if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) + || (!page_boxed_p(last_page+1)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].region_start_offset == 0)) @@ -2926,7 +2995,7 @@ scavenge_generations(generation_index_t from, generation_index_t to) /* Check that none of the write_protected pages in this generation * have been written to. */ for (i = 0; i < page_table_pages; i++) { - if ((page_table[i].allocation != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0)) { @@ -2980,7 +3049,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) generation)); for (i = 0; i < last_free_page; i++) { /* Note that this skips over open regions when it encounters them. */ - if ((page_table[i].allocated & BOXED_PAGE_FLAG) + if (page_boxed_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && ((page_table[i].write_protected == 0) @@ -3009,7 +3078,7 @@ scavenge_newspace_generation_one_scan(generation_index_t generation) * contiguous block */ if ((page_table[last_page].bytes_used < PAGE_BYTES) /* Or it is PAGE_BYTES and is the last in the block */ - || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG)) + || (!page_boxed_p(last_page+1)) || (page_table[last_page+1].bytes_used == 0) || (page_table[last_page+1].gen != generation) || (page_table[last_page+1].region_start_offset == 0)) @@ -3109,8 +3178,9 @@ scavenge_newspace_generation(generation_index_t generation) /* New areas of objects allocated have been lost so need to do a * full scan to be sure! If this becomes a problem try * increasing NUM_NEW_AREAS. */ - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("new_areas overflow, doing full scavenge"); + } /* Don't need to record new areas that get scavenged * anyway during scavenge_newspace_generation_one_scan. */ @@ -3157,7 +3227,7 @@ scavenge_newspace_generation(generation_index_t generation) /* Check that none of the write_protected pages in this generation * have been written to. 
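 *
 * (Note that the expression being replaced read
 * "page_table[i].allocation" -- no such field exists; struct page
 * spells it "allocated".  It evidently compiled only because this
 * verification block is normally preprocessed away, which is exactly
 * the sort of bit-rot the page_allocated_p helper prevents.)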
*/ for (i = 0; i < page_table_pages; i++) { - if ((page_table[i].allocation != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation) && (page_table[i].write_protected_cleared != 0) @@ -3180,7 +3250,7 @@ unprotect_oldspace(void) page_index_t i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == from_space)) { void *page_start; @@ -3212,7 +3282,7 @@ free_oldspace(void) do { /* Find a first page for the next region of pages. */ while ((first_page < last_free_page) - && ((page_table[first_page].allocated == FREE_PAGE_FLAG) + && (page_free_p(first_page) || (page_table[first_page].bytes_used == 0) || (page_table[first_page].gen != from_space))) first_page++; @@ -3244,7 +3314,7 @@ free_oldspace(void) last_page++; } while ((last_page < last_free_page) - && (page_table[last_page].allocated != FREE_PAGE_FLAG) + && page_allocated_p(last_page) && (page_table[last_page].bytes_used != 0) && (page_table[last_page].gen == from_space)); @@ -3315,7 +3385,7 @@ verify_space(lispobj *start, size_t words) if (page_index != -1) { /* If it's within the dynamic space it should point to a used * page. XX Could check the offset too. */ - if ((page_table[page_index].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(page_index) && (page_table[page_index].bytes_used == 0)) lose ("Ptr %x @ %x sees free page.\n", thing, start); /* Check that it doesn't point to a forwarding pointer! */ @@ -3580,7 +3650,7 @@ verify_generation(generation_index_t generation) page_index_t i; for (i = 0; i < last_free_page; i++) { - if ((page_table[i].allocated != FREE_PAGE_FLAG) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0) && (page_table[i].gen == generation)) { page_index_t last_page; @@ -3622,7 +3692,7 @@ verify_zero_fill(void) page_index_t page; for (page = 0; page < last_free_page; page++) { - if (page_table[page].allocated == FREE_PAGE_FLAG) { + if (page_free_p(page)) { /* The whole page should be zero filled. 
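 * If any word below turns out non-zero, either the page was never
 * scrubbed when it was freed or something has scribbled on free
 * memory; both would presumably corrupt later allocations, which
 * expect these pages to arrive already zeroed. */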
*/ long *start_addr = (long *)page_address(page); long size = 1024; @@ -3680,10 +3750,7 @@ write_protect_generation_pages(generation_index_t generation) gc_assert(generation < SCRATCH_GENERATION); for (start = 0; start < last_free_page; start++) { - if ((page_table[start].allocated == BOXED_PAGE_FLAG) - && (page_table[start].bytes_used != 0) - && !page_table[start].dont_move - && (page_table[start].gen == generation)) { + if (protect_page_p(start, generation)) { void *page_start; page_index_t last; @@ -3691,10 +3758,7 @@ write_protect_generation_pages(generation_index_t generation) page_table[start].write_protected = 1; for (last = start + 1; last < last_free_page; last++) { - if ((page_table[last].allocated != BOXED_PAGE_FLAG) - || (page_table[last].bytes_used == 0) - || page_table[last].dont_move - || (page_table[last].gen != generation)) + if (!protect_page_p(last, generation)) break; page_table[last].write_protected = 1; } @@ -4032,13 +4096,13 @@ garbage_collect_generation(generation_index_t generation, int raise) } #endif -#ifdef QSHOW +#if QSHOW if (gencgc_verbose > 1) { long num_dont_move_pages = count_dont_move_pages(); fprintf(stderr, "/non-movable pages due to conservative pointers = %d (%d bytes)\n", num_dont_move_pages, - npage_bytes(num_dont_move_pages); + npage_bytes(num_dont_move_pages)); } #endif @@ -4175,8 +4239,9 @@ garbage_collect_generation(generation_index_t generation, int raise) generations[generation].alloc_large_unboxed_start_page = 0; if (generation >= verify_gens) { - if (gencgc_verbose) + if (gencgc_verbose) { SHOW("verifying"); + } verify_gc(); verify_dynamic_space(); } @@ -4205,8 +4270,7 @@ update_dynamic_space_free_pointer(void) page_index_t last_page = -1, i; for (i = 0; i < last_free_page; i++) - if ((page_table[i].allocated != FREE_PAGE_FLAG) - && (page_table[i].bytes_used != 0)) + if (page_allocated_p(i) && (page_table[i].bytes_used != 0)) last_page = i; last_free_page = last_page+1; @@ -4221,15 +4285,15 @@ remap_free_pages (page_index_t from, page_index_t to) page_index_t first_page, last_page; for (first_page = from; first_page <= to; first_page++) { - if (page_table[first_page].allocated != FREE_PAGE_FLAG || - page_table[first_page].need_to_zero == 0) { + if (page_allocated_p(first_page) || + (page_table[first_page].need_to_zero == 0)) { continue; } last_page = first_page + 1; - while (page_table[last_page].allocated == FREE_PAGE_FLAG && - last_page < to && - page_table[last_page].need_to_zero == 1) { + while (page_free_p(last_page) && + (last_page < to) && + (page_table[last_page].need_to_zero == 1)) { last_page++; } @@ -4405,12 +4469,13 @@ gc_free_heap(void) { page_index_t page; - if (gencgc_verbose > 1) + if (gencgc_verbose > 1) { SHOW("entering gc_free_heap"); + } for (page = 0; page < page_table_pages; page++) { /* Skip free pages which should already be zero filled. */ - if (page_table[page].allocated != FREE_PAGE_FLAG) { + if (page_allocated_p(page)) { void *page_start, *addr; /* Mark the page free. The other slots are assumed invalid @@ -4444,7 +4509,7 @@ gc_free_heap(void) /* Double-check that the page is zero filled. 
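 * (A paranoid re-verification: every page has just been reset above,
 * so a non-zero word caught here would point at a bug in the zeroing
 * path rather than at surviving data.)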
*/ long *page_start; page_index_t i; - gc_assert(page_table[page].allocated == FREE_PAGE_FLAG); + gc_assert(page_free_p(page)); gc_assert(page_table[page].bytes_used == 0); page_start = (long *)page_address(page); for (i=0; i<1024; i++) { @@ -4565,7 +4630,6 @@ gencgc_pickup_dynamic(void) void *alloc_ptr = (void *)get_alloc_pointer(); lispobj *prev=(lispobj *)page_address(page); generation_index_t gen = PSEUDO_STATIC_GENERATION; - do { lispobj *first,*ptr= (lispobj *)page_address(page); page_table[page].allocated = BOXED_PAGE_FLAG; @@ -4659,8 +4723,17 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg /* set things up so that GC happens when we finish the PA * section */ SetSymbolValue(GC_PENDING,T,thread); - if (SymbolValue(GC_INHIBIT,thread) == NIL) - set_pseudo_atomic_interrupted(thread); + if (SymbolValue(GC_INHIBIT,thread) == NIL) { + set_pseudo_atomic_interrupted(thread); +#ifdef LISP_FEATURE_PPC + /* PPC calls alloc() from a trap, look up the most + * recent one and frob that. */ + maybe_save_gc_mask_and_block_deferrables + (get_interrupt_context_for_thread(thread)); +#else + maybe_save_gc_mask_and_block_deferrables(NULL); +#endif + } } } new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0); @@ -4670,11 +4743,7 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg if ((alloc_signal & FIXNUM_TAG_MASK) == 0) { if ((signed long) alloc_signal <= 0) { SetSymbolValue(ALLOC_SIGNAL, T, thread); -#ifdef LISP_FEATURE_SB_THREAD - kill_thread_safely(thread->os_thread, SIGPROF); -#else raise(SIGPROF); -#endif } else { SetSymbolValue(ALLOC_SIGNAL, alloc_signal - (1 << N_FIXNUM_TAG_BITS), @@ -4693,7 +4762,7 @@ general_alloc(long nbytes, int page_type_flag) /* Select correct region, and call general_alloc_internal with it. * For other then boxed allocation we must lock first, since the * region is shared. */ - if (BOXED_PAGE_FLAG == page_type_flag) { + if (BOXED_PAGE_FLAG & page_type_flag) { #ifdef LISP_FEATURE_SB_THREAD struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region); #else @@ -4714,14 +4783,14 @@ general_alloc(long nbytes, int page_type_flag) lispobj * alloc(long nbytes) { - general_alloc(nbytes, BOXED_PAGE_FLAG); + gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread())); + return general_alloc(nbytes, BOXED_PAGE_FLAG); } /* * shared support for the OS-dependent signal handlers which * catch GENCGC-related write-protect violations */ - void unhandled_sigmemoryfault(void* addr); /* Depending on which OS we're running under, different signals might @@ -4739,7 +4808,7 @@ gencgc_handle_wp_violation(void* fault_addr) { page_index_t page_index = find_page_index(fault_addr); -#ifdef QSHOW_SIGNALS +#if QSHOW_SIGNALS FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n", fault_addr, page_index)); #endif @@ -4755,6 +4824,9 @@ gencgc_handle_wp_violation(void* fault_addr) return 0; } else { + int ret; + ret = thread_mutex_lock(&free_pages_lock); + gc_assert(ret == 0); if (page_table[page_index].write_protected) { /* Unprotect the page. */ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL); @@ -4772,6 +4844,8 @@ gencgc_handle_wp_violation(void* fault_addr) page_index, boxed_region.first_page, boxed_region.last_page); } + ret = thread_mutex_unlock(&free_pages_lock); + gc_assert(ret == 0); /* Don't worry, we can handle it. 
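 *
 * (The free_pages_lock taken above serialises this handler against
 * the allocator: without it, another thread could be opening a
 * region on this very page while we inspect and clear its
 * write_protected bit, and the two page-table updates could race.)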
*/ return 1; } @@ -4810,7 +4884,7 @@ zero_all_free_pages() page_index_t i; for (i = 0; i < last_free_page; i++) { - if (page_table[i].allocated == FREE_PAGE_FLAG) { + if (page_free_p(i)) { #ifdef READ_PROTECT_FREE_PAGES os_protect(page_address(i), PAGE_BYTES,