X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=7c85b13512d862801d899c2c0e41ae103b3ff59b;hb=fee931bde89778322557461356580752bc819cbf;hp=ced46d0d3adcf94652b32b4b4d8197af2a79d1b0;hpb=f82850855bab2cdaaf51c4e92d506b365866e65f;p=sbcl.git diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c index ced46d0..7c85b13 100644 --- a/src/runtime/gencgc.c +++ b/src/runtime/gencgc.c @@ -57,7 +57,7 @@ /* forward declarations */ page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes, - int unboxed); + int page_type_flag); /* @@ -294,15 +294,16 @@ generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION; * integrated with the Lisp code. */ page_index_t last_free_page; +#ifdef LISP_FEATURE_SB_THREAD /* This lock is to prevent multiple threads from simultaneously * allocating new regions which overlap each other. Note that the * majority of GC is single-threaded, but alloc() may be called from * >1 thread at a time and must be thread-safe. This lock must be * seized before all accesses to generations[] or to parts of * page_table[] that other threads may want to see */ - -#ifdef LISP_FEATURE_SB_THREAD static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER; +/* This lock is used to protect non-thread-local allocation. */ +static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER; #endif @@ -589,6 +590,51 @@ struct alloc_region unboxed_region; /* The generation currently being allocated to. */ static generation_index_t gc_alloc_generation; +static inline page_index_t +generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_large_unboxed_start_page; + } else if (BOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_large_start_page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_unboxed_start_page; + } else if (BOXED_PAGE_FLAG == page_type_flag) { + return generations[generation].alloc_start_page; + } else { + lose("bad page_type_flag: %d", page_type_flag); + } + } +} + +static inline void +set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large, + page_index_t page) +{ + if (large) { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_large_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_large_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } else { + if (UNBOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_unboxed_start_page = page; + } else if (BOXED_PAGE_FLAG == page_type_flag) { + generations[generation].alloc_start_page = page; + } else { + lose("bad page type flag: %d", page_type_flag); + } + } +} + /* Find a new region with room for at least the given number of bytes. * * It starts looking at the current generation's alloc_start_page. So @@ -613,7 +659,7 @@ static generation_index_t gc_alloc_generation; * are allocated, although they will initially be empty. 
*/ static void -gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region) { page_index_t first_page; page_index_t last_page; @@ -633,14 +679,8 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) && (alloc_region->free_pointer == alloc_region->end_addr)); ret = thread_mutex_lock(&free_pages_lock); gc_assert(ret == 0); - if (unboxed) { - first_page = - generations[gc_alloc_generation].alloc_unboxed_start_page; - } else { - first_page = - generations[gc_alloc_generation].alloc_start_page; - } - last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0); + last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag); bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used) + npage_bytes(last_page-first_page); @@ -656,29 +696,20 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region) /* The first page may have already been in use. */ if (page_table[first_page].bytes_used == 0) { - if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[first_page].allocated = BOXED_PAGE_FLAG; + page_table[first_page].allocated = page_type_flag; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].large_object = 0; page_table[first_page].region_start_offset = 0; } - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated == page_type_flag); page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG; gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); for (i = first_page+1; i <= last_page; i++) { - if (unboxed) - page_table[i].allocated = UNBOXED_PAGE_FLAG; - else - page_table[i].allocated = BOXED_PAGE_FLAG; + page_table[i].allocated = page_type_flag; page_table[i].gen = gc_alloc_generation; page_table[i].large_object = 0; /* This may not be necessary for unboxed regions (think it was @@ -829,7 +860,7 @@ add_new_area(page_index_t first_page, size_t offset, size_t size) * it is safe to try to re-update the page table of this reset * alloc_region. */ void -gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) +gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region) { int more; page_index_t first_page; @@ -869,10 +900,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) gc_assert(page_table[first_page].region_start_offset == 0); page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated == page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 0); @@ -896,10 +924,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) * region, and set the bytes_used. 
*/ while (more) { page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG); - if (unboxed) - gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[next_page].allocated==page_type_flag); gc_assert(page_table[next_page].bytes_used == 0); gc_assert(page_table[next_page].gen == gc_alloc_generation); gc_assert(page_table[next_page].large_object == 0); @@ -930,14 +955,10 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region) /* Set the generations alloc restart page to the last page of * the region. */ - if (unboxed) - generations[gc_alloc_generation].alloc_unboxed_start_page = - next_page-1; - else - generations[gc_alloc_generation].alloc_start_page = next_page-1; + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1); /* Add the region to the new_areas if requested. */ - if (!unboxed) + if (BOXED_PAGE_FLAG == page_type_flag) add_new_area(first_page,orig_first_page_bytes_used, region_size); /* @@ -971,7 +992,7 @@ static inline void *gc_quick_alloc(long nbytes); /* Allocate a possibly large object. */ void * -gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) +gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region) { page_index_t first_page; page_index_t last_page; @@ -985,24 +1006,16 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) ret = thread_mutex_lock(&free_pages_lock); gc_assert(ret == 0); - if (unboxed) { - first_page = - generations[gc_alloc_generation].alloc_large_unboxed_start_page; - } else { - first_page = generations[gc_alloc_generation].alloc_large_start_page; - } + first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1); if (first_page <= alloc_region->last_page) { first_page = alloc_region->last_page+1; } - last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed); + last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag); gc_assert(first_page > alloc_region->last_page); - if (unboxed) - generations[gc_alloc_generation].alloc_large_unboxed_start_page = - last_page; - else - generations[gc_alloc_generation].alloc_large_start_page = last_page; + + set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page); /* Set up the pages. */ orig_first_page_bytes_used = page_table[first_page].bytes_used; @@ -1010,19 +1023,13 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) /* If the first page was free then set up the gen, and * region_start_offset. 
*/ if (page_table[first_page].bytes_used == 0) { - if (unboxed) - page_table[first_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[first_page].allocated = BOXED_PAGE_FLAG; + page_table[first_page].allocated = page_type_flag; page_table[first_page].gen = gc_alloc_generation; page_table[first_page].region_start_offset = 0; page_table[first_page].large_object = 1; } - if (unboxed) - gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG); - else - gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG); + gc_assert(page_table[first_page].allocated == page_type_flag); gc_assert(page_table[first_page].gen == gc_alloc_generation); gc_assert(page_table[first_page].large_object == 1); @@ -1046,10 +1053,7 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) while (more) { gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG); gc_assert(page_table[next_page].bytes_used == 0); - if (unboxed) - page_table[next_page].allocated = UNBOXED_PAGE_FLAG; - else - page_table[next_page].allocated = BOXED_PAGE_FLAG; + page_table[next_page].allocated = page_type_flag; page_table[next_page].gen = gc_alloc_generation; page_table[next_page].large_object = 1; @@ -1076,7 +1080,7 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region) generations[gc_alloc_generation].bytes_allocated += nbytes; /* Add the region to the new_areas if requested. */ - if (!unboxed) + if (BOXED_PAGE_FLAG == page_type_flag) add_new_area(first_page,orig_first_page_bytes_used,nbytes); /* Bump up last_free_page */ @@ -1137,7 +1141,7 @@ gc_heap_exhausted_error_or_lose (long available, long requested) } page_index_t -gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) +gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag) { page_index_t first_page, last_page; page_index_t restart_page = *restart_page_ptr; @@ -1168,7 +1172,8 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) { last_page++; bytes_found += PAGE_BYTES; - gc_assert(page_table[last_page].write_protected == 0); + gc_assert(0 == page_table[last_page].bytes_used); + gc_assert(0 == page_table[last_page].write_protected); } if (bytes_found > most_bytes_found) most_bytes_found = bytes_found; @@ -1184,11 +1189,11 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) while (first_page < page_table_pages) { if (page_table[first_page].allocated == FREE_PAGE_FLAG) { + gc_assert(0 == page_table[first_page].bytes_used); bytes_found = PAGE_BYTES; break; } - else if ((page_table[first_page].allocated == - (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) && + else if ((page_table[first_page].allocated == page_type_flag) && (page_table[first_page].large_object == 0) && (page_table[first_page].gen == gc_alloc_generation) && (page_table[first_page].write_protected == 0) && @@ -1223,13 +1228,13 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed) * functions will eventually call this */ void * -gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, +gc_alloc_with_region(long nbytes,int page_type_flag, struct alloc_region *my_region, int quick_p) { void *new_free_pointer; if (nbytes>=large_object_size) - return gc_alloc_large(nbytes,unboxed_p,my_region); + return gc_alloc_large(nbytes, page_type_flag, my_region); /* Check whether there is room in the current alloc region. 
*/ new_free_pointer = my_region->free_pointer + nbytes; @@ -1247,9 +1252,9 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, if (!quick_p && void_diff(my_region->end_addr,my_region->free_pointer) <= 32) { /* If so, finished with the current region. */ - gc_alloc_update_page_tables(unboxed_p, my_region); + gc_alloc_update_page_tables(page_type_flag, my_region); /* Set up a new region. */ - gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region); + gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region); } return((void *)new_obj); @@ -1258,51 +1263,43 @@ gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region, /* Else not enough free space in the current region: retry with a * new region. */ - gc_alloc_update_page_tables(unboxed_p, my_region); - gc_alloc_new_region(nbytes, unboxed_p, my_region); - return gc_alloc_with_region(nbytes,unboxed_p,my_region,0); + gc_alloc_update_page_tables(page_type_flag, my_region); + gc_alloc_new_region(nbytes, page_type_flag, my_region); + return gc_alloc_with_region(nbytes, page_type_flag, my_region,0); } /* these are only used during GC: all allocation from the mutator calls * alloc() -> gc_alloc_with_region() with the appropriate per-thread * region */ -void * -gc_general_alloc(long nbytes,int unboxed_p,int quick_p) -{ - struct alloc_region *my_region = - unboxed_p ? &unboxed_region : &boxed_region; - return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p); -} - static inline void * gc_quick_alloc(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK); } static inline void * gc_quick_alloc_large(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, BOXED_PAGE_FLAG ,ALLOC_QUICK); } static inline void * gc_alloc_unboxed(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,0); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0); } static inline void * gc_quick_alloc_unboxed(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK); } static inline void * gc_quick_alloc_large_unboxed(long nbytes) { - return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK); + return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK); } @@ -4610,8 +4607,6 @@ gc_initialize_pointers(void) { gencgc_pickup_dynamic(); } - - /* alloc(..) is the external interface for memory allocation. It @@ -4626,16 +4621,10 @@ gc_initialize_pointers(void) * The check for a GC trigger is only performed when the current * region is full, so in most cases it's not needed. */ -lispobj * -alloc(long nbytes) +static inline lispobj * +general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region, + struct thread *thread) { - struct thread *thread=arch_os_get_current_thread(); - struct alloc_region *region= -#ifdef LISP_FEATURE_SB_THREAD - thread ? &(thread->alloc_region) : &boxed_region; -#else - &boxed_region; -#endif #ifndef LISP_FEATURE_WIN32 lispobj alloc_signal; #endif @@ -4648,25 +4637,8 @@ alloc(long nbytes) gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0) && ((nbytes & LOWTAG_MASK) == 0)); -#if 0 - if(all_threads) - /* there are a few places in the C code that allocate data in the - * heap before Lisp starts. 
This is before interrupts are enabled, - * so we don't need to check for pseudo-atomic */ -#ifdef LISP_FEATURE_SB_THREAD - if(!get_psuedo_atomic_atomic(th)) { - register u32 fs; - fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n", - th,th->os_thread); - __asm__("movl %fs,%0" : "=r" (fs) : ); - fprintf(stderr, "fs is %x, th->tls_cookie=%x \n", - debug_get_fs(),th->tls_cookie); - lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n"); - } -#else - gc_assert(get_pseudo_atomic_atomic(th)); -#endif -#endif + /* Must be inside a PA section. */ + gc_assert(get_pseudo_atomic_atomic(thread)); /* maybe we can do this quickly ... */ new_free_pointer = region->free_pointer + nbytes; @@ -4676,11 +4648,10 @@ alloc(long nbytes) return(new_obj); /* yup */ } - /* we have to go the long way around, it seems. Check whether - * we should GC in the near future + /* we have to go the long way around, it seems. Check whether we + * should GC in the near future */ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) { - gc_assert(get_pseudo_atomic_atomic(thread)); /* Don't flood the system with interrupts if the need to gc is * already noted. This can happen for example when SUB-GC * allocates or after a gc triggered in a WITHOUT-GCING. */ @@ -4692,7 +4663,7 @@ alloc(long nbytes) set_pseudo_atomic_interrupted(thread); } } - new_obj = gc_alloc_with_region(nbytes,0,region,0); + new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0); #ifndef LISP_FEATURE_WIN32 alloc_signal = SymbolValue(ALLOC_SIGNAL,thread); @@ -4714,6 +4685,37 @@ alloc(long nbytes) return (new_obj); } + +lispobj * +general_alloc(long nbytes, int page_type_flag) +{ + struct thread *thread = arch_os_get_current_thread(); + /* Select correct region, and call general_alloc_internal with it. + * For other then boxed allocation we must lock first, since the + * region is shared. */ + if (BOXED_PAGE_FLAG == page_type_flag) { +#ifdef LISP_FEATURE_SB_THREAD + struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region); +#else + struct alloc_region *region = &boxed_region; +#endif + return general_alloc_internal(nbytes, page_type_flag, region, thread); + } else if (UNBOXED_PAGE_FLAG == page_type_flag) { + lispobj * obj; + gc_assert(0 == thread_mutex_lock(&allocation_lock)); + obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread); + gc_assert(0 == thread_mutex_unlock(&allocation_lock)); + return obj; + } else { + lose("bad page type flag: %d", page_type_flag); + } +} + +lispobj * +alloc(long nbytes) +{ + general_alloc(nbytes, BOXED_PAGE_FLAG); +} /* * shared support for the OS-dependent signal handlers which @@ -4787,9 +4789,9 @@ void gc_alloc_update_all_page_tables(void) /* Flush the alloc regions updating the tables. */ struct thread *th; for_each_thread(th) - gc_alloc_update_page_tables(0, &th->alloc_region); - gc_alloc_update_page_tables(1, &unboxed_region); - gc_alloc_update_page_tables(0, &boxed_region); + gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region); + gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region); + gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region); } void
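
The core of the patch above is a change of interface: the boolean "unboxed" argument to the allocator entry points becomes an explicit page_type_flag (BOXED_PAGE_FLAG or UNBOXED_PAGE_FLAG), the generation start-page bookkeeping is factored into generation_alloc_start_page()/set_generation_alloc_start_page(), and non-boxed allocation from the mutator now goes through general_alloc(), which takes the new allocation_lock because the unboxed region is shared between threads. Below is a minimal standalone sketch of that dispatch, for illustration only: the flag values, the stub bump-allocating regions, and alloc_internal() are stand-ins, not SBCL's actual definitions.

    /* Standalone model of the general_alloc() dispatch introduced in the
     * patch.  Flag values and region layout are illustrative stand-ins. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BOXED_PAGE_FLAG   1   /* per-thread region: no lock needed      */
    #define UNBOXED_PAGE_FLAG 2   /* shared region: must take the lock      */

    struct alloc_region { char buf[4096]; size_t used; };

    static struct alloc_region boxed_region;     /* stands in for a thread's region */
    static struct alloc_region unboxed_region;   /* shared by all threads           */
    static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Bump-allocate from one region; very loosely models
     * general_alloc_internal() with the slow path omitted. */
    static void *alloc_internal(size_t nbytes, struct alloc_region *region)
    {
        if (region->used + nbytes > sizeof region->buf)
            return NULL;       /* the real code would open a new region here */
        void *obj = region->buf + region->used;
        region->used += nbytes;
        return obj;
    }

    /* Models general_alloc(): choose a region from the flag, lock only the
     * shared (unboxed) path, and treat any other flag as a fatal error. */
    static void *general_alloc(size_t nbytes, int page_type_flag)
    {
        if (page_type_flag == BOXED_PAGE_FLAG) {
            return alloc_internal(nbytes, &boxed_region);
        } else if (page_type_flag == UNBOXED_PAGE_FLAG) {
            pthread_mutex_lock(&allocation_lock);
            void *obj = alloc_internal(nbytes, &unboxed_region);
            pthread_mutex_unlock(&allocation_lock);
            return obj;
        }
        fprintf(stderr, "bad page type flag: %d\n", page_type_flag);
        abort();
    }

    int main(void)
    {
        void *a = general_alloc(16, BOXED_PAGE_FLAG);    /* alloc() path      */
        void *b = general_alloc(32, UNBOXED_PAGE_FLAG);  /* locked, shared    */
        printf("boxed %p, unboxed %p\n", a, b);
        return 0;
    }

In the sketch, as in the patch, only the unboxed path takes allocation_lock: boxed allocation stays on the caller's own region (per-thread under LISP_FEATURE_SB_THREAD), so the common case remains lock-free, while free_pages_lock continues to serialize the page-table updates when any region is opened or closed.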