X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=a73bdca8b3aa375765bd3aa60adc8163f44d7e55;hb=661bcd7d3e0bdc1966f3878fa71d322ffd5927a4;hp=2cf04bbafc82409efdc375a068d6eb6301898679;hpb=6b1b11a6c51e1c29aee947f1fde7f91651ca3763;p=sbcl.git

diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 2cf04bb..a73bdca 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -168,7 +168,7 @@ boolean gc_active_p = 0;
 static boolean conservative_stack = 1;
 
 /* An array of page structures is allocated on gc initialization.
- * This helps quickly map between an address its page structure.
+ * This helps to quickly map between an address and its page structure.
  * page_table_pages is set from the size of the dynamic space. */
 page_index_t page_table_pages;
 struct page *page_table;
@@ -910,15 +910,15 @@ struct new_area {
     size_t size;
 };
 static struct new_area (*new_areas)[];
-static long new_areas_index;
-long max_new_areas;
+static size_t new_areas_index;
+size_t max_new_areas;
 
 /* Add a new area to new_areas. */
 static void
 add_new_area(page_index_t first_page, size_t offset, size_t size)
 {
-    unsigned long new_area_start,c;
-    long i;
+    size_t new_area_start, c;
+    ssize_t i;
 
     /* Ignore if full. */
     if (new_areas_index >= NUM_NEW_AREAS)
@@ -942,7 +942,7 @@ add_new_area(page_index_t first_page, size_t offset, size_t size)
     /* Search backwards for a prior area that this follows from. If
        found this will save adding a new area. */
     for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
-        unsigned long area_end =
+        size_t area_end =
             npage_bytes((*new_areas)[i].page)
             + (*new_areas)[i].offset
             + (*new_areas)[i].size;
@@ -1589,23 +1589,7 @@ copy_large_unboxed_object(lispobj object, long nwords)
 lispobj
 copy_unboxed_object(lispobj object, long nwords)
 {
-    long tag;
-    lispobj *new;
-
-    gc_assert(is_lisp_pointer(object));
-    gc_assert(from_space_p(object));
-    gc_assert((nwords & 0x01) == 0);
-
-    /* Get tag of object. */
-    tag = lowtag_of(object);
-
-    /* Allocate space. */
-    new = gc_quick_alloc_unboxed(nwords*N_WORD_BYTES);
-
-    memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
-
-    /* Return Lisp pointer of new object. */
-    return ((lispobj) new) | tag;
+    return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
 }
 
 
@@ -1627,13 +1611,13 @@ static lispobj trans_boxed(lispobj object);
  * Currently only absolute fixups to the constant vector, or to the
  * code area are checked. */
 void
-sniff_code_object(struct code *code, unsigned long displacement)
+sniff_code_object(struct code *code, os_vm_size_t displacement)
 {
 #ifdef LISP_FEATURE_X86
     long nheader_words, ncode_words, nwords;
-    void *p;
-    void *constants_start_addr = NULL, *constants_end_addr;
-    void *code_start_addr, *code_end_addr;
+    os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
+    os_vm_address_t code_start_addr, code_end_addr;
+    os_vm_address_t code_addr = (os_vm_address_t)code;
     int fixup_found = 0;
 
     if (!check_code_fixups)
@@ -1645,10 +1629,10 @@ sniff_code_object(struct code *code, unsigned long displacement)
     nheader_words = HeaderValue(*(lispobj *)code);
     nwords = ncode_words + nheader_words;
 
-    constants_start_addr = (void *)code + 5*N_WORD_BYTES;
-    constants_end_addr = (void *)code + nheader_words*N_WORD_BYTES;
-    code_start_addr = (void *)code + nheader_words*N_WORD_BYTES;
-    code_end_addr = (void *)code + nwords*N_WORD_BYTES;
+    constants_start_addr = code_addr + 5*N_WORD_BYTES;
+    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_end_addr = code_addr + nwords*N_WORD_BYTES;
 
     /* Work through the unboxed code. */
     for (p = code_start_addr; p < code_end_addr; p++) {
@@ -1665,8 +1649,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
         /* Check for code references. */
         /* Check for a 32 bit word that looks like an absolute
            reference to within the code adea of the code object. */
-        if ((data >= (code_start_addr-displacement))
-            && (data < (code_end_addr-displacement))) {
+        if ((data >= (void*)(code_start_addr-displacement))
+            && (data < (void*)(code_end_addr-displacement))) {
             /* function header */
             if ((d4 == 0x5e)
                 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
@@ -1708,8 +1692,8 @@ sniff_code_object(struct code *code, unsigned long displacement)
         /* Check for a 32 bit word that looks like an absolute
            reference to within the constant vector. Constant references
            will be aligned. */
-        if ((data >= (constants_start_addr-displacement))
-            && (data < (constants_end_addr-displacement))
+        if ((data >= (void*)(constants_start_addr-displacement))
+            && (data < (void*)(constants_end_addr-displacement))
             && (((unsigned)data & 0x3) == 0)) {
             /* Mov eax,m32 */
             if (d1 == 0xa1) {
@@ -1807,11 +1791,12 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
 /* x86-64 uses pc-relative addressing instead of this kludge */
 #ifndef LISP_FEATURE_X86_64
     long nheader_words, ncode_words, nwords;
-    void *constants_start_addr, *constants_end_addr;
-    void *code_start_addr, *code_end_addr;
+    os_vm_address_t constants_start_addr, constants_end_addr;
+    os_vm_address_t code_start_addr, code_end_addr;
+    os_vm_address_t code_addr = (os_vm_address_t)new_code;
+    os_vm_address_t old_addr = (os_vm_address_t)old_code;
+    os_vm_size_t displacement = code_addr - old_addr;
     lispobj fixups = NIL;
-    unsigned long displacement =
-        (unsigned long)new_code - (unsigned long)old_code;
     struct vector *fixups_vector;
 
     ncode_words = fixnum_value(new_code->code_size);
@@ -1820,10 +1805,10 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
     /* FSHOW((stderr,
              "/compiled code object at %x: header words = %d, code words = %d\n",
              new_code, nheader_words, ncode_words)); */
-    constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
-    constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
-    code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
-    code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
+    constants_start_addr = code_addr + 5*N_WORD_BYTES;
+    constants_end_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_start_addr = code_addr + nheader_words*N_WORD_BYTES;
+    code_end_addr = code_addr + nwords*N_WORD_BYTES;
 
     /* FSHOW((stderr,
              "/const start = %x, end = %x\n",
@@ -1871,24 +1856,22 @@ gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
         long length = fixnum_value(fixups_vector->length);
         long i;
         for (i = 0; i < length; i++) {
-            unsigned long offset = fixups_vector->data[i];
+            long offset = fixups_vector->data[i];
             /* Now check the current value of offset. */
-            unsigned long old_value =
-                *(unsigned long *)((unsigned long)code_start_addr + offset);
+            os_vm_address_t old_value = *(os_vm_address_t *)(code_start_addr + offset);
 
             /* If it's within the old_code object then it must be an
              * absolute fixup (relative ones are not saved) */
-            if ((old_value >= (unsigned long)old_code)
-                && (old_value < ((unsigned long)old_code
-                                 + nwords*N_WORD_BYTES)))
+            if ((old_value >= old_addr)
+                && (old_value < (old_addr + nwords*N_WORD_BYTES)))
                 /* So add the dispacement. */
-                *(unsigned long *)((unsigned long)code_start_addr + offset) =
+                *(os_vm_address_t *)(code_start_addr + offset) =
                     old_value + displacement;
             else
                 /* It is outside the old code object so it must be a
                  * relative fixup (absolute fixups are not saved). So
                  * subtract the displacement. */
-                *(unsigned long *)((unsigned long)code_start_addr + offset) =
+                *(os_vm_address_t *)(code_start_addr + offset) =
                     old_value - displacement;
         }
     } else {
@@ -2623,15 +2606,15 @@ scavenge_newspace_generation_one_scan(generation_index_t generation)
 static void
 scavenge_newspace_generation(generation_index_t generation)
 {
-    long i;
+    size_t i;
 
     /* the new_areas array currently being written to by gc_alloc() */
     struct new_area (*current_new_areas)[] = &new_areas_1;
-    long current_new_areas_index;
+    size_t current_new_areas_index;
 
     /* the new_areas created by the previous scavenge cycle */
     struct new_area (*previous_new_areas)[] = NULL;
-    long previous_new_areas_index;
+    size_t previous_new_areas_index;
 
     /* Flush the current regions updating the tables. */
     gc_alloc_update_all_page_tables();
@@ -2865,8 +2848,8 @@ print_ptr(lispobj *addr)
     page_index_t pi1 = find_page_index((void*)addr);
 
     if (pi1 != -1)
-        fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
-                (unsigned long) addr,
+        fprintf(stderr," %p: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
+                addr,
                 pi1,
                 page_table[pi1].allocated,
                 page_table[pi1].gen,
@@ -3865,7 +3848,7 @@ collect_garbage(generation_index_t last_gen)
 
     /* Update auto_gc_trigger. Make sure we trigger the next GC before
      * running out of heap! */
-    if (bytes_consed_between_gcs >= dynamic_space_size - bytes_allocated)
+    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
         auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
     else
         auto_gc_trigger = bytes_allocated + (dynamic_space_size - bytes_allocated)/2;
@@ -4055,7 +4038,8 @@ gc_init(void)
         generations[i].num_gc = 0;
         generations[i].cum_sum_bytes_allocated = 0;
         /* the tune-able parameters */
-        generations[i].bytes_consed_between_gc = bytes_consed_between_gcs;
+        generations[i].bytes_consed_between_gc
+            = bytes_consed_between_gcs/(os_vm_size_t)HIGHEST_NORMAL_GENERATION;
         generations[i].number_of_gcs_before_promotion = 1;
         generations[i].minimum_age_before_gc = 0.75;
     }
@@ -4143,6 +4127,7 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
 #endif
     void *new_obj;
     void *new_free_pointer;
+    os_vm_size_t trigger_bytes = 0;
 
     gc_assert(nbytes>0);
 
@@ -4164,10 +4149,19 @@ general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *reg
         return(new_obj); /* yup */
     }
 
+    /* We don't want to count nbytes against auto_gc_trigger unless we
+     * have to: it speeds up the tenuring of objects and slows down
+     * allocation. However, unless we do so when allocating _very_
+     * large objects we are in danger of exhausting the heap without
+     * running sufficient GCs.
+     */
+    if (nbytes >= bytes_consed_between_gcs)
+        trigger_bytes = nbytes;
+
     /* we have to go the long way around, it seems. Check whether we
      * should GC in the near future */
-    if (auto_gc_trigger && bytes_allocated+nbytes > auto_gc_trigger) {
+    if (auto_gc_trigger && (bytes_allocated+trigger_bytes > auto_gc_trigger)) {
         /* Don't flood the system with interrupts if the need to gc is
          * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
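The last two hunks are the behavioural part of this change: collect_garbage() now arms auto_gc_trigger a full bytes_consed_between_gcs ahead only while that still fits in the remaining heap, and general_alloc_internal() counts a single allocation against the trigger immediately once it is at least bytes_consed_between_gcs bytes. The standalone sketch below is not part of the patch; it uses simplified uint64_t stand-ins for the real globals and made-up example numbers purely to walk through that arithmetic.

/* Sketch of the trigger arithmetic from the last two hunks above
 * (simplified types and invented example values, not SBCL code). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t dynamic_space_size       = 512ULL << 20;  /* 512 MB heap      */
    uint64_t bytes_allocated          = 400ULL << 20;  /* already in use   */
    uint64_t bytes_consed_between_gcs =  50ULL << 20;  /* desired interval */

    /* collect_garbage(): use the full interval only if it still fits in
     * the remaining heap, otherwise trigger halfway to exhaustion. */
    uint64_t auto_gc_trigger;
    if (bytes_consed_between_gcs <= (dynamic_space_size - bytes_allocated))
        auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
    else
        auto_gc_trigger = bytes_allocated
            + (dynamic_space_size - bytes_allocated)/2;

    /* general_alloc_internal(): a request at least as large as the GC
     * interval counts against the trigger right away; smaller ones don't. */
    uint64_t nbytes = 64ULL << 20;                      /* one 64 MB request */
    uint64_t trigger_bytes = (nbytes >= bytes_consed_between_gcs) ? nbytes : 0;

    printf("trigger at %llu MB, GC before servicing this request: %s\n",
           (unsigned long long)(auto_gc_trigger >> 20),
           (bytes_allocated + trigger_bytes > auto_gc_trigger) ? "yes" : "no");
    return 0;
}

With the numbers above the trigger sits at 450 MB, so the 64 MB request forces a GC first, while an ordinary small allocation (trigger_bytes stays 0) would not.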