X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2Fruntime%2Fgencgc.c;h=6b2f6f7395ab7a1aa27f2ab72e5fb0b9c8531e51;hb=5e92e9ed61903658015c2a75c79a32ad41dbd29d;hp=a35586b0ea7bf3c9b556fe173b367d35a43ab3ac;hpb=79cc569a97e444389350ea3f5b1017374fe16bec;p=sbcl.git

diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index a35586b..6b2f6f7 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -65,7 +65,7 @@ static void gencgc_pickup_dynamic(void);
 boolean enable_page_protection = 1;
 
 /* Should we unmap a page and re-mmap it to have it zero filled? */
-#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
+#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__sun)
 /* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
  * so don't unmap there.
  *
@@ -73,6 +73,8 @@ boolean enable_page_protection = 1;
  * old version of FreeBSD (pre-4.0), so this might no longer be true.
  * OTOH, if it is true, this behavior might exist on OpenBSD too, so
  * for now we don't unmap there either. -- WHN 2001-04-07 */
+/* Apparently this flag is required to be 0 for SunOS/x86, as there
+ * are reports of heap corruption otherwise. */
 boolean gencgc_unmap_zero = 0;
 #else
 boolean gencgc_unmap_zero = 1;
@@ -1405,7 +1407,7 @@ sniff_code_object(struct code *code, unsigned displacement)
 {
     long nheader_words, ncode_words, nwords;
     void *p;
-    void *constants_start_addr, *constants_end_addr;
+    void *constants_start_addr = NULL, *constants_end_addr;
     void *code_start_addr, *code_end_addr;
     int fixup_found = 0;
 
@@ -1681,7 +1683,8 @@ trans_boxed_large(lispobj object)
 
     return copy_large_object(object, length);
 }
-
+/* Doesn't seem to be used, delete it after the grace period. */
+#if 0
 static lispobj
 trans_unboxed_large(lispobj object)
 {
@@ -1697,6 +1700,7 @@ trans_unboxed_large(lispobj object)
 
     return copy_large_unboxed_object(object, length);
 }
+#endif
 
 
 /*
@@ -1707,7 +1711,7 @@ trans_unboxed_large(lispobj object)
 /* FIXME: What does this mean? */
 int gencgc_hash = 1;
 
-static int
+static long
 scav_vector(lispobj *where, lispobj object)
 {
     unsigned long kv_length;
@@ -1744,7 +1748,7 @@ scav_vector(lispobj *where, lispobj object)
     if (!is_lisp_pointer(where[2])) {
        lose("no pointer at %x in hash table", where[2]);
     }
-    hash_table = (lispobj *)native_pointer(where[2]);
+    hash_table = (struct hash_table *)native_pointer(where[2]);
     /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
     if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
        lose("hash table not instance (%x at %x)",
@@ -1768,7 +1772,8 @@
 
     /* Scavenge hash table, which will fix the positions of the other
      * needed objects. */
-    scavenge(hash_table, sizeof(struct hash_table) / sizeof(lispobj));
+    scavenge((lispobj *)hash_table,
+             sizeof(struct hash_table) / sizeof(lispobj));
 
     /* Cross-check the kv_vector. */
     if (where != (lispobj *)native_pointer(hash_table->table)) {
@@ -1785,7 +1790,8 @@
     if (is_lisp_pointer(index_vector_obj) &&
        (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
         SIMPLE_ARRAY_WORD_WIDETAG)) {
-       index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2;
+       index_vector =
+           ((unsigned long *)native_pointer(index_vector_obj)) + 2;
        /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
        length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
        /*FSHOW((stderr, "/length = %d\n", length));*/
@@ -1801,7 +1807,7 @@
     if (is_lisp_pointer(next_vector_obj) &&
        (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
         SIMPLE_ARRAY_WORD_WIDETAG)) {
-       next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2;
+       next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2;
        /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
        next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
        /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
@@ -1817,7 +1823,8 @@
     if (is_lisp_pointer(hash_vector_obj) &&
        (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
         SIMPLE_ARRAY_WORD_WIDETAG)){
-       hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2;
+       hash_vector =
+           ((unsigned long *)native_pointer(hash_vector_obj)) + 2;
        /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
        gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
                  == next_vector_length);
@@ -1859,7 +1866,8 @@
 #endif
 
            if ((old_index != new_index) &&
-               ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
+               ((!hash_vector) ||
+                (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) &&
               ((new_key != empty_symbol) ||
                (kv_vector[2*i] != empty_symbol))) {
 
@@ -2127,6 +2135,7 @@ possibly_valid_dynamic_space_pointer(lispobj *pointer)
     }
     switch (widetag_of(start_addr[0])) {
     case UNBOUND_MARKER_WIDETAG:
+    case NO_TLS_VALUE_MARKER_WIDETAG:
     case CHARACTER_WIDETAG:
 #if N_WORD_BITS == 64
     case SINGLE_FLOAT_WIDETAG:
@@ -3573,14 +3582,17 @@ garbage_collect_generation(int generation, int raise)
 #ifdef LISP_FEATURE_SB_THREAD
        long i,free;
        if(th==arch_os_get_current_thread()) {
-           esp = (void **) &raise;
+           /* Somebody is going to burn in hell for this, but casting
+            * it in two steps shuts gcc up about strict aliasing. */
+           esp = (void **)((void *)&raise);
        } else {
            void **esp1;
            free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
            for(i=free-1;i>=0;i--) {
                os_context_t *c=th->interrupt_contexts[i];
                esp1 = (void **) *os_context_register_addr(c,reg_SP);
-               if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
+               if (esp1>=(void **)th->control_stack_start &&
+                   esp1<(void **)th->control_stack_end) {
                    if(esp1<esp) esp=esp1;
                    for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
                        preserve_pointer(*ptr);
@@ -3589,7 +3601,7 @@ garbage_collect_generation(int generation, int raise)
            }
        }
 #else
-       esp = (void **) &raise;
+       esp = (void **)((void *)&raise);
 #endif
        for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
            preserve_pointer(*ptr);
@@ -3610,28 +3622,29 @@
 
     /* Scavenge the Lisp functions of the interrupt handlers, taking
      * care to avoid SIG_DFL and SIG_IGN. */
-    for_each_thread(th) {
-        struct interrupt_data *data=th->interrupt_data;
     for (i = 0; i < NSIG; i++) {
-        union interrupt_handler handler = data->interrupt_handlers[i];
+        union interrupt_handler handler = interrupt_handlers[i];
        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
-            scavenge((lispobj *)(data->interrupt_handlers + i), 1);
-        }
+            scavenge((lispobj *)(interrupt_handlers + i), 1);
        }
     }
+    /* Scavenge the function list for INTERRUPT-THREAD. */
+    for_each_thread(th) {
+        scavenge(&th->interrupt_fun,1);
+    }
     /* Scavenge the binding stacks. */
-    {
-        struct thread *th;
-        for_each_thread(th) {
-            long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
-                th->binding_stack_start;
-            scavenge((lispobj *) th->binding_stack_start,len);
+    {
+        struct thread *th;
+        for_each_thread(th) {
+            long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+                th->binding_stack_start;
+            scavenge((lispobj *) th->binding_stack_start,len);
 #ifdef LISP_FEATURE_SB_THREAD
-            /* do the tls as well */
-            len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
-                (sizeof (struct thread))/(sizeof (lispobj));
-            scavenge((lispobj *) (th+1),len);
+           /* do the tls as well */
+           len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+               (sizeof (struct thread))/(sizeof (lispobj));
+           scavenge((lispobj *) (th+1),len);
 #endif
        }
     }
@@ -4060,7 +4073,7 @@ gencgc_pickup_dynamic(void)
            page_table[page].first_object_offset =
                (void *)prev - page_address(page);
        page++;
-    } while (page_address(page) < alloc_ptr);
+    } while ((long)page_address(page) < alloc_ptr);
 
     generations[0].bytes_allocated = PAGE_BYTES*page;
     bytes_allocated = PAGE_BYTES*page;
@@ -4092,10 +4105,10 @@ gc_initialize_pointers(void)
 char *
 alloc(long nbytes)
 {
-    struct thread *th=arch_os_get_current_thread();
+    struct thread *thread=arch_os_get_current_thread();
     struct alloc_region *region=
 #ifdef LISP_FEATURE_SB_THREAD
-        th ? &(th->alloc_region) : &boxed_region;
+        thread ? &(thread->alloc_region) : &boxed_region;
 #else
        &boxed_region;
 #endif
@@ -4137,35 +4150,16 @@ alloc(long nbytes)
      * we should GC in the near future */
     if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
-        struct thread *thread=arch_os_get_current_thread();
+        gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread)));
        /* Don't flood the system with interrupts if the need to gc is
         * already noted. This can happen for example when SUB-GC
         * allocates or after a gc triggered in a WITHOUT-GCING. */
-        if (SymbolValue(NEED_TO_COLLECT_GARBAGE,thread) == NIL) {
+        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
-             * section. We only do this if there wasn't a pending
-             * handler already, in case it was a gc. If it wasn't a
-             * GC, the next allocation will get us back to this point
-             * anyway, so no harm done
-             */
-            struct interrupt_data *data=th->interrupt_data;
-            sigset_t new_mask,old_mask;
-            sigemptyset(&new_mask);
-            sigaddset_blockable(&new_mask);
-            thread_sigmask(SIG_BLOCK,&new_mask,&old_mask);
-
-            if(!data->pending_handler) {
-                if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0))
-                    lose("Not in atomic: %d.\n",
-                         SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread));
-                /* Leave the signals blocked just as if it was
-                 * deferred the normal way and set the
-                 * pending_mask. */
-                sigcopyset(&(data->pending_mask),&old_mask);
-                SetSymbolValue(NEED_TO_COLLECT_GARBAGE,T,thread);
-            } else {
-                thread_sigmask(SIG_SETMASK,&old_mask,0);
-            }
+             * section */
+            SetSymbolValue(GC_PENDING,T,thread);
+            if (SymbolValue(GC_INHIBIT,thread) == NIL)
+                arch_set_pseudo_atomic_interrupted(0);
        }
     }
     new_obj = gc_alloc_with_region(nbytes,0,region,0);
@@ -4255,4 +4249,3 @@ gc_set_region_empty(struct alloc_region *region)
     region->free_pointer = page_address(0);
     region->end_addr = page_address(0);
 }
-
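
The final alloc() hunk above replaces the old NEED_TO_COLLECT_GARBAGE machinery (blocking signals and deferring a GC handler) with two thread-local symbols: GC_PENDING records that a collection is wanted, GC_INHIBIT (bound by WITHOUT-GCING) suppresses it, and arch_set_pseudo_atomic_interrupted() arranges for a trap to fire when the allocator leaves its pseudo-atomic section. Below is a minimal standalone sketch of that control flow; everything in it (toy_alloc, end_pseudo_atomic, the flag variables, the byte counts) is hypothetical scaffolding for illustration, not the SBCL runtime API.

/* Toy model of the GC_PENDING / GC_INHIBIT protocol from the final
 * hunk.  Only the control flow mirrors the diff; all names here are
 * illustrative. */
#include <stdio.h>
#include <stdbool.h>

static long bytes_allocated = 0;
static const long auto_gc_trigger = 100;

static bool gc_pending = false;     /* models the GC_PENDING symbol */
static bool gc_inhibit = false;     /* models GC_INHIBIT (WITHOUT-GCING) */
static bool pa_interrupted = false; /* models pseudo-atomic-interrupted */

static void collect_garbage(void)
{
    printf("GC runs at %ld bytes allocated\n", bytes_allocated);
    bytes_allocated = 0;
    gc_pending = false;
}

/* Allocation happens inside a pseudo-atomic section, so we may not
 * collect here; we only note that a collection is wanted. */
static void toy_alloc(long nbytes)
{
    bytes_allocated += nbytes;
    /* Don't re-arm if the need to GC is already noted. */
    if (bytes_allocated > auto_gc_trigger && !gc_pending) {
        gc_pending = true;         /* SetSymbolValue(GC_PENDING,T,thread) */
        if (!gc_inhibit)
            pa_interrupted = true; /* arch_set_pseudo_atomic_interrupted */
    }
}

/* Leaving the pseudo-atomic section: if the "interrupted" flag was
 * set, service the pending collection now. */
static void end_pseudo_atomic(void)
{
    if (pa_interrupted) {
        pa_interrupted = false;
        if (gc_pending && !gc_inhibit)
            collect_garbage();
    }
}

int main(void)
{
    toy_alloc(60);  end_pseudo_atomic();  /* below trigger: nothing */
    toy_alloc(60);  end_pseudo_atomic();  /* crosses trigger: GC runs */

    gc_inhibit = true;                    /* as if inside WITHOUT-GCING */
    toy_alloc(150); end_pseudo_atomic();  /* GC noted but deferred */
    printf("pending while inhibited: %d\n", (int)gc_pending);
    return 0;
}

When GC_INHIBIT is later unbound, code outside this diff notices the still-set GC_PENDING and runs the deferred collection; the sketch stops short of modeling that step.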