X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=b8df9f44e695fca67f72343bc963272bae1bd23c;hb=70227794f1eefb567c13ec04f7bd6d3b6794aa29;hp=320afda99ae1f80509731461d3c7a1d951542fea;hpb=426bde0954ef91387b8ab0d4528fad9ec02fa24c;p=sbcl.git

diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 320afda..b8df9f4 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -65,7 +65,7 @@ static void gencgc_pickup_dynamic(void);
 boolean enable_page_protection = 1;
 
 /* Should we unmap a page and re-mmap it to have it zero filled? */
-#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
+#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__sun)
 /* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
  * so don't unmap there.
  *
@@ -73,6 +73,8 @@ boolean enable_page_protection = 1;
  * old version of FreeBSD (pre-4.0), so this might no longer be true.
  * OTOH, if it is true, this behavior might exist on OpenBSD too, so
  * for now we don't unmap there either. -- WHN 2001-04-07 */
+/* Apparently this flag is required to be 0 for SunOS/x86, as there
+ * are reports of heap corruption otherwise. */
 boolean gencgc_unmap_zero = 0;
 #else
 boolean gencgc_unmap_zero = 1;
@@ -1864,7 +1866,8 @@ scav_vector(lispobj *where, lispobj object)
 #endif
 
             if ((old_index != new_index) &&
-                ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
+                ((!hash_vector) ||
+                 (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) &&
                 ((new_key != empty_symbol) ||
                  (kv_vector[2*i] != empty_symbol))) {
 
@@ -3620,26 +3623,30 @@ garbage_collect_generation(int generation, int raise)
      * care to avoid SIG_DFL and SIG_IGN. */
     for_each_thread(th) {
         struct interrupt_data *data=th->interrupt_data;
-    for (i = 0; i < NSIG; i++) {
+        for (i = 0; i < NSIG; i++) {
             union interrupt_handler handler = data->interrupt_handlers[i];
-        if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
-            !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
+            if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
+                !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
                 scavenge((lispobj *)(data->interrupt_handlers + i), 1);
             }
         }
     }
+    /* Scavenge the function list for INTERRUPT-THREAD. */
+    for_each_thread(th) {
+        scavenge(&th->interrupt_fun,1);
+    }
     /* Scavenge the binding stacks. */
-  {
-      struct thread *th;
-      for_each_thread(th) {
-          long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
-              th->binding_stack_start;
-          scavenge((lispobj *) th->binding_stack_start,len);
+    {
+        struct thread *th;
+        for_each_thread(th) {
+            long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+                th->binding_stack_start;
+            scavenge((lispobj *) th->binding_stack_start,len);
 #ifdef LISP_FEATURE_SB_THREAD
-          /* do the tls as well */
-          len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
-              (sizeof (struct thread))/(sizeof (lispobj));
-          scavenge((lispobj *) (th+1),len);
+            /* do the tls as well */
+            len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+                (sizeof (struct thread))/(sizeof (lispobj));
+            scavenge((lispobj *) (th+1),len);
 #endif
         }
     }
@@ -4100,10 +4107,10 @@ gc_initialize_pointers(void)
 char *
 alloc(long nbytes)
 {
-    struct thread *th=arch_os_get_current_thread();
+    struct thread *thread=arch_os_get_current_thread();
     struct alloc_region *region=
 #ifdef LISP_FEATURE_SB_THREAD
-        th ? &(th->alloc_region) : &boxed_region;
+        thread ? &(thread->alloc_region) : &boxed_region;
 #else
         &boxed_region;
 #endif
@@ -4145,35 +4152,16 @@ alloc(long nbytes)
 
      * we should GC in the near future */
     if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
-        struct thread *thread=arch_os_get_current_thread();
+        gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread)));
         /* Don't flood the system with interrupts if the need to gc is
          * already noted. This can happen for example when SUB-GC
          * allocates or after a gc triggered in a WITHOUT-GCING. */
-        if (SymbolValue(NEED_TO_COLLECT_GARBAGE,thread) == NIL) {
+        if (SymbolValue(GC_PENDING,thread) == NIL) {
             /* set things up so that GC happens when we finish the PA
-             * section. We only do this if there wasn't a pending
-             * handler already, in case it was a gc. If it wasn't a
-             * GC, the next allocation will get us back to this point
-             * anyway, so no harm done
-             */
-            struct interrupt_data *data=th->interrupt_data;
-            sigset_t new_mask,old_mask;
-            sigemptyset(&new_mask);
-            sigaddset_blockable(&new_mask);
-            thread_sigmask(SIG_BLOCK,&new_mask,&old_mask);
-
-            if(!data->pending_handler) {
-                if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0))
-                    lose("Not in atomic: %d.\n",
-                         SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread));
-                /* Leave the signals blocked just as if it was
-                 * deferred the normal way and set the
-                 * pending_mask. */
-                sigcopyset(&(data->pending_mask),&old_mask);
-                SetSymbolValue(NEED_TO_COLLECT_GARBAGE,T,thread);
-            } else {
-                thread_sigmask(SIG_SETMASK,&old_mask,0);
-            }
+             * section */
+            SetSymbolValue(GC_PENDING,T,thread);
+            if (SymbolValue(GC_INHIBIT,thread) == NIL)
+                arch_set_pseudo_atomic_interrupted(0);
         }
     }
     new_obj = gc_alloc_with_region(nbytes,0,region,0);
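
The last two hunks replace the old NEED_TO_COLLECT_GARBAGE machinery, which
blocked signals and queued interrupt_maybe_gc_int as a deferred handler, with
the simpler GC_PENDING/GC_INHIBIT protocol: the allocator already runs inside
a pseudo-atomic section (hence the gc_assert on PSEUDO_ATOMIC_ATOMIC), so it
merely records that a collection is wanted and marks the section as
interrupted; the GC then runs when the section is left, unless WITHOUT-GCING
is in effect. What follows is a minimal, self-contained C sketch of that
control flow; apart from the GC_PENDING and GC_INHIBIT names it mirrors,
every identifier is a simplified stand-in for illustration, not SBCL's actual
runtime API.

/* Sketch of the deferred-GC protocol from the patch above.  All names
 * here are simplified stand-ins; they are not SBCL's real macros. */
#include <stdio.h>
#include <stdlib.h>

static int gc_pending = 0;      /* stands in for the GC_PENDING symbol */
static int gc_inhibit = 0;      /* stands in for GC_INHIBIT (WITHOUT-GCING) */
static int pa_interrupted = 0;  /* stands in for pseudo-atomic-interrupted */
static long bytes_allocated = 0;
static long auto_gc_trigger = 1024;

static void collect_garbage(void)
{
    printf("GC after %ld bytes\n", bytes_allocated);
    bytes_allocated = 0;
    gc_pending = 0;
}

/* Leaving the pseudo-atomic section: if something was deferred while we
 * were atomic, handle it now.  This mirrors the "GC happens when we
 * finish the PA section" comment in the patch. */
static void pseudo_atomic_end(void)
{
    if (pa_interrupted) {
        pa_interrupted = 0;
        if (gc_pending && !gc_inhibit)
            collect_garbage();
    }
}

static void *alloc_sketch(long nbytes)
{
    void *obj;
    /* ... the pseudo-atomic section begins here in the real allocator ... */
    bytes_allocated += nbytes;
    if (bytes_allocated > auto_gc_trigger && !gc_pending) {
        gc_pending = 1;          /* note the need; don't GC yet */
        if (!gc_inhibit)
            pa_interrupted = 1;  /* arrange for GC at section end */
    }
    obj = malloc(nbytes);        /* stands in for gc_alloc_with_region() */
    pseudo_atomic_end();
    return obj;
}

int main(void)
{
    for (int i = 0; i < 8; i++)
        free(alloc_sketch(300));
    return 0;
}

The appeal of this scheme is that the check happens exactly once, at the
pseudo-atomic exit the allocator was going to execute anyway, instead of
going through signal blocking and a deferred handler on every trigger.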