/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
+/* forward declarations */
+int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct alloc_region *alloc_region);
+void gc_set_region_empty(struct alloc_region *region);
+void gc_alloc_update_all_page_tables(void);
+static void gencgc_pickup_dynamic(void);
+boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
+
\f
/*
 * GC parameters
 */
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
-static unsigned long auto_gc_trigger = 0;
+extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
+unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
* scavenging. */
\f
/* This lock is to prevent multiple threads from simultaneously
* allocating new regions which overlap each other. Note that the
- * majority of GC is single-threaded, but alloc() may be called
- * from >1 thread at a time and must be thread-safe */
+ * majority of GC is single-threaded, but alloc() may be called from
+ * >1 thread at a time and must be thread-safe. This lock must be
+ * seized before all accesses to generations[] or to parts of
+ * page_table[] that other threads may want to see */
+
static lispobj free_pages_lock=0;
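
get_spinlock() and release_spinlock() are defined elsewhere; below is a minimal sketch of the pattern they are assumed to implement, using GCC's atomic builtins in place of SBCL's hand-written x86 exchange (my_get_spinlock/my_release_spinlock are illustrative names, not SBCL's). Note that the callers in this file pass the region pointer as the owner tag:

/* Spin until the lock word goes from 0 to our owner tag; the builtin
 * implies an acquire barrier, and the release stores 0 again. */
static void my_get_spinlock(volatile unsigned long *lock, unsigned long owner)
{
    while (__sync_lock_test_and_set(lock, owner) != 0)
        ;   /* busy-wait: another thread holds the lock */
}

static void my_release_spinlock(volatile unsigned long *lock)
{
    __sync_lock_release(lock);   /* store 0 with release semantics */
}
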
\f
/ ((double)generations[gen].bytes_allocated);
}
+void fpu_save(int *); /* defined in x86-assem.S */
+void fpu_restore(int *); /* defined in x86-assem.S */
/* The verbose argument controls how much to print: 0 for normal
* level of detail; 1 for debugging. */
static void
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
(lispobj)(((char *)heap_base) + last_free_page*4096),
0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
next_page = first_page+1;
- /* Skip if no bytes were allocated. */
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
+ /* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
page_table[next_page].allocated = FREE_PAGE;
next_page++;
}
-
+ release_spinlock(&free_pages_lock);
+ /* alloc_region is per-thread; we're OK to reset it unlocked */
gc_set_region_empty(alloc_region);
}
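
gc_set_region_empty() is only forward-declared in this patch; judging from the invariant gc_alloc_new_region asserts on entry (first_page == 0, last_page == -1, free_pointer == end_addr), a sketch of what it presumably does. The field names are the ones used in this file; the exact struct layout is an assumption:

struct alloc_region {      /* fields as used in this file; layout assumed */
    void *free_pointer;    /* next free byte in the region */
    void *end_addr;        /* one past the region's last byte */
    void *start_addr;      /* first byte handed out from this region */
    int first_page;        /* index of the first backing page */
    int last_page;         /* index of the last backing page, -1 when empty */
};

void gc_set_region_empty(struct alloc_region *region)
{
    /* restore the empty-region invariant asserted above */
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = 0;
    region->free_pointer = 0;
    region->end_addr = 0;    /* equal to free_pointer: fast path sees no room */
}
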
index ahead of the current region and bumped up here to save a
lot of re-scanning. */
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
SetSymbolValue(ALLOCATION_POINTER,
(lispobj)(((char *)heap_base) + last_free_page*4096),0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
/* Is there any possibility that pointer is a valid Lisp object
* reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing? */
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointer() */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
/* Check that the object pointed to is consistent with the pointer
* low tag.
- *
- * FIXME: It's not safe to rely on the result from this check
- * before an object is initialized. Thus, if we were interrupted
- * just as an object had been allocated but not initialized, the
- * GC relying on this result could bogusly reclaim the memory.
- * However, we can't really afford to do without this check. So
- * we should make it safe somehow.
- * (1) Perhaps just review the code to make sure
- * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
- * thing is wrapped around critical sections where allocated
- * memory type bits haven't been set.
- * (2) Perhaps find some other hack to protect against this, e.g.
- * recording the result of the last call to allocate-lisp-memory,
- * and returning true from this function when *pointer is
- * a reference to that result.
- *
- * (surely pseudo-atomic is supposed to be used for exactly this?)
*/
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_STRING_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case COMPLEX_ARRAY_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
case LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
boxed = BOXED_PAGE;
break;
case BIGNUM_WIDETAG:
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
* (or, as a special case which also requires dont_move, a return
* address referring to something in a CodeObject). This is
* expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreter as
+ * probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
if (!(possibly_valid_dynamic_space_pointer(addr)))
return;
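
possibly_valid_dynamic_space_pointer() is the filter behind this call: a stack word only pins a page when its low tag bits and the header of the object it points at agree. A reduced, self-contained sketch of that screen, assuming x86's three lowtag bits; the MY_* constants and the helper name are illustrative stand-ins for SBCL's generated ones:

#define MY_LOWTAG_MASK          7   /* low 3 bits on x86 */
#define MY_LIST_POINTER_LOWTAG  3   /* illustrative values */
#define MY_FUN_POINTER_LOWTAG   5
#define MY_OTHER_POINTER_LOWTAG 7

static int plausibly_pointer_like(unsigned long word)
{
    switch (word & MY_LOWTAG_MASK) {
    case MY_LIST_POINTER_LOWTAG:
    case MY_FUN_POINTER_LOWTAG:
    case MY_OTHER_POINTER_LOWTAG:
        /* Pointer-tagged: the real check then verifies that the object
         * at (word & ~MY_LOWTAG_MASK) carries a header widetag consistent
         * with this tag (hence the long case lists above). Instance
         * pointers are screened the same way. */
        return 1;
    default:
        /* fixnums and random untagged bits never pin a page */
        return 0;
    }
}
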
gc_assert(page_table[page].allocated != FREE_PAGE);
gc_assert(page_table[page].bytes_used != 0);
- /* Skip if it's already write-protected or an unboxed page. */
+ /* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
+ || page_table[page].dont_move
|| (page_table[page].allocated & UNBOXED_PAGE))
return (0);
case RATIO_WIDETAG:
case COMPLEX_WIDETAG:
case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_STRING_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case COMPLEX_ARRAY_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated == BOXED_PAGE)
&& (page_table[i].bytes_used != 0)
+ && !page_table[i].dont_move
&& (page_table[i].gen == generation)) {
void *page_start;
/* Before any pointers are preserved, the dont_move flags on the
* pages need to be cleared. */
for (i = 0; i < last_free_page; i++)
- page_table[i].dont_move = 0;
+ if(page_table[i].gen==from_space)
+ page_table[i].dont_move = 0;
/* Un-write-protect the old-space pages. This is essential for the
* promoted pages as they may contain pointers into the old-space
/* Scavenge the stacks' conservative roots. */
for_each_thread(th) {
void **ptr;
+ void **esp= (void **) &raise;
#ifdef LISP_FEATURE_SB_THREAD
- struct user_regs_struct regs;
- if(ptrace(PTRACE_GETREGS,th->pid,0,&regs)){
- /* probably doesn't exist any more. */
- fprintf(stderr,"child pid %d, %s\n",th->pid,strerror(errno));
- perror("PTRACE_GETREGS");
+ if(th!=arch_os_get_current_thread()) {
+ os_context_t *last_context=get_interrupt_context_for_thread(th);
+ esp = (void **)*os_context_register_addr(last_context,reg_ESP);
}
- preserve_pointer(regs.ebx);
- preserve_pointer(regs.ecx);
- preserve_pointer(regs.edx);
- preserve_pointer(regs.esi);
- preserve_pointer(regs.edi);
- preserve_pointer(regs.ebp);
- preserve_pointer(regs.eax);
-#endif
- for (ptr = th->control_stack_end;
-#ifdef LISP_FEATURE_SB_THREAD
- ptr > regs.esp;
-#else
- ptr > (void **)&raise;
#endif
- ptr--) {
+ for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
preserve_pointer(*ptr);
}
}
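
The loop above is the whole of conservative stack scanning: every word between the top of the control stack and the thread's current stack pointer is treated as a potential root. A self-contained sketch of the same walk (the preserve callback stands in for preserve_pointer() above); for the current thread the stack pointer is approximated with the address of a local, which is what the &raise trick does:

/* Conservatively scan a downward-growing stack: hand every word in
 * [current_sp, stack_top) to the pinning routine, which is expected
 * to reject implausible values. */
static void scan_stack_conservatively(void **stack_top,    /* exclusive */
                                      void **current_sp,   /* inclusive */
                                      void (*preserve)(void *))
{
    void **ptr;
    for (ptr = stack_top - 1; ptr >= current_sp; ptr--)
        preserve(*ptr);
}
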
gc_alloc_generation = 0;
update_x86_dynamic_space_free_pointer();
-
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ if(gencgc_verbose)
+ fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+ auto_gc_trigger);
SHOW("returning from collect_garbage");
}
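
With this change the trigger is re-armed after every collection instead of being doubled when overrun: the next GC is requested once allocation has grown by bytes_consed_between_gcs past the post-GC level. A sketch of that lifecycle, with names shortened for illustration:

static unsigned long bytes_alloc;  /* stands in for bytes_allocated */
static unsigned long between_gcs;  /* stands in for bytes_consed_between_gcs */
static unsigned long trigger;      /* stands in for auto_gc_trigger */

static void rearm_after_gc(void)   /* the tail of collect_garbage above */
{
    trigger = bytes_alloc + between_gcs;
}

static int gc_wanted(void)         /* tested in alloc()'s slow path below */
{
    return trigger != 0 && bytes_alloc > trigger;
}
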
\f
-extern boolean maybe_gc_pending ;
/* alloc(..) is the external interface for memory allocation. It
* allocates to generation 0. It is not called from within the garbage
* collector as it is only external uses that need the check for heap
/* there are a few places in the C code that allocate data in the
* heap before Lisp starts. This is before interrupts are enabled,
* so we don't need to check for pseudo-atomic */
- gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
-
+#ifdef LISP_FEATURE_SB_THREAD
+ if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
+ register u32 fs;
+ fprintf(stderr, "fatal error in thread 0x%x, pid=%d\n",
+ th,getpid());
+ __asm__("movl %fs,%0" : "=r" (fs) : );
+ fprintf(stderr, "fs is %x, th->tls_cookie=%x (should be identical)\n",
+ debug_get_fs(),th->tls_cookie);
+ lose("If you see this message before 2003.12.01, mail details to sbcl-devel\n");
+ }
+#else
+ gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
+#endif
+
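
The assertion (or, with threads, the fatal diagnostic) above enforces that callers allocate only inside a pseudo-atomic section, so a signal can never observe a half-initialized object. A sketch of the protocol this relies on, with the per-thread Lisp symbols PSEUDO_ATOMIC_ATOMIC and PSEUDO_ATOMIC_INTERRUPTED reduced to plain flags for illustration; do_pending_interrupt() (declared at the top of this file) is what actually runs the deferred work:

void do_pending_interrupt(void);     /* declared at the top of this file */

static volatile int pa_atomic;       /* stand-in for PSEUDO_ATOMIC_ATOMIC */
static volatile int pa_interrupted;  /* stand-in for PSEUDO_ATOMIC_INTERRUPTED */

static void pseudo_atomic_begin(void) { pa_atomic = 1; }

static void pseudo_atomic_end(void)
{
    pa_atomic = 0;
    if (pa_interrupted) {
        /* a signal or GC request arrived mid-section and was deferred;
         * run it now that the new object is fully initialized */
        pa_interrupted = 0;
        do_pending_interrupt();
    }
}
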
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
if (new_free_pointer <= region->end_addr) {
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- auto_gc_trigger *= 2;
/* set things up so that GC happens when we finish the PA
* section. */
- maybe_gc_pending=1;
- SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),th);
+ struct interrupt_data *data=th->interrupt_data;
+ maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0);
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
return (new_obj);
}
\f
-/*
- * noise to manipulate the gc trigger stuff
- */
-
-void
-set_auto_gc_trigger(os_vm_size_t dynamic_usage)
-{
- auto_gc_trigger += dynamic_usage;
-}
-
-void
-clear_auto_gc_trigger(void)
-{
- auto_gc_trigger = 0;
-}
-\f
/* Find the code object for the given pc, or return NULL on failure.
*
* FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */
return 0;
} else {
-
- /* The only acceptable reason for an signal like this from the
- * heap is that the generational GC write-protected the page. */
- if (page_table[page_index].write_protected != 1) {
- lose("access failure in heap page not marked as write-protected");
+ if (page_table[page_index].write_protected) {
+ /* Unprotect the page. */
+ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page_index].write_protected_cleared = 1;
+ page_table[page_index].write_protected = 0;
+ } else {
+ /* The only acceptable reason for this signal on a heap
+ * access is that GENCGC write-protected the page.
+ * However, if two CPUs hit a wp page near-simultaneously,
+ * we had better not have the second one lose here if it
+ * does this test after the first one has already set wp=0
+ */
+ if(page_table[page_index].write_protected_cleared != 1)
+ lose("fault in heap page not marked as write-protected");
}
-
- /* Unprotect the page. */
- os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
- page_table[page_index].write_protected = 0;
- page_table[page_index].write_protected_cleared = 1;
-
/* Don't worry, we can handle it. */
return 1;
}
}
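
The rewritten handler makes the write barrier safe when two threads fault on the same protected page: the first one in unprotects the page and records write_protected_cleared, and a later arrival must find that record or the fault is genuine. A self-contained sketch of the same decision using mprotect() directly; my_page mirrors just the two page_table flags involved:

#include <sys/mman.h>

#define MY_PAGE_BYTES 4096                 /* PAGE_BYTES above */

struct my_page {
    unsigned write_protected : 1;
    unsigned write_protected_cleared : 1;
};

/* Returns 1 if the fault was the GC write barrier, 0 for a real error. */
static int handle_wp_fault(struct my_page *page, void *page_start)
{
    if (page->write_protected) {
        /* first thread in: drop the protection, remember that we did */
        if (mprotect(page_start, MY_PAGE_BYTES,
                     PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
            return 0;                      /* unexpected; treat as real fault */
        page->write_protected_cleared = 1;
        page->write_protected = 0;
        return 1;
    }
    /* late arrival in the race: acceptable only if someone just cleared it */
    return page->write_protected_cleared;
}
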
-
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
* it's not just a case of the program hitting the write barrier, and
* are about to let Lisp deal with it. It's basically just a
unhandled_sigmemoryfault()
{}
-gc_alloc_update_all_page_tables(void)
+void gc_alloc_update_all_page_tables(void)
{
/* Flush the alloc regions updating the tables. */
struct thread *th;
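
The body is cut off here; given the comment and the per-thread regions used throughout this patch, the remainder presumably closes every thread's open allocation region and then the global ones so the page table reflects all in-flight allocation. A sketch of that shape; gc_alloc_update_page_tables() and the boxed/unboxed region names are assumptions beyond what this excerpt declares:

/* sketch of the elided body, under the assumptions above */
void gc_alloc_update_all_page_tables_sketch(void)
{
    struct thread *th;
    for_each_thread(th)    /* close each thread's open region */
        gc_alloc_update_page_tables(0, &th->alloc_region);
    gc_alloc_update_page_tables(1, &unboxed_region);   /* 1 = unboxed */
    gc_alloc_update_page_tables(0, &boxed_region);
}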