#include "arch.h"
#include "gc.h"
#include "gc-internal.h"
+#include "thread.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
-#include "genesis/static-symbols.h"
-#include "genesis/symbol.h"
+
+#ifdef LISP_FEATURE_SB_THREAD
+#include <errno.h>
+#include <string.h> /* strerror() for the PTRACE_GETREGS error path */
+#include <unistd.h> /* getpid() */
+#include <sys/ptrace.h>
+#include <linux/user.h> /* threading is presently linux-only */
+#endif
+
/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
-static unsigned long auto_gc_trigger = 0;
+extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
+unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
* scavenging. */
* integrated with the Lisp code. */
static int last_free_page;
\f
+/* This lock is to prevent multiple threads from simultaneously
+ * allocating new regions which overlap each other. Note that the
+ * majority of GC is single-threaded, but alloc() may be called from
+ * >1 thread at a time and must be thread-safe. This lock must be
+ * seized before any access to generations[] or to the parts of
+ * page_table[] that other threads may want to see. */
+
+static lispobj free_pages_lock=0;
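+
+/* A minimal sketch of the acquire/release pattern used below (not
+ * part of the allocator itself; get_spinlock() is assumed to
+ * busy-wait on an atomic exchange, and a plain store of 0 is assumed
+ * to suffice as the release on x86):
+ *
+ *   get_spinlock(&free_pages_lock, (lispobj)alloc_region);
+ *   ... read or update generations[] and page_table[] ...
+ *   free_pages_lock = 0;
+ */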
+
+\f
/*
* miscellaneous heap functions
*/
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
-
+ get_spinlock(&free_pages_lock,alloc_region);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
alloc_region->free_pointer = alloc_region->start_addr;
alloc_region->end_addr = alloc_region->start_addr + bytes_found;
- if (gencgc_zero_check) {
- int *p;
- for (p = (int *)alloc_region->start_addr;
- p < (int *)alloc_region->end_addr; p++) {
- if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero.", p);
- }
- }
- }
-
/* Set up the pages. */
/* The first page may have already been in use. */
alloc_region->start_addr - page_address(i);
page_table[i].allocated |= OPEN_REGION_PAGE ;
}
-
/* Bump up last_free_page. */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096));
+ (lispobj)(((char *)heap_base) + last_free_page*4096),
+ 0);
+ }
+ free_pages_lock=0;
+
+ /* we can do this after releasing free_pages_lock */
+ if (gencgc_zero_check) {
+ int *p;
+ for (p = (int *)alloc_region->start_addr;
+ p < (int *)alloc_region->end_addr; p++) {
+ if (*p != 0) {
+ /* KLUDGE: It would be nice to use %lx and explicit casts
+ * (long) in code like this, so that it is less likely to
+ * break randomly when running on a machine with different
+ * word sizes. -- WHN 19991129 */
+ lose("The new region at %x is not zero.", p);
+ }
}
}
+}
+
/* If the record_new_objects flag is 2 then all new regions created
* are recorded.
*
next_page = first_page+1;
- /* Skip if no bytes were allocated. */
+ get_spinlock(&free_pages_lock,alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
+ /* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
page_table[next_page].allocated = FREE_PAGE;
next_page++;
}
-
+ free_pages_lock=0;
+ /* alloc_region is per-thread; we're OK to do this unlocked */
gc_set_region_empty(alloc_region);
}
index ahead of the current region and bumped up here to save a
lot of re-scanning. */
+ get_spinlock(&free_pages_lock,alloc_region);
+
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_large_unboxed_start_page;
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096));
+ (lispobj)(((char *)heap_base) + last_free_page*4096),0);
}
+ free_pages_lock=0;
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
int num_pages;
int large = !alloc_region && (nbytes >= large_object_size);
+ gc_assert(free_pages_lock);
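+ /* Callers are expected to seize free_pages_lock before getting
+ * here: a simple spinlock cannot be re-seized by its holder, so we
+ * assert the lock is held rather than acquiring it. */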
/* Search for a contiguous free space of at least nbytes. If it's a
large object then align it on a page boundary by searching for a
free page. */
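+ /* In outline (a sketch, not necessarily the exact code): walk
+ * page_table[] from the generation's start page, accumulating
+ * contiguous bytes across free or suitably-typed partial pages and
+ * restarting the run when a full page interrupts it; for large
+ * objects, restart at the next wholly free page so the object
+ * begins on a page boundary. */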
search_read_only_space(lispobj *pointer)
{
lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
- lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
+ lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
if ((pointer < start) || (pointer >= end))
return NULL;
return (search_space(start, (pointer+2)-start, pointer));
search_static_space(lispobj *pointer)
{
lispobj* start = (lispobj*)STATIC_SPACE_START;
- lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
+ lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
if ((pointer < start) || (pointer >= end))
return NULL;
return (search_space(start, (pointer+2)-start, pointer));
/* Is there any possibility that pointer is a valid Lisp object
* reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing? */
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointer() */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
/* Check that the object pointed to is consistent with the pointer
* low tag.
- *
- * FIXME: It's not safe to rely on the result from this check
- * before an object is initialized. Thus, if we were interrupted
- * just as an object had been allocated but not initialized, the
- * GC relying on this result could bogusly reclaim the memory.
- * However, we can't really afford to do without this check. So
- * we should make it safe somehow.
- * (1) Perhaps just review the code to make sure
- * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
- * thing is wrapped around critical sections where allocated
- * memory type bits haven't been set.
- * (2) Perhaps find some other hack to protect against this, e.g.
- * recording the result of the last call to allocate-lisp-memory,
- * and returning true from this function when *pointer is
- * a reference to that result. */
+ */
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
/* Start_addr should be the enclosing code object, or a closure
* (or, as a special case which also requires dont_move, a return
* address referring to something in a CodeObject). This is
* expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreter as
+ * probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
if (!(possibly_valid_dynamic_space_pointer(addr)))
return;
int is_in_dynamic_space = (find_page_index((void*)start) != -1);
int is_in_readonly_space =
(READ_ONLY_SPACE_START <= (unsigned)start &&
- (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
+ (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
while (words > 0) {
size_t count = 1;
int page_index = find_page_index((void*)thing);
int to_readonly_space =
(READ_ONLY_SPACE_START <= thing &&
- thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
+ thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
int to_static_space =
(STATIC_SPACE_START <= thing &&
- thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
+ thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
/* Does it point to the dynamic space? */
if (page_index != -1) {
* to grep for all foo_size and rename the appropriate ones to
* foo_count. */
int read_only_space_size =
- (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
+ (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
- (lispobj*)READ_ONLY_SPACE_START;
int static_space_size =
- (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
+ (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
- (lispobj*)STATIC_SPACE_START;
+ struct thread *th;
+ for_each_thread(th) {
int binding_stack_size =
- (lispobj*)SymbolValue(BINDING_STACK_POINTER)
- - (lispobj*)BINDING_STACK_START;
-
+ (lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
+ - (lispobj*)th->binding_stack_start;
+ verify_space(th->binding_stack_start, binding_stack_size);
+ }
verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
verify_space((lispobj*)STATIC_SPACE_START , static_space_size);
- verify_space((lispobj*)BINDING_STACK_START , binding_stack_size);
}
static void
unsigned long bytes_freed;
unsigned long i;
unsigned long static_space_size;
-
+ struct thread *th;
gc_assert(generation <= (NUM_GENERATIONS-1));
/* The oldest generation can't be raised. */
* be un-protected anyway before unmapping later. */
unprotect_oldspace();
- /* Scavenge the stack's conservative roots. */
- {
+ /* Scavenge the stacks' conservative roots. */
+ for_each_thread(th) {
void **ptr;
- for (ptr = (void **)CONTROL_STACK_END - 1;
+#ifdef LISP_FEATURE_SB_THREAD
+ struct user_regs_struct regs;
+ if(ptrace(PTRACE_GETREGS,th->pid,0,&regs)){
+ /* the thread probably doesn't exist any more. */
+ fprintf(stderr,"child pid %d, PTRACE_GETREGS: %s\n",
+ th->pid,strerror(errno));
+ }
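+ /* the child's saved registers may hold the only live reference to
+ * an object, so each one is treated as a conservative root, just
+ * like the words on its control stack */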
+ preserve_pointer((void *)regs.ebx);
+ preserve_pointer((void *)regs.ecx);
+ preserve_pointer((void *)regs.edx);
+ preserve_pointer((void *)regs.esi);
+ preserve_pointer((void *)regs.edi);
+ preserve_pointer((void *)regs.ebp);
+ preserve_pointer((void *)regs.eax);
+#endif
+ for (ptr = (void **)th->control_stack_end;
+#ifdef LISP_FEATURE_SB_THREAD
+ ptr > (void **)regs.esp;
+#else
ptr > (void **)&raise;
+#endif
ptr--) {
preserve_pointer(*ptr);
}
/* Scavenge the Lisp functions of the interrupt handlers, taking
* care to avoid SIG_DFL and SIG_IGN. */
+ for_each_thread(th) {
+ struct interrupt_data *data=th->interrupt_data;
for (i = 0; i < NSIG; i++) {
- union interrupt_handler handler = interrupt_handlers[i];
+ union interrupt_handler handler = data->interrupt_handlers[i];
if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
!ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- scavenge((lispobj *)(interrupt_handlers + i), 1);
+ scavenge((lispobj *)(data->interrupt_handlers + i), 1);
+ }
+ }
+ }
+ /* Scavenge the binding stacks. */
+ {
+ struct thread *th;
+ for_each_thread(th) {
+ long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+ th->binding_stack_start;
+ scavenge((lispobj *) th->binding_stack_start,len);
+#ifdef LISP_FEATURE_SB_THREAD
+ /* Scavenge the TLS area as well: the thread-local storage slots
+ * sit immediately after the struct thread, so (th+1) is the first
+ * slot; FREE_TLS_INDEX counts words from the start of the block,
+ * hence subtracting the size of the struct itself. */
+ len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ (sizeof (struct thread))/(sizeof (lispobj));
+ scavenge((lispobj *) (th+1),len);
+#endif
}
}
-
- /* Scavenge the binding stack. */
- scavenge((lispobj *) BINDING_STACK_START,
- (lispobj *)SymbolValue(BINDING_STACK_POINTER) -
- (lispobj *)BINDING_STACK_START);
/* The original CMU CL code had scavenge-read-only-space code
* controlled by the Lisp-level variable
/* Scavenge static space. */
static_space_size =
- (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) -
+ (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
(lispobj *)STATIC_SPACE_START;
if (gencgc_verbose > 1) {
FSHOW((stderr,
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096));
+ (lispobj)(((char *)heap_base) + last_free_page*4096),0);
return 0; /* dummy value: return something ... */
}
gc_alloc_generation = 0;
update_x86_dynamic_space_free_pointer();
-
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ if(gencgc_verbose)
+ fprintf(stderr,"Next GC when %lu bytes have been consed\n",
+ auto_gc_trigger);
SHOW("returning from collect_garbage");
}
gc_set_region_empty(&unboxed_region);
last_free_page = 0;
- SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));
+ SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0);
if (verify_after_free_heap) {
/* Check whether purify has left any bad pointers. */
{
int page = 0;
int addr = DYNAMIC_SPACE_START;
- int alloc_ptr = SymbolValue(ALLOCATION_POINTER);
+ int alloc_ptr = SymbolValue(ALLOCATION_POINTER,0);
/* Initialize the first region. */
do {
char *
alloc(int nbytes)
{
- struct alloc_region *region= &boxed_region;
+ struct thread *th=arch_os_get_current_thread();
+ struct alloc_region *region=
+ th ? &(th->alloc_region) : &boxed_region;
void *new_obj;
void *new_free_pointer;
/* Check for alignment allocation problems. */
gc_assert((((unsigned)region->free_pointer & 0x7) == 0)
&& ((nbytes & 0x7) == 0));
- /* At this point we should either be in pseudo-atomic, or early
- * enough in cold initn that interrupts are not yet enabled anyway.
- * It would be nice to assert same.
- */
- gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC));
-
+ if(all_threads)
+ /* a few places in the C code allocate heap data before Lisp
+ * starts; that happens before interrupts are enabled, so there is
+ * no need to check for pseudo-atomic until all_threads is set */
+#ifdef LISP_FEATURE_SB_THREAD
+ if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
+ register u32 fs;
+ fprintf(stderr, "fatal error in thread 0x%lx, pid=%d\n",
+ (unsigned long)th,getpid());
+ __asm__("movl %%fs,%0" : "=r" (fs) : );
+ fprintf(stderr, "fs is %x, th->tls_cookie=%x (should be identical)\n",
+ fs,th->tls_cookie);
+ lose("If you see this message before 2003.05.01, mail details to sbcl-devel\n");
+ }
+#else
+ gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
+#endif
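+ /* For reference, a sketch of the pseudo-atomic protocol that the
+ * assertion above relies on (normally established by compiled Lisp
+ * code or assembly stubs rather than from C):
+ *
+ *   SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1), th);
+ *   result = alloc(nbytes);
+ *   SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, 0, th);
+ *   if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED, th))
+ *       ... take the deferred GC or interrupt now ...
+ */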
+
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
if (new_free_pointer <= region->end_addr) {
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- auto_gc_trigger *= 2;
/* set things up so that GC happens when we finish the PA
* section. */
maybe_gc_pending=1;
- SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1));
+ SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),th);
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
return (new_obj);
}
\f
-/*
- * noise to manipulate the gc trigger stuff
- */
-
-void
-set_auto_gc_trigger(os_vm_size_t dynamic_usage)
-{
- auto_gc_trigger += dynamic_usage;
-}
-
-void
-clear_auto_gc_trigger(void)
-{
- auto_gc_trigger = 0;
-}
-\f
/* Find the code object for the given pc, or return NULL on failure.
*
* FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */
return 0;
} else {
-
- /* The only acceptable reason for an signal like this from the
- * heap is that the generational GC write-protected the page. */
- if (page_table[page_index].write_protected != 1) {
- lose("access failure in heap page not marked as write-protected");
+ if (page_table[page_index].write_protected) {
+ /* Unprotect the page. */
+ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page_index].write_protected_cleared = 1;
+ page_table[page_index].write_protected = 0;
+ } else {
+ /* The only acceptable reason for this signal on a heap
+ * access is that GENCGC write-protected the page.
+ * However, if two CPUs hit a wp page near-simultaneously,
+ * we had better not have the second one lose here if it
+ * does this test after the first one has already set wp=0
+ */
+ if(page_table[page_index].write_protected_cleared != 1)
+ lose("fault in heap page not marked as write-protected");
+
+ /* Don't worry, we can handle it. */
+ return 1;
}
-
- /* Unprotect the page. */
- os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
- page_table[page_index].write_protected = 0;
- page_table[page_index].write_protected_cleared = 1;
-
- /* Don't worry, we can handle it. */
- return 1;
}
}
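+
+/* Sketch of the intended use from the OS-level SIGSEGV/SIGBUS
+ * handler, assuming the function above is gencgc's fault predicate
+ * (gencgc_handle_wp_violation() in SBCL): a return value of 1 means
+ * the fault was just the write barrier and the faulting instruction
+ * can be restarted now that the page is unprotected; 0 means the
+ * fault is not ours:
+ *
+ *   if (gencgc_handle_wp_violation(fault_addr))
+ *       return;
+ *   ...otherwise pass the fault on to Lisp-level error handling...
+ */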
-
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
* it's not just a case of the program hitting the write barrier, and
* are about to let Lisp deal with it. It's basically just a
gc_alloc_update_all_page_tables(void)
{
/* Flush the alloc regions updating the tables. */
+ struct thread *th;
+ for_each_thread(th)
+ gc_alloc_update_page_tables(0, &th->alloc_region);
gc_alloc_update_page_tables(1, &unboxed_region);
gc_alloc_update_page_tables(0, &boxed_region);
}