#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
+
+#ifdef LISP_FEATURE_SB_THREAD
+#include <sys/ptrace.h>
+#include <linux/user.h> /* threading is presently Linux-only */
+#endif
+
/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
+/* forward declarations */
+int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed,
+                          struct alloc_region *alloc_region);
+void gc_set_region_empty(struct alloc_region *region);
+void gc_alloc_update_all_page_tables(void);
+static void gencgc_pickup_dynamic(void);
+boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
+
\f
/*
* GC parameters
#endif
/* the minimum size (in bytes) for a large object */
-unsigned large_object_size = 4 * 4096;
+/* FIXME: Should this really be PAGE_BYTES? */
+unsigned large_object_size = 4 * PAGE_BYTES;
+
\f
/*
* debugging
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
-static unsigned long auto_gc_trigger = 0;
+extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
+unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
* scavenging. */
int new_space;
-/* FIXME: It would be nice to use this symbolic constant instead of
- * bare 4096 almost everywhere. We could also use an assertion that
- * it's equal to getpagesize(). */
-
-#define PAGE_BYTES 4096
-
/* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
* NUM_PAGES is set from the size of the dynamic space. */
inline void *
page_address(int page_num)
{
- return (heap_base + (page_num * 4096));
+ return (heap_base + (page_num * PAGE_BYTES));
}
/* Find the page index within the page_table for the given
int index = addr-heap_base;
if (index >= 0) {
- index = ((unsigned int)index)/4096;
+ index = ((unsigned int)index)/PAGE_BYTES;
if (index < NUM_PAGES)
return (index);
}
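+/* A worked example of the address<->page mapping above (assuming
+ * PAGE_BYTES == 4096, the value the old hardwired constant had): an
+ * address at heap_base+9000 gives index 9000, and 9000/4096 == 2, so
+ * it lies on page 2; page_address(2) == heap_base+8192 recovers the
+ * start of that page. The two functions are inverses up to page
+ * alignment. */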
\f
/* This lock is to prevent multiple threads from simultaneously
* allocating new regions which overlap each other. Note that the
- * majority of GC is single-threaded, but alloc() may be called
- * from >1 thread at a time and must be thread-safe */
+ * majority of GC is single-threaded, but alloc() may be called from
+ * >1 thread at a time and must be thread-safe. This lock must be
+ * seized before all accesses to generations[] or to parts of
+ * page_table[] that other threads may want to see */
+
static lispobj free_pages_lock=0;
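+/* The lock is used via the get_spinlock()/release_spinlock() pair
+ * seen throughout this file; the second argument to get_spinlock()
+ * merely identifies the holder for debugging. A minimal sketch of
+ * the usage convention (not a new API):
+ *
+ *     get_spinlock(&free_pages_lock, (int) alloc_region);
+ *     ... read or update generations[] / page_table[] ...
+ *     release_spinlock(&free_pages_lock);
+ *
+ * Since this is a busy-waiting spinlock word rather than an OS
+ * mutex, nothing may block while it is held. */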
\f
/ ((double)generations[gen].bytes_allocated);
}
+void fpu_save(int *); /* defined in x86-assem.S */
+void fpu_restore(int *); /* defined in x86-assem.S */
/* The verbose argument controls how much to print: 0 for normal
* level of detail; 1 for debugging. */
static void
i,
boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*4096
+ (count_generation_pages(i)*PAGE_BYTES
- generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
generations[gc_alloc_generation].alloc_start_page;
}
last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region);
- bytes_found=(4096 - page_table[first_page].bytes_used)
- + 4096*(last_page-first_page);
+ bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
/* Set up the alloc_region. */
alloc_region->first_page = first_page;
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
gc_abort();
}
- new_area_start = 4096*first_page + offset;
+ new_area_start = PAGE_BYTES*first_page + offset;
/* Search backwards for a prior area that this follows from. If
   found, this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
unsigned area_end =
- 4096*((*new_areas)[i].page)
+ PAGE_BYTES*((*new_areas)[i].page)
+ (*new_areas)[i].offset
+ (*new_areas)[i].size;
/*FSHOW((stderr,
max_new_areas = new_areas_index;
}
-/* Update the tables for the alloc_region. The region maybe added to
+/* Update the tables for the alloc_region. The region may be added to
* the new_areas.
*
* When done the alloc_region is set up so that the next quick alloc
next_page = first_page+1;
- /* Skip if no bytes were allocated. */
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
+ /* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
/* Calculate the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
- bytes_used = 4096;
+ if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
/* Calculate the number of bytes used in this page. */
more = 0;
if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>4096) {
- bytes_used = 4096;
+ - page_address(next_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
page_table[next_page].allocated = FREE_PAGE;
next_page++;
}
-
+ release_spinlock(&free_pages_lock);
+    /* alloc_region is per-thread, so we're OK to do this unlocked */
gc_set_region_empty(alloc_region);
}
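+/* For illustration, the "quick alloc" fast path that a freshly set-up
+ * region enables. This sketches the same bump-pointer logic the
+ * inline allocator uses; try_quick_alloc is a hypothetical name, not
+ * a function in this file:
+ *
+ *     void *
+ *     try_quick_alloc(struct alloc_region *region, int nbytes)
+ *     {
+ *         void *new_free_pointer = region->free_pointer + nbytes;
+ *         if (new_free_pointer <= region->end_addr) {
+ *             void *obj = region->free_pointer;
+ *             region->free_pointer = new_free_pointer;
+ *             return obj;      (bump-pointer hit, no lock needed)
+ *         }
+ *         return NULL;         (region exhausted: refill via gc_alloc)
+ *     }
+ */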
index ahead of the current region and bumped up here to save a
lot of re-scanning. */
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
/* Calc. the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
- bytes_used = 4096;
+ if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
page_table[next_page].large_object = large;
page_table[next_page].first_object_offset =
- orig_first_page_bytes_used - 4096*(next_page-first_page);
+ orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
- bytes_used = 4096;
+ if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),0);
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
(page_table[first_page].large_object == 0) &&
(gc_alloc_generation == 0) &&
(page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (4096-32)) &&
+ (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
(page_table[first_page].write_protected == 0) &&
(page_table[first_page].dont_move == 0))
break;
gc_assert(page_table[first_page].write_protected == 0);
last_page = first_page;
- bytes_found = 4096 - page_table[first_page].bytes_used;
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
num_pages = 1;
while (((bytes_found < nbytes)
|| (alloc_region && (num_pages < 2)))
&& (page_table[last_page+1].allocated == FREE_PAGE)) {
last_page++;
num_pages++;
- bytes_found += 4096;
+ bytes_found += PAGE_BYTES;
gc_assert(page_table[last_page].write_protected == 0);
}
- region_size = (4096 - page_table[first_page].bytes_used)
- + 4096*(last_page-first_page);
+ region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
gc_assert(bytes_found == region_size);
restart_page = last_page + 1;
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert(page_table[next_page].allocated == BOXED_PAGE);
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
+ os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
page_table[next_page].write_protected = 0;
}
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
(page_table[next_page].allocated == BOXED_PAGE) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
/* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected as they
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
|| (page_table[next_page].allocated == BOXED_PAGE));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
page_table[next_page].allocated = UNBOXED_PAGE;
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
((page_table[next_page].allocated == UNBOXED_PAGE)
|| (page_table[next_page].allocated == BOXED_PAGE)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
	/* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected, even if
return (NULL);
}
-static lispobj*
+lispobj*
search_read_only_space(lispobj *pointer)
{
lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
return (search_space(start, (pointer+2)-start, pointer));
}
-static lispobj *
+lispobj *
search_static_space(lispobj *pointer)
{
lispobj* start = (lispobj*)STATIC_SPACE_START;
/* Is there any possibility that pointer is a valid Lisp object
* reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing? */
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointers() */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
/* Check that the object pointed to is consistent with the pointer
* low tag.
- *
- * FIXME: It's not safe to rely on the result from this check
- * before an object is initialized. Thus, if we were interrupted
- * just as an object had been allocated but not initialized, the
- * GC relying on this result could bogusly reclaim the memory.
- * However, we can't really afford to do without this check. So
- * we should make it safe somehow.
- * (1) Perhaps just review the code to make sure
- * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
- * thing is wrapped around critical sections where allocated
- * memory type bits haven't been set.
- * (2) Perhaps find some other hack to protect against this, e.g.
- * recording the result of the last call to allocate-lisp-memory,
- * and returning true from this function when *pointer is
- * a reference to that result.
- *
- * (surely pseudo-atomic is supposed to be used for exactly this?)
*/
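+    /* For illustration (32-bit x86 values; an assumption here): the
+     * lowtag is the low three bits of the pointer, so e.g. a pointer
+     * 0x0804f007 has lowtag 7 (OTHER_POINTER_LOWTAG) and must point
+     * at an object whose header word carries a matching widetag,
+     * while lowtag 3 (LIST_POINTER_LOWTAG) must point at a cons. */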
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_STRING_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case COMPLEX_ARRAY_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
case LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
boxed = BOXED_PAGE;
break;
case BIGNUM_WIDETAG:
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
gc_assert((page_table[next_page].allocated == BOXED_PAGE)
|| (page_table[next_page].allocated == UNBOXED_PAGE));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset ==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].allocated = boxed;
/* Shouldn't be write-protected at this stage. Essential that the
* pages aren't. */
gc_assert(!page_table[next_page].write_protected);
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
((page_table[next_page].allocated == UNBOXED_PAGE)
|| (page_table[next_page].allocated == BOXED_PAGE)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
	/* It checks out OK, free the page. We don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write protected as they
/* quick check 2: Check the offset within the page.
*
- * FIXME: The mask should have a symbolic name, and ideally should
- * be derived from page size instead of hardwired to 0xfff.
- * (Also fix other uses of 0xfff, elsewhere.) */
- if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used)
+ */
+ if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
return;
/* Filter out anything which can't be a pointer to a Lisp object
* (or, as a special case which also requires dont_move, a return
* address referring to something in a CodeObject). This is
* expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreter as
+ * probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
if (!(possibly_valid_dynamic_space_pointer(addr)))
return;
while (page_table[first_page].first_object_offset != 0) {
--first_page;
/* Do some checks. */
- gc_assert(page_table[first_page].bytes_used == 4096);
+ gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
gc_assert(page_table[first_page].gen == from_space);
gc_assert(page_table[first_page].allocated == region_allocation);
}
if ((page_table[addr_page_index].allocated == FREE_PAGE)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
- || (((unsigned)addr & 0xfff)
+ || (((unsigned)addr & (PAGE_BYTES - 1))
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
"weird? ignore ptr 0x%x to freed area of large object\n",
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < 4096)
- /* ..or it is 4096 and is the last in the block */
+ if ((page_table[i].bytes_used < PAGE_BYTES)
+ /* ..or it is PAGE_BYTES and is the last in the block */
|| (page_table[i+1].allocated == FREE_PAGE)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
gc_assert(page_table[page].allocated != FREE_PAGE);
gc_assert(page_table[page].bytes_used != 0);
- /* Skip if it's already write-protected or an unboxed page. */
+ /* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
+ || page_table[page].dont_move
|| (page_table[page].allocated & UNBOXED_PAGE))
return (0);
/*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
os_protect((void *)page_addr,
- 4096,
+ PAGE_BYTES,
OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
for (last_page = i; ; last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
|| (!(page_table[last_page+1].allocated & BOXED_PAGE))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
#endif
{
scavenge(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*4096)/4);
+ + (last_page-i)*PAGE_BYTES)/4);
/* Now scan the pages and write protect those
* that don't have pointers to younger
for (last_page = i; ;last_page++) {
/* Check whether this is the last page in this
* contiguous block */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
|| (!(page_table[last_page+1].allocated & BOXED_PAGE))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- page_table[i].first_object_offset)/4;
else
size = (page_table[last_page].bytes_used
- + (last_page-i)*4096
+ + (last_page-i)*PAGE_BYTES
- page_table[i].first_object_offset)/4;
{
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[i].write_protected) {
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[i].write_protected = 0;
}
}
void *page_start = (void *)page_address(last_page);
if (page_table[last_page].write_protected) {
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[last_page].write_protected = 0;
}
}
page_start = (void *)page_address(first_page);
- os_invalidate(page_start, 4096*(last_page-first_page));
- addr = os_validate(page_start, 4096*(last_page-first_page));
+ os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
+ addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
if (addr == NULL || addr != page_start) {
/* Is this an error condition? I couldn't really tell from
* the old CMU CL code, which fprintf'ed a message with
int *page_start;
page_start = (int *)page_address(first_page);
- i586_bzero(page_start, 4096*(last_page-first_page));
+ i586_bzero(page_start, PAGE_BYTES*(last_page-first_page));
}
first_page = last_page;
case RATIO_WIDETAG:
case COMPLEX_WIDETAG:
case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_STRING_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case COMPLEX_ARRAY_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
for (last_page = i; ;last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
|| (page_table[last_page+1].allocated != region_allocation)
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
break;
verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*4096)/4);
+ + (last_page-i)*PAGE_BYTES)/4);
i = last_page;
}
}
}
}
} else {
- int free_bytes = 4096 - page_table[page].bytes_used;
+ int free_bytes = PAGE_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
int *start_addr = (int *)((unsigned)page_address(page)
+ page_table[page].bytes_used);
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated == BOXED_PAGE)
&& (page_table[i].bytes_used != 0)
+ && !page_table[i].dont_move
&& (page_table[i].gen == generation)) {
void *page_start;
page_start = (void *)page_address(i);
os_protect(page_start,
- 4096,
+ PAGE_BYTES,
OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
/* Before any pointers are preserved, the dont_move flags on the
* pages need to be cleared. */
for (i = 0; i < last_free_page; i++)
- page_table[i].dont_move = 0;
+ if(page_table[i].gen==from_space)
+ page_table[i].dont_move = 0;
/* Un-write-protect the old-space pages. This is essential for the
* promoted pages as they may contain pointers into the old-space
unprotect_oldspace();
/* Scavenge the stacks' conservative roots. */
+
+ /* there are potentially two stacks for each thread: the main
+ * stack, which may contain Lisp pointers, and the alternate stack.
+ * We don't ever run Lisp code on the altstack, but it may
+ * host a sigcontext with lisp objects in it */
+
+    /* what we need to do: (1) find the stack pointer for the main
+     * stack and scavenge it; (2) find the interrupt context on the
+     * alternate stack that might contain lisp values, and scavenge
+ * that */
+
+ /* we assume that none of the preceding applies to the thread that
+ * initiates GC. If you ever call GC from inside an altstack
+ * handler, you will lose. */
for_each_thread(th) {
void **ptr;
+ void **esp=(void **)-1;
+ int i,free;
#ifdef LISP_FEATURE_SB_THREAD
- struct user_regs_struct regs;
- if(ptrace(PTRACE_GETREGS,th->pid,0,®s)){
- /* probably doesn't exist any more. */
- fprintf(stderr,"child pid %d, %s\n",th->pid,strerror(errno));
- perror("PTRACE_GETREGS");
+ if(th==arch_os_get_current_thread()) {
+ esp = (void **) &raise;
+ } else {
+ void **esp1;
+ free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+ for(i=free-1;i>=0;i--) {
+ os_context_t *c=th->interrupt_contexts[i];
+ esp1 = (void **) *os_context_register_addr(c,reg_ESP);
+	    if(esp1>=th->control_stack_start && esp1<th->control_stack_end){
+ if(esp1<esp) esp=esp1;
+ for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+ preserve_pointer(*ptr);
+ }
+ }
+ }
}
- preserve_pointer(regs.ebx);
- preserve_pointer(regs.ecx);
- preserve_pointer(regs.edx);
- preserve_pointer(regs.esi);
- preserve_pointer(regs.edi);
- preserve_pointer(regs.ebp);
- preserve_pointer(regs.eax);
-#endif
- for (ptr = ((void **)
- ((void *)th->control_stack_start
- + THREAD_CONTROL_STACK_SIZE)
- -1);
-#ifdef LISP_FEATURE_SB_THREAD
- ptr > regs.esp;
#else
- ptr > (void **)&raise;
+ esp = (void **) &raise;
#endif
- ptr--) {
+ for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
preserve_pointer(*ptr);
}
}
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
- /* FIXME: 4096 should be symbolic constant here and
- * prob'ly elsewhere too. */
- num_dont_move_pages * 4096);
+ num_dont_move_pages * PAGE_BYTES);
}
#endif
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),0);
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
return 0; /* dummy value: return something ... */
}
gc_alloc_generation = 0;
update_x86_dynamic_space_free_pointer();
-
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ if(gencgc_verbose)
+	fprintf(stderr,"Next GC when %lu bytes have been consed\n",
+ auto_gc_trigger);
SHOW("returning from collect_garbage");
}
page_start = (void *)page_address(page);
/* First, remove any write-protection. */
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[page].write_protected = 0;
- os_invalidate(page_start,4096);
- addr = os_validate(page_start,4096);
+ os_invalidate(page_start,PAGE_BYTES);
+ addr = os_validate(page_start,PAGE_BYTES);
if (addr == NULL || addr != page_start) {
lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
page_start,
do {
page_table[page].allocated = BOXED_PAGE;
page_table[page].gen = 0;
- page_table[page].bytes_used = 4096;
+ page_table[page].bytes_used = PAGE_BYTES;
page_table[page].large_object = 0;
page_table[page].first_object_offset =
(void *)DYNAMIC_SPACE_START - page_address(page);
- addr += 4096;
+ addr += PAGE_BYTES;
page++;
} while (addr < alloc_ptr);
- generations[0].bytes_allocated = 4096*page;
- bytes_allocated = 4096*page;
+ generations[0].bytes_allocated = PAGE_BYTES*page;
+ bytes_allocated = PAGE_BYTES*page;
}
\f
-extern boolean maybe_gc_pending ;
/* alloc(..) is the external interface for memory allocation. It
* allocates to generation 0. It is not called from within the garbage
* collector as it is only external uses that need the check for heap
/* there are a few places in the C code that allocate data in the
* heap before Lisp starts. This is before interrupts are enabled,
* so we don't need to check for pseudo-atomic */
- gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
-
+#ifdef LISP_FEATURE_SB_THREAD
+ if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
+ register u32 fs;
+ fprintf(stderr, "fatal error in thread 0x%x, pid=%d\n",
+ th,getpid());
+ __asm__("movl %fs,%0" : "=r" (fs) : );
+	fprintf(stderr, "fs is %x, th->tls_cookie=%x (should be identical)\n",
+		fs, th->tls_cookie);
+ lose("If you see this message before 2003.12.01, mail details to sbcl-devel\n");
+ }
+#else
+ gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
+#endif
+
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
if (new_free_pointer <= region->end_addr) {
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- auto_gc_trigger *= 2;
/* set things up so that GC happens when we finish the PA
- * section. */
- maybe_gc_pending=1;
- SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),th);
+     * section. We only do this if there wasn't a pending handler
+     * already, in case the pending handler was itself a GC. If it
+     * wasn't a GC, the next allocation will get us back to this
+     * point anyway, so no harm done */
+ struct interrupt_data *data=th->interrupt_data;
+ if(!data->pending_handler)
+ maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0);
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
return (new_obj);
}
\f
-/*
- * noise to manipulate the gc trigger stuff
- */
-
-void
-set_auto_gc_trigger(os_vm_size_t dynamic_usage)
-{
- auto_gc_trigger += dynamic_usage;
-}
-
-void
-clear_auto_gc_trigger(void)
-{
- auto_gc_trigger = 0;
-}
-\f
/* Find the code object for the given pc, or return NULL on failure.
*
* FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */
return 0;
} else {
-
- /* The only acceptable reason for an signal like this from the
- * heap is that the generational GC write-protected the page. */
- if (page_table[page_index].write_protected != 1) {
- lose("access failure in heap page not marked as write-protected");
+ if (page_table[page_index].write_protected) {
+ /* Unprotect the page. */
+ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page_index].write_protected_cleared = 1;
+ page_table[page_index].write_protected = 0;
+ } else {
+ /* The only acceptable reason for this signal on a heap
+ * access is that GENCGC write-protected the page.
+ * However, if two CPUs hit a wp page near-simultaneously,
+ * we had better not have the second one lose here if it
+ * does this test after the first one has already set wp=0
+ */
+ if(page_table[page_index].write_protected_cleared != 1)
+ lose("fault in heap page not marked as write-protected");
}
-
- /* Unprotect the page. */
- os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
- page_table[page_index].write_protected = 0;
- page_table[page_index].write_protected_cleared = 1;
-
/* Don't worry, we can handle it. */
return 1;
}
}
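+/* The mechanism above is the classic mprotect()-based write barrier.
+ * A self-contained miniature of the same unprotect-and-record dance
+ * (a POSIX-only sketch, not part of this runtime):
+ *
+ *     static char *card;                   - one write-protected page
+ *     static volatile int card_dirtied;
+ *
+ *     static void
+ *     wp_handler(int sig, siginfo_t *info, void *ctx)
+ *     {
+ *         - unprotect so the faulting store can be retried, and
+ *         - remember the page may now hold pointers to young objects
+ *         mprotect(card, getpagesize(), PROT_READ|PROT_WRITE);
+ *         card_dirtied = 1;
+ *     }
+ *
+ * After installing wp_handler with sigaction() and SA_SIGINFO, and
+ * mapping `card' PROT_READ, the first store into the page faults
+ * once, the handler clears the protection, the retried store then
+ * succeeds, and card_dirtied tells a collector to re-scan the page. */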
-
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
* it's not just a case of the program hitting the write barrier, and
* are about to let Lisp deal with it. It's basically just a
unhandled_sigmemoryfault()
{}
-gc_alloc_update_all_page_tables(void)
+void gc_alloc_update_all_page_tables(void)
{
/* Flush the alloc regions updating the tables. */
struct thread *th;