#include <stdio.h>
#include <signal.h>
+#include <errno.h>
+#include <string.h> /* strerror(), used by the SB_THREAD stack-scavenging code */
#include "runtime.h"
#include "sbcl.h"
#include "os.h"
#include "arch.h"
#include "gc.h"
#include "gc-internal.h"
+#include "thread.h"
+#include "genesis/vector.h"
+#include "genesis/weak-pointer.h"
+#include "genesis/simple-fun.h"
+
+#ifdef LISP_FEATURE_SB_THREAD
+#include <sys/ptrace.h>
+#include <linux/user.h> /* threading is presently linux-only */
+#endif
/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
-static unsigned long auto_gc_trigger = 0;
+extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
+unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
* scavenging. */
/* FIXME: It would be nice to use this symbolic constant instead of
* bare 4096 almost everywhere. We could also use an assertion that
* it's equal to getpagesize(). */
+
#define PAGE_BYTES 4096
/* An array of page structures is statically allocated.
* search of the heap. XX Gencgc obviously needs to be better
* integrated with the Lisp code. */
static int last_free_page;
-static int last_used_page = 0;
+\f
+/* This lock is to prevent multiple threads from simultaneously
+ * allocating new regions which overlap each other. Note that the
+ * majority of GC is single-threaded, but alloc() may be called from
+ * >1 thread at a time and must be thread-safe. This lock must be
+ * seized before all accesses to generations[] or to parts of
+ * page_table[] that other threads may want to see */
+
+static lispobj free_pages_lock=0;
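+
+/* A minimal sketch of the usage pattern, assuming get_spinlock() is the
+ * runtime's atomic-exchange spin loop (it is defined elsewhere in the
+ * runtime, so its exact signature is an assumption here):
+ *
+ *   get_spinlock(&free_pages_lock,(long) me); // spin until acquired
+ *   ... read/update generations[] and page_table[] ...
+ *   free_pages_lock=0;                        // a plain store releases
+ */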
+
\f
/*
* miscellaneous heap functions
/* Count the number of boxed pages within the given
* generation. */
- if (page_table[j].allocated == BOXED_PAGE) {
+ if (page_table[j].allocated & BOXED_PAGE) {
if (page_table[j].large_object)
large_boxed_cnt++;
else
/* Count the number of unboxed pages within the given
* generation. */
- if (page_table[j].allocated == UNBOXED_PAGE) {
+ if (page_table[j].allocated & UNBOXED_PAGE) {
if (page_table[j].large_object)
large_unboxed_cnt++;
else
struct alloc_region boxed_region;
struct alloc_region unboxed_region;
-/* XX hack. Current Lisp code uses the following. Need copying in/out. */
-void *current_region_free_pointer;
-void *current_region_end_addr;
-
/* The generation currently being allocated to. */
static int gc_alloc_generation;
{
int first_page;
int last_page;
- int region_size;
- int restart_page;
int bytes_found;
- int num_pages;
int i;
/*
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
-
+    get_spinlock(&free_pages_lock,(long) alloc_region);
if (unboxed) {
- restart_page =
+ first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
} else {
- restart_page =
+ first_page =
generations[gc_alloc_generation].alloc_start_page;
}
-
- /* Search for a contiguous free region of at least nbytes with the
- * given properties: boxed/unboxed, generation. */
- do {
- first_page = restart_page;
-
- /* First search for a page with at least 32 bytes free, which is
- * not write-protected, and which is not marked dont_move.
- *
- * FIXME: This looks extremely similar, perhaps identical, to
- * code in gc_alloc_large(). It should be shared somehow. */
- while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE) /* not free page */
- && ((unboxed &&
- (page_table[first_page].allocated != UNBOXED_PAGE))
- || (!unboxed &&
- (page_table[first_page].allocated != BOXED_PAGE))
- || (page_table[first_page].large_object != 0)
- || (page_table[first_page].gen != gc_alloc_generation)
- || (page_table[first_page].bytes_used >= (4096-32))
- || (page_table[first_page].write_protected != 0)
- || (page_table[first_page].dont_move != 0)))
- first_page++;
- /* Check for a failure. */
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_alloc_new_region failed on first_page, nbytes=%d.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
- }
-
- gc_assert(page_table[first_page].write_protected == 0);
-
- /*
- FSHOW((stderr,
- "/first_page=%d bytes_used=%d\n",
- first_page, page_table[first_page].bytes_used));
- */
-
- /* Now search forward to calculate the available region size. It
- * tries to keeps going until nbytes are found and the number of
- * pages is greater than some level. This helps keep down the
- * number of pages in a region. */
- last_page = first_page;
- bytes_found = 4096 - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes) || (num_pages < 2))
- && (last_page < (NUM_PAGES-1))
- && (page_table[last_page+1].allocated == FREE_PAGE)) {
- last_page++;
- num_pages++;
- bytes_found += 4096;
- gc_assert(page_table[last_page].write_protected == 0);
- }
-
- region_size = (4096 - page_table[first_page].bytes_used)
+ last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region);
+ bytes_found=(4096 - page_table[first_page].bytes_used)
+	+ 4096*(last_page-first_page);
- gc_assert(bytes_found == region_size);
-
- /*
- FSHOW((stderr,
- "/last_page=%d bytes_found=%d num_pages=%d\n",
- last_page, bytes_found, num_pages));
- */
-
- restart_page = last_page + 1;
- } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
-
- /* Check for a failure. */
- if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
- fprintf(stderr,
- "Argh! gc_alloc_new_region() failed on restart_page, nbytes=%d.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
- }
-
- /*
- FSHOW((stderr,
- "/gc_alloc_new_region() gen %d: %d bytes: pages %d to %d: addr=%x\n",
- gc_alloc_generation,
- bytes_found,
- first_page,
- last_page,
- page_address(first_page)));
- */
-
/* Set up the alloc_region. */
alloc_region->first_page = first_page;
alloc_region->last_page = last_page;
alloc_region->free_pointer = alloc_region->start_addr;
alloc_region->end_addr = alloc_region->start_addr + bytes_found;
- if (gencgc_zero_check) {
- int *p;
- for (p = (int *)alloc_region->start_addr;
- p < (int *)alloc_region->end_addr; p++) {
- if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero.", p);
- }
- }
- }
-
/* Set up the pages. */
/* The first page may have already been in use. */
gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
else
gc_assert(page_table[first_page].allocated == BOXED_PAGE);
+ page_table[first_page].allocated |= OPEN_REGION_PAGE;
+
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
* broken before!) */
page_table[i].first_object_offset =
alloc_region->start_addr - page_address(i);
+	page_table[i].allocated |= OPEN_REGION_PAGE;
}
-
/* Bump up last_free_page. */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096));
- if (last_page+1 > last_used_page)
- last_used_page = last_page+1;
+ (lispobj)(((char *)heap_base) + last_free_page*4096),
+ 0);
+ }
+ free_pages_lock=0;
+
+ /* we can do this after releasing free_pages_lock */
+ if (gencgc_zero_check) {
+ int *p;
+ for (p = (int *)alloc_region->start_addr;
+ p < (int *)alloc_region->end_addr; p++) {
+ if (*p != 0) {
+ /* KLUDGE: It would be nice to use %lx and explicit casts
+ * (long) in code like this, so that it is less likely to
+ * break randomly when running on a machine with different
+ * word sizes. -- WHN 19991129 */
+ lose("The new region at %x is not zero.", p);
+ }
}
}
+}
+
/* If the record_new_objects flag is 2 then all new regions created
* are recorded.
*
(*new_areas)[i].size,
first_page,
offset,
- size));*/
+ size);*/
(*new_areas)[i].size += size;
return;
}
}
- /*FSHOW((stderr, "/add_new_area S1 %d %d %d\n", i, c, new_area_start));*/
(*new_areas)[new_areas_index].page = first_page;
(*new_areas)[new_areas_index].offset = offset;
next_page = first_page+1;
- /* Skip if no bytes were allocated. */
+    get_spinlock(&free_pages_lock,(long) alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
+ /* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
* first_object_offset. */
if (page_table[first_page].bytes_used == 0)
gc_assert(page_table[first_page].first_object_offset == 0);
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
if (unboxed)
gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
* first_object_offset pointer to the start of the region, and set
* the bytes_used. */
while (more) {
+ page_table[next_page].allocated &= ~(OPEN_REGION_PAGE);
if (unboxed)
gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
else
} else {
/* There are no bytes allocated. Unallocate the first_page if
* there are 0 bytes_used. */
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
if (page_table[first_page].bytes_used == 0)
page_table[first_page].allocated = FREE_PAGE;
}
page_table[next_page].allocated = FREE_PAGE;
next_page++;
}
-
- /* Reset the alloc_region. */
- alloc_region->first_page = 0;
- alloc_region->last_page = -1;
- alloc_region->start_addr = page_address(0);
- alloc_region->free_pointer = page_address(0);
- alloc_region->end_addr = page_address(0);
+ free_pages_lock=0;
+    /* alloc_region is per-thread, so we're OK to reset it unlocked */
+ gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(int nbytes);
{
int first_page;
int last_page;
- int region_size;
- int restart_page;
- int bytes_found;
- int num_pages;
int orig_first_page_bytes_used;
int byte_cnt;
int more;
*/
/* If the object is small, and there is room in the current region
- then allocation it in the current region. */
+ then allocate it in the current region. */
if (!large
&& ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
return gc_quick_alloc(nbytes);
- /* Search for a contiguous free region of at least nbytes. If it's a
- large object then align it on a page boundary by searching for a
- free page. */
-
/* To allow the allocation of small objects without the danger of
using a page in the current boxed region, the search starts after
the current boxed free region. XX could probably keep a page
index ahead of the current region and bumped up here to save a
lot of re-scanning. */
+
+    get_spinlock(&free_pages_lock,(long) alloc_region);
+
if (unboxed) {
- restart_page =
+ first_page =
generations[gc_alloc_generation].alloc_large_unboxed_start_page;
} else {
- restart_page = generations[gc_alloc_generation].alloc_large_start_page;
- }
- if (restart_page <= alloc_region->last_page) {
- restart_page = alloc_region->last_page+1;
+ first_page = generations[gc_alloc_generation].alloc_large_start_page;
}
-
- do {
- first_page = restart_page;
-
- if (large)
- while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE))
- first_page++;
- else
- /* FIXME: This looks extremely similar, perhaps identical,
- * to code in gc_alloc_new_region(). It should be shared
- * somehow. */
- while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE)
- && ((unboxed &&
- (page_table[first_page].allocated != UNBOXED_PAGE))
- || (!unboxed &&
- (page_table[first_page].allocated != BOXED_PAGE))
- || (page_table[first_page].large_object != 0)
- || (page_table[first_page].gen != gc_alloc_generation)
- || (page_table[first_page].bytes_used >= (4096-32))
- || (page_table[first_page].write_protected != 0)
- || (page_table[first_page].dont_move != 0)))
- first_page++;
-
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_alloc_large failed (first_page), nbytes=%d.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
- }
-
- gc_assert(page_table[first_page].write_protected == 0);
-
- /*
- FSHOW((stderr,
- "/first_page=%d bytes_used=%d\n",
- first_page, page_table[first_page].bytes_used));
- */
-
- last_page = first_page;
- bytes_found = 4096 - page_table[first_page].bytes_used;
- num_pages = 1;
- while ((bytes_found < nbytes)
- && (last_page < (NUM_PAGES-1))
- && (page_table[last_page+1].allocated == FREE_PAGE)) {
- last_page++;
- num_pages++;
- bytes_found += 4096;
- gc_assert(page_table[last_page].write_protected == 0);
- }
-
- region_size = (4096 - page_table[first_page].bytes_used)
- + 4096*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
-
- /*
- FSHOW((stderr,
- "/last_page=%d bytes_found=%d num_pages=%d\n",
- last_page, bytes_found, num_pages));
- */
-
- restart_page = last_page + 1;
- } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
-
- /* Check for a failure */
- if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
- fprintf(stderr,
- "Argh! gc_alloc_large failed (restart_page), nbytes=%d.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
+ if (first_page <= alloc_region->last_page) {
+ first_page = alloc_region->last_page+1;
}
- /*
- if (large)
- FSHOW((stderr,
- "/gc_alloc_large() gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
- gc_alloc_generation,
- nbytes,
- bytes_found,
- first_page,
- last_page,
- page_address(first_page)));
- */
+ last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,0);
gc_assert(first_page > alloc_region->last_page);
if (unboxed)
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096));
- if (last_page+1 > last_used_page)
- last_used_page = last_page+1;
+ (lispobj)(((char *)heap_base) + last_free_page*4096),0);
}
+ free_pages_lock=0;
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
+int
+gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct alloc_region *alloc_region)
+{
+ /* if alloc_region is 0, we assume this is for a potentially large
+ object */
+ int first_page;
+ int last_page;
+ int region_size;
+ int restart_page=*restart_page_ptr;
+ int bytes_found;
+ int num_pages;
+ int large = !alloc_region && (nbytes >= large_object_size);
+
+ gc_assert(free_pages_lock);
+ /* Search for a contiguous free space of at least nbytes. If it's a
+ large object then align it on a page boundary by searching for a
+ free page. */
+
+ /* To allow the allocation of small objects without the danger of
+ using a page in the current boxed region, the search starts after
+ the current boxed free region. XX could probably keep a page
+ index ahead of the current region and bumped up here to save a
+ lot of re-scanning. */
+
+ do {
+ first_page = restart_page;
+ if (large)
+ while ((first_page < NUM_PAGES)
+ && (page_table[first_page].allocated != FREE_PAGE))
+ first_page++;
+ else
+ while (first_page < NUM_PAGES) {
+ if(page_table[first_page].allocated == FREE_PAGE)
+ break;
+ /* I don't know why we need the gen=0 test, but it
+ * breaks randomly if that's omitted -dan 2003.02.26
+ */
+ if((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE : BOXED_PAGE)) &&
+ (page_table[first_page].large_object == 0) &&
+ (gc_alloc_generation == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].bytes_used < (4096-32)) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0))
+ break;
+ first_page++;
+ }
+
+ if (first_page >= NUM_PAGES) {
+ fprintf(stderr,
+		"Argh! gc_find_freeish_pages failed (first_page), nbytes=%d.\n",
+ nbytes);
+ print_generation_stats(1);
+ lose(NULL);
+ }
+
+ gc_assert(page_table[first_page].write_protected == 0);
+
+ last_page = first_page;
+ bytes_found = 4096 - page_table[first_page].bytes_used;
+ num_pages = 1;
+ while (((bytes_found < nbytes)
+ || (alloc_region && (num_pages < 2)))
+ && (last_page < (NUM_PAGES-1))
+ && (page_table[last_page+1].allocated == FREE_PAGE)) {
+ last_page++;
+ num_pages++;
+ bytes_found += 4096;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+
+ region_size = (4096 - page_table[first_page].bytes_used)
+ + 4096*(last_page-first_page);
+
+ gc_assert(bytes_found == region_size);
+ restart_page = last_page + 1;
+ } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
+
+ /* Check for a failure */
+ if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
+ fprintf(stderr,
+ "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%d.\n",
+ nbytes);
+ print_generation_stats(1);
+ lose(NULL);
+ }
+ *restart_page_ptr=first_page;
+ return last_page;
+}
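+
+/* An illustrative call, mirroring the call sites above (a sketch, not a
+ * new interface):
+ *
+ *   int first_page =
+ *       generations[gc_alloc_generation].alloc_start_page;
+ *   int last_page =
+ *       gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region);
+ *   // on return, pages first_page..last_page hold >= nbytes of space,
+ *   // and first_page has been updated through restart_page_ptr
+ */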
+
/* Allocate bytes. All the rest of the special-purpose allocation
* functions will eventually call this (instead of just duplicating
* parts of its code) */
void *
-gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
+gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region,
+ int quick_p)
{
void *new_free_pointer;
- struct alloc_region *my_region =
- unboxed_p ? &unboxed_region : &boxed_region;
/* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */
/* Set up a new region. */
gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
}
+
return((void *)new_obj);
}
/* If so then allocate from the current region. */
void *new_obj = my_region->free_pointer;
my_region->free_pointer = new_free_pointer;
-
/* Check whether the current region is almost empty. */
if ((my_region->end_addr - my_region->free_pointer) <= 32) {
	/* If so, finish with the current region. */
return((void *) NIL); /* dummy value: return something ... */
}
+void *
+gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
+{
+ struct alloc_region *my_region =
+ unboxed_p ? &unboxed_region : &boxed_region;
+ return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
+}
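+/* Note that gc_general_alloc() keeps the old behaviour of allocating
+ * from the global boxed/unboxed regions, for use inside the collector
+ * itself; allocation on behalf of running Lisp goes through alloc()
+ * below, which selects the per-thread region (th->alloc_region) when
+ * threads exist. */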
+
+
static void *
gc_alloc(int nbytes,int unboxed_p)
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated = BOXED_PAGE);
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE);
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
search_read_only_space(lispobj *pointer)
{
lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
- lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
+ lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
if ((pointer < start) || (pointer >= end))
return NULL;
return (search_space(start, (pointer+2)-start, pointer));
search_static_space(lispobj *pointer)
{
lispobj* start = (lispobj*)STATIC_SPACE_START;
- lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
+ lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
if ((pointer < start) || (pointer >= end))
return NULL;
return (search_space(start, (pointer+2)-start, pointer));
/* Is there any possibility that pointer is a valid Lisp object
* reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing? */
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointer(). */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
/* Check that the object pointed to is consistent with the pointer
* low tag.
- *
- * FIXME: It's not safe to rely on the result from this check
- * before an object is initialized. Thus, if we were interrupted
- * just as an object had been allocated but not initialized, the
- * GC relying on this result could bogusly reclaim the memory.
- * However, we can't really afford to do without this check. So
- * we should make it safe somehow.
- * (1) Perhaps just review the code to make sure
- * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
- * thing is wrapped around critical sections where allocated
- * memory type bits haven't been set.
- * (2) Perhaps find some other hack to protect against this, e.g.
- * recording the result of the last call to allocate-lisp-memory,
- * and returning true from this function when *pointer is
- * a reference to that result. */
+ */
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
/* Start_addr should be the enclosing code object, or a closure
#endif
case SIMPLE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
+ case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case BIGNUM_WIDETAG:
case SIMPLE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
+ case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
/* Skip if already marked dont_move. */
|| (page_table[addr_page_index].dont_move != 0))
return;
-
+ gc_assert(!(page_table[addr_page_index].allocated & OPEN_REGION_PAGE));
/* (Now that we know that addr_page_index is in range, it's
* safe to index into page_table[] with it.) */
region_allocation = page_table[addr_page_index].allocated;
* (or, as a special case which also requires dont_move, a return
* address referring to something in a CodeObject). This is
* expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreter as
+ * probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
- if (!possibly_valid_dynamic_space_pointer(addr))
+ if (!(possibly_valid_dynamic_space_pointer(addr)))
return;
+ first_page = addr_page_index;
/* Work backwards to find a page with a first_object_offset of 0.
* The pages should be contiguous with all bytes used in the same
* gen. Assumes the first_object_offset is negative or zero. */
- first_page = addr_page_index;
+
+ /* this is probably needlessly conservative.  The first object in
+ * the page may not even be the one we were passed a pointer to:
+ * if this is the case, we will end up preserving all the previous
+ * object's pages too.
+ */
+
while (page_table[first_page].first_object_offset != 0) {
--first_page;
/* Do some checks. */
/* Skip if it's already write-protected or an unboxed page. */
if (page_table[page].write_protected
- || (page_table[page].allocated == UNBOXED_PAGE))
+ || (page_table[page].allocated & UNBOXED_PAGE))
return (0);
/* Scan the page for pointers to younger generations or the
#endif
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated == BOXED_PAGE)
+ if ((page_table[i].allocated & BOXED_PAGE)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
int last_page;
* block. */
if ((page_table[last_page].bytes_used < 4096)
/* Or it is 4096 and is the last in the block */
- || (page_table[last_page+1].allocated != BOXED_PAGE)
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].first_object_offset == 0))
FSHOW((stderr,
"/starting one full scan of newspace generation %d\n",
generation));
-
for (i = 0; i < last_free_page; i++) {
+ /* note that this skips over open regions when it encounters them */
if ((page_table[i].allocated == BOXED_PAGE)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
* contiguous block */
if ((page_table[last_page].bytes_used < 4096)
/* Or it is 4096 and is the last in the block */
- || (page_table[last_page+1].allocated != BOXED_PAGE)
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].first_object_offset == 0))
int previous_new_areas_index;
/* Flush the current regions updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
/* Turn on the recording of new areas by gc_alloc(). */
new_areas = current_new_areas;
record_new_objects = 2;
/* Flush the current regions updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
/* Grab new_areas_index. */
current_new_areas_index = new_areas_index;
record_new_objects = 2;
/* Flush the current regions updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
} else {
int offset = (*previous_new_areas)[i].offset;
int size = (*previous_new_areas)[i].size / 4;
gc_assert((*previous_new_areas)[i].size % 4 == 0);
-
scavenge(page_address(page)+offset, size);
}
/* Flush the current regions updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
}
current_new_areas_index = new_areas_index;
int is_in_dynamic_space = (find_page_index((void*)start) != -1);
int is_in_readonly_space =
(READ_ONLY_SPACE_START <= (unsigned)start &&
- (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
+ (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
while (words > 0) {
size_t count = 1;
int page_index = find_page_index((void*)thing);
int to_readonly_space =
(READ_ONLY_SPACE_START <= thing &&
- thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
+ thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
int to_static_space =
(STATIC_SPACE_START <= thing &&
- thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
+ thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
/* Does it point to the dynamic space? */
if (page_index != -1) {
/* Does it point to a plausible object? This check slows
* it down a lot (so it's commented out).
*
- * FIXME: Add a variable to enable this dynamically. */
- /* if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
- * lose("ptr %x to invalid object %x", thing, start); */
+ * "a lot" is serious: it ate 50 minutes cpu time on
+ * my duron 950 before I came back from lunch and
+ * killed it.
+ *
+ * FIXME: Add a variable to enable this
+ * dynamically. */
+ /*
+ if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
+ lose("ptr %x to invalid object %x", thing, start);
+ }
+ */
} else {
/* Verify that it points to another valid space. */
if (!to_readonly_space && !to_static_space
#endif
case SIMPLE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
+ case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
* to grep for all foo_size and rename the appropriate ones to
* foo_count. */
int read_only_space_size =
- (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
+ (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
- (lispobj*)READ_ONLY_SPACE_START;
int static_space_size =
- (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
+ (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
- (lispobj*)STATIC_SPACE_START;
+ struct thread *th;
+ for_each_thread(th) {
int binding_stack_size =
- (lispobj*)SymbolValue(BINDING_STACK_POINTER)
- - (lispobj*)BINDING_STACK_START;
-
+ (lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
+ - (lispobj*)th->binding_stack_start;
+ verify_space(th->binding_stack_start, binding_stack_size);
+ }
verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
verify_space((lispobj*)STATIC_SPACE_START , static_space_size);
- verify_space((lispobj*)BINDING_STACK_START , binding_stack_size);
}
static void
gencgc_verify_zero_fill(void)
{
/* Flush the alloc regions updating the tables. */
- boxed_region.free_pointer = current_region_free_pointer;
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
SHOW("verifying zero fill");
verify_zero_fill();
- current_region_free_pointer = boxed_region.free_pointer;
- current_region_end_addr = boxed_region.end_addr;
}
static void
unsigned long bytes_freed;
unsigned long i;
unsigned long static_space_size;
-
+ struct thread *th;
gc_assert(generation <= (NUM_GENERATIONS-1));
/* The oldest generation can't be raised. */
* be un-protected anyway before unmapping later. */
unprotect_oldspace();
- /* Scavenge the stack's conservative roots. */
- {
+ /* Scavenge the stacks' conservative roots. */
+ for_each_thread(th) {
void **ptr;
- for (ptr = (void **)CONTROL_STACK_END - 1;
+#ifdef LISP_FEATURE_SB_THREAD
+ struct user_regs_struct regs;
+	if(ptrace(PTRACE_GETREGS,th->pid,0,&regs)){
+ /* probably doesn't exist any more. */
+ fprintf(stderr,"child pid %d, %s\n",th->pid,strerror(errno));
+ perror("PTRACE_GETREGS");
+ }
+	preserve_pointer((void *)regs.ebx);
+	preserve_pointer((void *)regs.ecx);
+	preserve_pointer((void *)regs.edx);
+	preserve_pointer((void *)regs.esi);
+	preserve_pointer((void *)regs.edi);
+	preserve_pointer((void *)regs.ebp);
+	preserve_pointer((void *)regs.eax);
+#endif
+	for (ptr = (void **)th->control_stack_end - 1;
+#ifdef LISP_FEATURE_SB_THREAD
+	     ptr > (void **)regs.esp;
+#else
ptr > (void **)&raise;
+#endif
ptr--) {
preserve_pointer(*ptr);
}
/* Scavenge the Lisp functions of the interrupt handlers, taking
* care to avoid SIG_DFL and SIG_IGN. */
+ for_each_thread(th) {
+ struct interrupt_data *data=th->interrupt_data;
for (i = 0; i < NSIG; i++) {
- union interrupt_handler handler = interrupt_handlers[i];
+ union interrupt_handler handler = data->interrupt_handlers[i];
if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
!ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- scavenge((lispobj *)(interrupt_handlers + i), 1);
+ scavenge((lispobj *)(data->interrupt_handlers + i), 1);
+ }
+ }
+ }
+ /* Scavenge the binding stacks. */
+ {
+ struct thread *th;
+ for_each_thread(th) {
+ long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+ th->binding_stack_start;
+ scavenge((lispobj *) th->binding_stack_start,len);
+#ifdef LISP_FEATURE_SB_THREAD
+ /* do the tls as well */
+ len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ (sizeof (struct thread))/(sizeof (lispobj));
+ scavenge((lispobj *) (th+1),len);
+#endif
}
}
-
- /* Scavenge the binding stack. */
- scavenge((lispobj *) BINDING_STACK_START,
- (lispobj *)SymbolValue(BINDING_STACK_POINTER) -
- (lispobj *)BINDING_STACK_START);
/* The original CMU CL code had scavenge-read-only-space code
* controlled by the Lisp-level variable
/* Scavenge static space. */
static_space_size =
- (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) -
+ (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
(lispobj *)STATIC_SPACE_START;
if (gencgc_verbose > 1) {
FSHOW((stderr,
scavenge_newspace_generation_one_scan(new_space);
/* Flush the current regions, updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
bytes_allocated = bytes_allocated - old_bytes_allocated;
scan_weak_pointers();
/* Flush the current regions, updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
/* Free the pages in oldspace, but not those marked dont_move. */
bytes_freed = free_oldspace();
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096));
+ (lispobj)(((char *)heap_base) + last_free_page*4096),0);
return 0; /* dummy value: return something ... */
}
int gen_to_wp;
int i;
- boxed_region.free_pointer = current_region_free_pointer;
-
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
if (last_gen > NUM_GENERATIONS) {
}
/* Flush the alloc regions updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_all_page_tables();
/* Verify the new objects created by Lisp code. */
if (pre_verify_gen_0) {
- SHOW((stderr, "pre-checking generation 0\n"));
+ FSHOW((stderr, "pre-checking generation 0\n"));
verify_generation(0);
}
gc_alloc_generation = 0;
update_x86_dynamic_space_free_pointer();
-
- /* This is now done by Lisp SCRUB-CONTROL-STACK in Lisp SUB-GC, so
- * we needn't do it here: */
- /* zero_stack();*/
-
- current_region_free_pointer = boxed_region.free_pointer;
- current_region_end_addr = boxed_region.end_addr;
-
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+    if(gencgc_verbose)
+	fprintf(stderr,"Next gc when %lu bytes have been consed\n",
+		auto_gc_trigger);
SHOW("returning from collect_garbage");
}
/* Initialize gc_alloc(). */
gc_alloc_generation = 0;
- boxed_region.first_page = 0;
- boxed_region.last_page = -1;
- boxed_region.start_addr = page_address(0);
- boxed_region.free_pointer = page_address(0);
- boxed_region.end_addr = page_address(0);
- unboxed_region.first_page = 0;
- unboxed_region.last_page = -1;
- unboxed_region.start_addr = page_address(0);
- unboxed_region.free_pointer = page_address(0);
- unboxed_region.end_addr = page_address(0);
-
-#if 0 /* Lisp PURIFY is currently running on the C stack so don't do this. */
- zero_stack();
-#endif
- last_free_page = 0;
- SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));
+ gc_set_region_empty(&boxed_region);
+ gc_set_region_empty(&unboxed_region);
- current_region_free_pointer = boxed_region.free_pointer;
- current_region_end_addr = boxed_region.end_addr;
+ last_free_page = 0;
+ SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0);
if (verify_after_free_heap) {
/* Check whether purify has left any bad pointers. */
generations[i].min_av_mem_age = 0.75;
}
- /* Initialize gc_alloc.
- *
- * FIXME: identical with code in gc_free_heap(), should be shared */
+ /* Initialize gc_alloc. */
gc_alloc_generation = 0;
- boxed_region.first_page = 0;
- boxed_region.last_page = -1;
- boxed_region.start_addr = page_address(0);
- boxed_region.free_pointer = page_address(0);
- boxed_region.end_addr = page_address(0);
- unboxed_region.first_page = 0;
- unboxed_region.last_page = -1;
- unboxed_region.start_addr = page_address(0);
- unboxed_region.free_pointer = page_address(0);
- unboxed_region.end_addr = page_address(0);
+ gc_set_region_empty(&boxed_region);
+ gc_set_region_empty(&unboxed_region);
last_free_page = 0;
- current_region_free_pointer = boxed_region.free_pointer;
- current_region_end_addr = boxed_region.end_addr;
}
/* Pick up the dynamic space from after a core load.
{
int page = 0;
int addr = DYNAMIC_SPACE_START;
- int alloc_ptr = SymbolValue(ALLOCATION_POINTER);
+ int alloc_ptr = SymbolValue(ALLOCATION_POINTER,0);
/* Initialize the first region. */
do {
generations[0].bytes_allocated = 4096*page;
bytes_allocated = 4096*page;
- current_region_free_pointer = boxed_region.free_pointer;
- current_region_end_addr = boxed_region.end_addr;
}
void
\f
-/* a counter for how deep we are in alloc(..) calls */
-int alloc_entered = 0;
+extern boolean maybe_gc_pending;
/* alloc(..) is the external interface for memory allocation. It
* allocates to generation 0. It is not called from within the garbage
* collector as it is only external uses that need the check for heap
* (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
*
* The check for a GC trigger is only performed when the current
- * region is full, so in most cases it's not needed. Further MAYBE-GC
- * is only called once because Lisp will remember "need to collect
- * garbage" and get around to it when it can. */
+ * region is full, so in most cases it's not needed. */
+
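+/* A sketch of the caller-side protocol this assumes (compare the old
+ * code this replaces; the Lisp-side assembly is expected to do
+ * something equivalent):
+ *
+ *   SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1), th);
+ *   result = alloc(nbytes);
+ *   SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0), th);
+ *   if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED, th))
+ *       do_pending_interrupt(); // deferred handlers, and GC if pending
+ */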
char *
alloc(int nbytes)
{
+ struct thread *th=arch_os_get_current_thread();
+ struct alloc_region *region=
+ th ? &(th->alloc_region) : &boxed_region;
+ void *new_obj;
+ void *new_free_pointer;
+
/* Check for alignment allocation problems. */
- gc_assert((((unsigned)current_region_free_pointer & 0x7) == 0)
+ gc_assert((((unsigned)region->free_pointer & 0x7) == 0)
&& ((nbytes & 0x7) == 0));
-
- if (SymbolValue(PSEUDO_ATOMIC_ATOMIC)) {/* if already in a pseudo atomic */
-
- void *new_free_pointer;
-
- retry1:
- if (alloc_entered) {
- SHOW("alloc re-entered in already-pseudo-atomic case");
- }
- ++alloc_entered;
-
- /* Check whether there is room in the current region. */
- new_free_pointer = current_region_free_pointer + nbytes;
-
- /* FIXME: Shouldn't we be doing some sort of lock here, to
- * keep from getting screwed if an interrupt service routine
- * allocates memory between the time we calculate new_free_pointer
- * and the time we write it back to current_region_free_pointer?
- * Perhaps I just don't understand pseudo-atomics..
- *
- * Perhaps I don't. It looks as though what happens is if we
- * were interrupted any time during the pseudo-atomic
- * interval (which includes now) we discard the allocated
- * memory and try again. So, at least we don't return
- * a memory area that was allocated out from underneath us
- * by code in an ISR.
- * Still, that doesn't seem to prevent
- * current_region_free_pointer from getting corrupted:
- * We read current_region_free_pointer.
- * They read current_region_free_pointer.
- * They write current_region_free_pointer.
- * We write current_region_free_pointer, scribbling over
- * whatever they wrote. */
-
- if (new_free_pointer <= boxed_region.end_addr) {
- /* If so then allocate from the current region. */
- void *new_obj = current_region_free_pointer;
- current_region_free_pointer = new_free_pointer;
- alloc_entered--;
- return((void *)new_obj);
+ if(all_threads)
+ /* there are a few places in the C code that allocate data in the
+ * heap before Lisp starts. This is before interrupts are enabled,
+ * so we don't need to check for pseudo-atomic */
+#ifdef LISP_FEATURE_SB_THREAD
+ if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
+ register u32 fs;
+ fprintf(stderr, "fatal error in thread 0x%x, pid=%d\n",
+ th,getpid());
+ __asm__("movl %fs,%0" : "=r" (fs) : );
+ fprintf(stderr, "fs is %x, th->tls_cookie=%x (should be identical)\n",
+		fs,th->tls_cookie);
+ lose("If you see this message before 2003.05.01, mail details to sbcl-devel\n");
}
-
- if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- /* Double the trigger. */
- auto_gc_trigger *= 2;
- alloc_entered--;
- /* Exit the pseudo-atomic. */
- SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
- if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
- /* Handle any interrupts that occurred during
- * gc_alloc(..). */
- do_pending_interrupt();
- }
- funcall0(SymbolFunction(MAYBE_GC));
- /* Re-enter the pseudo-atomic. */
- SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
- SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
- goto retry1;
- }
- /* Call gc_alloc(). */
- boxed_region.free_pointer = current_region_free_pointer;
- {
- void *new_obj = gc_alloc(nbytes,0);
- current_region_free_pointer = boxed_region.free_pointer;
- current_region_end_addr = boxed_region.end_addr;
- alloc_entered--;
- return (new_obj);
- }
- } else {
- void *result;
- void *new_free_pointer;
-
- retry2:
- /* At least wrap this allocation in a pseudo atomic to prevent
- * gc_alloc() from being re-entered. */
- SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
- SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
-
- if (alloc_entered)
- SHOW("alloc re-entered in not-already-pseudo-atomic case");
- ++alloc_entered;
-
- /* Check whether there is room in the current region. */
- new_free_pointer = current_region_free_pointer + nbytes;
-
- if (new_free_pointer <= boxed_region.end_addr) {
- /* If so then allocate from the current region. */
- void *new_obj = current_region_free_pointer;
- current_region_free_pointer = new_free_pointer;
- alloc_entered--;
- SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
- if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)) {
- /* Handle any interrupts that occurred during
- * gc_alloc(..). */
- do_pending_interrupt();
- goto retry2;
- }
-
- return((void *)new_obj);
- }
-
- /* KLUDGE: There's lots of code around here shared with the
- * the other branch. Is there some way to factor out the
- * duplicate code? -- WHN 19991129 */
- if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- /* Double the trigger. */
- auto_gc_trigger *= 2;
- alloc_entered--;
- /* Exit the pseudo atomic. */
- SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
- if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
- /* Handle any interrupts that occurred during
- * gc_alloc(..); */
- do_pending_interrupt();
- }
- funcall0(SymbolFunction(MAYBE_GC));
- goto retry2;
- }
-
- /* Else call gc_alloc(). */
- boxed_region.free_pointer = current_region_free_pointer;
- result = gc_alloc(nbytes,0);
- current_region_free_pointer = boxed_region.free_pointer;
- current_region_end_addr = boxed_region.end_addr;
-
- alloc_entered--;
- SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
- if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
- /* Handle any interrupts that occurred during gc_alloc(..). */
- do_pending_interrupt();
- goto retry2;
- }
-
- return result;
+#else
+ gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
+#endif
+
+ /* maybe we can do this quickly ... */
+ new_free_pointer = region->free_pointer + nbytes;
+ if (new_free_pointer <= region->end_addr) {
+ new_obj = (void*)(region->free_pointer);
+ region->free_pointer = new_free_pointer;
+ return(new_obj); /* yup */
}
-}
-\f
-/*
- * noise to manipulate the gc trigger stuff
- */
-
-void
-set_auto_gc_trigger(os_vm_size_t dynamic_usage)
-{
- auto_gc_trigger += dynamic_usage;
+
+ /* we have to go the long way around, it seems. Check whether
+ * we should GC in the near future
+ */
+ if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
+ /* set things up so that GC happens when we finish the PA
+ * section. */
+ maybe_gc_pending=1;
+ SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),th);
+ }
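+    /* (The collection itself is then expected to run from the
+     * pending-interrupt machinery once this thread leaves its
+     * pseudo-atomic section; compare the protocol sketch above.) */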
+ new_obj = gc_alloc_with_region(nbytes,0,region,0);
+ return (new_obj);
}
-void
-clear_auto_gc_trigger(void)
-{
- auto_gc_trigger = 0;
-}
\f
/* Find the code object for the given pc, or return NULL on failure.
*
return 0;
} else {
-
- /* The only acceptable reason for an signal like this from the
- * heap is that the generational GC write-protected the page. */
- if (page_table[page_index].write_protected != 1) {
- lose("access failure in heap page not marked as write-protected");
+ if (page_table[page_index].write_protected) {
+ /* Unprotect the page. */
+ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page_index].write_protected_cleared = 1;
+ page_table[page_index].write_protected = 0;
+ } else {
+ /* The only acceptable reason for this signal on a heap
+ * access is that GENCGC write-protected the page.
+ * However, if two CPUs hit a wp page near-simultaneously,
+ * we had better not have the second one lose here if it
+ * does this test after the first one has already set wp=0
+ */
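+	    /* Concretely: CPU A faults, sees wp=1, unprotects the page
+	     * and sets wp_cleared=1; CPU B faults on the same page a
+	     * moment later and lands here with wp already 0, but
+	     * wp_cleared=1 tells it this was still the write barrier,
+	     * so the fault is handled rather than fatal. */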
+ if(page_table[page_index].write_protected_cleared != 1)
+ lose("fault in heap page not marked as write-protected");
+
+ /* Don't worry, we can handle it. */
+ return 1;
}
-
- /* Unprotect the page. */
- os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
- page_table[page_index].write_protected = 0;
- page_table[page_index].write_protected_cleared = 1;
-
- /* Don't worry, we can handle it. */
- return 1;
}
}
-
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
* it's not just a case of the program hitting the write barrier, and
* are about to let Lisp deal with it. It's basically just a
void
unhandled_sigmemoryfault()
{}
+
+void
+gc_alloc_update_all_page_tables(void)
+{
+ /* Flush the alloc regions updating the tables. */
+ struct thread *th;
+ for_each_thread(th)
+ gc_alloc_update_page_tables(0, &th->alloc_region);
+ gc_alloc_update_page_tables(1, &unboxed_region);
+ gc_alloc_update_page_tables(0, &boxed_region);
+}
+void
+gc_set_region_empty(struct alloc_region *region)
+{
+ region->first_page = 0;
+ region->last_page = -1;
+ region->start_addr = page_address(0);
+ region->free_pointer = page_address(0);
+ region->end_addr = page_address(0);
+}
+