#include <stdio.h>
#include <signal.h>
#include <errno.h>
+#include <string.h>
#include "runtime.h"
#include "sbcl.h"
#include "os.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
+
/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
+/* forward declarations */
+int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed);
+void gc_set_region_empty(struct alloc_region *region);
+void gc_alloc_update_all_page_tables(void);
+static void gencgc_pickup_dynamic(void);
+boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
+
\f
/*
* GC parameters
#endif
/* the minimum size (in bytes) for a large object*/
-unsigned large_object_size = 4 * 4096;
+unsigned large_object_size = 4 * PAGE_BYTES;
+
\f
/*
* debugging
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
-static unsigned long auto_gc_trigger = 0;
+extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
+unsigned long auto_gc_trigger = 0;
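+/* (How these fit together, as far as this file shows: collect_garbage()
+ * re-arms auto_gc_trigger to bytes_allocated + bytes_consed_between_gcs,
+ * and an allocation that pushes bytes_allocated past the trigger uses
+ * the pending-interrupt machinery above to request the next GC.) */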
/* the source and destination generations. These are set before a GC starts
* scavenging. */
int new_space;
-/* FIXME: It would be nice to use this symbolic constant instead of
- * bare 4096 almost everywhere. We could also use an assertion that
- * it's equal to getpagesize(). */
-
-#define PAGE_BYTES 4096
-
/* An array of page structures is statically allocated.
- * This helps quickly map between an address its page structure.
+ * This helps quickly map between an address and its page structure.
* NUM_PAGES is set from the size of the dynamic space. */
inline void *
page_address(int page_num)
{
- return (heap_base + (page_num * 4096));
+ return (heap_base + (page_num * PAGE_BYTES));
}
/* Find the page index within the page_table for the given
int index = addr-heap_base;
if (index >= 0) {
- index = ((unsigned int)index)/4096;
+ index = ((unsigned int)index)/PAGE_BYTES;
if (index < NUM_PAGES)
return (index);
}
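+/* (Illustrative invariant tying the two helpers above together:
+ *
+ *   find_page_index(page_address(page)) == page,  0 <= page < NUM_PAGES
+ *
+ * follows directly from the arithmetic in both functions.) */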
\f
/* This lock is to prevent multiple threads from simultaneously
* allocating new regions which overlap each other. Note that the
- * majority of GC is single-threaded, but alloc() may be called
- * from >1 thread at a time and must be thread-safe */
+ * majority of GC is single-threaded, but alloc() may be called from
+ * >1 thread at a time and must be thread-safe. This lock must be
+ * seized before all accesses to generations[] or to parts of
+ * page_table[] that other threads may want to see */
+
static lispobj free_pages_lock=0;
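+
+/* (A sketch of the intended discipline, using calls that appear below:
+ *
+ *   get_spinlock(&free_pages_lock,(int) my_region);
+ *   ... examine or update page_table[] / generations[] ...
+ *   release_spinlock(&free_pages_lock);
+ *
+ * where my_region stands for whatever alloc_region the caller holds.) */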
\f
int count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected == 1))
count++;
return count;
}
+#if QSHOW
/* Count the number of dont_move pages. */
static int
count_dont_move_pages(void)
{
}
return count;
}
+#endif /* QSHOW */
/* Work through the pages and add up the number of bytes used for the
* given generation. */
/ ((double)generations[gen].bytes_allocated);
}
+void fpu_save(int *); /* defined in x86-assem.S */
+void fpu_restore(int *); /* defined in x86-assem.S */
/* The verbose argument controls how much to print: 0 for normal
* level of detail; 1 for debugging. */
static void
/* Print the heap stats. */
fprintf(stderr,
- " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
+ " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
for (i = 0; i < gens; i++) {
int j;
int unboxed_cnt = 0;
int large_boxed_cnt = 0;
int large_unboxed_cnt = 0;
+ int pinned_cnt=0;
for (j = 0; j < last_free_page; j++)
if (page_table[j].gen == i) {
/* Count the number of boxed pages within the given
* generation. */
- if (page_table[j].allocated & BOXED_PAGE) {
+ if (page_table[j].allocated & BOXED_PAGE_FLAG) {
if (page_table[j].large_object)
large_boxed_cnt++;
else
boxed_cnt++;
}
-
+ if(page_table[j].dont_move) pinned_cnt++;
/* Count the number of unboxed pages within the given
* generation. */
- if (page_table[j].allocated & UNBOXED_PAGE) {
+ if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
if (page_table[j].large_object)
large_unboxed_cnt++;
else
gc_assert(generations[i].bytes_allocated
== count_generation_bytes_allocated(i));
fprintf(stderr,
- " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
+ " %1d: %5d %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
i,
boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
+ pinned_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*4096
+ (count_generation_pages(i)*PAGE_BYTES
- generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
first_page =
generations[gc_alloc_generation].alloc_start_page;
}
- last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region);
- bytes_found=(4096 - page_table[first_page].bytes_used)
- + 4096*(last_page-first_page);
+ last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
+ bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
/* Set up the alloc_region. */
alloc_region->first_page = first_page;
/* The first page may have already been in use. */
if (page_table[first_page].bytes_used == 0) {
if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE;
+ page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
else
- page_table[first_page].allocated = BOXED_PAGE;
+ page_table[first_page].allocated = BOXED_PAGE_FLAG;
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].large_object = 0;
page_table[first_page].first_object_offset = 0;
}
if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE);
- page_table[first_page].allocated |= OPEN_REGION_PAGE;
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
for (i = first_page+1; i <= last_page; i++) {
if (unboxed)
- page_table[i].allocated = UNBOXED_PAGE;
+ page_table[i].allocated = UNBOXED_PAGE_FLAG;
else
- page_table[i].allocated = BOXED_PAGE;
+ page_table[i].allocated = BOXED_PAGE_FLAG;
page_table[i].gen = gc_alloc_generation;
page_table[i].large_object = 0;
/* This may not be necessary for unboxed regions (think it was
* broken before!) */
page_table[i].first_object_offset =
alloc_region->start_addr - page_address(i);
- page_table[i].allocated |= OPEN_REGION_PAGE ;
+ page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
}
/* Bump up last_free_page. */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
gc_abort();
}
- new_area_start = 4096*first_page + offset;
+ new_area_start = PAGE_BYTES*first_page + offset;
/* Search backwards for a prior area that this follows from. If
- found this will save adding a new area. */
+ found, this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
unsigned area_end =
- 4096*((*new_areas)[i].page)
+ PAGE_BYTES*((*new_areas)[i].page)
+ (*new_areas)[i].offset
+ (*new_areas)[i].size;
/*FSHOW((stderr,
max_new_areas = new_areas_index;
}
-/* Update the tables for the alloc_region. The region maybe added to
+/* Update the tables for the alloc_region. The region may be added to
* the new_areas.
*
* When done the alloc_region is set up so that the next quick alloc
int region_size;
int byte_cnt;
- /*
- FSHOW((stderr,
- "/gc_alloc_update_page_tables() to gen %d:\n",
- gc_alloc_generation));
- */
first_page = alloc_region->first_page;
next_page = first_page+1;
- /* Skip if no bytes were allocated. */
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
+ /* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
* first_object_offset. */
if (page_table[first_page].bytes_used == 0)
gc_assert(page_table[first_page].first_object_offset == 0);
- page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE);
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
/* Calculate the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
- bytes_used = 4096;
+ if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
* first_object_offset pointer to the start of the region, and set
* the bytes_used. */
while (more) {
- page_table[next_page].allocated &= ~(OPEN_REGION_PAGE);
+ page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
if (unboxed)
- gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
+ gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
else
- gc_assert(page_table[next_page].allocated == BOXED_PAGE);
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
gc_assert(page_table[next_page].bytes_used == 0);
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
/* Calculate the number of bytes used in this page. */
more = 0;
if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>4096) {
- bytes_used = 4096;
+ - page_address(next_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
} else {
/* There are no bytes allocated. Unallocate the first_page if
* there are 0 bytes_used. */
- page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
if (page_table[first_page].bytes_used == 0)
- page_table[first_page].allocated = FREE_PAGE;
+ page_table[first_page].allocated = FREE_PAGE_FLAG;
}
/* Unallocate any unused pages. */
while (next_page <= alloc_region->last_page) {
gc_assert(page_table[next_page].bytes_used == 0);
- page_table[next_page].allocated = FREE_PAGE;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
next_page++;
}
-
+ release_spinlock(&free_pages_lock);
+ /* alloc_region is per-thread, we're ok to do this unlocked */
gc_set_region_empty(alloc_region);
}
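+
+/* (Region lifecycle, for orientation: gc_alloc_new_region() ORs
+ * OPEN_REGION_PAGE_FLAG into every page it hands out, and the function
+ * above clears it again once the region is closed, so at any moment the
+ * flag marks exactly the pages belonging to open allocation regions.) */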
int more;
int bytes_used;
int next_page;
- int large = (nbytes >= large_object_size);
-
- /*
- if (nbytes > 200000)
- FSHOW((stderr, "/alloc_large %d\n", nbytes));
- */
-
- /*
- FSHOW((stderr,
- "/gc_alloc_large() for %d bytes from gen %d\n",
- nbytes, gc_alloc_generation));
- */
-
- /* If the object is small, and there is room in the current region
- then allocate it in the current region. */
- if (!large
- && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
- return gc_quick_alloc(nbytes);
-
- /* To allow the allocation of small objects without the danger of
- using a page in the current boxed region, the search starts after
- the current boxed free region. XX could probably keep a page
- index ahead of the current region and bumped up here to save a
- lot of re-scanning. */
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
first_page = alloc_region->last_page+1;
}
- last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,0);
+ last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
gc_assert(first_page > alloc_region->last_page);
if (unboxed)
* first_object_offset. */
if (page_table[first_page].bytes_used == 0) {
if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE;
+ page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
else
- page_table[first_page].allocated = BOXED_PAGE;
+ page_table[first_page].allocated = BOXED_PAGE_FLAG;
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].first_object_offset = 0;
- page_table[first_page].large_object = large;
+ page_table[first_page].large_object = 1;
}
if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE);
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
- gc_assert(page_table[first_page].large_object == large);
+ gc_assert(page_table[first_page].large_object == 1);
byte_cnt = 0;
/* Calc. the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
- bytes_used = 4096;
+ if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[first_page].bytes_used = bytes_used;
* first_object_offset pointer to the start of the region, and
* set the bytes_used. */
while (more) {
- gc_assert(page_table[next_page].allocated == FREE_PAGE);
+ gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
gc_assert(page_table[next_page].bytes_used == 0);
if (unboxed)
- page_table[next_page].allocated = UNBOXED_PAGE;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
else
- page_table[next_page].allocated = BOXED_PAGE;
+ page_table[next_page].allocated = BOXED_PAGE_FLAG;
page_table[next_page].gen = gc_alloc_generation;
- page_table[next_page].large_object = large;
+ page_table[next_page].large_object = 1;
page_table[next_page].first_object_offset =
- orig_first_page_bytes_used - 4096*(next_page-first_page);
+ orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
- bytes_used = 4096;
+ if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
more = 1;
}
page_table[next_page].bytes_used = bytes_used;
+ page_table[next_page].write_protected=0;
+ page_table[next_page].dont_move=0;
byte_cnt += bytes_used;
-
next_page++;
}
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),0);
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
int
-gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed)
{
- /* if alloc_region is 0, we assume this is for a potentially large
- object */
int first_page;
int last_page;
int region_size;
int restart_page=*restart_page_ptr;
int bytes_found;
int num_pages;
- int large = !alloc_region && (nbytes >= large_object_size);
-
+ int large_p=(nbytes>=large_object_size);
gc_assert(free_pages_lock);
- /* Search for a contiguous free space of at least nbytes. If it's a
- large object then align it on a page boundary by searching for a
- free page. */
- /* To allow the allocation of small objects without the danger of
- using a page in the current boxed region, the search starts after
- the current boxed free region. XX could probably keep a page
- index ahead of the current region and bumped up here to save a
- lot of re-scanning. */
+ /* Search for a contiguous free space of at least nbytes. If it's
+ * a large object then align it on a page boundary by searching
+ * for a free page. */
do {
first_page = restart_page;
- if (large)
+ if (large_p)
while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE))
+ && (page_table[first_page].allocated != FREE_PAGE_FLAG))
first_page++;
else
while (first_page < NUM_PAGES) {
- if(page_table[first_page].allocated == FREE_PAGE)
+ if(page_table[first_page].allocated == FREE_PAGE_FLAG)
break;
- /* I don't know why we need the gen=0 test, but it
- * breaks randomly if that's omitted -dan 2003.02.26
- */
if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE : BOXED_PAGE)) &&
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
(page_table[first_page].large_object == 0) &&
- (gc_alloc_generation == 0) &&
(page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (4096-32)) &&
+ (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
(page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0))
+ (page_table[first_page].dont_move == 0)) {
break;
+ }
first_page++;
}
gc_assert(page_table[first_page].write_protected == 0);
last_page = first_page;
- bytes_found = 4096 - page_table[first_page].bytes_used;
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
num_pages = 1;
while (((bytes_found < nbytes)
- || (alloc_region && (num_pages < 2)))
+ || (!large_p && (num_pages < 2)))
&& (last_page < (NUM_PAGES-1))
- && (page_table[last_page+1].allocated == FREE_PAGE)) {
+ && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
last_page++;
num_pages++;
- bytes_found += 4096;
+ bytes_found += PAGE_BYTES;
gc_assert(page_table[last_page].write_protected == 0);
}
- region_size = (4096 - page_table[first_page].bytes_used)
- + 4096*(last_page-first_page);
+ region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
gc_assert(bytes_found == region_size);
restart_page = last_page + 1;
}
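+/* (Contract assumed by the callers seen above: on entry *restart_page_ptr
+ * is the page at which to start searching; on success it is updated to
+ * the first page of the space found, and the last page is returned.) */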
/* Allocate bytes. All the rest of the special-purpose allocation
- * functions will eventually call this (instead of just duplicating
- * parts of its code) */
+ * functions will eventually call this */
void *
gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region,
{
void *new_free_pointer;
- /* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */
+ if(nbytes>=large_object_size)
+ return gc_alloc_large(nbytes,unboxed_p,my_region);
/* Check whether there is room in the current alloc region. */
new_free_pointer = my_region->free_pointer + nbytes;
return((void *)new_obj);
}
- /* Else not enough free space in the current region. */
-
- /* If there some room left in the current region, enough to be worth
- * saving, then allocate a large object. */
- /* FIXME: "32" should be a named parameter. */
- if ((my_region->end_addr-my_region->free_pointer) > 32)
- return gc_alloc_large(nbytes, unboxed_p, my_region);
-
- /* Else find a new region. */
+ /* Else not enough free space in the current region: retry with a
+ * new region. */
- /* Finished with the current region. */
gc_alloc_update_page_tables(unboxed_p, my_region);
-
- /* Set up a new region. */
gc_alloc_new_region(nbytes, unboxed_p, my_region);
-
- /* Should now be enough room. */
-
- /* Check whether there is room in the current region. */
- new_free_pointer = my_region->free_pointer + nbytes;
-
- if (new_free_pointer <= my_region->end_addr) {
- /* If so then allocate from the current region. */
- void *new_obj = my_region->free_pointer;
- my_region->free_pointer = new_free_pointer;
- /* Check whether the current region is almost empty. */
- if ((my_region->end_addr - my_region->free_pointer) <= 32) {
- /* If so find, finished with the current region. */
- gc_alloc_update_page_tables(unboxed_p, my_region);
-
- /* Set up a new region. */
- gc_alloc_new_region(32, unboxed_p, my_region);
- }
-
- return((void *)new_obj);
- }
-
- /* shouldn't happen */
- gc_assert(0);
- return((void *) NIL); /* dummy value: return something ... */
+ return gc_alloc_with_region(nbytes,unboxed_p,my_region,0);
}
+/* these are only used during GC: all allocation from the mutator calls
+ * alloc() -> gc_alloc_with_region() with the appropriate per-thread
+ * region */
+
void *
gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
{
return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
}
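+
+/* (Rough allocation call paths, as described above: the Lisp mutator
+ * enters through alloc() with its per-thread region, while the
+ * collector's copying functions use gc_general_alloc() and the
+ * gc_quick_alloc* wrappers; both routes funnel into
+ * gc_alloc_with_region().) */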
-
-
-static void *
-gc_alloc(int nbytes,int unboxed_p)
-{
- /* this is the only function that the external interface to
- * allocation presently knows how to call: Lisp code will never
- * allocate large objects, or to unboxed space, or `quick'ly.
- * Any of that stuff will only ever happen inside of GC */
- return gc_general_alloc(nbytes,unboxed_p,0);
-}
-
-/* Allocate space from the boxed_region. If there is not enough free
- * space then call gc_alloc to do the job. A pointer to the start of
- * the object is returned. */
static inline void *
gc_quick_alloc(int nbytes)
{
return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}
-/* Allocate space for the possibly large boxed object. If it is a
- * large object then do a large alloc else use gc_quick_alloc. Note
- * that gc_quick_alloc will eventually fall through to
- * gc_general_alloc which may allocate the object in a large way
- * anyway, but based on decisions about the free space in the current
- * region, not the object size itself */
-
static inline void *
gc_quick_alloc_large(int nbytes)
{
- if (nbytes >= large_object_size)
- return gc_alloc_large(nbytes, ALLOC_BOXED, &boxed_region);
- else
- return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}
static inline void *
return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
-/* Allocate space for the object. If it is a large object then do a
- * large alloc else allocate from the current region. If there is not
- * enough free space then call general gc_alloc_unboxed() to do the job.
- *
- * A pointer to the start of the object is returned. */
static inline void *
gc_quick_alloc_large_unboxed(int nbytes)
{
- if (nbytes >= large_object_size)
- return gc_alloc_large(nbytes,ALLOC_UNBOXED,&unboxed_region);
- else
- return gc_quick_alloc_unboxed(nbytes);
+ return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
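+
+/* (All four quick-alloc wrappers now leave the large-object decision to
+ * gc_alloc_with_region(), which diverts any request of at least
+ * large_object_size straight to gc_alloc_large().) */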
\f
/*
{
int tag;
lispobj *new;
- lispobj *source, *dest;
int first_page;
gc_assert(is_lisp_pointer(object));
gc_assert((nwords & 0x01) == 0);
- /* Check whether it's a large object. */
+ /* Check whether it's in a large object region. */
first_page = find_page_index((void *)object);
gc_assert(first_page >= 0);
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_table[next_page].allocated == BOXED_PAGE);
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
+ os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
page_table[next_page].write_protected = 0;
}
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated == BOXED_PAGE);
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- (page_table[next_page].allocated == BOXED_PAGE) &&
+ (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
/* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected as they
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
page_table[next_page].bytes_used = 0;
bytes_freed += old_bytes_used;
next_page++;
/* Allocate space. */
new = gc_quick_alloc_large(nwords*4);
- dest = new;
- source = (lispobj *) native_pointer(object);
-
- /* Copy the object. */
- while (nwords > 0) {
- dest[0] = source[0];
- dest[1] = source[1];
- dest += 2;
- source += 2;
- nwords -= 2;
- }
+ memcpy(new,native_pointer(object),nwords*4);
/* Return Lisp pointer of new object. */
return ((lispobj) new) | tag;
{
int tag;
lispobj *new;
- lispobj *source, *dest;
gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
/* Allocate space. */
new = gc_quick_alloc_unboxed(nwords*4);
- dest = new;
- source = (lispobj *) native_pointer(object);
-
- /* Copy the object. */
- while (nwords > 0) {
- dest[0] = source[0];
- dest[1] = source[1];
- dest += 2;
- source += 2;
- nwords -= 2;
- }
+ memcpy(new,native_pointer(object),nwords*4);
/* Return Lisp pointer of new object. */
return ((lispobj) new) | tag;
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
- || (page_table[next_page].allocated == BOXED_PAGE));
+ gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE;
- remaining_bytes -= 4096;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE)
- || (page_table[next_page].allocated == BOXED_PAGE)) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
- /* Checks out OK, free the page. Don't need to both zeroing
+ /* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected, even if
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
page_table[next_page].bytes_used = 0;
bytes_freed += old_bytes_used;
next_page++;
code objects. Check. */
fixups = new_code->constants[0];
- /* It will be 0 or the unbound-marker if there are no fixups, and
- * will be an other pointer if it is valid. */
+ /* It will be 0 or the unbound-marker if there are no fixups (as
+ * will be the case if the code object has been purified, for
+ * example) and will be an other pointer if it is valid. */
if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
!is_lisp_pointer(fixups)) {
/* Check for possible errors. */
if (check_code_fixups)
sniff_code_object(new_code, displacement);
- /*fprintf(stderr,"Fixups for code object not found!?\n");
- fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
- new_code, nheader_words, ncode_words);
- fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
- constants_start_addr,constants_end_addr,
- code_start_addr,code_end_addr);*/
return;
}
fixups_vector = (struct vector *)native_pointer(fixups);
/* Could be pointing to a forwarding pointer. */
+ /* FIXME: is this always in from_space? If so, this code could be
+ * replaced with forwarding_pointer_p/forwarding_pointer_value. */
if (is_lisp_pointer(fixups) &&
(find_page_index((void*)fixups_vector) != -1) &&
(fixups_vector->header == 0x01)) {
return (NULL);
}
-static lispobj*
+lispobj*
search_read_only_space(lispobj *pointer)
{
lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
return (search_space(start, (pointer+2)-start, pointer));
}
-static lispobj *
+lispobj *
search_static_space(lispobj *pointer)
{
lispobj* start = (lispobj*)STATIC_SPACE_START;
lispobj *
search_dynamic_space(lispobj *pointer)
{
- int page_index = find_page_index(pointer);
+ int page_index = find_page_index(pointer);
lispobj *start;
/* The address may be invalid, so do some checks. */
- if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
+ if ((page_index == -1) ||
+ (page_table[page_index].allocated == FREE_PAGE_FLAG))
return NULL;
start = (lispobj *)((void *)page_address(page_index)
+ page_table[page_index].first_object_offset);
/* Is there any possibility that pointer is a valid Lisp object
* reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing? */
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointers() */
static int
possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
/* Check that the object pointed to is consistent with the pointer
* low tag.
- *
- * FIXME: It's not safe to rely on the result from this check
- * before an object is initialized. Thus, if we were interrupted
- * just as an object had been allocated but not initialized, the
- * GC relying on this result could bogusly reclaim the memory.
- * However, we can't really afford to do without this check. So
- * we should make it safe somehow.
- * (1) Perhaps just review the code to make sure
- * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
- * thing is wrapped around critical sections where allocated
- * memory type bits haven't been set.
- * (2) Perhaps find some other hack to protect against this, e.g.
- * recording the result of the last call to allocate-lisp-memory,
- * and returning true from this function when *pointer is
- * a reference to that result.
- *
- * (surely pseudo-atomic is supposed to be used for exactly this?)
*/
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_STRING_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case COMPLEX_ARRAY_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
case LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
/* Check whether it's a vector or bignum object. */
switch (widetag_of(where[0])) {
case SIMPLE_VECTOR_WIDETAG:
- boxed = BOXED_PAGE;
+ boxed = BOXED_PAGE_FLAG;
break;
case BIGNUM_WIDETAG:
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- boxed = UNBOXED_PAGE;
+ boxed = UNBOXED_PAGE_FLAG;
break;
default:
return;
next_page = first_page;
remaining_bytes = nwords*4;
- while (remaining_bytes > 4096) {
+ while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == BOXED_PAGE)
- || (page_table[next_page].allocated == UNBOXED_PAGE));
+ gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].first_object_offset ==
- -4096*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == 4096);
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].allocated = boxed;
/* Shouldn't be write-protected at this stage. Essential that the
* pages aren't. */
gc_assert(!page_table[next_page].write_protected);
- remaining_bytes -= 4096;
+ remaining_bytes -= PAGE_BYTES;
next_page++;
}
/* Free any remaining pages; needs care. */
next_page++;
- while ((old_bytes_used == 4096) &&
+ while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE)
- || (page_table[next_page].allocated == BOXED_PAGE)) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
page_table[next_page].large_object &&
(page_table[next_page].first_object_offset ==
- -(next_page - first_page)*4096)) {
+ -(next_page - first_page)*PAGE_BYTES)) {
- /* It checks out OK, free the page. We don't need to both zeroing
+ /* It checks out OK, free the page. We don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write protected as they
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
page_table[next_page].bytes_used = 0;
bytes_freed += old_bytes_used;
next_page++;
* page_table so that it will not be relocated during a GC.
*
* This involves locating the page it points to, then backing up to
- * the first page that has its first object start at offset 0, and
- * then marking all pages dont_move from the first until a page that
- * ends by being full, or having free gen.
- *
- * This ensures that objects spanning pages are not broken.
+ * the start of its region, then marking all pages dont_move from there
+ * up to the first page that's not full or has a different generation
*
* It is assumed that all the page static flags have been cleared at
* the start of a GC.
/* quick check 1: Address is quite likely to have been invalid. */
if ((addr_page_index == -1)
- || (page_table[addr_page_index].allocated == FREE_PAGE)
+ || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
|| (page_table[addr_page_index].bytes_used == 0)
|| (page_table[addr_page_index].gen != from_space)
/* Skip if already marked dont_move. */
|| (page_table[addr_page_index].dont_move != 0))
return;
- gc_assert(!(page_table[addr_page_index].allocated & OPEN_REGION_PAGE));
+ gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
/* (Now that we know that addr_page_index is in range, it's
* safe to index into page_table[] with it.) */
region_allocation = page_table[addr_page_index].allocated;
/* quick check 2: Check the offset within the page.
*
- * FIXME: The mask should have a symbolic name, and ideally should
- * be derived from page size instead of hardwired to 0xfff.
- * (Also fix other uses of 0xfff, elsewhere.) */
- if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used)
+ */
+ if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
return;
/* Filter out anything which can't be a pointer to a Lisp object
* (or, as a special case which also requires dont_move, a return
* address referring to something in a CodeObject). This is
* expensive but important, since it vastly reduces the
- * probability that random garbage will be bogusly interpreter as
+ * probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
if (!(possibly_valid_dynamic_space_pointer(addr)))
return;
- first_page = addr_page_index;
- /* Work backwards to find a page with a first_object_offset of 0.
- * The pages should be contiguous with all bytes used in the same
- * gen. Assumes the first_object_offset is negative or zero. */
-
- /* this is probably needlessly conservative. The first object in
- * the page may not even be the one we were passed a pointer to:
- * if this is the case, we will write-protect all the previous
- * object's pages too.
- */
+ /* Find the beginning of the region. Note that there may be
+ * objects in the region preceding the one that we were passed a
+ * pointer to: if this is the case, we will write-protect all the
+ * previous objects' pages too. */
+#if 0
+ /* I think this'd work just as well, but without the assertions.
+ * -dan 2004.01.01 */
+ first_page=
+ find_page_index(page_address(addr_page_index)+
+ page_table[addr_page_index].first_object_offset);
+#else
+ first_page = addr_page_index;
while (page_table[first_page].first_object_offset != 0) {
--first_page;
/* Do some checks. */
- gc_assert(page_table[first_page].bytes_used == 4096);
+ gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
gc_assert(page_table[first_page].gen == from_space);
gc_assert(page_table[first_page].allocated == region_allocation);
}
+#endif
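+ /* (Worked example of the walk above: a region spanning pages P..P+2
+ * leaves first_object_offset 0 on P, -PAGE_BYTES on P+1 and
+ * -2*PAGE_BYTES on P+2, so the walk terminates at P no matter which
+ * of the three pages addr fell on.) */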
/* Adjust any large objects before promotion as they won't be
* copied after promotion. */
* free area in which case it's ignored here. Note it gets
* through the valid pointer test above because the tail looks
* like conses. */
- if ((page_table[addr_page_index].allocated == FREE_PAGE)
+ if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
- || (((unsigned)addr & 0xfff)
+ || (((unsigned)addr & (PAGE_BYTES - 1))
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
"weird? ignore ptr 0x%x to freed area of large object\n",
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < 4096)
- /* ..or it is 4096 and is the last in the block */
- || (page_table[i+1].allocated == FREE_PAGE)
+ if ((page_table[i].bytes_used < PAGE_BYTES)
+ /* ..or it is PAGE_BYTES and is the last in the block */
+ || (page_table[i+1].allocated == FREE_PAGE_FLAG)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
|| (page_table[i+1].first_object_offset == 0))
int num_words = page_table[page].bytes_used / 4;
/* Shouldn't be a free page. */
- gc_assert(page_table[page].allocated != FREE_PAGE);
+ gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
gc_assert(page_table[page].bytes_used != 0);
- /* Skip if it's already write-protected or an unboxed page. */
+ /* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
- || (page_table[page].allocated & UNBOXED_PAGE))
+ || page_table[page].dont_move
+ || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
return (0);
/* Scan the page for pointers to younger generations or the
/* Check that it's in the dynamic space */
if (index != -1)
if (/* Does it point to a younger or the temp. generation? */
- ((page_table[index].allocated != FREE_PAGE)
+ ((page_table[index].allocated != FREE_PAGE_FLAG)
&& (page_table[index].bytes_used != 0)
&& ((page_table[index].gen < gen)
|| (page_table[index].gen == NUM_GENERATIONS)))
/*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
os_protect((void *)page_addr,
- 4096,
+ PAGE_BYTES,
OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
/* Scavenge a generation.
*
* This will not resolve all pointers when generation is the new
- * space, as new objects may be added which are not check here - use
+ * space, as new objects may be added which are not checked here - use
* scavenge_newspace generation.
*
* Write-protected pages should not have any pointers to the
#endif
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated & BOXED_PAGE)
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
- int last_page;
+ int last_page,j;
+ int write_protected=1;
- /* This should be the start of a contiguous block. */
+ /* This should be the start of a region */
gc_assert(page_table[i].first_object_offset == 0);
- /* We need to find the full extent of this contiguous
- * block in case objects span pages. */
-
- /* Now work forward until the end of this contiguous area
- * is found. A small area is preferred as there is a
- * better chance of its pages being write-protected. */
- for (last_page = i; ; last_page++)
- /* Check whether this is the last page in this contiguous
- * block. */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE))
+ /* Now work forward until the end of the region */
+ for (last_page = i; ; last_page++) {
+ write_protected =
+ write_protected && page_table[last_page].write_protected;
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].first_object_offset == 0))
break;
-
- /* Do a limited check for write_protected pages. If all pages
- * are write_protected then there is no need to scavenge. */
- {
- int j, all_wp = 1;
- for (j = i; j <= last_page; j++)
- if (page_table[j].write_protected == 0) {
- all_wp = 0;
- break;
- }
-#if !SC_GEN_CK
- if (all_wp == 0)
-#endif
- {
- scavenge(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*4096)/4);
-
- /* Now scan the pages and write protect those
- * that don't have pointers to younger
- * generations. */
- if (enable_page_protection) {
- for (j = i; j <= last_page; j++) {
- num_wp += update_page_write_prot(j);
- }
- }
+ }
+ if (!write_protected) {
+ scavenge(page_address(i), (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES)/4);
+
+ /* Now scan the pages and write protect those that
+ * don't have pointers to younger generations. */
+ if (enable_page_protection) {
+ for (j = i; j <= last_page; j++) {
+ num_wp += update_page_write_prot(j);
}
+ }
}
i = last_page;
}
}
-
if ((gencgc_verbose > 1) && (num_wp != 0)) {
FSHOW((stderr,
"/write protected %d pages within generation %d\n",
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < NUM_PAGES; i++) {
- if ((page_table[i].allocation ! =FREE_PAGE)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)) {
"/starting one full scan of newspace generation %d\n",
generation));
for (i = 0; i < last_free_page; i++) {
- /* note that this skips over open regions when it encounters them */
- if ((page_table[i].allocated == BOXED_PAGE)
+ /* Note that this skips over open regions when it encounters them. */
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& ((page_table[i].write_protected == 0)
* cleared before promotion.) */
|| (page_table[i].dont_move == 1))) {
int last_page;
+ int all_wp=1;
/* The scavenge will start at the first_object_offset of page i.
*
* Now work forward until the end of this contiguous area
* is found. A small area is preferred as there is a
* better chance of its pages being write-protected. */
for (last_page = i; ;last_page++) {
+ /* If all pages are write-protected and movable,
+ * then no need to scavenge */
+ all_wp=all_wp && page_table[last_page].write_protected &&
+ !page_table[last_page].dont_move;
+
/* Check whether this is the last page in this
* contiguous block */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE))
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].first_object_offset == 0))
break;
}
- /* Do a limited check for write-protected pages. If all
- * pages are write-protected then no need to scavenge,
- * except if the pages are marked dont_move. */
- {
- int j, all_wp = 1;
- for (j = i; j <= last_page; j++)
- if ((page_table[j].write_protected == 0)
- || (page_table[j].dont_move != 0)) {
- all_wp = 0;
- break;
- }
-
- if (!all_wp) {
- int size;
-
- /* Calculate the size. */
- if (last_page == i)
- size = (page_table[last_page].bytes_used
- - page_table[i].first_object_offset)/4;
- else
- size = (page_table[last_page].bytes_used
- + (last_page-i)*4096
- - page_table[i].first_object_offset)/4;
-
- {
- new_areas_ignore_page = last_page;
-
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
-
- }
- }
+ /* Skip the scavenge when every page in the block turned out to be
+ * write-protected and movable. */
+ if (!all_wp) {
+ int size;
+
+ size = (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES
+ - page_table[i].first_object_offset)/4;
+ new_areas_ignore_page = last_page;
+
+ scavenge(page_address(i) +
+ page_table[i].first_object_offset,
+ size);
+
}
-
i = last_page;
}
}
struct new_area (*current_new_areas)[] = &new_areas_1;
int current_new_areas_index;
- /* the new_areas created but the previous scavenge cycle */
+ /* the new_areas created by the previous scavenge cycle */
struct new_area (*previous_new_areas)[] = NULL;
int previous_new_areas_index;
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < NUM_PAGES; i++) {
- if ((page_table[i].allocation != FREE_PAGE)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)
int i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == from_space)) {
void *page_start;
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[i].write_protected) {
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[i].write_protected = 0;
}
}
do {
/* Find a first page for the next region of pages. */
while ((first_page < last_free_page)
- && ((page_table[first_page].allocated == FREE_PAGE)
+ && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
|| (page_table[first_page].bytes_used == 0)
|| (page_table[first_page].gen != from_space)))
first_page++;
bytes_freed += page_table[last_page].bytes_used;
generations[page_table[last_page].gen].bytes_allocated -=
page_table[last_page].bytes_used;
- page_table[last_page].allocated = FREE_PAGE;
+ page_table[last_page].allocated = FREE_PAGE_FLAG;
page_table[last_page].bytes_used = 0;
/* Remove any write-protection. We should be able to rely
void *page_start = (void *)page_address(last_page);
if (page_table[last_page].write_protected) {
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[last_page].write_protected = 0;
}
}
last_page++;
}
while ((last_page < last_free_page)
- && (page_table[last_page].allocated != FREE_PAGE)
+ && (page_table[last_page].allocated != FREE_PAGE_FLAG)
&& (page_table[last_page].bytes_used != 0)
&& (page_table[last_page].gen == from_space));
page_start = (void *)page_address(first_page);
- os_invalidate(page_start, 4096*(last_page-first_page));
- addr = os_validate(page_start, 4096*(last_page-first_page));
+ os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
+ addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
if (addr == NULL || addr != page_start) {
/* Is this an error condition? I couldn't really tell from
* the old CMU CL code, which fprintf'ed a message with
int *page_start;
page_start = (int *)page_address(first_page);
- i586_bzero(page_start, 4096*(last_page-first_page));
+ i586_bzero(page_start, PAGE_BYTES*(last_page-first_page));
}
first_page = last_page;
if (page_index != -1) {
/* If it's within the dynamic space it should point to a used
* page. XX Could check the offset too. */
- if ((page_table[page_index].allocated != FREE_PAGE)
+ if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
&& (page_table[page_index].bytes_used == 0))
lose ("Ptr %x @ %x sees free page.", thing, start);
/* Check that it doesn't point to a forwarding pointer! */
}
}
} else {
- if (thing & 0x3) { /* Skip fixnums. FIXME: There should be an
- * is_fixnum for this. */
-
+ if (!(fixnump(thing))) {
+ /* skip fixnums */
switch(widetag_of(*start)) {
/* boxed objects */
case RATIO_WIDETAG:
case COMPLEX_WIDETAG:
case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_STRING_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case COMPLEX_ARRAY_WIDETAG:
* there's no byte compiler, but I've got
* too much to worry about right now to try
* to make sure. -- WHN 2001-10-06 */
- && !(code->trace_table_offset & 0x3)
+ && fixnump(code->trace_table_offset)
/* Only when enabled */
&& verify_dynamic_code_check) {
FSHOW((stderr,
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
int i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
int last_page;
for (last_page = i; ;last_page++)
/* Check whether this is the last page in this contiguous
* block. */
- if ((page_table[last_page].bytes_used < 4096)
- /* Or it is 4096 and is the last in the block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
|| (page_table[last_page+1].allocated != region_allocation)
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
break;
verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*4096)/4);
+ + (last_page-i)*PAGE_BYTES)/4);
i = last_page;
}
}
int page;
for (page = 0; page < last_free_page; page++) {
- if (page_table[page].allocated == FREE_PAGE) {
+ if (page_table[page].allocated == FREE_PAGE_FLAG) {
/* The whole page should be zero filled. */
int *start_addr = (int *)page_address(page);
int size = 1024;
}
}
} else {
- int free_bytes = 4096 - page_table[page].bytes_used;
+ int free_bytes = PAGE_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
int *start_addr = (int *)((unsigned)page_address(page)
+ page_table[page].bytes_used);
gc_assert(generation < NUM_GENERATIONS);
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated == BOXED_PAGE)
+ if ((page_table[i].allocated == BOXED_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
+ && !page_table[i].dont_move
&& (page_table[i].gen == generation)) {
void *page_start;
page_start = (void *)page_address(i);
os_protect(page_start,
- 4096,
+ PAGE_BYTES,
OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
/* Note the page as protected in the page tables. */
/* Before any pointers are preserved, the dont_move flags on the
* pages need to be cleared. */
for (i = 0; i < last_free_page; i++)
- page_table[i].dont_move = 0;
+ if(page_table[i].gen==from_space)
+ page_table[i].dont_move = 0;
/* Un-write-protect the old-space pages. This is essential for the
* promoted pages as they may contain pointers into the old-space
unprotect_oldspace();
/* Scavenge the stacks' conservative roots. */
+
+ /* There are potentially two stacks for each thread: the main
+ * stack, which may contain Lisp pointers, and the alternate stack.
+ * We don't ever run Lisp code on the altstack, but it may
+ * host a sigcontext with Lisp objects in it. */
+
+ /* What we need to do: (1) find the stack pointer for the main
+ * stack and scavenge the stack above it; (2) find the interrupt
+ * context on the alternate stack that might contain Lisp values,
+ * and scavenge that. */
+
+ /* We assume that none of the preceding applies to the thread that
+ * initiates GC. If you ever call GC from inside an altstack
+ * handler, you will lose. */
for_each_thread(th) {
void **ptr;
+ void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
- struct user_regs_struct regs;
- if(ptrace(PTRACE_GETREGS,th->pid,0,®s)){
- /* probably doesn't exist any more. */
- fprintf(stderr,"child pid %d, %s\n",th->pid,strerror(errno));
- perror("PTRACE_GETREGS");
+ int i,free;
+ if(th==arch_os_get_current_thread()) {
+ esp = (void **) &raise;
+ } else {
+ void **esp1;
+ free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+ for(i=free-1;i>=0;i--) {
+ os_context_t *c=th->interrupt_contexts[i];
+ esp1 = (void **) *os_context_register_addr(c,reg_ESP);
+ if(esp1>=th->control_stack_start && esp1<th->control_stack_end) {
+ if(esp1<esp) esp=esp1;
+ for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+ preserve_pointer(*ptr);
+ }
+ }
+ }
}
- preserve_pointer(regs.ebx);
- preserve_pointer(regs.ecx);
- preserve_pointer(regs.edx);
- preserve_pointer(regs.esi);
- preserve_pointer(regs.edi);
- preserve_pointer(regs.ebp);
- preserve_pointer(regs.eax);
-#endif
- for (ptr = ((void **)
- ((void *)th->control_stack_start
- + THREAD_CONTROL_STACK_SIZE)
- -1);
-#ifdef LISP_FEATURE_SB_THREAD
- ptr > regs.esp;
#else
- ptr > (void **)&raise;
+ esp = (void **) &raise;
#endif
- ptr--) {
+ for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
preserve_pointer(*ptr);
}
}
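+ /* (Note on the &raise trick above: taking the address of a variable
+ * in this frame yields a pointer into the current C stack, so
+ * scanning from control_stack_end down to it covers the live part of
+ * the GCing thread's control stack.) */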
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
- /* FIXME: 4096 should be symbolic constant here and
- * prob'ly elsewhere too. */
- num_dont_move_pages * 4096);
+ num_dont_move_pages * PAGE_BYTES);
}
#endif
int i;
for (i = 0; i < NUM_PAGES; i++)
- if ((page_table[i].allocated != FREE_PAGE)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0))
last_page = i;
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*4096),0);
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
return 0; /* dummy value: return something ... */
}
gc_alloc_generation = 0;
update_x86_dynamic_space_free_pointer();
-
+ auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
+ if(gencgc_verbose)
+ fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+ auto_gc_trigger);
SHOW("returning from collect_garbage");
}
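+/* Editor's note (illustrative numbers only): auto_gc_trigger is now
+ * re-armed relative to the post-GC heap size rather than doubled.
+ * If a collection leaves bytes_allocated at 12MB and
+ * bytes_consed_between_gcs is 20MB (hypothetical values), then
+ *
+ *   auto_gc_trigger = 12MB + 20MB = 32MB
+ *
+ * and alloc() will arrange the next collection as soon as
+ * bytes_allocated exceeds 32MB. */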
for (page = 0; page < NUM_PAGES; page++) {
/* Skip free pages which should already be zero filled. */
- if (page_table[page].allocated != FREE_PAGE) {
+ if (page_table[page].allocated != FREE_PAGE_FLAG) {
void *page_start, *addr;
/* Mark the page free. The other slots are assumed invalid
- * when it is a FREE_PAGE and bytes_used is 0 and it
- * should not be write-protected -- except that the
- * generation is used for the current region but it sets
- * that up. */
+ * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
+ * should not be write-protected -- except that the
+ * generation slot is still consulted for the current
+ * allocation region, and the region-setup code initializes
+ * that itself. */
- page_table[page].allocated = FREE_PAGE;
+ page_table[page].allocated = FREE_PAGE_FLAG;
page_table[page].bytes_used = 0;
/* Zero the page. */
page_start = (void *)page_address(page);
/* First, remove any write-protection. */
- os_protect(page_start, 4096, OS_VM_PROT_ALL);
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[page].write_protected = 0;
- os_invalidate(page_start,4096);
- addr = os_validate(page_start,4096);
+ os_invalidate(page_start,PAGE_BYTES);
+ addr = os_validate(page_start,PAGE_BYTES);
if (addr == NULL || addr != page_start) {
lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
page_start,
} else if (gencgc_zero_check_during_free_heap) {
/* Double-check that the page is zero filled. */
int *page_start, i;
- gc_assert(page_table[page].allocated == FREE_PAGE);
+ gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
gc_assert(page_table[page].bytes_used == 0);
page_start = (int *)page_address(page);
- for (i=0; i<1024; i++) {
+ for (i=0; i<PAGE_BYTES/sizeof(int); i++) {
/* Initialize each page structure. */
for (i = 0; i < NUM_PAGES; i++) {
/* Initialize all pages as free. */
- page_table[i].allocated = FREE_PAGE;
+ page_table[i].allocated = FREE_PAGE_FLAG;
page_table[i].bytes_used = 0;
/* Pages are not write-protected at startup. */
/* Pick up the dynamic space from after a core load.
*
* The ALLOCATION_POINTER points to the end of the dynamic space.
- *
- * XX A scan is needed to identify the closest first objects for pages. */
+ */
+
static void
gencgc_pickup_dynamic(void)
{
int page = 0;
- int addr = DYNAMIC_SPACE_START;
int alloc_ptr = SymbolValue(ALLOCATION_POINTER,0);
+ lispobj *prev=(lispobj *)page_address(page);
- /* Initialize the first region. */
do {
- page_table[page].allocated = BOXED_PAGE;
+ lispobj *first,*ptr= (lispobj *)page_address(page);
+ page_table[page].allocated = BOXED_PAGE_FLAG;
page_table[page].gen = 0;
- page_table[page].bytes_used = 4096;
+ page_table[page].bytes_used = PAGE_BYTES;
page_table[page].large_object = 0;
+
+ first=search_space(prev,(ptr+2)-prev,ptr);
+ if(ptr == first) prev=ptr;
page_table[page].first_object_offset =
- (void *)DYNAMIC_SPACE_START - page_address(page);
- addr += 4096;
+ (void *)prev - page_address(page);
page++;
- } while (addr < alloc_ptr);
+ } while (page_address(page) < (void *)alloc_ptr);
- generations[0].bytes_allocated = 4096*page;
- bytes_allocated = 4096*page;
+ generations[0].bytes_allocated = PAGE_BYTES*page;
+ bytes_allocated = PAGE_BYTES*page;
}
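+/* Editor's sketch of how the first_object_offset computed above is
+ * typically consumed; first_object_start() is a hypothetical name
+ * used only for illustration.  The offset is zero or negative,
+ * pointing back to the last object that starts at or before the
+ * page:
+ *
+ *   static lispobj *
+ *   first_object_start(void *addr)
+ *   {
+ *       int page = find_page_index(addr);
+ *       return (lispobj *)((char *)page_address(page)
+ *                          + page_table[page].first_object_offset);
+ *   }
+ */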
+
void
gc_initialize_pointers(void)
{
\f
-extern boolean maybe_gc_pending ;
/* alloc(..) is the external interface for memory allocation. It
* allocates to generation 0. It is not called from within the garbage
* collector as it is only external uses that need the check for heap
/* there are a few places in the C code that allocate data in the
* heap before Lisp starts. This is before interrupts are enabled,
* so we don't need to check for pseudo-atomic */
- gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
-
+#ifdef LISP_FEATURE_SB_THREAD
+ if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
+ /* Read %fs directly so it can be compared with the cookie
+ * recorded when this thread's TLS segment was set up. */
+ register u32 fs;
+ fprintf(stderr, "fatal error in thread %p, pid=%d\n",
+ (void *)th, getpid());
+ __asm__("movl %%fs,%0" : "=r" (fs) : );
+ fprintf(stderr, "fs is %x, th->tls_cookie=%x\n",
+ fs, th->tls_cookie);
+ lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
+ }
+#else
+ gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
+#endif
+
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
if (new_free_pointer <= region->end_addr) {
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- auto_gc_trigger *= 2;
/* set things up so that GC happens when we finish the PA
- * section. */
- maybe_gc_pending=1;
- SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),th);
+ * section. We only do this if there isn't already a pending
+ * handler, in case that handler is itself a GC; if it was
+ * something else, the next allocation will bring us back to
+ * this point anyway, so no harm is done. */
+ struct interrupt_data *data=th->interrupt_data;
+ if(!data->pending_handler)
+ maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0);
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
return (new_obj);
}
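+/* Editor's sketch of the intended control flow for the deferred-GC
+ * path above, assuming maybe_defer_handler() records its argument in
+ * data->pending_handler while the thread is pseudo-atomic:
+ *
+ *   alloc() sees bytes_allocated > auto_gc_trigger
+ *     -> maybe_defer_handler(interrupt_maybe_gc_int, data, 0, 0, 0)
+ *     -> the pending handler runs when the pseudo-atomic section
+ *        is left
+ *     -> interrupt_maybe_gc_int() performs the collection
+ */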
\f
-/*
- * noise to manipulate the gc trigger stuff
- */
-
-void
-set_auto_gc_trigger(os_vm_size_t dynamic_usage)
-{
- auto_gc_trigger += dynamic_usage;
-}
-
-void
-clear_auto_gc_trigger(void)
-{
- auto_gc_trigger = 0;
-}
-\f
/* Find the code object for the given pc, or return NULL on failure.
*
* FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */
return 0;
} else {
-
- /* The only acceptable reason for an signal like this from the
- * heap is that the generational GC write-protected the page. */
- if (page_table[page_index].write_protected != 1) {
- lose("access failure in heap page not marked as write-protected");
+ if (page_table[page_index].write_protected) {
+ /* Unprotect the page. */
+ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page_index].write_protected_cleared = 1;
+ page_table[page_index].write_protected = 0;
+ } else {
+ /* The only acceptable reason for this signal on a heap
+ * access is that GENCGC write-protected the page.
+ * However, if two CPUs hit a write-protected page
+ * near-simultaneously, the second one must not lose here
+ * just because it runs this test after the first one has
+ * already cleared write_protected. */
+ if(page_table[page_index].write_protected_cleared != 1)
+ lose("fault in heap page not marked as write-protected");
}
-
- /* Unprotect the page. */
- os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
- page_table[page_index].write_protected = 0;
- page_table[page_index].write_protected_cleared = 1;
-
/* Don't worry, we can handle it. */
return 1;
}
}
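+/* Editor's sketch of the race discussed above (illustrative
+ * timeline):
+ *
+ *   CPU A                              CPU B
+ *   faults on write-protected page P   faults on the same page P
+ *   sees write_protected == 1
+ *   unprotects P, sets
+ *     write_protected_cleared = 1
+ *     and write_protected = 0
+ *                                      sees write_protected == 0
+ *                                      write_protected_cleared == 1,
+ *                                      so the fault is benign: return 1
+ */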
-
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
* it's not just a case of the program hitting the write barrier, and
* are about to let Lisp deal with it. It's basically just a
unhandled_sigmemoryfault()
{}
-gc_alloc_update_all_page_tables(void)
+void gc_alloc_update_all_page_tables(void)
{
/* Flush the alloc regions, updating the page tables. */
struct thread *th;