* <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
*/
-/*
- * FIXME: GC :FULL T seems to be unable to recover a lot of unused
- * space. After cold init is complete, GC :FULL T gets us down to
- * about 44 Mb total used, but PURIFY gets us down to about 17 Mb
- * total used.
- */
-
#include <stdio.h>
#include <signal.h>
#include "runtime.h"
*/
/* the number of actual generations. (The number of 'struct
- * generation' objects is one more than this, because one serves as
- * scratch when GC'ing.) */
+ * generation' objects is one more than this, because one object
+ * serves as scratch when GC'ing.) */
#define NUM_GENERATIONS 6
/* Should we use page protection to help avoid the scavenging of pages
/* the minimum size (in bytes) for a large object*/
unsigned large_object_size = 4 * 4096;
-
-/* Should we filter stack/register pointers? This could reduce the
- * number of invalid pointers accepted. KLUDGE: It will probably
- * degrades interrupt safety during object initialization. */
-boolean enable_pointer_filter = 1;
\f
/*
* debugging
static void *heap_base = NULL;
/* Calculate the start address for the given page number. */
-inline void
-*page_address(int page_num)
+inline void *
+page_address(int page_num)
{
return (heap_base + (page_num * 4096));
}
/* a structure to hold the state of a generation */
struct generation {
- /* the first page that gc_alloc checks on its next call */
+ /* the first page that gc_alloc() checks on its next call */
int alloc_start_page;
- /* the first page that gc_alloc_unboxed checks on its next call */
+ /* the first page that gc_alloc_unboxed() checks on its next call */
int alloc_unboxed_start_page;
/* the first page that gc_alloc_large (boxed) considers on its next
count_write_protect_generation_pages(int generation)
{
int i;
- int cnt = 0;
+ int count = 0;
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated != FREE_PAGE)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected == 1))
- cnt++;
- return(cnt);
+ count++;
+ return count;
}
-/* Count the number of pages within the given generation */
+/* Count the number of pages within the given generation. */
static int
count_generation_pages(int generation)
{
int i;
- int cnt = 0;
+ int count = 0;
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated != 0)
&& (page_table[i].gen == generation))
- cnt++;
- return(cnt);
+ count++;
+ return count;
}
/* Count the number of dont_move pages. */
count_dont_move_pages(void)
{
int i;
- int cnt = 0;
-
- for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != 0)
- && (page_table[i].dont_move != 0))
- cnt++;
- return(cnt);
+ int count = 0;
+ for (i = 0; i < last_free_page; i++) {
+ if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
+ ++count;
+ }
+ }
+ return count;
}
/* Work through the pages and add up the number of bytes used for the
* given generation. */
static int
-generation_bytes_allocated (int gen)
+count_generation_bytes_allocated (int gen)
{
int i;
int result = 0;
-
for (i = 0; i < last_free_page; i++) {
if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
result += page_table[i].bytes_used;
}
gc_assert(generations[i].bytes_allocated
- == generation_bytes_allocated(i));
+ == count_generation_bytes_allocated(i));
fprintf(stderr,
" %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
i,
* keeps the allocation contiguous when scavenging the newspace.
*
* The alloc_region should have been closed by a call to
- * gc_alloc_update_page_tables, and will thus be in an empty state.
+ * gc_alloc_update_page_tables(), and will thus be in an empty state.
*
* To assist the scavenging functions write-protected pages are not
* used. Free pages should not be write-protected.
first_page = restart_page;
/* First search for a page with at least 32 bytes free, which is
- * not write-protected, and which is not marked dont_move. */
+ * not write-protected, and which is not marked dont_move.
+ *
+ * FIXME: This looks extremely similar, perhaps identical, to
+ * code in gc_alloc_large(). It should be shared somehow. */
while ((first_page < NUM_PAGES)
&& (page_table[first_page].allocated != FREE_PAGE) /* not free page */
&& ((unboxed &&
/* Check for a failure. */
if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
fprintf(stderr,
- "Argh! gc_alloc_new_region failed on restart_page, nbytes=%d.\n",
+ "Argh! gc_alloc_new_region() failed on restart_page, nbytes=%d.\n",
nbytes);
print_generation_stats(1);
lose(NULL);
/*
FSHOW((stderr,
- "/gc_alloc_new_region gen %d: %d bytes: pages %d to %d: addr=%x\n",
+ "/gc_alloc_new_region() gen %d: %d bytes: pages %d to %d: addr=%x\n",
gc_alloc_generation,
bytes_found,
first_page,
/*
FSHOW((stderr,
- "/gc_alloc_update_page_tables to gen %d:\n",
+ "/gc_alloc_update_page_tables() to gen %d:\n",
gc_alloc_generation));
*/
next_page = first_page+1;
- /* Skip if no bytes were allocated */
+ /* Skip if no bytes were allocated. */
if (alloc_region->free_pointer != alloc_region->start_addr) {
orig_first_page_bytes_used = page_table[first_page].bytes_used;
/* Update the first page. */
/* If the page was free then set up the gen, and
- first_object_offset. */
+ * first_object_offset. */
if (page_table[first_page].bytes_used == 0)
gc_assert(page_table[first_page].first_object_offset == 0);
byte_cnt = 0;
- /* Calc. the number of bytes used in this page. This is not always
- the number of new bytes, unless it was free. */
+ /* Calculate the number of bytes used in this page. This is not
+ * always the number of new bytes, unless it was free. */
more = 0;
if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
bytes_used = 4096;
byte_cnt += bytes_used;
- /* All the rest of the pages should be free. Need to set their
- first_object_offset pointer to the start of the region, and set
- the bytes_used. */
+ /* All the rest of the pages should be free. We need to set their
+ * first_object_offset pointer to the start of the region, and set
+ * the bytes_used. */
while (more) {
if (unboxed)
gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
/* Set the generations alloc restart page to the last page of
- the region. */
+ * the region. */
if (unboxed)
generations[gc_alloc_generation].alloc_unboxed_start_page =
next_page-1;
region_size,
gc_alloc_generation));
*/
- }
- else
- /* No bytes allocated. Unallocate the first_page if there are 0
- bytes_used. */
+ } else {
+ /* There are no bytes allocated. Unallocate the first_page if
+ * there are 0 bytes_used. */
if (page_table[first_page].bytes_used == 0)
page_table[first_page].allocated = FREE_PAGE;
+ }
/* Unallocate any unused pages. */
while (next_page <= alloc_region->last_page) {
static inline void *gc_quick_alloc(int nbytes);
/* Allocate a possibly large object. */
-static void
-*gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
+static void *
+gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
int first_page;
int last_page;
/*
FSHOW((stderr,
- "/gc_alloc_large for %d bytes from gen %d\n",
+ "/gc_alloc_large() for %d bytes from gen %d\n",
nbytes, gc_alloc_generation));
*/
the current boxed free region. XX could probably keep a page
index ahead of the current region and bumped up here to save a
lot of re-scanning. */
- if (unboxed)
- restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
- else
+ if (unboxed) {
+ restart_page =
+ generations[gc_alloc_generation].alloc_large_unboxed_start_page;
+ } else {
restart_page = generations[gc_alloc_generation].alloc_large_start_page;
- if (restart_page <= alloc_region->last_page)
+ }
+ if (restart_page <= alloc_region->last_page) {
restart_page = alloc_region->last_page+1;
+ }
do {
first_page = restart_page;
&& (page_table[first_page].allocated != FREE_PAGE))
first_page++;
else
+ /* FIXME: This looks extremely similar, perhaps identical,
+ * to code in gc_alloc_new_region(). It should be shared
+ * somehow. */
while ((first_page < NUM_PAGES)
&& (page_table[first_page].allocated != FREE_PAGE)
&& ((unboxed &&
/*
if (large)
FSHOW((stderr,
- "/gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
+ "/gc_alloc_large() gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
gc_alloc_generation,
nbytes,
bytes_found,
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
-/* Allocate bytes from the boxed_region. It first checks if there is
- * room, if not then it calls gc_alloc_new_region to find a new region
- * with enough space. A pointer to the start of the region is returned. */
-static void
-*gc_alloc(int nbytes)
+/* Allocate bytes from the boxed_region. First check whether there is
+ * room. If not, call gc_alloc_new_region() to find a new region with
+ * enough space. Return a pointer to the start of the region. */
+static void *
+gc_alloc(int nbytes)
{
void *new_free_pointer;
/* Allocate space from the boxed_region. If there is not enough free
* space then call gc_alloc to do the job. A pointer to the start of
* the region is returned. */
-static inline void
-*gc_quick_alloc(int nbytes)
+static inline void *
+gc_quick_alloc(int nbytes)
{
void *new_free_pointer;
new_free_pointer = boxed_region.free_pointer + nbytes;
if (new_free_pointer <= boxed_region.end_addr) {
- /* If so then allocate from the current region. */
+ /* Allocate from the current region. */
void *new_obj = boxed_region.free_pointer;
boxed_region.free_pointer = new_free_pointer;
return((void *)new_obj);
+ } else {
+ /* Let full gc_alloc() handle it. */
+ return gc_alloc(nbytes);
}
-
- /* Else call gc_alloc */
- return (gc_alloc(nbytes));
}
/* Allocate space for the boxed object. If it is a large object then
* do a large alloc else allocate from the current region. If there is
- * not enough free space then call gc_alloc to do the job. A pointer
+ * not enough free space then call gc_alloc() to do the job. A pointer
* to the start of the region is returned. */
-static inline void
-*gc_quick_alloc_large(int nbytes)
+static inline void *
+gc_quick_alloc_large(int nbytes)
{
void *new_free_pointer;
void *new_obj = boxed_region.free_pointer;
boxed_region.free_pointer = new_free_pointer;
return((void *)new_obj);
+ } else {
+ /* Let full gc_alloc() handle it. */
+ return gc_alloc(nbytes);
}
-
- /* Else call gc_alloc */
- return (gc_alloc(nbytes));
}
-static void
-*gc_alloc_unboxed(int nbytes)
+static void *
+gc_alloc_unboxed(int nbytes)
{
void *new_free_pointer;
/*
- FSHOW((stderr, "/gc_alloc_unboxed %d\n", nbytes));
+ FSHOW((stderr, "/gc_alloc_unboxed() %d\n", nbytes));
*/
/* Check whether there is room in the current region. */
/* Set up a new region. */
gc_alloc_new_region(nbytes, 1, &unboxed_region);
- /* Should now be enough room. */
+ /* (There should now be enough room.) */
/* Check whether there is room in the current region. */
new_free_pointer = unboxed_region.free_pointer + nbytes;
return((void *) NIL); /* dummy value: return something ... */
}
-static inline void
-*gc_quick_alloc_unboxed(int nbytes)
+static inline void *
+gc_quick_alloc_unboxed(int nbytes)
{
void *new_free_pointer;
unboxed_region.free_pointer = new_free_pointer;
return((void *)new_obj);
+ } else {
+ /* Let general gc_alloc_unboxed() handle it. */
+ return gc_alloc_unboxed(nbytes);
}
-
- /* Else call gc_alloc */
- return (gc_alloc_unboxed(nbytes));
}
/* Allocate space for the object. If it is a large object then do a
* large alloc else allocate from the current region. If there is not
- * enough free space then call gc_alloc to do the job.
+ * enough free space then call general gc_alloc_unboxed() to do the job.
*
* A pointer to the start of the region is returned. */
-static inline void
-*gc_quick_alloc_large_unboxed(int nbytes)
+static inline void *
+gc_quick_alloc_large_unboxed(int nbytes)
{
void *new_free_pointer;
/* Check whether there is room in the current region. */
new_free_pointer = unboxed_region.free_pointer + nbytes;
-
if (new_free_pointer <= unboxed_region.end_addr) {
- /* If so then allocate from the current region. */
+ /* Allocate from the current region. */
void *new_obj = unboxed_region.free_pointer;
unboxed_region.free_pointer = new_free_pointer;
-
return((void *)new_obj);
+ } else {
+ /* Let general gc_alloc_unboxed() handle it. */
+ return gc_alloc_unboxed(nbytes);
}
-
- /* Else call gc_alloc. */
- return (gc_alloc_unboxed(nbytes));
}
\f
/*
lispobj *new;
lispobj *source, *dest;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
new = gc_quick_alloc(nwords*4);
dest = new;
- source = (lispobj *) PTR(object);
+ source = (lispobj *) native_pointer(object);
/* Copy the object. */
while (nwords > 0) {
lispobj *source, *dest;
int first_page;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
new = gc_quick_alloc_large(nwords*4);
dest = new;
- source = (lispobj *) PTR(object);
+ source = (lispobj *) native_pointer(object);
/* Copy the object. */
while (nwords > 0) {
lispobj *new;
lispobj *source, *dest;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
new = gc_quick_alloc_unboxed(nwords*4);
dest = new;
- source = (lispobj *) PTR(object);
+ source = (lispobj *) native_pointer(object);
/* Copy the object. */
while (nwords > 0) {
lispobj *source, *dest;
int first_page;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
new = gc_quick_alloc_large_unboxed(nwords*4);
dest = new;
- source = (lispobj *) PTR(object);
+ source = (lispobj *) native_pointer(object);
/* Copy the object. */
while (nwords > 0) {
* scavenging
*/
-#define DIRECT_SCAV 0
-
-/* FIXME: Most calls end up going to a little trouble to compute an
- * 'nwords' value. The system might be a little simpler if this
- * function used an 'end' parameter instead. */
+/* FIXME: Most calls end up going to some trouble to compute an
+ * 'n_words' value for this function. The system might be a little
+ * simpler if this function used an 'end' parameter instead. */
static void
-scavenge(lispobj *start, long nwords)
+scavenge(lispobj *start, long n_words)
{
- while (nwords > 0) {
- lispobj object;
-#if DIRECT_SCAV
- int type;
-#endif
- int words_scavenged;
+ lispobj *end = start + n_words;
+ lispobj *object_ptr;
+ int n_words_scavenged;
+
+ for (object_ptr = start;
+ object_ptr < end;
+ object_ptr += n_words_scavenged) {
- object = *start;
+ lispobj object = *object_ptr;
-/* FSHOW((stderr, "Scavenge: %p, %ld\n", start, nwords)); */
-
gc_assert(object != 0x01); /* not a forwarding pointer */
-#if DIRECT_SCAV
- type = TypeOf(object);
- words_scavenged = (scavtab[type])(start, object);
-#else
- if (Pointerp(object)) {
- /* It's a pointer. */
+ if (is_lisp_pointer(object)) {
if (from_space_p(object)) {
- /* It currently points to old space. Check for a forwarding
- * pointer. */
- lispobj *ptr = (lispobj *)PTR(object);
+ /* It currently points to old space. Check for a
+ * forwarding pointer. */
+ lispobj *ptr = (lispobj *)native_pointer(object);
lispobj first_word = *ptr;
-
if (first_word == 0x01) {
/* Yes, there's a forwarding pointer. */
- *start = ptr[1];
- words_scavenged = 1;
- }
- else
+ *object_ptr = ptr[1];
+ n_words_scavenged = 1;
+ } else {
/* Scavenge that pointer. */
- words_scavenged = (scavtab[TypeOf(object)])(start, object);
+ n_words_scavenged =
+ (scavtab[TypeOf(object)])(object_ptr, object);
+ }
} else {
- /* It points somewhere other than oldspace. Leave it alone. */
- words_scavenged = 1;
+ /* It points somewhere other than oldspace. Leave it
+ * alone. */
+ n_words_scavenged = 1;
}
+ } else if ((object & 3) == 0) {
+ /* It's a fixnum: really easy.. */
+ n_words_scavenged = 1;
} else {
- if ((object & 3) == 0) {
- /* It's a fixnum: really easy.. */
- words_scavenged = 1;
- } else {
- /* It's some sort of header object or another. */
- words_scavenged = (scavtab[TypeOf(object)])(start, object);
- }
+ /* It's some sort of header object or another. */
+ n_words_scavenged =
+ (scavtab[TypeOf(object)])(object_ptr, object);
}
-#endif
-
- start += words_scavenged;
- nwords -= words_scavenged;
}
- gc_assert(nwords == 0);
+ gc_assert(object_ptr == end);
}
-
\f
/*
* code and code-related objects
static lispobj trans_function_header(lispobj object);
static lispobj trans_boxed(lispobj object);
-#if DIRECT_SCAV
-static int
-scav_function_pointer(lispobj *where, lispobj object)
-{
- gc_assert(Pointerp(object));
-
- if (from_space_p(object)) {
- lispobj first, *first_pointer;
-
- /* object is a pointer into from space. Check to see whether
- * it has been forwarded. */
- first_pointer = (lispobj *) PTR(object);
- first = *first_pointer;
-
- if (first == 0x01) {
- /* Forwarded */
- *where = first_pointer[1];
- return 1;
- }
- else {
- int type;
- lispobj copy;
-
- /* must transport object -- object may point to either a
- * function header, a closure function header, or to a
- * closure header. */
-
- type = TypeOf(first);
- switch (type) {
- case type_FunctionHeader:
- case type_ClosureFunctionHeader:
- copy = trans_function_header(object);
- break;
- default:
- copy = trans_boxed(object);
- break;
- }
-
- if (copy != object) {
- /* Set forwarding pointer. */
- first_pointer[0] = 0x01;
- first_pointer[1] = copy;
- }
-
- first = copy;
- }
-
- gc_assert(Pointerp(first));
- gc_assert(!from_space_p(first));
-
- *where = first;
- }
- return 1;
-}
-#else
static int
scav_function_pointer(lispobj *where, lispobj object)
{
lispobj *first_pointer;
lispobj copy;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
/* Object is a pointer into from space - no a FP. */
- first_pointer = (lispobj *) PTR(object);
+ first_pointer = (lispobj *) native_pointer(object);
/* must transport object -- object may point to either a function
* header, a closure function header, or to a closure header. */
first_pointer[1] = copy;
}
- gc_assert(Pointerp(copy));
+ gc_assert(is_lisp_pointer(copy));
gc_assert(!from_space_p(copy));
*where = copy;
return 1;
}
-#endif
/* Scan a x86 compiled code object, looking for possible fixups that
* have been missed after a move.
unsigned d2 = *((unsigned char *)p - 2);
unsigned d3 = *((unsigned char *)p - 3);
unsigned d4 = *((unsigned char *)p - 4);
+#if QSHOW
unsigned d5 = *((unsigned char *)p - 5);
unsigned d6 = *((unsigned char *)p - 6);
+#endif
/* Check for code references. */
/* Check for a 32 bit word that looks like an absolute
/* It will be 0 or the unbound-marker if there are no fixups, and
* will be an other pointer if it is valid. */
- if ((fixups == 0) || (fixups == type_UnboundMarker) || !Pointerp(fixups)) {
+ if ((fixups == 0) || (fixups == type_UnboundMarker) ||
+ !is_lisp_pointer(fixups)) {
/* Check for possible errors. */
if (check_code_fixups)
sniff_code_object(new_code, displacement);
return;
}
- fixups_vector = (struct vector *)PTR(fixups);
+ fixups_vector = (struct vector *)native_pointer(fixups);
/* Could be pointing to a forwarding pointer. */
- if (Pointerp(fixups) && (find_page_index((void*)fixups_vector) != -1)
- && (fixups_vector->header == 0x01)) {
+ if (is_lisp_pointer(fixups) &&
+ (find_page_index((void*)fixups_vector) != -1) &&
+ (fixups_vector->header == 0x01)) {
/* If so, then follow it. */
/*SHOW("following pointer to a forwarding pointer");*/
- fixups_vector = (struct vector *)PTR((lispobj)fixups_vector->length);
+ fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
}
/*SHOW("got fixups");*/
nwords = CEILING(nwords, 2);
l_new_code = copy_large_object(l_code, nwords);
- new_code = (struct code *) PTR(l_new_code);
+ new_code = (struct code *) native_pointer(l_new_code);
/* may not have been moved.. */
if (new_code == code)
struct function *fheaderp, *nfheaderp;
lispobj nfheaderl;
- fheaderp = (struct function *) PTR(fheaderl);
+ fheaderp = (struct function *) native_pointer(fheaderl);
gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
/* Calculate the new function pointer and the new */
/* function header. */
nfheaderl = fheaderl + displacement;
- nfheaderp = (struct function *) PTR(nfheaderl);
+ nfheaderp = (struct function *) native_pointer(nfheaderl);
/* Set forwarding pointer. */
((lispobj *)fheaderp)[0] = 0x01;
scav_code_header(lispobj *where, lispobj object)
{
struct code *code;
- int nheader_words, ncode_words, nwords;
- lispobj fheaderl;
- struct function *fheaderp;
+ int n_header_words, n_code_words, n_words;
+ lispobj entry_point; /* tagged pointer to entry point */
+ struct function *function_ptr; /* untagged pointer to entry point */
code = (struct code *) where;
- ncode_words = fixnum_value(code->code_size);
- nheader_words = HeaderValue(object);
- nwords = ncode_words + nheader_words;
- nwords = CEILING(nwords, 2);
+ n_code_words = fixnum_value(code->code_size);
+ n_header_words = HeaderValue(object);
+ n_words = n_code_words + n_header_words;
+ n_words = CEILING(n_words, 2);
/* Scavenge the boxed section of the code data block. */
- scavenge(where + 1, nheader_words - 1);
+ scavenge(where + 1, n_header_words - 1);
/* Scavenge the boxed section of each function object in the */
/* code data block. */
- fheaderl = code->entry_points;
- while (fheaderl != NIL) {
- fheaderp = (struct function *) PTR(fheaderl);
- gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
+ for (entry_point = code->entry_points;
+ entry_point != NIL;
+ entry_point = function_ptr->next) {
- scavenge(&fheaderp->name, 1);
- scavenge(&fheaderp->arglist, 1);
- scavenge(&fheaderp->type, 1);
-
- fheaderl = fheaderp->next;
+ gc_assert(is_lisp_pointer(entry_point));
+
+ function_ptr = (struct function *) native_pointer(entry_point);
+ gc_assert(TypeOf(function_ptr->header) == type_FunctionHeader);
+
+ scavenge(&function_ptr->name, 1);
+ scavenge(&function_ptr->arglist, 1);
+ scavenge(&function_ptr->type, 1);
}
- return nwords;
+ return n_words;
}
static lispobj
{
struct code *ncode;
- ncode = trans_code((struct code *) PTR(object));
+ ncode = trans_code((struct code *) native_pointer(object));
return (lispobj) ncode | type_OtherPointer;
}
SHOW("/trans_return_pc_header: Will this work?");
- return_pc = (struct function *) PTR(object);
+ return_pc = (struct function *) native_pointer(object);
offset = HeaderValue(return_pc->header) * 4;
/* Transport the whole code object. */
unsigned long offset;
struct code *code, *ncode;
- fheader = (struct function *) PTR(object);
+ fheader = (struct function *) native_pointer(object);
offset = HeaderValue(fheader->header) * 4;
/* Transport the whole code object. */
* instances
*/
-#if DIRECT_SCAV
-static int
-scav_instance_pointer(lispobj *where, lispobj object)
-{
- if (from_space_p(object)) {
- lispobj first, *first_pointer;
-
- /* Object is a pointer into from space. Check to see */
- /* whether it has been forwarded. */
- first_pointer = (lispobj *) PTR(object);
- first = *first_pointer;
-
- if (first == 0x01) {
- /* forwarded */
- first = first_pointer[1];
- } else {
- first = trans_boxed(object);
- gc_assert(first != object);
- /* Set forwarding pointer. */
- first_pointer[0] = 0x01;
- first_pointer[1] = first;
- }
- *where = first;
- }
- return 1;
-}
-#else
static int
scav_instance_pointer(lispobj *where, lispobj object)
{
gc_assert(copy != object);
- first_pointer = (lispobj *) PTR(object);
+ first_pointer = (lispobj *) native_pointer(object);
/* Set forwarding pointer. */
first_pointer[0] = 0x01;
return 1;
}
-#endif
\f
/*
* lists and conses
static lispobj trans_list(lispobj object);
-#if DIRECT_SCAV
-static int
-scav_list_pointer(lispobj *where, lispobj object)
-{
- /* KLUDGE: There's lots of cut-and-paste duplication between this
- * and scav_instance_pointer(..), scav_other_pointer(..), and
- * perhaps other functions too. -- WHN 20000620 */
-
- gc_assert(Pointerp(object));
-
- if (from_space_p(object)) {
- lispobj first, *first_pointer;
-
- /* Object is a pointer into from space. Check to see whether it has
- * been forwarded. */
- first_pointer = (lispobj *) PTR(object);
- first = *first_pointer;
-
- if (first == 0x01) {
- /* forwarded */
- first = first_pointer[1];
- } else {
- first = trans_list(object);
-
- /* Set forwarding pointer */
- first_pointer[0] = 0x01;
- first_pointer[1] = first;
- }
-
- gc_assert(Pointerp(first));
- gc_assert(!from_space_p(first));
- *where = first;
- }
- return 1;
-}
-#else
static int
scav_list_pointer(lispobj *where, lispobj object)
{
lispobj first, *first_pointer;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
/* Object is a pointer into from space - not FP. */
first = trans_list(object);
gc_assert(first != object);
- first_pointer = (lispobj *) PTR(object);
+ first_pointer = (lispobj *) native_pointer(object);
/* Set forwarding pointer */
first_pointer[0] = 0x01;
first_pointer[1] = first;
- gc_assert(Pointerp(first));
+ gc_assert(is_lisp_pointer(first));
gc_assert(!from_space_p(first));
*where = first;
return 1;
}
-#endif
static lispobj
trans_list(lispobj object)
gc_assert(from_space_p(object));
- cons = (struct cons *) PTR(object);
+ cons = (struct cons *) native_pointer(object);
/* Copy 'object'. */
new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
struct cons *cdr_cons, *new_cdr_cons;
if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
- || (*((lispobj *)PTR(cdr)) == 0x01))
+ || (*((lispobj *)native_pointer(cdr)) == 0x01))
break;
- cdr_cons = (struct cons *) PTR(cdr);
+ cdr_cons = (struct cons *) native_pointer(cdr);
/* Copy 'cdr'. */
new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
* scavenging and transporting other pointers
*/
-#if DIRECT_SCAV
-static int
-scav_other_pointer(lispobj *where, lispobj object)
-{
- gc_assert(Pointerp(object));
-
- if (from_space_p(object)) {
- lispobj first, *first_pointer;
-
- /* Object is a pointer into from space. Check to see */
- /* whether it has been forwarded. */
- first_pointer = (lispobj *) PTR(object);
- first = *first_pointer;
-
- if (first == 0x01) {
- /* Forwarded. */
- first = first_pointer[1];
- *where = first;
- } else {
- first = (transother[TypeOf(first)])(object);
-
- if (first != object) {
- /* Set forwarding pointer */
- first_pointer[0] = 0x01;
- first_pointer[1] = first;
- *where = first;
- }
- }
-
- gc_assert(Pointerp(first));
- gc_assert(!from_space_p(first));
- }
- return 1;
-}
-#else
static int
scav_other_pointer(lispobj *where, lispobj object)
{
lispobj first, *first_pointer;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
/* Object is a pointer into from space - not FP. */
- first_pointer = (lispobj *) PTR(object);
+ first_pointer = (lispobj *) native_pointer(object);
first = (transother[TypeOf(*first_pointer)])(object);
*where = first;
}
- gc_assert(Pointerp(first));
+ gc_assert(is_lisp_pointer(first));
gc_assert(!from_space_p(first));
return 1;
}
-#endif
-
\f
/*
* immediate, boxed, and unboxed objects
lispobj header;
unsigned long length;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- header = *((lispobj *) PTR(object));
+ header = *((lispobj *) native_pointer(object));
length = HeaderValue(header) + 1;
length = CEILING(length, 2);
lispobj header;
unsigned long length;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- header = *((lispobj *) PTR(object));
+ header = *((lispobj *) native_pointer(object));
length = HeaderValue(header) + 1;
length = CEILING(length, 2);
unsigned long length;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- header = *((lispobj *) PTR(object));
+ header = *((lispobj *) native_pointer(object));
length = HeaderValue(header) + 1;
length = CEILING(length, 2);
unsigned long length;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- header = *((lispobj *) PTR(object));
+ header = *((lispobj *) native_pointer(object));
length = HeaderValue(header) + 1;
length = CEILING(length, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
/* NOTE: A string contains one more byte of data (a terminating
* '\0' to help when interfacing with C functions) than indicated
* by the length slot. */
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length) + 1;
nwords = CEILING(NWORDS(length, 4) + 2, 2);
/* Scavenge element 0, which may be a hash-table structure. */
scavenge(where+2, 1);
- if (!Pointerp(where[2])) {
+ if (!is_lisp_pointer(where[2])) {
lose("no pointer at %x in hash table", where[2]);
}
- hash_table = (lispobj *)PTR(where[2]);
+ hash_table = (lispobj *)native_pointer(where[2]);
/*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
if (TypeOf(hash_table[0]) != type_InstanceHeader) {
lose("hash table not instance (%x at %x)", hash_table[0], hash_table);
/* Scavenge element 1, which should be some internal symbol that
* the hash table code reserves for marking empty slots. */
scavenge(where+3, 1);
- if (!Pointerp(where[3])) {
+ if (!is_lisp_pointer(where[3])) {
lose("not empty-hash-table-slot symbol pointer: %x", where[3]);
}
empty_symbol = where[3];
/* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
- if (TypeOf(*(lispobj *)PTR(empty_symbol)) != type_SymbolHeader) {
+ if (TypeOf(*(lispobj *)native_pointer(empty_symbol)) != type_SymbolHeader) {
lose("not a symbol where empty-hash-table-slot symbol expected: %x",
- *(lispobj *)PTR(empty_symbol));
+ *(lispobj *)native_pointer(empty_symbol));
}
/* Scavenge hash table, which will fix the positions of the other
scavenge(hash_table, 16);
/* Cross-check the kv_vector. */
- if (where != (lispobj *)PTR(hash_table[9])) {
+ if (where != (lispobj *)native_pointer(hash_table[9])) {
lose("hash_table table!=this table %x", hash_table[9]);
}
{
lispobj index_vector_obj = hash_table[13];
- if (Pointerp(index_vector_obj) &&
- (TypeOf(*(lispobj *)PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
- index_vector = ((unsigned int *)PTR(index_vector_obj)) + 2;
+ if (is_lisp_pointer(index_vector_obj) &&
+ (TypeOf(*(lispobj *)native_pointer(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
+ index_vector = ((unsigned int *)native_pointer(index_vector_obj)) + 2;
/*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
- length = fixnum_value(((unsigned int *)PTR(index_vector_obj))[1]);
+ length = fixnum_value(((unsigned int *)native_pointer(index_vector_obj))[1]);
/*FSHOW((stderr, "/length = %d\n", length));*/
} else {
lose("invalid index_vector %x", index_vector_obj);
{
lispobj next_vector_obj = hash_table[14];
- if (Pointerp(next_vector_obj) &&
- (TypeOf(*(lispobj *)PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
- next_vector = ((unsigned int *)PTR(next_vector_obj)) + 2;
+ if (is_lisp_pointer(next_vector_obj) &&
+ (TypeOf(*(lispobj *)native_pointer(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
+ next_vector = ((unsigned int *)native_pointer(next_vector_obj)) + 2;
/*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
- next_vector_length = fixnum_value(((unsigned int *)PTR(next_vector_obj))[1]);
+ next_vector_length = fixnum_value(((unsigned int *)native_pointer(next_vector_obj))[1]);
/*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
} else {
lose("invalid next_vector %x", next_vector_obj);
* probably other stuff too. Ugh.. */
lispobj hash_vector_obj = hash_table[15];
- if (Pointerp(hash_vector_obj) &&
- (TypeOf(*(lispobj *)PTR(hash_vector_obj))
+ if (is_lisp_pointer(hash_vector_obj) &&
+ (TypeOf(*(lispobj *)native_pointer(hash_vector_obj))
== type_SimpleArrayUnsignedByte32)) {
- hash_vector = ((unsigned int *)PTR(hash_vector_obj)) + 2;
+ hash_vector = ((unsigned int *)native_pointer(hash_vector_obj)) + 2;
/*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
- gc_assert(fixnum_value(((unsigned int *)PTR(hash_vector_obj))[1])
+ gc_assert(fixnum_value(((unsigned int *)native_pointer(hash_vector_obj))[1])
== next_vector_length);
} else {
hash_vector = NULL;
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(NWORDS(length, 32) + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(NWORDS(length, 16) + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(NWORDS(length, 8) + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(NWORDS(length, 4) + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(NWORDS(length, 2) + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length * 2 + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length * 3 + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length * 2 + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length * 4 + 2, 2);
struct vector *vector;
int length, nwords;
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
- vector = (struct vector *) PTR(object);
+ vector = (struct vector *) native_pointer(object);
length = fixnum_value(vector->length);
nwords = CEILING(length * 6 + 2, 2);
lispobj copy;
/* struct weak_pointer *wp; */
- gc_assert(Pointerp(object));
+ gc_assert(is_lisp_pointer(object));
#if defined(DEBUG_WEAK)
FSHOW((stderr, "Transporting weak pointer from 0x%08x\n", object));
/* been transported so they can be fixed up in a post-GC pass. */
copy = copy_object(object, WEAK_POINTER_NWORDS);
- /* wp = (struct weak_pointer *) PTR(copy);*/
+ /* wp = (struct weak_pointer *) native_pointer(copy);*/
/* Push the weak pointer onto the list of weak pointers. */
lispobj value = wp->value;
lispobj *first_pointer;
- first_pointer = (lispobj *)PTR(value);
+ first_pointer = (lispobj *)native_pointer(value);
/*
FSHOW((stderr, "/weak pointer at 0x%08x\n", (unsigned long) wp));
FSHOW((stderr, "/value: 0x%08x\n", (unsigned long) value));
*/
- if (Pointerp(value) && from_space_p(value)) {
+ if (is_lisp_pointer(value) && from_space_p(value)) {
/* Now, we need to check whether the object has been forwarded. If
* it has been, the weak pointer is still good and needs to be
* updated. Otherwise, the weak pointer needs to be nil'ed
size_t count = 1;
lispobj thing = *start;
- /* If thing is an immediate then this is a cons */
- if (Pointerp(thing)
+ /* If thing is an immediate then this is a cons. */
+ if (is_lisp_pointer(thing)
|| ((thing & 3) == 0) /* fixnum */
|| (TypeOf(thing) == type_BaseChar)
|| (TypeOf(thing) == type_UnboundMarker))
else
count = (sizetab[TypeOf(thing)])(start);
- /* Check whether the pointer is within this object? */
+ /* Check whether the pointer is within this object. */
if ((pointer >= start) && (pointer < (start+count))) {
/* found it! */
/*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
return(start);
}
- /* Round up the count */
+ /* Round up the count. */
count = CEILING(count,2);
start += count;
int page_index = find_page_index(pointer);
lispobj *start;
- /* Address may be invalid - do some checks. */
+ /* The address may be invalid, so do some checks. */
if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
return NULL;
start = (lispobj *)((void *)page_address(page_index)
return (search_space(start, (pointer+2)-start, pointer));
}
-/* FIXME: There is a strong family resemblance between this function
- * and the function of the same name in purify.c. Would it be possible
- * to implement them as exactly the same function? */
+/* Is there any possibility that pointer is a valid Lisp object
+ * reference, and/or something else (e.g. subroutine call return
+ * address) which should prevent us from moving the referred-to thing? */
static int
-valid_dynamic_space_pointer(lispobj *pointer)
+possibly_valid_dynamic_space_pointer(lispobj *pointer)
{
lispobj *start_addr;
- /* Find the object start address */
+ /* Find the object start address. */
if ((start_addr = search_dynamic_space(pointer)) == NULL) {
return 0;
}
/* We need to allow raw pointers into Code objects for return
- * addresses. This will also pickup pointers to functions in code
+ * addresses. This will also pick up pointers to functions in code
* objects. */
if (TypeOf(*start_addr) == type_CodeHeader) {
- /* X Could do some further checks here. */
+ /* XXX could do some further checks here */
return 1;
}
/* If it's not a return address then it needs to be a valid Lisp
* pointer. */
- if (!Pointerp((lispobj)pointer)) {
+ if (!is_lisp_pointer((lispobj)pointer)) {
return 0;
}
/* Check that the object pointed to is consistent with the pointer
- * low tag. */
+ * low tag.
+ *
+ * FIXME: It's not safe to rely on the result from this check
+ * before an object is initialized. Thus, if we were interrupted
+ * just as an object had been allocated but not initialized, the
+ * GC relying on this result could bogusly reclaim the memory.
+ * However, we can't really afford to do without this check. So
+ * we should make it safe somehow.
+ * (1) Perhaps just review the code to make sure
+ * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
+ * thing is wrapped around critical sections where allocated
+ * memory type bits haven't been set.
+ * (2) Perhaps find some other hack to protect against this, e.g.
+ * recording the result of the last call to allocate-lisp-memory,
+ * and returning true from this function when *pointer is
+ * a reference to that result. */
switch (LowtagOf((lispobj)pointer)) {
case type_FunctionPointer:
/* Start_addr should be the enclosing code object, or a closure
- header. */
+ * header. */
switch (TypeOf(*start_addr)) {
case type_CodeHeader:
/* This case is probably caught above. */
return 0;
}
/* Is it plausible cons? */
- if ((Pointerp(start_addr[0])
+ if ((is_lisp_pointer(start_addr[0])
|| ((start_addr[0] & 3) == 0) /* fixnum */
|| (TypeOf(start_addr[0]) == type_BaseChar)
|| (TypeOf(start_addr[0]) == type_UnboundMarker))
- && (Pointerp(start_addr[1])
+ && (is_lisp_pointer(start_addr[1])
|| ((start_addr[1] & 3) == 0) /* fixnum */
|| (TypeOf(start_addr[1]) == type_BaseChar)
|| (TypeOf(start_addr[1]) == type_UnboundMarker)))
pointer, start_addr, *start_addr));
return 0;
}
- /* Is it plausible? Not a cons. X should check the headers. */
- if (Pointerp(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
+ /* Is it plausible? Not a cons. XXX should check the headers. */
+ if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
if (gencgc_verbose)
FSHOW((stderr,
"/Wo2: %x %x %x\n",
return 1;
}
-/* Adjust large bignum and vector objects. This will adjust the allocated
- * region if the size has shrunk, and move unboxed objects into unboxed
- * pages. The pages are not promoted here, and the promoted region is not
- * added to the new_regions; this is really only designed to be called from
- * preserve_pointer. Shouldn't fail if this is missed, just may delay the
- * moving of objects to unboxed pages, and the freeing of pages. */
+/* Adjust large bignum and vector objects. This will adjust the
+ * allocated region if the size has shrunk, and move unboxed objects
+ * into unboxed pages. The pages are not promoted here, and the
+ * promoted region is not added to the new_regions; this is really
+ * only designed to be called from preserve_pointer(). Shouldn't fail
+ * if this is missed, just may delay the moving of objects to unboxed
+ * pages, and the freeing of pages. */
static void
maybe_adjust_large_object(lispobj *where)
{
next_page++;
}
- if ((bytes_freed > 0) && gencgc_verbose)
- FSHOW((stderr, "/adjust_large_object freed %d\n", bytes_freed));
+ if ((bytes_freed > 0) && gencgc_verbose) {
+ FSHOW((stderr,
+ "/maybe_adjust_large_object() freed %d\n",
+ bytes_freed));
+ }
generations[from_space].bytes_allocated -= bytes_freed;
bytes_allocated -= bytes_freed;
return;
}
-/* Take a possible pointer to a list object and mark the page_table
- * so that it will not need changing during a GC.
+/* Take a possible pointer to a Lisp object and mark its page in the
+ * page_table so that it will not be relocated during a GC.
*
* This involves locating the page it points to, then backing up to
* the first page that has its first object start at offset 0, and
- * then marking all pages dont_move from the first until a page that ends
- * by being full, or having free gen.
+ * then marking all pages dont_move from the first until a page that
+ * ends by being full, or having free gen.
*
* This ensures that objects spanning pages are not broken.
*
* It is assumed that all the page static flags have been cleared at
* the start of a GC.
*
- * It is also assumed that the current gc_alloc region has been flushed and
- * the tables updated. */
+ * It is also assumed that the current gc_alloc() region has been
+ * flushed and the tables updated. */
static void
preserve_pointer(void *addr)
{
int i;
unsigned region_allocation;
- /* Address is quite likely to have been invalid - do some checks. */
+ /* quick check 1: Address is quite likely to have been invalid. */
if ((addr_page_index == -1)
|| (page_table[addr_page_index].allocated == FREE_PAGE)
|| (page_table[addr_page_index].bytes_used == 0)
|| (page_table[addr_page_index].gen != from_space)
- /* Skip if already marked dont_move */
+ /* Skip if already marked dont_move. */
|| (page_table[addr_page_index].dont_move != 0))
return;
+ /* (Now that we know that addr_page_index is in range, it's
+ * safe to index into page_table[] with it.) */
region_allocation = page_table[addr_page_index].allocated;
- /* Check the offset within the page.
+ /* quick check 2: Check the offset within the page.
*
* FIXME: The mask should have a symbolic name, and ideally should
* be derived from page size instead of hardwired to 0xfff.
if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used)
return;
- if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
+ /* Filter out anything which can't be a pointer to a Lisp object
+ * (or, as a special case which also requires dont_move, a return
+ * address referring to something in a CodeObject). This is
+ * expensive but important, since it vastly reduces the
+ * probability that random garbage will be bogusly interpreted as
+ * a pointer which prevents a page from moving. */
+ if (!possibly_valid_dynamic_space_pointer(addr))
return;
/* Work backwards to find a page with a first_object_offset of 0.
* gen. Assumes the first_object_offset is negative or zero. */
first_page = addr_page_index;
while (page_table[first_page].first_object_offset != 0) {
- first_page--;
+ --first_page;
/* Do some checks. */
gc_assert(page_table[first_page].bytes_used == 4096);
gc_assert(page_table[first_page].gen == from_space);
gc_assert(page_table[first_page].allocated == region_allocation);
}
- /* Adjust any large objects before promotion as they won't be copied
- * after promotion. */
+ /* Adjust any large objects before promotion as they won't be
+ * copied after promotion. */
if (page_table[first_page].large_object) {
maybe_adjust_large_object(page_address(first_page));
- /* If a large object has shrunk then addr may now point to a free
- * area in which case it's ignored here. Note it gets through the
- * valid pointer test above because the tail looks like conses. */
+ /* If a large object has shrunk then addr may now point to a
+ * free area in which case it's ignored here. Note it gets
+ * through the valid pointer test above because the tail looks
+ * like conses. */
if ((page_table[addr_page_index].allocated == FREE_PAGE)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
/* Mark the page static. */
page_table[i].dont_move = 1;
- /* Move the page to the new_space. XX I'd rather not do this but
- * the GC logic is not quite able to copy with the static pages
- * remaining in the from space. This also requires the generation
- * bytes_allocated counters be updated. */
+ /* Move the page to the new_space. XX I'd rather not do this
+ * but the GC logic is not quite able to cope with the static
+ * pages remaining in the from space. This also requires the
+ * generation bytes_allocated counters be updated. */
page_table[i].gen = new_space;
generations[new_space].bytes_allocated += page_table[i].bytes_used;
generations[from_space].bytes_allocated -= page_table[i].bytes_used;
- /* It is essential that the pages are not write protected as they
- * may have pointers into the old-space which need scavenging. They
- * shouldn't be write protected at this stage. */
+ /* It is essential that the pages are not write protected as
+ * they may have pointers into the old-space which need
+ * scavenging. They shouldn't be write protected at this
+ * stage. */
gc_assert(!page_table[i].write_protected);
/* Check whether this is the last page in this contiguous block.. */
/* Check that the page is now static. */
gc_assert(page_table[addr_page_index].dont_move != 0);
-
- return;
}
-
-#ifdef CONTROL_STACKS
-/* Scavenge the thread stack conservative roots. */
-static void
-scavenge_thread_stacks(void)
-{
- lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
- int type = TypeOf(thread_stacks);
-
- if (LowtagOf(thread_stacks) == type_OtherPointer) {
- struct vector *vector = (struct vector *) PTR(thread_stacks);
- int length, i;
- if (TypeOf(vector->header) != type_SimpleVector)
- return;
- length = fixnum_value(vector->length);
- for (i = 0; i < length; i++) {
- lispobj stack_obj = vector->data[i];
- if (LowtagOf(stack_obj) == type_OtherPointer) {
- struct vector *stack = (struct vector *) PTR(stack_obj);
- int vector_length;
- if (TypeOf(stack->header) !=
- type_SimpleArrayUnsignedByte32) {
- return;
- }
- vector_length = fixnum_value(stack->length);
- if ((gencgc_verbose > 1) && (vector_length <= 0))
- FSHOW((stderr,
- "/weird? control stack vector length %d\n",
- vector_length));
- if (vector_length > 0) {
- lispobj *stack_pointer = (lispobj*)stack->data[0];
- if ((stack_pointer < (lispobj *)CONTROL_STACK_START) ||
- (stack_pointer > (lispobj *)CONTROL_STACK_END))
- lose("invalid stack pointer %x",
- (unsigned)stack_pointer);
- if ((stack_pointer > (lispobj *)CONTROL_STACK_START) &&
- (stack_pointer < (lispobj *)CONTROL_STACK_END)) {
- /* FIXME: Ick!
- * (1) hardwired word length = 4; and as usual,
- * when fixing this, check for other places
- * with the same problem
- * (2) calling it 'length' suggests bytes;
- * perhaps 'size' instead? */
- unsigned int length = ((unsigned)CONTROL_STACK_END -
- (unsigned)stack_pointer) / 4;
- int j;
- if (length >= vector_length) {
- lose("invalid stack size %d >= vector length %d",
- length,
- vector_length);
- }
- if (gencgc_verbose > 1) {
- FSHOW((stderr,
- "scavenging %d words of control stack %d of length %d words.\n",
- length, i, vector_length));
- }
- for (j = 0; j < length; j++) {
- preserve_pointer((void *)stack->data[1+j]);
- }
- }
- }
- }
- }
- }
-}
-#endif
-
\f
/* If the given page is not write-protected, then scan it for pointers
* to younger generations or the top temp. generation, if no
* suspicious pointers are found then the page is write-protected.
*
- * Care is taken to check for pointers to the current gc_alloc region
- * if it is a younger generation or the temp. generation. This frees
- * the caller from doing a gc_alloc_update_page_tables. Actually the
- * gc_alloc_generation does not need to be checked as this is only
- * called from scavenge_generation when the gc_alloc generation is
+ * Care is taken to check for pointers to the current gc_alloc()
+ * region if it is a younger generation or the temp. generation. This
+ * frees the caller from doing a gc_alloc_update_page_tables(). Actually
+ * the gc_alloc_generation does not need to be checked as this is only
+ * called from scavenge_generation() when the gc_alloc generation is
* younger, so it just checks if there is a pointer to the current
* region.
*
- * We return 1 if the page was write-protected, else 0.
- */
+ * We return 1 if the page was write-protected, else 0. */
static int
update_page_write_prot(int page)
{
&& ((page_table[index].gen < gen)
|| (page_table[index].gen == NUM_GENERATIONS)))
- /* Or does it point within a current gc_alloc region? */
+ /* Or does it point within a current gc_alloc() region? */
|| ((boxed_region.start_addr <= ptr)
&& (ptr <= boxed_region.free_pointer))
|| ((unboxed_region.start_addr <= ptr)
/* Now work forward until the end of this contiguous area
* is found. A small area is preferred as there is a
* better chance of its pages being write-protected. */
- for (last_page = i; ;last_page++)
+ for (last_page = i; ; last_page++)
/* Check whether this is the last page in this contiguous
* block. */
if ((page_table[last_page].bytes_used < 4096)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)) {
- FSHOW((stderr, "/scavenge_generation %d\n", generation));
+ FSHOW((stderr, "/scavenge_generation() %d\n", generation));
FSHOW((stderr,
"/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
page_table[i].bytes_used,
page_table[i].first_object_offset,
page_table[i].dont_move));
- lose("write-protected page %d written to in scavenge_generation",
- i);
+ lose("write to protected page %d in scavenge_generation()", i);
}
}
#endif
* newspace generation.
*
* To help improve the efficiency, areas written are recorded by
- * gc_alloc and only these scavenged. Sometimes a little more will be
+ * gc_alloc() and only these scavenged. Sometimes a little more will be
* scavenged, but this causes no harm. An easy check is done that the
* scavenged bytes equals the number allocated in the previous
* scavenge.
*
* Write-protected pages could potentially be written by alloc however
* to avoid having to handle re-scavenging of write-protected pages
- * gc_alloc does not write to write-protected pages.
+ * gc_alloc() does not write to write-protected pages.
*
* New areas of objects allocated are recorded alternatively in the two
* new_areas arrays below. */
/* The scavenge will start at the first_object_offset of page i.
*
- * We need to find the full extent of this contiguous block in case
- * objects span pages.
+ * We need to find the full extent of this contiguous
+ * block in case objects span pages.
*
- * Now work forward until the end of this contiguous area is
- * found. A small area is preferred as there is a better chance
- * of its pages being write-protected. */
+ * Now work forward until the end of this contiguous area
+ * is found. A small area is preferred as there is a
+ * better chance of its pages being write-protected. */
for (last_page = i; ;last_page++) {
- /* Check whether this is the last page in this contiguous
- * block */
+ /* Check whether this is the last page in this
+ * contiguous block */
if ((page_table[last_page].bytes_used < 4096)
/* Or it is 4096 and is the last in the block */
|| (page_table[last_page+1].allocated != BOXED_PAGE)
break;
}
- /* Do a limited check for write_protected pages. If all pages
- * are write_protected then no need to scavenge. Except if the
- * pages are marked dont_move. */
+ /* Do a limited check for write-protected pages. If all
+ * pages are write-protected then no need to scavenge,
+ * except if the pages are marked dont_move. */
{
int j, all_wp = 1;
for (j = i; j <= last_page; j++)
all_wp = 0;
break;
}
-#if !SC_NS_GEN_CK
- if (all_wp == 0)
-#endif
- {
- int size;
-
- /* Calculate the size. */
- if (last_page == i)
- size = (page_table[last_page].bytes_used
- - page_table[i].first_object_offset)/4;
- else
- size = (page_table[last_page].bytes_used
- + (last_page-i)*4096
- - page_table[i].first_object_offset)/4;
-
- {
-#if SC_NS_GEN_CK
- int a1 = bytes_allocated;
-#endif
- /* FSHOW((stderr,
- "/scavenge(%x,%d)\n",
- page_address(i)
- + page_table[i].first_object_offset,
- size)); */
- new_areas_ignore_page = last_page;
+ if (!all_wp) {
+ int size;
- scavenge(page_address(i)+page_table[i].first_object_offset,size);
+ /* Calculate the size. */
+ if (last_page == i)
+ size = (page_table[last_page].bytes_used
+ - page_table[i].first_object_offset)/4;
+ else
+ size = (page_table[last_page].bytes_used
+ + (last_page-i)*4096
+ - page_table[i].first_object_offset)/4;
+
+ {
+ new_areas_ignore_page = last_page;
+
+ scavenge(page_address(i) +
+ page_table[i].first_object_offset,
+ size);
-#if SC_NS_GEN_CK
- /* Flush the alloc regions updating the tables. */
- gc_alloc_update_page_tables(0, &boxed_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
-
- if ((all_wp != 0) && (a1 != bytes_allocated)) {
- FSHOW((stderr,
- "alloc'ed over %d to %d\n",
- i, last_page));
- FSHOW((stderr,
- "/page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
- page_table[i].bytes_used,
- page_table[i].first_object_offset,
- page_table[i].dont_move,
- page_table[i].write_protected,
- page_table[i].write_protected_cleared));
- }
-#endif
- }
}
+ }
}
i = last_page;
}
}
+ FSHOW((stderr,
+ "/done with one full scan of newspace generation %d\n",
+ generation));
}
/* Do a complete scavenge of the newspace generation. */
{
int i;
- /* the new_areas array currently being written to by gc_alloc */
- struct new_area (*current_new_areas)[] = &new_areas_1;
+ /* the new_areas array currently being written to by gc_alloc() */
+ struct new_area (*current_new_areas)[] = &new_areas_1;
int current_new_areas_index;
/* the new_areas created but the previous scavenge cycle */
- struct new_area (*previous_new_areas)[] = NULL;
+ struct new_area (*previous_new_areas)[] = NULL;
int previous_new_areas_index;
-#define SC_NS_GEN_CK 0
-#if SC_NS_GEN_CK
- /* Clear the write_protected_cleared flags on all pages. */
- for (i = 0; i < NUM_PAGES; i++)
- page_table[i].write_protected_cleared = 0;
-#endif
-
/* Flush the current regions updating the tables. */
gc_alloc_update_page_tables(0, &boxed_region);
gc_alloc_update_page_tables(1, &unboxed_region);
- /* Turn on the recording of new areas by gc_alloc. */
+ /* Turn on the recording of new areas by gc_alloc(). */
new_areas = current_new_areas;
new_areas_index = 0;
else
current_new_areas = &new_areas_1;
- /* Set up for gc_alloc. */
+ /* Set up for gc_alloc(). */
new_areas = current_new_areas;
new_areas_index = 0;
/* Check whether previous_new_areas had overflowed. */
if (previous_new_areas_index >= NUM_NEW_AREAS) {
+
/* New areas of objects allocated have been lost so need to do a
* full scan to be sure! If this becomes a problem try
* increasing NUM_NEW_AREAS. */
/* Flush the current regions updating the tables. */
gc_alloc_update_page_tables(0, &boxed_region);
gc_alloc_update_page_tables(1, &unboxed_region);
+
} else {
+
/* Work through previous_new_areas. */
for (i = 0; i < previous_new_areas_index; i++) {
+ /* FIXME: All these bare *4 and /4 should be something
+ * like BYTES_PER_WORD or WBYTES. */
int page = (*previous_new_areas)[i].page;
int offset = (*previous_new_areas)[i].offset;
int size = (*previous_new_areas)[i].size / 4;
gc_assert((*previous_new_areas)[i].size % 4 == 0);
-
- /* FIXME: All these bare *4 and /4 should be something
- * like BYTES_PER_WORD or WBYTES. */
- /*FSHOW((stderr,
- "/S page %d offset %d size %d\n",
- page, offset, size*4));*/
scavenge(page_address(page)+offset, size);
}
current_new_areas_index));*/
}
- /* Turn off recording of areas allocated by gc_alloc. */
+ /* Turn off recording of areas allocated by gc_alloc(). */
record_new_objects = 0;
#if SC_NS_GEN_CK
return bytes_freed;
}
\f
+#if 0
/* Print some information about a pointer at the given address. */
static void
print_ptr(lispobj *addr)
*(addr+3),
*(addr+4));
}
+#endif
extern int undefined_tramp;
size_t count = 1;
lispobj thing = *(lispobj*)start;
- if (Pointerp(thing)) {
+ if (is_lisp_pointer(thing)) {
int page_index = find_page_index((void*)thing);
int to_readonly_space =
(READ_ONLY_SPACE_START <= thing &&
&& (page_table[page_index].bytes_used == 0))
lose ("Ptr %x @ %x sees free page.", thing, start);
/* Check that it doesn't point to a forwarding pointer! */
- if (*((lispobj *)PTR(thing)) == 0x01) {
+ if (*((lispobj *)native_pointer(thing)) == 0x01) {
lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
}
/* Check that its not in the RO space as it would then be a
* it down a lot (so it's commented out).
*
* FIXME: Add a variable to enable this dynamically. */
- /* if (!valid_dynamic_space_pointer((lispobj *)thing)) {
+ /* if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
* lose("ptr %x to invalid object %x", thing, start); */
} else {
/* Verify that it points to another valid space. */
* the code data block. */
fheaderl = code->entry_points;
while (fheaderl != NIL) {
- fheaderp = (struct function *) PTR(fheaderl);
+ fheaderp = (struct function *) native_pointer(fheaderl);
gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
verify_space(&fheaderp->name, 1);
verify_space(&fheaderp->arglist, 1);
}
}
-/* Check the all the free space is zero filled. */
+/* Check that all the free space is zero filled. */
static void
verify_zero_fill(void)
{
}
}
-/* Garbage collect a generation. If raise is 0 the remains of the
+/* Garbage collect a generation. If raise is 0 then the remains of the
* generation are not raised to the next generation. */
static void
garbage_collect_generation(int generation, int raise)
{
unsigned long bytes_freed;
unsigned long i;
- unsigned long read_only_space_size, static_space_size;
+ unsigned long static_space_size;
gc_assert(generation <= (NUM_GENERATIONS-1));
/* Un-write-protect the old-space pages. This is essential for the
* promoted pages as they may contain pointers into the old-space
* which need to be scavenged. It also helps avoid unnecessary page
- * faults as forwarding pointer are written into them. They need to
+ * faults as forwarding pointers are written into them. They need to
* be un-protected anyway before unmapping later. */
unprotect_oldspace();
/* Scavenge the stack's conservative roots. */
{
- lispobj **ptr;
- for (ptr = (lispobj **)CONTROL_STACK_END - 1;
- ptr > (lispobj **)&raise;
+ void **ptr;
+ for (ptr = (void **)CONTROL_STACK_END - 1;
+ ptr > (void **)&raise;
ptr--) {
preserve_pointer(*ptr);
}
}
-#ifdef CONTROL_STACKS
- scavenge_thread_stacks();
-#endif
+#if QSHOW
if (gencgc_verbose > 1) {
int num_dont_move_pages = count_dont_move_pages();
- FSHOW((stderr,
- "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
- num_dont_move_pages,
- /* FIXME: 4096 should be symbolic constant here and
- * prob'ly elsewhere too. */
- num_dont_move_pages * 4096));
+ fprintf(stderr,
+ "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
+ num_dont_move_pages,
+ /* FIXME: 4096 should be symbolic constant here and
+ * prob'ly elsewhere too. */
+ num_dont_move_pages * 4096);
}
+#endif
/* Scavenge all the rest of the roots. */
/* Scavenge the Lisp functions of the interrupt handlers, taking
- * care to avoid SIG_DFL, SIG_IGN. */
+ * care to avoid SIG_DFL and SIG_IGN. */
for (i = 0; i < NSIG; i++) {
union interrupt_handler handler = interrupt_handlers[i];
if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
}
/* Scavenge the binding stack. */
- scavenge( (lispobj *) BINDING_STACK_START,
+ scavenge((lispobj *) BINDING_STACK_START,
(lispobj *)SymbolValue(BINDING_STACK_POINTER) -
(lispobj *)BINDING_STACK_START);
+ /* The original CMU CL code had scavenge-read-only-space code
+ * controlled by the Lisp-level variable
+ * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
+ * wasn't documented under what circumstances it was useful or
+ * safe to turn it on, so it's been turned off in SBCL. If you
+ * want/need this functionality, and can test and document it,
+ * please submit a patch. */
+#if 0
if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
- read_only_space_size =
+ unsigned long read_only_space_size =
(lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
(lispobj*)READ_ONLY_SPACE_START;
FSHOW((stderr,
read_only_space_size * sizeof(lispobj)));
scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
}
+#endif
+ /* Scavenge static space. */
static_space_size =
(lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) -
(lispobj *)STATIC_SPACE_START;
- if (gencgc_verbose > 1)
+ if (gencgc_verbose > 1) {
FSHOW((stderr,
"/scavenge static space: %d bytes\n",
static_space_size * sizeof(lispobj)));
+ }
scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
/* All generations but the generation being GCed need to be
* scavenged. The new_space generation needs special handling as
* objects may be moved in - it is handled separately below. */
- for (i = 0; i < NUM_GENERATIONS; i++)
- if ((i != generation) && (i != new_space))
+ for (i = 0; i < NUM_GENERATIONS; i++) {
+ if ((i != generation) && (i != new_space)) {
scavenge_generation(i);
+ }
+ }
/* Finally scavenge the new_space generation. Keep going until no
* more objects are moved into the new generation */
scavenge_newspace_generation(new_space);
+ /* FIXME: I tried reenabling this check when debugging unrelated
+ * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
+ * Since the current GC code seems to work well, I'm guessing that
+ * this debugging code is just stale, but I haven't tried to
+ * figure it out. It should be figured out and then either made to
+ * work or just deleted. */
#define RESCAN_CHECK 0
#if RESCAN_CHECK
/* As a check re-scavenge the newspace once; no new objects should
++generations[generation].num_gc;
}
-/* Update last_free_page then ALLOCATION_POINTER */
+/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
int
update_x86_dynamic_space_free_pointer(void)
{
if (gencgc_verbose > 1) {
FSHOW((stderr,
- "Starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
+ "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
gen,
raise,
generations[gen].bytes_allocated,
generations[gen].num_gc));
}
- /* If an older generation is being filled then update its memory
- * age. */
+ /* If an older generation is being filled, then update its
+ * memory age. */
if (raise == 1) {
generations[gen+1].cum_sum_bytes_allocated +=
generations[gen+1].bytes_allocated;
write_protect_generation_pages(gen_to_wp);
}
- /* Set gc_alloc back to generation 0. The current regions should
- * be flushed after the above GCs */
+ /* Set gc_alloc() back to generation 0. The current regions should
+ * be flushed after the above GCs. */
gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
gc_alloc_generation = 0;
update_x86_dynamic_space_free_pointer();
- /* This is now done by Lisp SCRUB-CONTROL-STACK in Lisp SUB-GC, so we
- * needn't do it here: */
+ /* This is now done by Lisp SCRUB-CONTROL-STACK in Lisp SUB-GC, so
+ * we needn't do it here: */
/* zero_stack();*/
current_region_free_pointer = boxed_region.free_pointer;
/* This is called by Lisp PURIFY when it is finished. All live objects
* will have been moved to the RO and Static heaps. The dynamic space
* will need a full re-initialization. We don't bother having Lisp
- * PURIFY flush the current gc_alloc region, as the page_tables are
+ * PURIFY flush the current gc_alloc() region, as the page_tables are
* re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
if (gencgc_verbose > 1)
print_generation_stats(0);
- /* Initialize gc_alloc */
+ /* Initialize gc_alloc(). */
gc_alloc_generation = 0;
boxed_region.first_page = 0;
boxed_region.last_page = -1;
boxed_region.start_addr = page_address(0);
boxed_region.free_pointer = page_address(0);
boxed_region.end_addr = page_address(0);
-
unboxed_region.first_page = 0;
unboxed_region.last_page = -1;
unboxed_region.start_addr = page_address(0);
bytes_allocated = 0;
- /* Initialize the generations. */
+ /* Initialize the generations.
+ *
+ * FIXME: very similar to code in gc_free_heap(), should be shared */
for (i = 0; i < NUM_GENERATIONS; i++) {
generations[i].alloc_start_page = 0;
generations[i].alloc_unboxed_start_page = 0;
generations[i].min_av_mem_age = 0.75;
}
- /* Initialize gc_alloc. */
+ /* Initialize gc_alloc().
+ *
+ * FIXME: identical with code in gc_free_heap(), should be shared */
gc_alloc_generation = 0;
boxed_region.first_page = 0;
boxed_region.last_page = -1;
boxed_region.start_addr = page_address(0);
boxed_region.free_pointer = page_address(0);
boxed_region.end_addr = page_address(0);
-
unboxed_region.first_page = 0;
unboxed_region.last_page = -1;
unboxed_region.start_addr = page_address(0);
SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
goto retry1;
}
- /* Call gc_alloc. */
+ /* Call gc_alloc(). */
boxed_region.free_pointer = current_region_free_pointer;
{
void *new_obj = gc_alloc(nbytes);
retry2:
/* At least wrap this allocation in a pseudo atomic to prevent
- * gc_alloc from being re-entered. */
+ * gc_alloc() from being re-entered. */
SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
goto retry2;
}
- /* Else call gc_alloc. */
+ /* Else call gc_alloc(). */
boxed_region.free_pointer = current_region_free_pointer;
result = gc_alloc(nbytes);
current_region_free_pointer = boxed_region.free_pointer;
alloc_entered--;
SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
- /* Handle any interrupts that occurred during
- * gc_alloc(..). */
+ /* Handle any interrupts that occurred during gc_alloc(..). */
do_pending_interrupt();
goto retry2;
}
* catch GENCGC-related write-protect violations
*/
+void unhandled_sigmemoryfault(void);
+
/* Depending on which OS we're running under, different signals might
* be raised for a violation of write protection in the heap. This
* function factors out the common generational GC magic which needs
/* Check whether the fault is within the dynamic space. */
if (page_index == (-1)) {
+ /* It can be helpful to be able to put a breakpoint on this
+ * case to help diagnose low-level problems. */
+ unhandled_sigmemoryfault();
+
/* not within the dynamic space -- not our responsibility */
return 0;
return 1;
}
}
+
+/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
+ * it's not just a case of the program hitting the write barrier, and
+ * are about to let Lisp deal with it. It's basically just a
+ * convenient place to set a gdb breakpoint. */
+void
+unhandled_sigmemoryfault(void)
+{}