/*
- * GENerational Conservative Garbage Collector for SBCL x86
+ * GENerational Conservative Garbage Collector for SBCL
*/
/*
* <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
*/
+#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <errno.h>
#include "validate.h"
#include "lispregs.h"
#include "arch.h"
-#include "fixnump.h"
#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
+#include "pseudo-atomic.h"
+#include "alloc.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
+#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
+#include "save.h"
#include "genesis/hash-table.h"
+#include "genesis/instance.h"
+#include "genesis/layout.h"
+#include "gencgc.h"
+#if defined(LUTEX_WIDETAG)
+#include "pthread-lutex.h"
+#endif
/* forward declarations */
-long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed);
-static void gencgc_pickup_dynamic(void);
+page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
+ int page_type_flag);
\f
/*
* GC parameters
*/
-/* the number of actual generations. (The number of 'struct
- * generation' objects is one more than this, because one object
- * serves as scratch when GC'ing.) */
-#define NUM_GENERATIONS 6
+/* Generations 0-5 are normal collected generations, 6 is only used as
+ * scratch space by the collector, and should never get collected.
+ */
+enum {
+ SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
+ NUM_GENERATIONS
+};
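+
+/* A worked instance of the numbering above, assuming
+ * PSEUDO_STATIC_GENERATION is 5 as the comment implies: the enum then
+ * gives SCRATCH_GENERATION == 6 and NUM_GENERATIONS == 7, i.e. six
+ * collected generations plus one scratch slot in generations[]. */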
/* Should we use page protection to help avoid the scavenging of pages
* that don't have pointers to younger generations? */
boolean enable_page_protection = 1;
-/* Should we unmap a page and re-mmap it to have it zero filled? */
-#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__sun)
-/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
- * so don't unmap there.
- *
- * The CMU CL comment didn't specify a version, but was probably an
- * old version of FreeBSD (pre-4.0), so this might no longer be true.
- * OTOH, if it is true, this behavior might exist on OpenBSD too, so
- * for now we don't unmap there either. -- WHN 2001-04-07 */
-/* Apparently this flag is required to be 0 for SunOS/x86, as there
- * are reports of heap corruption otherwise. */
-boolean gencgc_unmap_zero = 0;
-#else
-boolean gencgc_unmap_zero = 1;
-#endif
-
/* the minimum size (in bytes) for a large object */
-unsigned large_object_size = 4 * PAGE_BYTES;
+long large_object_size = 4 * PAGE_BYTES;
\f
/*
* debugging
*/
-
-
/* the verbosity level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1. */
-#ifdef QSHOW
-unsigned gencgc_verbose = 1;
+#if QSHOW
+boolean gencgc_verbose = 1;
#else
-unsigned gencgc_verbose = 0;
+boolean gencgc_verbose = 0;
#endif
/* FIXME: At some point enable the various error-checking things below
* and see what they say. */
/* We hunt for pointers to old-space, when GCing generations >= verify_gen.
- * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */
-int verify_gens = NUM_GENERATIONS;
+ * Set verify_gens to HIGHEST_NORMAL_GENERATION + 1 to disable this kind of
+ * check. */
+generation_index_t verify_gens = HIGHEST_NORMAL_GENERATION + 1;
/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;
/* Should we check that free pages are zero filled during gc_free_heap
* called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
+
+/* When loading a core, don't do a full scan of the memory for the
+ * memory region boundaries. (Set to true by coreparse.c if the core
+ * contained a pagetable entry).
+ */
+boolean gencgc_partial_pickup = 0;
+
+/* If defined, free pages are read-protected to ensure that nothing
+ * accesses them.
+ */
+
+/* #define READ_PROTECT_FREE_PAGES */
+
\f
/*
* GC structures and variables
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
-extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
* scavenging. */
-long from_space;
-long new_space;
+generation_index_t from_space;
+generation_index_t new_space;
+
+/* Set to 1 when in GC */
+boolean gc_active_p = 0;
+/* Should the GC be conservative on the stack? If false (only right before
+ * saving a core), don't scan the stack / mark pages dont_move. */
+static boolean conservative_stack = 1;
-/* An array of page structures is statically allocated.
+/* An array of page structures is allocated on gc initialization.
 * This helps quickly map between an address and its page structure.
- * NUM_PAGES is set from the size of the dynamic space. */
-struct page page_table[NUM_PAGES];
+ * page_table_pages is set from the size of the dynamic space. */
+page_index_t page_table_pages;
+struct page *page_table;
+
+static inline boolean page_allocated_p(page_index_t page) {
+ return (page_table[page].allocated != FREE_PAGE_FLAG);
+}
+
+static inline boolean page_no_region_p(page_index_t page) {
+ return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
+}
+
+static inline boolean page_allocated_no_region_p(page_index_t page) {
+ return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
+ && page_no_region_p(page));
+}
+
+static inline boolean page_free_p(page_index_t page) {
+ return (page_table[page].allocated == FREE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_p(page_index_t page) {
+ return (page_table[page].allocated & BOXED_PAGE_FLAG);
+}
+
+static inline boolean code_page_p(page_index_t page) {
+ return (page_table[page].allocated & CODE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_no_region_p(page_index_t page) {
+ return page_boxed_p(page) && page_no_region_p(page);
+}
+
+static inline boolean page_unboxed_p(page_index_t page) {
+ /* Both flags set == boxed code page */
+ return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
+ && !page_boxed_p(page));
+}
+
+static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
+ return (page_boxed_no_region_p(page)
+ && (page_table[page].bytes_used != 0)
+ && !page_table[page].dont_move
+ && (page_table[page].gen == generation));
+}
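+
+/* A minimal usage sketch for the predicates above: a scan for in-use
+ * pages of a given generation can be written
+ *     if (page_allocated_p(i) && (page_table[i].gen == generation))
+ * rather than comparing page_table[i].allocated against flag values
+ * directly. */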
/* To map addresses to page structures the address of the first page
* is needed. */
static void *heap_base = NULL;
-#if N_WORD_BITS == 32
- #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
-#elif N_WORD_BITS == 64
- #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
-#endif
-
/* Calculate the start address for the given page number. */
inline void *
-page_address(long page_num)
+page_address(page_index_t page_num)
{
return (heap_base + (page_num * PAGE_BYTES));
}
+/* Calculate the address where the allocation region associated with
+ * the page starts. */
+static inline void *
+page_region_start(page_index_t page_index)
+{
+ return page_address(page_index)-page_table[page_index].region_start_offset;
+}
+
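+/* For example, a page lying two pages past the start of its (page
+ * aligned) allocation region has region_start_offset == 2*PAGE_BYTES,
+ * so page_region_start() walks back exactly two pages; see
+ * gc_alloc_new_region(), which stores
+ * void_diff(page_address(i), alloc_region->start_addr) there. */
+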
/* Find the page index within the page_table for the given
* address. Return -1 on failure. */
-inline long
+inline page_index_t
find_page_index(void *addr)
{
- long index = addr-heap_base;
-
- if (index >= 0) {
- index = ((unsigned long)index)/PAGE_BYTES;
- if (index < NUM_PAGES)
+ if (addr >= heap_base) {
+ page_index_t index = ((pointer_sized_uint_t)addr -
+ (pointer_sized_uint_t)heap_base) / PAGE_BYTES;
+ if (index < page_table_pages)
return (index);
}
-
return (-1);
}
-/* a structure to hold the state of a generation */
+static size_t
+npage_bytes(long npages)
+{
+ gc_assert(npages>=0);
+ return ((unsigned long)npages)*PAGE_BYTES;
+}
+
+/* Check that X is a higher address than Y and return offset from Y to
+ * X in bytes. */
+static inline
+size_t void_diff(void *x, void *y)
+{
+ gc_assert(x >= y);
+ return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
+}
+
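+/* E.g. void_diff(page_address(i), alloc_region->start_addr), as used
+ * below, gives the byte offset of page i into its open allocation
+ * region. */
+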
+/* a structure to hold the state of a generation
+ *
+ * CAUTION: If you modify this, make sure to touch up the alien
+ * definition in src/code/gc.lisp accordingly. ...or better yet,
+ * deal with the FIXME there...
+ */
struct generation {
/* the first page that gc_alloc() checks on its next call */
- long alloc_start_page;
+ page_index_t alloc_start_page;
/* the first page that gc_alloc_unboxed() checks on its next call */
- long alloc_unboxed_start_page;
+ page_index_t alloc_unboxed_start_page;
/* the first page that gc_alloc_large (boxed) considers on its next
* call. (Although it always allocates after the boxed_region.) */
- long alloc_large_start_page;
+ page_index_t alloc_large_start_page;
/* the first page that gc_alloc_large (unboxed) considers on its
* next call. (Although it always allocates after the
* current_unboxed_region.) */
- long alloc_large_unboxed_start_page;
+ page_index_t alloc_large_unboxed_start_page;
/* the bytes allocated to this generation */
- long bytes_allocated;
+ unsigned long bytes_allocated;
/* the number of bytes at which to trigger a GC */
- long gc_trigger;
+ unsigned long gc_trigger;
/* to calculate a new level for gc_trigger */
- long bytes_consed_between_gc;
+ unsigned long bytes_consed_between_gc;
/* the number of GCs since the last raise */
int num_gc;
- /* the average age after which a GC will raise objects to the
+ /* the number of GCs to run on this generation before raising objects to the
* next generation */
- int trigger_age;
+ int number_of_gcs_before_promotion;
/* the cumulative sum of the bytes allocated to this generation. It is
 * cleared after a GC of this generation, and updated before new
* objects are added from a GC of a younger generation. Dividing by
* the bytes_allocated will give the average age of the memory in
* this generation since its last GC. */
- long cum_sum_bytes_allocated;
+ unsigned long cum_sum_bytes_allocated;
/* a minimum average memory age before a GC will occur helps
* prevent a GC when a large number of new live objects have been
* added, in which case a GC could be a waste of time */
- double min_av_mem_age;
+ double minimum_age_before_gc;
+
+ /* A linked list of lutex structures in this generation, used for
+ * implementing lutex finalization. */
+#ifdef LUTEX_WIDETAG
+ struct lutex *lutexes;
+#else
+ void *lutexes;
+#endif
};
-/* the number of actual generations. (The number of 'struct
- * generation' objects is one more than this, because one object
- * serves as scratch when GC'ing.) */
-#define NUM_GENERATIONS 6
/* an array of generation structures. There needs to be one more
* generation structure than actual generations as the oldest
* generation is temporarily raised then lowered. */
-struct generation generations[NUM_GENERATIONS+1];
+struct generation generations[NUM_GENERATIONS];
/* the oldest generation that will currently be GCed by default.
- * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
+ * Valid values are: 0, 1, ... HIGHEST_NORMAL_GENERATION
*
- * The default of (NUM_GENERATIONS-1) enables GC on all generations.
+ * The default of HIGHEST_NORMAL_GENERATION enables GC on all generations.
*
* Setting this to 0 effectively disables the generational nature of
* the GC. In some applications generational GC may not be useful
* An intermediate value could be handy after moving long-lived data
* into an older generation so an unnecessary GC of this long-lived
* data can be avoided. */
-unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
+generation_index_t gencgc_oldest_gen_to_gc = HIGHEST_NORMAL_GENERATION;
/* The maximum free page in the heap is maintained and used to update
* ALLOCATION_POINTER which is used by the room function to limit its
* search of the heap. XX Gencgc obviously needs to be better
* integrated with the Lisp code. */
-static long last_free_page;
+page_index_t last_free_page;
\f
+#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
* allocating new regions which overlap each other. Note that the
* majority of GC is single-threaded, but alloc() may be called from
* >1 thread at a time and must be thread-safe. This lock must be
* seized before all accesses to generations[] or to parts of
* page_table[] that other threads may want to see */
-
-#ifdef LISP_FEATURE_SB_THREAD
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
+/* This lock is used to protect non-thread-local allocation. */
+static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
\f
/* Count the number of pages which are write-protected within the
* given generation. */
static long
-count_write_protect_generation_pages(int generation)
+count_write_protect_generation_pages(generation_index_t generation)
{
- long i;
- long count = 0;
+ page_index_t i;
+ unsigned long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected == 1))
count++;
/* Count the number of pages within the given generation. */
static long
-count_generation_pages(int generation)
+count_generation_pages(generation_index_t generation)
{
- long i;
+ page_index_t i;
long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != 0)
+ if (page_allocated_p(i)
&& (page_table[i].gen == generation))
count++;
return count;
}
-#ifdef QSHOW
+#if QSHOW
static long
count_dont_move_pages(void)
{
- long i;
+ page_index_t i;
long count = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
+ if (page_allocated_p(i)
+ && (page_table[i].dont_move != 0)) {
++count;
}
}
/* Work through the pages and add up the number of bytes used for the
* given generation. */
-static long
-count_generation_bytes_allocated (int gen)
+static unsigned long
+count_generation_bytes_allocated (generation_index_t gen)
{
- long i;
- long result = 0;
+ page_index_t i;
+ unsigned long result = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
+ if (page_allocated_p(i)
+ && (page_table[i].gen == gen))
result += page_table[i].bytes_used;
}
return result;
}
/* Return the average age of the memory in a generation. */
-static double
-gen_av_mem_age(int gen)
+extern double
+generation_average_age(generation_index_t gen)
{
if (generations[gen].bytes_allocated == 0)
return 0.0;
/ ((double)generations[gen].bytes_allocated);
}
-void fpu_save(int *); /* defined in x86-assem.S */
-void fpu_restore(int *); /* defined in x86-assem.S */
-/* The verbose argument controls how much to print: 0 for normal
- * level of detail; 1 for debugging. */
+/* Print heap statistics for each normal generation to stderr. */
-static void
-print_generation_stats(int verbose) /* FIXME: should take FILE argument */
+extern void
+print_generation_stats() /* FIXME: should take FILE argument, or construct a string */
{
- int i, gens;
- int fpu_state[27];
+ generation_index_t i;
+
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+#define FPU_STATE_SIZE 27
+ int fpu_state[FPU_STATE_SIZE];
+#elif defined(LISP_FEATURE_PPC)
+#define FPU_STATE_SIZE 32
+ long long fpu_state[FPU_STATE_SIZE];
+#endif
/* This code uses the FP instructions which may be set up for Lisp
* so they need to be saved and reset for C. */
fpu_save(fpu_state);
- /* number of generations to print */
- if (verbose)
- gens = NUM_GENERATIONS+1;
- else
- gens = NUM_GENERATIONS;
-
/* Print the heap stats. */
fprintf(stderr,
- " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
+ " Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
- for (i = 0; i < gens; i++) {
- int j;
- int boxed_cnt = 0;
- int unboxed_cnt = 0;
- int large_boxed_cnt = 0;
- int large_unboxed_cnt = 0;
- int pinned_cnt=0;
+ for (i = 0; i < SCRATCH_GENERATION; i++) {
+ page_index_t j;
+ long boxed_cnt = 0;
+ long unboxed_cnt = 0;
+ long large_boxed_cnt = 0;
+ long large_unboxed_cnt = 0;
+ long pinned_cnt=0;
for (j = 0; j < last_free_page; j++)
if (page_table[j].gen == i) {
/* Count the number of boxed pages within the given
* generation. */
- if (page_table[j].allocated & BOXED_PAGE_FLAG) {
+ if (page_boxed_p(j)) {
if (page_table[j].large_object)
large_boxed_cnt++;
else
if(page_table[j].dont_move) pinned_cnt++;
/* Count the number of unboxed pages within the given
* generation. */
- if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+ if (page_unboxed_p(j)) {
if (page_table[j].large_object)
large_unboxed_cnt++;
else
gc_assert(generations[i].bytes_allocated
== count_generation_bytes_allocated(i));
fprintf(stderr,
- " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
i,
- boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
+ generations[i].alloc_start_page,
+ generations[i].alloc_unboxed_start_page,
+ generations[i].alloc_large_start_page,
+ generations[i].alloc_large_unboxed_start_page,
+ boxed_cnt,
+ unboxed_cnt,
+ large_boxed_cnt,
+ large_unboxed_cnt,
pinned_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES
+ (npage_bytes(count_generation_pages(i))
- generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
- gen_av_mem_age(i));
+ generation_average_age(i));
}
- fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated);
+ fprintf(stderr," Total bytes allocated = %lu\n", bytes_allocated);
+ fprintf(stderr," Dynamic-space-size bytes = %u\n", dynamic_space_size);
fpu_restore(fpu_state);
}
\f
-/*
- * allocation routines
+
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+void fast_bzero(void*, size_t); /* in <arch>-assem.S */
+#endif
+
+/* Zero the pages from START to END (inclusive), but use mmap/munmap instead
+ * of zeroing them ourselves, i.e. in practice give the memory back to the
+ * OS. Generally done after a large GC.
+ */
+void zero_pages_with_mmap(page_index_t start, page_index_t end) {
+ page_index_t i;
+ void *addr = page_address(start), *new_addr;
+ size_t length;
+
+ if (start > end)
+ return;
+
+ /* Compute the length only after the range check: npage_bytes()
+ * asserts a non-negative page count. */
+ length = npage_bytes(1+end-start);
+ os_invalidate(addr, length);
+ new_addr = os_validate(addr, length);
+ if (new_addr == NULL || new_addr != addr) {
+ lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
+ start, new_addr);
+ }
+
+ for (i = start; i <= end; i++) {
+ page_table[i].need_to_zero = 0;
+ }
+}
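+
+/* E.g. zero_pages_with_mmap(10, 12) returns three pages to the OS and
+ * clears their need_to_zero flags, so a later zero_dirty_pages() over
+ * the same range has no zeroing left to do. */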
+
+/* Zero the pages from START to END (inclusive). Generally done just after
+ * a new region has been allocated.
+ */
+static void
+zero_pages(page_index_t start, page_index_t end) {
+ if (start > end)
+ return;
+
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+ fast_bzero(page_address(start), npage_bytes(1+end-start));
+#else
+ bzero(page_address(start), npage_bytes(1+end-start));
+#endif
+
+}
+
+/* Zero the pages from START to END (inclusive), except for those
+ * pages that are known to be already zeroed. Mark all pages in the
+ * range as non-zeroed.
*/
+static void
+zero_dirty_pages(page_index_t start, page_index_t end) {
+ page_index_t i;
+
+ for (i = start; i <= end; i++) {
+ if (page_table[i].need_to_zero == 1) {
+ zero_pages(start, end);
+ break;
+ }
+ }
+
+ for (i = start; i <= end; i++) {
+ page_table[i].need_to_zero = 1;
+ }
+}
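+
+/* Note the all-or-nothing behaviour above: a single page with
+ * need_to_zero set causes the whole range to be zeroed, and the whole
+ * range is then marked as needing zeroing again, since the callers
+ * (gc_alloc_new_region() and gc_alloc_large()) are about to hand the
+ * pages out for allocation. */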
+
/*
* To support quick and inline allocation, regions of memory can be
struct alloc_region unboxed_region;
/* The generation currently being allocated to. */
-static int gc_alloc_generation;
+static generation_index_t gc_alloc_generation;
+
+static inline page_index_t
+generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
+{
+ if (large) {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ return generations[generation].alloc_large_unboxed_start_page;
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
+ return generations[generation].alloc_large_start_page;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+ } else {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ return generations[generation].alloc_unboxed_start_page;
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
+ return generations[generation].alloc_start_page;
+ } else {
+ lose("bad page_type_flag: %d", page_type_flag);
+ }
+ }
+}
+
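+/* So, e.g., generation_alloc_start_page(gen, BOXED_PAGE_FLAG, 0)
+ * yields generations[gen].alloc_start_page, the page where the next
+ * small boxed allocation in GEN begins its search; the setter below
+ * mirrors the same dispatch. */
+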
+static inline void
+set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
+ page_index_t page)
+{
+ if (large) {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ generations[generation].alloc_large_unboxed_start_page = page;
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
+ generations[generation].alloc_large_start_page = page;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+ } else {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ generations[generation].alloc_unboxed_start_page = page;
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
+ generations[generation].alloc_start_page = page;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+ }
+}
/* Find a new region with room for at least the given number of bytes.
*
* are allocated, although they will initially be empty.
*/
static void
-gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
- long first_page;
- long last_page;
- long bytes_found;
- long i;
+ page_index_t first_page;
+ page_index_t last_page;
+ unsigned long bytes_found;
+ page_index_t i;
+ int ret;
/*
FSHOW((stderr,
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- thread_mutex_lock(&free_pages_lock);
- if (unboxed) {
- first_page =
- generations[gc_alloc_generation].alloc_unboxed_start_page;
- } else {
- first_page =
- generations[gc_alloc_generation].alloc_start_page;
- }
- last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
+ first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
+ last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
+ + npage_bytes(last_page-first_page);
/* Set up the alloc_region. */
alloc_region->first_page = first_page;
/* The first page may have already been in use. */
if (page_table[first_page].bytes_used == 0) {
- if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[first_page].allocated = BOXED_PAGE_FLAG;
+ page_table[first_page].allocated = page_type_flag;
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].large_object = 0;
- page_table[first_page].first_object_offset = 0;
+ page_table[first_page].region_start_offset = 0;
}
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == page_type_flag);
page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
for (i = first_page+1; i <= last_page; i++) {
- if (unboxed)
- page_table[i].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[i].allocated = BOXED_PAGE_FLAG;
+ page_table[i].allocated = page_type_flag;
page_table[i].gen = gc_alloc_generation;
page_table[i].large_object = 0;
/* This may not be necessary for unboxed regions (think it was
* broken before!) */
- page_table[i].first_object_offset =
- alloc_region->start_addr - page_address(i);
+ page_table[i].region_start_offset =
+ void_diff(page_address(i),alloc_region->start_addr);
page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
}
/* Bump up last_free_page. */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
- SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
- 0);
+ /* do we only want to call this on special occasions? like for
+ * boxed_region? */
+ set_alloc_pointer((lispobj)page_address(last_free_page));
}
- thread_mutex_unlock(&free_pages_lock);
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
+
+#ifdef READ_PROTECT_FREE_PAGES
+ os_protect(page_address(first_page),
+ npage_bytes(1+last_page-first_page),
+ OS_VM_PROT_ALL);
+#endif
+
+ /* If the first page was only partial, don't check whether it's
+ * zeroed (it won't be) and don't zero it (since the parts that
+ * we're interested in are guaranteed to be zeroed).
+ */
+ if (page_table[first_page].bytes_used) {
+ first_page++;
+ }
+
+ zero_dirty_pages(first_page, last_page);
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
* (long) in code like this, so that it is less likely to
* break randomly when running on a machine with different
* word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero.", p);
+ lose("The new region at %x is not zero (start=%p, end=%p).\n",
+ p, alloc_region->start_addr, alloc_region->end_addr);
}
+ }
}
}
-}
-
/* If the record_new_objects flag is 2 then all new regions created
* are recorded.
*
* scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
-static long new_areas_ignore_page;
+static page_index_t new_areas_ignore_page;
struct new_area {
- long page;
- long offset;
- long size;
+ page_index_t page;
+ size_t offset;
+ size_t size;
};
static struct new_area (*new_areas)[];
static long new_areas_index;
/* Add a new area to new_areas. */
static void
-add_new_area(long first_page, long offset, long size)
+add_new_area(page_index_t first_page, size_t offset, size_t size)
{
- unsigned new_area_start,c;
+ unsigned long new_area_start,c;
long i;
/* Ignore if full. */
gc_abort();
}
- new_area_start = PAGE_BYTES*first_page + offset;
+ new_area_start = npage_bytes(first_page) + offset;
/* Search backwards for a prior area that this follows from. If
found this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
- unsigned area_end =
- PAGE_BYTES*((*new_areas)[i].page)
+ unsigned long area_end =
+ npage_bytes((*new_areas)[i].page)
+ (*new_areas)[i].offset
+ (*new_areas)[i].size;
/*FSHOW((stderr,
* it is safe to try to re-update the page table of this reset
* alloc_region. */
void
-gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
+gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
- long more;
- long first_page;
- long next_page;
- long bytes_used;
- long orig_first_page_bytes_used;
- long region_size;
- long byte_cnt;
+ int more;
+ page_index_t first_page;
+ page_index_t next_page;
+ unsigned long bytes_used;
+ unsigned long orig_first_page_bytes_used;
+ unsigned long region_size;
+ unsigned long byte_cnt;
+ int ret;
first_page = alloc_region->first_page;
next_page = first_page+1;
- thread_mutex_lock(&free_pages_lock);
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
if (alloc_region->free_pointer != alloc_region->start_addr) {
/* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
- gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
+ gc_assert(alloc_region->start_addr ==
+ (page_address(first_page)
+ + page_table[first_page].bytes_used));
/* All the pages used need to be updated */
/* Update the first page. */
/* If the page was free then set up the gen, and
- * first_object_offset. */
+ * region_start_offset. */
if (page_table[first_page].bytes_used == 0)
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated & page_type_flag);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
/* Calculate the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ if ((bytes_used = void_diff(alloc_region->free_pointer,
+ page_address(first_page)))
+ >PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
byte_cnt += bytes_used;
- /* All the rest of the pages should be free. We need to set their
- * first_object_offset pointer to the start of the region, and set
- * the bytes_used. */
+ /* All the rest of the pages should be free. We need to set
+ * their region_start_offset pointer to the start of the
+ * region, and set the bytes_used. */
while (more) {
page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (unboxed)
- gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[next_page].allocated & page_type_flag);
gc_assert(page_table[next_page].bytes_used == 0);
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
- gc_assert(page_table[next_page].first_object_offset ==
- alloc_region->start_addr - page_address(next_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ void_diff(page_address(next_page),
+ alloc_region->start_addr));
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>PAGE_BYTES) {
+ if ((bytes_used = void_diff(alloc_region->free_pointer,
+ page_address(next_page)))>PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
next_page++;
}
- region_size = alloc_region->free_pointer - alloc_region->start_addr;
+ region_size = void_diff(alloc_region->free_pointer,
+ alloc_region->start_addr);
bytes_allocated += region_size;
generations[gc_alloc_generation].bytes_allocated += region_size;
/* Set the generations alloc restart page to the last page of
* the region. */
- if (unboxed)
- generations[gc_alloc_generation].alloc_unboxed_start_page =
- next_page-1;
- else
- generations[gc_alloc_generation].alloc_start_page = next_page-1;
+ set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);
/* Add the region to the new_areas if requested. */
- if (!unboxed)
+ if (BOXED_PAGE_FLAG & page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used, region_size);
/*
page_table[next_page].allocated = FREE_PAGE_FLAG;
next_page++;
}
- thread_mutex_unlock(&free_pages_lock);
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
+
/* alloc_region is per-thread, we're ok to do this unlocked */
gc_set_region_empty(alloc_region);
}
/* Allocate a possibly large object. */
void *
-gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
- long first_page;
- long last_page;
- long orig_first_page_bytes_used;
+ page_index_t first_page;
+ page_index_t last_page;
+ int orig_first_page_bytes_used;
long byte_cnt;
- long more;
- long bytes_used;
- long next_page;
+ int more;
+ unsigned long bytes_used;
+ page_index_t next_page;
+ int ret;
- thread_mutex_lock(&free_pages_lock);
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
- if (unboxed) {
- first_page =
- generations[gc_alloc_generation].alloc_large_unboxed_start_page;
- } else {
- first_page = generations[gc_alloc_generation].alloc_large_start_page;
- }
+ first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
if (first_page <= alloc_region->last_page) {
first_page = alloc_region->last_page+1;
}
- last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
+ last_page=gc_find_freeish_pages(&first_page,nbytes, page_type_flag);
gc_assert(first_page > alloc_region->last_page);
- if (unboxed)
- generations[gc_alloc_generation].alloc_large_unboxed_start_page =
- last_page;
- else
- generations[gc_alloc_generation].alloc_large_start_page = last_page;
+
+ set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);
/* Set up the pages. */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
/* If the first page was free then set up the gen, and
- * first_object_offset. */
+ * region_start_offset. */
if (page_table[first_page].bytes_used == 0) {
- if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[first_page].allocated = BOXED_PAGE_FLAG;
+ page_table[first_page].allocated = page_type_flag;
page_table[first_page].gen = gc_alloc_generation;
- page_table[first_page].first_object_offset = 0;
+ page_table[first_page].region_start_offset = 0;
page_table[first_page].large_object = 1;
}
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == page_type_flag);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 1);
next_page = first_page+1;
/* All the rest of the pages should be free. We need to set their
- * first_object_offset pointer to the start of the region, and
- * set the bytes_used. */
+ * region_start_offset pointer to the start of the region, and set
+ * the bytes_used. */
while (more) {
- gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_free_p(next_page));
gc_assert(page_table[next_page].bytes_used == 0);
- if (unboxed)
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[next_page].allocated = BOXED_PAGE_FLAG;
+ page_table[next_page].allocated = page_type_flag;
page_table[next_page].gen = gc_alloc_generation;
page_table[next_page].large_object = 1;
- page_table[next_page].first_object_offset =
- orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
+ page_table[next_page].region_start_offset =
+ npage_bytes(next_page-first_page) - orig_first_page_bytes_used;
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+ bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
+ if (bytes_used > PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
generations[gc_alloc_generation].bytes_allocated += nbytes;
/* Add the region to the new_areas if requested. */
- if (!unboxed)
+ if (BOXED_PAGE_FLAG & page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used,nbytes);
/* Bump up last_free_page */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
- SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
+ set_alloc_pointer((lispobj)(page_address(last_free_page)));
}
- thread_mutex_unlock(&free_pages_lock);
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
+
+#ifdef READ_PROTECT_FREE_PAGES
+ os_protect(page_address(first_page),
+ npage_bytes(1+last_page-first_page),
+ OS_VM_PROT_ALL);
+#endif
- return((void *)(page_address(first_page)+orig_first_page_bytes_used));
+ zero_dirty_pages(first_page, last_page);
+
+ return page_address(first_page);
}
-long
-gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed)
+static page_index_t gencgc_alloc_start_page = -1;
+
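+/* Handle running out of heap. Two outcomes are possible: inside GC,
+ * or with nothing available at all, there is no sane way back into
+ * Lisp, so print the generation stats and lose(); otherwise release
+ * free_pages_lock, leave the pseudo-atomic section, and signal a
+ * HEAP-EXHAUSTED-ERROR on the Lisp side. */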
+void
+gc_heap_exhausted_error_or_lose (long available, long requested)
+{
+ struct thread *thread = arch_os_get_current_thread();
+ /* Write basic information before doing anything else: if we don't
+ * call into Lisp this is a must, and even if we do there is always
+ * the danger that we bounce back here before the error has been
+ * handled, or indeed even printed.
+ */
+ fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
+ gc_active_p ? "garbage collection" : "allocation",
+ available, requested);
+ if (gc_active_p || (available == 0)) {
+ /* If we are in GC, or totally out of memory there is no way
+ * to sanely transfer control to the lisp-side of things.
+ */
+ print_generation_stats();
+ fprintf(stderr, "GC control variables:\n");
+ fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
+ SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
+ (SymbolValue(GC_PENDING, thread) == T) ?
+ "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
+ "false" : "in progress"));
+#ifdef LISP_FEATURE_SB_THREAD
+ fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n",
+ SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
+#endif
+ lose("Heap exhausted, game over.");
+ }
+ else {
+ /* FIXME: assert free_pages_lock held */
+ (void)thread_mutex_unlock(&free_pages_lock);
+ gc_assert(get_pseudo_atomic_atomic(thread));
+ clear_pseudo_atomic_atomic(thread);
+ if (get_pseudo_atomic_interrupted(thread))
+ do_pending_interrupt();
+ /* Another issue is that signalling HEAP-EXHAUSTED error leads
+ * to running user code at arbitrary places, even in a
+ * WITHOUT-INTERRUPTS which may lead to a deadlock without
+ * running out of the heap. So at this point all bets are
+ * off. */
+ if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
+ corruption_warning_and_maybe_lose
+ ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
+ funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
+ alloc_number(available), alloc_number(requested));
+ lose("HEAP-EXHAUSTED-ERROR fell through");
+ }
+}
+
+page_index_t
+gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
+ int page_type_flag)
{
- long first_page;
- long last_page;
- long region_size;
- long restart_page=*restart_page_ptr;
- long bytes_found;
- long num_pages;
- long large_p=(nbytes>=large_object_size);
+ page_index_t first_page, last_page;
+ page_index_t restart_page = *restart_page_ptr;
+ long bytes_found = 0;
+ long most_bytes_found = 0;
/* FIXME: assert(free_pages_lock is held); */
- /* Search for a contiguous free space of at least nbytes. If it's
- * a large object then align it on a page boundary by searching
- * for a free page. */
+ /* Toggled by gc_and_save for heap compaction, normally -1. */
+ if (gencgc_alloc_start_page != -1) {
+ restart_page = gencgc_alloc_start_page;
+ }
- do {
- first_page = restart_page;
- if (large_p)
- while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE_FLAG))
- first_page++;
- else
- while (first_page < NUM_PAGES) {
- if(page_table[first_page].allocated == FREE_PAGE_FLAG)
- break;
- if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0)) {
- break;
- }
+ gc_assert(nbytes>=0);
+ if (((unsigned long)nbytes)>=PAGE_BYTES) {
+ /* Search for a contiguous free space of at least nbytes,
+ * aligned on a page boundary. The page-alignment is strictly
+ * speaking needed only for objects at least large_object_size
+ * bytes in size. */
+ do {
+ first_page = restart_page;
+ while ((first_page < page_table_pages) &&
+ page_allocated_p(first_page))
first_page++;
+
+ last_page = first_page;
+ bytes_found = PAGE_BYTES;
+ while ((bytes_found < nbytes) &&
+ (last_page < (page_table_pages-1)) &&
+ page_free_p(last_page+1)) {
+ last_page++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(0 == page_table[last_page].bytes_used);
+ gc_assert(0 == page_table[last_page].write_protected);
}
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ restart_page = last_page + 1;
+ } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
+ } else {
+ /* Search for a page with at least nbytes of space. We prefer
+ * not to split small objects on multiple pages, to reduce the
+ * number of contiguous allocation regions spanning multiple
+ * pages: this helps avoid excessive conservatism. */
+ first_page = restart_page;
+ while (first_page < page_table_pages) {
+ if (page_free_p(first_page))
+ {
+ gc_assert(0 == page_table[first_page].bytes_used);
+ bytes_found = PAGE_BYTES;
+ break;
+ }
+ else if ((page_table[first_page].allocated == page_type_flag) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0))
+ {
+ bytes_found = PAGE_BYTES
+ - page_table[first_page].bytes_used;
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ if (bytes_found >= nbytes)
+ break;
+ }
+ first_page++;
}
-
- gc_assert(page_table[first_page].write_protected == 0);
-
last_page = first_page;
- bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes)
- || (!large_p && (num_pages < 2)))
- && (last_page < (NUM_PAGES-1))
- && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
- last_page++;
- num_pages++;
- bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
- }
-
- region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
- restart_page = last_page + 1;
- } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
+ restart_page = first_page + 1;
+ }
/* Check for a failure */
- if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
- fprintf(stderr,
- "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
+ if (bytes_found < nbytes) {
+ gc_assert(restart_page >= page_table_pages);
+ gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
}
- *restart_page_ptr=first_page;
+
+ gc_assert(page_table[first_page].write_protected == 0);
+
+ *restart_page_ptr = first_page;
return last_page;
}
* functions will eventually call this */
void *
-gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
+gc_alloc_with_region(long nbytes,int page_type_flag, struct alloc_region *my_region,
int quick_p)
{
void *new_free_pointer;
- if(nbytes>=large_object_size)
- return gc_alloc_large(nbytes,unboxed_p,my_region);
+ if (nbytes>=large_object_size)
+ return gc_alloc_large(nbytes, page_type_flag, my_region);
/* Check whether there is room in the current alloc region. */
new_free_pointer = my_region->free_pointer + nbytes;
/* Unless a `quick' alloc was requested, check whether the
alloc region is almost empty. */
if (!quick_p &&
- (my_region->end_addr - my_region->free_pointer) <= 32) {
+ void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
/* If so, finished with the current region. */
- gc_alloc_update_page_tables(unboxed_p, my_region);
+ gc_alloc_update_page_tables(page_type_flag, my_region);
/* Set up a new region. */
- gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
+ gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
}
return((void *)new_obj);
/* Else not enough free space in the current region: retry with a
* new region. */
- gc_alloc_update_page_tables(unboxed_p, my_region);
- gc_alloc_new_region(nbytes, unboxed_p, my_region);
- return gc_alloc_with_region(nbytes,unboxed_p,my_region,0);
+ gc_alloc_update_page_tables(page_type_flag, my_region);
+ gc_alloc_new_region(nbytes, page_type_flag, my_region);
+ return gc_alloc_with_region(nbytes, page_type_flag, my_region,0);
}
/* these are only used during GC: all allocation from the mutator calls
* alloc() -> gc_alloc_with_region() with the appropriate per-thread
* region */
-void *
-gc_general_alloc(long nbytes,int unboxed_p,int quick_p)
-{
- struct alloc_region *my_region =
- unboxed_p ? &unboxed_region : &boxed_region;
- return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
-}
-
static inline void *
gc_quick_alloc(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}
static inline void *
gc_quick_alloc_large(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}
static inline void *
gc_alloc_unboxed(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
+ return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
}
static inline void *
gc_quick_alloc_unboxed(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
static inline void *
gc_quick_alloc_large_unboxed(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
\f
-/*
- * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
- */
-
-extern long (*scavtab[256])(lispobj *where, lispobj object);
-extern lispobj (*transother[256])(lispobj object);
-extern long (*sizetab[256])(lispobj *where);
/* Copy a large boxed object. If the object is in a large object
* region then it is simply promoted, else it is copied. If it's large
{
int tag;
lispobj *new;
- long first_page;
+ page_index_t first_page;
gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
/* Promote the object. */
- long remaining_bytes;
- long next_page;
- long bytes_freed;
- long old_bytes_used;
+ unsigned long remaining_bytes;
+ page_index_t next_page;
+ unsigned long bytes_freed;
+ unsigned long old_bytes_used;
/* Note: Any page write-protection must be removed, else a
* later scavenge_newspace may incorrectly not scavenge these
* new areas, but let's do it for them all (they'll probably
* be written anyway?). */
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_boxed_p(next_page));
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ /* Should have been unprotected by unprotect_oldspace(). */
+ gc_assert(page_table[next_page].write_protected == 0);
page_table[next_page].gen = new_space;
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[next_page].write_protected = 0;
- }
remaining_bytes -= PAGE_BYTES;
next_page++;
}
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_boxed_p(next_page));
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
+ page_boxed_p(next_page) &&
page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
+ (page_table[next_page].region_start_offset ==
+ npage_bytes(next_page - first_page))) {
/* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected as they
next_page++;
}
- generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
- bytes_freed;
+ generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
+ + bytes_freed;
generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
bytes_allocated -= bytes_freed;
{
int tag;
lispobj *new;
- long first_page;
+ page_index_t first_page;
gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
- if ((nwords > 1024*1024) && gencgc_verbose)
- FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));
+ if ((nwords > 1024*1024) && gencgc_verbose) {
+ FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
+ nwords*N_WORD_BYTES));
+ }
/* Check whether it's a large object. */
first_page = find_page_index((void *)object);
/* Promote the object. Note: Unboxed objects may have been
* allocated to a BOXED region so it may be necessary to
* change the region to UNBOXED. */
- long remaining_bytes;
- long next_page;
- long bytes_freed;
- long old_bytes_used;
+ unsigned long remaining_bytes;
+ page_index_t next_page;
+ unsigned long bytes_freed;
+ unsigned long old_bytes_used;
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
+ gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].gen = new_space;
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
+ (page_table[next_page].region_start_offset ==
+ npage_bytes(next_page - first_page))) {
/* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write-protected, even if
next_page++;
}
- if ((bytes_freed > 0) && gencgc_verbose)
+ if ((bytes_freed > 0) && gencgc_verbose) {
FSHOW((stderr,
"/copy_large_unboxed bytes_freed=%d\n",
bytes_freed));
+ }
- generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
+ generations[from_space].bytes_allocated -=
+ nwords*N_WORD_BYTES + bytes_freed;
generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
bytes_allocated -= bytes_freed;
* Currently only absolute fixups to the constant vector, or to the
* code area are checked. */
void
-sniff_code_object(struct code *code, unsigned displacement)
+sniff_code_object(struct code *code, unsigned long displacement)
{
+#ifdef LISP_FEATURE_X86
long nheader_words, ncode_words, nwords;
void *p;
void *constants_start_addr = NULL, *constants_end_addr;
if (!check_code_fixups)
return;
+ FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
+
ncode_words = fixnum_value(code->code_size);
nheader_words = HeaderValue(*(lispobj *)code);
nwords = ncode_words + nheader_words;
unsigned d2 = *((unsigned char *)p - 2);
unsigned d3 = *((unsigned char *)p - 3);
unsigned d4 = *((unsigned char *)p - 4);
-#ifdef QSHOW
+#if QSHOW
unsigned d5 = *((unsigned char *)p - 5);
unsigned d6 = *((unsigned char *)p - 6);
#endif
&& (data < (code_end_addr-displacement))) {
/* function header */
if ((d4 == 0x5e)
- && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+ && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
+ (unsigned)code)) {
/* Skip the function header */
p += 6*4 - 4 - 1;
continue;
"/code start = %x, end = %x\n",
code_start_addr, code_end_addr));
}
+#endif
}
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
+/* x86-64 uses pc-relative addressing instead of this kludge */
+#ifndef LISP_FEATURE_X86_64
long nheader_words, ncode_words, nwords;
void *constants_start_addr, *constants_end_addr;
void *code_start_addr, *code_end_addr;
lispobj fixups = NIL;
- unsigned displacement = (unsigned)new_code - (unsigned)old_code;
+ unsigned long displacement =
+ (unsigned long)new_code - (unsigned long)old_code;
struct vector *fixups_vector;
ncode_words = fixnum_value(new_code->code_size);
(fixups_vector->header == 0x01)) {
/* If so, then follow it. */
/*SHOW("following pointer to a forwarding pointer");*/
- fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
+ fixups_vector =
+ (struct vector *)native_pointer((lispobj)fixups_vector->length);
}
/*SHOW("got fixups");*/
long length = fixnum_value(fixups_vector->length);
long i;
for (i = 0; i < length; i++) {
- unsigned offset = fixups_vector->data[i];
+ unsigned long offset = fixups_vector->data[i];
/* Now check the current value of offset. */
- unsigned old_value =
- *(unsigned *)((unsigned)code_start_addr + offset);
+ unsigned long old_value =
+ *(unsigned long *)((unsigned long)code_start_addr + offset);
/* If it's within the old_code object then it must be an
* absolute fixup (relative ones are not saved) */
- if ((old_value >= (unsigned)old_code)
- && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES)))
+ if ((old_value >= (unsigned long)old_code)
+ && (old_value < ((unsigned long)old_code
+ + nwords*N_WORD_BYTES)))
/* So add the displacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) =
+ *(unsigned long *)((unsigned long)code_start_addr + offset) =
old_value + displacement;
else
/* It is outside the old code object so it must be a
* relative fixup (absolute fixups are not saved). So
* subtract the displacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) =
+ *(unsigned long *)((unsigned long)code_start_addr + offset) =
old_value - displacement;
}
} else {
- fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
+ /* This used to just print a note to stderr, but a bogus fixup seems to
+ * indicate real heap corruption, so a hard failure is in order. */
+ lose("fixup vector %p has a bad widetag: %d\n",
+ fixups_vector, widetag_of(fixups_vector->header));
}
/* Check for possible errors. */
if (check_code_fixups) {
sniff_code_object(new_code,displacement);
}
+#endif
}
lispobj header;
unsigned long length;
-
gc_assert(is_lisp_pointer(object));
header = *((lispobj *) native_pointer(object));
\f
/*
- * vector-like objects
+ * Lutexes. Using the normal finalization machinery for finalizing
+ * lutexes is tricky, since the finalization depends on working lutexes.
+ * So we track the lutexes in the GC and finalize them manually.
*/
+#if defined(LUTEX_WIDETAG)
-/* FIXME: What does this mean? */
-int gencgc_hash = 1;
+/*
+ * Start tracking LUTEX in the GC, by adding it to the linked list of
+ * lutexes in the nursery generation. The caller is responsible for
+ * locking, and GCs must be inhibited until the registration is
+ * complete.
+ */
+void
+gencgc_register_lutex (struct lutex *lutex) {
+ page_index_t index = find_page_index(lutex);
+ generation_index_t gen;
+ struct lutex *head;
-static long
-scav_vector(lispobj *where, lispobj object)
-{
- unsigned long kv_length;
- lispobj *kv_vector;
- unsigned long length = 0; /* (0 = dummy to stop GCC warning) */
- struct hash_table *hash_table;
- lispobj empty_symbol;
- unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- lispobj weak_p_obj;
- unsigned next_vector_length = 0;
-
- /* FIXME: A comment explaining this would be nice. It looks as
- * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
- * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
- if (HeaderValue(object) != subtype_VectorValidHashing)
- return 1;
+ /* This lutex is in static space, so we don't need to worry about
+ * finalizing it.
+ */
+ if (index == -1)
+ return;
- if (!gencgc_hash) {
- /* This is set for backward compatibility. FIXME: Do we need
- * this any more? */
- *where =
- (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
- return 1;
- }
+ gen = page_table[index].gen;
- kv_length = fixnum_value(where[1]);
- kv_vector = where + 2; /* Skip the header and length. */
- /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
+ gc_assert(gen >= 0);
+ gc_assert(gen < NUM_GENERATIONS);
- /* Scavenge element 0, which may be a hash-table structure. */
- scavenge(where+2, 1);
- if (!is_lisp_pointer(where[2])) {
- lose("no pointer at %x in hash table", where[2]);
- }
- hash_table = (struct hash_table *)native_pointer(where[2]);
- /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
- if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
- lose("hash table not instance (%x at %x)",
- hash_table->header,
- hash_table);
- }
+ head = generations[gen].lutexes;
- /* Scavenge element 1, which should be some internal symbol that
- * the hash table code reserves for marking empty slots. */
- scavenge(where+3, 1);
- if (!is_lisp_pointer(where[3])) {
- lose("not empty-hash-table-slot symbol pointer: %x", where[3]);
- }
- empty_symbol = where[3];
- /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
- if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
- SYMBOL_HEADER_WIDETAG) {
- lose("not a symbol where empty-hash-table-slot symbol expected: %x",
- *(lispobj *)native_pointer(empty_symbol));
- }
+ lutex->gen = gen;
+ lutex->next = head;
+ lutex->prev = NULL;
+ if (head)
+ head->prev = lutex;
+ generations[gen].lutexes = lutex;
+}
- /* Scavenge hash table, which will fix the positions of the other
- * needed objects. */
- scavenge((lispobj *)hash_table,
- sizeof(struct hash_table) / sizeof(lispobj));
+/*
+ * Stop tracking LUTEX in the GC by removing it from the appropriate
+ * linked lists. This will only be called during GC, so no locking is
+ * needed.
+ */
+void
+gencgc_unregister_lutex (struct lutex *lutex) {
+ if (lutex->prev) {
+ lutex->prev->next = lutex->next;
+ } else {
+ generations[lutex->gen].lutexes = lutex->next;
+ }
- /* Cross-check the kv_vector. */
- if (where != (lispobj *)native_pointer(hash_table->table)) {
- lose("hash_table table!=this table %x", hash_table->table);
+ if (lutex->next) {
+ lutex->next->prev = lutex->prev;
}
- /* WEAK-P */
- weak_p_obj = hash_table->weak_p;
+ lutex->next = NULL;
+ lutex->prev = NULL;
+ lutex->gen = -1;
+}
- /* index vector */
- {
- lispobj index_vector_obj = hash_table->index_vector;
-
- if (is_lisp_pointer(index_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)) {
- index_vector =
- ((unsigned long *)native_pointer(index_vector_obj)) + 2;
- /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
- length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
- /*FSHOW((stderr, "/length = %d\n", length));*/
- } else {
- lose("invalid index_vector %x", index_vector_obj);
- }
- }
+/*
+ * Mark all lutexes in generation GEN as not live.
+ */
+static void
+unmark_lutexes (generation_index_t gen) {
+ struct lutex *lutex = generations[gen].lutexes;
- /* next vector */
- {
- lispobj next_vector_obj = hash_table->next_vector;
-
- if (is_lisp_pointer(next_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)) {
- next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2;
- /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
- next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
- /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
- } else {
- lose("invalid next_vector %x", next_vector_obj);
- }
+ while (lutex) {
+ lutex->live = 0;
+ lutex = lutex->next;
}
+}
- /* maybe hash vector */
- {
- lispobj hash_vector_obj = hash_table->hash_vector;
-
- if (is_lisp_pointer(hash_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)){
- hash_vector =
- ((unsigned long *)native_pointer(hash_vector_obj)) + 2;
- /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
- gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
- == next_vector_length);
- } else {
- hash_vector = NULL;
- /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
+/*
+ * Finalize all lutexes in generation GEN that have not been marked live.
+ */
+static void
+reap_lutexes (generation_index_t gen) {
+ struct lutex *lutex = generations[gen].lutexes;
+
+ while (lutex) {
+ struct lutex *next = lutex->next;
+ if (!lutex->live) {
+ lutex_destroy((tagged_lutex_t) lutex);
+ gencgc_unregister_lutex(lutex);
}
+ lutex = next;
}
+}
- /* These lengths could be different as the index_vector can be a
- * different length from the others, a larger index_vector could help
- * reduce collisions. */
- gc_assert(next_vector_length*2 == kv_length);
+/*
+ * Mark LUTEX as live.
+ */
+static void
+mark_lutex (lispobj tagged_lutex) {
+ struct lutex *lutex = (struct lutex*) native_pointer(tagged_lutex);
- /* now all set up.. */
+ lutex->live = 1;
+}
- /* Work through the KV vector. */
- {
- long i;
- for (i = 1; i < next_vector_length; i++) {
- lispobj old_key = kv_vector[2*i];
+/*
+ * Move all lutexes in generation FROM to generation TO.
+ */
+static void
+move_lutexes (generation_index_t from, generation_index_t to) {
+ struct lutex *tail = generations[from].lutexes;
-#if N_WORD_BITS == 32
- unsigned long old_index = (old_key & 0x1fffffff)%length;
-#elif N_WORD_BITS == 64
- unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
-#endif
+ /* Nothing to move */
+ if (!tail)
+ return;
- /* Scavenge the key and value. */
- scavenge(&kv_vector[2*i],2);
+ /* Change the generation of the lutexes in FROM. */
+ while (tail->next) {
+ tail->gen = to;
+ tail = tail->next;
+ }
+ tail->gen = to;
- /* Check whether the key has moved and is EQ based. */
- {
- lispobj new_key = kv_vector[2*i];
-#if N_WORD_BITS == 32
- unsigned long new_index = (new_key & 0x1fffffff)%length;
-#elif N_WORD_BITS == 64
- unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
-#endif
+ /* Link the last lutex in the FROM list to the start of the TO list */
+ tail->next = generations[to].lutexes;
- if ((old_index != new_index) &&
- ((!hash_vector) ||
- (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) &&
- ((new_key != empty_symbol) ||
- (kv_vector[2*i] != empty_symbol))) {
-
- /*FSHOW((stderr,
- "* EQ key %d moved from %x to %x; index %d to %d\n",
- i, old_key, new_key, old_index, new_index));*/
-
- if (index_vector[old_index] != 0) {
- /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
-
- /* Unlink the key from the old_index chain. */
- if (index_vector[old_index] == i) {
- /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
- index_vector[old_index] = next_vector[i];
- /* Link it into the needing rehash chain. */
- next_vector[i] = fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(i);
- /*SHOW("P2");*/
- } else {
- unsigned prior = index_vector[old_index];
- unsigned next = next_vector[prior];
-
- /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
-
- while (next != 0) {
- /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
- if (next == i) {
- /* Unlink it. */
- next_vector[prior] = next_vector[next];
- /* Link it into the needing rehash
- * chain. */
- next_vector[next] =
- fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(next);
- /*SHOW("/P3");*/
- break;
- }
- prior = next;
- next = next_vector[next];
- }
- }
- }
- }
- }
- }
+ /* And vice versa */
+ if (generations[to].lutexes) {
+ generations[to].lutexes->prev = tail;
}
- return (CEILING(kv_length + 2, 2));
+
+ /* And update the generations structures to match this */
+ generations[to].lutexes = generations[from].lutexes;
+ generations[from].lutexes = NULL;
+}
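/* A sketch of the splice performed by move_lutexes() above, on a generic
 * doubly-linked list with illustrative types. Finding the tail is O(n),
 * which is acceptable on the assumption that per-generation lutex lists
 * stay short. */
struct dllist { struct dllist *next, *prev; };

static struct dllist *
splice_lists(struct dllist *from, struct dllist *to)
{
    struct dllist *tail = from;
    if (!tail)
        return to;                 /* nothing to move */
    while (tail->next)
        tail = tail->next;
    tail->next = to;               /* link FROM's tail to TO's head */
    if (to)
        to->prev = tail;           /* and vice versa */
    return from;                   /* head of the combined list */
}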
+
+static long
+scav_lutex(lispobj *where, lispobj object)
+{
+ mark_lutex((lispobj) where);
+
+ return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
}
+static lispobj
+trans_lutex(lispobj object)
+{
+ struct lutex *lutex = (struct lutex *) native_pointer(object);
+ lispobj copied;
+ size_t words = CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
+ gc_assert(is_lisp_pointer(object));
+ copied = copy_object(object, words);
+
+ /* Update the links, since the lutex moved in memory. */
+ if (lutex->next) {
+ lutex->next->prev = (struct lutex *) native_pointer(copied);
+ }
+
+ if (lutex->prev) {
+ lutex->prev->next = (struct lutex *) native_pointer(copied);
+ } else {
+ generations[lutex->gen].lutexes =
+ (struct lutex *) native_pointer(copied);
+ }
+
+ return copied;
+}
+
+static long
+size_lutex(lispobj *where)
+{
+ return CEILING(sizeof(struct lutex)/sizeof(lispobj), 2);
+}
+#endif /* LUTEX_WIDETAG */
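/* A sketch of why scav_lutex/trans_lutex/size_lutex all report
 * CEILING(words, 2): heap objects are kept dualword-aligned, so an
 * object's size in words is rounded up to an even count. Standalone
 * arithmetic only, with illustrative parameter names. */
static unsigned long
object_words_dualword_aligned(unsigned long nbytes, unsigned long word_bytes)
{
    unsigned long words = (nbytes + word_bytes - 1) / word_bytes;
    return (words + 1) & ~1UL;     /* round up to an even word count */
}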
\f
/*
static long
scav_weak_pointer(lispobj *where, lispobj object)
{
- struct weak_pointer *wp = weak_pointers;
- /* Push the weak pointer onto the list of weak pointers.
- * Do I have to watch for duplicates? Originally this was
- * part of trans_weak_pointer but that didn't work in the
- * case where the WP was in a promoted region.
+ /* Since we overwrite the 'next' field, we have to make
+ * sure not to do so for pointers already in the list.
+ * Instead of searching the list of weak_pointers each
+ * time, we ensure that next is always NULL when the weak
+ * pointer isn't in the list, and not NULL otherwise.
+ * Since we can't use NULL to denote end of list, we
+ * use a pointer back to the same weak_pointer.
*/
+ struct weak_pointer * wp = (struct weak_pointer*)where;
- /* Check whether it's already in the list. */
- while (wp != NULL) {
- if (wp == (struct weak_pointer*)where) {
- break;
- }
- wp = wp->next;
- }
- if (wp == NULL) {
- /* Add it to the start of the list. */
- wp = (struct weak_pointer*)where;
- if (wp->next != weak_pointers) {
- wp->next = weak_pointers;
- } else {
- /*SHOW("avoided write to weak pointer");*/
- }
+ if (NULL == wp->next) {
+ wp->next = weak_pointers;
weak_pointers = wp;
+ if (NULL == wp->next)
+ wp->next = wp;
}
/* Do not let GC scavenge the value slot of the weak pointer.
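/* A sketch of the sentinel idiom used in scav_weak_pointer() above, with
 * an illustrative struct: next == NULL means "not on the list", and the
 * final element points back to itself instead of to NULL, so membership
 * is a single field test rather than a list search. */
struct wp_example { struct wp_example *next; };

static void
push_if_absent(struct wp_example *wp, struct wp_example **list)
{
    if (wp->next == NULL) {            /* not yet on the list */
        wp->next = *list ? *list : wp; /* self-pointer ends the list */
        *list = wp;
    }
}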
lispobj *
search_dynamic_space(void *pointer)
{
- long page_index = find_page_index(pointer);
+ page_index_t page_index = find_page_index(pointer);
lispobj *start;
/* The address may be invalid, so do some checks. */
- if ((page_index == -1) ||
- (page_table[page_index].allocated == FREE_PAGE_FLAG))
+ if ((page_index == -1) || page_free_p(page_index))
return NULL;
- start = (lispobj *)((void *)page_address(page_index)
- + page_table[page_index].first_object_offset);
+ start = (lispobj *)page_region_start(page_index);
return (gc_search_space(start,
(((lispobj *)pointer)+2)-start,
(lispobj *)pointer));
}
-/* Is there any possibility that pointer is a valid Lisp object
- * reference, and/or something else (e.g. subroutine call return
- * address) which should prevent us from moving the referred-to thing?
- * This is called from preserve_pointers() */
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+
+/* Helper for valid_lisp_pointer_p and
+ * possibly_valid_dynamic_space_pointer.
+ *
+ * pointer is the pointer to validate, and start_addr is the address
+ * of the enclosing object.
+ */
static int
-possibly_valid_dynamic_space_pointer(lispobj *pointer)
+looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
{
- lispobj *start_addr;
-
- /* Find the object start address. */
- if ((start_addr = search_dynamic_space(pointer)) == NULL) {
- return 0;
- }
-
- /* We need to allow raw pointers into Code objects for return
- * addresses. This will also pick up pointers to functions in code
- * objects. */
- if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
- /* XXX could do some further checks here */
- return 1;
- }
-
- /* If it's not a return address then it needs to be a valid Lisp
- * pointer. */
if (!is_lisp_pointer((lispobj)pointer)) {
return 0;
}
/* Check that the object pointed to is consistent with the pointer
- * low tag.
- */
+ * low tag. */
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
/* Start_addr should be the enclosing code object, or a closure
break;
case CLOSURE_HEADER_WIDETAG:
case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if ((unsigned)pointer !=
- ((unsigned)start_addr+FUN_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if ((unsigned long)pointer !=
+ ((unsigned long)start_addr+FUN_POINTER_LOWTAG)) {
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wf2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
default:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wf3: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
case LIST_POINTER_LOWTAG:
- if ((unsigned)pointer !=
- ((unsigned)start_addr+LIST_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if ((unsigned long)pointer !=
+ ((unsigned long)start_addr+LIST_POINTER_LOWTAG)) {
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wl1: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
/* Is it a plausible cons? */
- if ((is_lisp_pointer(start_addr[0])
- || (fixnump(start_addr[0]))
- || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
- && (is_lisp_pointer(start_addr[1])
- || (fixnump(start_addr[1]))
- || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
+ if ((is_lisp_pointer(start_addr[0]) ||
+ is_lisp_immediate(start_addr[0])) &&
+ (is_lisp_pointer(start_addr[1]) ||
+ is_lisp_immediate(start_addr[1])))
break;
else {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wl2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
case INSTANCE_POINTER_LOWTAG:
- if ((unsigned)pointer !=
- ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if ((unsigned long)pointer !=
+ ((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) {
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wi1: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wi2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
case OTHER_POINTER_LOWTAG:
- if ((unsigned)pointer !=
- ((int)start_addr+OTHER_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if ((unsigned long)pointer !=
+ ((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) {
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wo1: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
/* Is it plausible? Not a cons. XXX should check the headers. */
if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wo2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
switch (widetag_of(start_addr[0])) {
#if N_WORD_BITS == 64
case SINGLE_FLOAT_WIDETAG:
#endif
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*Wo3: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
/* only pointed to by function pointers? */
case CLOSURE_HEADER_WIDETAG:
case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*Wo4: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
case INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*Wo5: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
/* the valid other immediate pointer objects */
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
+#ifdef LUTEX_WIDETAG
+ case LUTEX_WIDETAG:
+#endif
break;
default:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wo6: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
default:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*W?: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
return 1;
}
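/* A sketch of the core consistency test applied throughout the switch
 * above: a tagged pointer is only credible if removing its lowtag lands
 * exactly on the object start located by the space search. */
static int
tag_matches_object_start(unsigned long pointer,
                         unsigned long start_addr,
                         unsigned long lowtag)
{
    return pointer == start_addr + lowtag;
}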
+/* Used by the debugger to validate possibly bogus pointers before
+ * calling MAKE-LISP-OBJ on them.
+ *
+ * FIXME: We would like to make this perfect, because if the debugger
+ * constructs a reference to a bogus lisp object, and it ends up in a
+ * location scavenged by the GC, all hell breaks loose.
+ *
+ * Whereas possibly_valid_dynamic_space_pointer has to be conservative
+ * and return true for all valid pointers, this could actually be eager
+ * and lie about a few pointers without bad results... but that should
+ * be reflected in the name.
+ */
+int
+valid_lisp_pointer_p(lispobj *pointer)
+{
+ lispobj *start;
+ if (((start=search_dynamic_space(pointer))!=NULL) ||
+ ((start=search_static_space(pointer))!=NULL) ||
+ ((start=search_read_only_space(pointer))!=NULL))
+ return looks_like_valid_lisp_pointer_p(pointer, start);
+ else
+ return 0;
+}
+
+/* Is there any possibility that pointer is a valid Lisp object
+ * reference, and/or something else (e.g. subroutine call return
+ * address) which should prevent us from moving the referred-to thing?
+ * This is called from preserve_pointers() */
+static int
+possibly_valid_dynamic_space_pointer(lispobj *pointer)
+{
+ lispobj *start_addr;
+
+ /* Find the object start address. */
+ if ((start_addr = search_dynamic_space(pointer)) == NULL) {
+ return 0;
+ }
+
+ return looks_like_valid_lisp_pointer_p(pointer, start_addr);
+}
+
/* Adjust large bignum and vector objects. This will adjust the
* allocated region if the size has shrunk, and move unboxed objects
* into unboxed pages. The pages are not promoted here, and the
static void
maybe_adjust_large_object(lispobj *where)
{
- long first_page;
+ page_index_t first_page;
+ page_index_t next_page;
long nwords;
- long remaining_bytes;
- long next_page;
- long bytes_freed;
- long old_bytes_used;
+ unsigned long remaining_bytes;
+ unsigned long bytes_freed;
+ unsigned long old_bytes_used;
int boxed;
* but let's do it for them all (they'll probably be written
* anyway?). */
- gc_assert(page_table[first_page].first_object_offset == 0);
+ gc_assert(page_table[first_page].region_start_offset == 0);
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
+ gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset ==
- -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].region_start_offset ==
+ npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
page_table[next_page].allocated = boxed;
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
+ (page_table[next_page].region_start_offset ==
+ npage_bytes(next_page - first_page))) {
/* It checks out OK, free the page. We don't need to bother zeroing
* pages as this should have been done before shrinking the
* object. These pages shouldn't be write protected as they
*
* It is also assumed that the current gc_alloc() region has been
* flushed and the tables updated. */
+
static void
preserve_pointer(void *addr)
{
- long addr_page_index = find_page_index(addr);
- long first_page;
- long i;
- unsigned region_allocation;
+ page_index_t addr_page_index = find_page_index(addr);
+ page_index_t first_page;
+ page_index_t i;
+ unsigned int region_allocation;
/* quick check 1: Address is quite likely to have been invalid. */
if ((addr_page_index == -1)
- || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ || page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
|| (page_table[addr_page_index].gen != from_space)
/* Skip if already marked dont_move. */
/* quick check 2: Check the offset within the page.
*
*/
- if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
+ if (((unsigned long)addr & (PAGE_BYTES - 1)) >
+ page_table[addr_page_index].bytes_used)
return;
/* Filter out anything which can't be a pointer to a Lisp object
* expensive but important, since it vastly reduces the
* probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
- if (!(possibly_valid_dynamic_space_pointer(addr)))
+ if (!(code_page_p(addr_page_index)
+ || (is_lisp_pointer((lispobj)addr) &&
+ possibly_valid_dynamic_space_pointer(addr))))
return;
/* Find the beginning of the region. Note that there may be
#if 0
/* I think this'd work just as well, but without the assertions.
* -dan 2004.01.01 */
- first_page=
- find_page_index(page_address(addr_page_index)+
- page_table[addr_page_index].first_object_offset);
+ first_page = find_page_index(page_region_start(addr_page_index));
#else
first_page = addr_page_index;
- while (page_table[first_page].first_object_offset != 0) {
+ while (page_table[first_page].region_start_offset != 0) {
--first_page;
/* Do some checks. */
gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
* free area in which case it's ignored here. Note it gets
* through the valid pointer test above because the tail looks
* like conses. */
- if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ if (page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
- || (((unsigned)addr & (PAGE_BYTES - 1))
+ || (((unsigned long)addr & (PAGE_BYTES - 1))
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
"weird? ignore ptr 0x%x to freed area of large object\n",
/* Check whether this is the last page in this contiguous block.. */
if ((page_table[i].bytes_used < PAGE_BYTES)
/* ..or it is PAGE_BYTES and is the last in the block */
- || (page_table[i+1].allocated == FREE_PAGE_FLAG)
+ || page_free_p(i+1)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
- || (page_table[i+1].first_object_offset == 0))
+ || (page_table[i+1].region_start_offset == 0))
break;
}
/* Check that the page is now static. */
gc_assert(page_table[addr_page_index].dont_move != 0);
}
+
+#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+
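/* A sketch of "quick check 2" in preserve_pointer() above, assuming a
 * power-of-two page size. Masking with (page size - 1) yields the
 * address's offset within its page; if that offset is beyond the page's
 * used bytes, the candidate falls in the free tail of the page and
 * cannot point at a live object. SKETCH_PAGE_BYTES is illustrative. */
#define SKETCH_PAGE_BYTES 4096UL

static int
in_free_tail_of_page(unsigned long addr, unsigned long bytes_used)
{
    return (addr & (SKETCH_PAGE_BYTES - 1)) > bytes_used;
}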
\f
/* If the given page is not write-protected, then scan it for pointers
* to younger generations or the top temp. generation, if no
*
* We return 1 if the page was write-protected, else 0. */
static int
-update_page_write_prot(long page)
+update_page_write_prot(page_index_t page)
{
- int gen = page_table[page].gen;
+ generation_index_t gen = page_table[page].gen;
long j;
int wp_it = 1;
void **page_addr = (void **)page_address(page);
long num_words = page_table[page].bytes_used / N_WORD_BYTES;
/* Shouldn't be a free page. */
- gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
+ gc_assert(page_allocated_p(page));
gc_assert(page_table[page].bytes_used != 0);
/* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
+ /* FIXME: What's the reason for not write-protecting pinned pages? */
|| page_table[page].dont_move
- || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
+ || page_unboxed_p(page))
return (0);
/* Scan the page for pointers to younger generations or the
for (j = 0; j < num_words; j++) {
void *ptr = *(page_addr+j);
- long index = find_page_index(ptr);
+ page_index_t index = find_page_index(ptr);
/* Check that it's in the dynamic space */
if (index != -1)
if (/* Does it point to a younger or the temp. generation? */
- ((page_table[index].allocated != FREE_PAGE_FLAG)
+ (page_allocated_p(index)
&& (page_table[index].bytes_used != 0)
&& ((page_table[index].gen < gen)
- || (page_table[index].gen == NUM_GENERATIONS)))
+ || (page_table[index].gen == SCRATCH_GENERATION)))
/* Or does it point within a current gc_alloc() region? */
|| ((boxed_region.start_addr <= ptr)
return (wp_it);
}
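/* A sketch of the decision rule in update_page_write_prot(), reduced to
 * its essentials with a hypothetical gen_of() callback (the real code
 * also checks the scratch generation and open allocation regions): a
 * page may be write-protected only if no word on it refers to a younger
 * generation. */
static int
page_protectable_p(unsigned long *words, long nwords, int page_gen,
                   int (*gen_of)(unsigned long word)) /* -1 if not a heap ptr */
{
    long j;
    for (j = 0; j < nwords; j++) {
        int g = gen_of(words[j]);
        if (g >= 0 && g < page_gen)
            return 0;          /* younger data: page must stay writable */
    }
    return 1;
}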
-/* Scavenge a generation.
- *
- * This will not resolve all pointers when generation is the new
- * space, as new objects may be added which are not checked here - use
- * scavenge_newspace generation.
+/* Scavenge all generations from FROM to TO, inclusive, except for
+ * new_space which needs special handling, as new objects may be
+ * added which are not checked here - use scavenge_newspace_generation.
*
* Write-protected pages should not have any pointers to the
* from_space so do not need scavenging; thus write-protected pages are
* pointers as the objects contain a link to the next and are written
* if a weak pointer is scavenged. Still it's a useful check. */
static void
-scavenge_generation(int generation)
+scavenge_generations(generation_index_t from, generation_index_t to)
{
- long i;
+ page_index_t i;
int num_wp = 0;
#define SC_GEN_CK 0
#if SC_GEN_CK
/* Clear the write_protected_cleared flags on all pages. */
- for (i = 0; i < NUM_PAGES; i++)
+ for (i = 0; i < page_table_pages; i++)
page_table[i].write_protected_cleared = 0;
#endif
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ generation_index_t generation = page_table[i].gen;
+ if (page_boxed_p(i)
&& (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)) {
- long last_page,j;
+ && (generation != new_space)
+ && (generation >= from)
+ && (generation <= to)) {
+ page_index_t last_page,j;
int write_protected=1;
/* This should be the start of a region */
- gc_assert(page_table[i].first_object_offset == 0);
+ gc_assert(page_table[i].region_start_offset == 0);
/* Now work forward until the end of the region */
for (last_page = i; ; last_page++) {
write_protected && page_table[last_page].write_protected;
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
+ || (page_table[last_page+1].region_start_offset == 0))
break;
}
if (!write_protected) {
scavenge(page_address(i),
- (page_table[last_page].bytes_used +
- (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+ ((unsigned long)(page_table[last_page].bytes_used
+ + npage_bytes(last_page-i)))
+ /N_WORD_BYTES);
/* Now scan the pages and write protect those that
* don't have pointers to younger generations. */
num_wp += update_page_write_prot(j);
}
}
+ if ((gencgc_verbose > 1) && (num_wp != 0)) {
+ FSHOW((stderr,
+ "/write protected %d pages within generation %d\n",
+ num_wp, generation));
+ }
}
i = last_page;
}
}
- if ((gencgc_verbose > 1) && (num_wp != 0)) {
- FSHOW((stderr,
- "/write protected %d pages within generation %d\n",
- num_wp, generation));
- }
#if SC_GEN_CK
/* Check that none of the write_protected pages in this generation
* have been written to. */
- for (i = 0; i < NUM_PAGES; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ for (i = 0; i < page_table_pages; i++) {
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)) {
FSHOW((stderr, "/scavenge_generation() %d\n", generation));
FSHOW((stderr,
- "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
+ "/page bytes_used=%d region_start_offset=%lu dont_move=%d\n",
page_table[i].bytes_used,
- page_table[i].first_object_offset,
+ page_table[i].region_start_offset,
page_table[i].dont_move));
- lose("write to protected page %d in scavenge_generation()", i);
+ lose("write to protected page %d in scavenge_generation()\n", i);
}
}
#endif
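/* A sketch of the contiguous-block walk used by scavenge_generations()
 * above, with an illustrative page descriptor (the page-type check is
 * omitted): a block extends while pages are full, in the same
 * generation, and the following page does not start a new region. */
struct page_sketch {
    unsigned long bytes_used;
    unsigned long region_start_offset;
    int gen;
};

static long
block_last_page(struct page_sketch *pages, long first, long npages,
                unsigned long page_bytes, int gen)
{
    long last;
    for (last = first; last + 1 < npages; last++) {
        if (pages[last].bytes_used < page_bytes   /* partially used page */
            || pages[last + 1].bytes_used == 0    /* next page free */
            || pages[last + 1].gen != gen         /* different generation */
            || pages[last + 1].region_start_offset == 0) /* new region */
            break;
    }
    return last;
}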
* complete the job as new objects may be added to the generation in
* the process which are not scavenged. */
static void
-scavenge_newspace_generation_one_scan(int generation)
+scavenge_newspace_generation_one_scan(generation_index_t generation)
{
- long i;
+ page_index_t i;
FSHOW((stderr,
"/starting one full scan of newspace generation %d\n",
generation));
for (i = 0; i < last_free_page; i++) {
/* Note that this skips over open regions when it encounters them. */
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ if (page_boxed_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& ((page_table[i].write_protected == 0)
/* (This may be redundant as write_protected is now
* cleared before promotion.) */
|| (page_table[i].dont_move == 1))) {
- long last_page;
+ page_index_t last_page;
int all_wp=1;
- /* The scavenge will start at the first_object_offset of page i.
+ /* The scavenge will start at the region_start_offset of
+ * page i.
*
* We need to find the full extent of this contiguous
* block in case objects span pages.
* contiguous block */
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
+ || (page_table[last_page+1].region_start_offset == 0))
break;
}
/* Do a limited check for write-protected pages. */
if (!all_wp) {
- long size;
-
- size = (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES
- - page_table[i].first_object_offset)/N_WORD_BYTES;
+ long nwords = (((unsigned long)
+ (page_table[last_page].bytes_used
+ + npage_bytes(last_page-i)
+ + page_table[i].region_start_offset))
+ / N_WORD_BYTES);
new_areas_ignore_page = last_page;
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
+ scavenge(page_region_start(i), nwords);
}
i = last_page;
/* Do a complete scavenge of the newspace generation. */
static void
-scavenge_newspace_generation(int generation)
+scavenge_newspace_generation(generation_index_t generation)
{
long i;
/* Record all new areas now. */
record_new_objects = 2;
+ /* Give weak hash tables a chance to make other objects live.
+ * FIXME: The algorithm implemented here for weak hash table GCing
+ * is O(W^2+N) as Bruno Haible warns in
+ * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
+ * see "Implementation 2". */
+ scav_weak_hash_tables();
+
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
/* New areas of objects allocated have been lost so need to do a
* full scan to be sure! If this becomes a problem try
* increasing NUM_NEW_AREAS. */
- if (gencgc_verbose)
+ if (gencgc_verbose) {
SHOW("new_areas overflow, doing full scavenge");
+ }
- /* Don't need to record new areas that get scavenge anyway
- * during scavenge_newspace_generation_one_scan. */
+ /* Don't need to record new areas that get scavenged
+ * anyway during scavenge_newspace_generation_one_scan. */
record_new_objects = 1;
scavenge_newspace_generation_one_scan(generation);
/* Record all new areas now. */
record_new_objects = 2;
+ scav_weak_hash_tables();
+
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
/* Work through previous_new_areas. */
for (i = 0; i < previous_new_areas_index; i++) {
- long page = (*previous_new_areas)[i].page;
- long offset = (*previous_new_areas)[i].offset;
- long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+ page_index_t page = (*previous_new_areas)[i].page;
+ size_t offset = (*previous_new_areas)[i].offset;
+ size_t size = (*previous_new_areas)[i].size / N_WORD_BYTES;
gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
scavenge(page_address(page)+offset, size);
}
+ scav_weak_hash_tables();
+
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
}
#if SC_NS_GEN_CK
/* Check that none of the write_protected pages in this generation
* have been written to. */
- for (i = 0; i < NUM_PAGES; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ for (i = 0; i < page_table_pages; i++) {
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)
&& (page_table[i].dont_move == 0)) {
- lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
+ lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d\n",
i, generation, page_table[i].dont_move);
}
}
static void
unprotect_oldspace(void)
{
- long i;
+ page_index_t i;
+ void *region_addr = 0;
+ void *page_addr = 0;
+ unsigned long region_bytes = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == from_space)) {
- void *page_start;
-
- page_start = (void *)page_address(i);
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[i].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[i].write_protected = 0;
+ page_addr = page_address(i);
+ if (!region_addr) {
+ /* First region. */
+ region_addr = page_addr;
+ region_bytes = PAGE_BYTES;
+ } else if (region_addr + region_bytes == page_addr) {
+ /* Region continues. */
+ region_bytes += PAGE_BYTES;
+ } else {
+ /* Unprotect previous region. */
+ os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+ /* First page in new region. */
+ region_addr = page_addr;
+ region_bytes = PAGE_BYTES;
+ }
}
}
}
+ if (region_addr) {
+ /* Unprotect last region. */
+ os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+ }
}
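/* A sketch of the batching idea in unprotect_oldspace() above: adjacent
 * pages are coalesced into runs so that one protection-change call
 * covers many pages instead of one call per page. change_run() is a
 * hypothetical stand-in for os_protect(). */
static void
unprotect_in_runs(char **page_addrs, long npages, unsigned long page_bytes,
                  void (*change_run)(char *addr, unsigned long bytes))
{
    char *run_addr = 0;
    unsigned long run_bytes = 0;
    long i;

    for (i = 0; i < npages; i++) {
        if (run_bytes && run_addr + run_bytes == page_addrs[i]) {
            run_bytes += page_bytes;          /* extend the current run */
        } else {
            if (run_bytes)
                change_run(run_addr, run_bytes);
            run_addr = page_addrs[i];         /* start a new run */
            run_bytes = page_bytes;
        }
    }
    if (run_bytes)
        change_run(run_addr, run_bytes);      /* flush the final run */
}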
/* Work through all the pages and free any in from_space. This
* assumes that all objects have been copied or promoted to an older
* generation. Bytes_allocated and the generation bytes_allocated
* counter are updated. The number of bytes freed is returned. */
-static long
+static unsigned long
free_oldspace(void)
{
- long bytes_freed = 0;
- long first_page, last_page;
+ unsigned long bytes_freed = 0;
+ page_index_t first_page, last_page;
first_page = 0;
do {
/* Find a first page for the next region of pages. */
while ((first_page < last_free_page)
- && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
+ && (page_free_p(first_page)
|| (page_table[first_page].bytes_used == 0)
|| (page_table[first_page].gen != from_space)))
first_page++;
page_table[last_page].bytes_used;
page_table[last_page].allocated = FREE_PAGE_FLAG;
page_table[last_page].bytes_used = 0;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- {
- void *page_start = (void *)page_address(last_page);
-
- if (page_table[last_page].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[last_page].write_protected = 0;
- }
- }
+ /* Should already be unprotected by unprotect_oldspace(). */
+ gc_assert(!page_table[last_page].write_protected);
last_page++;
}
while ((last_page < last_free_page)
- && (page_table[last_page].allocated != FREE_PAGE_FLAG)
+ && page_allocated_p(last_page)
&& (page_table[last_page].bytes_used != 0)
&& (page_table[last_page].gen == from_space));
- /* Zero pages from first_page to (last_page-1).
- *
- * FIXME: Why not use os_zero(..) function instead of
- * hand-coding this again? (Check other gencgc_unmap_zero
- * stuff too. */
- if (gencgc_unmap_zero) {
- void *page_start, *addr;
-
- page_start = (void *)page_address(first_page);
-
- os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
- addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
- if (addr == NULL || addr != page_start) {
- lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
- addr);
- }
- } else {
- long *page_start;
-
- page_start = (long *)page_address(first_page);
- memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
- }
-
+#ifdef READ_PROTECT_FREE_PAGES
+ os_protect(page_address(first_page),
+ npage_bytes(last_page-first_page),
+ OS_VM_PROT_NONE);
+#endif
first_page = last_page;
-
} while (first_page < last_free_page);
bytes_allocated -= bytes_freed;
print_ptr(lispobj *addr)
{
/* If addr is in the dynamic space then print the page information. */
- long pi1 = find_page_index((void*)addr);
+ page_index_t pi1 = find_page_index((void*)addr);
if (pi1 != -1)
- fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
+ fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %lu dont_move %d\n",
(unsigned long) addr,
pi1,
page_table[pi1].allocated,
page_table[pi1].gen,
page_table[pi1].bytes_used,
- page_table[pi1].first_object_offset,
+ page_table[pi1].region_start_offset,
page_table[pi1].dont_move);
fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
*(addr-4),
}
#endif
-extern long undefined_tramp;
-
static void
verify_space(lispobj *start, size_t words)
{
int is_in_dynamic_space = (find_page_index((void*)start) != -1);
int is_in_readonly_space =
- (READ_ONLY_SPACE_START <= (unsigned)start &&
- (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
+ (READ_ONLY_SPACE_START <= (unsigned long)start &&
+ (unsigned long)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
while (words > 0) {
size_t count = 1;
lispobj thing = *(lispobj*)start;
if (is_lisp_pointer(thing)) {
- long page_index = find_page_index((void*)thing);
+ page_index_t page_index = find_page_index((void*)thing);
long to_readonly_space =
(READ_ONLY_SPACE_START <= thing &&
thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
if (page_index != -1) {
/* If it's within the dynamic space it should point to a used
* page. XX Could check the offset too. */
- if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(page_index)
&& (page_table[page_index].bytes_used == 0))
- lose ("Ptr %x @ %x sees free page.", thing, start);
+ lose ("Ptr %x @ %x sees free page.\n", thing, start);
/* Check that it doesn't point to a forwarding pointer! */
if (*((lispobj *)native_pointer(thing)) == 0x01) {
- lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
+ lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start);
}
/* Check that it's not in the RO space as it would then be a
* pointer from the RO to the dynamic space. */
if (is_in_readonly_space) {
- lose("ptr to dynamic space %x from RO space %x",
+ lose("ptr to dynamic space %x from RO space %x\n",
thing, start);
}
/* Does it point to a plausible object? This check slows
* dynamically. */
/*
if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
- lose("ptr %x to invalid object %x", thing, start);
+ lose("ptr %x to invalid object %x\n", thing, start);
}
*/
} else {
/* Verify that it points to another valid space. */
- if (!to_readonly_space && !to_static_space
- && (thing != (unsigned)&undefined_tramp)) {
- lose("Ptr %x @ %x sees junk.", thing, start);
+ if (!to_readonly_space && !to_static_space) {
+ lose("Ptr %x @ %x sees junk.\n", thing, start);
}
}
} else {
case SINGLE_FLOAT_WIDETAG:
#endif
case UNBOUND_MARKER_WIDETAG:
- case INSTANCE_HEADER_WIDETAG:
case FDEFN_WIDETAG:
count = 1;
break;
+ case INSTANCE_HEADER_WIDETAG:
+ {
+ lispobj nuntagged;
+ long ntotal = HeaderValue(thing);
+ lispobj layout = ((struct instance *)start)->slots[0];
+ if (!layout) {
+ count = 1;
+ break;
+ }
+ nuntagged = ((struct layout *)
+ native_pointer(layout))->n_untagged_slots;
+ verify_space(start + 1,
+ ntotal - fixnum_value(nuntagged));
+ count = ntotal + 1;
+ break;
+ }
case CODE_HEADER_WIDETAG:
{
lispobj object = *start;
while (fheaderl != NIL) {
fheaderp =
(struct simple_fun *) native_pointer(fheaderl);
- gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+ gc_assert(widetag_of(fheaderp->header) ==
+ SIMPLE_FUN_HEADER_WIDETAG);
verify_space(&fheaderp->name, 1);
verify_space(&fheaderp->arglist, 1);
verify_space(&fheaderp->type, 1);
#endif
case SAP_WIDETAG:
case WEAK_POINTER_WIDETAG:
+#ifdef LUTEX_WIDETAG
+ case LUTEX_WIDETAG:
+#endif
+#ifdef NO_TLS_VALUE_MARKER_WIDETAG
+ case NO_TLS_VALUE_MARKER_WIDETAG:
+#endif
count = (sizetab[widetag_of(*start)])(start);
break;
default:
- gc_abort();
+ lose("Unhandled widetag 0x%x at 0x%x\n",
+ widetag_of(*start), start);
}
}
}
struct thread *th;
for_each_thread(th) {
long binding_stack_size =
- (lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
+ (lispobj*)get_binding_stack_pointer(th)
- (lispobj*)th->binding_stack_start;
verify_space(th->binding_stack_start, binding_stack_size);
}
}
static void
-verify_generation(int generation)
+verify_generation(generation_index_t generation)
{
- int i;
+ page_index_t i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
- long last_page;
+ page_index_t last_page;
int region_allocation = page_table[i].allocated;
/* This should be the start of a contiguous block */
- gc_assert(page_table[i].first_object_offset == 0);
+ gc_assert(page_table[i].region_start_offset == 0);
/* Need to find the full extent of this contiguous block in case
objects span pages. */
|| (page_table[last_page+1].allocated != region_allocation)
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
+ || (page_table[last_page+1].region_start_offset == 0))
break;
- verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+ verify_space(page_address(i),
+ ((unsigned long)
+ (page_table[last_page].bytes_used
+ + npage_bytes(last_page-i)))
+ / N_WORD_BYTES);
i = last_page;
}
}
static void
verify_zero_fill(void)
{
- long page;
+ page_index_t page;
for (page = 0; page < last_free_page; page++) {
- if (page_table[page].allocated == FREE_PAGE_FLAG) {
+ if (page_free_p(page)) {
/* The whole page should be zero filled. */
long *start_addr = (long *)page_address(page);
long size = 1024;
long i;
for (i = 0; i < size; i++) {
if (start_addr[i] != 0) {
- lose("free page not zero at %x", start_addr + i);
+ lose("free page not zero at %x\n", start_addr + i);
}
}
} else {
long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
- long *start_addr = (long *)((unsigned)page_address(page)
+ long *start_addr = (long *)((unsigned long)page_address(page)
+ page_table[page].bytes_used);
long size = free_bytes / N_WORD_BYTES;
long i;
for (i = 0; i < size; i++) {
if (start_addr[i] != 0) {
- lose("free region not zero at %x", start_addr + i);
+ lose("free region not zero at %x\n", start_addr + i);
}
}
}
static void
verify_dynamic_space(void)
{
- long i;
+ generation_index_t i;
- for (i = 0; i < NUM_GENERATIONS; i++)
+ for (i = 0; i <= HIGHEST_NORMAL_GENERATION; i++)
verify_generation(i);
if (gencgc_enable_verify_zero_fill)
\f
/* Write-protect all the dynamic boxed pages in the given generation. */
static void
-write_protect_generation_pages(int generation)
+write_protect_generation_pages(generation_index_t generation)
{
- long i;
+ page_index_t start;
- gc_assert(generation < NUM_GENERATIONS);
+ gc_assert(generation < SCRATCH_GENERATION);
- for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated == BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && !page_table[i].dont_move
- && (page_table[i].gen == generation)) {
+ for (start = 0; start < last_free_page; start++) {
+ if (protect_page_p(start, generation)) {
void *page_start;
+ page_index_t last;
+
+ /* Note the page as protected in the page tables. */
+ page_table[start].write_protected = 1;
- page_start = (void *)page_address(i);
+ for (last = start + 1; last < last_free_page; last++) {
+ if (!protect_page_p(last, generation))
+ break;
+ page_table[last].write_protected = 1;
+ }
+
+ page_start = (void *)page_address(start);
os_protect(page_start,
- PAGE_BYTES,
+ npage_bytes(last - start),
OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
- /* Note the page as protected in the page tables. */
- page_table[i].write_protected = 1;
+ start = last;
}
+ }
if (gencgc_verbose > 1) {
FSHOW((stderr,
}
}
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+
+static void
+scavenge_control_stack()
+{
+ unsigned long control_stack_size;
+
+ /* This is going to be a big problem when we try to port threads
+ * to PPC... CLH */
+ struct thread *th = arch_os_get_current_thread();
+ lispobj *control_stack =
+ (lispobj *)(th->control_stack_start);
+
+ control_stack_size = current_control_stack_pointer - control_stack;
+ scavenge(control_stack, control_stack_size);
+}
+
+/* Scavenging Interrupt Contexts */
+
+static int boxed_registers[] = BOXED_REGISTERS;
+
+static void
+scavenge_interrupt_context(os_context_t * context)
+{
+ int i;
+
+#ifdef reg_LIP
+ unsigned long lip;
+ unsigned long lip_offset;
+ int lip_register_pair;
+#endif
+ unsigned long pc_code_offset;
+
+#ifdef ARCH_HAS_LINK_REGISTER
+ unsigned long lr_code_offset;
+#endif
+#ifdef ARCH_HAS_NPC_REGISTER
+ unsigned long npc_code_offset;
+#endif
+
+#ifdef reg_LIP
+ /* Find the LIP's register pair and calculate its offset */
+ /* before we scavenge the context. */
+
+ /*
+ * I (RLT) think this is trying to find the boxed register that is
+ * closest to the LIP address, without going past it. Usually, it's
+ * reg_CODE or reg_LRA. But sometimes, nothing can be found.
+ */
+ lip = *os_context_register_addr(context, reg_LIP);
+ lip_offset = 0x7FFFFFFF;
+ lip_register_pair = -1;
+ for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
+ unsigned long reg;
+ long offset;
+ int index;
+
+ index = boxed_registers[i];
+ reg = *os_context_register_addr(context, index);
+ if ((reg & ~((1L<<N_LOWTAG_BITS)-1)) <= lip) {
+ offset = lip - reg;
+ if (offset < lip_offset) {
+ lip_offset = offset;
+ lip_register_pair = index;
+ }
+ }
+ }
+#endif /* reg_LIP */
+
+ /* Compute the PC's offset from the start of the CODE */
+ /* register. */
+ pc_code_offset = *os_context_pc_addr(context)
+ - *os_context_register_addr(context, reg_CODE);
+#ifdef ARCH_HAS_NPC_REGISTER
+ npc_code_offset = *os_context_npc_addr(context)
+ - *os_context_register_addr(context, reg_CODE);
+#endif /* ARCH_HAS_NPC_REGISTER */
+
+#ifdef ARCH_HAS_LINK_REGISTER
+ lr_code_offset =
+ *os_context_lr_addr(context) -
+ *os_context_register_addr(context, reg_CODE);
+#endif
+
+ /* Scavenge all boxed registers in the context. */
+ for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
+ int index;
+ lispobj foo;
+
+ index = boxed_registers[i];
+ foo = *os_context_register_addr(context, index);
+ scavenge(&foo, 1);
+ *os_context_register_addr(context, index) = foo;
+
+ scavenge((lispobj*) &(*os_context_register_addr(context, index)), 1);
+ }
+
+#ifdef reg_LIP
+ /* Fix the LIP */
+
+ /*
+ * But what happens if lip_register_pair is -1?
+ * *os_context_register_addr on Solaris (see
+ * solaris_register_address in solaris-os.c) will return
+ * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
+ * that what we really want? My guess is that that is not what we
+ * want, so if lip_register_pair is -1, we don't touch reg_LIP at
+ * all. But maybe it doesn't really matter if LIP is trashed?
+ */
+ if (lip_register_pair >= 0) {
+ *os_context_register_addr(context, reg_LIP) =
+ *os_context_register_addr(context, lip_register_pair)
+ + lip_offset;
+ }
+#endif /* reg_LIP */
+
+ /* Fix the PC if it was in from space */
+ if (from_space_p(*os_context_pc_addr(context)))
+ *os_context_pc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + pc_code_offset;
+
+#ifdef ARCH_HAS_LINK_REGISTER
+ /* Fix the LR ditto; important if we're being called from
+ * an assembly routine that expects to return using blr, otherwise
+ * harmless */
+ if (from_space_p(*os_context_lr_addr(context)))
+ *os_context_lr_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + lr_code_offset;
+#endif
+
+#ifdef ARCH_HAS_NPC_REGISTER
+ if (from_space_p(*os_context_npc_addr(context)))
+ *os_context_npc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + npc_code_offset;
+#endif /* ARCH_HAS_NPC_REGISTER */
+}
+
+void
+scavenge_interrupt_contexts(void)
+{
+ int i, index;
+ os_context_t *context;
+
+ struct thread *th=arch_os_get_current_thread();
+
+ index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0));
+
+#if defined(DEBUG_PRINT_CONTEXT_INDEX)
+ printf("Number of active contexts: %d\n", index);
+#endif
+
+ for (i = 0; i < index; i++) {
+ context = th->interrupt_contexts[i];
+ scavenge_interrupt_context(context);
+ }
+}
+
+#endif
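/* A sketch of the rebasing rule used for the PC/LR/NPC registers above:
 * each register's offset from reg_CODE is captured before the context is
 * scavenged, then re-added to the (possibly moved) code pointer
 * afterwards, so the register keeps pointing at the same instruction. */
static unsigned long
rebase_context_register(unsigned long reg_before, unsigned long code_before,
                        unsigned long code_after)
{
    unsigned long offset = reg_before - code_before; /* saved pre-scavenge */
    return code_after + offset;
}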
+
+#if defined(LISP_FEATURE_SB_THREAD)
+static void
+preserve_context_registers (os_context_t *c)
+{
+ void **ptr;
+ /* On Darwin the signal context isn't a contiguous block of memory,
+ * so just calling preserve_pointer() on its contents won't be sufficient.
+ */
+#if defined(LISP_FEATURE_DARWIN)
+#if defined LISP_FEATURE_X86
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EDX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EBX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
+ preserve_pointer((void*)*os_context_pc_addr(c));
+#elif defined LISP_FEATURE_X86_64
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
+ preserve_pointer((void*)*os_context_pc_addr(c));
+#else
+ #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
+#endif
+#endif
+ for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
+ preserve_pointer(*ptr);
+ }
+}
+#endif
+
/* Garbage collect a generation. If raise is 0 then the remains of the
* generation are not raised to the next generation. */
static void
-garbage_collect_generation(int generation, int raise)
+garbage_collect_generation(generation_index_t generation, int raise)
{
unsigned long bytes_freed;
- unsigned long i;
+ page_index_t i;
unsigned long static_space_size;
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
struct thread *th;
- gc_assert(generation <= (NUM_GENERATIONS-1));
+#endif
+ gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
/* The oldest generation can't be raised. */
- gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));
+ gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));
+
+ /* Check if weak hash tables were processed in the previous GC. */
+ gc_assert(weak_hash_tables == NULL);
/* Initialize the weak pointer list. */
weak_pointers = NULL;
+#ifdef LUTEX_WIDETAG
+ unmark_lutexes(generation);
+#endif
+
/* When a generation is not being raised it is transported to a
* temporary generation (SCRATCH_GENERATION), and lowered when
* done. Set up this new generation. There should be no pages
* allocated to it yet. */
if (!raise) {
- gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
+ gc_assert(generations[SCRATCH_GENERATION].bytes_allocated == 0);
}
/* Set the global src and dest. generations */
if (raise)
new_space = generation+1;
else
- new_space = NUM_GENERATIONS;
+ new_space = SCRATCH_GENERATION;
/* Change to a new space for allocation, resetting the alloc_start_page */
gc_alloc_generation = new_space;
/* we assume that none of the preceding applies to the thread that
* initiates GC. If you ever call GC from inside an altstack
* handler, you will lose. */
- for_each_thread(th) {
- void **ptr;
- void **esp=(void **)-1;
+
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+ /* And if we're saving a core, there's no point in being conservative. */
+ if (conservative_stack) {
+ for_each_thread(th) {
+ void **ptr;
+ void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
- long i,free;
- if(th==arch_os_get_current_thread()) {
- /* Somebody is going to burn in hell for this, but casting
- * it in two steps shuts gcc up about strict aliasing. */
- esp = (void **)((void *)&raise);
- } else {
- void **esp1;
- free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
- for(i=free-1;i>=0;i--) {
- os_context_t *c=th->interrupt_contexts[i];
- esp1 = (void **) *os_context_register_addr(c,reg_SP);
- if (esp1>=(void **)th->control_stack_start &&
- esp1<(void **)th->control_stack_end) {
- if(esp1<esp) esp=esp1;
- for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
- preserve_pointer(*ptr);
+ long i,free;
+ if(th==arch_os_get_current_thread()) {
+ /* Somebody is going to burn in hell for this, but casting
+ * it in two steps shuts gcc up about strict aliasing. */
+ esp = (void **)((void *)&raise);
+ } else {
+ void **esp1;
+ free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+ for(i=free-1;i>=0;i--) {
+ os_context_t *c=th->interrupt_contexts[i];
+ esp1 = (void **) *os_context_register_addr(c,reg_SP);
+ if (esp1>=(void **)th->control_stack_start &&
+ esp1<(void **)th->control_stack_end) {
+ if(esp1<esp) esp=esp1;
+ preserve_context_registers(c);
}
}
}
- }
#else
- esp = (void **)((void *)&raise);
+ esp = (void **)((void *)&raise);
#endif
- for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
- preserve_pointer(*ptr);
+ for (ptr = ((void **)th->control_stack_end)-1; ptr >= esp; ptr--) {
+ preserve_pointer(*ptr);
+ }
}
}
+#endif
-#ifdef QSHOW
+#if QSHOW
if (gencgc_verbose > 1) {
long num_dont_move_pages = count_dont_move_pages();
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
- num_dont_move_pages * PAGE_BYTES);
+ npage_bytes(num_dont_move_pages));
}
#endif
/* Scavenge all the rest of the roots. */
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+ /*
+ * If not x86, we need to scavenge the interrupt context(s) and the
+ * control stack.
+ */
+ scavenge_interrupt_contexts();
+ scavenge_control_stack();
+#endif
+
/* Scavenge the Lisp functions of the interrupt handlers, taking
* care to avoid SIG_DFL and SIG_IGN. */
for (i = 0; i < NSIG; i++) {
{
struct thread *th;
for_each_thread(th) {
- long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+ long len= (lispobj *)get_binding_stack_pointer(th) -
th->binding_stack_start;
scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
/* All generations but the generation being GCed need to be
* scavenged. The new_space generation needs special handling as
* objects may be moved in - it is handled separately below. */
- for (i = 0; i < NUM_GENERATIONS; i++) {
- if ((i != generation) && (i != new_space)) {
- scavenge_generation(i);
- }
- }
+ scavenge_generations(generation+1, PSEUDO_STATIC_GENERATION);
/* Finally scavenge the new_space generation. Keep going until no
* more objects are moved into the new generation */
bytes_allocated = bytes_allocated - old_bytes_allocated;
if (bytes_allocated != 0) {
- lose("Rescan of new_space allocated %d more bytes.",
+ lose("Rescan of new_space allocated %d more bytes.\n",
bytes_allocated);
}
}
#endif
+ scan_weak_hash_tables();
scan_weak_pointers();
/* Flush the current regions, updating the tables. */
if (!raise) {
for (i = 0; i < last_free_page; i++)
if ((page_table[i].bytes_used != 0)
- && (page_table[i].gen == NUM_GENERATIONS))
+ && (page_table[i].gen == SCRATCH_GENERATION))
page_table[i].gen = generation;
gc_assert(generations[generation].bytes_allocated == 0);
generations[generation].bytes_allocated =
- generations[NUM_GENERATIONS].bytes_allocated;
- generations[NUM_GENERATIONS].bytes_allocated = 0;
+ generations[SCRATCH_GENERATION].bytes_allocated;
+ generations[SCRATCH_GENERATION].bytes_allocated = 0;
}
/* Reset the alloc_start_page for generation. */
generations[generation].alloc_large_unboxed_start_page = 0;
if (generation >= verify_gens) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
SHOW("verifying");
+ }
verify_gc();
verify_dynamic_space();
}
generations[generation].num_gc = 0;
else
++generations[generation].num_gc;
+
+#ifdef LUTEX_WIDETAG
+ reap_lutexes(generation);
+ if (raise)
+ move_lutexes(generation, generation+1);
+#endif
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
long
-update_x86_dynamic_space_free_pointer(void)
+update_dynamic_space_free_pointer(void)
{
- long last_page = -1;
- long i;
+ page_index_t last_page = -1, i;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0))
+ if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
last_page = i;
last_free_page = last_page+1;
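+ /* (set_alloc_pointer hides a platform difference: x86oids keep
+ * the allocation pointer in the static symbol ALLOCATION_POINTER,
+ * while other ports keep it in dynamic_space_free_pointer.) */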
- SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
+ set_alloc_pointer((lispobj)(page_address(last_free_page)));
return 0; /* dummy value: return something ... */
}
+static void
+remap_free_pages (page_index_t from, page_index_t to)
+{
+ page_index_t first_page, last_page;
+
+ for (first_page = from; first_page <= to; first_page++) {
+ if (page_allocated_p(first_page) ||
+ (page_table[first_page].need_to_zero == 0)) {
+ continue;
+ }
+
+ last_page = first_page + 1;
+ while (page_free_p(last_page) &&
+ (last_page < to) &&
+ (page_table[last_page].need_to_zero == 1)) {
+ last_page++;
+ }
+
+ /* There's a mysterious Solaris/x86 problem with using mmap
+ * tricks for memory zeroing. See sbcl-devel thread
+ * "Re: patch: standalone executable redux".
+ */
+#if defined(LISP_FEATURE_SUNOS)
+ zero_pages(first_page, last_page-1);
+#else
+ zero_pages_with_mmap(first_page, last_page-1);
+#endif
+
+ first_page = last_page;
+ }
+}
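+
+/* remap_free_pages is called from collect_garbage() below as
+ * remap_free_pages(0, high_water_mark), handing the backing store
+ * of freed pages back to the OS after a sufficiently large GC. */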
+
+generation_index_t small_generation_limit = 1;
+
/* GC all generations newer than last_gen, raising the objects in each
* to the next older generation - we finish when all generations below
 * last_gen are empty. Then if last_gen is due for a GC, or if
 * last_gen==NUM_GENERATIONS (the scratch generation?  eh?) we GC that
 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
 *
* We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
* last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
-
void
-collect_garbage(unsigned last_gen)
+collect_garbage(generation_index_t last_gen)
{
- int gen = 0;
+ generation_index_t gen = 0, i;
int raise;
int gen_to_wp;
- long i;
+ /* The largest value of last_free_page seen since the time
+ * remap_free_pages was called. */
+ static page_index_t high_water_mark = 0;
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
- if (last_gen > NUM_GENERATIONS) {
+ gc_active_p = 1;
+
+ if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
FSHOW((stderr,
"/collect_garbage: last_gen = %d, doing a level 0 GC\n",
               last_gen));
        last_gen = 0;
    }
if (gencgc_verbose > 1)
- print_generation_stats(0);
+ print_generation_stats();
do {
        /* Collect the generation. */

        if (gen >= gencgc_oldest_gen_to_gc) {
            /* Never raise the oldest generation. */
            raise = 0;
        } else {
raise =
(gen < last_gen)
- || (generations[gen].num_gc >= generations[gen].trigger_age);
+ || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
}
        garbage_collect_generation(gen, raise);
if (gencgc_verbose > 1) {
FSHOW((stderr, "GC of generation %d finished:\n", gen));
- print_generation_stats(0);
+ print_generation_stats();
}
gen++;
&& raise
&& (generations[gen].bytes_allocated
> generations[gen].gc_trigger)
- && (gen_av_mem_age(gen)
- > generations[gen].min_av_mem_age))));
+ && (generation_average_age(gen)
+ > generations[gen].minimum_age_before_gc))));
/* Now if gen-1 was raised all generations before gen are empty.
     * If it wasn't raised then all generations before gen-1 are empty. */
    if (raise)
        gen_to_wp = gen;
    else
        gen_to_wp = gen - 1;

/* Check that they are all empty. */
for (i = 0; i < gen_to_wp; i++) {
if (generations[i].bytes_allocated)
- lose("trying to write-protect gen. %d when gen. %d nonempty",
+ lose("trying to write-protect gen. %d when gen. %d nonempty\n",
gen_to_wp, i);
}
write_protect_generation_pages(gen_to_wp);
gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
gc_alloc_generation = 0;
- update_x86_dynamic_space_free_pointer();
+ /* Save the high-water mark before updating last_free_page */
+ if (last_free_page > high_water_mark)
+ high_water_mark = last_free_page;
+
+ update_dynamic_space_free_pointer();
+
auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
if(gencgc_verbose)
fprintf(stderr,"Next gc when %ld bytes have been consed\n",
auto_gc_trigger);
+
+ /* If we did a big GC (arbitrarily defined as gen > 1), release memory
+ * back to the OS.
+ */
+ if (gen > small_generation_limit) {
+ if (last_free_page > high_water_mark)
+ high_water_mark = last_free_page;
+ remap_free_pages(0, high_water_mark);
+ high_water_mark = 0;
+ }
+
+ gc_active_p = 0;
+
SHOW("returning from collect_garbage");
}
void
gc_free_heap(void)
{
- long page;
+ page_index_t page;
- if (gencgc_verbose > 1)
+ if (gencgc_verbose > 1) {
SHOW("entering gc_free_heap");
+ }
- for (page = 0; page < NUM_PAGES; page++) {
+ for (page = 0; page < page_table_pages; page++) {
/* Skip free pages which should already be zero filled. */
- if (page_table[page].allocated != FREE_PAGE_FLAG) {
+ if (page_allocated_p(page)) {
void *page_start, *addr;
            /* Mark the page free. The other slots are assumed invalid
             * when it is a FREE_PAGE_FLAG and bytes_used is 0. */
page_table[page].allocated = FREE_PAGE_FLAG;
page_table[page].bytes_used = 0;
+#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
+ * about this change. */
/* Zero the page. */
page_start = (void *)page_address(page);
os_invalidate(page_start,PAGE_BYTES);
addr = os_validate(page_start,PAGE_BYTES);
if (addr == NULL || addr != page_start) {
- lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
+ lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x\n",
page_start,
addr);
}
+#else
+ page_table[page].write_protected = 0;
+#endif
} else if (gencgc_zero_check_during_free_heap) {
/* Double-check that the page is zero filled. */
- long *page_start, i;
- gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+ long *page_start;
+ page_index_t i;
+ gc_assert(page_free_p(page));
gc_assert(page_table[page].bytes_used == 0);
page_start = (long *)page_address(page);
for (i=0; i<1024; i++) {
if (page_start[i] != 0) {
- lose("free region not zero at %x", page_start + i);
+ lose("free region not zero at %x\n", page_start + i);
}
}
}
    }

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
generations[page].num_gc = 0;
generations[page].cum_sum_bytes_allocated = 0;
+ generations[page].lutexes = NULL;
}
if (gencgc_verbose > 1)
- print_generation_stats(0);
+ print_generation_stats();
/* Initialize gc_alloc(). */
gc_alloc_generation = 0;
gc_set_region_empty(&unboxed_region);
last_free_page = 0;
- SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0);
+ set_alloc_pointer((lispobj)((char *)heap_base));
if (verify_after_free_heap) {
/* Check whether purify has left any bad pointers. */
- if (gencgc_verbose)
- SHOW("checking after free_heap\n");
+ FSHOW((stderr, "checking after free_heap\n"));
verify_gc();
}
}
void
gc_init(void)
{
- long i;
+ page_index_t i;
+
+ /* Compute the number of pages needed for the dynamic space.
+ * Dynamic space size should be aligned on page size. */
+ page_table_pages = dynamic_space_size/PAGE_BYTES;
+ gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
+
+ page_table = calloc(page_table_pages, sizeof(struct page));
+ gc_assert(page_table);
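+
+    /* For example (illustrative numbers only): a 512 MB dynamic
+     * space with 4 kB pages needs 512*1024*1024 / 4096 = 131072
+     * page structs, allocated zero-filled by the calloc() above. */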
gc_init_tables();
- scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
+#ifdef LUTEX_WIDETAG
+ scavtab[LUTEX_WIDETAG] = scav_lutex;
+ transother[LUTEX_WIDETAG] = trans_lutex;
+ sizetab[LUTEX_WIDETAG] = size_lutex;
+#endif
+
heap_base = (void*)DYNAMIC_SPACE_START;
/* Initialize each page structure. */
- for (i = 0; i < NUM_PAGES; i++) {
+ for (i = 0; i < page_table_pages; i++) {
/* Initialize all pages as free. */
page_table[i].allocated = FREE_PAGE_FLAG;
page_table[i].bytes_used = 0;
    }

    /* Initialize the generations. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].cum_sum_bytes_allocated = 0;
/* the tune-able parameters */
generations[i].bytes_consed_between_gc = 2000000;
- generations[i].trigger_age = 1;
- generations[i].min_av_mem_age = 0.75;
+ generations[i].number_of_gcs_before_promotion = 1;
+ generations[i].minimum_age_before_gc = 0.75;
+ generations[i].lutexes = NULL;
}
/* Initialize gc_alloc. */
gc_set_region_empty(&unboxed_region);
last_free_page = 0;
-
}
/* Pick up the dynamic space from after a core load. */
static void
gencgc_pickup_dynamic(void)
{
- long page = 0;
- long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0);
+ page_index_t page = 0;
+ void *alloc_ptr = (void *)get_alloc_pointer();
lispobj *prev=(lispobj *)page_address(page);
-
+ generation_index_t gen = PSEUDO_STATIC_GENERATION;
do {
lispobj *first,*ptr= (lispobj *)page_address(page);
- page_table[page].allocated = BOXED_PAGE_FLAG;
- page_table[page].gen = 0;
+ page_table[page].gen = gen;
page_table[page].bytes_used = PAGE_BYTES;
page_table[page].large_object = 0;
-
- first=gc_search_space(prev,(ptr+2)-prev,ptr);
- if(ptr == first) prev=ptr;
- page_table[page].first_object_offset =
- (void *)prev - page_address(page);
+ page_table[page].write_protected = 0;
+ page_table[page].write_protected_cleared = 0;
+ page_table[page].dont_move = 0;
+ page_table[page].need_to_zero = 1;
+
+ if (!gencgc_partial_pickup) {
+ page_table[page].allocated = BOXED_PAGE_FLAG;
+ first=gc_search_space(prev,(ptr+2)-prev,ptr);
+ if(ptr == first)
+ prev=ptr;
+ page_table[page].region_start_offset =
+ page_address(page) - (void *)prev;
+ }
page++;
- } while ((long)page_address(page) < alloc_ptr);
+ } while (page_address(page) < alloc_ptr);
- generations[0].bytes_allocated = PAGE_BYTES*page;
- bytes_allocated = PAGE_BYTES*page;
+#ifdef LUTEX_WIDETAG
+ /* Lutexes have been registered in generation 0 by coreparse, and
+ * need to be moved to the right one manually.
+ */
+ move_lutexes(0, PSEUDO_STATIC_GENERATION);
+#endif
-}
+ last_free_page = page;
+ generations[gen].bytes_allocated = npage_bytes(page);
+ bytes_allocated = npage_bytes(page);
+
+ gc_alloc_update_all_page_tables();
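+    /* Everything from the core lands in the pseudo-static
+     * generation, which is not normally collected; write-protecting
+     * it here means later GCs rescan these pages only once they
+     * have actually been written to. */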
+ write_protect_generation_pages(gen);
+}
void
gc_initialize_pointers(void)
{
gencgc_pickup_dynamic();
}
-
-
\f
/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0 and is called only from outside the
 * garbage collector.
 *
* The check for a GC trigger is only performed when the current
* region is full, so in most cases it's not needed. */
-char *
-alloc(long nbytes)
+static inline lispobj *
+general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region,
+ struct thread *thread)
{
- struct thread *thread=arch_os_get_current_thread();
- struct alloc_region *region=
-#ifdef LISP_FEATURE_SB_THREAD
- thread ? &(thread->alloc_region) : &boxed_region;
-#else
- &boxed_region;
+#ifndef LISP_FEATURE_WIN32
+ lispobj alloc_signal;
#endif
void *new_obj;
void *new_free_pointer;
+
gc_assert(nbytes>0);
+
/* Check for alignment allocation problems. */
- gc_assert((((unsigned)region->free_pointer & LOWTAG_MASK) == 0)
+ gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
&& ((nbytes & LOWTAG_MASK) == 0));
-#if 0
- if(all_threads)
- /* there are a few places in the C code that allocate data in the
- * heap before Lisp starts. This is before interrupts are enabled,
- * so we don't need to check for pseudo-atomic */
-#ifdef LISP_FEATURE_SB_THREAD
- if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
- register u32 fs;
- fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
- th,th->os_thread);
- __asm__("movl %fs,%0" : "=r" (fs) : );
- fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
- debug_get_fs(),th->tls_cookie);
- lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
- }
-#else
- gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
-#endif
-#endif
+
+ /* Must be inside a PA section. */
+ gc_assert(get_pseudo_atomic_atomic(thread));
/* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj);        /* yup */
}
- /* we have to go the long way around, it seems. Check whether
- * we should GC in the near future
+ /* we have to go the long way around, it seems. Check whether we
+ * should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread)));
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */
        if (SymbolValue(GC_PENDING,thread) == NIL) {
            /* set things up so that GC happens when we finish the PA
* section */
SetSymbolValue(GC_PENDING,T,thread);
- if (SymbolValue(GC_INHIBIT,thread) == NIL)
- arch_set_pseudo_atomic_interrupted(0);
+ if (SymbolValue(GC_INHIBIT,thread) == NIL) {
+ set_pseudo_atomic_interrupted(thread);
+#ifdef LISP_FEATURE_PPC
+ /* PPC calls alloc() from a trap or from pa_alloc(),
+ * look up the most recent context if it's from a trap. */
+ {
+ os_context_t *context =
+ thread->interrupt_data->allocation_trap_context;
+ maybe_save_gc_mask_and_block_deferrables
+ (context ? os_context_sigmask_addr(context) : NULL);
+ }
+#else
+ maybe_save_gc_mask_and_block_deferrables(NULL);
+#endif
+ }
}
}
- new_obj = gc_alloc_with_region(nbytes,0,region,0);
+ new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
+
+#ifndef LISP_FEATURE_WIN32
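+    /* ALLOC_SIGNAL countdown: while the symbol holds a fixnum it is
+     * decremented once per allocation; on reaching zero it is reset
+     * to T (disarming the countdown) and SIGPROF is raised. */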
+ alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
+ if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
+ if ((signed long) alloc_signal <= 0) {
+ SetSymbolValue(ALLOC_SIGNAL, T, thread);
+ raise(SIGPROF);
+ } else {
+ SetSymbolValue(ALLOC_SIGNAL,
+ alloc_signal - (1 << N_FIXNUM_TAG_BITS),
+ thread);
+ }
+ }
+#endif
+
return (new_obj);
}
+
+lispobj *
+general_alloc(long nbytes, int page_type_flag)
+{
+ struct thread *thread = arch_os_get_current_thread();
+ /* Select correct region, and call general_alloc_internal with it.
+ * For other than boxed allocation we must lock first, since the
+ * region is shared. */
+ if (BOXED_PAGE_FLAG & page_type_flag) {
+#ifdef LISP_FEATURE_SB_THREAD
+ struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
+#else
+ struct alloc_region *region = &boxed_region;
+#endif
+ return general_alloc_internal(nbytes, page_type_flag, region, thread);
+ } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ lispobj * obj;
+ gc_assert(0 == thread_mutex_lock(&allocation_lock));
+ obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
+ gc_assert(0 == thread_mutex_unlock(&allocation_lock));
+ return obj;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+}
+
+lispobj *
+alloc(long nbytes)
+{
+ gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
+ return general_alloc(nbytes, BOXED_PAGE_FLAG);
+}
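+
+/* A minimal usage sketch (assuming the set_/clear_pseudo_atomic_atomic
+ * helpers from pseudo-atomic.h; nbytes must already be rounded up to a
+ * LOWTAG_MASK-aligned size):
+ *
+ *     struct thread *th = arch_os_get_current_thread();
+ *     set_pseudo_atomic_atomic(th);
+ *     lispobj *mem = alloc(nbytes);
+ *     clear_pseudo_atomic_atomic(th);
+ *     // leaving the PA section lets any GC deferred above run
+ */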
\f
/*
* shared support for the OS-dependent signal handlers which
* catch GENCGC-related write-protect violations
*/
-
-void unhandled_sigmemoryfault(void);
+void unhandled_sigmemoryfault(void* addr);
/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function does the OS-independent part of handling them. */
int
gencgc_handle_wp_violation(void* fault_addr)
{
- long page_index = find_page_index(fault_addr);
+ page_index_t page_index = find_page_index(fault_addr);
-#ifdef QSHOW_SIGNALS
+#if QSHOW_SIGNALS
FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
fault_addr, page_index));
#endif
/* It can be helpful to be able to put a breakpoint on this
* case to help diagnose low-level problems. */
- unhandled_sigmemoryfault();
+ unhandled_sigmemoryfault(fault_addr);
/* not within the dynamic space -- not our responsibility */
return 0;
} else {
+ int ret;
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
if (page_table[page_index].write_protected) {
/* Unprotect the page. */
os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
            page_table[page_index].write_protected_cleared = 1;
            page_table[page_index].write_protected = 0;
        } else {
            /* The page is no longer write-protected: this can happen
             * when two threads fault on the same page and the second
             * does this test after the first one has already set wp=0
             */
if(page_table[page_index].write_protected_cleared != 1)
- lose("fault in heap page not marked as write-protected");
+ lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
+ page_index, boxed_region.first_page,
+ boxed_region.last_page);
}
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
/* Don't worry, we can handle it. */
return 1;
}
/* Called when we catch a protection fault that is not the GC's write
 * barrier and are about to let Lisp deal with it. It's basically just a
* convenient place to set a gdb breakpoint. */
void
-unhandled_sigmemoryfault()
+unhandled_sigmemoryfault(void *addr)
{}
void gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
struct thread *th;
for_each_thread(th)
- gc_alloc_update_page_tables(0, &th->alloc_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
- gc_alloc_update_page_tables(0, &boxed_region);
+ gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
+ gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
+ gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
+
void
gc_set_region_empty(struct alloc_region *region)
{
region->free_pointer = page_address(0);
region->end_addr = page_address(0);
}
+
+static void
+zero_all_free_pages()
+{
+ page_index_t i;
+
+ for (i = 0; i < last_free_page; i++) {
+ if (page_free_p(i)) {
+#ifdef READ_PROTECT_FREE_PAGES
+ os_protect(page_address(i),
+ PAGE_BYTES,
+ OS_VM_PROT_ALL);
+#endif
+ zero_pages(i, i);
+ }
+ }
+}
+
+/* Things to do before doing a final GC before saving a core (without
+ * purify).
+ *
+ * + Pages in large_object pages aren't moved by the GC, so we need to
+ * unset that flag from all pages.
+ * + The pseudo-static generation isn't normally collected, but it seems
+ * reasonable to collect it at least when saving a core. So move the
+ * pages to a normal generation.
+ */
+static void
+prepare_for_final_gc ()
+{
+ page_index_t i;
+ for (i = 0; i < last_free_page; i++) {
+ page_table[i].large_object = 0;
+ if (page_table[i].gen == PSEUDO_STATIC_GENERATION) {
+ int used = page_table[i].bytes_used;
+ page_table[i].gen = HIGHEST_NORMAL_GENERATION;
+ generations[PSEUDO_STATIC_GENERATION].bytes_allocated -= used;
+ generations[HIGHEST_NORMAL_GENERATION].bytes_allocated += used;
+ }
+ }
+}
+
+
+/* Do a non-conservative GC, and then save a core with the initial
+ * function being set to the value of the static symbol
+ * SB!VM:RESTART-LISP-FUNCTION */
+void
+gc_and_save(char *filename, boolean prepend_runtime,
+ boolean save_runtime_options)
+{
+ FILE *file;
+ void *runtime_bytes = NULL;
+ size_t runtime_size;
+
+ file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
+ &runtime_size);
+ if (file == NULL)
+ return;
+
+ conservative_stack = 0;
+
+ /* The filename might come from Lisp, and be moved by the now
+ * non-conservative GC. */
+ filename = strdup(filename);
+
+ /* Collect twice: once into relatively high memory, and then back
+ * into low memory. This compacts the retained data into the lower
+ * pages, minimizing the size of the core file.
+ */
+ prepare_for_final_gc();
+ gencgc_alloc_start_page = last_free_page;
+ collect_garbage(HIGHEST_NORMAL_GENERATION+1);
+
+ prepare_for_final_gc();
+ gencgc_alloc_start_page = -1;
+ collect_garbage(HIGHEST_NORMAL_GENERATION+1);
+
+ if (prepend_runtime)
+ save_runtime_to_filehandle(file, runtime_bytes, runtime_size);
+
+ /* The dumper doesn't know that pages need to be zeroed before use. */
+ zero_all_free_pages();
+ save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
+ prepend_runtime, save_runtime_options);
+ /* Oops. Save still managed to fail. Since we've mangled the stack
+ * beyond hope, there's not much we can do.
+ * (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's
+ * going to be rather unsatisfactory too... */
+ lose("Attempt to save core after non-conservative GC failed.\n");
+}
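+
+/* Illustrative usage: the Lisp side of SAVE-LISP-AND-DIE ends up
+ * calling, e.g., gc_and_save("foo.core", 0, 0); on success the
+ * process exits inside save_to_filehandle, so reaching the lose()
+ * above means the save itself failed. */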