#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
+#include "pseudo-atomic.h"
#include "alloc.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#if defined(LUTEX_WIDETAG)
#include "pthread-lutex.h"
#endif
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+#include "genesis/cons.h"
+#endif
/* forward declarations */
page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
* scratch space by the collector, and should never get collected.
*/
enum {
- HIGHEST_NORMAL_GENERATION = 5,
- PSEUDO_STATIC_GENERATION,
- SCRATCH_GENERATION,
+ SCRATCH_GENERATION = PSEUDO_STATIC_GENERATION+1,
NUM_GENERATIONS
};
/* the verbosity level. All non-error messages are disabled at level 0;
* and only a few rare messages are printed at level 1. */
-#ifdef QSHOW
+#if QSHOW
boolean gencgc_verbose = 1;
#else
boolean gencgc_verbose = 0;
page_index_t page_table_pages;
struct page *page_table;
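+
+/* Inline predicates over page_table entries. These replace the
+ * open-coded flag tests on page_table[].allocated that were
+ * previously scattered through this file, so that the flag logic
+ * lives in one place. */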
+static inline boolean page_allocated_p(page_index_t page) {
+ return (page_table[page].allocated != FREE_PAGE_FLAG);
+}
+
+static inline boolean page_no_region_p(page_index_t page) {
+ return !(page_table[page].allocated & OPEN_REGION_PAGE_FLAG);
+}
+
+static inline boolean page_allocated_no_region_p(page_index_t page) {
+ return ((page_table[page].allocated & (UNBOXED_PAGE_FLAG | BOXED_PAGE_FLAG))
+ && page_no_region_p(page));
+}
+
+static inline boolean page_free_p(page_index_t page) {
+ return (page_table[page].allocated == FREE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_p(page_index_t page) {
+ return (page_table[page].allocated & BOXED_PAGE_FLAG);
+}
+
+static inline boolean code_page_p(page_index_t page) {
+ return (page_table[page].allocated & CODE_PAGE_FLAG);
+}
+
+static inline boolean page_boxed_no_region_p(page_index_t page) {
+ return page_boxed_p(page) && page_no_region_p(page);
+}
+
+static inline boolean page_unboxed_p(page_index_t page) {
+ /* Both flags set == boxed code page */
+ return ((page_table[page].allocated & UNBOXED_PAGE_FLAG)
+ && !page_boxed_p(page));
+}
+
+static inline boolean protect_page_p(page_index_t page, generation_index_t generation) {
+ return (page_boxed_no_region_p(page)
+ && (page_table[page].bytes_used != 0)
+ && !page_table[page].dont_move
+ && (page_table[page].gen == generation));
+}
+
/* To map addresses to page structures the address of the first page
* is needed. */
static void *heap_base = NULL;
return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
}
-/* a structure to hold the state of a generation */
+/* a structure to hold the state of a generation
+ *
+ * CAUTION: If you modify this, make sure to touch up the alien
+ * definition in src/code/gc.lisp accordingly. ...or better yet,
+ * deal with the FIXME there...
+ */
struct generation {
/* the first page that gc_alloc() checks on its next call */
/* the number of GCs since the last raise */
int num_gc;
- /* the average age after which a GC will raise objects to the
+ /* the number of GCs to run on this generation before raising objects to the
* next generation */
- int trigger_age;
+ int number_of_gcs_before_promotion;
/* the cumulative sum of the bytes allocated to this generation. It is
* cleared after a GC on this generation, and updated before new
/* a minimum average memory age before a GC will occur helps
* prevent a GC when a large number of new live objects have been
* added, in which case a GC could be a waste of time */
- double min_av_mem_age;
+ double minimum_age_before_gc;
/* A linked list of lutex structures in this generation, used for
* implementing lutex finalization. */
* integrated with the Lisp code. */
page_index_t last_free_page;
\f
+#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
* allocating new regions which overlap each other. Note that the
* majority of GC is single-threaded, but alloc() may be called from
* >1 thread at a time and must be thread-safe. This lock must be
* seized before all accesses to generations[] or to parts of
* page_table[] that other threads may want to see */
-
-#ifdef LISP_FEATURE_SB_THREAD
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
+/* This lock is used to protect non-thread-local allocation. */
+static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
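+
+/* A sketch of the intended usage pattern for free_pages_lock,
+ * mirroring its uses later in this file:
+ *
+ *     int ret = thread_mutex_lock(&free_pages_lock);
+ *     gc_assert(ret == 0);
+ *     ... read or update generations[] / page_table[] ...
+ *     ret = thread_mutex_unlock(&free_pages_lock);
+ *     gc_assert(ret == 0);
+ */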
\f
unsigned long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected == 1))
count++;
long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].gen == generation))
count++;
return count;
}
-#ifdef QSHOW
+#if QSHOW
static long
count_dont_move_pages(void)
{
page_index_t i;
long count = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].dont_move != 0)) {
++count;
}
page_index_t i;
unsigned long result = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].gen == gen))
result += page_table[i].bytes_used;
}
}
/* Return the average age of the memory in a generation. */
-static double
-gen_av_mem_age(generation_index_t gen)
+extern double
+generation_average_age(generation_index_t gen)
{
if (generations[gen].bytes_allocated == 0)
return 0.0;
/ ((double)generations[gen].bytes_allocated);
}
-/* The verbose argument controls how much to print: 0 for normal
- * level of detail; 1 for debugging. */
-static void
-print_generation_stats(int verbose) /* FIXME: should take FILE argument */
+extern void
+write_generation_stats(FILE *file)
{
- generation_index_t i, gens;
+ generation_index_t i;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
#define FPU_STATE_SIZE 27
* so they need to be saved and reset for C. */
fpu_save(fpu_state);
- /* highest generation to print */
- if (verbose)
- gens = SCRATCH_GENERATION;
- else
- gens = PSEUDO_STATIC_GENERATION;
-
/* Print the heap stats. */
- fprintf(stderr,
+ fprintf(file,
" Gen StaPg UbSta LaSta LUbSt Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
- for (i = 0; i < gens; i++) {
+ for (i = 0; i < SCRATCH_GENERATION; i++) {
page_index_t j;
long boxed_cnt = 0;
long unboxed_cnt = 0;
/* Count the number of boxed pages within the given
* generation. */
- if (page_table[j].allocated & BOXED_PAGE_FLAG) {
+ if (page_boxed_p(j)) {
if (page_table[j].large_object)
large_boxed_cnt++;
else
if(page_table[j].dont_move) pinned_cnt++;
/* Count the number of unboxed pages within the given
* generation. */
- if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+ if (page_unboxed_p(j)) {
if (page_table[j].large_object)
large_unboxed_cnt++;
else
gc_assert(generations[i].bytes_allocated
== count_generation_bytes_allocated(i));
- fprintf(stderr,
+ fprintf(file,
" %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
i,
generations[i].alloc_start_page,
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
- gen_av_mem_age(i));
+ generation_average_age(i));
}
- fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated);
+ fprintf(file," Total bytes allocated = %lu\n", bytes_allocated);
+ fprintf(file," Dynamic-space-size bytes = %lu\n", (unsigned long)dynamic_space_size);
fpu_restore(fpu_state);
}
+
+extern void
+write_heap_exhaustion_report(FILE *file, long available, long requested,
+ struct thread *thread)
+{
+ fprintf(file,
+ "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
+ gc_active_p ? "garbage collection" : "allocation",
+ available,
+ requested);
+ write_generation_stats(file);
+ fprintf(file, "GC control variables:\n");
+ fprintf(file, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
+ SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
+ (SymbolValue(GC_PENDING, thread) == T) ?
+ "true" : ((SymbolValue(GC_PENDING, thread) == NIL) ?
+ "false" : "in progress"));
+#ifdef LISP_FEATURE_SB_THREAD
+ fprintf(file, " *STOP-FOR-GC-PENDING* = %s\n",
+ SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
+#endif
+}
+
+extern void
+print_generation_stats(void)
+{
+ write_generation_stats(stderr);
+}
+
+extern char* gc_logfile;
+char * gc_logfile = NULL;
+
+extern void
+log_generation_stats(char *logfile, char *header)
+{
+ if (logfile) {
+ FILE * log = fopen(logfile, "a");
+ if (log) {
+ fprintf(log, "%s\n", header);
+ write_generation_stats(log);
+ fclose(log);
+ } else {
+ fprintf(stderr, "Could not open gc logfile: %s\n", logfile);
+ fflush(stderr);
+ }
+ }
+}
+
+extern void
+report_heap_exhaustion(long available, long requested, struct thread *th)
+{
+ if (gc_logfile) {
+ FILE * log = fopen(gc_logfile, "a");
+ if (log) {
+ write_heap_exhaustion_report(log, available, requested, th);
+ fclose(log);
+ } else {
+ fprintf(stderr, "Could not open gc logfile: %s\n", gc_logfile);
+ fflush(stderr);
+ }
+ }
+ /* Always to stderr as well. */
+ write_heap_exhaustion_report(stderr, available, requested, th);
+}
\f
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
*/
static void
zero_dirty_pages(page_index_t start, page_index_t end) {
- page_index_t i;
+ page_index_t i, j;
for (i = start; i <= end; i++) {
- if (page_table[i].need_to_zero == 1) {
- zero_pages(start, end);
- break;
- }
+ if (!page_table[i].need_to_zero) continue;
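+ /* Find the end of this contiguous run of pages that need
+ * zeroing, then clear the whole run with a single call. */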
+ for (j = i+1; (j <= end) && (page_table[j].need_to_zero); j++);
+ zero_pages(i, j-1);
+ i = j;
}
for (i = start; i <= end; i++) {
if (large) {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
return generations[generation].alloc_large_unboxed_start_page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
return generations[generation].alloc_large_start_page;
} else {
lose("bad page type flag: %d", page_type_flag);
} else {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
return generations[generation].alloc_unboxed_start_page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
return generations[generation].alloc_start_page;
} else {
lose("bad page_type_flag: %d", page_type_flag);
if (large) {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
generations[generation].alloc_large_unboxed_start_page = page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
generations[generation].alloc_large_start_page = page;
} else {
lose("bad page type flag: %d", page_type_flag);
} else {
if (UNBOXED_PAGE_FLAG == page_type_flag) {
generations[generation].alloc_unboxed_start_page = page;
- } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ } else if (BOXED_PAGE_FLAG & page_type_flag) {
+ /* Both code and data. */
generations[generation].alloc_start_page = page;
} else {
lose("bad page type flag: %d", page_type_flag);
gc_assert(page_table[first_page].region_start_offset == 0);
page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- gc_assert(page_table[first_page].allocated == page_type_flag);
+ gc_assert(page_table[first_page].allocated & page_type_flag);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
* region, and set the bytes_used. */
while (more) {
page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- gc_assert(page_table[next_page].allocated==page_type_flag);
+ gc_assert(page_table[next_page].allocated & page_type_flag);
gc_assert(page_table[next_page].bytes_used == 0);
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);
/* Add the region to the new_areas if requested. */
- if (BOXED_PAGE_FLAG == page_type_flag)
+ if (BOXED_PAGE_FLAG & page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used, region_size);
/*
int orig_first_page_bytes_used;
long byte_cnt;
int more;
- long bytes_used;
+ unsigned long bytes_used;
page_index_t next_page;
int ret;
* region_start_offset pointer to the start of the region, and set
* the bytes_used. */
while (more) {
- gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_free_p(next_page));
gc_assert(page_table[next_page].bytes_used == 0);
page_table[next_page].allocated = page_type_flag;
page_table[next_page].gen = gc_alloc_generation;
generations[gc_alloc_generation].bytes_allocated += nbytes;
/* Add the region to the new_areas if requested. */
- if (BOXED_PAGE_FLAG == page_type_flag)
+ if (BOXED_PAGE_FLAG & page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used,nbytes);
/* Bump up last_free_page */
void
gc_heap_exhausted_error_or_lose (long available, long requested)
{
+ struct thread *thread = arch_os_get_current_thread();
/* Write basic information before doing anything else: if we don't
* call to lisp this is a must, and even if we do there is always
* the danger that we bounce back here before the error has been
* handled, or indeed even printed.
*/
- fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
- gc_active_p ? "garbage collection" : "allocation",
- available, requested);
+ report_heap_exhaustion(available, requested, thread);
if (gc_active_p || (available == 0)) {
/* If we are in GC, or totally out of memory there is no way
* to sanely transfer control to the lisp-side of things.
*/
- struct thread *thread = arch_os_get_current_thread();
- print_generation_stats(1);
- fprintf(stderr, "GC control variables:\n");
- fprintf(stderr, " *GC-INHIBIT* = %s\n *GC-PENDING* = %s\n",
- SymbolValue(GC_INHIBIT,thread)==NIL ? "false" : "true",
- SymbolValue(GC_PENDING,thread)==NIL ? "false" : "true");
-#ifdef LISP_FEATURE_SB_THREAD
- fprintf(stderr, " *STOP-FOR-GC-PENDING* = %s\n",
- SymbolValue(STOP_FOR_GC_PENDING,thread)==NIL ? "false" : "true");
-#endif
lose("Heap exhausted, game over.");
}
else {
/* FIXME: assert free_pages_lock held */
(void)thread_mutex_unlock(&free_pages_lock);
+ gc_assert(get_pseudo_atomic_atomic(thread));
+ clear_pseudo_atomic_atomic(thread);
+ if (get_pseudo_atomic_interrupted(thread))
+ do_pending_interrupt();
+ /* Another issue is that signalling HEAP-EXHAUSTED error leads
+ * to running user code at arbitrary places, even in a
+ * WITHOUT-INTERRUPTS which may lead to a deadlock without
+ * running out of the heap. So at this point all bets are
+ * off. */
+ if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL)
+ corruption_warning_and_maybe_lose
+ ("Signalling HEAP-EXHAUSTED in a WITHOUT-INTERRUPTS.");
funcall2(StaticSymbolFunction(HEAP_EXHAUSTED_ERROR),
alloc_number(available), alloc_number(requested));
lose("HEAP-EXHAUSTED-ERROR fell through");
}
page_index_t
-gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag)
+gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
+ int page_type_flag)
{
page_index_t first_page, last_page;
page_index_t restart_page = *restart_page_ptr;
restart_page = gencgc_alloc_start_page;
}
- if (nbytes>=PAGE_BYTES) {
+ gc_assert(nbytes>=0);
+ if (((unsigned long)nbytes)>=PAGE_BYTES) {
/* Search for a contiguous free space of at least nbytes,
* aligned on a page boundary. The page-alignment is strictly
* speaking needed only for objects at least large_object_size
do {
first_page = restart_page;
while ((first_page < page_table_pages) &&
- (page_table[first_page].allocated != FREE_PAGE_FLAG))
+ page_allocated_p(first_page))
first_page++;
last_page = first_page;
bytes_found = PAGE_BYTES;
while ((bytes_found < nbytes) &&
(last_page < (page_table_pages-1)) &&
- (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ page_free_p(last_page+1)) {
last_page++;
bytes_found += PAGE_BYTES;
gc_assert(0 == page_table[last_page].bytes_used);
* pages: this helps avoid excessive conservativism. */
first_page = restart_page;
while (first_page < page_table_pages) {
- if (page_table[first_page].allocated == FREE_PAGE_FLAG)
+ if (page_free_p(first_page))
{
gc_assert(0 == page_table[first_page].bytes_used);
bytes_found = PAGE_BYTES;
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_boxed_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+ /* Should have been unprotected by unprotect_oldspace(). */
+ gc_assert(page_table[next_page].write_protected == 0);
page_table[next_page].gen = new_space;
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[next_page].write_protected = 0;
- }
remaining_bytes -= PAGE_BYTES;
next_page++;
}
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_boxed_p(next_page));
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
+ page_boxed_p(next_page) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
- if ((nwords > 1024*1024) && gencgc_verbose)
+ if ((nwords > 1024*1024) && gencgc_verbose) {
FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
nwords*N_WORD_BYTES));
+ }
/* Check whether it's a large object. */
first_page = find_page_index((void *)object);
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
+ gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
next_page++;
}
- if ((bytes_freed > 0) && gencgc_verbose)
+ if ((bytes_freed > 0) && gencgc_verbose) {
FSHOW((stderr,
"/copy_large_unboxed bytes_freed=%d\n",
bytes_freed));
+ }
generations[from_space].bytes_allocated -=
nwords*N_WORD_BYTES + bytes_freed;
unsigned d2 = *((unsigned char *)p - 2);
unsigned d3 = *((unsigned char *)p - 3);
unsigned d4 = *((unsigned char *)p - 4);
-#ifdef QSHOW
+#if QSHOW
unsigned d5 = *((unsigned char *)p - 5);
unsigned d6 = *((unsigned char *)p - 6);
#endif
lispobj *start;
/* The address may be invalid, so do some checks. */
- if ((page_index == -1) ||
- (page_table[page_index].allocated == FREE_PAGE_FLAG))
+ if ((page_index == -1) || page_free_p(page_index))
return NULL;
start = (lispobj *)page_region_start(page_index);
return (gc_search_space(start,
(lispobj *)pointer));
}
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-
/* Helper for valid_lisp_pointer_p and
* possibly_valid_dynamic_space_pointer.
*
static int
looks_like_valid_lisp_pointer_p(lispobj *pointer, lispobj *start_addr)
{
- /* We need to allow raw pointers into Code objects for return
- * addresses. This will also pick up pointers to functions in code
- * objects. */
- if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG)
- /* XXX could do some further checks here */
- return 1;
-
if (!is_lisp_pointer((lispobj)pointer)) {
return 0;
}
* header. */
switch (widetag_of(*start_addr)) {
case CODE_HEADER_WIDETAG:
- /* This case is probably caught above. */
- break;
+ /* Make sure we actually point to a function in the code object,
+ * as opposed to a random point there. */
+ if (SIMPLE_FUN_HEADER_WIDETAG ==
+ widetag_of(*(lispobj *)((unsigned long)pointer - FUN_POINTER_LOWTAG)))
+ return 1;
+ else
+ return 0;
case CLOSURE_HEADER_WIDETAG:
case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
if ((unsigned long)pointer !=
((unsigned long)start_addr+FUN_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wf2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
default:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wf3: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
case LIST_POINTER_LOWTAG:
if ((unsigned long)pointer !=
((unsigned long)start_addr+LIST_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wl1: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
/* Is it a plausible cons? */
is_lisp_immediate(start_addr[1])))
break;
else {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wl2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
case INSTANCE_POINTER_LOWTAG:
if ((unsigned long)pointer !=
((unsigned long)start_addr+INSTANCE_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wi1: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wi2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
case OTHER_POINTER_LOWTAG:
+
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+ /* The all-architecture test below is good as far as it goes,
+ * but an LRA object is similar to a FUN-POINTER: It is
+ * embedded within a CODE-OBJECT pointed to by start_addr, and
+ * cannot be found by simply walking the heap, therefore we
+ * need to check for it. -- AB, 2010-Jun-04 */
+ if ((widetag_of(start_addr[0]) == CODE_HEADER_WIDETAG)) {
+ lispobj *potential_lra =
+ (lispobj *)(((unsigned long)pointer) - OTHER_POINTER_LOWTAG);
+ if ((widetag_of(potential_lra[0]) == RETURN_PC_HEADER_WIDETAG) &&
+ ((potential_lra - HeaderValue(potential_lra[0])) == start_addr)) {
+ return 1; /* It's as good as we can verify. */
+ }
+ }
+#endif
+
if ((unsigned long)pointer !=
((unsigned long)start_addr+OTHER_POINTER_LOWTAG)) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wo1: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
/* Is it plausible? Not a cons. XXX should check the headers. */
if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wo2: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
switch (widetag_of(start_addr[0])) {
#if N_WORD_BITS == 64
case SINGLE_FLOAT_WIDETAG:
#endif
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*Wo3: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
/* only pointed to by function pointers? */
case CLOSURE_HEADER_WIDETAG:
case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*Wo4: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
case INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*Wo5: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
/* the valid other immediate pointer objects */
break;
default:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"/Wo6: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
break;
default:
- if (gencgc_verbose)
+ if (gencgc_verbose) {
FSHOW((stderr,
"*W?: %x %x %x\n",
pointer, start_addr, *start_addr));
+ }
return 0;
}
return 0;
}
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+
/* Is there any possibility that pointer is a valid Lisp object
* reference, and/or something else (e.g. subroutine call return
* address) which should prevent us from moving the referred-to thing?
return looks_like_valid_lisp_pointer_p(pointer, start_addr);
}
+#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+
/* Adjust large bignum and vector objects. This will adjust the
* allocated region if the size has shrunk, and move unboxed objects
* into unboxed pages. The pages are not promoted here, and the
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
+ gc_assert(page_allocated_no_region_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
(page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_allocated_no_region_p(next_page) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
/* quick check 1: Address is quite likely to have been invalid. */
if ((addr_page_index == -1)
- || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ || page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
|| (page_table[addr_page_index].gen != from_space)
/* Skip if already marked dont_move. */
* address referring to something in a CodeObject). This is
* expensive but important, since it vastly reduces the
* probability that random garbage will be bogusly interpreted as
- * a pointer which prevents a page from moving. */
- if (!(possibly_valid_dynamic_space_pointer(addr)))
+ * a pointer which prevents a page from moving.
+ *
+ * This only needs to happen on x86oids, where this is used for
+ * conservative roots. Non-x86oid systems only ever call this
+ * function on known-valid lisp objects. */
+#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+ if (!(code_page_p(addr_page_index)
+ || (is_lisp_pointer((lispobj)addr) &&
+ possibly_valid_dynamic_space_pointer(addr))))
return;
+#endif
/* Find the beginning of the region. Note that there may be
* objects in the region preceding the one that we were passed a
* free area in which case it's ignored here. Note it gets
* through the valid pointer test above because the tail looks
* like conses. */
- if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ if (page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
|| (((unsigned long)addr & (PAGE_BYTES - 1))
/* Check whether this is the last page in this contiguous block.. */
if ((page_table[i].bytes_used < PAGE_BYTES)
/* ..or it is PAGE_BYTES and is the last in the block */
- || (page_table[i+1].allocated == FREE_PAGE_FLAG)
+ || page_free_p(i+1)
|| (page_table[i+1].bytes_used == 0) /* next page free */
|| (page_table[i+1].gen != from_space) /* diff. gen */
|| (page_table[i+1].region_start_offset == 0))
/* Check that the page is now static. */
gc_assert(page_table[addr_page_index].dont_move != 0);
}
-
-#endif // defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-
\f
/* If the given page is not write-protected, then scan it for pointers
* to younger generations or the top temp. generation, if no
long num_words = page_table[page].bytes_used / N_WORD_BYTES;
/* Shouldn't be a free page. */
- gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
+ gc_assert(page_allocated_p(page));
gc_assert(page_table[page].bytes_used != 0);
/* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
/* FIXME: What's the reason for not write-protecting pinned pages? */
|| page_table[page].dont_move
- || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
+ || page_unboxed_p(page))
return (0);
/* Scan the page for pointers to younger generations or the
/* Check that it's in the dynamic space */
if (index != -1)
if (/* Does it point to a younger or the temp. generation? */
- ((page_table[index].allocated != FREE_PAGE_FLAG)
+ (page_allocated_p(index)
&& (page_table[index].bytes_used != 0)
&& ((page_table[index].gen < gen)
|| (page_table[index].gen == SCRATCH_GENERATION)))
for (i = 0; i < last_free_page; i++) {
generation_index_t generation = page_table[i].gen;
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ if (page_boxed_p(i)
&& (page_table[i].bytes_used != 0)
&& (generation != new_space)
&& (generation >= from)
write_protected && page_table[last_page].write_protected;
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].region_start_offset == 0))
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < page_table_pages; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)) {
generation));
for (i = 0; i < last_free_page; i++) {
/* Note that this skips over open regions when it encounters them. */
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ if (page_boxed_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& ((page_table[i].write_protected == 0)
* contiguous block */
if ((page_table[last_page].bytes_used < PAGE_BYTES)
/* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (!page_boxed_p(last_page+1))
|| (page_table[last_page+1].bytes_used == 0)
|| (page_table[last_page+1].gen != generation)
|| (page_table[last_page+1].region_start_offset == 0))
/* New areas of objects allocated have been lost so need to do a
* full scan to be sure! If this becomes a problem try
* increasing NUM_NEW_AREAS. */
- if (gencgc_verbose)
+ if (gencgc_verbose) {
SHOW("new_areas overflow, doing full scavenge");
+ }
/* Don't need to record new areas that get scavenged
* anyway during scavenge_newspace_generation_one_scan. */
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < page_table_pages; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
&& (page_table[i].write_protected_cleared != 0)
unprotect_oldspace(void)
{
page_index_t i;
+ void *region_addr = 0;
+ void *page_addr = 0;
+ unsigned long region_bytes = 0;
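+ /* Coalesce runs of contiguous write-protected pages so that each
+ * run is unprotected with a single os_protect() call. */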
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == from_space)) {
- void *page_start;
-
- page_start = (void *)page_address(i);
/* Remove any write-protection. We should be able to rely
* on the write-protect flag to avoid redundant calls. */
if (page_table[i].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
page_table[i].write_protected = 0;
+ page_addr = page_address(i);
+ if (!region_addr) {
+ /* First region. */
+ region_addr = page_addr;
+ region_bytes = PAGE_BYTES;
+ } else if (region_addr + region_bytes == page_addr) {
+ /* Region continues. */
+ region_bytes += PAGE_BYTES;
+ } else {
+ /* Unprotect previous region. */
+ os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+ /* First page in new region. */
+ region_addr = page_addr;
+ region_bytes = PAGE_BYTES;
+ }
}
}
}
+ if (region_addr) {
+ /* Unprotect last region. */
+ os_protect(region_addr, region_bytes, OS_VM_PROT_ALL);
+ }
}
/* Work through all the pages and free any in from_space. This
do {
/* Find a first page for the next region of pages. */
while ((first_page < last_free_page)
- && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
+ && (page_free_p(first_page)
|| (page_table[first_page].bytes_used == 0)
|| (page_table[first_page].gen != from_space)))
first_page++;
page_table[last_page].bytes_used;
page_table[last_page].allocated = FREE_PAGE_FLAG;
page_table[last_page].bytes_used = 0;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- {
- void *page_start = (void *)page_address(last_page);
-
- if (page_table[last_page].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[last_page].write_protected = 0;
- }
- }
+ /* Should already be unprotected by unprotect_oldspace(). */
+ gc_assert(!page_table[last_page].write_protected);
last_page++;
}
while ((last_page < last_free_page)
- && (page_table[last_page].allocated != FREE_PAGE_FLAG)
+ && page_allocated_p(last_page)
&& (page_table[last_page].bytes_used != 0)
&& (page_table[last_page].gen == from_space));
}
#endif
+static int
+is_in_stack_space(lispobj ptr)
+{
+ /* For space verification: Pointers can be valid if they point
+ * to a thread stack space. This would be faster if the thread
+ * structures had page-table entries as if they were part of
+ * the heap space. */
+ struct thread *th;
+ for_each_thread(th) {
+ if ((th->control_stack_start <= (lispobj *)ptr) &&
+ (th->control_stack_end >= (lispobj *)ptr)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
static void
verify_space(lispobj *start, size_t words)
{
if (page_index != -1) {
/* If it's within the dynamic space it should point to a used
* page. XX Could check the offset too. */
- if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(page_index)
&& (page_table[page_index].bytes_used == 0))
- lose ("Ptr %x @ %x sees free page.\n", thing, start);
+ lose ("Ptr %p @ %p sees free page.\n", thing, start);
/* Check that it doesn't point to a forwarding pointer! */
if (*((lispobj *)native_pointer(thing)) == 0x01) {
- lose("Ptr %x @ %x sees forwarding ptr.\n", thing, start);
+ lose("Ptr %p @ %p sees forwarding ptr.\n", thing, start);
}
/* Check that it's not in the RO space as it would then be a
* pointer from the RO to the dynamic space. */
if (is_in_readonly_space) {
- lose("ptr to dynamic space %x from RO space %x\n",
+ lose("ptr to dynamic space %p from RO space %x\n",
thing, start);
}
/* Does it point to a plausible object? This check slows
* dynamically. */
/*
if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
- lose("ptr %x to invalid object %x\n", thing, start);
+ lose("ptr %p to invalid object %p\n", thing, start);
}
*/
} else {
+ extern void funcallable_instance_tramp;
/* Verify that it points to another valid space. */
- if (!to_readonly_space && !to_static_space) {
- lose("Ptr %x @ %x sees junk.\n", thing, start);
+ if (!to_readonly_space && !to_static_space
+ && (thing != (lispobj)&funcallable_instance_tramp)
+ && !is_in_stack_space(thing)) {
+ lose("Ptr %p @ %p sees junk.\n", thing, start);
}
}
} else {
/* Only when enabled */
&& verify_dynamic_code_check) {
FSHOW((stderr,
- "/code object at %x in the dynamic space\n",
+ "/code object at %p in the dynamic space\n",
start));
}
break;
default:
- lose("Unhandled widetag 0x%x at 0x%x\n",
+ lose("Unhandled widetag %p at %p\n",
widetag_of(*start), start);
}
}
page_index_t i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ if (page_allocated_p(i)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
page_index_t last_page;
page_index_t page;
for (page = 0; page < last_free_page; page++) {
- if (page_table[page].allocated == FREE_PAGE_FLAG) {
+ if (page_free_p(page)) {
/* The whole page should be zero filled. */
long *start_addr = (long *)page_address(page);
long size = 1024;
gc_assert(generation < SCRATCH_GENERATION);
for (start = 0; start < last_free_page; start++) {
- if ((page_table[start].allocated == BOXED_PAGE_FLAG)
- && (page_table[start].bytes_used != 0)
- && !page_table[start].dont_move
- && (page_table[start].gen == generation)) {
+ if (protect_page_p(start, generation)) {
void *page_start;
page_index_t last;
page_table[start].write_protected = 1;
for (last = start + 1; last < last_free_page; last++) {
- if ((page_table[last].allocated != BOXED_PAGE_FLAG)
- || (page_table[last].bytes_used == 0)
- || page_table[last].dont_move
- || (page_table[last].gen != generation))
+ if (!protect_page_p(last, generation))
break;
page_table[last].write_protected = 1;
}
}
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
-
static void
-scavenge_control_stack()
+scavenge_control_stack(struct thread *th)
{
- unsigned long control_stack_size;
-
- /* This is going to be a big problem when we try to port threads
- * to PPC... CLH */
- struct thread *th = arch_os_get_current_thread();
lispobj *control_stack =
(lispobj *)(th->control_stack_start);
+ unsigned long control_stack_size =
+ access_control_stack_pointer(th) - control_stack;
- control_stack_size = current_control_stack_pointer - control_stack;
scavenge(control_stack, control_stack_size);
}
-
-/* Scavenging Interrupt Contexts */
-
-static int boxed_registers[] = BOXED_REGISTERS;
-
-static void
-scavenge_interrupt_context(os_context_t * context)
-{
- int i;
-
-#ifdef reg_LIP
- unsigned long lip;
- unsigned long lip_offset;
- int lip_register_pair;
#endif
- unsigned long pc_code_offset;
-#ifdef ARCH_HAS_LINK_REGISTER
- unsigned long lr_code_offset;
-#endif
-#ifdef ARCH_HAS_NPC_REGISTER
- unsigned long npc_code_offset;
-#endif
-
-#ifdef reg_LIP
- /* Find the LIP's register pair and calculate it's offset */
- /* before we scavenge the context. */
-
- /*
- * I (RLT) think this is trying to find the boxed register that is
- * closest to the LIP address, without going past it. Usually, it's
- * reg_CODE or reg_LRA. But sometimes, nothing can be found.
- */
- lip = *os_context_register_addr(context, reg_LIP);
- lip_offset = 0x7FFFFFFF;
- lip_register_pair = -1;
- for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
- unsigned long reg;
- long offset;
- int index;
-
- index = boxed_registers[i];
- reg = *os_context_register_addr(context, index);
- if ((reg & ~((1L<<N_LOWTAG_BITS)-1)) <= lip) {
- offset = lip - reg;
- if (offset < lip_offset) {
- lip_offset = offset;
- lip_register_pair = index;
- }
- }
- }
-#endif /* reg_LIP */
-
- /* Compute the PC's offset from the start of the CODE */
- /* register. */
- pc_code_offset = *os_context_pc_addr(context)
- - *os_context_register_addr(context, reg_CODE);
-#ifdef ARCH_HAS_NPC_REGISTER
- npc_code_offset = *os_context_npc_addr(context)
- - *os_context_register_addr(context, reg_CODE);
-#endif /* ARCH_HAS_NPC_REGISTER */
-
-#ifdef ARCH_HAS_LINK_REGISTER
- lr_code_offset =
- *os_context_lr_addr(context) -
- *os_context_register_addr(context, reg_CODE);
-#endif
-
- /* Scanvenge all boxed registers in the context. */
- for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
- int index;
- lispobj foo;
-
- index = boxed_registers[i];
- foo = *os_context_register_addr(context, index);
- scavenge(&foo, 1);
- *os_context_register_addr(context, index) = foo;
-
- scavenge((lispobj*) &(*os_context_register_addr(context, index)), 1);
- }
-
-#ifdef reg_LIP
- /* Fix the LIP */
-
- /*
- * But what happens if lip_register_pair is -1?
- * *os_context_register_addr on Solaris (see
- * solaris_register_address in solaris-os.c) will return
- * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
- * that what we really want? My guess is that that is not what we
- * want, so if lip_register_pair is -1, we don't touch reg_LIP at
- * all. But maybe it doesn't really matter if LIP is trashed?
- */
- if (lip_register_pair >= 0) {
- *os_context_register_addr(context, reg_LIP) =
- *os_context_register_addr(context, lip_register_pair)
- + lip_offset;
- }
-#endif /* reg_LIP */
-
- /* Fix the PC if it was in from space */
- if (from_space_p(*os_context_pc_addr(context)))
- *os_context_pc_addr(context) =
- *os_context_register_addr(context, reg_CODE) + pc_code_offset;
-
-#ifdef ARCH_HAS_LINK_REGISTER
- /* Fix the LR ditto; important if we're being called from
- * an assembly routine that expects to return using blr, otherwise
- * harmless */
- if (from_space_p(*os_context_lr_addr(context)))
- *os_context_lr_addr(context) =
- *os_context_register_addr(context, reg_CODE) + lr_code_offset;
-#endif
-
-#ifdef ARCH_HAS_NPC_REGISTER
- if (from_space_p(*os_context_npc_addr(context)))
- *os_context_npc_addr(context) =
- *os_context_register_addr(context, reg_CODE) + npc_code_offset;
-#endif /* ARCH_HAS_NPC_REGISTER */
-}
-
-void
-scavenge_interrupt_contexts(void)
-{
- int i, index;
- os_context_t *context;
-
- struct thread *th=arch_os_get_current_thread();
-
- index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,0));
-
-#if defined(DEBUG_PRINT_CONTEXT_INDEX)
- printf("Number of active contexts: %d\n", index);
-#endif
-
- for (i = 0; i < index; i++) {
- context = th->interrupt_contexts[i];
- scavenge_interrupt_context(context);
- }
-}
-
-#endif
-
-#if defined(LISP_FEATURE_SB_THREAD)
+#if defined(LISP_FEATURE_SB_THREAD) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
static void
preserve_context_registers (os_context_t *c)
{
unsigned long bytes_freed;
page_index_t i;
unsigned long static_space_size;
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
struct thread *th;
-#endif
+
gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
/* The oldest generation can't be raised. */
}
}
}
+#else
+ /* Non-x86oid systems don't have "conservative roots" as such, but
+ * the same mechanism is used for objects pinned for use by alien
+ * code. */
+ for_each_thread(th) {
+ lispobj pin_list = SymbolTlValue(PINNED_OBJECTS,th);
+ while (pin_list != NIL) {
+ struct cons *list_entry =
+ (struct cons *)native_pointer(pin_list);
+ preserve_pointer((void *)list_entry->car);
+ pin_list = list_entry->cdr;
+ }
+ }
#endif
-#ifdef QSHOW
+#if QSHOW
if (gencgc_verbose > 1) {
long num_dont_move_pages = count_dont_move_pages();
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
- npage_bytes(num_dont_move_pages);
+ npage_bytes(num_dont_move_pages));
}
#endif
* If not x86, we need to scavenge the interrupt context(s) and the
* control stack.
*/
- scavenge_interrupt_contexts();
- scavenge_control_stack();
+ {
+ struct thread *th;
+ for_each_thread(th) {
+ scavenge_interrupt_contexts(th);
+ scavenge_control_stack(th);
+ }
+
+ /* Scrub the unscavenged control stack space, so that we can't run
+ * into any stale pointers in a later GC (this is done by the
+ * stop-for-gc handler in the other threads). */
+ scrub_control_stack();
+ }
#endif
/* Scavenge the Lisp functions of the interrupt handlers, taking
generations[generation].alloc_large_unboxed_start_page = 0;
if (generation >= verify_gens) {
- if (gencgc_verbose)
+ if (gencgc_verbose) {
SHOW("verifying");
+ }
verify_gc();
verify_dynamic_space();
}
page_index_t last_page = -1, i;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0))
+ if (page_allocated_p(i) && (page_table[i].bytes_used != 0))
last_page = i;
last_free_page = last_page+1;
page_index_t first_page, last_page;
for (first_page = from; first_page <= to; first_page++) {
- if (page_table[first_page].allocated != FREE_PAGE_FLAG ||
- page_table[first_page].need_to_zero == 0) {
+ if (page_allocated_p(first_page) ||
+ (page_table[first_page].need_to_zero == 0)) {
continue;
}
last_page = first_page + 1;
- while (page_table[last_page].allocated == FREE_PAGE_FLAG &&
- last_page < to &&
- page_table[last_page].need_to_zero == 1) {
+ while (page_free_p(last_page) &&
+ (last_page < to) &&
+ (page_table[last_page].need_to_zero == 1)) {
last_page++;
}
static page_index_t high_water_mark = 0;
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
+ log_generation_stats(gc_logfile, "=== GC Start ===");
gc_active_p = 1;
}
if (gencgc_verbose > 1)
- print_generation_stats(0);
+ print_generation_stats();
do {
/* Collect the generation. */
} else {
raise =
(gen < last_gen)
- || (generations[gen].num_gc >= generations[gen].trigger_age);
+ || (generations[gen].num_gc >= generations[gen].number_of_gcs_before_promotion);
}
if (gencgc_verbose > 1) {
if (gencgc_verbose > 1) {
FSHOW((stderr, "GC of generation %d finished:\n", gen));
- print_generation_stats(0);
+ print_generation_stats();
}
gen++;
&& raise
&& (generations[gen].bytes_allocated
> generations[gen].gc_trigger)
- && (gen_av_mem_age(gen)
- > generations[gen].min_av_mem_age))));
+ && (generation_average_age(gen)
+ > generations[gen].minimum_age_before_gc))));
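+ /* That is: after last_gen, collection proceeds to an older
+ * generation only if objects were just raised into it, its
+ * allocation since the last GC has passed gc_trigger, and its
+ * contents are on average old enough (minimum_age_before_gc)
+ * that collecting it again is unlikely to be wasted work. */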
/* Now if gen-1 was raised all generations before gen are empty.
* If it wasn't raised then all generations before gen-1 are empty.
gc_active_p = 0;
+ log_generation_stats(gc_logfile, "=== GC End ===");
SHOW("returning from collect_garbage");
}
{
page_index_t page;
- if (gencgc_verbose > 1)
+ if (gencgc_verbose > 1) {
SHOW("entering gc_free_heap");
+ }
for (page = 0; page < page_table_pages; page++) {
/* Skip free pages which should already be zero filled. */
- if (page_table[page].allocated != FREE_PAGE_FLAG) {
+ if (page_allocated_p(page)) {
void *page_start, *addr;
/* Mark the page free. The other slots are assumed invalid
/* Double-check that the page is zero filled. */
long *page_start;
page_index_t i;
- gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_free_p(page));
gc_assert(page_table[page].bytes_used == 0);
page_start = (long *)page_address(page);
for (i=0; i<1024; i++) {
}
if (gencgc_verbose > 1)
- print_generation_stats(0);
+ print_generation_stats();
/* Initialize gc_alloc(). */
gc_alloc_generation = 0;
page_table_pages = dynamic_space_size/PAGE_BYTES;
gc_assert(dynamic_space_size == npage_bytes(page_table_pages));
+ /* The page_table must be allocated using "calloc" to initialize
+ * the page structures correctly. There used to be a separate
+ * initialization loop (now commented out; see below) but that was
+ * unnecessary and did hurt startup time. */
page_table = calloc(page_table_pages, sizeof(struct page));
gc_assert(page_table);
heap_base = (void*)DYNAMIC_SPACE_START;
- /* Initialize each page structure. */
- for (i = 0; i < page_table_pages; i++) {
- /* Initialize all pages as free. */
- page_table[i].allocated = FREE_PAGE_FLAG;
- page_table[i].bytes_used = 0;
-
- /* Pages are not write-protected at startup. */
- page_table[i].write_protected = 0;
+ /* The page structures are initialized implicitly when page_table
+ * is allocated with "calloc" above. Formerly we had the following
+ * explicit initialization here (comments converted to C99 style
+ * for readability as C's block comments don't nest):
+ *
+ * // Initialize each page structure.
+ * for (i = 0; i < page_table_pages; i++) {
+ * // Initialize all pages as free.
+ * page_table[i].allocated = FREE_PAGE_FLAG;
+ * page_table[i].bytes_used = 0;
+ *
+ * // Pages are not write-protected at startup.
+ * page_table[i].write_protected = 0;
+ * }
+ *
+ * Without this loop the image starts up much faster when dynamic
+ * space is large -- which it is on 64-bit platforms already by
+ * default -- and when "calloc" for large arrays is implemented
+ * using copy-on-write of a page of zeroes -- which it is at least
+ * on Linux. In this case the pages that page_table is stored
+ * in are mapped and cleared only when the corresponding part of
+ * dynamic space is used. For example, this saves clearing 16 MB of
+ * memory at startup if the page size is 4 KB and the size of
+ * dynamic space is 4 GB.
+ * FREE_PAGE_FLAG must be 0 for this to work correctly which is
+ * asserted below: */
+ {
+ /* Compile time assertion: If triggered, declares an array
+ * of dimension -1 forcing a syntax error. The intent of the
+ * assignment is to avoid an "unused variable" warning. */
+ char assert_free_page_flag_0[(FREE_PAGE_FLAG) ? -1 : 1];
+ assert_free_page_flag_0[0] = assert_free_page_flag_0[0];
}
bytes_allocated = 0;
generations[i].cum_sum_bytes_allocated = 0;
/* the tune-able parameters */
generations[i].bytes_consed_between_gc = 2000000;
- generations[i].trigger_age = 1;
- generations[i].min_av_mem_age = 0.75;
+ generations[i].number_of_gcs_before_promotion = 1;
+ generations[i].minimum_age_before_gc = 0.75;
generations[i].lutexes = NULL;
}
void *alloc_ptr = (void *)get_alloc_pointer();
lispobj *prev=(lispobj *)page_address(page);
generation_index_t gen = PSEUDO_STATIC_GENERATION;
-
do {
lispobj *first,*ptr= (lispobj *)page_address(page);
- page_table[page].allocated = BOXED_PAGE_FLAG;
- page_table[page].gen = gen;
- page_table[page].bytes_used = PAGE_BYTES;
- page_table[page].large_object = 0;
- page_table[page].write_protected = 0;
- page_table[page].write_protected_cleared = 0;
- page_table[page].dont_move = 0;
- page_table[page].need_to_zero = 1;
+
+ if (!gencgc_partial_pickup || page_allocated_p(page)) {
+ /* It is possible, though rare, for the saved page table
+ * to contain free pages below alloc_ptr. */
+ page_table[page].gen = gen;
+ page_table[page].bytes_used = PAGE_BYTES;
+ page_table[page].large_object = 0;
+ page_table[page].write_protected = 0;
+ page_table[page].write_protected_cleared = 0;
+ page_table[page].dont_move = 0;
+ page_table[page].need_to_zero = 1;
+ }
if (!gencgc_partial_pickup) {
+ page_table[page].allocated = BOXED_PAGE_FLAG;
first=gc_search_space(prev,(ptr+2)-prev,ptr);
- if(ptr == first) prev=ptr;
+ if(ptr == first)
+ prev=ptr;
page_table[page].region_start_offset =
page_address(page) - (void *)prev;
}
{
gencgc_pickup_dynamic();
}
-
-
\f
/* alloc(..) is the external interface for memory allocation. It
* The check for a GC trigger is only performed when the current
* region is full, so in most cases it's not needed. */
-lispobj *
-alloc(long nbytes)
+static inline lispobj *
+general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region,
+ struct thread *thread)
{
- struct thread *thread=arch_os_get_current_thread();
- struct alloc_region *region=
-#ifdef LISP_FEATURE_SB_THREAD
- thread ? &(thread->alloc_region) : &boxed_region;
-#else
- &boxed_region;
-#endif
#ifndef LISP_FEATURE_WIN32
lispobj alloc_signal;
#endif
gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
&& ((nbytes & LOWTAG_MASK) == 0));
-#if 0
- if(all_threads)
- /* there are a few places in the C code that allocate data in the
- * heap before Lisp starts. This is before interrupts are enabled,
- * so we don't need to check for pseudo-atomic */
-#ifdef LISP_FEATURE_SB_THREAD
- if(!get_psuedo_atomic_atomic(th)) {
- register u32 fs;
- fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
- th,th->os_thread);
- __asm__("movl %fs,%0" : "=r" (fs) : );
- fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
- debug_get_fs(),th->tls_cookie);
- lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
- }
-#else
- gc_assert(get_pseudo_atomic_atomic(th));
-#endif
-#endif
+ /* Must be inside a PA section. */
+ gc_assert(get_pseudo_atomic_atomic(thread));
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
return(new_obj); /* yup */
}
- /* we have to go the long way around, it seems. Check whether
- * we should GC in the near future
+ /* we have to go the long way around, it seems. Check whether we
+ * should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- gc_assert(get_pseudo_atomic_atomic(thread));
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */
/* set things up so that GC happens when we finish the PA
* section */
SetSymbolValue(GC_PENDING,T,thread);
- if (SymbolValue(GC_INHIBIT,thread) == NIL)
- set_pseudo_atomic_interrupted(thread);
+ if (SymbolValue(GC_INHIBIT,thread) == NIL) {
+ set_pseudo_atomic_interrupted(thread);
+#ifdef LISP_FEATURE_PPC
+ /* PPC calls alloc() from a trap or from pa_alloc(),
+ * look up the most recent context if it's from a trap. */
+ {
+ os_context_t *context =
+ thread->interrupt_data->allocation_trap_context;
+ maybe_save_gc_mask_and_block_deferrables
+ (context ? os_context_sigmask_addr(context) : NULL);
+ }
+#else
+ maybe_save_gc_mask_and_block_deferrables(NULL);
+#endif
+ }
}
}
- new_obj = gc_alloc_with_region(nbytes, BOXED_PAGE_FLAG, region, 0);
+ new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
#ifndef LISP_FEATURE_WIN32
alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
if ((signed long) alloc_signal <= 0) {
SetSymbolValue(ALLOC_SIGNAL, T, thread);
-#ifdef LISP_FEATURE_SB_THREAD
- kill_thread_safely(thread->os_thread, SIGPROF);
-#else
raise(SIGPROF);
-#endif
} else {
SetSymbolValue(ALLOC_SIGNAL,
alloc_signal - (1 << N_FIXNUM_TAG_BITS),
return (new_obj);
}
+
+lispobj *
+general_alloc(long nbytes, int page_type_flag)
+{
+ struct thread *thread = arch_os_get_current_thread();
+ /* Select the correct region, and call general_alloc_internal with it.
+ * For anything other than boxed allocation we must lock first, since the
+ * region is shared. */
+ if (BOXED_PAGE_FLAG & page_type_flag) {
+#ifdef LISP_FEATURE_SB_THREAD
+ struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
+#else
+ struct alloc_region *region = &boxed_region;
+#endif
+ return general_alloc_internal(nbytes, page_type_flag, region, thread);
+ } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ lispobj * obj;
+ int ret;
+ /* Don't hide the lock calls inside gc_assert(): the lock must be
+ * taken even if assertions are compiled out. */
+ ret = thread_mutex_lock(&allocation_lock);
+ gc_assert(ret == 0);
+ obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
+ ret = thread_mutex_unlock(&allocation_lock);
+ gc_assert(ret == 0);
+ return obj;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+}
+
+lispobj *
+alloc(long nbytes)
+{
+ gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
+ return general_alloc(nbytes, BOXED_PAGE_FLAG);
+}
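+
+/* Note that alloc() must be called with the thread inside a
+ * pseudo-atomic section (checked by the gc_assert above);
+ * general_alloc_internal relies on this to defer any triggered GC
+ * until the pseudo-atomic section is left. */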
\f
/*
* shared support for the OS-dependent signal handlers which
* catch GENCGC-related write-protect violations
*/
-
void unhandled_sigmemoryfault(void* addr);
/* Depending on which OS we're running under, different signals might
{
page_index_t page_index = find_page_index(fault_addr);
-#ifdef QSHOW_SIGNALS
+#if QSHOW_SIGNALS
FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
fault_addr, page_index));
#endif
return 0;
} else {
+ int ret;
+ ret = thread_mutex_lock(&free_pages_lock);
+ gc_assert(ret == 0);
if (page_table[page_index].write_protected) {
/* Unprotect the page. */
os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
page_index, boxed_region.first_page,
boxed_region.last_page);
}
+ ret = thread_mutex_unlock(&free_pages_lock);
+ gc_assert(ret == 0);
/* Don't worry, we can handle it. */
return 1;
}
page_index_t i;
for (i = 0; i < last_free_page; i++) {
- if (page_table[i].allocated == FREE_PAGE_FLAG) {
+ if (page_free_p(i)) {
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(i),
PAGE_BYTES,