#include "validate.h"
#include "lispregs.h"
#include "arch.h"
+#include "fixnump.h"
#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
void do_pending_interrupt(void);
/* forward declarations */
-int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed);
+long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed);
static void gencgc_pickup_dynamic(void);
boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
/* the source and destination generations. These are set before a GC starts
* scavenging. */
-int from_space;
-int new_space;
+long from_space;
+long new_space;
/* An array of page structures is statically allocated.
* is needed. */
static void *heap_base = NULL;
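+/* On 64-bit targets the code-fixup vectors and hash-table support
+ * vectors are (UNSIGNED-BYTE 64) arrays rather than (UNSIGNED-BYTE 32)
+ * ones, so pick the word-sized widetag once and use it below. */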
+#if N_WORD_BITS == 32
+ #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
+#elif N_WORD_BITS == 64
+ #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
+#endif
/* Calculate the start address for the given page number. */
inline void *
-page_address(int page_num)
+page_address(long page_num)
{
return (heap_base + (page_num * PAGE_BYTES));
}
/* Find the page index within the page_table for the given
* address. Return -1 on failure. */
-inline int
+inline long
find_page_index(void *addr)
{
- int index = addr-heap_base;
+ long index = addr-heap_base;
if (index >= 0) {
- index = ((unsigned int)index)/PAGE_BYTES;
+ index = ((unsigned long)index)/PAGE_BYTES;
if (index < NUM_PAGES)
return (index);
}
struct generation {
/* the first page that gc_alloc() checks on its next call */
- int alloc_start_page;
+ long alloc_start_page;
/* the first page that gc_alloc_unboxed() checks on its next call */
- int alloc_unboxed_start_page;
+ long alloc_unboxed_start_page;
/* the first page that gc_alloc_large (boxed) considers on its next
* call. (Although it always allocates after the boxed_region.) */
- int alloc_large_start_page;
+ long alloc_large_start_page;
/* the first page that gc_alloc_large (unboxed) considers on its
* next call. (Although it always allocates after the
* current_unboxed_region.) */
- int alloc_large_unboxed_start_page;
+ long alloc_large_unboxed_start_page;
/* the bytes allocated to this generation */
- int bytes_allocated;
+ long bytes_allocated;
/* the number of bytes at which to trigger a GC */
- int gc_trigger;
+ long gc_trigger;
/* to calculate a new level for gc_trigger */
- int bytes_consed_between_gc;
+ long bytes_consed_between_gc;
/* the number of GCs since the last raise */
int num_gc;
* objects are added from a GC of a younger generation. Dividing by
* the bytes_allocated will give the average age of the memory in
* this generation since its last GC. */
- int cum_sum_bytes_allocated;
+ long cum_sum_bytes_allocated;
/* a minimum average memory age before a GC will occur helps
* prevent a GC when a large number of new live objects have been
* ALLOCATION_POINTER which is used by the room function to limit its
* search of the heap. XX Gencgc obviously needs to be better
* integrated with the Lisp code. */
-static int last_free_page;
+static long last_free_page;
\f
/* This lock is to prevent multiple threads from simultaneously
* allocating new regions which overlap each other. Note that the
/* Count the number of pages which are write-protected within the
* given generation. */
-static int
+static long
count_write_protect_generation_pages(int generation)
{
- int i;
- int count = 0;
+ long i;
+ long count = 0;
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated != FREE_PAGE_FLAG)
}
/* Count the number of pages within the given generation. */
-static int
+static long
count_generation_pages(int generation)
{
- int i;
- int count = 0;
+ long i;
+ long count = 0;
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated != 0)
}
#ifdef QSHOW
-static int
+static long
count_dont_move_pages(void)
{
- int i;
- int count = 0;
+ long i;
+ long count = 0;
for (i = 0; i < last_free_page; i++) {
if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
++count;
/* Work through the pages and add up the number of bytes used for the
* given generation. */
-static int
+static long
count_generation_bytes_allocated (int gen)
{
- int i;
- int result = 0;
+ long i;
+ long result = 0;
for (i = 0; i < last_free_page; i++) {
if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
result += page_table[i].bytes_used;
* are allocated, although they will initially be empty.
*/
static void
-gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
{
- int first_page;
- int last_page;
- int bytes_found;
- int i;
+ long first_page;
+ long last_page;
+ long bytes_found;
+ long i;
/*
FSHOW((stderr,
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- get_spinlock(&free_pages_lock,(int) alloc_region);
+ get_spinlock(&free_pages_lock,(long) alloc_region);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
- int *p;
- for (p = (int *)alloc_region->start_addr;
- p < (int *)alloc_region->end_addr; p++) {
+ long *p;
+ for (p = (long *)alloc_region->start_addr;
+ p < (long *)alloc_region->end_addr; p++) {
if (*p != 0) {
/* KLUDGE: It would be nice to use %lx and explicit casts
* (long) in code like this, so that it is less likely to
* scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
-static int new_areas_ignore_page;
+static long new_areas_ignore_page;
struct new_area {
- int page;
- int offset;
- int size;
+ long page;
+ long offset;
+ long size;
};
static struct new_area (*new_areas)[];
-static int new_areas_index;
-int max_new_areas;
+static long new_areas_index;
+long max_new_areas;
/* Add a new area to new_areas. */
static void
-add_new_area(int first_page, int offset, int size)
+add_new_area(long first_page, long offset, long size)
{
-    unsigned new_area_start,c;
+    unsigned long new_area_start,c;
-    int i;
+    long i;
/* Ignore if full. */
if (new_areas_index >= NUM_NEW_AREAS)
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
- int more;
- int first_page;
- int next_page;
- int bytes_used;
- int orig_first_page_bytes_used;
- int region_size;
- int byte_cnt;
+ long more;
+ long first_page;
+ long next_page;
+ long bytes_used;
+ long orig_first_page_bytes_used;
+ long region_size;
+ long byte_cnt;
first_page = alloc_region->first_page;
next_page = first_page+1;
- get_spinlock(&free_pages_lock,(int) alloc_region);
+ get_spinlock(&free_pages_lock,(long) alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
/* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
gc_set_region_empty(alloc_region);
}
-static inline void *gc_quick_alloc(int nbytes);
+static inline void *gc_quick_alloc(long nbytes);
/* Allocate a possibly large object. */
void *
-gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
{
- int first_page;
- int last_page;
- int orig_first_page_bytes_used;
- int byte_cnt;
- int more;
- int bytes_used;
- int next_page;
+ long first_page;
+ long last_page;
+ long orig_first_page_bytes_used;
+ long byte_cnt;
+ long more;
+ long bytes_used;
+ long next_page;
- get_spinlock(&free_pages_lock,(int) alloc_region);
+ get_spinlock(&free_pages_lock,(long) alloc_region);
if (unboxed) {
first_page =
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
-int
-gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed)
+long
+gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed)
{
- int first_page;
- int last_page;
- int region_size;
- int restart_page=*restart_page_ptr;
- int bytes_found;
- int num_pages;
- int large_p=(nbytes>=large_object_size);
+ long first_page;
+ long last_page;
+ long region_size;
+ long restart_page=*restart_page_ptr;
+ long bytes_found;
+ long num_pages;
+ long large_p=(nbytes>=large_object_size);
gc_assert(free_pages_lock);
/* Search for a contiguous free space of at least nbytes. If it's
* functions will eventually call this */
void *
-gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region,
+gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
int quick_p)
{
void *new_free_pointer;
/* Check whether there is room in the current alloc region. */
new_free_pointer = my_region->free_pointer + nbytes;
+ /* fprintf(stderr, "alloc %d bytes from %p to %p\n", nbytes,
+ my_region->free_pointer, new_free_pointer); */
+
if (new_free_pointer <= my_region->end_addr) {
/* If so then allocate from the current alloc region. */
void *new_obj = my_region->free_pointer;
* region */
void *
-gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
+gc_general_alloc(long nbytes,int unboxed_p,int quick_p)
{
struct alloc_region *my_region =
unboxed_p ? &unboxed_region : &boxed_region;
}
static inline void *
-gc_quick_alloc(int nbytes)
+gc_quick_alloc(long nbytes)
{
return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}
static inline void *
-gc_quick_alloc_large(int nbytes)
+gc_quick_alloc_large(long nbytes)
{
return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}
static inline void *
-gc_alloc_unboxed(int nbytes)
+gc_alloc_unboxed(long nbytes)
{
return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
}
static inline void *
-gc_quick_alloc_unboxed(int nbytes)
+gc_quick_alloc_unboxed(long nbytes)
{
return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
static inline void *
-gc_quick_alloc_large_unboxed(int nbytes)
+gc_quick_alloc_large_unboxed(long nbytes)
{
return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
* scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
*/
-extern int (*scavtab[256])(lispobj *where, lispobj object);
+extern long (*scavtab[256])(lispobj *where, lispobj object);
extern lispobj (*transother[256])(lispobj object);
-extern int (*sizetab[256])(lispobj *where);
+extern long (*sizetab[256])(lispobj *where);
/* Copy a large boxed object. If the object is in a large object
* region then it is simply promoted, else it is copied. If it's large
* Vectors may have shrunk. If the object is not copied the space
* needs to be reclaimed, and the page_tables corrected. */
lispobj
-copy_large_object(lispobj object, int nwords)
+copy_large_object(lispobj object, long nwords)
{
int tag;
lispobj *new;
- int first_page;
+ long first_page;
gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
/* Promote the object. */
- int remaining_bytes;
- int next_page;
- int bytes_freed;
- int old_bytes_used;
+ long remaining_bytes;
+ long next_page;
+ long bytes_freed;
+ long old_bytes_used;
/* Note: Any page write-protection must be removed, else a
* later scavenge_newspace may incorrectly not scavenge these
next_page++;
}
- generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
- generations[new_space].bytes_allocated += 4*nwords;
+ generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
+ bytes_freed;
+ generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
bytes_allocated -= bytes_freed;
/* Add the region to the new_areas if requested. */
/* to copy unboxed objects */
lispobj
-copy_unboxed_object(lispobj object, int nwords)
+copy_unboxed_object(lispobj object, long nwords)
{
- int tag;
+ long tag;
lispobj *new;
gc_assert(is_lisp_pointer(object));
* KLUDGE: There's a lot of cut-and-paste duplication between this
* function and copy_large_object(..). -- WHN 20000619 */
lispobj
-copy_large_unboxed_object(lispobj object, int nwords)
+copy_large_unboxed_object(lispobj object, long nwords)
{
int tag;
lispobj *new;
- int first_page;
+ long first_page;
gc_assert(is_lisp_pointer(object));
gc_assert(from_space_p(object));
/* Promote the object. Note: Unboxed objects may have been
* allocated to a BOXED region so it may be necessary to
* change the region to UNBOXED. */
- int remaining_bytes;
- int next_page;
- int bytes_freed;
- int old_bytes_used;
+ long remaining_bytes;
+ long next_page;
+ long bytes_freed;
+ long old_bytes_used;
gc_assert(page_table[first_page].first_object_offset == 0);
void
sniff_code_object(struct code *code, unsigned displacement)
{
- int nheader_words, ncode_words, nwords;
+ long nheader_words, ncode_words, nwords;
void *p;
void *constants_start_addr, *constants_end_addr;
void *code_start_addr, *code_end_addr;
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
- int nheader_words, ncode_words, nwords;
+ long nheader_words, ncode_words, nwords;
void *constants_start_addr, *constants_end_addr;
void *code_start_addr, *code_end_addr;
lispobj fixups = NIL;
/*SHOW("got fixups");*/
- if (widetag_of(fixups_vector->header) ==
- SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG) {
+ if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
/* Got the fixups for the code block. Now work through the vector,
and apply a fixup at each address. */
- int length = fixnum_value(fixups_vector->length);
- int i;
+ long length = fixnum_value(fixups_vector->length);
+ long i;
for (i = 0; i < length; i++) {
unsigned offset = fixups_vector->data[i];
/* Now check the current value of offset. */
*(unsigned *)((unsigned)code_start_addr + offset) =
old_value - displacement;
}
+ } else {
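+        /* The fixup vector should be a word-sized unsigned-byte
+         * array; report anything else instead of silently skipping
+         * the fixups. */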
+ fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
}
/* Check for possible errors. */
static int
scav_vector(lispobj *where, lispobj object)
{
- unsigned int kv_length;
+ unsigned long kv_length;
lispobj *kv_vector;
- unsigned int length = 0; /* (0 = dummy to stop GCC warning) */
+ unsigned long length = 0; /* (0 = dummy to stop GCC warning) */
lispobj *hash_table;
lispobj empty_symbol;
- unsigned int *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- unsigned int *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- unsigned int *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */
+ unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
+ unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
+ unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */
lispobj weak_p_obj;
-    unsigned next_vector_length = 0;
+    unsigned long next_vector_length = 0;
if (is_lisp_pointer(index_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
- SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
- index_vector = ((unsigned int *)native_pointer(index_vector_obj)) + 2;
+ SIMPLE_ARRAY_WORD_WIDETAG)) {
+ index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2;
/*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
- length = fixnum_value(((unsigned int *)native_pointer(index_vector_obj))[1]);
+ length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
/*FSHOW((stderr, "/length = %d\n", length));*/
} else {
lose("invalid index_vector %x", index_vector_obj);
if (is_lisp_pointer(next_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
- SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
- next_vector = ((unsigned int *)native_pointer(next_vector_obj)) + 2;
+ SIMPLE_ARRAY_WORD_WIDETAG)) {
+ next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2;
/*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
- next_vector_length = fixnum_value(((unsigned int *)native_pointer(next_vector_obj))[1]);
+ next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
/*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
} else {
lose("invalid next_vector %x", next_vector_obj);
lispobj hash_vector_obj = hash_table[15];
if (is_lisp_pointer(hash_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(hash_vector_obj))
- == SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
- hash_vector = ((unsigned int *)native_pointer(hash_vector_obj)) + 2;
+ (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
+ SIMPLE_ARRAY_WORD_WIDETAG)){
+ hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2;
/*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
- gc_assert(fixnum_value(((unsigned int *)native_pointer(hash_vector_obj))[1])
+ gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
== next_vector_length);
} else {
hash_vector = NULL;
/* Work through the KV vector. */
{
- int i;
+ long i;
for (i = 1; i < next_vector_length; i++) {
lispobj old_key = kv_vector[2*i];
- unsigned int old_index = (old_key & 0x1fffffff)%length;
+
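+        /* An EQ-based key hashes to an index derived from its address
+         * bits, masked to a positive word-sized value, so the index
+         * must be recomputed whenever GC moves the key. */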
+#if N_WORD_BITS == 32
+ unsigned long old_index = (old_key & 0x1fffffff)%length;
+#elif N_WORD_BITS == 64
+ unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
+#endif
/* Scavenge the key and value. */
scavenge(&kv_vector[2*i],2);
/* Check whether the key has moved and is EQ based. */
{
lispobj new_key = kv_vector[2*i];
- unsigned int new_index = (new_key & 0x1fffffff)%length;
+#if N_WORD_BITS == 32
+ unsigned long new_index = (new_key & 0x1fffffff)%length;
+#elif N_WORD_BITS == 64
+ unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
+#endif
if ((old_index != new_index) &&
((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
((new_key != empty_symbol) ||
(kv_vector[2*i] != empty_symbol))) {
- /*FSHOW((stderr,
- "* EQ key %d moved from %x to %x; index %d to %d\n",
- i, old_key, new_key, old_index, new_index));*/
+ /*FSHOW((stderr,
+ "* EQ key %d moved from %x to %x; index %d to %d\n",
+ i, old_key, new_key, old_index, new_index));*/
if (index_vector[old_index] != 0) {
- /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
+ /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
/* Unlink the key from the old_index chain. */
if (index_vector[old_index] == i) {
/*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
while (next != 0) {
- /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
+ /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
if (next == i) {
/* Unlink it. */
next_vector[prior] = next_vector[next];
#define WEAK_POINTER_NWORDS \
CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
-static int
+static long
scav_weak_pointer(lispobj *where, lispobj object)
{
struct weak_pointer *wp = weak_pointers;
lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
return NULL;
- return (search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *) pointer));
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *) pointer));
}
lispobj *
lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
return NULL;
- return (search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *) pointer));
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *) pointer));
}
/* a faster version for searching the dynamic space. This will work even
lispobj *
search_dynamic_space(void *pointer)
{
- int page_index = find_page_index(pointer);
+ long page_index = find_page_index(pointer);
lispobj *start;
/* The address may be invalid, so do some checks. */
return NULL;
start = (lispobj *)((void *)page_address(page_index)
+ page_table[page_index].first_object_offset);
- return (search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *)pointer));
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *)pointer));
}
/* Is there any possibility that pointer is a valid Lisp object
/* Is it plausible cons? */
if ((is_lisp_pointer(start_addr[0])
|| (fixnump(start_addr[0]))
- || (widetag_of(start_addr[0]) == BASE_CHAR_WIDETAG)
+ || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
+#if N_WORD_BITS == 64
+ || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
+#endif
|| (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
&& (is_lisp_pointer(start_addr[1])
|| (fixnump(start_addr[1]))
- || (widetag_of(start_addr[1]) == BASE_CHAR_WIDETAG)
+ || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
+#if N_WORD_BITS == 64
+ || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
+#endif
|| (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
break;
else {
}
switch (widetag_of(start_addr[0])) {
case UNBOUND_MARKER_WIDETAG:
- case BASE_CHAR_WIDETAG:
+ case CHARACTER_WIDETAG:
+#if N_WORD_BITS == 64
+ case SINGLE_FLOAT_WIDETAG:
+#endif
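+        /* These widetags belong to immediate objects and so never
+         * start a heap object; a pointer to one must be bogus. */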
if (gencgc_verbose)
FSHOW((stderr,
"*Wo3: %x %x %x\n",
#endif
case SIMPLE_ARRAY_WIDETAG:
case COMPLEX_BASE_STRING_WIDETAG:
+#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
+ case COMPLEX_CHARACTER_STRING_WIDETAG:
+#endif
case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case FDEFN_WIDETAG:
case CODE_HEADER_WIDETAG:
case BIGNUM_WIDETAG:
+#if N_WORD_BITS != 64
case SINGLE_FLOAT_WIDETAG:
+#endif
case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
case LONG_FLOAT_WIDETAG:
#endif
case SIMPLE_BASE_STRING_WIDETAG:
+#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
+ case SIMPLE_CHARACTER_STRING_WIDETAG:
+#endif
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+#endif
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
+#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
+#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
+ case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
+ case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
+#endif
case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
static void
maybe_adjust_large_object(lispobj *where)
{
- int first_page;
- int nwords;
+ long first_page;
+ long nwords;
- int remaining_bytes;
- int next_page;
- int bytes_freed;
- int old_bytes_used;
+ long remaining_bytes;
+ long next_page;
+ long bytes_freed;
+ long old_bytes_used;
int boxed;
break;
case BIGNUM_WIDETAG:
case SIMPLE_BASE_STRING_WIDETAG:
+#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
+ case SIMPLE_CHARACTER_STRING_WIDETAG:
+#endif
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+#endif
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
+#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
+#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
+ case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
+ case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
+#endif
case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
static void
preserve_pointer(void *addr)
{
- int addr_page_index = find_page_index(addr);
- int first_page;
- int i;
+ long addr_page_index = find_page_index(addr);
+ long first_page;
+ long i;
unsigned region_allocation;
/* quick check 1: Address is quite likely to have been invalid. */
*
* We return 1 if the page was write-protected, else 0. */
static int
-update_page_write_prot(int page)
+update_page_write_prot(long page)
{
int gen = page_table[page].gen;
- int j;
+ long j;
int wp_it = 1;
void **page_addr = (void **)page_address(page);
- int num_words = page_table[page].bytes_used / N_WORD_BYTES;
+ long num_words = page_table[page].bytes_used / N_WORD_BYTES;
/* Shouldn't be a free page. */
gc_assert(page_table[page].allocated != FREE_PAGE_FLAG);
for (j = 0; j < num_words; j++) {
void *ptr = *(page_addr+j);
- int index = find_page_index(ptr);
+ long index = find_page_index(ptr);
/* Check that it's in the dynamic space */
if (index != -1)
static void
scavenge_generation(int generation)
{
- int i;
+ long i;
int num_wp = 0;
#define SC_GEN_CK 0
if ((page_table[i].allocated & BOXED_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
- int last_page,j;
+ long last_page,j;
int write_protected=1;
/* This should be the start of a region */
break;
}
if (!write_protected) {
- scavenge(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES)/4);
+ scavenge(page_address(i),
+ (page_table[last_page].bytes_used +
+ (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
/* Now scan the pages and write protect those that
* don't have pointers to younger generations. */
static void
scavenge_newspace_generation_one_scan(int generation)
{
- int i;
+ long i;
FSHOW((stderr,
"/starting one full scan of newspace generation %d\n",
/* (This may be redundant as write_protected is now
* cleared before promotion.) */
|| (page_table[i].dont_move == 1))) {
- int last_page;
+ long last_page;
int all_wp=1;
/* The scavenge will start at the first_object_offset of page i.
/* Do a limited check for write-protected pages. */
if (!all_wp) {
- int size;
+ long size;
size = (page_table[last_page].bytes_used
+ (last_page-i)*PAGE_BYTES
- - page_table[i].first_object_offset)/4;
+ - page_table[i].first_object_offset)/N_WORD_BYTES;
new_areas_ignore_page = last_page;
scavenge(page_address(i) +
static void
scavenge_newspace_generation(int generation)
{
- int i;
+ long i;
/* the new_areas array currently being written to by gc_alloc() */
struct new_area (*current_new_areas)[] = &new_areas_1;
- int current_new_areas_index;
+ long current_new_areas_index;
/* the new_areas created by the previous scavenge cycle */
struct new_area (*previous_new_areas)[] = NULL;
- int previous_new_areas_index;
+ long previous_new_areas_index;
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
/* Work through previous_new_areas. */
for (i = 0; i < previous_new_areas_index; i++) {
- int page = (*previous_new_areas)[i].page;
- int offset = (*previous_new_areas)[i].offset;
- int size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+ long page = (*previous_new_areas)[i].page;
+ long offset = (*previous_new_areas)[i].offset;
+ long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
scavenge(page_address(page)+offset, size);
}
static void
unprotect_oldspace(void)
{
- int i;
+ long i;
for (i = 0; i < last_free_page; i++) {
if ((page_table[i].allocated != FREE_PAGE_FLAG)
* assumes that all objects have been copied or promoted to an older
* generation. Bytes_allocated and the generation bytes_allocated
* counter are updated. The number of bytes freed is returned. */
-static int
+static long
free_oldspace(void)
{
- int bytes_freed = 0;
- int first_page, last_page;
+ long bytes_freed = 0;
+ long first_page, last_page;
first_page = 0;
addr);
}
} else {
- int *page_start;
+ long *page_start;
- page_start = (int *)page_address(first_page);
+ page_start = (long *)page_address(first_page);
memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
}
print_ptr(lispobj *addr)
{
/* If addr is in the dynamic space then out the page information. */
- int pi1 = find_page_index((void*)addr);
+ long pi1 = find_page_index((void*)addr);
if (pi1 != -1)
fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
- (unsigned int) addr,
+ (unsigned long) addr,
pi1,
page_table[pi1].allocated,
page_table[pi1].gen,
}
#endif
-extern int undefined_tramp;
+extern long undefined_tramp;
static void
verify_space(lispobj *start, size_t words)
lispobj thing = *(lispobj*)start;
if (is_lisp_pointer(thing)) {
- int page_index = find_page_index((void*)thing);
- int to_readonly_space =
+ long page_index = find_page_index((void*)thing);
+ long to_readonly_space =
(READ_ONLY_SPACE_START <= thing &&
thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
- int to_static_space =
+ long to_static_space =
(STATIC_SPACE_START <= thing &&
thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
case COMPLEX_WIDETAG:
case SIMPLE_ARRAY_WIDETAG:
case COMPLEX_BASE_STRING_WIDETAG:
+#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
+ case COMPLEX_CHARACTER_STRING_WIDETAG:
+#endif
case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
case VALUE_CELL_HEADER_WIDETAG:
case SYMBOL_HEADER_WIDETAG:
- case BASE_CHAR_WIDETAG:
+ case CHARACTER_WIDETAG:
+#if N_WORD_BITS == 64
+ case SINGLE_FLOAT_WIDETAG:
+#endif
case UNBOUND_MARKER_WIDETAG:
case INSTANCE_HEADER_WIDETAG:
case FDEFN_WIDETAG:
{
lispobj object = *start;
struct code *code;
- int nheader_words, ncode_words, nwords;
+ long nheader_words, ncode_words, nwords;
lispobj fheaderl;
struct simple_fun *fheaderp;
/* unboxed objects */
case BIGNUM_WIDETAG:
+#if N_WORD_BITS != 64
case SINGLE_FLOAT_WIDETAG:
+#endif
case DOUBLE_FLOAT_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
case LONG_FLOAT_WIDETAG:
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
case SIMPLE_BASE_STRING_WIDETAG:
+#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
+ case SIMPLE_CHARACTER_STRING_WIDETAG:
+#endif
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_NIL_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+#endif
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
+#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
+#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
+ case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
+#endif
+#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
+ case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
+#endif
case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
* Some counts of lispobjs are called foo_count; it might be good
* to grep for all foo_size and rename the appropriate ones to
* foo_count. */
- int read_only_space_size =
+ long read_only_space_size =
(lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
- (lispobj*)READ_ONLY_SPACE_START;
- int static_space_size =
+ long static_space_size =
(lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
- (lispobj*)STATIC_SPACE_START;
struct thread *th;
for_each_thread(th) {
- int binding_stack_size =
+ long binding_stack_size =
(lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
- (lispobj*)th->binding_stack_start;
verify_space(th->binding_stack_start, binding_stack_size);
if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)) {
- int last_page;
+ long last_page;
int region_allocation = page_table[i].allocated;
/* This should be the start of a contiguous block */
break;
verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES)/4);
+ + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
i = last_page;
}
}
static void
verify_zero_fill(void)
{
- int page;
+ long page;
for (page = 0; page < last_free_page; page++) {
if (page_table[page].allocated == FREE_PAGE_FLAG) {
/* The whole page should be zero filled. */
- int *start_addr = (int *)page_address(page);
- int size = 1024;
- int i;
+ long *start_addr = (long *)page_address(page);
+            long size = PAGE_BYTES / N_WORD_BYTES;
+ long i;
for (i = 0; i < size; i++) {
if (start_addr[i] != 0) {
lose("free page not zero at %x", start_addr + i);
}
}
} else {
- int free_bytes = PAGE_BYTES - page_table[page].bytes_used;
+ long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
- int *start_addr = (int *)((unsigned)page_address(page)
+                long *start_addr = (long *)((unsigned long)page_address(page)
+ page_table[page].bytes_used);
- int size = free_bytes / N_WORD_BYTES;
- int i;
+ long size = free_bytes / N_WORD_BYTES;
+ long i;
for (i = 0; i < size; i++) {
if (start_addr[i] != 0) {
lose("free region not zero at %x", start_addr + i);
static void
verify_dynamic_space(void)
{
- int i;
+ long i;
for (i = 0; i < NUM_GENERATIONS; i++)
verify_generation(i);
static void
write_protect_generation_pages(int generation)
{
- int i;
+ long i;
gc_assert(generation < NUM_GENERATIONS);
* temporary generation (NUM_GENERATIONS), and lowered when
* done. Set up this new generation. There should be no pages
* allocated to it yet. */
- if (!raise)
- gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
+ if (!raise) {
+ gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
+ }
/* Set the global src and dest. generations */
from_space = generation;
void **ptr;
void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
- int i,free;
+ long i,free;
if(th==arch_os_get_current_thread()) {
esp = (void **) &raise;
} else {
free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
for(i=free-1;i>=0;i--) {
os_context_t *c=th->interrupt_contexts[i];
- esp1 = (void **) *os_context_register_addr(c,reg_ESP);
+ esp1 = (void **) *os_context_register_addr(c,reg_SP);
if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
if(esp1<esp) esp=esp1;
for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
#ifdef QSHOW
if (gencgc_verbose > 1) {
- int num_dont_move_pages = count_dont_move_pages();
+ long num_dont_move_pages = count_dont_move_pages();
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
/* As a check re-scavenge the newspace once; no new objects should
* be found. */
{
- int old_bytes_allocated = bytes_allocated;
- int bytes_allocated;
+ long old_bytes_allocated = bytes_allocated;
+ long bytes_allocated;
/* Start with a full scavenge. */
scavenge_newspace_generation_one_scan(new_space);
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
-int
+long
update_x86_dynamic_space_free_pointer(void)
{
- int last_page = -1;
- int i;
+ long last_page = -1;
+ long i;
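+    /* Only pages below the current high-water mark can be in use:
+     * the allocator bumps last_free_page whenever a region extends
+     * past it, so it bounds the scan. */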
- for (i = 0; i < NUM_PAGES; i++)
+ for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0))
last_page = i;
int gen = 0;
int raise;
int gen_to_wp;
- int i;
+ long i;
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
void
gc_free_heap(void)
{
- int page;
+ long page;
if (gencgc_verbose > 1)
SHOW("entering gc_free_heap");
}
} else if (gencgc_zero_check_during_free_heap) {
/* Double-check that the page is zero filled. */
- int *page_start, i;
+ long *page_start, i;
gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
gc_assert(page_table[page].bytes_used == 0);
- page_start = (int *)page_address(page);
+ page_start = (long *)page_address(page);
for (i=0; i<1024; i++) {
if (page_start[i] != 0) {
lose("free region not zero at %x", page_start + i);
void
gc_init(void)
{
- int i;
+ long i;
gc_init_tables();
scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
static void
gencgc_pickup_dynamic(void)
{
- int page = 0;
- int alloc_ptr = SymbolValue(ALLOCATION_POINTER,0);
+ long page = 0;
+ long alloc_ptr = SymbolValue(ALLOCATION_POINTER,0);
lispobj *prev=(lispobj *)page_address(page);
do {
page_table[page].bytes_used = PAGE_BYTES;
page_table[page].large_object = 0;
- first=search_space(prev,(ptr+2)-prev,ptr);
+ first=gc_search_space(prev,(ptr+2)-prev,ptr);
if(ptr == first) prev=ptr;
page_table[page].first_object_offset =
(void *)prev - page_address(page);
* region is full, so in most cases it's not needed. */
char *
-alloc(int nbytes)
+alloc(long nbytes)
{
struct thread *th=arch_os_get_current_thread();
struct alloc_region *region=
#endif
void *new_obj;
void *new_free_pointer;
-
+ gc_assert(nbytes>0);
/* Check for alignment allocation problems. */
- gc_assert((((unsigned)region->free_pointer & 0x7) == 0)
- && ((nbytes & 0x7) == 0));
+    gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
+ && ((nbytes & LOWTAG_MASK) == 0));
+#if 0
if(all_threads)
/* there are a few places in the C code that allocate data in the
* heap before Lisp starts. This is before interrupts are enabled,
#else
gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
#endif
+#endif
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
* already, in case it was a gc. If it wasn't a GC, the next
* allocation will get us back to this point anyway, so no harm done
*/
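+    /* Block all blockable signals while deciding whether to defer,
+     * so no handler can run between the check and the deferral. */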
+    sigset_t new_mask,old_mask;
+    struct interrupt_data *data=th->interrupt_data;
+    sigemptyset(&new_mask);
+    sigaddset_blockable(&new_mask);
+    sigprocmask(SIG_BLOCK,&new_mask,&old_mask);
+
-    struct interrupt_data *data=th->interrupt_data;
- if(!data->pending_handler)
- maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0);
+ if((!data->pending_handler) &&
+ maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0)) {
+        /* Leave the signals blocked, just as if the handler had been
+         * deferred the normal way, and set the pending_mask. */
+ sigcopyset(&(data->pending_mask),&old_mask);
+ } else {
+ sigprocmask(SIG_SETMASK,&old_mask,0);
+ }
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
return (new_obj);
int
gencgc_handle_wp_violation(void* fault_addr)
{
- int page_index = find_page_index(fault_addr);
+ long page_index = find_page_index(fault_addr);
#ifdef QSHOW_SIGNALS
FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",