#include "validate.h"
#include "lispregs.h"
#include "arch.h"
-#include "fixnump.h"
#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
return (heap_base + (page_num * PAGE_BYTES));
}
+/* Calculate the address where the allocation region associated with
+ * the page starts, i.e. the page's base address plus the page's
+ * first_object_offset. */
+inline void *
+page_region_start(page_index_t page_index)
+{
+ return page_address(page_index)+page_table[page_index].first_object_offset;
+}
+
+
/* Find the page index within the page_table for the given
* address. Return -1 on failure. */
inline page_index_t
large_unboxed_cnt,
pinned_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated),
+ (count_generation_pages(i)*PAGE_BYTES
+ - generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
os_invalidate(addr, length);
new_addr = os_validate(addr, length);
if (new_addr == NULL || new_addr != addr) {
- lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x", start, new_addr);
+ lose("remap_free_pages: page moved, 0x%08x ==> 0x%08x",
+ start, new_addr);
}
for (i = start; i <= end; i++) {
/* Bump up last_free_page. */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
- /* do we only want to call this on special occasions? like for boxed_region? */
- set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+ /* do we only want to call this on special occasions? like for
+ * boxed_region? */
+ set_alloc_pointer((lispobj)(((char *)heap_base)
+ + last_free_page*PAGE_BYTES));
}
ret = thread_mutex_unlock(&free_pages_lock);
gc_assert(ret == 0);
/* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
- gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
+ gc_assert(alloc_region->start_addr ==
+ (page_address(first_page)
+ + page_table[first_page].bytes_used));
/* All the pages used need to be updated */
/* Calculate the number of bytes used in this page. This is not
* always the number of new bytes, unless it was free. */
more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ if ((bytes_used = (alloc_region->free_pointer
+ - page_address(first_page)))>PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
/* Calculate the number of bytes used in this page. */
more = 0;
- if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+ bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt;
+ if (bytes_used > PAGE_BYTES) {
bytes_used = PAGE_BYTES;
more = 1;
}
/* Bump up last_free_page */
if (last_page+1 > last_free_page) {
last_free_page = last_page+1;
- set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+ set_alloc_pointer((lispobj)(((char *)heap_base)
+ + last_free_page*PAGE_BYTES));
}
ret = thread_mutex_unlock(&free_pages_lock);
gc_assert(ret == 0);
* handled, or indeed even printed.
*/
fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
- gc_active_p ? "garbage collection" : "allocation", available, requested);
+ gc_active_p ? "garbage collection" : "allocation",
+ available, requested);
if (gc_active_p || (available == 0)) {
/* If we are in GC, or totally out of memory there is no way
* to sanely transfer control to the lisp-side of things.
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
{
- page_index_t first_page;
- page_index_t last_page;
- long region_size;
- page_index_t restart_page=*restart_page_ptr;
- long bytes_found;
- long num_pages;
- int large_p=(nbytes>=large_object_size);
+ page_index_t first_page, last_page;
+ page_index_t restart_page = *restart_page_ptr;
+ long bytes_found = 0;
+ long most_bytes_found = 0;
/* FIXME: assert(free_pages_lock is held); */
- /* Search for a contiguous free space of at least nbytes. If it's
- * a large object then align it on a page boundary by searching
- * for a free page. */
-
+ /* Toggled by gc_and_save for heap compaction, normally -1. */
if (gencgc_alloc_start_page != -1) {
restart_page = gencgc_alloc_start_page;
}
- do {
- first_page = restart_page;
- if (large_p)
- while ((first_page < page_table_pages)
- && (page_table[first_page].allocated != FREE_PAGE_FLAG))
- first_page++;
- else
- while (first_page < page_table_pages) {
- if(page_table[first_page].allocated == FREE_PAGE_FLAG)
- break;
- if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0)) {
- break;
- }
+ if (nbytes>=PAGE_BYTES) {
+ /* Search for a contiguous free space of at least nbytes,
+ * aligned on a page boundary. The page-alignment is strictly
+ * speaking needed only for objects at least large_object_size
+ * bytes in size. */
+ do {
+ first_page = restart_page;
+ while ((first_page < page_table_pages) &&
+ (page_table[first_page].allocated != FREE_PAGE_FLAG))
first_page++;
- }
-
- if (first_page >= page_table_pages)
- gc_heap_exhausted_error_or_lose(0, nbytes);
- gc_assert(page_table[first_page].write_protected == 0);
+ last_page = first_page;
+ bytes_found = PAGE_BYTES;
+ while ((bytes_found < nbytes) &&
+ (last_page < (page_table_pages-1)) &&
+ (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ last_page++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ restart_page = last_page + 1;
+ } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
- last_page = first_page;
- bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes)
- || (!large_p && (num_pages < 2)))
- && (last_page < (page_table_pages-1))
- && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
- last_page++;
- num_pages++;
- bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
+ } else {
+ /* Search for a page with at least nbytes of space. We prefer
+ * not to split small objects on multiple pages, to reduce the
+ * number of contiguous allocation regions spanning multiple
+ * pages: this helps avoid excessive conservatism. */
+ first_page = restart_page;
+ while (first_page < page_table_pages) {
+ if (page_table[first_page].allocated == FREE_PAGE_FLAG)
+ {
+ bytes_found = PAGE_BYTES;
+ break;
+ }
+ else if ((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0))
+ {
+ bytes_found = PAGE_BYTES
+ - page_table[first_page].bytes_used;
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ if (bytes_found >= nbytes)
+ break;
+ }
+ first_page++;
}
-
- region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
- restart_page = last_page + 1;
- } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
+ last_page = first_page;
+ restart_page = first_page + 1;
+ }
/* Check for a failure */
- if ((restart_page >= page_table_pages) && (bytes_found < nbytes))
- gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+ if (bytes_found < nbytes) {
+ gc_assert(restart_page >= page_table_pages);
+ gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
+ }
- *restart_page_ptr=first_page;
+ gc_assert(page_table[first_page].write_protected == 0);
+ *restart_page_ptr = first_page;
return last_page;
}
return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}
\f
-/*
- * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
- */
-
-extern long (*scavtab[256])(lispobj *where, lispobj object);
-extern lispobj (*transother[256])(lispobj object);
-extern long (*sizetab[256])(lispobj *where);
/* Copy a large boxed object. If the object is in a large object
* region then it is simply promoted, else it is copied. If it's large
next_page++;
}
- generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
- bytes_freed;
+ generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
+ + bytes_freed;
generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
bytes_allocated -= bytes_freed;
gc_assert((nwords & 0x01) == 0);
if ((nwords > 1024*1024) && gencgc_verbose)
- FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));
+ FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
+ nwords*N_WORD_BYTES));
/* Check whether it's a large object. */
first_page = find_page_index((void *)object);
"/copy_large_unboxed bytes_freed=%d\n",
bytes_freed));
- generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
+ generations[from_space].bytes_allocated -=
+ nwords*N_WORD_BYTES + bytes_freed;
generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
bytes_allocated -= bytes_freed;
&& (data < (code_end_addr-displacement))) {
/* function header */
if ((d4 == 0x5e)
- && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+ && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) ==
+ (unsigned)code)) {
/* Skip the function header */
p += 6*4 - 4 - 1;
continue;
void *constants_start_addr, *constants_end_addr;
void *code_start_addr, *code_end_addr;
lispobj fixups = NIL;
- unsigned long displacement = (unsigned long)new_code - (unsigned long)old_code;
+ unsigned long displacement =
+ (unsigned long)new_code - (unsigned long)old_code;
struct vector *fixups_vector;
ncode_words = fixnum_value(new_code->code_size);
(fixups_vector->header == 0x01)) {
/* If so, then follow it. */
/*SHOW("following pointer to a forwarding pointer");*/
- fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
+ fixups_vector =
+ (struct vector *)native_pointer((lispobj)fixups_vector->length);
}
/*SHOW("got fixups");*/
/* If it's within the old_code object then it must be an
* absolute fixup (relative ones are not saved) */
if ((old_value >= (unsigned long)old_code)
- && (old_value < ((unsigned long)old_code + nwords*N_WORD_BYTES)))
+ && (old_value < ((unsigned long)old_code
+ + nwords*N_WORD_BYTES)))
/* So add the dispacement. */
*(unsigned long *)((unsigned long)code_start_addr + offset) =
old_value + displacement;
} else {
/* This used to just print a note to stderr, but a bogus fixup seems to
* indicate real heap corruption, so a hard hailure is in order. */
- lose("fixup vector %p has a bad widetag: %d\n", fixups_vector, widetag_of(fixups_vector->header));
+ lose("fixup vector %p has a bad widetag: %d\n",
+ fixups_vector, widetag_of(fixups_vector->header));
}
/* Check for possible errors. */
if ((page_index == -1) ||
(page_table[page_index].allocated == FREE_PAGE_FLAG))
return NULL;
- start = (lispobj *)((void *)page_address(page_index)
- + page_table[page_index].first_object_offset);
+ start = (lispobj *)page_region_start(page_index);
return (gc_search_space(start,
(((lispobj *)pointer)+2)-start,
(lispobj *)pointer));
return 0;
}
/* Is it plausible cons? */
- if ((is_lisp_pointer(start_addr[0])
- || (fixnump(start_addr[0]))
- || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
- && (is_lisp_pointer(start_addr[1])
- || (fixnump(start_addr[1]))
- || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
+ if ((is_lisp_pointer(start_addr[0]) ||
+ is_lisp_immediate(start_addr[0])) &&
+ (is_lisp_pointer(start_addr[1]) ||
+ is_lisp_immediate(start_addr[1])))
break;
else {
if (gencgc_verbose)
/* quick check 2: Check the offset within the page.
*
*/
- if (((unsigned long)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
+ if (((unsigned long)addr & (PAGE_BYTES - 1)) >
+ page_table[addr_page_index].bytes_used)
return;
/* Filter out anything which can't be a pointer to a Lisp object
#if 0
/* I think this'd work just as well, but without the assertions.
* -dan 2004.01.01 */
- first_page=
- find_page_index(page_address(addr_page_index)+
- page_table[addr_page_index].first_object_offset);
+ first_page = find_page_index(page_region_start(addr_page_index))
#else
first_page = addr_page_index;
while (page_table[first_page].first_object_offset != 0) {
}
if (!write_protected) {
scavenge(page_address(i),
- (page_table[last_page].bytes_used +
- (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+ (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
/* Now scan the pages and write protect those that
* don't have pointers to younger generations. */
- page_table[i].first_object_offset)/N_WORD_BYTES;
new_areas_ignore_page = last_page;
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
+ scavenge(page_region_start(i), size);
}
i = last_page;
count = 1;
break;
}
- nuntagged = ((struct layout *)native_pointer(layout))->n_untagged_slots;
- verify_space(start + 1, ntotal - fixnum_value(nuntagged));
+ nuntagged = ((struct layout *)
+ native_pointer(layout))->n_untagged_slots;
+ verify_space(start + 1,
+ ntotal - fixnum_value(nuntagged));
count = ntotal + 1;
break;
}
while (fheaderl != NIL) {
fheaderp =
(struct simple_fun *) native_pointer(fheaderl);
- gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+ gc_assert(widetag_of(fheaderp->header) ==
+ SIMPLE_FUN_HEADER_WIDETAG);
verify_space(&fheaderp->name, 1);
verify_space(&fheaderp->arglist, 1);
verify_space(&fheaderp->type, 1);
break;
default:
- lose("Unhandled widetag 0x%x at 0x%x\n", widetag_of(*start), start);
+ lose("Unhandled widetag 0x%x at 0x%x\n",
+ widetag_of(*start), start);
}
}
}
|| (page_table[last_page+1].first_object_offset == 0))
break;
- verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+ verify_space(page_address(i),
+ (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
i = last_page;
}
}
/* Compute the PC's offset from the start of the CODE */
/* register. */
- pc_code_offset = *os_context_pc_addr(context) - *os_context_register_addr(context, reg_CODE);
+ pc_code_offset = *os_context_pc_addr(context)
+ - *os_context_register_addr(context, reg_CODE);
#ifdef ARCH_HAS_NPC_REGISTER
- npc_code_offset = *os_context_npc_addr(context) - *os_context_register_addr(context, reg_CODE);
+ npc_code_offset = *os_context_npc_addr(context)
+ - *os_context_register_addr(context, reg_CODE);
#endif /* ARCH_HAS_NPC_REGISTER */
#ifdef ARCH_HAS_LINK_REGISTER
/* Fix the LIP */
/*
- * But what happens if lip_register_pair is -1? *os_context_register_addr on Solaris
- * (see solaris_register_address in solaris-os.c) will return
- * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
- * that what we really want? My guess is that that is not what we
+ * But what happens if lip_register_pair is -1?
+ * *os_context_register_addr on Solaris (see
+ * solaris_register_address in solaris-os.c) will return
+ * &context->uc_mcontext.gregs[2]. But gregs[2] is REG_nPC. Is
+ * that what we really want? My guess is that that is not what we
* want, so if lip_register_pair is -1, we don't touch reg_LIP at
- * all. But maybe it doesn't really matter if LIP is trashed?
+ * all. But maybe it doesn't really matter if LIP is trashed?
*/
if (lip_register_pair >= 0) {
*os_context_register_addr(context, reg_LIP) =
- *os_context_register_addr(context, lip_register_pair) + lip_offset;
+ *os_context_register_addr(context, lip_register_pair)
+ + lip_offset;
}
#endif /* reg_LIP */
/* Fix the PC if it was in from space */
if (from_space_p(*os_context_pc_addr(context)))
- *os_context_pc_addr(context) = *os_context_register_addr(context, reg_CODE) + pc_code_offset;
+ *os_context_pc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + pc_code_offset;
#ifdef ARCH_HAS_LINK_REGISTER
/* Fix the LR ditto; important if we're being called from
#ifdef ARCH_HAS_NPC_REGISTER
if (from_space_p(*os_context_npc_addr(context)))
- *os_context_npc_addr(context) = *os_context_register_addr(context, reg_CODE) + npc_code_offset;
+ *os_context_npc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + npc_code_offset;
#endif /* ARCH_HAS_NPC_REGISTER */
}
last_free_page = last_page+1;
- set_alloc_pointer((lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES));
+ set_alloc_pointer((lispobj)(((char *)heap_base)
+ + last_free_page*PAGE_BYTES));
return 0; /* dummy value: return something ... */
}
page_table[page].allocated = FREE_PAGE_FLAG;
page_table[page].bytes_used = 0;
-#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure about this change. */
+#ifndef LISP_FEATURE_WIN32 /* Pages already zeroed on win32? Not sure
+ * about this change. */
/* Zero the page. */
page_start = (void *)page_address(page);
alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
if ((signed long) alloc_signal <= 0) {
+ SetSymbolValue(ALLOC_SIGNAL, T, thread);
#ifdef LISP_FEATURE_SB_THREAD
kill_thread_safely(thread->os_thread, SIGPROF);
#else
*/
if(page_table[page_index].write_protected_cleared != 1)
lose("fault in heap page %d not marked as write-protected\nboxed_region.first_page: %d, boxed_region.last_page %d\n",
- page_index, boxed_region.first_page, boxed_region.last_page);
+ page_index, boxed_region.first_page,
+ boxed_region.last_page);
}
/* Don't worry, we can handle it. */
return 1;