#include "validate.h"
#include "lispregs.h"
#include "arch.h"
-#include "fixnump.h"
#include "gc.h"
#include "gc-internal.h"
#include "thread.h"
return (heap_base + (page_num * PAGE_BYTES));
}
+/* Calculate the address where the allocation region associated with the page starts. */
+inline void *
+page_region_start(page_index_t page_index)
+{
+ return page_address(page_index)+page_table[page_index].first_object_offset;
+}
+
/* Find the page index within the page_table for the given
* address. Return -1 on failure. */
inline page_index_t
ret = thread_mutex_unlock(&free_pages_lock);
gc_assert(ret == 0);
- /* we can do this after releasing free_pages_lock */
- if (gencgc_zero_check) {
- long *p;
- for (p = (long *)alloc_region->start_addr;
- p < (long *)alloc_region->end_addr; p++) {
- if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero.\n", p);
- }
- }
- }
-
#ifdef READ_PROTECT_FREE_PAGES
os_protect(page_address(first_page),
PAGE_BYTES*(1+last_page-first_page),
}
zero_dirty_pages(first_page, last_page);
+
+ /* we can do this after releasing free_pages_lock */
+ if (gencgc_zero_check) {
+ long *p;
+ for (p = (long *)alloc_region->start_addr;
+ p < (long *)alloc_region->end_addr; p++) {
+ if (*p != 0) {
+ /* KLUDGE: It would be nice to use %lx and explicit casts
+ * (long) in code like this, so that it is less likely to
+ * break randomly when running on a machine with different
+ * word sizes. -- WHN 19991129 */
+ lose("The new region at %x is not zero (start=%p, end=%p).\n",
+ p, alloc_region->start_addr, alloc_region->end_addr);
+ }
+ }
+ }
}
/* If the record_new_objects flag is 2 then all new regions created
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
{
- page_index_t first_page;
- page_index_t last_page;
- long region_size;
- page_index_t restart_page=*restart_page_ptr;
- long bytes_found;
- long num_pages;
- int large_p=(nbytes>=large_object_size);
+ page_index_t first_page, last_page;
+ page_index_t restart_page = *restart_page_ptr;
+ long bytes_found = 0;
+ long most_bytes_found = 0;
/* FIXME: assert(free_pages_lock is held); */
- /* Search for a contiguous free space of at least nbytes. If it's
- * a large object then align it on a page boundary by searching
- * for a free page. */
-
+ /* Toggled by gc_and_save for heap compaction, normally -1. */
if (gencgc_alloc_start_page != -1) {
restart_page = gencgc_alloc_start_page;
}
- do {
- first_page = restart_page;
- if (large_p)
- while ((first_page < page_table_pages)
- && (page_table[first_page].allocated != FREE_PAGE_FLAG))
- first_page++;
- else
- while (first_page < page_table_pages) {
- if(page_table[first_page].allocated == FREE_PAGE_FLAG)
- break;
- if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0)) {
- break;
- }
+ if (nbytes>=PAGE_BYTES) {
+ /* Search for a contiguous free space of at least nbytes,
+ * aligned on a page boundary. The page-alignment is strictly
+ * speaking needed only for objects at least large_object_size
+ * bytes in size. */
+ do {
+ first_page = restart_page;
+ while ((first_page < page_table_pages) &&
+ (page_table[first_page].allocated != FREE_PAGE_FLAG))
first_page++;
- }
-
- if (first_page >= page_table_pages)
- gc_heap_exhausted_error_or_lose(0, nbytes);
- gc_assert(page_table[first_page].write_protected == 0);
+ last_page = first_page;
+ bytes_found = PAGE_BYTES;
+ while ((bytes_found < nbytes) &&
+ (last_page < (page_table_pages-1)) &&
+ (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ last_page++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ restart_page = last_page + 1;
+ } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
- last_page = first_page;
- bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes)
- || (!large_p && (num_pages < 2)))
- && (last_page < (page_table_pages-1))
- && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
- last_page++;
- num_pages++;
- bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
+ } else {
+ /* Search for a page with at least nbytes of space. We prefer
+ * not to split small objects on multiple pages, to reduce the
+     * number of contiguous allocation regions spanning multiple
+     * pages: this helps avoid excessive conservatism. */
+ first_page = restart_page;
+ while (first_page < page_table_pages) {
+ if (page_table[first_page].allocated == FREE_PAGE_FLAG)
+ {
+ bytes_found = PAGE_BYTES;
+ break;
+ }
+ else if ((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0))
+ {
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
+ if (bytes_found > most_bytes_found)
+ most_bytes_found = bytes_found;
+ if (bytes_found >= nbytes)
+ break;
+ }
+ first_page++;
}
-
- region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
- restart_page = last_page + 1;
- } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
+ last_page = first_page;
+ restart_page = first_page + 1;
+ }
/* Check for a failure */
- if ((restart_page >= page_table_pages) && (bytes_found < nbytes))
- gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+ if (bytes_found < nbytes) {
+ gc_assert(restart_page >= page_table_pages);
+ gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
+ }
- *restart_page_ptr=first_page;
+ gc_assert(page_table[first_page].write_protected == 0);
+ *restart_page_ptr = first_page;
return last_page;
}
if (!check_code_fixups)
return;
+ FSHOW((stderr, "/sniffing code: %p, %lu\n", code, displacement));
+
ncode_words = fixnum_value(code->code_size);
nheader_words = HeaderValue(*(lispobj *)code);
nwords = ncode_words + nheader_words;
if ((page_index == -1) ||
(page_table[page_index].allocated == FREE_PAGE_FLAG))
return NULL;
- start = (lispobj *)((void *)page_address(page_index)
- + page_table[page_index].first_object_offset);
+ start = (lispobj *)page_region_start(page_index);
return (gc_search_space(start,
(((lispobj *)pointer)+2)-start,
(lispobj *)pointer));
return 0;
}
/* Is it plausible cons? */
- if ((is_lisp_pointer(start_addr[0])
- || (fixnump(start_addr[0]))
- || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
- && (is_lisp_pointer(start_addr[1])
- || (fixnump(start_addr[1]))
- || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
+ if ((is_lisp_pointer(start_addr[0]) || is_lisp_immediate(start_addr[0])) &&
+ (is_lisp_pointer(start_addr[1]) || is_lisp_immediate(start_addr[1])))
break;
else {
if (gencgc_verbose)
#if 0
/* I think this'd work just as well, but without the assertions.
* -dan 2004.01.01 */
- first_page=
- find_page_index(page_address(addr_page_index)+
- page_table[addr_page_index].first_object_offset);
+ first_page = find_page_index(page_region_start(addr_page_index))
#else
first_page = addr_page_index;
while (page_table[first_page].first_object_offset != 0) {
- page_table[i].first_object_offset)/N_WORD_BYTES;
new_areas_ignore_page = last_page;
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
+ scavenge(page_region_start(i), size);
}
i = last_page;
#ifdef LUTEX_WIDETAG
case LUTEX_WIDETAG:
#endif
+#ifdef NO_TLS_VALUE_MARKER_WIDETAG
+ case NO_TLS_VALUE_MARKER_WIDETAG:
+#endif
count = (sizetab[widetag_of(*start)])(start);
break;
default:
- FSHOW((stderr,
- "/Unhandled widetag 0x%x at 0x%x\n",
- widetag_of(*start), start));
- fflush(stderr);
- gc_abort();
+ lose("Unhandled widetag 0x%x at 0x%x\n", widetag_of(*start), start);
}
}
}
if (verify_after_free_heap) {
/* Check whether purify has left any bad pointers. */
- if (gencgc_verbose)
- SHOW("checking after free_heap\n");
+ FSHOW((stderr, "checking after free_heap\n"));
verify_gc();
}
}
alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
if ((signed long) alloc_signal <= 0) {
+ SetSymbolValue(ALLOC_SIGNAL, T, thread);
#ifdef LISP_FEATURE_SB_THREAD
kill_thread_safely(thread->os_thread, SIGPROF);
#else