/* the minimum size (in bytes) for a large object */
#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
-long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
-long large_object_size = 4 * GENCGC_CARD_BYTES;
+os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
#else
-long large_object_size = 4 * PAGE_BYTES;
+os_vm_size_t large_object_size = 4 * PAGE_BYTES;
#endif
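/* A minimal sketch of what the #if chain above selects: 4 times the
 * largest of the three granularity constants. The MAX3() helper is
 * hypothetical, shown only to make the selection explicit:
 *
 *   #define MAX3(a, b, c) \
 *       ((a) >= (b) ? ((a) >= (c) ? (a) : (c)) \
 *                   : ((b) >= (c) ? (b) : (c)))
 *
 *   os_vm_size_t large_object_size =
 *       4 * MAX3(GENCGC_ALLOC_GRANULARITY, GENCGC_CARD_BYTES, PAGE_BYTES);
 */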
\f
/* Check that X is a higher address than Y and return offset from Y to
* X in bytes. */
-static inline
-size_t void_diff(void *x, void *y)
+static inline os_vm_size_t
+void_diff(void *x, void *y)
{
gc_assert(x >= y);
return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
boxed_cnt, unboxed_cnt, large_boxed_cnt,
large_unboxed_cnt, pinned_cnt);
- fprintf(file,
- " %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ fprintf(file,
+ " %8"OS_VM_SIZE_FMT
+ " %5"OS_VM_SIZE_FMT
+ " %8"OS_VM_SIZE_FMT
+ " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
generations[i].bytes_allocated,
- (npage_bytes(count_generation_pages(i))
- - generations[i].bytes_allocated),
+ (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
generation_average_age(i));
}
- fprintf(file," Total bytes allocated = %lu\n", (unsigned long)bytes_allocated);
- fprintf(file," Dynamic-space-size bytes = %lu\n", (unsigned long)dynamic_space_size);
+ fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
+ fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);
fpu_restore(fpu_state);
}
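/* OS_VM_SIZE_FMT is assumed here to expand to a printf conversion
 * matching the width of os_vm_size_t, in the style of the
 * <inttypes.h> PRI* macros. A minimal sketch of such a definition
 * (the real one lives in the runtime headers): */
#include <inttypes.h>
#ifdef LISP_FEATURE_64_BIT
# define OS_VM_SIZE_FMT PRIu64
#else
# define OS_VM_SIZE_FMT PRIu32
#endif
/* Usage: fprintf(file, "%"OS_VM_SIZE_FMT"\n", bytes_allocated); */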
{
page_index_t first_page;
page_index_t last_page;
- unsigned long bytes_found;
+ os_vm_size_t bytes_found;
page_index_t i;
int ret;
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
- long *p;
- for (p = (long *)alloc_region->start_addr;
- p < (long *)alloc_region->end_addr; p++) {
+ word_t *p;
+ for (p = (word_t *)alloc_region->start_addr;
+ p < (word_t *)alloc_region->end_addr; p++) {
if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero (start=%p, end=%p).\n",
+ lose("The new region is not zero at %p (start=%p, end=%p).\n",
p, alloc_region->start_addr, alloc_region->end_addr);
}
}
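/* A minimal sketch of the gencgc_zero_check scan above as a
 * standalone helper, assuming word_t is the runtime's word-sized
 * integer type (find_nonzero_word itself is hypothetical): */
static word_t *
find_nonzero_word(void *start, void *end)
{
    word_t *p;
    for (p = (word_t *)start; p < (word_t *)end; p++)
        if (*p != 0)
            return p;        /* first non-zero word found */
    return NULL;             /* range is entirely zero */
}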
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
- int more;
+ boolean more;
page_index_t first_page;
page_index_t next_page;
- unsigned long bytes_used;
- unsigned long orig_first_page_bytes_used;
- unsigned long region_size;
- unsigned long byte_cnt;
+ os_vm_size_t bytes_used;
+ os_vm_size_t region_size;
+ os_vm_size_t byte_cnt;
+ page_bytes_t orig_first_page_bytes_used;
int ret;
void *
gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
- page_index_t first_page;
- page_index_t last_page;
- int orig_first_page_bytes_used;
- long byte_cnt;
- int more;
- unsigned long bytes_used;
- page_index_t next_page;
+ boolean more;
+ page_index_t first_page, next_page, last_page;
+ page_bytes_t orig_first_page_bytes_used;
+ os_vm_size_t byte_cnt;
+ os_vm_size_t bytes_used;
int ret;
ret = thread_mutex_lock(&free_pages_lock);
}
static inline void *
-gc_quick_alloc_large(long nbytes)
-{
- return gc_general_alloc(nbytes, BOXED_PAGE_FLAG ,ALLOC_QUICK);
-}
-
-static inline void *
gc_alloc_unboxed(long nbytes)
{
return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
{
return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
-
-static inline void *
-gc_quick_alloc_large_unboxed(long nbytes)
-{
- return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
-}
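/* With the quick-alloc wrappers deleted, surviving callers are
 * expected to call gc_general_alloc directly, e.g. (sketch):
 *
 *   new = gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
 *   new = gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
 */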
\f
-
-/* Copy a large boxed object. If the object is in a large object
- * region then it is simply promoted, else it is copied. If it's large
- * enough then it's copied to a large object region.
+/* Copy a large object. If the object is in a large object region then
+ * it is simply promoted, else it is copied. If it's large enough then
+ * it's copied to a large object region.
*
- * Vectors may have shrunk. If the object is not copied the space
- * needs to be reclaimed, and the page_tables corrected. */
-lispobj
-copy_large_object(lispobj object, long nwords)
+ * Bignums and vectors may have shrunk. If the object is not copied
+ * the space needs to be reclaimed, and the page_tables corrected. */
+static lispobj
+general_copy_large_object(lispobj object, long nwords, boolean boxedp)
{
int tag;
lispobj *new;
gc_assert(from_space_p(object));
gc_assert((nwords & 0x01) == 0);
+ if ((nwords > 1024*1024) && gencgc_verbose) {
+ FSHOW((stderr, "/general_copy_large_object: %ld bytes\n",
+ nwords*N_WORD_BYTES));
+ }
- /* Check whether it's in a large object region. */
+ /* Check whether it's a large object. */
first_page = find_page_index((void *)object);
gc_assert(first_page >= 0);
if (page_table[first_page].large_object) {
-
- /* Promote the object. */
-
- unsigned long remaining_bytes;
+ /* Promote the object. Note: Unboxed objects may have been
+ * allocated to a BOXED region so it may be necessary to
+ * change the region to UNBOXED. */
+ os_vm_size_t remaining_bytes;
+ os_vm_size_t bytes_freed;
page_index_t next_page;
- unsigned long bytes_freed;
- unsigned long old_bytes_used;
+ page_bytes_t old_bytes_used;
- /* Note: Any page write-protection must be removed, else a
+ /* FIXME: This comment is somewhat stale.
+ *
+ * Note: Any page write-protection must be removed, else a
* later scavenge_newspace may incorrectly not scavenge these
* pages. This would not be necessary if they are added to the
* new areas, but let's do it for them all (they'll probably
* be written anyway?). */
gc_assert(page_table[first_page].region_start_offset == 0);
-
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
+
while (remaining_bytes > GENCGC_CARD_BYTES) {
gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_boxed_p(next_page));
gc_assert(page_table[next_page].large_object);
gc_assert(page_table[next_page].region_start_offset ==
npage_bytes(next_page-first_page));
gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
- /* Should have been unprotected by unprotect_oldspace(). */
- gc_assert(page_table[next_page].write_protected == 0);
-
+ /* Should have been unprotected by unprotect_oldspace()
+ * for boxed objects, and after promotion unboxed ones
+ * should not be on protected pages at all. */
+ gc_assert(!page_table[next_page].write_protected);
+
+ if (boxedp)
+ gc_assert(page_boxed_p(next_page));
+ else {
+ gc_assert(page_allocated_no_region_p(next_page));
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+ }
page_table[next_page].gen = new_space;
remaining_bytes -= GENCGC_CARD_BYTES;
next_page++;
}
- /* Now only one page remains, but the object may have shrunk
- * so there may be more unused pages which will be freed. */
+ /* Now only one page remains, but the object may have shrunk so
+ * there may be more unused pages which will be freed. */
- /* The object may have shrunk but shouldn't have grown. */
+ /* Object may have shrunk but shouldn't have grown - check. */
gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
page_table[next_page].gen = new_space;
- gc_assert(page_boxed_p(next_page));
+
+ if (boxedp)
+ gc_assert(page_boxed_p(next_page));
+ else
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
next_page++;
while ((old_bytes_used == GENCGC_CARD_BYTES) &&
(page_table[next_page].gen == from_space) &&
- page_boxed_p(next_page) &&
+ /* FIXME: It is not obvious to me why this is necessary
+ * as a loop condition: it seems to me that the
+ * region_start_offset test should be sufficient, but
+ * experimentally that is not the case. --NS
+ * 2011-11-28 */
+ (boxedp ?
+ page_boxed_p(next_page) :
+ page_allocated_no_region_p(next_page)) &&
page_table[next_page].large_object &&
(page_table[next_page].region_start_offset ==
npage_bytes(next_page - first_page))) {
- /* Checks out OK, free the page. Don't need to bother zeroing
+ /* Checks out OK, free the page. Don't need to bother zeroing
* pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected as they
- * should be zero filled. */
+ * object. These pages shouldn't be write-protected, even if
+ * boxed they should be zero filled. */
gc_assert(page_table[next_page].write_protected == 0);
old_bytes_used = page_table[next_page].bytes_used;
next_page++;
}
- generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords
+ if ((bytes_freed > 0) && gencgc_verbose) {
+ FSHOW((stderr,
+ "/general_copy_large_object bytes_freed=%"OS_VM_SIZE_FMT"\n",
+ bytes_freed));
+ }
+
+ generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES
+ bytes_freed;
- generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
+ generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
bytes_allocated -= bytes_freed;
/* Add the region to the new_areas if requested. */
- add_new_area(first_page,0,nwords*N_WORD_BYTES);
+ if (boxedp)
+ add_new_area(first_page,0,nwords*N_WORD_BYTES);
return(object);
+
} else {
/* Get tag of object. */
tag = lowtag_of(object);
/* Allocate space. */
- new = gc_quick_alloc_large(nwords*N_WORD_BYTES);
+ new = gc_general_alloc(nwords*N_WORD_BYTES,
+ (boxedp ? BOXED_PAGE_FLAG : UNBOXED_PAGE_FLAG),
+ ALLOC_QUICK);
+ /* Copy the object. */
memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
/* Return Lisp pointer of new object. */
return ((lispobj) new) | tag;
}
}
+lispobj
+copy_large_object(lispobj object, long nwords)
+{
+ return general_copy_large_object(object, nwords, 1);
+}
+
+lispobj
+copy_large_unboxed_object(lispobj object, long nwords)
+{
+ return general_copy_large_object(object, nwords, 0);
+}
+
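/* A hypothetical call site for the wrappers above; `object',
 * `nwords', and `boxedp' are assumed to come from the caller's
 * header decoding:
 *
 *   lispobj copy = boxedp ? copy_large_object(object, nwords)
 *                         : copy_large_unboxed_object(object, nwords);
 */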
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, long nwords)
/* Return Lisp pointer of new object. */
return ((lispobj) new) | tag;
}
-
-/* to copy large unboxed objects
- *
- * If the object is in a large object region then it is simply
- * promoted, else it is copied. If it's large enough then it's copied
- * to a large object region.
- *
- * Bignums and vectors may have shrunk. If the object is not copied
- * the space needs to be reclaimed, and the page_tables corrected.
- *
- * KLUDGE: There's a lot of cut-and-paste duplication between this
- * function and copy_large_object(..). -- WHN 20000619 */
-lispobj
-copy_large_unboxed_object(lispobj object, long nwords)
-{
- int tag;
- lispobj *new;
- page_index_t first_page;
-
- gc_assert(is_lisp_pointer(object));
- gc_assert(from_space_p(object));
- gc_assert((nwords & 0x01) == 0);
-
- if ((nwords > 1024*1024) && gencgc_verbose) {
- FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n",
- nwords*N_WORD_BYTES));
- }
-
- /* Check whether it's a large object. */
- first_page = find_page_index((void *)object);
- gc_assert(first_page >= 0);
-
- if (page_table[first_page].large_object) {
- /* Promote the object. Note: Unboxed objects may have been
- * allocated to a BOXED region so it may be necessary to
- * change the region to UNBOXED. */
- unsigned long remaining_bytes;
- page_index_t next_page;
- unsigned long bytes_freed;
- unsigned long old_bytes_used;
-
- gc_assert(page_table[first_page].region_start_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > GENCGC_CARD_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_allocated_no_region_p(next_page));
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].region_start_offset ==
- npage_bytes(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == GENCGC_CARD_BYTES);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- remaining_bytes -= GENCGC_CARD_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk so
- * there may be more unused pages which will be freed. */
-
- /* Object may have shrunk but shouldn't have grown - check. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == GENCGC_CARD_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- page_allocated_no_region_p(next_page) &&
- page_table[next_page].large_object &&
- (page_table[next_page].region_start_offset ==
- npage_bytes(next_page - first_page))) {
- /* Checks out OK, free the page. Don't need to both zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected, even if
- * boxed they should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- if ((bytes_freed > 0) && gencgc_verbose) {
- FSHOW((stderr,
- "/copy_large_unboxed bytes_freed=%d\n",
- bytes_freed));
- }
-
- generations[from_space].bytes_allocated -=
- nwords*N_WORD_BYTES + bytes_freed;
- generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
- bytes_allocated -= bytes_freed;
-
- return(object);
- }
- else {
- /* Get tag of object. */
- tag = lowtag_of(object);
-
- /* Allocate space. */
- new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);
-
- /* Copy the object. */
- memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
-
- /* Return Lisp pointer of new object. */
- return ((lispobj) new) | tag;
- }
-}
-
-
-
\f
/*
auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
if(gencgc_verbose)
- fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+ fprintf(stderr,"Next gc when %"OS_VM_SIZE_FMT" bytes have been consed\n",
auto_gc_trigger);
/* If we did a big GC (arbitrarily defined as gen > 1), release memory
for (page = 0; page < page_table_pages; page++) {
/* Skip free pages which should already be zero filled. */
if (page_allocated_p(page)) {
- void *page_start, *addr;
+ void *page_start;
for (last_page = page;
(last_page < page_table_pages) && page_allocated_p(last_page);
last_page++) {