return (-1);
}
-static size_t
+static os_vm_size_t
npage_bytes(page_index_t npages)
{
gc_assert(npages>=0);
- return ((unsigned long)npages)*GENCGC_CARD_BYTES;
+ return ((os_vm_size_t)npages)*GENCGC_CARD_BYTES;
}
/* Check that X is a higher address than Y and return offset from Y to
static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
/* Diff hunk: both granularity globals are widened from unsigned long to
 * os_vm_size_t, matching the other size-typed cleanups in this patch.
 * NOTE(review): names suggest these control the granularity at which
 * memory is released back to the OS and at which allocation regions are
 * carved out — confirm against the GENCGC_RELEASE_GRANULARITY /
 * GENCGC_ALLOC_GRANULARITY definitions. */
-extern unsigned long gencgc_release_granularity;
-unsigned long gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
+extern os_vm_size_t gencgc_release_granularity;
+os_vm_size_t gencgc_release_granularity = GENCGC_RELEASE_GRANULARITY;
-extern unsigned long gencgc_alloc_granularity;
-unsigned long gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
+extern os_vm_size_t gencgc_alloc_granularity;
+os_vm_size_t gencgc_alloc_granularity = GENCGC_ALLOC_GRANULARITY;
\f
/*
/* Work through the pages and add up the number of bytes used for the
* given generation. */
-static unsigned long
+static os_vm_size_t
count_generation_bytes_allocated (generation_index_t gen)
{
page_index_t i;
- unsigned long result = 0;
+ os_vm_size_t result = 0;
for (i = 0; i < last_free_page; i++) {
if (page_allocated_p(i)
&& (page_table[i].gen == gen))
" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
boxed_cnt, unboxed_cnt, large_boxed_cnt,
large_unboxed_cnt, pinned_cnt);
- fprintf(file
- " %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ fprintf(file,
+ " %8"OS_VM_SIZE_FMT
+ " %5"OS_VM_SIZE_FMT
+ " %8"OS_VM_SIZE_FMT
+ " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
generations[i].bytes_allocated,
- (npage_bytes(count_generation_pages(i))
- - generations[i].bytes_allocated),
+ (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
generation_average_age(i));
}
- fprintf(file," Total bytes allocated = %lu\n", (unsigned long)bytes_allocated);
- fprintf(file," Dynamic-space-size bytes = %lu\n", (unsigned long)dynamic_space_size);
+ fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
+ fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);
fpu_restore(fpu_state);
}
void zero_pages_with_mmap(page_index_t start, page_index_t end) {
page_index_t i;
void *addr = page_address(start), *new_addr;
- size_t length = npage_bytes(1+end-start);
+ os_vm_size_t length = npage_bytes(1+end-start);
if (start > end)
return;
{
page_index_t first_page;
page_index_t last_page;
- unsigned long bytes_found;
+ os_vm_size_t bytes_found;
page_index_t i;
int ret;
}
page_index_t
-gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
+gc_find_freeish_pages(page_index_t *restart_page_ptr, long bytes,
int page_type_flag)
{
- page_index_t first_page, last_page;
- page_index_t restart_page = *restart_page_ptr;
- long nbytes_goal = nbytes;
- long bytes_found = 0;
- long most_bytes_found = 0;
- page_index_t most_bytes_found_from, most_bytes_found_to;
- int small_object = nbytes < GENCGC_CARD_BYTES;
+ page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
+ page_index_t first_page, last_page, restart_page = *restart_page_ptr;
+ os_vm_size_t nbytes = bytes;
+ os_vm_size_t nbytes_goal = nbytes;
+ os_vm_size_t bytes_found = 0;
+ os_vm_size_t most_bytes_found = 0;
+ boolean small_object = nbytes < GENCGC_CARD_BYTES;
/* FIXME: assert(free_pages_lock is held); */
if (nbytes_goal < gencgc_alloc_granularity)
- nbytes_goal = gencgc_alloc_granularity;
+ nbytes_goal = gencgc_alloc_granularity;
/* Toggled by gc_and_save for heap compaction, normally -1. */
if (gencgc_alloc_start_page != -1) {
restart_page = gencgc_alloc_start_page;
}
- gc_assert(nbytes>=0);
+ /* FIXME: This is on bytes instead of nbytes pending cleanup of
+ * long from the interface. */
+ gc_assert(bytes>=0);
/* Search for a page with at least nbytes of space. We prefer
* not to split small objects on multiple pages, to reduce the
 * number of contiguous allocation regions spanning multiple
gc_heap_exhausted_error_or_lose(most_bytes_found, nbytes);
}
+ gc_assert(most_bytes_found_to);
*restart_page_ptr = most_bytes_found_from;
return most_bytes_found_to-1;
}
for (page = 0; page < page_table_pages; page++) {
/* Skip free pages which should already be zero filled. */
if (page_allocated_p(page)) {
- void *page_start, *addr;
+ void *page_start;
for (last_page = page;
(last_page < page_table_pages) && page_allocated_p(last_page);
last_page++) {