/* The minimum size (in bytes) for a large object. */
#if (GENCGC_ALLOC_GRANULARITY >= PAGE_BYTES) && (GENCGC_ALLOC_GRANULARITY >= GENCGC_CARD_BYTES)
-long large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
+os_vm_size_t large_object_size = 4 * GENCGC_ALLOC_GRANULARITY;
#elif (GENCGC_CARD_BYTES >= PAGE_BYTES) && (GENCGC_CARD_BYTES >= GENCGC_ALLOC_GRANULARITY)
-long large_object_size = 4 * GENCGC_CARD_BYTES;
+os_vm_size_t large_object_size = 4 * GENCGC_CARD_BYTES;
#else
-long large_object_size = 4 * PAGE_BYTES;
+os_vm_size_t large_object_size = 4 * PAGE_BYTES;
#endif
\f
/* Check that X is a higher address than Y and return offset from Y to
* X in bytes. */
-static inline
-size_t void_diff(void *x, void *y)
+static inline os_vm_size_t
+void_diff(void *x, void *y)
{
gc_assert(x >= y);
return (pointer_sized_uint_t)x - (pointer_sized_uint_t)y;
" %5"PAGE_INDEX_FMT" %5"PAGE_INDEX_FMT,
boxed_cnt, unboxed_cnt, large_boxed_cnt,
large_unboxed_cnt, pinned_cnt);
- fprintf(file
- " %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ fprintf(file,
+ " %8"OS_VM_SIZE_FMT
+ " %5"OS_VM_SIZE_FMT
+ " %8"OS_VM_SIZE_FMT
+ " %4"PAGE_INDEX_FMT" %3d %7.4f\n",
generations[i].bytes_allocated,
- (npage_bytes(count_generation_pages(i))
- - generations[i].bytes_allocated),
+ (npage_bytes(count_generation_pages(i)) - generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
generation_average_age(i));
}
- fprintf(file," Total bytes allocated = %lu\n", (unsigned long)bytes_allocated);
- fprintf(file," Dynamic-space-size bytes = %lu\n", (unsigned long)dynamic_space_size);
+ fprintf(file," Total bytes allocated = %"OS_VM_SIZE_FMT"\n", bytes_allocated);
+ fprintf(file," Dynamic-space-size bytes = %"OS_VM_SIZE_FMT"\n", dynamic_space_size);
fpu_restore(fpu_state);
}
{
page_index_t first_page;
page_index_t last_page;
- unsigned long bytes_found;
+ os_vm_size_t bytes_found;
page_index_t i;
int ret;
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
- long *p;
- for (p = (long *)alloc_region->start_addr;
- p < (long *)alloc_region->end_addr; p++) {
+ word_t *p;
+ for (p = (word_t *)alloc_region->start_addr;
+ p < (word_t *)alloc_region->end_addr; p++) {
if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero (start=%p, end=%p).\n",
+ lose("The new region is not zero at %p (start=%p, end=%p).\n",
p, alloc_region->start_addr, alloc_region->end_addr);
}
}
void
gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
- int more;
+ boolean more;
page_index_t first_page;
page_index_t next_page;
- unsigned long bytes_used;
- unsigned long orig_first_page_bytes_used;
- unsigned long region_size;
- unsigned long byte_cnt;
+ os_vm_size_t bytes_used;
+ os_vm_size_t region_size;
+ os_vm_size_t byte_cnt;
+ page_bytes_t orig_first_page_bytes_used;
int ret;
void *
gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
- page_index_t first_page;
- page_index_t last_page;
- int orig_first_page_bytes_used;
- long byte_cnt;
- int more;
- unsigned long bytes_used;
- page_index_t next_page;
+ boolean more;
+ page_index_t first_page, next_page, last_page;
+ page_bytes_t orig_first_page_bytes_used;
+ os_vm_size_t byte_cnt;
+ os_vm_size_t bytes_used;
int ret;
ret = thread_mutex_lock(&free_pages_lock);