/*
- * GENerational Conservative Garbage Collector for SBCL x86
+ * GENerational Conservative Garbage Collector for SBCL
*/
/*
* <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
*/
+#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <errno.h>
#include "thread.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
+#include "genesis/fdefn.h"
#include "genesis/simple-fun.h"
#include "save.h"
#include "genesis/hash-table.h"
/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
-extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
unsigned long auto_gc_trigger = 0;
/* the source and destination generations. These are set before a GC starts
generation_index_t from_space;
generation_index_t new_space;
+/* Set to 1 when in GC */
+boolean gc_active_p = 0;
+
/* should the GC be conservative on stack. If false (only right before
* saving a core), don't scan the stack / mark pages dont_move. */
static boolean conservative_stack = 1;
-/* An array of page structures is statically allocated.
+/* An array of page structures is allocated on gc initialization.
* This helps quickly map between an address its page structure.
- * NUM_PAGES is set from the size of the dynamic space. */
-struct page page_table[NUM_PAGES];
+ * page_table_pages is set from the size of the dynamic space. */
+unsigned page_table_pages;
+struct page *page_table;
/* To map addresses to page structures the address of the first page
* is needed. */
static void *heap_base = NULL;
-#if N_WORD_BITS == 32
- #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG
-#elif N_WORD_BITS == 64
- #define SIMPLE_ARRAY_WORD_WIDETAG SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
-#endif
-
/* Calculate the start address for the given page number. */
inline void *
page_address(page_index_t page_num)
if (index >= 0) {
index = ((unsigned long)index)/PAGE_BYTES;
- if (index < NUM_PAGES)
+ if (index < page_table_pages)
return (index);
}
long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != 0)
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
&& (page_table[i].gen == generation))
count++;
return count;
page_index_t i;
long count = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].dont_move != 0)) {
++count;
}
}
page_index_t i;
long result = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].gen == gen))
result += page_table[i].bytes_used;
}
return result;
gc_assert(generations[i].bytes_allocated
== count_generation_bytes_allocated(i));
fprintf(stderr,
- " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ " %1d: %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %5ld %8ld %5ld %8ld %4ld %3d %7.4f\n",
i,
generations[i].alloc_start_page,
generations[i].alloc_unboxed_start_page,
generations[i].alloc_large_start_page,
generations[i].alloc_large_unboxed_start_page,
- boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
+ boxed_cnt,
+ unboxed_cnt,
+ large_boxed_cnt,
+ large_unboxed_cnt,
pinned_cnt,
generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES
- - generations[i].bytes_allocated),
+ (count_generation_pages(i)*PAGE_BYTES - generations[i].bytes_allocated),
generations[i].gc_trigger,
count_write_protect_generation_pages(i),
generations[i].num_gc,
static page_index_t gencgc_alloc_start_page = -1;
+/* Report heap exhaustion and either signal the Lisp-side
+ * HEAP-EXHAUSTED-ERROR (when it is safe to call into Lisp) or lose().
+ * 'available' and 'requested' are byte counts. Never returns. */
+void
+gc_heap_exhausted_error_or_lose (long available, long requested)
+{
+ /* Write basic information before doing anything else: if we don't
+ * end up calling into Lisp this is a must, and even if we do there
+ * is always the danger that we bounce back here before the error
+ * has been handled, or indeed even printed.
+ */
+ fprintf(stderr, "Heap exhausted during %s: %ld bytes available, %ld requested.\n",
+ gc_active_p ? "garbage collection" : "allocation", available, requested);
+ if (gc_active_p || (available == 0)) {
+ /* If we are in GC, or totally out of memory there is no way
+ * to sanely transfer control to the lisp-side of things.
+ */
+ print_generation_stats(1);
+ lose("Heap exhausted, game over.");
+ }
+ else {
+ /* FIXME: assert free_pages_lock held */
+ thread_mutex_unlock(&free_pages_lock);
+ /* Hand the condition to Lisp; if the handler unwinds we never
+ * come back here, hence the lose() below is unreachable on the
+ * normal path. */
+ funcall2(SymbolFunction(HEAP_EXHAUSTED_ERROR),
+ make_fixnum(available), make_fixnum(requested));
+ lose("HEAP-EXHAUSTED-ERROR fell through");
+ }
+}
+
page_index_t
gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
{
do {
first_page = restart_page;
if (large_p)
- while ((first_page < NUM_PAGES)
+ while ((first_page < page_table_pages)
&& (page_table[first_page].allocated != FREE_PAGE_FLAG))
first_page++;
else
- while (first_page < NUM_PAGES) {
+ while (first_page < page_table_pages) {
if(page_table[first_page].allocated == FREE_PAGE_FLAG)
break;
if((page_table[first_page].allocated ==
first_page++;
}
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose("\n");
- }
+ if (first_page >= page_table_pages)
+ gc_heap_exhausted_error_or_lose(0, nbytes);
gc_assert(page_table[first_page].write_protected == 0);
num_pages = 1;
while (((bytes_found < nbytes)
|| (!large_p && (num_pages < 2)))
- && (last_page < (NUM_PAGES-1))
+ && (last_page < (page_table_pages-1))
&& (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
last_page++;
num_pages++;
gc_assert(bytes_found == region_size);
restart_page = last_page + 1;
- } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
+ } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
/* Check for a failure */
- if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
- fprintf(stderr,
- "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose("\n");
- }
+ if ((restart_page >= page_table_pages) && (bytes_found < nbytes))
+ gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
+
*restart_page_ptr=first_page;
return last_page;
\f
/*
- * vector-like objects
- */
-
-
-/* FIXME: What does this mean? */
-int gencgc_hash = 1;
-
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
-
-static long
-scav_vector(lispobj *where, lispobj object)
-{
- unsigned long kv_length;
- lispobj *kv_vector;
- unsigned long length = 0; /* (0 = dummy to stop GCC warning) */
- struct hash_table *hash_table;
- lispobj empty_symbol;
- unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- unsigned long *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */
- lispobj weak_p_obj;
- unsigned long next_vector_length = 0;
-
- /* FIXME: A comment explaining this would be nice. It looks as
- * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
- * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
- if (HeaderValue(object) != subtype_VectorValidHashing)
- return 1;
-
- if (!gencgc_hash) {
- /* This is set for backward compatibility. FIXME: Do we need
- * this any more? */
- *where =
- (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
- return 1;
- }
-
- kv_length = fixnum_value(where[1]);
- kv_vector = where + 2; /* Skip the header and length. */
- /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
-
- /* Scavenge element 0, which may be a hash-table structure. */
- scavenge(where+2, 1);
- if (!is_lisp_pointer(where[2])) {
- lose("no pointer at %x in hash table\n", where[2]);
- }
- hash_table = (struct hash_table *)native_pointer(where[2]);
- /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
- if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
- lose("hash table not instance (%x at %x)\n",
- hash_table->header,
- hash_table);
- }
-
- /* Scavenge element 1, which should be some internal symbol that
- * the hash table code reserves for marking empty slots. */
- scavenge(where+3, 1);
- if (!is_lisp_pointer(where[3])) {
- lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]);
- }
- empty_symbol = where[3];
- /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
- if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
- SYMBOL_HEADER_WIDETAG) {
- lose("not a symbol where empty-hash-table-slot symbol expected: %x\n",
- *(lispobj *)native_pointer(empty_symbol));
- }
-
- /* Scavenge hash table, which will fix the positions of the other
- * needed objects. */
- scavenge((lispobj *)hash_table,
- sizeof(struct hash_table) / sizeof(lispobj));
-
- /* Cross-check the kv_vector. */
- if (where != (lispobj *)native_pointer(hash_table->table)) {
- lose("hash_table table!=this table %x\n", hash_table->table);
- }
-
- /* WEAK-P */
- weak_p_obj = hash_table->weak_p;
-
- /* index vector */
- {
- lispobj index_vector_obj = hash_table->index_vector;
-
- if (is_lisp_pointer(index_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)) {
- index_vector =
- ((unsigned long *)native_pointer(index_vector_obj)) + 2;
- /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
- length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
- /*FSHOW((stderr, "/length = %d\n", length));*/
- } else {
- lose("invalid index_vector %x\n", index_vector_obj);
- }
- }
-
- /* next vector */
- {
- lispobj next_vector_obj = hash_table->next_vector;
-
- if (is_lisp_pointer(next_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)) {
- next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2;
- /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
- next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
- /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
- } else {
- lose("invalid next_vector %x\n", next_vector_obj);
- }
- }
-
- /* maybe hash vector */
- {
- lispobj hash_vector_obj = hash_table->hash_vector;
-
- if (is_lisp_pointer(hash_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)){
- hash_vector =
- ((unsigned long *)native_pointer(hash_vector_obj)) + 2;
- /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
- gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
- == next_vector_length);
- } else {
- hash_vector = NULL;
- /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
- }
- }
-
- /* These lengths could be different as the index_vector can be a
- * different length from the others, a larger index_vector could help
- * reduce collisions. */
- gc_assert(next_vector_length*2 == kv_length);
-
- /* now all set up.. */
-
- /* Work through the KV vector. */
- {
- long i;
- for (i = 1; i < next_vector_length; i++) {
- lispobj old_key = kv_vector[2*i];
-
-#if N_WORD_BITS == 32
- unsigned long old_index = (old_key & 0x1fffffff)%length;
-#elif N_WORD_BITS == 64
- unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
-#endif
-
- /* Scavenge the key and value. */
- scavenge(&kv_vector[2*i],2);
-
- /* Check whether the key has moved and is EQ based. */
- {
- lispobj new_key = kv_vector[2*i];
-#if N_WORD_BITS == 32
- unsigned long new_index = (new_key & 0x1fffffff)%length;
-#elif N_WORD_BITS == 64
- unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
-#endif
-
- if ((old_index != new_index) &&
- ((!hash_vector) ||
- (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) &&
- ((new_key != empty_symbol) ||
- (kv_vector[2*i] != empty_symbol))) {
-
- /*FSHOW((stderr,
- "* EQ key %d moved from %x to %x; index %d to %d\n",
- i, old_key, new_key, old_index, new_index));*/
-
- if (index_vector[old_index] != 0) {
- /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
-
- /* Unlink the key from the old_index chain. */
- if (index_vector[old_index] == i) {
- /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
- index_vector[old_index] = next_vector[i];
- /* Link it into the needing rehash chain. */
- next_vector[i] = fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(i);
- /*SHOW("P2");*/
- } else {
- unsigned long prior = index_vector[old_index];
- unsigned long next = next_vector[prior];
-
- /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
-
- while (next != 0) {
- /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
- if (next == i) {
- /* Unlink it. */
- next_vector[prior] = next_vector[next];
- /* Link it into the needing rehash
- * chain. */
- next_vector[next] =
- fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(next);
- /*SHOW("/P3");*/
- break;
- }
- prior = next;
- next = next_vector[next];
- }
- }
- }
- }
- }
- }
- }
- return (CEILING(kv_length + 2, 2));
-}
-
-#else
-
-static long
-scav_vector(lispobj *where, lispobj object)
-{
- if (HeaderValue(object) == subtype_VectorValidHashing) {
- *where =
- (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
- }
- return 1;
-}
-
-#endif
-
-\f
-/*
* Lutexes. Using the normal finalization machinery for finalizing
* lutexes is tricky, since the finalization depends on working lutexes.
* So we track the lutexes in the GC and finalize them manually.
#define SC_GEN_CK 0
#if SC_GEN_CK
/* Clear the write_protected_cleared flags on all pages. */
- for (i = 0; i < NUM_PAGES; i++)
+ for (i = 0; i < page_table_pages; i++)
page_table[i].write_protected_cleared = 0;
#endif
#if SC_GEN_CK
/* Check that none of the write_protected pages in this generation
* have been written to. */
- for (i = 0; i < NUM_PAGES; i++) {
+ for (i = 0; i < page_table_pages; i++) {
if ((page_table[i].allocation != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
/* Record all new areas now. */
record_new_objects = 2;
+ /* Give a chance to weak hash tables to make other objects live.
+ * FIXME: The algorithm implemented here for weak hash table gcing
+ * is O(W^2+N) as Bruno Haible warns in
+ * http://www.haible.de/bruno/papers/cs/weak/WeakDatastructures-writeup.html
+ * see "Implementation 2". */
+ scav_weak_hash_tables();
+
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
if (gencgc_verbose)
SHOW("new_areas overflow, doing full scavenge");
- /* Don't need to record new areas that get scavenge anyway
- * during scavenge_newspace_generation_one_scan. */
+ /* Don't need to record new areas that get scavenged
+ * anyway during scavenge_newspace_generation_one_scan. */
record_new_objects = 1;
scavenge_newspace_generation_one_scan(generation);
/* Record all new areas now. */
record_new_objects = 2;
+ scav_weak_hash_tables();
+
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
scavenge(page_address(page)+offset, size);
}
+ scav_weak_hash_tables();
+
/* Flush the current regions updating the tables. */
gc_alloc_update_all_page_tables();
}
#if SC_NS_GEN_CK
/* Check that none of the write_protected pages in this generation
* have been written to. */
- for (i = 0; i < NUM_PAGES; i++) {
+ for (i = 0; i < page_table_pages; i++) {
if ((page_table[i].allocation != FREE_PAGE_FLAG)
&& (page_table[i].bytes_used != 0)
&& (page_table[i].gen == generation)
}
#endif
-#if defined(LISP_FEATURE_PPC)
-extern int closure_tramp;
-extern int undefined_tramp;
-#else
-extern int undefined_tramp;
-#endif
-
static void
verify_space(lispobj *start, size_t words)
{
*/
} else {
/* Verify that it points to another valid space. */
- if (!to_readonly_space && !to_static_space &&
-#if defined(LISP_FEATURE_PPC)
- !((thing == &closure_tramp) ||
- (thing == &undefined_tramp))
-#else
- thing != (unsigned long)&undefined_tramp
-#endif
- ) {
+ if (!to_readonly_space && !to_static_space) {
lose("Ptr %x @ %x sees junk.\n", thing, start);
}
}
}
}
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+
static void
scavenge_control_stack()
{
scavenge(control_stack, control_stack_size);
}
-#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
/* Scavenging Interrupt Contexts */
static int boxed_registers[] = BOXED_REGISTERS;
#endif
+#if defined(LISP_FEATURE_SB_THREAD)
static void
preserve_context_registers (os_context_t *c)
{
preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
preserve_pointer((void*)*os_context_pc_addr(c));
+#elif defined LISP_FEATURE_X86_64
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
+ preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
+ preserve_pointer((void*)*os_context_pc_addr(c));
#else
#error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
- for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+ for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
preserve_pointer(*ptr);
}
}
+#endif
/* Garbage collect a generation. If raise is 0 then the remains of the
* generation are not raised to the next generation. */
/* The oldest generation can't be raised. */
gc_assert((generation != HIGHEST_NORMAL_GENERATION) || (raise == 0));
+ /* Check if weak hash tables were processed in the previous GC. */
+ gc_assert(weak_hash_tables == NULL);
+
/* Initialize the weak pointer list. */
weak_pointers = NULL;
#else
esp = (void **)((void *)&raise);
#endif
- for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
+ for (ptr = ((void **)th->control_stack_end)-1; ptr > esp; ptr--) {
preserve_pointer(*ptr);
}
}
}
#endif
+ scan_weak_hash_tables();
scan_weak_pointers();
/* Flush the current regions, updating the tables. */
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
+ gc_active_p = 1;
+
if (last_gen > HIGHEST_NORMAL_GENERATION+1) {
FSHOW((stderr,
"/collect_garbage: last_gen = %d, doing a level 0 GC\n",
high_water_mark = 0;
}
+ gc_active_p = 0;
+
SHOW("returning from collect_garbage");
}
if (gencgc_verbose > 1)
SHOW("entering gc_free_heap");
- for (page = 0; page < NUM_PAGES; page++) {
+ for (page = 0; page < page_table_pages; page++) {
/* Skip free pages which should already be zero filled. */
if (page_table[page].allocated != FREE_PAGE_FLAG) {
void *page_start, *addr;
{
page_index_t i;
+ /* Compute the number of pages needed for the dynamic space.
+ * Dynamic space size should be aligned on page size. */
+ page_table_pages = dynamic_space_size/PAGE_BYTES;
+ gc_assert(dynamic_space_size == (size_t) page_table_pages*PAGE_BYTES);
+
+ page_table = calloc(page_table_pages, sizeof(struct page));
+ gc_assert(page_table);
+
gc_init_tables();
- scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
heap_base = (void*)DYNAMIC_SPACE_START;
/* Initialize each page structure. */
- for (i = 0; i < NUM_PAGES; i++) {
+ for (i = 0; i < page_table_pages; i++) {
/* Initialize all pages as free. */
page_table[i].allocated = FREE_PAGE_FLAG;
page_table[i].bytes_used = 0;
#else
&boxed_region;
#endif
+#ifndef LISP_FEATURE_WIN32
+ lispobj alloc_signal;
+#endif
void *new_obj;
void *new_free_pointer;
+
gc_assert(nbytes>0);
/* Check for alignment allocation problems. */
}
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
+
+#ifndef LISP_FEATURE_WIN32
+ alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
+ if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
+ if ((signed long) alloc_signal <= 0) {
+#ifdef LISP_FEATURE_SB_THREAD
+ kill_thread_safely(thread->os_thread, SIGPROF);
+#else
+ raise(SIGPROF);
+#endif
+ } else {
+ SetSymbolValue(ALLOC_SIGNAL,
+ alloc_signal - (1 << N_FIXNUM_TAG_BITS),
+ thread);
+ }
+ }
+#endif
+
return (new_obj);
}
\f
void *runtime_bytes = NULL;
size_t runtime_size;
- file = prepare_to_save(filename, prepend_runtime, &runtime_bytes, &runtime_size);
+ file = prepare_to_save(filename, prepend_runtime, &runtime_bytes,
+ &runtime_size);
if (file == NULL)
return;