#include "validate.h"
#include "lispregs.h"
#include "arch.h"
-#include "fixnump.h"
#include "gc.h"
#include "genesis/primitive-objects.h"
#include "genesis/static-symbols.h"
#endif
size_t dynamic_space_size = DEFAULT_DYNAMIC_SPACE_SIZE;
+size_t thread_control_stack_size = DEFAULT_CONTROL_STACK_SIZE;
inline static boolean
forwarding_pointer_p(lispobj *pointer) {
/*
* copying objects
*/
-
-/* to copy a boxed object */
+/* Allocate NWORDS words of storage of the kind selected by
+ * PAGE_TYPE_FLAG and copy OBJECT there, preserving its lowtag.
+ * Shared worker for the type-specific copiers (copy_object,
+ * copy_code_object) defined below. */
+static
lispobj
-copy_object(lispobj object, long nwords)
+gc_general_copy_object(lispobj object, long nwords, int page_type_flag)
{
int tag;
lispobj *new;
tag = lowtag_of(object);
/* Allocate space. */
-    new = gc_general_alloc(nwords*N_WORD_BYTES,ALLOC_BOXED,ALLOC_QUICK);
+    new = gc_general_alloc(nwords*N_WORD_BYTES, page_type_flag, ALLOC_QUICK);
/* Copy the object. */
memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
return make_lispobj(new,tag);
}
+/* to copy a boxed object: allocates with BOXED_PAGE_FLAG */
+lispobj
+copy_object(lispobj object, long nwords)
+{
+    return gc_general_copy_object(object, nwords, BOXED_PAGE_FLAG);
+}
+
+/* to copy a code object: same as copy_object, but allocates with
+ * CODE_PAGE_FLAG instead of BOXED_PAGE_FLAG */
+lispobj
+copy_code_object(lispobj object, long nwords)
+{
+    return gc_general_copy_object(object, nwords, CODE_PAGE_FLAG);
+}
+
static long scav_lose(lispobj *where, lispobj object); /* forward decl */
/* FIXME: Most calls end up going to some trouble to compute an
lispobj object = *object_ptr;
#ifdef LISP_FEATURE_GENCGC
- gc_assert(!forwarding_pointer_p(object_ptr));
+ if (forwarding_pointer_p(object_ptr))
+ lose("unexpect forwarding pointer in scavenge: %p, start=%p, n=%l\n",
+ object_ptr, start, n_words);
#endif
if (is_lisp_pointer(object)) {
if (from_space_p(object)) {
nwords = ncode_words + nheader_words;
nwords = CEILING(nwords, 2);
- l_new_code = copy_object(l_code, nwords);
+ l_new_code = copy_code_object(l_code, nwords);
new_code = (struct code *) native_pointer(l_new_code);
#if defined(DEBUG_CODE_GC)
scavenge(&function_ptr->name, 1);
scavenge(&function_ptr->arglist, 1);
scavenge(&function_ptr->type, 1);
- scavenge(&function_ptr->xrefs, 1);
+ scavenge(&function_ptr->info, 1);
}
return n_words;
/* Copy 'object'. */
new_cons = (struct cons *)
- gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK);
+ gc_general_alloc(sizeof(struct cons), BOXED_PAGE_FLAG, ALLOC_QUICK);
new_cons->car = cons->car;
new_cons->cdr = cons->cdr; /* updated later */
new_list_pointer = make_lispobj(new_cons,lowtag_of(object));
/* Copy 'cdr'. */
new_cdr_cons = (struct cons*)
- gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK);
+ gc_general_alloc(sizeof(struct cons), BOXED_PAGE_FLAG, ALLOC_QUICK);
new_cdr_cons->car = cdr_cons->car;
new_cdr_cons->cdr = cdr_cons->cdr;
new_cdr = make_lispobj(new_cdr_cons, lowtag_of(cdr));
/* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
fdefn->fun, fdefn->raw_addr)); */
- if ((char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET)
- == (char *)((unsigned long)(fdefn->raw_addr))) {
+ if ((char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET) == fdefn->raw_addr) {
scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
/* Don't write unnecessarily. */
void scan_weak_pointers(void)
{
- struct weak_pointer *wp;
- for (wp = weak_pointers; wp != NULL; wp=wp->next) {
+ struct weak_pointer *wp, *next_wp;
+ for (wp = weak_pointers, next_wp = NULL; wp != NULL; wp = next_wp) {
lispobj value = wp->value;
lispobj *first_pointer;
gc_assert(widetag_of(wp->header)==WEAK_POINTER_WIDETAG);
+
+ next_wp = wp->next;
+ wp->next = NULL;
+ if (next_wp == wp) /* gencgc uses a ref to self for end of list */
+ next_wp = NULL;
+
if (!(is_lisp_pointer(value) && from_space_p(value)))
continue;
unsigned long hash_vector_length;
lispobj empty_symbol;
lispobj weakness = hash_table->weakness;
- long i;
+ unsigned long i;
kv_vector = get_array_data(hash_table->table,
SIMPLE_VECTOR_WIDETAG, &kv_length);
/* Scavenge the key and value. */
scavenge(&kv_vector[2*i],2);
- /* Rehashing of EQ based keys. */
- if ((!hash_vector) ||
- (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) {
-#ifndef LISP_FEATURE_GENCGC
- /* For GENCGC scav_hash_table_entries only rehashes
- * the entries whose keys were moved. Cheneygc always
- * moves the objects so here we let the lisp side know
- * that rehashing is needed for the whole table. */
- *(kv_vector - 2) = (subtype_VectorMustRehash<<N_WIDETAG_BITS) |
- SIMPLE_VECTOR_WIDETAG;
-#else
- unsigned long old_index = EQ_HASH(old_key)%length;
+ /* If an EQ-based key has moved, mark the hash-table for
+ * rehashing. */
+ if (!hash_vector || hash_vector[i] == MAGIC_HASH_VECTOR_VALUE) {
lispobj new_key = kv_vector[2*i];
- unsigned long new_index = EQ_HASH(new_key)%length;
- /* Check whether the key has moved. */
- if ((old_index != new_index) &&
- (new_key != empty_symbol)) {
- gc_assert(kv_vector[2*i+1] != empty_symbol);
-
- /*FSHOW((stderr,
- "* EQ key %d moved from %x to %x; index %d to %d\n",
- i, old_key, new_key, old_index, new_index));*/
-
- /* Unlink the key from the old_index chain. */
- if (!index_vector[old_index]) {
- /* It's not here, must be on the
- * needing_rehash chain. */
- } else if (index_vector[old_index] == i) {
- /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
- index_vector[old_index] = next_vector[i];
- /* Link it into the needing rehash chain. */
- next_vector[i] =
- fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(i);
- /*SHOW("P2");*/
- } else {
- unsigned long prior = index_vector[old_index];
- unsigned long next = next_vector[prior];
-
- /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
-
- while (next != 0) {
- /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
- if (next == i) {
- /* Unlink it. */
- next_vector[prior] = next_vector[next];
- /* Link it into the needing rehash
- * chain. */
- next_vector[next] =
- fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(next);
- /*SHOW("/P3");*/
- break;
- }
- prior = next;
- next = next_vector[next];
- }
- }
+
+ if (old_key != new_key && new_key != empty_symbol) {
+ hash_table->needs_rehash_p = T;
}
-#endif
}
}
}
/* Scavenge element 0, which may be a hash-table structure. */
scavenge(where+2, 1);
if (!is_lisp_pointer(where[2])) {
- lose("no pointer at %x in hash table\n", where[2]);
+    /* This'll happen when REHASH clears the header of old-kv-vector
+     * and fills it with zero, but some other thread simultaneously
+     * sets the header in %%PUTHASH.
+     */
+ fprintf(stderr,
+ "Warning: no pointer at %lx in hash table: this indicates "
+ "non-fatal corruption caused by concurrent access to a "
+ "hash-table from multiple threads. Any accesses to "
+ "hash-tables shared between threads should be protected "
+ "by locks.\n", (unsigned long)&where[2]);
+ // We've scavenged three words.
+ return 3;
}
hash_table = (struct hash_table *)native_pointer(where[2]);
/*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
lispobj *hash_vector;
lispobj empty_symbol;
lispobj weakness = hash_table->weakness;
- long i;
+ unsigned long i;
kv_vector = get_array_data(hash_table->table,
SIMPLE_VECTOR_WIDETAG, NULL);
kv_vector, index_vector, next_vector,
hash_vector, empty_symbol, weakness);
}
- {
- lispobj first = fixnum_value(hash_table->needing_rehash);
- scan_weak_hash_table_chain(hash_table, &first,
- kv_vector, index_vector, next_vector,
- hash_vector, empty_symbol, weakness);
- hash_table->needing_rehash = make_fixnum(first);
- }
}
/* Remove dead entries from weak hash tables. */
{
lose("no scavenge function for object 0x%08x (widetag 0x%x)\n",
(unsigned long)object,
- widetag_of(*(lispobj*)native_pointer(object)));
+ widetag_of(object));
return 0; /* bogus return value to satisfy static type checking */
}
void
gc_init_tables(void)
{
- long i;
+ unsigned long i;
/* Set default value in all slots of scavenge table. FIXME
* replace this gnarly sizeof with something based on
/* skipping OTHER_IMMEDIATE_0_LOWTAG */
scavtab[LIST_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_list_pointer;
scavtab[ODD_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_immediate;
- scavtab[INSTANCE_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_instance_pointer;
+ scavtab[INSTANCE_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] =
+ scav_instance_pointer;
/* skipping OTHER_IMMEDIATE_1_LOWTAG */
scavtab[OTHER_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_other_pointer;
}
lispobj thing = *start;
/* If thing is an immediate then this is a cons. */
- if (is_lisp_pointer(thing)
- || (fixnump(thing))
- || (widetag_of(thing) == CHARACTER_WIDETAG)
-#if N_WORD_BITS == 64
- || (widetag_of(thing) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(thing) == UNBOUND_MARKER_WIDETAG))
+ if (is_lisp_pointer(thing) || is_lisp_immediate(thing))
count = 2;
else
count = (sizetab[widetag_of(thing)])(start);
boolean
maybe_gc(os_context_t *context)
{
-#ifndef LISP_FEATURE_WIN32
+ lispobj gc_happened;
struct thread *thread = arch_os_get_current_thread();
-#endif
fake_foreign_function_call(context);
/* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
* outer context.
*/
#ifndef LISP_FEATURE_WIN32
- if(SymbolValue(INTERRUPTS_ENABLED,thread)!=NIL) {
+ check_gc_signals_unblocked_or_lose(os_context_sigmask_addr(context));
+ unblock_gc_signals(0, 0);
+#endif
+ FSHOW((stderr, "/maybe_gc: calling SUB_GC\n"));
+ /* FIXME: Nothing must go wrong during GC else we end up running
+ * the debugger, error handlers, and user code in general in a
+ * potentially unsafe place. Running out of the control stack or
+ * the heap in SUB-GC are ways to lose. Of course, deferrables
+ * cannot be unblocked because there may be a pending handler, or
+ * we may even be in a WITHOUT-INTERRUPTS. */
+ gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
+ FSHOW((stderr, "/maybe_gc: gc_happened=%s\n",
+ (gc_happened == NIL) ? "NIL" : "T"));
+ if ((gc_happened != NIL) &&
+ /* See if interrupts are enabled or it's possible to enable
+ * them. POST-GC has a similar check, but we don't want to
+ * unlock deferrables in that case and get a pending interrupt
+ * here. */
+ ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) ||
+ (SymbolValue(ALLOW_WITH_INTERRUPTS,thread) != NIL))) {
+#ifndef LISP_FEATURE_WIN32
sigset_t *context_sigmask = os_context_sigmask_addr(context);
-#ifdef LISP_FEATURE_SB_THREAD
- /* What if the context we'd like to restore has GC signals
- * blocked? Just skip the GC: we can't set GC_PENDING, because
- * that would block the next attempt, and we don't know when
- * we'd next check for it -- and it's hard to be sure that
- * unblocking would be safe.
- *
- * FIXME: This is not actually much better: we may already have
- * GC_PENDING set, and presumably our caller assumes that we will
- * clear it. Perhaps we should, even though we don't actually GC? */
- if (sigismember(context_sigmask,SIG_STOP_FOR_GC)) {
- undo_fake_foreign_function_call(context);
- return 1;
+ if (!deferrables_blocked_p(context_sigmask)) {
+ thread_sigmask(SIG_SETMASK, context_sigmask, 0);
+ check_gc_signals_unblocked_or_lose(0);
+#endif
+ FSHOW((stderr, "/maybe_gc: calling POST_GC\n"));
+ funcall0(StaticSymbolFunction(POST_GC));
+#ifndef LISP_FEATURE_WIN32
+ } else {
+ FSHOW((stderr, "/maybe_gc: punting on POST_GC due to blockage\n"));
}
#endif
- thread_sigmask(SIG_SETMASK, context_sigmask, 0);
}
- else
- unblock_gc_signals();
-#endif
- /* SIG_STOP_FOR_GC needs to be enabled before we can call lisp:
- * otherwise two threads racing here may deadlock: the other will
- * wait on the GC lock, and the other cannot stop the first one... */
- funcall0(SymbolFunction(SUB_GC));
undo_fake_foreign_function_call(context);
- return 1;
+ FSHOW((stderr, "/maybe_gc: returning\n"));
+ return (gc_happened != NIL);
+}
+
+#define BYTES_ZERO_BEFORE_END (1<<12)
+
+/* There used to be a similar function called SCRUB-CONTROL-STACK in
+ * Lisp and another called zero_stack() in cheneygc.c, but since it's
+ * shorter to express in, and more often called from C, I keep only
+ * the C one after fixing it. -- MG 2009-03-25 */
+
+/* Zero the unused portion of the control stack so that old objects
+ * are not kept alive because of uninitialized stack variables.
+ *
+ * "To summarize the problem, since not all allocated stack frame
+ * slots are guaranteed to be written by the time you call an another
+ * function or GC, there may be garbage pointers retained in your dead
+ * stack locations. The stack scrubbing only affects the part of the
+ * stack from the SP to the end of the allocated stack." - ram, on
+ * cmucl-imp, Tue, 25 Sep 2001
+ *
+ * So, as an (admittedly lame) workaround, from time to time we call
+ * scrub-control-stack to zero out all the unused portion. This is
+ * supposed to happen when the stack is mostly empty, so that we have
+ * a chance of clearing more of it: callers are currently (2002.07.18)
+ * REPL, SUB-GC and sig_stop_for_gc_handler. */
+
+/* Take care not to tread on the guard page and the hard guard page as
+ * it would be unkind to sig_stop_for_gc_handler. Touching the return
+ * guard page is not dangerous. For this to work the guard page must
+ * be zeroed when protected. */
+
+/* FIXME: I think there is no guarantee that once
+ * BYTES_ZERO_BEFORE_END bytes are zero the rest are also zero. This
+ * may be what the "lame" adjective in the above comment is for. In
+ * this case, exact gc may lose badly. */
+/* Zero the unused portion of the current thread's control stack,
+ * from the current stack pointer down (or up) to the guard pages.
+ * See the lengthy commentary above for why. */
+void
+scrub_control_stack(void)
+{
+    struct thread *th = arch_os_get_current_thread();
+    os_vm_address_t guard_page_address = CONTROL_STACK_GUARD_PAGE(th);
+    os_vm_address_t hard_guard_page_address = CONTROL_STACK_HARD_GUARD_PAGE(th);
+    lispobj *sp;
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+    /* The C stack is the control stack: start scrubbing just below
+     * our own frame. */
+    sp = (lispobj *)&sp - 1;
+#else
+    sp = current_control_stack_pointer;
+#endif
+ scrub:
+    /* Stop before touching the hard guard page, or the guard page
+     * while it is still protected. */
+    if ((((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size)) &&
+         ((os_vm_address_t)sp >= hard_guard_page_address)) ||
+        (((os_vm_address_t)sp < (guard_page_address + os_vm_page_size)) &&
+         ((os_vm_address_t)sp >= guard_page_address) &&
+         (th->control_stack_guard_page_protected != NIL)))
+        return;
+#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
+    /* Zero words downward until sp crosses a BYTES_ZERO_BEFORE_END
+     * boundary. */
+    do {
+        *sp = 0;
+    } while (((unsigned long)sp--) & (BYTES_ZERO_BEFORE_END - 1));
+    if ((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size))
+        return;
+    /* Skim the next chunk: if any word in it is nonzero, resume
+     * zeroing there; if it is all zero, assume the rest of the stack
+     * is too (see the FIXME above) and stop. */
+    do {
+        if (*sp)
+            goto scrub;
+    } while (((unsigned long)sp--) & (BYTES_ZERO_BEFORE_END - 1));
+#else
+    /* Mirror of the above for stacks that grow upward. */
+    do {
+        *sp = 0;
+    } while (((unsigned long)++sp) & (BYTES_ZERO_BEFORE_END - 1));
+    if ((os_vm_address_t)sp >= hard_guard_page_address)
+        return;
+    do {
+        if (*sp)
+            goto scrub;
+    } while (((unsigned long)++sp) & (BYTES_ZERO_BEFORE_END - 1));
+#endif
+}
+\f
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+
+/* Scavenging Interrupt Contexts */
+
+static int boxed_registers[] = BOXED_REGISTERS;
+
+/* The GC has a notion of an "interior pointer" register, an unboxed
+ * register that typically contains a pointer to inside an object
+ * referenced by another pointer. The most obvious of these is the
+ * program counter, although many compiler backends define a "Lisp
+ * Interior Pointer" register known to the runtime as reg_LIP, and
+ * various CPU architectures have other registers that also partake of
+ * the interior-pointer nature. As the code for pairing an interior
+ * pointer value up with its "base" register, and fixing it up after
+ * scavenging is complete is horribly repetitive, a few macros paper
+ * over the monotony. --AB, 2010-Jul-14 */
+
+/* These macros are only ever used over a lexical environment which
+ * defines a pointer to an os_context_t called context, thus we don't
+ * bother to pass that context in as a parameter. */
+
+/* Define how to access a given interior pointer. */
+#define ACCESS_INTERIOR_POINTER_pc \
+ *os_context_pc_addr(context)
+#define ACCESS_INTERIOR_POINTER_lip \
+ *os_context_register_addr(context, reg_LIP)
+#define ACCESS_INTERIOR_POINTER_lr \
+ *os_context_lr_addr(context)
+#define ACCESS_INTERIOR_POINTER_npc \
+ *os_context_npc_addr(context)
+#define ACCESS_INTERIOR_POINTER_ctr \
+ *os_context_ctr_addr(context)
+
+#define INTERIOR_POINTER_VARS(name) \
+ unsigned long name##_offset; \
+ int name##_register_pair
+
+#define PAIR_INTERIOR_POINTER(name) \
+ pair_interior_pointer(context, \
+ ACCESS_INTERIOR_POINTER_##name, \
+ &name##_offset, \
+ &name##_register_pair)
+
+/* One complexity here is that if a paired register is not found for
+ * an interior pointer, then that pointer does not get updated.
+ * Originally, there was some commentary about using an index of -1
+ * when calling os_context_register_addr() on SPARC referring to the
+ * program counter, but the real reason is to allow an interior
+ * pointer register to point to the runtime, read-only space, or
+ * static space without problems. */
+#define FIXUP_INTERIOR_POINTER(name) \
+ do { \
+ if (name##_register_pair >= 0) { \
+ ACCESS_INTERIOR_POINTER_##name = \
+ (*os_context_register_addr(context, \
+ name##_register_pair) \
+ & ~LOWTAG_MASK) \
+ + name##_offset; \
+ } \
+ } while (0)
+
+
+/* Given an interior POINTER, find the boxed register whose untagged
+ * value is the closest one at or below it, storing the distance in
+ * *SAVED_OFFSET and the register's index in *REGISTER_PAIR.
+ * *REGISTER_PAIR is left as -1 when no qualifying register exists. */
+static void
+pair_interior_pointer(os_context_t *context, unsigned long pointer,
+                      unsigned long *saved_offset, int *register_pair)
+{
+    int i;
+
+    /*
+     * I (RLT) think this is trying to find the boxed register that is
+     * closest to the LIP address, without going past it. Usually, it's
+     * reg_CODE or reg_LRA. But sometimes, nothing can be found.
+     */
+    /* 0x7FFFFFFF on 32-bit platforms;
+       0x7FFFFFFFFFFFFFFF on 64-bit platforms */
+    *saved_offset = (((unsigned long)1) << (N_WORD_BITS - 1)) - 1;
+    *register_pair = -1;
+    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
+        unsigned long reg;
+        long offset;
+        int index;
+
+        index = boxed_registers[i];
+        reg = *os_context_register_addr(context, index);
+
+        /* An interior pointer is never relative to a non-pointer
+         * register (an oversight in the original implementation).
+         * The simplest argument for why this is true is to consider
+         * the fixnum that happens by coincidence to be the word-index
+         * in memory of the header for some object plus two. This
+         * happenstance would cause the register containing the fixnum
+         * to be selected as the register_pair if the interior pointer
+         * is to anywhere after the first two words of the object.
+         * The fixnum won't be changed during GC, but the object might
+         * move, thus destroying the interior pointer. --AB,
+         * 2010-Jul-14 */
+
+        if (is_lisp_pointer(reg) &&
+            ((reg & ~LOWTAG_MASK) <= pointer)) {
+            offset = pointer - (reg & ~LOWTAG_MASK);
+            if (offset < *saved_offset) {
+                *saved_offset = offset;
+                *register_pair = index;
+            }
+        }
+    }
+}
+
+/* Scavenge one interrupt context: pair up each interior-pointer
+ * register with its base boxed register, scavenge all boxed
+ * registers, then recompute the interior pointers from the (possibly
+ * moved) base registers. */
+static void
+scavenge_interrupt_context(os_context_t * context)
+{
+    int i;
+
+    /* FIXME: The various #ifdef noise here is precisely that: noise.
+     * Is it possible to fold it into the macrology so that we have
+     * one set of #ifdefs and then INTERIOR_POINTER_VARS /et alia/
+     * compile out for the registers that don't exist on a given
+     * platform? */
+
+    INTERIOR_POINTER_VARS(pc);
+#ifdef reg_LIP
+    INTERIOR_POINTER_VARS(lip);
+#endif
+#ifdef ARCH_HAS_LINK_REGISTER
+    INTERIOR_POINTER_VARS(lr);
+#endif
+#ifdef ARCH_HAS_NPC_REGISTER
+    INTERIOR_POINTER_VARS(npc);
+#endif
+#ifdef LISP_FEATURE_PPC
+    INTERIOR_POINTER_VARS(ctr);
+#endif
+
+    /* Record each interior pointer's base register and offset before
+     * anything is allowed to move. */
+    PAIR_INTERIOR_POINTER(pc);
+#ifdef reg_LIP
+    PAIR_INTERIOR_POINTER(lip);
+#endif
+#ifdef ARCH_HAS_LINK_REGISTER
+    PAIR_INTERIOR_POINTER(lr);
+#endif
+#ifdef ARCH_HAS_NPC_REGISTER
+    PAIR_INTERIOR_POINTER(npc);
+#endif
+#ifdef LISP_FEATURE_PPC
+    PAIR_INTERIOR_POINTER(ctr);
+#endif
+
+    /* Scavenge all boxed registers in the context. */
+    for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
+        int index;
+        lispobj foo;
+
+        index = boxed_registers[i];
+        foo = *os_context_register_addr(context, index);
+        scavenge(&foo, 1);
+        *os_context_register_addr(context, index) = foo;
+
+        /* this is unlikely to work as intended on bigendian
+         * 64 bit platforms */
+
+        /* NOTE(review): each register is scavenged twice here — once
+         * via the word-sized temporary above and once in place below.
+         * Presumably the second pass is a no-op for already-transported
+         * objects, but confirm this duplication is intentional. */
+        scavenge((lispobj *) os_context_register_addr(context, index), 1);
+    }
+
+    /* Now that the scavenging is done, repair the various interior
+     * pointers. */
+    FIXUP_INTERIOR_POINTER(pc);
+#ifdef reg_LIP
+    FIXUP_INTERIOR_POINTER(lip);
+#endif
+#ifdef ARCH_HAS_LINK_REGISTER
+    FIXUP_INTERIOR_POINTER(lr);
+#endif
+#ifdef ARCH_HAS_NPC_REGISTER
+    FIXUP_INTERIOR_POINTER(npc);
+#endif
+#ifdef LISP_FEATURE_PPC
+    FIXUP_INTERIOR_POINTER(ctr);
+#endif
+}
+
+/* Scavenge every live interrupt context of thread TH. */
+void
+scavenge_interrupt_contexts(struct thread *th)
+{
+    int i, index;
+    os_context_t *context;
+
+    /* Presumably FREE_INTERRUPT_CONTEXT_INDEX is the count of
+     * contexts currently in use -- TODO confirm. */
+    index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+
+#if defined(DEBUG_PRINT_CONTEXT_INDEX)
+    printf("Number of active contexts: %d\n", index);
+#endif
+
+    for (i = 0; i < index; i++) {
+        context = th->interrupt_contexts[i];
+        scavenge_interrupt_context(context);
+    }
}
+#endif /* x86oid targets */