+
+\f
+/* Find the code object for the given pc, or return NULL on
+ failure. The pc is looked up in read-only, static, and dynamic
+ space, in that order; a hit only counts if the enclosing object
+ is actually a code component. */
+lispobj *
+component_ptr_from_pc(lispobj *pc)
+{
+ lispobj *object = NULL;
+
+ /* Search each space in turn; the first space containing pc wins. */
+ if ( (object = search_read_only_space(pc)) )
+ ;
+ else if ( (object = search_static_space(pc)) )
+ ;
+ else
+ object = search_dynamic_space(pc);
+
+ if (object) /* if we found something */
+ /* pc may lie inside some non-code object; only a code
+ * component header counts as success. */
+ if (widetag_of(*object) == CODE_HEADER_WIDETAG)
+ return(object);
+
+ return (NULL);
+}
+
+/* Scan an area looking for an object which encloses the given pointer.
+ * Return the object start on success or NULL on failure.
+ *
+ * start points at the first object header in the area, words is the
+ * number of words to scan, and pointer is the address being sought. */
+lispobj *
+gc_search_space(lispobj *start, size_t words, lispobj *pointer)
+{
+ while (words > 0) {
+ size_t count = 1;
+ lispobj thing = *start;
+
+ /* If thing is a pointer or an immediate then this is a cons
+ * (two words); otherwise thing is an object header and the
+ * size table gives the object's length in words. */
+ if (is_lisp_pointer(thing) || is_lisp_immediate(thing))
+ count = 2;
+ else
+ count = (sizetab[widetag_of(thing)])(start);
+
+ /* Check whether the pointer is within this object. */
+ if ((pointer >= start) && (pointer < (start+count))) {
+ /* found it! */
+ /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
+ return(start);
+ }
+
+ /* Round up the count: objects are aligned to two-word
+ * boundaries, so advance by an even number of words. */
+ count = CEILING(count,2);
+
+ start += count;
+ words -= count;
+ }
+ return (NULL);
+}
+
+/* Invoke the garbage collector from C by calling into Lisp SUB-GC,
+ * bracketed by a fake foreign function call frame built from the
+ * interrupted context. Returns true iff a GC actually happened.
+ * POST-GC is run afterwards only when deferrable signals can safely
+ * be unblocked. */
+boolean
+maybe_gc(os_context_t *context)
+{
+ lispobj gc_happened;
+ struct thread *thread = arch_os_get_current_thread();
+
+ fake_foreign_function_call(context);
+ /* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
+ * which case we will be running with no gc trigger barrier
+ * thing for a while. But it shouldn't be long until the end
+ * of WITHOUT-GCING.
+ *
+ * FIXME: It would be good to protect the end of dynamic space for
+ * CheneyGC and signal a storage condition from there.
+ */
+
+ /* Restore the signal mask from the interrupted context before
+ * calling into Lisp if interrupts are enabled. Why not always?
+ *
+ * Suppose there is a WITHOUT-INTERRUPTS block far, far out. If an
+ * interrupt hits while in SUB-GC, it is deferred and the
+ * os_context_sigmask of that interrupt is set to block further
+ * deferrable interrupts (until the first one is
+ * handled). Unfortunately, that context refers to this place and
+ * when we return from here the signals will not be blocked.
+ *
+ * A kludgy alternative is to propagate the sigmask change to the
+ * outer context.
+ */
+#ifndef LISP_FEATURE_WIN32
+ check_gc_signals_unblocked_or_lose(os_context_sigmask_addr(context));
+ unblock_gc_signals(0, 0);
+#endif
+ FSHOW((stderr, "/maybe_gc: calling SUB_GC\n"));
+ /* FIXME: Nothing must go wrong during GC else we end up running
+ * the debugger, error handlers, and user code in general in a
+ * potentially unsafe place. Running out of the control stack or
+ * the heap in SUB-GC are ways to lose. Of course, deferrables
+ * cannot be unblocked because there may be a pending handler, or
+ * we may even be in a WITHOUT-INTERRUPTS. */
+ gc_happened = funcall0(StaticSymbolFunction(SUB_GC));
+ FSHOW((stderr, "/maybe_gc: gc_happened=%s\n",
+ (gc_happened == NIL) ? "NIL" : "T"));
+ if ((gc_happened != NIL) &&
+ /* See if interrupts are enabled or it's possible to enable
+ * them. POST-GC has a similar check, but we don't want to
+ * unlock deferrables in that case and get a pending interrupt
+ * here. */
+ ((SymbolValue(INTERRUPTS_ENABLED,thread) != NIL) ||
+ (SymbolValue(ALLOW_WITH_INTERRUPTS,thread) != NIL))) {
+#ifndef LISP_FEATURE_WIN32
+ sigset_t *context_sigmask = os_context_sigmask_addr(context);
+ if (!deferrables_blocked_p(context_sigmask)) {
+ thread_sigmask(SIG_SETMASK, context_sigmask, 0);
+ check_gc_signals_unblocked_or_lose(0);
+#endif
+ FSHOW((stderr, "/maybe_gc: calling POST_GC\n"));
+ funcall0(StaticSymbolFunction(POST_GC));
+#ifndef LISP_FEATURE_WIN32
+ } else {
+ FSHOW((stderr, "/maybe_gc: punting on POST_GC due to blockage\n"));
+ }
+#endif
+ }
+ undo_fake_foreign_function_call(context);
+ FSHOW((stderr, "/maybe_gc: returning\n"));
+ return (gc_happened != NIL);
+}
+
+/* Stride (4 KiB) of the stack-scrubbing loops below: zeroing or
+ * checking proceeds in chunks of this many bytes. */
+#define BYTES_ZERO_BEFORE_END (1<<12)
+
+/* There used to be a similar function called SCRUB-CONTROL-STACK in
+ * Lisp and another called zero_stack() in cheneygc.c, but since it's
+ * shorter to express in, and more often called from C, I keep only
+ * the C one after fixing it. -- MG 2009-03-25 */
+
+/* Zero the unused portion of the control stack so that old objects
+ * are not kept alive because of uninitialized stack variables.
+ *
+ * "To summarize the problem, since not all allocated stack frame
+ * slots are guaranteed to be written by the time you call an another
+ * function or GC, there may be garbage pointers retained in your dead
+ * stack locations. The stack scrubbing only affects the part of the
+ * stack from the SP to the end of the allocated stack." - ram, on
+ * cmucl-imp, Tue, 25 Sep 2001
+ *
+ * So, as an (admittedly lame) workaround, from time to time we call
+ * scrub-control-stack to zero out all the unused portion. This is
+ * supposed to happen when the stack is mostly empty, so that we have
+ * a chance of clearing more of it: callers are currently (2002.07.18)
+ * REPL, SUB-GC and sig_stop_for_gc_handler. */
+
+/* Take care not to tread on the guard page and the hard guard page as
+ * it would be unkind to sig_stop_for_gc_handler. Touching the return
+ * guard page is not dangerous. For this to work the guard page must
+ * be zeroed when protected. */
+
+/* FIXME: I think there is no guarantee that once
+ * BYTES_ZERO_BEFORE_END bytes are zero the rest are also zero. This
+ * may be what the "lame" adjective in the above comment is for. In
+ * this case, exact gc may lose badly. */
+/* Zero the unused portion of the current thread's control stack,
+ * from the current stack pointer toward the guard pages, so stale
+ * stack slots do not keep dead objects alive. See the long
+ * commentary above for history and caveats. */
+void
+scrub_control_stack(void)
+{
+ struct thread *th = arch_os_get_current_thread();
+ os_vm_address_t guard_page_address = CONTROL_STACK_GUARD_PAGE(th);
+ os_vm_address_t hard_guard_page_address = CONTROL_STACK_HARD_GUARD_PAGE(th);
+ lispobj *sp;
+#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+ /* The control stack is the C stack: start just below our own
+ * frame so we don't scrub live C locals. */
+ sp = (lispobj *)&sp - 1;
+#else
+ sp = access_control_stack_pointer(th);
+#endif
+ scrub:
+ /* Stop before touching the hard guard page, or the (soft) guard
+ * page while it is still protected; see comment above about
+ * sig_stop_for_gc_handler. */
+ if ((((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size)) &&
+ ((os_vm_address_t)sp >= hard_guard_page_address)) ||
+ (((os_vm_address_t)sp < (guard_page_address + os_vm_page_size)) &&
+ ((os_vm_address_t)sp >= guard_page_address) &&
+ (th->control_stack_guard_page_protected != NIL)))
+ return;
+#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
+ /* Zero one BYTES_ZERO_BEFORE_END-aligned chunk... */
+ do {
+ *sp = 0;
+ } while (((unsigned long)sp--) & (BYTES_ZERO_BEFORE_END - 1));
+ if ((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size))
+ return;
+ /* ...then stop if the next chunk is already all zero, otherwise
+ * loop back and scrub it too. */
+ do {
+ if (*sp)
+ goto scrub;
+ } while (((unsigned long)sp--) & (BYTES_ZERO_BEFORE_END - 1));
+#else
+ do {
+ *sp = 0;
+ } while (((unsigned long)++sp) & (BYTES_ZERO_BEFORE_END - 1));
+ if ((os_vm_address_t)sp >= hard_guard_page_address)
+ return;
+ do {
+ if (*sp)
+ goto scrub;
+ } while (((unsigned long)++sp) & (BYTES_ZERO_BEFORE_END - 1));
+#endif
+}
+\f
+#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
+
+/* Scavenging Interrupt Contexts */
+
+/* Per-architecture list of context registers that hold boxed (tagged)
+ * Lisp values and therefore must be scavenged. */
+static int boxed_registers[] = BOXED_REGISTERS;
+
+/* The GC has a notion of an "interior pointer" register, an unboxed
+ * register that typically contains a pointer to inside an object
+ * referenced by another pointer. The most obvious of these is the
+ * program counter, although many compiler backends define a "Lisp
+ * Interior Pointer" register known to the runtime as reg_LIP, and
+ * various CPU architectures have other registers that also partake of
+ * the interior-pointer nature. As the code for pairing an interior
+ * pointer value up with its "base" register, and fixing it up after
+ * scavenging is complete is horribly repetitive, a few macros paper
+ * over the monotony. --AB, 2010-Jul-14 */
+
+/* These macros are only ever used over a lexical environment which
+ * defines a pointer to an os_context_t called context, thus we don't
+ * bother to pass that context in as a parameter. */
+
+/* Define how to access a given interior pointer. */
+#define ACCESS_INTERIOR_POINTER_pc \
+ *os_context_pc_addr(context)
+#define ACCESS_INTERIOR_POINTER_lip \
+ *os_context_register_addr(context, reg_LIP)
+#define ACCESS_INTERIOR_POINTER_lr \
+ *os_context_lr_addr(context)
+#define ACCESS_INTERIOR_POINTER_npc \
+ *os_context_npc_addr(context)
+#define ACCESS_INTERIOR_POINTER_ctr \
+ *os_context_ctr_addr(context)
+
+/* Declare the saved offset and the paired base-register index for
+ * interior pointer NAME. */
+#define INTERIOR_POINTER_VARS(name) \
+ unsigned long name##_offset; \
+ int name##_register_pair
+
+/* Record NAME's offset from (and index of) its base register. */
+#define PAIR_INTERIOR_POINTER(name) \
+ pair_interior_pointer(context, \
+ ACCESS_INTERIOR_POINTER_##name, \
+ &name##_offset, \
+ &name##_register_pair)
+
+/* One complexity here is that if a paired register is not found for
+ * an interior pointer, then that pointer does not get updated.
+ * Originally, there was some commentary about using an index of -1
+ * when calling os_context_register_addr() on SPARC referring to the
+ * program counter, but the real reason is to allow an interior
+ * pointer register to point to the runtime, read-only space, or
+ * static space without problems. */
+#define FIXUP_INTERIOR_POINTER(name) \
+ do { \
+ if (name##_register_pair >= 0) { \
+ ACCESS_INTERIOR_POINTER_##name = \
+ (*os_context_register_addr(context, \
+ name##_register_pair) \
+ & ~LOWTAG_MASK) \
+ + name##_offset; \
+ } \
+ } while (0)
+
+
+/* Find the boxed register whose (untagged) value is the closest base
+ * at or below pointer, storing the distance in *saved_offset and the
+ * register's index in *register_pair. If no boxed register lies at
+ * or below pointer, *register_pair is left as -1 and the pointer will
+ * not be fixed up (see FIXUP_INTERIOR_POINTER above). */
+static void
+pair_interior_pointer(os_context_t *context, unsigned long pointer,
+ unsigned long *saved_offset, int *register_pair)
+{
+ int i;
+
+ /*
+ * I (RLT) think this is trying to find the boxed register that is
+ * closest to the LIP address, without going past it. Usually, it's
+ * reg_CODE or reg_LRA. But sometimes, nothing can be found.
+ */
+ /* 0x7FFFFFFF on 32-bit platforms;
+ 0x7FFFFFFFFFFFFFFF on 64-bit platforms */
+ *saved_offset = (((unsigned long)1) << (N_WORD_BITS - 1)) - 1;
+ *register_pair = -1;
+ for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
+ unsigned long reg;
+ long offset;
+ int index;
+
+ index = boxed_registers[i];
+ reg = *os_context_register_addr(context, index);
+
+ /* An interior pointer is never relative to a non-pointer
+ * register (an oversight in the original implementation).
+ * The simplest argument for why this is true is to consider
+ * a fixnum that happens by coincidence to be the word-index in
+ * memory of the header for some object plus two. This
+ * happenstance would cause the register containing the fixnum
+ * to be selected as the register_pair if the interior pointer
+ * is to anywhere after the first two words of the object.
+ * The fixnum won't be changed during GC, but the object might
+ * move, thus destroying the interior pointer. --AB,
+ * 2010-Jul-14 */
+
+ if (is_lisp_pointer(reg) &&
+ ((reg & ~LOWTAG_MASK) <= pointer)) {
+ offset = pointer - (reg & ~LOWTAG_MASK);
+ if (offset < *saved_offset) {
+ *saved_offset = offset;
+ *register_pair = index;
+ }
+ }
+ }
+}
+
+/* Scavenge one interrupt context: pair each interior-pointer register
+ * with its base register, scavenge all boxed registers (possibly
+ * moving the objects they point to), then repair the interior
+ * pointers from their bases plus the saved offsets. */
+static void
+scavenge_interrupt_context(os_context_t * context)
+{
+ int i;
+
+ /* FIXME: The various #ifdef noise here is precisely that: noise.
+ * Is it possible to fold it into the macrology so that we have
+ * one set of #ifdefs and then INTERIOR_POINTER_VARS /et alia/
+ * compile out for the registers that don't exist on a given
+ * platform? */
+
+ INTERIOR_POINTER_VARS(pc);
+#ifdef reg_LIP
+ INTERIOR_POINTER_VARS(lip);
+#endif
+#ifdef ARCH_HAS_LINK_REGISTER
+ INTERIOR_POINTER_VARS(lr);
+#endif
+#ifdef ARCH_HAS_NPC_REGISTER
+ INTERIOR_POINTER_VARS(npc);
+#endif
+#ifdef LISP_FEATURE_PPC
+ INTERIOR_POINTER_VARS(ctr);
+#endif
+
+ PAIR_INTERIOR_POINTER(pc);
+#ifdef reg_LIP
+ PAIR_INTERIOR_POINTER(lip);
+#endif
+#ifdef ARCH_HAS_LINK_REGISTER
+ PAIR_INTERIOR_POINTER(lr);
+#endif
+#ifdef ARCH_HAS_NPC_REGISTER
+ PAIR_INTERIOR_POINTER(npc);
+#endif
+#ifdef LISP_FEATURE_PPC
+ PAIR_INTERIOR_POINTER(ctr);
+#endif
+
+ /* Scavenge all boxed registers in the context. */
+ for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
+ int index;
+ lispobj foo;
+
+ index = boxed_registers[i];
+ foo = *os_context_register_addr(context, index);
+ scavenge(&foo, 1);
+ *os_context_register_addr(context, index) = foo;
+
+ /* this is unlikely to work as intended on bigendian
+ * 64 bit platforms */
+
+ /* NOTE(review): each register is scavenged twice here — once
+ * via the local copy above and once in place below. The
+ * in-place call looks redundant with the copy-based one;
+ * presumably one of the two is the bigendian workaround
+ * referred to above — confirm before touching. */
+ scavenge((lispobj *) os_context_register_addr(context, index), 1);
+ }
+
+ /* Now that the scavenging is done, repair the various interior
+ * pointers. */
+ FIXUP_INTERIOR_POINTER(pc);
+#ifdef reg_LIP
+ FIXUP_INTERIOR_POINTER(lip);
+#endif
+#ifdef ARCH_HAS_LINK_REGISTER
+ FIXUP_INTERIOR_POINTER(lr);
+#endif
+#ifdef ARCH_HAS_NPC_REGISTER
+ FIXUP_INTERIOR_POINTER(npc);
+#endif
+#ifdef LISP_FEATURE_PPC
+ FIXUP_INTERIOR_POINTER(ctr);
+#endif
+}
+
+/* Scavenge every live interrupt context of the given thread.
+ * FREE-INTERRUPT-CONTEXT-INDEX holds the number of contexts
+ * currently in use in th->interrupt_contexts. */
+void
+scavenge_interrupt_contexts(struct thread *th)
+{
+ int i, index;
+ os_context_t *context;
+
+ index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+
+#if defined(DEBUG_PRINT_CONTEXT_INDEX)
+ printf("Number of active contexts: %d\n", index);
+#endif
+
+ for (i = 0; i < index; i++) {
+ context = th->interrupt_contexts[i];
+ scavenge_interrupt_context(context);
+ }
+}
+#endif /* x86oid targets */