X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgc-common.c;h=97e227c18a5dcbda0e43ae14336eda644bd12b34;hb=68508f10fb9c235d1f29a55b05f5158e90b8e0b6;hp=abd1eae6fca499fb8e424de59682b0e79682c5c8;hpb=fbde18e9b7d8e67e24f628638be4f293cb128101;p=sbcl.git diff --git a/src/runtime/gc-common.c b/src/runtime/gc-common.c index abd1eae..97e227c 100644 --- a/src/runtime/gc-common.c +++ b/src/runtime/gc-common.c @@ -37,11 +37,11 @@ #include "validate.h" #include "lispregs.h" #include "arch.h" -#include "fixnump.h" #include "gc.h" #include "genesis/primitive-objects.h" #include "genesis/static-symbols.h" #include "genesis/layout.h" +#include "genesis/hash-table.h" #include "gc-internal.h" #ifdef LISP_FEATURE_SPARC @@ -52,6 +52,9 @@ #endif #endif +size_t dynamic_space_size = DEFAULT_DYNAMIC_SPACE_SIZE; +size_t thread_control_stack_size = DEFAULT_CONTROL_STACK_SIZE; + inline static boolean forwarding_pointer_p(lispobj *pointer) { lispobj first_word=*pointer; @@ -93,10 +96,9 @@ unsigned long bytes_consed_between_gcs = 12*1024*1024; /* * copying objects */ - -/* to copy a boxed object */ +static lispobj -copy_object(lispobj object, long nwords) +gc_general_copy_object(lispobj object, long nwords, int page_type_flag) { int tag; lispobj *new; @@ -109,13 +111,26 @@ copy_object(lispobj object, long nwords) tag = lowtag_of(object); /* Allocate space. */ - new = gc_general_alloc(nwords*N_WORD_BYTES,ALLOC_BOXED,ALLOC_QUICK); + new = gc_general_alloc(nwords*N_WORD_BYTES, page_type_flag, ALLOC_QUICK); /* Copy the object. */ memcpy(new,native_pointer(object),nwords*N_WORD_BYTES); return make_lispobj(new,tag); } +/* to copy a boxed object */ +lispobj +copy_object(lispobj object, long nwords) +{ + return gc_general_copy_object(object, nwords, BOXED_PAGE_FLAG); +} + +lispobj +copy_code_object(lispobj object, long nwords) +{ + return gc_general_copy_object(object, nwords, CODE_PAGE_FLAG); +} + static long scav_lose(lispobj *where, lispobj object); /* forward decl */ /* FIXME: Most calls end up going to some trouble to compute an @@ -134,7 +149,9 @@ scavenge(lispobj *start, long n_words) lispobj object = *object_ptr; #ifdef LISP_FEATURE_GENCGC - gc_assert(!forwarding_pointer_p(object_ptr)); + if (forwarding_pointer_p(object_ptr)) + lose("unexpect forwarding pointer in scavenge: %p, start=%p, n=%l\n", + object_ptr, start, n_words); #endif if (is_lisp_pointer(object)) { if (from_space_p(object)) { @@ -156,25 +173,28 @@ scavenge(lispobj *start, long n_words) n_words_scavenged = 1; } } -#ifndef LISP_FEATURE_GENCGC - /* this workaround is probably not necessary for gencgc; at least, the - * behaviour it describes has never been reported */ - else if (n_words==1) { - /* there are some situations where an - other-immediate may end up in a descriptor - register. I'm not sure whether this is - supposed to happen, but if it does then we +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + /* This workaround is probably not needed for those ports + which don't have a partitioned register set (and therefore + scan the stack conservatively for roots). */ + else if (n_words == 1) { + /* there are some situations where an other-immediate may + end up in a descriptor register. I'm not sure whether + this is supposed to happen, but if it does then we don't want to (a) barf or (b) scavenge over the - data-block, because there isn't one. 
So, if - we're checking a single word and it's anything - other than a pointer, just hush it up */ - int type=widetag_of(object); - n_words_scavenged=1; - - if ((scavtab[type]==scav_lose) || - (((scavtab[type])(start,object))>1)) { - fprintf(stderr,"warning: attempted to scavenge non-descriptor value %x at %p. If you can\nreproduce this warning, send a bug report (see manual page for details)\n", - object,start); + data-block, because there isn't one. So, if we're + checking a single word and it's anything other than a + pointer, just hush it up */ + int widetag = widetag_of(object); + n_words_scavenged = 1; + + if ((scavtab[widetag] == scav_lose) || + (((sizetab[widetag])(object_ptr)) > 1)) { + fprintf(stderr,"warning: \ +attempted to scavenge non-descriptor value %x at %p.\n\n\ +If you can reproduce this warning, please send a bug report\n\ +(see manual page for details).\n", + object, object_ptr); } } #endif @@ -187,8 +207,8 @@ scavenge(lispobj *start, long n_words) (scavtab[widetag_of(object)])(object_ptr, object); } } - gc_assert_verbose(object_ptr == end, "Final object pointer %p, end %p\n", - object_ptr, end); + gc_assert_verbose(object_ptr == end, "Final object pointer %p, start %p, end %p\n", + object_ptr, start, end); } static lispobj trans_fun_header(lispobj object); /* forward decls */ @@ -260,7 +280,7 @@ trans_code(struct code *code) nwords = ncode_words + nheader_words; nwords = CEILING(nwords, 2); - l_new_code = copy_object(l_code, nwords); + l_new_code = copy_code_object(l_code, nwords); new_code = (struct code *) native_pointer(l_new_code); #if defined(DEBUG_CODE_GC) @@ -314,11 +334,18 @@ trans_code(struct code *code) fheaderl = fheaderp->next; prev_pointer = &nfheaderp->next; } +#ifdef LISP_FEATURE_GENCGC + /* Cheneygc doesn't need this os_flush_icache, it flushes the whole + spaces once when all copying is done. */ os_flush_icache((os_vm_address_t) (((long *)new_code) + nheader_words), ncode_words * sizeof(long)); -#ifdef LISP_FEATURE_GENCGC + +#endif + +#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64) gencgc_apply_code_fixups(code, new_code); #endif + return new_code; } @@ -345,7 +372,8 @@ scav_code_header(lispobj *where, lispobj object) entry_point != NIL; entry_point = function_ptr->next) { - gc_assert_verbose(is_lisp_pointer(entry_point), "Entry point %lx\n", + gc_assert_verbose(is_lisp_pointer(entry_point), + "Entry point %lx\n is not a lisp pointer.", (long)entry_point); function_ptr = (struct simple_fun *) native_pointer(entry_point); @@ -354,6 +382,7 @@ scav_code_header(lispobj *where, lispobj object) scavenge(&function_ptr->name, 1); scavenge(&function_ptr->arglist, 1); scavenge(&function_ptr->type, 1); + scavenge(&function_ptr->info, 1); } return n_words; @@ -535,7 +564,7 @@ trans_list(lispobj object) /* Copy 'object'. */ new_cons = (struct cons *) - gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK); + gc_general_alloc(sizeof(struct cons), BOXED_PAGE_FLAG, ALLOC_QUICK); new_cons->car = cons->car; new_cons->cdr = cons->cdr; /* updated later */ new_list_pointer = make_lispobj(new_cons,lowtag_of(object)); @@ -560,7 +589,7 @@ trans_list(lispobj object) /* Copy 'cdr'. 
*/ new_cdr_cons = (struct cons*) - gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK); + gc_general_alloc(sizeof(struct cons), BOXED_PAGE_FLAG, ALLOC_QUICK); new_cdr_cons->car = cdr_cons->car; new_cdr_cons->cdr = cdr_cons->cdr; new_cdr = make_lispobj(new_cdr_cons, lowtag_of(cdr)); @@ -695,7 +724,7 @@ size_boxed(lispobj *where) /* Note: on the sparc we don't have to do anything special for fdefns, */ /* 'cause the raw-addr has a function lowtag. */ -#ifndef LISP_FEATURE_SPARC +#if !defined(LISP_FEATURE_SPARC) static long scav_fdefn(lispobj *where, lispobj object) { @@ -706,8 +735,7 @@ scav_fdefn(lispobj *where, lispobj object) /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n", fdefn->fun, fdefn->raw_addr)); */ - if ((char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET) - == (char *)((unsigned long)(fdefn->raw_addr))) { + if ((char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET) == fdefn->raw_addr) { scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1); /* Don't write unnecessarily. */ @@ -1508,11 +1536,17 @@ size_weak_pointer(lispobj *where) void scan_weak_pointers(void) { - struct weak_pointer *wp; - for (wp = weak_pointers; wp != NULL; wp=wp->next) { + struct weak_pointer *wp, *next_wp; + for (wp = weak_pointers, next_wp = NULL; wp != NULL; wp = next_wp) { lispobj value = wp->value; lispobj *first_pointer; gc_assert(widetag_of(wp->header)==WEAK_POINTER_WIDETAG); + + next_wp = wp->next; + wp->next = NULL; + if (next_wp == wp) /* gencgc uses a ref to self for end of list */ + next_wp = NULL; + if (!(is_lisp_pointer(value) && from_space_p(value))) continue; @@ -1534,6 +1568,312 @@ void scan_weak_pointers(void) } } + +/* Hash tables */ + +#if N_WORD_BITS == 32 +#define EQ_HASH_MASK 0x1fffffff +#elif N_WORD_BITS == 64 +#define EQ_HASH_MASK 0x1fffffffffffffff +#endif + +/* Compute the EQ-hash of KEY. This must match POINTER-HASH in + * target-hash-table.lisp. */ +#define EQ_HASH(key) ((key) & EQ_HASH_MASK) + +/* List of weak hash tables chained through their NEXT-WEAK-HASH-TABLE + * slot. Set to NULL at the end of a collection. + * + * This is not optimal because, when a table is tenured, it won't be + * processed automatically; only the yougest generation is GC'd by + * default. On the other hand, all applications will need an + * occasional full GC anyway, so it's not that bad either. */ +struct hash_table *weak_hash_tables = NULL; + +/* Return true if OBJ has already survived the current GC. */ +static inline int +survived_gc_yet (lispobj obj) +{ + return (!is_lisp_pointer(obj) || !from_space_p(obj) || + forwarding_pointer_p(native_pointer(obj))); +} + +static inline int +weak_hash_entry_alivep (lispobj weakness, lispobj key, lispobj value) +{ + switch (weakness) { + case KEY: + return survived_gc_yet(key); + case VALUE: + return survived_gc_yet(value); + case KEY_OR_VALUE: + return (survived_gc_yet(key) || survived_gc_yet(value)); + case KEY_AND_VALUE: + return (survived_gc_yet(key) && survived_gc_yet(value)); + default: + gc_assert(0); + /* Shut compiler up. */ + return 0; + } +} + +/* Return the beginning of data in ARRAY (skipping the header and the + * length) or NULL if it isn't an array of the specified widetag after + * all. 
*/ +static inline lispobj * +get_array_data (lispobj array, int widetag, unsigned long *length) +{ + if (is_lisp_pointer(array) && + (widetag_of(*(lispobj *)native_pointer(array)) == widetag)) { + if (length != NULL) + *length = fixnum_value(((lispobj *)native_pointer(array))[1]); + return ((lispobj *)native_pointer(array)) + 2; + } else { + return NULL; + } +} + +/* Only need to worry about scavenging the _real_ entries in the + * table. Phantom entries such as the hash table itself at index 0 and + * the empty marker at index 1 were scavenged by scav_vector that + * either called this function directly or arranged for it to be + * called later by pushing the hash table onto weak_hash_tables. */ +static void +scav_hash_table_entries (struct hash_table *hash_table) +{ + lispobj *kv_vector; + unsigned long kv_length; + lispobj *index_vector; + unsigned long length; + lispobj *next_vector; + unsigned long next_vector_length; + lispobj *hash_vector; + unsigned long hash_vector_length; + lispobj empty_symbol; + lispobj weakness = hash_table->weakness; + unsigned long i; + + kv_vector = get_array_data(hash_table->table, + SIMPLE_VECTOR_WIDETAG, &kv_length); + if (kv_vector == NULL) + lose("invalid kv_vector %x\n", hash_table->table); + + index_vector = get_array_data(hash_table->index_vector, + SIMPLE_ARRAY_WORD_WIDETAG, &length); + if (index_vector == NULL) + lose("invalid index_vector %x\n", hash_table->index_vector); + + next_vector = get_array_data(hash_table->next_vector, + SIMPLE_ARRAY_WORD_WIDETAG, + &next_vector_length); + if (next_vector == NULL) + lose("invalid next_vector %x\n", hash_table->next_vector); + + hash_vector = get_array_data(hash_table->hash_vector, + SIMPLE_ARRAY_WORD_WIDETAG, + &hash_vector_length); + if (hash_vector != NULL) + gc_assert(hash_vector_length == next_vector_length); + + /* These lengths could be different as the index_vector can be a + * different length from the others, a larger index_vector could + * help reduce collisions. */ + gc_assert(next_vector_length*2 == kv_length); + + empty_symbol = kv_vector[1]; + /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/ + if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) != + SYMBOL_HEADER_WIDETAG) { + lose("not a symbol where empty-hash-table-slot symbol expected: %x\n", + *(lispobj *)native_pointer(empty_symbol)); + } + + /* Work through the KV vector. */ + for (i = 1; i < next_vector_length; i++) { + lispobj old_key = kv_vector[2*i]; + lispobj value = kv_vector[2*i+1]; + if ((weakness == NIL) || + weak_hash_entry_alivep(weakness, old_key, value)) { + + /* Scavenge the key and value. */ + scavenge(&kv_vector[2*i],2); + + /* If an EQ-based key has moved, mark the hash-table for + * rehashing. */ + if (!hash_vector || hash_vector[i] == MAGIC_HASH_VECTOR_VALUE) { + lispobj new_key = kv_vector[2*i]; + + if (old_key != new_key && new_key != empty_symbol) { + hash_table->needs_rehash_p = T; + } + } + } + } +} + +long +scav_vector (lispobj *where, lispobj object) +{ + unsigned long kv_length; + lispobj *kv_vector; + struct hash_table *hash_table; + + /* SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based and weak + * hash tables in the Lisp HASH-TABLE code to indicate need for + * special GC support. */ + if (HeaderValue(object) == subtype_VectorNormal) + return 1; + + kv_length = fixnum_value(where[1]); + kv_vector = where + 2; /* Skip the header and length. */ + /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/ + + /* Scavenge element 0, which may be a hash-table structure. 
*/ + scavenge(where+2, 1); + if (!is_lisp_pointer(where[2])) { + /* This'll happen when REHASH clears the header of old-kv-vector + * and fills it with zero, but some other thread simulatenously + * sets the header in %%PUTHASH. + */ + fprintf(stderr, + "Warning: no pointer at %lx in hash table: this indicates " + "non-fatal corruption caused by concurrent access to a " + "hash-table from multiple threads. Any accesses to " + "hash-tables shared between threads should be protected " + "by locks.\n", (unsigned long)&where[2]); + // We've scavenged three words. + return 3; + } + hash_table = (struct hash_table *)native_pointer(where[2]); + /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/ + if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) { + lose("hash table not instance (%x at %x)\n", + hash_table->header, + hash_table); + } + + /* Scavenge element 1, which should be some internal symbol that + * the hash table code reserves for marking empty slots. */ + scavenge(where+3, 1); + if (!is_lisp_pointer(where[3])) { + lose("not empty-hash-table-slot symbol pointer: %x\n", where[3]); + } + + /* Scavenge hash table, which will fix the positions of the other + * needed objects. */ + scavenge((lispobj *)hash_table, + sizeof(struct hash_table) / sizeof(lispobj)); + + /* Cross-check the kv_vector. */ + if (where != (lispobj *)native_pointer(hash_table->table)) { + lose("hash_table table!=this table %x\n", hash_table->table); + } + + if (hash_table->weakness == NIL) { + scav_hash_table_entries(hash_table); + } else { + /* Delay scavenging of this table by pushing it onto + * weak_hash_tables (if it's not there already) for the weak + * object phase. */ + if (hash_table->next_weak_hash_table == NIL) { + hash_table->next_weak_hash_table = (lispobj)weak_hash_tables; + weak_hash_tables = hash_table; + } + } + + return (CEILING(kv_length + 2, 2)); +} + +void +scav_weak_hash_tables (void) +{ + struct hash_table *table; + + /* Scavenge entries whose triggers are known to survive. */ + for (table = weak_hash_tables; table != NULL; + table = (struct hash_table *)table->next_weak_hash_table) { + scav_hash_table_entries(table); + } +} + +/* Walk through the chain whose first element is *FIRST and remove + * dead weak entries. 
*/ +static inline void +scan_weak_hash_table_chain (struct hash_table *hash_table, lispobj *prev, + lispobj *kv_vector, lispobj *index_vector, + lispobj *next_vector, lispobj *hash_vector, + lispobj empty_symbol, lispobj weakness) +{ + unsigned index = *prev; + while (index) { + unsigned next = next_vector[index]; + lispobj key = kv_vector[2 * index]; + lispobj value = kv_vector[2 * index + 1]; + gc_assert(key != empty_symbol); + gc_assert(value != empty_symbol); + if (!weak_hash_entry_alivep(weakness, key, value)) { + unsigned count = fixnum_value(hash_table->number_entries); + gc_assert(count > 0); + *prev = next; + hash_table->number_entries = make_fixnum(count - 1); + next_vector[index] = fixnum_value(hash_table->next_free_kv); + hash_table->next_free_kv = make_fixnum(index); + kv_vector[2 * index] = empty_symbol; + kv_vector[2 * index + 1] = empty_symbol; + if (hash_vector) + hash_vector[index] = MAGIC_HASH_VECTOR_VALUE; + } else { + prev = &next_vector[index]; + } + index = next; + } +} + +static void +scan_weak_hash_table (struct hash_table *hash_table) +{ + lispobj *kv_vector; + lispobj *index_vector; + unsigned long length = 0; /* prevent warning */ + lispobj *next_vector; + unsigned long next_vector_length = 0; /* prevent warning */ + lispobj *hash_vector; + lispobj empty_symbol; + lispobj weakness = hash_table->weakness; + unsigned long i; + + kv_vector = get_array_data(hash_table->table, + SIMPLE_VECTOR_WIDETAG, NULL); + index_vector = get_array_data(hash_table->index_vector, + SIMPLE_ARRAY_WORD_WIDETAG, &length); + next_vector = get_array_data(hash_table->next_vector, + SIMPLE_ARRAY_WORD_WIDETAG, + &next_vector_length); + hash_vector = get_array_data(hash_table->hash_vector, + SIMPLE_ARRAY_WORD_WIDETAG, NULL); + empty_symbol = kv_vector[1]; + + for (i = 0; i < length; i++) { + scan_weak_hash_table_chain(hash_table, &index_vector[i], + kv_vector, index_vector, next_vector, + hash_vector, empty_symbol, weakness); + } +} + +/* Remove dead entries from weak hash tables. */ +void +scan_weak_hash_tables (void) +{ + struct hash_table *table, *next; + + for (table = weak_hash_tables; table != NULL; table = next) { + next = (struct hash_table *)table->next_weak_hash_table; + table->next_weak_hash_table = NIL; + scan_weak_hash_table(table); + } + + weak_hash_tables = NULL; +} /* @@ -1545,7 +1885,7 @@ scav_lose(lispobj *where, lispobj object) { lose("no scavenge function for object 0x%08x (widetag 0x%x)\n", (unsigned long)object, - widetag_of(*(lispobj*)native_pointer(object))); + widetag_of(object)); return 0; /* bogus return value to satisfy static type checking */ } @@ -1576,7 +1916,7 @@ size_lose(lispobj *where) void gc_init_tables(void) { - long i; + unsigned long i; /* Set default value in all slots of scavenge table. 
FIXME * replace this gnarly sizeof with something based on @@ -1596,7 +1936,8 @@ gc_init_tables(void) /* skipping OTHER_IMMEDIATE_0_LOWTAG */ scavtab[LIST_POINTER_LOWTAG|(i<= hard_guard_page_address)) || + (((os_vm_address_t)sp < (guard_page_address + os_vm_page_size)) && + ((os_vm_address_t)sp >= guard_page_address) && + (th->control_stack_guard_page_protected != NIL))) + return; +#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD + do { + *sp = 0; + } while (((unsigned long)sp--) & (BYTES_ZERO_BEFORE_END - 1)); + if ((os_vm_address_t)sp < (hard_guard_page_address + os_vm_page_size)) + return; + do { + if (*sp) + goto scrub; + } while (((unsigned long)sp--) & (BYTES_ZERO_BEFORE_END - 1)); +#else + do { + *sp = 0; + } while (((unsigned long)++sp) & (BYTES_ZERO_BEFORE_END - 1)); + if ((os_vm_address_t)sp >= hard_guard_page_address) + return; + do { + if (*sp) + goto scrub; + } while (((unsigned long)++sp) & (BYTES_ZERO_BEFORE_END - 1)); +#endif +} + +#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64) + +/* Scavenging Interrupt Contexts */ + +static int boxed_registers[] = BOXED_REGISTERS; + +/* The GC has a notion of an "interior pointer" register, an unboxed + * register that typically contains a pointer to inside an object + * referenced by another pointer. The most obvious of these is the + * program counter, although many compiler backends define a "Lisp + * Interior Pointer" register known to the runtime as reg_LIP, and + * various CPU architectures have other registers that also partake of + * the interior-pointer nature. As the code for pairing an interior + * pointer value up with its "base" register, and fixing it up after + * scavenging is complete is horribly repetitive, a few macros paper + * over the monotony. --AB, 2010-Jul-14 */ + +/* These macros are only ever used over a lexical environment which + * defines a pointer to an os_context_t called context, thus we don't + * bother to pass that context in as a parameter. */ + +/* Define how to access a given interior pointer. */ +#define ACCESS_INTERIOR_POINTER_pc \ + *os_context_pc_addr(context) +#define ACCESS_INTERIOR_POINTER_lip \ + *os_context_register_addr(context, reg_LIP) +#define ACCESS_INTERIOR_POINTER_lr \ + *os_context_lr_addr(context) +#define ACCESS_INTERIOR_POINTER_npc \ + *os_context_npc_addr(context) + +#define INTERIOR_POINTER_VARS(name) \ + unsigned long name##_offset; \ + int name##_register_pair + +#define PAIR_INTERIOR_POINTER(name) \ + pair_interior_pointer(context, \ + ACCESS_INTERIOR_POINTER_##name, \ + &name##_offset, \ + &name##_register_pair) + +/* One complexity here is that if a paired register is not found for + * an interior pointer, then that pointer does not get updated. + * Originally, there was some commentary about using an index of -1 + * when calling os_context_register_addr() on SPARC referring to the + * program counter, but the real reason is to allow an interior + * pointer register to point to the runtime, read-only space, or + * static space without problems. 
*/ +#define FIXUP_INTERIOR_POINTER(name) \ + do { \ + if (name##_register_pair >= 0) { \ + ACCESS_INTERIOR_POINTER_##name = \ + (*os_context_register_addr(context, \ + name##_register_pair) \ + & ~LOWTAG_MASK) \ + + name##_offset; \ + } \ + } while (0) + + +static void +pair_interior_pointer(os_context_t *context, unsigned long pointer, + unsigned long *saved_offset, int *register_pair) +{ + int i; + + /* + * I (RLT) think this is trying to find the boxed register that is + * closest to the LIP address, without going past it. Usually, it's + * reg_CODE or reg_LRA. But sometimes, nothing can be found. + */ + /* 0x7FFFFFFF on 32-bit platforms; + 0x7FFFFFFFFFFFFFFF on 64-bit platforms */ + *saved_offset = (((unsigned long)1) << (N_WORD_BITS - 1)) - 1; + *register_pair = -1; + for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) { + unsigned long reg; + long offset; + int index; + + index = boxed_registers[i]; + reg = *os_context_register_addr(context, index); + + /* An interior pointer is never relative to a non-pointer + * register (an oversight in the original implementation). + * The simplest argument for why this is true is to consider + * the fixnum that happens by coincide to be the word-index in + * memory of the header for some object plus two. This is + * happenstance would cause the register containing the fixnum + * to be selected as the register_pair if the interior pointer + * is to anywhere after the first two words of the object. + * The fixnum won't be changed during GC, but the object might + * move, thus destroying the interior pointer. --AB, + * 2010-Jul-14 */ + + if (is_lisp_pointer(reg) && + ((reg & ~LOWTAG_MASK) <= pointer)) { + offset = pointer - (reg & ~LOWTAG_MASK); + if (offset < *saved_offset) { + *saved_offset = offset; + *register_pair = index; + } + } + } +} + +static void +scavenge_interrupt_context(os_context_t * context) +{ + int i; + + /* FIXME: The various #ifdef noise here is precisely that: noise. + * Is it possible to fold it into the macrology so that we have + * one set of #ifdefs and then INTERIOR_POINTER_VARS /et alia/ + * compile out for the registers that don't exist on a given + * platform? */ + + INTERIOR_POINTER_VARS(pc); +#ifdef reg_LIP + INTERIOR_POINTER_VARS(lip); +#endif +#ifdef ARCH_HAS_LINK_REGISTER + INTERIOR_POINTER_VARS(lr); +#endif +#ifdef ARCH_HAS_NPC_REGISTER + INTERIOR_POINTER_VARS(npc); +#endif + + PAIR_INTERIOR_POINTER(pc); +#ifdef reg_LIP + PAIR_INTERIOR_POINTER(lip); +#endif +#ifdef ARCH_HAS_LINK_REGISTER + PAIR_INTERIOR_POINTER(lr); +#endif +#ifdef ARCH_HAS_NPC_REGISTER + PAIR_INTERIOR_POINTER(npc); +#endif + + /* Scavenge all boxed registers in the context. */ + for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) { + int index; + lispobj foo; + + index = boxed_registers[i]; + foo = *os_context_register_addr(context, index); + scavenge(&foo, 1); + *os_context_register_addr(context, index) = foo; + + /* this is unlikely to work as intended on bigendian + * 64 bit platforms */ + + scavenge((lispobj *) os_context_register_addr(context, index), 1); + } + + /* Now that the scavenging is done, repair the various interior + * pointers. 
*/ + FIXUP_INTERIOR_POINTER(pc); +#ifdef reg_LIP + FIXUP_INTERIOR_POINTER(lip); +#endif +#ifdef ARCH_HAS_LINK_REGISTER + FIXUP_INTERIOR_POINTER(lr); +#endif +#ifdef ARCH_HAS_NPC_REGISTER + FIXUP_INTERIOR_POINTER(npc); +#endif +} + +void +scavenge_interrupt_contexts(struct thread *th) +{ + int i, index; + os_context_t *context; + + index = fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th)); + +#if defined(DEBUG_PRINT_CONTEXT_INDEX) + printf("Number of active contexts: %d\n", index); +#endif + + for (i = 0; i < index; i++) { + context = th->interrupt_contexts[i]; + scavenge_interrupt_context(context); + } +} +#endif /* x86oid targets */
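
The weak-table scanning code added above walks SBCL's open-hashing layout: index_vector[bucket] holds the index of the first key/value pair in that bucket, next_vector[i] chains to the next pair in the same bucket, and scan_weak_hash_table_chain unlinks a dead entry by storing its successor through *prev and recycling the slot. The following standalone sketch models only that unlink walk with plain C arrays, so the pointer-to-previous-link idiom is easier to see in isolation; the entry_alivep predicate, the EMPTY marker, the vector sizes, and the omission of the next_free_kv free-list push are illustrative assumptions, not the runtime's actual definitions.

/* Standalone model of scan_weak_hash_table_chain's unlink walk.
 * Assumptions: a toy liveness predicate and small fixed-size vectors
 * stand in for the GC's survived_gc_yet test and the table's real
 * index/next/kv vectors; the free-list bookkeeping is elided. */
#include <stdio.h>

#define EMPTY 0          /* stand-in for the empty-slot marker        */
#define N_BUCKETS 4
#define N_ENTRIES 8      /* index 0 means "end of chain", as above    */

static unsigned index_vector[N_BUCKETS]; /* bucket -> first entry index   */
static unsigned next_vector[N_ENTRIES];  /* entry  -> next entry in chain */
static int kv_vector[2 * N_ENTRIES];     /* entry i: key at 2i, value at 2i+1 */

/* Toy liveness test: here, an entry is "live" when its key is even. */
static int entry_alivep(int key, int value)
{
    (void)value;
    return (key % 2) == 0;
}

/* Mirror of the chain walk: *prev always points at the link that leads
 * to the current entry, so unlinking a dead entry is a single store. */
static void scan_chain(unsigned *prev)
{
    unsigned index = *prev;
    while (index) {
        unsigned next = next_vector[index];
        int key = kv_vector[2 * index];
        int value = kv_vector[2 * index + 1];
        if (!entry_alivep(key, value)) {
            *prev = next;                    /* unlink dead entry        */
            kv_vector[2 * index] = EMPTY;    /* clear key and value      */
            kv_vector[2 * index + 1] = EMPTY;
            next_vector[index] = 0;          /* (free-list push elided)  */
        } else {
            prev = &next_vector[index];      /* advance the link pointer */
        }
        index = next;
    }
}

int main(void)
{
    unsigned i, b;
    /* Build one bucket containing entries 3 -> 2 -> 1 with keys 7, 4, 5. */
    index_vector[0] = 3;
    next_vector[3] = 2;  kv_vector[6] = 7;  kv_vector[7] = 70;
    next_vector[2] = 1;  kv_vector[4] = 4;  kv_vector[5] = 40;
    next_vector[1] = 0;  kv_vector[2] = 5;  kv_vector[3] = 50;

    for (b = 0; b < N_BUCKETS; b++)
        scan_chain(&index_vector[b]);

    /* Only the entry with key 4 should survive. */
    for (i = index_vector[0]; i; i = next_vector[i])
        printf("live entry %u: key=%d value=%d\n",
               i, kv_vector[2 * i], kv_vector[2 * i + 1]);
    return 0;
}

Because scan_weak_hash_table passes &index_vector[i] as the initial *prev for each bucket, head-of-chain and mid-chain removals share the same code path, which is the point the model tries to make visible.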
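
The interior-pointer comments above explain that pair_interior_pointer picks the boxed register whose untagged value is closest to, without exceeding, the interior pointer, and that non-pointer registers must be skipped so a coincidental fixnum cannot be chosen as the base. The sketch below models just that selection step; the odd-lowtag pointer test, the sample register values, and the local function name pair are illustrative assumptions for the demo, not the real SBCL tagging scheme or ABI.

/* Standalone model of pair_interior_pointer's register selection.
 * Assumptions: a 3-bit lowtag where any odd tag counts as a pointer,
 * and made-up register contents; only the selection logic mirrors
 * the runtime code. */
#include <stdio.h>

#define LOWTAG_MASK 7UL

/* Toy pointer test: in this model, odd lowtags are pointer tags. */
static int is_lisp_pointer(unsigned long obj) { return (obj & 1UL) != 0; }

static void pair(unsigned long pointer, const unsigned long *regs, int nregs,
                 unsigned long *saved_offset, int *register_pair)
{
    int i;
    *saved_offset = ~0UL >> 1;   /* start with an "infinite" offset */
    *register_pair = -1;
    for (i = 0; i < nregs; i++) {
        unsigned long reg = regs[i];
        /* Skip non-pointers so a coincidental fixnum is never paired. */
        if (is_lisp_pointer(reg) && (reg & ~LOWTAG_MASK) <= pointer) {
            unsigned long offset = pointer - (reg & ~LOWTAG_MASK);
            if (offset < *saved_offset) {
                *saved_offset = offset;
                *register_pair = i;
            }
        }
    }
}

int main(void)
{
    /* reg 0: fixnum 0x1000 (even tag, ignored even though it is closer);
     * reg 1: tagged pointer to an object at 0x0F00;
     * reg 2: tagged pointer past the interior pointer (ignored). */
    unsigned long regs[] = { 0x1000UL, 0x0F07UL, 0x2003UL };
    unsigned long pc = 0x1040UL;   /* interior pointer, e.g. a saved PC */
    unsigned long offset;
    int pair_index;

    pair(pc, regs, 3, &offset, &pair_index);
    printf("paired with reg %d, offset %#lx\n", pair_index, offset);
    return 0;
}

After scavenging moves the base object, FIXUP_INTERIOR_POINTER in the patch reconstructs the interior pointer as the register's new untagged value plus the saved offset, or leaves it alone when no pair was found.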