boolean enable_page_protection = 1;
/* Should we unmap a page and re-mmap it to have it zero filled? */
-#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
+#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__sun)
/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
* so don't unmap there.
*
 * The above is probably based on an old version of FreeBSD
 * (pre-4.0), so this might no longer be true.
* OTOH, if it is true, this behavior might exist on OpenBSD too, so
* for now we don't unmap there either. -- WHN 2001-04-07 */
+/* Apparently this flag is required to be 0 for SunOS/x86, as there
+ * are reports of heap corruption otherwise. */
boolean gencgc_unmap_zero = 0;
#else
boolean gencgc_unmap_zero = 1;
{
long nheader_words, ncode_words, nwords;
void *p;
- void *constants_start_addr, *constants_end_addr;
+ void *constants_start_addr = NULL, *constants_end_addr;
void *code_start_addr, *code_end_addr;
int fixup_found = 0;
#endif
if ((old_index != new_index) &&
- ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
+ ((!hash_vector) ||
+ (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) &&
((new_key != empty_symbol) ||
(kv_vector[2*i] != empty_symbol))) {
}
switch (widetag_of(start_addr[0])) {
case UNBOUND_MARKER_WIDETAG:
+ case NO_TLS_VALUE_MARKER_WIDETAG:
case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
case SINGLE_FLOAT_WIDETAG:
/* Scavenge the Lisp functions of the interrupt handlers, taking
* care to avoid SIG_DFL and SIG_IGN. */
- for_each_thread(th) {
- struct interrupt_data *data=th->interrupt_data;
for (i = 0; i < NSIG; i++) {
- union interrupt_handler handler = data->interrupt_handlers[i];
+ union interrupt_handler handler = interrupt_handlers[i];
if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
!ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- scavenge((lispobj *)(data->interrupt_handlers + i), 1);
- }
+ scavenge((lispobj *)(interrupt_handlers + i), 1);
}
}
+ /* Scavenge the function list for INTERRUPT-THREAD. */
+ for_each_thread(th) {
+ scavenge(&th->interrupt_fun,1);
+ }
/* Scavenge the binding stacks. */
- {
- struct thread *th;
- for_each_thread(th) {
- long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
- th->binding_stack_start;
- scavenge((lispobj *) th->binding_stack_start,len);
+ {
+ struct thread *th;
+ for_each_thread(th) {
+ long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+ th->binding_stack_start;
+ scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
- /* do the tls as well */
- len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
- (sizeof (struct thread))/(sizeof (lispobj));
- scavenge((lispobj *) (th+1),len);
+ /* do the tls as well */
+ len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ (sizeof (struct thread))/(sizeof (lispobj));
+ scavenge((lispobj *) (th+1),len);
#endif
}
}
char *
alloc(long nbytes)
{
- struct thread *th=arch_os_get_current_thread();
+ struct thread *thread=arch_os_get_current_thread();
struct alloc_region *region=
#ifdef LISP_FEATURE_SB_THREAD
- th ? &(th->alloc_region) : &boxed_region;
+ thread ? &(thread->alloc_region) : &boxed_region;
#else
&boxed_region;
#endif
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- struct thread *thread=arch_os_get_current_thread();
+ gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread)));
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */
- if (SymbolValue(NEED_TO_COLLECT_GARBAGE,thread) == NIL) {
+ if (SymbolValue(GC_PENDING,thread) == NIL) {
/* set things up so that GC happens when we finish the PA
- * section. We only do this if there wasn't a pending
- * handler already, in case it was a gc. If it wasn't a
- * GC, the next allocation will get us back to this point
- * anyway, so no harm done
- */
- struct interrupt_data *data=th->interrupt_data;
- sigset_t new_mask,old_mask;
- sigemptyset(&new_mask);
- sigaddset_blockable(&new_mask);
- thread_sigmask(SIG_BLOCK,&new_mask,&old_mask);
-
- if(!data->pending_handler) {
- if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0))
- lose("Not in atomic: %d.\n",
- SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread));
- /* Leave the signals blocked just as if it was
- * deferred the normal way and set the
- * pending_mask. */
- sigcopyset(&(data->pending_mask),&old_mask);
- SetSymbolValue(NEED_TO_COLLECT_GARBAGE,T,thread);
- } else {
- thread_sigmask(SIG_SETMASK,&old_mask,0);
- }
+ * section */
+ SetSymbolValue(GC_PENDING,T,thread);
+ if (SymbolValue(GC_INHIBIT,thread) == NIL)
+ arch_set_pseudo_atomic_interrupted(0);
}
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);