boolean enable_page_protection = 1;
/* Should we unmap a page and re-mmap it to have it zero filled? */
-#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
+#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__sun)
/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
* so don't unmap there.
*
 * The CMU CL comment probably refers to an old version of FreeBSD
 * (pre-4.0), so this might no longer be true. OTOH, if it is true,
 * this behavior might exist on OpenBSD too, so for now we don't
 * unmap there either. -- WHN 2001-04-07 */
+/* Apparently this flag is required to be 0 for SunOS/x86, as there
+ * are reports of heap corruption otherwise. */
boolean gencgc_unmap_zero = 0;
#else
boolean gencgc_unmap_zero = 1;
return copy_large_object(object, length);
}
-
+/* This function doesn't seem to be used; delete it after a grace period. */
+#if 0
static lispobj
trans_unboxed_large(lispobj object)
{
return copy_large_unboxed_object(object, length);
}
+#endif
\f
/*
/* FIXME: What does this mean? */
int gencgc_hash = 1;
-static int
+static long
scav_vector(lispobj *where, lispobj object)
{
unsigned long kv_length;
if (!is_lisp_pointer(where[2])) {
lose("no pointer at %x in hash table", where[2]);
}
- hash_table = (lispobj *)native_pointer(where[2]);
+ hash_table = (struct hash_table *)native_pointer(where[2]);
/*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
lose("hash table not instance (%x at %x)",
/* Scavenge hash table, which will fix the positions of the other
* needed objects. */
- scavenge(hash_table, sizeof(struct hash_table) / sizeof(lispobj));
+ scavenge((lispobj *)hash_table,
+ sizeof(struct hash_table) / sizeof(lispobj));
/* Cross-check the kv_vector. */
if (where != (lispobj *)native_pointer(hash_table->table)) {
if (is_lisp_pointer(index_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
SIMPLE_ARRAY_WORD_WIDETAG)) {
- index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2;
+ index_vector =
+ ((unsigned long *)native_pointer(index_vector_obj)) + 2;
/*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
/*FSHOW((stderr, "/length = %d\n", length));*/
if (is_lisp_pointer(next_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
SIMPLE_ARRAY_WORD_WIDETAG)) {
- next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2;
+ next_vector = ((unsigned long *)native_pointer(next_vector_obj)) + 2;
/*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
/*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
if (is_lisp_pointer(hash_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
SIMPLE_ARRAY_WORD_WIDETAG)){
- hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2;
+ hash_vector =
+ ((unsigned long *)native_pointer(hash_vector_obj)) + 2;
/*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
== next_vector_length);
#endif
if ((old_index != new_index) &&
- ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
+ ((!hash_vector) ||
+ (hash_vector[i] == MAGIC_HASH_VECTOR_VALUE)) &&
((new_key != empty_symbol) ||
(kv_vector[2*i] != empty_symbol))) {
}
switch (widetag_of(start_addr[0])) {
case UNBOUND_MARKER_WIDETAG:
+ case NO_TLS_VALUE_MARKER_WIDETAG:
case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
case SINGLE_FLOAT_WIDETAG:
#ifdef LISP_FEATURE_SB_THREAD
long i,free;
if(th==arch_os_get_current_thread()) {
- esp = (void **) &raise;
+ /* Somebody is going to burn in hell for this, but casting
+ * it in two steps shuts gcc up about strict aliasing. */
+ esp = (void **)((void *)&raise);
} else {
void **esp1;
free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
for(i=free-1;i>=0;i--) {
os_context_t *c=th->interrupt_contexts[i];
esp1 = (void **) *os_context_register_addr(c,reg_SP);
- if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
+ if (esp1>=(void **)th->control_stack_start &&
+ esp1<(void **)th->control_stack_end) {
if(esp1<esp) esp=esp1;
for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
preserve_pointer(*ptr);
}
}
#else
- esp = (void **) &raise;
+ esp = (void **)((void *)&raise);
#endif
for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
preserve_pointer(*ptr);
* care to avoid SIG_DFL and SIG_IGN. */
for_each_thread(th) {
struct interrupt_data *data=th->interrupt_data;
- for (i = 0; i < NSIG; i++) {
+ for (i = 0; i < NSIG; i++) {
union interrupt_handler handler = data->interrupt_handlers[i];
- if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
- !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
+ if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
+ !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
scavenge((lispobj *)(data->interrupt_handlers + i), 1);
}
}
}
+ /* Scavenge the function list for INTERRUPT-THREAD. */
+ for_each_thread(th) {
+ scavenge(&th->interrupt_fun,1);
+ }
/* Scavenge the binding stacks. */
- {
- struct thread *th;
- for_each_thread(th) {
- long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
- th->binding_stack_start;
- scavenge((lispobj *) th->binding_stack_start,len);
+ {
+ struct thread *th;
+ for_each_thread(th) {
+ long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+ th->binding_stack_start;
+ scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
- /* do the tls as well */
- len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
- (sizeof (struct thread))/(sizeof (lispobj));
- scavenge((lispobj *) (th+1),len);
+ /* do the tls as well */
+ len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ (sizeof (struct thread))/(sizeof (lispobj));
+ scavenge((lispobj *) (th+1),len);
#endif
}
}
page_table[page].first_object_offset =
(void *)prev - page_address(page);
page++;
- } while (page_address(page) < alloc_ptr);
+ } while ((long)page_address(page) < alloc_ptr);
generations[0].bytes_allocated = PAGE_BYTES*page;
bytes_allocated = PAGE_BYTES*page;
char *
alloc(long nbytes)
{
- struct thread *th=arch_os_get_current_thread();
+ struct thread *thread=arch_os_get_current_thread();
struct alloc_region *region=
#ifdef LISP_FEATURE_SB_THREAD
- th ? &(th->alloc_region) : &boxed_region;
+ thread ? &(thread->alloc_region) : &boxed_region;
#else
&boxed_region;
#endif
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- struct thread *thread=arch_os_get_current_thread();
+ gc_assert(fixnum_value(SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread)));
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */
- if (SymbolValue(NEED_TO_COLLECT_GARBAGE,thread) == NIL) {
+ if (SymbolValue(GC_PENDING,thread) == NIL) {
/* set things up so that GC happens when we finish the PA
- * section. We only do this if there wasn't a pending
- * handler already, in case it was a gc. If it wasn't a
- * GC, the next allocation will get us back to this point
- * anyway, so no harm done
- */
- struct interrupt_data *data=th->interrupt_data;
- sigset_t new_mask,old_mask;
- sigemptyset(&new_mask);
- sigaddset_blockable(&new_mask);
- thread_sigmask(SIG_BLOCK,&new_mask,&old_mask);
-
- if(!data->pending_handler) {
- if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0))
- lose("Not in atomic: %d.\n",
- SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread));
- /* Leave the signals blocked just as if it was
- * deferred the normal way and set the
- * pending_mask. */
- sigcopyset(&(data->pending_mask),&old_mask);
- SetSymbolValue(NEED_TO_COLLECT_GARBAGE,T,thread);
- } else {
- thread_sigmask(SIG_SETMASK,&old_mask,0);
- }
+ * section */
+ SetSymbolValue(GC_PENDING,T,thread);
+ if (SymbolValue(GC_INHIBIT,thread) == NIL)
+ arch_set_pseudo_atomic_interrupted(0);
}
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
region->free_pointer = page_address(0);
region->end_addr = page_address(0);
}
-