#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
-
-/* assembly language stub that executes trap_PendingInterrupt */
-void do_pending_interrupt(void);
+#include "genesis/hash-table.h"
/* forward declarations */
long gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed);
static void gencgc_pickup_dynamic(void);
-boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
\f
/*
gc_assert(generations[i].bytes_allocated
== count_generation_bytes_allocated(i));
fprintf(stderr,
- " %1d: %5d %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
+ " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n",
i,
boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
pinned_cnt,
* e.g. boxed/unboxed, generation, ages; there may need to be many
* allocation regions.
*
- * Each allocation region may be start within a partly used page. Many
+ * Each allocation region may start within a partly used page. Many
* features of memory use are noted on a page wise basis, e.g. the
* generation; so if a region starts within an existing allocated page
* it must be consistent with this page.
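*
* As an informal sketch (not part of this patch; the .gen field and
* gc_alloc_generation are assumed from their use elsewhere in this
* file), the consistency requirement is roughly:
*
*   if (page_table[first_page].bytes_used != 0)
*       gc_assert(page_table[first_page].gen == gc_alloc_generation);
*
* i.e. a region that begins on a partly used page must allocate into
* that page's existing generation.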
if (first_page >= NUM_PAGES) {
fprintf(stderr,
- "Argh! gc_find_free_space failed (first_page), nbytes=%d.\n",
+ "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
nbytes);
print_generation_stats(1);
lose(NULL);
/* Check for a failure */
if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
fprintf(stderr,
- "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%d.\n",
+ "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n",
nbytes);
print_generation_stats(1);
lose(NULL);
unsigned long kv_length;
lispobj *kv_vector;
unsigned long length = 0; /* (0 = dummy to stop GCC warning) */
- lispobj *hash_table;
+ struct hash_table *hash_table;
lispobj empty_symbol;
unsigned long *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
unsigned long *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
}
- hash_table = (lispobj *)native_pointer(where[2]);
+ hash_table = (struct hash_table *)native_pointer(where[2]);
/*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
- if (widetag_of(hash_table[0]) != INSTANCE_HEADER_WIDETAG) {
- lose("hash table not instance (%x at %x)", hash_table[0], hash_table);
+ if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
+ lose("hash table not instance (%x at %x)",
+ hash_table->header,
+ hash_table);
}
/* Scavenge element 1, which should be some internal symbol that
/* Scavenge hash table, which will fix the positions of the other
* needed objects. */
- scavenge(hash_table, 16);
+ scavenge((lispobj *)hash_table,
+          sizeof(struct hash_table) / sizeof(lispobj));
/* Cross-check the kv_vector. */
- if (where != (lispobj *)native_pointer(hash_table[9])) {
- lose("hash_table table!=this table %x", hash_table[9]);
+ if (where != (lispobj *)native_pointer(hash_table->table)) {
+ lose("hash_table table!=this table %x", hash_table->table);
}
/* WEAK-P */
- weak_p_obj = hash_table[10];
+ weak_p_obj = hash_table->weak_p;
/* index vector */
{
- lispobj index_vector_obj = hash_table[13];
+ lispobj index_vector_obj = hash_table->index_vector;
if (is_lisp_pointer(index_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
/* next vector */
{
- lispobj next_vector_obj = hash_table[14];
+ lispobj next_vector_obj = hash_table->next_vector;
if (is_lisp_pointer(next_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
/* maybe hash vector */
{
- /* FIXME: This bare "15" offset should become a symbolic
- * expression of some sort. And all the other bare offsets
- * too. And the bare "16" in scavenge(hash_table, 16). And
- * probably other stuff too. Ugh.. */
- lispobj hash_vector_obj = hash_table[15];
+ lispobj hash_vector_obj = hash_table->hash_vector;
if (is_lisp_pointer(hash_vector_obj) &&
(widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
/*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
index_vector[old_index] = next_vector[i];
/* Link it into the needing rehash chain. */
- next_vector[i] = fixnum_value(hash_table[11]);
- hash_table[11] = make_fixnum(i);
+ next_vector[i] = fixnum_value(hash_table->needing_rehash);
+ hash_table->needing_rehash = make_fixnum(i);
/*SHOW("P2");*/
} else {
unsigned prior = index_vector[old_index];
/* Link it into the needing rehash
* chain. */
next_vector[next] =
- fixnum_value(hash_table[11]);
- hash_table[11] = make_fixnum(next);
+ fixnum_value(hash_table->needing_rehash);
+ hash_table->needing_rehash = make_fixnum(next);
/*SHOW("/P3");*/
break;
}
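/* For reference, a sketch of the slot correspondence this change
 * establishes (the authoritative layout of struct hash_table is the
 * generated genesis/hash-table.h; the indices below are simply the
 * bare offsets the old code used):
 *
 *   hash_table->table           was  hash_table[9]
 *   hash_table->weak_p          was  hash_table[10]
 *   hash_table->needing_rehash  was  hash_table[11]
 *   hash_table->index_vector    was  hash_table[13]
 *   hash_table->next_vector     was  hash_table[14]
 *   hash_table->hash_vector     was  hash_table[15]
 *
 * and scavenge((lispobj *)hash_table, sizeof(struct hash_table) /
 * sizeof(lispobj)) replaces the old hard-coded word count of 16. */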
lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
return NULL;
- return (search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *) pointer));
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *) pointer));
}
lispobj *
lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
return NULL;
- return (search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *) pointer));
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *) pointer));
}
/* a faster version for searching the dynamic space. This will work even
return NULL;
start = (lispobj *)((void *)page_address(page_index)
+ page_table[page_index].first_object_offset);
- return (search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *)pointer));
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *)pointer));
}
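/* A usage sketch for the renamed helper (the signature
 * lispobj *gc_search_space(lispobj *start, size_t words,
 * lispobj *pointer) is assumed from the call sites above): it scans
 * up to `words' words starting at `start' and returns the address of
 * the object enclosing `pointer', or NULL if none is found:
 *
 *   lispobj *obj = gc_search_space(start,
 *                                  (((lispobj *)p)+2)-start,
 *                                  (lispobj *)p);
 *   if (obj != NULL)
 *       ... p points into the object that begins at obj ...
 */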
/* Is there any possibility that pointer is a valid Lisp object
case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
-#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
+#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
for(i=free-1;i>=0;i--) {
os_context_t *c=th->interrupt_contexts[i];
- esp1 = (void **) *os_context_register_addr(c,reg_ESP);
+ esp1 = (void **) *os_context_register_addr(c,reg_SP);
if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
if(esp1<esp) esp=esp1;
for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
page_table[page].bytes_used = PAGE_BYTES;
page_table[page].large_object = 0;
- first=search_space(prev,(ptr+2)-prev,ptr);
+ first=gc_search_space(prev,(ptr+2)-prev,ptr);
if(ptr == first) prev=ptr;
page_table[page].first_object_offset =
(void *)prev - page_address(page);
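/* Note on the bookkeeping above (informal): first_object_offset is
 * maintained so that page_address(page) + first_object_offset points
 * at the start of an object, namely the last object known to begin at
 * or before this page; that invariant is what lets the dynamic-space
 * variant of gc_search_space earlier in this file pick a valid
 * starting point for its scan. */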
#ifdef LISP_FEATURE_SB_THREAD
if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
register u32 fs;
- fprintf(stderr, "fatal error in thread 0x%x, pid=%d\n",
- th,getpid());
+ fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
+ th,th->os_thread);
__asm__("movl %fs,%0" : "=r" (fs) : );
fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
debug_get_fs(),th->tls_cookie);
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- /* set things up so that GC happens when we finish the PA
- * section. We only do this if there wasn't a pending handler
- * already, in case it was a gc. If it wasn't a GC, the next
- * allocation will get us back to this point anyway, so no harm done
- */
- struct interrupt_data *data=th->interrupt_data;
- if(!data->pending_handler)
- maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0);
+ /* Don't flood the system with interrupts if the need to gc is
+ * already noted. This can happen for example when SUB-GC
+ * allocates or after a gc triggered in a WITHOUT-GCING. */
+ if (SymbolValue(NEED_TO_COLLECT_GARBAGE,th) == NIL) {
+ /* set things up so that GC happens when we finish the PA
+ * section. We only do this if there wasn't a pending
+ * handler already, in case it was a gc. If it wasn't a
+ * GC, the next allocation will get us back to this point
+ * anyway, so no harm done
+ */
+ struct interrupt_data *data=th->interrupt_data;
+ sigset_t new_mask,old_mask;
+ sigemptyset(&new_mask);
+ sigaddset_blockable(&new_mask);
+ thread_sigmask(SIG_BLOCK,&new_mask,&old_mask);
+
+ if(!data->pending_handler) {
+ if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0))
+ lose("Not in atomic: %d.\n",
+ SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread));
+ /* Leave the signals blocked just as if it was
+ * deferred the normal way and set the
+ * pending_mask. */
+ sigcopyset(&(data->pending_mask),&old_mask);
+ SetSymbolValue(NEED_TO_COLLECT_GARBAGE,T,th);
+ } else {
+ thread_sigmask(SIG_SETMASK,&old_mask,0);
+ }
+ }
}
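/* Informal summary of the deferral protocol above: inside a
 * pseudo-atomic section we must not collect, so instead we (1) block
 * the blockable signals, (2) queue interrupt_maybe_gc_int as the
 * pending handler via maybe_defer_handler, leaving the signals
 * blocked and stashing the previous mask in data->pending_mask just
 * as ordinary deferral would, and (3) set NEED_TO_COLLECT_GARBAGE so
 * that subsequent allocations do not queue the handler again; the
 * collection then happens when the pseudo-atomic section is left. */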
new_obj = gc_alloc_with_region(nbytes,0,region,0);
return (new_obj);