/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
+/* forward declarations */
+int gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct alloc_region *alloc_region);
+void gc_set_region_empty(struct alloc_region *region);
+void gc_alloc_update_all_page_tables(void);
+static void gencgc_pickup_dynamic(void);
+boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
+
\f
/*
* GC parameters
/ ((double)generations[gen].bytes_allocated);
}
+void fpu_save(int *); /* defined in x86-assem.S */
+void fpu_restore(int *); /* defined in x86-assem.S */
/* The verbose argument controls how much to print: 0 for normal
* level of detail; 1 for debugging. */
static void
gc_assert((alloc_region->first_page == 0)
&& (alloc_region->last_page == -1)
&& (alloc_region->free_pointer == alloc_region->end_addr));
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
generations[gc_alloc_generation].alloc_unboxed_start_page;
(lispobj)(((char *)heap_base) + last_free_page*4096),
0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
next_page = first_page+1;
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
/* some bytes were allocated in the region */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
page_table[next_page].allocated = FREE_PAGE;
next_page++;
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
/* alloc_region is per-thread, we're ok to do this unlocked */
gc_set_region_empty(alloc_region);
}
index ahead of the current region and bumped up here to save a
lot of re-scanning. */
- get_spinlock(&free_pages_lock,alloc_region);
+ get_spinlock(&free_pages_lock,(int) alloc_region);
if (unboxed) {
first_page =
SetSymbolValue(ALLOCATION_POINTER,
(lispobj)(((char *)heap_base) + last_free_page*4096),0);
}
- free_pages_lock=0;
+ release_spinlock(&free_pages_lock);
return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
gc_assert(page_table[page].allocated != FREE_PAGE);
gc_assert(page_table[page].bytes_used != 0);
- /* Skip if it's already write-protected or an unboxed page. */
+ /* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
+ || page_table[page].dont_move
|| (page_table[page].allocated & UNBOXED_PAGE))
return (0);
for (i = 0; i < last_free_page; i++)
if ((page_table[i].allocated == BOXED_PAGE)
&& (page_table[i].bytes_used != 0)
+ && !page_table[i].dont_move
&& (page_table[i].gen == generation)) {
void *page_start;
/* Before any pointers are preserved, the dont_move flags on the
* pages need to be cleared. */
for (i = 0; i < last_free_page; i++)
- page_table[i].dont_move = 0;
+ if(page_table[i].gen==from_space)
+ page_table[i].dont_move = 0;
/* Un-write-protect the old-space pages. This is essential for the
* promoted pages as they may contain pointers into the old-space
/* Scavenge the stacks' conservative roots. */
for_each_thread(th) {
void **ptr;
+ void **esp= (void **) &raise;
#ifdef LISP_FEATURE_SB_THREAD
- struct user_regs_struct regs;
-    if(ptrace(PTRACE_GETREGS,th->pid,0,&regs)){
- /* probably doesn't exist any more. */
- fprintf(stderr,"child pid %d, %s\n",th->pid,strerror(errno));
- perror("PTRACE_GETREGS");
+ if(th!=arch_os_get_current_thread()) {
+ os_context_t *last_context=get_interrupt_context_for_thread(th);
+ esp = (void **)*os_context_register_addr(last_context,reg_ESP);
}
- preserve_pointer(regs.ebx);
- preserve_pointer(regs.ecx);
- preserve_pointer(regs.edx);
- preserve_pointer(regs.esi);
- preserve_pointer(regs.edi);
- preserve_pointer(regs.ebp);
- preserve_pointer(regs.eax);
-#endif
- for (ptr = th->control_stack_end;
-#ifdef LISP_FEATURE_SB_THREAD
- ptr > regs.esp;
-#else
- ptr > (void **)&raise;
#endif
- ptr--) {
+ for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
preserve_pointer(*ptr);
}
}
update_x86_dynamic_space_free_pointer();
auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
if(gencgc_verbose)
- fprintf(stderr,"Next gc when %d bytes have been consed\n",
+ fprintf(stderr,"Next gc when %ld bytes have been consed\n",
auto_gc_trigger);
SHOW("returning from collect_garbage");
}
\f
-extern boolean maybe_gc_pending ;
/* alloc(..) is the external interface for memory allocation. It
* allocates to generation 0. It is not called from within the garbage
* collector as it is only external uses that need the check for heap
__asm__("movl %fs,%0" : "=r" (fs) : );
fprintf(stderr, "fs is %x, th->tls_cookie=%x (should be identical)\n",
debug_get_fs(),th->tls_cookie);
- lose("If you see this message before 2003.05.01, mail details to sbcl-devel\n");
+ lose("If you see this message before 2003.12.01, mail details to sbcl-devel\n");
}
#else
gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
/* set things up so that GC happens when we finish the PA
* section. */
- maybe_gc_pending=1;
- SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),th);
+ struct interrupt_data *data=th->interrupt_data;
+ maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0);
}
new_obj = gc_alloc_with_region(nbytes,0,region,0);
return (new_obj);
*/
if(page_table[page_index].write_protected_cleared != 1)
lose("fault in heap page not marked as write-protected");
-
- /* Don't worry, we can handle it. */
- return 1;
}
+ /* Don't worry, we can handle it. */
+ return 1;
}
}
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
unhandled_sigmemoryfault()
{}
-gc_alloc_update_all_page_tables(void)
+void gc_alloc_update_all_page_tables(void)
{
/* Flush the alloc regions updating the tables. */
struct thread *th;