X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc.c;h=3b2f2f09824cb651963aac61a029167462fdd839;hb=04ee798422795a1e3d664c257a6b02b833eec4c6;hp=88de20f40602f5547273485531d8f00bf1b6e47f;hpb=1479483c5f40fc470053da0fc5cd8e42fc77676e;p=sbcl.git

diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 88de20f..3b2f2f0 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -24,6 +24,7 @@
  * .
  */
 
+#include 
 #include 
 #include 
 #include 
@@ -144,7 +145,6 @@ boolean gencgc_partial_pickup = 0;
 
 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
 unsigned long bytes_allocated = 0;
-extern unsigned long bytes_consed_between_gcs; /* gc-common.c */
 unsigned long auto_gc_trigger = 0;
 
 /* the source and destination generations. These are set before a GC starts
@@ -159,10 +159,11 @@ boolean gc_active_p = 0;
  * saving a core), don't scan the stack / mark pages dont_move. */
 static boolean conservative_stack = 1;
 
-/* An array of page structures is statically allocated.
+/* An array of page structures is allocated on gc initialization.
  * This helps quickly map between an address its page structure.
- * NUM_PAGES is set from the size of the dynamic space. */
-struct page page_table[NUM_PAGES];
+ * page_table_pages is set from the size of the dynamic space. */
+unsigned page_table_pages;
+struct page *page_table;
 
 /* To map addresses to page structures the address of the first page
  * is needed. */
@@ -184,7 +185,7 @@ find_page_index(void *addr)
 
     if (index >= 0) {
         index = ((unsigned long)index)/PAGE_BYTES;
-        if (index < NUM_PAGES)
+        if (index < page_table_pages)
             return (index);
     }
 
@@ -1115,11 +1116,11 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
     do {
         first_page = restart_page;
         if (large_p)
-            while ((first_page < NUM_PAGES)
+            while ((first_page < page_table_pages)
                    && (page_table[first_page].allocated != FREE_PAGE_FLAG))
                 first_page++;
         else
-            while (first_page < NUM_PAGES) {
+            while (first_page < page_table_pages) {
                 if(page_table[first_page].allocated == FREE_PAGE_FLAG)
                     break;
                 if((page_table[first_page].allocated ==
@@ -1134,7 +1135,7 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
                 first_page++;
             }
 
-        if (first_page >= NUM_PAGES)
+        if (first_page >= page_table_pages)
             gc_heap_exhausted_error_or_lose(0, nbytes);
 
         gc_assert(page_table[first_page].write_protected == 0);
@@ -1144,7 +1145,7 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
         num_pages = 1;
         while (((bytes_found < nbytes)
                 || (!large_p && (num_pages < 2)))
-               && (last_page < (NUM_PAGES-1))
+               && (last_page < (page_table_pages-1))
                && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
             last_page++;
             num_pages++;
@@ -1157,10 +1158,10 @@ gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
         gc_assert(bytes_found == region_size);
         restart_page = last_page + 1;
-    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
+    } while ((restart_page < page_table_pages) && (bytes_found < nbytes));
 
     /* Check for a failure */
-    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes))
+    if ((restart_page >= page_table_pages) && (bytes_found < nbytes))
         gc_heap_exhausted_error_or_lose(bytes_found, nbytes);
 
     *restart_page_ptr=first_page;
@@ -2809,7 +2810,7 @@ scavenge_generations(generation_index_t from, generation_index_t to)
 #define SC_GEN_CK 0
 #if SC_GEN_CK
     /* Clear the write_protected_cleared flags on all pages. */
-    for (i = 0; i < NUM_PAGES; i++)
+    for (i = 0; i < page_table_pages; i++)
         page_table[i].write_protected_cleared = 0;
 #endif
 
@@ -2863,7 +2864,7 @@ scavenge_generations(generation_index_t from, generation_index_t to)
 #if SC_GEN_CK
     /* Check that none of the write_protected pages in this generation
      * have been written to. */
-    for (i = 0; i < NUM_PAGES; i++) {
+    for (i = 0; i < page_table_pages; i++) {
         if ((page_table[i].allocation != FREE_PAGE_FLAG)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == generation)
@@ -3095,7 +3096,7 @@ scavenge_newspace_generation(generation_index_t generation)
 #if SC_NS_GEN_CK
     /* Check that none of the write_protected pages in this generation
      * have been written to. */
-    for (i = 0; i < NUM_PAGES; i++) {
+    for (i = 0; i < page_table_pages; i++) {
         if ((page_table[i].allocation != FREE_PAGE_FLAG)
             && (page_table[i].bytes_used != 0)
             && (page_table[i].gen == generation)
@@ -3229,13 +3230,6 @@ print_ptr(lispobj *addr)
 }
 #endif
 
-#if defined(LISP_FEATURE_PPC)
-extern int closure_tramp;
-extern int undefined_tramp;
-#else
-extern int undefined_tramp;
-#endif
-
 static void
 verify_space(lispobj *start, size_t words)
 {
@@ -3290,14 +3284,7 @@ verify_space(lispobj *start, size_t words)
                  */
             } else {
                 /* Verify that it points to another valid space. */
-                if (!to_readonly_space && !to_static_space &&
-#if defined(LISP_FEATURE_PPC)
-                    !((thing == &closure_tramp) ||
-                      (thing == &undefined_tramp))
-#else
-                    thing != (unsigned long)&undefined_tramp
-#endif
-                    ) {
+                if (!to_readonly_space && !to_static_space) {
                     lose("Ptr %x @ %x sees junk.\n", thing, start);
                 }
             }
@@ -3835,6 +3822,22 @@ preserve_context_registers (os_context_t *c)
     preserve_pointer((void*)*os_context_register_addr(c,reg_ESI));
     preserve_pointer((void*)*os_context_register_addr(c,reg_EDI));
     preserve_pointer((void*)*os_context_pc_addr(c));
+#elif defined LISP_FEATURE_X86_64
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RAX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RCX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RDX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RBX));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RSI));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_RDI));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R8));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R9));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R10));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R11));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R12));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R13));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R14));
+    preserve_pointer((void*)*os_context_register_addr(c,reg_R15));
+    preserve_pointer((void*)*os_context_pc_addr(c));
 #else
     #error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
 #endif
@@ -4331,7 +4334,7 @@ gc_free_heap(void)
     if (gencgc_verbose > 1)
         SHOW("entering gc_free_heap");
 
-    for (page = 0; page < NUM_PAGES; page++) {
+    for (page = 0; page < page_table_pages; page++) {
         /* Skip free pages which should already be zero filled. */
         if (page_table[page].allocated != FREE_PAGE_FLAG) {
             void *page_start, *addr;
@@ -4417,6 +4420,14 @@ gc_init(void)
 {
     page_index_t i;
 
+    /* Compute the number of pages needed for the dynamic space.
+     * Dynamic space size should be aligned on page size. */
+    page_table_pages = dynamic_space_size/PAGE_BYTES;
+    gc_assert(dynamic_space_size == (size_t) page_table_pages*PAGE_BYTES);
+
+    page_table = calloc(page_table_pages, sizeof(struct page));
+    gc_assert(page_table);
+
     gc_init_tables();
     scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
     transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;
@@ -4430,7 +4441,7 @@ gc_init(void)
     heap_base = (void*)DYNAMIC_SPACE_START;
 
     /* Initialize each page structure. */
-    for (i = 0; i < NUM_PAGES; i++) {
+    for (i = 0; i < page_table_pages; i++) {
         /* Initialize all pages as free. */
         page_table[i].allocated = FREE_PAGE_FLAG;
         page_table[i].bytes_used = 0;
@@ -4548,8 +4559,12 @@ alloc(long nbytes)
 #else
         &boxed_region;
 #endif
+#ifndef LISP_FEATURE_WIN32
+    lispobj alloc_signal;
+#endif
     void *new_obj;
     void *new_free_pointer;
+
     gc_assert(nbytes>0);
 
     /* Check for alignment allocation problems. */
@@ -4601,6 +4616,24 @@ alloc(long nbytes)
         }
     }
     new_obj = gc_alloc_with_region(nbytes,0,region,0);
+
+#ifndef LISP_FEATURE_WIN32
+    alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
+    if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
+        if ((signed long) alloc_signal <= 0) {
+#ifdef LISP_FEATURE_SB_THREAD
+            kill_thread_safely(thread->os_thread, SIGPROF);
+#else
+            raise(SIGPROF);
+#endif
+        } else {
+            SetSymbolValue(ALLOC_SIGNAL,
+                           alloc_signal - (1 << N_FIXNUM_TAG_BITS),
+                           thread);
+        }
+    }
+#endif
+
     return (new_obj);
 }
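
The central change above replaces the compile-time page_table[NUM_PAGES] array with a table that gc_init() sizes from dynamic_space_size and obtains with calloc(), rewriting every NUM_PAGES bound as page_table_pages; a second, independent change makes alloc() honor ALLOC_SIGNAL by raising SIGPROF after a configurable amount of allocation. The standalone C sketch below illustrates only the first part: sizing the page table at startup and the find_page_index()-style address-to-page mapping that the new bound check protects. It is an illustration, not the SBCL code itself; the 4096-byte PAGE_BYTES, the 16 MiB demo space, and main() are assumptions made for the example.

/* Standalone sketch (not the SBCL sources): a page table sized from the
 * dynamic space at init time, and the address-to-page-index mapping that
 * is bounds-checked against page_table_pages instead of a compile-time
 * NUM_PAGES.  PAGE_BYTES, the demo space size and main() are assumptions. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_BYTES 4096                       /* assumed page size for the demo */

struct page {
    int allocated;                            /* stand-in for the real page flags */
    unsigned bytes_used;
};

static unsigned long page_table_pages;        /* pages covering the dynamic space */
static struct page *page_table;               /* allocated at init, not statically */
static char *heap_base;                       /* start of the demo "dynamic space" */

/* Like the new gc_init(): derive the page count from the space size,
 * check the alignment, and calloc the table so all pages start out zeroed. */
static void demo_gc_init(size_t dynamic_space_size)
{
    page_table_pages = dynamic_space_size / PAGE_BYTES;
    if (dynamic_space_size != (size_t)page_table_pages * PAGE_BYTES)
        abort();                              /* space must be page-aligned */

    page_table = calloc(page_table_pages, sizeof(struct page));
    heap_base = malloc(dynamic_space_size);   /* stand-in for DYNAMIC_SPACE_START */
    if (!page_table || !heap_base)
        abort();
}

/* Like find_page_index(): translate an address into a page index,
 * returning -1 for anything outside the dynamic space. */
static long demo_find_page_index(void *addr)
{
    long index = (char *)addr - heap_base;

    if (index >= 0) {
        index = (long)((unsigned long)index / PAGE_BYTES);
        if ((unsigned long)index < page_table_pages)
            return index;
    }
    return -1;
}

int main(void)
{
    demo_gc_init(16UL * 1024 * 1024);         /* 16 MiB demo dynamic space */

    printf("pages in table: %lu\n", page_table_pages);            /* 4096 */
    printf("page of heap_base + 5000: %ld\n",
           demo_find_page_index(heap_base + 5000));               /* 1 */
    printf("page of one-past-the-end: %ld\n",
           demo_find_page_index(heap_base + 16UL * 1024 * 1024)); /* -1 */

    free(page_table);
    free(heap_base);
    return 0;
}

Computing page_table_pages at run time from dynamic_space_size is what removes the compile-time NUM_PAGES ceiling; the per-page fields are still initialized explicitly, as the @@ -4430 hunk of the diff shows.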