X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fruntime%2Fgencgc-internal.h;h=c9fe3b5b27f2256eb0c9e799b59cad32efdff4bd;hb=1de12891f900d156ed035a097561ecd7755a256a;hp=18dc9fbdb1c47316827f4941577f377cf87706f5;hpb=1b64697e4e8a85ff8f11f5c05de71687dc5ad2e2;p=sbcl.git

diff --git a/src/runtime/gencgc-internal.h b/src/runtime/gencgc-internal.h
index 18dc9fb..c9fe3b5 100644
--- a/src/runtime/gencgc-internal.h
+++ b/src/runtime/gencgc-internal.h
@@ -19,104 +19,101 @@
 #ifndef _GENCGC_INTERNAL_H_
 #define _GENCGC_INTERNAL_H_
 
+#include <limits.h>
+#include "gc.h"
+#include "gencgc-alloc-region.h"
+#include "genesis/code.h"
+
 void gc_free_heap(void);
-inline int find_page_index(void *);
-inline void *page_address(int);
+inline page_index_t find_page_index(void *);
+inline void *page_address(page_index_t);
 int gencgc_handle_wp_violation(void *);
-lispobj *search_dynamic_space(lispobj *);
 
 struct page {
+    /* The name of this field is not well-chosen for its actual use.
+     * This is the offset from the start of the page to the start
+     * of the alloc_region which contains/contained it. It's negative or 0.
+     */
+    long first_object_offset;
+
+    /* the number of bytes of this page that are used. This may be less
+     * than the actual bytes used for pages within the current
+     * allocation regions. It should be 0 for all unallocated pages (not
+     * hard to achieve).
+     *
+     * Currently declared as an unsigned short to make the struct size
+     * smaller. This means that GENCGC-PAGE-SIZE is constrained to fit
+     * inside a short.
+     */
+    unsigned short bytes_used;
+
+#if USHRT_MAX < PAGE_BYTES
+#error "PAGE_BYTES too large"
+#endif
 
     unsigned
         /* This is set when the page is write-protected. This should
-         * always reflect the actual write_protect status of a page.
-         * (If the page is written into, we catch the exception, make
-         * the page writable, and clear this flag.) */
+         * always reflect the actual write_protect status of a page.
+         * (If the page is written into, we catch the exception, make
+         * the page writable, and clear this flag.) */
        write_protected :1,
-        /* This flag is set when the above write_protected flag is
-         * cleared by the SIGBUS handler (or SIGSEGV handler, for some
-         * OSes). This is useful for re-scavenging pages that are
-         * written during a GC. */
-        write_protected_cleared :1,
-        /* the region the page is allocated to: 0 for a free page; 1
+        /* This flag is set when the above write_protected flag is
+         * cleared by the SIGBUS handler (or SIGSEGV handler, for some
+         * OSes). This is useful for re-scavenging pages that are
+         * written during a GC. */
+        write_protected_cleared :1,
+        /* the region the page is allocated to: 0 for a free page; 1
          * for boxed objects; 2 for unboxed objects. If the page is
          * free the following slots are invalid (well the bytes_used
          * must be 0). */
-        allocated :2,
-        /* If this page should not be moved during a GC then this flag
+        allocated :3,
+        /* If this page should not be moved during a GC then this flag
          * is set. It's only valid during a GC for allocated pages. */
-        dont_move :1,
-        /* If the page is part of a large object then this flag is
+        dont_move :1,
+        /* If the page is part of a large object then this flag is
          * set. No other objects should be allocated to these pages.
          * This is only valid when the page is allocated. */
-        large_object :1;
+        large_object :1,
+        /* True if the page is known to contain only zeroes. */
+        need_to_zero :1;
 
     /* the generation that this page belongs to. This should be valid
     * for all pages that may have objects allocated, even current
     * allocation region pages - this allows the space of an object to
     * be easily determined. */
-    int gen;
-
-    /* the number of bytes of this page that are used. This may be less
-     * than the actual bytes used for pages within the current
-     * allocation regions. It should be 0 for all unallocated pages (not
-     * hard to achieve). */
-    int bytes_used;
-
-    /* It is important to know the offset to the first object in the
-     * page. Currently it's only important to know if an object starts
-     * at the beginning of the page in which case the offset would be 0. */
-    int first_object_offset;
+    generation_index_t gen;
 };
 
+
 /* values for the page.allocated field */
 
-/* the number of pages needed for the dynamic space - rounding up */
-#define NUM_PAGES ((DYNAMIC_SPACE_SIZE+4095)/4096)
-extern struct page page_table[NUM_PAGES];
-
-/* Abstract out the data for an allocation region allowing a single
- * routine to be used for allocation and closing. */
-struct alloc_region {
-
-    /* These two are needed for quick allocation. */
-    void *free_pointer;
-    void *end_addr; /* pointer to the byte after the last usable byte */
-
-    /* These are needed when closing the region. */
-    int first_page;
-    int last_page;
-    void *start_addr;
-};
-
-extern struct alloc_region boxed_region;
-extern struct alloc_region unboxed_region;
-extern int from_space, new_space;
-extern struct weak_pointer *weak_pointers;
-
-extern void *current_region_free_pointer;
-extern void *current_region_end_addr;
+extern unsigned page_table_pages;
+extern struct page *page_table;
 
-void gencgc_pickup_dynamic(void);
+/* forward declarations */
 
-void sniff_code_object(struct code *code, unsigned displacement);
+void sniff_code_object(struct code *code, unsigned long displacement);
 void gencgc_apply_code_fixups(struct code *old_code, struct code *new_code);
-int update_x86_dynamic_space_free_pointer(void);
-void gc_alloc_update_page_tables(int unboxed,
-                                 struct alloc_region *alloc_region);
+long update_dynamic_space_free_pointer(void);
+void gc_alloc_update_page_tables(int unboxed,
+                                 struct alloc_region *alloc_region);
+void gc_alloc_update_all_page_tables(void);
+void gc_set_region_empty(struct alloc_region *region);
+
 /*
  * predicates
  */
 
-static inline int
-space_matches_p(lispobj obj, int space)
+static inline boolean
+space_matches_p(lispobj obj, generation_index_t space)
 {
-    int page_index=(void*)obj - (void *)DYNAMIC_SPACE_START;
+    page_index_t page_index=(void*)obj - (void *)DYNAMIC_SPACE_START;
     return ((page_index >= 0)
-            && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
-            && (page_table[page_index].gen == space));
+            && ((page_index =
+                 ((unsigned long)page_index)/PAGE_BYTES) < page_table_pages)
+            && (page_table[page_index].gen == space));
 }
 
 static inline boolean
@@ -131,6 +128,7 @@ new_space_p(lispobj obj)
     return space_matches_p(obj,new_space);
 }
 
+extern page_index_t last_free_page;
+extern boolean gencgc_partial_pickup;
-
-#endif
+#endif
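
The rewritten space_matches_p above spells out how a dynamic-space address maps to a page_table entry: subtract DYNAMIC_SPACE_START, divide by PAGE_BYTES (replacing the old hard-coded 4096), and bounds-check against the run-time page_table_pages (replacing the old compile-time NUM_PAGES). As a reading aid only, here is a minimal C sketch of what the declared-but-not-defined find_page_index()/page_address() pair could look like under that same arithmetic; it is not the SBCL implementation, and it assumes the DYNAMIC_SPACE_START, PAGE_BYTES, page_index_t and page_table_pages definitions provided elsewhere in the runtime headers.

/* Illustrative sketch only -- not SBCL's find_page_index()/page_address().
 * It mirrors the address arithmetic visible in space_matches_p above,
 * assuming DYNAMIC_SPACE_START, PAGE_BYTES, page_index_t and
 * page_table_pages from the surrounding runtime headers. */
static inline page_index_t
find_page_index_sketch(void *addr)
{
    if (addr >= (void *)DYNAMIC_SPACE_START) {
        page_index_t index =
            ((char *)addr - (char *)DYNAMIC_SPACE_START) / PAGE_BYTES;
        if (index < (page_index_t)page_table_pages)
            return index;           /* index of the page holding addr */
    }
    return -1;                      /* address is not in the dynamic space */
}

static inline void *
page_address_sketch(page_index_t index)
{
    /* inverse mapping: page index back to the first byte of that page */
    return (void *)(DYNAMIC_SPACE_START + index * PAGE_BYTES);
}

The only substantive change this mirrors is that the page size and page count are no longer hard-coded (4096, NUM_PAGES) but come from PAGE_BYTES and the run-time page_table_pages.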
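
The write_protected and write_protected_cleared comments describe the protocol behind gencgc_handle_wp_violation(): a write to a protected page faults, the handler makes the page writable again, and the cleared flag marks the page for re-scavenging during the next collection. The following is a hedged sketch of that flag bookkeeping only, using the find_page_index() and page_table declared in this header; it is not the real handler, which additionally restores write access at the OS level and deals with faults that do not belong to the GC at all.

/* Hedged sketch of the bookkeeping implied by the write_protected /
 * write_protected_cleared comments above; NOT gencgc_handle_wp_violation()
 * itself.  The real handler also re-enables OS-level write access to the
 * page and treats addresses outside the page table differently. */
static int
handle_wp_violation_sketch(void *fault_addr)
{
    page_index_t index = find_page_index(fault_addr);

    if (index == -1 || !page_table[index].write_protected)
        return 0;                 /* not a GC-protected page: not handled */

    /* The page was written while protected: record that the protection is
     * gone so the page table matches reality, and remember that it was
     * cleared so the page gets re-scavenged. */
    page_table[index].write_protected = 0;
    page_table[index].write_protected_cleared = 1;
    return 1;                     /* fault accounted for */
}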