/* forward declarations */
page_index_t gc_find_freeish_pages(long *restart_page_ptr, long nbytes,
- int unboxed);
+ int page_type_flag);
\f
/*
* integrated with the Lisp code. */
page_index_t last_free_page;
\f
+#ifdef LISP_FEATURE_SB_THREAD
/* This lock is to prevent multiple threads from simultaneously
* allocating new regions which overlap each other. Note that the
* majority of GC is single-threaded, but alloc() may be called from
* >1 thread at a time and must be thread-safe. This lock must be
* seized before all accesses to generations[] or to parts of
* page_table[] that other threads may want to see */
-
-#ifdef LISP_FEATURE_SB_THREAD
static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
+/* This lock protects non-thread-local allocation: unboxed_region is
+ * shared by all threads, so general_alloc() serializes access to it
+ * with this mutex. */
+static pthread_mutex_t allocation_lock = PTHREAD_MUTEX_INITIALIZER;
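+/* Lock ordering: allocation_lock is seized before free_pages_lock.
+ * general_alloc() holds it across gc_alloc_with_region(), which may
+ * take free_pages_lock inside gc_alloc_new_region(). */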
#endif
\f
/* The generation currently being allocated to. */
static generation_index_t gc_alloc_generation;
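+
+/* Map (generation, page_type_flag, large) to the matching allocation
+ * restart page recorded in the generation structure. These two helpers
+ * replace the if (unboxed) ... else ... boilerplate formerly repeated
+ * at every call site below. */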
+static inline page_index_t
+generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large)
+{
+ if (large) {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ return generations[generation].alloc_large_unboxed_start_page;
+ } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ return generations[generation].alloc_large_start_page;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+ } else {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ return generations[generation].alloc_unboxed_start_page;
+ } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ return generations[generation].alloc_start_page;
+ } else {
+            lose("bad page type flag: %d", page_type_flag);
+ }
+ }
+}
+
+static inline void
+set_generation_alloc_start_page(generation_index_t generation, int page_type_flag, int large,
+ page_index_t page)
+{
+ if (large) {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ generations[generation].alloc_large_unboxed_start_page = page;
+ } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ generations[generation].alloc_large_start_page = page;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+ } else {
+ if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ generations[generation].alloc_unboxed_start_page = page;
+ } else if (BOXED_PAGE_FLAG == page_type_flag) {
+ generations[generation].alloc_start_page = page;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+ }
+}
+
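+/* A minimal usage sketch, mirroring the call sites below: closing a
+ * small boxed region records where allocation should resume,
+ *
+ *   set_generation_alloc_start_page(gc_alloc_generation,
+ *                                   BOXED_PAGE_FLAG, 0, next_page-1);
+ *
+ * and the matching lookup when opening the next region is
+ *
+ *   first_page = generation_alloc_start_page(gc_alloc_generation,
+ *                                            BOXED_PAGE_FLAG, 0);
+ */
+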
/* Find a new region with room for at least the given number of bytes.
*
* It starts looking at the current generation's alloc_start_page. So
* are allocated, although they will initially be empty.
*/
static void
-gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
page_index_t first_page;
page_index_t last_page;
&& (alloc_region->free_pointer == alloc_region->end_addr));
ret = thread_mutex_lock(&free_pages_lock);
gc_assert(ret == 0);
- if (unboxed) {
- first_page =
- generations[gc_alloc_generation].alloc_unboxed_start_page;
- } else {
- first_page =
- generations[gc_alloc_generation].alloc_start_page;
- }
- last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
+ first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0);
+ last_page=gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
+ npage_bytes(last_page-first_page);
/* The first page may have already been in use. */
if (page_table[first_page].bytes_used == 0) {
- if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[first_page].allocated = BOXED_PAGE_FLAG;
+ page_table[first_page].allocated = page_type_flag;
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].large_object = 0;
page_table[first_page].region_start_offset = 0;
}
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == page_type_flag);
page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
for (i = first_page+1; i <= last_page; i++) {
- if (unboxed)
- page_table[i].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[i].allocated = BOXED_PAGE_FLAG;
+ page_table[i].allocated = page_type_flag;
page_table[i].gen = gc_alloc_generation;
page_table[i].large_object = 0;
/* This may not be necessary for unboxed regions (think it was
* it is safe to try to re-update the page table of this reset
* alloc_region. */
void
-gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
+gc_alloc_update_page_tables(int page_type_flag, struct alloc_region *alloc_region)
{
int more;
page_index_t first_page;
gc_assert(page_table[first_page].region_start_offset == 0);
page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == page_type_flag);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
* region, and set the bytes_used. */
while (more) {
page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (unboxed)
- gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+        gc_assert(page_table[next_page].allocated == page_type_flag);
gc_assert(page_table[next_page].bytes_used == 0);
gc_assert(page_table[next_page].gen == gc_alloc_generation);
gc_assert(page_table[next_page].large_object == 0);
    /* Set the generation's alloc restart page to the last page of
* the region. */
- if (unboxed)
- generations[gc_alloc_generation].alloc_unboxed_start_page =
- next_page-1;
- else
- generations[gc_alloc_generation].alloc_start_page = next_page-1;
+ set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 0, next_page-1);
/* Add the region to the new_areas if requested. */
- if (!unboxed)
+ if (BOXED_PAGE_FLAG == page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used, region_size);
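+    /* (Unboxed regions are skipped: they contain no pointers, so the
+     * scavenger never needs to revisit them via new_areas.) */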
/*
/* Allocate a possibly large object. */
void *
-gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
+gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
page_index_t first_page;
page_index_t last_page;
ret = thread_mutex_lock(&free_pages_lock);
gc_assert(ret == 0);
- if (unboxed) {
- first_page =
- generations[gc_alloc_generation].alloc_large_unboxed_start_page;
- } else {
- first_page = generations[gc_alloc_generation].alloc_large_start_page;
- }
+ first_page = generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1);
if (first_page <= alloc_region->last_page) {
first_page = alloc_region->last_page+1;
}
- last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
+    last_page = gc_find_freeish_pages(&first_page, nbytes, page_type_flag);
gc_assert(first_page > alloc_region->last_page);
- if (unboxed)
- generations[gc_alloc_generation].alloc_large_unboxed_start_page =
- last_page;
- else
- generations[gc_alloc_generation].alloc_large_start_page = last_page;
+
+ set_generation_alloc_start_page(gc_alloc_generation, page_type_flag, 1, last_page);
/* Set up the pages. */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
/* If the first page was free then set up the gen, and
* region_start_offset. */
if (page_table[first_page].bytes_used == 0) {
- if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[first_page].allocated = BOXED_PAGE_FLAG;
+ page_table[first_page].allocated = page_type_flag;
page_table[first_page].gen = gc_alloc_generation;
page_table[first_page].region_start_offset = 0;
page_table[first_page].large_object = 1;
}
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == page_type_flag);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 1);
while (more) {
gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
gc_assert(page_table[next_page].bytes_used == 0);
- if (unboxed)
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[next_page].allocated = BOXED_PAGE_FLAG;
+ page_table[next_page].allocated = page_type_flag;
page_table[next_page].gen = gc_alloc_generation;
page_table[next_page].large_object = 1;
generations[gc_alloc_generation].bytes_allocated += nbytes;
/* Add the region to the new_areas if requested. */
- if (!unboxed)
+ if (BOXED_PAGE_FLAG == page_type_flag)
add_new_area(first_page,orig_first_page_bytes_used,nbytes);
/* Bump up last_free_page */
}
page_index_t
-gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int unboxed)
+gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes, int page_type_flag)
{
page_index_t first_page, last_page;
page_index_t restart_page = *restart_page_ptr;
(page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
last_page++;
bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
+ gc_assert(0 == page_table[last_page].bytes_used);
+ gc_assert(0 == page_table[last_page].write_protected);
}
if (bytes_found > most_bytes_found)
most_bytes_found = bytes_found;
while (first_page < page_table_pages) {
if (page_table[first_page].allocated == FREE_PAGE_FLAG)
{
+ gc_assert(0 == page_table[first_page].bytes_used);
bytes_found = PAGE_BYTES;
break;
}
- else if ((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ else if ((page_table[first_page].allocated == page_type_flag) &&
(page_table[first_page].large_object == 0) &&
(page_table[first_page].gen == gc_alloc_generation) &&
(page_table[first_page].write_protected == 0) &&
* functions will eventually call this */
void *
-gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
+gc_alloc_with_region(long nbytes, int page_type_flag, struct alloc_region *my_region,
int quick_p)
{
void *new_free_pointer;
if (nbytes>=large_object_size)
- return gc_alloc_large(nbytes,unboxed_p,my_region);
+ return gc_alloc_large(nbytes, page_type_flag, my_region);
/* Check whether there is room in the current alloc region. */
new_free_pointer = my_region->free_pointer + nbytes;
if (!quick_p &&
void_diff(my_region->end_addr,my_region->free_pointer) <= 32) {
/* If so, finished with the current region. */
- gc_alloc_update_page_tables(unboxed_p, my_region);
+ gc_alloc_update_page_tables(page_type_flag, my_region);
/* Set up a new region. */
- gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
+ gc_alloc_new_region(32 /*bytes*/, page_type_flag, my_region);
}
return((void *)new_obj);
/* Else not enough free space in the current region: retry with a
* new region. */
- gc_alloc_update_page_tables(unboxed_p, my_region);
- gc_alloc_new_region(nbytes, unboxed_p, my_region);
- return gc_alloc_with_region(nbytes,unboxed_p,my_region,0);
+ gc_alloc_update_page_tables(page_type_flag, my_region);
+ gc_alloc_new_region(nbytes, page_type_flag, my_region);
+    return gc_alloc_with_region(nbytes, page_type_flag, my_region, 0);
}
/* these are only used during GC: all allocation from the mutator calls
* alloc() -> gc_alloc_with_region() with the appropriate per-thread
* region */
-void *
-gc_general_alloc(long nbytes,int unboxed_p,int quick_p)
-{
- struct alloc_region *my_region =
- unboxed_p ? &unboxed_region : &boxed_region;
- return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
-}
-
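+/* In the wrappers below ALLOC_QUICK requests quick_p = 1, which makes
+ * gc_alloc_with_region() skip the check that closes out a nearly-full
+ * region (see above). */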
static inline void *
gc_quick_alloc(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}
static inline void *
gc_quick_alloc_large(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
+    return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}
static inline void *
gc_alloc_unboxed(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
+ return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
}
static inline void *
gc_quick_alloc_unboxed(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
static inline void *
gc_quick_alloc_large_unboxed(long nbytes)
{
- return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
+ return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
\f
{
gencgc_pickup_dynamic();
}
-
-
\f
/* alloc(..) is the external interface for memory allocation. It
* The check for a GC trigger is only performed when the current
* region is full, so in most cases it's not needed. */
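+/* Allocation is now split three ways: general_alloc_internal() does
+ * the work against a given region, general_alloc() picks the region
+ * (and lock) appropriate to the page type, and alloc() remains the
+ * boxed-only entry point for existing callers. */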
-lispobj *
-alloc(long nbytes)
+static inline lispobj *
+general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region,
+ struct thread *thread)
{
- struct thread *thread=arch_os_get_current_thread();
- struct alloc_region *region=
-#ifdef LISP_FEATURE_SB_THREAD
- thread ? &(thread->alloc_region) : &boxed_region;
-#else
- &boxed_region;
-#endif
#ifndef LISP_FEATURE_WIN32
lispobj alloc_signal;
#endif
gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
&& ((nbytes & LOWTAG_MASK) == 0));
-#if 0
- if(all_threads)
- /* there are a few places in the C code that allocate data in the
- * heap before Lisp starts. This is before interrupts are enabled,
- * so we don't need to check for pseudo-atomic */
-#ifdef LISP_FEATURE_SB_THREAD
- if(!get_psuedo_atomic_atomic(th)) {
- register u32 fs;
- fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
- th,th->os_thread);
- __asm__("movl %fs,%0" : "=r" (fs) : );
- fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
- debug_get_fs(),th->tls_cookie);
- lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
- }
-#else
- gc_assert(get_pseudo_atomic_atomic(th));
-#endif
-#endif
+    /* Must be inside a PA section: pseudo-atomic defers interrupt
+     * handling, so the region's free pointer is never observed or
+     * mutated mid-update. */
+ gc_assert(get_pseudo_atomic_atomic(thread));
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
return(new_obj); /* yup */
}
- /* we have to go the long way around, it seems. Check whether
- * we should GC in the near future
+ /* we have to go the long way around, it seems. Check whether we
+ * should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
- gc_assert(get_pseudo_atomic_atomic(thread));
/* Don't flood the system with interrupts if the need to gc is
* already noted. This can happen for example when SUB-GC
* allocates or after a gc triggered in a WITHOUT-GCING. */
set_pseudo_atomic_interrupted(thread);
}
}
- new_obj = gc_alloc_with_region(nbytes,0,region,0);
+ new_obj = gc_alloc_with_region(nbytes, page_type_flag, region, 0);
#ifndef LISP_FEATURE_WIN32
alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
return (new_obj);
}
+
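+/* A usage sketch (hypothetical call sites; N_WORD_BYTES is the usual
+ * word size constant): boxed allocation goes to the per-thread region
+ * with no locking,
+ *
+ *   general_alloc(4 * N_WORD_BYTES, BOXED_PAGE_FLAG);
+ *
+ * while unboxed allocation shares unboxed_region between threads and
+ * therefore takes allocation_lock,
+ *
+ *   general_alloc(4 * N_WORD_BYTES, UNBOXED_PAGE_FLAG);
+ *
+ * Either way the caller must be pseudo-atomic, as asserted in
+ * general_alloc_internal(). */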
+lispobj *
+general_alloc(long nbytes, int page_type_flag)
+{
+ struct thread *thread = arch_os_get_current_thread();
+    /* Select the correct region, and call general_alloc_internal with
+     * it. For other than boxed allocation we must lock first, since
+     * the region is shared. */
+ if (BOXED_PAGE_FLAG == page_type_flag) {
+#ifdef LISP_FEATURE_SB_THREAD
+ struct alloc_region *region = (thread ? &(thread->alloc_region) : &boxed_region);
+#else
+ struct alloc_region *region = &boxed_region;
+#endif
+ return general_alloc_internal(nbytes, page_type_flag, region, thread);
+ } else if (UNBOXED_PAGE_FLAG == page_type_flag) {
+ lispobj * obj;
+ gc_assert(0 == thread_mutex_lock(&allocation_lock));
+ obj = general_alloc_internal(nbytes, page_type_flag, &unboxed_region, thread);
+ gc_assert(0 == thread_mutex_unlock(&allocation_lock));
+ return obj;
+ } else {
+ lose("bad page type flag: %d", page_type_flag);
+ }
+}
+
+lispobj *
+alloc(long nbytes)
+{
+    return general_alloc(nbytes, BOXED_PAGE_FLAG);
+}
\f
/*
* shared support for the OS-dependent signal handlers which
/* Flush the alloc regions updating the tables. */
struct thread *th;
for_each_thread(th)
- gc_alloc_update_page_tables(0, &th->alloc_region);
- gc_alloc_update_page_tables(1, &unboxed_region);
- gc_alloc_update_page_tables(0, &boxed_region);
+ gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &th->alloc_region);
+ gc_alloc_update_page_tables(UNBOXED_PAGE_FLAG, &unboxed_region);
+ gc_alloc_update_page_tables(BOXED_PAGE_FLAG, &boxed_region);
}
void
* function being set to the value of the static symbol
* SB!VM:RESTART-LISP-FUNCTION */
void
-gc_and_save(char *filename, int prepend_runtime)
+gc_and_save(char *filename, boolean prepend_runtime,
+ boolean save_runtime_options)
{
FILE *file;
void *runtime_bytes = NULL;
/* The dumper doesn't know that pages need to be zeroed before use. */
zero_all_free_pages();
save_to_filehandle(file, filename, SymbolValue(RESTART_LISP_FUNCTION,0),
- prepend_runtime);
+ prepend_runtime, save_runtime_options);
/* Oops. Save still managed to fail. Since we've mangled the stack
* beyond hope, there's not much we can do.
* (beyond FUNCALLing RESTART_LISP_FUNCTION, but I suspect that's