#include <stdlib.h>
#include <stdio.h>
-#include <signal.h>
#include <errno.h>
#include <string.h>
#include "sbcl.h"
+#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD)
+#include "pthreads_win32.h"
+#else
+#include <signal.h>
+#endif
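/* A hypothetical sketch of the kind of mapping a pthreads_win32.h
 * shim provides, assuming Win32 critical sections back the mutex
 * type (illustration only; the real shim is more involved and also
 * covers condition variables, thread creation, and signal
 * emulation): */
#include <windows.h>
typedef CRITICAL_SECTION pthread_mutex_t;
static inline int pthread_mutex_lock(pthread_mutex_t *m)
{
    EnterCriticalSection(m);   /* void return: cannot fail */
    return 0;
}
static inline int pthread_mutex_unlock(pthread_mutex_t *m)
{
    LeaveCriticalSection(m);
    return 0;
}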
#include "runtime.h"
#include "os.h"
#include "interr.h"
#endif
/* forward declarations */
-page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, long nbytes,
+page_index_t gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t nbytes,
int page_type_flag);
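/* The long -> sword_t/uword_t conversions throughout this patch
 * matter because Win64 is an LLP64 platform: long stays 32 bits
 * while pointers are 64, so byte counts derived from pointers can
 * truncate.  A minimal sketch of the intent, assuming pointer-width
 * typedefs (the runtime's real definitions live in its headers): */
#include <stdint.h>
typedef intptr_t  sword_t;    /* signed, same width as a pointer   */
typedef uintptr_t uword_t;    /* unsigned, same width as a pointer */
/* On LLP64: sizeof(long) == 4, sizeof(sword_t) == sizeof(void *) == 8. */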
\f
}
\f
-#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
+#if defined(LISP_FEATURE_X86)
void fast_bzero(void*, size_t); /* in <arch>-assem.S */
#endif
* are allocated, although they will initially be empty.
*/
static void
-gc_alloc_new_region(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
+gc_alloc_new_region(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
page_index_t first_page;
page_index_t last_page;
gc_set_region_empty(alloc_region);
}
-static inline void *gc_quick_alloc(long nbytes);
+static inline void *gc_quick_alloc(word_t nbytes);
/* Allocate a possibly large object. */
void *
-gc_alloc_large(long nbytes, int page_type_flag, struct alloc_region *alloc_region)
+gc_alloc_large(sword_t nbytes, int page_type_flag, struct alloc_region *alloc_region)
{
boolean more;
page_index_t first_page, next_page, last_page;
static page_index_t gencgc_alloc_start_page = -1;
void
-gc_heap_exhausted_error_or_lose (long available, long requested)
+gc_heap_exhausted_error_or_lose (sword_t available, sword_t requested)
{
struct thread *thread = arch_os_get_current_thread();
/* Write basic information before doing anything else: if we don't
else {
/* FIXME: assert free_pages_lock held */
(void)thread_mutex_unlock(&free_pages_lock);
+#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
gc_assert(get_pseudo_atomic_atomic(thread));
clear_pseudo_atomic_atomic(thread);
if (get_pseudo_atomic_interrupted(thread))
do_pending_interrupt();
+#endif
/* Another issue is that signalling HEAP-EXHAUSTED error leads
* to running user code at arbitrary places, even in a
* WITHOUT-INTERRUPTS which may lead to a deadlock without
}
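/* The pseudo-atomic flags manipulated above implement deferred
 * interrupt delivery: while the atomic flag is set, an arriving
 * signal merely sets the interrupted flag, and the pending handler
 * runs once the section is left.  A sketch of the protocol, using
 * the accessor names that appear in this file: */
set_pseudo_atomic_atomic(thread);     /* enter the PA section       */
/* ... heap mutation that a signal handler must not observe ...     */
clear_pseudo_atomic_atomic(thread);   /* leave the PA section       */
if (get_pseudo_atomic_interrupted(thread))
    do_pending_interrupt();           /* replay the deferred signal */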
page_index_t
-gc_find_freeish_pages(page_index_t *restart_page_ptr, long bytes,
+gc_find_freeish_pages(page_index_t *restart_page_ptr, sword_t bytes,
int page_type_flag)
{
page_index_t most_bytes_found_from = 0, most_bytes_found_to = 0;
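/* A simplified sketch of the search this function performs: walk
 * page_table from *restart_page_ptr looking for a run of free pages
 * holding at least `bytes`; the most_bytes_found_* variables above
 * remember the best run seen so a failure can report how much
 * really was available.  Loop structure assumed, helper names as
 * used elsewhere in this file: */
page_index_t first, last;
for (first = *restart_page_ptr; first < page_table_pages; first = last + 1) {
    sword_t found = 0;
    for (last = first; last < page_table_pages && page_free_p(last); last++)
        found += GENCGC_CARD_BYTES;
    if (found >= bytes) {
        *restart_page_ptr = first;
        return first;              /* run is big enough */
    }
}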
* functions will eventually call this */
void *
-gc_alloc_with_region(long nbytes,int page_type_flag, struct alloc_region *my_region,
+gc_alloc_with_region(sword_t nbytes,int page_type_flag, struct alloc_region *my_region,
int quick_p)
{
void *new_free_pointer;
* region */
static inline void *
-gc_quick_alloc(long nbytes)
+gc_quick_alloc(word_t nbytes)
{
return gc_general_alloc(nbytes, BOXED_PAGE_FLAG, ALLOC_QUICK);
}
static inline void *
-gc_alloc_unboxed(long nbytes)
+gc_alloc_unboxed(word_t nbytes)
{
return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, 0);
}
static inline void *
-gc_quick_alloc_unboxed(long nbytes)
+gc_quick_alloc_unboxed(word_t nbytes)
{
return gc_general_alloc(nbytes, UNBOXED_PAGE_FLAG, ALLOC_QUICK);
}
* Bignums and vectors may have shrunk. If the object is not copied
* the space needs to be reclaimed, and the page_tables corrected. */
static lispobj
-general_copy_large_object(lispobj object, long nwords, boolean boxedp)
+general_copy_large_object(lispobj object, word_t nwords, boolean boxedp)
{
int tag;
lispobj *new;
}
lispobj
-copy_large_object(lispobj object, long nwords)
+copy_large_object(lispobj object, sword_t nwords)
{
return general_copy_large_object(object, nwords, 1);
}
lispobj
-copy_large_unboxed_object(lispobj object, long nwords)
+copy_large_unboxed_object(lispobj object, sword_t nwords)
{
return general_copy_large_object(object, nwords, 0);
}
/* to copy unboxed objects */
lispobj
-copy_unboxed_object(lispobj object, long nwords)
+copy_unboxed_object(lispobj object, sword_t nwords)
{
return gc_general_copy_object(object, nwords, UNBOXED_PAGE_FLAG);
}
sniff_code_object(struct code *code, os_vm_size_t displacement)
{
#ifdef LISP_FEATURE_X86
- long nheader_words, ncode_words, nwords;
+ sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr = NULL, constants_end_addr, p;
os_vm_address_t code_start_addr, code_end_addr;
os_vm_address_t code_addr = (os_vm_address_t)code;
{
/* x86-64 uses pc-relative addressing instead of this kludge */
#ifndef LISP_FEATURE_X86_64
- long nheader_words, ncode_words, nwords;
+ sword_t nheader_words, ncode_words, nwords;
os_vm_address_t constants_start_addr, constants_end_addr;
os_vm_address_t code_start_addr, code_end_addr;
os_vm_address_t code_addr = (os_vm_address_t)new_code;
if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
/* Got the fixups for the code block. Now work through the vector,
and apply a fixup at each address. */
- long length = fixnum_value(fixups_vector->length);
- long i;
+ sword_t length = fixnum_value(fixups_vector->length);
+ sword_t i;
for (i = 0; i < length; i++) {
long offset = fixups_vector->data[i];
/* Now check the current value of offset. */
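/* Each element of fixups_vector is a byte offset into the code
 * object at which a 32-bit absolute address was embedded.  When the
 * object moves, every embedded address is shifted by the distance
 * moved.  A sketch of the core of the loop body (hypothetical
 * variable `displacement`, the distance the code object moved, as
 * computed earlier in the enclosing function; the real code first
 * checks that the stored value still points into the old object):
 *
 *     *(uword_t *)(code_start_addr + offset) += displacement;
 */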
trans_boxed_large(lispobj object)
{
lispobj header;
- unsigned long length;
+ uword_t length;
gc_assert(is_lisp_pointer(object));
trans_unboxed_large(lispobj object)
{
lispobj header;
- unsigned long length;
+ uword_t length;
gc_assert(is_lisp_pointer(object));
#define WEAK_POINTER_NWORDS \
CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
-static long
+static sword_t
scav_weak_pointer(lispobj *where, lispobj object)
{
/* Since we overwrite the 'next' field, we have to make
{
page_index_t first_page;
page_index_t next_page;
- long nwords;
+ sword_t nwords;
- unsigned long remaining_bytes;
- unsigned long bytes_freed;
- unsigned long old_bytes_used;
+ uword_t remaining_bytes;
+ uword_t bytes_freed;
+ uword_t old_bytes_used;
int boxed;
/* quick check 2: Check the offset within the page.
*
*/
- if (((unsigned long)addr & (GENCGC_CARD_BYTES - 1)) >
+ if (((uword_t)addr & (GENCGC_CARD_BYTES - 1)) >
page_table[addr_page_index].bytes_used)
return;
if (page_free_p(addr_page_index)
|| (page_table[addr_page_index].bytes_used == 0)
/* Check the offset within the page. */
- || (((unsigned long)addr & (GENCGC_CARD_BYTES - 1))
+ || (((uword_t)addr & (GENCGC_CARD_BYTES - 1))
> page_table[addr_page_index].bytes_used)) {
FSHOW((stderr,
"weird? ignore ptr 0x%x to freed area of large object\n",
update_page_write_prot(page_index_t page)
{
generation_index_t gen = page_table[page].gen;
- long j;
+ sword_t j;
int wp_it = 1;
void **page_addr = (void **)page_address(page);
- long num_words = page_table[page].bytes_used / N_WORD_BYTES;
+ sword_t num_words = page_table[page].bytes_used / N_WORD_BYTES;
/* Shouldn't be a free page. */
gc_assert(page_allocated_p(page));
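/* The core decision in update_page_write_prot: a page may be
 * write-protected only if no word on it points into a younger
 * generation, because the later WP fault is what tells the GC the
 * page might have gained such a pointer.  A sketch of the scan,
 * using helpers that appear elsewhere in this file: */
for (j = 0; j < num_words; j++) {
    lispobj word = (lispobj)page_addr[j];
    page_index_t target = find_page_index((void *)word);
    if (is_lisp_pointer(word) && (target != -1)
        && (page_table[target].gen < gen)) {
        wp_it = 0;    /* found a young pointer: leave page writable */
        break;
    }
}
if (wp_it)
    os_protect((void *)page_addr, GENCGC_CARD_BYTES,
               OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);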
}
if (!write_protected) {
scavenge(page_address(i),
- ((unsigned long)(page_table[last_page].bytes_used
+ ((uword_t)(page_table[last_page].bytes_used
+ npage_bytes(last_page-i)))
/N_WORD_BYTES);
/* Do a limited check for write-protected pages. */
if (!all_wp) {
- long nwords = (((unsigned long)
+ sword_t nwords = (((uword_t)
(page_table[last_page].bytes_used
+ npage_bytes(last_page-i)
+ page_table[i].region_start_offset))
page_index_t i;
void *region_addr = 0;
void *page_addr = 0;
- unsigned long region_bytes = 0;
+ uword_t region_bytes = 0;
for (i = 0; i < last_free_page; i++) {
if (page_allocated_p(i)
* assumes that all objects have been copied or promoted to an older
* generation. Bytes_allocated and the generation bytes_allocated
* counter are updated. The number of bytes freed is returned. */
-static unsigned long
+static uword_t
free_oldspace(void)
{
- unsigned long bytes_freed = 0;
+ uword_t bytes_freed = 0;
page_index_t first_page, last_page;
first_page = 0;
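/* A sketch of the sweep: every page still tagged with the condemned
 * generation whose contents were not pinned by a conservative
 * pointer goes back to the free set, and its bytes are counted.
 * Field names follow the page_table usage in this file; the real
 * loop additionally handles the zeroing policy for freed pages: */
for (last_page = first_page; last_page < last_free_page; last_page++) {
    if (page_allocated_p(last_page)
        && (page_table[last_page].gen == from_space)
        && !page_table[last_page].dont_move) {
        bytes_freed += page_table[last_page].bytes_used;
        page_table[last_page].allocated = FREE_PAGE_FLAG;
        page_table[last_page].bytes_used = 0;
    }
}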
{
int is_in_dynamic_space = (find_page_index((void*)start) != -1);
int is_in_readonly_space =
- (READ_ONLY_SPACE_START <= (unsigned long)start &&
- (unsigned long)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
+ (READ_ONLY_SPACE_START <= (uword_t)start &&
+ (uword_t)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
while (words > 0) {
size_t count = 1;
if (is_lisp_pointer(thing)) {
page_index_t page_index = find_page_index((void*)thing);
- long to_readonly_space =
+ sword_t to_readonly_space =
(READ_ONLY_SPACE_START <= thing &&
thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
- long to_static_space =
+ sword_t to_static_space =
(STATIC_SPACE_START <= thing &&
thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
case INSTANCE_HEADER_WIDETAG:
{
lispobj nuntagged;
- long ntotal = HeaderValue(thing);
+ sword_t ntotal = HeaderValue(thing);
lispobj layout = ((struct instance *)start)->slots[0];
if (!layout) {
count = 1;
{
lispobj object = *start;
struct code *code;
- long nheader_words, ncode_words, nwords;
+ sword_t nheader_words, ncode_words, nwords;
lispobj fheaderl;
struct simple_fun *fheaderp;
* Some counts of lispobjs are called foo_count; it might be good
* to grep for all foo_size and rename the appropriate ones to
* foo_count. */
- long read_only_space_size =
+ sword_t read_only_space_size =
(lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
- (lispobj*)READ_ONLY_SPACE_START;
- long static_space_size =
+ sword_t static_space_size =
(lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
- (lispobj*)STATIC_SPACE_START;
struct thread *th;
for_each_thread(th) {
- long binding_stack_size =
+ sword_t binding_stack_size =
(lispobj*)get_binding_stack_pointer(th)
- (lispobj*)th->binding_stack_start;
verify_space(th->binding_stack_start, binding_stack_size);
break;
verify_space(page_address(i),
- ((unsigned long)
+ ((uword_t)
(page_table[last_page].bytes_used
+ npage_bytes(last_page-i)))
/ N_WORD_BYTES);
for (page = 0; page < last_free_page; page++) {
if (page_free_p(page)) {
/* The whole page should be zero filled. */
- long *start_addr = (long *)page_address(page);
- long size = 1024;
- long i;
+ sword_t *start_addr = (sword_t *)page_address(page);
+ sword_t size = 1024;
+ sword_t i;
for (i = 0; i < size; i++) {
if (start_addr[i] != 0) {
lose("free page not zero at %x\n", start_addr + i);
}
}
} else {
- long free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
+ sword_t free_bytes = GENCGC_CARD_BYTES - page_table[page].bytes_used;
if (free_bytes > 0) {
- long *start_addr = (long *)((unsigned long)page_address(page)
+ sword_t *start_addr = (sword_t *)((uword_t)page_address(page)
+ page_table[page].bytes_used);
- long size = free_bytes / N_WORD_BYTES;
- long i;
+ sword_t size = free_bytes / N_WORD_BYTES;
+ sword_t i;
for (i = 0; i < size; i++) {
if (start_addr[i] != 0) {
lose("free region not zero at %x\n", start_addr + i);
/* On Darwin the signal context isn't a contiguous block of memory,
 * so just preserve_pointer()ing its contents won't be sufficient.
*/
-#if defined(LISP_FEATURE_DARWIN)
+#if defined(LISP_FEATURE_DARWIN) || defined(LISP_FEATURE_WIN32)
#if defined LISP_FEATURE_X86
preserve_pointer((void*)*os_context_register_addr(c,reg_EAX));
preserve_pointer((void*)*os_context_register_addr(c,reg_ECX));
#error "preserve_context_registers needs to be tweaked for non-x86 Darwin"
#endif
#endif
+#if !defined(LISP_FEATURE_WIN32)
for(ptr = ((void **)(c+1))-1; ptr>=(void **)c; ptr--) {
preserve_pointer(*ptr);
}
+#endif
}
#endif
static void
garbage_collect_generation(generation_index_t generation, int raise)
{
- unsigned long bytes_freed;
+ uword_t bytes_freed;
page_index_t i;
- unsigned long static_space_size;
+ uword_t static_space_size;
struct thread *th;
gc_assert(generation <= HIGHEST_NORMAL_GENERATION);
}
# endif
# elif defined(LISP_FEATURE_SB_THREAD)
- long i,free;
+ sword_t i,free;
if(th==arch_os_get_current_thread()) {
/* Somebody is going to burn in hell for this, but casting
* it in two steps shuts gcc up about strict aliasing. */
#if QSHOW
if (gencgc_verbose > 1) {
- long num_dont_move_pages = count_dont_move_pages();
+ sword_t num_dont_move_pages = count_dont_move_pages();
fprintf(stderr,
"/non-movable pages due to conservative pointers = %d (%d bytes)\n",
num_dont_move_pages,
scavenge_control_stack(th);
}
+# ifdef LISP_FEATURE_SB_SAFEPOINT
+ /* In this case, scrub all stacks right here from the GCing thread
+ * instead of doing what the comment below says. Suboptimal, but
+ * easier. */
+ for_each_thread(th)
+ scrub_thread_control_stack(th);
+# else
/* Scrub the unscavenged control stack space, so that we can't run
* into any stale pointers in a later GC (this is done by the
* stop-for-gc handler in the other threads). */
scrub_control_stack();
+# endif
}
#endif
{
struct thread *th;
for_each_thread(th) {
- long len= (lispobj *)get_binding_stack_pointer(th) -
+ sword_t len= (lispobj *)get_binding_stack_pointer(th) -
th->binding_stack_start;
scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
* please submit a patch. */
#if 0
if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
- unsigned long read_only_space_size =
+ uword_t read_only_space_size =
(lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
(lispobj*)READ_ONLY_SPACE_START;
FSHOW((stderr,
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
-long
+sword_t
update_dynamic_space_free_pointer(void)
{
page_index_t last_page = -1, i;
#endif
} else if (gencgc_zero_check_during_free_heap) {
/* Double-check that the page is zero filled. */
- long *page_start;
+ sword_t *page_start;
page_index_t i;
gc_assert(page_free_p(page));
gc_assert(page_table[page].bytes_used == 0);
- page_start = (long *)page_address(page);
- for (i=0; i<GENCGC_CARD_BYTES/sizeof(long); i++) {
+ page_start = (sword_t *)page_address(page);
+ for (i=0; i<GENCGC_CARD_BYTES/sizeof(sword_t); i++) {
if (page_start[i] != 0) {
lose("free region not zero at %x\n", page_start + i);
}
{
page_index_t i;
+#if defined(LISP_FEATURE_SB_SAFEPOINT)
+ alloc_gc_page();
+#endif
+
/* Compute the number of pages needed for the dynamic space.
* Dynamic space size should be aligned on page size. */
page_table_pages = dynamic_space_size/GENCGC_CARD_BYTES;
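/* alloc_gc_page above reserves the page at the heart of the
 * safepoint mechanism: compiled code touches a fixed address at
 * every safepoint, and revoking access to that page traps each
 * thread when a stop-the-world is requested.  A hypothetical sketch
 * of the idea (names and details assumed, not the actual
 * implementation):
 *
 *     static void *safepoint_page;
 *
 *     void alloc_gc_page(void) {
 *         safepoint_page = os_validate(NULL, os_vm_page_size);
 *     }
 *
 *     void arm_safepoint(void) {  // threads now fault at safepoints
 *         os_protect(safepoint_page, os_vm_page_size, OS_VM_PROT_NONE);
 *     }
 */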
* region is full, so in most cases it's not needed. */
static inline lispobj *
-general_alloc_internal(long nbytes, int page_type_flag, struct alloc_region *region,
+general_alloc_internal(sword_t nbytes, int page_type_flag, struct alloc_region *region,
struct thread *thread)
{
#ifndef LISP_FEATURE_WIN32
gc_assert(nbytes>0);
/* Check for alignment allocation problems. */
- gc_assert((((unsigned long)region->free_pointer & LOWTAG_MASK) == 0)
+ gc_assert((((uword_t)region->free_pointer & LOWTAG_MASK) == 0)
&& ((nbytes & LOWTAG_MASK) == 0));
+#if !(defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_SB_THREAD))
/* Must be inside a PA section. */
gc_assert(get_pseudo_atomic_atomic(thread));
+#endif
if (nbytes > large_allocation)
large_allocation = nbytes;
/* for sb-prof, and not supported on Windows yet */
alloc_signal = SymbolValue(ALLOC_SIGNAL,thread);
if ((alloc_signal & FIXNUM_TAG_MASK) == 0) {
- if ((signed long) alloc_signal <= 0) {
+ if ((sword_t) alloc_signal <= 0) {
SetSymbolValue(ALLOC_SIGNAL, T, thread);
raise(SIGPROF);
} else {
}
lispobj *
-general_alloc(long nbytes, int page_type_flag)
+general_alloc(sword_t nbytes, int page_type_flag)
{
struct thread *thread = arch_os_get_current_thread();
/* Select correct region, and call general_alloc_internal with it.
}
}
-lispobj *
+lispobj AMD64_SYSV_ABI *
alloc(long nbytes)
{
+#ifdef LISP_FEATURE_WIN32
+ /* WIN32 is currently the only platform where inline allocation is
+ * not pseudo-atomic. */
+ struct thread *self = arch_os_get_current_thread();
+ int was_pseudo_atomic = get_pseudo_atomic_atomic(self);
+ if (!was_pseudo_atomic)
+ set_pseudo_atomic_atomic(self);
+#else
gc_assert(get_pseudo_atomic_atomic(arch_os_get_current_thread()));
- return general_alloc(nbytes, BOXED_PAGE_FLAG);
+#endif
+
+ lispobj *result = general_alloc(nbytes, BOXED_PAGE_FLAG);
+
+#ifdef LISP_FEATURE_WIN32
+ if (!was_pseudo_atomic)
+ clear_pseudo_atomic_atomic(self);
+#endif
+
+ return result;
}
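/* AMD64_SYSV_ABI on alloc()'s return type presumably pins the
 * function to the System V calling convention even on Win64, whose
 * native convention differs, so compiled Lisp allocation sequences
 * can use one set of register conventions on every x86-64 platform.
 * A sketch of how such a macro might be defined (an assumption, not
 * the actual definition): */
#if defined(LISP_FEATURE_WIN32) && defined(LISP_FEATURE_X86_64)
# define AMD64_SYSV_ABI __attribute__((sysv_abi))
#else
# define AMD64_SYSV_ABI
#endif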
\f
/*