/*
- * allocation routines for C code. For allocation done by Lisp look
+ * allocation routines for C code. For allocation done by Lisp look
* instead at src/compiler/target/alloc.lisp and .../macros.lisp
*/
#if defined LISP_FEATURE_GENCGC
extern lispobj *alloc(int bytes);
lispobj *
-pa_alloc(int bytes)
+pa_alloc(int bytes)
{
lispobj *result=0;
struct thread *th=arch_os_get_current_thread();
result=alloc(bytes);
SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0),th);
if (fixnum_value(SymbolValue(PSEUDO_ATOMIC_INTERRUPTED,th)))
- /* even if we gc at this point, the new allocation will be
- * protected from being moved, because result is on the c stack
- * and points to it */
- do_pending_interrupt();
- return result;
+ /* even if we gc at this point, the new allocation will be
+ * protected from being moved, because result is on the c stack
+ * and points to it */
+ do_pending_interrupt();
+ return result;
}
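/* Sketch (not from the original file): the pseudo-atomic protocol around
 * alloc() is assumed to look roughly like
 *
 *   SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1), th);  // enter
 *   result = alloc(bytes);        // GC/signal handling deferred meanwhile
 *   SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0), th);  // leave
 *   if (fixnum_value(SymbolValue(PSEUDO_ATOMIC_INTERRUPTED, th)))
 *       do_pending_interrupt();   // run the signal we postponed
 *
 * so a signal arriving mid-allocation only sets PSEUDO_ATOMIC_INTERRUPTED
 * and is serviced once the allocation has completed. */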
#else
SET_FREE_POINTER((lispobj *)(result + bytes));
if (GET_GC_TRIGGER() && GET_FREE_POINTER() > GET_GC_TRIGGER()) {
- SET_GC_TRIGGER((char *)GET_FREE_POINTER()
- - (char *)current_dynamic_space);
+ SET_GC_TRIGGER((char *)GET_FREE_POINTER()
+ - (char *)current_dynamic_space);
}
return (lispobj *) result;
}
ptr->digits[0] = n;
- return make_lispobj(ptr, OTHER_POINTER_LOWTAG);
+ return make_lispobj(ptr, OTHER_POINTER_LOWTAG);
}
}
{
struct sap *sap;
sap=(struct sap *)
- alloc_unboxed((int)SAP_WIDETAG, sizeof(struct sap)/sizeof(lispobj) -1);
+ alloc_unboxed((int)SAP_WIDETAG, sizeof(struct sap)/sizeof(lispobj) -1);
sap->pointer = ptr;
return make_lispobj(sap,OTHER_POINTER_LOWTAG);
}
* page size is. */
if (mmap((os_vm_address_t) call_into_lisp_LRA_page,os_vm_page_size,
- OS_VM_PROT_ALL,MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,-1,0)
- == (os_vm_address_t) -1)
- perror("mmap");
-
+ OS_VM_PROT_ALL,MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,-1,0)
+ == (os_vm_address_t) -1)
+ perror("mmap");
+
/* call_into_lisp_LRA is a collection of trampolines written in asm -
* see alpha-assem.S. We copy it to call_into_lisp_LRA_page where
- * VOPs and things can find it. (I don't know why they can't find it
+ * VOPs and things can find it. (I don't know why they can't find it
* where it was to start with.) */
bcopy(call_into_lisp_LRA,(void *)call_into_lisp_LRA_page,os_vm_page_size);
os_flush_icache((os_vm_address_t)call_into_lisp_LRA_page,
- os_vm_page_size);
+ os_vm_page_size);
return;
}
-os_vm_address_t
+os_vm_address_t
arch_get_bad_addr (int sig, siginfo_t *code, os_context_t *context)
{
unsigned int badinst;
/* Instructions are 32 bit quantities. */
unsigned int *pc ;
/* fprintf(stderr,"arch_get_bad_addr %d %p %p\n",
- sig, code, context); */
+ sig, code, context); */
pc= (unsigned int *)(*os_context_pc_addr(context));
if (((unsigned long)pc) & 3) {
- return NULL; /* In what case would pc be unaligned?? */
+ return NULL; /* In what case would pc be unaligned?? */
}
if ( (pc < READ_ONLY_SPACE_START ||
- pc >= READ_ONLY_SPACE_START+READ_ONLY_SPACE_SIZE) &&
- (pc < current_dynamic_space ||
- pc >= current_dynamic_space + DYNAMIC_SPACE_SIZE))
- return NULL;
+ pc >= READ_ONLY_SPACE_START+READ_ONLY_SPACE_SIZE) &&
+ (pc < current_dynamic_space ||
+ pc >= current_dynamic_space + DYNAMIC_SPACE_SIZE))
+ return NULL;
return context->uc_mcontext.sc_traparg_a0;
}
{
/* On coming out of an atomic section, we subtract 1 from
* reg_Alloc, then try to store something at that address. So,
- * to signal that it was interrupted and a signal should be handled,
+ * to signal that it was interrupted and a signal should be handled,
* we set bit 63 of reg_ALLOC here so that the end-of-atomic code
* will raise SIGSEGV (no ram mapped there). We catch the signal
- * (see the appropriate *-os.c) and call interrupt_handle_pending()
+ * (see the appropriate *-os.c) and call interrupt_handle_pending()
* for the saved signal instead */
*os_context_register_addr(context,reg_ALLOC) |= (1L<<63);
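/* Concretely (illustrative, restating the comment above): the end-of-atomic
 * sequence amounts to
 *
 *   reg_ALLOC = reg_ALLOC - 1;     // leave pseudo-atomic
 *   *(char *)reg_ALLOC = 0;        // probe store
 *
 * and with bit 63 set the probe store hits an address that is never mapped,
 * so it raises SIGSEGV and the *-os.c handler gets to run the deferred
 * interrupt. */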
unsigned int *ptr = (unsigned int *)pc;
unsigned long result = (unsigned long) *ptr;
*ptr = BREAKPOINT_INST;
-
+
os_flush_icache((os_vm_address_t)ptr, sizeof(unsigned long));
-
+
return result;
}
int reg_a = (orig_inst >> 21) & 0x1f;
int reg_b = (orig_inst >> 16) & 0x1f;
int disp =
- (orig_inst&(1<<20)) ?
- orig_inst | (-1 << 21) :
- orig_inst&0x1fffff;
+ (orig_inst&(1<<20)) ?
+ orig_inst | (-1 << 21) :
+ orig_inst&0x1fffff;
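/* Worked example (illustrative): Alpha branch instructions carry a 21-bit
 * signed displacement in bits 0..20, bit 20 being the sign.  When bit 20 is
 * set, ORing with (-1 << 21) forces bits 21..31 to ones, sign-extending the
 * field: a field of 0x1fffff yields disp = -1, so the branch target computed
 * below becomes next_pc + (-1)*4. */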
int next_pc = *os_context_pc_addr(context);
- int branch = 0; /* was NULL; */
+ int branch = 0; /* was NULL; */
switch(op) {
case 0x1a: /* jmp, jsr, jsr_coroutine, ret */
- *os_context_register_addr(context,reg_a) =
- *os_context_pc_addr(context);
- *os_context_pc_addr(context) =
- *os_context_register_addr(context,reg_b)& ~3;
- break;
+ *os_context_register_addr(context,reg_a) =
+ *os_context_pc_addr(context);
+ *os_context_pc_addr(context) =
+ *os_context_register_addr(context,reg_b)& ~3;
+ break;
case 0x30: /* br */
- *os_context_register_addr(context,reg_a)=*os_context_pc_addr(context);
- branch = 1;
- break;
+ *os_context_register_addr(context,reg_a)=*os_context_pc_addr(context);
+ branch = 1;
+ break;
case 0x31: /* fbeq */
- if (*(os_context_float_register_addr(context,reg_a))==0) branch = 1;
- break;
+ if (*(os_context_float_register_addr(context,reg_a))==0) branch = 1;
+ break;
case 0x32: /* fblt */
- if (*os_context_float_register_addr(context,reg_a)<0) branch = 1;
- break;
+ if (*os_context_float_register_addr(context,reg_a)<0) branch = 1;
+ break;
case 0x33: /* fble */
- if (*os_context_float_register_addr(context,reg_a)<=0) branch = 1;
- break;
+ if (*os_context_float_register_addr(context,reg_a)<=0) branch = 1;
+ break;
case 0x34: /* bsr */
- *os_context_register_addr(context,reg_a)=*os_context_pc_addr(context);
- branch = 1;
- break;
+ *os_context_register_addr(context,reg_a)=*os_context_pc_addr(context);
+ branch = 1;
+ break;
case 0x35: /* fbne */
- if (*os_context_register_addr(context,reg_a)!=0) branch = 1;
- break;
+ if (*os_context_float_register_addr(context,reg_a)!=0) branch = 1;
+ break;
case 0x36: /* fbge */
- if (*os_context_float_register_addr(context,reg_a)>=0) branch = 1;
- break;
+ if (*os_context_float_register_addr(context,reg_a)>=0) branch = 1;
+ break;
case 0x37: /* fbgt */
- if (*os_context_float_register_addr(context,reg_a)>0) branch = 1;
- break;
+ if (*os_context_float_register_addr(context,reg_a)>0) branch = 1;
+ break;
case 0x38: /* blbc */
- if ((*os_context_register_addr(context,reg_a)&1) == 0) branch = 1;
- break;
+ if ((*os_context_register_addr(context,reg_a)&1) == 0) branch = 1;
+ break;
case 0x39: /* beq */
- if (*os_context_register_addr(context,reg_a)==0) branch = 1;
- break;
+ if (*os_context_register_addr(context,reg_a)==0) branch = 1;
+ break;
case 0x3a: /* blt */
- if (*os_context_register_addr(context,reg_a)<0) branch = 1;
- break;
+ if (*os_context_register_addr(context,reg_a)<0) branch = 1;
+ break;
case 0x3b: /* ble */
- if (*os_context_register_addr(context,reg_a)<=0) branch = 1;
- break;
+ if (*os_context_register_addr(context,reg_a)<=0) branch = 1;
+ break;
case 0x3c: /* blbs */
- if ((*os_context_register_addr(context,reg_a)&1)!=0) branch = 1;
- break;
+ if ((*os_context_register_addr(context,reg_a)&1)!=0) branch = 1;
+ break;
case 0x3d: /* bne */
- if (*os_context_register_addr(context,reg_a)!=0) branch = 1;
- break;
+ if (*os_context_register_addr(context,reg_a)!=0) branch = 1;
+ break;
case 0x3e: /* bge */
- if (*os_context_register_addr(context,reg_a)>=0) branch = 1;
- break;
+ if (*os_context_register_addr(context,reg_a)>=0) branch = 1;
+ break;
case 0x3f: /* bgt */
- if (*os_context_register_addr(context,reg_a)>0) branch = 1;
- break;
+ if (*os_context_register_addr(context,reg_a)>0) branch = 1;
+ break;
}
if (branch)
- next_pc += disp*4;
+ next_pc += disp*4;
return next_pc;
}
/* Perform the instruction that we overwrote with a breakpoint. As we
* don't have a single-step facility, this means we have to:
* - put the instruction back
- * - put a second breakpoint at the following instruction,
+ * - put a second breakpoint at the following instruction,
* set after_breakpoint and continue execution.
*
* When the second breakpoint is hit (very shortly thereafter, we hope)
- * sigtrap_handler gets called again, but follows the AfterBreakpoint
- * arm, which
- * - puts a bpt back in the first breakpoint place (running across a
+ * sigtrap_handler gets called again, but follows the AfterBreakpoint
+ * arm, which
+ * - puts a bpt back in the first breakpoint place (running across a
* breakpoint shouldn't cause it to be uninstalled)
* - replaces the second bpt with the instruction it was meant to be
- * - carries on
+ * - carries on
*
* Clear?
*/
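/* A rough sketch of that dance, using the names from the code below
 * (illustration only):
 *
 *   *pc = orig_inst;                        // 1. restore the real instruction
 *   next_pc = branch? emulate_branch(...)   // 2. where will it take us?
 *                   : pc + 1;
 *   displaced_after_inst = *next_pc;        // 3. save what we clobber...
 *   *next_pc = BREAKPOINT_INST;             //    ...and plant the second bpt
 *   after_breakpoint = 1;                   // 4. resume; the next SIGTRAP
 *                                           //    undoes steps 1 and 3
 */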
os_flush_icache((os_vm_address_t)pc, sizeof(unsigned long));
skipped_break_addr = pc;
- /* Figure out where we will end up after running the displaced
+ /* Figure out where we will end up after running the displaced
* instruction */
if (op == 0x1a || (op&0xf0) == 0x30) /* a branch (opcodes 0x30-0x3f) */
- /* The cast to long is just to shut gcc up. */
- next_pc = (unsigned int *)((long)emulate_branch(context,orig_inst));
+ /* The cast to long is just to shut gcc up. */
+ next_pc = (unsigned int *)((long)emulate_branch(context,orig_inst));
else
- next_pc = pc+1;
-
+ next_pc = pc+1;
+
/* Set the after breakpoint. */
displaced_after_inst = *next_pc;
*next_pc = BREAKPOINT_INST;
* breakpoint or a "system service" */
if ((*(unsigned int*)(*os_context_pc_addr(context)-4))==BREAKPOINT_INST) {
- if (after_breakpoint) {
- /* see comments above arch_do_displaced_inst. This is where
- * we reinsert the breakpoint that we removed earlier */
-
- *os_context_pc_addr(context) -=4;
- *skipped_break_addr = BREAKPOINT_INST;
- os_flush_icache((os_vm_address_t)skipped_break_addr,
- sizeof(unsigned long));
- skipped_break_addr = NULL;
- *(unsigned int *)*os_context_pc_addr(context) =
- displaced_after_inst;
- os_flush_icache((os_vm_address_t)*os_context_pc_addr(context), sizeof(unsigned long));
- *os_context_sigmask_addr(context)= orig_sigmask;
- after_breakpoint=0; /* false */
- return;
- } else
- code = trap_Breakpoint;
+ if (after_breakpoint) {
+ /* see comments above arch_do_displaced_inst. This is where
+ * we reinsert the breakpoint that we removed earlier */
+
+ *os_context_pc_addr(context) -=4;
+ *skipped_break_addr = BREAKPOINT_INST;
+ os_flush_icache((os_vm_address_t)skipped_break_addr,
+ sizeof(unsigned long));
+ skipped_break_addr = NULL;
+ *(unsigned int *)*os_context_pc_addr(context) =
+ displaced_after_inst;
+ os_flush_icache((os_vm_address_t)*os_context_pc_addr(context), sizeof(unsigned long));
+ *os_context_sigmask_addr(context)= orig_sigmask;
+ after_breakpoint=0; /* false */
+ return;
+ } else
+ code = trap_Breakpoint;
} else
- /* a "system service" */
+ /* a "system service" */
code=*((u32 *)(*os_context_pc_addr(context)));
-
+
switch (code) {
case trap_PendingInterrupt:
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- break;
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ break;
case trap_Halt:
- fake_foreign_function_call(context);
- lose("%%primitive halt called; the party is over.\n");
+ fake_foreign_function_call(context);
+ lose("%%primitive halt called; the party is over.\n");
case trap_Error:
case trap_Cerror:
- interrupt_internal_error(signal, siginfo, context, code==trap_Cerror);
- break;
+ interrupt_internal_error(signal, siginfo, context, code==trap_Cerror);
+ break;
- case trap_Breakpoint: /* call lisp-level handler */
+ case trap_Breakpoint: /* call lisp-level handler */
*os_context_pc_addr(context) -=4;
- handle_breakpoint(signal, siginfo, context);
- break;
+ handle_breakpoint(signal, siginfo, context);
+ break;
case trap_FunEndBreakpoint:
*os_context_pc_addr(context) -=4;
- *os_context_pc_addr(context) =
- (int)handle_fun_end_breakpoint(signal, siginfo, context);
- break;
+ *os_context_pc_addr(context) =
+ (int)handle_fun_end_breakpoint(signal, siginfo, context);
+ break;
default:
- fprintf(stderr, "unidentified breakpoint/trap %d\n",code);
- interrupt_handle_now(signal, siginfo, context);
- break;
+ fprintf(stderr, "unidentified breakpoint/trap %d\n",code);
+ interrupt_handle_now(signal, siginfo, context);
+ break;
}
}
#define _ALPHA_ARCH_H
-static inline void
+static inline void
get_spinlock(lispobj *word,long value)
{
- *word=value; /* FIXME for threads */
+ *word=value; /* FIXME for threads */
}
static inline void
#error "Define threading support functions"
#else
int arch_os_thread_init(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
int arch_os_thread_cleanup(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
#endif
but without the UL, which would probably lead to 32/64-bit
errors if we simply used it here. Ugh. CSR, 2003-09-15 */
arch_set_fp_control(os_context_fp_control(context) & ~(0x7e0000UL) &
- /* KLUDGE: for some reason that I don't
- understand, by the time we get here the
- "enable denormalized traps" bit in the fp
- control word is set. Since we really don't
- want to trap every time someone types
- LEAST-POSITIVE-SINGLE-FLOAT into the repl,
- mask that bit out. -- CSR, 2003-09-15 */
- ~(0x1UL<<6));
+ /* KLUDGE: for some reason that I don't
+ understand, by the time we get here the
+ "enable denormalized traps" bit in the fp
+ control word is set. Since we really don't
+ want to trap every time someone types
+ LEAST-POSITIVE-SINGLE-FLOAT into the repl,
+ mask that bit out. -- CSR, 2003-09-15 */
+ ~(0x1UL<<6));
}
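/* For reference (assuming the Linux/Alpha <asm/fpu.h> layout of the software
 * fp control word): 0x7e0000 covers the sticky IEEE_STATUS_* exception bits
 * (bits 17..22), and 1<<6 is IEEE_TRAP_ENABLE_DNO, the denormal-operand trap
 * enable complained about above.  A minimal sketch of the same masking:
 *
 *   unsigned long fpcw = os_context_fp_control(context);
 *   fpcw &= ~0x7e0000UL;      // drop stale exception status
 *   fpcw &= ~(1UL << 6);      // don't trap on denormal operands
 *   arch_set_fp_control(fpcw);
 */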
void os_flush_icache(os_vm_address_t address, os_vm_size_t length)
* files for more information.
*/
-#define NREGS (32)
+#define NREGS (32)
#ifdef LANGUAGE_ASSEMBLY
#ifdef linux
#else
#define REG(num) num
#endif
- /* "traditional" register name and use */
- /* courtesy of <alpha/regdef.h> */
-#define reg_LIP REG(0) /* v0 */
-#define reg_A0 REG(1) /* t0 - temporary (caller-saved) */
-#define reg_A1 REG(2) /* t1 */
-#define reg_A2 REG(3) /* t2 */
-#define reg_A3 REG(4) /* t3 */
-#define reg_A4 REG(5) /* t4 */
-#define reg_A5 REG(6) /* t5 */
-#define reg_L0 REG(7) /* t6 */
-#define reg_NARGS REG(8) /* t7 */
-#define reg_CSP REG(9) /* s0 - saved (callee-saved) */
-#define reg_CFP REG(10) /* s1 */
-#define reg_OCFP REG(11) /* s2 */
-#define reg_BSP REG(12) /* s3 */
-#define reg_LEXENV REG(13) /* s4 */
-#define reg_CODE REG(14) /* s5 */
-#define reg_NULL REG(15) /* s6 = fp (frame pointer) */
-#define reg_NL0 REG(16) /* a0 - argument (caller-saved) */
-#define reg_NL1 REG(17) /* a1 */
-#define reg_NL2 REG(18) /* a2 */
-#define reg_NL3 REG(19) /* a3 */
-#define reg_NL4 REG(20) /* a4 */
-#define reg_NL5 REG(21) /* a5 */
-#define reg_ALLOC REG(22) /* t8 - more temps (caller-saved) */
-#define reg_FDEFN REG(23) /* t9 */
-#define reg_CFUNC REG(24) /* t10 */
-#define reg_NFP REG(25) /* t11 */
-#define reg_LRA REG(26) /* ra - return address */
-#define reg_L1 REG(27) /* t12, or pv - procedure variable */
-#define reg_L2 REG(28) /* at - assembler temporary */
-#define reg_GP REG(29) /* global pointer */
-#define reg_NSP REG(30) /* sp - stack pointer */
-#define reg_ZERO REG(31) /* reads as zero, writes are noops */
+ /* "traditional" register name and use */
+ /* courtesy of <alpha/regdef.h> */
+#define reg_LIP REG(0) /* v0 */
+#define reg_A0 REG(1) /* t0 - temporary (caller-saved) */
+#define reg_A1 REG(2) /* t1 */
+#define reg_A2 REG(3) /* t2 */
+#define reg_A3 REG(4) /* t3 */
+#define reg_A4 REG(5) /* t4 */
+#define reg_A5 REG(6) /* t5 */
+#define reg_L0 REG(7) /* t6 */
+#define reg_NARGS REG(8) /* t7 */
+#define reg_CSP REG(9) /* s0 - saved (callee-saved) */
+#define reg_CFP REG(10) /* s1 */
+#define reg_OCFP REG(11) /* s2 */
+#define reg_BSP REG(12) /* s3 */
+#define reg_LEXENV REG(13) /* s4 */
+#define reg_CODE REG(14) /* s5 */
+#define reg_NULL REG(15) /* s6 = fp (frame pointer) */
+#define reg_NL0 REG(16) /* a0 - argument (caller-saved) */
+#define reg_NL1 REG(17) /* a1 */
+#define reg_NL2 REG(18) /* a2 */
+#define reg_NL3 REG(19) /* a3 */
+#define reg_NL4 REG(20) /* a4 */
+#define reg_NL5 REG(21) /* a5 */
+#define reg_ALLOC REG(22) /* t8 - more temps (caller-saved) */
+#define reg_FDEFN REG(23) /* t9 */
+#define reg_CFUNC REG(24) /* t10 */
+#define reg_NFP REG(25) /* t11 */
+#define reg_LRA REG(26) /* ra - return address */
+#define reg_L1 REG(27) /* t12, or pv - procedure variable */
+#define reg_L2 REG(28) /* at - assembler temporary */
+#define reg_GP REG(29) /* global pointer */
+#define reg_NSP REG(30) /* sp - stack pointer */
+#define reg_ZERO REG(31) /* reads as zero, writes are noops */
#define REGNAMES \
#error "Define threading support functions"
#else
int arch_os_thread_init(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
int arch_os_thread_cleanup(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
#endif
void os_flush_icache(os_vm_address_t address, os_vm_size_t length)
{
#ifdef __GNUC__
- asm volatile ("imb" : : : "memory" );
+ asm volatile ("imb" : : : "memory" );
#else
/* digital CC has different syntax */
asm("imb");
extern void arch_remove_breakpoint(void *pc, unsigned long orig_inst);
extern void arch_install_interrupt_handlers(void);
extern void arch_do_displaced_inst(os_context_t *context,
- unsigned int orig_inst);
+ unsigned int orig_inst);
extern int arch_os_thread_init(struct thread *thread);
extern int arch_os_thread_cleanup(struct thread *thread);
extern lispobj funcall1(lispobj function, lispobj arg0);
extern lispobj funcall2(lispobj function, lispobj arg0, lispobj arg1);
extern lispobj funcall3(lispobj function, lispobj arg0, lispobj arg1,
- lispobj arg2);
+ lispobj arg2);
extern lispobj *component_ptr_from_pc(lispobj *pc);
#endif /* __ARCH_H__ */
struct call_frame {
#ifndef LISP_FEATURE_ALPHA
- struct call_frame *old_cont;
+ struct call_frame *old_cont;
#else
u32 old_cont;
#endif
- lispobj saved_lra;
+ lispobj saved_lra;
lispobj code;
- lispobj other_state[5];
+ lispobj other_state[5];
};
struct call_info {
{
struct thread *thread=arch_os_get_current_thread();
return (((char *) thread->control_stack_start <= (char *) pointer) &&
- ((char *) pointer < (char *) current_control_stack_pointer));
+ ((char *) pointer < (char *) current_control_stack_pointer));
}
static void
info->interrupted = 1;
if (lowtag_of(*os_context_register_addr(context, reg_CODE))
- == FUN_POINTER_LOWTAG) {
+ == FUN_POINTER_LOWTAG) {
/* We tried to call a function, but crapped out before $CODE could
* be fixed up. Probably an undefined function. */
info->frame =
- (struct call_frame *)(*os_context_register_addr(context,
- reg_OCFP));
+ (struct call_frame *)(*os_context_register_addr(context,
+ reg_OCFP));
info->lra = (lispobj)(*os_context_register_addr(context, reg_LRA));
info->code = code_pointer(info->lra);
pc = (unsigned long)native_pointer(info->lra);
}
else {
info->frame =
- (struct call_frame *)(*os_context_register_addr(context, reg_CFP));
+ (struct call_frame *)(*os_context_register_addr(context, reg_CFP));
info->code =
- code_pointer(*os_context_register_addr(context, reg_CODE));
+ code_pointer(*os_context_register_addr(context, reg_CODE));
info->lra = NIL;
pc = *os_context_pc_addr(context);
}
/* We were interrupted. Find the correct signal context. */
free = SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread)>>2;
while (free-- > 0) {
- os_context_t *context =
- thread->interrupt_contexts[free];
+ os_context_t *context =
+ thread->interrupt_contexts[free];
if ((struct call_frame *)(*os_context_register_addr(context,
- reg_CFP))
- == info->frame) {
+ reg_CFP))
+ == info->frame) {
call_info_from_context(info, context);
break;
}
backtrace(int nframes)
{
struct call_info info;
-
+
call_info_from_lisp_state(&info);
do {
string = (struct vector *) object;
printf("%s, ", (char *) string->data);
} else
- /* FIXME: broken from (VECTOR NIL) */
+ /* FIXME: broken from (VECTOR NIL) */
printf("(Not simple string??\?), ");
} else
printf("(Not other pointer??\?), ");
code = (struct code *)native_pointer(code_obj);
return (void *)((char *)code + HeaderValue(code->header)*sizeof(lispobj)
- + pc_offset);
+ + pc_offset);
}
unsigned long breakpoint_install(lispobj code_obj, int pc_offset)
}
void breakpoint_remove(lispobj code_obj, int pc_offset,
- unsigned long orig_inst)
+ unsigned long orig_inst)
{
arch_remove_breakpoint(compute_pc(code_obj, pc_offset), orig_inst);
}
void breakpoint_do_displaced_inst(os_context_t* context,
- unsigned long orig_inst)
+ unsigned long orig_inst)
{
/* on platforms with sigreturn(), we go directly back from
* arch_do_displaced_inst() to lisp code, so we need to clean up
lispobj header;
if (lowtag_of(code) != OTHER_POINTER_LOWTAG)
- return NIL;
+ return NIL;
header = *(lispobj *)(code-OTHER_POINTER_LOWTAG);
if (widetag_of(header) == CODE_HEADER_WIDETAG)
- return code;
+ return code;
else
- return code - HeaderValue(header)*sizeof(lispobj);
+ return code - HeaderValue(header)*sizeof(lispobj);
#else
return NIL;
#endif
static lispobj find_code(os_context_t *context)
{
lispobj codeptr =
- (lispobj)component_ptr_from_pc((lispobj *)(*os_context_pc_addr(context)));
+ (lispobj)component_ptr_from_pc((lispobj *)(*os_context_pc_addr(context)));
if (codeptr == 0) {
- return NIL;
+ return NIL;
} else {
- return codeptr + OTHER_POINTER_LOWTAG;
+ return codeptr + OTHER_POINTER_LOWTAG;
}
}
#endif
static long compute_offset(os_context_t *context, lispobj code)
{
if (code == NIL)
- return 0;
+ return 0;
else {
- unsigned long code_start;
- struct code *codeptr = (struct code *)native_pointer(code);
+ unsigned long code_start;
+ struct code *codeptr = (struct code *)native_pointer(code);
#ifdef parisc
- unsigned long pc = *os_context_pc_addr(context) & ~3;
+ unsigned long pc = *os_context_pc_addr(context) & ~3;
#else
- unsigned long pc = *os_context_pc_addr(context);
+ unsigned long pc = *os_context_pc_addr(context);
#endif
- code_start = (unsigned long)codeptr
- + HeaderValue(codeptr->header)*sizeof(lispobj);
- if (pc < code_start)
- return 0;
- else {
- long offset = pc - code_start;
- if (offset >= codeptr->code_size)
- return 0;
- else
- return make_fixnum(offset);
- }
+ code_start = (unsigned long)codeptr
+ + HeaderValue(codeptr->header)*sizeof(lispobj);
+ if (pc < code_start)
+ return 0;
+ else {
+ long offset = pc - code_start;
+ if (offset >= codeptr->code_size)
+ return 0;
+ else
+ return make_fixnum(offset);
+ }
}
}
thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
funcall3(SymbolFunction(HANDLE_BREAKPOINT),
- compute_offset(context, code),
- code,
- context_sap);
+ compute_offset(context, code),
+ code,
+ context_sap);
undo_fake_foreign_function_call(context);
}
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
void *handle_fun_end_breakpoint(int signal, siginfo_t *info,
- os_context_t *context)
+ os_context_t *context)
{
lispobj code, lra;
struct code *codeptr;
thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
funcall3(SymbolFunction(HANDLE_BREAKPOINT),
- compute_offset(context, code),
- code,
- alloc_sap(context));
+ compute_offset(context, code),
+ code,
+ alloc_sap(context));
lra = codeptr->constants[REAL_LRA_SLOT];
#ifdef reg_CODE
if (codeptr->constants[KNOWN_RETURN_P_SLOT] == NIL) {
- *os_context_register_addr(context, reg_CODE) = lra;
+ *os_context_register_addr(context, reg_CODE) = lra;
}
#endif
undo_fake_foreign_function_call(context);
}
#else
void *handle_fun_end_breakpoint(int signal, siginfo_t *info,
- os_context_t *context)
+ os_context_t *context)
{
lispobj code, context_sap = alloc_sap(context);
struct code *codeptr;
thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
funcall3(SymbolFunction(HANDLE_BREAKPOINT),
- compute_offset(context, code),
- code,
- context_sap);
+ compute_offset(context, code),
+ code,
+ context_sap);
undo_fake_foreign_function_call(context);
return compute_pc(codeptr->constants[REAL_LRA_SLOT],
- fixnum_value(codeptr->constants[REAL_LRA_SLOT+1]));
+ fixnum_value(codeptr->constants[REAL_LRA_SLOT+1]));
}
#endif
extern unsigned long breakpoint_install(lispobj code_obj, int pc_offset);
extern void breakpoint_remove(lispobj code_obj,
- int pc_offset,
- unsigned long orig_inst);
+ int pc_offset,
+ unsigned long orig_inst);
extern void breakpoint_do_displaced_inst(os_context_t *context,
- unsigned long orig_inst);
+ unsigned long orig_inst);
extern void handle_breakpoint(int signal, siginfo_t *info,
- os_context_t *context);
+ os_context_t *context);
extern void *handle_fun_end_breakpoint(int signal, siginfo_t *info,
- os_context_t *context);
+ os_context_t *context);
#endif
static void netbsd_init();
#endif /* __NetBSD__ */
-
+
void os_init(void)
{
os_vm_page_size = getpagesize();
int flags = MAP_PRIVATE | MAP_ANON;
if (addr)
- flags |= MAP_FIXED;
+ flags |= MAP_FIXED;
addr = mmap(addr, len, OS_VM_PROT_ALL, flags, -1, 0);
if (addr == MAP_FAILED) {
- perror("mmap");
- return NULL;
+ perror("mmap");
+ return NULL;
}
return addr;
os_invalidate(os_vm_address_t addr, os_vm_size_t len)
{
if (munmap(addr, len) == -1)
- perror("munmap");
+ perror("munmap");
}
os_vm_address_t
os_map(int fd, int offset, os_vm_address_t addr, os_vm_size_t len)
{
addr = mmap(addr, len,
- OS_VM_PROT_ALL,
- MAP_PRIVATE | MAP_FILE | MAP_FIXED,
- fd, (off_t) offset);
+ OS_VM_PROT_ALL,
+ MAP_PRIVATE | MAP_FILE | MAP_FIXED,
+ fd, (off_t) offset);
if (addr == MAP_FAILED) {
- perror("mmap");
- lose("unexpected mmap(..) failure");
+ perror("mmap");
+ lose("unexpected mmap(..) failure");
}
return addr;
os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot)
{
if (mprotect(address, length, prot) == -1) {
- perror("mprotect");
+ perror("mprotect");
}
}
\f
if(in_range_p(addr, READ_ONLY_SPACE_START, READ_ONLY_SPACE_SIZE) ||
in_range_p(addr, STATIC_SPACE_START , STATIC_SPACE_SIZE) ||
in_range_p(addr, DYNAMIC_SPACE_START , DYNAMIC_SPACE_SIZE))
- return 1;
+ return 1;
for_each_thread(th) {
- if((th->control_stack_start <= addr) && (addr < th->control_stack_end))
- return 1;
- if(in_range_p(addr, th->binding_stack_start, BINDING_STACK_SIZE))
- return 1;
+ if((th->control_stack_start <= addr) && (addr < th->control_stack_end))
+ return 1;
+ if(in_range_p(addr, th->binding_stack_start, BINDING_STACK_SIZE))
+ return 1;
}
return 0;
}
#endif
os_context_t *context = arch_os_get_context(&void_context);
- if (!gencgc_handle_wp_violation(fault_addr))
+ if (!gencgc_handle_wp_violation(fault_addr))
if(!handle_guard_page_triggered(context,fault_addr))
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
- arrange_return_to_lisp_function(context, SymbolFunction(MEMORY_FAULT_ERROR));
+ arrange_return_to_lisp_function(context, SymbolFunction(MEMORY_FAULT_ERROR));
#else
- interrupt_handle_now(signal, siginfo, context);
+ interrupt_handle_now(signal, siginfo, context);
#endif
}
void
{
SHOW("os_install_interrupt_handlers()/bsd-os/defined(GENCGC)");
undoably_install_low_level_interrupt_handler(SIG_MEMORY_FAULT,
- memory_fault_handler);
+ memory_fault_handler);
SHOW("leaving os_install_interrupt_handlers()");
}
os_context_t *context = arch_os_get_context(&void_context);
unsigned int *pc = (unsigned int *)(*os_context_pc_addr(context));
os_vm_address_t addr;
-
+
addr = arch_get_bad_addr(signal,info,context);
if(!interrupt_maybe_gc(signal, info, context))
- if(!handle_guard_page_triggered(context,addr))
- interrupt_handle_now(signal, info, context);
+ if(!handle_guard_page_triggered(context,addr))
+ interrupt_handle_now(signal, info, context);
/* Work around G5 bug; fix courtesy gbyers */
DARWIN_FIX_CONTEXT(context);
}
{
SHOW("os_install_interrupt_handlers()/bsd-os/!defined(GENCGC)");
undoably_install_low_level_interrupt_handler(SIG_MEMORY_FAULT,
- sigsegv_handler);
+ sigsegv_handler);
}
#endif /* defined GENCGC */
/* If we're older than 2.0... */
if (osrev < 200000000) {
- fprintf(stderr, "osrev = %d (needed at least 200000000).\n", osrev);
- lose("NetBSD kernel too old to run sbcl.\n");
+ fprintf(stderr, "osrev = %d (needed at least 200000000).\n", osrev);
+ lose("NetBSD kernel too old to run sbcl.\n");
}
-
+
/* NetBSD counts mmap()ed space against the process's data size limit,
* so yank it up. This might be a nasty thing to do? */
getrlimit (RLIMIT_DATA, &rl);
-- CSR, 2004-04-08 */
rl.rlim_cur = 1073741824;
if (setrlimit (RLIMIT_DATA, &rl) < 0) {
- fprintf (stderr,
- "RUNTIME WARNING: unable to raise process data size limit:\n\
+ fprintf (stderr,
+ "RUNTIME WARNING: unable to raise process data size limit:\n\
%s.\n\
The system may fail to start.\n",
- strerror(errno));
+ strerror(errno));
}
}
#endif /* __NetBSD__ */
{
lispobj *ptr;
- /* this can be called for untagged pointers as well as for
+ /* this can be called for untagged pointers as well as for
descriptors, so this assertion's not applicable
gc_assert(is_lisp_pointer(object));
*/
ptr = (lispobj *) native_pointer(object);
return ((from_space <= ptr) &&
- (ptr < from_space_free_pointer));
-}
+ (ptr < from_space_free_pointer));
+}
boolean
new_space_p(lispobj object)
/* gc_assert(is_lisp_pointer(object)); */
ptr = (lispobj *) native_pointer(object);
-
+
return ((new_space <= ptr) &&
- (ptr < new_space_free_pointer));
-}
+ (ptr < new_space_free_pointer));
+}
#else
#define from_space_p(ptr) \
- ((from_space <= ((lispobj *) ((pointer_sized_uint_t) ptr))) && \
- (((lispobj *) ((pointer_sized_uint_t) ptr))< from_space_free_pointer))
+ ((from_space <= ((lispobj *) ((pointer_sized_uint_t) ptr))) && \
+ (((lispobj *) ((pointer_sized_uint_t) ptr))< from_space_free_pointer))
#define new_space_p(ptr) \
- ((new_space <= ((lispobj *) ((pointer_sized_uint_t) ptr))) && \
- (((lispobj *) ((pointer_sized_uint_t) ptr)) < new_space_free_pointer))
+ ((new_space <= ((lispobj *) ((pointer_sized_uint_t) ptr))) && \
+ (((lispobj *) ((pointer_sized_uint_t) ptr)) < new_space_free_pointer))
#endif
tv_diff(struct timeval *x, struct timeval *y)
{
return (((double) x->tv_sec + (double) x->tv_usec * 1.0e-6) -
- ((double) y->tv_sec + (double) y->tv_usec * 1.0e-6));
+ ((double) y->tv_sec + (double) y->tv_usec * 1.0e-6));
}
#endif
#define BYTES_ZERO_BEFORE_END (1<<12)
-/* FIXME do we need this? Doesn't it duplicate lisp code in
+/* FIXME do we need this? Doesn't it duplicate lisp code in
* scrub-control-stack? */
static void
lispobj *ptr = current_control_stack_pointer;
search:
do {
- if (*ptr)
- goto fill;
- ptr++;
+ if (*ptr)
+ goto fill;
+ ptr++;
} while (((unsigned long)ptr) & (BYTES_ZERO_BEFORE_END-1));
return;
fill:
do {
- *ptr++ = 0;
+ *ptr++ = 0;
} while (((unsigned long)ptr) & (BYTES_ZERO_BEFORE_END-1));
goto search;
#endif
unsigned long size_retained;
lispobj *current_static_space_free_pointer;
- unsigned long static_space_size;
- unsigned long control_stack_size, binding_stack_size;
+ unsigned long static_space_size;
+ unsigned long control_stack_size, binding_stack_size;
sigset_t tmp, old;
struct thread *th=arch_os_get_current_thread();
struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
+ th ? th->interrupt_data : global_interrupt_data;
#ifdef PRINTNOISE
printf("[Collecting garbage ... \n");
-
+
getrusage(RUSAGE_SELF, &start_rusage);
gettimeofday(&start_tv, (struct timezone *) 0);
#endif
-
- /* it's possible that signals are blocked already if this was called
+
+ /* it's possible that signals are blocked already if this was called
* from a signal handler (e.g. with the sigsegv gc_trigger stuff) */
sigemptyset(&tmp);
sigaddset_blockable(&tmp);
thread_sigmask(SIG_BLOCK, &tmp, &old);
current_static_space_free_pointer =
- (lispobj *) ((unsigned long)
- SymbolValue(STATIC_SPACE_FREE_POINTER,0));
+ (lispobj *) ((unsigned long)
+ SymbolValue(STATIC_SPACE_FREE_POINTER,0));
/* Set up from space and new space pointers. */
#ifdef PRINTNOISE
fprintf(stderr,"from_space = %lx\n",
- (unsigned long) current_dynamic_space);
+ (unsigned long) current_dynamic_space);
#endif
if (current_dynamic_space == (lispobj *) DYNAMIC_0_SPACE_START)
- new_space = (lispobj *)DYNAMIC_1_SPACE_START;
+ new_space = (lispobj *)DYNAMIC_1_SPACE_START;
else if (current_dynamic_space == (lispobj *) DYNAMIC_1_SPACE_START)
- new_space = (lispobj *) DYNAMIC_0_SPACE_START;
+ new_space = (lispobj *) DYNAMIC_0_SPACE_START;
else {
- lose("GC lossage. Current dynamic space is bogus!\n");
+ lose("GC lossage. Current dynamic space is bogus!\n");
}
new_space_free_pointer = new_space;
#ifdef PRINTNOISE
printf("Scavenging interrupt handlers (%d bytes) ...\n",
- (int)sizeof(interrupt_handlers));
+ (int)sizeof(interrupt_handlers));
#endif
scavenge((lispobj *) data->interrupt_handlers,
- sizeof(data->interrupt_handlers) / sizeof(lispobj));
-
+ sizeof(data->interrupt_handlers) / sizeof(lispobj));
+
/* _size quantities are in units of sizeof(lispobj) - i.e. 4 */
- control_stack_size =
- current_control_stack_pointer-
- (lispobj *)th->control_stack_start;
+ control_stack_size =
+ current_control_stack_pointer-
+ (lispobj *)th->control_stack_start;
#ifdef PRINTNOISE
printf("Scavenging the control stack at %p (%ld words) ...\n",
- ((lispobj *)th->control_stack_start),
- control_stack_size);
+ ((lispobj *)th->control_stack_start),
+ control_stack_size);
#endif
scavenge(((lispobj *)th->control_stack_start), control_stack_size);
-
- binding_stack_size =
- current_binding_stack_pointer -
- (lispobj *)th->binding_stack_start;
+
+ binding_stack_size =
+ current_binding_stack_pointer -
+ (lispobj *)th->binding_stack_start;
#ifdef PRINTNOISE
printf("Scavenging the binding stack %x - %x (%d words) ...\n",
- th->binding_stack_start,current_binding_stack_pointer,
- (int)(binding_stack_size));
+ th->binding_stack_start,current_binding_stack_pointer,
+ (int)(binding_stack_size));
#endif
scavenge(((lispobj *)th->binding_stack_start), binding_stack_size);
-
- static_space_size =
- current_static_space_free_pointer - (lispobj *) STATIC_SPACE_START;
+
+ static_space_size =
+ current_static_space_free_pointer - (lispobj *) STATIC_SPACE_START;
#ifdef PRINTNOISE
printf("Scavenging static space %x - %x (%d words) ...\n",
- STATIC_SPACE_START,current_static_space_free_pointer,
- (int)(static_space_size));
+ STATIC_SPACE_START,current_static_space_free_pointer,
+ (int)(static_space_size));
#endif
scavenge(((lispobj *)STATIC_SPACE_START), static_space_size);
/* Scavenge newspace. */
#ifdef PRINTNOISE
printf("Scavenging new space (%d bytes) ...\n",
- (int)((new_space_free_pointer - new_space) * sizeof(lispobj)));
+ (int)((new_space_free_pointer - new_space) * sizeof(lispobj)));
#endif
scavenge_newspace();
printf("Flipping spaces ...\n");
#endif
- /* Maybe FIXME: it's possible that we could significantly reduce
- * RSS by zeroing the from_space or madvise(MADV_DONTNEED) or
+ /* Maybe FIXME: it's possible that we could significantly reduce
+ * RSS by zeroing the from_space or madvise(MADV_DONTNEED) or
* similar os-dependent tricks here */
os_zero((os_vm_address_t) from_space,
- (os_vm_size_t) DYNAMIC_SPACE_SIZE);
+ (os_vm_size_t) DYNAMIC_SPACE_SIZE);
current_dynamic_space = new_space;
dynamic_space_free_pointer = new_space_free_pointer;
getrusage(RUSAGE_SELF, &stop_rusage);
printf("done.]\n");
-
+
percent_retained = (((float) size_retained) /
- ((float) size_discarded)) * 100.0;
+ ((float) size_discarded)) * 100.0;
printf("Total of %ld bytes out of %ld bytes retained (%3.2f%%).\n",
- size_retained, size_discarded, percent_retained);
+ size_retained, size_discarded, percent_retained);
real_time = tv_diff(&stop_tv, &start_tv);
user_time = tv_diff(&stop_rusage.ru_utime, &start_rusage.ru_utime);
system_time = tv_diff(&stop_rusage.ru_stime, &start_rusage.ru_stime);
printf("Statistics: %10.2fs real, %10.2fs user, %10.2fs system.\n",
- real_time, user_time, system_time);
+ real_time, user_time, system_time);
gc_rate = ((float) size_retained / (float) (1<<20)) / real_time;
-
+
printf("%10.2f M bytes/sec collected.\n", gc_rate);
#endif
}
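/* Summary sketch (illustrative) of the stop-and-copy cycle above, in the
 * order the code runs it:
 *
 *   block blockable signals; pick the other semispace as new_space
 *   scavenge the roots: interrupt handlers, control stack, binding stack,
 *       static space
 *   scavenge_newspace();     // transitively copy everything reachable
 *   zero the old semispace, then make new_space the current dynamic space
 *       and new_space_free_pointer the new free pointer
 */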
here = new_space;
while (here < new_space_free_pointer) {
- /* printf("here=%lx, new_space_free_pointer=%lx\n",
- here,new_space_free_pointer); */
- next = new_space_free_pointer;
- scavenge(here, next - here);
- here = next;
+ /* printf("here=%lx, new_space_free_pointer=%lx\n",
+ here,new_space_free_pointer); */
+ next = new_space_free_pointer;
+ scavenge(here, next - here);
+ here = next;
}
/* printf("done with newspace\n"); */
}
lip_offset = (((unsigned long)1) << (N_WORD_BITS - 1)) - 1;
lip_register_pair = -1;
for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
- unsigned long reg;
- long offset;
- int index;
-
- index = boxed_registers[i];
- reg = *os_context_register_addr(context, index);
- /* would be using PTR if not for integer length issues */
- if ((reg & ~((1L<<N_LOWTAG_BITS)-1)) <= lip) {
- offset = lip - reg;
- if (offset < lip_offset) {
- lip_offset = offset;
- lip_register_pair = index;
- }
- }
+ unsigned long reg;
+ long offset;
+ int index;
+
+ index = boxed_registers[i];
+ reg = *os_context_register_addr(context, index);
+ /* would be using PTR if not for integer length issues */
+ if ((reg & ~((1L<<N_LOWTAG_BITS)-1)) <= lip) {
+ offset = lip - reg;
+ if (offset < lip_offset) {
+ lip_offset = offset;
+ lip_register_pair = index;
+ }
+ }
}
#endif /* reg_LIP */
/* Compute the PC's offset from the start of the CODE */
/* register. */
pc_code_offset =
- *os_context_pc_addr(context) -
- *os_context_register_addr(context, reg_CODE);
+ *os_context_pc_addr(context) -
+ *os_context_register_addr(context, reg_CODE);
#ifdef ARCH_HAS_NPC_REGISTER
npc_code_offset =
- *os_context_npc_addr(context) -
- *os_context_register_addr(context, reg_CODE);
-#endif
+ *os_context_npc_addr(context) -
+ *os_context_register_addr(context, reg_CODE);
+#endif
#ifdef ARCH_HAS_LINK_REGISTER
lr_code_offset =
- *os_context_lr_addr(context) -
- *os_context_register_addr(context, reg_CODE);
+ *os_context_lr_addr(context) -
+ *os_context_register_addr(context, reg_CODE);
#endif
-
+
/* Scavenge all boxed registers in the context. */
for (i = 0; i < (sizeof(boxed_registers) / sizeof(int)); i++) {
- int index;
- lispobj foo;
-
- index = boxed_registers[i];
- foo = *os_context_register_addr(context,index);
- scavenge((lispobj *) &foo, 1);
- *os_context_register_addr(context,index) = foo;
-
- /* this is unlikely to work as intended on bigendian
- * 64 bit platforms */
-
- scavenge((lispobj *)
- os_context_register_addr(context, index), 1);
+ int index;
+ lispobj foo;
+
+ index = boxed_registers[i];
+ foo = *os_context_register_addr(context,index);
+ scavenge((lispobj *) &foo, 1);
+ *os_context_register_addr(context,index) = foo;
+
+ /* this is unlikely to work as intended on bigendian
+ * 64 bit platforms */
+
+ scavenge((lispobj *)
+ os_context_register_addr(context, index), 1);
}
#ifdef reg_LIP
/* Fix the LIP */
*os_context_register_addr(context, reg_LIP) =
- *os_context_register_addr(context, lip_register_pair) + lip_offset;
+ *os_context_register_addr(context, lip_register_pair) + lip_offset;
#endif /* reg_LIP */
-
+
/* Fix the PC if it was in from space */
if (from_space_p(*os_context_pc_addr(context)))
- *os_context_pc_addr(context) =
- *os_context_register_addr(context, reg_CODE) + pc_code_offset;
+ *os_context_pc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + pc_code_offset;
#ifdef ARCH_HAS_LINK_REGISTER
- /* Fix the LR ditto; important if we're being called from
+ /* Fix the LR ditto; important if we're being called from
* an assembly routine that expects to return using blr, otherwise
* harmless */
if (from_space_p(*os_context_lr_addr(context)))
- *os_context_lr_addr(context) =
- *os_context_register_addr(context, reg_CODE) + lr_code_offset;
+ *os_context_lr_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + lr_code_offset;
#endif
#ifdef ARCH_HAS_NPC_REGISTER
if (from_space_p(*os_context_npc_addr(context)))
- *os_context_npc_addr(context) =
- *os_context_register_addr(context, reg_CODE) + npc_code_offset;
+ *os_context_npc_addr(context) =
+ *os_context_register_addr(context, reg_CODE) + npc_code_offset;
#endif
}
fprintf(stderr, "%d interrupt contexts to scan\n",index);
#endif
for (i = 0; i < index; i++) {
- context = th->interrupt_contexts[i];
- scavenge_interrupt_context(context);
+ context = th->interrupt_contexts[i];
+ scavenge_interrupt_context(context);
}
}
total_words_not_copied = 0;
start = from_space;
while (start < from_space_free_pointer) {
- lispobj object;
- int forwardp, type, nwords;
- lispobj header;
-
- object = *start;
- forwardp = is_lisp_pointer(object) && new_space_p(object);
-
- if (forwardp) {
- int tag;
- lispobj *pointer;
-
- tag = lowtag_of(object);
-
- switch (tag) {
- case LIST_POINTER_LOWTAG:
- nwords = 2;
- break;
- case INSTANCE_POINTER_LOWTAG:
- printf("Don't know about instances yet!\n");
- nwords = 1;
- break;
- case FUN_POINTER_LOWTAG:
- nwords = 1;
- break;
- case OTHER_POINTER_LOWTAG:
- pointer = (lispobj *) native_pointer(object);
- header = *pointer;
- type = widetag_of(header);
- nwords = (sizetab[type])(pointer);
- break;
- default: nwords=1; /* shut yer whinging, gcc */
- }
- } else {
- type = widetag_of(object);
- nwords = (sizetab[type])(start);
- total_words_not_copied += nwords;
- printf("%4d words not copied at 0x%16lx; ",
- nwords, (unsigned long) start);
- printf("Header word is 0x%08x\n",
- (unsigned int) object);
- }
- start += nwords;
+ lispobj object;
+ int forwardp, type, nwords;
+ lispobj header;
+
+ object = *start;
+ forwardp = is_lisp_pointer(object) && new_space_p(object);
+
+ if (forwardp) {
+ int tag;
+ lispobj *pointer;
+
+ tag = lowtag_of(object);
+
+ switch (tag) {
+ case LIST_POINTER_LOWTAG:
+ nwords = 2;
+ break;
+ case INSTANCE_POINTER_LOWTAG:
+ printf("Don't know about instances yet!\n");
+ nwords = 1;
+ break;
+ case FUN_POINTER_LOWTAG:
+ nwords = 1;
+ break;
+ case OTHER_POINTER_LOWTAG:
+ pointer = (lispobj *) native_pointer(object);
+ header = *pointer;
+ type = widetag_of(header);
+ nwords = (sizetab[type])(pointer);
+ break;
+ default: nwords=1; /* shut yer whinging, gcc */
+ }
+ } else {
+ type = widetag_of(object);
+ nwords = (sizetab[type])(start);
+ total_words_not_copied += nwords;
+ printf("%4d words not copied at 0x%16lx; ",
+ nwords, (unsigned long) start);
+ printf("Header word is 0x%08x\n",
+ (unsigned int) object);
+ }
+ start += nwords;
}
printf("%d total words not copied.\n", total_words_not_copied);
}
{
if (HeaderValue(object) == subtype_VectorValidHashing) {
*where =
- (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
+ (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
}
return 1;
/* weak pointers */
#define WEAK_POINTER_NWORDS \
- CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
+ CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
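/* i.e. the size of struct weak_pointer in lispobj-sized words, rounded up to
 * an even count (heap objects occupy two-word units); CEILING(n,2) just
 * rounds n up to a multiple of 2, e.g. CEILING(5,2) = 6. */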
static long
scav_weak_pointer(lispobj *where, lispobj object)
lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
- return NULL;
- return (gc_search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *)pointer));
+ return NULL;
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *)pointer));
}
lispobj *
lispobj* start = (lispobj*)STATIC_SPACE_START;
lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
- return NULL;
- return (gc_search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *)pointer));
+ return NULL;
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *)pointer));
}
lispobj *
lispobj *start = (lispobj *) current_dynamic_space;
lispobj *end = (lispobj *) dynamic_space_free_pointer;
if ((pointer < (void *)start) || (pointer >= (void *)end))
- return NULL;
- return (gc_search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *)pointer));
+ return NULL;
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *)pointer));
}
\f
/* initialization. if gc_init can be moved to after core load, we could
* auto_gc_trigger */
void set_auto_gc_trigger(os_vm_size_t dynamic_usage)
{
- os_vm_address_t addr=(os_vm_address_t)current_dynamic_space
- + dynamic_usage;
+ os_vm_address_t addr=(os_vm_address_t)current_dynamic_space
+ + dynamic_usage;
long length = DYNAMIC_SPACE_SIZE - dynamic_usage;
if (addr < (os_vm_address_t)dynamic_space_free_pointer) {
- fprintf(stderr,
- "set_auto_gc_trigger: tried to set gc trigger too low! (%ld < 0x%08lx)\n",
- (unsigned long)dynamic_usage,
- (unsigned long)((os_vm_address_t)dynamic_space_free_pointer
- - (os_vm_address_t)current_dynamic_space));
- lose("lost");
+ fprintf(stderr,
+ "set_auto_gc_trigger: tried to set gc trigger too low! (%ld < 0x%08lx)\n",
+ (unsigned long)dynamic_usage,
+ (unsigned long)((os_vm_address_t)dynamic_space_free_pointer
+ - (os_vm_address_t)current_dynamic_space));
+ lose("lost");
}
else if (length < 0) {
- fprintf(stderr,
- "set_auto_gc_trigger: tried to set gc trigger too high! (0x%08lx)\n",
- (unsigned long)dynamic_usage);
- lose("lost");
+ fprintf(stderr,
+ "set_auto_gc_trigger: tried to set gc trigger too high! (0x%08lx)\n",
+ (unsigned long)dynamic_usage);
+ lose("lost");
}
addr=os_round_up_to_page(addr);
{
if (current_auto_gc_trigger!=NULL){
#if defined(SUNOS) || defined(SOLARIS)/* don't want to force whole space into swapping mode... */
- os_vm_address_t addr=(os_vm_address_t)current_auto_gc_trigger;
- os_vm_size_t length=
- DYNAMIC_SPACE_SIZE + (os_vm_address_t)current_dynamic_space - addr;
+ os_vm_address_t addr=(os_vm_address_t)current_auto_gc_trigger;
+ os_vm_size_t length=
+ DYNAMIC_SPACE_SIZE + (os_vm_address_t)current_dynamic_space - addr;
- os_validate(addr,length);
+ os_validate(addr,length);
#else
- os_protect((os_vm_address_t)current_dynamic_space,
- DYNAMIC_SPACE_SIZE,
- OS_VM_PROT_ALL);
+ os_protect((os_vm_address_t)current_dynamic_space,
+ DYNAMIC_SPACE_SIZE,
+ OS_VM_PROT_ALL);
#endif
- current_auto_gc_trigger = NULL;
+ current_auto_gc_trigger = NULL;
}
}
* A saved SBCL system is a .core file; the code here helps us accept
* such a file as input.
*/
-
+
/*
* This software is part of the SBCL system. See the README file for
* more information.
struct ndir_entry *entry;
FSHOW((stderr, "/process_directory(..), count=%d\n", count));
-
+
for (entry = (struct ndir_entry *) ptr; --count>= 0; ++entry) {
- long id = entry->identifier;
- long offset = os_vm_page_size * (1 + entry->data_page);
- os_vm_address_t addr =
- (os_vm_address_t) (os_vm_page_size * entry->address);
- lispobj *free_pointer = (lispobj *) addr + entry->nwords;
- long len = os_vm_page_size * entry->page_count;
-
- if (len != 0) {
- os_vm_address_t real_addr;
- FSHOW((stderr, "/mapping %ld(0x%lx) bytes at 0x%lx\n",
- (long)len, (long)len, addr));
- real_addr = os_map(fd, offset, addr, len);
- if (real_addr != addr) {
- lose("file mapped in wrong place! "
- "(0x%08x != 0x%08lx)",
- real_addr,
- addr);
- }
- }
-
- FSHOW((stderr, "/space id = %d, free pointer = 0x%08x\n",
- id, (long)free_pointer));
-
- switch (id) {
- case DYNAMIC_CORE_SPACE_ID:
-#ifdef LISP_FEATURE_GENCGC
- if (addr != (os_vm_address_t)DYNAMIC_SPACE_START) {
- fprintf(stderr, "in core: 0x%lx; in runtime: 0x%lx \n",
- (long)addr, (long)DYNAMIC_SPACE_START);
- lose("core/runtime address mismatch: DYNAMIC_SPACE_START");
- }
+ long id = entry->identifier;
+ long offset = os_vm_page_size * (1 + entry->data_page);
+ os_vm_address_t addr =
+ (os_vm_address_t) (os_vm_page_size * entry->address);
+ lispobj *free_pointer = (lispobj *) addr + entry->nwords;
+ long len = os_vm_page_size * entry->page_count;
+
+ if (len != 0) {
+ os_vm_address_t real_addr;
+ FSHOW((stderr, "/mapping %ld(0x%lx) bytes at 0x%lx\n",
+ (long)len, (long)len, addr));
+ real_addr = os_map(fd, offset, addr, len);
+ if (real_addr != addr) {
+ lose("file mapped in wrong place! "
+ "(0x%08x != 0x%08lx)",
+ real_addr,
+ addr);
+ }
+ }
+
+ FSHOW((stderr, "/space id = %d, free pointer = 0x%08x\n",
+ id, (long)free_pointer));
+
+ switch (id) {
+ case DYNAMIC_CORE_SPACE_ID:
+#ifdef LISP_FEATURE_GENCGC
+ if (addr != (os_vm_address_t)DYNAMIC_SPACE_START) {
+ fprintf(stderr, "in core: 0x%lx; in runtime: 0x%lx \n",
+ (long)addr, (long)DYNAMIC_SPACE_START);
+ lose("core/runtime address mismatch: DYNAMIC_SPACE_START");
+ }
#else
- if ((addr != (os_vm_address_t)DYNAMIC_0_SPACE_START) &&
- (addr != (os_vm_address_t)DYNAMIC_1_SPACE_START)) {
- fprintf(stderr, "in core: 0x%lx; in runtime: 0x%lx or 0x%lx\n",
- (long)addr,
- (long)DYNAMIC_0_SPACE_START,
- (long)DYNAMIC_1_SPACE_START);
- lose("warning: core/runtime address mismatch: DYNAMIC_SPACE_START");
- }
+ if ((addr != (os_vm_address_t)DYNAMIC_0_SPACE_START) &&
+ (addr != (os_vm_address_t)DYNAMIC_1_SPACE_START)) {
+ fprintf(stderr, "in core: 0x%lx; in runtime: 0x%lx or 0x%lx\n",
+ (long)addr,
+ (long)DYNAMIC_0_SPACE_START,
+ (long)DYNAMIC_1_SPACE_START);
+ lose("warning: core/runtime address mismatch: DYNAMIC_SPACE_START");
+ }
#endif
#if defined(ALLOCATION_POINTER)
- SetSymbolValue(ALLOCATION_POINTER, (lispobj)free_pointer,0);
+ SetSymbolValue(ALLOCATION_POINTER, (lispobj)free_pointer,0);
#else
- dynamic_space_free_pointer = free_pointer;
+ dynamic_space_free_pointer = free_pointer;
#endif
- /* For stop-and-copy GC, this will be whatever the GC was
- * using at the time. With GENCGC, this will always be
- * space 0. (We checked above that for GENCGC,
- * addr==DYNAMIC_SPACE_START.) */
- current_dynamic_space = (lispobj *)addr;
- break;
- case STATIC_CORE_SPACE_ID:
- if (addr != (os_vm_address_t)STATIC_SPACE_START) {
- fprintf(stderr, "in core: 0x%lx - in runtime: 0x%lx\n",
- (long)addr, (long)STATIC_SPACE_START);
- lose("core/runtime address mismatch: STATIC_SPACE_START");
- }
- break;
- case READ_ONLY_CORE_SPACE_ID:
- if (addr != (os_vm_address_t)READ_ONLY_SPACE_START) {
- fprintf(stderr, "in core: 0x%lx - in runtime: 0x%lx\n",
- (long)addr, (long)READ_ONLY_SPACE_START);
- lose("core/runtime address mismatch: READ_ONLY_SPACE_START");
- }
- break;
- default:
- lose("unknown space ID %ld addr 0x%p", id);
- }
+ /* For stop-and-copy GC, this will be whatever the GC was
+ * using at the time. With GENCGC, this will always be
+ * space 0. (We checked above that for GENCGC,
+ * addr==DYNAMIC_SPACE_START.) */
+ current_dynamic_space = (lispobj *)addr;
+ break;
+ case STATIC_CORE_SPACE_ID:
+ if (addr != (os_vm_address_t)STATIC_SPACE_START) {
+ fprintf(stderr, "in core: 0x%lx - in runtime: 0x%lx\n",
+ (long)addr, (long)STATIC_SPACE_START);
+ lose("core/runtime address mismatch: STATIC_SPACE_START");
+ }
+ break;
+ case READ_ONLY_CORE_SPACE_ID:
+ if (addr != (os_vm_address_t)READ_ONLY_SPACE_START) {
+ fprintf(stderr, "in core: 0x%lx - in runtime: 0x%lx\n",
+ (long)addr, (long)READ_ONLY_SPACE_START);
+ lose("core/runtime address mismatch: READ_ONLY_SPACE_START");
+ }
+ break;
+ default:
+ lose("unknown space ID %ld addr %p", id, addr);
+ }
}
}
lispobj initial_function = NIL;
FSHOW((stderr, "/entering load_core_file(%s)\n", file));
if (fd < 0) {
- fprintf(stderr, "could not open file \"%s\"\n", file);
- perror("open");
- exit(1);
+ fprintf(stderr, "could not open file \"%s\"\n", file);
+ perror("open");
+ exit(1);
}
header = calloc(os_vm_page_size / sizeof(u32), sizeof(u32));
count = read(fd, header, os_vm_page_size);
if (count < os_vm_page_size) {
- lose("premature end of core file");
+ lose("premature end of core file");
}
SHOW("successfully read first page of core");
val = *ptr++;
if (val != CORE_MAGIC) {
- lose("invalid magic number in core: 0x%lx should have been 0x%x.",
- val,
- CORE_MAGIC);
+ lose("invalid magic number in core: 0x%lx should have been 0x%x.",
+ val,
+ CORE_MAGIC);
}
SHOW("found CORE_MAGIC");
while (val != END_CORE_ENTRY_TYPE_CODE) {
- val = *ptr++;
- len = *ptr++;
- remaining_len = len - 2; /* (-2 to cancel the two ++ operations) */
- FSHOW((stderr, "/val=0x%ld, remaining_len=0x%ld\n",
- (long)val, (long)remaining_len));
-
- switch (val) {
-
- case END_CORE_ENTRY_TYPE_CODE:
- SHOW("END_CORE_ENTRY_TYPE_CODE case");
- break;
-
- case VERSION_CORE_ENTRY_TYPE_CODE:
- SHOW("VERSION_CORE_ENTRY_TYPE_CODE case");
- if (*ptr != SBCL_CORE_VERSION_INTEGER) {
- lose("core file version (%d) != runtime library version (%d)",
- *ptr,
- SBCL_CORE_VERSION_INTEGER);
- }
- break;
-
- case BUILD_ID_CORE_ENTRY_TYPE_CODE:
- SHOW("BUILD_ID_CORE_ENTRY_TYPE_CODE case");
- {
- int i;
-
- FSHOW((stderr, "build_id[]=\"%s\"\n", build_id));
- FSHOW((stderr, "remaining_len = %d\n", remaining_len));
- if (remaining_len != strlen(build_id))
- goto losing_build_id;
- for (i = 0; i < remaining_len; ++i) {
- FSHOW((stderr, "ptr[%d] = char = %d, expected=%d\n",
- i, ptr[i], build_id[i]));
- if (ptr[i] != build_id[i])
- goto losing_build_id;
- }
- break;
- losing_build_id:
- /* .core files are not binary-compatible between
- * builds because we can't easily detect whether the
- * sources were patched between the time the
- * dumping-the-.core runtime was built and the time
- * that the loading-the-.core runtime was built.
- *
- * (We could easily detect whether version.lisp-expr
- * was changed, but people experimenting with patches
- * don't necessarily update version.lisp-expr.) */
-
- lose("can't load .core for different runtime, sorry");
- }
-
- case NEW_DIRECTORY_CORE_ENTRY_TYPE_CODE:
- SHOW("NEW_DIRECTORY_CORE_ENTRY_TYPE_CODE case");
- process_directory(fd,
- ptr,
+ val = *ptr++;
+ len = *ptr++;
+ remaining_len = len - 2; /* (-2 to cancel the two ++ operations) */
+ FSHOW((stderr, "/val=0x%ld, remaining_len=0x%ld\n",
+ (long)val, (long)remaining_len));
+
+ switch (val) {
+
+ case END_CORE_ENTRY_TYPE_CODE:
+ SHOW("END_CORE_ENTRY_TYPE_CODE case");
+ break;
+
+ case VERSION_CORE_ENTRY_TYPE_CODE:
+ SHOW("VERSION_CORE_ENTRY_TYPE_CODE case");
+ if (*ptr != SBCL_CORE_VERSION_INTEGER) {
+ lose("core file version (%d) != runtime library version (%d)",
+ *ptr,
+ SBCL_CORE_VERSION_INTEGER);
+ }
+ break;
+
+ case BUILD_ID_CORE_ENTRY_TYPE_CODE:
+ SHOW("BUILD_ID_CORE_ENTRY_TYPE_CODE case");
+ {
+ int i;
+
+ FSHOW((stderr, "build_id[]=\"%s\"\n", build_id));
+ FSHOW((stderr, "remaining_len = %d\n", remaining_len));
+ if (remaining_len != strlen(build_id))
+ goto losing_build_id;
+ for (i = 0; i < remaining_len; ++i) {
+ FSHOW((stderr, "ptr[%d] = char = %d, expected=%d\n",
+ i, ptr[i], build_id[i]));
+ if (ptr[i] != build_id[i])
+ goto losing_build_id;
+ }
+ break;
+ losing_build_id:
+ /* .core files are not binary-compatible between
+ * builds because we can't easily detect whether the
+ * sources were patched between the time the
+ * dumping-the-.core runtime was built and the time
+ * that the loading-the-.core runtime was built.
+ *
+ * (We could easily detect whether version.lisp-expr
+ * was changed, but people experimenting with patches
+ * don't necessarily update version.lisp-expr.) */
+
+ lose("can't load .core for different runtime, sorry");
+ }
+
+ case NEW_DIRECTORY_CORE_ENTRY_TYPE_CODE:
+ SHOW("NEW_DIRECTORY_CORE_ENTRY_TYPE_CODE case");
+ process_directory(fd,
+ ptr,
#ifndef LISP_FEATURE_ALPHA
- remaining_len / (sizeof(struct ndir_entry) /
- sizeof(long))
+ remaining_len / (sizeof(struct ndir_entry) /
+ sizeof(long))
#else
- remaining_len / (sizeof(struct ndir_entry) /
- sizeof(u32))
+ remaining_len / (sizeof(struct ndir_entry) /
+ sizeof(u32))
#endif
- );
- break;
+ );
+ break;
- case INITIAL_FUN_CORE_ENTRY_TYPE_CODE:
- SHOW("INITIAL_FUN_CORE_ENTRY_TYPE_CODE case");
- initial_function = (lispobj)*ptr;
- break;
+ case INITIAL_FUN_CORE_ENTRY_TYPE_CODE:
+ SHOW("INITIAL_FUN_CORE_ENTRY_TYPE_CODE case");
+ initial_function = (lispobj)*ptr;
+ break;
- default:
- lose("unknown core file entry: %ld", (long)val);
- }
+ default:
+ lose("unknown core file entry: %ld", (long)val);
+ }
- ptr += remaining_len;
- FSHOW((stderr, "/new ptr=%x\n", ptr));
+ ptr += remaining_len;
+ FSHOW((stderr, "/new ptr=%x\n", ptr));
}
SHOW("about to free(header)");
free(header);
SetBSP(binding+1);
#ifdef LISP_FEATURE_SB_THREAD
if(!sym->tls_index) {
- sym->tls_index=SymbolValue(FREE_TLS_INDEX,0);
- SetSymbolValue(FREE_TLS_INDEX,
- make_fixnum(fixnum_value(sym->tls_index)+1),0);
+ sym->tls_index=SymbolValue(FREE_TLS_INDEX,0);
+ SetSymbolValue(FREE_TLS_INDEX,
+ make_fixnum(fixnum_value(sym->tls_index)+1),0);
}
#endif
old_tl_value=SymbolTlValue(symbol,thread);
struct thread *thread=(struct thread *)th;
struct binding *binding;
lispobj symbol;
-
+
binding = GetBSP() - 1;
-
+
symbol = binding->symbol;
SetTlSymbolValue(symbol, binding->value,thread);
lispobj symbol;
while (target < binding) {
- binding--;
+ binding--;
- symbol = binding->symbol;
- if (symbol) {
- SetTlSymbolValue(symbol, binding->value,thread);
- binding->symbol = 0;
- }
+ symbol = binding->symbol;
+ if (symbol) {
+ SetTlSymbolValue(symbol, binding->value,thread);
+ binding->symbol = 0;
+ }
}
SetBSP(binding);
}
#define _FIXNUMP_H
static inline int fixnump(lispobj obj) {
- return((obj &
- (LOWTAG_MASK &
- (~(EVEN_FIXNUM_LOWTAG|ODD_FIXNUM_LOWTAG))))
- == 0);
+ return((obj &
+ (LOWTAG_MASK &
+ (~(EVEN_FIXNUM_LOWTAG|ODD_FIXNUM_LOWTAG))))
+ == 0);
}
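/* Worked example (assuming the usual 32-bit tag assignment: LOWTAG_MASK 7,
 * EVEN_FIXNUM_LOWTAG 0, ODD_FIXNUM_LOWTAG 4): the mask reduces to
 * 7 & ~4 = 3, so fixnump(obj) just checks that the low two bits are zero,
 * i.e. obj is an integer shifted left by two tag bits. */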
#endif
/*
- * Garbage Collection common functions for scavenging, moving and sizing
+ * Garbage Collection common functions for scavenging, moving and sizing
* objects. These are for use with both GC (stop & copy GC) and GENCGC
*/
#endif
#endif
-inline static boolean
+inline static boolean
forwarding_pointer_p(lispobj *pointer) {
- lispobj first_word=*pointer;
+ lispobj first_word=*pointer;
#ifdef LISP_FEATURE_GENCGC
return (first_word == 0x01);
#else
return (is_lisp_pointer(first_word)
- && new_space_p(first_word));
+ && new_space_p(first_word));
#endif
}
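/* A minimal sketch of the complementary operations, assuming the same
 * convention as forwarding_pointer_p() above (the real definitions may
 * differ in detail): on GENCGC the first word of a forwarded object is
 * the literal 0x01 and the new location sits in the second word, while
 * on the stop-and-copy GC the first word is simply the tagged pointer
 * to the copy in new space. */
static inline void
sketch_set_forwarding_pointer(lispobj *pointer, lispobj newspace_copy)
{
#ifdef LISP_FEATURE_GENCGC
    pointer[0] = 0x01;           /* marker tested by forwarding_pointer_p() */
    pointer[1] = newspace_copy;  /* where the object now lives */
#else
    pointer[0] = newspace_copy;  /* a new-space pointer doubles as the marker */
#endif
}

static inline lispobj
sketch_forwarding_pointer_value(lispobj *pointer)
{
#ifdef LISP_FEATURE_GENCGC
    return pointer[1];
#else
    return pointer[0];
#endif
}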
long n_words_scavenged;
for (object_ptr = start;
- object_ptr < end;
- object_ptr += n_words_scavenged) {
+ object_ptr < end;
+ object_ptr += n_words_scavenged) {
- lispobj object = *object_ptr;
+ lispobj object = *object_ptr;
#ifdef LISP_FEATURE_GENCGC
- gc_assert(!forwarding_pointer_p(object_ptr));
-#endif
- if (is_lisp_pointer(object)) {
- if (from_space_p(object)) {
- /* It currently points to old space. Check for a
- * forwarding pointer. */
- lispobj *ptr = native_pointer(object);
- if (forwarding_pointer_p(ptr)) {
- /* Yes, there's a forwarding pointer. */
- *object_ptr = LOW_WORD(forwarding_pointer_value(ptr));
- n_words_scavenged = 1;
- } else {
- /* Scavenge that pointer. */
- n_words_scavenged =
- (scavtab[widetag_of(object)])(object_ptr, object);
- }
- } else {
- /* It points somewhere other than oldspace. Leave it
- * alone. */
- n_words_scavenged = 1;
- }
- }
+ gc_assert(!forwarding_pointer_p(object_ptr));
+#endif
+ if (is_lisp_pointer(object)) {
+ if (from_space_p(object)) {
+ /* It currently points to old space. Check for a
+ * forwarding pointer. */
+ lispobj *ptr = native_pointer(object);
+ if (forwarding_pointer_p(ptr)) {
+ /* Yes, there's a forwarding pointer. */
+ *object_ptr = LOW_WORD(forwarding_pointer_value(ptr));
+ n_words_scavenged = 1;
+ } else {
+ /* Scavenge that pointer. */
+ n_words_scavenged =
+ (scavtab[widetag_of(object)])(object_ptr, object);
+ }
+ } else {
+ /* It points somewhere other than oldspace. Leave it
+ * alone. */
+ n_words_scavenged = 1;
+ }
+ }
#ifndef LISP_FEATURE_GENCGC
- /* this workaround is probably not necessary for gencgc; at least, the
- * behaviour it describes has never been reported */
- else if (n_words==1) {
- /* there are some situations where an
- other-immediate may end up in a descriptor
- register. I'm not sure whether this is
- supposed to happen, but if it does then we
- don't want to (a) barf or (b) scavenge over the
- data-block, because there isn't one. So, if
- we're checking a single word and it's anything
- other than a pointer, just hush it up */
- int type=widetag_of(object);
- n_words_scavenged=1;
-
- if ((scavtab[type]==scav_lose) ||
- (((scavtab[type])(start,object))>1)) {
- fprintf(stderr,"warning: attempted to scavenge non-descriptor value %x at %p. If you can\nreproduce this warning, send a bug report (see manual page for details)\n",
- object,start);
- }
- }
-#endif
- else if (fixnump(object)) {
- /* It's a fixnum: really easy.. */
- n_words_scavenged = 1;
- } else {
- /* It's some sort of header object or another. */
- n_words_scavenged =
- (scavtab[widetag_of(object)])(object_ptr, object);
- }
+ /* this workaround is probably not necessary for gencgc; at least, the
+ * behaviour it describes has never been reported */
+ else if (n_words==1) {
+ /* there are some situations where an
+ other-immediate may end up in a descriptor
+ register. I'm not sure whether this is
+ supposed to happen, but if it does then we
+ don't want to (a) barf or (b) scavenge over the
+ data-block, because there isn't one. So, if
+ we're checking a single word and it's anything
+ other than a pointer, just hush it up */
+ int type=widetag_of(object);
+ n_words_scavenged=1;
+
+ if ((scavtab[type]==scav_lose) ||
+ (((scavtab[type])(start,object))>1)) {
+ fprintf(stderr,"warning: attempted to scavenge non-descriptor value %x at %p. If you can\nreproduce this warning, send a bug report (see manual page for details)\n",
+ object,start);
+ }
+ }
+#endif
+ else if (fixnump(object)) {
+            /* It's a fixnum: really easy. */
+ n_words_scavenged = 1;
+ } else {
+            /* It's some sort of header object or other. */
+ n_words_scavenged =
+ (scavtab[widetag_of(object)])(object_ptr, object);
+ }
}
gc_assert(object_ptr == end);
}
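/* A minimal sketch of a typical scavtab[] handler for a plain boxed
 * object, assuming the usual header-word conventions (the handlers in
 * this file differ in detail): the header encodes the payload length,
 * every descriptor slot is scavenged in place, and the handler returns
 * the number of words consumed so the loop above can step past the
 * object. */
static long
sketch_scav_boxed(lispobj *where, lispobj header)
{
    long length = HeaderValue(header) + 1;  /* header word + payload words */
    length = CEILING(length, 2);            /* objects are dual-word aligned */
    scavenge(where + 1, length - 1);        /* fix up each boxed slot */
    return length;
}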
switch (widetag_of(*first_pointer)) {
case SIMPLE_FUN_HEADER_WIDETAG:
- copy = trans_fun_header(object);
- break;
+ copy = trans_fun_header(object);
+ break;
default:
- copy = trans_boxed(object);
- break;
+ copy = trans_boxed(object);
+ break;
}
if (copy != object) {
- /* Set forwarding pointer */
- set_forwarding_pointer(first_pointer,copy);
+ /* Set forwarding pointer */
+ set_forwarding_pointer(first_pointer,copy);
}
gc_assert(is_lisp_pointer(copy));
first = code->header;
if (forwarding_pointer_p((lispobj *)code)) {
#ifdef DEBUG_CODE_GC
- printf("Was already transported\n");
+ printf("Was already transported\n");
#endif
- return (struct code *) forwarding_pointer_value
- ((lispobj *)((pointer_sized_uint_t) code));
+ return (struct code *) forwarding_pointer_value
+ ((lispobj *)((pointer_sized_uint_t) code));
}
-
+
gc_assert(widetag_of(first) == CODE_HEADER_WIDETAG);
/* prepare to transport the code vector */
#if defined(DEBUG_CODE_GC)
printf("Old code object at 0x%08x, new code object at 0x%08x.\n",
- (unsigned long) code, (unsigned long) new_code);
+ (unsigned long) code, (unsigned long) new_code);
printf("Code object is %d words long.\n", nwords);
#endif
#ifdef LISP_FEATURE_GENCGC
if (new_code == code)
- return new_code;
+ return new_code;
#endif
displacement = l_new_code - l_code;
set_forwarding_pointer((lispobj *)code, l_new_code);
-
+
/* set forwarding pointers for all the function headers in the */
/* code object. also fix all self pointers */
prev_pointer = &new_code->entry_points;
while (fheaderl != NIL) {
- struct simple_fun *fheaderp, *nfheaderp;
- lispobj nfheaderl;
-
- fheaderp = (struct simple_fun *) native_pointer(fheaderl);
- gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+ struct simple_fun *fheaderp, *nfheaderp;
+ lispobj nfheaderl;
+
+ fheaderp = (struct simple_fun *) native_pointer(fheaderl);
+ gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
- /* Calculate the new function pointer and the new */
- /* function header. */
- nfheaderl = fheaderl + displacement;
- nfheaderp = (struct simple_fun *) native_pointer(nfheaderl);
+ /* Calculate the new function pointer and the new */
+ /* function header. */
+ nfheaderl = fheaderl + displacement;
+ nfheaderp = (struct simple_fun *) native_pointer(nfheaderl);
#ifdef DEBUG_CODE_GC
- printf("fheaderp->header (at %x) <- %x\n",
- &(fheaderp->header) , nfheaderl);
+ printf("fheaderp->header (at %x) <- %x\n",
+ &(fheaderp->header) , nfheaderl);
#endif
- set_forwarding_pointer((lispobj *)fheaderp, nfheaderl);
-
- /* fix self pointer. */
- nfheaderp->self =
+ set_forwarding_pointer((lispobj *)fheaderp, nfheaderl);
+
+ /* fix self pointer. */
+ nfheaderp->self =
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- FUN_RAW_ADDR_OFFSET +
+ FUN_RAW_ADDR_OFFSET +
#endif
- nfheaderl;
-
- *prev_pointer = nfheaderl;
+ nfheaderl;
+
+ *prev_pointer = nfheaderl;
- fheaderl = fheaderp->next;
- prev_pointer = &nfheaderp->next;
+ fheaderl = fheaderp->next;
+ prev_pointer = &nfheaderp->next;
}
os_flush_icache((os_vm_address_t) (((long *)new_code) + nheader_words),
- ncode_words * sizeof(long));
+ ncode_words * sizeof(long));
#ifdef LISP_FEATURE_GENCGC
gencgc_apply_code_fixups(code, new_code);
#endif
{
struct code *code;
long n_header_words, n_code_words, n_words;
- lispobj entry_point; /* tagged pointer to entry point */
+ lispobj entry_point; /* tagged pointer to entry point */
struct simple_fun *function_ptr; /* untagged pointer to entry point */
code = (struct code *) where;
/* Scavenge the boxed section of each function object in the
* code data block. */
for (entry_point = code->entry_points;
- entry_point != NIL;
- entry_point = function_ptr->next) {
+ entry_point != NIL;
+ entry_point = function_ptr->next) {
- gc_assert(is_lisp_pointer(entry_point));
+ gc_assert(is_lisp_pointer(entry_point));
- function_ptr = (struct simple_fun *) native_pointer(entry_point);
- gc_assert(widetag_of(function_ptr->header)==SIMPLE_FUN_HEADER_WIDETAG);
+ function_ptr = (struct simple_fun *) native_pointer(entry_point);
+ gc_assert(widetag_of(function_ptr->header)==SIMPLE_FUN_HEADER_WIDETAG);
- scavenge(&function_ptr->name, 1);
- scavenge(&function_ptr->arglist, 1);
- scavenge(&function_ptr->type, 1);
+ scavenge(&function_ptr->name, 1);
+ scavenge(&function_ptr->arglist, 1);
+ scavenge(&function_ptr->type, 1);
}
-
+
return n_words;
}
long nheader_words, ncode_words, nwords;
code = (struct code *) where;
-
+
ncode_words = fixnum_value(code->code_size);
nheader_words = HeaderValue(code->header);
nwords = ncode_words + nheader_words;
scav_return_pc_header(lispobj *where, lispobj object)
{
lose("attempted to scavenge a return PC header where=0x%08x object=0x%08x",
- (unsigned long) where,
- (unsigned long) object);
+ (unsigned long) where,
+ (unsigned long) object);
return 0; /* bogus return value to satisfy static type checking */
}
#endif /* LISP_FEATURE_X86 */
/* The function may have moved so update the raw address. But
* don't write unnecessarily. */
if (closure->fun != fun + FUN_RAW_ADDR_OFFSET)
- closure->fun = fun + FUN_RAW_ADDR_OFFSET;
+ closure->fun = fun + FUN_RAW_ADDR_OFFSET;
#endif
return 2;
}
scav_fun_header(lispobj *where, lispobj object)
{
lose("attempted to scavenge a function header where=0x%08x object=0x%08x",
- (unsigned long) where,
- (unsigned long) object);
+ (unsigned long) where,
+ (unsigned long) object);
return 0; /* bogus return value to satisfy static type checking */
}
#endif /* LISP_FEATURE_X86 */
struct simple_fun *fheader;
unsigned long offset;
struct code *code, *ncode;
-
+
fheader = (struct simple_fun *) native_pointer(object);
/* FIXME: was times 4, should it really be N_WORD_BYTES? */
offset = HeaderValue(fheader->header) * N_WORD_BYTES;
cons = (struct cons *) native_pointer(object);
/* Copy 'object'. */
- new_cons = (struct cons *)
- gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK);
+ new_cons = (struct cons *)
+ gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK);
new_cons->car = cons->car;
new_cons->cdr = cons->cdr; /* updated later */
new_list_pointer = make_lispobj(new_cons,lowtag_of(object));
/* Try to linearize the list in the cdr direction to help reduce
* paging. */
while (1) {
- lispobj new_cdr;
- struct cons *cdr_cons, *new_cdr_cons;
-
- if(lowtag_of(cdr) != LIST_POINTER_LOWTAG ||
- !from_space_p(cdr) ||
- forwarding_pointer_p((lispobj *)native_pointer(cdr)))
- break;
-
- cdr_cons = (struct cons *) native_pointer(cdr);
-
- /* Copy 'cdr'. */
- new_cdr_cons = (struct cons*)
- gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK);
- new_cdr_cons->car = cdr_cons->car;
- new_cdr_cons->cdr = cdr_cons->cdr;
- new_cdr = make_lispobj(new_cdr_cons, lowtag_of(cdr));
-
- /* Grab the cdr before it is clobbered. */
- cdr = cdr_cons->cdr;
- set_forwarding_pointer((lispobj *)cdr_cons, new_cdr);
-
- /* Update the cdr of the last cons copied into new space to
- * keep the newspace scavenge from having to do it. */
- new_cons->cdr = new_cdr;
-
- new_cons = new_cdr_cons;
+ lispobj new_cdr;
+ struct cons *cdr_cons, *new_cdr_cons;
+
+ if(lowtag_of(cdr) != LIST_POINTER_LOWTAG ||
+ !from_space_p(cdr) ||
+ forwarding_pointer_p((lispobj *)native_pointer(cdr)))
+ break;
+
+ cdr_cons = (struct cons *) native_pointer(cdr);
+
+ /* Copy 'cdr'. */
+ new_cdr_cons = (struct cons*)
+ gc_general_alloc(sizeof(struct cons),ALLOC_BOXED,ALLOC_QUICK);
+ new_cdr_cons->car = cdr_cons->car;
+ new_cdr_cons->cdr = cdr_cons->cdr;
+ new_cdr = make_lispobj(new_cdr_cons, lowtag_of(cdr));
+
+ /* Grab the cdr before it is clobbered. */
+ cdr = cdr_cons->cdr;
+ set_forwarding_pointer((lispobj *)cdr_cons, new_cdr);
+
+ /* Update the cdr of the last cons copied into new space to
+ * keep the newspace scavenge from having to do it. */
+ new_cons->cdr = new_cdr;
+
+ new_cons = new_cdr_cons;
}
return new_list_pointer;
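/* Worked example of the linearisation above: transporting the list
 * (1 2 3), whose three conses may be scattered across oldspace, copies
 * them into three consecutive conses in newspace.  Each old cons is
 * overwritten with a forwarding pointer to its copy, and only the cdr
 * links between the fresh copies are rewritten here; any oldspace
 * pointers still sitting in the cars are fixed up later by the normal
 * newspace scavenge. */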
first = (transother[widetag_of(*first_pointer)])(object);
if (first != object) {
- set_forwarding_pointer(first_pointer, first);
+ set_forwarding_pointer(first_pointer, first);
#ifdef LISP_FEATURE_GENCGC
- *where = first;
+ *where = first;
#endif
}
#ifndef LISP_FEATURE_GENCGC
lispobj layout = ((struct instance *)native_pointer(where))->slots[0];
if (!layout)
- return 1;
+ return 1;
if (forwarding_pointer_p(native_pointer(layout)))
layout = (lispobj) forwarding_pointer_value(native_pointer(layout));
fdefn = (struct fdefn *)where;
- /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
+ /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
fdefn->fun, fdefn->raw_addr)); */
- if ((char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET)
- == (char *)((unsigned long)(fdefn->raw_addr))) {
- scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
+ if ((char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET)
+ == (char *)((unsigned long)(fdefn->raw_addr))) {
+ scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
- /* Don't write unnecessarily. */
- if (fdefn->raw_addr != (char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET))
- fdefn->raw_addr = (char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET);
- /* gc.c has more casts here, which may be relevant or alternatively
- may be compiler warning defeaters. try
+ /* Don't write unnecessarily. */
+ if (fdefn->raw_addr != (char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET))
+ fdefn->raw_addr = (char *)(fdefn->fun + FUN_RAW_ADDR_OFFSET);
+ /* gc.c has more casts here, which may be relevant or alternatively
+ may be compiler warning defeaters. try
fdefn->raw_addr = ((char *) LOW_WORD(fdefn->fun)) + FUN_RAW_ADDR_OFFSET;
- */
- return sizeof(struct fdefn) / sizeof(lispobj);
+ */
+ return sizeof(struct fdefn) / sizeof(lispobj);
} else {
- return 1;
+ return 1;
}
}
#endif
vector = (struct vector *) where;
length = fixnum_value(vector->length);
- nwords = CEILING(length *
- LONG_FLOAT_SIZE
- + 2, 2);
+ nwords = CEILING(length *
+ LONG_FLOAT_SIZE
+ + 2, 2);
return nwords;
}
#endif
#define WEAK_POINTER_NWORDS \
- CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
+ CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
static lispobj
trans_weak_pointer(lispobj object)
copy = copy_object(object, WEAK_POINTER_NWORDS);
#ifndef LISP_FEATURE_GENCGC
wp = (struct weak_pointer *) native_pointer(copy);
-
+
gc_assert(widetag_of(wp->header)==WEAK_POINTER_WIDETAG);
/* Push the weak pointer onto the list of weak pointers. */
wp->next = LOW_WORD(weak_pointers);
void scan_weak_pointers(void)
{
struct weak_pointer *wp;
- for (wp = weak_pointers; wp != NULL;
- wp=(struct weak_pointer *)native_pointer(wp->next)) {
- lispobj value = wp->value;
- lispobj *first_pointer;
- gc_assert(widetag_of(wp->header)==WEAK_POINTER_WIDETAG);
- if (!(is_lisp_pointer(value) && from_space_p(value)))
- continue;
-
- /* Now, we need to check whether the object has been forwarded. If
- * it has been, the weak pointer is still good and needs to be
- * updated. Otherwise, the weak pointer needs to be nil'ed
- * out. */
-
- first_pointer = (lispobj *)native_pointer(value);
-
- if (forwarding_pointer_p(first_pointer)) {
- wp->value=
- (lispobj)LOW_WORD(forwarding_pointer_value(first_pointer));
- } else {
- /* Break it. */
- wp->value = NIL;
- wp->broken = T;
- }
+ for (wp = weak_pointers; wp != NULL;
+ wp=(struct weak_pointer *)native_pointer(wp->next)) {
+ lispobj value = wp->value;
+ lispobj *first_pointer;
+ gc_assert(widetag_of(wp->header)==WEAK_POINTER_WIDETAG);
+ if (!(is_lisp_pointer(value) && from_space_p(value)))
+ continue;
+
+ /* Now, we need to check whether the object has been forwarded. If
+ * it has been, the weak pointer is still good and needs to be
+ * updated. Otherwise, the weak pointer needs to be nil'ed
+ * out. */
+
+ first_pointer = (lispobj *)native_pointer(value);
+
+ if (forwarding_pointer_p(first_pointer)) {
+ wp->value=
+ (lispobj)LOW_WORD(forwarding_pointer_value(first_pointer));
+ } else {
+ /* Break it. */
+ wp->value = NIL;
+ wp->broken = T;
+ }
}
}
trans_lose(lispobj object)
{
lose("no transport function for object 0x%08x (widetag 0x%x)",
- (unsigned long)object,
- widetag_of(*(lispobj*)native_pointer(object)));
+ (unsigned long)object,
+ widetag_of(*(lispobj*)native_pointer(object)));
return NIL; /* bogus return value to satisfy static type checking */
}
size_lose(lispobj *where)
{
lose("no size function for object at 0x%08x (widetag 0x%x)",
- (unsigned long)where,
- widetag_of(LOW_WORD(where)));
+ (unsigned long)where,
+ widetag_of(LOW_WORD(where)));
return 1; /* bogus return value to satisfy static type checking */
}
/* Set default value in all slots of scavenge table. FIXME
* replace this gnarly sizeof with something based on
* N_WIDETAG_BITS */
- for (i = 0; i < ((sizeof scavtab)/(sizeof scavtab[0])); i++) {
- scavtab[i] = scav_lose;
+ for (i = 0; i < ((sizeof scavtab)/(sizeof scavtab[0])); i++) {
+ scavtab[i] = scav_lose;
}
/* For each type which can be selected by the lowtag alone, set
*/
for (i = 0; i < (1<<(N_WIDETAG_BITS-N_LOWTAG_BITS)); i++) {
- scavtab[EVEN_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_immediate;
- scavtab[FUN_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_fun_pointer;
- /* skipping OTHER_IMMEDIATE_0_LOWTAG */
- scavtab[LIST_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_list_pointer;
- scavtab[ODD_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_immediate;
- scavtab[INSTANCE_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_instance_pointer;
- /* skipping OTHER_IMMEDIATE_1_LOWTAG */
- scavtab[OTHER_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_other_pointer;
+ scavtab[EVEN_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_immediate;
+ scavtab[FUN_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_fun_pointer;
+ /* skipping OTHER_IMMEDIATE_0_LOWTAG */
+ scavtab[LIST_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_list_pointer;
+ scavtab[ODD_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_immediate;
+ scavtab[INSTANCE_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_instance_pointer;
+ /* skipping OTHER_IMMEDIATE_1_LOWTAG */
+ scavtab[OTHER_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = scav_other_pointer;
}
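/* Worked example of the indexing above, assuming 3-bit lowtags and
 * 8-bit widetags (N_LOWTAG_BITS == 3, N_WIDETAG_BITS == 8, as on the
 * 32-bit ports): i runs from 0 to 31, so every slot whose low three
 * bits equal LIST_POINTER_LOWTAG -- LIST_POINTER_LOWTAG,
 * LIST_POINTER_LOWTAG+8, LIST_POINTER_LOWTAG+16, ... -- gets
 * scav_list_pointer.  Dispatching on the low byte of any tagged word
 * therefore lands on the handler for its lowtag, and the per-widetag
 * assignments below overwrite only those slots that need something
 * more specific. */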
/* Other-pointer types (those selected by all eight bits of the
scavtab[SIMPLE_BIT_VECTOR_WIDETAG] = scav_vector_bit;
scavtab[SIMPLE_ARRAY_NIL_WIDETAG] = scav_vector_nil;
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG] =
- scav_vector_unsigned_byte_2;
+ scav_vector_unsigned_byte_2;
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG] =
- scav_vector_unsigned_byte_4;
+ scav_vector_unsigned_byte_4;
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG] =
- scav_vector_unsigned_byte_8;
+ scav_vector_unsigned_byte_8;
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG] =
- scav_vector_unsigned_byte_8;
+ scav_vector_unsigned_byte_8;
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG] =
- scav_vector_unsigned_byte_16;
+ scav_vector_unsigned_byte_16;
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG] =
- scav_vector_unsigned_byte_16;
+ scav_vector_unsigned_byte_16;
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG] =
- scav_vector_unsigned_byte_32;
+ scav_vector_unsigned_byte_32;
#endif
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG] =
- scav_vector_unsigned_byte_32;
+ scav_vector_unsigned_byte_32;
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG] =
- scav_vector_unsigned_byte_32;
+ scav_vector_unsigned_byte_32;
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG] =
- scav_vector_unsigned_byte_64;
+ scav_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG] =
- scav_vector_unsigned_byte_64;
+ scav_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
scavtab[SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG] =
- scav_vector_unsigned_byte_64;
+ scav_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
scavtab[SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG] = scav_vector_unsigned_byte_8;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
scavtab[SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG] =
- scav_vector_unsigned_byte_16;
+ scav_vector_unsigned_byte_16;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
scavtab[SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG] =
- scav_vector_unsigned_byte_32;
+ scav_vector_unsigned_byte_32;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
scavtab[SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG] =
- scav_vector_unsigned_byte_32;
+ scav_vector_unsigned_byte_32;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
scavtab[SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG] =
- scav_vector_unsigned_byte_64;
+ scav_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
scavtab[SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG] =
- scav_vector_unsigned_byte_64;
+ scav_vector_unsigned_byte_64;
#endif
scavtab[SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG] = scav_vector_single_float;
scavtab[SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG] = scav_vector_double_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
scavtab[SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG] =
- scav_vector_complex_single_float;
+ scav_vector_complex_single_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
scavtab[SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG] =
- scav_vector_complex_double_float;
+ scav_vector_complex_double_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
scavtab[SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG] =
- scav_vector_complex_long_float;
+ scav_vector_complex_long_float;
#endif
scavtab[COMPLEX_BASE_STRING_WIDETAG] = scav_boxed;
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
scavtab[COMPLEX_VECTOR_WIDETAG] = scav_boxed;
scavtab[COMPLEX_ARRAY_WIDETAG] = scav_boxed;
scavtab[CODE_HEADER_WIDETAG] = scav_code_header;
-#ifndef LISP_FEATURE_GENCGC /* FIXME ..._X86 ? */
+#ifndef LISP_FEATURE_GENCGC /* FIXME ..._X86 ? */
scavtab[SIMPLE_FUN_HEADER_WIDETAG] = scav_fun_header;
scavtab[RETURN_PC_HEADER_WIDETAG] = scav_return_pc_header;
#endif
/* transport other table, initialized same way as scavtab */
for (i = 0; i < ((sizeof transother)/(sizeof transother[0])); i++)
- transother[i] = trans_lose;
+ transother[i] = trans_lose;
transother[BIGNUM_WIDETAG] = trans_unboxed;
transother[RATIO_WIDETAG] = trans_boxed;
transother[SIMPLE_VECTOR_WIDETAG] = trans_vector;
transother[SIMPLE_ARRAY_NIL_WIDETAG] = trans_vector_nil;
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG] =
- trans_vector_unsigned_byte_2;
+ trans_vector_unsigned_byte_2;
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG] =
- trans_vector_unsigned_byte_4;
+ trans_vector_unsigned_byte_4;
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG] =
- trans_vector_unsigned_byte_8;
+ trans_vector_unsigned_byte_8;
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG] =
- trans_vector_unsigned_byte_8;
+ trans_vector_unsigned_byte_8;
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG] =
- trans_vector_unsigned_byte_16;
+ trans_vector_unsigned_byte_16;
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG] =
- trans_vector_unsigned_byte_16;
+ trans_vector_unsigned_byte_16;
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG] =
- trans_vector_unsigned_byte_32;
+ trans_vector_unsigned_byte_32;
#endif
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG] =
- trans_vector_unsigned_byte_32;
+ trans_vector_unsigned_byte_32;
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG] =
- trans_vector_unsigned_byte_32;
+ trans_vector_unsigned_byte_32;
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG] =
- trans_vector_unsigned_byte_64;
+ trans_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG] =
- trans_vector_unsigned_byte_64;
+ trans_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
transother[SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG] =
- trans_vector_unsigned_byte_64;
+ trans_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
transother[SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG] =
- trans_vector_unsigned_byte_8;
+ trans_vector_unsigned_byte_8;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
transother[SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG] =
- trans_vector_unsigned_byte_16;
+ trans_vector_unsigned_byte_16;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
transother[SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG] =
- trans_vector_unsigned_byte_32;
+ trans_vector_unsigned_byte_32;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
transother[SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG] =
- trans_vector_unsigned_byte_32;
+ trans_vector_unsigned_byte_32;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
transother[SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG] =
- trans_vector_unsigned_byte_64;
+ trans_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
transother[SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG] =
- trans_vector_unsigned_byte_64;
+ trans_vector_unsigned_byte_64;
#endif
transother[SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG] =
- trans_vector_single_float;
+ trans_vector_single_float;
transother[SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG] =
- trans_vector_double_float;
+ trans_vector_double_float;
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
transother[SIMPLE_ARRAY_LONG_FLOAT_WIDETAG] =
- trans_vector_long_float;
+ trans_vector_long_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
transother[SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG] =
- trans_vector_complex_single_float;
+ trans_vector_complex_single_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
transother[SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG] =
- trans_vector_complex_double_float;
+ trans_vector_complex_double_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
transother[SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG] =
- trans_vector_complex_long_float;
+ trans_vector_complex_long_float;
#endif
transother[COMPLEX_BASE_STRING_WIDETAG] = trans_boxed;
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
/* size table, initialized the same way as scavtab */
for (i = 0; i < ((sizeof sizetab)/(sizeof sizetab[0])); i++)
- sizetab[i] = size_lose;
+ sizetab[i] = size_lose;
for (i = 0; i < (1<<(N_WIDETAG_BITS-N_LOWTAG_BITS)); i++) {
- sizetab[EVEN_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = size_immediate;
- sizetab[FUN_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
- /* skipping OTHER_IMMEDIATE_0_LOWTAG */
- sizetab[LIST_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
- sizetab[ODD_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = size_immediate;
- sizetab[INSTANCE_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
- /* skipping OTHER_IMMEDIATE_1_LOWTAG */
- sizetab[OTHER_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
+ sizetab[EVEN_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = size_immediate;
+ sizetab[FUN_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
+ /* skipping OTHER_IMMEDIATE_0_LOWTAG */
+ sizetab[LIST_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
+ sizetab[ODD_FIXNUM_LOWTAG|(i<<N_LOWTAG_BITS)] = size_immediate;
+ sizetab[INSTANCE_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
+ /* skipping OTHER_IMMEDIATE_1_LOWTAG */
+ sizetab[OTHER_POINTER_LOWTAG|(i<<N_LOWTAG_BITS)] = size_pointer;
}
sizetab[BIGNUM_WIDETAG] = size_unboxed;
sizetab[RATIO_WIDETAG] = size_boxed;
sizetab[SIMPLE_VECTOR_WIDETAG] = size_vector;
sizetab[SIMPLE_ARRAY_NIL_WIDETAG] = size_vector_nil;
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG] =
- size_vector_unsigned_byte_2;
+ size_vector_unsigned_byte_2;
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG] =
- size_vector_unsigned_byte_4;
+ size_vector_unsigned_byte_4;
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG] =
- size_vector_unsigned_byte_8;
+ size_vector_unsigned_byte_8;
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG] =
- size_vector_unsigned_byte_8;
+ size_vector_unsigned_byte_8;
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG] =
- size_vector_unsigned_byte_16;
+ size_vector_unsigned_byte_16;
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG] =
- size_vector_unsigned_byte_16;
+ size_vector_unsigned_byte_16;
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG] =
- size_vector_unsigned_byte_32;
+ size_vector_unsigned_byte_32;
#endif
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG] =
- size_vector_unsigned_byte_32;
+ size_vector_unsigned_byte_32;
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG] =
- size_vector_unsigned_byte_32;
+ size_vector_unsigned_byte_32;
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG] =
- size_vector_unsigned_byte_64;
+ size_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG] =
- size_vector_unsigned_byte_64;
+ size_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
sizetab[SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG] =
- size_vector_unsigned_byte_64;
+ size_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
sizetab[SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG] = size_vector_unsigned_byte_8;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
sizetab[SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG] =
- size_vector_unsigned_byte_16;
+ size_vector_unsigned_byte_16;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
sizetab[SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG] =
- size_vector_unsigned_byte_32;
+ size_vector_unsigned_byte_32;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
sizetab[SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG] =
- size_vector_unsigned_byte_32;
+ size_vector_unsigned_byte_32;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
sizetab[SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG] =
- size_vector_unsigned_byte_64;
+ size_vector_unsigned_byte_64;
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
sizetab[SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG] =
- size_vector_unsigned_byte_64;
+ size_vector_unsigned_byte_64;
#endif
sizetab[SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG] = size_vector_single_float;
sizetab[SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG] = size_vector_double_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
sizetab[SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG] =
- size_vector_complex_single_float;
+ size_vector_complex_single_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
sizetab[SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG] =
- size_vector_complex_double_float;
+ size_vector_complex_double_float;
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
sizetab[SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG] =
- size_vector_complex_long_float;
+ size_vector_complex_long_float;
#endif
sizetab[COMPLEX_BASE_STRING_WIDETAG] = size_boxed;
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
lispobj *object = NULL;
if ( (object = search_read_only_space(pc)) )
- ;
+ ;
else if ( (object = search_static_space(pc)) )
- ;
+ ;
else
- object = search_dynamic_space(pc);
+ object = search_dynamic_space(pc);
if (object) /* if we found something */
- if (widetag_of(*object) == CODE_HEADER_WIDETAG)
- return(object);
+ if (widetag_of(*object) == CODE_HEADER_WIDETAG)
+ return(object);
return (NULL);
}
gc_search_space(lispobj *start, size_t words, lispobj *pointer)
{
while (words > 0) {
- size_t count = 1;
- lispobj thing = *start;
+ size_t count = 1;
+ lispobj thing = *start;
- /* If thing is an immediate then this is a cons. */
- if (is_lisp_pointer(thing)
- || (fixnump(thing))
- || (widetag_of(thing) == CHARACTER_WIDETAG)
+ /* If thing is an immediate then this is a cons. */
+ if (is_lisp_pointer(thing)
+ || (fixnump(thing))
+ || (widetag_of(thing) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
- || (widetag_of(thing) == SINGLE_FLOAT_WIDETAG)
+ || (widetag_of(thing) == SINGLE_FLOAT_WIDETAG)
#endif
- || (widetag_of(thing) == UNBOUND_MARKER_WIDETAG))
- count = 2;
- else
- count = (sizetab[widetag_of(thing)])(start);
+ || (widetag_of(thing) == UNBOUND_MARKER_WIDETAG))
+ count = 2;
+ else
+ count = (sizetab[widetag_of(thing)])(start);
- /* Check whether the pointer is within this object. */
- if ((pointer >= start) && (pointer < (start+count))) {
- /* found it! */
- /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
- return(start);
- }
+ /* Check whether the pointer is within this object. */
+ if ((pointer >= start) && (pointer < (start+count))) {
+ /* found it! */
+ /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
+ return(start);
+ }
- /* Round up the count. */
- count = CEILING(count,2);
+ /* Round up the count. */
+ count = CEILING(count,2);
- start += count;
- words -= count;
+ start += count;
+ words -= count;
}
return (NULL);
}
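/* A minimal usage sketch, assuming a helper of this shape is what the
 * search_*_space() functions above boil down to (the real ones may
 * differ): clip the pointer against a space's bounds, then let
 * gc_search_space() walk the objects. */
static lispobj *
sketch_search_space(lispobj *start, lispobj *end, void *pointer)
{
    if ((pointer < (void *)start) || (pointer >= (void *)end))
        return NULL;
    /* enough words to walk up to and including the word 'pointer' hits */
    return gc_search_space(start,
                           ((lispobj *)pointer + 2) - start,
                           (lispobj *)pointer);
}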
* last I tried it - dan 2003.12.21 */
#if 1
#define gc_assert(ex) do { \
- if (!(ex)) gc_abort(); \
+ if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex)
#endif
#define gc_abort() lose("GC invariant lost, file \"%s\", line %d", \
- __FILE__, __LINE__)
+ __FILE__, __LINE__)
#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
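/* CEILING(x,y) rounds x up to the next multiple of y, for y a power of
 * two: CEILING(5,2) expands to ((5 + 1) & ~1) == 6, while CEILING(6,2)
 * stays at 6.  It is used throughout the GC to keep object sizes
 * dual-word aligned. */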
/* FIXME (1) this could probably be defined using something like
* sizeof(lispobj)*floor(sizeof(struct simple_fun)/sizeof(lispobj))
* - FUN_POINTER_LOWTAG
- * as I'm reasonably sure that simple_fun->code must always be the
- * last slot in the object
+ * as I'm reasonably sure that simple_fun->code must always be the
+ * last slot in the object
* FIXME (2) it also appears in purify.c, and it has a different value
* for SPARC users in that bit
unsigned
/* This is set when the page is write-protected. This should
- * always reflect the actual write_protect status of a page.
- * (If the page is written into, we catch the exception, make
- * the page writable, and clear this flag.) */
+ * always reflect the actual write_protect status of a page.
+ * (If the page is written into, we catch the exception, make
+ * the page writable, and clear this flag.) */
write_protected :1,
- /* This flag is set when the above write_protected flag is
- * cleared by the SIGBUS handler (or SIGSEGV handler, for some
- * OSes). This is useful for re-scavenging pages that are
- * written during a GC. */
- write_protected_cleared :1,
- /* the region the page is allocated to: 0 for a free page; 1
+ /* This flag is set when the above write_protected flag is
+ * cleared by the SIGBUS handler (or SIGSEGV handler, for some
+ * OSes). This is useful for re-scavenging pages that are
+ * written during a GC. */
+ write_protected_cleared :1,
+ /* the region the page is allocated to: 0 for a free page; 1
* for boxed objects; 2 for unboxed objects. If the page is
* free the following slots are invalid (well the bytes_used
* must be 0). */
- allocated :3,
- /* If this page should not be moved during a GC then this flag
+ allocated :3,
+ /* If this page should not be moved during a GC then this flag
* is set. It's only valid during a GC for allocated pages. */
- dont_move :1,
- /* If the page is part of a large object then this flag is
+ dont_move :1,
+ /* If the page is part of a large object then this flag is
* set. No other objects should be allocated to these pages.
* This is only valid when the page is allocated. */
- large_object :1;
+ large_object :1;
/* the generation that this page belongs to. This should be valid
* for all pages that may have objects allocated, even current
long bytes_used;
/* The name of this field is not well-chosen for its actual use.
- * This is the offset from the start of the page to the start
+ * This is the offset from the start of the page to the start
* of the alloc_region which contains/contained it. It's negative or 0
*/
long first_object_offset;
long update_x86_dynamic_space_free_pointer(void);
void gc_alloc_update_page_tables(int unboxed,
- struct alloc_region *alloc_region);
+ struct alloc_region *alloc_region);
void gc_alloc_update_all_page_tables(void);
void gc_set_region_empty(struct alloc_region *region);
/*
* predicates
*/
-static inline int
+static inline int
space_matches_p(lispobj obj, long space)
{
long page_index=(void*)obj - (void *)DYNAMIC_SPACE_START;
return ((page_index >= 0)
- && ((page_index =
- ((unsigned long)page_index)/PAGE_BYTES) < NUM_PAGES)
- && (page_table[page_index].gen == space));
+ && ((page_index =
+ ((unsigned long)page_index)/PAGE_BYTES) < NUM_PAGES)
+ && (page_table[page_index].gen == space));
}
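/* A sketch of how the from_space_p()/new_space_p() predicates used by
 * the scavenging code can be phrased on top of space_matches_p(),
 * assuming from_space and new_space hold generation numbers during a
 * collection (an assumption, stated here for illustration): */
static inline boolean
sketch_from_space_p(lispobj obj) { return space_matches_p(obj, from_space); }
static inline boolean
sketch_new_space_p(lispobj obj) { return space_matches_p(obj, new_space); }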
static inline boolean
-#endif
+#endif
long index = addr-heap_base;
if (index >= 0) {
- index = ((unsigned long)index)/PAGE_BYTES;
- if (index < NUM_PAGES)
- return (index);
+ index = ((unsigned long)index)/PAGE_BYTES;
+ if (index < NUM_PAGES)
+ return (index);
}
return (-1);
long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
- && (page_table[i].gen == generation)
- && (page_table[i].write_protected == 1))
- count++;
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].gen == generation)
+ && (page_table[i].write_protected == 1))
+ count++;
return count;
}
long count = 0;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != 0)
- && (page_table[i].gen == generation))
- count++;
+ if ((page_table[i].allocated != 0)
+ && (page_table[i].gen == generation))
+ count++;
return count;
}
long i;
long count = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
- ++count;
- }
+ if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
+ ++count;
+ }
}
return count;
}
long i;
long result = 0;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
- result += page_table[i].bytes_used;
+ if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
+ result += page_table[i].bytes_used;
}
return result;
}
gen_av_mem_age(int gen)
{
if (generations[gen].bytes_allocated == 0)
- return 0.0;
+ return 0.0;
return
- ((double)generations[gen].cum_sum_bytes_allocated)
- / ((double)generations[gen].bytes_allocated);
+ ((double)generations[gen].cum_sum_bytes_allocated)
+ / ((double)generations[gen].bytes_allocated);
}
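/* Worked example: a generation holding 1000 allocated bytes whose
 * cumulative sum of allocated bytes is 1500 gets an average memory age
 * of 1500.0/1000.0 == 1.5; larger values mean the data has, on
 * average, survived more collections, which feeds into the decision to
 * raise the generation. */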
-void fpu_save(int *); /* defined in x86-assem.S */
-void fpu_restore(int *); /* defined in x86-assem.S */
+void fpu_save(int *); /* defined in x86-assem.S */
+void fpu_restore(int *); /* defined in x86-assem.S */
/* The verbose argument controls how much to print: 0 for normal
* level of detail; 1 for debugging. */
static void
/* number of generations to print */
if (verbose)
- gens = NUM_GENERATIONS+1;
+ gens = NUM_GENERATIONS+1;
else
- gens = NUM_GENERATIONS;
+ gens = NUM_GENERATIONS;
/* Print the heap stats. */
fprintf(stderr,
- " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
+ " Gen Boxed Unboxed LB LUB !move Alloc Waste Trig WP GCs Mem-age\n");
for (i = 0; i < gens; i++) {
- int j;
- int boxed_cnt = 0;
- int unboxed_cnt = 0;
- int large_boxed_cnt = 0;
- int large_unboxed_cnt = 0;
- int pinned_cnt=0;
-
- for (j = 0; j < last_free_page; j++)
- if (page_table[j].gen == i) {
-
- /* Count the number of boxed pages within the given
- * generation. */
- if (page_table[j].allocated & BOXED_PAGE_FLAG) {
- if (page_table[j].large_object)
- large_boxed_cnt++;
- else
- boxed_cnt++;
- }
- if(page_table[j].dont_move) pinned_cnt++;
- /* Count the number of unboxed pages within the given
- * generation. */
- if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
- if (page_table[j].large_object)
- large_unboxed_cnt++;
- else
- unboxed_cnt++;
- }
- }
-
- gc_assert(generations[i].bytes_allocated
- == count_generation_bytes_allocated(i));
- fprintf(stderr,
- " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n",
- i,
- boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
- pinned_cnt,
- generations[i].bytes_allocated,
- (count_generation_pages(i)*PAGE_BYTES
- - generations[i].bytes_allocated),
- generations[i].gc_trigger,
- count_write_protect_generation_pages(i),
- generations[i].num_gc,
- gen_av_mem_age(i));
+ int j;
+ int boxed_cnt = 0;
+ int unboxed_cnt = 0;
+ int large_boxed_cnt = 0;
+ int large_unboxed_cnt = 0;
+ int pinned_cnt=0;
+
+ for (j = 0; j < last_free_page; j++)
+ if (page_table[j].gen == i) {
+
+ /* Count the number of boxed pages within the given
+ * generation. */
+ if (page_table[j].allocated & BOXED_PAGE_FLAG) {
+ if (page_table[j].large_object)
+ large_boxed_cnt++;
+ else
+ boxed_cnt++;
+ }
+ if(page_table[j].dont_move) pinned_cnt++;
+ /* Count the number of unboxed pages within the given
+ * generation. */
+ if (page_table[j].allocated & UNBOXED_PAGE_FLAG) {
+ if (page_table[j].large_object)
+ large_unboxed_cnt++;
+ else
+ unboxed_cnt++;
+ }
+ }
+
+ gc_assert(generations[i].bytes_allocated
+ == count_generation_bytes_allocated(i));
+ fprintf(stderr,
+ " %1d: %5d %5d %5d %5d %5d %8ld %5ld %8ld %4ld %3d %7.4f\n",
+ i,
+ boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
+ pinned_cnt,
+ generations[i].bytes_allocated,
+ (count_generation_pages(i)*PAGE_BYTES
+ - generations[i].bytes_allocated),
+ generations[i].gc_trigger,
+ count_write_protect_generation_pages(i),
+ generations[i].num_gc,
+ gen_av_mem_age(i));
}
fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated);
/*
FSHOW((stderr,
- "/alloc_new_region for %d bytes from gen %d\n",
- nbytes, gc_alloc_generation));
+ "/alloc_new_region for %d bytes from gen %d\n",
+ nbytes, gc_alloc_generation));
*/
/* Check that the region is in a reset state. */
gc_assert((alloc_region->first_page == 0)
- && (alloc_region->last_page == -1)
- && (alloc_region->free_pointer == alloc_region->end_addr));
+ && (alloc_region->last_page == -1)
+ && (alloc_region->free_pointer == alloc_region->end_addr));
get_spinlock(&free_pages_lock,(long) alloc_region);
if (unboxed) {
- first_page =
- generations[gc_alloc_generation].alloc_unboxed_start_page;
+ first_page =
+ generations[gc_alloc_generation].alloc_unboxed_start_page;
} else {
- first_page =
- generations[gc_alloc_generation].alloc_start_page;
+ first_page =
+ generations[gc_alloc_generation].alloc_start_page;
}
last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
bytes_found=(PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
+ + PAGE_BYTES*(last_page-first_page);
/* Set up the alloc_region. */
alloc_region->first_page = first_page;
alloc_region->last_page = last_page;
alloc_region->start_addr = page_table[first_page].bytes_used
- + page_address(first_page);
+ + page_address(first_page);
alloc_region->free_pointer = alloc_region->start_addr;
alloc_region->end_addr = alloc_region->start_addr + bytes_found;
/* The first page may have already been in use. */
if (page_table[first_page].bytes_used == 0) {
- if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[first_page].allocated = BOXED_PAGE_FLAG;
- page_table[first_page].gen = gc_alloc_generation;
- page_table[first_page].large_object = 0;
- page_table[first_page].first_object_offset = 0;
+ if (unboxed)
+ page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
+ else
+ page_table[first_page].allocated = BOXED_PAGE_FLAG;
+ page_table[first_page].gen = gc_alloc_generation;
+ page_table[first_page].large_object = 0;
+ page_table[first_page].first_object_offset = 0;
}
if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
- page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ page_table[first_page].allocated |= OPEN_REGION_PAGE_FLAG;
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 0);
for (i = first_page+1; i <= last_page; i++) {
- if (unboxed)
- page_table[i].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[i].allocated = BOXED_PAGE_FLAG;
- page_table[i].gen = gc_alloc_generation;
- page_table[i].large_object = 0;
- /* This may not be necessary for unboxed regions (think it was
- * broken before!) */
- page_table[i].first_object_offset =
- alloc_region->start_addr - page_address(i);
- page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
+ if (unboxed)
+ page_table[i].allocated = UNBOXED_PAGE_FLAG;
+ else
+ page_table[i].allocated = BOXED_PAGE_FLAG;
+ page_table[i].gen = gc_alloc_generation;
+ page_table[i].large_object = 0;
+ /* This may not be necessary for unboxed regions (think it was
+ * broken before!) */
+ page_table[i].first_object_offset =
+ alloc_region->start_addr - page_address(i);
+ page_table[i].allocated |= OPEN_REGION_PAGE_FLAG ;
}
/* Bump up last_free_page. */
if (last_page+1 > last_free_page) {
- last_free_page = last_page+1;
- SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
- 0);
+ last_free_page = last_page+1;
+ SetSymbolValue(ALLOCATION_POINTER,
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),
+ 0);
}
release_spinlock(&free_pages_lock);
-
+
/* we can do this after releasing free_pages_lock */
if (gencgc_zero_check) {
- long *p;
- for (p = (long *)alloc_region->start_addr;
- p < (long *)alloc_region->end_addr; p++) {
- if (*p != 0) {
- /* KLUDGE: It would be nice to use %lx and explicit casts
- * (long) in code like this, so that it is less likely to
- * break randomly when running on a machine with different
- * word sizes. -- WHN 19991129 */
- lose("The new region at %x is not zero.", p);
- }
+ long *p;
+ for (p = (long *)alloc_region->start_addr;
+ p < (long *)alloc_region->end_addr; p++) {
+ if (*p != 0) {
+ /* KLUDGE: It would be nice to use %lx and explicit casts
+ * (long) in code like this, so that it is less likely to
+ * break randomly when running on a machine with different
+ * word sizes. -- WHN 19991129 */
+ lose("The new region at %x is not zero.", p);
+ }
}
}
/* Ignore if full. */
if (new_areas_index >= NUM_NEW_AREAS)
- return;
+ return;
switch (record_new_objects) {
case 0:
- return;
+ return;
case 1:
- if (first_page > new_areas_ignore_page)
- return;
- break;
+ if (first_page > new_areas_ignore_page)
+ return;
+ break;
case 2:
- break;
+ break;
default:
- gc_abort();
+ gc_abort();
}
new_area_start = PAGE_BYTES*first_page + offset;
/* Search backwards for a prior area that this follows from. If
found this will save adding a new area. */
for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
- unsigned area_end =
- PAGE_BYTES*((*new_areas)[i].page)
- + (*new_areas)[i].offset
- + (*new_areas)[i].size;
- /*FSHOW((stderr,
- "/add_new_area S1 %d %d %d %d\n",
- i, c, new_area_start, area_end));*/
- if (new_area_start == area_end) {
- /*FSHOW((stderr,
- "/adding to [%d] %d %d %d with %d %d %d:\n",
- i,
- (*new_areas)[i].page,
- (*new_areas)[i].offset,
- (*new_areas)[i].size,
- first_page,
- offset,
- size);*/
- (*new_areas)[i].size += size;
- return;
- }
+ unsigned area_end =
+ PAGE_BYTES*((*new_areas)[i].page)
+ + (*new_areas)[i].offset
+ + (*new_areas)[i].size;
+ /*FSHOW((stderr,
+ "/add_new_area S1 %d %d %d %d\n",
+ i, c, new_area_start, area_end));*/
+ if (new_area_start == area_end) {
+ /*FSHOW((stderr,
+ "/adding to [%d] %d %d %d with %d %d %d:\n",
+ i,
+ (*new_areas)[i].page,
+ (*new_areas)[i].offset,
+ (*new_areas)[i].size,
+ first_page,
+ offset,
+ size);*/
+ (*new_areas)[i].size += size;
+ return;
+ }
}
(*new_areas)[new_areas_index].page = first_page;
(*new_areas)[new_areas_index].offset = offset;
(*new_areas)[new_areas_index].size = size;
/*FSHOW((stderr,
- "/new_area %d page %d offset %d size %d\n",
- new_areas_index, first_page, offset, size));*/
+ "/new_area %d page %d offset %d size %d\n",
+ new_areas_index, first_page, offset, size));*/
new_areas_index++;
/* Note the max new_areas used. */
if (new_areas_index > max_new_areas)
- max_new_areas = new_areas_index;
+ max_new_areas = new_areas_index;
}
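/* Worked example of the coalescing above, with PAGE_BYTES assumed to
 * be 4096: if entry i records page 3, offset 0, size 100, its area
 * ends at byte 3*4096 + 100.  A later call with first_page == 3 and
 * offset == 100 starts exactly at that end, so the existing entry
 * simply grows by the new size instead of consuming another
 * new_areas[] slot. */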
/* Update the tables for the alloc_region. The region may be added to
/* Catch an unused alloc_region. */
if ((first_page == 0) && (alloc_region->last_page == -1))
- return;
+ return;
next_page = first_page+1;
get_spinlock(&free_pages_lock,(long) alloc_region);
if (alloc_region->free_pointer != alloc_region->start_addr) {
- /* some bytes were allocated in the region */
- orig_first_page_bytes_used = page_table[first_page].bytes_used;
-
- gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
-
- /* All the pages used need to be updated */
-
- /* Update the first page. */
-
- /* If the page was free then set up the gen, and
- * first_object_offset. */
- if (page_table[first_page].bytes_used == 0)
- gc_assert(page_table[first_page].first_object_offset == 0);
- page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
-
- if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[first_page].gen == gc_alloc_generation);
- gc_assert(page_table[first_page].large_object == 0);
-
- byte_cnt = 0;
-
- /* Calculate the number of bytes used in this page. This is not
- * always the number of new bytes, unless it was free. */
- more = 0;
- if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
- }
- page_table[first_page].bytes_used = bytes_used;
- byte_cnt += bytes_used;
-
-
- /* All the rest of the pages should be free. We need to set their
- * first_object_offset pointer to the start of the region, and set
- * the bytes_used. */
- while (more) {
- page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (unboxed)
- gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
- else
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[next_page].bytes_used == 0);
- gc_assert(page_table[next_page].gen == gc_alloc_generation);
- gc_assert(page_table[next_page].large_object == 0);
-
- gc_assert(page_table[next_page].first_object_offset ==
- alloc_region->start_addr - page_address(next_page));
-
- /* Calculate the number of bytes used in this page. */
- more = 0;
- if ((bytes_used = (alloc_region->free_pointer
- - page_address(next_page)))>PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
- }
- page_table[next_page].bytes_used = bytes_used;
- byte_cnt += bytes_used;
-
- next_page++;
- }
-
- region_size = alloc_region->free_pointer - alloc_region->start_addr;
- bytes_allocated += region_size;
- generations[gc_alloc_generation].bytes_allocated += region_size;
-
- gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
-
- /* Set the generations alloc restart page to the last page of
- * the region. */
- if (unboxed)
- generations[gc_alloc_generation].alloc_unboxed_start_page =
- next_page-1;
- else
- generations[gc_alloc_generation].alloc_start_page = next_page-1;
-
- /* Add the region to the new_areas if requested. */
- if (!unboxed)
- add_new_area(first_page,orig_first_page_bytes_used, region_size);
-
- /*
- FSHOW((stderr,
- "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
- region_size,
- gc_alloc_generation));
- */
+ /* some bytes were allocated in the region */
+ orig_first_page_bytes_used = page_table[first_page].bytes_used;
+
+ gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
+
+ /* All the pages used need to be updated */
+
+ /* Update the first page. */
+
+ /* If the page was free then set up the gen, and
+ * first_object_offset. */
+ if (page_table[first_page].bytes_used == 0)
+ gc_assert(page_table[first_page].first_object_offset == 0);
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
+
+ if (unboxed)
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
+ else
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].gen == gc_alloc_generation);
+ gc_assert(page_table[first_page].large_object == 0);
+
+ byte_cnt = 0;
+
+ /* Calculate the number of bytes used in this page. This is not
+ * always the number of new bytes, unless it was free. */
+ more = 0;
+ if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
+ more = 1;
+ }
+ page_table[first_page].bytes_used = bytes_used;
+ byte_cnt += bytes_used;
+
+
+ /* All the rest of the pages should be free. We need to set their
+ * first_object_offset pointer to the start of the region, and set
+ * the bytes_used. */
+ while (more) {
+ page_table[next_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
+ if (unboxed)
+ gc_assert(page_table[next_page].allocated==UNBOXED_PAGE_FLAG);
+ else
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[next_page].bytes_used == 0);
+ gc_assert(page_table[next_page].gen == gc_alloc_generation);
+ gc_assert(page_table[next_page].large_object == 0);
+
+ gc_assert(page_table[next_page].first_object_offset ==
+ alloc_region->start_addr - page_address(next_page));
+
+ /* Calculate the number of bytes used in this page. */
+ more = 0;
+ if ((bytes_used = (alloc_region->free_pointer
+ - page_address(next_page)))>PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
+ more = 1;
+ }
+ page_table[next_page].bytes_used = bytes_used;
+ byte_cnt += bytes_used;
+
+ next_page++;
+ }
+
+ region_size = alloc_region->free_pointer - alloc_region->start_addr;
+ bytes_allocated += region_size;
+ generations[gc_alloc_generation].bytes_allocated += region_size;
+
+ gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);
+
+ /* Set the generations alloc restart page to the last page of
+ * the region. */
+ if (unboxed)
+ generations[gc_alloc_generation].alloc_unboxed_start_page =
+ next_page-1;
+ else
+ generations[gc_alloc_generation].alloc_start_page = next_page-1;
+
+ /* Add the region to the new_areas if requested. */
+ if (!unboxed)
+ add_new_area(first_page,orig_first_page_bytes_used, region_size);
+
+ /*
+ FSHOW((stderr,
+ "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
+ region_size,
+ gc_alloc_generation));
+ */
} else {
- /* There are no bytes allocated. Unallocate the first_page if
- * there are 0 bytes_used. */
- page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
- if (page_table[first_page].bytes_used == 0)
- page_table[first_page].allocated = FREE_PAGE_FLAG;
+ /* There are no bytes allocated. Unallocate the first_page if
+ * there are 0 bytes_used. */
+ page_table[first_page].allocated &= ~(OPEN_REGION_PAGE_FLAG);
+ if (page_table[first_page].bytes_used == 0)
+ page_table[first_page].allocated = FREE_PAGE_FLAG;
}
/* Unallocate any unused pages. */
while (next_page <= alloc_region->last_page) {
- gc_assert(page_table[next_page].bytes_used == 0);
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- next_page++;
+ gc_assert(page_table[next_page].bytes_used == 0);
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ next_page++;
}
release_spinlock(&free_pages_lock);
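/* Worked example of the accounting above, with PAGE_BYTES assumed to
 * be 4096: if the region opened 1000 bytes into its first page
 * (orig_first_page_bytes_used == 1000) and free_pointer ends up 5000
 * bytes past page_address(first_page), the first page is filled to
 * bytes_used == 4096, the next page gets 5000 - 4096 == 904, byte_cnt
 * comes to 5000, and region_size == 5000 - 1000 == 4000, satisfying
 * the gc_assert that relates the two. */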
/* alloc_region is per-thread, we're ok to do this unlocked */
get_spinlock(&free_pages_lock,(long) alloc_region);
if (unboxed) {
- first_page =
- generations[gc_alloc_generation].alloc_large_unboxed_start_page;
+ first_page =
+ generations[gc_alloc_generation].alloc_large_unboxed_start_page;
} else {
- first_page = generations[gc_alloc_generation].alloc_large_start_page;
+ first_page = generations[gc_alloc_generation].alloc_large_start_page;
}
if (first_page <= alloc_region->last_page) {
- first_page = alloc_region->last_page+1;
+ first_page = alloc_region->last_page+1;
}
last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed);
gc_assert(first_page > alloc_region->last_page);
if (unboxed)
- generations[gc_alloc_generation].alloc_large_unboxed_start_page =
- last_page;
+ generations[gc_alloc_generation].alloc_large_unboxed_start_page =
+ last_page;
else
- generations[gc_alloc_generation].alloc_large_start_page = last_page;
+ generations[gc_alloc_generation].alloc_large_start_page = last_page;
/* Set up the pages. */
orig_first_page_bytes_used = page_table[first_page].bytes_used;
/* If the first page was free then set up the gen, and
* first_object_offset. */
if (page_table[first_page].bytes_used == 0) {
- if (unboxed)
- page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[first_page].allocated = BOXED_PAGE_FLAG;
- page_table[first_page].gen = gc_alloc_generation;
- page_table[first_page].first_object_offset = 0;
- page_table[first_page].large_object = 1;
+ if (unboxed)
+ page_table[first_page].allocated = UNBOXED_PAGE_FLAG;
+ else
+ page_table[first_page].allocated = BOXED_PAGE_FLAG;
+ page_table[first_page].gen = gc_alloc_generation;
+ page_table[first_page].first_object_offset = 0;
+ page_table[first_page].large_object = 1;
}
if (unboxed)
- gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == UNBOXED_PAGE_FLAG);
else
- gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[first_page].allocated == BOXED_PAGE_FLAG);
gc_assert(page_table[first_page].gen == gc_alloc_generation);
gc_assert(page_table[first_page].large_object == 1);
* always the number of new bytes, unless it was free. */
more = 0;
if ((bytes_used = nbytes+orig_first_page_bytes_used) > PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
+ bytes_used = PAGE_BYTES;
+ more = 1;
}
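+ /* A set `more' flag means the object spills past the first page; the
+  * loop below claims the additional whole pages it needs. */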
page_table[first_page].bytes_used = bytes_used;
byte_cnt += bytes_used;
* first_object_offset pointer to the start of the region, and
* set the bytes_used. */
while (more) {
- gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
- gc_assert(page_table[next_page].bytes_used == 0);
- if (unboxed)
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- else
- page_table[next_page].allocated = BOXED_PAGE_FLAG;
- page_table[next_page].gen = gc_alloc_generation;
- page_table[next_page].large_object = 1;
-
- page_table[next_page].first_object_offset =
- orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
-
- /* Calculate the number of bytes used in this page. */
- more = 0;
- if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
- bytes_used = PAGE_BYTES;
- more = 1;
- }
- page_table[next_page].bytes_used = bytes_used;
- page_table[next_page].write_protected=0;
- page_table[next_page].dont_move=0;
- byte_cnt += bytes_used;
- next_page++;
+ gc_assert(page_table[next_page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_table[next_page].bytes_used == 0);
+ if (unboxed)
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+ else
+ page_table[next_page].allocated = BOXED_PAGE_FLAG;
+ page_table[next_page].gen = gc_alloc_generation;
+ page_table[next_page].large_object = 1;
+
+ page_table[next_page].first_object_offset =
+ orig_first_page_bytes_used - PAGE_BYTES*(next_page-first_page);
+
+ /* Calculate the number of bytes used in this page. */
+ more = 0;
+ if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > PAGE_BYTES) {
+ bytes_used = PAGE_BYTES;
+ more = 1;
+ }
+ page_table[next_page].bytes_used = bytes_used;
+ page_table[next_page].write_protected=0;
+ page_table[next_page].dont_move=0;
+ byte_cnt += bytes_used;
+ next_page++;
}
gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);
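+ /* The per-page byte counts must account for exactly the requested
+  * allocation; anything else means the page-table bookkeeping above
+  * went wrong. */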
/* Add the region to the new_areas if requested. */
if (!unboxed)
- add_new_area(first_page,orig_first_page_bytes_used,nbytes);
+ add_new_area(first_page,orig_first_page_bytes_used,nbytes);
/* Bump up last_free_page */
if (last_page+1 > last_free_page) {
- last_free_page = last_page+1;
- SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
+ last_free_page = last_page+1;
+ SetSymbolValue(ALLOCATION_POINTER,
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
}
release_spinlock(&free_pages_lock);
* for a free page. */
do {
- first_page = restart_page;
- if (large_p)
- while ((first_page < NUM_PAGES)
- && (page_table[first_page].allocated != FREE_PAGE_FLAG))
- first_page++;
- else
- while (first_page < NUM_PAGES) {
- if(page_table[first_page].allocated == FREE_PAGE_FLAG)
- break;
- if((page_table[first_page].allocated ==
- (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
- (page_table[first_page].large_object == 0) &&
- (page_table[first_page].gen == gc_alloc_generation) &&
- (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
- (page_table[first_page].write_protected == 0) &&
- (page_table[first_page].dont_move == 0)) {
- break;
- }
- first_page++;
- }
-
- if (first_page >= NUM_PAGES) {
- fprintf(stderr,
- "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
- }
-
- gc_assert(page_table[first_page].write_protected == 0);
-
- last_page = first_page;
- bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
- num_pages = 1;
- while (((bytes_found < nbytes)
- || (!large_p && (num_pages < 2)))
- && (last_page < (NUM_PAGES-1))
- && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
- last_page++;
- num_pages++;
- bytes_found += PAGE_BYTES;
- gc_assert(page_table[last_page].write_protected == 0);
- }
-
- region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
- + PAGE_BYTES*(last_page-first_page);
-
- gc_assert(bytes_found == region_size);
- restart_page = last_page + 1;
+ first_page = restart_page;
+ if (large_p)
+ while ((first_page < NUM_PAGES)
+ && (page_table[first_page].allocated != FREE_PAGE_FLAG))
+ first_page++;
+ else
+ while (first_page < NUM_PAGES) {
+ if(page_table[first_page].allocated == FREE_PAGE_FLAG)
+ break;
+ if((page_table[first_page].allocated ==
+ (unboxed ? UNBOXED_PAGE_FLAG : BOXED_PAGE_FLAG)) &&
+ (page_table[first_page].large_object == 0) &&
+ (page_table[first_page].gen == gc_alloc_generation) &&
+ (page_table[first_page].bytes_used < (PAGE_BYTES-32)) &&
+ (page_table[first_page].write_protected == 0) &&
+ (page_table[first_page].dont_move == 0)) {
+ break;
+ }
+ first_page++;
+ }
+
+ if (first_page >= NUM_PAGES) {
+ fprintf(stderr,
+ "Argh! gc_find_free_space failed (first_page), nbytes=%ld.\n",
+ nbytes);
+ print_generation_stats(1);
+ lose(NULL);
+ }
+
+ gc_assert(page_table[first_page].write_protected == 0);
+
+ last_page = first_page;
+ bytes_found = PAGE_BYTES - page_table[first_page].bytes_used;
+ num_pages = 1;
+ while (((bytes_found < nbytes)
+ || (!large_p && (num_pages < 2)))
+ && (last_page < (NUM_PAGES-1))
+ && (page_table[last_page+1].allocated == FREE_PAGE_FLAG)) {
+ last_page++;
+ num_pages++;
+ bytes_found += PAGE_BYTES;
+ gc_assert(page_table[last_page].write_protected == 0);
+ }
+
+ region_size = (PAGE_BYTES - page_table[first_page].bytes_used)
+ + PAGE_BYTES*(last_page-first_page);
+
+ gc_assert(bytes_found == region_size);
+ restart_page = last_page + 1;
} while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
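+ /* Each pass either finds a contiguous run of at least nbytes or
+  * advances restart_page past the candidate it examined; running off
+  * the end of the page table with too little space is the failure
+  * case handled below. */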
/* Check for a failure */
if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
- fprintf(stderr,
- "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n",
- nbytes);
- print_generation_stats(1);
- lose(NULL);
+ fprintf(stderr,
+ "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%ld.\n",
+ nbytes);
+ print_generation_stats(1);
+ lose(NULL);
}
*restart_page_ptr=first_page;
return last_page;
void *
gc_alloc_with_region(long nbytes,int unboxed_p, struct alloc_region *my_region,
- int quick_p)
+ int quick_p)
{
void *new_free_pointer;
if(nbytes>=large_object_size)
- return gc_alloc_large(nbytes,unboxed_p,my_region);
+ return gc_alloc_large(nbytes,unboxed_p,my_region);
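+ /* Requests of large_object_size or more bypass the small-object
+  * region entirely and are given their own pages by gc_alloc_large. */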
/* Check whether there is room in the current alloc region. */
new_free_pointer = my_region->free_pointer + nbytes;
my_region->free_pointer, new_free_pointer); */
if (new_free_pointer <= my_region->end_addr) {
- /* If so then allocate from the current alloc region. */
- void *new_obj = my_region->free_pointer;
- my_region->free_pointer = new_free_pointer;
-
- /* Unless a `quick' alloc was requested, check whether the
- alloc region is almost empty. */
- if (!quick_p &&
- (my_region->end_addr - my_region->free_pointer) <= 32) {
- /* If so, finished with the current region. */
- gc_alloc_update_page_tables(unboxed_p, my_region);
- /* Set up a new region. */
- gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
- }
-
- return((void *)new_obj);
+ /* If so then allocate from the current alloc region. */
+ void *new_obj = my_region->free_pointer;
+ my_region->free_pointer = new_free_pointer;
+
+ /* Unless a `quick' alloc was requested, check whether the
+ alloc region is almost empty. */
+ if (!quick_p &&
+ (my_region->end_addr - my_region->free_pointer) <= 32) {
+ /* If so, finished with the current region. */
+ gc_alloc_update_page_tables(unboxed_p, my_region);
+ /* Set up a new region. */
+ gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
+ }
+
+ return((void *)new_obj);
}
/* Else not enough free space in the current region: retry with a
}
/* these are only used during GC: all allocation from the mutator calls
- * alloc() -> gc_alloc_with_region() with the appropriate per-thread
+ * alloc() -> gc_alloc_with_region() with the appropriate per-thread
* region */
void *
gc_general_alloc(long nbytes,int unboxed_p,int quick_p)
{
- struct alloc_region *my_region =
+ struct alloc_region *my_region =
unboxed_p ? &unboxed_region : &boxed_region;
return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
}
if (page_table[first_page].large_object) {
- /* Promote the object. */
-
- long remaining_bytes;
- long next_page;
- long bytes_freed;
- long old_bytes_used;
-
- /* Note: Any page write-protection must be removed, else a
- * later scavenge_newspace may incorrectly not scavenge these
- * pages. This would not be necessary if they are added to the
- * new areas, but let's do it for them all (they'll probably
- * be written anyway?). */
-
- gc_assert(page_table[first_page].first_object_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
-
- page_table[next_page].gen = new_space;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- if (page_table[next_page].write_protected) {
- os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[next_page].write_protected = 0;
- }
- remaining_bytes -= PAGE_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk
- * so there may be more unused pages which will be freed. */
-
- /* The object may have shrunk but shouldn't have grown. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
- page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
- /* Checks out OK, free the page. Don't need to bother zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected as they
- * should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
- bytes_freed;
- generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
- bytes_allocated -= bytes_freed;
-
- /* Add the region to the new_areas if requested. */
- add_new_area(first_page,0,nwords*N_WORD_BYTES);
-
- return(object);
+ /* Promote the object. */
+
+ long remaining_bytes;
+ long next_page;
+ long bytes_freed;
+ long old_bytes_used;
+
+ /* Note: Any page write-protection must be removed, else a
+ * later scavenge_newspace may incorrectly not scavenge these
+ * pages. This would not be necessary if they are added to the
+ * new areas, but let's do it for them all (they'll probably
+ * be written anyway?). */
+
+ gc_assert(page_table[first_page].first_object_offset == 0);
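+ /* A large object is expected to start at the very beginning of its
+  * first page, which lets the loop below walk it a whole page at a
+  * time. */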
+
+ next_page = first_page;
+ remaining_bytes = nwords*N_WORD_BYTES;
+ while (remaining_bytes > PAGE_BYTES) {
+ gc_assert(page_table[next_page].gen == from_space);
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+ gc_assert(page_table[next_page].large_object);
+ gc_assert(page_table[next_page].first_object_offset==
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+
+ page_table[next_page].gen = new_space;
+
+ /* Remove any write-protection. We should be able to rely
+ * on the write-protect flag to avoid redundant calls. */
+ if (page_table[next_page].write_protected) {
+ os_protect(page_address(next_page), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[next_page].write_protected = 0;
+ }
+ remaining_bytes -= PAGE_BYTES;
+ next_page++;
+ }
+
+ /* Now only one page remains, but the object may have shrunk
+ * so there may be more unused pages which will be freed. */
+
+ /* The object may have shrunk but shouldn't have grown. */
+ gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
+
+ page_table[next_page].gen = new_space;
+ gc_assert(page_table[next_page].allocated == BOXED_PAGE_FLAG);
+
+ /* Adjust the bytes_used. */
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].bytes_used = remaining_bytes;
+
+ bytes_freed = old_bytes_used - remaining_bytes;
+
+ /* Free any remaining pages; needs care. */
+ next_page++;
+ while ((old_bytes_used == PAGE_BYTES) &&
+ (page_table[next_page].gen == from_space) &&
+ (page_table[next_page].allocated == BOXED_PAGE_FLAG) &&
+ page_table[next_page].large_object &&
+ (page_table[next_page].first_object_offset ==
+ -(next_page - first_page)*PAGE_BYTES)) {
+ /* Checks out OK, free the page. Don't need to bother zeroing
+ * pages as this should have been done before shrinking the
+ * object. These pages shouldn't be write-protected as they
+ * should be zero filled. */
+ gc_assert(page_table[next_page].write_protected == 0);
+
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ page_table[next_page].bytes_used = 0;
+ bytes_freed += old_bytes_used;
+ next_page++;
+ }
+
+ generations[from_space].bytes_allocated -= N_WORD_BYTES*nwords +
+ bytes_freed;
+ generations[new_space].bytes_allocated += N_WORD_BYTES*nwords;
+ bytes_allocated -= bytes_freed;
+
+ /* Add the region to the new_areas if requested. */
+ add_new_area(first_page,0,nwords*N_WORD_BYTES);
+
+ return(object);
} else {
- /* Get tag of object. */
- tag = lowtag_of(object);
+ /* Get tag of object. */
+ tag = lowtag_of(object);
- /* Allocate space. */
- new = gc_quick_alloc_large(nwords*N_WORD_BYTES);
+ /* Allocate space. */
+ new = gc_quick_alloc_large(nwords*N_WORD_BYTES);
- memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
+ memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
- /* Return Lisp pointer of new object. */
- return ((lispobj) new) | tag;
+ /* Return Lisp pointer of new object. */
+ return ((lispobj) new) | tag;
}
}
gc_assert((nwords & 0x01) == 0);
if ((nwords > 1024*1024) && gencgc_verbose)
- FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));
+ FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*N_WORD_BYTES));
/* Check whether it's a large object. */
first_page = find_page_index((void *)object);
gc_assert(first_page >= 0);
if (page_table[first_page].large_object) {
- /* Promote the object. Note: Unboxed objects may have been
- * allocated to a BOXED region so it may be necessary to
- * change the region to UNBOXED. */
- long remaining_bytes;
- long next_page;
- long bytes_freed;
- long old_bytes_used;
-
- gc_assert(page_table[first_page].first_object_offset == 0);
-
- next_page = first_page;
- remaining_bytes = nwords*N_WORD_BYTES;
- while (remaining_bytes > PAGE_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset==
- -PAGE_BYTES*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
- remaining_bytes -= PAGE_BYTES;
- next_page++;
- }
-
- /* Now only one page remains, but the object may have shrunk so
- * there may be more unused pages which will be freed. */
-
- /* Object may have shrunk but shouldn't have grown - check. */
- gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
-
- page_table[next_page].gen = new_space;
- page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
-
- /* Adjust the bytes_used. */
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].bytes_used = remaining_bytes;
-
- bytes_freed = old_bytes_used - remaining_bytes;
-
- /* Free any remaining pages; needs care. */
- next_page++;
- while ((old_bytes_used == PAGE_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
- page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
- /* Checks out OK, free the page. Don't need to both zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write-protected, even if
- * boxed they should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
- }
-
- if ((bytes_freed > 0) && gencgc_verbose)
- FSHOW((stderr,
- "/copy_large_unboxed bytes_freed=%d\n",
- bytes_freed));
-
- generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
- generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
- bytes_allocated -= bytes_freed;
-
- return(object);
+ /* Promote the object. Note: Unboxed objects may have been
+ * allocated to a BOXED region so it may be necessary to
+ * change the region to UNBOXED. */
+ long remaining_bytes;
+ long next_page;
+ long bytes_freed;
+ long old_bytes_used;
+
+ gc_assert(page_table[first_page].first_object_offset == 0);
+
+ next_page = first_page;
+ remaining_bytes = nwords*N_WORD_BYTES;
+ while (remaining_bytes > PAGE_BYTES) {
+ gc_assert(page_table[next_page].gen == from_space);
+ gc_assert((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG));
+ gc_assert(page_table[next_page].large_object);
+ gc_assert(page_table[next_page].first_object_offset==
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+
+ page_table[next_page].gen = new_space;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+ remaining_bytes -= PAGE_BYTES;
+ next_page++;
+ }
+
+ /* Now only one page remains, but the object may have shrunk so
+ * there may be more unused pages which will be freed. */
+
+ /* Object may have shrunk but shouldn't have grown - check. */
+ gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
+
+ page_table[next_page].gen = new_space;
+ page_table[next_page].allocated = UNBOXED_PAGE_FLAG;
+
+ /* Adjust the bytes_used. */
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].bytes_used = remaining_bytes;
+
+ bytes_freed = old_bytes_used - remaining_bytes;
+
+ /* Free any remaining pages; needs care. */
+ next_page++;
+ while ((old_bytes_used == PAGE_BYTES) &&
+ (page_table[next_page].gen == from_space) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_table[next_page].large_object &&
+ (page_table[next_page].first_object_offset ==
+ -(next_page - first_page)*PAGE_BYTES)) {
+ /* Checks out OK, free the page. Don't need to bother zeroing
+ * pages as this should have been done before shrinking the
+ * object. These pages shouldn't be write-protected, even if
+ * boxed, as they should be zero filled. */
+ gc_assert(page_table[next_page].write_protected == 0);
+
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ page_table[next_page].bytes_used = 0;
+ bytes_freed += old_bytes_used;
+ next_page++;
+ }
+
+ if ((bytes_freed > 0) && gencgc_verbose)
+ FSHOW((stderr,
+ "/copy_large_unboxed bytes_freed=%d\n",
+ bytes_freed));
+
+ generations[from_space].bytes_allocated -= nwords*N_WORD_BYTES + bytes_freed;
+ generations[new_space].bytes_allocated += nwords*N_WORD_BYTES;
+ bytes_allocated -= bytes_freed;
+
+ return(object);
}
else {
- /* Get tag of object. */
- tag = lowtag_of(object);
+ /* Get tag of object. */
+ tag = lowtag_of(object);
- /* Allocate space. */
- new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);
+ /* Allocate space. */
+ new = gc_quick_alloc_large_unboxed(nwords*N_WORD_BYTES);
/* Copy the object. */
memcpy(new,native_pointer(object),nwords*N_WORD_BYTES);
- /* Return Lisp pointer of new object. */
- return ((lispobj) new) | tag;
+ /* Return Lisp pointer of new object. */
+ return ((lispobj) new) | tag;
}
}
int fixup_found = 0;
if (!check_code_fixups)
- return;
+ return;
ncode_words = fixnum_value(code->code_size);
nheader_words = HeaderValue(*(lispobj *)code);
/* Work through the unboxed code. */
for (p = code_start_addr; p < code_end_addr; p++) {
- void *data = *(void **)p;
- unsigned d1 = *((unsigned char *)p - 1);
- unsigned d2 = *((unsigned char *)p - 2);
- unsigned d3 = *((unsigned char *)p - 3);
- unsigned d4 = *((unsigned char *)p - 4);
+ void *data = *(void **)p;
+ unsigned d1 = *((unsigned char *)p - 1);
+ unsigned d2 = *((unsigned char *)p - 2);
+ unsigned d3 = *((unsigned char *)p - 3);
+ unsigned d4 = *((unsigned char *)p - 4);
#ifdef QSHOW
- unsigned d5 = *((unsigned char *)p - 5);
- unsigned d6 = *((unsigned char *)p - 6);
+ unsigned d5 = *((unsigned char *)p - 5);
+ unsigned d6 = *((unsigned char *)p - 6);
#endif
- /* Check for code references. */
- /* Check for a 32 bit word that looks like an absolute
- reference to within the code adea of the code object. */
- if ((data >= (code_start_addr-displacement))
- && (data < (code_end_addr-displacement))) {
- /* function header */
- if ((d4 == 0x5e)
- && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
- /* Skip the function header */
- p += 6*4 - 4 - 1;
- continue;
- }
- /* the case of PUSH imm32 */
- if (d1 == 0x68) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/PUSH $0x%.8x\n", data));
- }
- /* the case of MOV [reg-8],imm32 */
- if ((d3 == 0xc7)
- && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
- || d2==0x45 || d2==0x46 || d2==0x47)
- && (d1 == 0xf8)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
- }
- /* the case of LEA reg,[disp32] */
- if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
- }
- }
-
- /* Check for constant references. */
- /* Check for a 32 bit word that looks like an absolute
- reference to within the constant vector. Constant references
- will be aligned. */
- if ((data >= (constants_start_addr-displacement))
- && (data < (constants_end_addr-displacement))
- && (((unsigned)data & 0x3) == 0)) {
- /* Mov eax,m32 */
- if (d1 == 0xa1) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
- }
-
- /* the case of MOV m32,EAX */
- if (d1 == 0xa3) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
- }
-
- /* the case of CMP m32,imm32 */
- if ((d1 == 0x3d) && (d2 == 0x81)) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- /* XX Check this */
- FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
- }
-
- /* Check for a mod=00, r/m=101 byte. */
- if ((d1 & 0xc7) == 5) {
- /* Cmp m32,reg */
- if (d2 == 0x39) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
- }
- /* the case of CMP reg32,m32 */
- if (d2 == 0x3b) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
- }
- /* the case of MOV m32,reg32 */
- if (d2 == 0x89) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
- }
- /* the case of MOV reg32,m32 */
- if (d2 == 0x8b) {
- fixup_found = 1;
- FSHOW((stderr,
- "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
- }
- /* the case of LEA reg32,m32 */
- if (d2 == 0x8d) {
- fixup_found = 1;
- FSHOW((stderr,
- "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
- p, d6, d5, d4, d3, d2, d1, data));
- FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
- }
- }
- }
+ /* Check for code references. */
+ /* Check for a 32 bit word that looks like an absolute
+ reference to within the code area of the code object. */
+ if ((data >= (code_start_addr-displacement))
+ && (data < (code_end_addr-displacement))) {
+ /* function header */
+ if ((d4 == 0x5e)
+ && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
+ /* Skip the function header */
+ p += 6*4 - 4 - 1;
+ continue;
+ }
+ /* the case of PUSH imm32 */
+ if (d1 == 0x68) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/PUSH $0x%.8x\n", data));
+ }
+ /* the case of MOV [reg-8],imm32 */
+ if ((d3 == 0xc7)
+ && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
+ || d2==0x45 || d2==0x46 || d2==0x47)
+ && (d1 == 0xf8)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
+ }
+ /* the case of LEA reg,[disp32] */
+ if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
+ }
+ }
+
+ /* Check for constant references. */
+ /* Check for a 32 bit word that looks like an absolute
+ reference to within the constant vector. Constant references
+ will be aligned. */
+ if ((data >= (constants_start_addr-displacement))
+ && (data < (constants_end_addr-displacement))
+ && (((unsigned)data & 0x3) == 0)) {
+ /* Mov eax,m32 */
+ if (d1 == 0xa1) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
+ }
+
+ /* the case of MOV m32,EAX */
+ if (d1 == 0xa3) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
+ }
+
+ /* the case of CMP m32,imm32 */
+ if ((d1 == 0x3d) && (d2 == 0x81)) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ /* XX Check this */
+ FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
+ }
+
+ /* Check for a mod=00, r/m=101 byte. */
+ if ((d1 & 0xc7) == 5) {
+ /* Cmp m32,reg */
+ if (d2 == 0x39) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
+ }
+ /* the case of CMP reg32,m32 */
+ if (d2 == 0x3b) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
+ }
+ /* the case of MOV m32,reg32 */
+ if (d2 == 0x89) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
+ }
+ /* the case of MOV reg32,m32 */
+ if (d2 == 0x8b) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
+ }
+ /* the case of LEA reg32,m32 */
+ if (d2 == 0x8d) {
+ fixup_found = 1;
+ FSHOW((stderr,
+ "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
+ p, d6, d5, d4, d3, d2, d1, data));
+ FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
+ }
+ }
+ }
}
/* If anything was found, print some information on the code
* object. */
if (fixup_found) {
- FSHOW((stderr,
- "/compiled code object at %x: header words = %d, code words = %d\n",
- code, nheader_words, ncode_words));
- FSHOW((stderr,
- "/const start = %x, end = %x\n",
- constants_start_addr, constants_end_addr));
- FSHOW((stderr,
- "/code start = %x, end = %x\n",
- code_start_addr, code_end_addr));
+ FSHOW((stderr,
+ "/compiled code object at %x: header words = %d, code words = %d\n",
+ code, nheader_words, ncode_words));
+ FSHOW((stderr,
+ "/const start = %x, end = %x\n",
+ constants_start_addr, constants_end_addr));
+ FSHOW((stderr,
+ "/code start = %x, end = %x\n",
+ code_start_addr, code_end_addr));
}
}
nheader_words = HeaderValue(*(lispobj *)new_code);
nwords = ncode_words + nheader_words;
/* FSHOW((stderr,
- "/compiled code object at %x: header words = %d, code words = %d\n",
- new_code, nheader_words, ncode_words)); */
+ "/compiled code object at %x: header words = %d, code words = %d\n",
+ new_code, nheader_words, ncode_words)); */
constants_start_addr = (void *)new_code + 5*N_WORD_BYTES;
constants_end_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
code_start_addr = (void *)new_code + nheader_words*N_WORD_BYTES;
code_end_addr = (void *)new_code + nwords*N_WORD_BYTES;
/*
FSHOW((stderr,
- "/const start = %x, end = %x\n",
- constants_start_addr,constants_end_addr));
+ "/const start = %x, end = %x\n",
+ constants_start_addr,constants_end_addr));
FSHOW((stderr,
- "/code start = %x; end = %x\n",
- code_start_addr,code_end_addr));
+ "/code start = %x; end = %x\n",
+ code_start_addr,code_end_addr));
*/
/* The first constant should be a pointer to the fixups for this
* will be the case if the code object has been purified, for
* example) and will be an other pointer if it is valid. */
if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
- !is_lisp_pointer(fixups)) {
- /* Check for possible errors. */
- if (check_code_fixups)
- sniff_code_object(new_code, displacement);
+ !is_lisp_pointer(fixups)) {
+ /* Check for possible errors. */
+ if (check_code_fixups)
+ sniff_code_object(new_code, displacement);
- return;
+ return;
}
fixups_vector = (struct vector *)native_pointer(fixups);
/* FIXME is this always in from_space? if so, could replace this code with
* forwarding_pointer_p/forwarding_pointer_value */
if (is_lisp_pointer(fixups) &&
- (find_page_index((void*)fixups_vector) != -1) &&
- (fixups_vector->header == 0x01)) {
- /* If so, then follow it. */
- /*SHOW("following pointer to a forwarding pointer");*/
- fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
+ (find_page_index((void*)fixups_vector) != -1) &&
+ (fixups_vector->header == 0x01)) {
+ /* If so, then follow it. */
+ /*SHOW("following pointer to a forwarding pointer");*/
+ fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
}
/*SHOW("got fixups");*/
if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
- /* Got the fixups for the code block. Now work through the vector,
- and apply a fixup at each address. */
- long length = fixnum_value(fixups_vector->length);
- long i;
- for (i = 0; i < length; i++) {
- unsigned offset = fixups_vector->data[i];
- /* Now check the current value of offset. */
- unsigned old_value =
- *(unsigned *)((unsigned)code_start_addr + offset);
-
- /* If it's within the old_code object then it must be an
- * absolute fixup (relative ones are not saved) */
- if ((old_value >= (unsigned)old_code)
- && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES)))
- /* So add the dispacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) =
- old_value + displacement;
- else
- /* It is outside the old code object so it must be a
- * relative fixup (absolute fixups are not saved). So
- * subtract the displacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) =
- old_value - displacement;
- }
+ /* Got the fixups for the code block. Now work through the vector,
+ and apply a fixup at each address. */
+ long length = fixnum_value(fixups_vector->length);
+ long i;
+ for (i = 0; i < length; i++) {
+ unsigned offset = fixups_vector->data[i];
+ /* Now check the current value of offset. */
+ unsigned old_value =
+ *(unsigned *)((unsigned)code_start_addr + offset);
+
+ /* If it's within the old_code object then it must be an
+ * absolute fixup (relative ones are not saved) */
+ if ((old_value >= (unsigned)old_code)
+ && (old_value < ((unsigned)old_code + nwords*N_WORD_BYTES)))
+ /* So add the displacement. */
+ *(unsigned *)((unsigned)code_start_addr + offset) =
+ old_value + displacement;
+ else
+ /* It is outside the old code object so it must be a
+ * relative fixup (absolute fixups are not saved). So
+ * subtract the displacement. */
+ *(unsigned *)((unsigned)code_start_addr + offset) =
+ old_value - displacement;
+ }
} else {
fprintf(stderr, "widetag of fixup vector is %d\n", widetag_of(fixups_vector->header));
}
/* Check for possible errors. */
if (check_code_fixups) {
- sniff_code_object(new_code,displacement);
+ sniff_code_object(new_code,displacement);
}
}
* though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
* hash tables in the Lisp HASH-TABLE code, and nowhere else. */
if (HeaderValue(object) != subtype_VectorValidHashing)
- return 1;
+ return 1;
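+ /* With gencgc_hash disabled, moved EQ keys are not relinked here; the
+  * vector is simply re-flagged as needing a rehash on its next access. */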
if (!gencgc_hash) {
- /* This is set for backward compatibility. FIXME: Do we need
- * this any more? */
- *where =
- (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
- return 1;
+ /* This is set for backward compatibility. FIXME: Do we need
+ * this any more? */
+ *where =
+ (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
+ return 1;
}
kv_length = fixnum_value(where[1]);
/* Scavenge element 0, which may be a hash-table structure. */
scavenge(where+2, 1);
if (!is_lisp_pointer(where[2])) {
- lose("no pointer at %x in hash table", where[2]);
+ lose("no pointer at %x in hash table", where[2]);
}
hash_table = (lispobj *)native_pointer(where[2]);
/*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
if (widetag_of(hash_table->header) != INSTANCE_HEADER_WIDETAG) {
- lose("hash table not instance (%x at %x)",
- hash_table->header,
- hash_table);
+ lose("hash table not instance (%x at %x)",
+ hash_table->header,
+ hash_table);
}
/* Scavenge element 1, which should be some internal symbol that
* the hash table code reserves for marking empty slots. */
scavenge(where+3, 1);
if (!is_lisp_pointer(where[3])) {
- lose("not empty-hash-table-slot symbol pointer: %x", where[3]);
+ lose("not empty-hash-table-slot symbol pointer: %x", where[3]);
}
empty_symbol = where[3];
/* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
- SYMBOL_HEADER_WIDETAG) {
- lose("not a symbol where empty-hash-table-slot symbol expected: %x",
- *(lispobj *)native_pointer(empty_symbol));
+ SYMBOL_HEADER_WIDETAG) {
+ lose("not a symbol where empty-hash-table-slot symbol expected: %x",
+ *(lispobj *)native_pointer(empty_symbol));
}
/* Scavenge hash table, which will fix the positions of the other
/* Cross-check the kv_vector. */
if (where != (lispobj *)native_pointer(hash_table->table)) {
- lose("hash_table table!=this table %x", hash_table->table);
+ lose("hash_table table!=this table %x", hash_table->table);
}
/* WEAK-P */
/* index vector */
{
- lispobj index_vector_obj = hash_table->index_vector;
-
- if (is_lisp_pointer(index_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)) {
- index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2;
- /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
- length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
- /*FSHOW((stderr, "/length = %d\n", length));*/
- } else {
- lose("invalid index_vector %x", index_vector_obj);
- }
+ lispobj index_vector_obj = hash_table->index_vector;
+
+ if (is_lisp_pointer(index_vector_obj) &&
+ (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
+ SIMPLE_ARRAY_WORD_WIDETAG)) {
+ index_vector = ((lispobj *)native_pointer(index_vector_obj)) + 2;
+ /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
+ length = fixnum_value(((lispobj *)native_pointer(index_vector_obj))[1]);
+ /*FSHOW((stderr, "/length = %d\n", length));*/
+ } else {
+ lose("invalid index_vector %x", index_vector_obj);
+ }
}
/* next vector */
{
- lispobj next_vector_obj = hash_table->next_vector;
-
- if (is_lisp_pointer(next_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)) {
- next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2;
- /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
- next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
- /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
- } else {
- lose("invalid next_vector %x", next_vector_obj);
- }
+ lispobj next_vector_obj = hash_table->next_vector;
+
+ if (is_lisp_pointer(next_vector_obj) &&
+ (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
+ SIMPLE_ARRAY_WORD_WIDETAG)) {
+ next_vector = ((lispobj *)native_pointer(next_vector_obj)) + 2;
+ /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
+ next_vector_length = fixnum_value(((lispobj *)native_pointer(next_vector_obj))[1]);
+ /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
+ } else {
+ lose("invalid next_vector %x", next_vector_obj);
+ }
}
/* maybe hash vector */
{
- lispobj hash_vector_obj = hash_table->hash_vector;
-
- if (is_lisp_pointer(hash_vector_obj) &&
- (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
- SIMPLE_ARRAY_WORD_WIDETAG)){
- hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2;
- /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
- gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
- == next_vector_length);
- } else {
- hash_vector = NULL;
- /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
- }
+ lispobj hash_vector_obj = hash_table->hash_vector;
+
+ if (is_lisp_pointer(hash_vector_obj) &&
+ (widetag_of(*(lispobj *)native_pointer(hash_vector_obj)) ==
+ SIMPLE_ARRAY_WORD_WIDETAG)){
+ hash_vector = ((lispobj *)native_pointer(hash_vector_obj)) + 2;
+ /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
+ gc_assert(fixnum_value(((lispobj *)native_pointer(hash_vector_obj))[1])
+ == next_vector_length);
+ } else {
+ hash_vector = NULL;
+ /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
+ }
}
/* These lengths could be different as the index_vector can be a
/* Work through the KV vector. */
{
- long i;
- for (i = 1; i < next_vector_length; i++) {
- lispobj old_key = kv_vector[2*i];
+ long i;
+ for (i = 1; i < next_vector_length; i++) {
+ lispobj old_key = kv_vector[2*i];
#if N_WORD_BITS == 32
- unsigned long old_index = (old_key & 0x1fffffff)%length;
+ unsigned long old_index = (old_key & 0x1fffffff)%length;
#elif N_WORD_BITS == 64
- unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
+ unsigned long old_index = (old_key & 0x1fffffffffffffff)%length;
#endif
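+ /* old_index is the key's bucket before scavenging; it is compared
+  * with the index recomputed from the (possibly moved) key below to
+  * spot EQ-hashed entries whose bucket has changed. */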
- /* Scavenge the key and value. */
- scavenge(&kv_vector[2*i],2);
+ /* Scavenge the key and value. */
+ scavenge(&kv_vector[2*i],2);
- /* Check whether the key has moved and is EQ based. */
- {
- lispobj new_key = kv_vector[2*i];
+ /* Check whether the key has moved and is EQ based. */
+ {
+ lispobj new_key = kv_vector[2*i];
#if N_WORD_BITS == 32
- unsigned long new_index = (new_key & 0x1fffffff)%length;
+ unsigned long new_index = (new_key & 0x1fffffff)%length;
#elif N_WORD_BITS == 64
- unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
+ unsigned long new_index = (new_key & 0x1fffffffffffffff)%length;
#endif
- if ((old_index != new_index) &&
- ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
- ((new_key != empty_symbol) ||
- (kv_vector[2*i] != empty_symbol))) {
-
- /*FSHOW((stderr,
- "* EQ key %d moved from %x to %x; index %d to %d\n",
- i, old_key, new_key, old_index, new_index));*/
-
- if (index_vector[old_index] != 0) {
- /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
-
- /* Unlink the key from the old_index chain. */
- if (index_vector[old_index] == i) {
- /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
- index_vector[old_index] = next_vector[i];
- /* Link it into the needing rehash chain. */
- next_vector[i] = fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(i);
- /*SHOW("P2");*/
- } else {
- unsigned prior = index_vector[old_index];
- unsigned next = next_vector[prior];
-
- /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
-
- while (next != 0) {
- /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
- if (next == i) {
- /* Unlink it. */
- next_vector[prior] = next_vector[next];
- /* Link it into the needing rehash
- * chain. */
- next_vector[next] =
- fixnum_value(hash_table->needing_rehash);
- hash_table->needing_rehash = make_fixnum(next);
- /*SHOW("/P3");*/
- break;
- }
- prior = next;
- next = next_vector[next];
- }
- }
- }
- }
- }
- }
+ if ((old_index != new_index) &&
+ ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
+ ((new_key != empty_symbol) ||
+ (kv_vector[2*i] != empty_symbol))) {
+
+ /*FSHOW((stderr,
+ "* EQ key %d moved from %x to %x; index %d to %d\n",
+ i, old_key, new_key, old_index, new_index));*/
+
+ if (index_vector[old_index] != 0) {
+ /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
+
+ /* Unlink the key from the old_index chain. */
+ if (index_vector[old_index] == i) {
+ /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
+ index_vector[old_index] = next_vector[i];
+ /* Link it into the needing rehash chain. */
+ next_vector[i] = fixnum_value(hash_table->needing_rehash);
+ hash_table->needing_rehash = make_fixnum(i);
+ /*SHOW("P2");*/
+ } else {
+ unsigned prior = index_vector[old_index];
+ unsigned next = next_vector[prior];
+
+ /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
+
+ while (next != 0) {
+ /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
+ if (next == i) {
+ /* Unlink it. */
+ next_vector[prior] = next_vector[next];
+ /* Link it into the needing rehash
+ * chain. */
+ next_vector[next] =
+ fixnum_value(hash_table->needing_rehash);
+ hash_table->needing_rehash = make_fixnum(next);
+ /*SHOW("/P3");*/
+ break;
+ }
+ prior = next;
+ next = next_vector[next];
+ }
+ }
+ }
+ }
+ }
+ }
}
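+ /* Return the object's size in words (the two header words plus
+  * kv_length data words, rounded up to an even count) so the
+  * scavenger can step over the whole vector. */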
return (CEILING(kv_length + 2, 2));
}
/* Check whether it's already in the list. */
while (wp != NULL) {
- if (wp == (struct weak_pointer*)where) {
- break;
- }
- wp = wp->next;
+ if (wp == (struct weak_pointer*)where) {
+ break;
+ }
+ wp = wp->next;
}
if (wp == NULL) {
- /* Add it to the start of the list. */
- wp = (struct weak_pointer*)where;
- if (wp->next != weak_pointers) {
- wp->next = weak_pointers;
- } else {
- /*SHOW("avoided write to weak pointer");*/
- }
- weak_pointers = wp;
+ /* Add it to the start of the list. */
+ wp = (struct weak_pointer*)where;
+ if (wp->next != weak_pointers) {
+ wp->next = weak_pointers;
+ } else {
+ /*SHOW("avoided write to weak pointer");*/
+ }
+ weak_pointers = wp;
}
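+ /* weak_pointers is a list threaded through the weak pointers
+  * themselves; it is walked after scavenging so that values which did
+  * not survive the collection can be broken. */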
/* Do not let GC scavenge the value slot of the weak pointer.
lispobj *start = (lispobj *) READ_ONLY_SPACE_START;
lispobj *end = (lispobj *) SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
- return NULL;
+ return NULL;
return (gc_search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *) pointer));
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *) pointer));
}
lispobj *
lispobj *start = (lispobj *)STATIC_SPACE_START;
lispobj *end = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0);
if ((pointer < (void *)start) || (pointer >= (void *)end))
- return NULL;
- return (gc_search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *) pointer));
+ return NULL;
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *) pointer));
}
/* a faster version for searching the dynamic space. This will work even
/* The address may be invalid, so do some checks. */
if ((page_index == -1) ||
- (page_table[page_index].allocated == FREE_PAGE_FLAG))
- return NULL;
+ (page_table[page_index].allocated == FREE_PAGE_FLAG))
+ return NULL;
start = (lispobj *)((void *)page_address(page_index)
- + page_table[page_index].first_object_offset);
- return (gc_search_space(start,
- (((lispobj *)pointer)+2)-start,
- (lispobj *)pointer));
+ + page_table[page_index].first_object_offset);
+ return (gc_search_space(start,
+ (((lispobj *)pointer)+2)-start,
+ (lispobj *)pointer));
}
/* Is there any possibility that pointer is a valid Lisp object
/* Find the object start address. */
if ((start_addr = search_dynamic_space(pointer)) == NULL) {
- return 0;
+ return 0;
}
/* We need to allow raw pointers into Code objects for return
* addresses. This will also pick up pointers to functions in code
* objects. */
if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
- /* XXX could do some further checks here */
- return 1;
+ /* XXX could do some further checks here */
+ return 1;
}
/* If it's not a return address then it needs to be a valid Lisp
* pointer. */
if (!is_lisp_pointer((lispobj)pointer)) {
- return 0;
+ return 0;
}
/* Check that the object pointed to is consistent with the pointer
*/
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
- /* Start_addr should be the enclosing code object, or a closure
- * header. */
- switch (widetag_of(*start_addr)) {
- case CODE_HEADER_WIDETAG:
- /* This case is probably caught above. */
- break;
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if ((unsigned)pointer !=
- ((unsigned)start_addr+FUN_POINTER_LOWTAG)) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wf2: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- break;
- default:
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wf3: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- break;
+ /* Start_addr should be the enclosing code object, or a closure
+ * header. */
+ switch (widetag_of(*start_addr)) {
+ case CODE_HEADER_WIDETAG:
+ /* This case is probably caught above. */
+ break;
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ if ((unsigned)pointer !=
+ ((unsigned)start_addr+FUN_POINTER_LOWTAG)) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wf2: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ break;
+ default:
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wf3: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ break;
case LIST_POINTER_LOWTAG:
- if ((unsigned)pointer !=
- ((unsigned)start_addr+LIST_POINTER_LOWTAG)) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wl1: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- /* Is it plausible cons? */
- if ((is_lisp_pointer(start_addr[0])
- || (fixnump(start_addr[0]))
- || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
+ if ((unsigned)pointer !=
+ ((unsigned)start_addr+LIST_POINTER_LOWTAG)) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wl1: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ /* Is it a plausible cons? */
+ if ((is_lisp_pointer(start_addr[0])
+ || (fixnump(start_addr[0]))
+ || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
- || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
+ || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
#endif
- || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
- && (is_lisp_pointer(start_addr[1])
- || (fixnump(start_addr[1]))
- || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
+ || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
+ && (is_lisp_pointer(start_addr[1])
+ || (fixnump(start_addr[1]))
+ || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
- || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
+ || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
#endif
- || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
- break;
- else {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wl2: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
+ || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
+ break;
+ else {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wl2: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
case INSTANCE_POINTER_LOWTAG:
- if ((unsigned)pointer !=
- ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wi1: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wi2: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- break;
+ if ((unsigned)pointer !=
+ ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wi1: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wi2: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ break;
case OTHER_POINTER_LOWTAG:
- if ((unsigned)pointer !=
- ((int)start_addr+OTHER_POINTER_LOWTAG)) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wo1: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- /* Is it plausible? Not a cons. XXX should check the headers. */
- if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wo2: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- switch (widetag_of(start_addr[0])) {
- case UNBOUND_MARKER_WIDETAG:
- case CHARACTER_WIDETAG:
+ if ((unsigned)pointer !=
+ ((int)start_addr+OTHER_POINTER_LOWTAG)) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wo1: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ /* Is it plausible? Not a cons. XXX should check the headers. */
+ if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wo2: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ switch (widetag_of(start_addr[0])) {
+ case UNBOUND_MARKER_WIDETAG:
+ case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
- case SINGLE_FLOAT_WIDETAG:
+ case SINGLE_FLOAT_WIDETAG:
#endif
- if (gencgc_verbose)
- FSHOW((stderr,
- "*Wo3: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
-
- /* only pointed to by function pointers? */
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
- FSHOW((stderr,
- "*Wo4: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
-
- case INSTANCE_HEADER_WIDETAG:
- if (gencgc_verbose)
- FSHOW((stderr,
- "*Wo5: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
-
- /* the valid other immediate pointer objects */
- case SIMPLE_VECTOR_WIDETAG:
- case RATIO_WIDETAG:
- case COMPLEX_WIDETAG:
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "*Wo3: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+
+ /* only pointed to by function pointers? */
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "*Wo4: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+
+ case INSTANCE_HEADER_WIDETAG:
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "*Wo5: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+
+ /* the valid other immediate pointer objects */
+ case SIMPLE_VECTOR_WIDETAG:
+ case RATIO_WIDETAG:
+ case COMPLEX_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
- case COMPLEX_SINGLE_FLOAT_WIDETAG:
+ case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
- case COMPLEX_DOUBLE_FLOAT_WIDETAG:
+ case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
- case COMPLEX_LONG_FLOAT_WIDETAG:
+ case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_BASE_STRING_WIDETAG:
+ case SIMPLE_ARRAY_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
- case COMPLEX_CHARACTER_STRING_WIDETAG:
+ case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
- case COMPLEX_VECTOR_NIL_WIDETAG:
- case COMPLEX_BIT_VECTOR_WIDETAG:
- case COMPLEX_VECTOR_WIDETAG:
- case COMPLEX_ARRAY_WIDETAG:
- case VALUE_CELL_HEADER_WIDETAG:
- case SYMBOL_HEADER_WIDETAG:
- case FDEFN_WIDETAG:
- case CODE_HEADER_WIDETAG:
- case BIGNUM_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
+ case COMPLEX_BIT_VECTOR_WIDETAG:
+ case COMPLEX_VECTOR_WIDETAG:
+ case COMPLEX_ARRAY_WIDETAG:
+ case VALUE_CELL_HEADER_WIDETAG:
+ case SYMBOL_HEADER_WIDETAG:
+ case FDEFN_WIDETAG:
+ case CODE_HEADER_WIDETAG:
+ case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
- case SINGLE_FLOAT_WIDETAG:
+ case SINGLE_FLOAT_WIDETAG:
#endif
- case DOUBLE_FLOAT_WIDETAG:
+ case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
- case LONG_FLOAT_WIDETAG:
+ case LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_BASE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
- case SIMPLE_CHARACTER_STRING_WIDETAG:
+ case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
- case SIMPLE_BIT_VECTOR_WIDETAG:
- case SIMPLE_ARRAY_NIL_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_BIT_VECTOR_WIDETAG:
+ case SIMPLE_ARRAY_NIL_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
- case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
- case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
- case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
- case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SAP_WIDETAG:
- case WEAK_POINTER_WIDETAG:
- break;
-
- default:
- if (gencgc_verbose)
- FSHOW((stderr,
- "/Wo6: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
- }
- break;
+ case SAP_WIDETAG:
+ case WEAK_POINTER_WIDETAG:
+ break;
+
+ default:
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "/Wo6: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
+ }
+ break;
default:
- if (gencgc_verbose)
- FSHOW((stderr,
- "*W?: %x %x %x\n",
- pointer, start_addr, *start_addr));
- return 0;
+ if (gencgc_verbose)
+ FSHOW((stderr,
+ "*W?: %x %x %x\n",
+ pointer, start_addr, *start_addr));
+ return 0;
}
/* looks good */
/* Check whether it's a vector or bignum object. */
switch (widetag_of(where[0])) {
case SIMPLE_VECTOR_WIDETAG:
- boxed = BOXED_PAGE_FLAG;
- break;
+ boxed = BOXED_PAGE_FLAG;
+ break;
case BIGNUM_WIDETAG:
case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- boxed = UNBOXED_PAGE_FLAG;
- break;
+ boxed = UNBOXED_PAGE_FLAG;
+ break;
default:
- return;
+ return;
}
/* Find its current size. */
next_page = first_page;
remaining_bytes = nwords*N_WORD_BYTES;
while (remaining_bytes > PAGE_BYTES) {
- gc_assert(page_table[next_page].gen == from_space);
- gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
- gc_assert(page_table[next_page].large_object);
- gc_assert(page_table[next_page].first_object_offset ==
- -PAGE_BYTES*(next_page-first_page));
- gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
-
- page_table[next_page].allocated = boxed;
-
- /* Shouldn't be write-protected at this stage. Essential that the
- * pages aren't. */
- gc_assert(!page_table[next_page].write_protected);
- remaining_bytes -= PAGE_BYTES;
- next_page++;
+ gc_assert(page_table[next_page].gen == from_space);
+ gc_assert((page_table[next_page].allocated == BOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == UNBOXED_PAGE_FLAG));
+ gc_assert(page_table[next_page].large_object);
+ gc_assert(page_table[next_page].first_object_offset ==
+ -PAGE_BYTES*(next_page-first_page));
+ gc_assert(page_table[next_page].bytes_used == PAGE_BYTES);
+
+ page_table[next_page].allocated = boxed;
+
+ /* Shouldn't be write-protected at this stage. It is essential that
+ * the pages aren't. */
+ gc_assert(!page_table[next_page].write_protected);
+ remaining_bytes -= PAGE_BYTES;
+ next_page++;
}
/* Now only one page remains, but the object may have shrunk so
page_table[next_page].allocated = boxed;
gc_assert(page_table[next_page].allocated ==
- page_table[first_page].allocated);
+ page_table[first_page].allocated);
/* Adjust the bytes_used. */
old_bytes_used = page_table[next_page].bytes_used;
/* Free any remaining pages; needs care. */
next_page++;
while ((old_bytes_used == PAGE_BYTES) &&
- (page_table[next_page].gen == from_space) &&
- ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
- || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
- page_table[next_page].large_object &&
- (page_table[next_page].first_object_offset ==
- -(next_page - first_page)*PAGE_BYTES)) {
- /* It checks out OK, free the page. We don't need to both zeroing
- * pages as this should have been done before shrinking the
- * object. These pages shouldn't be write protected as they
- * should be zero filled. */
- gc_assert(page_table[next_page].write_protected == 0);
-
- old_bytes_used = page_table[next_page].bytes_used;
- page_table[next_page].allocated = FREE_PAGE_FLAG;
- page_table[next_page].bytes_used = 0;
- bytes_freed += old_bytes_used;
- next_page++;
+ (page_table[next_page].gen == from_space) &&
+ ((page_table[next_page].allocated == UNBOXED_PAGE_FLAG)
+ || (page_table[next_page].allocated == BOXED_PAGE_FLAG)) &&
+ page_table[next_page].large_object &&
+ (page_table[next_page].first_object_offset ==
+ -(next_page - first_page)*PAGE_BYTES)) {
+ /* It checks out OK, free the page. We don't need to bother zeroing
+ * pages as this should have been done before shrinking the
+ * object. These pages shouldn't be write-protected, as they
+ * should be zero filled. */
+ gc_assert(page_table[next_page].write_protected == 0);
+
+ old_bytes_used = page_table[next_page].bytes_used;
+ page_table[next_page].allocated = FREE_PAGE_FLAG;
+ page_table[next_page].bytes_used = 0;
+ bytes_freed += old_bytes_used;
+ next_page++;
}
if ((bytes_freed > 0) && gencgc_verbose) {
- FSHOW((stderr,
- "/maybe_adjust_large_object() freed %d\n",
- bytes_freed));
+ FSHOW((stderr,
+ "/maybe_adjust_large_object() freed %d\n",
+ bytes_freed));
}
generations[from_space].bytes_allocated -= bytes_freed;
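A minimal stand-alone sketch of the page accounting above, for illustration only; the demo_* names, page size, and the main() driver are invented here and are not part of the runtime. It frees the fully-used tail pages that follow a shrunken large object and returns the byte count so the caller can adjust a per-generation total, in the spirit of maybe_adjust_large_object().

#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_BYTES 4096
#define DEMO_FREE 0
#define DEMO_BOXED 1

/* Minimal stand-in for the relevant page_table fields. */
struct demo_page {
    int allocated;        /* DEMO_FREE or DEMO_BOXED */
    int bytes_used;       /* bytes used on this page */
    int large_object;     /* part of a large object? */
};

/* Free the fully-used tail pages following a shrunken large object and
 * return the number of bytes released. */
static long demo_free_tail(struct demo_page *pages, long npages, long next_page)
{
    long bytes_freed = 0;
    while (next_page < npages
           && pages[next_page].large_object
           && pages[next_page].allocated != DEMO_FREE
           && pages[next_page].bytes_used == DEMO_PAGE_BYTES) {
        bytes_freed += pages[next_page].bytes_used;
        pages[next_page].allocated = DEMO_FREE;
        pages[next_page].bytes_used = 0;
        next_page++;
    }
    return bytes_freed;
}

int main(void)
{
    struct demo_page pages[4] = {
        { DEMO_BOXED, DEMO_PAGE_BYTES, 1 },
        { DEMO_BOXED, DEMO_PAGE_BYTES, 1 },
        { DEMO_BOXED, DEMO_PAGE_BYTES, 1 },
        { DEMO_FREE, 0, 0 },
    };
    long bytes_allocated = 3 * DEMO_PAGE_BYTES;
    /* Suppose the object now ends on page 0: pages 1..2 become free. */
    long freed = demo_free_tail(pages, 4, 1);
    bytes_allocated -= freed;
    printf("freed %ld bytes, %ld still allocated\n", freed, bytes_allocated);
    assert(bytes_allocated == DEMO_PAGE_BYTES);
    return 0;
}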
/* quick check 1: Address is quite likely to have been invalid. */
if ((addr_page_index == -1)
- || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
- || (page_table[addr_page_index].bytes_used == 0)
- || (page_table[addr_page_index].gen != from_space)
- /* Skip if already marked dont_move. */
- || (page_table[addr_page_index].dont_move != 0))
- return;
+ || (page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ || (page_table[addr_page_index].bytes_used == 0)
+ || (page_table[addr_page_index].gen != from_space)
+ /* Skip if already marked dont_move. */
+ || (page_table[addr_page_index].dont_move != 0))
+ return;
gc_assert(!(page_table[addr_page_index].allocated&OPEN_REGION_PAGE_FLAG));
/* (Now that we know that addr_page_index is in range, it's
* safe to index into page_table[] with it.) */
*
*/
if (((unsigned)addr & (PAGE_BYTES - 1)) > page_table[addr_page_index].bytes_used)
- return;
+ return;
/* Filter out anything which can't be a pointer to a Lisp object
* (or, as a special case which also requires dont_move, a return
* probability that random garbage will be bogusly interpreted as
* a pointer which prevents a page from moving. */
if (!(possibly_valid_dynamic_space_pointer(addr)))
- return;
+ return;
/* Find the beginning of the region. Note that there may be
* objects in the region preceding the one that we were passed a
/* I think this'd work just as well, but without the assertions.
* -dan 2004.01.01 */
first_page=
- find_page_index(page_address(addr_page_index)+
- page_table[addr_page_index].first_object_offset);
-#else
+ find_page_index(page_address(addr_page_index)+
+ page_table[addr_page_index].first_object_offset);
+#else
first_page = addr_page_index;
while (page_table[first_page].first_object_offset != 0) {
- --first_page;
- /* Do some checks. */
- gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
- gc_assert(page_table[first_page].gen == from_space);
- gc_assert(page_table[first_page].allocated == region_allocation);
+ --first_page;
+ /* Do some checks. */
+ gc_assert(page_table[first_page].bytes_used == PAGE_BYTES);
+ gc_assert(page_table[first_page].gen == from_space);
+ gc_assert(page_table[first_page].allocated == region_allocation);
}
#endif
/* Adjust any large objects before promotion as they won't be
* copied after promotion. */
if (page_table[first_page].large_object) {
- maybe_adjust_large_object(page_address(first_page));
- /* If a large object has shrunk then addr may now point to a
- * free area in which case it's ignored here. Note it gets
- * through the valid pointer test above because the tail looks
- * like conses. */
- if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
- || (page_table[addr_page_index].bytes_used == 0)
- /* Check the offset within the page. */
- || (((unsigned)addr & (PAGE_BYTES - 1))
- > page_table[addr_page_index].bytes_used)) {
- FSHOW((stderr,
- "weird? ignore ptr 0x%x to freed area of large object\n",
- addr));
- return;
- }
- /* It may have moved to unboxed pages. */
- region_allocation = page_table[first_page].allocated;
+ maybe_adjust_large_object(page_address(first_page));
+ /* If a large object has shrunk, then addr may now point to a
+ * free area, in which case it's ignored here. Note that it gets
+ * through the valid pointer test above because the tail looks
+ * like conses. */
+ if ((page_table[addr_page_index].allocated == FREE_PAGE_FLAG)
+ || (page_table[addr_page_index].bytes_used == 0)
+ /* Check the offset within the page. */
+ || (((unsigned)addr & (PAGE_BYTES - 1))
+ > page_table[addr_page_index].bytes_used)) {
+ FSHOW((stderr,
+ "weird? ignore ptr 0x%x to freed area of large object\n",
+ addr));
+ return;
+ }
+ /* It may have moved to unboxed pages. */
+ region_allocation = page_table[first_page].allocated;
}
/* Now work forward until the end of this contiguous area is found,
* marking all pages as dont_move. */
for (i = first_page; ;i++) {
- gc_assert(page_table[i].allocated == region_allocation);
-
- /* Mark the page static. */
- page_table[i].dont_move = 1;
-
- /* Move the page to the new_space. XX I'd rather not do this
- * but the GC logic is not quite able to copy with the static
- * pages remaining in the from space. This also requires the
- * generation bytes_allocated counters be updated. */
- page_table[i].gen = new_space;
- generations[new_space].bytes_allocated += page_table[i].bytes_used;
- generations[from_space].bytes_allocated -= page_table[i].bytes_used;
-
- /* It is essential that the pages are not write protected as
- * they may have pointers into the old-space which need
- * scavenging. They shouldn't be write protected at this
- * stage. */
- gc_assert(!page_table[i].write_protected);
-
- /* Check whether this is the last page in this contiguous block.. */
- if ((page_table[i].bytes_used < PAGE_BYTES)
- /* ..or it is PAGE_BYTES and is the last in the block */
- || (page_table[i+1].allocated == FREE_PAGE_FLAG)
- || (page_table[i+1].bytes_used == 0) /* next page free */
- || (page_table[i+1].gen != from_space) /* diff. gen */
- || (page_table[i+1].first_object_offset == 0))
- break;
+ gc_assert(page_table[i].allocated == region_allocation);
+
+ /* Mark the page static. */
+ page_table[i].dont_move = 1;
+
+ /* Move the page to the new_space. XX I'd rather not do this
+ * but the GC logic is not quite able to cope with the static
+ * pages remaining in the from space. This also requires that the
+ * generation bytes_allocated counters be updated. */
+ page_table[i].gen = new_space;
+ generations[new_space].bytes_allocated += page_table[i].bytes_used;
+ generations[from_space].bytes_allocated -= page_table[i].bytes_used;
+
+ /* It is essential that the pages are not write protected as
+ * they may have pointers into the old-space which need
+ * scavenging. They shouldn't be write protected at this
+ * stage. */
+ gc_assert(!page_table[i].write_protected);
+
+ /* Check whether this is the last page in this contiguous block.. */
+ if ((page_table[i].bytes_used < PAGE_BYTES)
+ /* ..or it is PAGE_BYTES and is the last in the block */
+ || (page_table[i+1].allocated == FREE_PAGE_FLAG)
+ || (page_table[i+1].bytes_used == 0) /* next page free */
+ || (page_table[i+1].gen != from_space) /* diff. gen */
+ || (page_table[i+1].first_object_offset == 0))
+ break;
}
/* Check that the page is now static. */
/* Skip if it's already write-protected, pinned, or unboxed */
if (page_table[page].write_protected
- || page_table[page].dont_move
- || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
- return (0);
+ || page_table[page].dont_move
+ || (page_table[page].allocated & UNBOXED_PAGE_FLAG))
+ return (0);
/* Scan the page for pointers to younger generations or the
* top temp. generation. */
for (j = 0; j < num_words; j++) {
- void *ptr = *(page_addr+j);
- long index = find_page_index(ptr);
-
- /* Check that it's in the dynamic space */
- if (index != -1)
- if (/* Does it point to a younger or the temp. generation? */
- ((page_table[index].allocated != FREE_PAGE_FLAG)
- && (page_table[index].bytes_used != 0)
- && ((page_table[index].gen < gen)
- || (page_table[index].gen == NUM_GENERATIONS)))
-
- /* Or does it point within a current gc_alloc() region? */
- || ((boxed_region.start_addr <= ptr)
- && (ptr <= boxed_region.free_pointer))
- || ((unboxed_region.start_addr <= ptr)
- && (ptr <= unboxed_region.free_pointer))) {
- wp_it = 0;
- break;
- }
+ void *ptr = *(page_addr+j);
+ long index = find_page_index(ptr);
+
+ /* Check that it's in the dynamic space */
+ if (index != -1)
+ if (/* Does it point to a younger or the temp. generation? */
+ ((page_table[index].allocated != FREE_PAGE_FLAG)
+ && (page_table[index].bytes_used != 0)
+ && ((page_table[index].gen < gen)
+ || (page_table[index].gen == NUM_GENERATIONS)))
+
+ /* Or does it point within a current gc_alloc() region? */
+ || ((boxed_region.start_addr <= ptr)
+ && (ptr <= boxed_region.free_pointer))
+ || ((unboxed_region.start_addr <= ptr)
+ && (ptr <= unboxed_region.free_pointer))) {
+ wp_it = 0;
+ break;
+ }
}
if (wp_it == 1) {
- /* Write-protect the page. */
- /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
+ /* Write-protect the page. */
+ /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
- os_protect((void *)page_addr,
- PAGE_BYTES,
- OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
+ os_protect((void *)page_addr,
+ PAGE_BYTES,
+ OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
- /* Note the page as protected in the page tables. */
- page_table[page].write_protected = 1;
+ /* Note the page as protected in the page tables. */
+ page_table[page].write_protected = 1;
}
return (wp_it);
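For illustration, a simplified stand-alone model of the write-protect decision made in update_page_write_prot(): a page can only be protected if none of its words point at a younger generation. The demo_* names and the encoding of "which generation a word points at" are invented here, not the runtime's representation.

#include <stdio.h>

#define DEMO_WORDS_PER_PAGE 8

/* target_gen[j] holds the generation a word "points to", or -1 if the
 * word is not a pointer into the dynamic space. */
static int demo_can_write_protect(const int *target_gen, int nwords, int page_gen)
{
    int j;
    for (j = 0; j < nwords; j++) {
        /* A pointer to a younger generation means the page must stay
         * writable so later stores into it can be noticed. */
        if (target_gen[j] != -1 && target_gen[j] < page_gen)
            return 0;
    }
    return 1;
}

int main(void)
{
    int old_only[DEMO_WORDS_PER_PAGE]  = { -1, 5, -1, 5, 5, -1, -1, 5 };
    int has_young[DEMO_WORDS_PER_PAGE] = { -1, 5, -1, 1, 5, -1, -1, 5 };
    printf("old-only page: wp=%d\n",
           demo_can_write_protect(old_only, DEMO_WORDS_PER_PAGE, 5));
    printf("page with young pointer: wp=%d\n",
           demo_can_write_protect(has_young, DEMO_WORDS_PER_PAGE, 5));
    return 0;
}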
#if SC_GEN_CK
/* Clear the write_protected_cleared flags on all pages. */
for (i = 0; i < NUM_PAGES; i++)
- page_table[i].write_protected_cleared = 0;
+ page_table[i].write_protected_cleared = 0;
#endif
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)) {
- long last_page,j;
- int write_protected=1;
-
- /* This should be the start of a region */
- gc_assert(page_table[i].first_object_offset == 0);
-
- /* Now work forward until the end of the region */
- for (last_page = i; ; last_page++) {
- write_protected =
- write_protected && page_table[last_page].write_protected;
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
- break;
- }
- if (!write_protected) {
- scavenge(page_address(i),
- (page_table[last_page].bytes_used +
- (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
-
- /* Now scan the pages and write protect those that
- * don't have pointers to younger generations. */
- if (enable_page_protection) {
- for (j = i; j <= last_page; j++) {
- num_wp += update_page_write_prot(j);
- }
- }
- }
- i = last_page;
- }
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)) {
+ long last_page,j;
+ int write_protected=1;
+
+ /* This should be the start of a region */
+ gc_assert(page_table[i].first_object_offset == 0);
+
+ /* Now work forward until the end of the region */
+ for (last_page = i; ; last_page++) {
+ write_protected =
+ write_protected && page_table[last_page].write_protected;
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (page_table[last_page+1].bytes_used == 0)
+ || (page_table[last_page+1].gen != generation)
+ || (page_table[last_page+1].first_object_offset == 0))
+ break;
+ }
+ if (!write_protected) {
+ scavenge(page_address(i),
+ (page_table[last_page].bytes_used +
+ (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+
+ /* Now scan the pages and write protect those that
+ * don't have pointers to younger generations. */
+ if (enable_page_protection) {
+ for (j = i; j <= last_page; j++) {
+ num_wp += update_page_write_prot(j);
+ }
+ }
+ }
+ i = last_page;
+ }
}
if ((gencgc_verbose > 1) && (num_wp != 0)) {
- FSHOW((stderr,
- "/write protected %d pages within generation %d\n",
- num_wp, generation));
+ FSHOW((stderr,
+ "/write protected %d pages within generation %d\n",
+ num_wp, generation));
}
#if SC_GEN_CK
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < NUM_PAGES; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)
- && (page_table[i].write_protected_cleared != 0)) {
- FSHOW((stderr, "/scavenge_generation() %d\n", generation));
- FSHOW((stderr,
- "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
- page_table[i].bytes_used,
- page_table[i].first_object_offset,
- page_table[i].dont_move));
- lose("write to protected page %d in scavenge_generation()", i);
- }
+ if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)
+ && (page_table[i].write_protected_cleared != 0)) {
+ FSHOW((stderr, "/scavenge_generation() %d\n", generation));
+ FSHOW((stderr,
+ "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
+ page_table[i].bytes_used,
+ page_table[i].first_object_offset,
+ page_table[i].dont_move));
+ lose("write to protected page %d in scavenge_generation()", i);
+ }
}
#endif
}
long i;
FSHOW((stderr,
- "/starting one full scan of newspace generation %d\n",
- generation));
+ "/starting one full scan of newspace generation %d\n",
+ generation));
for (i = 0; i < last_free_page; i++) {
- /* Note that this skips over open regions when it encounters them. */
- if ((page_table[i].allocated & BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)
- && ((page_table[i].write_protected == 0)
- /* (This may be redundant as write_protected is now
- * cleared before promotion.) */
- || (page_table[i].dont_move == 1))) {
- long last_page;
- int all_wp=1;
-
- /* The scavenge will start at the first_object_offset of page i.
- *
- * We need to find the full extent of this contiguous
- * block in case objects span pages.
- *
- * Now work forward until the end of this contiguous area
- * is found. A small area is preferred as there is a
- * better chance of its pages being write-protected. */
- for (last_page = i; ;last_page++) {
- /* If all pages are write-protected and movable,
- * then no need to scavenge */
- all_wp=all_wp && page_table[last_page].write_protected &&
- !page_table[last_page].dont_move;
-
- /* Check whether this is the last page in this
- * contiguous block */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
- || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
- break;
- }
-
- /* Do a limited check for write-protected pages. */
- if (!all_wp) {
- long size;
-
- size = (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES
- - page_table[i].first_object_offset)/N_WORD_BYTES;
- new_areas_ignore_page = last_page;
-
- scavenge(page_address(i) +
- page_table[i].first_object_offset,
- size);
-
- }
- i = last_page;
- }
+ /* Note that this skips over open regions when it encounters them. */
+ if ((page_table[i].allocated & BOXED_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)
+ && ((page_table[i].write_protected == 0)
+ /* (This may be redundant as write_protected is now
+ * cleared before promotion.) */
+ || (page_table[i].dont_move == 1))) {
+ long last_page;
+ int all_wp=1;
+
+ /* The scavenge will start at the first_object_offset of page i.
+ *
+ * We need to find the full extent of this contiguous
+ * block in case objects span pages.
+ *
+ * Now work forward until the end of this contiguous area
+ * is found. A small area is preferred as there is a
+ * better chance of its pages being write-protected. */
+ for (last_page = i; ;last_page++) {
+ /* If all pages are write-protected and movable,
+ * then no need to scavenge */
+ all_wp=all_wp && page_table[last_page].write_protected &&
+ !page_table[last_page].dont_move;
+
+ /* Check whether this is the last page in this
+ * contiguous block */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (!(page_table[last_page+1].allocated & BOXED_PAGE_FLAG))
+ || (page_table[last_page+1].bytes_used == 0)
+ || (page_table[last_page+1].gen != generation)
+ || (page_table[last_page+1].first_object_offset == 0))
+ break;
+ }
+
+ /* Do a limited check for write-protected pages. */
+ if (!all_wp) {
+ long size;
+
+ size = (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES
+ - page_table[i].first_object_offset)/N_WORD_BYTES;
+ new_areas_ignore_page = last_page;
+
+ scavenge(page_address(i) +
+ page_table[i].first_object_offset,
+ size);
+
+ }
+ i = last_page;
+ }
}
FSHOW((stderr,
- "/done with one full scan of newspace generation %d\n",
- generation));
+ "/done with one full scan of newspace generation %d\n",
+ generation));
}
/* Do a complete scavenge of the newspace generation. */
current_new_areas_index = new_areas_index;
/*FSHOW((stderr,
- "The first scan is finished; current_new_areas_index=%d.\n",
- current_new_areas_index));*/
+ "The first scan is finished; current_new_areas_index=%d.\n",
+ current_new_areas_index));*/
while (current_new_areas_index > 0) {
- /* Move the current to the previous new areas */
- previous_new_areas = current_new_areas;
- previous_new_areas_index = current_new_areas_index;
-
- /* Scavenge all the areas in previous new areas. Any new areas
- * allocated are saved in current_new_areas. */
-
- /* Allocate an array for current_new_areas; alternating between
- * new_areas_1 and 2 */
- if (previous_new_areas == &new_areas_1)
- current_new_areas = &new_areas_2;
- else
- current_new_areas = &new_areas_1;
-
- /* Set up for gc_alloc(). */
- new_areas = current_new_areas;
- new_areas_index = 0;
-
- /* Check whether previous_new_areas had overflowed. */
- if (previous_new_areas_index >= NUM_NEW_AREAS) {
-
- /* New areas of objects allocated have been lost so need to do a
- * full scan to be sure! If this becomes a problem try
- * increasing NUM_NEW_AREAS. */
- if (gencgc_verbose)
- SHOW("new_areas overflow, doing full scavenge");
-
- /* Don't need to record new areas that get scavenge anyway
- * during scavenge_newspace_generation_one_scan. */
- record_new_objects = 1;
-
- scavenge_newspace_generation_one_scan(generation);
-
- /* Record all new areas now. */
- record_new_objects = 2;
-
- /* Flush the current regions updating the tables. */
- gc_alloc_update_all_page_tables();
-
- } else {
-
- /* Work through previous_new_areas. */
- for (i = 0; i < previous_new_areas_index; i++) {
- long page = (*previous_new_areas)[i].page;
- long offset = (*previous_new_areas)[i].offset;
- long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
- gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
- scavenge(page_address(page)+offset, size);
- }
+ /* Move the current to the previous new areas */
+ previous_new_areas = current_new_areas;
+ previous_new_areas_index = current_new_areas_index;
+
+ /* Scavenge all the areas in previous new areas. Any new areas
+ * allocated are saved in current_new_areas. */
+
+ /* Allocate an array for current_new_areas; alternating between
+ * new_areas_1 and 2 */
+ if (previous_new_areas == &new_areas_1)
+ current_new_areas = &new_areas_2;
+ else
+ current_new_areas = &new_areas_1;
+
+ /* Set up for gc_alloc(). */
+ new_areas = current_new_areas;
+ new_areas_index = 0;
+
+ /* Check whether previous_new_areas had overflowed. */
+ if (previous_new_areas_index >= NUM_NEW_AREAS) {
+
+ /* New areas of allocated objects have been lost, so we need to do a
+ * full scan to be sure! If this becomes a problem, try
+ * increasing NUM_NEW_AREAS. */
+ if (gencgc_verbose)
+ SHOW("new_areas overflow, doing full scavenge");
+
+ /* We don't need to record new areas that get scavenged anyway
+ * during scavenge_newspace_generation_one_scan. */
+ record_new_objects = 1;
+
+ scavenge_newspace_generation_one_scan(generation);
+
+ /* Record all new areas now. */
+ record_new_objects = 2;
+
+ /* Flush the current regions updating the tables. */
+ gc_alloc_update_all_page_tables();
+
+ } else {
+
+ /* Work through previous_new_areas. */
+ for (i = 0; i < previous_new_areas_index; i++) {
+ long page = (*previous_new_areas)[i].page;
+ long offset = (*previous_new_areas)[i].offset;
+ long size = (*previous_new_areas)[i].size / N_WORD_BYTES;
+ gc_assert((*previous_new_areas)[i].size % N_WORD_BYTES == 0);
+ scavenge(page_address(page)+offset, size);
+ }
- /* Flush the current regions updating the tables. */
- gc_alloc_update_all_page_tables();
- }
+ /* Flush the current regions updating the tables. */
+ gc_alloc_update_all_page_tables();
+ }
- current_new_areas_index = new_areas_index;
+ current_new_areas_index = new_areas_index;
- /*FSHOW((stderr,
- "The re-scan has finished; current_new_areas_index=%d.\n",
- current_new_areas_index));*/
+ /*FSHOW((stderr,
+ "The re-scan has finished; current_new_areas_index=%d.\n",
+ current_new_areas_index));*/
}
/* Turn off recording of areas allocated by gc_alloc(). */
/* Check that none of the write_protected pages in this generation
* have been written to. */
for (i = 0; i < NUM_PAGES; i++) {
- if ((page_table[i].allocation != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)
- && (page_table[i].write_protected_cleared != 0)
- && (page_table[i].dont_move == 0)) {
- lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
- i, generation, page_table[i].dont_move);
- }
+ if ((page_table[i].allocation != FREE_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)
+ && (page_table[i].write_protected_cleared != 0)
+ && (page_table[i].dont_move == 0)) {
+ lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
+ i, generation, page_table[i].dont_move);
+ }
}
#endif
}
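The loop above keeps scavenging until a pass records no further new areas, alternating between two fixed-size area arrays. A toy model of that ping-pong buffering, for illustration only; the demo_* names and the fake scavenger that produces follow-up work on the first round only are invented here.

#include <stdio.h>

#define DEMO_NUM_AREAS 4

struct demo_area { long page, offset, size; };

/* A toy "scavenge": records one follow-up area per input area on the
 * first round and nothing afterwards, so the driver loop terminates
 * once a pass records no new work. */
static int demo_scavenge(struct demo_area *in, int n_in,
                         struct demo_area *out, int round)
{
    int n_out = 0, i;
    for (i = 0; i < n_in && round == 0; i++) {
        out[n_out].page = in[i].page + 1;
        out[n_out].offset = 0;
        out[n_out].size = in[i].size;
        n_out++;
    }
    return n_out;
}

int main(void)
{
    struct demo_area areas_1[DEMO_NUM_AREAS] = { { 0, 0, 64 }, { 3, 16, 32 } };
    struct demo_area areas_2[DEMO_NUM_AREAS];
    struct demo_area *current = areas_1, *previous;
    int current_index = 2, previous_index, round = 0;

    while (current_index > 0) {
        /* Swap roles: what was just filled becomes the input ... */
        previous = current;
        previous_index = current_index;
        /* ... and the other buffer receives whatever new areas appear,
         * much as new_areas_1/new_areas_2 alternate above. */
        current = (previous == areas_1) ? areas_2 : areas_1;
        current_index = demo_scavenge(previous, previous_index, current, round++);
        printf("round %d recorded %d new area(s)\n", round, current_index);
    }
    return 0;
}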
long i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == from_space)) {
- void *page_start;
-
- page_start = (void *)page_address(i);
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- if (page_table[i].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[i].write_protected = 0;
- }
- }
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == from_space)) {
+ void *page_start;
+
+ page_start = (void *)page_address(i);
+
+ /* Remove any write-protection. We should be able to rely
+ * on the write-protect flag to avoid redundant calls. */
+ if (page_table[i].write_protected) {
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[i].write_protected = 0;
+ }
+ }
}
}
first_page = 0;
do {
- /* Find a first page for the next region of pages. */
- while ((first_page < last_free_page)
- && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
- || (page_table[first_page].bytes_used == 0)
- || (page_table[first_page].gen != from_space)))
- first_page++;
-
- if (first_page >= last_free_page)
- break;
-
- /* Find the last page of this region. */
- last_page = first_page;
-
- do {
- /* Free the page. */
- bytes_freed += page_table[last_page].bytes_used;
- generations[page_table[last_page].gen].bytes_allocated -=
- page_table[last_page].bytes_used;
- page_table[last_page].allocated = FREE_PAGE_FLAG;
- page_table[last_page].bytes_used = 0;
-
- /* Remove any write-protection. We should be able to rely
- * on the write-protect flag to avoid redundant calls. */
- {
- void *page_start = (void *)page_address(last_page);
-
- if (page_table[last_page].write_protected) {
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[last_page].write_protected = 0;
- }
- }
- last_page++;
- }
- while ((last_page < last_free_page)
- && (page_table[last_page].allocated != FREE_PAGE_FLAG)
- && (page_table[last_page].bytes_used != 0)
- && (page_table[last_page].gen == from_space));
-
- /* Zero pages from first_page to (last_page-1).
- *
- * FIXME: Why not use os_zero(..) function instead of
- * hand-coding this again? (Check other gencgc_unmap_zero
- * stuff too. */
- if (gencgc_unmap_zero) {
- void *page_start, *addr;
-
- page_start = (void *)page_address(first_page);
-
- os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
- addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
- if (addr == NULL || addr != page_start) {
- lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
- addr);
- }
- } else {
- long *page_start;
-
- page_start = (long *)page_address(first_page);
- memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
- }
-
- first_page = last_page;
+ /* Find a first page for the next region of pages. */
+ while ((first_page < last_free_page)
+ && ((page_table[first_page].allocated == FREE_PAGE_FLAG)
+ || (page_table[first_page].bytes_used == 0)
+ || (page_table[first_page].gen != from_space)))
+ first_page++;
+
+ if (first_page >= last_free_page)
+ break;
+
+ /* Find the last page of this region. */
+ last_page = first_page;
+
+ do {
+ /* Free the page. */
+ bytes_freed += page_table[last_page].bytes_used;
+ generations[page_table[last_page].gen].bytes_allocated -=
+ page_table[last_page].bytes_used;
+ page_table[last_page].allocated = FREE_PAGE_FLAG;
+ page_table[last_page].bytes_used = 0;
+
+ /* Remove any write-protection. We should be able to rely
+ * on the write-protect flag to avoid redundant calls. */
+ {
+ void *page_start = (void *)page_address(last_page);
+
+ if (page_table[last_page].write_protected) {
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[last_page].write_protected = 0;
+ }
+ }
+ last_page++;
+ }
+ while ((last_page < last_free_page)
+ && (page_table[last_page].allocated != FREE_PAGE_FLAG)
+ && (page_table[last_page].bytes_used != 0)
+ && (page_table[last_page].gen == from_space));
+
+ /* Zero pages from first_page to (last_page-1).
+ *
+ * FIXME: Why not use the os_zero(..) function instead of
+ * hand-coding this again? (Check the other gencgc_unmap_zero
+ * stuff too.) */
+ if (gencgc_unmap_zero) {
+ void *page_start, *addr;
+
+ page_start = (void *)page_address(first_page);
+
+ os_invalidate(page_start, PAGE_BYTES*(last_page-first_page));
+ addr = os_validate(page_start, PAGE_BYTES*(last_page-first_page));
+ if (addr == NULL || addr != page_start) {
+ lose("free_oldspace: page moved, 0x%08x ==> 0x%08x",page_start,
+ addr);
+ }
+ } else {
+ long *page_start;
+
+ page_start = (long *)page_address(first_page);
+ memset(page_start, 0,PAGE_BYTES*(last_page-first_page));
+ }
+
+ first_page = last_page;
} while (first_page < last_free_page);
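The FIXME above is about the two zeroing strategies used when freeing oldspace pages. A minimal sketch of that choice, assuming a POSIX system with MAP_ANONYMOUS (as the trampoline-page mmap earlier in this patch already assumes); the demo_* names, flag, and main() driver are invented for illustration.

#define _GNU_SOURCE            /* for MAP_ANONYMOUS on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define DEMO_PAGE_BYTES 4096

/* Invented flag mirroring gencgc_unmap_zero: when set, give the pages
 * back to the OS and map fresh zero-filled ones at the same address;
 * otherwise just memset them. */
static int demo_unmap_zero = 1;

static void demo_zero_pages(void *start, long npages)
{
    size_t len = (size_t)npages * DEMO_PAGE_BYTES;
    if (demo_unmap_zero) {
        munmap(start, len);
        void *addr = mmap(start, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (addr == MAP_FAILED || addr != start) {
            perror("mmap");
            exit(1);
        }
    } else {
        memset(start, 0, len);
    }
}

int main(void)
{
    /* Grab two page-aligned pages to play with. */
    void *region = mmap(NULL, 2 * DEMO_PAGE_BYTES, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (region == MAP_FAILED) { perror("mmap"); return 1; }
    memset(region, 0xAA, 2 * DEMO_PAGE_BYTES);
    demo_zero_pages(region, 2);
    printf("first byte after zeroing: %d\n", ((unsigned char *)region)[0]);
    return 0;
}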
long pi1 = find_page_index((void*)addr);
if (pi1 != -1)
- fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
- (unsigned long) addr,
- pi1,
- page_table[pi1].allocated,
- page_table[pi1].gen,
- page_table[pi1].bytes_used,
- page_table[pi1].first_object_offset,
- page_table[pi1].dont_move);
+ fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
+ (unsigned long) addr,
+ pi1,
+ page_table[pi1].allocated,
+ page_table[pi1].gen,
+ page_table[pi1].bytes_used,
+ page_table[pi1].first_object_offset,
+ page_table[pi1].dont_move);
fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
- *(addr-4),
- *(addr-3),
- *(addr-2),
- *(addr-1),
- *(addr-0),
- *(addr+1),
- *(addr+2),
- *(addr+3),
- *(addr+4));
+ *(addr-4),
+ *(addr-3),
+ *(addr-2),
+ *(addr-1),
+ *(addr-0),
+ *(addr+1),
+ *(addr+2),
+ *(addr+3),
+ *(addr+4));
}
#endif
{
int is_in_dynamic_space = (find_page_index((void*)start) != -1);
int is_in_readonly_space =
- (READ_ONLY_SPACE_START <= (unsigned)start &&
- (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
+ (READ_ONLY_SPACE_START <= (unsigned)start &&
+ (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
while (words > 0) {
- size_t count = 1;
- lispobj thing = *(lispobj*)start;
-
- if (is_lisp_pointer(thing)) {
- long page_index = find_page_index((void*)thing);
- long to_readonly_space =
- (READ_ONLY_SPACE_START <= thing &&
- thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
- long to_static_space =
- (STATIC_SPACE_START <= thing &&
- thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
-
- /* Does it point to the dynamic space? */
- if (page_index != -1) {
- /* If it's within the dynamic space it should point to a used
- * page. XX Could check the offset too. */
- if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
- && (page_table[page_index].bytes_used == 0))
- lose ("Ptr %x @ %x sees free page.", thing, start);
- /* Check that it doesn't point to a forwarding pointer! */
- if (*((lispobj *)native_pointer(thing)) == 0x01) {
- lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
- }
- /* Check that its not in the RO space as it would then be a
- * pointer from the RO to the dynamic space. */
- if (is_in_readonly_space) {
- lose("ptr to dynamic space %x from RO space %x",
- thing, start);
- }
- /* Does it point to a plausible object? This check slows
- * it down a lot (so it's commented out).
- *
- * "a lot" is serious: it ate 50 minutes cpu time on
- * my duron 950 before I came back from lunch and
- * killed it.
- *
- * FIXME: Add a variable to enable this
- * dynamically. */
- /*
- if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
- lose("ptr %x to invalid object %x", thing, start);
- }
- */
- } else {
- /* Verify that it points to another valid space. */
- if (!to_readonly_space && !to_static_space
- && (thing != (unsigned)&undefined_tramp)) {
- lose("Ptr %x @ %x sees junk.", thing, start);
- }
- }
- } else {
- if (!(fixnump(thing))) {
- /* skip fixnums */
- switch(widetag_of(*start)) {
-
- /* boxed objects */
- case SIMPLE_VECTOR_WIDETAG:
- case RATIO_WIDETAG:
- case COMPLEX_WIDETAG:
- case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_BASE_STRING_WIDETAG:
+ size_t count = 1;
+ lispobj thing = *(lispobj*)start;
+
+ if (is_lisp_pointer(thing)) {
+ long page_index = find_page_index((void*)thing);
+ long to_readonly_space =
+ (READ_ONLY_SPACE_START <= thing &&
+ thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
+ long to_static_space =
+ (STATIC_SPACE_START <= thing &&
+ thing < SymbolValue(STATIC_SPACE_FREE_POINTER,0));
+
+ /* Does it point to the dynamic space? */
+ if (page_index != -1) {
+ /* If it's within the dynamic space it should point to a used
+ * page. XX Could check the offset too. */
+ if ((page_table[page_index].allocated != FREE_PAGE_FLAG)
+ && (page_table[page_index].bytes_used == 0))
+ lose ("Ptr %x @ %x sees free page.", thing, start);
+ /* Check that it doesn't point to a forwarding pointer! */
+ if (*((lispobj *)native_pointer(thing)) == 0x01) {
+ lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
+ }
+ /* Check that it's not in the RO space, as it would then be a
+ * pointer from the RO to the dynamic space. */
+ if (is_in_readonly_space) {
+ lose("ptr to dynamic space %x from RO space %x",
+ thing, start);
+ }
+ /* Does it point to a plausible object? This check slows
+ * it down a lot (so it's commented out).
+ *
+ * "a lot" is serious: it ate 50 minutes cpu time on
+ * my duron 950 before I came back from lunch and
+ * killed it.
+ *
+ * FIXME: Add a variable to enable this
+ * dynamically. */
+ /*
+ if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
+ lose("ptr %x to invalid object %x", thing, start);
+ }
+ */
+ } else {
+ /* Verify that it points to another valid space. */
+ if (!to_readonly_space && !to_static_space
+ && (thing != (unsigned)&undefined_tramp)) {
+ lose("Ptr %x @ %x sees junk.", thing, start);
+ }
+ }
+ } else {
+ if (!(fixnump(thing))) {
+ /* skip fixnums */
+ switch(widetag_of(*start)) {
+
+ /* boxed objects */
+ case SIMPLE_VECTOR_WIDETAG:
+ case RATIO_WIDETAG:
+ case COMPLEX_WIDETAG:
+ case SIMPLE_ARRAY_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
- case COMPLEX_CHARACTER_STRING_WIDETAG:
+ case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
- case COMPLEX_VECTOR_NIL_WIDETAG:
- case COMPLEX_BIT_VECTOR_WIDETAG:
- case COMPLEX_VECTOR_WIDETAG:
- case COMPLEX_ARRAY_WIDETAG:
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- case VALUE_CELL_HEADER_WIDETAG:
- case SYMBOL_HEADER_WIDETAG:
- case CHARACTER_WIDETAG:
+ case COMPLEX_VECTOR_NIL_WIDETAG:
+ case COMPLEX_BIT_VECTOR_WIDETAG:
+ case COMPLEX_VECTOR_WIDETAG:
+ case COMPLEX_ARRAY_WIDETAG:
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ case VALUE_CELL_HEADER_WIDETAG:
+ case SYMBOL_HEADER_WIDETAG:
+ case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
- case SINGLE_FLOAT_WIDETAG:
+ case SINGLE_FLOAT_WIDETAG:
#endif
- case UNBOUND_MARKER_WIDETAG:
- case INSTANCE_HEADER_WIDETAG:
- case FDEFN_WIDETAG:
- count = 1;
- break;
-
- case CODE_HEADER_WIDETAG:
- {
- lispobj object = *start;
- struct code *code;
- long nheader_words, ncode_words, nwords;
- lispobj fheaderl;
- struct simple_fun *fheaderp;
-
- code = (struct code *) start;
-
- /* Check that it's not in the dynamic space.
- * FIXME: Isn't is supposed to be OK for code
- * objects to be in the dynamic space these days? */
- if (is_in_dynamic_space
- /* It's ok if it's byte compiled code. The trace
- * table offset will be a fixnum if it's x86
- * compiled code - check.
- *
- * FIXME: #^#@@! lack of abstraction here..
- * This line can probably go away now that
- * there's no byte compiler, but I've got
- * too much to worry about right now to try
- * to make sure. -- WHN 2001-10-06 */
- && fixnump(code->trace_table_offset)
- /* Only when enabled */
- && verify_dynamic_code_check) {
- FSHOW((stderr,
- "/code object at %x in the dynamic space\n",
- start));
- }
-
- ncode_words = fixnum_value(code->code_size);
- nheader_words = HeaderValue(object);
- nwords = ncode_words + nheader_words;
- nwords = CEILING(nwords, 2);
- /* Scavenge the boxed section of the code data block */
- verify_space(start + 1, nheader_words - 1);
-
- /* Scavenge the boxed section of each function
- * object in the code data block. */
- fheaderl = code->entry_points;
- while (fheaderl != NIL) {
- fheaderp =
- (struct simple_fun *) native_pointer(fheaderl);
- gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
- verify_space(&fheaderp->name, 1);
- verify_space(&fheaderp->arglist, 1);
- verify_space(&fheaderp->type, 1);
- fheaderl = fheaderp->next;
- }
- count = nwords;
- break;
- }
-
- /* unboxed objects */
- case BIGNUM_WIDETAG:
+ case UNBOUND_MARKER_WIDETAG:
+ case INSTANCE_HEADER_WIDETAG:
+ case FDEFN_WIDETAG:
+ count = 1;
+ break;
+
+ case CODE_HEADER_WIDETAG:
+ {
+ lispobj object = *start;
+ struct code *code;
+ long nheader_words, ncode_words, nwords;
+ lispobj fheaderl;
+ struct simple_fun *fheaderp;
+
+ code = (struct code *) start;
+
+ /* Check that it's not in the dynamic space.
+ * FIXME: Isn't it supposed to be OK for code
+ * objects to be in the dynamic space these days? */
+ if (is_in_dynamic_space
+ /* It's ok if it's byte compiled code. The trace
+ * table offset will be a fixnum if it's x86
+ * compiled code - check.
+ *
+ * FIXME: #^#@@! lack of abstraction here..
+ * This line can probably go away now that
+ * there's no byte compiler, but I've got
+ * too much to worry about right now to try
+ * to make sure. -- WHN 2001-10-06 */
+ && fixnump(code->trace_table_offset)
+ /* Only when enabled */
+ && verify_dynamic_code_check) {
+ FSHOW((stderr,
+ "/code object at %x in the dynamic space\n",
+ start));
+ }
+
+ ncode_words = fixnum_value(code->code_size);
+ nheader_words = HeaderValue(object);
+ nwords = ncode_words + nheader_words;
+ nwords = CEILING(nwords, 2);
+ /* Scavenge the boxed section of the code data block */
+ verify_space(start + 1, nheader_words - 1);
+
+ /* Scavenge the boxed section of each function
+ * object in the code data block. */
+ fheaderl = code->entry_points;
+ while (fheaderl != NIL) {
+ fheaderp =
+ (struct simple_fun *) native_pointer(fheaderl);
+ gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
+ verify_space(&fheaderp->name, 1);
+ verify_space(&fheaderp->arglist, 1);
+ verify_space(&fheaderp->type, 1);
+ fheaderl = fheaderp->next;
+ }
+ count = nwords;
+ break;
+ }
+
+ /* unboxed objects */
+ case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
- case SINGLE_FLOAT_WIDETAG:
+ case SINGLE_FLOAT_WIDETAG:
#endif
- case DOUBLE_FLOAT_WIDETAG:
+ case DOUBLE_FLOAT_WIDETAG:
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
- case LONG_FLOAT_WIDETAG:
+ case LONG_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
- case COMPLEX_SINGLE_FLOAT_WIDETAG:
+ case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
- case COMPLEX_DOUBLE_FLOAT_WIDETAG:
+ case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
- case COMPLEX_LONG_FLOAT_WIDETAG:
+ case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_BASE_STRING_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
- case SIMPLE_CHARACTER_STRING_WIDETAG:
+ case SIMPLE_CHARACTER_STRING_WIDETAG:
#endif
- case SIMPLE_BIT_VECTOR_WIDETAG:
- case SIMPLE_ARRAY_NIL_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_BIT_VECTOR_WIDETAG:
+ case SIMPLE_ARRAY_NIL_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
- case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
- case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
- case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
- case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SAP_WIDETAG:
- case WEAK_POINTER_WIDETAG:
- count = (sizetab[widetag_of(*start)])(start);
- break;
-
- default:
- gc_abort();
- }
- }
- }
- start += count;
- words -= count;
+ case SAP_WIDETAG:
+ case WEAK_POINTER_WIDETAG:
+ count = (sizetab[widetag_of(*start)])(start);
+ break;
+
+ default:
+ gc_abort();
+ }
+ }
+ }
+ start += count;
+ words -= count;
}
}
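verify_space classifies each pointer by the space it targets before deciding which checks apply. A simplified, self-contained version of that classification, with invented DEMO_* boundary constants standing in for READ_ONLY_SPACE_START, STATIC_SPACE_START, and the dynamic-space page table lookup.

#include <stdio.h>

/* Invented, illustrative space boundaries. */
#define DEMO_RO_START      0x01000000UL
#define DEMO_RO_END        0x02000000UL
#define DEMO_STATIC_START  0x02000000UL
#define DEMO_STATIC_END    0x03000000UL
#define DEMO_DYNAMIC_START 0x09000000UL
#define DEMO_DYNAMIC_END   0x19000000UL

static const char *demo_classify(unsigned long thing)
{
    if (thing >= DEMO_RO_START && thing < DEMO_RO_END)
        return "read-only space";
    if (thing >= DEMO_STATIC_START && thing < DEMO_STATIC_END)
        return "static space";
    if (thing >= DEMO_DYNAMIC_START && thing < DEMO_DYNAMIC_END)
        return "dynamic space";
    /* Anything else is "junk" as far as verify_space is concerned. */
    return "junk";
}

int main(void)
{
    unsigned long samples[] = { 0x01001000UL, 0x02400000UL,
                                0x0A000000UL, 0xDEADBEEFUL };
    for (int i = 0; i < 4; i++)
        printf("%#lx -> %s\n", samples[i], demo_classify(samples[i]));
    return 0;
}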
* to grep for all foo_size and rename the appropriate ones to
* foo_count. */
long read_only_space_size =
- (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
- - (lispobj*)READ_ONLY_SPACE_START;
+ (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0)
+ - (lispobj*)READ_ONLY_SPACE_START;
long static_space_size =
- (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
- - (lispobj*)STATIC_SPACE_START;
+ (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER,0)
+ - (lispobj*)STATIC_SPACE_START;
struct thread *th;
for_each_thread(th) {
long binding_stack_size =
- (lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
- - (lispobj*)th->binding_stack_start;
- verify_space(th->binding_stack_start, binding_stack_size);
+ (lispobj*)SymbolValue(BINDING_STACK_POINTER,th)
+ - (lispobj*)th->binding_stack_start;
+ verify_space(th->binding_stack_start, binding_stack_size);
}
verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
verify_space((lispobj*)STATIC_SPACE_START , static_space_size);
int i;
for (i = 0; i < last_free_page; i++) {
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && (page_table[i].gen == generation)) {
- long last_page;
- int region_allocation = page_table[i].allocated;
-
- /* This should be the start of a contiguous block */
- gc_assert(page_table[i].first_object_offset == 0);
-
- /* Need to find the full extent of this contiguous block in case
- objects span pages. */
-
- /* Now work forward until the end of this contiguous area is
- found. */
- for (last_page = i; ;last_page++)
- /* Check whether this is the last page in this contiguous
- * block. */
- if ((page_table[last_page].bytes_used < PAGE_BYTES)
- /* Or it is PAGE_BYTES and is the last in the block */
- || (page_table[last_page+1].allocated != region_allocation)
- || (page_table[last_page+1].bytes_used == 0)
- || (page_table[last_page+1].gen != generation)
- || (page_table[last_page+1].first_object_offset == 0))
- break;
-
- verify_space(page_address(i), (page_table[last_page].bytes_used
- + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
- i = last_page;
- }
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && (page_table[i].gen == generation)) {
+ long last_page;
+ int region_allocation = page_table[i].allocated;
+
+ /* This should be the start of a contiguous block */
+ gc_assert(page_table[i].first_object_offset == 0);
+
+ /* Need to find the full extent of this contiguous block in case
+ objects span pages. */
+
+ /* Now work forward until the end of this contiguous area is
+ found. */
+ for (last_page = i; ;last_page++)
+ /* Check whether this is the last page in this contiguous
+ * block. */
+ if ((page_table[last_page].bytes_used < PAGE_BYTES)
+ /* Or it is PAGE_BYTES and is the last in the block */
+ || (page_table[last_page+1].allocated != region_allocation)
+ || (page_table[last_page+1].bytes_used == 0)
+ || (page_table[last_page+1].gen != generation)
+ || (page_table[last_page+1].first_object_offset == 0))
+ break;
+
+ verify_space(page_address(i), (page_table[last_page].bytes_used
+ + (last_page-i)*PAGE_BYTES)/N_WORD_BYTES);
+ i = last_page;
+ }
}
}
long page;
for (page = 0; page < last_free_page; page++) {
- if (page_table[page].allocated == FREE_PAGE_FLAG) {
- /* The whole page should be zero filled. */
- long *start_addr = (long *)page_address(page);
- long size = 1024;
- long i;
- for (i = 0; i < size; i++) {
- if (start_addr[i] != 0) {
- lose("free page not zero at %x", start_addr + i);
- }
- }
- } else {
- long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
- if (free_bytes > 0) {
- long *start_addr = (long *)((unsigned)page_address(page)
- + page_table[page].bytes_used);
- long size = free_bytes / N_WORD_BYTES;
- long i;
- for (i = 0; i < size; i++) {
- if (start_addr[i] != 0) {
- lose("free region not zero at %x", start_addr + i);
- }
- }
- }
- }
+ if (page_table[page].allocated == FREE_PAGE_FLAG) {
+ /* The whole page should be zero filled. */
+ long *start_addr = (long *)page_address(page);
+ long size = 1024;
+ long i;
+ for (i = 0; i < size; i++) {
+ if (start_addr[i] != 0) {
+ lose("free page not zero at %x", start_addr + i);
+ }
+ }
+ } else {
+ long free_bytes = PAGE_BYTES - page_table[page].bytes_used;
+ if (free_bytes > 0) {
+ long *start_addr = (long *)((unsigned)page_address(page)
+ + page_table[page].bytes_used);
+ long size = free_bytes / N_WORD_BYTES;
+ long i;
+ for (i = 0; i < size; i++) {
+ if (start_addr[i] != 0) {
+ lose("free region not zero at %x", start_addr + i);
+ }
+ }
+ }
+ }
}
}
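A stand-alone sketch of the zero-fill check above, for illustration: it reports the first nonzero word in a range that is supposed to be zero-filled. The demo_* names and the main() driver are invented here.

#include <stdio.h>
#include <string.h>

/* Return the index of the first nonzero word, or -1 if the whole range
 * is zero-filled, in the spirit of verify_zero_fill()'s per-page scan. */
static long demo_first_nonzero(const long *start, long nwords)
{
    long i;
    for (i = 0; i < nwords; i++)
        if (start[i] != 0)
            return i;
    return -1;
}

int main(void)
{
    long page[1024];
    memset(page, 0, sizeof page);
    printf("clean page: %ld\n", demo_first_nonzero(page, 1024));
    page[17] = 0x42;                       /* simulate a stray write */
    printf("dirty page: %ld\n", demo_first_nonzero(page, 1024));
    return 0;
}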
long i;
for (i = 0; i < NUM_GENERATIONS; i++)
- verify_generation(i);
+ verify_generation(i);
if (gencgc_enable_verify_zero_fill)
- verify_zero_fill();
+ verify_zero_fill();
}
\f
/* Write-protect all the dynamic boxed pages in the given generation. */
gc_assert(generation < NUM_GENERATIONS);
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated == BOXED_PAGE_FLAG)
- && (page_table[i].bytes_used != 0)
- && !page_table[i].dont_move
- && (page_table[i].gen == generation)) {
- void *page_start;
+ if ((page_table[i].allocated == BOXED_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0)
+ && !page_table[i].dont_move
+ && (page_table[i].gen == generation)) {
+ void *page_start;
- page_start = (void *)page_address(i);
+ page_start = (void *)page_address(i);
- os_protect(page_start,
- PAGE_BYTES,
- OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
+ os_protect(page_start,
+ PAGE_BYTES,
+ OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
- /* Note the page as protected in the page tables. */
- page_table[i].write_protected = 1;
- }
+ /* Note the page as protected in the page tables. */
+ page_table[i].write_protected = 1;
+ }
if (gencgc_verbose > 1) {
- FSHOW((stderr,
- "/write protected %d of %d pages in generation %d\n",
- count_write_protect_generation_pages(generation),
- count_generation_pages(generation),
- generation));
+ FSHOW((stderr,
+ "/write protected %d of %d pages in generation %d\n",
+ count_write_protect_generation_pages(generation),
+ count_generation_pages(generation),
+ generation));
}
}
* done. Set up this new generation. There should be no pages
* allocated to it yet. */
if (!raise) {
- gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
+ gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
}
/* Set the global src and dest. generations */
from_space = generation;
if (raise)
- new_space = generation+1;
+ new_space = generation+1;
else
- new_space = NUM_GENERATIONS;
+ new_space = NUM_GENERATIONS;
/* Change to a new space for allocation, resetting the alloc_start_page */
gc_alloc_generation = new_space;
/* Before any pointers are preserved, the dont_move flags on the
* pages need to be cleared. */
for (i = 0; i < last_free_page; i++)
- if(page_table[i].gen==from_space)
- page_table[i].dont_move = 0;
+ if(page_table[i].gen==from_space)
+ page_table[i].dont_move = 0;
/* Un-write-protect the old-space pages. This is essential for the
* promoted pages as they may contain pointers into the old-space
/* there are potentially two stacks for each thread: the main
* stack, which may contain Lisp pointers, and the alternate stack.
- * We don't ever run Lisp code on the altstack, but it may
+ * We don't ever run Lisp code on the altstack, but it may
* host a sigcontext with lisp objects in it */
/* what we need to do: (1) find the stack pointer for the main
* initiates GC. If you ever call GC from inside an altstack
* handler, you will lose. */
for_each_thread(th) {
- void **ptr;
- void **esp=(void **)-1;
+ void **ptr;
+ void **esp=(void **)-1;
#ifdef LISP_FEATURE_SB_THREAD
- long i,free;
- if(th==arch_os_get_current_thread()) {
- esp = (void **) &raise;
- } else {
- void **esp1;
- free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
- for(i=free-1;i>=0;i--) {
- os_context_t *c=th->interrupt_contexts[i];
- esp1 = (void **) *os_context_register_addr(c,reg_SP);
- if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
- if(esp1<esp) esp=esp1;
- for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
- preserve_pointer(*ptr);
- }
- }
- }
- }
+ long i,free;
+ if(th==arch_os_get_current_thread()) {
+ esp = (void **) &raise;
+ } else {
+ void **esp1;
+ free=fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th));
+ for(i=free-1;i>=0;i--) {
+ os_context_t *c=th->interrupt_contexts[i];
+ esp1 = (void **) *os_context_register_addr(c,reg_SP);
+ if(esp1>=th->control_stack_start&& esp1<th->control_stack_end){
+ if(esp1<esp) esp=esp1;
+ for(ptr = (void **)(c+1); ptr>=(void **)c; ptr--) {
+ preserve_pointer(*ptr);
+ }
+ }
+ }
+ }
#else
- esp = (void **) &raise;
+ esp = (void **) &raise;
#endif
- for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
- preserve_pointer(*ptr);
- }
+ for (ptr = (void **)th->control_stack_end; ptr > esp; ptr--) {
+ preserve_pointer(*ptr);
+ }
}
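The loop above walks each thread's control stack from the top down to the saved stack pointer and hands every word to preserve_pointer(), treating it as a potential Lisp pointer. A simplified single-threaded sketch of that conservative scan; the demo_* names, the fake heap range, and the counting stand-in for preserve_pointer() are invented for illustration.

#include <stdio.h>

/* Invented stand-in for preserve_pointer(): counts words that fall in
 * a demo "heap" range instead of pinning pages. */
static unsigned long demo_heap_lo = 0x10000000UL;
static unsigned long demo_heap_hi = 0x20000000UL;
static long demo_preserved;

static void demo_preserve_pointer(void *p)
{
    unsigned long v = (unsigned long)p;
    if (v >= demo_heap_lo && v < demo_heap_hi)
        demo_preserved++;
}

/* Scan every word between the stack top (one-past-the-end) and the
 * current stack pointer, in the spirit of the for_each_thread loop. */
static void demo_scan_stack(void **stack_end, void **esp)
{
    void **ptr;
    for (ptr = stack_end - 1; ptr >= esp; ptr--)
        demo_preserve_pointer(*ptr);
}

int main(void)
{
    void *fake_stack[8] = {
        (void *)0x1UL, (void *)0x10000040UL, (void *)0x0UL,
        (void *)0x1F000000UL, (void *)0x7UL, (void *)0x30000000UL,
        (void *)0x10000008UL, (void *)0x2UL,
    };
    /* Pretend the stack grows down from the end of the array. */
    demo_scan_stack(&fake_stack[8], &fake_stack[0]);
    printf("words that looked like heap pointers: %ld\n", demo_preserved);
    return 0;
}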
#ifdef QSHOW
if (gencgc_verbose > 1) {
- long num_dont_move_pages = count_dont_move_pages();
- fprintf(stderr,
- "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
- num_dont_move_pages,
- num_dont_move_pages * PAGE_BYTES);
+ long num_dont_move_pages = count_dont_move_pages();
+ fprintf(stderr,
+ "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
+ num_dont_move_pages,
+ num_dont_move_pages * PAGE_BYTES);
}
#endif
/* Scavenge the Lisp functions of the interrupt handlers, taking
* care to avoid SIG_DFL and SIG_IGN. */
for_each_thread(th) {
- struct interrupt_data *data=th->interrupt_data;
+ struct interrupt_data *data=th->interrupt_data;
for (i = 0; i < NSIG; i++) {
- union interrupt_handler handler = data->interrupt_handlers[i];
- if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
- !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- scavenge((lispobj *)(data->interrupt_handlers + i), 1);
- }
- }
+ union interrupt_handler handler = data->interrupt_handlers[i];
+ if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
+ !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
+ scavenge((lispobj *)(data->interrupt_handlers + i), 1);
+ }
+ }
}
/* Scavenge the binding stacks. */
{
struct thread *th;
for_each_thread(th) {
- long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
- th->binding_stack_start;
- scavenge((lispobj *) th->binding_stack_start,len);
+ long len= (lispobj *)SymbolValue(BINDING_STACK_POINTER,th) -
+ th->binding_stack_start;
+ scavenge((lispobj *) th->binding_stack_start,len);
#ifdef LISP_FEATURE_SB_THREAD
- /* do the tls as well */
- len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
- (sizeof (struct thread))/(sizeof (lispobj));
+ /* do the tls as well */
+ len=fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ (sizeof (struct thread))/(sizeof (lispobj));
scavenge((lispobj *) (th+1),len);
#endif
- }
+ }
}
/* The original CMU CL code had scavenge-read-only-space code
* please submit a patch. */
#if 0
if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
- unsigned long read_only_space_size =
- (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
- (lispobj*)READ_ONLY_SPACE_START;
- FSHOW((stderr,
- "/scavenge read only space: %d bytes\n",
- read_only_space_size * sizeof(lispobj)));
- scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
+ unsigned long read_only_space_size =
+ (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
+ (lispobj*)READ_ONLY_SPACE_START;
+ FSHOW((stderr,
+ "/scavenge read only space: %d bytes\n",
+ read_only_space_size * sizeof(lispobj)));
+ scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
}
#endif
/* Scavenge static space. */
static_space_size =
- (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
- (lispobj *)STATIC_SPACE_START;
+ (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
+ (lispobj *)STATIC_SPACE_START;
if (gencgc_verbose > 1) {
- FSHOW((stderr,
- "/scavenge static space: %d bytes\n",
- static_space_size * sizeof(lispobj)));
+ FSHOW((stderr,
+ "/scavenge static space: %d bytes\n",
+ static_space_size * sizeof(lispobj)));
}
scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
* scavenged. The new_space generation needs special handling as
* objects may be moved in - it is handled separately below. */
for (i = 0; i < NUM_GENERATIONS; i++) {
- if ((i != generation) && (i != new_space)) {
- scavenge_generation(i);
- }
+ if ((i != generation) && (i != new_space)) {
+ scavenge_generation(i);
+ }
}
/* Finally scavenge the new_space generation. Keep going until no
/* As a check re-scavenge the newspace once; no new objects should
* be found. */
{
- long old_bytes_allocated = bytes_allocated;
- long bytes_allocated;
+ long old_bytes_allocated = bytes_allocated;
+ long bytes_allocated;
- /* Start with a full scavenge. */
- scavenge_newspace_generation_one_scan(new_space);
+ /* Start with a full scavenge. */
+ scavenge_newspace_generation_one_scan(new_space);
- /* Flush the current regions, updating the tables. */
- gc_alloc_update_all_page_tables();
+ /* Flush the current regions, updating the tables. */
+ gc_alloc_update_all_page_tables();
- bytes_allocated = bytes_allocated - old_bytes_allocated;
+ bytes_allocated = bytes_allocated - old_bytes_allocated;
- if (bytes_allocated != 0) {
- lose("Rescan of new_space allocated %d more bytes.",
- bytes_allocated);
- }
+ if (bytes_allocated != 0) {
+ lose("Rescan of new_space allocated %d more bytes.",
+ bytes_allocated);
+ }
}
#endif
/* If the GC is not raising the age then lower the generation back
* to its normal generation number */
if (!raise) {
- for (i = 0; i < last_free_page; i++)
- if ((page_table[i].bytes_used != 0)
- && (page_table[i].gen == NUM_GENERATIONS))
- page_table[i].gen = generation;
- gc_assert(generations[generation].bytes_allocated == 0);
- generations[generation].bytes_allocated =
- generations[NUM_GENERATIONS].bytes_allocated;
- generations[NUM_GENERATIONS].bytes_allocated = 0;
+ for (i = 0; i < last_free_page; i++)
+ if ((page_table[i].bytes_used != 0)
+ && (page_table[i].gen == NUM_GENERATIONS))
+ page_table[i].gen = generation;
+ gc_assert(generations[generation].bytes_allocated == 0);
+ generations[generation].bytes_allocated =
+ generations[NUM_GENERATIONS].bytes_allocated;
+ generations[NUM_GENERATIONS].bytes_allocated = 0;
}
/* Reset the alloc_start_page for generation. */
generations[generation].alloc_large_unboxed_start_page = 0;
if (generation >= verify_gens) {
- if (gencgc_verbose)
- SHOW("verifying");
- verify_gc();
- verify_dynamic_space();
+ if (gencgc_verbose)
+ SHOW("verifying");
+ verify_gc();
+ verify_dynamic_space();
}
/* Set the new gc trigger for the GCed generation. */
generations[generation].gc_trigger =
- generations[generation].bytes_allocated
- + generations[generation].bytes_consed_between_gc;
+ generations[generation].bytes_allocated
+ + generations[generation].bytes_consed_between_gc;
if (raise)
- generations[generation].num_gc = 0;
+ generations[generation].num_gc = 0;
else
- ++generations[generation].num_gc;
+ ++generations[generation].num_gc;
}
/* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
long i;
for (i = 0; i < last_free_page; i++)
- if ((page_table[i].allocated != FREE_PAGE_FLAG)
- && (page_table[i].bytes_used != 0))
- last_page = i;
+ if ((page_table[i].allocated != FREE_PAGE_FLAG)
+ && (page_table[i].bytes_used != 0))
+ last_page = i;
last_free_page = last_page+1;
SetSymbolValue(ALLOCATION_POINTER,
- (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
+ (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
return 0; /* dummy value: return something ... */
}
*
* We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
* last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
-
+
void
collect_garbage(unsigned last_gen)
{
FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
if (last_gen > NUM_GENERATIONS) {
- FSHOW((stderr,
- "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
- last_gen));
- last_gen = 0;
+ FSHOW((stderr,
+ "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
+ last_gen));
+ last_gen = 0;
}
/* Flush the alloc regions updating the tables. */
/* Verify the new objects created by Lisp code. */
if (pre_verify_gen_0) {
- FSHOW((stderr, "pre-checking generation 0\n"));
- verify_generation(0);
+ FSHOW((stderr, "pre-checking generation 0\n"));
+ verify_generation(0);
}
if (gencgc_verbose > 1)
- print_generation_stats(0);
+ print_generation_stats(0);
do {
- /* Collect the generation. */
-
- if (gen >= gencgc_oldest_gen_to_gc) {
- /* Never raise the oldest generation. */
- raise = 0;
- } else {
- raise =
- (gen < last_gen)
- || (generations[gen].num_gc >= generations[gen].trigger_age);
- }
-
- if (gencgc_verbose > 1) {
- FSHOW((stderr,
- "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
- gen,
- raise,
- generations[gen].bytes_allocated,
- generations[gen].gc_trigger,
- generations[gen].num_gc));
- }
-
- /* If an older generation is being filled, then update its
- * memory age. */
- if (raise == 1) {
- generations[gen+1].cum_sum_bytes_allocated +=
- generations[gen+1].bytes_allocated;
- }
-
- garbage_collect_generation(gen, raise);
-
- /* Reset the memory age cum_sum. */
- generations[gen].cum_sum_bytes_allocated = 0;
-
- if (gencgc_verbose > 1) {
- FSHOW((stderr, "GC of generation %d finished:\n", gen));
- print_generation_stats(0);
- }
-
- gen++;
+ /* Collect the generation. */
+
+ if (gen >= gencgc_oldest_gen_to_gc) {
+ /* Never raise the oldest generation. */
+ raise = 0;
+ } else {
+ raise =
+ (gen < last_gen)
+ || (generations[gen].num_gc >= generations[gen].trigger_age);
+ }
+
+ if (gencgc_verbose > 1) {
+ FSHOW((stderr,
+ "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
+ gen,
+ raise,
+ generations[gen].bytes_allocated,
+ generations[gen].gc_trigger,
+ generations[gen].num_gc));
+ }
+
+ /* If an older generation is being filled, then update its
+ * memory age. */
+ if (raise == 1) {
+ generations[gen+1].cum_sum_bytes_allocated +=
+ generations[gen+1].bytes_allocated;
+ }
+
+ garbage_collect_generation(gen, raise);
+
+ /* Reset the memory age cum_sum. */
+ generations[gen].cum_sum_bytes_allocated = 0;
+
+ if (gencgc_verbose > 1) {
+ FSHOW((stderr, "GC of generation %d finished:\n", gen));
+ print_generation_stats(0);
+ }
+
+ gen++;
} while ((gen <= gencgc_oldest_gen_to_gc)
- && ((gen < last_gen)
- || ((gen <= gencgc_oldest_gen_to_gc)
- && raise
- && (generations[gen].bytes_allocated
- > generations[gen].gc_trigger)
- && (gen_av_mem_age(gen)
- > generations[gen].min_av_mem_age))));
+ && ((gen < last_gen)
+ || ((gen <= gencgc_oldest_gen_to_gc)
+ && raise
+ && (generations[gen].bytes_allocated
+ > generations[gen].gc_trigger)
+ && (gen_av_mem_age(gen)
+ > generations[gen].min_av_mem_age))));
/* Now if gen-1 was raised all generations before gen are empty.
* If it wasn't raised then all generations before gen-1 are empty.
* generations are GCed only the pages which have been written
* need scanning. */
if (raise)
- gen_to_wp = gen;
+ gen_to_wp = gen;
else
- gen_to_wp = gen - 1;
+ gen_to_wp = gen - 1;
/* There's not much point in WPing pages in generation 0 as it is
* never scavenged (except promoted pages). */
if ((gen_to_wp > 0) && enable_page_protection) {
- /* Check that they are all empty. */
- for (i = 0; i < gen_to_wp; i++) {
- if (generations[i].bytes_allocated)
- lose("trying to write-protect gen. %d when gen. %d nonempty",
- gen_to_wp, i);
- }
- write_protect_generation_pages(gen_to_wp);
+ /* Check that they are all empty. */
+ for (i = 0; i < gen_to_wp; i++) {
+ if (generations[i].bytes_allocated)
+ lose("trying to write-protect gen. %d when gen. %d nonempty",
+ gen_to_wp, i);
+ }
+ write_protect_generation_pages(gen_to_wp);
}
/* Set gc_alloc() back to generation 0. The current regions should
update_x86_dynamic_space_free_pointer();
auto_gc_trigger = bytes_allocated + bytes_consed_between_gcs;
if(gencgc_verbose)
- fprintf(stderr,"Next gc when %ld bytes have been consed\n",
- auto_gc_trigger);
+ fprintf(stderr,"Next gc when %ld bytes have been consed\n",
+ auto_gc_trigger);
SHOW("returning from collect_garbage");
}
long page;
if (gencgc_verbose > 1)
- SHOW("entering gc_free_heap");
+ SHOW("entering gc_free_heap");
for (page = 0; page < NUM_PAGES; page++) {
- /* Skip free pages which should already be zero filled. */
- if (page_table[page].allocated != FREE_PAGE_FLAG) {
- void *page_start, *addr;
-
- /* Mark the page free. The other slots are assumed invalid
- * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
- * should not be write-protected -- except that the
- * generation is used for the current region but it sets
- * that up. */
- page_table[page].allocated = FREE_PAGE_FLAG;
- page_table[page].bytes_used = 0;
-
- /* Zero the page. */
- page_start = (void *)page_address(page);
-
- /* First, remove any write-protection. */
- os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[page].write_protected = 0;
-
- os_invalidate(page_start,PAGE_BYTES);
- addr = os_validate(page_start,PAGE_BYTES);
- if (addr == NULL || addr != page_start) {
- lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
- page_start,
- addr);
- }
- } else if (gencgc_zero_check_during_free_heap) {
- /* Double-check that the page is zero filled. */
- long *page_start, i;
- gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
- gc_assert(page_table[page].bytes_used == 0);
- page_start = (long *)page_address(page);
- for (i=0; i<1024; i++) {
- if (page_start[i] != 0) {
- lose("free region not zero at %x", page_start + i);
- }
- }
- }
+ /* Skip free pages which should already be zero filled. */
+ if (page_table[page].allocated != FREE_PAGE_FLAG) {
+ void *page_start, *addr;
+
+ /* Mark the page free. The other slots are assumed invalid
+ * when it is a FREE_PAGE_FLAG and bytes_used is 0 and it
+ * should not be write-protected -- except that the
+ * generation is used for the current region but it sets
+ * that up. */
+ page_table[page].allocated = FREE_PAGE_FLAG;
+ page_table[page].bytes_used = 0;
+
+ /* Zero the page. */
+ page_start = (void *)page_address(page);
+
+ /* First, remove any write-protection. */
+ os_protect(page_start, PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page].write_protected = 0;
+
+ os_invalidate(page_start,PAGE_BYTES);
+ addr = os_validate(page_start,PAGE_BYTES);
+ if (addr == NULL || addr != page_start) {
+ lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
+ page_start,
+ addr);
+ }
+ } else if (gencgc_zero_check_during_free_heap) {
+ /* Double-check that the page is zero filled. */
+ long *page_start, i;
+ gc_assert(page_table[page].allocated == FREE_PAGE_FLAG);
+ gc_assert(page_table[page].bytes_used == 0);
+ page_start = (long *)page_address(page);
+ for (i=0; i<1024; i++) {
+ if (page_start[i] != 0) {
+ lose("free region not zero at %x", page_start + i);
+ }
+ }
+ }
}
bytes_allocated = 0;
/* Initialize the generations. */
for (page = 0; page < NUM_GENERATIONS; page++) {
- generations[page].alloc_start_page = 0;
- generations[page].alloc_unboxed_start_page = 0;
- generations[page].alloc_large_start_page = 0;
- generations[page].alloc_large_unboxed_start_page = 0;
- generations[page].bytes_allocated = 0;
- generations[page].gc_trigger = 2000000;
- generations[page].num_gc = 0;
- generations[page].cum_sum_bytes_allocated = 0;
+ generations[page].alloc_start_page = 0;
+ generations[page].alloc_unboxed_start_page = 0;
+ generations[page].alloc_large_start_page = 0;
+ generations[page].alloc_large_unboxed_start_page = 0;
+ generations[page].bytes_allocated = 0;
+ generations[page].gc_trigger = 2000000;
+ generations[page].num_gc = 0;
+ generations[page].cum_sum_bytes_allocated = 0;
}
if (gencgc_verbose > 1)
- print_generation_stats(0);
+ print_generation_stats(0);
/* Initialize gc_alloc(). */
gc_alloc_generation = 0;
SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base),0);
if (verify_after_free_heap) {
- /* Check whether purify has left any bad pointers. */
- if (gencgc_verbose)
- SHOW("checking after free_heap\n");
- verify_gc();
+ /* Check whether purify has left any bad pointers. */
+ if (gencgc_verbose)
+ SHOW("checking after free_heap\n");
+ verify_gc();
}
}
\f
/* Initialize each page structure. */
for (i = 0; i < NUM_PAGES; i++) {
- /* Initialize all pages as free. */
- page_table[i].allocated = FREE_PAGE_FLAG;
- page_table[i].bytes_used = 0;
+ /* Initialize all pages as free. */
+ page_table[i].allocated = FREE_PAGE_FLAG;
+ page_table[i].bytes_used = 0;
- /* Pages are not write-protected at startup. */
- page_table[i].write_protected = 0;
+ /* Pages are not write-protected at startup. */
+ page_table[i].write_protected = 0;
}
bytes_allocated = 0;
*
* FIXME: very similar to code in gc_free_heap(), should be shared */
for (i = 0; i < NUM_GENERATIONS; i++) {
- generations[i].alloc_start_page = 0;
- generations[i].alloc_unboxed_start_page = 0;
- generations[i].alloc_large_start_page = 0;
- generations[i].alloc_large_unboxed_start_page = 0;
- generations[i].bytes_allocated = 0;
- generations[i].gc_trigger = 2000000;
- generations[i].num_gc = 0;
- generations[i].cum_sum_bytes_allocated = 0;
- /* the tune-able parameters */
- generations[i].bytes_consed_between_gc = 2000000;
- generations[i].trigger_age = 1;
- generations[i].min_av_mem_age = 0.75;
+ generations[i].alloc_start_page = 0;
+ generations[i].alloc_unboxed_start_page = 0;
+ generations[i].alloc_large_start_page = 0;
+ generations[i].alloc_large_unboxed_start_page = 0;
+ generations[i].bytes_allocated = 0;
+ generations[i].gc_trigger = 2000000;
+ generations[i].num_gc = 0;
+ generations[i].cum_sum_bytes_allocated = 0;
+ /* the tune-able parameters */
+ generations[i].bytes_consed_between_gc = 2000000;
+ generations[i].trigger_age = 1;
+ generations[i].min_av_mem_age = 0.75;
}
/* Initialize gc_alloc. */
lispobj *prev=(lispobj *)page_address(page);
do {
- lispobj *first,*ptr= (lispobj *)page_address(page);
- page_table[page].allocated = BOXED_PAGE_FLAG;
- page_table[page].gen = 0;
- page_table[page].bytes_used = PAGE_BYTES;
- page_table[page].large_object = 0;
-
- first=gc_search_space(prev,(ptr+2)-prev,ptr);
- if(ptr == first) prev=ptr;
- page_table[page].first_object_offset =
- (void *)prev - page_address(page);
- page++;
+ lispobj *first,*ptr= (lispobj *)page_address(page);
+ page_table[page].allocated = BOXED_PAGE_FLAG;
+ page_table[page].gen = 0;
+ page_table[page].bytes_used = PAGE_BYTES;
+ page_table[page].large_object = 0;
+
+ first=gc_search_space(prev,(ptr+2)-prev,ptr);
+ if(ptr == first) prev=ptr;
+ page_table[page].first_object_offset =
+ (void *)prev - page_address(page);
+ page++;
} while (page_address(page) < alloc_ptr);
generations[0].bytes_allocated = PAGE_BYTES*page;
struct thread *th=arch_os_get_current_thread();
struct alloc_region *region=
#ifdef LISP_FEATURE_SB_THREAD
- th ? &(th->alloc_region) : &boxed_region;
+ th ? &(th->alloc_region) : &boxed_region;
#else
- &boxed_region;
+ &boxed_region;
#endif
void *new_obj;
void *new_free_pointer;
gc_assert(nbytes>0);
/* Check for alignment allocation problems. */
gc_assert((((unsigned)region->free_pointer & LOWTAG_MASK) == 0)
- && ((nbytes & LOWTAG_MASK) == 0));
+ && ((nbytes & LOWTAG_MASK) == 0));
#if 0
if(all_threads)
- /* there are a few places in the C code that allocate data in the
- * heap before Lisp starts. This is before interrupts are enabled,
- * so we don't need to check for pseudo-atomic */
+ /* there are a few places in the C code that allocate data in the
+ * heap before Lisp starts. This is before interrupts are enabled,
+ * so we don't need to check for pseudo-atomic */
#ifdef LISP_FEATURE_SB_THREAD
- if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
- register u32 fs;
- fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
- th,th->os_thread);
- __asm__("movl %fs,%0" : "=r" (fs) : );
- fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
- debug_get_fs(),th->tls_cookie);
- lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
- }
+ if(!SymbolValue(PSEUDO_ATOMIC_ATOMIC,th)) {
+ register u32 fs;
+ fprintf(stderr, "fatal error in thread 0x%x, tid=%ld\n",
+ th,th->os_thread);
+ __asm__("movl %fs,%0" : "=r" (fs) : );
+ fprintf(stderr, "fs is %x, th->tls_cookie=%x \n",
+ debug_get_fs(),th->tls_cookie);
+ lose("If you see this message before 2004.01.31, mail details to sbcl-devel\n");
+ }
#else
gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC,th));
#endif
#endif
-
+
/* maybe we can do this quickly ... */
new_free_pointer = region->free_pointer + nbytes;
if (new_free_pointer <= region->end_addr) {
- new_obj = (void*)(region->free_pointer);
- region->free_pointer = new_free_pointer;
- return(new_obj); /* yup */
+ new_obj = (void*)(region->free_pointer);
+ region->free_pointer = new_free_pointer;
+ return(new_obj); /* yup */
}
-
- /* we have to go the long way around, it seems. Check whether
+
+ /* we have to go the long way around, it seems. Check whether
* we should GC in the near future
*/
if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
thread_sigmask(SIG_BLOCK,&new_mask,&old_mask);
if(!data->pending_handler) {
- if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0))
- lose("Not in atomic: %d.\n",
+ if(!maybe_defer_handler(interrupt_maybe_gc_int,data,0,0,0))
+ lose("Not in atomic: %d.\n",
SymbolValue(PSEUDO_ATOMIC_ATOMIC,thread));
/* Leave the signals blocked just as if it was
* deferred the normal way and set the
#ifdef QSHOW_SIGNALS
FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
- fault_addr, page_index));
+ fault_addr, page_index));
#endif
/* Check whether the fault is within the dynamic space. */
if (page_index == (-1)) {
- /* It can be helpful to be able to put a breakpoint on this
- * case to help diagnose low-level problems. */
- unhandled_sigmemoryfault();
+ /* It can be helpful to be able to put a breakpoint on this
+ * case to help diagnose low-level problems. */
+ unhandled_sigmemoryfault();
- /* not within the dynamic space -- not our responsibility */
- return 0;
+ /* not within the dynamic space -- not our responsibility */
+ return 0;
} else {
- if (page_table[page_index].write_protected) {
- /* Unprotect the page. */
- os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
- page_table[page_index].write_protected_cleared = 1;
- page_table[page_index].write_protected = 0;
- } else {
- /* The only acceptable reason for this signal on a heap
- * access is that GENCGC write-protected the page.
- * However, if two CPUs hit a wp page near-simultaneously,
- * we had better not have the second one lose here if it
- * does this test after the first one has already set wp=0
- */
- if(page_table[page_index].write_protected_cleared != 1)
- lose("fault in heap page not marked as write-protected");
- }
- /* Don't worry, we can handle it. */
- return 1;
+ if (page_table[page_index].write_protected) {
+ /* Unprotect the page. */
+ os_protect(page_address(page_index), PAGE_BYTES, OS_VM_PROT_ALL);
+ page_table[page_index].write_protected_cleared = 1;
+ page_table[page_index].write_protected = 0;
+ } else {
+ /* The only acceptable reason for this signal on a heap
+ * access is that GENCGC write-protected the page.
+ * However, if two CPUs hit a wp page near-simultaneously,
+ * we had better not have the second one lose here if it
+ * does this test after the first one has already set wp=0
+ */
+ if(page_table[page_index].write_protected_cleared != 1)
+ lose("fault in heap page not marked as write-protected");
+ }
+ /* Don't worry, we can handle it. */
+ return 1;
}
}
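/* The code above is the fault-handling half of the usual mprotect-based
 * write barrier: the GC write-protects pages it has finished with, the
 * first store into such a page traps, and the trap handler marks the page
 * dirty and drops the protection so the store can be retried.  Below is a
 * minimal, self-contained sketch of that idiom in portable POSIX C; the
 * names (card, card_dirty, wp_handler) are illustrative only and are not
 * part of this runtime. */

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *card;                      /* one illustrative "heap page" */
static volatile sig_atomic_t card_dirty;

static void wp_handler(int sig, siginfo_t *info, void *ctx)
{
    long pagesize = sysconf(_SC_PAGESIZE);
    char *addr = (char *)info->si_addr;
    (void)sig; (void)ctx;
    if (card <= addr && addr < card + pagesize) {
        card_dirty = 1;                                  /* remember: needs rescanning */
        mprotect(card, pagesize, PROT_READ|PROT_WRITE);  /* let the store proceed */
        return;                                          /* faulting store is retried */
    }
    abort();                                             /* not our fault address */
}

int main(void)
{
    long pagesize = sysconf(_SC_PAGESIZE);
    struct sigaction sa;

    memset(&sa, 0, sizeof sa);
    sa.sa_sigaction = wp_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, 0);

    card = mmap(0, pagesize, PROT_READ|PROT_WRITE,
                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    mprotect(card, pagesize, PROT_READ);  /* "write-protect" it, as the GC does */
    card[0] = 1;                          /* first write faults; handler clears WP */
    return card_dirty ? 0 : 1;
}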
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
{
/* Flush the alloc regions updating the tables. */
struct thread *th;
- for_each_thread(th)
+ for_each_thread(th)
gc_alloc_update_page_tables(0, &th->alloc_region);
gc_alloc_update_page_tables(1, &unboxed_region);
gc_alloc_update_page_tables(0, &boxed_region);
}
-void
+void
gc_set_region_empty(struct alloc_region *region)
{
region->first_page = 0;
* validate() and coreparse(). */
current_control_frame_pointer = (lispobj *)0;
-#ifndef LISP_FEATURE_GENCGC
+#ifndef LISP_FEATURE_GENCGC
/* no GC trigger yet */
current_auto_gc_trigger = NULL;
#endif
foreign_function_call_active = 1;
#if defined(LISP_FEATURE_SB_THREAD)
pthread_key_create(&specials,0);
-#endif
+#endif
}
# ifdef LISP_FEATURE_MIPS
# ifdef __linux__
-# define EXTERN(name,bytes) .globl name
+# define EXTERN(name,bytes) .globl name
# else
# define EXTERN(name,bytes) .extern name bytes
# endif
/**/
# ifdef LISP_FEATURE_ALPHA
# ifdef __linux__
-# define EXTERN(name,bytes) .globl name
+# define EXTERN(name,bytes) .globl name
# endif
# endif
/**/
# ifdef LISP_FEATURE_DARWIN
# define EXTERN(name,bytes) .globl _/**/name
# else
-# define EXTERN(name,bytes) .globl name
+# define EXTERN(name,bytes) .globl name
# endif
# endif
/**/
state = (struct save_state *)(&(scp->sc_sl.sl_ss));
if (state == NULL)
- return NULL;
+ return NULL;
/* Check the instruction address first. */
addr = (os_vm_address_t)((unsigned long)scp->sc_pcoq_head & ~3);
if (addr < (os_vm_address_t)0x1000)
- return addr;
+ return addr;
/* Otherwise, it must have been a data fault. */
return (os_vm_address_t)state->ss_cr21;
state = (struct hp800_thread_state *)(scp->sc_ap);
if (state == NULL)
- return NULL;
+ return NULL;
/* Check the instruction address first. */
addr = scp->sc_pcoqh & ~3;
if (addr < 0x1000)
- return addr;
+ return addr;
/* Otherwise, it must have been a data fault. */
return state->cr21;
/* state so that we will continue as if nothing happened. */
if (NextPc == NULL)
- lose("SingleStepBreakpoint trap at strange time.");
+ lose("SingleStepBreakpoint trap at strange time.");
if ((SC_PC(scp)&~3) == (unsigned long)SingleStepTraps) {
- /* The next instruction was not nullified. */
- SC_PC(scp) = NextPc;
- if ((SC_NPC(scp)&~3) == (unsigned long)SingleStepTraps + 4) {
- /* The instruction we just stepped over was not a branch, so */
- /* we need to fix it up. If it was a branch, it will point to */
- /* the correct place. */
- SC_NPC(scp) = NextPc + 4;
- }
+ /* The next instruction was not nullified. */
+ SC_PC(scp) = NextPc;
+ if ((SC_NPC(scp)&~3) == (unsigned long)SingleStepTraps + 4) {
+ /* The instruction we just stepped over was not a branch, so */
+ /* we need to fix it up. If it was a branch, it will point to */
+ /* the correct place. */
+ SC_NPC(scp) = NextPc + 4;
+ }
}
else {
- /* The next instruction was nullified, so we want to skip it. */
- SC_PC(scp) = NextPc + 4;
- SC_NPC(scp) = NextPc + 8;
+ /* The next instruction was nullified, so we want to skip it. */
+ SC_PC(scp) = NextPc + 4;
+ SC_NPC(scp) = NextPc + 8;
}
NextPc = NULL;
if (BreakpointAddr) {
- *BreakpointAddr = trap_Breakpoint;
- os_flush_icache((os_vm_address_t)BreakpointAddr,
- sizeof(unsigned long));
- BreakpointAddr = NULL;
+ *BreakpointAddr = trap_Breakpoint;
+ os_flush_icache((os_vm_address_t)BreakpointAddr,
+ sizeof(unsigned long));
+ BreakpointAddr = NULL;
}
}
#endif
#if 0
printf("sigtrap_handler, pc=0x%08x, alloc=0x%08x\n", scp->sc_pcoqh,
- SC_REG(scp,reg_ALLOC));
+ SC_REG(scp,reg_ALLOC));
#endif
bad_inst = *(unsigned long *)(*os_context_pc_addr(context) & ~3);
if (bad_inst & 0xfc001fe0)
- interrupt_handle_now(signal, siginfo, context);
+ interrupt_handle_now(signal, siginfo, context);
else {
- int im5 = bad_inst & 0x1f;
-
- switch (im5) {
- case trap_Halt:
- fake_foreign_function_call(context);
- lose("%%primitive halt called; the party is over.\n");
-
- case trap_PendingInterrupt:
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- break;
-
- case trap_Error:
- case trap_Cerror:
- interrupt_internal_error(signal, siginfo, context, im5==trap_Cerror);
- break;
-
- case trap_Breakpoint:
- /*sigsetmask(scp->sc_mask); */
- handle_breakpoint(signal, siginfo, context);
- break;
-
- case trap_FunEndBreakpoint:
- /*sigsetmask(scp->sc_mask); */
- {
- unsigned long pc;
- pc = (unsigned long)
- handle_fun_end_breakpoint(signal, siginfo, context);
- *os_context_pc_addr(context) = pc;
- *os_context_npc_addr(context) = pc + 4;
- }
- break;
-
- case trap_SingleStepBreakpoint:
- /* Uh, FIXME */
+ int im5 = bad_inst & 0x1f;
+
+ switch (im5) {
+ case trap_Halt:
+ fake_foreign_function_call(context);
+ lose("%%primitive halt called; the party is over.\n");
+
+ case trap_PendingInterrupt:
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ break;
+
+ case trap_Error:
+ case trap_Cerror:
+ interrupt_internal_error(signal, siginfo, context, im5==trap_Cerror);
+ break;
+
+ case trap_Breakpoint:
+ /*sigsetmask(scp->sc_mask); */
+ handle_breakpoint(signal, siginfo, context);
+ break;
+
+ case trap_FunEndBreakpoint:
+ /*sigsetmask(scp->sc_mask); */
+ {
+ unsigned long pc;
+ pc = (unsigned long)
+ handle_fun_end_breakpoint(signal, siginfo, context);
+ *os_context_pc_addr(context) = pc;
+ *os_context_npc_addr(context) = pc + 4;
+ }
+ break;
+
+ case trap_SingleStepBreakpoint:
+ /* Uh, FIXME */
#ifdef hpux
- restore_breakpoint(context);
+ restore_breakpoint(context);
#endif
- break;
-
- default:
- interrupt_handle_now(signal, siginfo, context);
- break;
- }
+ break;
+
+ default:
+ interrupt_handle_now(signal, siginfo, context);
+ break;
+ }
}
}
#if 0
printf("sigfpe_handler, pc=0x%08x, alloc=0x%08x\n", scp->sc_pcoqh,
- SC_REG(scp,reg_ALLOC));
+ SC_REG(scp,reg_ALLOC));
#endif
switch (siginfo->si_code) {
case FPE_INTOVF: /*I_OVFLO: */
- badinst = *(unsigned long *)(*os_context_pc_addr(context) & ~3);
- opcode = badinst >> 26;
-
- if (opcode == 2) {
- /* reg/reg inst. */
- r1 = (badinst >> 16) & 0x1f;
- op1 = fixnum_value(*os_context_register_addr(context, r1));
- r2 = (badinst >> 21) & 0x1f;
- op2 = fixnum_value(*os_context_register_addr(context, r2));
- t = badinst & 0x1f;
-
- switch ((badinst >> 5) & 0x7f) {
- case 0x70:
- /* Add and trap on overflow. */
- res = op1 + op2;
- break;
-
- case 0x60:
- /* Subtract and trap on overflow. */
- res = op1 - op2;
- break;
-
- default:
- goto not_interesting;
- }
- }
- else if ((opcode & 0x37) == 0x25 && (badinst & (1<<11))) {
- /* Add or subtract immediate. */
- op1 = ((badinst >> 3) & 0xff) | ((-badinst&1)<<8);
- r2 = (badinst >> 16) & 0x1f;
- op2 = fixnum_value(*os_context_register_addr(context, r1));
- t = (badinst >> 21) & 0x1f;
- if (opcode == 0x2d)
- res = op1 + op2;
- else
- res = op1 - op2;
- }
- else
- goto not_interesting;
-
- /* ?? What happens here if we hit the end of dynamic space? */
+ badinst = *(unsigned long *)(*os_context_pc_addr(context) & ~3);
+ opcode = badinst >> 26;
+
+ if (opcode == 2) {
+ /* reg/reg inst. */
+ r1 = (badinst >> 16) & 0x1f;
+ op1 = fixnum_value(*os_context_register_addr(context, r1));
+ r2 = (badinst >> 21) & 0x1f;
+ op2 = fixnum_value(*os_context_register_addr(context, r2));
+ t = badinst & 0x1f;
+
+ switch ((badinst >> 5) & 0x7f) {
+ case 0x70:
+ /* Add and trap on overflow. */
+ res = op1 + op2;
+ break;
+
+ case 0x60:
+ /* Subtract and trap on overflow. */
+ res = op1 - op2;
+ break;
+
+ default:
+ goto not_interesting;
+ }
+ }
+ else if ((opcode & 0x37) == 0x25 && (badinst & (1<<11))) {
+ /* Add or subtract immediate. */
+ op1 = ((badinst >> 3) & 0xff) | ((-badinst&1)<<8);
+ r2 = (badinst >> 16) & 0x1f;
+            op2 = fixnum_value(*os_context_register_addr(context, r2));
+ t = (badinst >> 21) & 0x1f;
+ if (opcode == 0x2d)
+ res = op1 + op2;
+ else
+ res = op1 - op2;
+ }
+ else
+ goto not_interesting;
+
+ /* ?? What happens here if we hit the end of dynamic space? */
dynamic_space_free_pointer = (lispobj *) *os_context_register_addr(context, reg_ALLOC);
- *os_context_register_addr(context, t) = alloc_number(res);
- *os_context_register_addr(context, reg_ALLOC)
- = (unsigned long) dynamic_space_free_pointer;
- arch_skip_instruction(context);
-
- break;
-
+ *os_context_register_addr(context, t) = alloc_number(res);
+ *os_context_register_addr(context, reg_ALLOC)
+ = (unsigned long) dynamic_space_free_pointer;
+ arch_skip_instruction(context);
+
+ break;
+
case 0: /* I_COND: ?? Maybe tagged add?? FIXME */
- badinst = *(unsigned long *)(*os_context_pc_addr(context) & ~3);
- if ((badinst&0xfffff800) == (0xb000e000|reg_ALLOC<<21|reg_ALLOC<<16)) {
- /* It is an ADDIT,OD i,ALLOC,ALLOC instruction that trapped. */
- /* That means that it is the end of a pseudo-atomic. So do the */
- /* add stripping off the pseudo-atomic-interrupted bit, and then */
- /* tell the machine-independent code to process the pseudo- */
- /* atomic. */
- int immed = (badinst>>1)&0x3ff;
- if (badinst & 1)
- immed |= -1<<10;
- *os_context_register_addr(context, reg_ALLOC) += (immed-1);
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- break;
- }
- /* else drop-through. */
+ badinst = *(unsigned long *)(*os_context_pc_addr(context) & ~3);
+ if ((badinst&0xfffff800) == (0xb000e000|reg_ALLOC<<21|reg_ALLOC<<16)) {
+ /* It is an ADDIT,OD i,ALLOC,ALLOC instruction that trapped. */
+ /* That means that it is the end of a pseudo-atomic. So do the */
+ /* add stripping off the pseudo-atomic-interrupted bit, and then */
+ /* tell the machine-independent code to process the pseudo- */
+ /* atomic. */
+ int immed = (badinst>>1)&0x3ff;
+ if (badinst & 1)
+ immed |= -1<<10;
+ *os_context_register_addr(context, reg_ALLOC) += (immed-1);
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ break;
+ }
+ /* else drop-through. */
default:
not_interesting:
- interrupt_handle_now(signal, siginfo, context);
+ interrupt_handle_now(signal, siginfo, context);
}
}
badinst = *(unsigned long *)(*os_context_pc_addr(context) & ~3);
/* First, test for the pseudo-atomic instruction */
if ((badinst & 0xfffff800) == (0xb000e000 |
- reg_ALLOC<<21 |
- reg_ALLOC<<16)) {
- /* It is an ADDIT,OD i,ALLOC,ALLOC instruction that trapped.
- That means that it is the end of a pseudo-atomic. So do
- the add stripping off the pseudo-atomic-interrupted bit,
- and then tell the machine-independent code to process the
- pseudo-atomic. */
- int immed = (badinst>>1) & 0x3ff;
- if (badinst & 1)
- immed |= -1<<10;
- *os_context_register_addr(context, reg_ALLOC) += (immed-1);
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- return;
+ reg_ALLOC<<21 |
+ reg_ALLOC<<16)) {
+ /* It is an ADDIT,OD i,ALLOC,ALLOC instruction that trapped.
+ That means that it is the end of a pseudo-atomic. So do
+ the add stripping off the pseudo-atomic-interrupted bit,
+ and then tell the machine-independent code to process the
+ pseudo-atomic. */
+ int immed = (badinst>>1) & 0x3ff;
+ if (badinst & 1)
+ immed |= -1<<10;
+ *os_context_register_addr(context, reg_ALLOC) += (immed-1);
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ return;
} else {
- opcode = badinst >> 26;
- if (opcode == 2) {
- /* reg/reg inst. */
- r1 = (badinst >> 16) & 0x1f;
- op1 = fixnum_value(*os_context_register_addr(context, r1));
- r2 = (badinst >> 21) & 0x1f;
- op2 = fixnum_value(*os_context_register_addr(context, r2));
- t = badinst & 0x1f;
-
- switch ((badinst >> 5) & 0x7f) {
- case 0x70:
- /* Add and trap on overflow. */
- res = op1 + op2;
- break;
-
- case 0x60:
- /* Subtract and trap on overflow. */
- res = op1 - op2;
- break;
-
- default:
- goto not_interesting;
- }
- } else if ((opcode & 0x37) == 0x25 && (badinst & (1<<11))) {
- /* Add or subtract immediate. */
- op1 = ((badinst >> 3) & 0xff) | ((-badinst&1)<<8);
- r2 = (badinst >> 16) & 0x1f;
- op2 = fixnum_value(*os_context_register_addr(context, r1));
- t = (badinst >> 21) & 0x1f;
- if (opcode == 0x2d)
- res = op1 + op2;
- else
- res = op1 - op2;
- }
- else
- goto not_interesting;
-
- /* ?? What happens here if we hit the end of dynamic space? */
+ opcode = badinst >> 26;
+ if (opcode == 2) {
+ /* reg/reg inst. */
+ r1 = (badinst >> 16) & 0x1f;
+ op1 = fixnum_value(*os_context_register_addr(context, r1));
+ r2 = (badinst >> 21) & 0x1f;
+ op2 = fixnum_value(*os_context_register_addr(context, r2));
+ t = badinst & 0x1f;
+
+ switch ((badinst >> 5) & 0x7f) {
+ case 0x70:
+ /* Add and trap on overflow. */
+ res = op1 + op2;
+ break;
+
+ case 0x60:
+ /* Subtract and trap on overflow. */
+ res = op1 - op2;
+ break;
+
+ default:
+ goto not_interesting;
+ }
+ } else if ((opcode & 0x37) == 0x25 && (badinst & (1<<11))) {
+ /* Add or subtract immediate. */
+ op1 = ((badinst >> 3) & 0xff) | ((-badinst&1)<<8);
+ r2 = (badinst >> 16) & 0x1f;
+            op2 = fixnum_value(*os_context_register_addr(context, r2));
+ t = (badinst >> 21) & 0x1f;
+ if (opcode == 0x2d)
+ res = op1 + op2;
+ else
+ res = op1 - op2;
+ }
+ else
+ goto not_interesting;
+
+ /* ?? What happens here if we hit the end of dynamic space? */
dynamic_space_free_pointer = (lispobj *) *os_context_register_addr(context, reg_ALLOC);
- *os_context_register_addr(context, t) = alloc_number(res);
- *os_context_register_addr(context, reg_ALLOC)
- = (unsigned long) dynamic_space_free_pointer;
- arch_skip_instruction(context);
-
- return;
-
+ *os_context_register_addr(context, t) = alloc_number(res);
+ *os_context_register_addr(context, reg_ALLOC)
+ = (unsigned long) dynamic_space_free_pointer;
+ arch_skip_instruction(context);
+
+ return;
+
not_interesting:
- interrupt_handle_now(signal, siginfo, context);
+ interrupt_handle_now(signal, siginfo, context);
}
}
#define _HPPA_ARCH_H
-static inline void
+static inline void
get_spinlock(lispobj *word,long value)
{
- *word=value; /* FIXME for threads */
+ *word=value; /* FIXME for threads */
}
static inline void
os_context_register_addr(os_context_t *context, int offset)
{
if (offset == 0) {
- /* KLUDGE: I'm not sure, but it's possible that Linux puts the
+ /* KLUDGE: I'm not sure, but it's possible that Linux puts the
contents of the Processor Status Word in the (wired-zero)
slot in the mcontext. In any case, the following is
unlikely to do any harm: */
- static int zero;
- zero = 0;
- return &zero;
+ static int zero;
+ zero = 0;
+ return &zero;
} else {
- return &(((struct sigcontext *) &(context->uc_mcontext))->sc_gr[offset]);
+ return &(((struct sigcontext *) &(context->uc_mcontext))->sc_gr[offset]);
}
}
return &(context->uc_sigmask);
}
-void
+void
os_restore_fp_control(os_context_t *context)
{
/* FIXME: Probably do something. */
}
-void
+void
os_flush_icache(os_vm_address_t address, os_vm_size_t length)
{
/* FIXME: Maybe this is OK. */
-#define NREGS (32)
+#define NREGS (32)
#ifdef LANGUAGE_ASSEMBLY
#define REG(num) num
fprintf(stderr, "(tid %ld)",thread_self());
#endif
if (fmt) {
- fprintf(stderr, ":\n");
- va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
- va_end(ap);
+ fprintf(stderr, ":\n");
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
}
fprintf(stderr, "\n");
fflush(stderr);
printf("internal error #%d\n", *ptr++);
len--;
while (len > 0) {
- scoffset = *ptr++;
- len--;
- if (scoffset == 253) {
- scoffset = *ptr++;
- len--;
- }
- else if (scoffset == 254) {
- scoffset = ptr[0] + ptr[1]*256;
- ptr += 2;
- len -= 2;
- }
- else if (scoffset == 255) {
- scoffset = ptr[0] + (ptr[1]<<8) + (ptr[2]<<16) + (ptr[3]<<24);
- ptr += 4;
- len -= 4;
- }
- sc = scoffset & 0x1f;
- offset = scoffset >> 5;
-
- printf(" SC: %d, Offset: %d", sc, offset);
- switch (sc) {
- case sc_AnyReg:
- case sc_DescriptorReg:
- putchar('\t');
- brief_print(*os_context_register_addr(context, offset));
- break;
+ scoffset = *ptr++;
+ len--;
+ if (scoffset == 253) {
+ scoffset = *ptr++;
+ len--;
+ }
+ else if (scoffset == 254) {
+ scoffset = ptr[0] + ptr[1]*256;
+ ptr += 2;
+ len -= 2;
+ }
+ else if (scoffset == 255) {
+ scoffset = ptr[0] + (ptr[1]<<8) + (ptr[2]<<16) + (ptr[3]<<24);
+ ptr += 4;
+ len -= 4;
+ }
+ sc = scoffset & 0x1f;
+ offset = scoffset >> 5;
- case sc_CharacterReg:
- ch = *os_context_register_addr(context, offset);
+ printf(" SC: %d, Offset: %d", sc, offset);
+ switch (sc) {
+ case sc_AnyReg:
+ case sc_DescriptorReg:
+ putchar('\t');
+ brief_print(*os_context_register_addr(context, offset));
+ break;
+
+ case sc_CharacterReg:
+ ch = *os_context_register_addr(context, offset);
#ifdef LISP_FEATURE_X86
- if (offset&1)
- ch = ch>>8;
- ch = ch & 0xff;
+ if (offset&1)
+ ch = ch>>8;
+ ch = ch & 0xff;
#endif
- switch (ch) {
- case '\n': printf("\t'\\n'\n"); break;
- case '\b': printf("\t'\\b'\n"); break;
- case '\t': printf("\t'\\t'\n"); break;
- case '\r': printf("\t'\\r'\n"); break;
- default:
- if (ch < 32 || ch > 127)
- printf("\\%03o", ch);
- else
- printf("\t'%c'\n", ch);
- break;
- }
- break;
- case sc_SapReg:
+ switch (ch) {
+ case '\n': printf("\t'\\n'\n"); break;
+ case '\b': printf("\t'\\b'\n"); break;
+ case '\t': printf("\t'\\t'\n"); break;
+ case '\r': printf("\t'\\r'\n"); break;
+ default:
+ if (ch < 32 || ch > 127)
+ printf("\\%03o", ch);
+ else
+ printf("\t'%c'\n", ch);
+ break;
+ }
+ break;
+ case sc_SapReg:
#ifdef sc_WordPointerReg
- case sc_WordPointerReg:
+ case sc_WordPointerReg:
#endif
- printf("\t0x%08lx\n", (unsigned long) *os_context_register_addr(context, offset));
- break;
- case sc_SignedReg:
- printf("\t%ld\n", (long) *os_context_register_addr(context, offset));
- break;
- case sc_UnsignedReg:
- printf("\t%lu\n", (unsigned long) *os_context_register_addr(context, offset));
- break;
+ printf("\t0x%08lx\n", (unsigned long) *os_context_register_addr(context, offset));
+ break;
+ case sc_SignedReg:
+ printf("\t%ld\n", (long) *os_context_register_addr(context, offset));
+ break;
+ case sc_UnsignedReg:
+ printf("\t%lu\n", (unsigned long) *os_context_register_addr(context, offset));
+ break;
#ifdef sc_SingleFloatReg
- case sc_SingleFloatReg:
- printf("\t%g\n", *(float *)&context->sc_fpregs[offset]);
- break;
+ case sc_SingleFloatReg:
+ printf("\t%g\n", *(float *)&context->sc_fpregs[offset]);
+ break;
#endif
#ifdef sc_DoubleFloatReg
- case sc_DoubleFloatReg:
- printf("\t%g\n", *(double *)&context->sc_fpregs[offset]);
- break;
+ case sc_DoubleFloatReg:
+ printf("\t%g\n", *(double *)&context->sc_fpregs[offset]);
+ break;
#endif
- default:
- printf("\t???\n");
- break;
- }
+ default:
+ printf("\t???\n");
+ break;
+ }
}
}
\f
lispobj debug_print(lispobj string)
{
- /* This is a kludge. It's not actually safe - in general - to use
+ /* This is a kludge. It's not actually safe - in general - to use
%primitive print on the alpha, because it skips half of the
number stack setup that should usually be done on a function call,
so the called routine (i.e. this one) ends up being able to overwrite
we just guarantee our safety by putting an unused buffer on
the stack before doing anything else here */
char untouched[32]; /* GCC warns about not using this, but that's the point.. */
- fprintf(stderr, "%s\n",
- (char *)(((struct vector *)native_pointer(string))->data),untouched);
+ fprintf(stderr, "%s\n",
+ (char *)(((struct vector *)native_pointer(string))->data),untouched);
return NIL;
}
* In that case, the Lisp-level handler is stored in interrupt_handlers[..]
* and interrupt_low_level_handlers[..] is cleared.
*
- * However, some signals need special handling, e.g.
+ * However, some signals need special handling, e.g.
*
* o the SIGSEGV (for e.g. Linux) or SIGBUS (for e.g. FreeBSD) used by the
* garbage collector to detect violations of write protection,
* o the SIGTRAP (Linux/Alpha) which Lisp code uses to handle breakpoints,
* pseudo-atomic sections, and some classes of error (e.g. "function
* not defined"). This never goes anywhere near the Lisp handlers at all.
- * See runtime/alpha-arch.c and code/signal.lisp
- *
+ * See runtime/alpha-arch.c and code/signal.lisp
+ *
* - WHN 20000728, dan 20010128 */
void run_deferred_handler(struct interrupt_data *data, void *v_context) ;
-static void store_signal_data_for_later (struct interrupt_data *data,
- void *handler, int signal,
- siginfo_t *info,
- os_context_t *context);
+static void store_signal_data_for_later (struct interrupt_data *data,
+ void *handler, int signal,
+ siginfo_t *info,
+ os_context_t *context);
boolean interrupt_maybe_gc_int(int signal, siginfo_t *info, void *v_context);
void sigaddset_blockable(sigset_t *s)
lose("interrupts not enabled");
if (
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
- (!foreign_function_call_active) &&
+ (!foreign_function_call_active) &&
#endif
- arch_pseudo_atomic_atomic(context))
+ arch_pseudo_atomic_atomic(context))
lose ("in pseudo atomic section");
}
* mask ought to be clear anyway most of the time, but may be non-zero
* if we were interrupted e.g. while waiting for a queue. */
-void reset_signal_mask ()
+void reset_signal_mask ()
{
sigset_t new;
sigemptyset(&new);
* utility routines used by various signal handlers
*/
-void
+void
build_fake_control_stack_frames(struct thread *th,os_context_t *context)
{
#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
-
+
lispobj oldcont;
/* Build a fake stack frame or frames */
current_control_frame_pointer =
- (lispobj *)(*os_context_register_addr(context, reg_CSP));
+ (lispobj *)(*os_context_register_addr(context, reg_CSP));
if ((lispobj *)(*os_context_register_addr(context, reg_CFP))
- == current_control_frame_pointer) {
+ == current_control_frame_pointer) {
/* There is a small window during call where the callee's
* frame isn't built yet. */
if (lowtag_of(*os_context_register_addr(context, reg_CODE))
- == FUN_POINTER_LOWTAG) {
+ == FUN_POINTER_LOWTAG) {
/* We have called, but not built the new frame, so
* build it for them. */
current_control_frame_pointer[0] =
- *os_context_register_addr(context, reg_OCFP);
+ *os_context_register_addr(context, reg_OCFP);
current_control_frame_pointer[1] =
- *os_context_register_addr(context, reg_LRA);
+ *os_context_register_addr(context, reg_LRA);
current_control_frame_pointer += 8;
/* Build our frame on top of it. */
oldcont = (lispobj)(*os_context_register_addr(context, reg_CFP));
current_control_frame_pointer[0] = oldcont;
current_control_frame_pointer[1] = NIL;
current_control_frame_pointer[2] =
- (lispobj)(*os_context_register_addr(context, reg_CODE));
+ (lispobj)(*os_context_register_addr(context, reg_CODE));
#endif
}
/* Get current Lisp state from context. */
#ifdef reg_ALLOC
dynamic_space_free_pointer =
- (lispobj *)(*os_context_register_addr(context, reg_ALLOC));
+ (lispobj *)(*os_context_register_addr(context, reg_ALLOC));
#if defined(LISP_FEATURE_ALPHA)
if ((long)dynamic_space_free_pointer & 1) {
- lose("dead in fake_foreign_function_call, context = %x", context);
+ lose("dead in fake_foreign_function_call, context = %x", context);
}
#endif
#endif
#ifdef reg_BSP
current_binding_stack_pointer =
- (lispobj *)(*os_context_register_addr(context, reg_BSP));
+ (lispobj *)(*os_context_register_addr(context, reg_BSP));
#endif
build_fake_control_stack_frames(thread,context);
/* Do dynamic binding of the active interrupt context index
* and save the context in the context array. */
context_index =
- fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
-
+ fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread));
+
if (context_index >= MAX_INTERRUPTS) {
lose("maximum interrupt nesting depth (%d) exceeded", MAX_INTERRUPTS);
}
bind_variable(FREE_INTERRUPT_CONTEXT_INDEX,
- make_fixnum(context_index + 1),thread);
+ make_fixnum(context_index + 1),thread);
thread->interrupt_contexts[context_index] = context;
}
/* blocks all blockable signals. If you are calling from a signal handler,
- * the usual signal mask will be restored from the context when the handler
+ * the usual signal mask will be restored from the context when the handler
* finishes. Otherwise, be careful */
void
* signalling an internal error */
void
interrupt_internal_error(int signal, siginfo_t *info, os_context_t *context,
- boolean continuable)
+ boolean continuable)
{
lispobj context_sap = 0;
/* Allocate the SAP object while the interrupts are still
* disabled. */
if (internal_errors_enabled) {
- context_sap = alloc_sap(context);
+ context_sap = alloc_sap(context);
}
thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
if (internal_errors_enabled) {
SHOW("in interrupt_internal_error");
#ifdef QSHOW
- /* Display some rudimentary debugging information about the
- * error, so that even if the Lisp error handler gets badly
- * confused, we have a chance to determine what's going on. */
- describe_internal_error(context);
+ /* Display some rudimentary debugging information about the
+ * error, so that even if the Lisp error handler gets badly
+ * confused, we have a chance to determine what's going on. */
+ describe_internal_error(context);
#endif
- funcall2(SymbolFunction(INTERNAL_ERROR), context_sap,
- continuable ? T : NIL);
+ funcall2(SymbolFunction(INTERNAL_ERROR), context_sap,
+ continuable ? T : NIL);
} else {
- describe_internal_error(context);
- /* There's no good way to recover from an internal error
- * before the Lisp error handling mechanism is set up. */
- lose("internal error too early in init, can't recover");
+ describe_internal_error(context);
+ /* There's no good way to recover from an internal error
+ * before the Lisp error handling mechanism is set up. */
+ lose("internal error too early in init, can't recover");
}
undo_fake_foreign_function_call(context); /* blocks signals again */
if (continuable) {
- arch_skip_instruction(context);
+ arch_skip_instruction(context);
}
}
the FPU control word from the context, as after the signal is
delivered we appear to have a null FPU control word. */
os_restore_fp_control(context);
-#endif
+#endif
handler = thread->interrupt_data->interrupt_handlers[signal];
if (ARE_SAME_HANDLER(handler.c, SIG_IGN)) {
- return;
+ return;
}
-
+
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
were_in_lisp = !foreign_function_call_active;
if (were_in_lisp)
#ifdef QSHOW_SIGNALS
FSHOW((stderr,
- "/entering interrupt_handle_now(%d, info, context)\n",
- signal));
+ "/entering interrupt_handle_now(%d, info, context)\n",
+ signal));
#endif
if (ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
- /* This can happen if someone tries to ignore or default one
- * of the signals we need for runtime support, and the runtime
- * support decides to pass on it. */
- lose("no handler for signal %d in interrupt_handle_now(..)", signal);
+ /* This can happen if someone tries to ignore or default one
+ * of the signals we need for runtime support, and the runtime
+ * support decides to pass on it. */
+ lose("no handler for signal %d in interrupt_handle_now(..)", signal);
} else if (lowtag_of(handler.lisp) == FUN_POINTER_LOWTAG) {
- /* Once we've decided what to do about contexts in a
- * return-elsewhere world (the original context will no longer
- * be available; should we copy it or was nobody using it anyway?)
- * then we should convert this to return-elsewhere */
+ /* Once we've decided what to do about contexts in a
+ * return-elsewhere world (the original context will no longer
+ * be available; should we copy it or was nobody using it anyway?)
+ * then we should convert this to return-elsewhere */
/* CMUCL comment said "Allocate the SAPs while the interrupts
- * are still disabled.". I (dan, 2003.08.21) assume this is
- * because we're not in pseudoatomic and allocation shouldn't
- * be interrupted. In which case it's no longer an issue as
- * all our allocation from C now goes through a PA wrapper,
- * but still, doesn't hurt */
+ * are still disabled.". I (dan, 2003.08.21) assume this is
+ * because we're not in pseudoatomic and allocation shouldn't
+ * be interrupted. In which case it's no longer an issue as
+ * all our allocation from C now goes through a PA wrapper,
+ * but still, doesn't hurt */
lispobj info_sap,context_sap = alloc_sap(context);
info_sap = alloc_sap(info);
thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
#ifdef QSHOW_SIGNALS
- SHOW("calling Lisp-level handler");
+ SHOW("calling Lisp-level handler");
#endif
funcall3(handler.lisp,
- make_fixnum(signal),
- info_sap,
- context_sap);
+ make_fixnum(signal),
+ info_sap,
+ context_sap);
} else {
#ifdef QSHOW_SIGNALS
- SHOW("calling C-level handler");
+ SHOW("calling C-level handler");
#endif
/* Allow signals again. */
thread_sigmask(SIG_SETMASK, os_context_sigmask_addr(context), 0);
-
+
(*handler.c)(signal, info, void_context);
}
#ifdef QSHOW_SIGNALS
FSHOW((stderr,
- "/returning from interrupt_handle_now(%d, info, context)\n",
- signal));
+ "/returning from interrupt_handle_now(%d, info, context)\n",
+ signal));
#endif
}
boolean
maybe_defer_handler(void *handler, struct interrupt_data *data,
- int signal, siginfo_t *info, os_context_t *context)
+ int signal, siginfo_t *info, os_context_t *context)
{
struct thread *thread=arch_os_get_current_thread();
* atomic section inside a without-interrupts.
*/
if (SymbolValue(INTERRUPTS_ENABLED,thread) == NIL) {
- store_signal_data_for_later(data,handler,signal,info,context);
+ store_signal_data_for_later(data,handler,signal,info,context);
SetSymbolValue(INTERRUPT_PENDING, T,thread);
#ifdef QSHOW_SIGNALS
FSHOW((stderr,
"/maybe_defer_handler(%x,%d),thread=%ld: deferred\n",
(unsigned int)handler,signal,thread->os_thread));
#endif
- return 1;
- }
+ return 1;
+ }
/* a slightly confusing test. arch_pseudo_atomic_atomic() doesn't
* actually use its argument for anything on x86, so this branch
* may succeed even when context is null (gencgc alloc()) */
if (
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
- (!foreign_function_call_active) &&
+ (!foreign_function_call_active) &&
#endif
- arch_pseudo_atomic_atomic(context)) {
- store_signal_data_for_later(data,handler,signal,info,context);
- arch_set_pseudo_atomic_interrupted(context);
+ arch_pseudo_atomic_atomic(context)) {
+ store_signal_data_for_later(data,handler,signal,info,context);
+ arch_set_pseudo_atomic_interrupted(context);
#ifdef QSHOW_SIGNALS
FSHOW((stderr,
"/maybe_defer_handler(%x,%d),thread=%ld: deferred(PA)\n",
(unsigned int)handler,signal,thread->os_thread));
#endif
- return 1;
+ return 1;
}
#ifdef QSHOW_SIGNALS
FSHOW((stderr,
static void
store_signal_data_for_later (struct interrupt_data *data, void *handler,
- int signal,
- siginfo_t *info, os_context_t *context)
+ int signal,
+ siginfo_t *info, os_context_t *context)
{
if (data->pending_handler)
lose("tried to overwrite pending interrupt handler %x with %x\n",
data->pending_handler = handler;
data->pending_signal = signal;
if(info)
- memcpy(&(data->pending_info), info, sizeof(siginfo_t));
+ memcpy(&(data->pending_info), info, sizeof(siginfo_t));
if(context) {
- /* the signal mask in the context (from before we were
- * interrupted) is copied to be restored when
- * run_deferred_handler happens. Then the usually-blocked
- * signals are added to the mask in the context so that we are
- * running with blocked signals when the handler returns */
- sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
- sigaddset_blockable(os_context_sigmask_addr(context));
+ /* the signal mask in the context (from before we were
+ * interrupted) is copied to be restored when
+ * run_deferred_handler happens. Then the usually-blocked
+ * signals are added to the mask in the context so that we are
+ * running with blocked signals when the handler returns */
+ sigcopyset(&(data->pending_mask),os_context_sigmask_addr(context));
+ sigaddset_blockable(os_context_sigmask_addr(context));
}
}
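/* In outline, the bookkeeping above is: remember which handler to run,
 * copy the siginfo, save the interrupted context's signal mask so it can
 * be restored later, and leave the signal blocked until a safe point.
 * A condensed sketch of the same idea in plain POSIX terms follows; the
 * names (pending_signal, defer, run_deferred) are illustrative and not
 * the runtime's own. */

#include <signal.h>
#include <string.h>
#include <ucontext.h>

struct pending_signal {
    void (*handler)(int, siginfo_t *, void *);
    int signal;
    siginfo_t info;
    sigset_t mask;                 /* the pre-interrupt mask, restored later */
};

static struct pending_signal pending;

/* Called from a signal handler when it is not safe to run the real
 * handler right now (interrupts disabled, or inside pseudo-atomic). */
static void defer(void (*handler)(int, siginfo_t *, void *),
                  int sig, siginfo_t *info, ucontext_t *uc)
{
    pending.handler = handler;
    pending.signal = sig;
    if (info)
        memcpy(&pending.info, info, sizeof(siginfo_t));
    pending.mask = uc->uc_sigmask;     /* restore this when we finally run */
    sigaddset(&uc->uc_sigmask, sig);   /* stay blocked until then */
}

/* Called later, once it is safe again. */
static void run_deferred(void)
{
    if (pending.handler) {
        void (*h)(int, siginfo_t *, void *) = pending.handler;
        pending.handler = 0;
        sigprocmask(SIG_SETMASK, &pending.mask, 0);
        h(pending.signal, &pending.info, 0);
    }
}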
struct interrupt_data *data=thread->interrupt_data;
#ifdef LISP_FEATURE_LINUX
os_restore_fp_control(context);
-#endif
+#endif
if(maybe_defer_handler(interrupt_handle_now,data,
- signal,info,context))
- return;
+ signal,info,context))
+ return;
interrupt_handle_now(signal, info, context);
#ifdef LISP_FEATURE_DARWIN
/* Work around G5 bug */
struct interrupt_data *data=thread->interrupt_data;
#ifdef LISP_FEATURE_LINUX
os_restore_fp_control(context);
-#endif
+#endif
if(maybe_defer_handler(low_level_interrupt_handle_now,data,
- signal,info,context))
- return;
+ signal,info,context))
+ return;
low_level_interrupt_handle_now(signal, info, context);
#ifdef LISP_FEATURE_DARWIN
/* Work around G5 bug */
struct thread *thread=arch_os_get_current_thread();
sigset_t ss;
int i;
-
+
/* need the context stored so it can have registers scavenged */
- fake_foreign_function_call(context);
+ fake_foreign_function_call(context);
sigemptyset(&ss);
for(i=1;i<NSIG;i++) sigaddset(&ss,i); /* Block everything. */
* stuff to detect and handle hitting the GC trigger
*/
-#ifndef LISP_FEATURE_GENCGC
+#ifndef LISP_FEATURE_GENCGC
/* since GENCGC has its own way to record trigger */
static boolean
gc_trigger_hit(int signal, siginfo_t *info, os_context_t *context)
{
if (current_auto_gc_trigger == NULL)
- return 0;
+ return 0;
else{
- void *badaddr=arch_get_bad_addr(signal,info,context);
- return (badaddr >= (void *)current_auto_gc_trigger &&
- badaddr <((void *)current_dynamic_space + DYNAMIC_SPACE_SIZE));
+ void *badaddr=arch_get_bad_addr(signal,info,context);
+ return (badaddr >= (void *)current_auto_gc_trigger &&
+ badaddr <((void *)current_dynamic_space + DYNAMIC_SPACE_SIZE));
}
}
#endif
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
void * fun=native_pointer(function);
void *code = &(((struct simple_fun *) fun)->code);
-#endif
+#endif
/* Build a stack frame showing `interrupted' so that the
* user's backtrace makes (as much) sense (as usual) */
addl $12,%esp
popal
popfl
- leave
- ret
+ leave
+ ret
* What we do here is set up the stack that call_into_lisp would
* expect to see if it had been called by this code, and frob the
*(sp-15) = post_signal_tramp; /* return address for call_into_lisp */
*(sp-14) = function; /* args for call_into_lisp : function*/
- *(sp-13) = 0; /* arg array */
- *(sp-12) = 0; /* no. args */
+ *(sp-13) = 0; /* arg array */
+ *(sp-12) = 0; /* no. args */
/* this order matches that used in POPAD */
*(sp-11)=*os_context_register_addr(context,reg_EDI);
*(sp-10)=*os_context_register_addr(context,reg_ESI);
*os_context_register_addr(context,reg_RDI) = function; /* function */
*os_context_register_addr(context,reg_RSI) = 0; /* arg. array */
*os_context_register_addr(context,reg_RDX) = 0; /* no. args */
-#else
+#else
struct thread *th=arch_os_get_current_thread();
build_fake_control_stack_frames(th,context);
#endif
#ifdef LISP_FEATURE_X86
*os_context_pc_addr(context) = call_into_lisp;
- *os_context_register_addr(context,reg_ECX) = 0;
+ *os_context_register_addr(context,reg_ECX) = 0;
*os_context_register_addr(context,reg_EBP) = sp-2;
-#ifdef __NetBSD__
+#ifdef __NetBSD__
*os_context_register_addr(context,reg_UESP) = sp-15;
#else
*os_context_register_addr(context,reg_ESP) = sp-15;
#endif
#elif defined(LISP_FEATURE_X86_64)
*os_context_pc_addr(context) = call_into_lisp;
- *os_context_register_addr(context,reg_RCX) = 0;
+ *os_context_register_addr(context,reg_RCX) = 0;
*os_context_register_addr(context,reg_RBP) = sp-2;
*os_context_register_addr(context,reg_RSP) = sp-18;
#else
/* this much of the calling convention is common to all
non-x86 ports */
*os_context_pc_addr(context) = code;
- *os_context_register_addr(context,reg_NARGS) = 0;
+ *os_context_register_addr(context,reg_NARGS) = 0;
*os_context_register_addr(context,reg_LIP) = code;
- *os_context_register_addr(context,reg_CFP) =
- current_control_frame_pointer;
+ *os_context_register_addr(context,reg_CFP) =
+ current_control_frame_pointer;
#endif
#ifdef ARCH_HAS_NPC_REGISTER
*os_context_npc_addr(context) =
- 4 + *os_context_pc_addr(context);
+ 4 + *os_context_pc_addr(context);
#endif
#ifdef LISP_FEATURE_SPARC
- *os_context_register_addr(context,reg_CODE) =
- fun + FUN_POINTER_LOWTAG;
+ *os_context_register_addr(context,reg_CODE) =
+ fun + FUN_POINTER_LOWTAG;
#endif
}
* variables should work for functions as well, but on PPC/Darwin
* we get bus error at bogus addresses instead, hence this workaround,
* that has the added benefit of automatically discriminating between
- * functions and variables.
+ * functions and variables.
*/
void undefined_alien_function() {
funcall0(SymbolFunction(UNDEFINED_ALIEN_FUNCTION_ERROR));
boolean handle_guard_page_triggered(os_context_t *context,void *addr){
struct thread *th=arch_os_get_current_thread();
-
- /* note the os_context hackery here. When the signal handler returns,
+
+ /* note the os_context hackery here. When the signal handler returns,
* it won't go back to what it was doing ... */
- if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
+ if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
/* We hit the end of the control stack: disable guard page
* protection so the error handler has some headroom, protect the
* and restore it. */
protect_control_stack_guard_page(th,0);
protect_control_stack_return_guard_page(th,1);
-
+
arrange_return_to_lisp_function
(context, SymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
return 1;
return 1;
}
else if (addr >= undefined_alien_address &&
- addr < undefined_alien_address + os_vm_page_size) {
- arrange_return_to_lisp_function
+ addr < undefined_alien_address + os_vm_page_size) {
+ arrange_return_to_lisp_function
(context, SymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
- return 1;
+ return 1;
}
else return 0;
}
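/* handle_guard_page_triggered() follows the classic guard-page pattern:
 * keep an inaccessible page at the end of the stack, and when a fault
 * lands in it, re-enable access so the error path has some headroom and
 * divert control somewhere other than the faulting instruction.  The
 * runtime diverts control by rewriting the signal context (see
 * arrange_return_to_lisp_function above); the generic sketch below uses
 * siglongjmp instead, and a scratch region rather than the real C stack.
 * Names are illustrative; real stack-overflow handling would also need
 * sigaltstack/SA_ONSTACK so the handler itself has a stack to run on. */

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *region;          /* illustrative stack-like region */
static char *guard;           /* its protected guard page */
static sigjmp_buf recover;

static void guard_handler(int sig, siginfo_t *info, void *ctx)
{
    long pagesize = sysconf(_SC_PAGESIZE);
    char *addr = (char *)info->si_addr;
    (void)sig; (void)ctx;
    if (guard <= addr && addr < guard + pagesize) {
        /* Give the error path headroom, then leave the handler for a
         * recovery point instead of returning to the faulting store. */
        mprotect(guard, pagesize, PROT_READ|PROT_WRITE);
        siglongjmp(recover, 1);
    }
    abort();
}

int main(void)
{
    long pagesize = sysconf(_SC_PAGESIZE);
    struct sigaction sa;

    memset(&sa, 0, sizeof sa);
    sa.sa_sigaction = guard_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, 0);

    region = mmap(0, 4*pagesize, PROT_READ|PROT_WRITE,
                  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guard = region + 3*pagesize;
    mprotect(guard, pagesize, PROT_NONE);   /* the guard page */

    if (sigsetjmp(recover, 1)) {            /* also restores the signal mask */
        fprintf(stderr, "hit the guard page; signalling overflow\n");
        return 0;
    }
    guard[0] = 1;    /* "overflow" into the guard page */
    return 1;
}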
os_context_t *context=(os_context_t *) void_context;
struct thread *th=arch_os_get_current_thread();
struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
+ th ? th->interrupt_data : global_interrupt_data;
if(!data->pending_handler && !foreign_function_call_active &&
gc_trigger_hit(signal, info, context)){
/* SUB-GC may return without GCing if *GC-INHIBIT* is set, in
* which case we will be running with no gc trigger barrier
* thing for a while. But it shouldn't be long until the end
- * of WITHOUT-GCING.
+ * of WITHOUT-GCING.
*
* FIXME: It would be good to protect the end of dynamic space
* and signal a storage condition from there.
void
undoably_install_low_level_interrupt_handler (int signal,
- void handler(int,
- siginfo_t*,
- void*))
+ void handler(int,
+ siginfo_t*,
+ void*))
{
struct sigaction sa;
struct thread *th=arch_os_get_current_thread();
struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
+ th ? th->interrupt_data : global_interrupt_data;
if (0 > signal || signal >= NSIG) {
- lose("bad signal number %d", signal);
+ lose("bad signal number %d", signal);
}
if (sigismember(&blockable_sigset,signal))
sigaddset_blockable(&sa.sa_mask);
sa.sa_flags = SA_SIGINFO | SA_RESTART;
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
- if((signal==SIG_MEMORY_FAULT)
+ if((signal==SIG_MEMORY_FAULT)
#ifdef SIG_INTERRUPT_THREAD
|| (signal==SIG_INTERRUPT_THREAD)
#endif
)
- sa.sa_flags|= SA_ONSTACK;
+ sa.sa_flags|= SA_ONSTACK;
#endif
-
+
sigaction(signal, &sa, NULL);
data->interrupt_low_level_handlers[signal] =
- (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
+ (ARE_SAME_HANDLER(handler, SIG_DFL) ? 0 : handler);
}
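
The SA_ONSTACK flag set above only has an effect if an alternate signal stack has been registered for the thread; on the C-stack-is-control-stack ports that is what lets the memory-fault handler run even when the C stack itself is exhausted. A minimal sketch of pairing sigaltstack with SA_ONSTACK, assuming POSIX:

/* altstack.c -- run a SIGSEGV handler on an alternate stack. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
    static const char msg[] = "fault handled on the alternate stack\n";
    write(2, msg, sizeof msg - 1);
    _exit(1);
}

int main(void)
{
    stack_t ss;
    ss.ss_sp = malloc(SIGSTKSZ);
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;
    if (sigaltstack(&ss, 0) == -1) { perror("sigaltstack"); return 1; }

    struct sigaction sa;
    memset(&sa, 0, sizeof sa);
    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;   /* run handler on ss_sp */
    sigaction(SIGSEGV, &sa, 0);

    raise(SIGSEGV);                           /* simulate a fault */
    return 0;
}
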
/* This is called from Lisp. */
union interrupt_handler oldhandler;
struct thread *th=arch_os_get_current_thread();
struct interrupt_data *data=
- th ? th->interrupt_data : global_interrupt_data;
+ th ? th->interrupt_data : global_interrupt_data;
FSHOW((stderr, "/entering POSIX install_handler(%d, ..)\n", signal));
sigaddset_blockable(&new);
FSHOW((stderr, "/data->interrupt_low_level_handlers[signal]=%x\n",
- (unsigned int)data->interrupt_low_level_handlers[signal]));
+ (unsigned int)data->interrupt_low_level_handlers[signal]));
if (data->interrupt_low_level_handlers[signal]==0) {
- if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
- ARE_SAME_HANDLER(handler, SIG_IGN)) {
- sa.sa_sigaction = handler;
- } else if (sigismember(&new, signal)) {
- sa.sa_sigaction = maybe_now_maybe_later;
- } else {
- sa.sa_sigaction = interrupt_handle_now_handler;
- }
-
- sigemptyset(&sa.sa_mask);
- sigaddset_blockable(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO | SA_RESTART;
- sigaction(signal, &sa, NULL);
+ if (ARE_SAME_HANDLER(handler, SIG_DFL) ||
+ ARE_SAME_HANDLER(handler, SIG_IGN)) {
+ sa.sa_sigaction = handler;
+ } else if (sigismember(&new, signal)) {
+ sa.sa_sigaction = maybe_now_maybe_later;
+ } else {
+ sa.sa_sigaction = interrupt_handle_now_handler;
+ }
+
+ sigemptyset(&sa.sa_mask);
+ sigaddset_blockable(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO | SA_RESTART;
+ sigaction(signal, &sa, NULL);
}
oldhandler = data->interrupt_handlers[signal];
SHOW("entering interrupt_init()");
sigemptyset(&blockable_sigset);
sigaddset_blockable(&blockable_sigset);
-
+
global_interrupt_data=calloc(sizeof(struct interrupt_data), 1);
/* Set up high level handler information. */
for (i = 0; i < NSIG; i++) {
global_interrupt_data->interrupt_handlers[i].c =
- /* (The cast here blasts away the distinction between
- * SA_SIGACTION-style three-argument handlers and
- * signal(..)-style one-argument handlers, which is OK
- * because it works to call the 1-argument form where the
- * 3-argument form is expected.) */
- (void (*)(int, siginfo_t*, void*))SIG_DFL;
+ /* (The cast here blasts away the distinction between
+ * SA_SIGACTION-style three-argument handlers and
+ * signal(..)-style one-argument handlers, which is OK
+ * because it works to call the 1-argument form where the
+ * 3-argument form is expected.) */
+ (void (*)(int, siginfo_t*, void*))SIG_DFL;
}
SHOW("returning from interrupt_init()");
*
* Note: In CMU CL, this was 4096, but there was no explanation given,
* and it's hard to see why we'd need that many nested interrupts, so
- * I've scaled it back (to 256) to see what happens. -- WHN 20000730
+ * I've scaled it back (to 256) to see what happens. -- WHN 20000730
* Nothing happened, so let's creep it back a bit further -- dan 20030411 */
#define MAX_INTERRUPTS 32
void (*interrupt_low_level_handlers[NSIG]) (int, siginfo_t*, void*) ;
union interrupt_handler interrupt_handlers[NSIG];
- /* signal information for pending signal. pending_signal=0 when there
+ /* signal information for pending signal. pending_signal=0 when there
* is no pending signal. */
void (*pending_handler) (int, siginfo_t*, void*) ;
int pending_signal ;
extern void interrupt_handle_now(int, siginfo_t*, void*);
extern void interrupt_handle_pending(os_context_t*);
extern void interrupt_internal_error(int, siginfo_t*, os_context_t*,
- boolean continuable);
+ boolean continuable);
extern boolean handle_guard_page_triggered(os_context_t *,void *);
extern boolean interrupt_maybe_gc(int, siginfo_t*, void*);
extern boolean interrupt_maybe_gc_int(int, siginfo_t *, void *);
extern void sig_stop_for_gc_handler(int, siginfo_t*, void*);
#endif
extern void undoably_install_low_level_interrupt_handler (int signal,
- void
- handler(int,
- siginfo_t*,
- void*));
+ void
+ handler(int,
+ siginfo_t*,
+ void*));
extern unsigned long install_handler(int signal,
- void handler(int, siginfo_t*, void*));
+ void handler(int, siginfo_t*, void*));
extern union interrupt_handler interrupt_handlers[NSIG];
#define __NR_sys_futex __NR_futex
_syscall4(int,sys_futex,
- int *, futex,
- int, op,
- int, val,
- struct timespec *, rel);
+ int *, futex,
+ int, op,
+ int, val,
+ struct timespec *, rel);
#endif
#include "gc.h"
int minor_version;
char *p;
uname(&name);
- p=name.release;
+ p=name.release;
major_version = atoi(p);
p=strchr(p,'.')+1;
minor_version = atoi(p);
if (major_version<2) {
- lose("linux kernel version too old: major version=%d (can't run in version < 2.0.0)",
- major_version);
+ lose("linux kernel version too old: major version=%d (can't run in version < 2.0.0)",
+ major_version);
}
if (!(major_version>2 || minor_version >= 4)) {
#ifdef LISP_FEATURE_SPARC
- FSHOW((stderr,"linux kernel %d.%d predates 2.4;\n enabling workarounds for SPARC kernel bugs in signal handling.\n", major_version,minor_version));
- linux_sparc_siginfo_bug = 1;
+ FSHOW((stderr,"linux kernel %d.%d predates 2.4;\n enabling workarounds for SPARC kernel bugs in signal handling.\n", major_version,minor_version));
+ linux_sparc_siginfo_bug = 1;
#endif
}
#ifdef LISP_FEATURE_SB_THREAD
futex_wait(futex,-1);
if(errno==ENOSYS) linux_no_threads_p = 1;
- if(linux_no_threads_p)
- fprintf(stderr,"Linux with NPTL support (e.g. kernel 2.6 or newer) required for \nthread-enabled SBCL. Disabling thread support.\n\n");
+ if(linux_no_threads_p)
+ fprintf(stderr,"Linux with NPTL support (e.g. kernel 2.6 or newer) required for \nthread-enabled SBCL. Disabling thread support.\n\n");
#endif
os_vm_page_size = getpagesize();
}
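
The version check above leans on uname(2)'s release string starting with "major.minor."; purely as an illustration, the same parse applied to a stand-alone (invented) release string:

/* Stand-alone illustration of the release-string parse used above;
 * the sample string is invented. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char release[] = "2.6.11-smp";          /* e.g. from uname(2) */
    char *p = release;
    int major = atoi(p);                     /* 2 */
    p = strchr(p, '.') + 1;
    int minor = atoi(p);                     /* 6 */
    printf("major=%d minor=%d too_old=%d\n",
           major, minor, !(major > 2 || minor >= 4));
    return 0;
}
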
#ifdef LISP_FEATURE_ALPHA
/* The Alpha is a 64 bit CPU. SBCL is a 32 bit application. Due to all
- * the places that assume we can get a pointer into a fixnum with no
+ * the places that assume we can get a pointer into a fixnum with no
* information loss, we have to make sure it allocates all its ram in the
* 0-2Gb region. */
int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
os_vm_address_t actual ;
- if (addr)
- flags |= MAP_FIXED;
+ if (addr)
+ flags |= MAP_FIXED;
#ifdef LISP_FEATURE_ALPHA
else {
- flags |= MAP_FIXED;
- addr=under_2gb_free_pointer;
+ flags |= MAP_FIXED;
+ addr=under_2gb_free_pointer;
}
-#endif
+#endif
actual = mmap(addr, len, OS_VM_PROT_ALL, flags, -1, 0);
- if (actual == MAP_FAILED || (addr && (addr!=actual))) {
- perror("mmap");
- return 0; /* caller should check this */
+ if (actual == MAP_FAILED || (addr && (addr!=actual))) {
+ perror("mmap");
+ return 0; /* caller should check this */
}
#ifdef LISP_FEATURE_ALPHA
os_invalidate(os_vm_address_t addr, os_vm_size_t len)
{
if (munmap(addr,len) == -1) {
- perror("munmap");
+ perror("munmap");
}
}
os_vm_address_t actual;
actual = mmap(addr, len, OS_VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED,
- fd, (off_t) offset);
+ fd, (off_t) offset);
if (actual == MAP_FAILED || (addr && (addr != actual))) {
- perror("mmap");
- lose("unexpected mmap(..) failure");
+ perror("mmap");
+ lose("unexpected mmap(..) failure");
}
return actual;
os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot)
{
if (mprotect(address, length, prot) == -1) {
- perror("mprotect");
+ perror("mprotect");
}
}
\f
{
struct thread *th;
size_t ad = (size_t) addr;
-
+
if ((READ_ONLY_SPACE_START <= ad && ad < READ_ONLY_SPACE_END)
- || (STATIC_SPACE_START <= ad && ad < STATIC_SPACE_END)
+ || (STATIC_SPACE_START <= ad && ad < STATIC_SPACE_END)
#if defined LISP_FEATURE_GENCGC
- || (DYNAMIC_SPACE_START <= ad && ad < DYNAMIC_SPACE_END)
+ || (DYNAMIC_SPACE_START <= ad && ad < DYNAMIC_SPACE_END)
#else
- || (DYNAMIC_0_SPACE_START <= ad && ad < DYNAMIC_0_SPACE_END)
- || (DYNAMIC_1_SPACE_START <= ad && ad < DYNAMIC_1_SPACE_END)
+ || (DYNAMIC_0_SPACE_START <= ad && ad < DYNAMIC_0_SPACE_END)
+ || (DYNAMIC_1_SPACE_START <= ad && ad < DYNAMIC_1_SPACE_END)
#endif
- )
- return 1;
+ )
+ return 1;
for_each_thread(th) {
- if((size_t)(th->control_stack_start) <= ad
- && ad < (size_t)(th->control_stack_end))
- return 1;
- if((size_t)(th->binding_stack_start) <= ad
- && ad < (size_t)(th->binding_stack_start + BINDING_STACK_SIZE))
- return 1;
+ if((size_t)(th->control_stack_start) <= ad
+ && ad < (size_t)(th->control_stack_end))
+ return 1;
+ if((size_t)(th->binding_stack_start) <= ad
+ && ad < (size_t)(th->binding_stack_start + BINDING_STACK_SIZE))
+ return 1;
}
return 0;
}
{
os_context_t *context = arch_os_get_context(&void_context);
void* fault_addr = (void*)info->si_addr;
- if (!gencgc_handle_wp_violation(fault_addr))
- if(!handle_guard_page_triggered(context,fault_addr))
+ if (!gencgc_handle_wp_violation(fault_addr))
+ if(!handle_guard_page_triggered(context,fault_addr))
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
arrange_return_to_lisp_function(context, SymbolFunction(MEMORY_FAULT_ERROR));
#else
(how we got here: when interrupting, we set bit 63 in reg_ALLOC.
At the end of the atomic section we tried to write to reg_ALLOC,
got a SIGSEGV (there's nothing mapped there) so ended up here. */
- if (addr != NULL &&
- *os_context_register_addr(context,reg_ALLOC) & (1L<<63)){
- *os_context_register_addr(context,reg_ALLOC) -= (1L<<63);
- interrupt_handle_pending(context);
- return;
+ if (addr != NULL &&
+ *os_context_register_addr(context,reg_ALLOC) & (1L<<63)){
+ *os_context_register_addr(context,reg_ALLOC) -= (1L<<63);
+ interrupt_handle_pending(context);
+ return;
}
#endif
if(!interrupt_maybe_gc(signal, info, context))
- if(!handle_guard_page_triggered(context,addr))
- interrupt_handle_now(signal, info, context);
+ if(!handle_guard_page_triggered(context,addr))
+ interrupt_handle_now(signal, info, context);
}
#endif
os_install_interrupt_handlers(void)
{
undoably_install_low_level_interrupt_handler(SIG_MEMORY_FAULT,
- sigsegv_handler);
+ sigsegv_handler);
#ifdef LISP_FEATURE_SB_THREAD
undoably_install_low_level_interrupt_handler(SIG_INTERRUPT_THREAD,
- interrupt_thread_handler);
+ interrupt_thread_handler);
undoably_install_low_level_interrupt_handler(SIG_STOP_FOR_GC,
- sig_stop_for_gc_handler);
+ sig_stop_for_gc_handler);
#endif
}
switch(opcode) {
case 0x0: /* jr, jalr */
- switch(inst & 0x3f) {
- case 0x08: /* jr */
- tgt = *os_context_register_addr(context, r1);
- break;
- case 0x09: /* jalr */
- tgt = *os_context_register_addr(context, r1);
- *os_context_register_addr(context, r3)
- = *os_context_pc_addr(context) + 4;
- break;
- }
- break;
+ switch(inst & 0x3f) {
+ case 0x08: /* jr */
+ tgt = *os_context_register_addr(context, r1);
+ break;
+ case 0x09: /* jalr */
+ tgt = *os_context_register_addr(context, r1);
+ *os_context_register_addr(context, r3)
+ = *os_context_pc_addr(context) + 4;
+ break;
+ }
+ break;
case 0x1: /* bltz, bgez, bltzal, bgezal */
- switch((inst >> 16) & 0x1f) {
- case 0x00: /* bltz */
- if(*os_context_register_addr(context, r1) < 0)
- tgt += disp;
- break;
- case 0x01: /* bgez */
- if(*os_context_register_addr(context, r1) >= 0)
- tgt += disp;
- break;
- case 0x10: /* bltzal */
- if(*os_context_register_addr(context, r1) < 0)
- tgt += disp;
- *os_context_register_addr(context, 31)
- = *os_context_pc_addr(context) + 4;
- break;
- case 0x11: /* bgezal */
- if(*os_context_register_addr(context, r1) >= 0)
- tgt += disp;
- *os_context_register_addr(context, 31)
- = *os_context_pc_addr(context) + 4;
- break;
- }
- break;
+ switch((inst >> 16) & 0x1f) {
+ case 0x00: /* bltz */
+ if(*os_context_register_addr(context, r1) < 0)
+ tgt += disp;
+ break;
+ case 0x01: /* bgez */
+ if(*os_context_register_addr(context, r1) >= 0)
+ tgt += disp;
+ break;
+ case 0x10: /* bltzal */
+ if(*os_context_register_addr(context, r1) < 0)
+ tgt += disp;
+ *os_context_register_addr(context, 31)
+ = *os_context_pc_addr(context) + 4;
+ break;
+ case 0x11: /* bgezal */
+ if(*os_context_register_addr(context, r1) >= 0)
+ tgt += disp;
+ *os_context_register_addr(context, 31)
+ = *os_context_pc_addr(context) + 4;
+ break;
+ }
+ break;
case 0x4: /* beq */
- if(*os_context_register_addr(context, r1)
- == *os_context_register_addr(context, r2))
- tgt += disp;
- break;
+ if(*os_context_register_addr(context, r1)
+ == *os_context_register_addr(context, r2))
+ tgt += disp;
+ break;
case 0x5: /* bne */
- if(*os_context_register_addr(context, r1)
- != *os_context_register_addr(context, r2))
- tgt += disp;
- break;
+ if(*os_context_register_addr(context, r1)
+ != *os_context_register_addr(context, r2))
+ tgt += disp;
+ break;
case 0x6: /* blez */
- if(*os_context_register_addr(context, r1)
- <= *os_context_register_addr(context, r2))
- tgt += disp;
- break;
+ if(*os_context_register_addr(context, r1)
+ <= *os_context_register_addr(context, r2))
+ tgt += disp;
+ break;
case 0x7: /* bgtz */
- if(*os_context_register_addr(context, r1)
- > *os_context_register_addr(context, r2))
- tgt += disp;
- break;
+ if(*os_context_register_addr(context, r1)
+ > *os_context_register_addr(context, r2))
+ tgt += disp;
+ break;
case 0x2: /* j */
- tgt = jtgt;
- break;
+ tgt = jtgt;
+ break;
case 0x3: /* jal */
- tgt = jtgt;
- *os_context_register_addr(context, 31)
- = *os_context_pc_addr(context) + 4;
- break;
+ tgt = jtgt;
+ *os_context_register_addr(context, 31)
+ = *os_context_pc_addr(context) + 4;
+ break;
}
return tgt;
}
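
emulate_branch above switches on the fixed MIPS instruction fields; as a quick reference, here is a stand-alone sketch of pulling those fields out of a 32-bit instruction word (the field names follow the usual MIPS convention and the sample word is invented):

/* mips_fields.c -- decode the fixed MIPS instruction fields that
 * emulate_branch() switches on.  The example word is invented. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t inst   = 0x10220003;            /* beq $1,$2,+3 (example)  */
    unsigned opcode = inst >> 26;            /* major opcode            */
    unsigned rs     = (inst >> 21) & 0x1f;   /* source register         */
    unsigned rt     = (inst >> 16) & 0x1f;   /* target register         */
    unsigned rd     = (inst >> 11) & 0x1f;   /* dest register (R-type)  */
    unsigned funct  = inst & 0x3f;           /* sub-opcode (R-type)     */
    /* Branch displacement: 16-bit signed word offset, scaled to bytes. */
    int32_t  disp   = (int16_t)(inst & 0xffff) << 2;
    printf("op=%#x rs=%u rt=%u rd=%u funct=%#x disp=%d\n",
           opcode, rs, rt, rd, funct, (int)disp);
    return 0;
}
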
{
/* Skip the offending instruction */
if (os_context_bd_cause(context)) {
- /* Currently, we never get here, because Linux' support for
+ /* Currently, we never get here, because Linux' support for
bd_cause seems not terribly solid (c.f os_context_bd_cause
in mips-linux-os.c). If a port to Irix comes along, this
code will be executed, because presumably Irix' support is
better (it can hardly be worse). We lose() to remind the
porter to review this code. -- CSR, 2002-09-06 */
- lose("bd_cause branch taken; review code for new OS?\n");
+ lose("bd_cause branch taken; review code for new OS?\n");
*os_context_pc_addr(context)
- = emulate_branch(context, *os_context_pc_addr(context));
+ = emulate_branch(context, *os_context_pc_addr(context));
} else
*os_context_pc_addr(context) += 4;
}
arch_internal_error_arguments(os_context_t *context)
{
if (os_context_bd_cause(context))
- return (unsigned char *)(*os_context_pc_addr(context) + 8);
+ return (unsigned char *)(*os_context_pc_addr(context) + 8);
else
- return (unsigned char *)(*os_context_pc_addr(context) + 4);
+ return (unsigned char *)(*os_context_pc_addr(context) + 4);
}
boolean
/* Figure out where the breakpoint is, and what happens next. */
if (os_context_bd_cause(context)) {
- break_pc = pc+1;
- next_inst = *pc;
+ break_pc = pc+1;
+ next_inst = *pc;
}
else {
- break_pc = pc;
- next_inst = orig_inst;
+ break_pc = pc;
+ next_inst = orig_inst;
}
/* Put the original instruction back. */
if (opcode == 1 || ((opcode & 0x3c) == 0x4) || ((next_inst & 0xf00e0000) == 0x80000000))
next_pc = (unsigned int *)emulate_branch(context, next_inst);
else
- next_pc = pc+1;
+ next_pc = pc+1;
displaced_after_inst = arch_install_breakpoint(next_pc);
}
switch (code) {
case trap_Halt:
- fake_foreign_function_call(context);
- lose("%%primitive halt called; the party is over.\n");
+ fake_foreign_function_call(context);
+ lose("%%primitive halt called; the party is over.\n");
case trap_PendingInterrupt:
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- break;
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ break;
case trap_Error:
case trap_Cerror:
- interrupt_internal_error(signal, info, context, code==trap_Cerror);
- break;
+ interrupt_internal_error(signal, info, context, code==trap_Cerror);
+ break;
case trap_Breakpoint:
- handle_breakpoint(signal, info, context);
- break;
+ handle_breakpoint(signal, info, context);
+ break;
case trap_FunEndBreakpoint:
- *os_context_pc_addr(context) = (int)handle_fun_end_breakpoint(signal, info, context);
- os_flush_icache((os_vm_address_t)*os_context_pc_addr(context), sizeof(unsigned int));
- break;
+ *os_context_pc_addr(context) = (int)handle_fun_end_breakpoint(signal, info, context);
+ os_flush_icache((os_vm_address_t)*os_context_pc_addr(context), sizeof(unsigned int));
+ break;
case trap_AfterBreakpoint:
- arch_remove_breakpoint(os_context_pc_addr(context), displaced_after_inst);
- displaced_after_inst = arch_install_breakpoint(skipped_break_addr);
- *os_context_sigmask_addr(context) = orig_sigmask;
- break;
+ arch_remove_breakpoint(os_context_pc_addr(context), displaced_after_inst);
+ displaced_after_inst = arch_install_breakpoint(skipped_break_addr);
+ *os_context_sigmask_addr(context) = orig_sigmask;
+ break;
case 0x10:
- /* Clear the pseudo-atomic flag */
- *os_context_register_addr(context, reg_NL4) &= ~(-1LL<<31);
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- return;
-
+ /* Clear the pseudo-atomic flag */
+ *os_context_register_addr(context, reg_NL4) &= ~(-1LL<<31);
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ return;
+
default:
- interrupt_handle_now(signal, info, context);
- break;
+ interrupt_handle_now(signal, info, context);
+ break;
}
}
switch (op) {
case 0x0: /* SPECIAL */
- switch (funct) {
- case 0x20: /* ADD */
- /* FIXME: Hopefully, this whole section can just go away,
+ switch (funct) {
+ case 0x20: /* ADD */
+ /* FIXME: Hopefully, this whole section can just go away,
with the rewrite of pseudo-atomic and the deletion of
overflow VOPs */
- /* Check to see if this is really a pa_interrupted hit */
- if (rs == reg_ALLOC && rt == reg_NL4) {
- *os_context_register_addr(context, reg_ALLOC)
- += *os_context_register_addr(context, reg_NL4) &= ~(-1LL<<31);
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- return;
- }
- result = FIXNUM_VALUE(*os_context_register_addr(context, rs))
- + FIXNUM_VALUE(*os_context_register_addr(context, rt));
- dest = rd;
- break;
-
- case 0x22: /* SUB */
- result = FIXNUM_VALUE(*os_context_register_addr(context, rs))
- - FIXNUM_VALUE(*os_context_register_addr(context, rt));
- dest = rd;
- break;
- }
- break;
-
+ /* Check to see if this is really a pa_interrupted hit */
+ if (rs == reg_ALLOC && rt == reg_NL4) {
+ *os_context_register_addr(context, reg_ALLOC)
+ += *os_context_register_addr(context, reg_NL4) &= ~(-1LL<<31);
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ return;
+ }
+ result = FIXNUM_VALUE(*os_context_register_addr(context, rs))
+ + FIXNUM_VALUE(*os_context_register_addr(context, rt));
+ dest = rd;
+ break;
+
+ case 0x22: /* SUB */
+ result = FIXNUM_VALUE(*os_context_register_addr(context, rs))
+ - FIXNUM_VALUE(*os_context_register_addr(context, rt));
+ dest = rd;
+ break;
+ }
+ break;
+
case 0x8: /* ADDI */
- result = FIXNUM_VALUE(*os_context_register_addr(context,rs)) + (immed>>2);
- dest = rt;
- break;
+ result = FIXNUM_VALUE(*os_context_register_addr(context,rs)) + (immed>>2);
+ dest = rt;
+ break;
}
-
+
if (dest < 32) {
dynamic_space_free_pointer =
(lispobj *) *os_context_register_addr(context,reg_ALLOC);
*os_context_register_addr(context,dest) = alloc_number(result);
- *os_context_register_addr(context, reg_ALLOC) =
- (unsigned int) dynamic_space_free_pointer;
+ *os_context_register_addr(context, reg_ALLOC) =
+ (unsigned int) dynamic_space_free_pointer;
arch_skip_instruction(context);
-
+
}
else
interrupt_handle_now(signal, info, context);
void
arch_install_interrupt_handlers()
-{
+{
undoably_install_low_level_interrupt_handler(SIGILL,sigill_handler);
undoably_install_low_level_interrupt_handler(SIGTRAP,sigtrap_handler);
undoably_install_low_level_interrupt_handler(SIGFPE,sigfpe_handler);
#define _MIPS_ARCH_H
-static inline void
+static inline void
get_spinlock(volatile lispobj *word, long value)
{
#ifdef LISP_FEATURE_SB_THREAD
int __cmp;
__asm__ __volatile__ (
- " .set push\n"
- " .set mips2\n"
- " .set noreorder\n"
- "1: ll %[__prev],%[__mem]\n"
- " bne %[__prev],%[__old],2f\n"
- " li %[__cmp],0\n"
- " move %[__cmp],%[__new]\n"
- " sc %[__cmp],%[__mem]\n"
- " beqz %[__cmp],1b\n"
- " nop\n"
- " sync\n"
- "2:\n"
- " .set pop"
- : [__prev] "=&r" (__prev),
- [__cmp] "=&r" (__cmp)
- : [__mem] "R" (*word),
- [__old] "r" (__old),
- [__new] "r" (value)
- : "memory");
+ " .set push\n"
+ " .set mips2\n"
+ " .set noreorder\n"
+ "1: ll %[__prev],%[__mem]\n"
+ " bne %[__prev],%[__old],2f\n"
+ " li %[__cmp],0\n"
+ " move %[__cmp],%[__new]\n"
+ " sc %[__cmp],%[__mem]\n"
+ " beqz %[__cmp],1b\n"
+ " nop\n"
+ " sync\n"
+ "2:\n"
+ " .set pop"
+ : [__prev] "=&r" (__prev),
+ [__cmp] "=&r" (__cmp)
+ : [__mem] "R" (*word),
+ [__old] "r" (__old),
+ [__new] "r" (value)
+ : "memory");
- if (!cmp)
- lose("recursive get_spinlock: 0x%x,%d\n", word, value);
+ if (!cmp)
+ lose("recursive get_spinlock: 0x%x,%d\n", word, value);
#else /* LISP_FEATURE_SB_THREAD */
*word=value;
#endif
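
The ll/sc loop above is MIPS-specific. Where the compiler provides atomic builtins, the same acquire-or-lose behaviour can be sketched portably; this illustration uses the GCC __sync builtins and is not the runtime's actual implementation:

/* spinlock_sketch.c -- portable illustration of the acquire-or-lose
 * logic above, using GCC __sync builtins instead of MIPS ll/sc asm. */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long lispobj;

static void acquire_spinlock(volatile lispobj *word, long value)
{
    /* Atomically install `value` only if the lock word is currently 0. */
    if (!__sync_bool_compare_and_swap(word, 0, (lispobj)value)) {
        /* Lock already held: analogous to the lose() call above. */
        fprintf(stderr, "spinlock %p already held\n", (void *)word);
        exit(1);
    }
}

static void release_spinlock(volatile lispobj *word)
{
    __sync_lock_release(word);               /* store 0, release order */
}

int main(void)
{
    volatile lispobj lock = 0;
    acquire_spinlock(&lock, 42);
    printf("held by %lu\n", (unsigned long)lock);
    release_spinlock(&lock);
    printf("released: %lu\n", (unsigned long)lock);
    return 0;
}
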
/* FIXME: Probably do something. */
}
-void
+void
os_restore_fp_control(os_context_t *context)
{
/* FIXME: Probably do something. */
loop" where a (BREAK 16) not in a branch delay slot would have
CAUSEF_BD filled. So, we comment
- #include <asm/mipsregs.h>
+ #include <asm/mipsregs.h>
- return (((struct sigcontext *) &(context->uc_mcontext))->sc_cause
- & CAUSEF_BD);
+ return (((struct sigcontext *) &(context->uc_mcontext))->sc_cause
+ & CAUSEF_BD);
out and return 0 always. -- CSR, 2002-09-02 */
return 0;
}
-void
+void
os_flush_icache(os_vm_address_t address, os_vm_size_t length)
{
if (cacheflush(address, length, ICACHE) == -1)
- perror("cacheflush");
+ perror("cacheflush");
}
#define REG(num) num
#endif
-#define NREGS (32)
+#define NREGS (32)
#define reg_ZERO REG(0)
#define reg_NL3 REG(1)
#define reg_LIP REG(31)
#define REGNAMES \
- "ZERO", "NL3", "CFUNC", "NL4", \
- "NL0", "NL1", "NL2", "NARGS", \
- "A0", "A1", "A2", "A3", \
- "A4", "A5", "FDEFN", "LEXENV", \
- "NFP", "OCFP", "LRA", "L0", \
- "NIL", "BSP", "CFP", "CSP", \
- "L1", "ALLOC", "K0", "K1", \
- "GP", "NSP", "CODE", "LIP"
+ "ZERO", "NL3", "CFUNC", "NL4", \
+ "NL0", "NL1", "NL2", "NARGS", \
+ "A0", "A1", "A2", "A3", \
+ "A4", "A5", "FDEFN", "LEXENV", \
+ "NFP", "OCFP", "LRA", "L0", \
+ "NIL", "BSP", "CFP", "CSP", \
+ "L1", "ALLOC", "K0", "K1", \
+ "GP", "NSP", "CODE", "LIP"
#define BOXED_REGISTERS { \
*
* (We could set up output to go to a special ldb_out stream for the
* same reason, but there's been no pressure for that so far.)
- *
+ *
* The enter-the-ldb-monitor function is responsible for setting up
* this stream. */
static FILE *ldb_in = 0;
#if 0
#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
printf("BSP\t=\t0x%08lx\n",
- (unsigned long)SymbolValue(BINDING_STACK_POINTER));
+ (unsigned long)SymbolValue(BINDING_STACK_POINTER));
#endif
printf("DYNAMIC\t=\t0x%08lx\n", (unsigned long)DYNAMIC_SPACE_START);
#if (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
printf("ALLOC\t=\t0x%08lx\n",
- (unsigned long)SymbolValue(ALLOCATION_POINTER));
+ (unsigned long)SymbolValue(ALLOCATION_POINTER));
#else
printf("ALLOC\t=\t0x%08X\n",
- (unsigned long)dynamic_space_free_pointer);
+ (unsigned long)dynamic_space_free_pointer);
printf("TRIGGER\t=\t0x%08lx\n", (unsigned long)current_auto_gc_trigger);
#endif
printf("STATIC\t=\t0x%08lx\n",
- (unsigned long)SymbolValue(STATIC_SPACE_FREE_POINTER));
+ (unsigned long)SymbolValue(STATIC_SPACE_FREE_POINTER));
printf("RDONLY\t=\t0x%08lx\n",
- (unsigned long)SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
+ (unsigned long)SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
#endif /* 0 */
}
if (widetag_of(obj) == SIMPLE_FUN_HEADER_WIDETAG) {
print((long)addr | FUN_POINTER_LOWTAG);
} else if (lowtag_of(obj) == OTHER_IMMEDIATE_0_LOWTAG ||
- lowtag_of(obj) == OTHER_IMMEDIATE_1_LOWTAG) {
+ lowtag_of(obj) == OTHER_IMMEDIATE_1_LOWTAG) {
print((lispobj)addr | OTHER_POINTER_LOWTAG);
} else {
print((lispobj)addr);
} if (count == -1) {
return;
- }
+ }
}
}
int i;
for (i = 0; i < NREGS; i++) {
- printf("%s:\t", lisp_register_names[i]);
+ printf("%s:\t", lisp_register_names[i]);
#ifdef LISP_FEATURE_X86
- brief_print((lispobj)(*os_context_register_addr(context,
- i*2)));
+ brief_print((lispobj)(*os_context_register_addr(context,
+ i*2)));
#else
- brief_print((lispobj)(*os_context_register_addr(context,i)));
+ brief_print((lispobj)(*os_context_register_addr(context,i)));
#endif
}
#ifdef LISP_FEATURE_DARWIN
printf("DSISR:\t\t 0x%08lx\n", (unsigned long)(*os_context_register_addr(context, 42)));
#endif
printf("PC:\t\t 0x%08lx\n",
- (unsigned long)(*os_context_pc_addr(context)));
+ (unsigned long)(*os_context_pc_addr(context)));
}
static void
struct thread *thread=arch_os_get_current_thread();
free = SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread)>>2;
-
+
if (more_p(ptr)) {
- int index;
-
- index = parse_number(ptr);
-
- if ((index >= 0) && (index < free)) {
- printf("There are %d interrupt contexts.\n", free);
- printf("printing context %d\n", index);
- print_context(thread->interrupt_contexts[index]);
- } else {
- printf("There aren't that many/few contexts.\n");
- printf("There are %d interrupt contexts.\n", free);
- }
+ int index;
+
+ index = parse_number(ptr);
+
+ if ((index >= 0) && (index < free)) {
+ printf("There are %d interrupt contexts.\n", free);
+ printf("printing context %d\n", index);
+ print_context(thread->interrupt_contexts[index]);
+ } else {
+ printf("There aren't that many/few contexts.\n");
+ printf("There are %d interrupt contexts.\n", free);
+ }
} else {
- if (free == 0)
- printf("There are no interrupt contexts!\n");
- else {
- printf("There are %d interrupt contexts.\n", free);
- printf("printing context %d\n", free - 1);
- print_context(thread->interrupt_contexts[free - 1]);
- }
+ if (free == 0)
+ printf("There are no interrupt contexts!\n");
+ else {
+ printf("There are %d interrupt contexts.\n", free);
+ printf("printing context %d\n", free - 1);
+ print_context(thread->interrupt_contexts[free - 1]);
+ }
}
}
int n;
if (more_p(ptr))
- n = parse_number(ptr);
+ n = parse_number(ptr);
else
- n = 100;
+ n = 100;
printf("Backtrace:\n");
backtrace(n);
while (catch != NULL) {
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
printf("0x%08lX:\n\tuwp: 0x%08lX\n\tfp: 0x%08lX\n\tcode: 0x%08lx\n\tentry: 0x%08lx\n\ttag: ",
- (unsigned long)catch, (unsigned long)(catch->current_uwp),
- (unsigned long)(catch->current_cont),
- catch->current_code,
- catch->entry_pc);
+ (unsigned long)catch, (unsigned long)(catch->current_uwp),
+ (unsigned long)(catch->current_cont),
+ catch->current_code,
+ catch->entry_pc);
#else
printf("0x%08lX:\n\tuwp: 0x%08lX\n\tfp: 0x%08lX\n\tcode: 0x%08lx\n\tentry: 0x%08lx\n\ttag: ",
- (unsigned long)catch, (unsigned long)(catch->current_uwp),
- (unsigned long)(catch->current_cont),
- (unsigned long)component_ptr_from_pc((void*)catch->entry_pc) +
- OTHER_POINTER_LOWTAG,
- (unsigned long)catch->entry_pc);
+ (unsigned long)catch, (unsigned long)(catch->current_uwp),
+ (unsigned long)(catch->current_cont),
+ (unsigned long)component_ptr_from_pc((void*)catch->entry_pc) +
+ OTHER_POINTER_LOWTAG,
+ (unsigned long)catch->entry_pc);
#endif
brief_print((lispobj)catch->tag);
catch = catch->previous_catch;
int ambig;
if (!ldb_in) {
- ldb_in = fopen("/dev/tty","r+");
- ldb_in_fd = fileno(ldb_in);
+ ldb_in = fopen("/dev/tty","r+");
+ ldb_in_fd = fileno(ldb_in);
}
while (!done) {
fflush(stdout);
line = fgets(buf, sizeof(buf), ldb_in);
if (line == NULL) {
- if (isatty(ldb_in_fd)) {
- putchar('\n');
- continue;
- }
- else {
- fprintf(stderr, "\nEOF on something other than a tty.\n");
- exit(0);
- }
- }
+ if (isatty(ldb_in_fd)) {
+ putchar('\n');
+ continue;
+ }
+ else {
+ fprintf(stderr, "\nEOF on something other than a tty.\n");
+ exit(0);
+ }
+ }
ptr = line;
if ((token = parse_token(&ptr)) == NULL)
continue;
block_size = os_trunc_size_to_page(length);
if (block_start > addr)
- bzero((char *)addr, block_start-addr);
+ bzero((char *)addr, block_start-addr);
if (block_size < length)
- bzero((char *)block_start+block_size, length-block_size);
+ bzero((char *)block_start+block_size, length-block_size);
if (block_size != 0) {
- /* Now deallocate and allocate the block so that it faults in
- * zero-filled. */
+ /* Now deallocate and allocate the block so that it faults in
+ * zero-filled. */
- os_invalidate(block_start, block_size);
- addr = os_validate(block_start, block_size);
+ os_invalidate(block_start, block_size);
+ addr = os_validate(block_start, block_size);
- if (addr == NULL || addr != block_start)
- lose("os_zero: block moved! 0x%08x ==> 0x%08x",
- block_start,
- addr);
+ if (addr == NULL || addr != block_start)
+ lose("os_zero: block moved! 0x%08x ==> 0x%08x",
+ block_start,
+ addr);
}
}
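
The deallocate-then-revalidate dance in os_zero above is cheaper than writing zeros: the kernel supplies fresh demand-zero pages instead. A stand-alone sketch of the same effect, assuming POSIX mmap with MAP_FIXED and MAP_ANONYMOUS available:

/* zero_by_remap.c -- zero page-aligned memory by replacing the
 * mapping rather than writing zeros, as os_zero() does above. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesize = sysconf(_SC_PAGESIZE);
    size_t len = 16 * (size_t)pagesize;
    char *block = mmap(0, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (block == MAP_FAILED) { perror("mmap"); return 1; }

    memset(block, 0xAA, len);                /* dirty the pages */

    /* Re-map the region in place: the old pages are discarded and
     * fresh demand-zero pages fault in on the next touch. */
    if (mmap(block, len, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
        perror("mmap(MAP_FIXED)");
        return 1;
    }
    printf("first byte after remap: %d\n", block[0]);   /* prints 0 */
    return 0;
}
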
old_len=os_round_up_size_to_page(old_len);
if (addr==NULL)
- return os_allocate(len);
+ return os_allocate(len);
else{
- long len_diff=len-old_len;
-
- if (len_diff<0)
- os_invalidate(addr+len,-len_diff);
- else{
- if (len_diff!=0) {
- os_vm_address_t new=os_allocate(len);
-
- if(new!=NULL){
- bcopy(addr,new,old_len);
- os_invalidate(addr,old_len);
- }
-
- addr=new;
- }
- }
- return addr;
+ long len_diff=len-old_len;
+
+ if (len_diff<0)
+ os_invalidate(addr+len,-len_diff);
+ else{
+ if (len_diff!=0) {
+ os_vm_address_t new=os_allocate(len);
+
+ if(new!=NULL){
+ bcopy(addr,new,old_len);
+ os_invalidate(addr,old_len);
+ }
+
+ addr=new;
+ }
+ }
+ return addr;
}
}
/* This maps a file into memory, or calls lose(..) for various
* failures. */
extern os_vm_address_t os_map(int fd,
- int offset,
- os_vm_address_t addr,
- os_vm_size_t len);
+ int offset,
+ os_vm_address_t addr,
+ os_vm_size_t len);
/* This presumably flushes the instruction cache, if that can be done
* explicitly. (It doesn't seem to be an issue for the i386 port,
* write-protecting a page so that the garbage collector can find out
* whether it's modified by handling the signal. */
extern void os_protect(os_vm_address_t addr,
- os_vm_size_t len,
- os_vm_prot_t protection);
+ os_vm_size_t len,
+ os_vm_prot_t protection);
/* This returns true for an address which makes sense at the Lisp level. */
extern boolean is_valid_lisp_addr(os_vm_address_t test);
* depend not only on the OS, but also on the architecture, e.g.
* getting at EFL/EFLAGS on the x86. Such things are defined in the
* architecture-dependence files, not the OS-dependence files.) */
-
+
/* These are not architecture-specific functions, but are instead
* general utilities defined in terms of the architecture-specific
* function os_validate(..) and os_invalidate(..).
extern os_vm_address_t os_allocate(os_vm_size_t len);
extern os_vm_address_t os_allocate_at(os_vm_address_t addr, os_vm_size_t len);
extern os_vm_address_t os_reallocate(os_vm_address_t addr,
- os_vm_size_t old_len,
- os_vm_size_t len);
+ os_vm_size_t old_len,
+ os_vm_size_t len);
extern void os_deallocate(os_vm_address_t addr, os_vm_size_t len);
* interface looks a lot like the Mach interface (but simpler in some
* places). For some operating systems, a subset of these functions
* will have to be emulated.
- *
+ *
* This is the OSF/1 version, based on the Linux version, itself based
- * on the OSF1 version from CMUCL by Sean Hallgren. Now _there's_
+ * on the OSF1 version from CMUCL by Sean Hallgren. Now _there's_
* a metacircularity for you ...
*/
else flags |= MAP_VARIABLE;
if((addr=mmap(addr,len,OS_VM_PROT_ALL,flags,-1,0)) == (os_vm_address_t) -1)
- perror("mmap");
-
+ perror("mmap");
+
return addr;
}
os_invalidate(os_vm_address_t addr, os_vm_size_t len)
{
if (munmap(addr,len) == -1) {
- perror("munmap");
+ perror("munmap");
}
}
os_map(int fd, int offset, os_vm_address_t addr, os_vm_size_t len)
{
addr = mmap(addr, len,
- OS_VM_PROT_ALL,
- MAP_PRIVATE | MAP_FILE | MAP_FIXED,
- fd, (off_t) offset);
+ OS_VM_PROT_ALL,
+ MAP_PRIVATE | MAP_FILE | MAP_FIXED,
+ fd, (off_t) offset);
if (addr == MAP_FAILED) {
- perror("mmap");
- lose("unexpected mmap(..) failure");
+ perror("mmap");
+ lose("unexpected mmap(..) failure");
}
return addr;
os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot)
{
if (mprotect(address, length, prot) == -1) {
- perror("mprotect");
+ perror("mprotect");
}
}
os_vm_address_t newaddr;
newaddr=os_trunc_to_page(addr);
if((ret=mvalid(newaddr,newaddr-addr+4,OS_VM_PROT_ALL)) == 0)
- return TRUE;
+ return TRUE;
else if(errno==EINVAL)
- perror("mvalid");
+ perror("mvalid");
return FALSE;
}
\f
os_context_t *context = arch_os_get_context(&void_context);
os_vm_address_t addr = arch_get_bad_addr(signal,info,context);
-
- if (addr != NULL &&
- *os_context_register_addr(context,reg_ALLOC) & (1L<<63)){
- /* this is lifted from linux-os.c, so violates OOAO */
- *os_context_register_addr(context,reg_ALLOC) -= (1L<<63);
- interrupt_handle_pending(context);
+
+ if (addr != NULL &&
+ *os_context_register_addr(context,reg_ALLOC) & (1L<<63)){
+ /* this is lifted from linux-os.c, so violates OOAO */
+ *os_context_register_addr(context,reg_ALLOC) -= (1L<<63);
+ interrupt_handle_pending(context);
} else if (!interrupt_maybe_gc(signal, info, context)) {
- if(!handle_guard_page_triggered(context,addr))
- interrupt_handle_now(signal, info, context);
+ if(!handle_guard_page_triggered(context,addr))
+ interrupt_handle_now(signal, info, context);
}
}
os_install_interrupt_handlers(void)
{
undoably_install_low_level_interrupt_handler(SIG_MEMORY_FAULT,
- sigsegv_handler);
+ sigsegv_handler);
}
typedef long os_context_register_t ;
-#ifndef NSIG /* osf1 -D_XOPEN_SOURCE_EXTENDED omits this */
+#ifndef NSIG /* osf1 -D_XOPEN_SOURCE_EXTENDED omits this */
#define NSIG (SIGMAX+1)
#endif
/* Search static space. */
headerptr = (lispobj *)STATIC_SPACE_START;
count =
- (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
- (lispobj *)STATIC_SPACE_START;
+ (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0) -
+ (lispobj *)STATIC_SPACE_START;
if (search_for_symbol(name, &headerptr, &count)) {
*result = make_lispobj(headerptr,OTHER_POINTER_LOWTAG);
return 1;
parse_regnum(char *s)
{
if ((s[1] == 'R') || (s[1] == 'r')) {
- int regnum;
+ int regnum;
- if (s[2] == '\0')
- return -1;
+ if (s[2] == '\0')
+ return -1;
- /* skip the $R part and call atoi on the number */
- regnum = atoi(s + 2);
- if ((regnum >= 0) && (regnum < NREGS))
- return regnum;
- else
- return -1;
+ /* skip the $R part and call atoi on the number */
+ regnum = atoi(s + 2);
+ if ((regnum >= 0) && (regnum < NREGS))
+ return regnum;
+ else
+ return -1;
} else {
- int i;
+ int i;
- for (i = 0; i < NREGS ; i++)
- if (strcasecmp(s + 1, lisp_register_names[i]) == 0)
+ for (i = 0; i < NREGS ; i++)
+ if (strcasecmp(s + 1, lisp_register_names[i]) == 0)
#ifdef LISP_FEATURE_X86
- return i*2;
+ return i*2;
#else
- return i;
+ return i;
#endif
-
- return -1;
+
+ return -1;
}
}
printf("expected an object\n");
throw_to_monitor();
} else if (token[0] == '$') {
- if (isalpha(token[1])) {
- int free;
- int regnum;
- os_context_t *context;
+ if (isalpha(token[1])) {
+ int free;
+ int regnum;
+ os_context_t *context;
- free = SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread)>>2;
+ free = SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread)>>2;
- if (free == 0) {
- printf("Variable ``%s'' is not valid -- there is no current interrupt context.\n", token);
- throw_to_monitor();
- }
+ if (free == 0) {
+ printf("Variable ``%s'' is not valid -- there is no current interrupt context.\n", token);
+ throw_to_monitor();
+ }
- context = thread->interrupt_contexts[free - 1];
+ context = thread->interrupt_contexts[free - 1];
- regnum = parse_regnum(token);
- if (regnum < 0) {
- printf("bogus register: ``%s''\n", token);
- throw_to_monitor();
- }
+ regnum = parse_regnum(token);
+ if (regnum < 0) {
+ printf("bogus register: ``%s''\n", token);
+ throw_to_monitor();
+ }
- result = *os_context_register_addr(context, regnum);
- } else if (!lookup_variable(token+1, &result)) {
+ result = *os_context_register_addr(context, regnum);
+ } else if (!lookup_variable(token+1, &result)) {
printf("unknown variable: ``%s''\n", token);
throw_to_monitor();
}
Even with the patch, the DSISR may not have its 'write' bit set
correctly (it tends not to be set if the fault was caused by
something other than a protection violation.)
-
+
Caveat callers. */
#ifndef PT_DAR
-#define PT_DAR 41
+#define PT_DAR 41
#endif
#ifndef PT_DSISR
-#define PT_DSISR 42
+#define PT_DSISR 42
#endif
void arch_init() {
}
-os_vm_address_t
+os_vm_address_t
arch_get_bad_addr(int sig, siginfo_t *code, os_context_t *context)
{
unsigned int *pc = (unsigned int *)(*os_context_pc_addr(context));
os_vm_address_t addr;
-
-
+
+
    /* Make sure it's not the pc that's bogus, and that it was lisp code */
/* that caused the fault. */
if ((((unsigned long)pc) & 3) != 0 ||
- ((pc < READ_ONLY_SPACE_START ||
- pc >= READ_ONLY_SPACE_START+READ_ONLY_SPACE_SIZE) &&
- ((lispobj *)pc < current_dynamic_space ||
- (lispobj *)pc >= current_dynamic_space + DYNAMIC_SPACE_SIZE)))
- return 0;
-
-
+ ((pc < READ_ONLY_SPACE_START ||
+ pc >= READ_ONLY_SPACE_START+READ_ONLY_SPACE_SIZE) &&
+ ((lispobj *)pc < current_dynamic_space ||
+ (lispobj *)pc >= current_dynamic_space + DYNAMIC_SPACE_SIZE)))
+ return 0;
+
+
addr = (os_vm_address_t) (*os_context_register_addr(context,PT_DAR));
return addr;
}
-
-void
+
+void
arch_skip_instruction(os_context_t *context)
{
char** pcptr;
}
-boolean
+boolean
arch_pseudo_atomic_atomic(os_context_t *context)
{
return ((*os_context_register_addr(context,reg_ALLOC)) & 4);
#define PSEUDO_ATOMIC_INTERRUPTED_BIAS 0x7f000000
-void
+void
arch_set_pseudo_atomic_interrupted(os_context_t *context)
{
- *os_context_register_addr(context,reg_NL3)
- += PSEUDO_ATOMIC_INTERRUPTED_BIAS;
+ *os_context_register_addr(context,reg_NL3)
+ += PSEUDO_ATOMIC_INTERRUPTED_BIAS;
}
-unsigned long
+unsigned long
arch_install_breakpoint(void *pc)
{
unsigned long *ptr = (unsigned long *)pc;
return result;
}
-void
+void
arch_remove_breakpoint(void *pc, unsigned long orig_inst)
{
*(unsigned long *)pc = orig_inst;
static unsigned long *skipped_break_addr, displaced_after_inst;
static sigset_t orig_sigmask;
-void
+void
arch_do_displaced_inst(os_context_t *context,unsigned int orig_inst)
{
/* not sure how we ensure that we get the breakpoint reinstalled
* after doing this -dan */
unsigned long *pc = (unsigned long *)(*os_context_pc_addr(context));
-
+
orig_sigmask = *os_context_sigmask_addr(context);
sigaddset_blockable(os_context_sigmask_addr(context));
-
+
*pc = orig_inst;
os_flush_icache((os_vm_address_t) pc, sizeof(unsigned long));
skipped_break_addr = pc;
}
-static void
+static void
sigtrap_handler(int signal, siginfo_t *siginfo, os_context_t *context)
{
u32 code;
#endif
code=*((u32 *)(*os_context_pc_addr(context)));
if (code == ((3 << 26) | (16 << 21) | (reg_ALLOC << 16))) {
- /* twlti reg_ALLOC,0 - check for deferred interrupt */
- *os_context_register_addr(context,reg_ALLOC)
- -= PSEUDO_ATOMIC_INTERRUPTED_BIAS;
- arch_skip_instruction(context);
- /* interrupt or GC was requested in PA; now we're done with the
- PA section we may as well get around to it */
- interrupt_handle_pending(context);
- return;
-
+ /* twlti reg_ALLOC,0 - check for deferred interrupt */
+ *os_context_register_addr(context,reg_ALLOC)
+ -= PSEUDO_ATOMIC_INTERRUPTED_BIAS;
+ arch_skip_instruction(context);
+ /* interrupt or GC was requested in PA; now we're done with the
+ PA section we may as well get around to it */
+ interrupt_handle_pending(context);
+ return;
+
}
if ((code >> 16) == ((3 << 10) | (6 << 5))) {
- /* twllei reg_ZERO,N will always trap if reg_ZERO = 0 */
- int trap = code & 0x1f;
-
- switch (trap) {
- case trap_Halt:
- fake_foreign_function_call(context);
- lose("%%primitive halt called; the party is over.\n");
-
- case trap_Error:
- case trap_Cerror:
- interrupt_internal_error(signal, code, context, trap == trap_Cerror);
- break;
-
- case trap_PendingInterrupt:
- /* This is supposed run after WITHOUT-INTERRUPTS if there
- * were pending signals. */
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- break;
-
- case trap_Breakpoint:
- handle_breakpoint(signal, code, context);
- break;
-
- case trap_FunEndBreakpoint:
- *os_context_pc_addr(context)
- =(int)handle_fun_end_breakpoint(signal, code, context);
- break;
-
- case trap_AfterBreakpoint:
- *skipped_break_addr = trap_Breakpoint;
- skipped_break_addr = NULL;
- *(unsigned long *)*os_context_pc_addr(context)
- = displaced_after_inst;
- *os_context_sigmask_addr(context)= orig_sigmask;
-
- os_flush_icache((os_vm_address_t) *os_context_pc_addr(context),
- sizeof(unsigned long));
- break;
-
- default:
- interrupt_handle_now(signal, code, context);
- break;
- }
+ /* twllei reg_ZERO,N will always trap if reg_ZERO = 0 */
+ int trap = code & 0x1f;
+
+ switch (trap) {
+ case trap_Halt:
+ fake_foreign_function_call(context);
+ lose("%%primitive halt called; the party is over.\n");
+
+ case trap_Error:
+ case trap_Cerror:
+ interrupt_internal_error(signal, code, context, trap == trap_Cerror);
+ break;
+
+ case trap_PendingInterrupt:
+          /* This is supposed to run after WITHOUT-INTERRUPTS if there
+ * were pending signals. */
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ break;
+
+ case trap_Breakpoint:
+ handle_breakpoint(signal, code, context);
+ break;
+
+ case trap_FunEndBreakpoint:
+ *os_context_pc_addr(context)
+ =(int)handle_fun_end_breakpoint(signal, code, context);
+ break;
+
+ case trap_AfterBreakpoint:
+ *skipped_break_addr = trap_Breakpoint;
+ skipped_break_addr = NULL;
+ *(unsigned long *)*os_context_pc_addr(context)
+ = displaced_after_inst;
+ *os_context_sigmask_addr(context)= orig_sigmask;
+
+ os_flush_icache((os_vm_address_t) *os_context_pc_addr(context),
+ sizeof(unsigned long));
+ break;
+
+ default:
+ interrupt_handle_now(signal, code, context);
+ break;
+ }
#ifdef LISP_FEATURE_DARWIN
- DARWIN_FIX_CONTEXT(context);
+ DARWIN_FIX_CONTEXT(context);
#endif
- return;
+ return;
}
if (((code >> 26) == 3) && (((code >> 21) & 31) == 24)) {
- interrupt_internal_error(signal, code, context, 0);
+ interrupt_internal_error(signal, code, context, 0);
#ifdef LISP_FEATURE_DARWIN
- DARWIN_FIX_CONTEXT(context);
+ DARWIN_FIX_CONTEXT(context);
#endif
- return;
+ return;
}
-
+
interrupt_handle_now(signal, code, context);
#ifdef LISP_FEATURE_DARWIN
/* Work around G5 bug */
* ori 13, 13, (low part of addr)
* mtctr 13
* bctr
- *
+ *
*/
int* inst_ptr;
unsigned long hi; /* Top 16 bits of address */
/*
* addis 13, 0, (hi part)
*/
-
+
inst = (15 << 26) | (LINKAGE_TEMP_REG << 21) | (0 << 16) | hi;
*inst_ptr++ = inst;
inst = (24 << 26) | (LINKAGE_TEMP_REG << 21) | (LINKAGE_TEMP_REG << 16) | lo;
*inst_ptr++ = inst;
-
+
/*
* mtctr 13
*/
*inst_ptr++ = inst;
-
+
os_flush_icache((os_vm_address_t) reloc_addr, (char*) inst_ptr - (char*) reloc_addr);
}
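
The trampoline writer above splits the target address into the 16-bit halves consumed by addis/ori; because ori zero-extends its immediate, the split is a plain mask and shift (unlike a lis/addi pairing, which would need a carry fix-up). A small illustration of the split and of assembling the two instruction words -- register 13 and the opcode numbers mirror the code above, and the target address is invented:

/* linkage_words.c -- illustration of the addis/ori address split. */
#include <stdio.h>
#include <stdint.h>

#define LINKAGE_TEMP_REG 13

int main(void)
{
    uint32_t target = 0x1234abcd;            /* example address */
    uint32_t hi = (target >> 16) & 0xffff;   /* top 16 bits      */
    uint32_t lo = target & 0xffff;           /* bottom 16 bits   */

    uint32_t addis = (15 << 26) | (LINKAGE_TEMP_REG << 21) | (0 << 16) | hi;
    uint32_t ori   = (24 << 26) | (LINKAGE_TEMP_REG << 21)
                   | (LINKAGE_TEMP_REG << 16) | lo;

    /* (hi << 16) | lo reconstructs the original address. */
    printf("addis=%08x ori=%08x rebuilt=%08x\n",
           addis, ori, (hi << 16) | lo);
    return 0;
}
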
-void
+void
arch_write_linkage_table_ref(void * reloc_addr, void *target_addr)
{
*(unsigned long *)reloc_addr = (unsigned long)target_addr;
#ifndef _PPC_ARCH_H
#define _PPC_ARCH_H
-static inline void
+static inline void
get_spinlock(lispobj *word,long value)
{
- *word=value; /* FIXME for threads */
+ *word=value; /* FIXME for threads */
}
static inline void
static int last_error = 0;
-void
+void
dlshim_image_callback(struct mach_header* ptr, unsigned long phooey)
{
callback_count++;
last_header = ptr;
}
-int
+int
lib_path_count(void)
{
char* libpath;
libpath = getenv("DYLD_LIBRARY_PATH");
count = 1;
if (libpath) {
- for (i = 0; libpath[i] != '\0'; i++) {
- if (libpath[i] == ':') count++;
- }
+ for (i = 0; libpath[i] != '\0'; i++) {
+ if (libpath[i] == ':') count++;
+ }
}
return count;
}
-const char*
+const char*
lib_path_prefixify(int index, const char* filename)
{
static char* retbuf = NULL;
int fi, li, i, count;
char* libpath;
if (!retbuf) {
- retbuf = (char*) malloc(1024*sizeof(char));
+ retbuf = (char*) malloc(1024*sizeof(char));
}
count = 0;
fi = 0;
li = -1;
libpath = getenv("DYLD_LIBRARY_PATH");
if (libpath) {
- i = 0;
- while (count != index && libpath[i] != '\0') {
- if (libpath[i] == ':') count++;
- i++;
- }
- fi = i;
- while (libpath[i] != '\0' && libpath[i] != ':') {
- i++;
- }
- li = i - 1;
+ i = 0;
+ while (count != index && libpath[i] != '\0') {
+ if (libpath[i] == ':') count++;
+ i++;
+ }
+ fi = i;
+ while (libpath[i] != '\0' && libpath[i] != ':') {
+ i++;
+ }
+ li = i - 1;
}
if (li - fi > 0) {
- if (li - fi + 1 > 1022 - strlen(filename)) {
- retbuf =
- (char*) realloc(retbuf, (li - fi + 3 + strlen(filename))*sizeof(char));
- }
- memcpy(retbuf, libpath + fi, (li - fi + 1)*sizeof(char));
- retbuf[li - fi + 1] = '/';
- memcpy(retbuf + li - fi + 2, filename, strlen(filename) + 1);
- return retbuf;
+ if (li - fi + 1 > 1022 - strlen(filename)) {
+ retbuf =
+ (char*) realloc(retbuf, (li - fi + 3 + strlen(filename))*sizeof(char));
+ }
+ memcpy(retbuf, libpath + fi, (li - fi + 1)*sizeof(char));
+ retbuf[li - fi + 1] = '/';
+ memcpy(retbuf + li - fi + 2, filename, strlen(filename) + 1);
+ return retbuf;
} else {
- return filename;
+ return filename;
}
}
-const void*
+const void*
dlopen(const char* filename, int flags)
{
static char has_callback = 0;
if (!has_callback) {
- _dyld_register_func_for_add_image(dlshim_image_callback);
+ _dyld_register_func_for_add_image(dlshim_image_callback);
}
if (!filename) {
- return &dl_self;
+ return &dl_self;
} else {
- const struct mach_header* img = NULL;
- if (!img)
- img = NSAddImage(filename, NSADDIMAGE_OPTION_RETURN_ON_ERROR);
- if (!img)
- img = NSAddImage(filename,
- NSADDIMAGE_OPTION_RETURN_ON_ERROR |
- NSADDIMAGE_OPTION_WITH_SEARCHING);
- if (!img) {
- NSObjectFileImage fileImage;
- callback_count = 0;
- last_header = NULL;
- if (NSCreateObjectFileImageFromFile(filename, &fileImage)
- == NSObjectFileImageSuccess) {
- NSLinkModule(fileImage, filename,
- NSLINKMODULE_OPTION_BINDNOW |
- ((flags & RTLD_GLOBAL)?NSLINKMODULE_OPTION_PRIVATE:0) |
- NSLINKMODULE_OPTION_RETURN_ON_ERROR);
- if (callback_count && last_header)
- img = last_header;
- }
- }
- if (!img) {
- NSObjectFileImage fileImage;
- int i, maxi;
- const char* prefixfilename;
- maxi = lib_path_count();
- for (i = 0; i < maxi && !img; i++) {
- prefixfilename = lib_path_prefixify(i, filename);
- callback_count = 0;
- last_header = NULL;
- if (NSCreateObjectFileImageFromFile(prefixfilename, &fileImage)
- == NSObjectFileImageSuccess) {
- NSLinkModule(fileImage, filename,
- NSLINKMODULE_OPTION_BINDNOW |
- ((flags & RTLD_GLOBAL)?NSLINKMODULE_OPTION_PRIVATE:0) |
- NSLINKMODULE_OPTION_RETURN_ON_ERROR);
- if (callback_count && last_header)
- img = last_header;
- }
- }
- }
- if (img) {
- if (flags & RTLD_NOW) {
- NSLookupSymbolInImage(img, "",
- NSLOOKUPSYMBOLINIMAGE_OPTION_BIND_FULLY |
- NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR);
- }
- if (NSIsSymbolNameDefinedInImage(img, "__init")) {
- NSSymbol* initsymbol;
- void (*initfunc) (void);
- initsymbol = NSLookupSymbolInImage(img, "__init", 0);
- initfunc = NSAddressOfSymbol(initsymbol);
- initfunc();
- }
- } else
- last_error = DLOPEN_ERROR;
- return img;
+ const struct mach_header* img = NULL;
+ if (!img)
+ img = NSAddImage(filename, NSADDIMAGE_OPTION_RETURN_ON_ERROR);
+ if (!img)
+ img = NSAddImage(filename,
+ NSADDIMAGE_OPTION_RETURN_ON_ERROR |
+ NSADDIMAGE_OPTION_WITH_SEARCHING);
+ if (!img) {
+ NSObjectFileImage fileImage;
+ callback_count = 0;
+ last_header = NULL;
+ if (NSCreateObjectFileImageFromFile(filename, &fileImage)
+ == NSObjectFileImageSuccess) {
+ NSLinkModule(fileImage, filename,
+ NSLINKMODULE_OPTION_BINDNOW |
+ ((flags & RTLD_GLOBAL)?NSLINKMODULE_OPTION_PRIVATE:0) |
+ NSLINKMODULE_OPTION_RETURN_ON_ERROR);
+ if (callback_count && last_header)
+ img = last_header;
+ }
+ }
+ if (!img) {
+ NSObjectFileImage fileImage;
+ int i, maxi;
+ const char* prefixfilename;
+ maxi = lib_path_count();
+ for (i = 0; i < maxi && !img; i++) {
+ prefixfilename = lib_path_prefixify(i, filename);
+ callback_count = 0;
+ last_header = NULL;
+ if (NSCreateObjectFileImageFromFile(prefixfilename, &fileImage)
+ == NSObjectFileImageSuccess) {
+ NSLinkModule(fileImage, filename,
+ NSLINKMODULE_OPTION_BINDNOW |
+ ((flags & RTLD_GLOBAL)?NSLINKMODULE_OPTION_PRIVATE:0) |
+ NSLINKMODULE_OPTION_RETURN_ON_ERROR);
+ if (callback_count && last_header)
+ img = last_header;
+ }
+ }
+ }
+ if (img) {
+ if (flags & RTLD_NOW) {
+ NSLookupSymbolInImage(img, "",
+ NSLOOKUPSYMBOLINIMAGE_OPTION_BIND_FULLY |
+ NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR);
+ }
+ if (NSIsSymbolNameDefinedInImage(img, "__init")) {
+ NSSymbol* initsymbol;
+ void (*initfunc) (void);
+ initsymbol = NSLookupSymbolInImage(img, "__init", 0);
+ initfunc = NSAddressOfSymbol(initsymbol);
+ initfunc();
+ }
+ } else
+ last_error = DLOPEN_ERROR;
+ return img;
}
}
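
A usage note: on Mach-O, C-level symbols carry a leading underscore, and this shim's dlsym hands the name straight to the NS* lookup routines, so callers supply the mangled name themselves. A hypothetical usage fragment (the library path and symbol name are examples only, not something the shim requires):

/* Hypothetical use of the dlopen/dlsym shim above. */
const void *handle = dlopen("/usr/lib/libSystem.B.dylib", RTLD_NOW);
if (handle) {
    double (*cos_fn)(double) =
        (double (*)(double)) dlsym((void *) handle, "_cos");
    if (cos_fn)
        printf("cos(0.0) = %f\n", cos_fn(0.0));
    else
        fprintf(stderr, "dlsym failed: %s\n", dlerror());
}
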
-const char*
+const char*
dlerror()
{
NSLinkEditErrors c;
char *result = NULL;
if (last_error) {
- NSLinkEditError(&c, &errorNumber, &fileName, &errorString);
- /* The errorString obtained by the above is too verbose for
- * our needs, so we just translate the errno.
- *
- * We also have simple fallbacks in case we've somehow lost
- * the context before this point. */
- if (errorNumber) {
- result = strerror(errorNumber);
- } else if (DLSYM_ERROR == last_error) {
- result = "dlsym(3) failed";
- } else if (DLOPEN_ERROR == last_error) {
- result = "dlopen(3) failed";
- }
- last_error = 0;
+ NSLinkEditError(&c, &errorNumber, &fileName, &errorString);
+ /* The errorString obtained by the above is too verbose for
+ * our needs, so we just translate the errno.
+ *
+ * We also have simple fallbacks in case we've somehow lost
+ * the context before this point. */
+ if (errorNumber) {
+ result = strerror(errorNumber);
+ } else if (DLSYM_ERROR == last_error) {
+ result = "dlsym(3) failed";
+ } else if (DLOPEN_ERROR == last_error) {
+ result = "dlopen(3) failed";
+ }
+ last_error = 0;
}
-
+
return result;
}
-void*
+void*
dlsym(void* handle, char* symbol)
{
if (handle == &dl_self) {
- if (NSIsSymbolNameDefined(symbol)) {
- NSSymbol* retsym;
- retsym = NSLookupAndBindSymbol(symbol);
- return NSAddressOfSymbol(retsym);
- } else {
+ if (NSIsSymbolNameDefined(symbol)) {
+ NSSymbol* retsym;
+ retsym = NSLookupAndBindSymbol(symbol);
+ return NSAddressOfSymbol(retsym);
+ } else {
last_error = DLSYM_ERROR;
- return NULL;
- }
+ return NULL;
+ }
} else {
- if (NSIsSymbolNameDefinedInImage(handle, symbol)) {
- NSSymbol* retsym;
- retsym = NSLookupSymbolInImage(handle, symbol, 0);
- return NSAddressOfSymbol(retsym);
- } else {
+ if (NSIsSymbolNameDefinedInImage(handle, symbol)) {
+ NSSymbol* retsym;
+ retsym = NSLookupSymbolInImage(handle, symbol, 0);
+ return NSAddressOfSymbol(retsym);
+ } else {
last_error = DLSYM_ERROR;
- return NULL;
- }
+ return NULL;
+ }
}
}
-int
+int
dlclose(void *handle)
{
/* dlclose is not implemented, and never will be for dylibs.
int j, max;
max = seg->nsects;
if (strncmp("SBCL", seg->segname, 4) == 0) {
- is_sbcl = 1;
- seg->vmsize = space_sizes[spacei];
+ is_sbcl = 1;
+ seg->vmsize = space_sizes[spacei];
} else {
- is_sbcl = 0;
+ is_sbcl = 0;
}
seg++;
sectptr = (struct section*) seg;
for (j = 0; j < max; j++) {
- if (is_sbcl) {
- sectptr->size = space_sizes[spacei];
- spacei++;
- }
- sectptr++;
+ if (is_sbcl) {
+ sectptr->size = space_sizes[spacei];
+ spacei++;
+ }
+ sectptr++;
}
}
}
char *lang = getenv ("LANG");
if ((lang != NULL) && (!strcmp(lang, "C"))) {
- return latin1;
+ return latin1;
} else {
- return utf8;
+ return utf8;
}
}
}
ppc_saved_state_t *state = &context->uc_mcontext->ss;
switch(offset) {
case 0:
- return &state->r0;
+ return &state->r0;
case 1:
- return &state->r1;
+ return &state->r1;
case 2:
- return &state->r2;
+ return &state->r2;
case 3:
- return &state->r3;
+ return &state->r3;
case 4:
- return &state->r4;
+ return &state->r4;
case 5:
- return &state->r5;
+ return &state->r5;
case 6:
- return &state->r6;
+ return &state->r6;
case 7:
- return &state->r7;
+ return &state->r7;
case 8:
- return &state->r8;
+ return &state->r8;
case 9:
- return &state->r9;
+ return &state->r9;
case 10:
- return &state->r10;
+ return &state->r10;
case 11:
- return &state->r11;
+ return &state->r11;
case 12:
- return &state->r12;
+ return &state->r12;
case 13:
- return &state->r13;
+ return &state->r13;
case 14:
- return &state->r14;
+ return &state->r14;
case 15:
- return &state->r15;
+ return &state->r15;
case 16:
- return &state->r16;
+ return &state->r16;
case 17:
- return &state->r17;
+ return &state->r17;
case 18:
- return &state->r18;
+ return &state->r18;
case 19:
- return &state->r19;
+ return &state->r19;
case 20:
- return &state->r20;
+ return &state->r20;
case 21:
- return &state->r21;
+ return &state->r21;
case 22:
- return &state->r22;
+ return &state->r22;
case 23:
- return &state->r23;
+ return &state->r23;
case 24:
- return &state->r24;
+ return &state->r24;
case 25:
- return &state->r25;
+ return &state->r25;
case 26:
- return &state->r26;
+ return &state->r26;
case 27:
- return &state->r27;
+ return &state->r27;
case 28:
- return &state->r28;
+ return &state->r28;
case 29:
- return &state->r29;
+ return &state->r29;
case 30:
- return &state->r30;
+ return &state->r30;
case 31:
- return &state->r31;
+ return &state->r31;
case 41:
- /* PT_DAR */
- return &context->uc_mcontext->es.dar;
+ /* PT_DAR */
+ return &context->uc_mcontext->es.dar;
case 42:
- /* PT_DSISR */
- return &context->uc_mcontext->es.dsisr;
+ /* PT_DSISR */
+ return &context->uc_mcontext->es.dsisr;
}
}
return &context->uc_mcontext->ss.lr;
}
-void
+void
os_flush_icache(os_vm_address_t address, os_vm_size_t length)
{
/* see ppc-arch.c */
size_t os_vm_page_size;
int arch_os_thread_init(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
int arch_os_thread_cleanup(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
os_context_register_t *
(the number of ppc registers), but that happens to get the
right answer. -- CSR, 2002-07-11 */
#if defined(GLIBC231_STYLE_UCONTEXT)
- return context->uc_mcontext.regs->gpr[PT_FPSCR];
+ return context->uc_mcontext.regs->gpr[PT_FPSCR];
#elif defined(GLIBC232_STYLE_UCONTEXT)
- return context->uc_mcontext.uc_regs->gregs[PT_FPSCR];
+ return context->uc_mcontext.uc_regs->gregs[PT_FPSCR];
#endif
}
-void
+void
os_restore_fp_control(os_context_t *context)
{
unsigned long control;
double d;
-
- control = os_context_fp_control(context) &
- /* FIXME: Should we preserve the user's requested rounding mode?
-
- Note that doing
-
- ~(FLOAT_STICKY_BITS_MASK | FLOAT_EXCEPTIONS_BYTE_MASK)
-
- here leads to infinite SIGFPE for invalid operations, as
- there are bits in the control register that need to be
- cleared that are let through by that mask. -- CSR, 2002-07-16 */
-
- FLOAT_TRAPS_BYTE_MASK;
-
+
+ control = os_context_fp_control(context) &
+ /* FIXME: Should we preserve the user's requested rounding mode?
+
+ Note that doing
+
+ ~(FLOAT_STICKY_BITS_MASK | FLOAT_EXCEPTIONS_BYTE_MASK)
+
+ here leads to infinite SIGFPE for invalid operations, as
+ there are bits in the control register that need to be
+ cleared that are let through by that mask. -- CSR, 2002-07-16 */
+
+ FLOAT_TRAPS_BYTE_MASK;
+
d = *((double *) &control);
/* Hmp. Apparently the following doesn't work either:
-
+
asm volatile ("mtfsf 0xff,%0" : : "f" (d));
causing segfaults at the first GC.
*/
}
-void
+void
os_flush_icache(os_vm_address_t address, os_vm_size_t length)
{
/* see ppc-arch.c */
#define NREGS 32
-#define reg_ZERO REG(0) /* Should always contain 0 in lisp */
-#define reg_NSP REG(1) /* The number/C stack pointer */
-#define reg_POLL REG(2) /* Lisp preemption/Mystery SVR4 ABI reg */
-#define reg_NL0 REG(3) /* FF param/result 1 */
-#define reg_NL1 REG(4) /* FF param/result 2 */
-#define reg_NL2 REG(5) /* FF param 3 */
+#define reg_ZERO REG(0) /* Should always contain 0 in lisp */
+#define reg_NSP REG(1) /* The number/C stack pointer */
+#define reg_POLL REG(2) /* Lisp preemption/Mystery SVR4 ABI reg */
+#define reg_NL0 REG(3) /* FF param/result 1 */
+#define reg_NL1 REG(4) /* FF param/result 2 */
+#define reg_NL2 REG(5) /* FF param 3 */
#define reg_NL3 REG(6)
#define reg_NL4 REG(7)
#define reg_NL5 REG(8)
-#define reg_NL6 REG(9) /* Last (7th) FF param */
+#define reg_NL6 REG(9) /* Last (7th) FF param */
#define reg_FDEFN REG(10) /* was NL7 until recently -dan */
#define reg_NARGS REG(11)
#ifdef LISP_FEATURE_DARWIN
-#define reg_CFUNC REG(12) /* Silly to blow a reg on FF-name */
-#define reg_NFP REG(13) /* Lisp may save around FF-call */
+#define reg_CFUNC REG(12) /* Silly to blow a reg on FF-name */
+#define reg_NFP REG(13) /* Lisp may save around FF-call */
#else
-#define reg_NFP REG(12) /* Lisp may save around FF-call */
-#define reg_CFUNC REG(13) /* Silly to blow a reg on FF-name */
+#define reg_NFP REG(12) /* Lisp may save around FF-call */
+#define reg_CFUNC REG(13) /* Silly to blow a reg on FF-name */
#endif
#define reg_BSP REG(14) /* Binding stack pointer */
-#define reg_CFP REG(15) /* Control/value stack frame pointer */
-#define reg_CSP REG(16) /* Control/value stack top */
-#define reg_ALLOC REG(17) /* (Global) dynamic free pointer */
-#define reg_NULL REG(18) /* NIL and globals nearby */
-#define reg_CODE REG(19) /* Current function object */
-#define reg_CNAME REG(20) /* Current function name */
-#define reg_LEXENV REG(21) /* And why burn a register for this ? */
+#define reg_CFP REG(15) /* Control/value stack frame pointer */
+#define reg_CSP REG(16) /* Control/value stack top */
+#define reg_ALLOC REG(17) /* (Global) dynamic free pointer */
+#define reg_NULL REG(18) /* NIL and globals nearby */
+#define reg_CODE REG(19) /* Current function object */
+#define reg_CNAME REG(20) /* Current function name */
+#define reg_LEXENV REG(21) /* And why burn a register for this ? */
#define reg_OCFP REG(22) /* The caller's reg_CFP */
-#define reg_LRA REG(23) /* Tagged lisp return address */
-#define reg_A0 REG(24) /* First function arg/return value */
-#define reg_A1 REG(25) /* Second. */
-#define reg_A2 REG(26) /* */
-#define reg_A3 REG(27) /* Last of (only) 4 arg regs */
-#define reg_L0 REG(28) /* Tagged temp regs */
+#define reg_LRA REG(23) /* Tagged lisp return address */
+#define reg_A0 REG(24) /* First function arg/return value */
+#define reg_A1 REG(25) /* Second. */
+#define reg_A2 REG(26) /* */
+#define reg_A3 REG(27) /* Last of (only) 4 arg regs */
+#define reg_L0 REG(28) /* Tagged temp regs */
#define reg_L1 REG(29)
-#define reg_L2 REG(30) /* Last lisp temp reg */
-#define reg_LIP REG(31) /* Lisp Interior Pointer, e.g., locative */
+#define reg_L2 REG(30) /* Last lisp temp reg */
+#define reg_LIP REG(31) /* Lisp Interior Pointer, e.g., locative */
#define REGNAMES \
- "ZERO", "NSP", "???", "NL0", \
- "NL1", "NL2", "NL3P", "NL4", \
- "NL5", "NL6", "FDEFN", "NARGS", \
- "NFP", "CFUNC", "BSP", "CFP", \
- "CSP", "ALLOC", "NULL", "CODE", \
- "CNAME", "LEXENV", "OCFP", "LRA", \
- "A0", "A1", "A2", "A3", \
- "L0", "L1", "L2", "LIP"
+ "ZERO", "NSP", "???", "NL0", \
+ "NL1", "NL2", "NL3P", "NL4", \
+ "NL5", "NL6", "FDEFN", "NARGS", \
+ "NFP", "CFUNC", "BSP", "CFP", \
+ "CSP", "ALLOC", "NULL", "CODE", \
+ "CNAME", "LEXENV", "OCFP", "LRA", \
+ "A0", "A1", "A2", "A3", \
+ "L0", "L1", "L2", "LIP"
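/* Illustrative sketch, not part of the original source: with the usual
 * REG(n) == n expansion, REGNAMES lines up index-for-index with the
 * register numbers above, so
 *
 *     static char *regnames[] = { REGNAMES };
 *     printf("%s\n", regnames[reg_A0]);
 *
 * prints "A0", since reg_A0 is REG(24) and "A0" is the 25th name in the
 * list. */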
#define BOXED_REGISTERS { \
reg_FDEFN, reg_CODE, reg_CNAME, reg_LEXENV, reg_OCFP, reg_LRA, \
break;
default:
- idx = type >> 2;
- if (idx < (sizeof(lowtag_Names) / sizeof(char *)))
- printf("%s", lowtag_Names[idx]);
- else
- printf("unknown type (0x%0x)", type);
+ idx = type >> 2;
+ if (idx < (sizeof(lowtag_Names) / sizeof(char *)))
+ printf("%s", lowtag_Names[idx]);
+ else
+ printf("unknown type (0x%0x)", type);
break;
}
}
idx = type >> 2;
if (idx < (sizeof(lowtag_Names) / sizeof(char *)))
- printf(", %s", lowtag_Names[idx]);
+ printf(", %s", lowtag_Names[idx]);
else
- printf(", unknown type (0x%0x)", type);
+ printf(", unknown type (0x%0x)", type);
switch (widetag_of(obj)) {
case CHARACTER_WIDETAG:
int length = 0;
if (!is_valid_lisp_addr((os_vm_address_t)native_pointer(obj)))
- printf("(invalid Lisp-level address)");
+ printf("(invalid Lisp-level address)");
else if (obj == NIL)
printf("NIL");
else {
static void print_list(lispobj obj)
{
if (!is_valid_lisp_addr((os_vm_address_t)native_pointer(obj))) {
- printf("(invalid address)");
+ printf("(invalid address)");
} else if (obj == NIL) {
printf(" (NIL)");
} else {
char buffer[16];
print_obj("type: ", ((struct instance *)native_pointer(obj))->slots[0]);
for (i = 1; i < HeaderValue(instance->header); i++) {
- sprintf(buffer, "slot %d: ", i);
- print_obj(buffer, instance->slots[i]);
+ sprintf(buffer, "slot %d: ", i);
+ print_obj(buffer, instance->slots[i]);
}
}
ptr = (lispobj *) native_pointer(obj);
if (!is_valid_lisp_addr((os_vm_address_t)obj)) {
- printf("(invalid address)");
- return;
+ printf("(invalid address)");
+ return;
}
header = *ptr;
print_obj(*slots++, *ptr++);
} else {
print_obj("???: ", *ptr++);
- }
+ }
}
}
"plist: ", "name: ", "package: ",
#ifdef LISP_FEATURE_SB_THREAD
"tls-index: " ,
-#endif
+#endif
NULL};
static char *ratio_slots[] = {"numer: ", "denom: ", NULL};
static char *complex_slots[] = {"real: ", "imag: ", NULL};
static void print_otherptr(lispobj obj)
{
if (!is_valid_lisp_addr((os_vm_address_t)obj)) {
- printf("(invalid address)");
+ printf("(invalid address)");
} else {
#ifndef LISP_FEATURE_ALPHA
lispobj *ptr;
int count, type, index;
char *cptr, buffer[16];
- ptr = (lispobj*) native_pointer(obj);
- if (ptr == NULL) {
- printf(" (NULL Pointer)");
- return;
- }
+ ptr = (lispobj*) native_pointer(obj);
+ if (ptr == NULL) {
+ printf(" (NULL Pointer)");
+ return;
+ }
- header = *ptr++;
- length = (*ptr) >> 2;
- count = header>>8;
- type = widetag_of(header);
+ header = *ptr++;
+ length = (*ptr) >> 2;
+ count = header>>8;
+ type = widetag_of(header);
print_obj("header: ", header);
if (lowtag_of(header) != OTHER_IMMEDIATE_0_LOWTAG &&
- lowtag_of(header) != OTHER_IMMEDIATE_1_LOWTAG) {
+ lowtag_of(header) != OTHER_IMMEDIATE_1_LOWTAG) {
NEWLINE_OR_RETURN;
printf("(invalid header object)");
return;
case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
- case SIMPLE_CHARACTER_STRING_WIDETAG: /* FIXME */
+ case SIMPLE_CHARACTER_STRING_WIDETAG: /* FIXME */
#endif
NEWLINE_OR_RETURN;
cptr = (char *)(ptr+1);
case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
#endif
case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
- case COMPLEX_CHARACTER_STRING_WIDETAG:
+ case COMPLEX_CHARACTER_STRING_WIDETAG:
#endif
case COMPLEX_VECTOR_NIL_WIDETAG:
case COMPLEX_BIT_VECTOR_WIDETAG:
break;
case VALUE_CELL_HEADER_WIDETAG:
- print_slots(value_cell_slots, 1, ptr);
+ print_slots(value_cell_slots, 1, ptr);
break;
case SAP_WIDETAG:
break;
case WEAK_POINTER_WIDETAG:
- print_slots(weak_pointer_slots, 1, ptr);
+ print_slots(weak_pointer_slots, 1, ptr);
break;
case CHARACTER_WIDETAG:
printf("pointer to an immediate?");
break;
- case FDEFN_WIDETAG:
- print_slots(fdefn_slots, count, ptr);
- break;
-
+ case FDEFN_WIDETAG:
+ print_slots(fdefn_slots, count, ptr);
+ break;
+
default:
NEWLINE_OR_RETURN;
printf("Unknown header object?");
static void print_obj(char *prefix, lispobj obj)
{
static void (*verbose_fns[])(lispobj obj)
- = {print_fixnum, print_struct, print_otherimm, print_list,
- print_fixnum, print_otherptr, print_otherimm, print_otherptr};
+ = {print_fixnum, print_struct, print_otherimm, print_list,
+ print_fixnum, print_otherptr, print_otherimm, print_otherptr};
static void (*brief_fns[])(lispobj obj)
- = {brief_fixnum, brief_struct, brief_otherimm, brief_list,
- brief_fixnum, brief_otherptr, brief_otherimm, brief_otherptr};
+ = {brief_fixnum, brief_struct, brief_otherimm, brief_list,
+ brief_fixnum, brief_otherptr, brief_otherimm, brief_otherptr};
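/* Illustrative note, not from the original source: on 32-bit builds
 * lowtag_of() yields a value in 0..7, which indexes these eight-entry
 * tables directly; a cons (LIST_POINTER_LOWTAG, tag 3) dispatches to
 * print_list/brief_list, and an other-pointer (tag 7) to print_otherptr. */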
int type = lowtag_of(obj);
struct var *var = lookup_by_obj(obj);
char buffer[256];
dont_descend = 1;
if (var == NULL &&
- /* FIXME: What does this "x & y & z & .." expression mean? */
- (obj & FUN_POINTER_LOWTAG & LIST_POINTER_LOWTAG & INSTANCE_POINTER_LOWTAG & OTHER_POINTER_LOWTAG) != 0)
+ /* FIXME: What does this "x & y & z & .." expression mean? */
+ (obj & FUN_POINTER_LOWTAG & LIST_POINTER_LOWTAG & INSTANCE_POINTER_LOWTAG & OTHER_POINTER_LOWTAG) != 0)
var = define_var(NULL, obj, 0);
if (var != NULL)
{
printf("lispobj 0x%lx\n", (unsigned long)obj);
}
-
+
#endif /* defined(LISP_FEATURE_SB_LDB) */
#if 1
#define gc_assert(ex) do { \
- if (!(ex)) gc_abort(); \
+ if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex)
{
#ifndef LISP_FEATURE_GENCGC
return (ptr >= (lispobj)current_dynamic_space
- &&
- ptr < (lispobj)dynamic_space_free_pointer);
+ &&
+ ptr < (lispobj)dynamic_space_free_pointer);
#else
/* Be more conservative, and remember, this is a maybe. */
return (ptr >= (lispobj)DYNAMIC_SPACE_START
- &&
- ptr < (lispobj)dynamic_space_free_pointer);
+ &&
+ ptr < (lispobj)dynamic_space_free_pointer);
#endif
}
static inline lispobj *
-newspace_alloc(long nwords, int constantp)
+newspace_alloc(long nwords, int constantp)
{
lispobj *ret;
nwords=CEILING(nwords,2);
if(constantp) {
- ret=read_only_free;
- read_only_free+=nwords;
+ ret=read_only_free;
+ read_only_free+=nwords;
} else {
- ret=static_free;
- static_free+=nwords;
+ ret=static_free;
+ static_free+=nwords;
}
return ret;
}
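/* Illustrative usage, not part of the original source: purify-time callers
 * choose the target space with the second argument, e.g.
 *
 *     lispobj *vec  = newspace_alloc(nwords, 1);    constant, read-only space
 *     lispobj *cell = newspace_alloc(nwords, 0);    mutable, static space
 *
 * and the CEILING to an even word count keeps allocations two-word
 * aligned. */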
/* If it's not a return address then it needs to be a valid Lisp
* pointer. */
if (!is_lisp_pointer((lispobj)pointer))
- return 0;
+ return 0;
/* Check that the object pointed to is consistent with the pointer
* low tag. */
switch (lowtag_of((lispobj)pointer)) {
case FUN_POINTER_LOWTAG:
- /* Start_addr should be the enclosing code object, or a closure
- * header. */
- switch (widetag_of(*start_addr)) {
- case CODE_HEADER_WIDETAG:
- /* This case is probably caught above. */
- break;
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if ((long)pointer != ((long)start_addr+FUN_POINTER_LOWTAG)) {
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wf2: %x %x %x\n",
- (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
- break;
- default:
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wf3: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
- break;
+ /* Start_addr should be the enclosing code object, or a closure
+ * header. */
+ switch (widetag_of(*start_addr)) {
+ case CODE_HEADER_WIDETAG:
+ /* This case is probably caught above. */
+ break;
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ if ((long)pointer != ((long)start_addr+FUN_POINTER_LOWTAG)) {
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wf2: %x %x %x\n",
+ (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
+ break;
+ default:
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wf3: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
+ break;
case LIST_POINTER_LOWTAG:
- if ((long)pointer != ((long)start_addr+LIST_POINTER_LOWTAG)) {
- if (pointer_filter_verbose)
- fprintf(stderr,"*Wl1: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- return 0;
- }
- /* Is it plausible cons? */
- if ((is_lisp_pointer(start_addr[0])
- || ((start_addr[0] & FIXNUM_TAG_MASK) == 0) /* fixnum */
- || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
+ if ((long)pointer != ((long)start_addr+LIST_POINTER_LOWTAG)) {
+ if (pointer_filter_verbose)
+ fprintf(stderr,"*Wl1: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ return 0;
+ }
+        /* Is it a plausible cons? */
+ if ((is_lisp_pointer(start_addr[0])
+ || ((start_addr[0] & FIXNUM_TAG_MASK) == 0) /* fixnum */
+ || (widetag_of(start_addr[0]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
- || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
+ || (widetag_of(start_addr[0]) == SINGLE_FLOAT_WIDETAG)
#endif
- || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
- && (is_lisp_pointer(start_addr[1])
- || ((start_addr[1] & FIXNUM_TAG_MASK) == 0) /* fixnum */
- || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
+ || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
+ && (is_lisp_pointer(start_addr[1])
+ || ((start_addr[1] & FIXNUM_TAG_MASK) == 0) /* fixnum */
+ || (widetag_of(start_addr[1]) == CHARACTER_WIDETAG)
#if N_WORD_BITS == 64
- || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
-#endif
- || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG))) {
- break;
- } else {
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wl2: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
+ || (widetag_of(start_addr[1]) == SINGLE_FLOAT_WIDETAG)
+#endif
+ || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG))) {
+ break;
+ } else {
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wl2: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
case INSTANCE_POINTER_LOWTAG:
- if ((long)pointer != ((long)start_addr+INSTANCE_POINTER_LOWTAG)) {
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wi1: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
- if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wi2: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
- break;
+ if ((long)pointer != ((long)start_addr+INSTANCE_POINTER_LOWTAG)) {
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wi1: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
+ if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wi2: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
+ break;
case OTHER_POINTER_LOWTAG:
- if ((long)pointer != ((long)start_addr+OTHER_POINTER_LOWTAG)) {
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wo1: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
- /* Is it plausible? Not a cons. XXX should check the headers. */
- if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & FIXNUM_TAG_MASK) == 0)) {
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wo2: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
- switch (widetag_of(start_addr[0])) {
- case UNBOUND_MARKER_WIDETAG:
- case CHARACTER_WIDETAG:
+ if ((long)pointer != ((long)start_addr+OTHER_POINTER_LOWTAG)) {
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wo1: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
+ /* Is it plausible? Not a cons. XXX should check the headers. */
+ if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & FIXNUM_TAG_MASK) == 0)) {
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wo2: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
+ switch (widetag_of(start_addr[0])) {
+ case UNBOUND_MARKER_WIDETAG:
+ case CHARACTER_WIDETAG:
#if N_WORD_BITS == 64
- case SINGLE_FLOAT_WIDETAG:
-#endif
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wo3: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
-
- /* only pointed to by function pointers? */
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wo4: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
-
- case INSTANCE_HEADER_WIDETAG:
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wo5: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
-
- /* the valid other immediate pointer objects */
- case SIMPLE_VECTOR_WIDETAG:
- case RATIO_WIDETAG:
- case COMPLEX_WIDETAG:
+ case SINGLE_FLOAT_WIDETAG:
+#endif
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wo3: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+
+ /* only pointed to by function pointers? */
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wo4: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+
+ case INSTANCE_HEADER_WIDETAG:
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wo5: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+
+ /* the valid other immediate pointer objects */
+ case SIMPLE_VECTOR_WIDETAG:
+ case RATIO_WIDETAG:
+ case COMPLEX_WIDETAG:
#ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
- case COMPLEX_SINGLE_FLOAT_WIDETAG:
+ case COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
- case COMPLEX_DOUBLE_FLOAT_WIDETAG:
+ case COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef COMPLEX_LONG_FLOAT_WIDETAG
- case COMPLEX_LONG_FLOAT_WIDETAG:
+ case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_ARRAY_WIDETAG:
- case COMPLEX_BASE_STRING_WIDETAG:
+ case SIMPLE_ARRAY_WIDETAG:
+ case COMPLEX_BASE_STRING_WIDETAG:
#ifdef COMPLEX_CHARACTER_STRING_WIDETAG
- case COMPLEX_CHARACTER_STRING_WIDETAG:
-#endif
- case COMPLEX_VECTOR_NIL_WIDETAG:
- case COMPLEX_BIT_VECTOR_WIDETAG:
- case COMPLEX_VECTOR_WIDETAG:
- case COMPLEX_ARRAY_WIDETAG:
- case VALUE_CELL_HEADER_WIDETAG:
- case SYMBOL_HEADER_WIDETAG:
- case FDEFN_WIDETAG:
- case CODE_HEADER_WIDETAG:
- case BIGNUM_WIDETAG:
+ case COMPLEX_CHARACTER_STRING_WIDETAG:
+#endif
+ case COMPLEX_VECTOR_NIL_WIDETAG:
+ case COMPLEX_BIT_VECTOR_WIDETAG:
+ case COMPLEX_VECTOR_WIDETAG:
+ case COMPLEX_ARRAY_WIDETAG:
+ case VALUE_CELL_HEADER_WIDETAG:
+ case SYMBOL_HEADER_WIDETAG:
+ case FDEFN_WIDETAG:
+ case CODE_HEADER_WIDETAG:
+ case BIGNUM_WIDETAG:
#if N_WORD_BITS != 64
- case SINGLE_FLOAT_WIDETAG:
+ case SINGLE_FLOAT_WIDETAG:
#endif
- case DOUBLE_FLOAT_WIDETAG:
+ case DOUBLE_FLOAT_WIDETAG:
#ifdef LONG_FLOAT_WIDETAG
- case LONG_FLOAT_WIDETAG:
+ case LONG_FLOAT_WIDETAG:
#endif
- case SIMPLE_ARRAY_NIL_WIDETAG:
- case SIMPLE_BASE_STRING_WIDETAG:
+ case SIMPLE_ARRAY_NIL_WIDETAG:
+ case SIMPLE_BASE_STRING_WIDETAG:
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
- case SIMPLE_CHARACTER_STRING_WIDETAG:
-#endif
- case SIMPLE_BIT_VECTOR_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_CHARACTER_STRING_WIDETAG:
+#endif
+ case SIMPLE_BIT_VECTOR_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_7_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_15_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
- case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_31_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_60_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_63_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG
- case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_64_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_61_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG
- case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
+ case SIMPLE_ARRAY_SIGNED_BYTE_64_WIDETAG:
#endif
- case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
- case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
- case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
+ case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
- case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
-#endif
- case SAP_WIDETAG:
- case WEAK_POINTER_WIDETAG:
- break;
-
- default:
- if (pointer_filter_verbose) {
- fprintf(stderr,"*Wo6: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
- }
- break;
+ case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
+#endif
+ case SAP_WIDETAG:
+ case WEAK_POINTER_WIDETAG:
+ break;
+
+ default:
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*Wo6: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
+ }
+ break;
default:
- if (pointer_filter_verbose) {
- fprintf(stderr,"*W?: %x %x %x\n", (unsigned long) pointer,
- (unsigned long) start_addr, *start_addr);
- }
- return 0;
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*W?: %x %x %x\n", (unsigned long) pointer,
+ (unsigned long) start_addr, *start_addr);
+ }
+ return 0;
}
/* looks good */
num_valid_stack_locations = 0;
num_valid_stack_ra_locations = 0;
for (sp = lowaddr; sp < base; sp++) {
- lispobj thing = *sp;
- /* Find the object start address */
- lispobj *start_addr = search_dynamic_space((void *)thing);
- if (start_addr) {
- /* We need to allow raw pointers into Code objects for
- * return addresses. This will also pick up pointers to
- * functions in code objects. */
- if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
- /* FIXME asserting here is a really dumb thing to do.
- * If we've overflowed some arbitrary static limit, we
- * should just refuse to purify, instead of killing
- * the whole lisp session
- */
- gc_assert(num_valid_stack_ra_locations <
- MAX_STACK_RETURN_ADDRESSES);
- valid_stack_ra_locations[num_valid_stack_ra_locations] = sp;
- valid_stack_ra_code_objects[num_valid_stack_ra_locations++] =
- (lispobj *)((long)start_addr + OTHER_POINTER_LOWTAG);
- } else {
- if (valid_dynamic_space_pointer((void *)thing, start_addr)) {
- gc_assert(num_valid_stack_locations < MAX_STACK_POINTERS);
- valid_stack_locations[num_valid_stack_locations++] = sp;
- }
- }
- }
+ lispobj thing = *sp;
+ /* Find the object start address */
+ lispobj *start_addr = search_dynamic_space((void *)thing);
+ if (start_addr) {
+ /* We need to allow raw pointers into Code objects for
+ * return addresses. This will also pick up pointers to
+ * functions in code objects. */
+ if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
+ /* FIXME asserting here is a really dumb thing to do.
+ * If we've overflowed some arbitrary static limit, we
+ * should just refuse to purify, instead of killing
+ * the whole lisp session
+ */
+ gc_assert(num_valid_stack_ra_locations <
+ MAX_STACK_RETURN_ADDRESSES);
+ valid_stack_ra_locations[num_valid_stack_ra_locations] = sp;
+ valid_stack_ra_code_objects[num_valid_stack_ra_locations++] =
+ (lispobj *)((long)start_addr + OTHER_POINTER_LOWTAG);
+ } else {
+ if (valid_dynamic_space_pointer((void *)thing, start_addr)) {
+ gc_assert(num_valid_stack_locations < MAX_STACK_POINTERS);
+ valid_stack_locations[num_valid_stack_locations++] = sp;
+ }
+ }
+ }
}
if (pointer_filter_verbose) {
- fprintf(stderr, "number of valid stack pointers = %d\n",
- num_valid_stack_locations);
- fprintf(stderr, "number of stack return addresses = %d\n",
- num_valid_stack_ra_locations);
+ fprintf(stderr, "number of valid stack pointers = %d\n",
+ num_valid_stack_locations);
+ fprintf(stderr, "number of stack return addresses = %d\n",
+ num_valid_stack_ra_locations);
}
}
long i;
for (i = 0; i < num_valid_stack_locations; i++)
- pscav(valid_stack_locations[i], 1, 0);
+ pscav(valid_stack_locations[i], 1, 0);
for (i = 0; i < num_valid_stack_ra_locations; i++) {
- lispobj code_obj = (lispobj)valid_stack_ra_code_objects[i];
- pscav(&code_obj, 1, 0);
- if (pointer_filter_verbose) {
- fprintf(stderr,"*C moved RA %x to %x; for code object %x to %x\n",
- *valid_stack_ra_locations[i],
- (long)(*valid_stack_ra_locations[i])
- - ((long)valid_stack_ra_code_objects[i] - (long)code_obj),
- (unsigned long) valid_stack_ra_code_objects[i], code_obj);
- }
- *valid_stack_ra_locations[i] =
- ((long)(*valid_stack_ra_locations[i])
- - ((long)valid_stack_ra_code_objects[i] - (long)code_obj));
+ lispobj code_obj = (lispobj)valid_stack_ra_code_objects[i];
+ pscav(&code_obj, 1, 0);
+ if (pointer_filter_verbose) {
+ fprintf(stderr,"*C moved RA %x to %x; for code object %x to %x\n",
+ *valid_stack_ra_locations[i],
+ (long)(*valid_stack_ra_locations[i])
+ - ((long)valid_stack_ra_code_objects[i] - (long)code_obj),
+ (unsigned long) valid_stack_ra_code_objects[i], code_obj);
+ }
+ *valid_stack_ra_locations[i] =
+ ((long)(*valid_stack_ra_locations[i])
+ - ((long)valid_stack_ra_code_objects[i] - (long)code_obj));
}
}
#endif
switch (pure) {
case T:
- return (ptrans_boxed(thing, header, 1));
+ return (ptrans_boxed(thing, header, 1));
case NIL:
- return (ptrans_boxed(thing, header, 0));
+ return (ptrans_boxed(thing, header, 0));
case 0:
- {
- /* Substructure: special case for the COMPACT-INFO-ENVs,
- * where the instance may have a point to the dynamic
- * space placed into it (e.g. the cache-name slot), but
- * the lists and arrays at the time of a purify can be
- * moved to the RO space. */
- long nwords;
- lispobj result, *new, *old;
+ {
+ /* Substructure: special case for the COMPACT-INFO-ENVs,
+         * where the instance may have a pointer to the dynamic
+ * space placed into it (e.g. the cache-name slot), but
+ * the lists and arrays at the time of a purify can be
+ * moved to the RO space. */
+ long nwords;
+ lispobj result, *new, *old;
- nwords = CEILING(1 + HeaderValue(header), 2);
+ nwords = CEILING(1 + HeaderValue(header), 2);
- /* Allocate it */
- old = (lispobj *)native_pointer(thing);
- new = newspace_alloc(nwords, 0); /* inconstant */
+ /* Allocate it */
+ old = (lispobj *)native_pointer(thing);
+ new = newspace_alloc(nwords, 0); /* inconstant */
- /* Copy it. */
- bcopy(old, new, nwords * sizeof(lispobj));
+ /* Copy it. */
+ bcopy(old, new, nwords * sizeof(lispobj));
- /* Deposit forwarding pointer. */
- result = make_lispobj(new, lowtag_of(thing));
- *old = result;
+ /* Deposit forwarding pointer. */
+ result = make_lispobj(new, lowtag_of(thing));
+ *old = result;
- /* Scavenge it. */
- pscav(new, nwords, 1);
+ /* Scavenge it. */
+ pscav(new, nwords, 1);
- return result;
- }
+ return result;
+ }
default:
- gc_abort();
- return NIL; /* dummy value: return something ... */
+ gc_abort();
+ return NIL; /* dummy value: return something ... */
}
}
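/* Illustrative summary, not from the original source: the pure flag steers
 * the copy above -- T sends the instance to read-only space via
 * ptrans_boxed, NIL sends it to static space, and 0 (the COMPACT-INFO-ENV
 * substructure case) copies it to static space and scavenges it
 * immediately as constant. */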
/* Allocate it */
old = (lispobj *)native_pointer(thing);
- new = newspace_alloc(nwords, 0); /* inconstant */
+ new = newspace_alloc(nwords, 0); /* inconstant */
/* Copy it. */
bcopy(old, new, nwords * sizeof(lispobj));
{
long nwords;
lispobj result, *new, *old;
-
+
nwords = CEILING(1 + HeaderValue(header), 2);
-
+
/* Allocate it */
old = (lispobj *)native_pointer(thing);
- new = newspace_alloc(nwords,1); /* always constant */
-
+ new = newspace_alloc(nwords,1); /* always constant */
+
/* copy it. */
bcopy(old, new, nwords * sizeof(lispobj));
-
+
/* Deposit forwarding pointer. */
result = make_lispobj(new , lowtag_of(thing));
*old = result;
static lispobj
ptrans_vector(lispobj thing, long bits, long extra,
- boolean boxed, boolean constant)
+ boolean boxed, boolean constant)
{
struct vector *vector;
long nwords;
nwords = 2;
} else {
nwords = CEILING(NWORDS(length, bits) + 2, 2);
- }
+ }
new=newspace_alloc(nwords, (constant || !boxed));
bcopy(vector, new, nwords * sizeof(lispobj));
/* It will be 0 or the unbound-marker if there are no fixups, and
* will be an other-pointer to a vector if it is valid. */
if ((fixups==0) ||
- (fixups==UNBOUND_MARKER_WIDETAG) ||
- !is_lisp_pointer(fixups)) {
+ (fixups==UNBOUND_MARKER_WIDETAG) ||
+ !is_lisp_pointer(fixups)) {
#ifdef LISP_FEATURE_GENCGC
- /* Check for a possible errors. */
- sniff_code_object(new_code,displacement);
+        /* Check for possible errors. */
+ sniff_code_object(new_code,displacement);
#endif
- return;
+ return;
}
fixups_vector = (struct vector *)native_pointer(fixups);
/* Could be pointing to a forwarding pointer. */
if (is_lisp_pointer(fixups) && (dynamic_pointer_p(fixups))
- && forwarding_pointer_p(*(lispobj *)fixups_vector)) {
- /* If so then follow it. */
- fixups_vector =
- (struct vector *)native_pointer(*(lispobj *)fixups_vector);
+ && forwarding_pointer_p(*(lispobj *)fixups_vector)) {
+ /* If so then follow it. */
+ fixups_vector =
+ (struct vector *)native_pointer(*(lispobj *)fixups_vector);
}
if (widetag_of(fixups_vector->header) == SIMPLE_ARRAY_WORD_WIDETAG) {
- /* We got the fixups for the code block. Now work through the
- * vector, and apply a fixup at each address. */
- long length = fixnum_value(fixups_vector->length);
- long i;
- for (i=0; i<length; i++) {
- unsigned offset = fixups_vector->data[i];
- /* Now check the current value of offset. */
- unsigned old_value =
- *(unsigned *)((unsigned)code_start_addr + offset);
-
- /* If it's within the old_code object then it must be an
- * absolute fixup (relative ones are not saved) */
- if ((old_value>=(unsigned)old_code)
- && (old_value<((unsigned)old_code + nwords * N_WORD_BYTES)))
- /* So add the dispacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) = old_value
- + displacement;
- else
- /* It is outside the old code object so it must be a relative
- * fixup (absolute fixups are not saved). So subtract the
- * displacement. */
- *(unsigned *)((unsigned)code_start_addr + offset) = old_value
- - displacement;
- }
+ /* We got the fixups for the code block. Now work through the
+ * vector, and apply a fixup at each address. */
+ long length = fixnum_value(fixups_vector->length);
+ long i;
+ for (i=0; i<length; i++) {
+ unsigned offset = fixups_vector->data[i];
+ /* Now check the current value of offset. */
+ unsigned old_value =
+ *(unsigned *)((unsigned)code_start_addr + offset);
+
+ /* If it's within the old_code object then it must be an
+ * absolute fixup (relative ones are not saved) */
+ if ((old_value>=(unsigned)old_code)
+ && (old_value<((unsigned)old_code + nwords * N_WORD_BYTES)))
+                /* So add the displacement. */
+ *(unsigned *)((unsigned)code_start_addr + offset) = old_value
+ + displacement;
+ else
+ /* It is outside the old code object so it must be a relative
+ * fixup (absolute fixups are not saved). So subtract the
+ * displacement. */
+ *(unsigned *)((unsigned)code_start_addr + offset) = old_value
+ - displacement;
+ }
}
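/* Worked example, not from the original source (hypothetical numbers): if
 * a code object moved from 0x100000 to 0x180000, the displacement is
 * 0x80000.  An absolute fixup word holding 0x100040 (inside the old
 * object) is rewritten to 0x180040, while a relative fixup word holding
 * 0x90000 (outside the old object) becomes 0x10000, because the referenced
 * target stayed put while the referencing instruction moved up by
 * 0x80000. */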
/* No longer need the fixups. */
code = (struct code *)native_pointer(thing);
nwords = CEILING(HeaderValue(code->header) + fixnum_value(code->code_size),
- 2);
+ 2);
new = (struct code *)newspace_alloc(nwords,1); /* constant */
src/compiler/target-disassem.lisp. -- CSR, 2004-01-08 */
if (!(fixnump(new->trace_table_offset)))
#if 0
- pscav(&new->trace_table_offset, 1, 0);
+ pscav(&new->trace_table_offset, 1, 0);
#else
new->trace_table_offset = NIL; /* limit lifetime */
#endif
gc_assert(!dynamic_pointer_p(func));
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- /* Temporarily convert the self pointer to a real function pointer. */
- ((struct simple_fun *)native_pointer(func))->self
- -= FUN_RAW_ADDR_OFFSET;
+ /* Temporarily convert the self pointer to a real function pointer. */
+ ((struct simple_fun *)native_pointer(func))->self
+ -= FUN_RAW_ADDR_OFFSET;
#endif
pscav(&((struct simple_fun *)native_pointer(func))->self, 2, 1);
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- ((struct simple_fun *)native_pointer(func))->self
- += FUN_RAW_ADDR_OFFSET;
+ ((struct simple_fun *)native_pointer(func))->self
+ += FUN_RAW_ADDR_OFFSET;
#endif
pscav_later(&((struct simple_fun *)native_pointer(func))->name, 3);
}
if (widetag_of(header) == SIMPLE_FUN_HEADER_WIDETAG) {
- /* We can only end up here if the code object has not been
+ /* We can only end up here if the code object has not been
* scavenged, because if it had been scavenged, forwarding pointers
* would have been left behind for all the entry points. */
function = (struct simple_fun *)native_pointer(thing);
code =
- make_lispobj
- ((native_pointer(thing) -
- (HeaderValue(function->header))), OTHER_POINTER_LOWTAG);
-
- /* This will cause the function's header to be replaced with a
+ make_lispobj
+ ((native_pointer(thing) -
+ (HeaderValue(function->header))), OTHER_POINTER_LOWTAG);
+
+ /* This will cause the function's header to be replaced with a
* forwarding pointer. */
ptrans_code(code);
return function->header;
}
else {
- /* It's some kind of closure-like thing. */
+ /* It's some kind of closure-like thing. */
nwords = CEILING(1 + HeaderValue(header), 2);
old = (lispobj *)native_pointer(thing);
-        /* Allocate the new one. FINs *must* not go in read_only
-        * space. Closures can; they never change */
- new = newspace_alloc
-     (nwords,(widetag_of(header)!=FUNCALLABLE_INSTANCE_HEADER_WIDETAG));
-
+        /* Allocate the new one. FINs *must* not go in read_only
+         * space. Closures can; they never change */
+
+        new = newspace_alloc
+            (nwords,(widetag_of(header)!=FUNCALLABLE_INSTANCE_HEADER_WIDETAG));
/* Copy it. */
bcopy(old, new, nwords * sizeof(lispobj));
do {
/* Allocate a new cons cell. */
old = (struct cons *)native_pointer(thing);
- new = (struct cons *) newspace_alloc(WORDS_PER_CONS,constant);
+ new = (struct cons *) newspace_alloc(WORDS_PER_CONS,constant);
/* Copy the cons cell and keep a pointer to the cdr. */
new->car = old->car;
ptrans_otherptr(lispobj thing, lispobj header, boolean constant)
{
switch (widetag_of(header)) {
- /* FIXME: this needs a reindent */
+ /* FIXME: this needs a reindent */
case BIGNUM_WIDETAG:
case SINGLE_FLOAT_WIDETAG:
case DOUBLE_FLOAT_WIDETAG:
case COMPLEX_LONG_FLOAT_WIDETAG:
#endif
case SAP_WIDETAG:
- return ptrans_unboxed(thing, header);
+ return ptrans_unboxed(thing, header);
case RATIO_WIDETAG:
case COMPLEX_WIDETAG:
case COMPLEX_VECTOR_WIDETAG:
case COMPLEX_ARRAY_WIDETAG:
return ptrans_boxed(thing, header, constant);
-
+
case VALUE_CELL_HEADER_WIDETAG:
case WEAK_POINTER_WIDETAG:
return ptrans_boxed(thing, header, 0);
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
case SIMPLE_CHARACTER_STRING_WIDETAG:
- return ptrans_vector(thing, 32, 1, 0, constant);
+ return ptrans_vector(thing, 32, 1, 0, constant);
#endif
case SIMPLE_BIT_VECTOR_WIDETAG:
#endif
return ptrans_vector(thing, 64, 0, 0, constant);
#endif
-
+
case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
return ptrans_vector(thing, 32, 0, 0, constant);
return ptrans_returnpc(thing, header);
case FDEFN_WIDETAG:
- return ptrans_fdefn(thing, header);
+ return ptrans_fdefn(thing, header);
default:
- fprintf(stderr, "Invalid widetag: %d\n", widetag_of(header));
+ fprintf(stderr, "Invalid widetag: %d\n", widetag_of(header));
/* Should only come across other pointers to the above stuff. */
gc_abort();
- return NIL;
+ return NIL;
}
}
long nwords;
lispobj func;
nwords = CEILING(HeaderValue(code->header) + fixnum_value(code->code_size),
- 2);
+ 2);
/* Arrange to scavenge the debug info later. */
pscav_later(&code->debug_info, 1);
gc_assert(!dynamic_pointer_p(func));
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- /* Temporarily convert the self pointer to a real function
- * pointer. */
- ((struct simple_fun *)native_pointer(func))->self
- -= FUN_RAW_ADDR_OFFSET;
+ /* Temporarily convert the self pointer to a real function
+ * pointer. */
+ ((struct simple_fun *)native_pointer(func))->self
+ -= FUN_RAW_ADDR_OFFSET;
#endif
pscav(&((struct simple_fun *)native_pointer(func))->self, 2, 1);
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- ((struct simple_fun *)native_pointer(func))->self
- += FUN_RAW_ADDR_OFFSET;
+ ((struct simple_fun *)native_pointer(func))->self
+ += FUN_RAW_ADDR_OFFSET;
#endif
pscav_later(&((struct simple_fun *)native_pointer(func))->name, 3);
}
/* It's a pointer. Is it something we might have to move? */
if (dynamic_pointer_p(thing)) {
/* Maybe. Have we already moved it? */
- thingp = (lispobj *)native_pointer(thing);
+ thingp = (lispobj *)native_pointer(thing);
header = *thingp;
if (is_lisp_pointer(header) && forwarding_pointer_p(header))
/* Yep, so just copy the forwarding pointer. */
}
#if N_WORD_BITS == 64
else if (widetag_of(thing) == SINGLE_FLOAT_WIDETAG) {
- count = 1;
- }
+ count = 1;
+ }
#endif
else if (thing & FIXNUM_TAG_MASK) {
/* It's an other immediate. Maybe the header for an unboxed */
break;
case SIMPLE_VECTOR_WIDETAG:
- if (HeaderValue(thing) == subtype_VectorValidHashing) {
+ if (HeaderValue(thing) == subtype_VectorValidHashing) {
*addr = (subtype_VectorMustRehash << N_WIDETAG_BITS) |
SIMPLE_VECTOR_WIDETAG;
- }
+ }
count = 2;
break;
- case SIMPLE_ARRAY_NIL_WIDETAG:
- count = 2;
- break;
+ case SIMPLE_ARRAY_NIL_WIDETAG:
+ count = 2;
+ break;
case SIMPLE_BASE_STRING_WIDETAG:
vector = (struct vector *)addr;
break;
#ifdef SIMPLE_CHARACTER_STRING_WIDETAG
- case SIMPLE_CHARACTER_STRING_WIDETAG:
- vector = (struct vector *)addr;
- count = CEILING(NWORDS(fixnum_value(vector->length)+1,32)+2,2);
- break;
+ case SIMPLE_CHARACTER_STRING_WIDETAG:
+ vector = (struct vector *)addr;
+ count = CEILING(NWORDS(fixnum_value(vector->length)+1,32)+2,2);
+ break;
#endif
case SIMPLE_BIT_VECTOR_WIDETAG:
case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
- case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
+ case SIMPLE_ARRAY_UNSIGNED_BYTE_29_WIDETAG:
#endif
#ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
vector = (struct vector *)addr;
- count = CEILING(NWORDS(fixnum_value(vector->length), 32) + 2,
- 2);
+ count = CEILING(NWORDS(fixnum_value(vector->length), 32) + 2,
+ 2);
break;
case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
#endif
vector = (struct vector *)addr;
- count = CEILING(NWORDS(fixnum_value(vector->length), 64) + 2,
- 2);
+ count = CEILING(NWORDS(fixnum_value(vector->length), 64) + 2,
+ 2);
break;
#ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
#ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
vector = (struct vector *)addr;
- count = CEILING(NWORDS(fixnum_value(vector->length), 128) + 2,
- 2);
+ count = CEILING(NWORDS(fixnum_value(vector->length), 128) + 2,
+ 2);
break;
#endif
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
gc_abort(); /* no code headers in static space */
#else
- count = pscav_code((struct code*)addr);
+ count = pscav_code((struct code*)addr);
#endif
break;
/* We should never hit any of these, 'cause they occur
* buried in the middle of code objects. */
gc_abort();
- break;
+ break;
#if defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64)
- case CLOSURE_HEADER_WIDETAG:
- case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
- /* The function self pointer needs special care on the
- * x86 because it is the real entry point. */
- {
- lispobj fun = ((struct closure *)addr)->fun
- - FUN_RAW_ADDR_OFFSET;
- pscav(&fun, 1, constant);
- ((struct closure *)addr)->fun = fun + FUN_RAW_ADDR_OFFSET;
- }
- count = 2;
- break;
+ case CLOSURE_HEADER_WIDETAG:
+ case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
+ /* The function self pointer needs special care on the
+ * x86 because it is the real entry point. */
+ {
+ lispobj fun = ((struct closure *)addr)->fun
+ - FUN_RAW_ADDR_OFFSET;
+ pscav(&fun, 1, constant);
+ ((struct closure *)addr)->fun = fun + FUN_RAW_ADDR_OFFSET;
+ }
+ count = 2;
+ break;
#endif
case WEAK_POINTER_WIDETAG:
/* Weak pointers get preserved during purify, 'cause I
- * don't feel like figuring out how to break them. */
+ * don't feel like figuring out how to break them. */
pscav(addr+1, 2, constant);
count = 4;
break;
- case FDEFN_WIDETAG:
- /* We have to handle fdefn objects specially, so we
- * can fix up the raw function address. */
- count = pscav_fdefn((struct fdefn *)addr);
- break;
-
- case INSTANCE_HEADER_WIDETAG:
- {
- struct instance *instance = (struct instance *) addr;
- struct layout *layout
- = (struct layout *) native_pointer(instance->slots[0]);
- long nuntagged = fixnum_value(layout->n_untagged_slots);
- long nslots = HeaderValue(*addr);
- pscav(addr + 1, nslots - nuntagged, constant);
- count = CEILING(1 + nslots, 2);
- }
- break;
+ case FDEFN_WIDETAG:
+ /* We have to handle fdefn objects specially, so we
+ * can fix up the raw function address. */
+ count = pscav_fdefn((struct fdefn *)addr);
+ break;
+
+ case INSTANCE_HEADER_WIDETAG:
+ {
+ struct instance *instance = (struct instance *) addr;
+ struct layout *layout
+ = (struct layout *) native_pointer(instance->slots[0]);
+ long nuntagged = fixnum_value(layout->n_untagged_slots);
+ long nslots = HeaderValue(*addr);
+ pscav(addr + 1, nslots - nuntagged, constant);
+ count = CEILING(1 + nslots, 2);
+ }
+ break;
default:
count = 1;
struct thread *thread;
if(all_threads->next) {
- /* FIXME: there should be _some_ sensible error reporting
- * convention. See following comment too */
- fprintf(stderr,"Can't purify when more than one thread exists\n");
- fflush(stderr);
- return 0;
+ /* FIXME: there should be _some_ sensible error reporting
+ * convention. See following comment too */
+ fprintf(stderr,"Can't purify when more than one thread exists\n");
+ fflush(stderr);
+ return 0;
}
#ifdef PRINTNOISE
gc_alloc_update_all_page_tables();
#endif
for_each_thread(thread)
- if (fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread)) != 0) {
- /* FIXME: 1. What does this mean? 2. It shouldn't be reporting
- * its error simply by a. printing a string b. to stdout instead
- * of stderr. */
+ if (fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,thread)) != 0) {
+ /* FIXME: 1. What does this mean? 2. It shouldn't be reporting
+ * its error simply by a. printing a string b. to stdout instead
+ * of stderr. */
printf(" Ack! Can't purify interrupt contexts. ");
fflush(stdout);
return 0;
#endif
#if defined(LISP_FEATURE_GENCGC) && (defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
- /* note this expects only one thread to be active. We'd have to
- * stop all the others in the same way as GC does if we wanted
+ /* note this expects only one thread to be active. We'd have to
+ * stop all the others in the same way as GC does if we wanted
* PURIFY to work when >1 thread exists */
setup_i386_stack_scav(((&static_roots)-2),
- ((void *)all_threads->control_stack_end));
+ ((void *)all_threads->control_stack_end));
#endif
pscav(&static_roots, 1, 0);
#endif
pscav((lispobj *) all_threads->interrupt_data->interrupt_handlers,
sizeof(all_threads->interrupt_data->interrupt_handlers)
- / sizeof(lispobj),
+ / sizeof(lispobj),
0);
#ifdef PRINTNOISE
#endif
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
pscav((lispobj *)all_threads->control_stack_start,
- current_control_stack_pointer -
- all_threads->control_stack_start,
- 0);
+ current_control_stack_pointer -
+ all_threads->control_stack_start,
+ 0);
#else
#ifdef LISP_FEATURE_GENCGC
pscav_i386_stack();
#endif
#if !(defined(LISP_FEATURE_X86) || defined(LISP_FEATURE_X86_64))
pscav( (lispobj *)all_threads->binding_stack_start,
- (lispobj *)current_binding_stack_pointer -
- all_threads->binding_stack_start,
- 0);
+ (lispobj *)current_binding_stack_pointer -
+ all_threads->binding_stack_start,
+ 0);
#else
for_each_thread(thread) {
- pscav( (lispobj *)thread->binding_stack_start,
- (lispobj *)SymbolValue(BINDING_STACK_POINTER,thread) -
- (lispobj *)thread->binding_stack_start,
- 0);
- pscav( (lispobj *) (thread+1),
- fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
- (sizeof (struct thread))/(sizeof (lispobj)),
- 0);
+ pscav( (lispobj *)thread->binding_stack_start,
+ (lispobj *)SymbolValue(BINDING_STACK_POINTER,thread) -
+ (lispobj *)thread->binding_stack_start,
+ 0);
+ pscav( (lispobj *) (thread+1),
+ fixnum_value(SymbolValue(FREE_TLS_INDEX,0)) -
+ (sizeof (struct thread))/(sizeof (lispobj)),
+ 0);
}
* please submit a patch. */
#if 0
if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != UNBOUND_MARKER_WIDETAG
- && SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
+ && SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
unsigned read_only_space_size =
- (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
- (lispobj *)READ_ONLY_SPACE_START;
+ (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
+ (lispobj *)READ_ONLY_SPACE_START;
fprintf(stderr,
- "scavenging read only space: %d bytes\n",
- read_only_space_size * sizeof(lispobj));
+ "scavenging read only space: %d bytes\n",
+ read_only_space_size * sizeof(lispobj));
pscav( (lispobj *)READ_ONLY_SPACE_START, read_only_space_size, 0);
}
#endif
i++;
} else {
pscav(laters->u[i].ptr, 1, 1);
- }
+ }
}
next = laters->next;
free(laters);
#if !defined(LISP_FEATURE_X86) && !defined(LISP_FEATURE_X86_64)
os_zero((os_vm_address_t) current_control_stack_pointer,
(os_vm_size_t)
- ((all_threads->control_stack_end -
- current_control_stack_pointer) * sizeof(lispobj)));
+ ((all_threads->control_stack_end -
+ current_control_stack_pointer) * sizeof(lispobj)));
#endif
/* It helps to update the heap free pointers so that free_heap can
* example code found at
* http://www.yendor.com/programming/unix/apue/pty/main.c
--brkint
+-brkint
*/
-int set_noecho(int fd)
+int set_noecho(int fd)
{
struct termios stermios;
-
+
if (tcgetattr(fd, &stermios) < 0) return 0;
-
+
stermios.c_lflag &= ~( ECHO | /* ECHOE | ECHOK | */ ECHONL);
- stermios.c_oflag |= (ONLCR);
+ stermios.c_oflag |= (ONLCR);
stermios.c_iflag &= ~(BRKINT);
- stermios.c_iflag |= (ICANON|ICRNL);
+ stermios.c_iflag |= (ICANON|ICRNL);
stermios.c_cc[VERASE]=0177;
if (tcsetattr(fd, TCSANOW, &stermios) < 0) return 0;
}
int spawn(char *program, char *argv[], char *envp[], char *pty_name,
- int stdin, int stdout, int stderr)
+ int stdin, int stdout, int stderr)
{
int pid = fork();
int fd;
if (pid != 0)
- return pid;
+ return pid;
/* Put us in our own process group. */
#if defined(hpux)
/* If we are supposed to be part of some other pty, go for it. */
if (pty_name) {
#if !defined(hpux) && !defined(SVR4)
- fd = open("/dev/tty", O_RDWR, 0);
- if (fd >= 0) {
- ioctl(fd, TIOCNOTTY, 0);
- close(fd);
- }
+ fd = open("/dev/tty", O_RDWR, 0);
+ if (fd >= 0) {
+ ioctl(fd, TIOCNOTTY, 0);
+ close(fd);
+ }
#endif
- fd = open(pty_name, O_RDWR, 0);
- dup2(fd, 0);
- set_noecho(0);
- dup2(fd, 1);
- dup2(fd, 2);
- close(fd);
+ fd = open(pty_name, O_RDWR, 0);
+ dup2(fd, 0);
+ set_noecho(0);
+ dup2(fd, 1);
+ dup2(fd, 2);
+ close(fd);
} else{
/* Set up stdin, stdout, and stderr */
if (stdin >= 0)
- dup2(stdin, 0);
+ dup2(stdin, 0);
if (stdout >= 0)
- dup2(stdout, 1);
+ dup2(stdout, 1);
if (stderr >= 0)
- dup2(stderr, 2);
+ dup2(stderr, 2);
}
/* Close all other fds. */
#ifdef SVR4
for (fd = sysconf(_SC_OPEN_MAX)-1; fd >= 3; fd--)
- close(fd);
+ close(fd);
#else
for (fd = getdtablesize()-1; fd >= 3; fd--)
- close(fd);
+ close(fd);
#endif
/* Exec the program. */
static void
sigint_handler(int signal, siginfo_t *info, void *void_context)
{
- lose("\nSIGINT hit at 0x%08lX\n",
- (unsigned long) *os_context_pc_addr(void_context));
+ lose("\nSIGINT hit at 0x%08lX\n",
+ (unsigned long) *os_context_pc_addr(void_context));
}
/* (This is not static, because we want to be able to call it from
{
void* result = malloc(size);
if (0 == result) {
- lose("malloc failure");
+ lose("malloc failure");
} else {
- return result;
+ return result;
}
return (void *) NULL; /* dummy value: return something ... */
}
{
struct stat filename_stat;
if (stat(filename, &filename_stat)) { /* if failure */
- return 0;
+ return 0;
} else {
return copied_string(filename);
}
alloc_base_string_list(char *array_ptr[])
{
if (*array_ptr) {
- return alloc_cons(alloc_base_string(*array_ptr),
- alloc_base_string_list(1 + array_ptr));
+ return alloc_cons(alloc_base_string(*array_ptr),
+ alloc_base_string_list(1 + array_ptr));
} else {
- return NIL;
+ return NIL;
}
}
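/* Illustrative usage, not from the original source: given a NULL-terminated
 * C string vector such as the saved command line,
 *
 *     lispobj args = alloc_base_string_list(sbcl_argv);
 *
 * builds a proper Lisp list of base strings; the recursion bottoms out at
 * the terminating NULL and returns NIL. */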
\f
/* Parse our part of the command line (aka "runtime options"),
* stripping out those options that we handle. */
{
- int argi = 1;
- while (argi < argc) {
- char *arg = argv[argi];
- if (0 == strcmp(arg, "--noinform")) {
- noinform = 1;
- ++argi;
- } else if (0 == strcmp(arg, "--core")) {
- if (core) {
- lose("more than one core file specified");
- } else {
- ++argi;
- if (argi >= argc) {
- lose("missing filename for --core argument");
- }
- core = copied_string(argv[argi]);
- ++argi;
- }
- } else if (0 == strcmp(arg, "--help")) {
- /* I think this is the (or a) usual convention: upon
- * seeing "--help" we immediately print our help
- * string and exit, ignoring everything else. */
- print_help();
- exit(0);
- } else if (0 == strcmp(arg, "--version")) {
- /* As in "--help" case, I think this is expected. */
- print_version();
- exit(0);
- } else if (0 == strcmp(arg, "--end-runtime-options")) {
- end_runtime_options = 1;
- ++argi;
- break;
- } else {
- /* This option was unrecognized as a runtime option,
- * so it must be a toplevel option or a user option,
- * so we must be past the end of the runtime option
- * section. */
- break;
- }
- }
- /* This is where we strip out those options that we handle. We
- * also take this opportunity to make sure that we don't find
- * an out-of-place "--end-runtime-options" option. */
- {
- char *argi0 = argv[argi];
- int argj = 1;
- /* (argc - argi) for the arguments, one for the binary,
- and one for the terminating NULL. */
- sbcl_argv = successful_malloc((2 + argc - argi) * sizeof(char *));
- sbcl_argv[0] = argv[0];
- while (argi < argc) {
- char *arg = argv[argi++];
- /* If we encounter --end-runtime-options for the first
- * time after the point where we had to give up on
- * runtime options, then the point where we had to
- * give up on runtime options must've been a user
- * error. */
- if (!end_runtime_options &&
- 0 == strcmp(arg, "--end-runtime-options")) {
- lose("bad runtime option \"%s\"", argi0);
- }
- sbcl_argv[argj++] = arg;
- }
- sbcl_argv[argj] = 0;
- }
+ int argi = 1;
+ while (argi < argc) {
+ char *arg = argv[argi];
+ if (0 == strcmp(arg, "--noinform")) {
+ noinform = 1;
+ ++argi;
+ } else if (0 == strcmp(arg, "--core")) {
+ if (core) {
+ lose("more than one core file specified");
+ } else {
+ ++argi;
+ if (argi >= argc) {
+ lose("missing filename for --core argument");
+ }
+ core = copied_string(argv[argi]);
+ ++argi;
+ }
+ } else if (0 == strcmp(arg, "--help")) {
+ /* I think this is the (or a) usual convention: upon
+ * seeing "--help" we immediately print our help
+ * string and exit, ignoring everything else. */
+ print_help();
+ exit(0);
+ } else if (0 == strcmp(arg, "--version")) {
+ /* As in "--help" case, I think this is expected. */
+ print_version();
+ exit(0);
+ } else if (0 == strcmp(arg, "--end-runtime-options")) {
+ end_runtime_options = 1;
+ ++argi;
+ break;
+ } else {
+ /* This option was unrecognized as a runtime option,
+ * so it must be a toplevel option or a user option,
+ * so we must be past the end of the runtime option
+ * section. */
+ break;
+ }
+ }
+ /* This is where we strip out those options that we handle. We
+ * also take this opportunity to make sure that we don't find
+ * an out-of-place "--end-runtime-options" option. */
+ {
+ char *argi0 = argv[argi];
+ int argj = 1;
+ /* (argc - argi) for the arguments, one for the binary,
+ and one for the terminating NULL. */
+ sbcl_argv = successful_malloc((2 + argc - argi) * sizeof(char *));
+ sbcl_argv[0] = argv[0];
+ while (argi < argc) {
+ char *arg = argv[argi++];
+ /* If we encounter --end-runtime-options for the first
+ * time after the point where we had to give up on
+ * runtime options, then the point where we had to
+ * give up on runtime options must've been a user
+ * error. */
+ if (!end_runtime_options &&
+ 0 == strcmp(arg, "--end-runtime-options")) {
+ lose("bad runtime option \"%s\"", argi0);
+ }
+ sbcl_argv[argj++] = arg;
+ }
+ sbcl_argv[argj] = 0;
+ }
}
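/* Illustrative example, not from the original source: an invocation such as
 *
 *     sbcl --core /path/to/sbcl.core --end-runtime-options --eval "(foo)"
 *
 * has --core and --end-runtime-options consumed by the loop above, while
 * the remaining arguments are copied into sbcl_argv for Lisp-level
 * processing. */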
/* If no core file was specified, look for one. */
if (!core) {
- char *sbcl_home = getenv("SBCL_HOME");
- char *lookhere;
- char *stem = "/sbcl.core";
- if(!sbcl_home) sbcl_home = SBCL_HOME;
- lookhere = (char *) calloc(strlen(sbcl_home) +
- strlen(stem) +
- 1,
- sizeof(char));
- sprintf(lookhere, "%s%s", sbcl_home, stem);
- core = copied_existing_filename_or_null(lookhere);
- free(lookhere);
- if (!core) {
- lose("can't find core file");
- }
+ char *sbcl_home = getenv("SBCL_HOME");
+ char *lookhere;
+ char *stem = "/sbcl.core";
+ if(!sbcl_home) sbcl_home = SBCL_HOME;
+ lookhere = (char *) calloc(strlen(sbcl_home) +
+ strlen(stem) +
+ 1,
+ sizeof(char));
+ sprintf(lookhere, "%s%s", sbcl_home, stem);
+ core = copied_existing_filename_or_null(lookhere);
+ free(lookhere);
+ if (!core) {
+ lose("can't find core file");
+ }
}
/* Make sure that SBCL_HOME is set, no matter where the core was
* found */
if (!getenv("SBCL_HOME")) {
- char *envstring, *copied_core, *dir;
- char *stem = "SBCL_HOME=";
- copied_core = copied_string(core);
- dir = dirname(copied_core);
- envstring = (char *) calloc(strlen(stem) +
- strlen(dir) +
- 1,
- sizeof(char));
- sprintf(envstring, "%s%s", stem, dir);
- putenv(envstring);
- free(copied_core);
+ char *envstring, *copied_core, *dir;
+ char *stem = "SBCL_HOME=";
+ copied_core = copied_string(core);
+ dir = dirname(copied_core);
+ envstring = (char *) calloc(strlen(stem) +
+ strlen(dir) +
+ 1,
+ sizeof(char));
+ sprintf(envstring, "%s%s", stem, dir);
+ putenv(envstring);
+ free(copied_core);
}
-
+
if (!noinform) {
- print_banner();
- fflush(stdout);
+ print_banner();
+ fflush(stdout);
}
#if defined(SVR4) || defined(__linux__)
initial_function = load_core_file(core);
if (initial_function == NIL) {
- lose("couldn't find initial function");
+ lose("couldn't find initial function");
}
SHOW("freeing core");
free(core);
/* KLUDGE: As far as I can tell there's no ANSI C way of saying
* "this function never returns". This is the way that you do it
- * in GCC later than version 2.7 or so. If you are using some
+ * in GCC later than version 2.7 or so. If you are using some
* compiler that doesn't understand this, you could could just
* change it to "typedef void never_returns" and nothing would
* break, though you might get a few more bytes of compiled code or
#include "genesis/symbol.h"
static void
-write_lispobj(lispobj obj, FILE *file)
+write_lispobj(lispobj obj, FILE *file)
{
fwrite(&obj, sizeof(lispobj), 1, file);
}
* being SAVE-LISP-AND-DIE instead of SAVE-LISP-AND-GO-ON). */
printf("[undoing binding stack and other enclosing state... ");
fflush(stdout);
- for_each_thread(th) { /* XXX really? */
- unbind_to_here((lispobj *)th->binding_stack_start,th);
- SetSymbolValue(CURRENT_CATCH_BLOCK, 0,th);
- SetSymbolValue(CURRENT_UNWIND_PROTECT_BLOCK, 0,th);
+ for_each_thread(th) { /* XXX really? */
+ unbind_to_here((lispobj *)th->binding_stack_start,th);
+ SetSymbolValue(CURRENT_CATCH_BLOCK, 0,th);
+ SetSymbolValue(CURRENT_UNWIND_PROTECT_BLOCK, 0,th);
}
printf("done]\n");
fflush(stdout);
-
+
/* (Now we can actually start copying ourselves into the output file.) */
printf("[saving current Lisp image into %s:\n", filename);
write_lispobj(BUILD_ID_CORE_ENTRY_TYPE_CODE, file);
write_lispobj(/* (We're writing the word count of the entry here, and the 2
- * term is one word for the leading BUILD_ID_CORE_ENTRY_TYPE_CODE
- * word and one word where we store the count itself.) */
- 2 + strlen(build_id),
- file);
+ * term is one word for the leading BUILD_ID_CORE_ENTRY_TYPE_CODE
+ * word and one word where we store the count itself.) */
+ 2 + strlen(build_id),
+ file);
{
- char *p;
- for (p = build_id; *p; ++p)
- write_lispobj(*p, file);
+ char *p;
+ for (p = build_id; *p; ++p)
+ write_lispobj(*p, file);
}
write_lispobj(NEW_DIRECTORY_CORE_ENTRY_TYPE_CODE, file);
write_lispobj(/* (word count = 3 spaces described by 5 words each, plus the
- * entry type code, plus this count itself) */
- (5*3)+2, file);
+ * entry type code, plus this count itself) */
+ (5*3)+2, file);
output_space(file,
- READ_ONLY_CORE_SPACE_ID,
- (lispobj *)READ_ONLY_SPACE_START,
- (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
+ READ_ONLY_CORE_SPACE_ID,
+ (lispobj *)READ_ONLY_SPACE_START,
+ (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER,0));
output_space(file,
- STATIC_CORE_SPACE_ID,
- (lispobj *)STATIC_SPACE_START,
- (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0));
+ STATIC_CORE_SPACE_ID,
+ (lispobj *)STATIC_SPACE_START,
+ (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER,0));
#ifdef reg_ALLOC
output_space(file,
- DYNAMIC_CORE_SPACE_ID,
- (lispobj *)current_dynamic_space,
- dynamic_space_free_pointer);
+ DYNAMIC_CORE_SPACE_ID,
+ (lispobj *)current_dynamic_space,
+ dynamic_space_free_pointer);
#else
#ifdef LISP_FEATURE_GENCGC
/* Flush the current_region, updating the tables. */
update_x86_dynamic_space_free_pointer();
#endif
output_space(file,
- DYNAMIC_CORE_SPACE_ID,
- (lispobj *)DYNAMIC_SPACE_START,
- (lispobj *)SymbolValue(ALLOCATION_POINTER,0));
+ DYNAMIC_CORE_SPACE_ID,
+ (lispobj *)DYNAMIC_SPACE_START,
+ (lispobj *)SymbolValue(ALLOCATION_POINTER,0));
#endif
write_lispobj(INITIAL_FUN_CORE_ENTRY_TYPE_CODE, file);
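/* Each core entry above is framed the same way: a type code, a word
 * count that includes the type-code word and the count word, then the
 * payload one lispobj at a time.  A standalone sketch of that framing;
 * the typedef and the 3860 entry code are made up for illustration. */
#include <stdio.h>
#include <string.h>

typedef unsigned long lispobj;          /* stand-in for the runtime type */

static void put_word(lispobj obj, FILE *file)
{
    fwrite(&obj, sizeof(lispobj), 1, file);
}

static void put_string_entry(lispobj type_code, const char *s, FILE *file)
{
    const char *p;
    put_word(type_code, file);
    put_word(2 + strlen(s), file);      /* payload + the two header words */
    for (p = s; *p; ++p)
        put_word((lispobj)*p, file);    /* one character per word */
}

int main(void)
{
    FILE *f = fopen("demo-entry.bin", "wb");
    if (!f)
        return 1;
    put_string_entry(3860, "1.0.demo", f);
    fclose(f);
    return 0;
}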
lispobj obj, *addr;
while ((*count == -1 || (*count > 0)) &&
- is_valid_lisp_addr((os_vm_address_t)*start)) {
+ is_valid_lisp_addr((os_vm_address_t)*start)) {
obj = **start;
addr = *start;
if (*count != -1)
while (search_for_type(SYMBOL_HEADER_WIDETAG, start, count)) {
symbol = (struct symbol *)native_pointer((lispobj)*start);
- if (lowtag_of(symbol->name) == OTHER_POINTER_LOWTAG) {
+ if (lowtag_of(symbol->name) == OTHER_POINTER_LOWTAG) {
symbol_name = (struct vector *)native_pointer(symbol->name);
if (is_valid_lisp_addr((os_vm_address_t)symbol_name) &&
- /* FIXME: Broken with more than one type of string
- (i.e. even broken given (VECTOR NIL) */
- widetag_of(symbol_name->header) == SIMPLE_BASE_STRING_WIDETAG &&
- strcmp((char *)symbol_name->data, name) == 0)
+ /* FIXME: Broken with more than one type of string
+ (i.e. even broken given (VECTOR NIL) */
+ widetag_of(symbol_name->header) == SIMPLE_BASE_STRING_WIDETAG &&
+ strcmp((char *)symbol_name->data, name) == 0)
return 1;
- }
+ }
(*start) += 2;
}
return 0;
{
unsigned long badinst;
unsigned long *pc;
- int rs1;
+ int rs1;
pc = (unsigned long *)(*os_context_pc_addr(context));
/* Unaligned */
return NULL;
}
- if ((pc < READ_ONLY_SPACE_START ||
- pc >= READ_ONLY_SPACE_START+READ_ONLY_SPACE_SIZE) &&
- (pc < current_dynamic_space ||
+ if ((pc < READ_ONLY_SPACE_START ||
+ pc >= READ_ONLY_SPACE_START+READ_ONLY_SPACE_SIZE) &&
+ (pc < current_dynamic_space ||
pc >= current_dynamic_space + DYNAMIC_SPACE_SIZE)) {
return NULL;
}
badinst = *pc;
if ((badinst >> 30) != 3)
- /* All load/store instructions have op = 11 (binary) */
- return 0;
+ /* All load/store instructions have op = 11 (binary) */
+ return 0;
rs1 = (badinst>>14)&0x1f;
-
+
if (badinst & (1<<13)) {
- /* r[rs1] + simm(13) */
- int simm13 = badinst & 0x1fff;
+ /* r[rs1] + simm(13) */
+ int simm13 = badinst & 0x1fff;
- if (simm13 & (1<<12))
- simm13 |= -1<<13;
+ if (simm13 & (1<<12))
+ simm13 |= -1<<13;
- return (os_vm_address_t)
- (*os_context_register_addr(context, rs1)+simm13);
+ return (os_vm_address_t)
+ (*os_context_register_addr(context, rs1)+simm13);
}
else {
- /* r[rs1] + r[rs2] */
- int rs2 = badinst & 0x1f;
+ /* r[rs1] + r[rs2] */
+ int rs2 = badinst & 0x1f;
- return (os_vm_address_t)
- (*os_context_register_addr(context, rs1) +
- *os_context_register_addr(context, rs2));
+ return (os_vm_address_t)
+ (*os_context_register_addr(context, rs1) +
+ *os_context_register_addr(context, rs2));
}
}
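/* The effective-address computation above sign-extends the 13-bit
 * load/store immediate by hand: if bit 12 is set, the upper bits are
 * filled in with -1<<13.  A minimal self-contained check of that
 * arithmetic, with no dependence on the runtime: */
#include <assert.h>
#include <stdio.h>

static int sign_extend_simm13(unsigned int badinst)
{
    int simm13 = badinst & 0x1fff;      /* low 13 bits of the instruction */
    if (simm13 & (1 << 12))
        simm13 |= -1 << 13;             /* propagate the sign bit upward */
    return simm13;
}

int main(void)
{
    assert(sign_extend_simm13(0x0004) == 4);      /* small positive */
    assert(sign_extend_simm13(0x1fff) == -1);     /* all ones       */
    assert(sign_extend_simm13(0x1000) == -4096);  /* most negative  */
    printf("simm13 sign extension checks out\n");
    return 0;
}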
unsigned long *ptr = (unsigned long *)pc;
unsigned long result = *ptr;
*ptr = trap_Breakpoint;
-
+
os_flush_icache((os_vm_address_t) pc, sizeof(unsigned long));
-
+
return result;
}
displaced_after_inst = *npc;
*npc = trap_AfterBreakpoint;
os_flush_icache((os_vm_address_t) npc, sizeof(unsigned long));
-
+
}
static int pseudo_atomic_trap_p(os_context_t *context)
unsigned int* pc;
unsigned int badinst;
int result;
-
-
+
+
pc = (unsigned int*) *os_context_pc_addr(context);
badinst = *pc;
result = 0;
-
+
/* Check to see if the current instruction is a pseudo-atomic-trap */
if (((badinst >> 30) == 2) && (((badinst >> 19) & 0x3f) == 0x3a)
- && (((badinst >> 13) & 1) == 1) && ((badinst & 0x7f) == PSEUDO_ATOMIC_TRAP))
- {
- unsigned int previnst;
- previnst = pc[-1];
- /*
- * Check to see if the previous instruction was an andcc alloc-tn,
- * 3, zero-tn instruction.
- */
- if (((previnst >> 30) == 2) && (((previnst >> 19) & 0x3f) == 0x11)
- && (((previnst >> 14) & 0x1f) == reg_ALLOC)
- && (((previnst >> 25) & 0x1f) == reg_ZERO)
- && (((previnst >> 13) & 1) == 1)
- && ((previnst & 0x1fff) == 3))
- {
- result = 1;
- }
- else
- {
- fprintf(stderr, "Oops! Got a PSEUDO-ATOMIC-TRAP without a preceeding andcc!\n");
- }
- }
+ && (((badinst >> 13) & 1) == 1) && ((badinst & 0x7f) == PSEUDO_ATOMIC_TRAP))
+ {
+ unsigned int previnst;
+ previnst = pc[-1];
+ /*
+ * Check to see if the previous instruction was an andcc alloc-tn,
+ * 3, zero-tn instruction.
+ */
+ if (((previnst >> 30) == 2) && (((previnst >> 19) & 0x3f) == 0x11)
+ && (((previnst >> 14) & 0x1f) == reg_ALLOC)
+ && (((previnst >> 25) & 0x1f) == reg_ZERO)
+ && (((previnst >> 13) & 1) == 1)
+ && ((previnst & 0x1fff) == 3))
+ {
+ result = 1;
+ }
+ else
+ {
+            fprintf(stderr, "Oops! Got a PSEUDO-ATOMIC-TRAP without a preceding andcc!\n");
+ }
+ }
return result;
}
/* FIXME: Check that this is necessary -- CSR, 2002-07-15 */
os_restore_fp_control(context);
#endif
-
+
if ((siginfo->si_code) == ILL_ILLOPC
#ifdef LISP_FEATURE_LINUX
- || (linux_sparc_siginfo_bug && (siginfo->si_code == 2))
+ || (linux_sparc_siginfo_bug && (siginfo->si_code == 2))
#endif
- ) {
- int trap;
- unsigned int inst;
- unsigned int* pc = (unsigned int*) siginfo->si_addr;
-
- inst = *pc;
- trap = inst & 0x3fffff;
-
- switch (trap) {
- case trap_PendingInterrupt:
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- break;
-
- case trap_Halt:
- fake_foreign_function_call(context);
- lose("%%primitive halt called; the party is over.\n");
-
- case trap_Error:
- case trap_Cerror:
- interrupt_internal_error(signal, siginfo, context, trap == trap_Cerror);
- break;
-
- case trap_Breakpoint:
- handle_breakpoint(signal, siginfo, context);
- break;
-
- case trap_FunEndBreakpoint:
- *os_context_pc_addr(context) = (int) handle_fun_end_breakpoint(signal, siginfo, context);
- *os_context_npc_addr(context) = *os_context_pc_addr(context) + 4;
- break;
-
- case trap_AfterBreakpoint:
- *skipped_break_addr = trap_Breakpoint;
- skipped_break_addr = NULL;
- *(unsigned long *) os_context_pc_addr(context) = displaced_after_inst;
- /* context->sigmask = orig_sigmask; */
- os_flush_icache((os_vm_address_t) os_context_pc_addr(context), sizeof(unsigned long));
- break;
-
- default:
- interrupt_handle_now(signal, siginfo, context);
- break;
- }
+ ) {
+ int trap;
+ unsigned int inst;
+ unsigned int* pc = (unsigned int*) siginfo->si_addr;
+
+ inst = *pc;
+ trap = inst & 0x3fffff;
+
+ switch (trap) {
+ case trap_PendingInterrupt:
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ break;
+
+ case trap_Halt:
+ fake_foreign_function_call(context);
+ lose("%%primitive halt called; the party is over.\n");
+
+ case trap_Error:
+ case trap_Cerror:
+ interrupt_internal_error(signal, siginfo, context, trap == trap_Cerror);
+ break;
+
+ case trap_Breakpoint:
+ handle_breakpoint(signal, siginfo, context);
+ break;
+
+ case trap_FunEndBreakpoint:
+ *os_context_pc_addr(context) = (int) handle_fun_end_breakpoint(signal, siginfo, context);
+ *os_context_npc_addr(context) = *os_context_pc_addr(context) + 4;
+ break;
+
+ case trap_AfterBreakpoint:
+ *skipped_break_addr = trap_Breakpoint;
+ skipped_break_addr = NULL;
+ *(unsigned long *) os_context_pc_addr(context) = displaced_after_inst;
+ /* context->sigmask = orig_sigmask; */
+ os_flush_icache((os_vm_address_t) os_context_pc_addr(context), sizeof(unsigned long));
+ break;
+
+ default:
+ interrupt_handle_now(signal, siginfo, context);
+ break;
+ }
}
else if ((siginfo->si_code) == ILL_ILLTRP
#ifdef LISP_FEATURE_LINUX
- || (linux_sparc_siginfo_bug && (siginfo->si_code) == 192)
+ || (linux_sparc_siginfo_bug && (siginfo->si_code) == 192)
#endif
- ) {
- if (pseudo_atomic_trap_p(context)) {
- /* A trap instruction from a pseudo-atomic. We just need
- to fixup up alloc-tn to remove the interrupted flag,
- skip over the trap instruction, and then handle the
- pending interrupt(s). */
- *os_context_register_addr(context, reg_ALLOC) &= ~7;
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- }
- else {
- interrupt_internal_error(signal, siginfo, context, 0);
- }
+ ) {
+ if (pseudo_atomic_trap_p(context)) {
+ /* A trap instruction from a pseudo-atomic. We just need
+           to fix up alloc-tn to remove the interrupted flag,
+ skip over the trap instruction, and then handle the
+ pending interrupt(s). */
+ *os_context_register_addr(context, reg_ALLOC) &= ~7;
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ }
+ else {
+ interrupt_internal_error(signal, siginfo, context, 0);
+ }
}
else {
- interrupt_handle_now(signal, siginfo, context);
+ interrupt_handle_now(signal, siginfo, context);
}
}
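/* Both branches above depend on the pseudo-atomic "interrupted" flag
 * living in the low bits of the allocation pointer, which is why
 * clearing it is just reg_ALLOC &= ~7.  A tiny illustration of packing
 * and stripping such a flag; the flag bit and the pointer value are
 * made up, and 8-byte allocation alignment is assumed. */
#include <assert.h>
#include <stdio.h>

#define DEMO_INTERRUPTED_FLAG 4UL       /* illustrative low-bit flag */

int main(void)
{
    unsigned long alloc = 0x100000;     /* pretend 8-byte-aligned pointer */
    unsigned long flagged = alloc | DEMO_INTERRUPTED_FLAG;

    assert((flagged & 7UL) != 0);       /* the flag is visible */
    assert((flagged & ~7UL) == alloc);  /* stripping recovers the pointer */
    printf("flagged=%#lx stripped=%#lx\n", flagged, flagged & ~7UL);
    return 0;
}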
#ifdef LISP_FEATURE_LINUX
os_restore_fp_control(context);
#endif
-
+
badinst = *(unsigned long *)os_context_pc_addr(context);
if ((badinst >> 30) != 2 || ((badinst >> 20) & 0x1f) != 0x11) {
- /* It wasn't a tagged add. Pass the signal into lisp. */
- interrupt_handle_now(signal, siginfo, context);
- return;
+ /* It wasn't a tagged add. Pass the signal into lisp. */
+ interrupt_handle_now(signal, siginfo, context);
+ return;
}
-
+
fprintf(stderr, "SIGEMT trap handler with tagged op instruction!\n");
-
+
/* Extract the parts of the inst. */
subtract = badinst & (1<<19);
rs1 = (badinst>>14) & 0x1f;
op1 = *os_context_register_addr(context, rs1);
-
+
/* If the first arg is $ALLOC then it is really a signal-pending note */
/* for the pseudo-atomic noise. */
if (rs1 == reg_ALLOC) {
- /* Perform the op anyway. */
- op2 = badinst & 0x1fff;
- if (op2 & (1<<12))
- op2 |= -1<<13;
- if (subtract)
- result = op1 - op2;
- else
- result = op1 + op2;
- *os_context_register_addr(context, reg_ALLOC) = result & ~7;
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- return;
+ /* Perform the op anyway. */
+ op2 = badinst & 0x1fff;
+ if (op2 & (1<<12))
+ op2 |= -1<<13;
+ if (subtract)
+ result = op1 - op2;
+ else
+ result = op1 + op2;
+ *os_context_register_addr(context, reg_ALLOC) = result & ~7;
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ return;
}
-
+
if ((op1 & 3) != 0) {
- /* The first arg wan't a fixnum. */
- interrupt_internal_error(signal, siginfo, context, 0);
- return;
+        /* The first arg wasn't a fixnum. */
+ interrupt_internal_error(signal, siginfo, context, 0);
+ return;
}
-
+
if (immed = badinst & (1<<13)) {
- op2 = badinst & 0x1fff;
- if (op2 & (1<<12))
- op2 |= -1<<13;
+ op2 = badinst & 0x1fff;
+ if (op2 & (1<<12))
+ op2 |= -1<<13;
}
else {
- rs2 = badinst & 0x1f;
- op2 = *os_context_register_addr(context, rs2);
+ rs2 = badinst & 0x1f;
+ op2 = *os_context_register_addr(context, rs2);
}
-
+
if ((op2 & 3) != 0) {
- /* The second arg wan't a fixnum. */
- interrupt_internal_error(signal, siginfo, context, 0);
- return;
+        /* The second arg wasn't a fixnum. */
+ interrupt_internal_error(signal, siginfo, context, 0);
+ return;
}
-
+
rd = (badinst>>25) & 0x1f;
if (rd != 0) {
- /* Don't bother computing the result unless we are going to use it. */
- if (subtract)
- result = (op1>>2) - (op2>>2);
- else
- result = (op1>>2) + (op2>>2);
-
- dynamic_space_free_pointer =
- (lispobj *) *os_context_register_addr(context, reg_ALLOC);
-
- *os_context_register_addr(context, rd) = alloc_number(result);
-
- *os_context_register_addr(context, reg_ALLOC) =
- (unsigned long) dynamic_space_free_pointer;
+ /* Don't bother computing the result unless we are going to use it. */
+ if (subtract)
+ result = (op1>>2) - (op2>>2);
+ else
+ result = (op1>>2) + (op2>>2);
+
+ dynamic_space_free_pointer =
+ (lispobj *) *os_context_register_addr(context, reg_ALLOC);
+
+ *os_context_register_addr(context, rd) = alloc_number(result);
+
+ *os_context_register_addr(context, reg_ALLOC) =
+ (unsigned long) dynamic_space_free_pointer;
}
-
+
arch_skip_instruction(context);
}
* jmp %temp_reg + %lo(addr), %addr_reg
* nop
* nop
- *
+ *
*/
int* inst_ptr;
unsigned long hi; /* Top 22 bits of address */
/*
* sethi %hi(addr), temp_reg
*/
-
+
inst = (0 << 30) | (LINKAGE_TEMP_REG << 25) | (4 << 22) | hi;
*inst_ptr++ = inst;
/* nop (really sethi 0, %g0) */
inst = (0 << 30) | (0 << 25) | (4 << 22) | 0;
-
+
*inst_ptr++ = inst;
*inst_ptr++ = inst;
-
+
os_flush_icache((os_vm_address_t) reloc_addr, (char*) inst_ptr - (char*) reloc_addr);
}
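/* The trampoline above splits a 32-bit target address into the 22 high
 * bits carried by sethi and the 10 low bits added by the jmp.  A small
 * check that the split and recombination round-trip; the address is
 * arbitrary. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned int addr = 0x12345678u;
    unsigned int hi = addr >> 10;       /* %hi(addr): top 22 bits */
    unsigned int lo = addr & 0x3ff;     /* %lo(addr): low 10 bits */

    assert(((hi << 10) | lo) == addr);  /* sethi + jmp offset == addr */
    printf("addr=%#x hi=%#x lo=%#x\n", addr, hi, lo);
    return 0;
}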
-void
+void
arch_write_linkage_table_ref(void * reloc_addr, void *target_addr)
{
*(unsigned long *)reloc_addr = (unsigned long)target_addr;
#ifndef _SPARC_ARCH_H
#define _SPARC_ARCH_H
-static inline void
+static inline void
get_spinlock(lispobj *word,long value)
{
- *word=value; /* FIXME for threads */
+ *word=value; /* FIXME for threads */
}
static inline void
#error "Define threading support functions"
#else
int arch_os_thread_init(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
int arch_os_thread_cleanup(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
#endif
os_context_register_addr(os_context_t *context, int offset)
{
if (offset == 0) {
- static int zero;
- zero = 0;
- return &zero;
+ static int zero;
+ zero = 0;
+ return &zero;
} else if (offset < 16) {
- return &context->si_regs.u_regs[offset];
+ return &context->si_regs.u_regs[offset];
} else if (offset < 32) {
- int *sp = (int*) context->si_regs.u_regs[14]; /* Stack Pointer */
- return &(sp[offset-16]);
+ int *sp = (int*) context->si_regs.u_regs[14]; /* Stack Pointer */
+ return &(sp[offset-16]);
} else
- return 0;
+ return 0;
}
os_context_register_t *
return &(context->si_mask);
}
-void
+void
os_restore_fp_control(os_context_t *context)
{
/* Included here, for reference, is an attempt at the PPC
Error on floating point exceptions, something like this would
have to be done. -- CSR, 2002-07-13
- asm ("msfsf $255, %0" : : "m"
- (os_context_fp_control(context) &
- ~ (FLOAT_STICKY_BITS_MASK | FLOAT_EXCEPTIONS_BYTE_MASK)));
+ asm ("msfsf $255, %0" : : "m"
+ (os_context_fp_control(context) &
+ ~ (FLOAT_STICKY_BITS_MASK | FLOAT_EXCEPTIONS_BYTE_MASK)));
*/
}
-void
+void
os_flush_icache(os_vm_address_t address, os_vm_size_t length)
{
/* This is the same for linux and solaris, so see sparc-assem.S */
#endif
-#define reg_ZERO GREG(0)
-#define reg_ALLOC GREG(1)
-#define reg_NIL GREG(2)
-#define reg_CSP GREG(3)
-#define reg_CFP GREG(4)
-#define reg_BSP GREG(5)
+#define reg_ZERO GREG(0)
+#define reg_ALLOC GREG(1)
+#define reg_NIL GREG(2)
+#define reg_CSP GREG(3)
+#define reg_CFP GREG(4)
+#define reg_BSP GREG(5)
/* %g6 and %g7 are supposed to be reserved for the system */
-#define reg_NL0 OREG(0)
-#define reg_NL1 OREG(1)
-#define reg_NL2 OREG(2)
-#define reg_NL3 OREG(3)
-#define reg_NL4 OREG(4)
-#define reg_NL5 OREG(5)
-#define reg_NSP OREG(6)
-#define reg_NARGS OREG(7)
+#define reg_NL0 OREG(0)
+#define reg_NL1 OREG(1)
+#define reg_NL2 OREG(2)
+#define reg_NL3 OREG(3)
+#define reg_NL4 OREG(4)
+#define reg_NL5 OREG(5)
+#define reg_NSP OREG(6)
+#define reg_NARGS OREG(7)
-#define reg_A0 LREG(0)
-#define reg_A1 LREG(1)
-#define reg_A2 LREG(2)
-#define reg_A3 LREG(3)
-#define reg_A4 LREG(4)
-#define reg_A5 LREG(5)
-#define reg_OCFP LREG(6)
-#define reg_LRA LREG(7)
+#define reg_A0 LREG(0)
+#define reg_A1 LREG(1)
+#define reg_A2 LREG(2)
+#define reg_A3 LREG(3)
+#define reg_A4 LREG(4)
+#define reg_A5 LREG(5)
+#define reg_OCFP LREG(6)
+#define reg_LRA LREG(7)
-#define reg_FDEFN IREG(0)
-#define reg_LEXENV IREG(1)
-#define reg_L0 IREG(2)
-#define reg_NFP IREG(3)
-#define reg_CFUNC IREG(4)
-#define reg_CODE IREG(5)
-#define reg_LIP IREG(7)
+#define reg_FDEFN IREG(0)
+#define reg_LEXENV IREG(1)
+#define reg_L0 IREG(2)
+#define reg_NFP IREG(3)
+#define reg_CFUNC IREG(4)
+#define reg_CODE IREG(5)
+#define reg_LIP IREG(7)
#define REGNAMES \
- "ZERO", "ALLOC", "NULL", "CSP", \
- "CFP", "BSP", "%g6", "%g7", \
- "NL0", "NL1", "NL2", "NL3", \
- "NL4", "NL5", "NSP", "NARGS", \
- "A0", "A1", "A2", "A3", \
- "A4", "A5", "OCFP", "LRA", \
- "FDEFN", "LEXENV", "L0", "NFP", \
- "CFUNC", "CODE", "???", "LIP"
+ "ZERO", "ALLOC", "NULL", "CSP", \
+ "CFP", "BSP", "%g6", "%g7", \
+ "NL0", "NL1", "NL2", "NL3", \
+ "NL4", "NL5", "NSP", "NARGS", \
+ "A0", "A1", "A2", "A3", \
+ "A4", "A5", "OCFP", "LRA", \
+ "FDEFN", "LEXENV", "L0", "NFP", \
+ "CFUNC", "CODE", "???", "LIP"
#define BOXED_REGISTERS { \
reg_A0, reg_A1, reg_A2, reg_A3, reg_A4, reg_A5, reg_FDEFN, reg_LEXENV, \
#error "Define threading support functions"
#else
int arch_os_thread_init(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
int arch_os_thread_cleanup(struct thread *thread) {
- return 1; /* success */
+ return 1; /* success */
}
#endif
os_context_register_addr(os_context_t *context, int offset)
{
if (offset == 0) {
- static int zero;
- zero = 0;
- return &zero;
+ static int zero;
+ zero = 0;
+ return &zero;
} else if (offset < 16) {
- return &context->uc_mcontext.gregs[offset+3];
+ return &context->uc_mcontext.gregs[offset+3];
} else if (offset < 32) {
- /* FIXME: You know, this (int *) stuff looks decidedly
- dubious */
- int *sp = (int*) context->uc_mcontext.gregs[REG_SP];
- return &(sp[offset-16]);
+ /* FIXME: You know, this (int *) stuff looks decidedly
+ dubious */
+ int *sp = (int*) context->uc_mcontext.gregs[REG_SP];
+ return &(sp[offset-16]);
} else {
- return 0;
+ return 0;
}
}
running on 5.8 to use MAP_ANON, but because of C's lack of
introspection at runtime, we can't grab the right value because
it's stuffed in a header file somewhere. We can, however, hardcode
- it, and test at runtime for whether to use it... -- CSR, 2002-05-06
+ it, and test at runtime for whether to use it... -- CSR, 2002-05-06
And, in fact, it sucks slightly more, as if you don't use MAP_ANON
you need to have /dev/zero open and pass the file descriptor to
struct utsname name;
int major_version;
int minor_version;
-
+
uname(&name);
major_version = atoi(name.release);
if (major_version != 5) {
- lose("sunos major version=%d (which isn't 5!)", major_version);
+ lose("sunos major version=%d (which isn't 5!)", major_version);
}
minor_version = atoi(name.release+2);
- if ((minor_version == 8) ||
- (minor_version == 9) ||
- (minor_version == 10)) {
- KLUDGE_MAYBE_MAP_ANON = 0x100;
+ if ((minor_version == 8) ||
+ (minor_version == 9) ||
+ (minor_version == 10)) {
+ KLUDGE_MAYBE_MAP_ANON = 0x100;
} else if (minor_version > 10) {
- FSHOW((stderr, "os_init: Solaris version greater than 9?\nUnknown MAP_ANON behaviour.\n"));
- lose("Unknown mmap() interaction with MAP_ANON");
+        FSHOW((stderr, "os_init: Solaris version greater than 10?\nUnknown MAP_ANON behaviour.\n"));
+ lose("Unknown mmap() interaction with MAP_ANON");
} else { /* minor_version < 8 */
- kludge_mmap_fd = open("/dev/zero",O_RDONLY);
- if (kludge_mmap_fd < 0) {
- perror("open");
- lose("Error in open(..)");
- }
+ kludge_mmap_fd = open("/dev/zero",O_RDONLY);
+ if (kludge_mmap_fd < 0) {
+ perror("open");
+ lose("Error in open(..)");
+ }
}
/* I do not understand this at all. FIXME. */
os_vm_page_size = os_real_page_size = sysconf(_SC_PAGESIZE);
if(os_vm_page_size>OS_VM_DEFAULT_PAGESIZE){
- fprintf(stderr,"os_init: Pagesize too large (%d > %d)\n",
- os_vm_page_size,OS_VM_DEFAULT_PAGESIZE);
- exit(1);
+ fprintf(stderr,"os_init: Pagesize too large (%d > %d)\n",
+ os_vm_page_size,OS_VM_DEFAULT_PAGESIZE);
+ exit(1);
} else {
- /*
- * we do this because there are apparently dependencies on
- * the pagesize being OS_VM_DEFAULT_PAGESIZE somewhere...
- * but since the OS doesn't know we're using this restriction,
- * we have to grovel around a bit to enforce it, thus anything
- * that uses real_page_size_difference.
- */
- /* FIXME: Is this still true? */
- real_page_size_difference=OS_VM_DEFAULT_PAGESIZE-os_vm_page_size;
- os_vm_page_size=OS_VM_DEFAULT_PAGESIZE;
+ /*
+ * we do this because there are apparently dependencies on
+ * the pagesize being OS_VM_DEFAULT_PAGESIZE somewhere...
+ * but since the OS doesn't know we're using this restriction,
+ * we have to grovel around a bit to enforce it, thus anything
+ * that uses real_page_size_difference.
+ */
+ /* FIXME: Is this still true? */
+ real_page_size_difference=OS_VM_DEFAULT_PAGESIZE-os_vm_page_size;
+ os_vm_page_size=OS_VM_DEFAULT_PAGESIZE;
}
}
os_vm_address_t os_validate(os_vm_address_t addr, os_vm_size_t len)
{
int flags = MAP_PRIVATE | MAP_NORESERVE | KLUDGE_MAYBE_MAP_ANON;
- if (addr)
- flags |= MAP_FIXED;
+ if (addr)
+ flags |= MAP_FIXED;
- addr = mmap(addr, len,
- OS_VM_PROT_ALL,
- flags,
- kludge_mmap_fd, 0);
+ addr = mmap(addr, len,
+ OS_VM_PROT_ALL,
+ flags,
+ kludge_mmap_fd, 0);
if (addr == MAP_FAILED) {
- perror("mmap");
- lose ("Error in mmap(..)");
+ perror("mmap");
+ lose ("Error in mmap(..)");
}
-
+
return addr;
}
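/* os_validate() above copes with old Solaris releases by either OR-ing
 * a hard-coded MAP_ANON value into the flags or by mapping /dev/zero
 * through a file descriptor.  A hedged sketch of the same decision
 * using only the portable mmap interface; it is not the runtime's
 * actual allocator, just the two anonymous-mapping styles side by side. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_anonymous(size_t len, int have_map_anon)
{
    int flags = MAP_PRIVATE;
    int fd = -1;
    void *addr;

    if (have_map_anon) {
#ifdef MAP_ANONYMOUS
        flags |= MAP_ANONYMOUS;
#else
        flags |= MAP_ANON;
#endif
    } else {
        fd = open("/dev/zero", O_RDONLY);   /* pre-MAP_ANON fallback */
        if (fd < 0) {
            perror("open");
            return NULL;
        }
    }
    addr = mmap(NULL, len, PROT_READ | PROT_WRITE, flags, fd, 0);
    if (fd >= 0)
        close(fd);
    return (addr == MAP_FAILED) ? NULL : addr;
}

int main(void)
{
    void *p = map_anonymous(1 << 20, 1);
    printf("anonymous megabyte at %p\n", p);
    if (p)
        munmap(p, 1 << 20);
    return 0;
}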
void os_invalidate(os_vm_address_t addr, os_vm_size_t len)
{
if(munmap((void*) addr, len) == -1)
- perror("munmap");
+ perror("munmap");
}
\f
-os_vm_address_t
+os_vm_address_t
os_map(int fd, int offset, os_vm_address_t addr, os_vm_size_t len)
{
addr = mmap(addr, len,
- OS_VM_PROT_ALL,
- MAP_PRIVATE | MAP_FIXED,
- fd, (off_t) offset);
+ OS_VM_PROT_ALL,
+ MAP_PRIVATE | MAP_FIXED,
+ fd, (off_t) offset);
if (addr == MAP_FAILED) {
- perror("mmap");
- lose("Unexpedted mmap(..) failure");
+ perror("mmap");
+        lose("Unexpected mmap(..) failure");
}
-
+
return addr;
}
os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot)
{
if(mprotect((void*)address, length, prot) == -1) {
- perror("mprotect");
+ perror("mprotect");
}
}
boolean is_valid_lisp_addr(os_vm_address_t addr)
{
/* Old CMUCL comment:
-
+
Just assume address is valid if it lies within one of the known
spaces. (Unlike sunos-os which keeps track of every valid page.) */
-
+
/* FIXME: this looks like a valid definition for all targets with
cheney-gc; it may not be impressively smart (witness the
comment above) but maybe associating these functions with the
in_range_p(addr, STATIC_SPACE_START , STATIC_SPACE_SIZE) ||
in_range_p(addr, DYNAMIC_0_SPACE_START, DYNAMIC_SPACE_SIZE) ||
in_range_p(addr, DYNAMIC_1_SPACE_START, DYNAMIC_SPACE_SIZE))
- return 1;
+ return 1;
for_each_thread(th) {
- if((th->control_stack_start <= addr) && (addr < th->control_stack_end))
- return 1;
- if(in_range_p(addr, th->binding_stack_start, BINDING_STACK_SIZE))
- return 1;
+ if((th->control_stack_start <= addr) && (addr < th->control_stack_end))
+ return 1;
+ if(in_range_p(addr, th->binding_stack_start, BINDING_STACK_SIZE))
+ return 1;
}
return 0;
}
addr = arch_get_bad_addr(signal, info, context);
if(!interrupt_maybe_gc(signal, info, context)) {
- if(!handle_guard_page_triggered(context,addr))
- interrupt_handle_now(signal, info, context);
+ if(!handle_guard_page_triggered(context,addr))
+ interrupt_handle_now(signal, info, context);
}
}
os_install_interrupt_handlers()
{
undoably_install_low_level_interrupt_handler(SIG_MEMORY_FAULT,
- sigsegv_handler);
+ sigsegv_handler);
}
#include "sbcl.h"
#include "runtime.h"
-#include "validate.h" /* for CONTROL_STACK_SIZE etc */
+#include "validate.h" /* for CONTROL_STACK_SIZE etc */
#include "alloc.h"
#include "thread.h"
#include "arch.h"
#define ALIEN_STACK_SIZE (1*1024*1024) /* 1Mb size chosen at random */
-int dynamic_values_bytes=4096*sizeof(lispobj); /* same for all threads */
+int dynamic_values_bytes=4096*sizeof(lispobj); /* same for all threads */
struct thread *all_threads;
volatile lispobj all_threads_lock;
extern struct interrupt_data * global_interrupt_data;
struct thread * create_thread_struct(lispobj initial_function) {
union per_thread_data *per_thread;
- struct thread *th=0; /* subdue gcc */
+ struct thread *th=0; /* subdue gcc */
void *spaces=0;
/* may as well allocate all the spaces at once: it saves us from
* having to decide what to do if only some of the allocations
* succeed */
spaces=os_validate(0,
- THREAD_CONTROL_STACK_SIZE+
- BINDING_STACK_SIZE+
- ALIEN_STACK_SIZE+
- dynamic_values_bytes+
- 32*SIGSTKSZ);
+ THREAD_CONTROL_STACK_SIZE+
+ BINDING_STACK_SIZE+
+ ALIEN_STACK_SIZE+
+ dynamic_values_bytes+
+ 32*SIGSTKSZ);
if(!spaces)
- return NULL;
+ return NULL;
per_thread=(union per_thread_data *)
- (spaces+
- THREAD_CONTROL_STACK_SIZE+
- BINDING_STACK_SIZE+
- ALIEN_STACK_SIZE);
+ (spaces+
+ THREAD_CONTROL_STACK_SIZE+
+ BINDING_STACK_SIZE+
+ ALIEN_STACK_SIZE);
if(all_threads) {
- memcpy(per_thread,arch_os_get_current_thread(),
- dynamic_values_bytes);
+ memcpy(per_thread,arch_os_get_current_thread(),
+ dynamic_values_bytes);
} else {
#ifdef LISP_FEATURE_SB_THREAD
- int i;
- for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
- per_thread->dynamic_values[i]=UNBOUND_MARKER_WIDETAG;
- if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG)
- SetSymbolValue
- (FREE_TLS_INDEX,
- make_fixnum(MAX_INTERRUPTS+
- sizeof(struct thread)/sizeof(lispobj)),
- 0);
+ int i;
+ for(i=0;i<(dynamic_values_bytes/sizeof(lispobj));i++)
+ per_thread->dynamic_values[i]=UNBOUND_MARKER_WIDETAG;
+ if(SymbolValue(FREE_TLS_INDEX,0)==UNBOUND_MARKER_WIDETAG)
+ SetSymbolValue
+ (FREE_TLS_INDEX,
+ make_fixnum(MAX_INTERRUPTS+
+ sizeof(struct thread)/sizeof(lispobj)),
+ 0);
#define STATIC_TLS_INIT(sym,field) \
((struct symbol *)(sym-OTHER_POINTER_LOWTAG))->tls_index= \
make_fixnum(THREAD_SLOT_OFFSET_WORDS(field))
- STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
- STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
- STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
- STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
- STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
+ STATIC_TLS_INIT(BINDING_STACK_START,binding_stack_start);
+ STATIC_TLS_INIT(BINDING_STACK_POINTER,binding_stack_pointer);
+ STATIC_TLS_INIT(CONTROL_STACK_START,control_stack_start);
+ STATIC_TLS_INIT(CONTROL_STACK_END,control_stack_end);
+ STATIC_TLS_INIT(ALIEN_STACK,alien_stack_pointer);
#if defined(LISP_FEATURE_X86) || defined (LISP_FEATURE_X86_64)
- STATIC_TLS_INIT(PSEUDO_ATOMIC_ATOMIC,pseudo_atomic_atomic);
- STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
+ STATIC_TLS_INIT(PSEUDO_ATOMIC_ATOMIC,pseudo_atomic_atomic);
+ STATIC_TLS_INIT(PSEUDO_ATOMIC_INTERRUPTED,pseudo_atomic_interrupted);
#endif
#undef STATIC_TLS_INIT
#endif
th=&per_thread->thread;
th->control_stack_start = spaces;
th->binding_stack_start=
- (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
+ (lispobj*)((void*)th->control_stack_start+THREAD_CONTROL_STACK_SIZE);
th->control_stack_end = th->binding_stack_start;
th->alien_stack_start=
- (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
+ (lispobj*)((void*)th->binding_stack_start+BINDING_STACK_SIZE);
th->binding_stack_pointer=th->binding_stack_start;
th->this=th;
th->os_thread=0;
th->state=STATE_STARTING;
#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
th->alien_stack_pointer=((void *)th->alien_stack_start
- + ALIEN_STACK_SIZE-N_WORD_BYTES);
+ + ALIEN_STACK_SIZE-N_WORD_BYTES);
#else
th->alien_stack_pointer=((void *)th->alien_stack_start);
#endif
th->interrupt_data = (struct interrupt_data *)
os_validate(0,(sizeof (struct interrupt_data)));
if(all_threads)
- memcpy(th->interrupt_data,
- arch_os_get_current_thread()->interrupt_data,
- sizeof (struct interrupt_data));
+ memcpy(th->interrupt_data,
+ arch_os_get_current_thread()->interrupt_data,
+ sizeof (struct interrupt_data));
else
- memcpy(th->interrupt_data,global_interrupt_data,
- sizeof (struct interrupt_data));
+ memcpy(th->interrupt_data,global_interrupt_data,
+ sizeof (struct interrupt_data));
th->unbound_marker=initial_function;
return th;
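/* create_thread_struct() above grabs a single os_validate() block and
 * carves the control stack, binding stack, alien stack and per-thread
 * data out of it by offset arithmetic.  A minimal sketch of that
 * carving, with malloc standing in for os_validate and purely
 * illustrative sizes: */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_CONTROL_STACK_BYTES (64 * 1024)
#define DEMO_BINDING_STACK_BYTES (16 * 1024)
#define DEMO_ALIEN_STACK_BYTES   (16 * 1024)
#define DEMO_PER_THREAD_BYTES    ( 8 * 1024)

int main(void)
{
    char *spaces = malloc(DEMO_CONTROL_STACK_BYTES + DEMO_BINDING_STACK_BYTES +
                          DEMO_ALIEN_STACK_BYTES + DEMO_PER_THREAD_BYTES);
    char *control_stack, *binding_stack, *alien_stack, *per_thread;

    if (!spaces)
        return 1;
    /* Each region starts where the previous one ends, exactly as the
     * thread struct's *_start pointers are computed above. */
    control_stack = spaces;
    binding_stack = control_stack + DEMO_CONTROL_STACK_BYTES;
    alien_stack   = binding_stack + DEMO_BINDING_STACK_BYTES;
    per_thread    = alien_stack + DEMO_ALIEN_STACK_BYTES;

    printf("control=%p binding=%p alien=%p per-thread=%p\n",
           (void *)control_stack, (void *)binding_stack,
           (void *)alien_stack, (void *)per_thread);
    free(spaces);
    return 0;
}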
struct thread *th=create_thread_struct(initial_function);
os_thread_t kid_tid=thread_self();
if(th && kid_tid>0) {
- link_thread(th,kid_tid);
- initial_thread_trampoline(all_threads); /* no return */
+ link_thread(th,kid_tid);
+ initial_thread_trampoline(all_threads); /* no return */
} else lose("can't create initial thread");
}
sigemptyset(&newset);
sigaddset_blockable(&newset);
thread_sigmask(SIG_BLOCK, &newset, &oldset);
-
+
if((pthread_attr_init(&attr)) ||
(pthread_attr_setstack(&attr,th->control_stack_start,
THREAD_CONTROL_STACK_SIZE-16)) ||
success=create_os_thread(th,&kid_tid);
if (success)
- link_thread(th,kid_tid);
+ link_thread(th,kid_tid);
else
- os_invalidate((os_vm_address_t) th->control_stack_start,
- ((sizeof (lispobj))
- * (th->control_stack_end-th->control_stack_start)) +
- BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
- 32*SIGSTKSZ);
+ os_invalidate((os_vm_address_t) th->control_stack_start,
+ ((sizeof (lispobj))
+ * (th->control_stack_end-th->control_stack_start)) +
+ BINDING_STACK_SIZE+ALIEN_STACK_SIZE+dynamic_values_bytes+
+ 32*SIGSTKSZ);
RELEASE_ALL_THREADS_LOCK("create_thread")
FSHOW_SIGNAL((stderr,"/gc_start_the_world:begin\n"));
for(p=all_threads;p;p=p->next) {
gc_assert(p->os_thread!=0);
- if((p!=th) && (p->state!=STATE_DEAD)) {
+ if((p!=th) && (p->state!=STATE_DEAD)) {
if(p->state!=STATE_SUSPENDED) {
lose("gc_start_the_world: wrong thread state is %ld\n",
fixnum_value(p->state));
union per_thread_data {
struct thread thread;
- lispobj dynamic_values[1]; /* actually more like 4000 or so */
+ lispobj dynamic_values[1]; /* actually more like 4000 or so */
};
extern struct thread *all_threads;
static inline lispobj SymbolValue(u64 tagged_symbol_pointer, void *thread) {
struct symbol *sym= (struct symbol *)
- (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
+ (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
if(thread && sym->tls_index) {
- lispobj r=
- ((union per_thread_data *)thread)
- ->dynamic_values[fixnum_value(sym->tls_index)];
- if(r!=UNBOUND_MARKER_WIDETAG) return r;
+ lispobj r=
+ ((union per_thread_data *)thread)
+ ->dynamic_values[fixnum_value(sym->tls_index)];
+ if(r!=UNBOUND_MARKER_WIDETAG) return r;
}
#endif
return sym->value;
}
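/* SymbolValue() above prefers the slot in the thread's dynamic_values
 * array named by the symbol's tls_index, falling back to the global
 * value when that slot still holds the unbound marker.  A simplified
 * model of the two-level lookup; the structs and the marker value are
 * stand-ins, not the real tagged objects. */
#include <assert.h>
#include <stdio.h>

#define DEMO_UNBOUND 0xdeadbeefUL

struct demo_symbol { unsigned long value; unsigned long tls_index; };
struct demo_thread { unsigned long dynamic_values[16]; };

static unsigned long demo_symbol_value(struct demo_symbol *sym,
                                       struct demo_thread *th)
{
    if (th && sym->tls_index) {
        unsigned long v = th->dynamic_values[sym->tls_index];
        if (v != DEMO_UNBOUND)
            return v;                   /* thread-local binding wins */
    }
    return sym->value;                  /* otherwise the global value */
}

int main(void)
{
    struct demo_thread th = {{0}};
    struct demo_symbol sym = { 42, 3 };

    th.dynamic_values[3] = DEMO_UNBOUND;
    assert(demo_symbol_value(&sym, &th) == 42);
    th.dynamic_values[3] = 7;
    assert(demo_symbol_value(&sym, &th) == 7);
    printf("two-level symbol lookup behaves as described\n");
    return 0;
}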
static inline lispobj SymbolTlValue(u64 tagged_symbol_pointer, void *thread) {
struct symbol *sym= (struct symbol *)
- (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
+ (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
return ((union per_thread_data *)thread)
- ->dynamic_values[fixnum_value(sym->tls_index)];
+ ->dynamic_values[fixnum_value(sym->tls_index)];
#else
return sym->value;
#endif
}
static inline void SetSymbolValue(u64 tagged_symbol_pointer,lispobj val, void *thread) {
- struct symbol *sym= (struct symbol *)
- (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
+ struct symbol *sym= (struct symbol *)
+ (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
#ifdef LISP_FEATURE_SB_THREAD
if(thread && sym->tls_index) {
- lispobj *pr= &(((union per_thread_data *)thread)
- ->dynamic_values[fixnum_value(sym->tls_index)]);
- if(*pr!= UNBOUND_MARKER_WIDETAG) {
- *pr=val;
- return;
- }
+ lispobj *pr= &(((union per_thread_data *)thread)
+ ->dynamic_values[fixnum_value(sym->tls_index)]);
+ if(*pr!= UNBOUND_MARKER_WIDETAG) {
+ *pr=val;
+ return;
+ }
}
#endif
sym->value = val;
}
static inline void SetTlSymbolValue(u64 tagged_symbol_pointer,lispobj val, void *thread) {
#ifdef LISP_FEATURE_SB_THREAD
- struct symbol *sym= (struct symbol *)
- (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
+ struct symbol *sym= (struct symbol *)
+ (pointer_sized_uint_t)(tagged_symbol_pointer-OTHER_POINTER_LOWTAG);
((union per_thread_data *)thread)
- ->dynamic_values[fixnum_value(sym->tls_index)]
- =val;
+ ->dynamic_values[fixnum_value(sym->tls_index)]
+ =val;
#else
SetSymbolValue(tagged_symbol_pointer,val,thread) ;
#endif
static inline os_context_t *get_interrupt_context_for_thread(struct thread *th)
{
return th->interrupt_contexts
- [fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th)-1)];
+ [fixnum_value(SymbolValue(FREE_INTERRUPT_CONTEXT_INDEX,th)-1)];
}
-/* This is clearly per-arch and possibly even per-OS code, but we can't
+/* This is clearly per-arch and possibly even per-OS code, but we can't
* put it somewhere sensible like x86-linux-os.c because it needs too
* much stuff like struct thread and all_threads to be defined, which
* usually aren't by that time. So, it's here instead. Sorry */
#if defined(LISP_FEATURE_X86)
register struct thread *me=0;
if(all_threads)
- __asm__ __volatile__ ("movl %%fs:%c1,%0" : "=r" (me)
- : "i" (offsetof (struct thread,this)));
+ __asm__ __volatile__ ("movl %%fs:%c1,%0" : "=r" (me)
+ : "i" (offsetof (struct thread,this)));
return me;
#else
return pthread_getspecific(specials);
sw = (((gtm.tm_hour*60)+gtm.tm_min)*60+gtm.tm_sec) - (((ltm.tm_hour*60)+ltm.tm_min)*60+ltm.tm_sec);
if ((gtm.tm_wday + 1) % 7 == ltm.tm_wday)
- sw -= 24*3600;
+ sw -= 24*3600;
else if (gtm.tm_wday == (ltm.tm_wday + 1) % 7)
- sw += 24*3600;
+ sw += 24*3600;
*secwest = sw;
*dst = ltm.tm_isdst;
}
{
long result;
if (1 != sscanf(s, "%lx", &result)) {
- fprintf(stderr, "can't parse \"%s\" as hexadecimal integer\n", s);
- exit(1);
+ fprintf(stderr, "can't parse \"%s\" as hexadecimal integer\n", s);
+ exit(1);
}
return result;
}
* sanity check in make-target-2.sh before we try to execute sbcl
* itself. */
if (argc != 3) {
- fprintf(stderr, "usage: %s $addr $size\n", argv[0]);
- exit(1);
+ fprintf(stderr, "usage: %s $addr $size\n", argv[0]);
+ exit(1);
}
requested_addr = (char*)hexparse(argv[1]);
addr = mmap(requested_addr,
- hexparse(argv[2]),
- 0x7,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- -1,
- 0);
+ hexparse(argv[2]),
+ 0x7,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ -1,
+ 0);
/* FIXME: It would be nice to make this a stronger test. E.g.
* besides just trying to mmap() the area, we could check that the
* (At least on OpenBSD, "A successful mmap deletes any previous
* mapping in the allocated address range.") */
if (addr != requested_addr) {
- perror("mmap");
+ perror("mmap");
}
-
+
exit(0);
}
* SHARED_FUNCTION(sinh)
* SHARED_FUNCTION(strlen)
* etc. and the per-OS files could look like
- * #define SHARED_FUNCTION(f) ....
+ * #define SHARED_FUNCTION(f) ....
* #include "shared-function-names.h"
* ...then going on to do OS-specific things
* "Once and only once."
* provided with absolutely no warranty. See the COPYING and CREDITS
* files for more information.
*/
-
+
/* Pick up all the syscalls. */
F(accept)
F(access)
{
/* Ensure that we have enough space, or die. */
if (va->n_used >= va->n_avail) { /* if we've run out of space */
- /* We need to allocate more space. */
+ /* We need to allocate more space. */
int new_n_avail = 1 + 2 * va->n_avail;
void** new_result = (void**)calloc(sizeof(void*), new_n_avail);
int i;
- if (!new_result) {
- return 1;
- }
+ if (!new_result) {
+ return 1;
+ }
/* Copy old result into new space. */
- for (i = va->n_used; --i >= 0; ) {
- new_result[i] = va->result[i];
- }
- free(va->result);
- va->result = new_result;
- va->n_avail = new_n_avail;
+ for (i = va->n_used; --i >= 0; ) {
+ new_result[i] = va->result[i];
+ }
+ free(va->result);
+ va->result = new_result;
+ va->n_avail = new_n_avail;
}
- /* If we get to this point, we have enough space to store x.
- *
+ /* If we get to this point, we have enough space to store x.
+ *
* Note that since we cleverly counted the 0 as part of the space
* used, now we need to subtract one to get the correct offset to
* write into.:-| */
} voidacc;
int voidacc_ctor(voidacc*); /* the ctor, returning 0 for success */
int voidacc_acc(voidacc*, void*); /* Accumulate an element into result,
- * returning 0 for success. */
+ * returning 0 for success. */
void** voidacc_give_away_result(voidacc*); /* giving away ownership */
void voidacc_dtor(voidacc*); /* the dtor */
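/* The declarations above are the accumulator's whole public life
 * cycle: construct, accumulate, give away the NULL-terminated result,
 * destroy.  A hypothetical usage sketch, meant to be compiled together
 * with the voidacc implementation earlier in this file; the strings
 * accumulated here are arbitrary. */
#include <stdlib.h>
#include <string.h>

char **collect_demo_strings(void)
{
    voidacc va;
    char **result = 0;
    if (0 == voidacc_ctor(&va)) {
        /* strdup so the caller ends up owning every element */
        if (!voidacc_acc(&va, strdup("alpha")) &&
            !voidacc_acc(&va, strdup("beta")) &&
            !voidacc_acc(&va, strdup("gamma")))
            result = (char **)voidacc_give_away_result(&va);
        voidacc_dtor(&va);
    }
    return result;                      /* NULL-terminated, or 0 on failure */
}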
ensure_space(lispobj *start, unsigned long size)
{
if (os_validate((os_vm_address_t)start,(os_vm_size_t)size)==NULL) {
- fprintf(stderr,
- "ensure_space: failed to validate %ld bytes at 0x%08lx\n",
- size,
- (unsigned long)start);
- fprintf(stderr,
- "(hint: Try \"ulimit -a\"; maybe you should increase memory limits.)\n");
- exit(1);
+ fprintf(stderr,
+ "ensure_space: failed to validate %ld bytes at 0x%08lx\n",
+ size,
+ (unsigned long)start);
+ fprintf(stderr,
+ "(hint: Try \"ulimit -a\"; maybe you should increase memory limits.)\n");
+ exit(1);
}
}
ensure_undefined_alien(void) {
os_vm_address_t start = os_validate(NULL, os_vm_page_size);
if (start) {
- os_protect(start, os_vm_page_size, OS_VM_PROT_NONE);
- undefined_alien_address = start;
+ os_protect(start, os_vm_page_size, OS_VM_PROT_NONE);
+ undefined_alien_address = start;
} else {
- lose("could not allocate guard page for undefined alien");
+ lose("could not allocate guard page for undefined alien");
}
}
printf("validating memory ...");
fflush(stdout);
#endif
-
+
ensure_space( (lispobj *)READ_ONLY_SPACE_START, READ_ONLY_SPACE_SIZE);
ensure_space( (lispobj *)STATIC_SPACE_START , STATIC_SPACE_SIZE);
#ifdef LISP_FEATURE_GENCGC
#ifdef LISP_FEATURE_OS_PROVIDES_DLOPEN
ensure_undefined_alien();
#endif
-
+
#ifdef PRINTNOISE
printf(" done.\n");
#endif
}
-void
+void
protect_control_stack_guard_page(struct thread *th, int protect_p) {
os_protect(CONTROL_STACK_GUARD_PAGE(th),
- os_vm_page_size,protect_p ?
- (OS_VM_PROT_READ|OS_VM_PROT_EXECUTE) : OS_VM_PROT_ALL);
+ os_vm_page_size,protect_p ?
+ (OS_VM_PROT_READ|OS_VM_PROT_EXECUTE) : OS_VM_PROT_ALL);
}
-void
+void
protect_control_stack_return_guard_page(struct thread *th, int protect_p) {
os_protect(CONTROL_STACK_RETURN_GUARD_PAGE(th),
- os_vm_page_size,protect_p ?
- (OS_VM_PROT_READ|OS_VM_PROT_EXECUTE) : OS_VM_PROT_ALL);
+ os_vm_page_size,protect_p ?
+ (OS_VM_PROT_READ|OS_VM_PROT_EXECUTE) : OS_VM_PROT_ALL);
}
#if !defined(LANGUAGE_ASSEMBLY)
#include <thread.h>
-#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
+#ifdef LISP_FEATURE_STACK_GROWS_DOWNWARD_NOT_UPWARD
#define CONTROL_STACK_GUARD_PAGE(th) ((void *)(th->control_stack_start))
#define CONTROL_STACK_RETURN_GUARD_PAGE(th) (CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size)
#else
* and so forth. In SBCL, the memory map data are defined at the Lisp
* level (compiler/target/parms.lisp) and stuffed into the sbcl.h file
* created by GENESIS, so there's no longer a need for an
- * architecture-dependent header file of memory map data.
+ * architecture-dependent header file of memory map data.
*/
#endif
}
struct var *define_dynamic_var(char *name, lispobj updatefn(struct var *),
- boolean perm)
+ boolean perm)
{
struct var *var = make_var(name, perm);
extern struct var *lookup_by_obj(lispobj obj);
extern struct var *define_var(char *name, lispobj obj, boolean perm);
extern struct var *define_dynamic_var(char *name,
- lispobj update_fn(struct var *var),
- boolean perm);
+ lispobj update_fn(struct var *var),
+ boolean perm);
extern char *var_name(struct var *var);
extern lispobj var_value(struct var *var);
sbcl-devel dated 2004-03-29, this is the POSIXly-correct way of
using environ: by an explicit declaration. -- CSR, 2004-03-30 */
extern char **environ;
-\f
+\f
/*
* stuff needed by CL:DIRECTORY and other Lisp directory operations
*/
if (dir_ptr) { /* if opendir success */
- struct voidacc va;
+ struct voidacc va;
- if (0 == voidacc_ctor(&va)) { /* if voidacc_ctor success */
- struct dirent *dirent_ptr;
+ if (0 == voidacc_ctor(&va)) { /* if voidacc_ctor success */
+ struct dirent *dirent_ptr;
- while ( (dirent_ptr = readdir(dir_ptr)) ) { /* until end of data */
- char* original_name = dirent_ptr->d_name;
- if (is_lispy_filename(original_name)) {
- /* strdup(3) is in Linux and *BSD. If you port
- * somewhere else that doesn't have it, it's easy
- * to reimplement. */
- char* dup_name = strdup(original_name);
- if (!dup_name) { /* if strdup failure */
- goto dtors;
- }
- if (voidacc_acc(&va, dup_name)) { /* if acc failure */
- goto dtors;
- }
- }
- }
- result = (char**)voidacc_give_away_result(&va);
- }
+ while ( (dirent_ptr = readdir(dir_ptr)) ) { /* until end of data */
+ char* original_name = dirent_ptr->d_name;
+ if (is_lispy_filename(original_name)) {
+ /* strdup(3) is in Linux and *BSD. If you port
+ * somewhere else that doesn't have it, it's easy
+ * to reimplement. */
+ char* dup_name = strdup(original_name);
+ if (!dup_name) { /* if strdup failure */
+ goto dtors;
+ }
+ if (voidacc_acc(&va, dup_name)) { /* if acc failure */
+ goto dtors;
+ }
+ }
+ }
+ result = (char**)voidacc_give_away_result(&va);
+ }
dtors:
- voidacc_dtor(&va);
- /* ignoring closedir(3) return code, since what could we do?
- *
- * "Never ask questions you don't want to know the answer to."
- * -- William Irving Zumwalt (Rich Cook, _The Wizardry Quested_) */
- closedir(dir_ptr);
+ voidacc_dtor(&va);
+ /* ignoring closedir(3) return code, since what could we do?
+ *
+ * "Never ask questions you don't want to know the answer to."
+ * -- William Irving Zumwalt (Rich Cook, _The Wizardry Quested_) */
+ closedir(dir_ptr);
}
return result;
/* Free the strings. */
for (p = directory_lispy_filenames; *p; ++p) {
- free(*p);
+ free(*p);
}
/* Free the table of strings. */
{
int bufsiz = strlen(path) + 16;
while (1) {
- char *result = malloc(bufsiz);
- int n_read = readlink(path, result, bufsiz);
- if (n_read < 0) {
- free(result);
- return 0;
- } else if (n_read < bufsiz) {
- result[n_read] = 0;
- return result;
- } else {
- free(result);
- bufsiz *= 2;
- }
+ char *result = malloc(bufsiz);
+ int n_read = readlink(path, result, bufsiz);
+ if (n_read < 0) {
+ free(result);
+ return 0;
+ } else if (n_read < bufsiz) {
+ result[n_read] = 0;
+ return result;
+ } else {
+ free(result);
+ bufsiz *= 2;
+ }
}
}
\f
time_t wrapped_st_ctime; /* time_t of last change */
};
-static void
+static void
copy_to_stat_wrapper(struct stat_wrapper *to, struct stat *from)
{
#define FROB(stem) to->wrapped_st_##stem = from->st_##stem
struct stat real_buf;
int ret;
if ((ret = stat(file_name,&real_buf)) >= 0)
- copy_to_stat_wrapper(buf, &real_buf);
+ copy_to_stat_wrapper(buf, &real_buf);
return ret;
}
{
struct stat real_buf;
int ret;
- if ((ret = lstat(file_name,&real_buf)) >= 0)
- copy_to_stat_wrapper(buf, &real_buf);
+ if ((ret = lstat(file_name,&real_buf)) >= 0)
+ copy_to_stat_wrapper(buf, &real_buf);
return ret;
}
struct stat real_buf;
int ret;
if ((ret = fstat(filedes,&real_buf)) >= 0)
- copy_to_stat_wrapper(buf, &real_buf);
+ copy_to_stat_wrapper(buf, &real_buf);
return ret;
}
\f
{
struct passwd *p = getpwuid(uid);
if (p) {
- /* The object *p is a static struct which'll be overwritten by
- * the next call to getpwuid(), so it'd be unsafe to return
- * p->pw_name without copying. */
- return strdup(p->pw_name);
+ /* The object *p is a static struct which'll be overwritten by
+ * the next call to getpwuid(), so it'd be unsafe to return
+ * p->pw_name without copying. */
+ return strdup(p->pw_name);
} else {
- return 0;
+ return 0;
}
}
{
struct passwd *p = getpwuid(uid);
if(p) {
- /* Let's be careful about this, shall we? */
- size_t len = strlen(p->pw_dir);
- if (p->pw_dir[len-1] == '/') {
- return strdup(p->pw_dir);
- } else {
- char *result = malloc(len + 2);
- if (result) {
- int nchars = sprintf(result,"%s/",p->pw_dir);
- if (nchars == len + 1) {
- return result;
- } else {
- return 0;
- }
- } else {
- return 0;
- }
- }
+ /* Let's be careful about this, shall we? */
+ size_t len = strlen(p->pw_dir);
+ if (p->pw_dir[len-1] == '/') {
+ return strdup(p->pw_dir);
+ } else {
+ char *result = malloc(len + 2);
+ if (result) {
+ int nchars = sprintf(result,"%s/",p->pw_dir);
+ if (nchars == len + 1) {
+ return result;
+ } else {
+ return 0;
+ }
+ } else {
+ return 0;
+ }
+ }
} else {
- return 0;
+ return 0;
}
}
\f
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"
-#define BREAKPOINT_INST 0xcc /* INT3 */
+#define BREAKPOINT_INST 0xcc /* INT3 */
unsigned long fast_random_state = 1;
int vlen;
long code;
-
+
/* Get and skip the Lisp interrupt code. */
code = *(char*)(*os_context_pc_addr(context))++;
switch (code)
- {
- case trap_Error:
- case trap_Cerror:
- /* Lisp error arg vector length */
- vlen = *(char*)(*os_context_pc_addr(context))++;
- /* Skip Lisp error arg data bytes. */
- while (vlen-- > 0) {
- ++*os_context_pc_addr(context);
- }
- break;
-
- case trap_Breakpoint: /* not tested */
- case trap_FunEndBreakpoint: /* not tested */
- break;
-
- case trap_PendingInterrupt:
- case trap_Halt:
- /* only needed to skip the Code */
- break;
-
- default:
- fprintf(stderr,"[arch_skip_inst invalid code %d\n]\n",code);
- break;
- }
+ {
+ case trap_Error:
+ case trap_Cerror:
+ /* Lisp error arg vector length */
+ vlen = *(char*)(*os_context_pc_addr(context))++;
+ /* Skip Lisp error arg data bytes. */
+ while (vlen-- > 0) {
+ ++*os_context_pc_addr(context);
+ }
+ break;
+
+ case trap_Breakpoint: /* not tested */
+ case trap_FunEndBreakpoint: /* not tested */
+ break;
+
+ case trap_PendingInterrupt:
+ case trap_Halt:
+ /* only needed to skip the Code */
+ break;
+
+ default:
+          fprintf(stderr,"[arch_skip_inst invalid code %d]\n",code);
+ break;
+ }
FSHOW((stderr,
- "/[arch_skip_inst resuming at %x]\n",
- *os_context_pc_addr(context)));
+ "/[arch_skip_inst resuming at %x]\n",
+ *os_context_pc_addr(context)));
}
unsigned char *
arch_set_pseudo_atomic_interrupted(os_context_t *context)
{
SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),
- arch_os_get_current_thread());
+ arch_os_get_current_thread());
}
\f
/*
{
unsigned long result = *(unsigned long*)pc;
- *(char*)pc = BREAKPOINT_INST; /* x86 INT3 */
- *((char*)pc+1) = trap_Breakpoint; /* Lisp trap code */
+ *(char*)pc = BREAKPOINT_INST; /* x86 INT3 */
+ *((char*)pc+1) = trap_Breakpoint; /* Lisp trap code */
return result;
}
if (single_stepping && (signal==SIGTRAP))
{
- /* fprintf(stderr,"* single step trap %x\n", single_stepping); */
+ /* fprintf(stderr,"* single step trap %x\n", single_stepping); */
#ifdef CANNOT_GET_TO_SINGLE_STEP_FLAG
- /* Un-install single step helper instructions. */
- *(single_stepping-3) = single_step_save1;
- *(single_stepping-2) = single_step_save2;
- *(single_stepping-1) = single_step_save3;
+ /* Un-install single step helper instructions. */
+ *(single_stepping-3) = single_step_save1;
+ *(single_stepping-2) = single_step_save2;
+ *(single_stepping-1) = single_step_save3;
#else
- *context_eflags_addr(context) ^= 0x100;
+ *context_eflags_addr(context) ^= 0x100;
#endif
- /* Re-install the breakpoint if possible. */
- if (*os_context_pc_addr(context) == (int)single_stepping + 1) {
- fprintf(stderr, "warning: couldn't reinstall breakpoint\n");
- } else {
- *((char *)single_stepping) = BREAKPOINT_INST; /* x86 INT3 */
- *((char *)single_stepping+1) = trap_Breakpoint;
- }
-
- single_stepping = NULL;
- return;
+ /* Re-install the breakpoint if possible. */
+ if (*os_context_pc_addr(context) == (int)single_stepping + 1) {
+ fprintf(stderr, "warning: couldn't reinstall breakpoint\n");
+ } else {
+ *((char *)single_stepping) = BREAKPOINT_INST; /* x86 INT3 */
+ *((char *)single_stepping+1) = trap_Breakpoint;
+ }
+
+ single_stepping = NULL;
+ return;
}
/* This is just for info in case the monitor wants to print an
* approximation. */
current_control_stack_pointer =
- (lispobj *)*os_context_sp_addr(context);
+ (lispobj *)*os_context_sp_addr(context);
/* FIXME: CMUCL puts the float control restoration code here.
Thus, it seems to me that single-stepping won't restore the
switch (trap) {
case trap_PendingInterrupt:
- FSHOW((stderr, "/<trap pending interrupt>\n"));
- arch_skip_instruction(context);
- interrupt_handle_pending(context);
- break;
+ FSHOW((stderr, "/<trap pending interrupt>\n"));
+ arch_skip_instruction(context);
+ interrupt_handle_pending(context);
+ break;
case trap_Halt:
- /* Note: the old CMU CL code tried to save FPU state
- * here, and restore it after we do our thing, but there
- * seems to be no point in doing that, since we're just
- * going to lose(..) anyway. */
- fake_foreign_function_call(context);
- lose("%%PRIMITIVE HALT called; the party is over.");
+ /* Note: the old CMU CL code tried to save FPU state
+ * here, and restore it after we do our thing, but there
+ * seems to be no point in doing that, since we're just
+ * going to lose(..) anyway. */
+ fake_foreign_function_call(context);
+ lose("%%PRIMITIVE HALT called; the party is over.");
case trap_Error:
case trap_Cerror:
- FSHOW((stderr, "<trap error/cerror %d>\n", code));
- interrupt_internal_error(signal, info, context, code==trap_Cerror);
- break;
+ FSHOW((stderr, "<trap error/cerror %d>\n", code));
+ interrupt_internal_error(signal, info, context, code==trap_Cerror);
+ break;
case trap_Breakpoint:
- --*os_context_pc_addr(context);
- handle_breakpoint(signal, info, context);
- break;
+ --*os_context_pc_addr(context);
+ handle_breakpoint(signal, info, context);
+ break;
case trap_FunEndBreakpoint:
- --*os_context_pc_addr(context);
- *os_context_pc_addr(context) =
- (unsigned long)handle_fun_end_breakpoint(signal, info, context);
- break;
+ --*os_context_pc_addr(context);
+ *os_context_pc_addr(context) =
+ (unsigned long)handle_fun_end_breakpoint(signal, info, context);
+ break;
default:
- FSHOW((stderr,"/[C--trap default %d %d %x]\n",
- signal, code, context));
- interrupt_handle_now(signal, info, context);
- break;
+ FSHOW((stderr,"/[C--trap default %d %d %x]\n",
+ signal, code, context));
+ interrupt_handle_now(signal, info, context);
+ break;
}
}
* things.
*/
-void
+void
arch_write_linkage_table_jmp(char * reloc, void * fun)
{
unsigned long addr = (unsigned long) fun;
*reloc++ = 0x00; /* ... */
for (i = 0; i < 8; i++) {
- *reloc++ = addr & 0xff;
- addr >>= 8;
+ *reloc++ = addr & 0xff;
+ addr >>= 8;
}
/* write a nop for good measure. */
/* These setup and check *both* the sse2 and x87 FPUs. While lisp code
only uses the sse2 FPU, other code (such as libc) may use the x87 FPU.
*/
-
+
unsigned int
arch_get_fp_modes()
{
unsigned int temp;
unsigned int result;
- /* return the x87 exception flags ored in with the sse2
+ /* return the x87 exception flags ored in with the sse2
* control+status flags */
asm ("fnstsw %0" : "=m" (temp));
result = temp;
/* turn trap enable bits into exception mask */
mxcsr ^= 0x3F << 7;
-
+
/* set x87 modes */
asm ("fnstenv %0" : "=m" (f_env));
/* set control word: always long double precision
/* set status word: only override exception flags, from mxcsr */
f_env.sw &= ~0x3F;
f_env.sw |= (mxcsr & 0x3F);
-
+
asm ("fldenv %0" : : "m" (f_env));
-
+
/* now, simply, load up the mxcsr register */
temp = mxcsr;
asm ("ldmxcsr %0" : : "m" (temp));
extern never_returns lose(char *fmt, ...);
-static inline void
+static inline void
get_spinlock(volatile lispobj *word,long value)
{
#ifdef LISP_FEATURE_SB_THREAD
u64 rax=0;
- if(*word==value)
- lose("recursive get_spinlock: 0x%x,%ld\n",word,value);
+ if(*word==value)
+ lose("recursive get_spinlock: 0x%x,%ld\n",word,value);
do {
- asm ("xor %0,%0\n\
- lock cmpxchg %1,%2"
- : "=a" (rax)
- : "r" (value), "m" (*word)
- : "memory", "cc");
+ asm ("xor %0,%0\n\
+ lock cmpxchg %1,%2"
+ : "=a" (rax)
+ : "r" (value), "m" (*word)
+ : "memory", "cc");
} while(rax!=0);
#else
*word=value;
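/* get_spinlock() above spins on lock cmpxchg until it observes the
 * word changing from 0 to the caller's value.  A hedged portable
 * sketch of the same acquire loop using the GCC __sync builtin in
 * place of the inline assembly; __sync_lock_release stores 0 with
 * release semantics, where the runtime just stores 0 directly. */
#include <pthread.h>
#include <stdio.h>

static volatile long demo_lock;         /* 0 = free, otherwise owner id */
static long counter;

static void demo_get_spinlock(volatile long *word, long value)
{
    while (__sync_val_compare_and_swap(word, 0, value) != 0)
        ;                               /* busy-wait until we win the CAS */
}

static void *worker(void *arg)
{
    long id = (long)arg;
    int i;
    for (i = 0; i < 100000; i++) {
        demo_get_spinlock(&demo_lock, id);
        counter++;                      /* protected increment */
        __sync_lock_release(&demo_lock);
    }
    return 0;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, 0, worker, (void *)1);
    pthread_create(&b, 0, worker, (void *)2);
    pthread_join(a, 0);
    pthread_join(b, 0);
    printf("counter=%ld (expect 200000)\n", counter);
    return 0;
}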
#include <linux/unistd.h>
#include <sys/mman.h>
#include <linux/version.h>
-#include "thread.h" /* dynamic_values_bytes */
+#include "thread.h" /* dynamic_values_bytes */
#include "validate.h"
size_t os_vm_page_size;
#define RCASE(name) case reg_ ## name: return &context->uc_mcontext.gregs[REG_ ## name];
switch(offset) {
RCASE(RAX)
- RCASE(RCX)
- RCASE(RDX)
- RCASE(RBX)
- RCASE(RSP)
- RCASE(RBP)
- RCASE(RSI)
- RCASE(RDI)
- RCASE(R8)
- RCASE(R9)
- RCASE(R10)
- RCASE(R11)
- RCASE(R12)
- RCASE(R13)
- RCASE(R14)
- RCASE(R15)
- default:
- if(offset<NGREG)
- return &context->uc_mcontext.gregs[offset/2+4];
- else return 0;
+ RCASE(RCX)
+ RCASE(RDX)
+ RCASE(RBX)
+ RCASE(RSP)
+ RCASE(RBP)
+ RCASE(RSI)
+ RCASE(RDI)
+ RCASE(R8)
+ RCASE(R9)
+ RCASE(R10)
+ RCASE(R11)
+ RCASE(R12)
+ RCASE(R13)
+ RCASE(R14)
+ RCASE(R15)
+ default:
+ if(offset<NGREG)
+ return &context->uc_mcontext.gregs[offset/2+4];
+ else return 0;
}
return &context->uc_mcontext.gregs[offset];
}
os_context_register_t *
os_context_sp_addr(os_context_t *context)
-{
+{
return &context->uc_mcontext.gregs[REG_RSP];
}
unsigned long
os_context_fp_control(os_context_t *context)
{
- /* return the x87 exception flags ored in with the sse2
+ /* return the x87 exception flags ored in with the sse2
* control+status flags */
unsigned int result = (context->uc_mcontext.fpregs->swd & 0x3F) | context->uc_mcontext.fpregs->mxcsr;
/* flip exception mask bits */
/* the number of registers visible as registers in the virtual machine
* (excludes stuff like segment registers) */
-#define NREGS (16)
+#define NREGS (16)
#ifdef LANGUAGE_ASSEMBLY
#define REG(num) $ ## num
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"
-#define BREAKPOINT_INST 0xcc /* INT3 */
+#define BREAKPOINT_INST 0xcc /* INT3 */
unsigned long fast_random_state = 1;
int vlen;
int code;
-
+
/* Get and skip the Lisp interrupt code. */
code = *(char*)(*os_context_pc_addr(context))++;
switch (code)
- {
- case trap_Error:
- case trap_Cerror:
- /* Lisp error arg vector length */
- vlen = *(char*)(*os_context_pc_addr(context))++;
- /* Skip Lisp error arg data bytes. */
- while (vlen-- > 0) {
- ++*os_context_pc_addr(context);
- }
- break;
-
- case trap_Breakpoint: /* not tested */
- case trap_FunEndBreakpoint: /* not tested */
- break;
-
- case trap_PendingInterrupt:
- case trap_Halt:
- /* only needed to skip the Code */
- break;
-
- default:
- fprintf(stderr,"[arch_skip_inst invalid code %d\n]\n",code);
- break;
- }
+ {
+ case trap_Error:
+ case trap_Cerror:
+ /* Lisp error arg vector length */
+ vlen = *(char*)(*os_context_pc_addr(context))++;
+ /* Skip Lisp error arg data bytes. */
+ while (vlen-- > 0) {
+ ++*os_context_pc_addr(context);
+ }
+ break;
+
+ case trap_Breakpoint: /* not tested */
+ case trap_FunEndBreakpoint: /* not tested */
+ break;
+
+ case trap_PendingInterrupt:
+ case trap_Halt:
+ /* only needed to skip the Code */
+ break;
+
+ default:
+          fprintf(stderr,"[arch_skip_inst invalid code %d]\n",code);
+ break;
+ }
FSHOW((stderr,
- "/[arch_skip_inst resuming at %x]\n",
- *os_context_pc_addr(context)));
+ "/[arch_skip_inst resuming at %x]\n",
+ *os_context_pc_addr(context)));
}
unsigned char *
arch_set_pseudo_atomic_interrupted(os_context_t *context)
{
SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1),
- arch_os_get_current_thread());
+ arch_os_get_current_thread());
}
\f
/*
{
unsigned long result = *(unsigned long*)pc;
- *(char*)pc = BREAKPOINT_INST; /* x86 INT3 */
- *((char*)pc+1) = trap_Breakpoint; /* Lisp trap code */
+ *(char*)pc = BREAKPOINT_INST; /* x86 INT3 */
+ *((char*)pc+1) = trap_Breakpoint; /* Lisp trap code */
return result;
}
if (single_stepping && (signal==SIGTRAP))
{
- /* fprintf(stderr,"* single step trap %x\n", single_stepping); */
+ /* fprintf(stderr,"* single step trap %x\n", single_stepping); */
#ifdef CANNOT_GET_TO_SINGLE_STEP_FLAG
- /* Un-install single step helper instructions. */
- *(single_stepping-3) = single_step_save1;
- *(single_stepping-2) = single_step_save2;
- *(single_stepping-1) = single_step_save3;
+ /* Un-install single step helper instructions. */
+ *(single_stepping-3) = single_step_save1;
+ *(single_stepping-2) = single_step_save2;
+ *(single_stepping-1) = single_step_save3;
#else
- *context_eflags_addr(context) ^= 0x100;
+ *context_eflags_addr(context) ^= 0x100;
#endif
- /* Re-install the breakpoint if possible. */
- if (*os_context_pc_addr(context) == (int)single_stepping + 1) {
- fprintf(stderr, "warning: couldn't reinstall breakpoint\n");
- } else {
- *((char *)single_stepping) = BREAKPOINT_INST; /* x86 INT3 */
- *((char *)single_stepping+1) = trap_Breakpoint;
- }
-
- single_stepping = NULL;
- return;
+ /* Re-install the breakpoint if possible. */
+ if (*os_context_pc_addr(context) == (int)single_stepping + 1) {
+ fprintf(stderr, "warning: couldn't reinstall breakpoint\n");
+ } else {
+ *((char *)single_stepping) = BREAKPOINT_INST; /* x86 INT3 */
+ *((char *)single_stepping+1) = trap_Breakpoint;
+ }
+
+ single_stepping = NULL;
+ return;
}
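/* Illustrative sketch (editor's note, not part of this patch): the 0x100
 * constant toggled above is the x86 trap flag, bit 8 of EFLAGS.  While it
 * is set the CPU delivers SIGTRAP after every instruction, which is how
 * the breakpoint gets re-armed one instruction later.  The macro and the
 * helper name are assumptions made for this sketch. */
#define SKETCH_EFLAGS_TF 0x100

static void
sketch_toggle_single_step(os_context_t *context)
{
    /* XORing TF turns single-stepping on if it was off, and off again
     * once the handler above has re-installed the breakpoint. */
    *context_eflags_addr(context) ^= SKETCH_EFLAGS_TF;
}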
/* This is just for info in case the monitor wants to print an
* approximation. */
current_control_stack_pointer =
- (lispobj *)*os_context_sp_addr(context);
+ (lispobj *)*os_context_sp_addr(context);
/* FIXME: CMUCL puts the float control restoration code here.
Thus, it seems to me that single-stepping won't restore the
switch (trap) {
case trap_PendingInterrupt:
- FSHOW((stderr, "/<trap pending interrupt>\n"));
- arch_skip_instruction(context);
+ FSHOW((stderr, "/<trap pending interrupt>\n"));
+ arch_skip_instruction(context);
interrupt_handle_pending(context);
- break;
+ break;
case trap_Halt:
- /* Note: the old CMU CL code tried to save FPU state
- * here, and restore it after we do our thing, but there
- * seems to be no point in doing that, since we're just
- * going to lose(..) anyway. */
- fake_foreign_function_call(context);
- lose("%%PRIMITIVE HALT called; the party is over.");
+ /* Note: the old CMU CL code tried to save FPU state
+ * here, and restore it after we do our thing, but there
+ * seems to be no point in doing that, since we're just
+ * going to lose(..) anyway. */
+ fake_foreign_function_call(context);
+ lose("%%PRIMITIVE HALT called; the party is over.");
case trap_Error:
case trap_Cerror:
- FSHOW((stderr, "<trap error/cerror %d>\n", code));
- interrupt_internal_error(signal, info, context, code==trap_Cerror);
- break;
+ FSHOW((stderr, "<trap error/cerror %d>\n", code));
+ interrupt_internal_error(signal, info, context, code==trap_Cerror);
+ break;
case trap_Breakpoint:
- --*os_context_pc_addr(context);
- handle_breakpoint(signal, info, context);
- break;
+ --*os_context_pc_addr(context);
+ handle_breakpoint(signal, info, context);
+ break;
case trap_FunEndBreakpoint:
- --*os_context_pc_addr(context);
- *os_context_pc_addr(context) =
- (int)handle_fun_end_breakpoint(signal, info, context);
- break;
+ --*os_context_pc_addr(context);
+ *os_context_pc_addr(context) =
+ (int)handle_fun_end_breakpoint(signal, info, context);
+ break;
default:
- FSHOW((stderr,"/[C--trap default %d %d %x]\n",
- signal, code, context));
- interrupt_handle_now(signal, info, context);
- break;
+ FSHOW((stderr,"/[C--trap default %d %d %x]\n",
+ signal, code, context));
+ interrupt_handle_now(signal, info, context);
+ break;
}
}
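/* Illustrative sketch (editor's note, not part of this patch): how a
 * SIGTRAP handler with the (signal, siginfo_t *, context) shape seen above
 * is typically registered with sigaction().  The installer name is
 * hypothetical; the runtime's real installation goes through its own
 * interrupt machinery rather than this bare call. */
#include <signal.h>

static void
sketch_install_sigtrap_handler(void (*handler)(int, siginfo_t *, void *))
{
    struct sigaction sa;
    sigemptyset(&sa.sa_mask);        /* block no extra signals in the handler */
    sa.sa_flags = SA_SIGINFO;        /* pass siginfo_t and the ucontext */
    sa.sa_sigaction = handler;
    sigaction(SIGTRAP, &sa, NULL);
}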
* things.
*/
-void
+void
arch_write_linkage_table_jmp(char * reloc, void * fun)
{
/* Make JMP to function entry. JMP offset is calculated from next
long offset = (char *)fun - (reloc + 5);
int i;
- *reloc++ = 0xe9; /* opcode for JMP rel32 */
+ *reloc++ = 0xe9; /* opcode for JMP rel32 */
for (i = 0; i < 4; i++) {
- *reloc++ = offset & 0xff;
- offset >>= 8;
+ *reloc++ = offset & 0xff;
+ offset >>= 8;
}
/* write a nop for good measure. */
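/* Illustrative sketch (editor's note, not part of this patch): reading a
 * linkage-table entry back.  The routine above writes the one-byte 0xe9
 * JMP rel32 opcode followed by a little-endian 32-bit displacement that is
 * relative to the end of the five-byte instruction.  The helper name is
 * hypothetical. */
static void *
sketch_linkage_jmp_target(unsigned char *reloc)
{
    long offset = 0;
    int i;
    for (i = 3; i >= 0; i--)                  /* reassemble the rel32 field */
        offset = (offset << 8) | reloc[1 + i];
    return (void *)(reloc + 5 + (int)offset); /* target = next insn + offset */
}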
extern never_returns lose(char *fmt, ...);
-static inline void
+static inline void
get_spinlock(volatile lispobj *word,long value)
{
#ifdef LISP_FEATURE_SB_THREAD
u32 eax=0;
- if(*word==value)
- lose("recursive get_spinlock: 0x%x,%ld\n",word,value);
+ if(*word==value)
+ lose("recursive get_spinlock: 0x%x,%ld\n",word,value);
do {
- asm ("xor %0,%0\n\
- lock cmpxchg %1,%2"
- : "=a" (eax)
- : "r" (value), "m" (*word)
- : "memory", "cc");
+ asm ("xor %0,%0\n\
+ lock cmpxchg %1,%2"
+ : "=a" (eax)
+ : "r" (value), "m" (*word)
+ : "memory", "cc");
} while(eax!=0);
#else
*word=value;
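/* Illustrative sketch (editor's note, not part of this patch): the matching
 * release for get_spinlock is a plain store of zero, as the callers below
 * do with modify_ldt_lock.  Only the owner writes the word back, so no
 * atomic operation is needed on x86.  The function name is hypothetical. */
static inline void
sketch_release_spinlock(volatile lispobj *word)
{
    *word = 0;                /* mark the lock free again */
}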
* stuff in FreeBSD and OpenBSD, but in detail they're different in
* almost every line of code. It would be nice to find some way to
* factor out the commonality better; failing that, it might be best
- * just to split this generic-BSD code into one variant for each BSD.
+ * just to split this generic-BSD code into one variant for each BSD.
*
* KLUDGE II: this split has begun with the addition of the Darwin BSD
* flavour, with the cross-architecture complications that this
* entails; unfortunately, currently the situation is worse, not
* better, than in the above paragraph. */
-#if defined(__FreeBSD__) || defined(__OpenBSD__)
+#if defined(__FreeBSD__) || defined(__OpenBSD__)
int *
os_context_register_addr(os_context_t *context, int offset)
{
switch(offset) {
case 0:
- return CONTEXT_ADDR_FROM_STEM(eax);
+ return CONTEXT_ADDR_FROM_STEM(eax);
case 2:
- return CONTEXT_ADDR_FROM_STEM(ecx);
+ return CONTEXT_ADDR_FROM_STEM(ecx);
case 4:
- return CONTEXT_ADDR_FROM_STEM(edx);
+ return CONTEXT_ADDR_FROM_STEM(edx);
case 6:
- return CONTEXT_ADDR_FROM_STEM(ebx);
+ return CONTEXT_ADDR_FROM_STEM(ebx);
case 8:
- return CONTEXT_ADDR_FROM_STEM(esp);
+ return CONTEXT_ADDR_FROM_STEM(esp);
case 10:
- return CONTEXT_ADDR_FROM_STEM(ebp);
+ return CONTEXT_ADDR_FROM_STEM(ebp);
case 12:
- return CONTEXT_ADDR_FROM_STEM(esi);
+ return CONTEXT_ADDR_FROM_STEM(esi);
case 14:
- return CONTEXT_ADDR_FROM_STEM(edi);
+ return CONTEXT_ADDR_FROM_STEM(edi);
default:
- return 0;
+ return 0;
}
}
{
switch(offset) {
case 0:
- return CONTEXT_ADDR_FROM_STEM(EAX);
+ return CONTEXT_ADDR_FROM_STEM(EAX);
case 2:
- return CONTEXT_ADDR_FROM_STEM(ECX);
+ return CONTEXT_ADDR_FROM_STEM(ECX);
case 4:
- return CONTEXT_ADDR_FROM_STEM(EDX);
+ return CONTEXT_ADDR_FROM_STEM(EDX);
case 6:
- return CONTEXT_ADDR_FROM_STEM(EBX);
+ return CONTEXT_ADDR_FROM_STEM(EBX);
case 8:
- return CONTEXT_ADDR_FROM_STEM(ESP);
+ return CONTEXT_ADDR_FROM_STEM(ESP);
case 10:
- return CONTEXT_ADDR_FROM_STEM(EBP);
+ return CONTEXT_ADDR_FROM_STEM(EBP);
case 12:
- return CONTEXT_ADDR_FROM_STEM(ESI);
+ return CONTEXT_ADDR_FROM_STEM(ESI);
case 14:
- return CONTEXT_ADDR_FROM_STEM(EDI);
+ return CONTEXT_ADDR_FROM_STEM(EDI);
case 16:
- return CONTEXT_ADDR_FROM_STEM(UESP);
+ return CONTEXT_ADDR_FROM_STEM(UESP);
default:
- return 0;
+ return 0;
}
}
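/* Illustrative sketch (editor's note, not part of this patch): dumping the
 * Lisp-visible registers through os_context_register_addr.  The even
 * offsets 0..14 mirror the case labels above; the function name and the
 * use of <stdio.h> are assumptions made only for this sketch. */
#include <stdio.h>

static void
sketch_dump_lisp_registers(os_context_t *context)
{
    int offset;
    for (offset = 0; offset <= 14; offset += 2) {
        int *reg = os_context_register_addr(context, offset);
        if (reg)
            fprintf(stderr, "register %2d = 0x%08x\n", offset, *reg);
    }
}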
-/* FIXME: If this can be a no-op on BSD/x86, then it
+/* FIXME: If this can be a no-op on BSD/x86, then it
* deserves a more precise name.
*
* (Perhaps os_prepare_data_area_to_be_executed()?) */
#include <linux/unistd.h>
#include <sys/mman.h>
#include <linux/version.h>
-#include "thread.h" /* dynamic_values_bytes */
+#include "thread.h" /* dynamic_values_bytes */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define user_desc modify_ldt_ldt_s
+#define user_desc modify_ldt_ldt_s
#endif
_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount );
* users have thread-related problems that maintainers can't duplicate */
void debug_get_ldt()
-{
+{
int n=modify_ldt (0, local_ldt_copy, sizeof local_ldt_copy);
printf("%d bytes in ldt: print/x local_ldt_copy\n", n);
}
-volatile lispobj modify_ldt_lock; /* protect all calls to modify_ldt */
+volatile lispobj modify_ldt_lock; /* protect all calls to modify_ldt */
int arch_os_thread_init(struct thread *thread) {
stack_t sigstack;
* held when getting modify_ldt_lock
*/
struct user_desc ldt_entry = {
- 1, 0, 0, /* index, address, length filled in later */
- 1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1
- };
+ 1, 0, 0, /* index, address, length filled in later */
+ 1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1
+ };
int n;
get_spinlock(&modify_ldt_lock,(long)thread);
n=modify_ldt(0,local_ldt_copy,sizeof local_ldt_copy);
/* get next free ldt entry */
if(n) {
- u32 *p;
- for(n=0,p=local_ldt_copy;*p;p+=LDT_ENTRY_SIZE/sizeof(u32))
- n++;
+ u32 *p;
+ for(n=0,p=local_ldt_copy;*p;p+=LDT_ENTRY_SIZE/sizeof(u32))
+ n++;
}
ldt_entry.entry_number=n;
ldt_entry.base_addr=(unsigned long) thread;
ldt_entry.limit=dynamic_values_bytes;
ldt_entry.limit_in_pages=0;
if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
- modify_ldt_lock=0;
- /* modify_ldt call failed: something magical is not happening */
- return -1;
+ modify_ldt_lock=0;
+ /* modify_ldt call failed: something magical is not happening */
+ return -1;
}
- __asm__ __volatile__ ("movw %w0, %%fs" : : "q"
- ((n << 3) /* selector number */
- + (1 << 2) /* TI set = LDT */
- + 3)); /* privilege level */
+ __asm__ __volatile__ ("movw %w0, %%fs" : : "q"
+ ((n << 3) /* selector number */
+ + (1 << 2) /* TI set = LDT */
+ + 3)); /* privilege level */
thread->tls_cookie=n;
modify_ldt_lock=0;
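/* Illustrative sketch (editor's note, not part of this patch): the value
 * the asm above loads into %fs is an x86 segment selector.  Bits 3..15
 * hold the descriptor index, bit 2 selects the LDT rather than the GDT,
 * and the low two bits are the requested privilege level (3 = user mode).
 * The helper name is hypothetical. */
static unsigned short
sketch_ldt_selector(int entry_number)
{
    return (unsigned short)((entry_number << 3)   /* descriptor index */
                            | (1 << 2)            /* table indicator: LDT */
                            | 3);                 /* RPL = ring 3 */
}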
int arch_os_thread_cleanup(struct thread *thread) {
struct user_desc ldt_entry = {
- 0, 0, 0,
- 0, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 0
- };
+ 0, 0, 0,
+ 0, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 0
+ };
ldt_entry.entry_number=thread->tls_cookie;
get_spinlock(&modify_ldt_lock,(long)thread);
if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
- modify_ldt_lock=0;
- /* modify_ldt call failed: something magical is not happening */
- return 0;
+ modify_ldt_lock=0;
+ /* modify_ldt call failed: something magical is not happening */
+ return 0;
}
modify_ldt_lock=0;
return 1;
os_context_register_addr(os_context_t *context, int offset)
{
switch(offset) {
- case reg_EAX: return &context->uc_mcontext.gregs[11];
- case reg_ECX: return &context->uc_mcontext.gregs[10];
- case reg_EDX: return &context->uc_mcontext.gregs[9];
- case reg_EBX: return &context->uc_mcontext.gregs[8];
- case reg_ESP: return &context->uc_mcontext.gregs[7];
- case reg_EBP: return &context->uc_mcontext.gregs[6];
- case reg_ESI: return &context->uc_mcontext.gregs[5];
- case reg_EDI: return &context->uc_mcontext.gregs[4];
+ case reg_EAX: return &context->uc_mcontext.gregs[11];
+ case reg_ECX: return &context->uc_mcontext.gregs[10];
+ case reg_EDX: return &context->uc_mcontext.gregs[9];
+ case reg_EBX: return &context->uc_mcontext.gregs[8];
+ case reg_ESP: return &context->uc_mcontext.gregs[7];
+ case reg_EBP: return &context->uc_mcontext.gregs[6];
+ case reg_ESI: return &context->uc_mcontext.gregs[5];
+ case reg_EDI: return &context->uc_mcontext.gregs[4];
default: return 0;
}
return &context->uc_mcontext.gregs[offset];
os_context_register_t *
os_context_sp_addr(os_context_t *context)
-{
+{
return &context->uc_mcontext.gregs[17]; /* REG_UESP */
}
os_context_fp_control(os_context_t *context)
{
return ((((context->uc_mcontext.fpregs->cw) & 0xffff) ^ 0x3f) |
- (((context->uc_mcontext.fpregs->sw) & 0xffff) << 16));
+ (((context->uc_mcontext.fpregs->sw) & 0xffff) << 16));
}
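/* Illustrative sketch (editor's note, not part of this patch): pulling the
 * two halves back out of the word packed above -- the x87 control word
 * (with its mask bits flipped by the ^ 0x3f) sits in the low 16 bits and
 * the raw status word in the high 16.  The helper names are hypothetical. */
static unsigned int
sketch_fp_modes(unsigned int fp_control)
{
    return fp_control & 0xffff;           /* control word, mask bits flipped */
}

static unsigned int
sketch_fp_status(unsigned int fp_control)
{
    return (fp_control >> 16) & 0xffff;   /* raw x87 status word */
}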
sigset_t *
/* the number of registers visible as registers in the virtual machine
* (excludes stuff like segment registers) */
-#define NREGS (8)
+#define NREGS (8)
#ifdef LANGUAGE_ASSEMBLY
#define REG(num) $ ## num
;;; checkins which aren't released. (And occasionally for internal
;;; versions, especially for internal versions off the main CVS
;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"0.9.2.41"
+"0.9.2.42"