src/runtime/x86-64-darwin-os.c
#ifdef LISP_FEATURE_SB_THREAD
#include <architecture/i386/table.h>
#include <i386/user_ldt.h>
#include <mach/mach_init.h>
#endif

#include "thread.h"
#include "validate.h"
#include "runtime.h"
#include "interrupt.h"
#include "x86-64-darwin-os.h"
#include "genesis/fdefn.h"

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/machine/thread_state.h>
#include <mach/machine/thread_status.h>
#include <sys/_types.h>
#include <sys/ucontext.h>
#include <pthread.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>

#if __DARWIN_UNIX03
#include <sys/_structs.h>
#endif

#if __DARWIN_UNIX03

typedef struct __darwin_ucontext darwin_ucontext;
typedef struct __darwin_mcontext64 darwin_mcontext;

#define rip __rip
#define rsp __rsp
#define rbp __rbp
#define rax __rax
#define rbx __rbx
#define rcx __rcx
#define rdx __rdx
#define rsi __rsi
#define rdi __rdi
#define r8 __r8
#define r9 __r9
#define faultvaddr __faultvaddr
#define ss __ss
#define es __es
#define fs __fs

#else

typedef struct ucontext darwin_ucontext;
typedef struct mcontext darwin_mcontext;

#endif

#ifdef LISP_FEATURE_SB_THREAD
pthread_mutex_t mach_exception_lock = PTHREAD_MUTEX_INITIALIZER;
#endif

#ifdef LISP_FEATURE_MACH_EXCEPTION_HANDLER

kern_return_t mach_thread_init(mach_port_t thread_exception_port);

void sigill_handler(int signal, siginfo_t *siginfo, os_context_t *context);
void sigtrap_handler(int signal, siginfo_t *siginfo, os_context_t *context);
void memory_fault_handler(int signal, siginfo_t *siginfo,
                          os_context_t *context);

/* exc_server handles mach exception messages from the kernel and
 * calls catch_exception_raise.  We use the system-provided
 * mach_msg_server, which, I assume, calls exc_server in a loop. */
extern boolean_t exc_server();

/* This executes in the faulting thread as part of the signal
 * emulation.  It is passed a context with the uc_mcontext field
 * pointing to a valid block of memory. */
void build_fake_signal_context(darwin_ucontext *context,
                               x86_thread_state64_t *thread_state,
                               x86_float_state64_t *float_state) {
    pthread_sigmask(0, NULL, &context->uc_sigmask);
    context->uc_mcontext->ss = *thread_state;
    context->uc_mcontext->fs = *float_state;
}
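
/* Under __DARWIN_UNIX03 the ss and fs fields above expand (via the #defines
 * near the top of this file) to the __ss and __fs members of the Darwin
 * mcontext, i.e. the saved x86_thread_state64_t and x86_float_state64_t.
 * Copying them wholesale is what makes this ucontext look like one the
 * kernel would have built for a real signal delivery. */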

/* This executes in the faulting thread as part of the signal
 * emulation.  It is effectively the inverse operation from above. */
void update_thread_state_from_context(x86_thread_state64_t *thread_state,
                                      x86_float_state64_t *float_state,
                                      darwin_ucontext  *context) {
    *thread_state = context->uc_mcontext->ss;
    *float_state = context->uc_mcontext->fs;
    pthread_sigmask(SIG_SETMASK, &context->uc_sigmask, NULL);
}

/* Modify a context to push new data on its stack. */
void push_context(u64 data, x86_thread_state64_t *context)
{
    u64 *stack_pointer;

    stack_pointer = (u64*) context->rsp;
    *(--stack_pointer) = data;
    context->rsp = (u64) stack_pointer;
}

void align_context_stack(x86_thread_state64_t *context)
{
    /* 16-byte align the stack (provided that the stack is, as it
     * should be, 8-byte aligned). */
    while (context->rsp & 15) push_context(0, context);
}

/* Stack allocation starts with a context that has a mod-8 RSP value
 * and needs to leave a context with a mod-16 RSP that will restore
 * the old RSP value and other register state when activated.  The
 * first part of this is the recovery trampoline, which restores RSP
 * relative to RBP, pops the saved argument registers and RBP, and
 * returns. */
asm(".globl _stack_allocation_recover; \
    .align 4; \
 _stack_allocation_recover: \
    lea -48(%rbp), %rsp; \
    pop %rsi; \
    pop %rdi; \
    pop %rdx; \
    pop %rcx; \
    pop %r8; \
    pop %r9; \
    pop %rbp; \
    ret;");
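
/* Sketch of the frame the trampoline unwinds (built by open_stack_allocation
 * below): RBP points at the saved RBP, the saved RIP sits just above it, and
 * the six saved argument registers occupy the 48 bytes just below it.
 * "lea -48(%rbp), %rsp" points RSP at that save area, the pops unwind it,
 * and "ret" resumes at the saved RIP with the original RSP value. */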

void open_stack_allocation(x86_thread_state64_t *context)
{
    void stack_allocation_recover(void);

    push_context(context->rip, context);
    push_context(context->rbp, context);
    context->rbp = context->rsp;

    push_context(context->r9, context);
    push_context(context->r8, context);
    push_context(context->rcx, context);
    push_context(context->rdx, context);
    push_context(context->rsi, context);
    push_context(context->rdi, context);

    context->rip = (u64) stack_allocation_recover;

    align_context_stack(context);
}

/* Stack allocation of data starts with a context with a mod-16 RSP
 * value and reserves some space on it by manipulating the RSP
 * register. */
void *stack_allocate(x86_thread_state64_t *context, size_t size)
{
    /* round up size to a 16-byte multiple */
    size = (size + 15) & -16;

    context->rsp = ((u64)context->rsp) - size;

    return (void *)context->rsp;
}
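
/* Note that stack_allocate only makes sense after open_stack_allocation has
 * built the recovery frame: the reserved space lives below that frame and is
 * given back when the saved context is restored.  catch_exception_raise
 * below always pairs the two calls. */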

/* Arranging to invoke a C function is tricky, as we have to honor
 * the x86-64 Darwin calling convention (the first six integer
 * arguments go in RDI, RSI, RDX, RCX, R8 and R9, the rest on the
 * stack) and keep the stack 16-byte aligned at the call.  The
 * simplest way to arrange this, actually, is to open a new stack
 * allocation.
 * WARNING!!! THIS DOES NOT PRESERVE REGISTERS! */
void call_c_function_in_context(x86_thread_state64_t *context,
                                void *function,
                                int nargs,
                                ...)
{
    va_list ap;
    int i;
    u64 *stack_pointer;

    /* Set up to restore stack on exit. */
    open_stack_allocation(context);

    /* Have to keep the stack 16-byte aligned on x86-64/Darwin. */
    for (i = (1 & -nargs); i; i--) {
        push_context(0, context);
    }

    context->rsp = ((u64)context->rsp) - nargs * 8;
    stack_pointer = (u64 *)context->rsp;

    va_start(ap, nargs);
    if (nargs > 0) context->rdi = va_arg(ap, u64);
    if (nargs > 1) context->rsi = va_arg(ap, u64);
    if (nargs > 2) context->rdx = va_arg(ap, u64);
    if (nargs > 3) context->rcx = va_arg(ap, u64);
    if (nargs > 4) context->r8 = va_arg(ap, u64);
    if (nargs > 5) context->r9 = va_arg(ap, u64);
    for (i = 6; i < nargs; i++) {
        stack_pointer[i] = va_arg(ap, u64);
    }
    va_end(ap);

    push_context(context->rip, context);
    context->rip = (u64) function;
}
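
/* Usage sketch, mirroring how catch_exception_raise below arranges for a
 * signal handler to run on the faulting thread:
 *
 *     call_c_function_in_context(&thread_state,
 *                                signal_emulation_wrapper,
 *                                5,
 *                                target_thread_state,
 *                                target_float_state,
 *                                signal,
 *                                siginfo,
 *                                handler);
 *
 * Once the modified thread_state is installed with thread_set_state, the
 * thread resumes in signal_emulation_wrapper with those five values in
 * RDI, RSI, RDX, RCX and R8. */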

void signal_emulation_wrapper(x86_thread_state64_t *thread_state,
                              x86_float_state64_t *float_state,
                              int signal,
                              siginfo_t *siginfo,
                              void (*handler)(int, siginfo_t *, void *))
{

    /* CLH: FIXME **NOTE: HACK ALERT!** Ideally, we would allocate
     * context and regs on the stack as local variables, but this
     * causes problems for the lisp debugger. When it walks the stack
     * for a back trace, it sees 1) the address of the local variable
     * on the stack and thinks that is a frame pointer to a lisp
     * frame, and 2) the address of the sap that we alloc'ed in
     * dynamic space and thinks that is a return address, so it
     * heuristically (and wrongly) decides that this should be
     * interpreted as a lisp frame instead of as a C frame.
     * We can work around this in this case by os_validating the
     * context (and regs just for symmetry).
     */

    darwin_ucontext  *context;
    darwin_mcontext *regs;

    context = (darwin_ucontext *) os_validate(0, sizeof(darwin_ucontext));
    regs = (darwin_mcontext*) os_validate(0, sizeof(darwin_mcontext));
    context->uc_mcontext = regs;

    /* When BSD signals are delivered, they block the signals in
       sa_mask, which for us always seems to be the blockable_sigset,
       so we need to:
       1) save the current sigmask
       2) block the blockable signals
       3) call the signal handler
       4) restore the sigmask */

    build_fake_signal_context(context, thread_state, float_state);

    block_blockable_signals(0, 0);

    handler(signal, siginfo, context);

    update_thread_state_from_context(thread_state, float_state, context);

    os_invalidate((os_vm_address_t)context, sizeof(darwin_ucontext));
    os_invalidate((os_vm_address_t)regs, sizeof(darwin_mcontext));

    /* Trap to restore the signal context. */
    asm volatile ("mov %0, %%rax; mov %1, %%rbx; .quad 0xffffffffffff0b0f"
                  : : "r" (thread_state), "r" (float_state));
}
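
/* The .quad above encodes ud2 (0f 0b) followed by six 0xff bytes, so
 * executing it raises EXC_BAD_INSTRUCTION.  catch_exception_raise below
 * recognizes the full 8-byte pattern 0xffffffffffff0b0f at RIP as this fake
 * sigreturn and reloads the thread and float state from the pointers left
 * in RAX and RBX, which is how the (possibly modified) context seen by the
 * Lisp-side handler gets installed back into the interrupted thread. */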

#if defined DUMP_CONTEXT
void dump_context(x86_thread_state64_t *context)
{
    int i;
    u64 *stack_pointer;

    printf("rax: %08lx  rcx: %08lx  rdx: %08lx  rbx: %08lx\n",
           context->rax, context->rcx, context->rdx, context->rbx);
    printf("rsp: %08lx  rbp: %08lx  rsi: %08lx  rdi: %08lx\n",
           context->rsp, context->rbp, context->rsi, context->rdi);
    printf("rip: %08lx  rflags: %08lx\n",
           context->rip, context->rflags);
    printf("cs: %04hx  fs: %04hx  gs: %04hx\n",
           context->cs, context->fs, context->gs);

    stack_pointer = (u64 *)context->rsp;
    for (i = 0; i < 48; i+=4) {
        printf("%016llx:  %016llx %016llx %016llx %016llx\n",
               (unsigned long long)(context->rsp + (i * 8)),
               (unsigned long long)stack_pointer[i],
               (unsigned long long)stack_pointer[i+1],
               (unsigned long long)stack_pointer[i+2],
               (unsigned long long)stack_pointer[i+3]);
    }
}
#endif

void
control_stack_exhausted_handler(int signal, siginfo_t *siginfo,
                                os_context_t *context) {
    unblock_signals_in_context_and_maybe_warn(context);
    arrange_return_to_lisp_function
        (context, StaticSymbolFunction(CONTROL_STACK_EXHAUSTED_ERROR));
}

void
undefined_alien_handler(int signal, siginfo_t *siginfo, os_context_t *context) {
    arrange_return_to_lisp_function
        (context, StaticSymbolFunction(UNDEFINED_ALIEN_VARIABLE_ERROR));
}

kern_return_t
catch_exception_raise(mach_port_t exception_port,
                      mach_port_t thread,
                      mach_port_t task,
                      exception_type_t exception,
                      exception_data_t code_vector,
                      mach_msg_type_number_t code_count)
{
    kern_return_t ret;
    int signal;
    siginfo_t* siginfo;

#ifdef LISP_FEATURE_SB_THREAD
    thread_mutex_lock(&mach_exception_lock);
#endif

    x86_thread_state64_t thread_state;
    mach_msg_type_number_t thread_state_count = x86_THREAD_STATE64_COUNT;

    x86_float_state64_t float_state;
    mach_msg_type_number_t float_state_count = x86_FLOAT_STATE64_COUNT;

    x86_exception_state64_t exception_state;
    mach_msg_type_number_t exception_state_count = x86_EXCEPTION_STATE64_COUNT;

    x86_thread_state64_t backup_thread_state;
    x86_thread_state64_t *target_thread_state;
    x86_float_state64_t *target_float_state;

    os_vm_address_t addr;

    struct thread *th = (struct thread*) exception_port;
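    /* The per-thread exception ports are derived from the thread struct
     * (see THREAD_STRUCT_TO_EXCEPTION_PORT in setup_mach_exceptions below),
     * which is why the port value can be cast back to the owning thread
     * here. */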

    FSHOW((stderr,"/entering catch_exception_raise with exception: %d\n", exception));

    switch (exception) {

    case EXC_BAD_ACCESS:
        signal = SIGBUS;
        ret = thread_get_state(thread,
                               x86_THREAD_STATE64,
                               (thread_state_t)&thread_state,
                               &thread_state_count);
        ret = thread_get_state(thread,
                               x86_FLOAT_STATE64,
                               (thread_state_t)&float_state,
                               &float_state_count);
        ret = thread_get_state(thread,
                               x86_EXCEPTION_STATE64,
                               (thread_state_t)&exception_state,
                               &exception_state_count);
        addr = (os_vm_address_t)exception_state.faultvaddr;

        /* Note the os_context hackery here.  When the signal handler
         * returns, it won't go back to what it was doing ... */
        if(addr >= CONTROL_STACK_GUARD_PAGE(th) &&
           addr < CONTROL_STACK_GUARD_PAGE(th) + os_vm_page_size) {
            /* We hit the end of the control stack: disable guard page
             * protection so the error handler has some headroom, protect the
             * previous page so that we can catch returns from the guard page
             * and restore it. */
            lower_thread_control_stack_guard_page(th);

            backup_thread_state = thread_state;
            open_stack_allocation(&thread_state);
            /* Reserve a 256-byte zone for signal handlers
             * to use on the interrupted thread stack. */
            stack_allocate(&thread_state, 256);

            /* Save thread state */
            target_thread_state =
                stack_allocate(&thread_state, sizeof(*target_thread_state));
            (*target_thread_state) = backup_thread_state;

            /* Save float state */
            target_float_state =
                stack_allocate(&thread_state, sizeof(*target_float_state));
            (*target_float_state) = float_state;

            /* Set up siginfo */
            siginfo = stack_allocate(&thread_state, sizeof(*siginfo));
            /* What do we need to put in our fake siginfo?  It looks like
             * the x86 code only uses si_signo and si_addr. */
            siginfo->si_signo = signal;
            siginfo->si_addr = (void*)exception_state.faultvaddr;

            call_c_function_in_context(&thread_state,
                                       signal_emulation_wrapper,
                                       5,
                                       target_thread_state,
                                       target_float_state,
                                       signal,
                                       siginfo,
                                       control_stack_exhausted_handler);
        }
        else if(addr >= CONTROL_STACK_RETURN_GUARD_PAGE(th) &&
                addr < CONTROL_STACK_RETURN_GUARD_PAGE(th) + os_vm_page_size) {
            /* We're returning from the guard page: reprotect it, and
             * unprotect this one. This works even if we somehow missed
             * the return-guard-page, and hit it on our way to new
             * exhaustion instead. */
            reset_thread_control_stack_guard_page(th);
        }
        else if (addr >= undefined_alien_address &&
                 addr < undefined_alien_address + os_vm_page_size) {
            backup_thread_state = thread_state;
            open_stack_allocation(&thread_state);
            stack_allocate(&thread_state, 256);

            /* Save thread state */
            target_thread_state =
                stack_allocate(&thread_state, sizeof(*target_thread_state));
            (*target_thread_state) = backup_thread_state;

            target_float_state =
                stack_allocate(&thread_state, sizeof(*target_float_state));
            (*target_float_state) = float_state;

            /* Set up siginfo */
            siginfo = stack_allocate(&thread_state, sizeof(*siginfo));
            /* What do we need to put in our fake siginfo?  It looks like
             * the x86 code only uses si_signo and si_addr. */
            siginfo->si_signo = signal;
            siginfo->si_addr = (void*)exception_state.faultvaddr;

            call_c_function_in_context(&thread_state,
                                       signal_emulation_wrapper,
                                       5,
                                       target_thread_state,
                                       target_float_state,
                                       signal,
                                       siginfo,
                                       undefined_alien_handler);
        } else {

            backup_thread_state = thread_state;
            open_stack_allocation(&thread_state);
            stack_allocate(&thread_state, 256);

            /* Save thread state */
            target_thread_state =
                stack_allocate(&thread_state, sizeof(*target_thread_state));
            (*target_thread_state) = backup_thread_state;

            target_float_state =
                stack_allocate(&thread_state, sizeof(*target_float_state));
            (*target_float_state) = float_state;

            /* Set up siginfo */
            siginfo = stack_allocate(&thread_state, sizeof(*siginfo));
            /* What do we need to put in our fake siginfo?  It looks like
             * the x86 code only uses si_signo and si_addr. */
            siginfo->si_signo = signal;
            siginfo->si_addr = (void*)exception_state.faultvaddr;

            call_c_function_in_context(&thread_state,
                                       signal_emulation_wrapper,
                                       5,
                                       target_thread_state,
                                       target_float_state,
                                       signal,
                                       siginfo,
                                       memory_fault_handler);
        }
        ret = thread_set_state(thread,
                               x86_THREAD_STATE64,
                               (thread_state_t)&thread_state,
                               thread_state_count);

        ret = thread_set_state(thread,
                               x86_FLOAT_STATE64,
                               (thread_state_t)&float_state,
                               float_state_count);
#ifdef LISP_FEATURE_SB_THREAD
        thread_mutex_unlock(&mach_exception_lock);
#endif
        return KERN_SUCCESS;

    case EXC_BAD_INSTRUCTION:

        ret = thread_get_state(thread,
                               x86_THREAD_STATE64,
                               (thread_state_t)&thread_state,
                               &thread_state_count);
        ret = thread_get_state(thread,
                               x86_FLOAT_STATE64,
                               (thread_state_t)&float_state,
                               &float_state_count);
        ret = thread_get_state(thread,
                               x86_EXCEPTION_STATE64,
                               (thread_state_t)&exception_state,
                               &exception_state_count);
        if (0xffffffffffff0b0f == *((u64 *)thread_state.rip)) {
            /* Fake sigreturn. */

            /* When we get here, thread_state.rax is a pointer to a
             * thread_state to restore. */
            /* thread_state = *((thread_state_t *)thread_state.rax); */

            ret = thread_set_state(thread,
                                   x86_THREAD_STATE64,
                                   (thread_state_t) thread_state.rax,
                                   /* &thread_state, */
                                   thread_state_count);

            ret = thread_set_state(thread,
                                   x86_FLOAT_STATE64,
                                   (thread_state_t) thread_state.rbx,
                                   /* &thread_state, */
                                   float_state_count);
        } else {

            backup_thread_state = thread_state;
            open_stack_allocation(&thread_state);
            stack_allocate(&thread_state, 256);

            /* Save thread state */
            target_thread_state =
                stack_allocate(&thread_state, sizeof(*target_thread_state));
            (*target_thread_state) = backup_thread_state;

            target_float_state =
                stack_allocate(&thread_state, sizeof(*target_float_state));
            (*target_float_state) = float_state;

            /* Set up siginfo */
            siginfo = stack_allocate(&thread_state, sizeof(*siginfo));
            /* What do we need to put in our fake siginfo?  It looks like
             * the x86 code only uses si_signo and si_addr. */
            if (*((unsigned short *)target_thread_state->rip) == 0x0b0f) {
                signal = SIGTRAP;
                siginfo->si_signo = signal;
                siginfo->si_addr = (void*)exception_state.faultvaddr;
                target_thread_state->rip += 2;
                call_c_function_in_context(&thread_state,
                                           signal_emulation_wrapper,
                                           5,
                                           target_thread_state,
                                           target_float_state,
                                           signal,
                                           siginfo,
                                           sigtrap_handler);
            } else {
                signal = SIGILL;
                siginfo->si_signo = signal;
                siginfo->si_addr = (void*)exception_state.faultvaddr;

                call_c_function_in_context(&thread_state,
                                           signal_emulation_wrapper,
                                           5,
                                           target_thread_state,
                                           target_float_state,
                                           signal,
                                           siginfo,
                                           sigill_handler);
            }
            ret = thread_set_state(thread,
                                   x86_THREAD_STATE64,
                                   (thread_state_t)&thread_state,
                                   thread_state_count);
            ret = thread_set_state(thread,
                                   x86_FLOAT_STATE64,
                                   (thread_state_t)&float_state,
                                   float_state_count);
        }
#ifdef LISP_FEATURE_SB_THREAD
        thread_mutex_unlock(&mach_exception_lock);
#endif
        return KERN_SUCCESS;

    default:
#ifdef LISP_FEATURE_SB_THREAD
        thread_mutex_unlock(&mach_exception_lock);
#endif
        return KERN_INVALID_RIGHT;
    }
}
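
/* Returning KERN_SUCCESS above tells the kernel that the exception has been
 * handled and that the thread should simply resume with whatever state we
 * installed via thread_set_state.  Exception types we do not handle get
 * KERN_INVALID_RIGHT instead. */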

void *
mach_exception_handler(void *port)
{
  mach_msg_server(exc_server, 2048, (mach_port_t) port, 0);
  /* mach_msg_server should never return; instead it keeps dispatching
   * mach exceptions to our catch_exception_raise function. */
  lose("mach_msg_server returned");
}

/* Sets up the thread that will listen for mach exceptions.  Note that
   the exception handlers will be run on this thread.  This is
   different from the BSD-style signal handling situation in which the
   signal handlers run in the relevant thread directly. */

mach_port_t mach_exception_handler_port_set = MACH_PORT_NULL;

pthread_t
setup_mach_exception_handling_thread()
{
    kern_return_t ret;
    pthread_t mach_exception_handling_thread = NULL;
    pthread_attr_t attr;

    /* allocate a mach port set for this process */
    ret = mach_port_allocate(mach_task_self(),
                             MACH_PORT_RIGHT_PORT_SET,
                             &mach_exception_handler_port_set);

    /* create the thread that will receive the mach exceptions */

    FSHOW((stderr, "Creating mach_exception_handler thread!\n"));

    pthread_attr_init(&attr);
    pthread_create(&mach_exception_handling_thread,
                   &attr,
                   mach_exception_handler,
                   (void*) mach_exception_handler_port_set);
    pthread_attr_destroy(&attr);

    return mach_exception_handling_thread;
}
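
/* setup_mach_exception_handling_thread must run before any call to
 * mach_thread_init below, since mach_thread_init moves each per-thread
 * exception port into mach_exception_handler_port_set, which is only
 * allocated here. */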

/* Tell the kernel that we want EXC_BAD_ACCESS and EXC_BAD_INSTRUCTION
   exceptions sent to the exception port (which is being listened to
   by the mach exception handling thread). */
kern_return_t
mach_thread_init(mach_port_t thread_exception_port)
{
    kern_return_t ret;
    /* allocate a named port for the thread */

    FSHOW((stderr, "Allocating mach port %x\n", thread_exception_port));

    ret = mach_port_allocate_name(mach_task_self(),
                                  MACH_PORT_RIGHT_RECEIVE,
                                  thread_exception_port);
    if (ret) {
        lose("mach_port_allocate_name failed with return_code %d\n", ret);
    }

    /* establish the right for the thread_exception_port to send messages */
    ret = mach_port_insert_right(mach_task_self(),
                                 thread_exception_port,
                                 thread_exception_port,
                                 MACH_MSG_TYPE_MAKE_SEND);
    if (ret) {
        lose("mach_port_insert_right failed with return_code %d\n", ret);
    }

    ret = thread_set_exception_ports(mach_thread_self(),
                                     EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
                                     thread_exception_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE);
    if (ret) {
        lose("thread_set_exception_ports failed with return_code %d\n", ret);
    }

    ret = mach_port_move_member(mach_task_self(),
                                thread_exception_port,
                                mach_exception_handler_port_set);
    if (ret) {
        lose("mach_port_move_member failed with return_code %d\n", ret);
    }

    return ret;
}
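
/* Per-thread setup sketch: a thread that wants its exceptions handled by
 * the handler thread calls, on itself,
 *
 *     mach_thread_init(THREAD_STRUCT_TO_EXCEPTION_PORT(th));
 *
 * (where th is illustrative shorthand for that thread's own struct thread
 * pointer), as setup_mach_exceptions below does for the initial thread, so
 * that its EXC_BAD_ACCESS and EXC_BAD_INSTRUCTION exceptions land on the
 * handler thread's port set. */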

void
setup_mach_exceptions() {
    setup_mach_exception_handling_thread();
    mach_thread_init(THREAD_STRUCT_TO_EXCEPTION_PORT(all_threads));
}

pid_t
mach_fork() {
    pid_t pid = fork();
    if (pid == 0) {
        setup_mach_exceptions();
        return pid;
    } else {
        return pid;
    }
}
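
/* Mach port rights are not inherited across fork(), so the child cannot
 * reuse the parent's exception handling thread or ports; hence the call to
 * setup_mach_exceptions in the child branch above. */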

#endif