#include <sched.h>
#include <signal.h>
#include <stddef.h>
+#include <errno.h>
#ifndef CLONE_PARENT /* lameass glibc 2.2 doesn't define this */
#define CLONE_PARENT 0x00008000 /* even though the manpage documents it */
#endif
int dynamic_values_bytes=4096*sizeof(lispobj); /* same for all threads */
struct thread *all_threads;
lispobj all_threads_lock;
+/* Number of threads that have been sent SIG_STOP_FOR_GC but have not
+ * yet come to rest: gc_stop_the_world() spins until this reaches zero.
+ * It is decremented in the thread-teardown path below, and presumably
+ * also by the SIG_STOP_FOR_GC handler once a thread has actually
+ * stopped -- the handler is not visible in this file, TODO confirm. */
+volatile int countdown_to_gc;
extern struct interrupt_data * global_interrupt_data;
void get_spinlock(lispobj *word,int value);
* to ensure that we don't have >1 thread with pid=0 on the list at once
*/
protect_control_stack_guard_page(th->pid,1);
- all_threads_lock=0;
+ release_spinlock(&all_threads_lock);
th->pid=kid_pid; /* child will not start until this is set */
#ifndef LISP_FEATURE_SB_THREAD
new_thread_trampoline(all_threads); /* call_into_lisp */
gc_alloc_update_page_tables(0, &th->alloc_region);
#endif
get_spinlock(&all_threads_lock,th->pid);
+ if(countdown_to_gc>0) countdown_to_gc--;
if(th==all_threads)
all_threads=th->next;
else {
while(th1->next!=th) th1=th1->next;
th1->next=th->next; /* unlink */
}
- all_threads_lock=0;
+ release_spinlock(&all_threads_lock);
if(th && th->tls_cookie>=0) arch_os_thread_cleanup(th);
os_invalidate((os_vm_address_t) th->control_stack_start,
((sizeof (lispobj))
return 0;
}
+/* These are not needed unless #+SB-THREAD, and since sigwaitinfo()
+ * doesn't seem to be easily available everywhere (OpenBSD...) it's
+ * more trouble than it's worth to compile it when not needed. */
+#if defined LISP_FEATURE_SB_THREAD
void block_sigcont(void)
{
/* don't allow ourselves to receive SIGCONT while we're in the
*/
sigset_t newset;
sigemptyset(&newset);
- sigaddset(&newset,SIGCONT);
+ sigaddset(&newset,SIG_DEQUEUE);
+ /* NOTE(review): the (truncated) comment above still talks about
+ * SIGCONT, but the signal blocked here is now SIG_DEQUEUE -- the
+ * same signal unblock_sigcont_and_sleep() below waits for.  The
+ * comment should be updated to match. */
sigprocmask(SIG_BLOCK, &newset, 0);
}
-/* This is not needed unless #+SB-THREAD, and since sigwaitinfo()
- * doesn't seem to be easily available everywhere (OpenBSD...) it's
- * more trouble than it's worth to compile it when not needed. */
-#if defined LISP_FEATURE_SB_THREAD
void unblock_sigcont_and_sleep(void)
{
sigset_t set;
sigemptyset(&set);
- sigaddset(&set,SIGCONT);
- sigwaitinfo(&set,0);
+ sigaddset(&set,SIG_DEQUEUE);
+ do {
+ errno=0;
+ sigwaitinfo(&set,0);
+ }while(errno==EINTR);
sigprocmask(SIG_UNBLOCK,&set,0);
}
+
+/* Ask thread PID to call FUNCTION: queue SIG_INTERRUPT_THREAD at it,
+ * carrying the function object in the signal's value payload.  Returns
+ * sigqueue()'s result (0 on success, -1 with errno set on failure).
+ * NOTE(review): sival_int is an int; if lispobj is wider than int this
+ * truncates -- confirm on 64-bit targets. */
+int interrupt_thread(pid_t pid, lispobj function)
+{
+ union sigval payload;
+ payload.sival_int=function;
+ return sigqueue(pid, SIG_INTERRUPT_THREAD, payload);
+}
+
+/* stopping the world is a two-stage process. From this thread we signal
+ * all the others with SIG_STOP_FOR_GC. The handler for this thread does
+ * the usual pseudo-atomic checks (we don't want to stop a thread while
+ * it's in the middle of allocation) then kills _itself_ with SIGSTOP.
+ * At any given time, countdown_to_gc should reflect the number of threads
+ * signalled but which haven't yet come to rest
+ */
+
+void gc_stop_the_world()
+{
+ /* stop all other threads by sending them SIG_STOP_FOR_GC */
+ struct thread *p,*th=arch_os_get_current_thread();
+ /* tail = head of all_threads as of the previous pass: threads between
+ * the current all_threads and tail were created since then and still
+ * need signalling.  0 on the first pass, so every thread is covered. */
+ struct thread *tail=0;
+ int finished=0;
+ do {
+ get_spinlock(&all_threads_lock,th->pid);
+ if(tail!=all_threads) {
+ /* new threads always get consed onto the front of all_threads,
+ * and may be created by any thread that we haven't signalled
+ * yet or hasn't received our signal and stopped yet. So, check
+ * for them on each time around */
+ for(p=all_threads;p!=tail;p=p->next) {
+ if(p==th) continue;
+ /* countdown_to_gc is decremented in the teardown path above
+ * when a signalled thread exits, and presumably by the
+ * SIG_STOP_FOR_GC handler when one stops -- handler not
+ * visible here, TODO confirm */
+ countdown_to_gc++;
+ kill(p->pid,SIG_STOP_FOR_GC);
+ }
+ tail=all_threads;
+ } else {
+ /* no new threads since the last pass: we are done once every
+ * signalled thread has come to rest */
+ finished=(countdown_to_gc==0);
+ }
+ release_spinlock(&all_threads_lock);
+ /* drop the lock and yield so the signalled threads can run their
+ * handlers and actually stop */
+ sched_yield();
+ } while(!finished);
+}
+
+/* Restart the world after GC: resume every thread except ourselves by
+ * sending it SIGCONT, holding all_threads_lock while walking the list. */
+void gc_start_the_world()
+{
+ struct thread *self=arch_os_get_current_thread();
+ struct thread *t;
+ get_spinlock(&all_threads_lock,self->pid);
+ for(t=all_threads;t;t=t->next)
+ if(t!=self) kill(t->pid,SIGCONT);
+ release_spinlock(&all_threads_lock);
+}
#endif