/* Called when Racket no longer cares about the status of process `pid`.
   If the process has not terminated yet, its status record is moved to
   the `unused_pid_statuses` list so the SIGCHLD worker thread can still
   reap it with a pid-specific waitpid() call (needed when the process
   may be in its own process group, where waitpid(0, ...) misses it). */
void scheme_done_with_process_id(int pid, int is_group)
{
  Child_Status *st;
  int keep_unused = 1; /* assume that any process can be in a new group */

  /* Lock order: child_wait_lock, then child_status_lock. */
  mzrt_mutex_lock(child_wait_lock); /* protects unused_pid_statuses */
  mzrt_mutex_lock(child_status_lock);

  for (st = child_statuses; st; st = st->next) {
    if (st->pid == pid) {
      if (!st->done) {
        if (keep_unused) {
          /* Not yet terminated: push onto the unused list (chained via
             `next_unused`) so the signal worker can reap it later. */
          st->next_unused = unused_pid_statuses;
          unused_pid_statuses = st;
          if (st->signal_fd)
            remove_group_signal_fd(st->signal_fd);
        } else
          st->unneeded = 1;
        st->signal_fd = NULL; /* no one is waiting for a wakeup anymore */
      }
      break;
    }
  }

  if (st && (keep_unused || st->done)) {
    /* remove it from normal list: */
    raw_get_child_status(pid, NULL, 0, 1, !st->done);
  }

  mzrt_mutex_unlock(child_status_lock);
  mzrt_mutex_unlock(child_wait_lock);
}
/* Called when Racket no longer cares about the status of process `pid`.
   For a process group (is_group), the record is moved to the
   `child_group_statuses` list (chained via `next_group`) so the SIGCHLD
   worker thread can still reap it with a pid-specific waitpid() call;
   otherwise the record is just flagged as unneeded. */
void scheme_done_with_process_id(int pid, int is_group)
{
  Child_Status *st;

  /* Lock order: child_wait_lock, then child_status_lock. */
  mzrt_mutex_lock(child_wait_lock); /* protects child_group_statuses */
  mzrt_mutex_lock(child_status_lock);

  for (st = child_statuses; st; st = st->next) {
    if (st->pid == pid) {
      if (!st->done) {
        if (is_group) {
          /* Not yet terminated: push onto the group list for later reaping. */
          st->next_group = child_group_statuses;
          child_group_statuses = st;
          if (st->signal_fd)
            remove_group_signal_fd(st->signal_fd);
        } else
          st->unneeded = 1;
        st->signal_fd = NULL; /* no one is waiting for a wakeup anymore */
      }
      break;
    }
  }

  if (st && (is_group || st->done)) {
    /* remove it from normal list: */
    raw_get_child_status(pid, NULL, 0, 1, !st->done);
  }

  mzrt_mutex_unlock(child_status_lock);
  mzrt_mutex_unlock(child_wait_lock);
}
int scheme_places_register_child(int pid, int is_group, void *signal_fd, int *status) { int found = 0; mzrt_mutex_lock(child_status_lock); /* The child may have terminated already: */ found = raw_get_child_status(pid, status, 0, 0, 0); if (!found) { /* Create a record for the child: */ Child_Status *st; st = malloc(sizeof(Child_Status)); st->pid = pid; st->signal_fd = signal_fd; st->status = 0; st->unneeded = 0; st->done = 0; st->is_group = is_group; st->next = child_statuses; child_statuses = st; st->next_unused = NULL; if (is_group) add_group_signal_fd(signal_fd); } mzrt_mutex_unlock(child_status_lock); return found; }
/* Record that child `pid` exited with raw waitpid `status`. Creates a
   record if the child terminated before it was registered. Wakes any
   waiter via the record's signal_fd, and — if the record was already
   flagged unneeded — lets scheme_get_child_status() consume/free it. */
static void add_child_status(int pid, int status)
{
  Child_Status *st;

  /* Search for existing record, which will have a signal_fd: */
  mzrt_mutex_lock(child_status_lock);

  for (st = child_statuses; st; st = st->next) {
    if (st->pid == pid)
      break;
  }

  if (!st) {
    /* must have terminated before it was registered
       (and since we detected it, it must not be a group) */
    st = malloc(sizeof(Child_Status));
    st->pid = pid;
    st->signal_fd = NULL;
    st->next = child_statuses;
    child_statuses = st;
    st->next_unused = NULL;
    st->unneeded = 0;
    st->is_group = 0;
  }
  st->status = status;
  st->done = 1;

  if (st->signal_fd && st->is_group)
    remove_group_signal_fd(st->signal_fd);

  mzrt_mutex_unlock(child_status_lock);

  /* NOTE(review): `st` is dereferenced after releasing
     child_status_lock; this relies on the record not being freed
     concurrently — confirm against the other lock holders. */
  if (st->signal_fd)
    scheme_signal_received_at(st->signal_fd);

  if (st->unneeded)
    (void)scheme_get_child_status(st->pid, 0, NULL);
}
/* Non-blocking receive on a place async channel: returns the next
   message (deserialized) if one is queued, NULL otherwise. Also
   registers the current signal handle in `ch->wakeup_signal` so a
   later sender can wake this place. */
static Scheme_Object *scheme_place_async_try_recv(Scheme_Place_Async_Channel *ch)
{
  Scheme_Object *msg = NULL;
  void *msg_memory = NULL;

  mzrt_mutex_lock(ch->lock);
  {
    void *signaldescr;
    signaldescr = scheme_get_signal_handle();
    ch->wakeup_signal = signaldescr;

    if (ch->count > 0) { /* GET MSG */
      msg = ch->msgs[ch->out];
      msg_memory = ch->msg_memory[ch->out];
      ch->msgs[ch->out] = NULL;
      ch->msg_memory[ch->out] = NULL;
      --ch->count;
      /* Advance the ring-buffer read index. (The previous form
         `ch->out = (++ch->out % ch->size)` wrote `ch->out` twice in
         one expression — undefined behavior before C11.) */
      ch->out = (ch->out + 1) % ch->size;
    }
  }
  mzrt_mutex_unlock(ch->lock);

  if (msg) {
    return scheme_places_deserialize(msg, msg_memory);
  }

  return msg;
}
/* Blame-the-child accounting pass over all thread records owned by
   `owner`: mark each running Racket thread (plus the live C stack for
   the current thread), and for a place, charge the place's reported
   memory use to the owner's account. */
inline static void mark_threads(NewGC *gc, int owner)
{
  GC_Thread_Info *work;
  Mark2_Proc thread_mark = gc->mark_table[btc_redirect_thread];

  for(work = gc->thread_infos; work; work = work->next) {
    if (work->owner == owner) {
      if (((Scheme_Object *)work->thread)->type == scheme_thread_type) {
        /* thread */
        if (((Scheme_Thread *)work->thread)->running) {
          thread_mark(work->thread, gc);
          if (work->thread == scheme_current_thread) {
            /* Also mark the C stack of the thread we are running on. */
            GC_mark_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
          }
        }
      } else {
        /* place */
#ifdef MZ_USE_PLACES
        /* add in the memory used by the place's GC */
        intptr_t sz;
        Scheme_Place_Object *place_obj = ((Scheme_Place *)work->thread)->place_obj;
        if (place_obj) {
          /* Read memory_use under the place's lock. */
          mzrt_mutex_lock(place_obj->lock);
          sz = place_obj->memory_use;
          mzrt_mutex_unlock(place_obj->lock);
          account_memory(gc, owner, gcBYTES_TO_WORDS(sz), 0);
        }
#endif
      }
    }
  }
}
/* Synchronize this place's GC with the master GC's `new_btc_mark`
   value before touching master-owned pages. The first arriving GC
   installs its mark value in MASTERGC and takes a reference count;
   a GC whose mark disagrees while holders exist registers as a waiter
   and blocks on master_btc_sema (posted by release_master_btc_mark).
   The page lock is released while potentially blocking. */
static void check_master_btc_mark(NewGC *gc, mpage *page)
{
  if (!gc->master_page_btc_mark_checked) {
    int pause = 1;
    RELEASE_PAGE_LOCK(1, page);
    while (pause) {
      mzrt_mutex_lock(master_btc_lock);

      if (master_btc_lock_count &&
          (gc->new_btc_mark != MASTERGC->new_btc_mark)) {
        /* Holders with a different mark value exist: wait our turn. */
        pause = 1;
        master_btc_lock_waiters++;
      } else {
        /* Either no holders, or our mark already matches: join. */
        pause = 0;
        MASTERGC->new_btc_mark = gc->new_btc_mark;
        master_btc_lock_count++;
      }

      mzrt_mutex_unlock(master_btc_lock);

      if (pause)
        mzrt_sema_wait(master_btc_sema);
    }
    TAKE_PAGE_LOCK(1, page);
    gc->master_page_btc_mark_checked = 1;
  }
}
/* Blocking receive on a place async channel: loops until a message is
   available, parking the Racket thread between attempts. Returns the
   raw serialized message; its allocation record is passed back through
   `msg_memory` for the caller to deserialize/free. */
static Scheme_Object *scheme_place_async_recv(Scheme_Place_Async_Channel *ch, void **msg_memory)
{
  Scheme_Object *msg = NULL;

  while (1) {
    mzrt_mutex_lock(ch->lock);
    {
      if (ch->count > 0) { /* GET MSG */
        msg = ch->msgs[ch->out];
        *msg_memory = ch->msg_memory[ch->out];
        ch->msgs[ch->out] = NULL;
        ch->msg_memory[ch->out] = NULL;
        --ch->count;
        /* Advance the ring-buffer read index. (The previous form
           `ch->out = (++ch->out % ch->size)` wrote `ch->out` twice in
           one expression — undefined behavior before C11.) */
        ch->out = (ch->out + 1) % ch->size;
      }
    }
    mzrt_mutex_unlock(ch->lock);

    if (msg)
      break;
    else {
      /* Register our signal handle so a sender can wake us, then
         block until the channel reports ready. */
      void *signaldescr;
      signaldescr = scheme_get_signal_handle();
      ch->wakeup_signal = signaldescr;
      scheme_thread_block(0);
      scheme_block_until((Scheme_Ready_Fun)scheme_place_async_ch_ready, NULL, (Scheme_Object *)ch, 0);
    }
  }

  return msg;
}
/* Ready test for a place async channel: reports whether a message is
   queued, and (as a side effect) records the current signal handle in
   `ch->wakeup_signal` so a sender can wake this place later. */
static int scheme_place_async_ch_ready(Scheme_Place_Async_Channel *ch)
{
  int ready;
  void *handle;

  mzrt_mutex_lock(ch->lock);
  handle = scheme_get_signal_handle();
  ch->wakeup_signal = handle;
  ready = (ch->count > 0) ? 1 : 0;
  mzrt_mutex_unlock(ch->lock);

  return ready;
}
/* Drop this GC's hold on the master's `new_btc_mark` value, waking one
   waiting GC (see check_master_btc_mark) when the last holder leaves. */
static void release_master_btc_mark(NewGC *gc)
{
  if (!gc->master_page_btc_mark_checked)
    return;

  /* release the lock on the master's new_btc_mark value */
  mzrt_mutex_lock(master_btc_lock);
  master_btc_lock_count--;
  if ((master_btc_lock_count == 0) && master_btc_lock_waiters) {
    /* Hand the mark over to one waiter. */
    master_btc_lock_waiters--;
    mzrt_sema_post(master_btc_sema);
  }
  mzrt_mutex_unlock(master_btc_lock);
}
/* Send `uo` on a place async channel: serialize outside the lock,
   enqueue under the lock (doubling the ring buffer when full), and
   signal the receiver if the queue was previously empty. */
static void scheme_place_async_send(Scheme_Place_Async_Channel *ch, Scheme_Object *uo)
{
  void *msg_memory = NULL;
  Scheme_Object *o;
  int cnt;

  o = scheme_places_serialize(uo, &msg_memory);

  mzrt_mutex_lock(ch->lock);
  {
    cnt = ch->count;
    if (ch->count == ch->size) { /* GROW QUEUE */
      Scheme_Object **new_msgs;
      void **new_msg_memory;

      new_msgs = GC_master_malloc(sizeof(Scheme_Object*) * ch->size * 2);
      new_msg_memory = GC_master_malloc(sizeof(void*) * ch->size * 2);

      if (ch->out < ch->in) {
        /* Live entries are contiguous: copy in one shot. */
        memcpy(new_msgs, ch->msgs + ch->out, sizeof(Scheme_Object *) * (ch->in - ch->out));
        memcpy(new_msg_memory, ch->msg_memory + ch->out, sizeof(void*) * (ch->in - ch->out));
      } else {
        /* Queue has wrapped: copy the tail [out..size) then the head [0..in). */
        int s1 = (ch->size - ch->out);
        memcpy(new_msgs, ch->msgs + ch->out, sizeof(Scheme_Object *) * s1);
        memcpy(new_msgs + s1, ch->msgs, sizeof(Scheme_Object *) * ch->in);
        memcpy(new_msg_memory, ch->msg_memory + ch->out, sizeof(void*) * s1);
        memcpy(new_msg_memory + s1, ch->msg_memory, sizeof(void*) * ch->in);
      }

      ch->msgs = new_msgs;
      ch->msg_memory = new_msg_memory;
      ch->in = ch->size;
      ch->out = 0;
      ch->size *= 2;
    }

    ch->msgs[ch->in] = o;
    ch->msg_memory[ch->in] = msg_memory;
    ++ch->count;
    /* Advance the ring-buffer write index. (The previous form
       `ch->in = (++ch->in % ch->size)` wrote `ch->in` twice in one
       expression — undefined behavior before C11.) */
    ch->in = (ch->in + 1) % ch->size;
  }
  mzrt_mutex_unlock(ch->lock);

  if (!cnt && ch->wakeup_signal) {
    /* wake up possibly sleeping receiver */
    scheme_signal_received_at(ch->wakeup_signal);
  }
}
/* Reports whether child `pid` has terminated; on success the recorded
   exit status is written through `status` and the record is removed.
   For a process group, first polls the pid directly, since the
   background worker does not wait on other process groups. */
int scheme_get_child_status(int pid, int is_group, int *status)
{
  int found = 0;

  if (is_group) {
    /* need to specifically try the pid, since we don't
       wait on other process groups in the background thread */
    pid_t pid2;
    int status; /* intentionally shadows the out-parameter: holds the raw waitpid status */
    do {
      pid2 = waitpid((pid_t)pid, &status, WNOHANG);
    } while ((pid2 == -1) && (errno == EINTR));
    if (pid2 > 0)
      add_child_status(pid, status);
  }

  mzrt_mutex_lock(child_status_lock);
  found = raw_get_child_status(pid, status, 1, 1, 1);
  mzrt_mutex_unlock(child_status_lock);
  /* printf("scheme_get_child_status found %i pid %i status %i\n", found, pid, *status); */

  return found;
}
/* Reports whether child `pid` has terminated; on success the recorded
   exit status is written through `status` and the record is removed.
   Always polls the pid directly first, in case the child is in its own
   process group and thus invisible to the background worker's wait. */
int scheme_get_child_status(int pid, int is_group, int *status)
{
  int found = 0;

  /* Check specific pid, in case the child has its own group
     (either given by Racket or given to itself): */
  {
    pid_t pid2;
    int status; /* intentionally shadows the out-parameter: holds the raw waitpid status */
    do {
      pid2 = waitpid((pid_t)pid, &status, WNOHANG);
    } while ((pid2 == -1) && (errno == EINTR));
    if (pid2 > 0)
      add_child_status(pid, status);
  }

  mzrt_mutex_lock(child_status_lock);
  found = raw_get_child_status(pid, status, 1, 1, 1);
  mzrt_mutex_unlock(child_status_lock);
  /* printf("scheme_get_child_status found %i pid %i status %i\n", found, pid, *status); */

  return found;
}
Scheme_Type scheme_make_type(const char *name) { Scheme_Type newtype; if (!type_names) init_type_arrays(); #ifdef MZ_USE_PLACES mzrt_mutex_lock(type_array_mutex); #endif if (maxtype == allocmax) { /* Expand arrays */ void *naya; intptr_t n; allocmax += 20; naya = malloc(allocmax * sizeof(char *)); memcpy(naya, type_names, maxtype * sizeof(char *)); free(type_names); type_names = (char **)naya; naya = malloc(n = allocmax * sizeof(Scheme_Type_Reader)); memset((char *)naya, 0, n); memcpy(naya, scheme_type_readers, maxtype * sizeof(Scheme_Type_Reader)); free(scheme_type_readers); scheme_type_readers = (Scheme_Type_Reader *)naya; naya = malloc(n = allocmax * sizeof(Scheme_Type_Writer)); memset((char *)naya, 0, n); memcpy(naya, scheme_type_writers, maxtype * sizeof(Scheme_Type_Writer)); free(scheme_type_writers); scheme_type_writers = (Scheme_Type_Writer *)naya; naya = malloc(n = allocmax * sizeof(Scheme_Equal_Proc)); memset((char *)naya, 0, n); memcpy(naya, scheme_type_equals, maxtype * sizeof(Scheme_Equal_Proc)); free(scheme_type_equals); scheme_type_equals = (Scheme_Equal_Proc *)naya; naya = malloc(n = allocmax * sizeof(Scheme_Primary_Hash_Proc)); memset((char *)naya, 0, n); memcpy(naya, scheme_type_hash1s, maxtype * sizeof(Scheme_Primary_Hash_Proc)); free(scheme_type_hash1s); scheme_type_hash1s = (Scheme_Primary_Hash_Proc *)naya; naya = malloc(n = allocmax * sizeof(Scheme_Secondary_Hash_Proc)); memset((char *)naya, 0, n); memcpy(naya, scheme_type_hash2s, maxtype * sizeof(Scheme_Secondary_Hash_Proc)); free(scheme_type_hash2s); scheme_type_hash2s = (Scheme_Secondary_Hash_Proc *)naya; #ifdef MEMORY_COUNTING_ON scheme_type_table_count += 20 * (sizeof(Scheme_Type_Reader) + sizeof(Scheme_Type_Writer)); scheme_misc_count += (20 * sizeof(char *)); #endif } { char *tn; int len; len = strlen(name) + 1; tn = (char *)malloc(len); memcpy(tn, name, len); type_names[maxtype] = tn; } newtype = maxtype; maxtype++; #ifdef MZ_USE_PLACES mzrt_mutex_unlock(type_array_mutex); #endif 
return newtype; }
static void *mz_proc_thread_signal_worker(void *data) { int status; int pid, check_pid, is_group; sigset_t set; Child_Status *unused_status, *prev_unused, *next; sigemptyset(&set); sigaddset(&set, SIGCHLD); while (1) { int rc; int signalid; do { rc = sigwait(&set, &signalid); if (rc == -1) { if (errno != EINTR) { fprintf(stderr, "unexpected error from sigwait(): %d\n", errno); } } } while (rc == -1 && errno == EINTR); mzrt_mutex_lock(child_status_lock); do_group_signal_fds(); mzrt_mutex_unlock(child_status_lock); mzrt_mutex_lock(child_wait_lock); unused_status = unused_pid_statuses; prev_unused = NULL; do { if (unused_status) { /* See unused_pid_statuses above */ check_pid = unused_status->pid; is_group = 1; } else { /* We wait only on processes in the same group as Racket, because detecting the termination of a group's main process disables our ability to terminate all processes in the group. */ check_pid = 0; /* => processes in the same group as Racket */ is_group = 0; } pid = waitpid(check_pid, &status, WNOHANG); if (pid == -1) { if (errno == EINTR) { /* try again */ pid = 1; } else if (!is_group && (errno == ECHILD)) { /* no more to check */ } else { fprintf(stderr, "unexpected error from waitpid(%d[%d]): %d\n", check_pid, is_group, errno); if (is_group) { prev_unused = unused_status; unused_status = unused_status->next; } } } else if (pid > 0) { /* printf("SIGCHILD pid %i with status %i %i\n", pid, status, WEXITSTATUS(status)); */ if (is_group) { next = unused_status->next; if (prev_unused) prev_unused->next_unused = next; else unused_pid_statuses = next; free(unused_status); unused_status = next; } else add_child_status(pid, status); } else { if (is_group) { prev_unused = unused_status; unused_status = unused_status->next; } } } while ((pid > 0) || is_group); mzrt_mutex_unlock(child_wait_lock); } return NULL; }
static void *mz_proc_thread_signal_worker(void *data) { int status; int pid, check_pid, is_group; sigset_t set; Child_Status *group_status, *prev_group, *next; sigemptyset(&set); sigaddset(&set, SIGCHLD); while (1) { int rc; int signalid; do { rc = sigwait(&set, &signalid); if (rc == -1) { if (errno != EINTR) { fprintf(stderr, "unexpected error from sigwait(): %d\n", errno); } } } while (rc == -1 && errno == EINTR); mzrt_mutex_lock(child_status_lock); do_group_signal_fds(); mzrt_mutex_unlock(child_status_lock); mzrt_mutex_lock(child_wait_lock); group_status = child_group_statuses; prev_group = NULL; do { if (group_status) { check_pid = group_status->pid; is_group = 1; } else { check_pid = 0; /* => processes in the same group as Racket */ is_group = 0; } pid = waitpid(check_pid, &status, WNOHANG); if (pid == -1) { if (errno == EINTR) { /* try again */ pid = 1; } else if (!is_group && (errno == ECHILD)) { /* no more to check */ } else { fprintf(stderr, "unexpected error from waitpid(%d[%d]): %d\n", check_pid, is_group, errno); if (is_group) { prev_group = group_status; group_status = group_status->next; } } } else if (pid > 0) { /* printf("SIGCHILD pid %i with status %i %i\n", pid, status, WEXITSTATUS(status)); */ if (is_group) { next = group_status->next; if (prev_group) prev_group->next_group = next; else child_group_statuses = next; free(group_status); group_status = next; } else add_child_status(pid, status); } else { if (is_group) { prev_group = group_status; group_status = group_status->next; } } } while ((pid > 0) || is_group); mzrt_mutex_unlock(child_wait_lock); } return NULL; }
/* Releases child_wait_lock so the SIGCHLD worker thread can resume
   scanning for terminated children. NOTE(review): presumably pairs
   with a scheme_wait_suspend() that takes the lock — that counterpart
   is not visible in this chunk; confirm the pairing. */
void scheme_wait_resume()
{
  mzrt_mutex_unlock(child_wait_lock);
}