// TODO: some way of telling when actually ... use readline to know
static bool anybody_alive(conf_object_t *cpu, struct test_state *t,
			  struct sched_state *s)
{
	struct agent *shell;

	if (t->test_ever_caused) {
		if (t->start_population == s->most_agents_ever) {
			/* Then the shell hasn't even spawned it yet. */
			return true;
		} else if (t->start_population != s->num_agents) {
			/* Shell's descendants are still alive. */
			assert(t->start_population < s->num_agents);
			return true;
		}
	}

	/* Now we are either before the beginning of the test case, or waiting
	 * for the shell and init to clean up the last remains. Either way, wait
	 * for shell and init to finish switching back and forth until both of
	 * them are suitably blocked. */

	/* In atomic scheduler paths, both threads might be off the runqueue
	 * (i.e., one going to sleep and switching to the other). Since we
	 * assume the scheduler is sane, this condition should hold then. */
	if (!kern_ready_for_timer_interrupt(cpu) || !interrupts_enabled(cpu)) {
		return true;
	}

	/* If the shell is alive and blocked in readline, the test is over as
	 * soon as nobody else (besides, possibly, idle) can run. */
	if ((shell = agent_by_tid_or_null(&s->rq, kern_get_shell_tid())) ||
	    (shell = agent_by_tid_or_null(&s->dq, kern_get_shell_tid()))) {
		if (shell->action.readlining) {
			if (kern_has_idle()) {
				return s->cur_agent->tid != kern_get_idle_tid();
			} else {
				return (Q_GET_SIZE(&s->rq) != 0 ||
					Q_GET_SIZE(&s->sq) != 0);
			}
		} else {
			return true;
		}
	}

	/* If we get here, the shell wasn't even created yet..! */
	return true;
}
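/*
 * Sketch (not from the original source): anybody_alive() only makes sense
 * given how the three population counters evolve. A minimal model, with
 * hypothetical hook names, might look like this:
 */
#if 0
static void on_agent_created(struct sched_state *s)
{
	s->num_agents++;
	if (s->num_agents > s->most_agents_ever) {
		s->most_agents_ever = s->num_agents;	/* high-water mark */
	}
}

static void on_agent_destroyed(struct sched_state *s)
{
	s->num_agents--;
}

static void on_test_launched(struct test_state *t, struct sched_state *s)
{
	/* Snapshot the pre-test population (shell, init, idle, ...), so that
	 * start_population == most_agents_ever means the test thread has not
	 * been forked yet, and num_agents > start_population means its
	 * descendants are still alive. */
	t->start_population = s->num_agents;
	t->test_ever_caused = true;
}
#endif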
/* Register a malloced chunk as belonging to a particular mutex.
 * Will add the mutex to the list of all mutexes if it's not already there. */
void learn_malloced_mutex_structure(struct user_sync_state *u,
				    unsigned int lock_addr,
				    unsigned int chunk_addr,
				    unsigned int chunk_size)
{
	struct mutex *mp;
	assert(lock_addr != -1);
	Q_SEARCH(mp, &u->mutexes, nobe, mp->addr == (unsigned int)lock_addr);
	if (mp == NULL) {
		lsprintf(DEV, "created user mutex 0x%x (%u others)\n",
			 lock_addr, Q_GET_SIZE(&u->mutexes));
		mp = MM_XMALLOC(1, struct mutex);
		mp->addr = (unsigned int)lock_addr;
		Q_INIT_HEAD(&mp->chunks);
		Q_INSERT_FRONT(&u->mutexes, mp, nobe);
	}

	/* The original excerpt ends before the chunk is recorded; the lines
	 * below are a reconstruction, with the struct and field names
	 * (mutex_chunk, base, size) assumed from context. */
	struct mutex_chunk *c = MM_XMALLOC(1, struct mutex_chunk);
	c->base = chunk_addr;
	c->size = chunk_size;
	Q_INSERT_FRONT(&mp->chunks, c, nobe);
}
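/*
 * Sketch (assumed, not from the original source): the queue-macro calls
 * above imply roughly these structure shapes, written here against
 * 15-410-style variable queue macros (Q_NEW_HEAD/Q_NEW_LINK); all names
 * besides `addr`, `chunks`, `mutexes`, and `nobe` are guesses.
 */
#if 0
struct mutex_chunk {
	unsigned int base;	/* start address of the malloced chunk */
	unsigned int size;	/* its length in bytes */
	Q_NEW_LINK(struct mutex_chunk) nobe;
};

Q_NEW_HEAD(struct chunk_q, struct mutex_chunk);
Q_NEW_HEAD(struct mutex_q, struct mutex);

struct mutex {
	unsigned int addr;	/* userspace address of the lock itself */
	struct chunk_q chunks;	/* heap chunks belonging to the lock */
	Q_NEW_LINK(struct mutex) nobe;
};

struct user_sync_state {
	struct mutex_q mutexes;	/* all mutexes learned so far */
	/* ... */
};
#endif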
void main_loop(void)
{
	state.events_until_epoll = Q_GET_SIZE(&state.sched_queue);
#if MT_RUNTIME
	/* Spawn thread_count - 1 workers; the calling thread becomes the
	 * last worker. Handles are discarded since workers run forever. */
	pthread_t thread;
	for (int i = 0; i < thread_count - 1; i++) {
		int ret = pthread_create(&thread, NULL, work_loop_mt, NULL);
		if (ret != 0)
			fail(1, "pthread_create");
	}
	work_loop_mt(NULL);
#else
	work_loop();
#endif
}
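/*
 * Sketch (assumed, not from the original source): work_loop_mt is referenced
 * above but not shown. pthread_create requires the void *(*)(void *) start
 * routine signature, so a minimal adapter could look like this; a real
 * version would need the shared sched_queue to be lock-protected or
 * partitioned per worker.
 */
#if 0
static void *work_loop_mt(void *arg)
{
	(void)arg;	/* unused */
	work_loop();	/* never returns */
	return NULL;
}
#endif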
// A single-threaded main loop
void work_loop(void)
{
	for (;;) {
		thread_t *thread;
		if ((thread = Q_GET_HEAD(&state.sched_queue))) {
			Q_REMOVE(&state.sched_queue, thread, q_link);
			run_thread(thread);
			state.events_until_epoll--;
		}
		bool can_sleep = !Q_GET_HEAD(&state.sched_queue);
		if (can_sleep || state.events_until_epoll == 0) {
			struct timespec next_wakeup = get_next_wakeup();
			do_poll(can_sleep, next_wakeup);
			state.events_until_epoll = Q_GET_SIZE(&state.sched_queue);
			wakeup_sleepers(); // maybe not every time through?
		}
	}
}
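/*
 * Sketch (assumed, not from the original source): the contract do_poll()
 * appears to satisfy, with a hypothetical state.epoll_fd field and
 * timespec_to_ms() helper. It blocks until next_wakeup only when nothing is
 * runnable; otherwise it drains ready events without sleeping. Because
 * events_until_epoll is reset to the queue size at each poll, the loop runs
 * exactly the threads that were runnable then before polling again, so
 * threads woken by fresh events cannot starve the poller.
 */
#if 0
static void do_poll(bool can_sleep, struct timespec next_wakeup)
{
	struct epoll_event events[64];
	int timeout_ms = can_sleep ? timespec_to_ms(next_wakeup) : 0;
	int n = epoll_wait(state.epoll_fd, events, 64, timeout_ms);
	for (int i = 0; i < n; i++) {
		/* Each event's data pointer is assumed to carry the thread
		 * blocked on that fd; make it runnable again. */
		thread_t *t = events[i].data.ptr;
		Q_INSERT_TAIL(&state.sched_queue, t, q_link);
	}
}
#endif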