Пример #1
0
/* Create and initialize a bookkeeping record (struct agent) for a newly
 * forked thread TID, place it on the runqueue or descheduled queue, and
 * update the agent population counters.
 *
 * Call with whether or not the thread is created with a context-switch frame
 * crafted on its stack. Most threads would be; "init" may not be. */
static void agent_fork(struct sched_state *s, int tid, bool on_runqueue)
{
	struct agent *a = MM_XMALLOC(1, struct agent);

	a->tid = tid;
	/* Freshly forked threads start with no in-progress action flags set,
	 * except just_forked and the context-switch free pass below. */
	a->action.handling_timer = false;
	/* this is usually not true, but makes it easier on the student; see
	 * the free pass below. */
	a->action.context_switch = false;
	/* If being called from kern_init_threads, don't give the free pass. */
	/* NOTE(review): the comment above implies cs_free_pass should be
	 * conditional (perhaps on on_runqueue), but it is set to true
	 * unconditionally — confirm against the kern_init_threads caller. */
	a->action.cs_free_pass = true;
	a->action.forking = false;
	a->action.sleeping = false;
	a->action.vanishing = false;
	a->action.readlining = false;
	a->action.just_forked = true;
	a->action.mutex_locking = false;
	a->action.mutex_unlocking = false;
	a->action.schedule_target = false;
	/* Not blocked on anything yet; -1 is the "no tid / no address"
	 * sentinel used elsewhere in this module. */
	a->blocked_on = NULL;
	a->blocked_on_tid = -1;
	a->blocked_on_addr = -1;

	if (on_runqueue) {
		Q_INSERT_FRONT(&s->rq, a, nobe);
	} else {
		Q_INSERT_FRONT(&s->dq, a, nobe);
	}

	/* Track the current and high-water-mark agent counts. */
	s->num_agents++;
	if (s->num_agents > s->most_agents_ever) {
		s->most_agents_ever = s->num_agents;
	}
}
Пример #2
0
/* Record a scheduling choice (thread to run, plus optional transaction /
 * abort bookkeeping) on the arbiter's choice list. The abort set pointed to
 * by `aborts` is copied by value into the new node. */
void arbiter_append_choice(struct arbiter_state *r, unsigned int tid, bool txn, unsigned int xabort_code, struct abort_set *aborts)
{
	struct choice *next_choice = MM_XMALLOC(1, struct choice);

	next_choice->tid         = tid;
	next_choice->txn         = txn;
	next_choice->xabort_code = xabort_code;
	next_choice->aborts      = *aborts;

	Q_INSERT_FRONT(&r->choices, next_choice, nobe);
}
Пример #3
0
static void agent_wake(struct sched_state *s, int tid)
{
	struct agent *a = agent_by_tid_or_null(&s->dq, tid);
	if (a) {
		Q_REMOVE(&s->dq, a, nobe);
	} else {
		a = agent_by_tid(&s->sq, tid);
		Q_REMOVE(&s->sq, a, nobe);
	}
	Q_INSERT_FRONT(&s->rq, a, nobe);
}
Пример #4
0
/* If a thread-change happens to an agent on the sleep queue, that means it
 * has woken up but runnable() hasn't seen it yet. So put it on the dq, which
 * will satisfy whether or not runnable() triggers. No-op if TID is not on
 * the sleep queue. */
static void handle_unsleep(struct sched_state *s, int tid)
{
	struct agent *sleeper = agent_by_tid_or_null(&s->sq, tid);

	if (sleeper == NULL) {
		return;
	}

	Q_REMOVE(&s->sq, sleeper, nobe);
	Q_INSERT_FRONT(&s->dq, sleeper, nobe);
}
Пример #5
0
/* register a malloced chunk as belonging to a particular mutex.
 * will add mutex to the list of all mutexes if it's not already there. */
void learn_malloced_mutex_structure(struct user_sync_state *u, unsigned int lock_addr,
				    unsigned int chunk_addr, unsigned int chunk_size)
{
	struct mutex *mp;
	/* lock_addr is unsigned, so -1 converts to UINT_MAX here — used as
	 * the "no address" sentinel. */
	assert(lock_addr != -1);
	/* Linear search for a mutex already known at this lock address;
	 * leaves mp == NULL if none matches. */
	Q_SEARCH(mp, &u->mutexes, nobe, mp->addr == (unsigned int)lock_addr);
	if (mp == NULL) {
		/* First time seeing this lock: create a record with an empty
		 * chunk list and register it on the global mutex list. */
		lsprintf(DEV, "created user mutex 0x%x (%u others)\n",
			 lock_addr, Q_GET_SIZE(&u->mutexes));
		mp = MM_XMALLOC(1, struct mutex);
		mp->addr = (unsigned int)lock_addr;
		Q_INIT_HEAD(&mp->chunks);
		Q_INSERT_FRONT(&u->mutexes, mp, nobe);
	}
	/* NOTE(review): the definition continues beyond this excerpt —
	 * chunk_addr and chunk_size are not yet used above; the chunk
	 * registration presumably follows in the unseen remainder. */
Пример #6
0
/* Move TID from the runqueue to the descheduled queue. If TID is not where
 * the queue invariants say it should be, report a likely annotation bug
 * (with a stack trace) and abort. */
static void agent_deschedule(struct sched_state *s, int tid)
{
	struct agent *a = agent_by_tid_or_null(&s->rq, tid);
	if (a != NULL) {
		Q_REMOVE(&s->rq, a, nobe);
		Q_INSERT_FRONT(&s->dq, a, nobe);
	/* If it's not on the runqueue, we must have already special-case moved
	 * it off in the thread-change event. */
	} else if (agent_by_tid_or_null(&s->sq, tid) == NULL) {
		/* Either it's on the sleep queue, or it vanished. */
		/* NOTE(review): the comment above reads as inverted — this
		 * branch runs precisely when the thread is NOT on the sleep
		 * queue. Confirm the intended invariant against the
		 * thread-change handler. */
		if (agent_by_tid_or_null(&s->dq, tid) != NULL) {
			/* Found on the dq without having just moved it there
			 * ourselves: treat as mis-annotated kernel code and
			 * die loudly with a stack trace. (The stack buffer is
			 * not freed; moot, since assert(0) never returns.) */
			conf_object_t *cpu = SIM_get_object("cpu0");
			char *stack = stack_trace(cpu, GET_CPU_ATTR(cpu, eip),
						  s->cur_agent->tid);
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "TID %d is "
				 "already off the runqueue at tell_off_rq(); "
				 "probably incorrect annotations?\n", tid);
			lsprintf(ALWAYS, COLOUR_BOLD COLOUR_RED "Current stack: %s\n"
				 COLOUR_DEFAULT, stack);
			assert(0);
		}
	}
}
Пример #7
0
/* Put the currently-running agent to sleep: remove it from whatever queue it
 * currently occupies, then park it on the sleep queue.
 *
 * NOTE(review): s->cur_agent is read after current_dequeue(s); if that helper
 * mutates cur_agent, the ordering here matters — do not reorder. */
static void agent_sleep(struct sched_state *s)
{
	current_dequeue(s);
	Q_INSERT_FRONT(&s->sq, s->cur_agent, nobe);
}