Example #1
/* Switch to the next task in the run queue. */
static void
mm_task_switch(mm_task_state_t state)
{
	// Move the currently running task to a new state.
	struct mm_task *old_task = mm_core->task;
	ASSERT(old_task->state == MM_TASK_RUNNING);
	old_task->state = state;

	if (unlikely(state == MM_TASK_INVALID)) {
		// Add it to the dead task list.
		mm_list_append(&mm_core->dead, &old_task->queue);
	} else {
		// Reset the priority that could have been temporarily raised.
		old_task->priority = old_task->original_priority;
		if (state == MM_TASK_PENDING) {
			// Add it to the run queue.
			mm_runq_put(&mm_core->runq, old_task);
		}
	}

	// Get the next task from the run queue.  Whenever this function is
	// called the run queue contains at least the boot task, so the
	// returned value should never be NULL.
	struct mm_task *new_task = mm_runq_get(&mm_core->runq);
	new_task->state = MM_TASK_RUNNING;
	mm_core->task = new_task;

	// Switch to the new task relinquishing CPU control for a while.
	mm_stack_switch(&old_task->stack_ctx, &new_task->stack_ctx);

	// Resume the task unless it has been canceled and it agrees to be
	// canceled asynchronously. In that case it quits here.
	mm_task_testcancel_asynchronous();
}
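
To show how the state argument steers the switch, here is a minimal sketch of two hypothetical wrappers; the wrapper names are made up and not taken from the MainMemory sources, only the constants used above are.

/* Hypothetical wrappers, for illustration only. */
static void
task_yield_sketch(void)
{
	/* MM_TASK_PENDING keeps the task runnable: it is put back on the
	   run queue and will get the CPU again later. */
	mm_task_switch(MM_TASK_PENDING);
}

static void
task_exit_sketch(void)
{
	/* MM_TASK_INVALID retires the task: it is moved to the dead list
	   and this call never returns. */
	mm_task_switch(MM_TASK_INVALID);
}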
Example #2
static void
mm_timeq_insert_t1(struct mm_timeq *timeq, struct mm_timeq_entry *entry)
{
	/* Compute the T1 bucket for the entry: buckets start at t1_start
	   and each bucket covers t1_width units of the value range. */
	int index = timeq->t1_index + (entry->value - timeq->t1_start) / timeq->t1_width;
	ASSERT(index < timeq->t1_count);

	/* Append the entry to its bucket and remember the bucket index. */
	mm_list_append(&timeq->t1[index], &entry->queue);
	entry->index = index;

	DEBUG("entry: %p, t1 index: %d", entry, index);
}
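
For a concrete feel of the bucket arithmetic, here is a worked computation with made-up numbers (none of these values come from the project):

/* Assumed for illustration only: t1_index = 0, t1_start = 1000,
 * t1_width = 10, t1_count = 64.  An entry with value = 1037 maps to
 *
 *     index = 0 + (1037 - 1000) / 10 = 3
 *
 * so it is appended to timeq->t1[3] and entry->index is set to 3. */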
Example #3
struct mm_port *
mm_port_create(struct mm_task *task)
{
	ENTER();

	/* Allocate and initialize the port. */
	struct mm_port *port = mm_global_alloc(sizeof(struct mm_port));
	port->lock = (mm_regular_lock_t) MM_REGULAR_LOCK_INIT;
	port->task = task;
	port->start = 0;
	port->count = 0;
	mm_waitset_prepare(&port->blocked_senders);

	/* Register the port with its owner task. */
	mm_list_append(&task->ports, &port->ports);

	LEAVE();
	return port;
}
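
The append above links the port's embedded `ports` node into the owning task's list. A sketch of walking back from that node to the port, assuming the mm_list_head()/containerof() pattern shown in Example #6 below and a non-empty list:

/* Sketch only, not taken from the MainMemory sources. */
struct mm_link *node = mm_list_head(&task->ports);
struct mm_port *first_port = containerof(node, struct mm_port, ports);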
Example #4
static void
mm_timeq_insert_t2(struct mm_timeq *timeq, struct mm_timeq_entry *entry)
{
	/* Append the entry to the T2 list and mark it as residing there. */
	mm_list_append(&timeq->t2, &entry->queue);
	entry->index = MM_TIMEQ_INDEX_T2;
	timeq->t2_num++;

	/* Track the minimum and maximum values stored in T2. */
	if (timeq->t2_min > entry->value) {
		timeq->t2_min = entry->value;
	}
	if (timeq->t2_max < entry->value) {
		timeq->t2_max = entry->value;
	}

	DEBUG("entry: %p, t2 num: %d", entry, timeq->t2_num);
}
Example #5
void *
mm_task_alloc(size_t size)
{
	ENTER();
	ASSERT(size > 0);

	/* Allocate the requested memory plus some extra for the list link. */
	void *ptr = mm_local_alloc(size + sizeof(struct mm_list));

	/* Keep the allocated memory in the task's chunk list. */
	struct mm_task *task = mm_task_self();
	mm_list_append(&task->chunks, (struct mm_list *) ptr);

	/* Get the address past the list link. */
	ptr = (void *) (((char *) ptr) + sizeof(struct mm_list));

	LEAVE();
	return ptr;
}
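
The returned pointer sits just past the embedded list link, so each chunk has roughly the following layout (a sketch inferred from the code above; `struct my_data` is hypothetical):

/* Chunk layout implied by mm_task_alloc (sketch only):
 *
 *   [ struct mm_list link | size bytes of user data ]
 *     ^ node kept on         ^ pointer returned to
 *       task->chunks           the caller
 *
 * Callers use the result as ordinary memory, e.g.:
 *
 *   struct my_data *d = mm_task_alloc(sizeof(struct my_data));
 */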
Example #6
void
mm_task_combiner_execute(struct mm_task_combiner *combiner,
			 mm_combiner_routine_t routine, uintptr_t data)
{
	ENTER();

	// Disable cancellation as the enqueue algorithm cannot be
	// safely undone if interrupted in the middle.
	int cancelstate;
	mm_task_setcancelstate(MM_TASK_CANCEL_DISABLE, &cancelstate);

	// Get per-core queue of pending requests.
	mm_core_t core = mm_core_self();
	struct mm_list *wait_queue = MM_THREAD_LOCAL_DEREF(core, combiner->wait_queue);

	// Add the current request to the per-core queue.
	struct mm_task *task = mm_task_selfptr();
	task->flags |= MM_TASK_COMBINING;
	mm_list_append(wait_queue, &task->wait_queue);

	// Wait until the current request becomes the head of the
	// per-core queue.
	while (mm_list_head(wait_queue) != &task->wait_queue)
		mm_task_block();

	mm_combiner_execute(&combiner->combiner, routine, data);

	// Remove the request from the per-core queue.
	mm_list_delete(&task->wait_queue);
	task->flags &= ~MM_TASK_COMBINING;

	// If the per-core queue is not empty then let its new head take
	// the next turn.
	if (!mm_list_empty(wait_queue)) {
		struct mm_link *link = mm_list_head(wait_queue);
		task = containerof(link, struct mm_task, wait_queue);
		mm_task_run(task);
	}

	// Restore cancellation.
	mm_task_setcancelstate(cancelstate, NULL);

	LEAVE();
}
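
A hedged usage sketch follows: the routine type is assumed from the call above (a function taking a single uintptr_t argument), and every name that does not appear in the example is hypothetical.

/* Usage sketch only; the names below are hypothetical. */
static uint64_t total_count;

static void
count_routine(uintptr_t data)
{
	/* Executed under the combiner, serialized with other requests,
	   so no extra locking is needed here. */
	total_count += data;
}

static void
count_add(struct mm_task_combiner *combiner, uint64_t n)
{
	mm_task_combiner_execute(combiner, count_routine, (uintptr_t) n);
}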