Example #1
/*
 * Read the task's accumulated execution time under its runqueue lock,
 * so the 64-bit counter cannot be observed while the scheduler is in
 * the middle of updating it.
 */
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
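Example #1 takes the runqueue lock only to get a consistent snapshot of a 64-bit counter that the scheduler may be updating concurrently. A minimal sketch of a hypothetical caller that uses it to measure CPU time consumed over a window; the helper name, the 100 ms interval and the msleep() call are illustrative assumptions, not part of the original source:

/* Hypothetical helper: CPU time consumed by @t over roughly 100 ms. */
static u64 sample_exec_delta(struct task_struct *t)
{
	u64 before, after;

	before = read_sum_exec_runtime(t);
	msleep(100);		/* needs <linux/delay.h>; let the task run for a while */
	after = read_sum_exec_runtime(t);

	return after - before;	/* CPU time used, in nanoseconds */
}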
Example #2
/*
 * At this point we know the task is not on its rtws_rq because the
 * timer was running, which means the task has finished its last
 * instance and was waiting for its next activation period.
 */
static enum hrtimer_restart timer_rtws(struct hrtimer *timer)
{
    unsigned long flags;
    struct sched_rtws_entity *rtws_se = container_of(timer,
                             struct sched_rtws_entity,
                             timer);

    struct task_struct *p = task_of_rtws_se(rtws_se);
    struct rq *rq = task_rq_lock(p, &flags);

    printk(KERN_INFO "**task %d state %ld on rq %d timer fired at time %llu on cpu %d**\n",
           p->pid, p->state, p->se.on_rq, rq->clock, rq->cpu);

    /*
     * We need to take care of possible races here. In fact, the
     * task might have changed its scheduling policy to something
     * different from SCHED_RTWS (through sched_setscheduler()).
     */
    if (!rtws_task(p))
        goto unlock;

    WARN_ON(rtws_se->parent);

    /*
     * To avoid contention on the global rq and reduce some overhead,
     * when a new task arrives and the local rq is idle, we make sure
     * it gets inserted on this rq. Otherwise we try to find a
     * suitable rq for it. This way it only ends up in the global rq
     * when all rqs are busy and it has the lowest priority (latest
     * deadline) compared to the running tasks. Nevertheless, since we
     * are dealing with a new instance, the scheduling parameters must
     * be updated.
     */
    update_task_rtws(rq, rtws_se);

    if (rq->rtws.nr_running && dispatch_rtws(rq, p))
        goto unlock;

    activate_task(rq, p, ENQUEUE_HEAD);
    resched_task(rq->curr);

unlock:
    task_rq_unlock(rq, p, &flags);

    return HRTIMER_NORESTART;
}
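The callback in Example #2 only ever runs because the per-entity hrtimer was armed when the previous instance completed. For context, here is a minimal sketch of how such a timer might be set up and armed for an absolute activation time; the start_rtws_timer() name and the next_activation parameter are assumptions, while hrtimer_init(), hrtimer_start() and ns_to_ktime() are the stock kernel hrtimer APIs:

static void start_rtws_timer(struct sched_rtws_entity *rtws_se, u64 next_activation)
{
    struct hrtimer *timer = &rtws_se->timer;

    /* Normally done once when the entity is initialized. */
    hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    timer->function = timer_rtws;

    /* Fire at the absolute time of the next activation period. */
    hrtimer_start(timer, ns_to_ktime(next_activation), HRTIMER_MODE_ABS);
}

Arming in HRTIMER_MODE_ABS keeps activations anchored to absolute period boundaries instead of drifting with relative restarts.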
Example #3
/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags = 0;
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	if (task_on_rq_queued(task))
		task_flags = TSK_RUNNING;
	else if (task->in_iowait)
		task_flags = TSK_IOWAIT;

	if (task->flags & PF_MEMSTALL)
		task_flags |= TSK_MEMSTALL;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}
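All three examples share the same skeleton: task_rq_lock() pins the task to its runqueue (and disables interrupts) so neither migration nor concurrent scheduler updates can race with the caller, the per-task state is then read or modified, and task_rq_unlock() drops the lock. (Example #2 is from an older tree in which task_rq_lock() still took an unsigned long *flags argument rather than struct rq_flags.) A bare-bones sketch of the pattern, with a placeholder comment where the real work goes:

static void with_task_rq_locked(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	/* Blocks migration and concurrent scheduler-state updates on p. */
	rq = task_rq_lock(p, &rf);

	/* ... inspect or update p's scheduling state here ... */

	task_rq_unlock(rq, p, &rf);
}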