/**
 * Set the task's scheduler (and its deadline parameters) internally in
 * the Linux kernel.
 */
static int dag_set_scheduler(resch_task_t *rt, int prio)
{
    struct sched_attr sa;
    int retval;

    struct sched_dl_entity *dl_se = &rt->task->dl;
    struct rq *rq = get_rq_of_task(rt->task);

    rt->dl_runtime = &dl_se->runtime;
    rt->dl_deadline = &dl_se->deadline;
    rt->rq_clock = &rq->clock;
    rt->dl_sched_release_time = 0;

    memset(&sa, 0, sizeof(struct sched_attr));

    sa.sched_policy = RESCH_SCHED_DAG;
    sa.size = sizeof(struct sched_attr);
    sa.sched_flags = 0;
    sa.sched_deadline = jiffies_to_nsecs(rt->deadline);
    sa.sched_period = jiffies_to_nsecs(rt->period);
    sa.sched_runtime = rt->runtime * 1000; /* microseconds to nanoseconds */

    RESCH_DPRINT("@@@@sched_DAG status@@@@");
    RESCH_DPRINT("policy   = %16lu\n", sa.sched_policy);
    RESCH_DPRINT("deadline = %16lu\n", sa.sched_deadline);
    RESCH_DPRINT("period   = %16lu\n", sa.sched_period);
    RESCH_DPRINT("runtime  = %16lu\n", sa.sched_runtime);
    RESCH_DPRINT("now_time = %16lu jiffies\n",jiffies);
    RESCH_DPRINT("@@@@@@@@@@@@\n");
    if (sa.sched_runtime < (2 << (DL_SCALE - 1))) {
        printk(KERN_WARNING "RESCH: runtime too small; it must be at least %d ns.\n",
               2 << (DL_SCALE - 1));
    }

    rcu_read_lock();
    if (rt->task == NULL || (retval = sched_setattr(rt->task, &sa)) < 0) {
        rcu_read_unlock();
        printk(KERN_WARNING "RESCH: dag_set_scheduler() failed.\n");
        printk(KERN_WARNING "RESCH: task#%d (process#%d) priority=%d.\n",
               rt->rid, rt->task ? rt->task->pid : -1, prio);
        return false;
    }
    rcu_read_unlock();

    rt->prio = prio;
    if (task_has_reserve(rt)) {
        rt->task->dl.flags &= ~SCHED_EXHAUSTIVE;
        rt->task->dl.flags |= SCHED_FCBS;
        /* you can additionally set the following flag, if desired:
           rt->task->dl.flags |= SCHED_FCBS_NO_CATCH_UP; */
    }
    else {
        rt->task->dl.flags |= SCHED_EXHAUSTIVE;
        rt->task->dl.flags &= ~SCHED_FCBS;
    }

    return true;
}
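For reference, the same deadline parameters can be set from userspace through the raw sched_setattr(2) syscall, which has no glibc wrapper. Below is a minimal sketch: since RESCH_SCHED_DAG is a RESCH-specific policy, it uses plain SCHED_DEADLINE instead, and the 10 ms / 30 ms values are purely illustrative (running it requires CAP_SYS_NICE).

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Userspace mirror of the kernel's struct sched_attr. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;   /* ns */
	uint64_t sched_deadline;  /* ns */
	uint64_t sched_period;    /* ns */
};

int main(void)
{
	struct sched_attr sa;

	memset(&sa, 0, sizeof(sa));
	sa.size = sizeof(sa);
	sa.sched_policy = SCHED_DEADLINE;
	sa.sched_runtime  = 10 * 1000 * 1000;  /* 10 ms */
	sa.sched_deadline = 30 * 1000 * 1000;  /* 30 ms */
	sa.sched_period   = 30 * 1000 * 1000;  /* 30 ms */

	/* pid 0 means the calling thread; no flags are defined here. */
	if (syscall(SYS_sched_setattr, 0, &sa, 0) < 0) {
		perror("sched_setattr");
		return 1;
	}
	/* ... deadline-scheduled work here ... */
	return 0;
}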
Example #2
void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		return;
	}

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}
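Here psi_period converts PSI_FREQ, the PSI sampling window in jiffies (roughly two seconds' worth in kernel/sched/psi.c), into nanoseconds. A standalone sketch of that conversion, assuming HZ=250 so the tick length divides NSEC_PER_SEC evenly; the real jiffies_to_nsecs() goes through jiffies_to_usecs() and also handles awkward HZ values:

#include <stdio.h>
#include <stdint.h>

#define HZ           250u            /* assumed tick rate */
#define NSEC_PER_SEC 1000000000ull

/* Simplified jiffies -> ns conversion for evenly dividing HZ values. */
static uint64_t jiffies_to_nsecs(unsigned long j)
{
	return (uint64_t)j * (NSEC_PER_SEC / HZ);
}

int main(void)
{
	unsigned long psi_freq = 2 * HZ + 1;  /* PSI_FREQ: ~2s of jiffies */

	printf("psi_period = %llu ns\n",
	       (unsigned long long)jiffies_to_nsecs(psi_freq));
	return 0;
}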
Example #3
static void record_times(struct psi_group_cpu *groupc, int cpu,
			 bool memstall_tick)
{
	u32 delta;
	u64 now;

	now = cpu_clock(cpu);
	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (test_state(groupc->tasks, PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (test_state(groupc->tasks, PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (test_state(groupc->tasks, PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (test_state(groupc->tasks, PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
		else if (memstall_tick) {
			u32 sample;
			/*
			 * Since we care about lost potential, a
			 * memstall is FULL when there are no other
			 * working tasks, but also when the CPU is
			 * actively reclaiming and nothing productive
			 * could run even if it were runnable.
			 *
			 * When the timer tick sees a reclaiming CPU,
			 * regardless of runnable tasks, sample a FULL
			 * tick (or less if it hasn't been a full tick
			 * since the last state change).
			 */
			sample = min(delta, (u32)jiffies_to_nsecs(1));
			groupc->times[PSI_MEM_FULL] += sample;
		}
	}

	if (test_state(groupc->tasks, PSI_CPU_SOME))
		groupc->times[PSI_CPU_SOME] += delta;

	if (test_state(groupc->tasks, PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}
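The memstall branch charges at most one tick's worth of nanoseconds per timer tick: min(delta, jiffies_to_nsecs(1)) yields a full tick in the common case, or the shorter elapsed span when the state changed mid-tick. A standalone sketch of that clamp, again under an assumed HZ of 250:

#include <stdio.h>
#include <stdint.h>

#define HZ           250u                  /* assumed tick rate */
#define NSEC_PER_SEC 1000000000ull
#define TICK_NSEC    (NSEC_PER_SEC / HZ)   /* jiffies_to_nsecs(1) */

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* A full tick (or more) since the last state change: charge one tick. */
	uint32_t delta = 2 * (uint32_t)TICK_NSEC;
	printf("sample = %u ns (clamped to one tick)\n",
	       min_u32(delta, (uint32_t)TICK_NSEC));

	/* State changed mid-tick: charge only the elapsed fraction. */
	delta = (uint32_t)TICK_NSEC / 3;
	printf("sample = %u ns (partial tick)\n",
	       min_u32(delta, (uint32_t)TICK_NSEC));
	return 0;
}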