Example #1
/*
 * Pushes a -rtws task to the latest rq if its currently executing task has
 * lower priority.
 * Only remote rqs are considered here.
 */
static int push_latest_rtws(struct rq *this_rq, struct task_struct *p, int target_cpu)
{
    struct rq *target_rq;
    int ret = 0;

    target_rq = cpu_rq(target_cpu);

    /* We might release rq lock */
    get_task_struct(p);

    printk(KERN_INFO "check preempting other %llu - %llu\n", p->rtws.job.deadline, target_rq->rtws.earliest_dl);

    double_lock_balance(this_rq, target_rq);

    /* TODO: check whether a task was dispatched here in the meantime */
    if (target_rq->rtws.nr_running && !time_before_rtws(p->rtws.job.deadline, target_rq->rtws.earliest_dl))
        goto unlock;

    set_task_cpu(p, target_cpu);
    activate_task(target_rq, p, ENQUEUE_HEAD);
    ret = 1;

    resched_task(target_rq->curr);

unlock:
    double_unlock_balance(this_rq, target_rq);
    put_task_struct(p);

    return ret;
}
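
The push decision above hinges on time_before_rtws(), which is not part of this snippet. Such "earlier deadline" tests are usually wrap-safe signed comparisons in the style of the mainline dl_time_before(); the standalone sketch below is an assumption about its shape, not the actual RTWS code.

/*
 * Hypothetical sketch of a wrap-safe "deadline a is earlier than deadline b"
 * test, modeled on the mainline dl_time_before() idiom. The real
 * time_before_rtws() is not shown above, so treat this as an assumption.
 */
#include <stdint.h>
#include <stdio.h>

static inline int time_before_rtws(uint64_t a, uint64_t b)
{
	/* Signed difference keeps the comparison correct across wraparound. */
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", time_before_rtws(100, 200));      /* 1: 100 is earlier */
	printf("%d\n", time_before_rtws(200, 100));      /* 0: 200 is later   */
	printf("%d\n", time_before_rtws(UINT64_MAX, 5)); /* 1: wrapped value  */
	return 0;
}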
Example #2
/*
 * Only "root" tasks with no children are allowed to change
 * scheduling parameters! Anything else will be removed from the rq
 * and throttled.
 *
 * If the scheduling parameters of a -rtws task changed,
 * a resched might be needed.
 */
static void
prio_changed_rtws(struct rq *rq, struct task_struct *p,
                  int oldprio)
{
    /* rq != task_rq(p) */
    struct sched_rtws_entity *rtws_se = &p->rtws;

    printk(KERN_INFO "********prio changed, task %d pjob %d on cpu %d*******\n", p->pid, rtws_se->job, rq->cpu);

    if (rtws_se->parent || rtws_se->nr_pjobs > 0) {
        printk(KERN_INFO "problems...\n");
        WARN_ON(1);
        dequeue_task_rtws(rq, p, 0);
        rtws_se->throttled = 1;
        return;
    }

    /*
     * We don't know if p has an earlier
     * or later deadline, so let's blindly set a
     * (maybe not needed) rescheduling point.
     */
    resched_task(p);
}
Example #3
static void switched_to_idle(struct rq *rq, struct task_struct *p,
			     int running)
{
	/* Can this actually happen?? */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
Example #4
static void check_preempt_curr_dummy(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		// dequeue_task_dummy(rq, rq->curr, flags);
		// enqueue_task_dummy(rq, rq->curr, flags);
		printk(KERN_CRIT "preempt: %d\n", p->pid);
		resched_task(rq->curr);
	}
}
Example #5
void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
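
resched_cpu() only tries to take the remote rq lock and silently gives up if it is contended, presumably on the grounds that whoever holds the lock is already manipulating that runqueue and a reschedule will follow anyway. A minimal user-space sketch of the same try-lock-or-skip idiom (pthread names are used purely for illustration; this is not kernel code):

/*
 * User-space model of the best-effort pattern in resched_cpu(): if the lock
 * is contended, skip the work instead of blocking. Illustration only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;

static void kick_if_uncontended(int cpu)
{
	if (pthread_spin_trylock(&lock) != 0)
		return;		/* lock is busy: give up, best effort only */
	printf("rescheduling cpu %d\n", cpu);
	pthread_spin_unlock(&lock);
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	kick_if_uncontended(0);
	pthread_spin_destroy(&lock);
	return 0;
}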
Example #6
static void check_preempt_curr_dummy(struct rq *rq, struct task_struct *p, int flags)
{
  /* Two cases are handled here:
   *   - a process of higher priority becomes runnable; or
   *   - a process of the same priority as the running process becomes
   *     runnable, in which case the running process is preempted only if
   *     it has already exceeded its round-robin quantum.
   */
  if (rq->curr->prio > p->prio) {
    resched_task(rq->curr);
  } else if (rq->curr->prio == p->prio) {

    if (p->dummy_se.rr_tick_count >= get_timeslice()) {    
      dequeue_task_dummy(rq, rq->curr, 0);
      enqueue_task_dummy(rq, rq->curr, 0);
      resched_task(rq->curr);
    }
  }
}
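
The same-priority branch above depends on a per-entity tick counter reaching the timeslice, but neither the tick accounting nor get_timeslice() appears in the snippet. The standalone model below (field and constant names are made up for illustration) shows how such a counter is typically driven from the periodic scheduler tick and reset when the quantum expires.

/*
 * Standalone model of round-robin quantum accounting. rr_tick_count and the
 * reset policy are assumptions; only the general tick-until-quantum-expires
 * idea is taken from the example above.
 */
#include <stdio.h>

#define TIMESLICE_TICKS 5

struct dummy_se {
	int rr_tick_count;
};

/* Called once per scheduler tick for the running entity. */
static int task_tick(struct dummy_se *se)
{
	if (++se->rr_tick_count >= TIMESLICE_TICKS) {
		se->rr_tick_count = 0;	/* quantum used up: requeue and resched */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct dummy_se se = { 0 };
	int tick;

	for (tick = 1; tick <= 12; tick++)
		if (task_tick(&se))
			printf("tick %d: quantum expired, round-robin requeue\n", tick);
	return 0;
}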
Example #7
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}
Example #8
/*
 * We switched to the sched_other_rr class.
 */
static void switched_to_other_rr(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * Kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
Example #9
/*
 * Only called when both the current task and p are -rtws
 * tasks.
 */
static void
check_preempt_curr_rtws(struct rq *rq, struct task_struct *p, int flags)
{
    printk("RTWS :: ********check preempt curr, task %d on cpu %d*******\n", p->pid, rq->cpu);
    /*if (dl_task(p)) {
        resched_task(rq->curr);
        return;
    }*/

    if (!is_leftmost_pjob(&rq->rtws, &rq->curr->rtws))
        resched_task(rq->curr);

}
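
check_preempt_curr_rtws() preempts whenever the current task's pjob is no longer the leftmost (earliest-deadline) entry of the rq's tree. is_leftmost_pjob() itself is not shown; the minimal model below captures the underlying EDF decision with a plain array instead of the rb-tree, purely as an illustration.

/*
 * Minimal model of the EDF decision behind is_leftmost_pjob(): the running
 * job keeps the CPU only while no queued job has an earlier deadline. The
 * real helper inspects an rb-tree; this array version is illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static int is_leftmost(uint64_t curr_deadline, const uint64_t *queued, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (queued[i] < curr_deadline)
			return 0;	/* a queued job is more urgent: preempt curr */
	return 1;
}

int main(void)
{
	uint64_t queued[] = { 300, 250, 400 };

	printf("%d\n", is_leftmost(200, queued, 3));	/* 1: curr stays      */
	printf("%d\n", is_leftmost(260, queued, 3));	/* 0: 250 preempts it */
	return 0;
}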
Example #10
static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/* This can happen for hot-plug CPUs */

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
Example #11
/*
 * At this point we know the task is not on its rtws_rq because the timer was
 * running, which means the task has finished its last instance and was
 * waiting for the next activation period.
 */
static enum hrtimer_restart timer_rtws(struct hrtimer *timer)
{
    unsigned long flags;
    struct sched_rtws_entity *rtws_se = container_of(timer,
                             struct sched_rtws_entity,
                             timer);

    struct task_struct *p = task_of_rtws_se(rtws_se);
    struct rq *rq = task_rq_lock(p, &flags);

    printk(KERN_INFO "**task %d state %ld on rq %d timer fired at time %Ld on cpu %d**\n", p->pid, p->state, p->se.on_rq, rq->clock, rq->cpu);

    /*
     * We need to take care of possible races here. In fact, the
     * task might have changed its scheduling policy to something
     * different from SCHED_RTWS (through sched_setscheduler()).
     */
    if (!rtws_task(p))
        goto unlock;

    WARN_ON(rtws_se->parent);

    /*
     * To avoid contention on the global rq and reduce some overhead,
     * when a new task arrives and the local rq is idle, we make sure
     * it gets inserted on this rq. Otherwise we try to find a
     * suitable rq for it. This way it only ends up in the global rq
     * when all rqs are busy and it has the lowest priority (latest
     * deadline) compared to the running tasks. Nevertheless, since we
     * are dealing with a new instance, scheduling parameters must be
     * updated.
     */
    update_task_rtws(rq, rtws_se);

    if (rq->rtws.nr_running && dispatch_rtws(rq, p))
        goto unlock;

    activate_task(rq, p, ENQUEUE_HEAD);
    resched_task(rq->curr);

unlock:
    task_rq_unlock(rq, p, &flags);

    return HRTIMER_NORESTART;
}
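
Before reactivating the task, the timer handler calls update_task_rtws() to refresh the scheduling parameters for the new instance; that helper is not part of the snippet. Under a conventional periodic/sporadic task model this boils down to advancing the release time by one period and deriving the new absolute deadline from it, as in the standalone sketch below (all field names are assumptions).

/*
 * Sketch of the per-instance parameter update that update_task_rtws()
 * presumably performs on a new job release: advance the release time by one
 * period and recompute the absolute deadline. Names are assumptions only.
 */
#include <stdint.h>
#include <stdio.h>

struct rtws_job {
	uint64_t release;	/* absolute release time of the current instance */
	uint64_t deadline;	/* absolute deadline of the current instance     */
};

struct rtws_params {
	uint64_t period;	/* minimum inter-arrival time                     */
	uint64_t rel_deadline;	/* deadline relative to the release               */
};

static void update_job(struct rtws_job *job, const struct rtws_params *p)
{
	job->release += p->period;
	job->deadline = job->release + p->rel_deadline;
}

int main(void)
{
	struct rtws_params params = { .period = 1000, .rel_deadline = 800 };
	struct rtws_job job = { .release = 0, .deadline = 800 };

	update_job(&job, &params);
	printf("next release %llu, next deadline %llu\n",
	       (unsigned long long)job.release,
	       (unsigned long long)job.deadline);	/* 1000 and 1800 */
	return 0;
}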
Example #12
/*
 * Tries to push a -rtws task to a "random" idle rq.
 */
static int push_idle_rtws(struct rq *this_rq, struct task_struct *p)
{
    struct rq *target_rq;
    int ret = 0, target_cpu;
    struct cpudl *cp = &this_rq->rd->rtwsc_cpudl;

retry:
    target_cpu = find_idle_cpu_rtws(cp);

    if (target_cpu == -1)
        return 0;

    printk(KERN_INFO "idle cpu %d\n", target_cpu);

    target_rq = cpu_rq(target_cpu);

    /* We might release rq lock */
    get_task_struct(p);

    double_lock_balance(this_rq, target_rq);

    if (unlikely(target_rq->rtws.nr_running)) {
        double_unlock_balance(this_rq, target_rq);
        put_task_struct(p);
        target_rq = NULL;
        goto retry;
    }

    set_task_cpu(p, target_cpu);
    activate_task(target_rq, p, 0);

    ret = 1;
    resched_task(target_rq->curr);

    double_unlock_balance(this_rq, target_rq);
    put_task_struct(p);

    return ret;
}
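
push_idle_rtws() re-runs the idle-CPU lookup whenever the chosen rq turns out to be busy once both locks are held: the lookup is done without the target's rq lock, so its answer can be stale by the time the task would be placed. find_idle_cpu_rtws() itself is not shown; the model below stands in for it with a simple scan over per-CPU counters, for illustration only.

/*
 * Minimal stand-in for the idle-CPU lookup that push_idle_rtws() retries:
 * return any CPU that currently reports no runnable -rtws tasks, or -1.
 * The real find_idle_cpu_rtws() consults rd->rtwsc_cpudl; this array scan
 * is an illustration only.
 */
#include <stdio.h>

#define NR_CPUS 4

static int nr_running[NR_CPUS] = { 2, 0, 1, 0 };

static int find_idle_cpu(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (nr_running[cpu] == 0)
			return cpu;
	return -1;	/* no idle CPU: the caller falls back to other paths */
}

int main(void)
{
	printf("idle cpu: %d\n", find_idle_cpu());	/* prints 1 */
	return 0;
}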
Example #13
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}
Example #14
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_task(rq->idle);
}
Example #15
static void yield_task_dummy(struct rq *rq)
{
  dequeue_task_dummy(rq, rq->curr, 0);
  enqueue_task_dummy(rq, rq->curr, 0);
  resched_task(rq->curr);
}