Example #1
static void request_migration(edf_wm_task_t *et, int cpu_dst)
{
	unsigned long flags;
	resch_task_t *hp;

	INIT_LIST_HEAD(&et->migration_list);

	/* insert the task to the waiting list for the migration thread. */
	spin_lock_irqsave(&kthread[cpu_dst].lock, flags);
	list_add_tail(&et->migration_list, &kthread[cpu_dst].list);
	spin_unlock(&kthread[cpu_dst].lock);

	/* wake up the migration thread running on the destination CPU. */
	wake_up_process(kthread[cpu_dst].task);
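	/* put the migrating task to sleep and request a reschedule so that it
	 * stops running before the migration thread moves it. */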
	et->rt->task->state = TASK_UNINTERRUPTIBLE;
	set_tsk_need_resched(et->rt->task);
	local_irq_restore(flags);

	active_queue_lock(cpu_dst, &flags);
	hp = active_highest_prio_task(cpu_dst);
	if (hp) {
		set_tsk_need_resched(hp->task);
	}
	active_queue_unlock(cpu_dst, &flags);
	smp_send_reschedule(cpu_dst);
}
Example #2
/*******************************************************************************
 * inst_schedule - Probe for schedule
 * @p - Not used
 * @regs - Not used
 * @return - always 0
 * @Side Effects - Updates the task's counters and requests a reschedule
 *                 if the total executed instructions exceed INST_THRESHOLD
 *
 * Responsible for reading and updating the performance counters, and for
 * deciding when a task has executed enough.
 *******************************************************************************/
int inst_schedule(struct kprobe *p, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	if (!ts[cpu]
	    || TS_MEMBER(ts[cpu], seeker_scheduled) != SEEKER_MAGIC_NUMBER)
		return 0;

	if (is_blacklist_task(ts[cpu]))
		return 0;

	read_counters(cpu);
	if (TS_MEMBER(ts[cpu], interval) != interval_count)
		TS_MEMBER(ts[cpu], interval) = interval_count;
	TS_MEMBER(ts[cpu], inst) += pmu_val[cpu][0];
	TS_MEMBER(ts[cpu], re_cy) += pmu_val[cpu][1];
	TS_MEMBER(ts[cpu], ref_cy) += get_tsc_cycles();
	clear_counters(cpu);
	if (TS_MEMBER(ts[cpu], inst) > INST_THRESHOLD
	    || TS_MEMBER(ts[cpu], cpustate) != cur_cpu_state[cpu]
	    || TS_MEMBER(ts[cpu], interval) != interval_count) {
		set_tsk_need_resched(ts[cpu]);	/* lazy: we are getting into
						   schedule() anyway */
	}
	return 0;
}
Example #3
void litmus_reschedule_local(void)
{
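	/* If a task has already been picked for this CPU, mark it as the
	 * wrong choice so the scheduler picks again; if a task is (or should
	 * be) scheduled, request a reschedule of the currently running task. */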
	if (is_in_sched_state(TASK_PICKED))
		set_sched_state(PICKED_WRONG_TASK);
	else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) {
		set_sched_state(WILL_SCHEDULE);
		set_tsk_need_resched(current);
	}
}
Example #4
static inline void resched_task(task_t *p)
{
#ifdef CONFIG_SMP
	preempt_disable();
	if (/* condition omitted */ && (task_cpu(p) != smp_processor_id()))
		smp_send_reschedule(task_cpu(p));
	preempt_enable();
#else
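	/* On a uniprocessor there is no remote CPU to notify: just flag the
	 * task so it is rescheduled at the next opportunity. */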
	set_tsk_need_resched(p);
#endif
}
Example #5
/* pfair_tick - this function is called for every local timer
 * interrupt.
 */
static void pfair_tick(struct task_struct* t)
{
	struct pfair_state* state = &__get_cpu_var(pfair_state);
	quanta_t time, cur;
	int retry = 10;

	do {
		cur  = current_quantum(state);
		PTRACE("q %lu at %llu\n", cur, litmus_clock());

		/* Attempt to advance time. First CPU to get here
		 * will prepare the next quantum.
		 */
		time = cmpxchg(&pfair_time,
			       cur - 1,   /* expected */
			       cur        /* next     */
			);
		if (time == cur - 1) {
			/* exchange succeeded */
			wait_for_quantum(cur - 1, state);
			schedule_next_quantum(cur);
			retry = 0;
		} else if (time_before(time, cur - 1)) {
			/* the whole system missed a tick !? */
			catchup_quanta(time, cur, state);
			retry--;
		} else if (time_after(time, cur)) {
			/* our timer lagging behind!? */
			TRACE("BAD pfair_time:%lu > cur:%lu\n", time, cur);
			retry--;
		} else {
			/* Some other CPU already started scheduling
			 * this quantum. Let it do its job and then update.
			 */
			retry = 0;
		}
	} while (retry);

	/* Spin locally until time advances. */
	wait_for_quantum(cur, state);

	/* copy assignment */
	/* FIXME: what if we race with a future update? Corrupted state? */
	state->local      = state->linked;
	/* signal that we are done */
	mb();
	state->local_tick = state->cur_tick;

	if (state->local != current
	    && (is_realtime(current) || is_present(state->local)))
		set_tsk_need_resched(current);
}
Example #6
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	/* LITMUS^RT needs this interrupt to properly reschedule
	 * on this CPU.
	 */
	set_tsk_need_resched(current);
	inc_irq_stat(irq_resched_count);
	TS_SEND_RESCHED_END;
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}
Example #7
void wk_start_kick_cpu(int cpu)
{
	if(IS_ERR(wk_tsk[cpu]))
	{
		printk("[wdk]wk_task[%d] is NULL\n",cpu);
	}
	else
	{
		/* Need to be asleep *before* we do a kthread_bind */
		__set_task_state(wk_tsk[cpu], TASK_UNINTERRUPTIBLE);
		set_tsk_need_resched(wk_tsk[cpu]);

		kthread_bind(wk_tsk[cpu], cpu);
	//	printk("[wdk]bind thread[%d] to cpu[%d]\n",wk_tsk[cpu]->pid,cpu);
		wake_up_process(wk_tsk[cpu]);
	}
}
Example #8
/*
 * task_tick_other_rr is invoked on each scheduler timer tick.
 */
static void task_tick_other_rr(struct rq *rq, struct task_struct *p, int queued)
{
	// first update the task's runtime statistics
	update_curr_other_rr(rq);

	if(other_rr_time_slice != 0)
	{
		p->task_time_slice--;

		if(p->task_time_slice == 0)
		{
			p->task_time_slice = other_rr_time_slice;
			set_tsk_need_resched(p);
			requeue_task_other_rr(rq, rq->curr);
		}
	}
}
Example #9
/* Called by the IPI handler after another CPU called smp_send_reschedule(). */
void sched_state_ipi(void)
{
	/* If the IPI was slow, we might be in any state right now. The IPI is
	 * only meaningful if we are in SHOULD_SCHEDULE. */
	if (is_in_sched_state(SHOULD_SCHEDULE)) {
		/* Cause scheduler to be invoked.
		 * This will cause a transition to WILL_SCHEDULE. */
		set_tsk_need_resched(current);
		TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n",
			    current->comm, current->pid);
	} else {
		/* ignore */
		TRACE_STATE("ignoring IPI in state %x (%s)\n",
			    get_sched_state(),
			    sched_state_name(get_sched_state()));
	}
}
Example #10
void scheduler_tick(int user_ticks, int sys_ticks)
{
	//...
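	/* The task has already been moved off the active array (e.g. it
	 * expired) but has not been switched out yet: just ask for a
	 * reschedule. */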
	if (p->array != rq->active) {
		set_tsk_need_resched(p);
		goto out;
	}
	//...
	if (!--p->time_slice) {
		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
			enqueue_task(p, rq->expired);
		} else
			enqueue_task(p, rq->active);
	} else {
		/* Prevent a too-long timeslice from allowing a task to
		 * monopolize the CPU: we do this by splitting up the
		 * timeslice into smaller pieces.
		 */
	}
}
Example #11
void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}
Example #12
enum hrtimer_restart hrtimer_C_callback(struct hrtimer *timer) {
  struct task_struct *task;

  write_lock(&tasklist_lock);
  task = container_of(timer, struct task_struct, C_timer);

  // reset C time
  task->real_C_time = ktime_set(0, 0);

  // printk("[hrtimer_C_callback] PID = %d remaining C = %lld ms \n", timer->start_pid,
  //        ktime_to_ms(hrtimer_get_remaining(timer)));

  // force a context switch after the interrupt ends
  task->put_to_sleep = 1;
  task->state = TASK_UNINTERRUPTIBLE;
  set_tsk_need_resched(task);

  // this timer is rescheduled upon a context switch
  write_unlock(&tasklist_lock);
  return HRTIMER_NORESTART;
}
Example #13
int wake_up_process(struct task_struct *tsk)
{
	int need_resched = 0;

	if (tsk->state == TASK_UNINTERRUPTIBLE)
		return -1;

	if (!pri_bitmap)
		need_resched = 1;

	struct task_struct *next = pick_next_task();
	if (next->prio >= tsk->prio)
		need_resched = 1;

	set_task_state(tsk, TASK_RUNNING);

	if (need_resched)
		set_tsk_need_resched(current);

	return 0;
}
Example #14
static void check_preempt(struct task_struct* t)
{
	int cpu = NO_CPU;
	if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on &&
	    tsk_rt(t)->present) {
		/* the task can be scheduled and
		 * is not scheduled where it ought to be scheduled
		 */
		cpu = tsk_rt(t)->linked_on != NO_CPU ?
			tsk_rt(t)->linked_on         :
			tsk_rt(t)->scheduled_on;
		PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n",
			   tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on);
		/* preempt */
		if (cpu == smp_processor_id())
			set_tsk_need_resched(current);
		else {
			smp_send_reschedule(cpu);
		}
	}
}
Example #15
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = static_prio_timeslice(p->static_prio);

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}
Example #16
/**
 * Called when the given task starts a new job.
 */
static void edf_job_start(resch_task_t *rt)
{
	unsigned long flags;
	int cpu = rt->cpu_id;
	resch_task_t *hp;

	active_queue_lock(cpu, &flags);

	edf_enqueue_task(rt, RESCH_PRIO_EDF, cpu);
	hp = active_highest_prio_task(cpu);
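	/* If the new job is now the highest-priority task on this CPU,
	 * preempt the task that was running; otherwise the new job is put
	 * to sleep until it becomes the highest-priority task. */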
	if (rt == hp) {
		resch_task_t *curr = active_next_prio_task(rt);
		if (curr) {
			curr->task->state = TASK_INTERRUPTIBLE;
			set_tsk_need_resched(curr->task);
		}
	}
	else {
		rt->task->state = TASK_INTERRUPTIBLE;
	}

	active_queue_unlock(cpu, &flags);
}
Example #17
/* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must
 * hold the lock that is used to serialize scheduling decisions. */
void litmus_reschedule(int cpu)
{
	int picked_transition_ok = 0;
	int scheduled_transition_ok = 0;

	/* The (remote) CPU could be in any state. */

	/* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU
	 * is not aware of the need to reschedule at this point. */

	/* is a context switch in progress? */
	if (cpu_is_in_sched_state(cpu, TASK_PICKED))
		picked_transition_ok = sched_state_transition_on(
			cpu, TASK_PICKED, PICKED_WRONG_TASK);

	if (!picked_transition_ok &&
	    cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) {
		/* We either raced with the end of the context switch, or the
		 * CPU was in TASK_SCHEDULED anyway. */
		scheduled_transition_ok = sched_state_transition_on(
			cpu, TASK_SCHEDULED, SHOULD_SCHEDULE);
	}

	/* If the CPU was in state TASK_SCHEDULED, then we need to cause the
	 * scheduler to be invoked. */
	if (scheduled_transition_ok) {
		if (smp_processor_id() == cpu)
			set_tsk_need_resched(current);
		else
			smp_send_reschedule(cpu);
	}

	TRACE_STATE("%s picked-ok:%d sched-ok:%d\n",
		    __FUNCTION__,
		    picked_transition_ok,
		    scheduled_transition_ok);
}
Example #18
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
	if (is_idle_task(current))
		set_tsk_need_resched(current);
}
Example #19
/*
 * Reschedule call back. Trigger a reschedule pass so that
 * RT-overload balancing can pass tasks around.
 */
fastcall notrace void smp_reschedule_interrupt(struct pt_regs *regs)
{
	trace_special(regs->eip, 0, 0);
	ack_APIC_irq();
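	/* request a reschedule on return from the interrupt so that
	 * RT-overload balancing can move tasks between CPUs. */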
	set_tsk_need_resched(current);
}
Example #20
/**
 * Migrate @rt to the given CPU.
 */
static void edf_migrate_task(resch_task_t *rt, int cpu_dst)
{
	unsigned long flags;
	int cpu_src = rt->cpu_id;

	if (cpu_src != cpu_dst) {
		active_queue_double_lock(cpu_src, cpu_dst, &flags);
		if (task_is_active(rt)) {
			resch_task_t *next_src = NULL, *curr_dst = NULL;
			/* save the next task on the source CPU. */
			if (rt == active_highest_prio_task(cpu_src)) {
				next_src = active_next_prio_task(rt);
			}
#ifdef NO_LINUX_LOAD_BALANCE
			/* trace preemption. */
			preempt_out(rt);
#endif
			/* move off the source CPU. */
			edf_dequeue_task(rt, rt->prio, cpu_src);

			/* save the current task on the destination CPU. */
			curr_dst = active_prio_task(cpu_dst, RESCH_PRIO_EDF_RUN);

			/* move on the destination CPU. */
			rt->cpu_id = cpu_dst; 
			edf_enqueue_task(rt, rt->prio, cpu_dst);

#ifdef NO_LINUX_LOAD_BALANCE
			/* trace preemption. */
			preempt_in(rt);
#endif
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);

			/* the next task will never preempt the current task. */
			if (next_src) {
				wake_up_process(next_src->task);
			}

			__migrate_task(rt, cpu_dst);

			/* restart accounting on the new CPU. */
			if (task_is_accounting(rt)) {
				edf_stop_account(rt);
				edf_start_account(rt);
			}

			if (curr_dst) {
				if (rt->deadline_time < curr_dst->deadline_time) {
					curr_dst->task->state = TASK_INTERRUPTIBLE;
					set_tsk_need_resched(curr_dst->task);
				}
				else {
					rt->task->state = TASK_INTERRUPTIBLE;
					set_tsk_need_resched(rt->task);
				}
			}
		}
		else {
			rt->cpu_id = cpu_dst;
			active_queue_double_unlock(cpu_src, cpu_dst, &flags);
			__migrate_task(rt, cpu_dst);
		}
	}
	else {
		__migrate_task(rt, cpu_dst);
	}
}