Code example #1
void sobliv_on_blocked(struct task_struct* t)
{
	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
		/* there is a fraction of time where we're double-counting the
		 * time tracked by the rq and suspension time.
		 * TODO: Do this recording closer to suspension time. */
		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

		if (!tsk_rt(t)->budget.timer.armed) {
			/* budget exhaustion timer fired as t was waking up, so budget
			 * routine thought t was running. We need to re-trigger the budget
			 * exhaustion routine via timer. Schedulers do not call
			 * job_completion() when a task blocks, even if t's budget has been
			 * exhausted. Unfortunately, we cannot rerun the exhaustion routine
			 * here due to spinlock ordering issues. Just re-arm the timer with
			 * the exhausted time, re-running the timer routine immediately once
			 * interrupts have been re-enabled. */

			/* clear the exhausted flag so the handler will re-run. this will not
			 * trigger another exhaustion signal since signals are controlled by
			 * BTF_SIG_BUDGET_SENT. */
			bt_flag_clear(t, BTF_BUDGET_EXHAUSTED);

			if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
				TRACE_TASK(t, "budget timer not armed. "
						   "Raced with exhaustion-resched? Re-arming.\n");
				arm_enforcement_timer(t, 1);
			}
			else {
				TRACE_TASK(t, "not arming timer because task is waiting "
								"for release.\n");
			}
		}
	}
}
Code example #2
static struct task_struct* pfair_schedule(struct task_struct * prev)
{
	struct pfair_state* state = &__get_cpu_var(pfair_state);
	int blocks;
	struct task_struct* next = NULL;

	raw_spin_lock(&pfair_lock);

	blocks  = is_realtime(prev) && !is_running(prev);

	if (state->local && safe_to_schedule(state->local, state->cpu))
		next = state->local;

	if (prev != next) {
		tsk_rt(prev)->scheduled_on = NO_CPU;
		if (next)
			tsk_rt(next)->scheduled_on = state->cpu;
	}

	raw_spin_unlock(&pfair_lock);

	if (next)
		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
			   tsk_pfair(next)->release, pfair_time, litmus_clock());
	else if (is_realtime(prev))
		TRACE("Becomes idle at %lu (%llu)\n", pfair_time, litmus_clock());

	return next;
}
Code example #3
static void unboost_priority(struct task_struct* t)
{
	unsigned long		flags;
	psnedf_domain_t* 	pedf = task_pedf(t);
	lt_t			now;

	raw_readyq_lock_irqsave(&pedf->slock, flags);
	now = litmus_clock();

	/* assumption: this only happens while the job is scheduled;
	 * priority-boosted jobs must be scheduled */
	BUG_ON(pedf->scheduled != t);

	TRACE_TASK(t, "priority restored at %llu\n", now);

	tsk_rt(t)->priority_boosted = 0;
	tsk_rt(t)->boost_start_time = 0;

	/* check if this changes anything */
	if (edf_preemption_needed(&pedf->domain, pedf->scheduled))
		preempt(pedf);

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
Code example #4
static void boost_priority(struct task_struct* t)
{
	unsigned long		flags;
	psnedf_domain_t* 	pedf = task_pedf(t);
	lt_t			now;

	raw_readyq_lock_irqsave(&pedf->slock, flags);
	now = litmus_clock();

	TRACE_TASK(t, "priority boosted at %llu\n", now);

	tsk_rt(t)->priority_boosted = 1;
	tsk_rt(t)->boost_start_time = now;

	if (pedf->scheduled != t) {
		/* holder may be queued: first stop queue changes */
		raw_spin_lock(&pedf->domain.release_lock);
		if (is_queued(t) &&
		    /* If it is queued, then we need to re-order. */
		    bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node) &&
		    /* If we bubbled to the top, then we need to check for preemptions. */
		    edf_preemption_needed(&pedf->domain, pedf->scheduled))
				preempt(pedf);
		raw_spin_unlock(&pedf->domain.release_lock);
	} /* else: nothing to do since the job is not queued while scheduled */

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
Code example #5
static int safe_to_schedule(struct task_struct* t, int cpu)
{
	int where = tsk_rt(t)->scheduled_on;
	if (where != NO_CPU && where != cpu) {
		TRACE_TASK(t, "BAD: can't be scheduled on %d, "
			   "scheduled already on %d.\n", cpu, where);
		return 0;
	} else
		return tsk_rt(t)->present && get_rt_flags(t) == RT_F_RUNNING;
}
Code example #6
File: ctrldev.c  Project: zs673/litmus_mrsp
/* allocate t->rt_param.ctrl_page*/
static int alloc_ctrl_page(struct task_struct *t)
{
	int err = 0;

	/* only allocate if the task doesn't have one yet */
	if (!tsk_rt(t)->ctrl_page) {
		tsk_rt(t)->ctrl_page = (void*) get_zeroed_page(GFP_KERNEL);
		if (!tsk_rt(t)->ctrl_page)
			err = -ENOMEM;
		/* will get de-allocated in task teardown */
		TRACE_TASK(t, "%s ctrl_page = %p\n", __FUNCTION__,
			   tsk_rt(t)->ctrl_page);
	}
	return err;
}
Code example #7
void simple_io_on_wakeup(struct task_struct* t)
{
	/* we're waking up from an io-based suspension */
	if (tsk_rt(t)->budget.suspend_timestamp) {
		lt_t suspend_cost = litmus_clock() -
				tsk_rt(t)->budget.suspend_timestamp;
		tsk_rt(t)->budget.suspend_timestamp = 0;
		TRACE_TASK(t, "budget consumed while io-suspended: %llu\n",
						suspend_cost);
		get_exec_time(t) += suspend_cost;
	}
	else {
		TRACE_TASK(t, "waking from non-io blocking\n");
	}
}
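The accounting above follows a simple pattern: record a timestamp when the task blocks for I/O, and on wakeup charge the elapsed time against the budget. A minimal user-space sketch of the same pattern is shown below; it is an illustration only (CLOCK_MONOTONIC stands in for litmus_clock()), not LITMUS^RT code.

/* Illustration only: user-space analogue of the suspend-time accounting
 * above.  CLOCK_MONOTONIC stands in for litmus_clock(). */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t exec_time = 0;
	uint64_t suspend_timestamp = now_ns();     /* set when "blocking" */

	usleep(1000);                              /* the suspension itself */

	exec_time += now_ns() - suspend_timestamp; /* charged on "wakeup" */
	printf("budget consumed while suspended: %llu ns\n",
	       (unsigned long long)exec_time);
	return 0;
}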
Code example #8
int cancel_enforcement_timer(struct task_struct* t)
{
	struct enforcement_timer* et;
	int ret = 0;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;

	TRACE_TASK(t, "canceling enforcement timer.\n");

	if (et->armed) {
		raw_spin_lock_irqsave(&et->lock, flags);
		if (et->armed) {
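			/* hrtimer_try_to_cancel() returns 0 if the timer was not
			 * active, 1 if it was active and is now cancelled, and -1
			 * if its callback is currently running and cannot be
			 * stopped. */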
			ret = hrtimer_try_to_cancel(&et->timer);
			if (ret < 0)
				TRACE_TASK(t, "timer already running. failed to cancel.\n");
			else {
				TRACE_TASK(t, "canceled timer with %lld ns remaining.\n",
					ktime_to_ns(hrtimer_expires_remaining(&et->timer)));
				et->armed = 0;
			}
		}
		else
			TRACE_TASK(t, "timer was not armed (race).\n");
		raw_spin_unlock_irqrestore(&et->lock, flags);
	}
	else
		TRACE_TASK(t, "timer was not armed.\n");

	return ret;
}
Code example #9
/* returns 1 if the task needs to go the release queue */
static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
{
	struct pfair_param* p = tsk_pfair(t);
	int to_relq;
	p->cur = (p->cur + 1) % p->quanta;
	if (!p->cur) {
		sched_trace_task_completion(t, 1);
		if (tsk_rt(t)->present) {
			/* we start a new job */
			prepare_for_next_period(t);
			sched_trace_task_release(t);
			get_rt_flags(t) = RT_F_RUNNING;
			p->release += p->period;
		} else {
			/* remove task from system until it wakes */
			drop_all_references(t);
			tsk_pfair(t)->sporadic_release = 1;
			TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n",
				   cpu, p->cur);
			return 0;
		}
	}
	to_relq = time_after(cur_release(t), time);
	TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n",
		   cpu, p->cur, to_relq);
	return to_relq;
}
Code example #10
inline static void arm_enforcement_timer(struct task_struct* t, int force)
{
	struct enforcement_timer* et;
	lt_t when_to_fire, remaining_budget;
	lt_t now;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;
	if (et->armed) {
		TRACE_TASK(t, "timer already armed!\n");
		return;
	}

	if (!force) {
		if ( (!budget_enforced(t) ||
				(budget_enforced(t) &&
					bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)))
				&&
			(!budget_signalled(t) ||
				(budget_signalled(t) &&
					bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)))) {
			TRACE_TASK(t,
					"trying to arm timer when budget "
					"has already been exhausted.\n");
			return;
		}
	}

	TRACE_TASK(t, "arming enforcement timer.\n");

	/* __hrtimer_start_range_ns() cancels the timer
	 * anyway, so we don't have to check whether it is still armed */
	raw_spin_lock_irqsave(&et->lock, flags);

	if (et->armed) {
		TRACE_TASK(t, "timer already armed (race)!\n");
		goto out;
	}

	now = litmus_clock();
	remaining_budget = budget_remaining(t);
	when_to_fire = now + remaining_budget;

	TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n",
					remaining_budget, when_to_fire);

	__hrtimer_start_range_ns(&et->timer,
				 ns_to_ktime(when_to_fire),
				 0 /* delta */,
				 HRTIMER_MODE_ABS_PINNED,  /* TODO: need to use non-pinned? */
				 0 /* no wakeup */);
	et->armed = 1;

out:
	raw_spin_unlock_irqrestore(&et->lock, flags);
}
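The !force guard above contains a redundant inner conjunction: for any flags A and B, (!A || (A && B)) is equivalent to (!A || B), so it reads as "skip arming when enforcement is off or exhaustion is already flagged, and signalling is off or the signal was already sent". The stand-alone snippet below (not project code) simply checks that equivalence over all flag combinations.

/* Stand-alone sanity check, not part of LITMUS^RT. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	int a, b;
	for (a = 0; a <= 1; a++)
		for (b = 0; b <= 1; b++)
			assert((!a || (a && b)) == (!a || b));
	printf("guard simplification holds for all flag combinations\n");
	return 0;
}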
Code example #11
void simple_on_scheduled(struct task_struct* t)
{
	BUG_ON(!t);

	if(budget_precisely_tracked(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT))
		if (!tsk_rt(t)->budget.timer.armed)
			arm_enforcement_timer(t, 0);
}
Code example #12
static void job_completion(struct task_struct* t, int forced)
{
	sched_trace_task_completion(t, forced);
	TRACE_TASK(t, "job_completion().\n");

	tsk_rt(t)->completed = 0;
	prepare_for_next_period(t);
}
Code example #13
File: ctrldev.c  Project: zs673/litmus_mrsp
static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma)
{
	int err;

	struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page);

	TRACE_CUR(CTRL_NAME
		  ": mapping %p (pfn:%lx) to 0x%lx (prot:%lx)\n",
		  tsk_rt(t)->ctrl_page, page_to_pfn(ctrl), vma->vm_start,
		  vma->vm_page_prot);

	/* Map it into the vma. */
	err = vm_insert_page(vma, vma->vm_start, ctrl);

	if (err)
		TRACE_CUR(CTRL_NAME ": vm_insert_page() failed (%d)\n", err);

	return err;
}
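For context, a hypothetical user-space counterpart of the mapping established by vm_insert_page() above might look like the sketch below. The device path /dev/litmus/ctrl is an assumption borrowed from liblitmus; adjust it to the actual installation.

/* Hypothetical user-space sketch: the /dev/litmus/ctrl path is assumed. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/litmus/ctrl", O_RDWR);
	void *ctrl;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ctrl = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	if (ctrl == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	printf("control page mapped at %p\n", ctrl);
	munmap(ctrl, page_size);
	close(fd);
	return 0;
}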
Code example #14
void sobliv_on_exit_top_m(struct task_struct* t)
{
	if (budget_precisely_tracked(t)) {
		if (tsk_rt(t)->budget.timer.armed) {

			if (!is_running(t)) {
				/* the time at which we started draining budget while
				 * suspended is recorded in suspend_timestamp.  it was set
				 * either when 't' entered the top-m while suspended or
				 * when 't' blocked. */
				lt_t suspend_cost;
				BUG_ON(!tsk_rt(t)->budget.suspend_timestamp);
				suspend_cost = litmus_clock() -
						tsk_rt(t)->budget.suspend_timestamp;
				TRACE_TASK(t, "budget consumed while suspended: %llu\n",
								suspend_cost);
				get_exec_time(t) += suspend_cost;

				/* timer should have fired before now */
				if (get_exec_time(t) + 1000000/10 > get_exec_cost(t)) {
					TRACE_TASK(t,
						"budget overrun while suspended by over 1/10 "
						"millisecond! timer should have already fired!\n");
					WARN_ON(1);
				}
			}

			TRACE_TASK(t, "stops draining budget\n");
			/* the callback will handle it if it is executing */
			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
				/* TODO: record a timestamp if the task isn't running */
				cancel_enforcement_timer(t);
			}
			else {
				TRACE_TASK(t,
					"within callback context. skipping operation.\n");
			}
		}
		else {
			TRACE_TASK(t, "was not draining budget\n");
		}
	}
}
Code example #15
void sobliv_on_wakeup(struct task_struct* t)
{
	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
		/* we're waking up while in top-m.  record the time spent
		 * suspended while draining in exec_time. suspend_timestamp was
		 * either set when we entered top-m while asleep, or when we
		 * blocked. */
		if (tsk_rt(t)->budget.suspend_timestamp) {
			lt_t suspend_cost = litmus_clock() -
					tsk_rt(t)->budget.suspend_timestamp;
			tsk_rt(t)->budget.suspend_timestamp = 0;
			TRACE_TASK(t, "budget consumed while suspended: %llu\n",
					suspend_cost);
			get_exec_time(t) += suspend_cost;
		}
		else {
			WARN_ON(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE));
		}
	}
}
Code example #16
static int target_cpu(quanta_t time, struct task_struct* t, int default_cpu)
{
	int cpu;
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		/* always observe scheduled_on linkage */
		default_cpu = tsk_rt(t)->scheduled_on;
	} else if (tsk_pfair(t)->last_quantum == time - 1) {
		/* back2back quanta */
		/* Only observe last_quantum if no scheduled_on is in the way.
		 * This should only kick in if a CPU missed quanta, and that
		 * *should* only happen in QEMU.
		 */
		cpu = tsk_pfair(t)->last_cpu;
		if (!pstate[cpu]->linked ||
		    tsk_rt(pstate[cpu]->linked)->scheduled_on != cpu) {
			default_cpu = cpu;
		}
	}
	return default_cpu;
}
Code example #17
static void requeue(struct task_struct* t, rt_domain_t *edf)
{
	if (t->state != TASK_RUNNING)
		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");

	tsk_rt(t)->completed = 0;
	if (is_early_releasing(t) || is_released(t, litmus_clock()))
		__add_ready(edf, t);
	else
		add_release(edf, t); /* it has got to wait */
}
Code example #18
static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
{
	enum hrtimer_restart restart = HRTIMER_NORESTART;
	unsigned long flags;

	struct budget_tracker* bt =
		container_of(
			container_of(timer,
				struct enforcement_timer,
				timer),
			struct budget_tracker,
			timer);

	struct task_struct* t =
		container_of(
			container_of(bt, struct rt_param, budget),
			struct task_struct,
			rt_param);

	TRACE_TASK(t, "budget timer interrupt fired at time %lu\n",
					litmus_clock());

	raw_spin_lock_irqsave(&bt->timer.lock, flags);
	tsk_rt(t)->budget.timer.armed = 0;
	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);

	if (unlikely(bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
		TRACE_TASK(t,
			"spurious exhastion while waiting for release. dropping.\n");
		goto out;
	}

	restart = bt->ops->on_exhausted(t, !IN_SCHEDULE);

	raw_spin_lock_irqsave(&bt->timer.lock, flags);
	tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART);
	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);

out:
	return restart;
}
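The two nested container_of() chains above rely on the struct nesting: the hrtimer is embedded in struct enforcement_timer, which is embedded in struct budget_tracker, which in turn sits inside rt_param within task_struct. The stand-alone sketch below reproduces the idiom with stand-in struct definitions (the real LITMUS^RT layouts differ); it recovers the enclosing object purely from member offsets.

/* Stand-alone illustration of the container_of idiom.  The struct layout
 * here is a stand-in, not the real LITMUS^RT definitions. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct enforcement_timer { int timer; };      /* stands in for struct hrtimer */
struct budget_tracker    { struct enforcement_timer timer; };
struct rt_param          { struct budget_tracker budget; };
struct task_struct       { struct rt_param rt_param; };

int main(void)
{
	struct task_struct task;
	int *hrtimer_ptr = &task.rt_param.budget.timer.timer;

	/* walk back out from the innermost member to the enclosing task */
	struct budget_tracker *bt =
		container_of(container_of(hrtimer_ptr,
					  struct enforcement_timer, timer),
			     struct budget_tracker, timer);
	struct task_struct *t =
		container_of(container_of(bt, struct rt_param, budget),
			     struct task_struct, rt_param);

	printf("recovered task: %p (expected %p)\n", (void *)t, (void *)&task);
	return 0;
}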
Code example #19
void reevaluate_inheritance(struct task_struct* t)
{
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	struct litmus_lock *blocked_lock = NULL;

	TRACE_TASK(t, "reevaluating locks in light of budget exhaustion.\n");

	/* do we need to inherit from any tasks now that our own
	 * priority has decreased? */
	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
	if (holds_locks(t)) {
		struct task_struct* hp_blocked =
				top_priority(&tsk_rt(t)->hp_blocked_tasks);

		if (litmus->compare(hp_blocked, t))
			litmus->increase_prio(t, effective_priority(hp_blocked));
	}
	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);

	/* do we need to tell the lock we're blocked on about our
	 * changed priority? */
	blocked_lock = tsk_rt(t)->blocked_lock;
	if(blocked_lock) {
		if(blocked_lock->ops->supports_budget_exhaustion) {
			TRACE_TASK(t, "Lock %d supports budget exhaustion.\n",
					   blocked_lock->ident);
			blocked_lock->ops->budget_exhausted(blocked_lock, t);
		}
	}
	else {
		TRACE_TASK(t,
			"Budget exhausted while task not blocked on Litmus lock.\n");
	}
#else
	/* prio-reeval currently relies upon nested locking infrastructure */
	TRACE_TASK(t,
		"Unable to check if sleeping task is blocked "
		"on Litmus lock without "
		"CONFIG_LITMUS_NESTED_LOCKING enabled.\n");
#endif
}
Code example #20
void sobliv_on_enter_top_m(struct task_struct* t)
{
	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
		if (tsk_rt(t)->budget.timer.armed)
			TRACE_TASK(t, "budget timer already armed.\n");
		else {
			/* if we're blocked, then record the time at which we
			   started measuring */
			if (!is_running(t))
				tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

			/* the callback will handle it if it is executing */
			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
				arm_enforcement_timer(t, 0);
			}
			else {
				TRACE_TASK(t,
					"within callback context. deferring timer arm.\n");
			}
		}
	}
}
Code example #21
/* caller must hold pfair_lock */
static void drop_all_references(struct task_struct *t)
{
	int cpu;
	struct pfair_state* s;
	struct bheap* q;
	if (bheap_node_in_heap(tsk_rt(t)->heap_node)) {
		/* figure out what queue the node is in */
		if (time_before_eq(cur_release(t), merge_time))
			q = &pfair.ready_queue;
		else
			q = relq(cur_release(t));
		bheap_delete(pfair_ready_order, q,
			     tsk_rt(t)->heap_node);
	}
	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
		s = &per_cpu(pfair_state, cpu);
		if (s->linked == t)
			s->linked = NULL;
		if (s->local  == t)
			s->local  = NULL;
		if (s->scheduled  == t)
			s->scheduled = NULL;
	}
}
Code example #22
static void psnedf_tick(struct task_struct *t)
{
	psnedf_domain_t *pedf = local_pedf;

	/* Check for inconsistency. We don't need the lock for this since
	 * ->scheduled is only changed in schedule, which obviously is not
	 *  executing in parallel on this CPU
	 */
	BUG_ON(is_realtime(t) && t != pedf->scheduled);

	if (is_realtime(t) &&
		tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
		budget_exhausted(t)) {
		TRACE_TASK(t, "budget exhausted\n");
		budget_state_machine2(t, on_exhausted, !IN_SCHEDULE);
	}
}
Code example #23
static void check_preempt(struct task_struct* t)
{
	int cpu = NO_CPU;
	if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on &&
	    tsk_rt(t)->present) {
		/* the task can be scheduled and
		 * is not scheduled where it ought to be scheduled
		 */
		cpu = tsk_rt(t)->linked_on != NO_CPU ?
			tsk_rt(t)->linked_on         :
			tsk_rt(t)->scheduled_on;
		PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n",
			   tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on);
		/* preempt */
		if (cpu == smp_processor_id())
			set_tsk_need_resched(current);
		else
			smp_send_reschedule(cpu);
	}
}
Code example #24
void simple_io_on_blocked(struct task_struct* t)
{
	/* hiding is turned on by locking protocols, so if there isn't any
	   hiding, then we're blocking for some other reason.  assume it's I/O. */
	int for_io = 0;
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	for_io |= !tsk_rt(t)->blocked_lock;
#endif
#ifdef CONFIG_REALTIME_AUX_TASKS
	for_io |= tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks;
#endif
#ifdef CONFIG_LITMUS_NVIDIA
	for_io |= tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu;
#endif

	/* we drain budget for io-based suspensions */
	if (for_io) {
		/* there is a fraction of time where we're double-counting the
		 * time tracked by the rq and suspension time.
		 * TODO: Do this recording closer to suspension time. */
		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

		TRACE_TASK(t, "blocking for I/O.\n");

		if (!tsk_rt(t)->budget.timer.armed) {
			bt_flag_clear(t, BTF_BUDGET_EXHAUSTED);

			if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
				TRACE_TASK(t, "budget timer not armed. "
						   "Raced with exhaustion-resched? Re-arming.\n");
				arm_enforcement_timer(t, 1);
			}
			else {
				TRACE_TASK(t, "not arming timer because task is waiting "
						   "for release.\n");
			}
		}
	}
	else {
		TRACE_TASK(t, "blocking for litmus lock. stop draining.\n");
		simple_on_blocked(t);
	}
}
Code example #25
/* returns one if linking was redirected */
static int pfair_link(quanta_t time, int cpu,
		      struct task_struct* t)
{
	int target = target_cpu(time, t, cpu);
	struct task_struct* prev  = pstate[cpu]->linked;
	struct task_struct* other;

	if (target != cpu) {
		other = pstate[target]->linked;
		pstate[target]->linked = t;
		tsk_rt(t)->linked_on   = target;
		if (!other)
			/* linked ok, but reschedule this CPU */
			return 1;
		if (target < cpu) {
			/* link other to cpu instead */
			tsk_rt(other)->linked_on = cpu;
			pstate[cpu]->linked      = other;
			if (prev) {
				/* prev got pushed back into the ready queue */
				tsk_rt(prev)->linked_on = NO_CPU;
				__add_ready(&pfair, prev);
			}
			/* we are done with this cpu */
			return 0;
		} else {
			/* re-add other, its original CPU was not considered yet */
			tsk_rt(other)->linked_on = NO_CPU;
			__add_ready(&pfair, other);
			/* reschedule this CPU */
			return 1;
		}
	} else {
		pstate[cpu]->linked  = t;
		tsk_rt(t)->linked_on = cpu;
		if (prev) {
			/* prev got pushed back into the ready queue */
			tsk_rt(prev)->linked_on = NO_CPU;
			__add_ready(&pfair, prev);
		}
		/* we are done with this CPU */
		return 0;
	}
}
Code example #26
static void __pfair_add_release(struct task_struct* t, struct bheap* queue)
{
	bheap_insert(pfair_ready_order, queue,
		    tsk_rt(t)->heap_node);
}
Code example #27
static void pfair_add_release(struct task_struct* t)
{
	BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
	__pfair_add_release(t, relq(cur_release(t)));
}