Ejemplo n.º 1
0
/*
 * __ipipe_preempt_schedule_irq - reschedule on IRQ return under the
 * I-pipe (interrupt pipeline).
 *
 * Entered with hardware interrupts disabled (enforced by the BUG_ON
 * below).  The root stage is virtually stalled around the call to
 * preempt_schedule_irq(), then any interrupts the pipeline logged in
 * the meantime are synchronized before returning.
 */
asmlinkage void __sched __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	/* Caller contract: hardware IRQs must be off on entry. */
	BUG_ON(!hard_irqs_disabled());
	/* Virtually disable root-stage IRQs, then re-enable them in hardware. */
	local_irq_save(flags);
	hard_local_irq_enable();
	preempt_schedule_irq(); /* Ok, may reschedule now. */
	hard_local_irq_disable();

	/*
	 * Flush any pending interrupt that may have been logged after
	 * preempt_schedule_irq() stalled the root stage before
	 * returning to us, and now.
	 */
	p = ipipe_this_cpu_root_context();
	if (unlikely(__ipipe_ipending_p(p))) {
		/* Keep the flushed handlers from rescheduling recursively. */
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_stage();
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	/* Restore the saved virtual IRQ state without re-syncing the log. */
	__ipipe_restore_root_nosync(flags);
}
Ejemplo n.º 2
0
/*
 * Disable bottom-half (softirq) processing on the local CPU by raising
 * the softirq part of the preempt count.
 *
 * @ip: caller address, passed through to the softirq-off tracer.
 */
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	/* Must not be called from hard interrupt context. */
	WARN_ON_ONCE(in_irq());

	/* Make the count update and the tracing decision atomic vs. IRQs. */
	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 * (count equals exactly one SOFTIRQ_OFFSET => this is the
	 * outermost disable, so record the off-site once.)
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
Ejemplo n.º 3
0
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
/*
 * Non-tracing variant: just add @cnt to the preempt count; @ip is
 * unused here.  The compiler barrier keeps subsequent memory accesses
 * from being reordered before the count update.
 */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
Ejemplo n.º 4
0
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now, delta;

	/* Nothing to do if the tick was never stopped. */
	if (!ts->tick_stopped)
		return;

	/* Update jiffies first */
	now = ktime_get();

	local_irq_disable();
	tick_do_update_jiffies64(now);
	/* This CPU is no longer in the nohz set. */
	cpu_clear(cpu, nohz_cpu_mask);

	/* Account the idle time */
	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	/*
	 * We stopped the tick in idle. Update process times would miss the
	 * time we slept as update_process_times does only a 1 tick
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		/* Pretend hardirq context so the time is charged as system/idle. */
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	/* Re-arm; loop in case the computed expiry is already in the past. */
	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}
Ejemplo n.º 5
0
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
/*
 * Non-tracing variant: raise the softirq part of the preempt count;
 * @ip is unused here.  The compiler barrier keeps subsequent memory
 * accesses from being reordered before the count update.
 */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
Ejemplo n.º 6
0
/*
 * kona_timer_unit_test_run - run the Kona timer self-test suite on one
 * timer channel.
 *
 * @clk_name: name of the timer clock to request a channel from.
 * @clk_rate: clock rate in Hz, used to convert cycles to wall time.
 *
 * Runs six test cases (one-shot, periodic, and mixed short/long
 * requests), recording start counters and requested cycles in the
 * global @timer_m measurement buffer shared with the expiry handler.
 *
 * Returns 0 on success, -EINVAL on allocation failure or when a test's
 * expiration count does not match expectations.
 */
static int kona_timer_unit_test_run(const char *clk_name,
				     unsigned long clk_rate)
{
	struct kona_timer *lkt;
	int i;
	unsigned long flags;
	unsigned long req_cycle;

	pr_info("%s started!\n", __func__);

	lkt = kona_timer_request((char *)clk_name, -1);
	if (lkt == NULL) {
		pr_err("kona_timer_request returned error\n");
		return -EINVAL;
	}
	pr_info("%s ch_num: %d acquired!\n", clk_name, lkt->ch_num);

	/*
	 * kzalloc: no cast needed in C, sizeof on the object not the
	 * type, and the measurement buffer starts zeroed so the expiry
	 * handler never sees garbage before the first locked memset.
	 */
	timer_m = kzalloc(sizeof(*timer_m), GFP_KERNEL);
	if (!timer_m) {
		pr_err("%s memory allocation failed!\n", __func__);
		/* Release the channel acquired above (was leaked before). */
		kona_timer_free(lkt);
		return -EINVAL;
	}

	spin_lock_init(&timer_m->lock);

#ifdef CONFIG_PREEMPT
	/* Ensure that cond_resched() won't try to preempt anybody */
	add_preempt_count(PREEMPT_ACTIVE);
#endif

	/*----------------------------------------------------------------------
	 * Test 1 : 1 clock cycle test
	 *  - set up 1 clock cycle timer request and measure the expiration
	 *    time.
	 */
	msleep(50);
	pr_info("=== Test case 1 ===\n");
	pr_info("1 clock tick one shot for 1 ms\n");

	req_cycle = 1;
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	/*
	 * NOTE(review): this memset also wipes timer_m->lock while it is
	 * held; harmless for a plain spinlock but would corrupt lockdep/
	 * debug state — consider clearing only the data fields.
	 */
	memset(timer_m, 0, sizeof(struct timer_measure));
	timer_m->start[timer_m->counter] = (u64)kona_timer_get_counter(lkt);
	timer_m->req_cycle[timer_m->counter] = req_cycle;
	spin_unlock_irqrestore(&timer_m->lock, flags);

	kona_timer_unit_test_program(lkt, MODE_ONESHOT, req_cycle);

	mdelay(1);
	kona_timer_stop(lkt);

	/* Error check */
	print_unit_test_result(atomic_read(&timer_exp_count), 1, clk_rate);

	if (atomic_read(&timer_exp_count) != 1)
		goto error;

	/*----------------------------------------------------------------------
	 * Test 2 : periodic 1 clock timer test
	 *  - set up periodic 1 clock cycle timer request. SW will handle
	 *    periodic timer on every timer expiration. Wait for certain time
	 *    and check how many timers were expired and measure the time.
	 */
	msleep(50);
	pr_info("=== Test case 2 ===\n");
	pr_info("Periodic 1 clock tick for 1 ms\n");

	req_cycle = 1;
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	timer_m->start[timer_m->counter] = (u64)kona_timer_get_counter(lkt);

	for (i = 0; i < TIMER_MEASURE_MAX; i++)
		timer_m->req_cycle[i] = req_cycle;
	spin_unlock_irqrestore(&timer_m->lock, flags);

	kona_timer_unit_test_program(lkt, MODE_PERIODIC, req_cycle);

	/* mdelay() may be delayed by busy timer request. Should be short */
	mdelay(1);
	kona_timer_stop(lkt);
	pr_info("Total expiration count: %d\n", atomic_read(&timer_exp_count));
	print_unit_test_result(atomic_read(&timer_exp_count),
			       atomic_read(&timer_exp_count), clk_rate);

	/*----------------------------------------------------------------------
	* Test 3 : one-shot timer test with various time values
	*  - test various short clock cycles and check
	*    timer expiration and real run time of timer.
	*/
	msleep(50);
	pr_info("=== Test case 3 ===\n");
	pr_info("0~20 clock tick test for 1s, 50ms delay between each req\n");
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	spin_unlock_irqrestore(&timer_m->lock, flags);

	for (i = 0; i < 20; i++) {
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = i;
		timer_m->start[i] =
			(u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[i] = i;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, i);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		mdelay(50);
		kona_timer_stop(lkt);
	}

	print_unit_test_result(atomic_read(&timer_exp_count), 20, clk_rate);

	if (atomic_read(&timer_exp_count) != 20)
		goto error;

	/*----------------------------------------------------------------------
	* Test 4 : one-shot timer test with various time values and delays.
	*  - test various short clock cycles with various short delays.
	*    Verify the timer expiration and timer run-time.
	*/
	msleep(50);
	pr_info("=== Test case 4 ===\n");
	pr_info("0~29 cycle timer test, 0~29 + 3 clock cycle wait\n");
	pr_info("   Ex)1 clock : wait for 4 clock cycle time\n");
	pr_info("   Ex)10 clock : wait for 13 clock cycle time\n");
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	spin_unlock_irqrestore(&timer_m->lock, flags);

	for (i = 0; i < 30; i++) {
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = i;
		timer_m->start[i] =
			(u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[i] = i;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, i);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		kona_clock_delay(lkt, i + 3);
	}
	kona_timer_stop(lkt);
	pr_info("Total expiration count: %d\n", atomic_read(&timer_exp_count));
	print_unit_test_result(atomic_read(&timer_exp_count), 30, clk_rate);

	/*----------------------------------------------------------------------
	 * Test 5 : one-shot timer test with short-long time/delay.
	 *  - test various short/long clock cycles with various short/long
	 *    delays. Verify the timer expiration and timer run-time.
	 */
	msleep(50);
	pr_info("=== Test case 5 ===\n");
	pr_info("short and long timer test\n");
	pr_info("   short delay: 0~19 clock cycle wait\n");
	pr_info("   long  delay: 50 ms wait\n");
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	spin_unlock_irqrestore(&timer_m->lock, flags);

	for (i = 0; i < 20; i++) {
		/* Short timer request (even measurement slots) */
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = 2 * i;
		timer_m->start[2 * i] =
			(u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[2 * i] = i;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, i);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		kona_clock_delay(lkt, i + 1);

		/* Long timer request (odd measurement slots) */
		req_cycle = clk_rate / 50; /* 20ms */
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = 2 * i + 1;
		timer_m->start[2 * i + 1] =
			(u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[2 * i + 1] = req_cycle;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, req_cycle);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		mdelay(1000/50 * 2); /* 40 ms */
	}
	kona_timer_stop(lkt);
	pr_info("Total expiration count: %d\n", atomic_read(&timer_exp_count));
	print_unit_test_result(atomic_read(&timer_exp_count), 20 * 2, clk_rate);

	/*----------------------------------------------------------------------
	 * Test 6 : periodic 1s timer test
	 *          wait for 10.5 sec with 1s periodic timer request
	 */
	msleep(50);
	pr_info("=== Test case 6 ===\n");
	pr_info("Periodic 1s timer test for 10.5s\n");

	req_cycle = clk_rate;
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	timer_m->start[timer_m->counter] = (u64)kona_timer_get_counter(lkt);

	for (i = 0; i < TIMER_MEASURE_MAX; i++)
		timer_m->req_cycle[i] = req_cycle;
	spin_unlock_irqrestore(&timer_m->lock, flags);

	kona_timer_unit_test_program(lkt, MODE_PERIODIC, clk_rate);

	msleep(10500);
	kona_timer_stop(lkt);

	print_unit_test_result(atomic_read(&timer_exp_count),
			       atomic_read(&timer_exp_count), clk_rate);

	/* 10.5 s of a 1 s periodic timer should fire exactly 10 times. */
	if (atomic_read(&timer_exp_count) != 10)
		goto error;
	msleep(50);

	/*
	 * End of kona timer unit test
	 */

	kona_timer_free(lkt);

	pr_info("%s Passed!\n", __func__);

#ifdef CONFIG_PREEMPT
	sub_preempt_count(PREEMPT_ACTIVE);
#endif
	kfree(timer_m);
	timer_m = NULL;

	return 0;

error:
	/* Common failure path: stop and release the channel, drop state. */
	kona_timer_stop(lkt);
	kona_timer_free(lkt);

	pr_err("%s Failed\n", __func__);

#ifdef CONFIG_PREEMPT
	sub_preempt_count(PREEMPT_ACTIVE);
#endif
	kfree(timer_m);
	timer_m = NULL;

	return -EINVAL;
}
Ejemplo n.º 7
0
/*
 * Disable bottom-half (softirq) processing on the local CPU by adding
 * SOFTIRQ_DISABLE_OFFSET to the preempt count.  Pair with
 * local_bh_enable().
 */
void local_bh_disable(void)
{
	add_preempt_count(SOFTIRQ_DISABLE_OFFSET);
}