static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on(_RET_IP_);
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

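/*
 * Illustration (not in-tree code): the special case above is easiest to
 * see from a caller's perspective.  The sketch below is modelled on the
 * old cond_resched_softirq() pattern that the comment refers to: drop the
 * softirq count so the scheduler sees a preemptible context, without
 * draining pending softirqs first.  Hard irqs are disabled around
 * _local_bh_enable() because the variants above WARN when irqs are still
 * enabled.  resched_in_softirq_sketch() is a hypothetical name, not a
 * kernel API.
 */
#include <linux/bottom_half.h>
#include <linux/irqflags.h>
#include <linux/sched.h>

static int resched_in_softirq_sketch(void)
{
	if (!need_resched())
		return 0;

	local_irq_disable();
	_local_bh_enable();		/* count drops, no softirqs are run */
	local_irq_enable();

	schedule();			/* roughly __cond_resched() in-tree */

	local_bh_disable();		/* back into the softirq-off section */
	return 1;
}
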
void local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());

	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	preempt_check_resched();
}

void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick();
#endif
	preempt_enable_no_resched();
}

void local_bh_enable(void)
{
	WARN_ON(irqs_disabled());
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	preempt_check_resched();
}

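/*
 * Illustration (not in-tree code): the canonical pairing that ends in
 * local_bh_enable().  Process context disables bottom halves around data
 * it shares with a softirq handler; on enable, any softirqs raised in
 * between are run immediately via do_softirq().  The stats names below
 * are made up for the example; spin_lock_bh()/spin_unlock_bh() wrap the
 * same pattern in a single call.
 */
#include <linux/bottom_half.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_stats_lock);	/* hypothetical */
static unsigned long example_rx_packets;	/* hypothetical */

static void stats_update_sketch(unsigned long n)
{
	local_bh_disable();			/* no softirqs on this CPU */
	spin_lock(&example_stats_lock);		/* exclude other CPUs */

	example_rx_packets += n;

	spin_unlock(&example_stats_lock);
	local_bh_enable();			/* may run pending softirqs */
}
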
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	trace_hardirq_exit();
	sub_preempt_count(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	trace_hardirq_exit();
#ifdef CONFIG_SEC_DEBUG
	secdbg_msg("hardirq exit");
#endif
	sub_preempt_count(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
}

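/*
 * Illustration (not in-tree code): where irq_exit() sits in the interrupt
 * path.  Architecture entry code brackets the handler dispatch with
 * irq_enter()/irq_exit(); the latter is what gives pending softirqs a
 * chance to run before control returns to the interrupted context.
 * do_hw_irq() is a placeholder for the real per-arch dispatch
 * (e.g. generic_handle_irq()).
 */
#include <linux/hardirq.h>
#include <linux/interrupt.h>

static void arch_irq_entry_sketch(unsigned int hwirq)
{
	irq_enter();		/* raise HARDIRQ_OFFSET, hardirq tracing on */

	do_hw_irq(hwirq);	/* placeholder: run the registered handler */

	irq_exit();		/* drop HARDIRQ_OFFSET, maybe invoke_softirq() */
}
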
/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now, delta;

	if (!ts->tick_stopped)
		return;

	/* Update jiffies first */
	now = ktime_get();

	local_irq_disable();
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);

	/* Account the idle time */
	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	/*
	 * We stopped the tick in idle. Update process times would miss the
	 * time we slept as update_process_times does only a 1 tick
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}

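/*
 * Illustration (not in-tree code): how the restart path above pairs with
 * the stop path in an idle loop of the same era.  The (void) signatures
 * match the calls shown here; later kernels replaced this pair with
 * tick_nohz_idle_enter()/tick_nohz_idle_exit().  arch_cpu_idle_sleep()
 * is a made-up placeholder for the low-power wait (e.g. wfi/hlt).
 */
#include <linux/tick.h>
#include <linux/sched.h>

static void cpu_idle_sketch(void)
{
	while (1) {
		tick_nohz_stop_sched_tick();	/* stop the periodic tick */

		while (!need_resched())
			arch_cpu_idle_sleep();	/* placeholder */

		/* Re-arm the tick; sleep time is accounted to idle above. */
		tick_nohz_restart_sched_tick();
		schedule();
	}
}
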
static int kona_timer_unit_test_run(const char *clk_name,
				    unsigned long clk_rate)
{
	struct kona_timer *lkt;
	int i;
	unsigned long flags;
	unsigned long req_cycle;

	pr_info("%s started!\n", __func__);

	lkt = kona_timer_request((char *)clk_name, -1);
	if (lkt == NULL) {
		pr_err("kona_timer_request returned error\n");
		return -EINVAL;
	}
	pr_info("%s ch_num: %d acquired!\n", clk_name, lkt->ch_num);

	timer_m = (struct timer_measure *)
		kmalloc(sizeof(struct timer_measure), GFP_KERNEL);
	if (!timer_m) {
		pr_err("%s memory allocation failed!\n", __func__);
		return -EINVAL;
	}
	spin_lock_init(&timer_m->lock);

#ifdef CONFIG_PREEMPT
	/* Ensure that cond_resched() won't try to preempt anybody */
	add_preempt_count(PREEMPT_ACTIVE);
#endif

	/*----------------------------------------------------------------------
	 * Test 1 : 1 clock cycle test
	 * - set up 1 clock cycle timer request and measure the expiration
	 *   time.
	 */
	msleep(50);
	pr_info("=== Test case 1 ===\n");
	pr_info("1 clock tick one shot for 1 ms\n");
	req_cycle = 1;
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	timer_m->start[timer_m->counter] = (u64)kona_timer_get_counter(lkt);
	timer_m->req_cycle[timer_m->counter] = req_cycle;
	spin_unlock_irqrestore(&timer_m->lock, flags);

	kona_timer_unit_test_program(lkt, MODE_ONESHOT, req_cycle);
	mdelay(1);
	kona_timer_stop(lkt);

	/* Error check */
	print_unit_test_result(atomic_read(&timer_exp_count), 1, clk_rate);
	if (atomic_read(&timer_exp_count) != 1)
		goto error;

	/*----------------------------------------------------------------------
	 * Test 2 : periodic 1 clock timer test
	 * - set up a periodic 1 clock cycle timer request. SW will handle the
	 *   periodic timer on every timer expiration. Wait for a certain time,
	 *   check how many timers expired and measure the time.
	 */
	msleep(50);
	pr_info("=== Test case 2 ===\n");
	pr_info("Periodic 1 clock tick for 1 ms\n");
	req_cycle = 1;
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	timer_m->start[timer_m->counter] = (u64)kona_timer_get_counter(lkt);
	for (i = 0; i < TIMER_MEASURE_MAX; i++)
		timer_m->req_cycle[i] = req_cycle;
	spin_unlock_irqrestore(&timer_m->lock, flags);

	kona_timer_unit_test_program(lkt, MODE_PERIODIC, req_cycle);
	/* mdelay() may be delayed by busy timer request. Should be short */
	mdelay(1);
	kona_timer_stop(lkt);

	pr_info("Total expiration count: %d\n",
		atomic_read(&timer_exp_count));
	print_unit_test_result(atomic_read(&timer_exp_count),
			       atomic_read(&timer_exp_count), clk_rate);

	/*----------------------------------------------------------------------
	 * Test 3 : one-shot timer test with various time values
	 * - test various short clock cycles and check the timer expiration
	 *   and the real run time of the timer.
	 */
	msleep(50);
	pr_info("=== Test case 3 ===\n");
	pr_info("0~20 clock tick test for 1s, 50ms delay between each req\n");
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	spin_unlock_irqrestore(&timer_m->lock, flags);

	for (i = 0; i < 20; i++) {
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = i;
		timer_m->start[i] = (u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[i] = i;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, i);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		mdelay(50);
		kona_timer_stop(lkt);
	}

	print_unit_test_result(atomic_read(&timer_exp_count), 20, clk_rate);
	if (atomic_read(&timer_exp_count) != 20)
		goto error;

	/*----------------------------------------------------------------------
	 * Test 4 : one-shot timer test with various time values and delays.
	 * - test various short clock cycles with various short delays.
	 *   Verify the timer expiration and timer run-time.
	 */
	msleep(50);
	pr_info("=== Test case 4 ===\n");
	pr_info("0~29 cycle timer test, 0~29 + 3 clock cycle wait\n");
	pr_info(" Ex)1 clock : wait for 4 clock cycle time\n");
	pr_info(" Ex)10 clock : wait for 13 clock cycle time\n");
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	spin_unlock_irqrestore(&timer_m->lock, flags);

	for (i = 0; i < 30; i++) {
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = i;
		timer_m->start[i] = (u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[i] = i;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, i);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		kona_clock_delay(lkt, i + 3);
	}
	kona_timer_stop(lkt);

	pr_info("Total expiration count: %d\n",
		atomic_read(&timer_exp_count));
	print_unit_test_result(atomic_read(&timer_exp_count), 30, clk_rate);

	/*----------------------------------------------------------------------
	 * Test 5 : one-shot timer test with short-long time/delay.
	 * - test various short/long clock cycles with various short/long
	 *   delays. Verify the timer expiration and timer run-time.
	 */
	msleep(50);
	pr_info("=== Test case 5 ===\n");
	pr_info("short and long timer test\n");
	pr_info(" short delay: 0~19 clock cycle wait\n");
	pr_info(" long delay: 50 ms wait\n");
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	spin_unlock_irqrestore(&timer_m->lock, flags);

	for (i = 0; i < 20; i++) {
		/* Short timer request */
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = 2 * i;
		timer_m->start[2 * i] = (u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[2 * i] = i;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, i);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		kona_clock_delay(lkt, i + 1);

		/* Long timer request */
		req_cycle = clk_rate / 50; /* 20ms */
		spin_lock_irqsave(&timer_m->lock, flags);
		timer_m->busy = 1;
		timer_m->counter = 2 * i + 1;
		timer_m->start[2 * i + 1] = (u64)kona_timer_get_counter(lkt);
		timer_m->req_cycle[2 * i + 1] = req_cycle;
		kona_timer_unit_test_program(lkt, MODE_ONESHOT, req_cycle);
		timer_m->busy = 0;
		spin_unlock_irqrestore(&timer_m->lock, flags);

		mdelay(1000 / 50 * 2); /* 40 ms */
	}
	kona_timer_stop(lkt);

	pr_info("Total expiration count: %d\n",
		atomic_read(&timer_exp_count));
	print_unit_test_result(atomic_read(&timer_exp_count), 20 * 2, clk_rate);

	/*----------------------------------------------------------------------
	 * Test 6 : periodic 1s timer test
	 * wait for 10.5 sec with a 1s periodic timer request
	 */
	msleep(50);
	pr_info("=== Test case 6 ===\n");
	pr_info("Periodic 1s timer test for 10.5s\n");
	req_cycle = clk_rate;
	atomic_set(&timer_exp_count, 0);

	spin_lock_irqsave(&timer_m->lock, flags);
	memset(timer_m, 0, sizeof(struct timer_measure));
	timer_m->start[timer_m->counter] = (u64)kona_timer_get_counter(lkt);
	for (i = 0; i < TIMER_MEASURE_MAX; i++)
		timer_m->req_cycle[i] = req_cycle;
	spin_unlock_irqrestore(&timer_m->lock, flags);

	kona_timer_unit_test_program(lkt, MODE_PERIODIC, clk_rate);
	msleep(10500);
	kona_timer_stop(lkt);

	print_unit_test_result(atomic_read(&timer_exp_count),
			       atomic_read(&timer_exp_count), clk_rate);
	if (atomic_read(&timer_exp_count) != 10)
		goto error;

	msleep(50);

	/*
	 * End of kona timer unit test
	 */
	kona_timer_free(lkt);
	pr_info("%s Passed!\n", __func__);

#ifdef CONFIG_PREEMPT
	sub_preempt_count(PREEMPT_ACTIVE);
#endif
	kfree(timer_m);
	timer_m = NULL;
	return 0;

error:
	kona_timer_stop(lkt);
	kona_timer_free(lkt);
	pr_err("%s Failed\n", __func__);

#ifdef CONFIG_PREEMPT
	sub_preempt_count(PREEMPT_ACTIVE);
#endif
	kfree(timer_m);
	timer_m = NULL;
	return -EINVAL;
}

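/*
 * Illustration (not part of the driver): the cycle counts requested above
 * come straight from the timer clock rate.  With a 32768 Hz clock,
 * clk_rate / 50 = 655 cycles is roughly 20 ms, and clk_rate cycles is 1 s.
 * ms_to_cycles() is a hypothetical helper, shown only to make that
 * arithmetic explicit.
 */
#include <linux/kernel.h>

static unsigned long ms_to_cycles(unsigned long clk_rate, unsigned long ms)
{
	/* Round up so very short requests never truncate to zero cycles. */
	return DIV_ROUND_UP(clk_rate * ms, 1000UL);
}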