Example no. 1
void do_softirq(void)
{
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;

	pending = local_softirq_pending();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1)
			h->action(h);
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;
}
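Example no. 1 above walks softirq_vec and calls h->action(h) for every bit set in the pending mask. For context, here is a minimal, hypothetical sketch (not taken from any of these examples) of how an action ends up in softirq_vec and how its pending bit gets raised, assuming the two-argument open_softirq() of recent mainline kernels; reusing TASKLET_SOFTIRQ is purely illustrative, since softirq vectors are a fixed enum in <linux/interrupt.h>.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

static void my_softirq_action(struct softirq_action *h)
{
	/* Runs in softirq context with hardirqs enabled, invoked by the
	 * do_softirq()/__do_softirq() loops shown in these examples. */
	pr_debug("my softirq handler ran\n");
}

static int __init my_softirq_setup(void)
{
	/* Store the handler in softirq_vec[] (illustration only: this would
	 * clobber the real tasklet action). */
	open_softirq(TASKLET_SOFTIRQ, my_softirq_action);

	/* Set the per-CPU pending bit reported by local_softirq_pending();
	 * do_softirq() or ksoftirqd will then call the action. */
	raise_softirq(TASKLET_SOFTIRQ);
	return 0;
}
early_initcall(my_softirq_setup);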
Example no. 2
void do_softirq(void)
{
	unsigned pending = local_softirq_pending();
	unsigned allowed = 1 << SCHED_SOFTIRQ | 1 << HRTIMER_SOFTIRQ;
	struct softirq_action *h;
	unsigned long flags;

	__local_bh_disable(SOFTIRQ_OFFSET);

	local_irq_save(flags);
	while ((pending = local_softirq_pending())) {
		/*
		 * For now we only support SCHED_SOFTIRQ
		 * and HRTIMER_SOFTIRQ
		 */
		BUG_ON((~allowed) & pending);

		cpumask_clear_cpu(smp_processor_id(),
				&linsched_cpu_softirq_raised);
		set_softirq_pending(0);
		h = softirq_vec;
		do {
			if (pending & 1)
				h->action(h);
			h++;
			pending >>= 1;
		} while (pending);
	}
	local_irq_restore(flags);

	__local_bh_enable(SOFTIRQ_OFFSET);
}
Example no. 3
/* Run softirq handlers */
asmlinkage void __do_softirq(void)
{
	int retries = MAX_SOFTIRQ_RETRIES;

	do {
		struct softirq_action *h = softirq_vec;
		unsigned long pending = local_softirq_pending();

		/* reset softirq count */
		set_softirq_pending(0);

		local_irq_enable();

		/* While we have a softirq pending... */
		while (pending) {
			/* need to execute current softirq? */
			if (pending & 1)
				h->action(h);
			/* try next softirq */
			h++;
			/* remove pending flag for last softirq */
			pending >>= 1;
		}

		local_irq_disable();

	/* Somebody might have scheduled another softirq in between
	 * (e.g., an IRQ thread or another tasklet). */
	} while (local_softirq_pending() && --retries);
}
Example no. 4
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
 	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
Example no. 6
void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
 	 */
 	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
Example no. 7
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
Example no. 8
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			kstat_incr_softirqs_this_cpu(h - softirq_vec);
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}
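When the restart budget runs out with work still pending, the function above hands off to the per-CPU ksoftirqd thread via wakeup_softirqd(), which is not shown in this listing. A plausible minimal form of that helper, assuming a per-CPU task pointer named ksoftirqd as in the ksoftirqd examples further down:

#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

static void wakeup_softirqd(void)
{
	/* Interrupts are disabled here, so the per-CPU pointer is stable. */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}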
Example no. 9
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
    account_system_vtime(current);
    sub_preempt_count(IRQ_EXIT_OFFSET);
    if (!in_interrupt() && local_softirq_pending())
        invoke_softirq();
    preempt_enable_no_resched();
}
Example no. 10
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		do_softirq();
}
Example no. 11
asmlinkage void do_softirq(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (local_softirq_pending())
		__do_softirq();
	local_irq_restore(flags);
}
Example no. 12
static void softirq_task_function(void *context)
{
	while (true) {
		do_softirq();
		g_n_raises--;
		if (g_n_raises == 0 || local_softirq_pending() == 0) {
			g_n_raises = 0;
			lib_task_wait();
		}
	}
}
Example no. 13
void local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();
	dec_preempt_count();

	preempt_check_resched();
}
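Example no. 13 above, like the other *_bh_enable variants in this listing, ends the same way: drop the SOFTIRQ count and, if that exposed pending work outside interrupt context, run do_softirq() on the spot. A minimal, hypothetical caller-side sketch of the local_bh_disable()/local_bh_enable() pairing these functions complete (the lock and counter names are made up):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);	/* hypothetical data shared with a softirq handler */
static unsigned long rx_packets;

static void bump_rx_counter(void)
{
	local_bh_disable();		/* keep softirqs off this CPU while touching shared data */
	spin_lock(&stats_lock);
	rx_packets++;
	spin_unlock(&stats_lock);
	local_bh_enable();		/* may run do_softirq() here if something became pending */
}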
Example no. 14
asmlinkage void __do_softirq(void)
{
    struct softirq_action *h;
    __u32 pending;
    int max_restart = MAX_SOFTIRQ_RESTART;
    int cpu;

    pending = local_softirq_pending();

    local_bh_disable();
    cpu = smp_processor_id();
restart:
    /* Reset the pending bitmask before enabling irqs */
    set_softirq_pending(0);

    local_irq_enable();

    h = softirq_vec;

    do {
        if (pending & 1) {
            trace_kernel_soft_irq_entry((void*)(h - softirq_vec));
            h->action(h);
            trace_kernel_soft_irq_exit((void*)(h - softirq_vec));
            rcu_bh_qsctr_inc(cpu);
        }
        h++;
        pending >>= 1;
    } while (pending);

    local_irq_disable();

    pending = local_softirq_pending();
    if (pending && --max_restart)
        goto restart;

    if (pending)
        wakeup_softirqd();

    __local_bh_enable();
}
Example no. 15
static int ksoftirqd(void * __bind_cpu)
{
	daemonize("ksoftirqd/%d", (int) (long) __bind_cpu);
	set_user_nice(current, 19);

	__set_current_state(TASK_INTERRUPTIBLE);
	mb();
	__get_cpu_var(ksoftirqd) = current;

	for (;;) {
		if (!local_softirq_pending())
			schedule();
		__set_current_state(TASK_RUNNING);
		while (local_softirq_pending()) {
			do_softirq();
			if(need_resched())
				schedule();
		}
		__set_current_state(TASK_INTERRUPTIBLE);
	}
}
Example no. 16
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}
Example no. 17
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}
Example no. 18
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick();
#endif
	preempt_enable_no_resched();
}
Example no. 19
void local_bh_enable(void)
{
	WARN_ON(irqs_disabled());
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
 	 */
	preempt_count() -= SOFTIRQ_OFFSET - 1;

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	preempt_check_resched();
}
Example no. 20
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
}
Example no. 21
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}
Example no. 22
File: irq.c Project: Einheri/wl500g
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	/* Switch to interrupt stack */
	if (pending) {
		call_softirq();
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}
Example no. 23
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running())
		do_softirq_own_stack();

	local_irq_restore(flags);
}
Example no. 24
static int
default_transmission_control(struct dtp_ps * ps, struct pdu * pdu)
{
        struct dtp *  dtp = ps->dm;
        struct dt  *  dt;
        struct efcp * efcp;

        if (!dtp) {
                LOG_ERR("No instance passed, cannot run policy");
                pdu_destroy(pdu);
                return -1;
        }

        dt = dtp_dt(dtp);
        if (!dt) {
                LOG_ERR("Passed instance has no parent, cannot run policy");
                pdu_destroy(pdu);
                return -1;
        }

        efcp = dt_efcp(dt);
        if (!efcp) {
                LOG_ERR("Passed instance has no EFCP, cannot run policy");
                pdu_destroy(pdu);
                return -1;
        }

        /* Post SDU to RMT */
        LOG_DBG("defaultTxPolicy - sending to rmt");
        if (dtp_sv_max_seq_nr_set(dtp,
                                  pci_sequence_number_get(pdu_pci_get_ro(
                                                          pdu))))
                LOG_ERR("Problems setting max sequence number received "
                        "in default_transmission");

        LOG_DBG("local_soft_irq_pending: %d", local_softirq_pending());

        return common_efcp_pdu_send(efcp,
        			    dtp_rmt(dtp),
        			    pci_destination(pdu_pci_get_ro(pdu)),
        			    pci_qos_id(pdu_pci_get_ro(pdu)),
        		            pdu);
}
Example no. 25
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	trace_hardirq_exit();
#ifdef CONFIG_SEC_DEBUG
	secdbg_msg("hardirq exit");
#endif
	
	sub_preempt_count(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
}
Example no. 26
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	trace_hardirq_exit();
	sub_preempt_count(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
}
Example no. 27
/*
 * Stop the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static void stop_hz_timer(void)
{
	unsigned long flags;
	unsigned long seq, next;
	__u64 timer, todval;
	int cpu = smp_processor_id();

	if (sysctl_hz_timer != 0)
		return;

	cpu_set(cpu, nohz_cpu_mask);

	/*
	 * Leave the clock comparator set up for the next timer
	 * tick if either rcu or a softirq is pending.
	 */
	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
		cpu_clear(cpu, nohz_cpu_mask);
		return;
	}

	/*
	 * This cpu is going really idle. Set up the clock comparator
	 * for the next event.
	 */
	next = next_timer_interrupt();
	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
	todval = -1ULL;
	/* Be careful about overflows. */
	if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
		timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
		if (timer >= jiffies_timer_cc)
			todval = timer;
	}
	set_clock_comparator(todval);
}
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * Call to tick_nohz_start_idle stops the last_update_time from being
	 * updated. Thus, it must not be called in the event we are called from
	 * irq_exit() with the prior state different than idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;

		/*
		 * On SMP we really should only care for the CPU which
		 * has the do_timer duty assigned. All other CPUs can
		 * sleep as long as they want.
		 */
		if (cpu == tick_do_timer_cpu ||
		    tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			time_delta = timekeeping_max_deferment();
		else
			time_delta = KTIME_MAX;
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
			expires = ktime_add_ns(last_update, time_delta);
		} else {
			expires.tv64 = KTIME_MAX;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogram of event if its not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpumask_clear_cpu(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		 if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}
Example no. 29
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}
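ksoftirqd_should_run() above and the run_ksoftirqd() variants of Examples no. 16 and 17 are callbacks for the kernel's smpboot per-CPU kthread framework. A sketch of how they are typically wired together, assuming struct smp_hotplug_thread and smpboot_register_percpu_thread() from <linux/smpboot.h>:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,		/* per-CPU task pointer managed by smpboot */
	.thread_should_run	= ksoftirqd_should_run,	/* Example no. 29 */
	.thread_fn		= run_ksoftirqd,	/* Examples no. 16/17 */
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	/* Creates one ksoftirqd/N thread per online CPU. */
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
	return 0;
}
early_initcall(spawn_ksoftirqd);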
Example no. 30
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;

	/*
	 * Mask out PF_MEMALLOC, as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count_set(prev_count);
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}