/*
 * rfkill_schedule_toggle - queue a toggle of one rfkill type's sw state
 *
 * Ignored while the EPO lock is active or a global operation is
 * pending; otherwise the toggle is queued and the rate-limited
 * handler is scheduled.
 */
static void rfkill_schedule_toggle(enum rfkill_type type)
{
	unsigned long flags;

	if (rfkill_is_epo_lock_active())
		return;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	if (!rfkill_op_pending) {
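		/* mark the type pending and flip its requested sw state */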
		__set_bit(type, rfkill_sw_pending);
		__change_bit(type, rfkill_sw_state);
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
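
/*
 * rfkill_schedule_global_op - schedule a global rfkill operation
 *
 * Records the requested operation and marks it pending.  An EPO
 * request bypasses the rate limiter (unless the EPO lock is already
 * active) and runs immediately; anything else goes through
 * rfkill_schedule_ratelimited().
 */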
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
	unsigned long flags;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	rfkill_op = op;
	rfkill_op_pending = true;
	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
		/* bypass the limiter for EPO */
		mod_delayed_work(system_wq, &rfkill_op_work, 0);
		rfkill_last_scheduled = jiffies;
	} else {
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}
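
/*
 * rfkill_op_handler - workqueue handler for scheduled rfkill operations
 *
 * Runs any pending global operation first, then applies the queued
 * per-type toggles (skipped while the EPO lock is active).  The lock
 * is dropped around each handler call, so the loop restarts if a new
 * global op was scheduled in the meantime.
 */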
static void rfkill_op_handler(struct work_struct *work)
{
	unsigned int i;
	bool c;

	spin_lock_irq(&rfkill_op_lock);
	do {
		if (rfkill_op_pending) {
			enum rfkill_sched_op op = rfkill_op;
			rfkill_op_pending = false;
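			/* a global op supersedes any queued per-type toggles */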
			memset(rfkill_sw_pending, 0,
				sizeof(rfkill_sw_pending));
			spin_unlock_irq(&rfkill_op_lock);

			__rfkill_handle_global_op(op);

			spin_lock_irq(&rfkill_op_lock);

			/*
			 * handle global ops first -- during unlocked period
			 * we might have gotten a new global op.
			 */
			if (rfkill_op_pending)
				continue;
		}

		if (rfkill_is_epo_lock_active())
			continue;

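		/* apply per-type toggles queued by rfkill_schedule_toggle() */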
		for (i = 0; i < NUM_RFKILL_TYPES; i++) {
			if (__test_and_clear_bit(i, rfkill_sw_pending)) {
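				/* the sw_state bit is the parity of queued toggles */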
				c = __test_and_clear_bit(i, rfkill_sw_state);
				spin_unlock_irq(&rfkill_op_lock);

				__rfkill_handle_normal_op(i, c);

				spin_lock_irq(&rfkill_op_lock);
			}
		}
	} while (rfkill_op_pending);
	spin_unlock_irq(&rfkill_op_lock);
}