Code example #1
File: wcd9xxx-irq.c  Project: StanTRC/lge-kernel-e400
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct wcd9xxx *wcd9xxx = data;
	int num_irq_regs = wcd9xxx_num_irq_regs(wcd9xxx);
	u8 status[num_irq_regs];

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx) == false)) {
		dev_err(wcd9xxx->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}
	ret = wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_INTR_STATUS0,
				num_irq_regs, status);
	if (ret < 0) {
		dev_err(wcd9xxx->dev, "Failed to read interrupt status: %d\n",
			ret);
		dev_err(wcd9xxx->dev, "Disable irq %d\n", wcd9xxx->irq);
		disable_irq_wake(wcd9xxx->irq);
		disable_irq_nosync(wcd9xxx->irq);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return IRQ_NONE;
	}

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx->irq_masks_cur[i];

	/* Find out which interrupt was triggered and call that interrupt's
	 * handler function
	 */
	if (status[BIT_BYTE(WCD9XXX_IRQ_SLIMBUS)] &
	    BYTE_BIT_MASK(WCD9XXX_IRQ_SLIMBUS))
		wcd9xxx_irq_dispatch(wcd9xxx, WCD9XXX_IRQ_SLIMBUS);

	/* The codec has only one hardware irq line, shared by its different
	 * internal interrupt sources, so the master irq handler may dispatch
	 * multiple nested irq handlers out of order.  Dispatch the MBHC
	 * interrupts in the order expected by the MBHC state machine.
	 */
	for (i = WCD9XXX_IRQ_MBHC_INSERTION;
	     i >= WCD9XXX_IRQ_MBHC_REMOVAL; i--) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	for (i = WCD9XXX_IRQ_BG_PRECHARGE; i < wcd9xxx->num_irqs; i++) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	wcd9xxx_unlock_sleep(wcd9xxx);

	return IRQ_HANDLED;
}
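The status[] indexing in this handler relies on the BIT_BYTE() and BYTE_BIT_MASK() helpers, which are not shown in the excerpt. As a minimal sketch (the exact definitions are an assumption and may differ between kernel trees), they simply turn an interrupt number into a byte index and a bit mask within the status array:

#include <linux/bitops.h>	/* for BITS_PER_BYTE */

/* Sketch only: map interrupt number "nr" to a byte index and a bit mask */
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)
#define BYTE_BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_BYTE))

With helpers of this shape, interrupt n is tested by masking status[BIT_BYTE(n)] with BYTE_BIT_MASK(n), which is exactly the pattern used in the dispatch code above.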
Code example #2
bool wcd9xxx_lock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os =  wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						  WCD9XXX_PM_SLEEPABLE,
						  WCD9XXX_PM_AWAKE)) ==
							WCD9XXX_PM_SLEEPABLE ||
					(os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
			wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}
	wake_up_all(&wcd9xxx_res->pm_wq);
	return true;
}
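The wait condition above hinges on wcd9xxx_pm_cmpxchg(), whose body is not shown here. A hedged sketch, assuming pm_state is simply swapped under the same pm_lock mutex (the actual driver may differ):

enum wcd9xxx_pm_state wcd9xxx_pm_cmpxchg(
		struct wcd9xxx_core_resource *wcd9xxx_res,
		enum wcd9xxx_pm_state o,
		enum wcd9xxx_pm_state n)
{
	enum wcd9xxx_pm_state old;

	/* Compare-and-exchange on pm_state, serialized by pm_lock */
	mutex_lock(&wcd9xxx_res->pm_lock);
	old = wcd9xxx_res->pm_state;
	if (old == o)
		wcd9xxx_res->pm_state = n;
	mutex_unlock(&wcd9xxx_res->pm_lock);

	return old;
}

With that behavior, the wait_event_timeout() condition succeeds either when the state was WCD9XXX_PM_SLEEPABLE (and has just been moved to WCD9XXX_PM_AWAKE) or when another holder had already moved it to WCD9XXX_PM_AWAKE.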
Code example #3
bool wcd9xxx_lock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	/*
	 * wcd9xxx_{lock,unlock}_sleep are mostly called by wcd9xxx_irq_thread
	 * and its subroutines.  However, btn0_lpress_fn is not a subroutine of
	 * wcd9xxx_irq_thread and can race with it, so wlock_holders must be
	 * protected by the mutex.
	 *
	 * If the system did not resume, we can simply return false so the
	 * codec driver's IRQ handler can return without handling the IRQ.
	 * Since the interrupt line is still active, the codec will raise
	 * another IRQ to retry shortly.
	 */
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os =  wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						  WCD9XXX_PM_SLEEPABLE,
						  WCD9XXX_PM_AWAKE)) ==
							WCD9XXX_PM_SLEEPABLE ||
					(os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
			wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}
	wake_up_all(&wcd9xxx_res->pm_wq);
	return true;
}
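For context, the counterpart wcd9xxx_unlock_sleep() is the reference-counted release side of this scheme: the last holder relaxes the PM QoS request and lets the core go back to a sleepable state. The following is only a sketch under that assumption; the exact logging and state handling in the real driver may differ:

void wcd9xxx_unlock_sleep(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (--wcd9xxx_res->wlock_holders == 0) {
		pr_debug("%s: releasing wake lock\n", __func__);
		/*
		 * Only go back to SLEEPABLE if lock_sleep actually moved the
		 * state to AWAKE; on failure it may still be ASLEEP.
		 */
		if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
			wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      PM_QOS_DEFAULT_VALUE);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);
	wake_up_all(&wcd9xxx_res->pm_wq);
}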
Code example #4
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	u8 status[num_irq_regs], status1[num_irq_regs];

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->codec_bulk_read) {
		dev_err(wcd9xxx_res->dev,
				"%s: Codec Bulk Register read callback not supplied\n",
			   __func__);
		goto err_disable_irq;
	}

	ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
				WCD9XXX_A_INTR_STATUS0,
				num_irq_regs, status);

	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
				"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

	memcpy(status1, status, sizeof(status1));

	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
			BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}

		memset(status, 0xff, num_irq_regs);

		wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
				WCD9XXX_A_INTR_CLEAR0,
				num_irq_regs, status);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					WCD9XXX_A_INTR_MODE, 0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
		dev_err(wcd9xxx_res->dev,
				"Disable irq %d\n", wcd9xxx_res->irq);

		disable_irq_wake(wcd9xxx_res->irq);
		disable_irq_nosync(wcd9xxx_res->irq);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return IRQ_NONE;
}
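Unlike example #1, which hard-codes the MBHC dispatch order, this variant walks an ordered interrupt table. Only intr_num is implied by the code above; the clear_first field and the sample table below are assumptions meant to illustrate how such a table can encode both the dispatch order and a per-interrupt clearing policy:

/* Only intr_num is visible from the dispatch loop above; clear_first is an
 * assumed field, and the table contents are hypothetical.
 */
struct intr_data {
	int intr_num;		/* codec interrupt number */
	bool clear_first;	/* clear the status bit before dispatching */
};

/* Hypothetical table: entries are walked in this order by the irq thread */
static const struct intr_data wcd9xxx_intr_tbl[] = {
	{WCD9XXX_IRQ_SLIMBUS, false},
	{WCD9XXX_IRQ_MBHC_INSERTION, true},
	{WCD9XXX_IRQ_MBHC_REMOVAL, true},
	/* ... remaining interrupts in priority order ... */
};

Keeping the priority in a table rather than in the loop bounds lets different codec generations reuse the same thread function with their own ordering.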
Code example #5
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	u8 status[num_irq_regs], status1[num_irq_regs];

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->codec_bulk_read) {
		dev_err(wcd9xxx_res->dev,
				"%s: Codec Bulk Register read callback not supplied\n",
			   __func__);
		goto err_disable_irq;
	}

	ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
				WCD9XXX_A_INTR_STATUS0,
				num_irq_regs, status);

	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
				"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

	memcpy(status1, status, sizeof(status1));

	/* Find out which interrupt was triggered and call that interrupt's
	 * handler function.
	 *
	 * The codec has only one hardware irq line, shared by its different
	 * internal interrupt sources, so the master irq handler may dispatch
	 * multiple nested irq handlers out of order.  Dispatch the interrupts
	 * in the order maintained by the interrupt table.
	 */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
			BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/*
	 * As a failsafe, if an unhandled irq is found, clear it to prevent an
	 * interrupt storm.
	 * Note that we can only declare an irq unhandled when no irq at all
	 * was handled by a nested irq handler, since Taiko can route a few
	 * irqs to the QDSP instead.  The driver therefore must not clear
	 * pending irqs when some were handled and others were not.
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}

		memset(status, 0xff, num_irq_regs);

		ret = wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
				WCD9XXX_A_INTR_CLEAR0,
				num_irq_regs, status);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					WCD9XXX_A_INTR_MODE, 0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
		dev_err(wcd9xxx_res->dev,
				"Disable irq %d\n", wcd9xxx_res->irq);

		disable_irq_wake(wcd9xxx_res->irq);
		disable_irq_nosync(wcd9xxx_res->irq);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return IRQ_NONE;
}
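For completeness, a threaded handler like this is normally registered with request_threaded_irq(), with no hard (top-half) handler. The helper name and the trigger flags below are assumptions, not the driver's actual registration code:

/* Hypothetical setup helper: wcd9xxx_irq_setup() is not part of the code
 * above, and the flag choice is an assumption.
 */
static int wcd9xxx_irq_setup(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int ret;

	/* No hard handler: all the work happens in wcd9xxx_irq_thread() */
	ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx_res);
	if (ret) {
		pr_err("%s: failed to request irq %d: %d\n",
		       __func__, wcd9xxx_res->irq, ret);
		return ret;
	}

	/* Allow the codec interrupt to wake the system, matching the
	 * disable_irq_wake() call in the error path above.
	 */
	return enable_irq_wake(wcd9xxx_res->irq);
}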