/*
 * rtc_set_time - set the RTC's wall-clock time
 * @rtc: the rtc device
 * @tm: broken-down time to program into the hardware
 *
 * Validates @tm against both calendar rules and the device's supported
 * range, removes the device's time offset, then programs the hardware
 * via the driver's ->set_time() under ops_lock.
 * Returns 0 on success or a negative errno.
 */
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = rtc_valid_range(rtc, tm);
	if (err)
		return err;

	/* Translate the exposed time into the hardware's own range. */
	rtc_subtract_offset(rtc, tm);

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_time)
		err = rtc->ops->set_time(rtc->dev.parent, tm);
	else
		err = -EINVAL;

	/* Keep the system awake until irqwork has reprogrammed alarms. */
	pm_stay_awake(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
	/* A timer might have just expired */
	schedule_work(&rtc->irqwork);

	trace_rtc_set_time(rtc_tm_to_time64(tm), err);
	return err;
}
/*
 * PMIC GPIO remote-wakeup interrupt for the MSM EHCI host: counts the
 * event, pins a wakeup source, disarms further wake interrupts, and
 * kicks a runtime resume of the controller.
 */
static irqreturn_t msm_ehci_host_wakeup_irq(int irq, void *data)
{
	struct msm_hcd *mhcd = data;

	mhcd->pmic_gpio_int_cnt++;
	dev_dbg(mhcd->dev, "%s: hsusb host remote wakeup interrupt cnt: %u\n",
			__func__, mhcd->pmic_gpio_int_cnt);

	/* Block system suspend until the resume path takes over. */
	pm_stay_awake(mhcd->dev);

	/*
	 * NOTE(review): unlike msm_async_irq()/msm_hsusb_wakeup_irq(), this
	 * flag is cleared without holding mhcd->wakeup_lock -- confirm there
	 * is no race against the path that re-enables this IRQ.
	 */
	if (mhcd->pmic_gpio_dp_irq_enabled) {
		mhcd->pmic_gpio_dp_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}

	if (!atomic_read(&mhcd->pm_usage_cnt)) {
		atomic_set(&mhcd->pm_usage_cnt, 1);
		pm_runtime_get(mhcd->dev);
	}

	return IRQ_HANDLED;
}
/*
 * GPIO key interrupt (SEC_DEBUG variant): feeds the crash-key checker,
 * records the wakeup reason when fired during suspend, keeps the system
 * awake for wakeup-capable buttons, then defers reporting to either the
 * debounce timer or the work item.
 */
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
	struct gpio_button_data *bdata = dev_id;
#ifdef CONFIG_SEC_DEBUG
	/* Logical key state: raw GPIO level XOR the active_low polarity. */
	int state = (gpio_get_value(bdata->button->gpio) ? 1 : 0) ^ bdata->button->active_low;
	sec_debug_check_crash_key(bdata->button->code, state);
#endif

	BUG_ON(irq != bdata->irq);

	if (suspend_state) {
		irq_in_suspend = true;
		wakeup_reason = bdata->button->code;
		pr_info("%s before resume by %d\n", __func__, wakeup_reason);
	}

	if (bdata->button->wakeup)
		pm_stay_awake(bdata->input->dev.parent);
	if (bdata->timer_debounce)
		mod_timer(&bdata->timer,
			jiffies + msecs_to_jiffies(bdata->timer_debounce));
	else
		schedule_work(&bdata->work);

	return IRQ_HANDLED;
}
/*
 * GPIO key interrupt.  For wakeup-capable buttons that fire while the
 * device is suspended, a key press is synthesized in case the line was
 * already released before this handler could run; the real level is
 * sampled later by the (debounced) delayed work.
 */
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
	struct gpio_button_data *bdata = dev_id;

	BUG_ON(irq != bdata->irq);

	if (bdata->button->wakeup) {
		const struct gpio_keys_button *button = bdata->button;

		pm_stay_awake(bdata->input->dev.parent);
		/* type == 0 is treated the same as EV_KEY here. */
		if (bdata->suspended &&
		    (button->type == 0 || button->type == EV_KEY)) {
			/*
			 * Simulate wakeup key press in case the key has
			 * already released by the time we got interrupt
			 * handler to run.
			 */
			input_report_key(bdata->input, button->code, 1);
		}
	}

	mod_delayed_work(system_wq, &bdata->work,
			 msecs_to_jiffies(bdata->software_debounce));

	return IRQ_HANDLED;
}
/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed.
 *
 * Removes a timer from the rtc devices timerqueue and sets
 * the next alarm event appropriately.
 *
 * Clears the enabled bit on the removed timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);

	timerqueue_del(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_dequeue(timer);
	timer->enabled = 0;

	/* Only reprogram the hardware if the removed timer was the head. */
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;

		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next) {
			/* Queue is now empty: no alarm needed. */
			rtc_alarm_disable(rtc);
			return;
		}
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			/* New head already expired: run irqwork now. */
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		}
	}
}
/*
 * rtc_set_time - set the RTC's wall-clock time
 * @rtc: the rtc device
 * @tm: broken-down time to program into the hardware
 *
 * Programs the hardware under ops_lock, preferring ->set_time() and
 * falling back to the seconds-based ->set_mmss64()/->set_mmss() hooks.
 * Returns 0 on success or a negative errno.
 */
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_time)
		err = rtc->ops->set_time(rtc->dev.parent, tm);
	else if (rtc->ops->set_mmss64) {
		time64_t secs64 = rtc_tm_to_time64(tm);

		err = rtc->ops->set_mmss64(rtc->dev.parent, secs64);
	} else if (rtc->ops->set_mmss) {
		time64_t secs64 = rtc_tm_to_time64(tm);

		err = rtc->ops->set_mmss(rtc->dev.parent, secs64);
	} else
		err = -EINVAL;

	/* Keep the system awake until irqwork has reprogrammed alarms. */
	pm_stay_awake(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
	/* A timer might have just expired */
	schedule_work(&rtc->irqwork);
	return err;
}
/*
 * Keypad interrupt: mask and acknowledge further key IRQs in KPSR, then
 * (only while the device is enabled) pin a wakeup source and start the
 * matrix-scanning timer to debounce and report the change.
 */
static irqreturn_t imx_keypad_irq_handler(int irq, void *dev_id)
{
	struct imx_keypad *keypad = dev_id;
	unsigned short reg_val;

	reg_val = readw(keypad->mmio_base + KPSR);

	/* Disable both interrupt types */
	reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE);
	/* Clear interrupts status bits */
	reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD;
	writew(reg_val, keypad->mmio_base + KPSR);

	if (keypad->enabled) {
		/* Stay awake until the scan has reported the key change. */
		pm_stay_awake(keypad->input_dev->dev.parent);

		/* The matrix is supposed to be changed */
		keypad->stable_count = 0;

		/* Schedule the scanning procedure near in the future */
		mod_timer(&keypad->check_matrix_timer,
			  jiffies + msecs_to_jiffies(2));
	}

	return IRQ_HANDLED;
}
/*
 * Threaded touch-controller interrupt: optionally pins a wakeup source
 * for gesture-wake mode, reads one frame into frame_work, and either
 * publishes it to waiters or discards it right after a reset.
 */
static irqreturn_t bu21150_irq_thread(int irq, void *dev_id)
{
	struct bu21150_data *ts = dev_id;
	u8 *psbuf = (u8 *)ts->frame_work;

	mutex_lock(&ts->mutex_wake);
	/* Gesture wake events must keep the system from suspending. */
	if (!ts->stay_awake && ts->wake_up &&
		ts->scan_mode == AFE_SCAN_GESTURE_SELF_CAP) {
		pm_stay_awake(&ts->client->dev);
		ts->stay_awake = true;
	}
	mutex_unlock(&ts->mutex_wake);

	/* get frame */
	ts->frame_work_get = ts->req_get;
	bu21150_read_register(REG_READ_DATA, ts->frame_work_get.size, psbuf);

	if (ts->reset_flag == 0) {
#ifdef CHECK_SAME_FRAME
		check_same_frame(ts);
#endif
		copy_frame(ts);
		wake_up_frame_waitq(ts);
	} else {
		/* First frame after a reset is thrown away. */
		ts->reset_flag = 0;
	}

	return IRQ_HANDLED;
}
/*
 * switch to host: -> MTU3_VBUS_OFF --> MTU3_ID_GROUND
 * switch to device: -> MTU3_ID_FLOAT --> MTU3_VBUS_VALID
 */
static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
	enum mtu3_vbus_id_state status)
{
	struct ssusb_mtk *ssusb =
		container_of(otg_sx, struct ssusb_mtk, otg_switch);
	struct mtu3 *mtu = ssusb->u3d;

	dev_dbg(ssusb->dev, "mailbox state(%d)\n", status);

	switch (status) {
	case MTU3_ID_GROUND:
		/* Host role: route the port first, then drive VBUS. */
		switch_port_to_host(ssusb);
		ssusb_set_vbus(otg_sx, 1);
		ssusb->is_host = true;
		break;
	case MTU3_ID_FLOAT:
		/* Leaving host role: drop VBUS before handing the port over. */
		ssusb->is_host = false;
		ssusb_set_vbus(otg_sx, 0);
		switch_port_to_device(ssusb);
		break;
	case MTU3_VBUS_OFF:
		/* Device role torn down: allow the system to suspend again. */
		mtu3_stop(mtu);
		pm_relax(ssusb->dev);
		break;
	case MTU3_VBUS_VALID:
		/* avoid suspend when works as device */
		pm_stay_awake(ssusb->dev);
		mtu3_start(mtu);
		break;
	default:
		dev_err(ssusb->dev, "invalid state\n");
	}
}
/*
 * Pin a PM wakeup source on the SDIO host's device.  Only compiled in
 * on kernels newer than 2.6.36 that use the wakeup-source API rather
 * than the Android wakelock interface; otherwise a no-op.
 */
void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
{
#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;

	pm_stay_awake(bcmsdh_osinfo->dev);
#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
}
static irqreturn_t titsc_irq(int irq, void *dev) { struct titsc *ts_dev = dev; struct input_dev *input_dev = ts_dev->input; unsigned int fsm, status, irqclr = 0; unsigned int x = 0, y = 0; unsigned int z1, z2, z; status = titsc_readl(ts_dev, REG_RAWIRQSTATUS); if (status & IRQENB_HW_PEN) { ts_dev->pen_down = true; irqclr |= IRQENB_HW_PEN; pm_stay_awake(ts_dev->mfd_tscadc->dev); } if (status & IRQENB_PENUP) { fsm = titsc_readl(ts_dev, REG_ADCFSM); if (fsm == ADCFSM_STEPID) { ts_dev->pen_down = false; input_report_key(input_dev, BTN_TOUCH, 0); input_report_abs(input_dev, ABS_PRESSURE, 0); input_sync(input_dev); pm_relax(ts_dev->mfd_tscadc->dev); } else { ts_dev->pen_down = true; } irqclr |= IRQENB_PENUP; } if (status & IRQENB_EOS) irqclr |= IRQENB_EOS; /* * ADC and touchscreen share the IRQ line. * FIFO1 interrupts are used by ADC. Handle FIFO0 IRQs here only */ if (status & IRQENB_FIFO0THRES) { titsc_read_coordinates(ts_dev, &x, &y, &z1, &z2); if (ts_dev->pen_down && z1 != 0 && z2 != 0) { /* * Calculate pressure using formula * Resistance(touch) = x plate resistance * * x postion/4096 * ((z2 / z1) - 1) */ z = z1 - z2; z *= x; z *= ts_dev->x_plate_resistance; z /= z2; z = (z + 2047) >> 12; if (z <= MAX_12BIT) { input_report_abs(input_dev, ABS_X, x); input_report_abs(input_dev, ABS_Y, y); input_report_abs(input_dev, ABS_PRESSURE, z); input_report_key(input_dev, BTN_TOUCH, 1); input_sync(input_dev); } }
/*
 * Async wakeup interrupt for the MSM EHCI host: pins a wakeup source,
 * disarms further wake IRQs under wakeup_lock, and triggers a runtime
 * resume of the controller.
 */
static irqreturn_t msm_async_irq(int irq, void *data)
{
	struct msm_hcd *mhcd = data;
	int ret;

	mhcd->async_int_cnt++;
	dev_dbg(mhcd->dev, "%s: hsusb host remote wakeup interrupt cnt: %u\n",
			__func__, mhcd->async_int_cnt);

	pm_stay_awake(mhcd->dev);

	spin_lock(&mhcd->wakeup_lock);
	if (mhcd->async_irq_enabled) {
		mhcd->async_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}
	spin_unlock(&mhcd->wakeup_lock);

	if (!atomic_read(&mhcd->pm_usage_cnt)) {
		ret = pm_runtime_get(mhcd->dev);
		/*
		 * Runtime resume may already be active (ret == 1) or in
		 * progress (ret == -EINPROGRESS); drop the extra usage
		 * count then, otherwise remember that we now hold one.
		 */
		if ((ret == 1) || (ret == -EINPROGRESS))
			pm_runtime_put_noidle(mhcd->dev);
		else
			atomic_set(&mhcd->pm_usage_cnt, 1);
	}

	return IRQ_HANDLED;
}
/*
 * Bring the HSIC controller out of low-power mode: pin a wakeup source,
 * re-vote for bus bandwidth, disarm the wakeup IRQ, restore the VDDCX
 * voltage, and re-enable all controller clocks.  Returns 0, including
 * when the controller was not in LPM.
 */
static int mxhci_hsic_resume(struct mxhci_hsic_hcd *mxhci)
{
	struct usb_hcd *hcd = hsic_to_hcd(mxhci);
	int ret;
	unsigned long flags;

	if (!mxhci->in_lpm) {
		dev_dbg(mxhci->dev, "%s called in !in_lpm\n", __func__);
		return 0;
	}

	pm_stay_awake(mxhci->dev);

	/* enable force-on mode for periph_on */
	clk_set_flags(mxhci->system_clk, CLKFLAG_RETAIN_PERIPH);

	if (mxhci->bus_perf_client) {
		mxhci->bus_vote = true;
		queue_work(mxhci->wq, &mxhci->bus_vote_w);
	}

	spin_lock_irqsave(&mxhci->wakeup_lock, flags);
	if (mxhci->wakeup_irq_enabled) {
		disable_irq_wake(mxhci->wakeup_irq);
		disable_irq_nosync(mxhci->wakeup_irq);
		mxhci->wakeup_irq_enabled = 0;
	}

	/* Drop the usage count taken by the wakeup IRQ path, if any. */
	if (mxhci->pm_usage_cnt) {
		mxhci->pm_usage_cnt = 0;
		pm_runtime_put_noidle(mxhci->dev);
	}
	spin_unlock_irqrestore(&mxhci->wakeup_lock, flags);

	ret = regulator_set_voltage(mxhci->hsic_vddcx, mxhci->vdd_low_vol_level,
		mxhci->vdd_high_vol_level);
	if (ret < 0)
		dev_err(mxhci->dev,
			"unable to set nominal vddcx voltage (no VDD MIN)\n");

	clk_prepare_enable(mxhci->system_clk);
	clk_prepare_enable(mxhci->cal_clk);
	clk_prepare_enable(mxhci->hsic_clk);
	clk_prepare_enable(mxhci->utmi_clk);
	clk_prepare_enable(mxhci->core_clk);

	if (mxhci->wakeup_irq)
		usb_hcd_resume_root_hub(hcd);

	mxhci->in_lpm = 0;

	dev_dbg(mxhci->dev, "HSIC-USB exited from low power mode\n");
	xhci_dbg_log_event(&dbg_hsic, NULL, "Controller resumed", 0);

	return 0;
}
/**
 * rtc_update_irq - Triggered when a RTC interrupt occurs.
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: any
 *
 * Pins a wakeup source on the parent device and defers the actual event
 * processing to the rtc's irqwork.  Silently ignores bad rtc pointers.
 */
void rtc_update_irq(struct rtc_device *rtc,
		unsigned long num, unsigned long events)
{
	if (!IS_ERR_OR_NULL(rtc)) {
		pm_stay_awake(rtc->dev.parent);
		schedule_work(&rtc->irqwork);
	}
}
/*
 * Ensure the USB link device is runtime-active before transmitting.
 * Returns 0 when active, -ENODEV when the link is gone, and -EAGAIN
 * when the caller should retry (kernel is suspending, or the resume we
 * kicked off has not completed yet).
 */
static int link_pm_runtime_get_active(struct link_pm_data *pm_data)
{
	int ret;
	struct usb_link_device *usb_ld = pm_data->usb_ld;
	struct device *dev = &usb_ld->usbdev->dev;

	if (!usb_ld->if_usb_connected || usb_ld->ld.com_state == COM_NONE)
		return -ENODEV;

	if (pm_data->dpm_suspending) {
		mif_err("Kernel in suspending try get_active later\n");
		/* during dpm_suspending..
		 * if AP get tx data, wake up. */
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock(&pm_data->l2_wake);
#else
		pm_stay_awake(pm_data->miscdev.this_device);
#endif
		return -EAGAIN;
	}

	if (dev->power.runtime_status == RPM_ACTIVE) {
		pm_data->resume_retry_cnt = 0;
		return 0;
	}

	/* Kick the resume worker unless a resume is already in flight. */
	if (!pm_data->resume_requested) {
		mif_debug("QW PM\n");
		queue_delayed_work(pm_data->wq, &pm_data->link_pm_work, 0);
	}
	mif_debug("Wait pm\n");
	INIT_COMPLETION(pm_data->active_done);
	ret = wait_for_completion_timeout(&pm_data->active_done,
						msecs_to_jiffies(500));
	/* If usb link was disconnected while waiting ACTIVE State, usb device
	 * was removed, usb_ld->usbdev->dev is invalid and below
	 * dev->power.runtime_status is also invalid address.
	 * This can occur as: LPA L3 -> AP initiated L0 -> disconnect -> link
	 * timeout */
	if (!usb_ld->if_usb_connected || usb_ld->ld.com_state == COM_NONE) {
		mif_info("link disconnected after timed-out\n");
		return -ENODEV;
	}

	if (dev->power.runtime_status != RPM_ACTIVE) {
		mif_info("link_active (%d) retry\n",
						dev->power.runtime_status);
		return -EAGAIN;
	}
	mif_debug("link_active success(%d)\n", ret);
	return 0;
}
/*
 * Power up the OTG controller: pin a wakeup source, raise the idle QoS
 * request, and -- when clock gating is in use -- enable the internal
 * clocks and PHY.  Returns 0, or mv_otg_enable_internal()'s result.
 */
static int mv_otg_enable(struct mv_otg *mvotg)
{
	pm_stay_awake(&mvotg->pdev->dev);
	pm_qos_update_request(&mvotg->qos_idle, mvotg->lpm_qos);

	return mvotg->clock_gating ? mv_otg_enable_internal(mvotg) : 0;
}
/*
 * qpnp_iadc_vadc_sync_read() - sample one IADC and one VADC channel as
 * a synchronized pair, serialized against other IADC/VADC users.
 * Returns 0 on success, -EPROBE_DEFER before the IADC has initialized,
 * or a negative errno on conversion failure.
 */
int32_t qpnp_iadc_vadc_sync_read(
	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
{
	struct qpnp_iadc_drv *iadc = qpnp_iadc;
	int rc = 0;

	if (!iadc || !iadc->iadc_initialized)
		return -EPROBE_DEFER;

	mutex_lock(&iadc->iadc_vadc_lock);

	/* In polled-EOC mode the CPU must not suspend mid-conversion. */
	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	rc = qpnp_check_pmic_temp();
	if (rc) {
		pr_err("PMIC die temp check failed\n");
		goto fail;
	}

	iadc->iadc_mode_sel = true;

	rc = qpnp_vadc_iadc_sync_request(v_channel);
	if (rc) {
		pr_err("Configuring VADC failed\n");
		goto fail;
	}

	rc = qpnp_iadc_read(i_channel, i_result);
	if (rc)
		pr_err("Configuring IADC failed\n");
	/* Intentional fall through to release VADC */

	rc = qpnp_vadc_iadc_sync_complete_request(v_channel, v_result);
	if (rc)
		pr_err("Releasing VADC failed\n");

fail:
	iadc->iadc_mode_sel = false;

	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->iadc_vadc_lock);

	return rc;
}
/*
 * Record @reason as requiring the system to stay awake.  The wakeup
 * source is pinned only on the 0 -> non-zero transition of the reason
 * mask; the paired pm_relax happens when all reasons are cleared.
 */
static void smb349_pm_stay_awake(struct smb349_dual_charger *chip, int reason)
{
	int new_reasons;

	mutex_lock(&chip->pm_lock);
	new_reasons = chip->wake_reasons | reason;
	if (chip->wake_reasons == 0 && new_reasons != 0) {
		/* First active reason: actually pin the wakeup source. */
		dev_dbg(chip->dev, "staying awake: 0x%02x (bit %d)\n",
			new_reasons, reason);
		pm_stay_awake(chip->dev);
	}
	chip->wake_reasons = new_reasons;
	mutex_unlock(&chip->pm_lock);
}
/*
 * GPIO key interrupt: keep the system awake for wakeup-capable buttons
 * and let the delayed work debounce and report the new key state.
 */
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
	struct gpio_button_data *bdata = dev_id;
	unsigned long debounce = msecs_to_jiffies(bdata->software_debounce);

	BUG_ON(irq != bdata->irq);

	if (bdata->button->wakeup)
		pm_stay_awake(bdata->input->dev.parent);

	mod_delayed_work(system_wq, &bdata->work, debounce);

	return IRQ_HANDLED;
}
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id) { struct gpio_button_data *bdata = dev_id; BUG_ON(irq != bdata->irq); if (bdata->button->wakeup) pm_stay_awake(bdata->input->dev.parent); if (bdata->timer_debounce) mod_timer(&bdata->timer, jiffies + msecs_to_jiffies(bdata->timer_debounce)); else schedule_work(&bdata->work); return IRQ_HANDLED; }
bool wcd9xxx_spmi_lock_sleep() { /* * wcd9xxx_spmi_{lock/unlock}_sleep will be called by wcd9xxx_spmi_irq_thread * and its subroutines only motly. * but btn0_lpress_fn is not wcd9xxx_spmi_irq_thread's subroutine and * It can race with wcd9xxx_spmi_irq_thread. * So need to embrace wlock_holders with mutex. */ mutex_lock(&map.pm_lock); if (map.wlock_holders++ == 0) { pr_debug("%s: holding wake lock\n", __func__); pm_qos_update_request(&map.pm_qos_req, msm_cpuidle_get_deep_idle_latency()); #ifdef VENDOR_EDIT //[email protected], 2015/03/19, Add for Qcom patch, //Headset sometime not detected when phone is sleep pm_stay_awake(&map.spmi[0]->dev); #endif /* VENDOR_EDIT */ } mutex_unlock(&map.pm_lock); pr_debug("%s: wake lock counter %d\n", __func__, map.wlock_holders); pr_debug("%s: map.pm_state = %d\n", __func__, map.pm_state); if (!wait_event_timeout(map.pm_wq, ((wcd9xxx_spmi_pm_cmpxchg( WCD9XXX_PM_SLEEPABLE, WCD9XXX_PM_AWAKE)) == WCD9XXX_PM_SLEEPABLE || (wcd9xxx_spmi_pm_cmpxchg( WCD9XXX_PM_SLEEPABLE, WCD9XXX_PM_AWAKE) == WCD9XXX_PM_AWAKE)), msecs_to_jiffies( WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) { pr_warn("%s: system didn't resume within %dms, s %d, w %d\n", __func__, WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, map.pm_state, map.wlock_holders); wcd9xxx_spmi_unlock_sleep(); return false; } wake_up_all(&map.pm_wq); pr_debug("%s: leaving pm_state = %d\n", __func__, map.pm_state); return true; }
bool wcd9xxx_spmi_lock_sleep() { /* * wcd9xxx_spmi_{lock/unlock}_sleep will be called by * wcd9xxx_spmi_irq_thread * and its subroutines only motly. * but btn0_lpress_fn is not wcd9xxx_spmi_irq_thread's subroutine and * It can race with wcd9xxx_spmi_irq_thread. * So need to embrace wlock_holders with mutex. */ pr_info("%s, wlock_holders=%d\n", __func__, map.wlock_holders); mutex_lock(&map.pm_lock); if (map.wlock_holders++ == 0) { pr_info("%s: holding wake lock\n", __func__); pm_qos_update_request(&map.pm_qos_req, msm_cpuidle_get_deep_idle_latency()); pm_stay_awake(&map.spmi[0]->dev); } mutex_unlock(&map.pm_lock); pr_info("%s: wake lock counter %d\n", __func__, map.wlock_holders); pr_info("%s: map.pm_state = %d\n", __func__, map.pm_state); if (!wait_event_timeout(map.pm_wq, ((wcd9xxx_spmi_pm_cmpxchg( WCD9XXX_PM_SLEEPABLE, WCD9XXX_PM_AWAKE)) == WCD9XXX_PM_SLEEPABLE || (wcd9xxx_spmi_pm_cmpxchg( WCD9XXX_PM_SLEEPABLE, WCD9XXX_PM_AWAKE) == WCD9XXX_PM_AWAKE)), msecs_to_jiffies( WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) { pr_warn("%s: system didn't resume within %dms, s %d, w %d\n", __func__, WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, map.pm_state, map.wlock_holders); wcd9xxx_spmi_unlock_sleep(); return false; } wake_up_all(&map.pm_wq); pr_debug("%s: leaving pm_state = %d\n", __func__, map.pm_state); return true; }
/*
 * HSIC remote-wakeup interrupt: pins a wakeup source, disarms the wake
 * IRQ under wakeup_lock, and runtime-resumes the controller.
 */
static irqreturn_t msm_hsusb_wakeup_irq(int irq, void *data)
{
	struct msm_hcd *mhcd = data;
	int ret;

	mhcd->wakeup_int_cnt++;
	dev_dbg(mhcd->dev, "%s: hsic remote wakeup interrupt cnt: %u\n",
			__func__, mhcd->wakeup_int_cnt);

	pm_stay_awake(mhcd->dev);

	spin_lock(&mhcd->wakeup_lock);
	if (mhcd->wakeup_irq_enabled) {
		mhcd->wakeup_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}
	spin_unlock(&mhcd->wakeup_lock);

	if (!atomic_read(&mhcd->pm_usage_cnt)) {
		ret = pm_runtime_get(mhcd->dev);
		/*
		 * controller runtime resume can race with us.
		 * if we are active (ret == 1) or resuming
		 * (ret == -EINPROGRESS), decrement the
		 * PM usage counter before returning.
		 */
		if ((ret == 1) || (ret == -EINPROGRESS)) {
			pm_runtime_put_noidle(mhcd->dev);
		} else {
			/* Let khubd know of hub port status change */
			if (mhcd->ehci.no_selective_suspend)
				mhcd->ehci.suspended_ports = 1;
			atomic_set(&mhcd->pm_usage_cnt, 1);
		}
	}

	return IRQ_HANDLED;
}
/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added.
 *
 * Enqueues a timer onto the rtc devices timerqueue and sets
 * the next alarm event appropriately.
 *
 * Sets the enabled bit on the added timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	struct rtc_time tm;
	ktime_t now;

	timer->enabled = 1;
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);

	/* Skip over expired timers */
	while (next) {
		if (next->expires >= now)
			break;
		next = timerqueue_iterate_next(next);
	}

	timerqueue_add(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_enqueue(timer);
	/* Reprogram the alarm only if the new timer is now the soonest. */
	if (!next || ktime_before(timer->node.expires, next->expires)) {
		struct rtc_wkalrm alarm;
		int err;

		alarm.time = rtc_ktime_to_tm(timer->node.expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			/* Alarm already expired: run irqwork immediately. */
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		} else if (err) {
			/* Could not arm the hardware: back the timer out. */
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			return err;
		}
	}
	return 0;
}
/*
 * GPIO key interrupt (SEC ISR-crash-key variant): samples the logical
 * key state, keeps the system awake for wakeup buttons, defers the
 * report to the debounce timer or work item, then feeds the sampled
 * state to the crash-key checker.
 */
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
	struct gpio_button_data *bdata = dev_id;
#ifdef CONFIG_SEC_DEBUG_ISR_CRASH
	/* Logical key state: raw GPIO level XOR the active_low polarity. */
	int state = (gpio_get_value(bdata->button->gpio) ? 1 : 0) ^ bdata->button->active_low;
#endif

	BUG_ON(irq != bdata->irq);

	if (bdata->button->wakeup)
		pm_stay_awake(bdata->input->dev.parent);
	if (bdata->timer_debounce)
		mod_timer(&bdata->timer,
			jiffies + msecs_to_jiffies(bdata->timer_debounce));
	else
		schedule_work(&bdata->work);

#ifdef CONFIG_SEC_DEBUG_ISR_CRASH
	sec_debug_check_crash_key(bdata->button->code, state);
#endif
	return IRQ_HANDLED;
}
/*
 * HSIC remote-wakeup interrupt for the xHCI HSIC host: pins a wakeup
 * source, then disarms the wake IRQ and runtime-resumes the controller,
 * both under wakeup_lock.
 */
static irqreturn_t mxhci_hsic_wakeup_irq(int irq, void *data)
{
	struct mxhci_hsic_hcd *mxhci = data;
	int ret;

	mxhci->wakeup_int_cnt++;
	dev_dbg(mxhci->dev, "%s: remote wakeup interrupt cnt: %u\n",
			__func__, mxhci->wakeup_int_cnt);
	xhci_dbg_log_event(&dbg_hsic, NULL, "Remote Wakeup IRQ",
			mxhci->wakeup_int_cnt);

	pm_stay_awake(mxhci->dev);

	spin_lock(&mxhci->wakeup_lock);
	if (mxhci->wakeup_irq_enabled) {
		mxhci->wakeup_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}

	if (!mxhci->pm_usage_cnt) {
		ret = pm_runtime_get(mxhci->dev);
		/*
		 * HSIC runtime resume can race with us.
		 * if we are active (ret == 1) or resuming
		 * (ret == -EINPROGRESS), decrement the
		 * PM usage counter before returning.
		 */
		if ((ret == 1) || (ret == -EINPROGRESS))
			pm_runtime_put_noidle(mxhci->dev);
		else
			mxhci->pm_usage_cnt = 1;
	}
	spin_unlock(&mxhci->wakeup_lock);

	return IRQ_HANDLED;
}
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id) { struct gpio_button_data *bdata = dev_id; BUG_ON(irq != bdata->irq); if (irqd_get_trigger_type(irq_get_irq_data(irq)) != bdata->irqflags) { pr_info("wake up cpu from deepsleep by gpio edge interrupt!"); irq_set_irq_type(bdata->irq, bdata->irqflags); } if (bdata->is_deepsleep) { bdata->is_deepsleep = false; } if (bdata->button->wakeup) pm_stay_awake(bdata->input->dev.parent); if (bdata->timer_debounce) mod_timer(&bdata->timer, jiffies + msecs_to_jiffies(bdata->timer_debounce)); else schedule_work(&bdata->work); return IRQ_HANDLED; }
/*
 * qpnp_iadc_calibrate_for_trim() - measure the gain and offset
 * reference points and write the resulting offset trim to the PMIC.
 * @iadc: the iadc chip
 * @batfet_closed: whether the battery FET is currently closed; selects
 *                 which offset channel is representative on affected
 *                 PM8941 revisions.
 *
 * Returns 0 on success, -EPROBE_DEFER before probe, or a negative errno.
 */
int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
						bool batfet_closed)
{
	uint8_t rslt_lsb, rslt_msb;
	int32_t rc = 0, version = 0;
	uint16_t raw_data;
	uint32_t mode_sel = 0;
	bool iadc_offset_ch_batfet_check;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	mutex_lock(&iadc->adc->adc_lock);

	/* In polled-EOC mode the CPU must stay awake for the conversion. */
	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	iadc->adc->amux_prop->decimation = DECIMATION_TYPE1;
	iadc->adc->amux_prop->fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;

	/* Gain reference point. */
	rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
						&raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail;
	}

	iadc->adc->calib.gain_raw = raw_data;

	/*
	 * there is a features on PM8941 in the BMS where if the batfet is
	 * opened the BMS reads from INTERNAL_RSENSE (channel 0) actually go to
	 * OFFSET_CALIBRATION_CSP_CSN (channel 5). Hence if batfet is opened
	 * we have to calibrate based on OFFSET_CALIBRATION_CSP_CSN even for
	 * internal rsense.
	 */
	version = qpnp_adc_get_revid_version(iadc->dev);
	if ((version == QPNP_REV_ID_8941_3_1) ||
			(version == QPNP_REV_ID_8941_3_0) ||
			(version == QPNP_REV_ID_8941_2_0))
		iadc_offset_ch_batfet_check = true;
	else
		iadc_offset_ch_batfet_check = false;

	if ((iadc_offset_ch_batfet_check && !batfet_closed) ||
						(iadc->external_rsense)) {
		/* external offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	} else {
		/* internal offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	}

	iadc->adc->calib.offset_raw = raw_data;
	if (rc < 0) {
		pr_err("qpnp adc offset/gain calculation failed\n");
		goto fail;
	}

	/* This PM8026 revision reports a fixed ideal gain span instead. */
	if (iadc->iadc_comp.revision_dig_major == QPNP_IADC_PM8026_2_REV2
		&& iadc->iadc_comp.revision_ana_minor ==
						QPNP_IADC_PM8026_2_REV3)
		iadc->adc->calib.gain_raw =
			iadc->adc->calib.offset_raw + IADC_IDEAL_RAW_GAIN;

	pr_debug("raw gain:0x%x, raw offset:0x%x\n",
		iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);

	rc = qpnp_convert_raw_offset_voltage(iadc);
	if (rc < 0) {
		pr_err("qpnp raw_voltage conversion failed\n");
		goto fail;
	}

	rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
							QPNP_BIT_SHIFT_8;
	rslt_lsb = raw_data & QPNP_RAW_CODE_16_BIT_LSB_MASK;

	pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);

	/* The trim registers sit behind a secure-access unlock sequence. */
	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
						rslt_msb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for MSB write\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
						rslt_lsb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for LSB write\n");
		goto fail;
	}

fail:
	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);

	return rc;
}
/*
 * qpnp_iadc_vadc_sync_read() - sample one IADC and one VADC channel as
 * a synchronized pair and convert the IADC raw reading to uV and uA.
 * @iadc: the iadc chip
 * @i_channel: IADC channel to sample
 * @i_result: filled with the current measurement
 * @v_channel: VADC channel to sample
 * @v_result: filled with the voltage measurement
 *
 * Returns 0 on success, -EPROBE_DEFER before probe, or a negative errno.
 */
int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
	enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
	enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
{
	int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0;
	int dt_index = 0;
	uint16_t raw_data;
	int32_t rsense_u_ohms = 0;
	int64_t result_current;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	/* A zero gain-offset span would divide by zero further down. */
	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
		pr_err("raw offset errors! run iadc calibration again\n");
		return -EINVAL;
	}

	mutex_lock(&iadc->adc->adc_lock);

	/* In polled-EOC mode the CPU must stay awake for the conversion. */
	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	iadc->iadc_mode_sel = true;

	rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel);
	if (rc) {
		pr_err("Configuring VADC failed\n");
		goto fail;
	}

	/* Find the device-tree entry describing this IADC channel. */
	while (((enum qpnp_iadc_channels)
		iadc->adc->adc_channels[dt_index].channel_num
		!= i_channel) && (dt_index < iadc->max_channels_available))
		dt_index++;

	if (dt_index >= iadc->max_channels_available) {
		pr_err("not a valid IADC channel\n");
		rc = -EINVAL;
		goto fail;
	}

	iadc->adc->amux_prop->decimation =
			iadc->adc->adc_channels[dt_index].adc_decimation;
	iadc->adc->amux_prop->fast_avg_setup =
			iadc->adc->adc_channels[dt_index].fast_avg_setup;

	rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail_release_vadc;
	}

	rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
	pr_debug("current raw:0%x and rsense:%d\n",
			raw_data, rsense_n_ohms);
	rsense_u_ohms = rsense_n_ohms/1000;
	num = raw_data - iadc->adc->calib.offset_raw;
	if (num < 0) {
		sign = 1;
		num = -num;
	}

	i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/
		(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
	result_current = i_result->result_uv;
	result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
	/* Intentional fall through. Process the result w/o comp */
	if (!rsense_u_ohms) {
		pr_err("rsense error=%d\n", rsense_u_ohms);
		goto fail_release_vadc;
	}

	do_div(result_current, rsense_u_ohms);

	if (sign) {
		i_result->result_uv = -i_result->result_uv;
		result_current = -result_current;
	}
	/*
	 * NOTE(review): result is negated around the compensation call --
	 * presumably qpnp_iadc_comp_result() expects the opposite sign;
	 * confirm against its definition.
	 */
	result_current *= -1;
	rc = qpnp_iadc_comp_result(iadc, &result_current);
	if (rc < 0)
		pr_err("Error during compensating the IADC\n");
	rc = 0;
	result_current *= -1;

	i_result->result_ua = (int32_t) result_current;

fail_release_vadc:
	rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev,
			v_channel, v_result);
	if (rc)
		pr_err("Releasing VADC failed\n");
fail:
	iadc->iadc_mode_sel = false;

	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);

	return rc;
}
int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc, enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result, enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result) { int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0; uint16_t raw_data; int32_t rsense_u_ohms = 0; int64_t result_current; if (qpnp_iadc_is_valid(iadc) < 0) return -EPROBE_DEFER; mutex_lock(&iadc->adc->adc_lock); if (iadc->iadc_poll_eoc) { pr_debug("acquiring iadc eoc wakelock\n"); pm_stay_awake(iadc->dev); } iadc->iadc_mode_sel = true; rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel); if (rc) { pr_err("Configuring VADC failed\n"); goto fail; } rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel); if (rc < 0) { pr_err("qpnp adc result read failed with %d\n", rc); goto fail_release_vadc; } rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms); pr_debug("current raw:0%x and rsense:%d\n", raw_data, rsense_n_ohms); rsense_u_ohms = rsense_n_ohms/1000; num = raw_data - iadc->adc->calib.offset_raw; if (num < 0) { sign = 1; num = -num; } i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/ (iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw); result_current = i_result->result_uv; result_current *= QPNP_IADC_NANO_VOLTS_FACTOR; /* Intentional fall through. Process the result w/o comp */ do_div(result_current, rsense_u_ohms); if (sign) { i_result->result_uv = -i_result->result_uv; result_current = -result_current; } result_current *= -1; rc = qpnp_iadc_comp_result(iadc, &result_current); if (rc < 0) pr_err("Error during compensating the IADC\n"); rc = 0; result_current *= -1; i_result->result_ua = (int32_t) result_current; fail_release_vadc: rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev, v_channel, v_result); if (rc) pr_err("Releasing VADC failed\n"); fail: iadc->iadc_mode_sel = false; if (iadc->iadc_poll_eoc) { pr_debug("releasing iadc eoc wakelock\n"); pm_relax(iadc->dev); } mutex_unlock(&iadc->adc->adc_lock); return rc; }