/**
 * gma_power_begin - begin requiring power
 * @dev: our DRM device
 * @force_on: true to force power on
 *
 * Begin an action that requires the display power island is enabled.
 * We refcount the islands.
 */
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&power_ctrl_lock, flags);

	/* Power already on ? */
	if (dev_priv->display_power) {
		dev_priv->display_count++;
		pm_runtime_get(&dev->pdev->dev);
		spin_unlock_irqrestore(&power_ctrl_lock, flags);
		return true;
	}
	if (force_on == false)
		goto out_false;

	/* Ok power up needed */
	ret = gma_resume_pci(dev->pdev);
	if (ret == 0) {
		psb_irq_preinstall(dev);
		psb_irq_postinstall(dev);
		pm_runtime_get(&dev->pdev->dev);
		dev_priv->display_count++;
		spin_unlock_irqrestore(&power_ctrl_lock, flags);
		return true;
	}

out_false:
	spin_unlock_irqrestore(&power_ctrl_lock, flags);
	return false;
}
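/*
 * For context, the matching release path. This is a hedged sketch based
 * on the gma500 driver's gma_power_end(); details may differ between
 * kernel versions. It drops the island refcount taken in
 * gma_power_begin() and releases the runtime PM reference.
 */
void gma_power_end(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&power_ctrl_lock, flags);
	dev_priv->display_count--;
	WARN_ON(dev_priv->display_count < 0);
	spin_unlock_irqrestore(&power_ctrl_lock, flags);
	pm_runtime_put(&dev->pdev->dev);
}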
static int cyttsp4_mt_open(struct input_dev *input)
{
	struct device *dev = input->dev.parent;
	struct cyttsp4_device *ttsp =
		container_of(dev, struct cyttsp4_device, dev);

	dev_dbg(dev, "%s\n", __func__);

	pm_runtime_get(dev);

	dev_vdbg(dev, "%s: setup subscriptions\n", __func__);

	/* set up touch callback */
	cyttsp4_subscribe_attention(ttsp, CY_ATTEN_IRQ,
		cyttsp4_mt_attention, CY_MODE_OPERATIONAL);

	/* set up startup callback */
	cyttsp4_subscribe_attention(ttsp, CY_ATTEN_STARTUP,
		cyttsp4_startup_attention, 0);

	/* set up wakeup callback */
	cyttsp4_subscribe_attention(ttsp, CY_ATTEN_WAKE,
		cyttsp4_mt_wake_attention, 0);

	return 0;
}
static int vpif_probe(struct platform_device *pdev)
{
	struct resource *res;		/* file-scope in the full driver */
	resource_size_t res_len;
	int status = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;

	res_len = resource_size(res);

	res = request_mem_region(res->start, res_len, res->name);
	if (!res)
		return -EBUSY;

	vpif_base = ioremap(res->start, res_len);
	if (!vpif_base) {
		status = -EBUSY;
		goto fail;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get(&pdev->dev);

	spin_lock_init(&vpif_lock);
	dev_info(&pdev->dev, "vpif probe success\n");
	return 0;

fail:
	release_mem_region(res->start, res_len);
	return status;
}
static irqreturn_t intel_mid_gps_hostwake_isr(int irq, void *dev)
{
	struct intel_mid_gps_platform_data *pdata = dev_get_drvdata(dev);
	struct device *tty_dev;
	int hostwake;

	hostwake = gpio_get_value(pdata->gpio_hostwake);

	tty_dev = intel_mid_hsu_set_wake_peer(pdata->hsu_port, NULL);
	if (!tty_dev)
		pr_err("%s: unable to get the HSU tty device\n", __func__);

	irq_set_irq_type(irq, hostwake ?
			 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING);

	if (hostwake) {
		wake_lock(&hostwake_lock);
		if (tty_dev)
			pm_runtime_get(tty_dev);
	} else {
		if (tty_dev)
			pm_runtime_put(tty_dev);
		wake_unlock(&hostwake_lock);
	}

	return IRQ_HANDLED;
}
static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct byt_gpio *vg = to_byt_gpio(chip);
	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
	u32 value;
	bool special;

	/*
	 * In most cases, func pin mux 000 means GPIO function.
	 * But on some pins func pin mux 001 represents the GPIO
	 * function. Only allow the user to export a pin whose func
	 * pin mux was preset to the GPIO function by the BIOS/FW.
	 */
	value = readl(reg) & BYT_PIN_MUX;
	special = is_special_pin(vg, offset);
	if ((special && value != 1) || (!special && value)) {
		dev_err(&vg->pdev->dev,
			"pin %u cannot be used as GPIO.\n", offset);
		return -EINVAL;
	}

	pm_runtime_get(&vg->pdev->dev);

	return 0;
}
static u32 res_trk_vidc_pwr_up(void)
{
	mutex_lock(&resource_context.lock);

	if (pm_runtime_get(resource_context.device) < 0) {
		VCDRES_MSG_ERROR("Error : pm_runtime_get failed\n");
		goto bail_out;
	}

	if (!resource_context.footswitch)
		resource_context.footswitch =
			regulator_get(resource_context.device, "vdd");
	if (IS_ERR(resource_context.footswitch)) {
		VCDRES_MSG_ERROR("foot switch get failed\n");
		resource_context.footswitch = NULL;
	} else
		regulator_enable(resource_context.footswitch);

	if (!res_trk_get_clk())
		goto rel_vidc_pm_runtime;

	mutex_unlock(&resource_context.lock);
	return true;

rel_vidc_pm_runtime:
	if (pm_runtime_put(resource_context.device) < 0)
		VCDRES_MSG_ERROR("Error : pm_runtime_put failed");

bail_out:
	mutex_unlock(&resource_context.lock);
	return false;
}
static irqreturn_t tdisc_interrupt(int irq, void *dev_id)
{
	/*
	 * The touch disc initially generates an interrupt on any
	 * touch. The interrupt line is pulled low and remains low
	 * until there are touch operations being performed. In case
	 * there are no further touch operations, the line goes high.
	 * The same process repeats the next time the disc is touched.
	 *
	 * We do the following operations once we receive an interrupt:
	 * 1. Disable the IRQ for any further interrupts.
	 * 2. Schedule work every 25ms if the GPIO is still low.
	 * 3. In the work queue, do an I2C read to get the touch data.
	 * 4. If the GPIO is pulled high, enable the IRQ and cancel the work.
	 */
	struct tdisc_data *dd = dev_id;
	int rc;

	rc = pm_runtime_get(&dd->clientp->dev);
	if (rc < 0)
		dev_dbg(&dd->clientp->dev, "%s: pm_runtime_get failed\n",
			__func__);

	pr_debug("%s: TDISC IRQ ! :-)\n", __func__);

	/* Schedule the work immediately */
	disable_irq_nosync(dd->clientp->irq);
	schedule_delayed_work(&dd->tdisc_work, 0);
	return IRQ_HANDLED;
}
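/*
 * The ISR above only covers steps 1-2 of the comment; a minimal sketch
 * of the matching delayed-work side (steps 2-4) might look like the
 * following. The names dd->gpio and tdisc_read_report() are assumptions
 * for illustration, not part of the original driver, and the original
 * may balance the runtime PM reference differently.
 */
#define TDISC_POLL_MS	25

static void tdisc_work_f(struct work_struct *work)
{
	struct tdisc_data *dd = container_of(to_delayed_work(work),
					     struct tdisc_data, tdisc_work);

	if (gpio_get_value(dd->gpio)) {
		/* Step 4: line is high again - stop polling, re-arm IRQ */
		pm_runtime_put(&dd->clientp->dev);
		enable_irq(dd->clientp->irq);
		return;
	}

	/* Step 3: line still low - fetch touch data over I2C */
	tdisc_read_report(dd);

	/* Step 2: poll again in 25 ms while the GPIO stays low */
	schedule_delayed_work(&dd->tdisc_work,
			      msecs_to_jiffies(TDISC_POLL_MS));
}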
void request_autopm_lock(int status)
{
	struct diag_bridge *dev = __dev;

	if (!dev || !dev->udev)
		return;

	pr_info("%s: set runtime pm lock : %d\n", __func__, status);

	if (status) {
		if (!atomic_read(&dev->pmlock_cnt)) {
			atomic_inc(&dev->pmlock_cnt);
			pr_info("get lock\n");
			pm_runtime_get(&dev->udev->dev);
			pm_runtime_forbid(&dev->udev->dev);
		} else
			atomic_inc(&dev->pmlock_cnt);
	} else {
		if (!atomic_read(&dev->pmlock_cnt))
			pr_info("unbalanced release\n");
		else if (atomic_dec_and_test(&dev->pmlock_cnt)) {
			pr_info("release lock\n");
			pm_runtime_allow(&dev->udev->dev);
			pm_runtime_put(&dev->udev->dev);
		}
	}
}
static void arizona_start_mic(struct arizona_extcon_info *info)
{
	struct arizona *arizona = info->arizona;
	bool change;
	int ret;

	info->detecting = true;
	info->mic = false;
	info->jack_flips = 0;

	/* Microphone detection can't use idle mode */
	pm_runtime_get(info->dev);

	ret = regulator_enable(info->micvdd);
	if (ret != 0)
		dev_err(arizona->dev, "Failed to enable MICVDD: %d\n", ret);

	if (info->micd_reva) {
		regmap_write(arizona->regmap, 0x80, 0x3);
		regmap_write(arizona->regmap, 0x294, 0);
		regmap_write(arizona->regmap, 0x80, 0x0);
	}

	regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
				 ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
				 &change);
	if (!change) {
		regulator_disable(info->micvdd);
		pm_runtime_put_autosuspend(info->dev);
	}
}
static int mddi_ext_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;

	mfd = platform_get_drvdata(pdev);
	pm_runtime_get(&pdev->dev);

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

	if (mddi_ext_pdata && mddi_ext_pdata->mddi_sel_clk &&
	    mddi_ext_pdata->mddi_sel_clk(&clk_rate))
		printk(KERN_ERR
			"%s: can't select mddi io clk target rate = %d\n",
			__func__, clk_rate);

	clk_rate = clk_round_rate(mddi_ext_clk, clk_rate);
	if (clk_set_rate(mddi_ext_clk, clk_rate) < 0)
		printk(KERN_ERR "%s: clk_set_rate failed\n", __func__);

	mddi_host_start_ext_display();
	ret = panel_next_on(pdev);

	return ret;
}
static ssize_t debug_write_phy_data(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct msm_hcd *mhcd = file->private_data;
	char kbuf[10];
	u32 data = 0;

	memset(kbuf, 0, 10);

	if (copy_from_user(kbuf, buf, min_t(size_t, sizeof(kbuf) - 1, count)))
		return -EFAULT;

	if (sscanf(kbuf, "%x", &data) != 1)
		return -EINVAL;

	/* 'addr' is the file-scope PHY register address set elsewhere */
	pm_runtime_get(mhcd->dev);
	if (msm_ulpi_write(mhcd, data, addr) < 0) {
		dev_err(mhcd->dev, "%s(): ulpi write timeout\n", __func__);
		pm_runtime_put(mhcd->dev);	/* balance the get on error */
		return -ETIMEDOUT;
	}
	pm_runtime_put(mhcd->dev);

	return count;
}
static irqreturn_t msm_async_irq(int irq, void *data)
{
	struct msm_hcd *mhcd = data;
	int ret;

	mhcd->async_int_cnt++;
	dev_dbg(mhcd->dev, "%s: hsusb host remote wakeup interrupt cnt: %u\n",
			__func__, mhcd->async_int_cnt);

	pm_stay_awake(mhcd->dev);

	spin_lock(&mhcd->wakeup_lock);
	if (mhcd->async_irq_enabled) {
		mhcd->async_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}
	spin_unlock(&mhcd->wakeup_lock);

	if (!atomic_read(&mhcd->pm_usage_cnt)) {
		ret = pm_runtime_get(mhcd->dev);
		/*
		 * Runtime resume can race with us: if the device is
		 * already active (ret == 1) or resuming
		 * (ret == -EINPROGRESS), drop the usage count again
		 * before returning.
		 */
		if ((ret == 1) || (ret == -EINPROGRESS))
			pm_runtime_put_noidle(mhcd->dev);
		else
			atomic_set(&mhcd->pm_usage_cnt, 1);
	}

	return IRQ_HANDLED;
}
static int lnw_irq_type(struct irq_data *d, unsigned type)
{
	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
	u32 gpio = irqd_to_hwirq(d);
	unsigned long flags;
	u32 value;
	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
	void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);

	if (gpio >= lnw->chip.ngpio)
		return -EINVAL;

	if (lnw->pdev)
		pm_runtime_get(&lnw->pdev->dev);

	spin_lock_irqsave(&lnw->lock, flags);
	if (type & IRQ_TYPE_EDGE_RISING)
		value = readl(grer) | BIT(gpio % 32);
	else
		value = readl(grer) & (~BIT(gpio % 32));
	writel(value, grer);

	if (type & IRQ_TYPE_EDGE_FALLING)
		value = readl(gfer) | BIT(gpio % 32);
	else
		value = readl(gfer) & (~BIT(gpio % 32));
	writel(value, gfer);
	spin_unlock_irqrestore(&lnw->lock, flags);

	if (lnw->pdev)
		pm_runtime_put(&lnw->pdev->dev);

	return 0;
}
static ssize_t debug_read_phy_data(struct file *file, char __user *ubuf,
				   size_t count, loff_t *ppos)
{
	struct msm_hcd *mhcd = file->private_data;
	char *kbuf;
	size_t c = 0;
	int data;
	int ret = 0;

	kbuf = kzalloc(BUF_SIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	pm_runtime_get(mhcd->dev);
	/* msm_ulpi_read() returns a negative errno on timeout, so 'data'
	 * must be signed for the check below to work. */
	data = msm_ulpi_read(mhcd, addr);
	pm_runtime_put(mhcd->dev);
	if (data < 0) {
		dev_err(mhcd->dev, "%s(): ulpi read timeout\n", __func__);
		kfree(kbuf);	/* don't leak the buffer on error */
		return -ETIMEDOUT;
	}

	c = scnprintf(kbuf, BUF_SIZE, "addr: 0x%x: data: 0x%x\n", addr, data);

	ret = simple_read_from_buffer(ubuf, count, ppos, kbuf, c);

	kfree(kbuf);

	return ret;
}
static int mddi_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;

	mfd = platform_get_drvdata(pdev);
	pm_runtime_get(&pdev->dev);

	if (mddi_pdata && mddi_pdata->mddi_power_save)
		mddi_pdata->mddi_power_save(1);

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

	if (mddi_pdata && mddi_pdata->mddi_sel_clk &&
	    mddi_pdata->mddi_sel_clk(&clk_rate))
		printk(KERN_ERR
			"%s: can't select mddi io clk target rate = %d\n",
			__func__, clk_rate);

	if (clk_set_min_rate(mddi_clk, clk_rate) < 0)
		printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);

	pm_qos_update_request(mfd->pm_qos_req, 65000);

	ret = panel_next_on(pdev);

	return ret;
}
static irqreturn_t msm_hsic_wakeup_irq(int irq, void *data)
{
	struct msm_hsic_hcd *mehci = data;
	int ret;

	mehci->wakeup_int_cnt++;
	dbg_log_event(NULL, "Remote Wakeup IRQ", mehci->wakeup_int_cnt);
	dev_dbg(mehci->dev, "%s: hsic remote wakeup interrupt cnt: %u\n",
			__func__, mehci->wakeup_int_cnt);

	wake_lock(&mehci->wlock);

	if (mehci->wakeup_irq_enabled) {
		mehci->wakeup_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}

	if (!atomic_read(&mehci->pm_usage_cnt)) {
		ret = pm_runtime_get(mehci->dev);
		/*
		 * HSIC runtime resume can race with us.
		 * If we are active (ret == 1) or resuming
		 * (ret == -EINPROGRESS), decrement the
		 * PM usage counter before returning.
		 */
		if ((ret == 1) || (ret == -EINPROGRESS))
			pm_runtime_put_noidle(mehci->dev);
		else
			atomic_set(&mehci->pm_usage_cnt, 1);
	}

	return IRQ_HANDLED;
}
static irqreturn_t msm_ehci_host_wakeup_irq(int irq, void *data)
{
	struct msm_hcd *mhcd = data;

	mhcd->pmic_gpio_int_cnt++;
	dev_dbg(mhcd->dev, "%s: hsusb host remote wakeup interrupt cnt: %u\n",
			__func__, mhcd->pmic_gpio_int_cnt);

	wake_lock(&mhcd->wlock);

	if (mhcd->pmic_gpio_dp_irq_enabled) {
		mhcd->pmic_gpio_dp_irq_enabled = 0;
		disable_irq_wake(irq);
		disable_irq_nosync(irq);
	}

	if (!atomic_read(&mhcd->pm_usage_cnt)) {
		atomic_set(&mhcd->pm_usage_cnt, 1);
		pm_runtime_get(mhcd->dev);
	}

	return IRQ_HANDLED;
}
static void dwc3_pci_remove(struct pci_dev *pci)
{
	struct dwc3_pci *dwc = pci_get_drvdata(pci);

	device_init_wakeup(&pci->dev, false);
	pm_runtime_get(&pci->dev);
	platform_device_unregister(dwc->dwc3);
}
static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct byt_gpio *vg = to_byt_gpio(chip);

	pm_runtime_get(&vg->pdev->dev);

	return 0;
}
static void dwc3_pci_remove(struct pci_dev *pci)
{
	struct dwc3_pci *dwc = pci_get_drvdata(pci);

	device_init_wakeup(&pci->dev, false);
	pm_runtime_get(&pci->dev);
	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
	platform_device_unregister(dwc->dwc3);
}
static int mddi_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;
#ifdef ENABLE_FWD_LINK_SKEW_CALIBRATION
	mddi_host_type host_idx = MDDI_HOST_PRIM;
	u32 stat_reg;
#endif

	mfd = platform_get_drvdata(pdev);
	pmdh_clk_enable();
	pm_runtime_get(&pdev->dev);

	if (mddi_pdata && mddi_pdata->mddi_power_save)
		mddi_pdata->mddi_power_save(1);

	pmdh_clk_enable();
#ifdef ENABLE_FWD_LINK_SKEW_CALIBRATION
	if (mddi_client_type < 2) {
		/* For skew calibration, clock should be less than 50MHz */
		if (!clk_set_min_rate(mddi_clk, 49000000)) {
			stat_reg = mddi_host_reg_in(STAT);
			printk(KERN_DEBUG "\n stat_reg = 0x%x", stat_reg);
			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
			if (stat_reg & (0x1 << 4))
				mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);

			mddi_host_reg_out(CMD, MDDI_CMD_SEND_RTD);
			mddi_send_fw_link_skew_cal(host_idx);
			mddi_host_reg_out(CMD, MDDI_CMD_SEND_RTD);
			mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
		} else {
			printk(KERN_ERR "%s: clk_set_min_rate failed\n",
				__func__);
		}
	}
#endif

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

	if (mddi_pdata && mddi_pdata->mddi_sel_clk &&
	    mddi_pdata->mddi_sel_clk(&clk_rate))
		printk(KERN_ERR
			"%s: can't select mddi io clk target rate = %d\n",
			__func__, clk_rate);

	if (clk_set_min_rate(mddi_clk, clk_rate) < 0)
		printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);

	ret = panel_next_on(pdev);

	return ret;
}
static ssize_t store_pm_get(struct device *_dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(_dev);
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	pm_runtime_get(hcd->self.controller);

	return count;
}
static void dwc3_pci_remove(struct pci_dev *pci)
{
	struct dwc3_pci *dwc = pci_get_drvdata(pci);

#ifdef CONFIG_PM
	cancel_work_sync(&dwc->wakeup_work);
#endif
	device_init_wakeup(&pci->dev, false);
	pm_runtime_get(&pci->dev);
	platform_device_unregister(dwc->dwc3);
}
static int dwc3_pci_runtime_resume(struct device *dev)
{
	struct dwc3_pci *dwc = dev_get_drvdata(dev);
	struct platform_device *dwc3 = dwc->dwc3;
	int ret;

	ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
	if (ret)
		return ret;

	return pm_runtime_get(&dwc3->dev);
}
/**
 * dwc3_ext_event_notify - callback to handle events from external transceiver
 * @otg: Pointer to the otg transceiver structure
 * @event: Event reported by transceiver
 */
static void dwc3_ext_event_notify(struct usb_otg *otg,
					enum dwc3_ext_events event)
{
	static bool init;
	struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
	struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;
	struct usb_phy *phy = dotg->otg.phy;
	int ret = 0;

	if (event == DWC3_EVENT_PHY_RESUME) {
		if (!pm_runtime_status_suspended(phy->dev)) {
			dev_warn(phy->dev, "PHY_RESUME event out of LPM!!!!\n");
		} else {
			dev_dbg(phy->dev, "ext PHY_RESUME event received\n");
			/* ext_xceiv would have taken h/w out of LPM by now */
			ret = pm_runtime_get(phy->dev);
			if (ret == -EACCES) {
				/*
				 * pm_runtime_get may fail during system
				 * resume with -EACCES error
				 */
				pm_runtime_disable(phy->dev);
				pm_runtime_set_active(phy->dev);
				pm_runtime_enable(phy->dev);
			} else if (ret < 0) {
				dev_warn(phy->dev, "pm_runtime_get failed!\n");
			}
		}
	} else if (event == DWC3_EVENT_XCEIV_STATE) {
		if (ext_xceiv->id == DWC3_ID_FLOAT) {
			dev_dbg(phy->dev, "XCVR: ID set\n");
			set_bit(ID, &dotg->inputs);
		} else {
			dev_dbg(phy->dev, "XCVR: ID clear\n");
			clear_bit(ID, &dotg->inputs);
		}

		if (ext_xceiv->bsv) {
			dev_dbg(phy->dev, "XCVR: BSV set\n");
			set_bit(B_SESS_VLD, &dotg->inputs);
		} else {
			dev_dbg(phy->dev, "XCVR: BSV clear\n");
			clear_bit(B_SESS_VLD, &dotg->inputs);
		}

		if (!init) {
			init = true;
			complete(&dotg->dwc3_xcvr_vbus_init);
			dev_dbg(phy->dev, "XCVR: BSV init complete\n");
			return;
		}

		schedule_work(&dotg->sm_work);
	}
}
static ssize_t show_test1(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	int ret;

	dev_err(dev, "%s\n", __func__);

	ret = pm_runtime_get(dev);
	dev_err(dev, "%s(%d): pm_runtime_get() returns %d\n",
		__func__, __LINE__, ret);

	return sprintf(buf, "%s\n", __func__);
}
static irqreturn_t msm_ehci_irq(struct usb_hcd *hcd)
{
	struct msm_hcd *mhcd = hcd_to_mhcd(hcd);

	if (atomic_read(&mhcd->in_lpm)) {
		disable_irq_nosync(hcd->irq);
		mhcd->async_int = true;
		pm_runtime_get(mhcd->dev);
		return IRQ_HANDLED;
	}

	return ehci_irq(hcd);
}
/**
 * dwc3_otg_init - Initializes otg related registers
 * @dwc: Pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_otg_init(struct dwc3 *dwc)
{
	struct dwc3_otg *dotg;

	dev_dbg(dwc->dev, "dwc3_otg_init\n");

	/* Allocate and init otg instance */
	dotg = devm_kzalloc(dwc->dev, sizeof(struct dwc3_otg), GFP_KERNEL);
	if (!dotg) {
		dev_err(dwc->dev, "unable to allocate dwc3_otg\n");
		return -ENOMEM;
	}

	dotg->otg.phy = devm_kzalloc(dwc->dev, sizeof(struct usb_phy),
							GFP_KERNEL);
	if (!dotg->otg.phy) {
		dev_err(dwc->dev, "unable to allocate dwc3_otg.phy\n");
		return -ENOMEM;
	}

	dotg->otg.phy->otg = &dotg->otg;
	dotg->otg.phy->dev = dwc->dev;
	dotg->otg.phy->set_power = dwc3_otg_set_power;
	dotg->otg.set_peripheral = dwc3_otg_set_peripheral;
	dotg->otg.phy->set_suspend = dwc3_otg_set_suspend;
	dotg->otg.phy->state = OTG_STATE_UNDEFINED;
	dotg->regs = dwc->regs;

	/* This reference is used by dwc3 modules for checking otg existence */
	dwc->dotg = dotg;
	dotg->dwc = dwc;

	wake_lock_init(&dotg->host_wakelock, WAKE_LOCK_SUSPEND,
			"host_wakelock");

	init_completion(&dotg->dwc3_xcvr_vbus_init);
	INIT_DELAYED_WORK(&dotg->sm_work, dwc3_otg_sm_work);
	INIT_DELAYED_WORK(&dotg->no_device_work, dwc3_otg_no_device_work);

	no_device_timeout = DWC3_OTG_TIME_NO_DEVICE;
	stop_host_retry_max = DWC3_OTG_STOP_HOST_RETRY_MAX;
	no_device_timeout_enable = true;

	dbg_event(0xFF, "OTGInit get", 0);
	pm_runtime_get(dwc->dev);

	return 0;
}
static int usbhs_wakeup_handler(struct omap_hwmod_mux_info *unused)
{
	int queued;

	queued = queue_delayed_work(pm_wq, &usbhs_wakeup,
				    msecs_to_jiffies(20));
	if (queued) {
		clkdm_wakeup(l3init_clkdm);
		pm_runtime_get(&pdev_usbhs->dev);
	}

	return 0;
}
/* Can run in atomic context */
_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void)
{
#ifdef CONFIG_PM_RUNTIME
	int err;

	MALI_DEBUG_ASSERT_POINTER(mali_platform_device);

	err = pm_runtime_get(&(mali_platform_device->dev));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
	pm_runtime_mark_last_busy(&(mali_platform_device->dev));
#endif
	if (0 > err && -EINPROGRESS != err) {
		MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n",
				  err));
		return _MALI_OSK_ERR_FAULT;
	}
#endif
	return _MALI_OSK_ERR_OK;
}
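/*
 * Hedged usage sketch (not from the original sources): every successful
 * async reference must eventually be dropped again. The release helper
 * name _mali_osk_pm_dev_ref_put() is an assumption for illustration;
 * the real counterpart differs between Mali driver versions.
 */
static void example_gpu_job_path(void)
{
	if (_MALI_OSK_ERR_OK != _mali_osk_pm_dev_ref_get_async())
		return;	/* power-up could not even be requested */

	/* ... queue or process the job while the reference is held ... */

	_mali_osk_pm_dev_ref_put();	/* assumed release counterpart */
}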