/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU.
 */
void disable_irq(unsigned int irq)
{
	/* Mask the line first so no new invocations start... */
	disable_irq_nosync(irq);
	/*
	 * ...then wait for any handler still executing on another CPU to
	 * finish. Without this wait the function is not synchronous at all,
	 * contradicting the documented contract above. Must not be called
	 * from IRQ context, as synchronize_irq() may sleep/spin-wait.
	 */
	synchronize_irq(irq);
}
/*
 * Probe the MSM EHCI (variant 2) host controller: create the HCD, map
 * registers, vote for the TCXO D0 buffer, bring up clocks/regulators/LDOs,
 * reset the PHY and register the HCD. On any failure the already-acquired
 * resources are released in reverse order via the goto ladder.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit ehci_msm2_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	struct msm_hcd *mhcd;
	const struct msm_usb_host_platform_data *pdata;
	char pdev_name[PDEV_NAME_LEN];
	int ret;

	dev_dbg(&pdev->dev, "ehci_msm2 probe\n");

	hcd = usb_create_hcd(&msm_hc2_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		return -ENOMEM;
	}

	hcd->irq = platform_get_irq(pdev, 0);
	if (hcd->irq < 0) {
		dev_err(&pdev->dev, "Unable to get IRQ resource\n");
		ret = hcd->irq;
		goto put_hcd;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Unable to get memory resource\n");
		ret = -ENODEV;
		goto put_hcd;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto put_hcd;
	}

	mhcd = hcd_to_mhcd(hcd);
	mhcd->dev = &pdev->dev;

	/* Vote for the TCXO D0 buffer on behalf of this device instance. */
	snprintf(pdev_name, PDEV_NAME_LEN, "%s.%d", pdev->name, pdev->id);
	mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name);
	if (IS_ERR(mhcd->xo_handle)) {
		dev_err(&pdev->dev, "%s not able to get the handle "
			"to vote for TCXO D0 buffer\n", __func__);
		ret = PTR_ERR(mhcd->xo_handle);
		goto unmap;
	}

	ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
	if (ret) {
		dev_err(&pdev->dev, "%s failed to vote for TCXO "
			"D0 buffer%d\n", __func__, ret);
		goto free_xo_handle;
	}

	ret = msm_ehci_init_clocks(mhcd, 1);
	if (ret) {
		dev_err(&pdev->dev, "unable to initialize clocks\n");
		ret = -ENODEV;
		goto devote_xo_handle;
	}

	ret = msm_ehci_init_vddcx(mhcd, 1);
	if (ret) {
		dev_err(&pdev->dev, "unable to initialize VDDCX\n");
		ret = -ENODEV;
		goto deinit_clocks;
	}

	ret = msm_ehci_config_vddcx(mhcd, 1);
	if (ret) {
		dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
		goto deinit_vddcx;
	}

	ret = msm_ehci_ldo_init(mhcd, 1);
	if (ret) {
		dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
		goto deinit_vddcx;
	}

	ret = msm_ehci_ldo_enable(mhcd, 1);
	if (ret) {
		dev_err(&pdev->dev, "hsusb vreg enable failed\n");
		goto deinit_ldo;
	}

	ret = msm_ehci_init_vbus(mhcd, 1);
	if (ret) {
		dev_err(&pdev->dev, "unable to get vbus\n");
		goto disable_ldo;
	}

	ret = msm_hsusb_reset(mhcd);
	if (ret) {
		dev_err(&pdev->dev, "hsusb PHY initialization failed\n");
		goto vbus_deinit;
	}

	ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
	if (ret) {
		dev_err(&pdev->dev, "unable to register HCD\n");
		goto vbus_deinit;
	}

	pdata = mhcd->dev->platform_data;
	/* Only drive VBUS when no dock is connected (or no dock irq exists). */
	if (pdata && (!pdata->dock_connect_irq ||
				!irq_read_line(pdata->dock_connect_irq)))
		msm_ehci_vbus_power(mhcd, 1);

	device_init_wakeup(&pdev->dev, 1);
	wake_lock_init(&mhcd->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
	wake_lock(&mhcd->wlock);
	INIT_WORK(&mhcd->phy_susp_fail_work, msm_ehci_phy_susp_fail_work);
	/*
	 * This pdev->dev is assigned parent of root-hub by USB core,
	 * hence, runtime framework automatically calls this driver's
	 * runtime APIs based on root-hub's state.
	 */
	/* configure pmic_gpio_irq for D+ change */
	if (pdata && pdata->pmic_gpio_dp_irq)
		mhcd->pmic_gpio_dp_irq = pdata->pmic_gpio_dp_irq;
	if (mhcd->pmic_gpio_dp_irq) {
		ret = request_threaded_irq(mhcd->pmic_gpio_dp_irq, NULL,
				msm_ehci_host_wakeup_irq,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"msm_ehci_host_wakeup", mhcd);
		if (!ret) {
			/* Only armed across suspend; keep masked while awake. */
			disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
		} else {
			dev_err(&pdev->dev, "request_irq(%d) failed: %d\n",
					mhcd->pmic_gpio_dp_irq, ret);
			mhcd->pmic_gpio_dp_irq = 0;
		}
	} else if (pdata && pdata->mpm_xo_wakeup_int) {
		/*
		 * BUGFIX: the original dereferenced pdata without a NULL
		 * check here, although every other use guards it - a board
		 * without platform data would oops at this point.
		 */
		msm_mpm_set_pin_type(pdata->mpm_xo_wakeup_int,
				IRQ_TYPE_LEVEL_HIGH);
		msm_mpm_set_pin_wake(pdata->mpm_xo_wakeup_int, 1);
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	if (ehci_debugfs_init(mhcd) < 0)
		dev_err(mhcd->dev, "%s: debugfs init failed\n", __func__);

	return 0;

vbus_deinit:
	msm_ehci_init_vbus(mhcd, 0);
disable_ldo:
	msm_ehci_ldo_enable(mhcd, 0);
deinit_ldo:
	msm_ehci_ldo_init(mhcd, 0);
deinit_vddcx:
	msm_ehci_init_vddcx(mhcd, 0);
deinit_clocks:
	msm_ehci_init_clocks(mhcd, 0);
devote_xo_handle:
	msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
free_xo_handle:
	msm_xo_put(mhcd->xo_handle);
unmap:
	iounmap(hcd->regs);
put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
/*
 * Control the external interrupt line of a WMT pin.
 *
 * Supported states: PIN_STA_INIT (parse irq number/trigger from DT or use
 * build-time defaults, then request_irq), PIN_STA_EINT_EN / PIN_STA_EINT_DIS
 * (reference-counted enable/disable under gbgfIrqBle.lock so the irq is never
 * enabled or disabled twice in a row), PIN_STA_DEINIT (free_irq).
 * Only PIN_BGF_EINT is supported. Returns 0 on success, negative errno or -1
 * on failure.
 *
 * NOTE(review): bgf_irq_num/bgf_irq_flag are function-static, so INIT must
 * happen before EN/DIS/DEINIT - presumably guaranteed by the caller; confirm.
 */
INT32 wmt_plat_eirq_ctrl(ENUM_PIN_ID id, ENUM_PIN_STATE state)
{
#ifdef CONFIG_OF
	struct device_node *node;
	unsigned int irq_info[3] = { 0, 0, 0 };
#endif
	INT32 iret = -EINVAL;
	/* Cached across calls: resolved irq number and trigger flags. */
	static INT32 bgf_irq_num = -1;
	static UINT32 bgf_irq_flag;

	/* TODO: [ChangeFeature][GeorgeKuo]: use another function to handle this, as done in gpio_ctrls */

	if ((PIN_STA_INIT != state) && (PIN_STA_DEINIT != state) &&
	    (PIN_STA_EINT_EN != state) && (PIN_STA_EINT_DIS != state)) {
		WMT_PLAT_WARN_FUNC("WMT-PLAT:invalid PIN_STATE(%d) in eirq_ctrl for PIN(%d)\n", state, id);
		return -1;
	}

	switch (id) {
	case PIN_BGF_EINT:
		if (PIN_STA_INIT == state) {
#ifdef CONFIG_OF
			node = of_find_compatible_node(NULL, NULL, "mediatek,mt6735-consys");
			if (node) {
				bgf_irq_num = irq_of_parse_and_map(node, 0);
				/* get the interrupt line behaviour */
				if (of_property_read_u32_array(node, "interrupts", irq_info, ARRAY_SIZE(irq_info))) {
					WMT_PLAT_ERR_FUNC("get irq flags from DTS fail!!\n");
					return iret;
				}
				/* Third interrupts cell holds the trigger type. */
				bgf_irq_flag = irq_info[2];
				WMT_PLAT_INFO_FUNC("get irq id(%d) and irq trigger flag(%d) from DT\n", bgf_irq_num, bgf_irq_flag);
			} else {
				WMT_PLAT_ERR_FUNC("[%s] can't find CONSYS compatible node\n", __func__);
				return iret;
			}
#else
			/* No DT: fall back to build-time irq id and trigger. */
			bgf_irq_num = MT_CONN2AP_BTIF_WAKEUP_IRQ_ID;
			bgf_irq_flag = IRQF_TRIGGER_LOW;
#endif
			iret = request_irq(bgf_irq_num, wmt_plat_bgf_irq_isr, bgf_irq_flag, "BTIF_WAKEUP_IRQ", NULL);
			if (iret) {
				WMT_PLAT_ERR_FUNC("request_irq fail,irq_no(%d),iret(%d)\n", bgf_irq_num, iret);
				return iret;
			}
			/* request_irq leaves the line enabled: counter starts at 1. */
			gbgfIrqBle.counter = 1;
		} else if (PIN_STA_EINT_EN == state) {
			spin_lock_irqsave(&gbgfIrqBle.lock, gbgfIrqBle.flags);
			if (gbgfIrqBle.counter) {
				/* Already enabled: do not enable twice. */
				WMT_PLAT_DBG_FUNC("BGF INT has been enabled,counter(%d)\n", gbgfIrqBle.counter);
			} else {
				enable_irq(bgf_irq_num);
				gbgfIrqBle.counter++;
			}
			WMT_PLAT_DBG_FUNC("WMT-PLAT:BGFInt (en)\n");
			spin_unlock_irqrestore(&gbgfIrqBle.lock, gbgfIrqBle.flags);
		} else if (PIN_STA_EINT_DIS == state) {
			spin_lock_irqsave(&gbgfIrqBle.lock, gbgfIrqBle.flags);
			if (!gbgfIrqBle.counter) {
				/* Already disabled: do not disable twice. */
				WMT_PLAT_INFO_FUNC("BGF INT has been disabled,counter(%d)\n", gbgfIrqBle.counter);
			} else {
				disable_irq_nosync(bgf_irq_num);
				gbgfIrqBle.counter--;
			}
			WMT_PLAT_DBG_FUNC("WMT-PLAT:BGFInt (dis)\n");
			spin_unlock_irqrestore(&gbgfIrqBle.lock, gbgfIrqBle.flags);
		} else {
			free_irq(bgf_irq_num, NULL);
			/* de-init: nothing to do in ALPS, such as un-registration... */
		}
		iret = 0;
		break;
	default:
		WMT_PLAT_WARN_FUNC("WMT-PLAT:unsupported EIRQ(PIN_ID:%d) in eirq_ctrl\n", id);
		iret = -1;
		break;
	}

	return iret;
}
/*
 * handle_twl6030_int() is the desc->handle method for the twl6030 interrupt.
 * This is a chained interrupt, so there is no desc->action method for it.
 * Now we need to query the interrupt controller in the twl6030 to determine
 * which module is generating the interrupt request. However, we can't do i2c
 * transactions in interrupt context, so we must defer that work to a kernel
 * thread. All we do here is acknowledge and mask the interrupt and wakeup
 * the kernel thread.
 */
static irqreturn_t handle_twl6030_pih(int irq, void *devid)
{
	/* Mask first so the level-triggered line cannot re-fire before the
	 * thread has queried and cleared the chip over i2c. */
	disable_irq_nosync(irq);
	/* devid is the completion the i2c-capable kernel thread waits on. */
	complete(devid);
	return IRQ_HANDLED;
}
/*
 * Mask all QSC modem notification interrupts (errfatal, status and
 * vddmin). Uses the nosync variant, so handlers already running on
 * another CPU may still be in flight when this returns.
 */
static void qsc_disable_irqs(void)
{
	disable_irq_nosync(mdm_drv->mdm_errfatal_irq);
	disable_irq_nosync(mdm_drv->mdm_status_irq);
	disable_irq_nosync(mdm_drv->qsc_vddmin_irq);
}
/*
 * GPIO keypad interrupt handler.
 *
 * Masks the keypad irq, reads and acknowledges the GPIO interrupt status,
 * reports a key-press event for each asserted column/row bit and arms the
 * matching release-detection timer, then re-enables the irq. Returns
 * IRQ_NONE if none of the status bits belong to the keypad.
 */
static irqreturn_t kpad_interrupt(int irq, void *dev_id)
{
	unsigned int status;

	DPRINTK("Start\n");
	/* Disable interrupt */
	disable_irq_nosync(kpad.irq);
	/*
	 * Get keypad interrupt status and clean interrput source.
	 */
	status = GPIO_INT_REQ_STS_VAL;
	DPRINTK("Status 1 = %x\n",status);
	/* Keep only the bits that belong to the keypad lines. */
	status &= KPDA_IRQ_MASK;
	/*The IRQ is kpad trigger*/
	if (status) {
		DPRINTK("Status 2 = %x\n",status);
		/*Clean IRQ*/
		/* Write-1-to-clear acknowledge of the handled bits. */
		GPIO_INT_REQ_STS_VAL = status;
		if (status & COL0_IRQ_MASK) {
			input_report_key(kpad_dev, wmt_kpad_codes[WMT_COL0_KEY_NUM], 1); /*col0 key is pressed*/
			input_sync(kpad_dev);
			/* Timer polls for release since there is no release irq. */
			mod_timer(&wmt_kpad_timer_col0, jiffies + wmt_kpad_timeout);
			DPRINTK("WMT_COL0_KEY_NUM press\n");
		}
		if (status & COL1_IRQ_MASK) {
			input_report_key(kpad_dev, wmt_kpad_codes[WMT_COL1_KEY_NUM], 1); /*col1 key is pressed*/
			input_sync(kpad_dev);
			mod_timer(&wmt_kpad_timer_col1, jiffies + wmt_kpad_timeout);
			DPRINTK("WMT_COL1_KEY_NUM press\n");
		}
		if (status & ROW0_IRQ_MASK) {
			if (back_menu_timeout == 0) {
				input_report_key(kpad_dev, wmt_kpad_codes[WMT_ROW0_KEY_NUM], 1); /*row0 key is pressed*/
				input_sync(kpad_dev);
				mod_timer(&wmt_kpad_timer_row0, jiffies + wmt_kpad_timeout);
				DPRINTK("WMT_ROW0_KEY_NUM press\n");
			} else {
				/* Long-press mode: defer reporting; the timer decides
				 * between "back" and "menu" based on hold duration. */
				if (back_menu_pressed == false) {
					back_menu_timeout_counter = 0;
					back_menu_pressed = true;
				}
				mod_timer(&wmt_kpad_timer_row0, jiffies + wmt_kpad_timeout);
				DPRINTK("WMT_ROW0_KEY_NUM press 1\n");
			}
		}
		if (status & ROW1_IRQ_MASK) {
			input_report_key(kpad_dev, wmt_kpad_codes[WMT_ROW1_KEY_NUM], 1); /*row1 key is pressed*/
			input_sync(kpad_dev);
			mod_timer(&wmt_kpad_timer_row1, jiffies + wmt_kpad_timeout);
			DPRINTK("WMT_ROW1_KEY_NUM press\n");
		}
	} else {
		/* Not ours: re-enable and let other handlers see it. */
		enable_irq(kpad.irq);
		return IRQ_NONE;
	}
	/* Enable interrupt */
	enable_irq(kpad.irq);
	DPRINTK("End\n");
	return IRQ_HANDLED;
}
static int dsps_alloc_resources(struct platform_device *pdev) { int ret = -ENODEV; struct resource *ppss_res; struct resource *ppss_wdog; int i; pr_debug("%s.\n", __func__); if ((drv->pdata->signature != DSPS_SIGNATURE)) { pr_err("%s: invalid signature for pdata.", __func__); return -EINVAL; } ppss_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppss_reg"); if (!ppss_res) { pr_err("%s: failed to get ppss_reg resource.\n", __func__); return -EINVAL; } for (i = 0; i < drv->pdata->clks_num; i++) { const char *name = drv->pdata->clks[i].name; struct clk *clock; drv->pdata->clks[i].clock = NULL; pr_debug("%s: get clk %s.", __func__, name); clock = clk_get(drv->dev, name); if (IS_ERR(clock)) { pr_err("%s: can't get clk %s.", __func__, name); goto clk_err; } drv->pdata->clks[i].clock = clock; } for (i = 0; i < drv->pdata->gpios_num; i++) { const char *name = drv->pdata->gpios[i].name; int num = drv->pdata->gpios[i].num; drv->pdata->gpios[i].is_owner = false; pr_debug("%s: get gpio %s.", __func__, name); ret = gpio_request(num, name); if (ret) { pr_err("%s: request GPIO %s err %d.", __func__, name, ret); goto gpio_err; } drv->pdata->gpios[i].is_owner = true; } for (i = 0; i < drv->pdata->regs_num; i++) { const char *name = drv->pdata->regs[i].name; drv->pdata->regs[i].reg = NULL; pr_debug("%s: get regulator %s.", __func__, name); drv->pdata->regs[i].reg = regulator_get(drv->dev, name); if (IS_ERR(drv->pdata->regs[i].reg)) { pr_err("%s: get regulator %s failed.", __func__, name); goto reg_err; } } drv->ppss_base = ioremap(ppss_res->start, resource_size(ppss_res)); ppss_wdog = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ppss_wdog"); if (ppss_wdog) { drv->wdog_irq = ppss_wdog->start; ret = request_irq(drv->wdog_irq, dsps_wdog_bite_irq, IRQF_TRIGGER_RISING, "dsps_wdog", NULL); if (ret) { pr_err("%s: request_irq fail %d\n", __func__, ret); goto request_irq_err; } } else { drv->wdog_irq = -1; pr_debug("%s: ppss_wdog not supported.\n", __func__); } 
drv->dspsfw_ramdump_segments[0].address = drv->pdata->tcm_code_start; drv->dspsfw_ramdump_segments[0].size = drv->pdata->tcm_code_size; drv->dspsfw_ramdump_segments[1].address = drv->pdata->tcm_buf_start; drv->dspsfw_ramdump_segments[1].size = drv->pdata->tcm_buf_size; drv->dspsfw_ramdump_segments[2].address = drv->pdata->pipe_start; drv->dspsfw_ramdump_segments[2].size = drv->pdata->pipe_size; drv->dspsfw_ramdump_segments[3].address = drv->pdata->ddr_start; drv->dspsfw_ramdump_segments[3].size = drv->pdata->ddr_size; drv->dspsfw_ramdump_dev = create_ramdump_device("dsps"); if (!drv->dspsfw_ramdump_dev) { pr_err("%s: create_ramdump_device(\"dsps\") fail\n", __func__); goto create_ramdump_err; } drv->smem_ramdump_segments[0].address = drv->pdata->smem_start; drv->smem_ramdump_segments[0].size = drv->pdata->smem_size; drv->smem_ramdump_dev = create_ramdump_device("smem-dsps"); if (!drv->smem_ramdump_dev) { pr_err("%s: create_ramdump_device(\"smem\") fail\n", __func__); goto create_ramdump_err; } if (drv->pdata->init) drv->pdata->init(drv->pdata); return 0; create_ramdump_err: disable_irq_nosync(drv->wdog_irq); free_irq(drv->wdog_irq, NULL); request_irq_err: iounmap(drv->ppss_base); reg_err: for (i = 0; i < drv->pdata->regs_num; i++) { if (drv->pdata->regs[i].reg) { regulator_put(drv->pdata->regs[i].reg); drv->pdata->regs[i].reg = NULL; } } gpio_err: for (i = 0; i < drv->pdata->gpios_num; i++) if (drv->pdata->gpios[i].is_owner) { gpio_free(drv->pdata->gpios[i].num); drv->pdata->gpios[i].is_owner = false; } clk_err: for (i = 0; i < drv->pdata->clks_num; i++) if (drv->pdata->clks[i].clock) { clk_put(drv->pdata->clks[i].clock); drv->pdata->clks[i].clock = NULL; } return ret; }
/*
 * OMAP MMC controller interrupt handler.
 *
 * Drains the STAT register in a loop (acknowledging each pass), moving
 * FIFO data, recording command/data completion and error conditions, then
 * finishes the request: on a command error with data pending it aborts via
 * the workqueue; otherwise it completes the command and/or data transfer.
 */
static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
{
	struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
	u16 status;
	int end_command;
	int end_transfer;
	int transfer_error, cmd_error;

	if (host->cmd == NULL && host->data == NULL) {
		/* No request in flight: ack and mask whatever fired. */
		status = OMAP_MMC_READ(host, STAT);
		dev_info(mmc_dev(host->slots[0]->mmc), "Spurious IRQ 0x%04x\n", status);
		if (status != 0) {
			OMAP_MMC_WRITE(host, STAT, status);
			OMAP_MMC_WRITE(host, IE, 0);
		}
		return IRQ_HANDLED;
	}

	end_command = 0;
	end_transfer = 0;
	transfer_error = 0;
	cmd_error = 0;

	/* Keep reading STAT until the controller reports nothing pending;
	 * each read is acknowledged immediately by writing it back. */
	while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
		int cmd;

		OMAP_MMC_WRITE(host, STAT, status);
		if (host->cmd != NULL)
			cmd = host->cmd->opcode;
		else
			cmd = -1;
#ifdef CONFIG_MMC_DEBUG
		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
			status, cmd);
		mmc_omap_report_irq(status);
		printk("\n");
#endif
		if (host->total_bytes_left) {
			/* FIFO service: 0 = read direction, 1 = write. */
			if ((status & OMAP_MMC_STAT_A_FULL) ||
			    (status & OMAP_MMC_STAT_END_OF_DATA))
				mmc_omap_xfer_data(host, 0);
			if (status & OMAP_MMC_STAT_A_EMPTY)
				mmc_omap_xfer_data(host, 1);
		}

		if (status & OMAP_MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & OMAP_MMC_STAT_DATA_TOUT) {
			dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
				cmd);
			if (host->data) {
				host->data->error = -ETIMEDOUT;
				transfer_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_DATA_CRC) {
			if (host->data) {
				host->data->error = -EILSEQ;
				dev_dbg(mmc_dev(host->mmc),
					"data CRC error, bytes left %d\n",
					host->total_bytes_left);
				transfer_error = 1;
			} else {
				dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
			}
		}

		if (status & OMAP_MMC_STAT_CMD_TOUT) {
			/* Timeouts are routine with some commands */
			if (host->cmd) {
				struct mmc_omap_slot *slot =
					host->current_slot;
				/* Only log when no cover is open - an open
				 * cover makes timeouts expected, not errors. */
				if (slot == NULL ||
				    !mmc_omap_cover_is_open(slot))
					dev_err(mmc_dev(host->mmc),
						"command timeout (CMD%d)\n",
						cmd);
				host->cmd->error = -ETIMEDOUT;
				end_command = 1;
				cmd_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_CMD_CRC) {
			if (host->cmd) {
				dev_err(mmc_dev(host->mmc),
					"command CRC error (CMD%d, arg 0x%08x)\n",
					cmd, host->cmd->arg);
				host->cmd->error = -EILSEQ;
				end_command = 1;
				cmd_error = 1;
			} else
				dev_err(mmc_dev(host->mmc),
					"command CRC error without cmd?\n");
		}

		if (status & OMAP_MMC_STAT_CARD_ERR) {
			dev_dbg(mmc_dev(host->mmc),
				"ignoring card status error (CMD%d)\n",
				cmd);
			end_command = 1;
		}

		/*
		 * NOTE: On 1610 the END_OF_CMD may come too early when
		 * starting a write
		 */
		if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
		    (!(status & OMAP_MMC_STAT_A_EMPTY))) {
			end_command = 1;
		}
	}

	if (cmd_error && host->data) {
		/* Command failed with data still queued: stop everything and
		 * let the abort work (process context) clean up. */
		del_timer(&host->cmd_abort_timer);
		host->abort = 1;
		OMAP_MMC_WRITE(host, IE, 0);
		disable_irq_nosync(host->irq);
		queue_work(mmc_omap_wq, &host->cmd_abort_work);
		return IRQ_HANDLED;
	}

	if (end_command && host->cmd)
		mmc_omap_cmd_done(host, host->cmd);
	if (host->data != NULL) {
		if (transfer_error)
			mmc_omap_xfer_done(host, host->data);
		else if (end_transfer)
			mmc_omap_end_of_data(host, host->data);
	}

	return IRQ_HANDLED;
}
/*
 * Threaded handler for the codec's single shared interrupt line.
 *
 * Reads all interrupt status registers in one bulk transaction, applies the
 * current software mask, dispatches each pending internal interrupt in
 * intr_table order, and - as a failsafe - clears everything if no bit was
 * handled, to avoid an interrupt storm. Holds the sleep (suspend) lock for
 * the duration. On read failure the irq is masked entirely.
 */
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	/* VLAs sized by the codec's register count: raw status and a copy
	 * from which handled bits are removed. */
	u8 status[num_irq_regs], status1[num_irq_regs];

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->codec_bulk_read) {
		dev_err(wcd9xxx_res->dev,
				"%s: Codec Bulk Register read callback not supplied\n",
			   __func__);
		goto err_disable_irq;
	}

	ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
				WCD9XXX_A_INTR_STATUS0,
				num_irq_regs, status);

	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
				"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

	/* status1 tracks which bits remain unhandled after dispatch. */
	memcpy(status1, status, sizeof(status1));

	/* Find out which interrupt was triggered and call that interrupt's
	 * handler function
	 *
	 * Since codec has only one hardware irq line which is shared by
	 * codec's different internal interrupts, so it's possible master irq
	 * handler dispatches multiple nested irq handlers after breaking
	 * order. Dispatch interrupts in the order that is maintained by
	 * the interrupt table.
	 */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
				BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/*
	 * As a failsafe if unhandled irq is found, clear it to prevent
	 * interrupt storm.
	 * Note that we can say there was an unhandled irq only when no irq
	 * handled by nested irq handler since Taiko supports qdsp as irqs'
	 * destination for few irqs. Therefore driver shouldn't clear pending
	 * irqs when few handled while few others not.
	 */
	/* memcmp == 0 means status1 is unchanged, i.e. nothing was handled. */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}

		/* Clear every pending bit in one shot. */
		memset(status, 0xff, num_irq_regs);
		ret = wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
				WCD9XXX_A_INTR_CLEAR0,
				num_irq_regs, status);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					WCD9XXX_A_INTR_MODE, 0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
	dev_err(wcd9xxx_res->dev,
			"Disable irq %d\n", wcd9xxx_res->irq);

	disable_irq_wake(wcd9xxx_res->irq);
	disable_irq_nosync(wcd9xxx_res->irq);
	wcd9xxx_unlock_sleep(wcd9xxx_res);
	return IRQ_NONE;
}
static int qc_ioctl(struct dpram_link_device *dpld, struct io_device *iod, unsigned int cmd, unsigned long arg) { struct link_device *ld = &dpld->ld; int err = 0; switch (cmd) { case IOCTL_DPRAM_PHONE_POWON: err = qc_prepare_download(dpld); if (err < 0) mif_info("%s: ERR! prepare_download fail\n", ld->name); break; case IOCTL_DPRAM_PHONEIMG_LOAD: err = qc_download_binary(dpld, (void *)arg); if (err < 0) mif_info("%s: ERR! download_binary fail\n", ld->name); break; case IOCTL_DPRAM_NVDATA_LOAD: err = qc_download_nv(dpld, (void *)arg); if (err < 0) mif_info("%s: ERR! download_nv fail\n", ld->name); break; case IOCTL_DPRAM_PHONE_BOOTSTART: err = qc_boot_start(dpld); if (err < 0) { mif_info("%s: ERR! boot_start fail\n", ld->name); break; } err = qc_boot_post_process(dpld); if (err < 0) mif_info("%s: ERR! boot_post_process fail\n", ld->name); break; case IOCTL_DPRAM_PHONE_UPLOAD_STEP1: disable_irq_nosync(dpld->irq); err = qc_uload_step1(dpld); if (err < 0) { enable_irq(dpld->irq); mif_info("%s: ERR! upload_step1 fail\n", ld->name); } break; case IOCTL_DPRAM_PHONE_UPLOAD_STEP2: err = qc_uload_step2(dpld, (void *)arg); if (err < 0) { enable_irq(dpld->irq); mif_info("%s: ERR! upload_step2 fail\n", ld->name); } break; default: mif_err("%s: ERR! invalid cmd 0x%08X\n", ld->name, cmd); err = -EINVAL; break; } return err; }
/*
 * Headphone jack detect interrupt: mask the line and defer the actual
 * (debounced) jack-state handling to delayed work in process context.
 */
static irqreturn_t imx_headphone_detect_handler(int irq, void *data)
{
	/* 200 ms debounce before the jack state is sampled. */
	const unsigned long debounce = msecs_to_jiffies(200);

	/* Keep the line quiet until the deferred work re-enables it. */
	disable_irq_nosync(irq);
	schedule_delayed_work(&hp_event, debounce);

	return IRQ_HANDLED;
}
/*
 * Mask the modem's errfatal and status notification interrupts. Uses the
 * nosync variant, so a handler already running on another CPU may still
 * be in flight when this returns.
 */
static void mdm_disable_irqs(void)
{
	disable_irq_nosync(mdm_drv->mdm_errfatal_irq);
	disable_irq_nosync(mdm_drv->mdm_status_irq);
}
static int pn547_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret; int err; int addr; char tmp[4] = {0x20, 0x00, 0x01, 0x01}; int addrcnt; struct pn547_i2c_platform_data *platform_data; struct pn547_dev *pn547_dev; if (client->dev.of_node) { platform_data = devm_kzalloc(&client->dev, sizeof(struct pn547_i2c_platform_data), GFP_KERNEL); if (!platform_data) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } err = pn547_parse_dt(&client->dev, platform_data); if (err) return err; } else { platform_data = client->dev.platform_data; } if (platform_data == NULL) { pr_err("%s : nfc probe fail\n", __func__); return -ENODEV; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { pr_err("%s : need I2C_FUNC_I2C\n", __func__); return -ENODEV; } ret = gpio_request(platform_data->irq_gpio, "nfc_int"); if (ret) return -ENODEV; ret = gpio_request(platform_data->ven_gpio, "nfc_ven"); if (ret) goto err_ven; ret = gpio_request(platform_data->firm_gpio, "nfc_firm"); if (ret) goto err_firm; #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST ret = gpio_request(platform_data->clk_req_gpio, "nfc_clk_req"); if (ret) goto err_clk_req; #endif pn547_dev = kzalloc(sizeof(*pn547_dev), GFP_KERNEL); if (pn547_dev == NULL) { dev_err(&client->dev, "failed to allocate memory for module data\n"); ret = -ENOMEM; goto err_exit; } #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST pn547_dev->nfc_clock = msm_xo_get(MSM_XO_TCXO_A1, "nfc"); if (IS_ERR(pn547_dev->nfc_clock)) { ret = PTR_ERR(pn547_dev->nfc_clock); printk(KERN_ERR "%s: Couldn't get TCXO_A1 vote for NFC (%d)\n", __func__, ret); ret = -ENODEV; goto err_get_clock; } pn547_dev->clock_state = false; #endif #ifdef CONFIG_NFC_PN547_PMC8974_CLK_REQ pn547_dev->nfc_clk = clk_get(&client->dev, "nfc_clk"); if (IS_ERR(pn547_dev->nfc_clk)) { ret = PTR_ERR(pn547_dev->nfc_clk); printk(KERN_ERR "%s: Couldn't get D1 (%d)\n", __func__, ret); } else { if (clk_prepare_enable(pn547_dev->nfc_clk)) printk(KERN_ERR "%s: Couldn't 
prepare D1\n", __func__); } #endif pr_info("%s : IRQ num %d\n", __func__, client->irq); pn547_dev->irq_gpio = platform_data->irq_gpio; pn547_dev->ven_gpio = platform_data->ven_gpio; pn547_dev->firm_gpio = platform_data->firm_gpio; pn547_dev->conf_gpio = platform_data->conf_gpio; #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST pn547_dev->clk_req_gpio = platform_data->clk_req_gpio; pn547_dev->clk_req_irq = platform_data->clk_req_irq; #endif pn547_dev->client = client; /* init mutex and queues */ init_waitqueue_head(&pn547_dev->read_wq); mutex_init(&pn547_dev->read_mutex); pn547_dev->pn547_device.minor = MISC_DYNAMIC_MINOR; #ifdef CONFIG_NFC_PN547 pn547_dev->pn547_device.name = "pn547"; #else pn547_dev->pn547_device.name = "pn544"; #endif pn547_dev->pn547_device.fops = &pn547_dev_fops; ret = misc_register(&pn547_dev->pn547_device); if (ret) { pr_err("%s : misc_register failed\n", __FILE__); goto err_misc_register; } /* request irq. the irq is set whenever the chip has data available * for reading. it is cleared when all data has been read. 
*/ pr_info("%s : requesting IRQ %d\n", __func__, client->irq); gpio_direction_input(pn547_dev->irq_gpio); gpio_direction_output(pn547_dev->ven_gpio, 0); gpio_direction_output(pn547_dev->firm_gpio, 0); #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST gpio_direction_input(pn547_dev->clk_req_gpio); #endif i2c_set_clientdata(client, pn547_dev); wake_lock_init(&pn547_dev->nfc_wake_lock, WAKE_LOCK_SUSPEND, "nfc_wake_lock"); #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST pn547_dev->wq_clock = create_singlethread_workqueue("nfc_wq"); if (!pn547_dev->wq_clock) { ret = -ENOMEM; pr_err("%s: could not create workqueue\n", __func__); goto err_create_workqueue; } INIT_WORK(&pn547_dev->work_nfc_clock, nfc_work_func_clock); #endif ret = request_irq(client->irq, pn547_dev_irq_handler, IRQF_TRIGGER_RISING, "pn547", pn547_dev); if (ret) { dev_err(&client->dev, "request_irq failed\n"); goto err_request_irq_failed; } disable_irq_nosync(pn547_dev->client->irq); atomic_set(&pn547_dev->irq_enabled, 0); #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST ret = request_irq(pn547_dev->clk_req_irq, pn547_dev_clk_req_irq_handler, IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING , "pn547_clk_req", pn547_dev); if (ret) { dev_err(&client->dev, "request_irq(clk_req) failed\n"); goto err_request_irq_failed; } enable_irq_wake(pn547_dev->clk_req_irq); #endif gpio_set_value(pn547_dev->ven_gpio, 1); usleep_range(10000, 11000); for (addr = 0x28; addr < 0x2C; addr++) { client->addr = addr; addrcnt = 2; do { ret = i2c_master_send(client, tmp, 4); if (ret > 0) { pr_info("%s : i2c addr=0x%X\n", __func__, client->addr); break; } msleep(20); } while (addrcnt--); if (ret > 0) break; } gpio_set_value(pn547_dev->ven_gpio, 0); if (ret < 0) { pr_err("%s : fail to get i2c addr\n", __func__); goto err_request_irq_failed; } pr_info("%s : success\n", __func__); return 0; err_request_irq_failed: #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST err_create_workqueue: #endif misc_deregister(&pn547_dev->pn547_device); 
wake_lock_destroy(&pn547_dev->nfc_wake_lock); err_misc_register: mutex_destroy(&pn547_dev->read_mutex); #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST msm_xo_put(pn547_dev->nfc_clock); err_get_clock: #endif kfree(pn547_dev); err_exit: #ifdef CONFIG_NFC_PN547_CLOCK_REQUEST gpio_free(platform_data->clk_req_gpio); err_clk_req: #endif gpio_free(platform_data->firm_gpio); err_firm: gpio_free(platform_data->ven_gpio); err_ven: gpio_free(platform_data->irq_gpio); pr_err("[pn547] pn547_probe fail!\n"); return ret; }
static long pn547_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct pn547_dev *pn547_dev = filp->private_data; switch (cmd) { case PN547_SET_PWR: if (arg == 2) { /* power on with firmware download (requires hw reset) */ gpio_set_value_cansleep(pn547_dev->ven_gpio, 1); gpio_set_value(pn547_dev->firm_gpio, 1); usleep_range(10000, 10050); gpio_set_value_cansleep(pn547_dev->ven_gpio, 0); usleep_range(10000, 10050); gpio_set_value_cansleep(pn547_dev->ven_gpio, 1); usleep_range(10000, 10050); if (atomic_read(&pn547_dev->irq_enabled) == 0) { atomic_set(&pn547_dev->irq_enabled, 1); enable_irq(pn547_dev->client->irq); enable_irq_wake(pn547_dev->client->irq); } pr_info("%s power on with firmware, irq=%d\n", __func__, atomic_read(&pn547_dev->irq_enabled)); } else if (arg == 1) { /* power on */ if (pn547_dev->conf_gpio) pn547_dev->conf_gpio(); gpio_set_value(pn547_dev->firm_gpio, 0); gpio_set_value_cansleep(pn547_dev->ven_gpio, 1); usleep_range(10000, 10050); if (atomic_read(&pn547_dev->irq_enabled) == 0) { atomic_set(&pn547_dev->irq_enabled, 1); enable_irq(pn547_dev->client->irq); enable_irq_wake(pn547_dev->client->irq); } pr_info("%s power on, irq=%d\n", __func__, atomic_read(&pn547_dev->irq_enabled)); } else if (arg == 0) { /* power off */ if (atomic_read(&pn547_dev->irq_enabled) == 1) { atomic_set(&pn547_dev->irq_enabled, 0); disable_irq_wake(pn547_dev->client->irq); disable_irq_nosync(pn547_dev->client->irq); } pr_info("%s power off, irq=%d\n", __func__, atomic_read(&pn547_dev->irq_enabled)); gpio_set_value(pn547_dev->firm_gpio, 0); gpio_set_value_cansleep(pn547_dev->ven_gpio, 0); usleep_range(10000, 10050); } else if (arg == 3) { pr_info("%s Read Cancel\n", __func__); pn547_dev->cancel_read = true; atomic_set(&pn547_dev->read_flag, 1); wake_up(&pn547_dev->read_wq); } else { pr_err("%s bad arg %lu\n", __func__, arg); return -EINVAL; } break; default: pr_err("%s bad ioctl %u\n", __func__, cmd); return -EINVAL; } return 0; }
/*
 * Mask the charm modem's errfatal and status notification interrupts.
 * Uses the nosync variant, so handlers already running on another CPU
 * may still be in flight when this returns.
 */
static void charm_disable_irqs(void)
{
	disable_irq_nosync(charm_errfatal_irq);
	disable_irq_nosync(charm_status_irq);
}
/*
 * Mask one of the codec's internal interrupts without waiting for a
 * running handler to finish. The codec-local interrupt number is first
 * translated to its Linux virtual irq.
 */
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	int virq = phyirq_to_virq(wcd9xxx_res, irq);

	disable_irq_nosync(virq);
}
static int sii9234_probe(struct platform_device *pdev) { int ret; struct mhl_platform_data *mhl_pdata = pdev->dev.platform_data; struct mhl_dev *mhl_dev; unsigned int mhl_wakeup_irq; struct class *sec_mhl; struct input_dev *input; if (mhl_pdata == NULL) { pr_err("MHL probe fail\n"); return -ENODEV; } ret = i2c_add_driver(&sii9234_i2c_driver); if (ret != 0) { pr_err("[MHL SII9234] can't add i2c driver\n"); return ret; } else { pr_err("[MHL SII9234] add i2c driver\n"); } ret = i2c_add_driver(&sii9234a_i2c_driver); if (ret != 0) { pr_err("[MHL SII9234A] can't add i2c driver\n"); goto err_i2c_a_add; } else { pr_err("[MHL SII9234A] add i2c driver\n"); } ret = i2c_add_driver(&sii9234b_i2c_driver); if (ret != 0) { pr_err("[MHL SII9234B] can't add i2c driver\n"); goto err_i2c_b_add; } else { pr_err("[MHL SII9234B] add i2c driver\n"); } ret = i2c_add_driver(&sii9234c_i2c_driver); if (ret != 0) { pr_err("[MHL SII9234C] can't add i2c driver\n"); goto err_i2c_c_add; } else { pr_err("[MHL SII9234C] add i2c driver\n"); } mhl_dev = kzalloc(sizeof(struct mhl_dev), GFP_KERNEL); if (!mhl_dev) { ret = -ENOMEM; goto err_mem; } input = input_allocate_device(); if (!input) { pr_err("failed to allocate input device.\n"); ret = -ENOMEM; goto err_input; } set_bit(EV_KEY, input->evbit); bitmap_fill(input->keybit, KEY_MAX); mhl_dev->input = input; input_set_drvdata(input, mhl_dev); input->name = "sii9234_rcp"; input->id.bustype = BUS_I2C; ret = input_register_device(input); if (ret < 0) { pr_err("fail to register input device\n"); goto err_misc_register; } mhl_dev->pdata = mhl_pdata; mhl_dev->irq_gpio = mhl_pdata->mhl_int; mhl_dev->wake_up_gpio = mhl_pdata->mhl_wake_up; INIT_WORK(&mhl_dev->sii9234_int_work, sii9234_interrupt_event_work); mhl_dev->sii9234_wq = create_singlethread_workqueue("sii9234_wq"); mhl_dev->mdev.minor = MISC_DYNAMIC_MINOR; mhl_dev->mdev.name = "mhl"; mhl_dev->mdev.fops = &mhl_fops; dev_set_drvdata(&pdev->dev, mhl_dev); mhl_dev->process_dev = &pdev->dev; ret = 
misc_register(&mhl_dev->mdev); if (ret) { pr_err("mhl misc_register failed\n"); goto err_misc_register; } g_mhl_dev = mhl_dev; mhl_pdata->mhl_irq = gpio_to_irq(mhl_dev->irq_gpio); irq_set_irq_type(mhl_pdata->mhl_irq, IRQ_TYPE_EDGE_RISING); ret = request_threaded_irq(mhl_pdata->mhl_irq, NULL, mhl_int_irq_handler, IRQF_DISABLED, "mhl_int", mhl_dev); if (ret) { pr_err("unable to request irq mhl_int err:: %d\n", ret); goto err_irq_request; } Sii9234_int_irq_disable(); pr_info("create mhl sysfile\n"); sec_mhl = class_create(THIS_MODULE, "mhl"); if (IS_ERR(sec_mhl)) { pr_err("Failed to create class(sec_mhl)!\n"); goto err_exit; } ret = class_create_file(sec_mhl, &class_attr_test_result); if (ret) { pr_err("[ERROR] Failed to create device file in sysfs entries!\n"); goto err_exit; } #if 0 mhl_wakeup_irq = gpio_to_irq(mhl_dev->wake_up_gpio); set_irq_type(mhl_wakeup_irq, IRQ_TYPE_EDGE_RISING); ret = request_threaded_irq(mhl_wakeup_irq, NULL, mhl_wake_up_irq_handler, IRQF_DISABLED, "mhl_wake_up", mhl_dev); if (ret) { pr_err("unable to request irq mhl_wake_up err:: %d\n", ret); goto err_wake_up_irq_request; } #endif #if 0 disable_irq_nosync(mhl_irq); disable_irq_nosync(mhl_wakeup_irq); #endif return 0; err_exit: class_destroy(sec_mhl); err_wake_up_irq_request: free_irq(gpio_to_irq(mhl_dev->irq_gpio), mhl_dev); err_irq_request: mhl_pdata->mhl_irq = NULL; misc_deregister(&mhl_dev->mdev); err_misc_register: input_free_device(input); err_input: destroy_workqueue(mhl_dev->sii9234_wq); kfree(mhl_dev); err_mem: i2c_del_driver(&sii9234c_i2c_driver); err_i2c_c_add: i2c_del_driver(&sii9234b_i2c_driver); err_i2c_b_add: i2c_del_driver(&sii9234a_i2c_driver); err_i2c_a_add: i2c_del_driver(&sii9234_i2c_driver); return ret; }
/*
 * ioctl handler for the BCM2079x NFC character device.
 *
 * BCMNFC_READ_FULL_PACKET / BCMNFC_READ_MULTI_PACKETS only log (the read
 * mode is handled elsewhere); BCMNFC_CHANGE_ADDR switches the i2c slave
 * address; BCMNFC_POWER_CTL toggles the enable GPIO together with the irq
 * (releasing the wakelock if irqs were pending); BCMNFC_WAKE_CTL drives
 * the wake GPIO, holding the wakelock while the chip is asleep.
 * Always returns 0 (unknown commands are only logged).
 */
static long bcm2079x_dev_unlocked_ioctl(struct file *filp,
					unsigned int cmd, unsigned long arg)
{
	struct bcm2079x_dev *bcm2079x_dev = filp->private_data;

	switch (cmd) {
	case BCMNFC_READ_FULL_PACKET:
		DBG(dev_info(&bcm2079x_dev->client->dev,
			 "%s, BCMNFC_READ_FULL_PACKET (%x, %lx):\n", __func__, cmd,
			 arg));
		break;
	case BCMNFC_READ_MULTI_PACKETS:
		DBG(dev_info(&bcm2079x_dev->client->dev,
			 "%s, BCMNFC_READ_MULTI_PACKETS (%x, %lx):\n", __func__, cmd,
			 arg));
		break;
	case BCMNFC_CHANGE_ADDR:
		DBG(dev_info(&bcm2079x_dev->client->dev,
			 "%s, BCMNFC_CHANGE_ADDR (%x, %lx):\n", __func__, cmd,
			 arg));
		change_client_addr(bcm2079x_dev, arg);
		break;
	case BCMNFC_POWER_CTL:
		DBG(dev_info(&bcm2079x_dev->client->dev,
			 "%s, BCMNFC_POWER_CTL (%x, %lx):\n", __func__, cmd,
			 arg));
		if (arg == 1) {	/* Power On */
			gpio_set_value(bcm2079x_dev->en_gpio, 1);
			if (bcm2079x_dev->irq_enabled == FALSE) {
				bcm2079x_dev->count_irq = 0;
				enable_irq(bcm2079x_dev->client->irq);
				bcm2079x_dev->irq_enabled = true;
			}
		} else {
			if (bcm2079x_dev->irq_enabled == true) {
				bcm2079x_dev->irq_enabled = FALSE;
				disable_irq_nosync(bcm2079x_dev->client->irq);
				/* Drop the wakelock an earlier irq took. */
				if (bcm2079x_dev->count_irq > 0)
					wake_unlock(&nfc_wake_lock);
			}
			gpio_set_value(bcm2079x_dev->en_gpio, 0);
			/* Power-off resets the chip's i2c address. */
			set_client_addr(bcm2079x_dev,
					bcm2079x_dev->original_address);
		}
		break;
	case BCMNFC_WAKE_CTL:
		DBG(dev_info(&bcm2079x_dev->client->dev,
			 "%s, BCMNFC_WAKE_CTL (%x, %lx):\n", __func__, cmd,
			 arg));
#ifdef CONFIG_HAS_WAKELOCK
		/* arg==0 asserts wake: hold the wakelock while awake. */
		if (arg == 0) {
			wake_lock(&nfc_wake_lock);
			DBG(dev_info(&bcm2079x_dev->client->dev,
				"%s: got wake lock", __func__));
		}
#endif
		gpio_set_value(bcm2079x_dev->wake_gpio, arg);
#ifdef CONFIG_HAS_WAKELOCK
		if (arg == 1) {
			wake_unlock(&nfc_wake_lock);
			DBG(dev_info(&bcm2079x_dev->client->dev,
				"%s: release wake lock, count_irq = %d",
				__func__, bcm2079x_dev->count_irq));
		}
#endif
		break;
	default:
		dev_err(&bcm2079x_dev->client->dev,
			"%s, unknown cmd (%x, %lx)\n", __func__, cmd, arg);
		return 0;
	}

	return 0;
}
/*
 * Common init for platform-bus SDHCI hosts: look up the MMIO resource,
 * allocate the sdhci_host (optionally hung off a PCI MFD parent),
 * apply an ARCH_DOVE card-interrupt GPIO workaround, wire ops/quirks
 * from @pdata, map registers and stash the host as drvdata.
 *
 * Returns the new sdhci_host on success or an ERR_PTR on failure;
 * callers must check with IS_ERR().
 */
struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
                    struct sdhci_pltfm_data *pdata)
{
    struct sdhci_host *host;
    struct sdhci_pltfm_host *pltfm_host;
    struct device_node *np = pdev->dev.of_node;
    struct resource *iomem;
    int ret;

    iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!iomem) {
        ret = -ENOMEM;
        goto err;
    }

    /* SDHCI register file is 256 bytes; warn but continue if smaller */
    if (resource_size(iomem) < 0x100)
        dev_err(&pdev->dev, "Invalid iomem size!\n");

    /* Some PCI-based MFD need the parent here */
    if (pdev->dev.parent != &platform_bus && !np)
        host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
    else
        host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));

    if (IS_ERR(host)) {
        ret = PTR_ERR(host);
        goto err;
    }

    pltfm_host = sdhci_priv(host);

#ifdef CONFIG_ARCH_DOVE
    /* Dove workaround: route the card interrupt via a dedicated GPIO
     * IRQ (taken disabled until the host enables it). */
    struct sdhci_dove_int_wa *data = (struct sdhci_dove_int_wa *)
        pdev->dev.platform_data;
    if (data) {
        dev_dbg(&pdev->dev, " request wa irq\n");
        pltfm_host->dove_card_int_wa = 1;
        pltfm_host->dove_int_wa_info.irq = data->irq;
        pltfm_host->dove_int_wa_info.gpio = data->gpio;
        pltfm_host->dove_int_wa_info.func_select_bit =
            data->func_select_bit;
        pltfm_host->dove_int_wa_info.status = 0; //disabled
        ret = devm_request_irq(&pdev->dev,
                pltfm_host->dove_int_wa_info.irq,
                sdhci_dove_gpio_irq, IRQF_DISABLED,
                mmc_hostname(host->mmc), host);
        if(ret) {
            dev_err(&pdev->dev, "cannot request wa irq\n");
            goto err;
        }
        /* to balance disable/enable_irq */
        disable_irq_nosync(pltfm_host->dove_int_wa_info.irq);
    } else {
        dev_dbg(&pdev->dev, " no request wa irq\n");
        pltfm_host->dove_card_int_wa = 0;
    }
#endif
    host->hw_name = dev_name(&pdev->dev);
    /* Platform-specific ops win; fall back to the generic pltfm ops */
    if (pdata && pdata->ops)
        host->ops = pdata->ops;
    else
        host->ops = &sdhci_pltfm_ops;
    if (pdata)
        host->quirks = pdata->quirks;
    host->irq = platform_get_irq(pdev, 0);

    if (!request_mem_region(iomem->start, resource_size(iomem),
        mmc_hostname(host->mmc))) {
        dev_err(&pdev->dev, "cannot request region\n");
        ret = -EBUSY;
        goto err_request;
    }

    host->ioaddr = ioremap(iomem->start, resource_size(iomem));
    if (!host->ioaddr) {
        dev_err(&pdev->dev, "failed to remap registers\n");
        ret = -ENOMEM;
        goto err_remap;
    }

    platform_set_drvdata(pdev, host);

    return host;

/* Unwind in reverse acquisition order */
err_remap:
    release_mem_region(iomem->start, resource_size(iomem));
err_request:
    sdhci_free_host(host);
err:
    dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
    return ERR_PTR(ret);
}
/*
 * I2C probe for the bcm2079x NFC controller.
 *
 * Acquires the three control GPIOs (irq/data-ready input, VEN enable
 * output, FIRM wake output), allocates and initializes the device
 * state, registers the misc character device, then requests the
 * data-ready IRQ and immediately gates it (it is enabled from the
 * power-control ioctl).
 *
 * Returns 0 on success or a negative errno; all acquired resources are
 * released in reverse order via the goto chain on failure.
 *
 * NOTE(review): the wakelock initialized just before request_irq is not
 * destroyed on the request_irq failure path — confirm whether
 * wake_lock_destroy is needed there.
 */
static int bcm2079x_probe(struct i2c_client *client,
              const struct i2c_device_id *id)
{
    int ret;
    struct bcm2079x_platform_data *platform_data;
    struct bcm2079x_dev *bcm2079x_dev;

    platform_data = client->dev.platform_data;

    dev_info(&client->dev, "%s, probing bcm2079x driver flags = %x\n",
         __func__, client->flags);
    if (platform_data == NULL) {
        dev_err(&client->dev, "nfc probe fail\n");
        return -ENODEV;
    }

    if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
        dev_err(&client->dev, "need I2C_FUNC_I2C\n");
        return -ENODEV;
    }

    ret = gpio_request(platform_data->irq_gpio, "nfc_int");
    if (ret)
        return -ENODEV;
    gpio_direction_input(platform_data->irq_gpio);

    ret = gpio_request(platform_data->en_gpio, "nfc_ven");
    if (ret)
        goto err_en;
    gpio_direction_output(platform_data->en_gpio, 0);

    ret = gpio_request(platform_data->wake_gpio, "nfc_firm");
    if (ret)
        goto err_firm;
    gpio_direction_output(platform_data->wake_gpio, 1);

    /* Start with the chip powered down and wake asserted */
    gpio_set_value(platform_data->en_gpio, 0);
    gpio_set_value(platform_data->wake_gpio, 1);

    bcm2079x_dev = kzalloc(sizeof(*bcm2079x_dev), GFP_KERNEL);
    if (bcm2079x_dev == NULL) {
        dev_err(&client->dev,
            "failed to allocate memory for module data\n");
        ret = -ENOMEM;
        goto err_exit;
    }

    bcm2079x_dev->wake_gpio = platform_data->wake_gpio;
    bcm2079x_dev->irq_gpio = platform_data->irq_gpio;
    bcm2079x_dev->en_gpio = platform_data->en_gpio;
    bcm2079x_dev->client = client;

    /* init mutex and queues */
    init_waitqueue_head(&bcm2079x_dev->read_wq);
    mutex_init(&bcm2079x_dev->read_mutex);
    spin_lock_init(&bcm2079x_dev->irq_enabled_lock);

    bcm2079x_dev->bcm2079x_device.minor = MISC_DYNAMIC_MINOR;
    bcm2079x_dev->bcm2079x_device.name = "bcm2079x";
    bcm2079x_dev->bcm2079x_device.fops = &bcm2079x_dev_fops;

    ret = misc_register(&bcm2079x_dev->bcm2079x_device);
    if (ret) {
        dev_err(&client->dev, "misc_register failed\n");
        goto err_misc_register;
    }
    /* Remembered so power-off can restore the original I2C address */
    bcm2079x_dev->original_address = client->addr;
#ifdef CONFIG_HAS_WAKELOCK
    wake_lock_init(&nfc_wake_lock, WAKE_LOCK_IDLE, "NFCWAKE");
#endif
    i2c_set_clientdata(client, bcm2079x_dev);

    /* request irq. the irq is set whenever the chip has data available
     * for reading. it is cleared when all data has been read. */
    DBG(dev_info(&client->dev, "requesting IRQ %d\n", client->irq));

    ret = request_irq(client->irq, bcm2079x_dev_irq_handler,
              INTERRUPT_TRIGGER_TYPE, client->name, bcm2079x_dev);
    if (ret) {
        dev_err(&client->dev, "request_irq failed\n");
        goto err_request_irq_failed;
    }
    /* Gate the IRQ until userspace powers the chip on via ioctl */
    disable_irq_nosync(client->irq);
    bcm2079x_dev->irq_enabled = FALSE;

    DBG(dev_info(&client->dev,
             "%s, probing bcm2079x driver exited successfully\n",
             __func__));
    return 0;

/* Error unwind: reverse order of acquisition */
err_request_irq_failed:
    misc_deregister(&bcm2079x_dev->bcm2079x_device);
err_misc_register:
    mutex_destroy(&bcm2079x_dev->read_mutex);
    kfree(bcm2079x_dev);
err_exit:
    gpio_free(platform_data->wake_gpio);
err_firm:
    gpio_free(platform_data->en_gpio);
err_en:
    gpio_free(platform_data->irq_gpio);
    return ret;
}
/*
 * Bring the MSM HSIC controller out of low-power mode (LPM).
 *
 * Sequence: disable the wakeup IRQ, take a wakelock, re-vote bus
 * bandwidth, raise VDDCX back to nominal, re-enable clocks, clear the
 * PHY low-power (PHCD) bit and poll for PHY sync, then resume the root
 * hub and rebalance runtime-PM counts deferred while in LPM.
 *
 * Returns 0 (also when already out of LPM).
 */
static int msm_hsic_resume(struct msm_hsic_hcd *mehci)
{
    struct usb_hcd *hcd = hsic_to_hcd(mehci);
    int cnt = 0, ret;
    unsigned temp;
    int min_vol, max_vol;
    unsigned long flags;

    if (!atomic_read(&mehci->in_lpm)) {
        dev_dbg(mehci->dev, "%s called in !in_lpm\n", __func__);
        return 0;
    }

    /* Lock guards wakeup_irq_enabled against the wakeup IRQ path */
    spin_lock_irqsave(&mehci->wakeup_lock, flags);
    if (mehci->wakeup_irq_enabled) {
        disable_irq_wake(mehci->wakeup_irq);
        disable_irq_nosync(mehci->wakeup_irq);
        mehci->wakeup_irq_enabled = 0;
    }
    spin_unlock_irqrestore(&mehci->wakeup_lock, flags);

    wake_lock(&mehci->wlock);

    if (mehci->bus_perf_client && debug_bus_voting_enabled) {
        mehci->bus_vote = true;
        queue_work(ehci_wq, &mehci->bus_vote_w);
    }

    /* Restore nominal VDDCX before touching the link */
    min_vol = vdd_val[mehci->vdd_type][VDD_MIN];
    max_vol = vdd_val[mehci->vdd_type][VDD_MAX];
    ret = regulator_set_voltage(mehci->hsic_vddcx, min_vol, max_vol);
    if (ret < 0)
        dev_err(mehci->dev, "unable to set nominal vddcx voltage (no VDD MIN)\n");

    clk_prepare_enable(mehci->core_clk);
    clk_prepare_enable(mehci->phy_clk);
    clk_prepare_enable(mehci->cal_clk);
    clk_prepare_enable(mehci->ahb_clk);

    temp = readl_relaxed(USB_USBCMD);
    temp &= ~ASYNC_INTR_CTRL;
    temp &= ~ULPI_STP_CTRL;
    writel_relaxed(temp, USB_USBCMD);

    /* PHY already out of low-power state: nothing more to do */
    if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
        goto skip_phy_resume;

    /* Clear PHCD and busy-wait for the PHY to regain ULPI sync */
    temp = readl_relaxed(USB_PORTSC);
    temp &= ~(PORT_RWC_BITS | PORTSC_PHCD);
    writel_relaxed(temp, USB_PORTSC);
    while (cnt < PHY_RESUME_TIMEOUT_USEC) {
        if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD) &&
            (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_SYNC_STATE))
            break;
        udelay(1);
        cnt++;
    }

    if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
        /*
         * This is a fatal error. Reset the link and
         * PHY to make hsic working.
         */
        dev_err(mehci->dev, "Unable to resume USB. Reset the hsic\n");
        msm_hsic_config_gpios(mehci, 0);
        msm_hsic_reset(mehci);
    }

skip_phy_resume:
    usb_hcd_resume_root_hub(hcd);

    atomic_set(&mehci->in_lpm, 0);

    /* An interrupt arrived while suspended: rebalance and re-arm */
    if (mehci->async_int) {
        mehci->async_int = false;
        pm_runtime_put_noidle(mehci->dev);
        enable_irq(hcd->irq);
    }

    if (atomic_read(&mehci->pm_usage_cnt)) {
        atomic_set(&mehci->pm_usage_cnt, 0);
        pm_runtime_put_noidle(mehci->dev);
    }

    dev_info(mehci->dev, "HSIC-USB exited from low power mode\n");

    return 0;
}
static irqreturn_t exynos4x12_tmu_irq_handler(int irq, void *id) { struct s5p_tmu_info *info = id; unsigned int status; disable_irq_nosync(irq); status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT) & 0x1FFFF; pr_info("EXYNOS4x12_tmu interrupt: INTSTAT = 0x%08x\n", status); /* To handle multiple interrupt pending, * interrupt by high temperature are serviced with priority. */ #if defined(CONFIG_TC_VOLTAGE) if (status & INTSTAT_FALL0) { info->tmu_state = TMU_STATUS_TC; __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); exynos_interrupt_enable(info, 0); } else if (status & INTSTAT_RISE2) { info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR); #else if (status & INTSTAT_RISE2) { info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR); #endif } else if (status & INTSTAT_RISE1) { info->tmu_state = TMU_STATUS_WARNING; __raw_writel(INTCLEAR_RISE1, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else if (status & INTSTAT_RISE0) { info->tmu_state = TMU_STATUS_THROTTLED; __raw_writel(INTCLEAR_RISE0, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else { pr_err("%s: interrupt error\n", __func__); __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate / 2); return -ENODEV; } /* read current temperature & save */ info->last_temperature = get_curr_temp(info); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); return IRQ_HANDLED; } static irqreturn_t exynos4210_tmu_irq_handler(int irq, void *id) { struct s5p_tmu_info *info = id; unsigned int status; disable_irq_nosync(irq); status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT); pr_info("EXYNOS4212_tmu interrupt: INTSTAT = 0x%08x\n", status); /* To handle multiple interrupt pending, * interrupt by high temperature are serviced with priority. 
*/ if (status & TMU_INTSTAT2) { info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR2, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else if (status & TMU_INTSTAT1) { info->tmu_state = TMU_STATUS_WARNING; __raw_writel(INTCLEAR1, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else if (status & TMU_INTSTAT0) { info->tmu_state = TMU_STATUS_THROTTLED; __raw_writel(INTCLEAR0, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else { pr_err("%s: interrupt error\n", __func__); __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate / 2); return -ENODEV; } /* read current temperature & save */ info->last_temperature = get_curr_temp(info); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); return IRQ_HANDLED; } #ifdef CONFIG_TMU_SYSFS static ssize_t s5p_tmu_show_curr_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct s5p_tmu_info *info = dev_get_drvdata(dev); unsigned int curr_temp; curr_temp = get_curr_temp(info); curr_temp *= 10; pr_info("curr temp = %d\n", curr_temp); return sprintf(buf, "%d\n", curr_temp); } static DEVICE_ATTR(curr_temp, S_IRUGO, s5p_tmu_show_curr_temp, NULL); #endif static int __devinit s5p_tmu_probe(struct platform_device *pdev) { struct s5p_tmu_info *info; struct s5p_platform_tmu *pdata; struct resource *res; unsigned int mask = (enable_mask & ENABLE_DBGMASK); int ret = 0; pr_debug("%s: probe=%p\n", __func__, pdev); info = kzalloc(sizeof(struct s5p_tmu_info), GFP_KERNEL); if (!info) { dev_err(&pdev->dev, "failed to alloc memory!\n"); ret = -ENOMEM; goto err_nomem; } platform_set_drvdata(pdev, info); info->dev = &pdev->dev; info->tmu_state = TMU_STATUS_INIT; /* set cpufreq limit level at 1st_throttle & 2nd throttle */ pdata = info->dev->platform_data; if (pdata->cpufreq.limit_1st_throttle) exynos_cpufreq_get_level(pdata->cpufreq.limit_1st_throttle, &info->cpufreq_level_1st_throttle); if (pdata->cpufreq.limit_2nd_throttle) 
exynos_cpufreq_get_level(pdata->cpufreq.limit_2nd_throttle, &info->cpufreq_level_2nd_throttle); pr_info("@@@ %s: cpufreq_limit: 1st_throttle: %u, 2nd_throttle = %u\n", __func__, info->cpufreq_level_1st_throttle, info->cpufreq_level_2nd_throttle); #if defined(CONFIG_TC_VOLTAGE) /* Temperature compensated voltage */ if (exynos_find_cpufreq_level_by_volt(pdata->temp_compensate.arm_volt, &info->cpulevel_tc) < 0) { dev_err(&pdev->dev, "cpufreq_get_level error\n"); ret = -EINVAL; goto err_nores; } #ifdef CONFIG_BUSFREQ_OPP /* To lock bus frequency in OPP mode */ info->bus_dev = dev_get("exynos-busfreq"); if (info->bus_dev < 0) { dev_err(&pdev->dev, "Failed to get_dev\n"); ret = -EINVAL; goto err_nores; } if (exynos4x12_find_busfreq_by_volt(pdata->temp_compensate.bus_volt, &info->busfreq_tc)) { dev_err(&pdev->dev, "get_busfreq_value error\n"); ret = -EINVAL; goto err_nores; } #endif pr_info("%s: cpufreq_level[%u], busfreq_value[%u]\n", __func__, info->cpulevel_tc, info->busfreq_tc); #endif /* Map auto_refresh_rate of normal & tq0 mode */ info->auto_refresh_tq0 = get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0); info->auto_refresh_normal = get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL); /* To poll current temp, set sampling rate to ONE second sampling */ info->sampling_rate = usecs_to_jiffies(1000 * 1000); /* 10sec monitroing */ info->monitor_period = usecs_to_jiffies(10000 * 1000); /* support test mode */ if (mask & ENABLE_TEST_MODE) set_temperature_params(info); else print_temperature_params(info); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get memory region resource\n"); ret = -ENODEV; goto err_nores; } info->ioarea = request_mem_region(res->start, res->end-res->start + 1, pdev->name); if (!(info->ioarea)) { dev_err(&pdev->dev, "failed to reserve memory region\n"); ret = -EBUSY; goto err_nores; } info->tmu_base = ioremap(res->start, (res->end - res->start) + 1); if (!(info->tmu_base)) { 
dev_err(&pdev->dev, "failed ioremap()\n"); ret = -ENOMEM; goto err_nomap; } tmu_monitor_wq = create_freezable_workqueue(dev_name(&pdev->dev)); if (!tmu_monitor_wq) { pr_info("Creation of tmu_monitor_wq failed\n"); ret = -ENOMEM; goto err_wq; } /* To support periodic temprature monitoring */ if (mask & ENABLE_TEMP_MON) { INIT_DELAYED_WORK_DEFERRABLE(&info->monitor, exynos4_poll_cur_temp); queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor, info->monitor_period); } INIT_DELAYED_WORK_DEFERRABLE(&info->polling, exynos4_handler_tmu_state); info->irq = platform_get_irq(pdev, 0); if (info->irq < 0) { dev_err(&pdev->dev, "no irq for thermal %d\n", info->irq); ret = -EINVAL; goto err_irq; } if (soc_is_exynos4210()) ret = request_irq(info->irq, exynos4210_tmu_irq_handler, IRQF_DISABLED, "s5p-tmu interrupt", info); else ret = request_irq(info->irq, exynos4x12_tmu_irq_handler, IRQF_DISABLED, "s5p-tmu interrupt", info); if (ret) { dev_err(&pdev->dev, "request_irq is failed. %d\n", ret); goto err_irq; } ret = device_create_file(&pdev->dev, &dev_attr_temperature); if (ret != 0) { pr_err("Failed to create temperatue file: %d\n", ret); goto err_sysfs_file1; } ret = device_create_file(&pdev->dev, &dev_attr_tmu_state); if (ret != 0) { pr_err("Failed to create tmu_state file: %d\n", ret); goto err_sysfs_file2; } ret = device_create_file(&pdev->dev, &dev_attr_lot_id); if (ret != 0) { pr_err("Failed to create lot id file: %d\n", ret); goto err_sysfs_file3; } ret = tmu_initialize(pdev); if (ret) goto err_init; #ifdef CONFIG_TMU_SYSFS ret = device_create_file(&pdev->dev, &dev_attr_curr_temp); if (ret < 0) { dev_err(&pdev->dev, "Failed to create sysfs group\n"); goto err_init; } #endif #ifdef CONFIG_TMU_DEBUG ret = device_create_file(&pdev->dev, &dev_attr_print_state); if (ret) { dev_err(&pdev->dev, "Failed to create tmu sysfs group\n\n"); return ret; } #endif #if defined(CONFIG_TC_VOLTAGE) /* s/w workaround for fast service when interrupt is not occured, * such as current temp is 
lower than tc interrupt temperature * or current temp is continuosly increased. */ if (get_curr_temp(info) <= pdata->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); } #if defined(CONFIG_VIDEO_MALI400MP) if (mali_voltage_lock_init()) pr_err("Failed to initialize mail voltage lock.\n"); #endif #endif /* initialize tmu_state */ queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); return ret; err_init: device_remove_file(&pdev->dev, &dev_attr_lot_id); err_sysfs_file3: device_remove_file(&pdev->dev, &dev_attr_tmu_state); err_sysfs_file2: device_remove_file(&pdev->dev, &dev_attr_temperature); err_sysfs_file1: if (info->irq >= 0) free_irq(info->irq, info); err_irq: destroy_workqueue(tmu_monitor_wq); err_wq: iounmap(info->tmu_base); err_nomap: release_resource(info->ioarea); kfree(info->ioarea); err_nores: kfree(info); info = NULL; err_nomem: dev_err(&pdev->dev, "initialization failed.\n"); return ret; } static int __devinit s5p_tmu_remove(struct platform_device *pdev) { struct s5p_tmu_info *info = platform_get_drvdata(pdev); cancel_delayed_work(&info->polling); destroy_workqueue(tmu_monitor_wq); device_remove_file(&pdev->dev, &dev_attr_temperature); device_remove_file(&pdev->dev, &dev_attr_tmu_state); if (info->irq >= 0) free_irq(info->irq, info); iounmap(info->tmu_base); release_resource(info->ioarea); kfree(info->ioarea); kfree(info); info = NULL; pr_info("%s is removed\n", dev_name(&pdev->dev)); return 0; } #ifdef CONFIG_PM static int s5p_tmu_suspend(struct platform_device *pdev, pm_message_t state) { struct s5p_tmu_info *info = platform_get_drvdata(pdev); if (!info) return -EAGAIN; /* save register value */ info->reg_save[0] = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL); info->reg_save[1] = __raw_readl(info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL); info->reg_save[2] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0); info->reg_save[3] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1); 
info->reg_save[4] = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN); if (soc_is_exynos4210()) { info->reg_save[5] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP); info->reg_save[6] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0); info->reg_save[7] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1); info->reg_save[8] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2); info->reg_save[9] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3); } else { info->reg_save[5] = __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE); #if defined(CONFIG_TC_VOLTAGE) info->reg_save[6] = __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL); #endif } disable_irq(info->irq); return 0; } static int s5p_tmu_resume(struct platform_device *pdev) { struct s5p_tmu_info *info = platform_get_drvdata(pdev); struct s5p_platform_tmu *data; if (!info) return -EAGAIN; data = info->dev->platform_data; /* restore tmu register value */ __raw_writel(info->reg_save[0], info->tmu_base + EXYNOS4_TMU_CONTROL); __raw_writel(info->reg_save[1], info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL); __raw_writel(info->reg_save[2], info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0); __raw_writel(info->reg_save[3], info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1); if (soc_is_exynos4210()) { __raw_writel(info->reg_save[5], info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP); __raw_writel(info->reg_save[6], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0); __raw_writel(info->reg_save[7], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1); __raw_writel(info->reg_save[8], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2); __raw_writel(info->reg_save[9], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3); } else { __raw_writel(info->reg_save[5], info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE); #if defined(CONFIG_TC_VOLTAGE) __raw_writel(info->reg_save[6], info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL); #endif } __raw_writel(info->reg_save[4], info->tmu_base + EXYNOS4_TMU_INTEN); 
#if defined(CONFIG_TC_VOLTAGE) /* s/w workaround for fast service when interrupt is not occured, * such as current temp is lower than tc interrupt temperature * or current temp is continuosly increased.. */ mdelay(1); if (get_curr_temp(info) <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); } #endif /* Find out tmu_state after wakeup */ queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, 0); return 0; } #else #define s5p_tmu_suspend NULL #define s5p_tmu_resume NULL #endif static struct platform_driver s5p_tmu_driver = { .probe = s5p_tmu_probe, .remove = s5p_tmu_remove, .suspend = s5p_tmu_suspend, .resume = s5p_tmu_resume, .driver = { .name = "s5p-tmu", .owner = THIS_MODULE, }, }; static int __init s5p_tmu_driver_init(void) { return platform_driver_register(&s5p_tmu_driver); } static void __exit s5p_tmu_driver_exit(void) { platform_driver_unregister(&s5p_tmu_driver); }
/*
 * Power the bq27520 fuel gauge up or down.
 *
 * enable==true: enable the Vreg_S3 supply, assert the chip-enable GPIO
 * (which switches on the on-chip 2.5V LDO), configure the SOC
 * interrupt GPIO and request its threaded IRQ (left disabled until the
 * caller enables it).
 * enable==false: release the IRQ/GPIOs and disable the supply, in
 * reverse order.
 *
 * Returns 0 on success or the first negative errno encountered; on a
 * failure in the enable path the goto chain unwinds what was acquired
 * and the vreg_fail compensation toggles the regulator back.
 *
 * NOTE(review): when regulator_enable() itself fails we still reach
 * vreg_fail and call regulator_disable() on a regulator that was never
 * enabled — confirm this unbalanced disable is intended.
 */
static int bq27520_power(bool enable, struct bq27520_device_info *di)
{
    int rc = 0, ret;
    const struct bq27520_platform_data *platdata;

    platdata = di->pdata;
    if (enable) {
        /* switch on Vreg_S3 */
        rc = regulator_enable(vreg_bq27520);
        if (rc < 0) {
            dev_err(di->dev, "%s: vreg %s %s failed (%d)\n",
                __func__, platdata->vreg_name, "enable", rc);
            goto vreg_fail;
        }

        /* Battery gauge enable and switch on onchip 2.5V LDO */
        rc = gpio_request(platdata->chip_en, "GAUGE_EN");
        if (rc) {
            dev_err(di->dev, "%s: fail to request gpio %d (%d)\n",
                __func__, platdata->chip_en, rc);
            goto vreg_fail;
        }
        gpio_direction_output(platdata->chip_en, 0);
        gpio_set_value(platdata->chip_en, 1);

        rc = gpio_request(platdata->soc_int, "GAUGE_SOC_INT");
        if (rc) {
            dev_err(di->dev, "%s: fail to request gpio %d (%d)\n",
                __func__, platdata->soc_int, rc);
            goto gpio_fail;
        }
        gpio_direction_input(platdata->soc_int);
        di->irq = gpio_to_irq(platdata->soc_int);
        rc = request_threaded_irq(di->irq, NULL, soc_irqhandler,
                IRQF_TRIGGER_FALLING|IRQF_TRIGGER_RISING,
                "BQ27520_IRQ", di);
        if (rc) {
            dev_err(di->dev, "%s: fail to request irq %d (%d)\n",
                __func__, platdata->soc_int, rc);
            goto irqreq_fail;
        } else {
            /* keep the IRQ gated until the caller enables it */
            disable_irq_nosync(di->irq);
        }
    } else {
        free_irq(di->irq, di);
        gpio_free(platdata->soc_int);
        /* switch off on-chip 2.5V LDO and disable Battery gauge */
        gpio_set_value(platdata->chip_en, 0);
        gpio_free(platdata->chip_en);
        /* switch off Vreg_S3 */
        rc = regulator_disable(vreg_bq27520);
        if (rc < 0) {
            dev_err(di->dev, "%s: vreg %s %s failed (%d)\n",
                __func__, platdata->vreg_name, "disable", rc);
            goto vreg_fail;
        }
    }
    return rc;

/* Unwind in reverse acquisition order */
irqreq_fail:
    gpio_free(platdata->soc_int);
gpio_fail:
    gpio_set_value(platdata->chip_en, 0);
    gpio_free(platdata->chip_en);
vreg_fail:
    /* Compensate the regulator in the opposite direction of the
     * requested operation (enable path failed -> disable, and
     * vice versa) */
    ret = !enable ? regulator_enable(vreg_bq27520) :
        regulator_disable(vreg_bq27520);
    if (ret < 0) {
        dev_err(di->dev, "%s: vreg %s %s failed (%d) in err path\n",
            __func__, platdata->vreg_name,
            !enable ? "enable" : "disable", ret);
    }
    return rc;
}
/*
 * Gate the interrupt associated with an stpio pin without waiting for
 * a running handler to finish — safe to call from IRQ context.
 */
void stpio_disable_irq_nosync(struct stpio_pin *pin)
{
    disable_irq_nosync(stpio_pin_to_irq(pin));
}
static int vpu_probe(struct platform_device *pdev) { int ret; struct resource *regs; struct jz_vpu *vpu; vpu = kzalloc(sizeof(struct jz_vpu), GFP_KERNEL); if (!vpu) ret = -ENOMEM; vpu->irq = platform_get_irq(pdev, 0); if(vpu->irq < 0) { dev_err(&pdev->dev, "get irq failed\n"); ret = vpu->irq; goto err_get_mem; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_err(&pdev->dev, "No iomem resource\n"); ret = -ENXIO; goto err_get_mem; } vpu->iomem = ioremap(regs->start, resource_size(regs)); if (!vpu->iomem) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENXIO; goto err_get_mem; } vpu->clk_gate = clk_get(&pdev->dev, "vpu"); if (IS_ERR(vpu->clk_gate)) { ret = PTR_ERR(vpu->clk_gate); goto err_get_clk_gate; } vpu->dev = &pdev->dev; vpu->mdev.minor = MISC_DYNAMIC_MINOR; vpu->mdev.name = "jz-vpu"; vpu->mdev.fops = &vpu_misc_fops; spin_lock_init(&vpu->lock); ret = misc_register(&vpu->mdev); if (ret < 0) { dev_err(&pdev->dev, "misc_register failed\n"); goto err_registe_misc; } platform_set_drvdata(pdev, vpu); wake_lock_init(&vpu->wake_lock, WAKE_LOCK_SUSPEND, "vpu"); mutex_init(&vpu->mutex); init_completion(&vpu->done); ret = request_irq(vpu->irq, vpu_interrupt, IRQF_DISABLED, "vpu",vpu); if (ret < 0) { dev_err(&pdev->dev, "request_irq failed\n"); goto err_request_irq; } disable_irq_nosync(vpu->irq); return 0; err_request_irq: misc_deregister(&vpu->mdev); err_registe_misc: clk_put(vpu->clk_gate); err_get_clk_gate: iounmap(vpu->iomem); err_get_mem: kfree(vpu); return ret; }
/* ioctl - I/O control
 *
 * Accelerometer character-device ioctl. Structure:
 *  1) pre-pass copies in the rate argument for APP_SET_RATE;
 *  2) main switch handles START (refcounted power-on + IRQ/polling),
 *     CLOSE (refcounted power-off), APP_SET_RATE and GETDATA (snapshot
 *     of the latest axis sample under data_mutex);
 *  3) post-pass copies the GETDATA result back to userspace.
 *
 * Returns 0 on success, -EFAULT on copy failures, -ENOTTY for unknown
 * commands, or the sensor ops' negative error.
 *
 * NOTE(review): CLOSE decrements start_count without a lower bound, so
 * unbalanced CLOSE calls drive it negative — confirm callers always
 * pair START/CLOSE.
 */
static long gsensor_dev_ioctl(struct file *file,
                  unsigned int cmd, unsigned long arg)
{
    struct sensor_private_data *sensor = g_sensor[SENSOR_TYPE_ACCEL];
    struct i2c_client *client = sensor->client;
    void __user *argp = (void __user *)arg;
    struct sensor_axis axis = {0};
    char rate;
    int result = 0;

    /* Pre-pass: fetch userspace arguments before taking locks */
    switch (cmd) {
    case GSENSOR_IOCTL_APP_SET_RATE:
        if (copy_from_user(&rate, argp, sizeof(rate))) {
            result = -EFAULT;
            goto error;
        }
        break;
    default:
        break;
    }

    switch (cmd) {
    case GSENSOR_IOCTL_START:
        DBG("%s:GSENSOR_IOCTL_START start,status=%d\n", __func__, sensor->status_cur);
        mutex_lock(&sensor->operation_mutex);
        /* only the first opener actually powers the sensor on */
        if (++sensor->start_count == 1) {
            if (sensor->status_cur == SENSOR_OFF) {
                atomic_set(&(sensor->data_ready), 0);
                if ((result = sensor->ops->active(client, 1, 0)) < 0) {
                    mutex_unlock(&sensor->operation_mutex);
                    printk("%s:fail to active sensor,ret=%d\n", __func__, result);
                    goto error;
                }
                if (sensor->pdata->irq_enable) {
                    DBG("%s:enable irq,irq=%d\n", __func__, client->irq);
                    enable_irq(client->irq);	//enable irq
                } else {
                    /* no IRQ wired: fall back to periodic polling */
                    PREPARE_DELAYED_WORK(&sensor->delaywork, sensor_delaywork_func);
                    schedule_delayed_work(&sensor->delaywork,
                        msecs_to_jiffies(sensor->pdata->poll_delay_ms));
                }
                sensor->status_cur = SENSOR_ON;
            }
        }
        mutex_unlock(&sensor->operation_mutex);
        DBG("%s:GSENSOR_IOCTL_START OK\n", __func__);
        break;

    case GSENSOR_IOCTL_CLOSE:
        DBG("%s:GSENSOR_IOCTL_CLOSE start,status=%d\n", __func__, sensor->status_cur);
        mutex_lock(&sensor->operation_mutex);
        /* only the last closer powers the sensor off */
        if (--sensor->start_count == 0) {
            if (sensor->status_cur == SENSOR_ON) {
                atomic_set(&(sensor->data_ready), 0);
                if ((result = sensor->ops->active(client, 0, 0)) < 0) {
                    mutex_unlock(&sensor->operation_mutex);
                    goto error;
                }
                if (sensor->pdata->irq_enable) {
                    DBG("%s:disable irq,irq=%d\n", __func__, client->irq);
                    disable_irq_nosync(client->irq);	//disable irq
                } else
                    cancel_delayed_work_sync(&sensor->delaywork);
                sensor->status_cur = SENSOR_OFF;
            }
            DBG("%s:GSENSOR_IOCTL_CLOSE OK\n", __func__);
        }
        mutex_unlock(&sensor->operation_mutex);
        break;

    case GSENSOR_IOCTL_APP_SET_RATE:
        DBG("%s:GSENSOR_IOCTL_APP_SET_RATE start\n", __func__);
        mutex_lock(&sensor->operation_mutex);
        result = sensor_reset_rate(client, rate);
        if (result < 0) {
            mutex_unlock(&sensor->operation_mutex);
            goto error;
        }
        sensor->status_cur = SENSOR_ON;
        mutex_unlock(&sensor->operation_mutex);
        DBG("%s:GSENSOR_IOCTL_APP_SET_RATE OK\n", __func__);
        break;

    case GSENSOR_IOCTL_GETDATA:
        mutex_lock(&sensor->data_mutex);
        memcpy(&axis, &sensor->axis, sizeof(sensor->axis));	//get data from buffer
        mutex_unlock(&sensor->data_mutex);
        break;

    default:
        result = -ENOTTY;
        goto error;
    }

    /* Post-pass: copy results back to userspace */
    switch (cmd) {
    case GSENSOR_IOCTL_GETDATA:
        if (copy_to_user(argp, &axis, sizeof(axis))) {
            printk("failed to copy sense data to user space.");
            result = -EFAULT;
            goto error;
        }
        DBG("%s:GSENSOR_IOCTL_GETDATA OK\n", __func__);
        break;
    default:
        break;
    }

error:
    return result;
}
/*
 * Bring the MSM EHCI controller out of low-power mode.
 *
 * Sequence: disable the PMIC D+ wakeup IRQ, take a wakelock, vote for
 * the TCXO buffer, re-enable core/iface clocks, (conditionally) raise
 * VDDCX, clear the PHY low-power (PHCD) bit and wait for ULPI sync,
 * then resume the root hub and rebalance runtime-PM counts deferred
 * while in LPM.
 *
 * Returns 0 (also when already out of LPM).
 */
static int msm_ehci_resume(struct msm_hcd *mhcd)
{
    struct msm_usb_host_platform_data *pdata;
    struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
    unsigned long timeout;
    unsigned temp;
    int ret;

    pdata = mhcd->dev->platform_data;

    if (!atomic_read(&mhcd->in_lpm)) {
        dev_dbg(mhcd->dev, "%s called in !in_lpm\n", __func__);
        return 0;
    }

    if (mhcd->pmic_gpio_dp_irq_enabled) {
        disable_irq_wake(mhcd->pmic_gpio_dp_irq);
        disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
        mhcd->pmic_gpio_dp_irq_enabled = 0;
    }
    wake_lock(&mhcd->wlock);

    /* Vote for TCXO when waking up the phy */
    ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
    if (ret)
        dev_err(mhcd->dev, "%s failed to vote for "
            "TCXO D0 buffer%d\n", __func__, ret);

    clk_prepare_enable(mhcd->core_clk);
    clk_prepare_enable(mhcd->iface_clk);

    if (!pdata->mpm_xo_wakeup_int)
        msm_ehci_config_vddcx(mhcd, 1);

    temp = readl_relaxed(USB_USBCMD);
    temp &= ~ASYNC_INTR_CTRL;
    temp &= ~ULPI_STP_CTRL;
    writel_relaxed(temp, USB_USBCMD);

    /* PHY already out of low-power state: nothing more to do */
    if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
        goto skip_phy_resume;

    /* Clear PHCD and poll for ULPI sync, bounded by a jiffies timeout */
    temp = readl_relaxed(USB_PORTSC) & ~PORTSC_PHCD;
    writel_relaxed(temp, USB_PORTSC);

    timeout = jiffies + usecs_to_jiffies(PHY_RESUME_TIMEOUT_USEC);
    while ((readl_relaxed(USB_PORTSC) & PORTSC_PHCD) ||
            !(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_SYNC_STATE)) {
        if (time_after(jiffies, timeout)) {
            /*This is a fatal error. Reset the link and PHY*/
            dev_err(mhcd->dev, "Unable to resume USB. Resetting the h/w\n");
            msm_hsusb_reset(mhcd);
            break;
        }
        udelay(1);
    }

skip_phy_resume:
    usb_hcd_resume_root_hub(hcd);

    atomic_set(&mhcd->in_lpm, 0);

    /* An interrupt arrived while suspended: rebalance and re-arm */
    if (mhcd->async_int) {
        mhcd->async_int = false;
        pm_runtime_put_noidle(mhcd->dev);
        enable_irq(hcd->irq);
    }

    if (atomic_read(&mhcd->pm_usage_cnt)) {
        atomic_set(&mhcd->pm_usage_cnt, 0);
        pm_runtime_put_noidle(mhcd->dev);
    }

    dev_info(mhcd->dev, "EHCI USB exited from low power mode\n");

    return 0;
}
/*
 * Threaded IRQ handler for the capacitive touch keys.
 *
 * Gates its own IRQ on entry, reads the keycode register over I2C,
 * applies an old-hardware offset, recovers from ESD/I2C errors by
 * re-initializing the controller (with retries), and otherwise
 * translates the register into press/release input events. The IRQ is
 * re-enabled before returning, except when the controller is declared
 * dead after all retries fail.
 *
 * NOTE(review): data[0] is logged and offset-adjusted before the I2C
 * return value is checked, so on a failed read those paths see
 * uninitialized data — confirm whether the read should be validated
 * first.
 */
static irqreturn_t touchkey_interrupt(int irq, void *dummy)
// ks 79 - threaded irq (because of pmic gpio int pin) -> when reg is read in
// work_func, data0 is always release, so temporarily move the work_func to
// the threaded irq.
{
    u8 data[3];
    int ret;
    int retry = 10;

    set_touchkey_debug('I');
    disable_irq_nosync(IRQ_TOUCHKEY_INT);

    tkey_vdd_enable(1);
    set_touchkey_debug('a');
    ret = i2c_touchkey_read(KEYCODE_REG, data, 1);
    if (g_debug_switch)
        printk("[TKEY] DATA0 %d\n", data[0]);

    /* early hardware revisions report keycodes offset by 80 */
    if (get_hw_rev() <= 0x04) {
        if (data[0] > 80) {
            data[0] = data[0] - 80;
            printk("[TKEY] DATA0 change [%d] \n", data[0]);
        }
    }

    set_touchkey_debug(data[0]);
    if ((data[0] & ESD_STATE_BIT) || (ret != 0)) {
        printk("[TKEY] ESD_STATE_BIT set or I2C fail: data: %d, retry: %d\n", data[0], retry);
        /* release any keys that may be stuck down */
        input_report_key(touchkey_driver->input_dev, touchkey_keycode[1], 0);
        input_report_key(touchkey_driver->input_dev, touchkey_keycode[2], 0);
        retry = 10;
        /* re-initialize the controller until it answers again */
        while (retry--) {
            mdelay(300);
            init_hw();
            if (i2c_touchkey_read(KEYCODE_REG, data, 3) >= 0) {
                printk("[TKEY] %s touchkey init success\n", __func__);
                set_touchkey_debug('O');
                enable_irq(IRQ_TOUCHKEY_INT);
                return IRQ_NONE;
            }
            printk("[TKEY] %s %d i2c transfer error retry = %d\n", __func__, __LINE__, retry);
        }
        /* touchkey died: leave its IRQ disabled for good */
        //enable_irq(IRQ_TOUCH_INT);
        touchkey_enable = -1;
        printk("[TKEY] %s touchkey died\n", __func__);
        set_touchkey_debug('D');
        return IRQ_NONE;
    }

    if (data[0] & UPDOWN_EVENT_BIT) {
        /* release event: only report it for the key we saw pressed */
        if (press_check == touchkey_keycode[data[0] & KEYCODE_BIT]) {
            input_report_key(touchkey_driver->input_dev,
                touchkey_keycode[data[0] & KEYCODE_BIT], 0);
            touchkey_pressed &= ~(1 << (data[0] & KEYCODE_BIT));
            input_sync(touchkey_driver->input_dev);
            if (g_debug_switch)
                printk(KERN_DEBUG "touchkey release keycode:%d \n",
                    touchkey_keycode[data[0] & KEYCODE_BIT]);
        } else {
            input_report_key(touchkey_driver->input_dev, press_check, 0);
        }
        press_check = 0;
    } else {
        /* press event: suppressed while the touchscreen is active */
        if (touch_is_pressed) {
            printk(KERN_DEBUG "touchkey pressed but don't send event because touch is pressed. \n");
            set_touchkey_debug('P');
        } else {
            if ((data[0] & KEYCODE_BIT) == 2) {
                // if back key is pressed, release multitouch
            }
            input_report_key(touchkey_driver->input_dev,
                touchkey_keycode[data[0] & KEYCODE_BIT], 1);
            touchkey_pressed |= (1 << (data[0] & KEYCODE_BIT));
            input_sync(touchkey_driver->input_dev);
            /* remember the key so the release can be matched */
            press_check = touchkey_keycode[data[0] & KEYCODE_BIT];
            if (g_debug_switch)
                printk(KERN_DEBUG "touchkey press keycode:%d \n",
                    touchkey_keycode[data[0] & KEYCODE_BIT]);
        }
    }
    set_touchkey_debug('A');
    enable_irq(IRQ_TOUCHKEY_INT);
    //queue_work(touchkey_wq, &touchkey_work);
    return IRQ_HANDLED;
}
/*
 * Attach the SystemH interrupt controller type to @irq: gate the line
 * without waiting for in-flight handlers, install the systemh handler
 * ops, then mask it at the controller level.
 */
void make_systemh_irq(unsigned int irq)
{
    disable_irq_nosync(irq);
    irq_desc[irq].handler = &systemh_irq_type;
    disable_systemh_irq(irq);
}
static irqreturn_t summit_smb347_irq(int irq, void *data) { disable_irq_nosync(summit_smb347_i2c_client->irq); schedule_work(&summit_smb347_irq_work); return IRQ_HANDLED; }