static int vpif_suspend(struct device *dev)
{
	pm_runtime_put(dev);
	return 0;
}
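/*
 * A minimal sketch (not taken from the driver above) of how such a
 * system-sleep callback is typically paired with a resume handler and
 * wired into a dev_pm_ops table. The vpif_resume() body and the ops
 * name here are assumptions for illustration only.
 */
static int vpif_resume(struct device *dev)
{
	/* Re-take the runtime PM reference that vpif_suspend() dropped */
	pm_runtime_get(dev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);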
static ssize_t cyttsp5_panel_scan_show(struct device *dev,
		struct cyttsp5_attribute *attr, char *buf)
{
	struct cyttsp5_device_access_data *dad =
			cyttsp5_get_device_access_data(dev);
	int status = STATUS_FAIL;
	u8 config;
	u16 actual_read_len;
	int length = 0;
	u8 element_size = 0;
	u8 *buf_offset;
	int elem_offset = 0;
	int size;
	int rc;

	mutex_lock(&dad->sysfs_lock);
	pm_runtime_get_sync(dev);

	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
	if (rc < 0) {
		dev_err(dev, "%s: Error on request exclusive r=%d\n",
				__func__, rc);
		goto put_pm_runtime;
	}

	rc = cyttsp5_suspend_scan_cmd_(dev);
	if (rc < 0) {
		dev_err(dev, "%s: Error on suspend scan r=%d\n",
				__func__, rc);
		goto release_exclusive;
	}

	rc = cyttsp5_exec_scan_cmd_(dev);
	if (rc < 0) {
		dev_err(dev, "%s: Error on execute panel scan r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	/* Set length to max to read all */
	rc = cyttsp5_ret_scan_data_cmd_(dev, 0, 0xFFFF,
			dad->panel_scan_data_id, dad->ic_buf, &config,
			&actual_read_len, NULL);
	if (rc < 0) {
		dev_err(dev, "%s: Error on retrieve panel scan r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	length = get_unaligned_le16(&dad->ic_buf[0]);
	buf_offset = dad->ic_buf + length;
	element_size = config & 0x07;
	elem_offset = actual_read_len;
	while (actual_read_len > 0) {
		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, 0xFFFF,
				dad->panel_scan_data_id, NULL, &config,
				&actual_read_len, buf_offset);
		if (rc < 0)
			goto resume_scan;

		length += actual_read_len * element_size;
		buf_offset = dad->ic_buf + length;
		elem_offset += actual_read_len;
	}

	/* Reconstruct cmd header */
	put_unaligned_le16(length, &dad->ic_buf[0]);
	put_unaligned_le16(elem_offset, &dad->ic_buf[7]);

	/* Do not print command header */
	length -= 5;
	status = STATUS_SUCCESS;

resume_scan:
	cyttsp5_resume_scan_cmd_(dev);

release_exclusive:
	cmd->release_exclusive(dev);

put_pm_runtime:
	pm_runtime_put(dev);

	if (status == STATUS_FAIL)
		length = 0;

	size = prepare_print_buffer(status, &dad->ic_buf[5], length, buf);

	mutex_unlock(&dad->sysfs_lock);

	return size;
}
static ssize_t cyttsp5_opens_show(struct device *dev,
		struct cyttsp5_attribute *attr, char *buf)
{
	struct cyttsp5_device_access_data *dad =
			cyttsp5_get_device_access_data(dev);
	int status = STATUS_FAIL;
	u8 cmd_status = 0;
	u8 summary_result = 0;
	u16 act_length = 0;
	int length = 0;
	int size;
	int rc;

	mutex_lock(&dad->sysfs_lock);
	pm_runtime_get_sync(dev);

	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
	if (rc < 0) {
		dev_err(dev, "%s: Error on request exclusive r=%d\n",
				__func__, rc);
		goto put_pm_runtime;
	}

	rc = cyttsp5_suspend_scan_cmd_(dev);
	if (rc < 0) {
		dev_err(dev, "%s: Error on suspend scan r=%d\n",
				__func__, rc);
		goto release_exclusive;
	}

	rc = cyttsp5_run_selftest_cmd_(dev, CY_ST_ID_OPENS, 0,
			&cmd_status, &summary_result, NULL);
	if (rc < 0) {
		dev_err(dev, "%s: Error on run self test r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	/* Form response buffer */
	dad->ic_buf[0] = cmd_status;
	dad->ic_buf[1] = summary_result;
	length = 2;

	/* Get data unless test result is success */
	if (cmd_status == CY_CMD_STATUS_SUCCESS &&
			summary_result == CY_ST_RESULT_PASS)
		goto status_success;

	/* Set length to PIP_CMD_MAX_LENGTH to read all */
	rc = cyttsp5_get_selftest_result_cmd_(dev, 0, PIP_CMD_MAX_LENGTH,
			CY_ST_ID_OPENS, &cmd_status, &act_length,
			&dad->ic_buf[6]);
	if (rc < 0) {
		dev_err(dev, "%s: Error on get self test result r=%d\n",
				__func__, rc);
		goto resume_scan;
	}

	dad->ic_buf[2] = cmd_status;
	dad->ic_buf[3] = CY_ST_ID_OPENS;
	dad->ic_buf[4] = LOW_BYTE(act_length);
	dad->ic_buf[5] = HI_BYTE(act_length);
	length = 6 + act_length;

status_success:
	status = STATUS_SUCCESS;

resume_scan:
	cyttsp5_resume_scan_cmd_(dev);

release_exclusive:
	cmd->release_exclusive(dev);

put_pm_runtime:
	pm_runtime_put(dev);

	if (status == STATUS_FAIL)
		length = 0;

	size = prepare_print_buffer(status, dad->ic_buf, length, buf);

	mutex_unlock(&dad->sysfs_lock);

	return size;
}
static int start_ipc(struct link_device *ld, struct io_device *iod)
{
	struct sk_buff *skb;
	char data[1] = {'a'};
	int err;
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct link_pm_data *pm_data = usb_ld->link_pm_data;
	struct device *dev = &usb_ld->usbdev->dev;
	struct if_usb_devdata *pipe_data = &usb_ld->devdata[IF_USB_FMT_EP];

	if (!usb_ld->if_usb_connected) {
		mif_err("HSIC not connected, skip start ipc\n");
		err = -ENODEV;
		goto exit;
	}

retry:
	if (ld->mc->phone_state != STATE_ONLINE) {
		mif_err("MODEM is not online, skip start ipc\n");
		err = -ENODEV;
		goto exit;
	}

	/* check usb runtime pm first */
	if (dev->power.runtime_status != RPM_ACTIVE) {
		if (!pm_data->resume_requested) {
			mif_debug("QW PM\n");
			INIT_COMPLETION(pm_data->active_done);
			queue_delayed_work(pm_data->wq,
					&pm_data->link_pm_work, 0);
		}
		mif_debug("Wait pm\n");
		err = wait_for_completion_timeout(&pm_data->active_done,
				msecs_to_jiffies(500));
		/* timeout or -ERESTARTSYS */
		if (err <= 0)
			goto retry;
	}

	pm_runtime_get_sync(dev);

	mif_err("send 'a'\n");

	skb = alloc_skb(16, GFP_ATOMIC);
	if (unlikely(!skb)) {
		pm_runtime_put(dev);
		return -ENOMEM;
	}
	memcpy(skb_put(skb, 1), data, 1);
	skbpriv(skb)->iod = iod;
	skbpriv(skb)->ld = ld;

	if (!usb_ld->if_usb_connected || !usb_ld->usbdev) {
		/* free the skb and drop the PM reference taken above */
		dev_kfree_skb_any(skb);
		pm_runtime_put(dev);
		err = -ENODEV;
		goto exit;
	}

	usb_mark_last_busy(usb_ld->usbdev);
	err = usb_tx_urb_with_skb(usb_ld->usbdev, skb, pipe_data);
	if (err < 0) {
		mif_err("usb_tx_urb fail\n");
		dev_kfree_skb_any(skb);
	}

	pm_runtime_put(dev);
exit:
	return err;
}
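/*
 * A condensed sketch of the invariant start_ipc() maintains around the
 * URB submission: every pm_runtime_get_sync() is matched by a
 * pm_runtime_put() on all exit paths, and a failed submission frees
 * the skb. example_submit() is a hypothetical stand-in for
 * usb_tx_urb_with_skb(), not part of the driver above.
 */
static int example_submit(struct sk_buff *skb);	/* hypothetical */

static int example_tx_one(struct device *dev, struct sk_buff *skb)
{
	int err;

	pm_runtime_get_sync(dev);	/* keep the link resumed for the TX */
	err = example_submit(skb);
	if (err < 0)
		dev_kfree_skb_any(skb);	/* submission failed: free the skb */
	pm_runtime_put(dev);		/* always balance the get above */
	return err;
}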
static int link_peers(struct usb_port *left, struct usb_port *right)
{
	struct usb_port *ss_port, *hs_port;
	int rc;

	if (left->peer == right && right->peer == left)
		return 0;

	if (left->peer || right->peer) {
		struct usb_port *lpeer = left->peer;
		struct usb_port *rpeer = right->peer;
		char *method;

		if (left->location && left->location == right->location)
			method = "location";
		else
			method = "default";

		pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
			dev_name(&left->dev), dev_name(&right->dev), method,
			dev_name(&left->dev),
			lpeer ? dev_name(&lpeer->dev) : "none",
			dev_name(&right->dev),
			rpeer ? dev_name(&rpeer->dev) : "none");
		return -EBUSY;
	}

	rc = sysfs_create_link(&left->dev.kobj, &right->dev.kobj, "peer");
	if (rc)
		return rc;
	rc = sysfs_create_link(&right->dev.kobj, &left->dev.kobj, "peer");
	if (rc) {
		sysfs_remove_link(&left->dev.kobj, "peer");
		return rc;
	}

	/*
	 * We need to wake the HiSpeed port to make sure we don't race
	 * setting ->peer with usb_port_runtime_suspend(). Otherwise we
	 * may miss a suspend event for the SuperSpeed port.
	 */
	if (left->is_superspeed) {
		ss_port = left;
		WARN_ON(right->is_superspeed);
		hs_port = right;
	} else {
		ss_port = right;
		WARN_ON(!right->is_superspeed);
		hs_port = left;
	}
	pm_runtime_get_sync(&hs_port->dev);

	left->peer = right;
	right->peer = left;

	/*
	 * The SuperSpeed reference is dropped when the HiSpeed port in
	 * this relationship suspends, i.e. when it is safe to allow a
	 * SuperSpeed connection to drop since there is no risk of a
	 * device degrading to its powered-off HiSpeed connection.
	 *
	 * Also, drop the HiSpeed ref taken above.
	 */
	pm_runtime_get_sync(&ss_port->dev);
	pm_runtime_put(&hs_port->dev);

	return 0;
}
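/*
 * A hedged sketch of the reference-handoff pattern used above: hold a
 * temporary runtime PM reference on one device while installing state
 * its suspend callback must observe, take the long-lived reference on
 * the other device, then drop the temporary one. All names here are
 * illustrative, not from the hub code above.
 */
static void example_handoff(struct device *temp, struct device *holder)
{
	pm_runtime_get_sync(temp);	/* block temp's runtime suspend */

	/* ... publish state that temp's suspend callback will read ... */

	pm_runtime_get_sync(holder);	/* reference released elsewhere later */
	pm_runtime_put(temp);		/* temp may now runtime suspend */
}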
static int cyttsp4_debug_probe(struct cyttsp4_device *ttsp)
{
	struct device *dev = &ttsp->dev;
	struct cyttsp4_debug_data *dd;
	struct cyttsp4_debug_platform_data *pdata = dev_get_platdata(dev);
	int rc;

	dev_info(dev, "%s: startup\n", __func__);
	dev_dbg(dev, "%s: debug on\n", __func__);
	dev_vdbg(dev, "%s: verbose debug on\n", __func__);

	/* get context and debug print buffers */
	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "%s: Error, kzalloc\n", __func__);
		rc = -ENOMEM;
		goto cyttsp4_debug_probe_alloc_failed;
	}

	rc = device_create_file(dev, &dev_attr_int_count);
	if (rc) {
		dev_err(dev, "%s: Error, could not create int_count\n",
				__func__);
		goto cyttsp4_debug_probe_create_int_count_failed;
	}

	rc = device_create_file(dev, &dev_attr_formated_output);
	if (rc) {
		dev_err(dev, "%s: Error, could not create formated_output\n",
				__func__);
		goto cyttsp4_debug_probe_create_formated_failed;
	}

	mutex_init(&dd->sysfs_lock);
	dd->ttsp = ttsp;
	dd->pdata = pdata;
	dev_set_drvdata(dev, dd);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	dd->si = cyttsp4_request_sysinfo(ttsp);
	if (dd->si == NULL) {
		dev_err(dev, "%s: Fail get sysinfo pointer from core\n",
				__func__);
		rc = -ENODEV;
		goto cyttsp4_debug_probe_sysinfo_failed;
	}

	rc = cyttsp4_subscribe_attention(ttsp, CY_ATTEN_IRQ,
			cyttsp4_debug_attention, CY_MODE_OPERATIONAL);
	if (rc < 0) {
		dev_err(dev, "%s: Error, could not subscribe attention cb\n",
				__func__);
		goto cyttsp4_debug_probe_subscribe_failed;
	}

	pm_runtime_put(dev);
	return 0;

cyttsp4_debug_probe_subscribe_failed:
cyttsp4_debug_probe_sysinfo_failed:
	pm_runtime_put(dev);
	pm_runtime_suspend(dev);
	pm_runtime_disable(dev);
	dev_set_drvdata(dev, NULL);
	device_remove_file(dev, &dev_attr_formated_output);
cyttsp4_debug_probe_create_formated_failed:
	device_remove_file(dev, &dev_attr_int_count);
cyttsp4_debug_probe_create_int_count_failed:
	kfree(dd);
cyttsp4_debug_probe_alloc_failed:
	dev_err(dev, "%s failed.\n", __func__);
	return rc;
}
static void stmmac_ethtool_complete(struct net_device *netdev)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	pm_runtime_put(priv->device);
}
static int s3c_camif_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct s3c_camif_plat_data *pdata = dev->platform_data;
	struct s3c_camif_drvdata *drvdata;
	struct camif_dev *camif;
	struct resource *mres;
	int ret = 0;

	camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
	if (!camif)
		return -ENOMEM;

	spin_lock_init(&camif->slock);
	mutex_init(&camif->lock);

	camif->dev = dev;

	if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
		dev_err(dev, "wrong platform data\n");
		return -EINVAL;
	}

	camif->pdata = *pdata;
	drvdata = (void *)platform_get_device_id(pdev)->driver_data;
	camif->variant = drvdata->variant;

	mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	camif->io_base = devm_ioremap_resource(dev, mres);
	if (IS_ERR(camif->io_base))
		return PTR_ERR(camif->io_base);

	ret = camif_request_irqs(pdev, camif);
	if (ret < 0)
		return ret;

	ret = pdata->gpio_get();
	if (ret < 0)
		return ret;

	ret = s3c_camif_create_subdev(camif);
	if (ret < 0)
		goto err_sd;

	ret = camif_clk_get(camif);
	if (ret < 0)
		goto err_clk;

	platform_set_drvdata(pdev, camif);
	clk_set_rate(camif->clock[CLK_CAM],
			camif->pdata.sensor.clock_frequency);

	dev_info(dev, "sensor clock frequency: %lu\n",
		 clk_get_rate(camif->clock[CLK_CAM]));
	/*
	 * Set initial pixel format, resolution and crop rectangle.
	 * Must be done before a sensor subdev is registered as some
	 * settings are overridden with values from sensor subdev.
	 */
	s3c_camif_set_defaults(camif);

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm;

	ret = camif_media_dev_init(camif);
	if (ret < 0)
		goto err_alloc;

	ret = camif_register_sensor(camif);
	if (ret < 0)
		goto err_sens;

	ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
	if (ret < 0)
		goto err_sens;

	ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
	if (ret < 0)
		goto err_sens;

	ret = camif_register_video_nodes(camif);
	if (ret < 0)
		goto err_sens;

	ret = camif_create_media_links(camif);
	if (ret < 0)
		goto err_sens;

	ret = media_device_register(&camif->media_dev);
	if (ret < 0)
		goto err_sens;

	pm_runtime_put(dev);
	return 0;

err_sens:
	v4l2_device_unregister(&camif->v4l2_dev);
	media_device_unregister(&camif->media_dev);
	media_device_cleanup(&camif->media_dev);
	camif_unregister_media_entities(camif);
err_alloc:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
err_pm:
	camif_clk_put(camif);
err_clk:
	s3c_camif_unregister_subdev(camif);
err_sd:
	pdata->gpio_put();
	return ret;
}
static int serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port *up;
	struct resource *mem, *irq;
	struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
	int ret;

	if (pdev->dev.of_node)
		omap_up_info = of_get_uart_port_info(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(&pdev->dev, mem->start,
				resource_size(mem),
				pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	if (gpio_is_valid(omap_up_info->DTR_gpio) &&
	    omap_up_info->DTR_present) {
		ret = gpio_request(omap_up_info->DTR_gpio, "omap-serial");
		if (ret < 0)
			return ret;
		ret = gpio_direction_output(omap_up_info->DTR_gpio,
					    omap_up_info->DTR_inverted);
		if (ret < 0)
			return ret;
	}

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	if (gpio_is_valid(omap_up_info->DTR_gpio) &&
	    omap_up_info->DTR_present) {
		up->DTR_gpio = omap_up_info->DTR_gpio;
		up->DTR_inverted = omap_up_info->DTR_inverted;
	} else
		up->DTR_gpio = -EINVAL;
	up->DTR_active = 0;

	up->dev = &pdev->dev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = irq->start;

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	if (pdev->dev.of_node)
		up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		up->port.line = pdev->id;

	if (up->port.line < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
			up->port.line);
		ret = -ENODEV;
		goto err_port_line;
	}

	up->pins = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(up->pins)) {
		dev_warn(&pdev->dev, "did not get pins for uart%i error: %li\n",
			 up->port.line, PTR_ERR(up->pins));
		up->pins = NULL;
	}

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = devm_ioremap(&pdev->dev, mem->start,
					resource_size(mem));
	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev,
			 "No clock speed specified: using default: %d\n",
			 DEFAULT_CLK_SPEED);
	}

	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
		PM_QOS_CPU_DMA_LATENCY, up->latency);
	serial_omap_uart_wq = create_singlethread_workqueue(up->name);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	platform_set_drvdata(pdev, up);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->autosuspend_timeout);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	omap_serial_fill_features_erratas(up);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err_add_port;

	pm_runtime_mark_last_busy(up->dev);
	pm_runtime_put_autosuspend(up->dev);
	return 0;

err_add_port:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_ioremap:
err_port_line:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
				pdev->id, __func__, ret);
	return ret;
}
static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct dwc3 *dwc;
	int ret;
	void __iomem *regs;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	res->start += DWC3_GLOBALS_REGS_START;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs)) {
		ret = PTR_ERR(regs);
		goto err0;
	}

	dwc->regs = regs;
	dwc->regs_size = resource_size(res);

	dwc3_get_properties(dwc);

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	spin_lock_init(&dwc->lock);

	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err1;

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err3;

	ret = dwc3_alloc_scratch_buffers(dwc);
	if (ret)
		goto err3;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err4;
	}

	dwc3_check_params(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err5;

	dwc3_debugfs_init(dwc);
	pm_runtime_put(dev);

	return 0;

err5:
	dwc3_event_buffers_cleanup(dwc);

err4:
	dwc3_free_scratch_buffers(dwc);

err3:
	dwc3_free_event_buffers(dwc);
	dwc3_ulpi_exit(dwc);

err2:
	pm_runtime_allow(&pdev->dev);

err1:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

err0:
	/*
	 * restore res->start back to its original value so that, in case the
	 * probe is deferred, we don't end up with an error requesting the
	 * memory region the next time probe is called.
	 */
	res->start -= DWC3_GLOBALS_REGS_START;

	return ret;
}
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			return IRQ_NONE;
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt. We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		ret = regmap_read(map, chip->status_base + (i * map->reg_stride
				   * data->irq_reg_stride),
				   &data->status_buf[i]);

		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
			return IRQ_NONE;
		}

		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && chip->ack_base) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
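/*
 * The handler above only touches runtime PM when chip->runtime_pm is
 * set, and is careful to drop the reference on every exit path. A
 * self-contained sketch of that shape, with hypothetical types and
 * helpers (example_irq_data and example_service_irq are not from the
 * regmap code above):
 */
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

struct example_irq_data {		/* hypothetical driver data */
	struct device *dev;
	bool runtime_pm;
};

static bool example_service_irq(struct example_irq_data *data); /* hypothetical */

static irqreturn_t example_irq_thread(int irq, void *d)
{
	struct example_irq_data *data = d;
	bool handled;

	if (data->runtime_pm && pm_runtime_get_sync(data->dev) < 0)
		return IRQ_NONE;	/* resume failed: nothing we can do */

	handled = example_service_irq(data);

	if (data->runtime_pm)
		pm_runtime_put(data->dev);	/* balance on every path */

	return handled ? IRQ_HANDLED : IRQ_NONE;
}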
int mpu3050_common_probe(struct device *dev,
			 struct regmap *map,
			 int irq,
			 const char *name)
{
	struct iio_dev *indio_dev;
	struct mpu3050 *mpu3050;
	unsigned int val;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*mpu3050));
	if (!indio_dev)
		return -ENOMEM;
	mpu3050 = iio_priv(indio_dev);

	mpu3050->dev = dev;
	mpu3050->map = map;
	mutex_init(&mpu3050->lock);
	/* Default fullscale: 2000 degrees per second */
	mpu3050->fullscale = FS_2000_DPS;
	/* 1 kHz, divide by 100, default frequency = 10 Hz */
	mpu3050->lpf = MPU3050_DLPF_CFG_188HZ;
	mpu3050->divisor = 99;

	/* Read the mounting matrix, if present */
	ret = of_iio_read_mount_matrix(dev, "mount-matrix",
				       &mpu3050->orientation);
	if (ret)
		return ret;

	/* Fetch and turn on regulators */
	mpu3050->regs[0].supply = mpu3050_reg_vdd;
	mpu3050->regs[1].supply = mpu3050_reg_vlogic;
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(mpu3050->regs),
				      mpu3050->regs);
	if (ret) {
		dev_err(dev, "Cannot get regulators\n");
		return ret;
	}

	ret = mpu3050_power_up(mpu3050);
	if (ret)
		return ret;

	ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val);
	if (ret) {
		dev_err(dev, "could not read device ID\n");
		ret = -ENODEV;
		goto err_power_down;
	}

	if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
		dev_err(dev, "unsupported chip id %02x\n",
			(u8)(val & MPU3050_CHIP_ID_MASK));
		ret = -ENODEV;
		goto err_power_down;
	}

	ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val);
	if (ret) {
		dev_err(dev, "could not read product ID\n");
		ret = -ENODEV;
		goto err_power_down;
	}
	dev_info(dev, "found MPU-3050 part no: %d, version: %d\n",
		 ((val >> 4) & 0xf), (val & 0xf));

	ret = mpu3050_hw_init(mpu3050);
	if (ret)
		goto err_power_down;

	indio_dev->dev.parent = dev;
	indio_dev->channels = mpu3050_channels;
	indio_dev->num_channels = ARRAY_SIZE(mpu3050_channels);
	indio_dev->info = &mpu3050_info;
	indio_dev->available_scan_masks = mpu3050_scan_masks;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->name = name;

	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
					 mpu3050_trigger_handler,
					 &mpu3050_buffer_setup_ops);
	if (ret) {
		dev_err(dev, "triggered buffer setup failed\n");
		goto err_power_down;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(dev, "device register failed\n");
		goto err_cleanup_buffer;
	}

	dev_set_drvdata(dev, indio_dev);

	/* Check if we have an assigned IRQ to use as trigger */
	if (irq) {
		ret = mpu3050_trigger_probe(indio_dev, irq);
		if (ret)
			dev_err(dev, "failed to register trigger\n");
	}

	/* Enable runtime PM */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	/*
	 * Set autosuspend to two orders of magnitude larger than the
	 * start-up time. 100ms start-up time means 10000ms autosuspend,
	 * i.e. 10 seconds.
	 */
	pm_runtime_set_autosuspend_delay(dev, 10000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put(dev);

	return 0;

err_cleanup_buffer:
	iio_triggered_buffer_cleanup(indio_dev);
err_power_down:
	mpu3050_power_down(mpu3050);

	return ret;
}
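/*
 * The tail of the probe above follows the common "device already
 * powered, enable runtime PM last" idiom. A condensed sketch of just
 * that sequence (the helper name is hypothetical; the calls and the
 * 10 s delay mirror the probe above):
 */
static void example_setup_runtime_pm(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* hold a ref; device is already on */
	pm_runtime_set_active(dev);	/* tell the PM core it is powered */
	pm_runtime_enable(dev);
	/* autosuspend well above the part's start-up time, as above */
	pm_runtime_set_autosuspend_delay(dev, 10000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put(dev);		/* drop the ref; idle timer may run */
}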
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (np)
			ret = of_property_read_u32(np,
						   "arm,buffer-size",
						   &drvdata->size);
		if (ret)
			drvdata->size = SZ_1M;
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	pm_runtime_put(&adev->dev);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
						    &drvdata->paddr,
						    GFP_KERNEL);
		if (!drvdata->vaddr)
			return -ENOMEM;

		memset(drvdata->vaddr, 0, drvdata->size);
		drvdata->buf = drvdata->vaddr;
	} else {
		drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
		if (!drvdata->buf)
			return -ENOMEM;
	}

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto err_devm_kzalloc;
	}

	desc->pdata = pdata;
	desc->dev = dev;
	desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etb_cs_ops;
		desc->groups = coresight_etb_groups;
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etr_cs_ops;
		desc->groups = coresight_etr_groups;
	} else {
		desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc->ops = &tmc_etf_cs_ops;
		desc->groups = coresight_etf_groups;
	}

	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_devm_kzalloc;
	}

	drvdata->miscdev.name = pdata->name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	dev_info(dev, "TMC initialized\n");
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
err_devm_kzalloc:
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		dma_free_coherent(dev, drvdata->size,
				  drvdata->vaddr, drvdata->paddr);
	return ret;
}
static int serial_omap_probe(struct platform_device *pdev)
{
	struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
	struct uart_omap_port *up;
	struct resource *mem;
	void __iomem *base;
	int uartirq = 0;
	int wakeirq = 0;
	int ret;

	/* The optional wakeirq may be specified in the board dts file */
	if (pdev->dev.of_node) {
		uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0);
		if (!uartirq)
			return -EPROBE_DEFER;
		wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
		omap_up_info = of_get_uart_port_info(&pdev->dev);
		pdev->dev.platform_data = omap_up_info;
	} else {
		uartirq = platform_get_irq(pdev, 0);
		if (uartirq < 0)
			return -EPROBE_DEFER;
	}

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	up->dev = &pdev->dev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = uartirq;
	up->wakeirq = wakeirq;
	if (!up->wakeirq)
		dev_info(up->port.dev, "no wakeirq for uart%d\n",
			 up->port.line);

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	if (pdev->dev.of_node)
		ret = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		ret = pdev->id;
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
			ret);
		goto err_port_line;
	}
	up->port.line = ret;

	if (up->port.line >= OMAP_MAX_HSUART_PORTS) {
		dev_err(&pdev->dev, "uart ID %d > MAX %d.\n",
			up->port.line, OMAP_MAX_HSUART_PORTS);
		ret = -ENXIO;
		goto err_port_line;
	}

	ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
	if (ret < 0)
		goto err_rs485;

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = base;
	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	up->port.rs485_config = serial_omap_config_rs485;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev,
			 "No clock speed specified: using default: %d\n",
			 DEFAULT_CLK_SPEED);
	}

	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
			   PM_QOS_CPU_DMA_LATENCY, up->latency);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	platform_set_drvdata(pdev, up);
	if (omap_up_info->autosuspend_timeout == 0)
		omap_up_info->autosuspend_timeout = -1;

	device_init_wakeup(up->dev, true);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 omap_up_info->autosuspend_timeout);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	omap_serial_fill_features_erratas(up);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err_add_port;

	pm_runtime_mark_last_busy(up->dev);
	pm_runtime_put_autosuspend(up->dev);
	return 0;

err_add_port:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rs485:
err_port_line:
	return ret;
}
static int __devinit dsps_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match;
	const struct dsps_musb_wrapper *wrp;
	struct dsps_glue *glue;
	struct resource *iomem;
	int ret, i;

	match = of_match_node(musb_dsps_of_match, np);
	if (!match) {
		dev_err(&pdev->dev, "failed to get matching of_match struct\n");
		ret = -EINVAL;
		goto err0;
	}
	wrp = match->data;

	/* allocate glue */
	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&pdev->dev, "unable to allocate glue memory\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* get memory resource */
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		dev_err(&pdev->dev, "failed to get usbss mem resource\n");
		ret = -ENODEV;
		goto err1;
	}

	glue->dev = &pdev->dev;

	glue->wrp = kmemdup(wrp, sizeof(*wrp), GFP_KERNEL);
	if (!glue->wrp) {
		dev_err(&pdev->dev, "failed to duplicate wrapper struct memory\n");
		ret = -ENOMEM;
		goto err1;
	}
	platform_set_drvdata(pdev, glue);

	/* enable the usbss clocks */
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
		goto err2;
	}

	/* create the child platform device for all instances of musb */
	for (i = 0; i < wrp->instances; i++) {
		ret = dsps_create_musb_pdev(glue, i);
		if (ret != 0) {
			dev_err(&pdev->dev, "failed to create child pdev\n");
			/* release resources of previously created instances */
			for (i--; i >= 0; i--)
				dsps_delete_musb_pdev(glue, i);
			goto err3;
		}
	}

	return 0;

err3:
	pm_runtime_put(&pdev->dev);
err2:
	pm_runtime_disable(&pdev->dev);
	kfree(glue->wrp);
err1:
	kfree(glue);
err0:
	return ret;
}
static int omap8250_probe(struct platform_device *pdev)
{
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct omap8250_priv *priv;
	struct uart_8250_port up;
	int ret;
	void __iomem *membase;

	if (!regs || !irq) {
		dev_err(&pdev->dev, "missing registers or irq\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	membase = devm_ioremap_nocache(&pdev->dev, regs->start,
				       resource_size(regs));
	if (!membase)
		return -ENODEV;

	memset(&up, 0, sizeof(up));
	up.port.dev = &pdev->dev;
	up.port.mapbase = regs->start;
	up.port.membase = membase;
	up.port.irq = irq->start;
	/*
	 * It claims to be 16C750 compatible however it is a little different.
	 * It has EFR and has no FCR7_64byte bit. The AFE (which it claims to
	 * have) is enabled via EFR instead of MCR. The type is set to 8250
	 * here just to get things going. UNKNOWN does not work for a few
	 * reasons and we don't need our own type since we don't use 8250's
	 * set_termios() or pm callback.
	 */
	up.port.type = PORT_8250;
	up.port.iotype = UPIO_MEM;
	up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW |
			UPF_HARD_FLOW;
	up.port.private_data = priv;

	up.port.regshift = 2;
	up.port.fifosize = 64;
	up.tx_loadsz = 64;
	up.capabilities = UART_CAP_FIFO;
#ifdef CONFIG_PM
	/*
	 * Runtime PM is mostly transparent. However to do it right we need a
	 * TX empty interrupt before we can put the device to auto idle. So if
	 * PM is not enabled we don't add that flag and can spare that one
	 * extra interrupt in the TX path.
	 */
	up.capabilities |= UART_CAP_RPM;
#endif
	up.port.set_termios = omap_8250_set_termios;
	up.port.set_mctrl = omap8250_set_mctrl;
	up.port.pm = omap_8250_pm;
	up.port.startup = omap_8250_startup;
	up.port.shutdown = omap_8250_shutdown;
	up.port.throttle = omap_8250_throttle;
	up.port.unthrottle = omap_8250_unthrottle;
	up.port.rs485_config = omap_8250_rs485_config;

	if (pdev->dev.of_node) {
		const struct of_device_id *id;

		ret = of_alias_get_id(pdev->dev.of_node, "serial");

		of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				     &up.port.uartclk);
		priv->wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);

		id = of_match_device(of_match_ptr(omap8250_dt_ids), &pdev->dev);
		if (id && id->data)
			priv->habit |= *(u8 *)id->data;
	} else {
		ret = pdev->id;
	}
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id\n");
		return ret;
	}
	up.port.line = ret;

	if (!up.port.uartclk) {
		up.port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev,
			 "No clock speed specified: using default: %d\n",
			 DEFAULT_CLK_SPEED);
	}

	priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	priv->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&priv->pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
			   priv->latency);
	INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);

	spin_lock_init(&priv->rx_dma_lock);

	device_init_wakeup(&pdev->dev, true);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_sync(&pdev->dev);

	omap_serial_fill_features_erratas(&up, priv);
	up.port.handle_irq = omap8250_no_handle_irq;
#ifdef CONFIG_SERIAL_8250_DMA
	if (pdev->dev.of_node) {
		/*
		 * Oh DMA support. If there are no DMA properties in the DT
		 * then we will fall back to a generic DMA channel which does
		 * not really work here. To ensure that we do not get a
		 * generic DMA channel assigned, we have the
		 * the_no_dma_filter_fn() here. To avoid "failed to request
		 * DMA" messages we check for DMA properties in DT.
		 */
		ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
		if (ret == 2) {
			up.dma = &priv->omap8250_dma;
			priv->omap8250_dma.fn = the_no_dma_filter_fn;
			priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
			priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
			priv->omap8250_dma.rx_size = RX_TRIGGER;
			priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
			priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;

			if (of_machine_is_compatible("ti,am33xx"))
				priv->habit |= OMAP_DMA_TX_KICK;
			/*
			 * pause is currently not supported, at least on
			 * omap-sdma and edma on most earlier kernels.
			 */
			priv->rx_dma_broken = true;
		}
	}
#endif
	ret = serial8250_register_8250_port(&up);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register 8250 port\n");
		goto err;
	}
	priv->line = ret;
	platform_set_drvdata(pdev, priv);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
err:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
static void uart_disable(struct device *tty)
{
	pr_debug("%s: runtime put\n", __func__);
	/* Tell PM runtime to release tty device and allow s0i3 */
	pm_runtime_put(tty);
}
static int intc_irqpin_probe(struct platform_device *pdev)
{
	const struct intc_irqpin_config *config;
	struct device *dev = &pdev->dev;
	struct intc_irqpin_priv *p;
	struct intc_irqpin_iomem *i;
	struct resource *io[INTC_IRQPIN_REG_NR];
	struct resource *irq;
	struct irq_chip *irq_chip;
	void (*enable_fn)(struct irq_data *d);
	void (*disable_fn)(struct irq_data *d);
	const char *name = dev_name(dev);
	bool control_parent;
	unsigned int nirqs;
	int ref_irq;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	/* deal with driver instance configuration */
	of_property_read_u32(dev->of_node, "sense-bitfield-width",
			     &p->sense_bitfield_width);
	control_parent = of_property_read_bool(dev->of_node, "control-parent");
	if (!p->sense_bitfield_width)
		p->sense_bitfield_width = 4; /* default to 4 bits */

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	config = of_device_get_match_data(dev);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* get hold of register banks */
	memset(io, 0, sizeof(io));
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
		if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) {
			dev_err(dev, "not enough IOMEM resources\n");
			ret = -EINVAL;
			goto err0;
		}
	}

	/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
	for (k = 0; k < INTC_IRQPIN_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].requested_irq = irq->start;
	}

	nirqs = k;
	if (nirqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err0;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		i = &p->iomem[k];

		/* handle optional registers */
		if (!io[k])
			continue;

		switch (resource_size(io[k])) {
		case 1:
			i->width = 8;
			i->read = intc_irqpin_read8;
			i->write = intc_irqpin_write8;
			break;
		case 4:
			i->width = 32;
			i->read = intc_irqpin_read32;
			i->write = intc_irqpin_write32;
			break;
		default:
			dev_err(dev, "IOMEM size mismatch\n");
			ret = -EINVAL;
			goto err0;
		}

		i->iomem = devm_ioremap_nocache(dev, io[k]->start,
						resource_size(io[k]));
		if (!i->iomem) {
			dev_err(dev, "failed to remap IOMEM\n");
			ret = -ENXIO;
			goto err0;
		}
	}

	/* configure "individual IRQ mode" where needed */
	if (config && config->needs_irlm) {
		if (io[INTC_IRQPIN_REG_IRLM])
			intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
						      config->irlm_bit, 1, 1);
		else
			dev_warn(dev, "unable to select IRLM mode\n");
	}

	/* mask all interrupts using priority */
	for (k = 0; k < nirqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 1);

	/* clear all pending interrupts */
	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);

	/* scan for shared interrupt lines */
	ref_irq = p->irq[0].requested_irq;
	p->shared_irqs = 1;
	for (k = 1; k < nirqs; k++) {
		if (ref_irq != p->irq[k].requested_irq) {
			p->shared_irqs = 0;
			break;
		}
	}

	/* use more severe masking method if requested */
	if (control_parent) {
		enable_fn = intc_irqpin_irq_enable_force;
		disable_fn = intc_irqpin_irq_disable_force;
	} else if (!p->shared_irqs) {
		enable_fn = intc_irqpin_irq_enable;
		disable_fn = intc_irqpin_irq_disable;
	} else {
		enable_fn = intc_irqpin_shared_irq_enable;
		disable_fn = intc_irqpin_shared_irq_disable;
	}

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = disable_fn;
	irq_chip->irq_unmask = enable_fn;
	irq_chip->irq_set_type = intc_irqpin_irq_set_type;
	irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
	irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;

	p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0,
					      &intc_irqpin_irq_domain_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err0;
	}

	if (p->shared_irqs) {
		/* request one shared interrupt */
		if (devm_request_irq(dev, p->irq[0].requested_irq,
				     intc_irqpin_shared_irq_handler,
				     IRQF_SHARED, name, p)) {
			dev_err(dev, "failed to request low IRQ\n");
			ret = -ENOENT;
			goto err1;
		}
	} else {
		/* request interrupts one by one */
		for (k = 0; k < nirqs; k++) {
			if (devm_request_irq(dev, p->irq[k].requested_irq,
					     intc_irqpin_irq_handler, 0, name,
					     &p->irq[k])) {
				dev_err(dev, "failed to request low IRQ\n");
				ret = -ENOENT;
				goto err1;
			}
		}
	}

	/* unmask all interrupts on prio level */
	for (k = 0; k < nirqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 0);

	dev_info(dev, "driving %d irqs\n", nirqs);

	return 0;

err1:
	irq_domain_remove(p->irq_domain);
err0:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}
static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res, dwc_res;
	struct dwc3 *dwc;
	int ret;
	u32 mdwidth;
	void __iomem *regs;

	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
	if (!dwc)
		return -ENOMEM;

	dwc->clks = devm_kmemdup(dev, dwc3_core_clks, sizeof(dwc3_core_clks),
				 GFP_KERNEL);
	if (!dwc->clks)
		return -ENOMEM;

	dwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	dwc_res = *res;
	dwc_res.start += DWC3_GLOBALS_REGS_START;

	regs = devm_ioremap_resource(dev, &dwc_res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dwc->regs = regs;
	dwc->regs_size = resource_size(&dwc_res);

	dwc3_get_properties(dwc);

	dwc->reset = devm_reset_control_get_optional_shared(dev, NULL);
	if (IS_ERR(dwc->reset))
		return PTR_ERR(dwc->reset);

	if (dev->of_node) {
		dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);

		ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
		if (ret == -EPROBE_DEFER)
			return ret;
		/*
		 * Clocks are optional, but new DT platforms should support all
		 * clocks as required by the DT-binding.
		 */
		if (ret)
			dwc->num_clks = 0;
	}

	ret = reset_control_deassert(dwc->reset);
	if (ret)
		goto put_clks;

	ret = clk_bulk_prepare(dwc->num_clks, dwc->clks);
	if (ret)
		goto assert_reset;

	ret = clk_bulk_enable(dwc->num_clks, dwc->clks);
	if (ret)
		goto unprepare_clks;

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	spin_lock_init(&dwc->lock);

	/* Set dma coherent mask to DMA BUS data width */
	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
	dev_dbg(dev, "Enabling %d-bit DMA addresses.\n", mdwidth);
	dma_set_coherent_mask(dev, DMA_BIT_MASK(mdwidth));

	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err1;

	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = dwc3_get_dr_mode(dwc);
	if (ret)
		goto err3;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err4;
	}

	dwc3_check_params(dwc);

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err5;

	dwc3_debugfs_init(dwc);
	pm_runtime_put(dev);

	return 0;

err5:
	dwc3_event_buffers_cleanup(dwc);

err4:
	dwc3_free_scratch_buffers(dwc);

err3:
	dwc3_free_event_buffers(dwc);

err2:
	pm_runtime_allow(&pdev->dev);

err1:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	clk_bulk_disable(dwc->num_clks, dwc->clks);
unprepare_clks:
	clk_bulk_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
	reset_control_assert(dwc->reset);
put_clks:
	clk_bulk_put(dwc->num_clks, dwc->clks);

	return ret;
}
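/*
 * The error labels above unwind in exactly the reverse order of setup.
 * A hedged sketch of the same ordering as a teardown helper (the
 * function itself is hypothetical; the calls mirror the labels in the
 * probe above):
 */
static void example_dwc3_unwind(struct dwc3 *dwc, struct device *dev)
{
	pm_runtime_put_sync(dev);			/* err1 */
	pm_runtime_disable(dev);
	clk_bulk_disable(dwc->num_clks, dwc->clks);
	clk_bulk_unprepare(dwc->num_clks, dwc->clks);	/* unprepare_clks */
	reset_control_assert(dwc->reset);		/* assert_reset */
	clk_bulk_put(dwc->num_clks, dwc->clks);		/* put_clks */
}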
static void usb_tx_work(struct work_struct *work)
{
	int ret = 0;
	struct link_device *ld =
		container_of(work, struct link_device, tx_delayed_work.work);
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct sk_buff *skb;
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	if (!usb_ld->usbdev) {
		mif_info("usbdev is invalid\n");
		return;
	}

	while (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen) {
		/* request and check usb runtime pm first */
		ret = link_pm_runtime_get_active(pm_data);
		if (ret < 0) {
			if (ret == -ENODEV)
				mif_err("link not avail, retry reconnect.\n");
			else
				queue_delayed_work(ld->tx_wq,
						&ld->tx_delayed_work,
						msecs_to_jiffies(20));
			return;
		}

		usb_mark_last_busy(usb_ld->usbdev);
		pm_runtime_get_sync(&usb_ld->usbdev->dev);

		ret = 0;
		/*
		 * send skb from fmt_txq and raw_txq,
		 * one by one for fair flow control
		 */
		skb = skb_dequeue(&ld->sk_fmt_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			/*
			 * Do not call runtime_put if ret is -ENODEV or
			 * -ENOENT; otherwise it will invoke bugs.
			 */
			if (ret != -ENODEV && ret != -ENOENT)
				pm_runtime_put(&usb_ld->usbdev->dev);
			else
				skb_queue_head(&ld->sk_fmt_tx_q, skb);
			return;
		}

		skb = skb_dequeue(&ld->sk_raw_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			if (ret != -ENODEV && ret != -ENOENT)
				pm_runtime_put(&usb_ld->usbdev->dev);
			else
				skb_queue_head(&ld->sk_raw_tx_q, skb);
			return;
		}

		pm_runtime_put(&usb_ld->usbdev->dev);
		usb_mark_last_busy(usb_ld->usbdev);
	}
	wake_unlock(&pm_data->tx_async_wake);
}
static int msm_otg_resume(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	struct usb_bus *bus = phy->otg->host;
	void __iomem *addr;
	int cnt = 0;
	unsigned temp;

	if (!atomic_read(&motg->in_lpm))
		return 0;

	clk_prepare_enable(motg->pclk);
	clk_prepare_enable(motg->clk);
	if (!IS_ERR(motg->core_clk))
		clk_prepare_enable(motg->core_clk);

	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
	    motg->pdata->otg_control == OTG_PMIC_CONTROL) {
		addr = USB_PHY_CTRL;
		if (motg->phy_number)
			addr = USB_PHY_CTRL2;

		msm_hsusb_ldo_set_mode(motg, 1);
		msm_hsusb_config_vddcx(motg, 1);
		writel(readl(addr) & ~PHY_RETEN, addr);
	}

	temp = readl(USB_USBCMD);
	temp &= ~ASYNC_INTR_CTRL;
	temp &= ~ULPI_STP_CTRL;
	writel(temp, USB_USBCMD);

	/*
	 * PHY comes out of low power mode (LPM) in case of wakeup
	 * from asynchronous interrupt.
	 */
	if (!(readl(USB_PORTSC) & PORTSC_PHCD))
		goto skip_phy_resume;

	writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);

	while (cnt < PHY_RESUME_TIMEOUT_USEC) {
		if (!(readl(USB_PORTSC) & PORTSC_PHCD))
			break;
		udelay(1);
		cnt++;
	}

	if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
		/*
		 * This is a fatal error. Reset the link and
		 * PHY. USB state can not be restored. Re-insertion
		 * of USB cable is the only way to get USB working.
		 */
		dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n");
		msm_otg_reset(phy);
	}

skip_phy_resume:
	if (device_may_wakeup(phy->dev))
		disable_irq_wake(motg->irq);

	if (bus)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);

	atomic_set(&motg->in_lpm, 0);

	if (motg->async_int) {
		motg->async_int = 0;
		pm_runtime_put(phy->dev);
		enable_irq(motg->irq);
	}

	dev_info(phy->dev, "USB exited from low power mode\n");

	return 0;
}
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm4_count++) {
		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
					  "AP_ARM_CORESIGHT4_STARTING",
					  etm4_starting_cpu, etm4_dying_cpu);
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
						"AP_ARM_CORESIGHT4_ONLINE",
						etm4_online_cpu, NULL);
		if (ret < 0)
			goto err_arch_supported;
		hp_online = ret;
	}

	put_online_cpus();

	if (etm4_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}

	etm4_init_trace_id(drvdata);
	etm4_set_default(&drvdata->config);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	ret = etm_perf_symlink(drvdata->csdev, true);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);
	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	if (--etm4_count == 0) {
		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
		if (hp_online)
			cpuhp_remove_state_nocalls(hp_online);
	}
	return ret;
}
static void usb_tx_work(struct work_struct *work)
{
	int ret = 0;
	struct link_device *ld =
		container_of(work, struct link_device, tx_delayed_work.work);
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct sk_buff *skb;
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	if (!usb_ld->usbdev) {
		mif_info("usbdev is invalid\n");
		return;
	}

	pm_data->tx_cnt++;

	while (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen) {
		/* request and check usb runtime pm first */
		ret = link_pm_runtime_get_active(pm_data);
		if (ret < 0) {
			if (ret == -ENODEV) {
				mif_err("link not avail, retry reconnect.\n");
				goto exit;
			}
			goto retry_tx_work;
		}

		/*
		 * If the AP tries to tx while the interface is going through
		 * a disconnect->reconnect probe, the usbdev exists but one of
		 * the interface channel devices is still probing; then
		 * _usb_tx_work() returns -ENOENT, the runtime usage count
		 * stays positive forever and the link never enters L2.
		 */
		if (!usb_ld->if_usb_connected) {
			mif_info("link is available, but the interface was not ready\n");
			goto retry_tx_work;
		}

		pm_runtime_get_sync(&usb_ld->usbdev->dev);

		ret = 0;
		/*
		 * send skb from fmt_txq and raw_txq,
		 * one by one for fair flow control
		 */
		skb = skb_dequeue(&ld->sk_fmt_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			mif_err("usb_tx_urb_with_skb for fmt_q %d\n", ret);
			skb_queue_head(&ld->sk_fmt_tx_q, skb);

			if (ret == -ENODEV || ret == -ENOENT)
				goto exit;

			/* tx fail and usbdev alive, retry tx work */
			pm_runtime_put(&usb_ld->usbdev->dev);
			goto retry_tx_work;
		}

		skb = skb_dequeue(&ld->sk_raw_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			mif_err("usb_tx_urb_with_skb for raw_q %d\n", ret);
			skb_queue_head(&ld->sk_raw_tx_q, skb);

			if (ret == -ENODEV || ret == -ENOENT)
				goto exit;

			pm_runtime_put(&usb_ld->usbdev->dev);
			goto retry_tx_work;
		}

		pm_runtime_put(&usb_ld->usbdev->dev);
	}
	wake_unlock(&pm_data->tx_async_wake);
exit:
	return;

retry_tx_work:
	queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work,
			   msecs_to_jiffies(20));
}
/**
 * dwc3_otg_start_host - helper function for starting/stopping the host
 * controller driver.
 *
 * @otg: Pointer to the otg_transceiver structure.
 * @on: start / stop the host controller driver.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_host(struct usb_otg *otg, int on)
{
	struct dwc3_otg *dotg = container_of(otg, struct dwc3_otg, otg);
	struct dwc3_ext_xceiv *ext_xceiv = dotg->ext_xceiv;
	struct dwc3 *dwc = dotg->dwc;
	struct usb_hcd *hcd;
	int ret = 0;

	if (!dwc->xhci)
		return -EINVAL;

	if (!dotg->vbus_otg) {
		dotg->vbus_otg = devm_regulator_get(dwc->dev->parent,
						    "vbus_dwc3");
		if (IS_ERR(dotg->vbus_otg)) {
			dev_err(dwc->dev, "Failed to get vbus regulator\n");
			ret = PTR_ERR(dotg->vbus_otg);
			dotg->vbus_otg = NULL;
			return ret;
		}
	}

	if (on) {
		dev_dbg(otg->phy->dev, "%s: turn on host\n", __func__);

		dwc3_otg_notify_host_mode(otg, on);
		usb_phy_notify_connect(dotg->dwc->usb2_phy, USB_SPEED_HIGH);
		ret = regulator_enable(dotg->vbus_otg);
		if (ret) {
			dev_err(otg->phy->dev, "unable to enable vbus_otg\n");
			dwc3_otg_notify_host_mode(otg, 0);
			return ret;
		}

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);

		/*
		 * FIXME If micro A cable is disconnected during system suspend,
		 * xhci platform device will be removed before runtime pm is
		 * enabled for xhci device. Due to this, disable_depth becomes
		 * greater than one and runtimepm is not enabled for next microA
		 * connect. Fix this by calling pm_runtime_init for xhci device.
		 */
		pm_runtime_init(&dwc->xhci->dev);
		ret = platform_device_add(dwc->xhci);
		if (ret) {
			dev_err(otg->phy->dev,
				"%s: failed to add XHCI pdev ret=%d\n",
				__func__, ret);
			regulator_disable(dotg->vbus_otg);
			dwc3_otg_notify_host_mode(otg, 0);
			return ret;
		}

		/*
		 * WORKAROUND: currently host mode suspend isn't working well.
		 * Disable xHCI's runtime PM for now.
		 */
		pm_runtime_disable(&dwc->xhci->dev);

		hcd = platform_get_drvdata(dwc->xhci);
		otg->host = &hcd->self;

		dwc3_gadget_usb3_phy_suspend(dwc, true);
	} else {
		dev_dbg(otg->phy->dev, "%s: turn off host\n", __func__);

		ret = regulator_disable(dotg->vbus_otg);
		if (ret) {
			dev_err(otg->phy->dev, "unable to disable vbus_otg\n");
			return ret;
		}

		dbg_event(0xFF, "StHost get", 0);
		pm_runtime_get(dwc->dev);
		usb_phy_notify_disconnect(dotg->dwc->usb2_phy, USB_SPEED_HIGH);
		dwc3_otg_notify_host_mode(otg, on);
		otg->host = NULL;
		platform_device_del(dwc->xhci);

		/*
		 * Perform USB hardware RESET (both core reset and DBM reset)
		 * when moving from host to peripheral. This is required for
		 * peripheral mode to work.
		 */
		if (ext_xceiv && ext_xceiv->ext_block_reset)
			ext_xceiv->ext_block_reset(ext_xceiv, true);

		dwc3_gadget_usb3_phy_suspend(dwc, false);
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		/* re-init core and OTG registers as block reset clears these */
		dwc3_post_host_reset_core_init(dwc);
		dbg_event(0xFF, "StHost put", 0);
		pm_runtime_put(dwc->dev);
	}

	return 0;
}
static ssize_t tthe_get_panel_data_debugfs_read(struct file *filp,
		char __user *buf, size_t count, loff_t *ppos)
{
	struct cyttsp5_device_access_data *dad = filp->private_data;
	struct device *dev;
	u8 config;
	u16 actual_read_len;
	u16 length = 0;
	u8 element_size = 0;
	u8 *buf_offset;
	u8 *buf_out;
	int elem;
	int elem_offset = 0;
	int print_idx = 0;
	int rc;
	int rc1;
	int i;

	mutex_lock(&dad->debugfs_lock);
	dev = dad->dev;
	buf_out = dad->tthe_get_panel_data_buf;
	if (!buf_out)
		goto release_mutex;

	pm_runtime_get_sync(dev);

	rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
	if (rc < 0)
		goto put_runtime;

	if (dad->heatmap.scan_start) {
		/* Start scan */
		rc = cyttsp5_exec_scan_cmd_(dev);
		if (rc < 0)
			goto release_exclusive;
	}

	elem = dad->heatmap.num_element;

#if defined(CY_ENABLE_MAX_ELEN)
	if (elem > CY_MAX_ELEN) {
		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset,
				CY_MAX_ELEN, dad->heatmap.data_type,
				dad->ic_buf, &config, &actual_read_len, NULL);
	} else {
		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
				dad->heatmap.data_type, dad->ic_buf, &config,
				&actual_read_len, NULL);
	}
#else
	rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
			dad->heatmap.data_type, dad->ic_buf, &config,
			&actual_read_len, NULL);
#endif
	if (rc < 0)
		goto release_exclusive;

	length = get_unaligned_le16(&dad->ic_buf[0]);
	buf_offset = dad->ic_buf + length;

	element_size = config & CY_CMD_RET_PANEL_ELMNT_SZ_MASK;

	elem -= actual_read_len;
	elem_offset = actual_read_len;
	while (elem > 0) {
#ifdef CY_ENABLE_MAX_ELEN
		if (elem > CY_MAX_ELEN) {
			rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset,
					CY_MAX_ELEN, dad->heatmap.data_type,
					NULL, &config, &actual_read_len,
					buf_offset);
		} else {
			rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
					dad->heatmap.data_type, NULL, &config,
					&actual_read_len, buf_offset);
		}
#else
		rc = cyttsp5_ret_scan_data_cmd_(dev, elem_offset, elem,
				dad->heatmap.data_type, NULL, &config,
				&actual_read_len, buf_offset);
#endif
		if (rc < 0)
			goto release_exclusive;

		if (!actual_read_len)
			break;

		length += actual_read_len * element_size;
		buf_offset = dad->ic_buf + length;
		elem -= actual_read_len;
		elem_offset += actual_read_len;
	}

	/* Reconstruct cmd header */
	put_unaligned_le16(length, &dad->ic_buf[0]);
	put_unaligned_le16(elem_offset, &dad->ic_buf[7]);

release_exclusive:
	rc1 = cmd->release_exclusive(dev);
put_runtime:
	pm_runtime_put(dev);

	if (rc < 0)
		goto release_mutex;

	print_idx += scnprintf(buf_out, TTHE_TUNER_MAX_BUF, "CY_DATA:");
	for (i = 0; i < length; i++)
		print_idx += scnprintf(buf_out + print_idx,
				TTHE_TUNER_MAX_BUF - print_idx,
				"%02X ", dad->ic_buf[i]);
	print_idx += scnprintf(buf_out + print_idx,
			TTHE_TUNER_MAX_BUF - print_idx,
			":(%d bytes)\n", length);
	rc = simple_read_from_buffer(buf, count, ppos, buf_out, print_idx);
	print_idx = rc;

release_mutex:
	mutex_unlock(&dad->debugfs_lock);
	return print_idx;
}
static int serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port *up;
	struct resource *mem, *irq, *dma_tx, *dma_rx;
	struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
	int ret = -ENOSPC;

	if (pdev->dev.of_node)
		omap_up_info = of_get_uart_port_info(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	if (!request_mem_region(mem->start, resource_size(mem),
				pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!dma_rx) {
		ret = -EINVAL;
		goto err;
	}

	dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!dma_tx) {
		ret = -EINVAL;
		goto err;
	}

	up = kzalloc(sizeof(*up), GFP_KERNEL);
	if (up == NULL) {
		ret = -ENOMEM;
		goto do_release_region;
	}
	up->pdev = pdev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = irq->start;

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	if (pdev->dev.of_node)
		up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		up->port.line = pdev->id;

	if (up->port.line < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
			up->port.line);
		ret = -ENODEV;
		goto err;
	}

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = ioremap(mem->start, resource_size(mem));
	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev,
			 "No clock speed specified: using default: %d\n",
			 DEFAULT_CLK_SPEED);
	}
	up->uart_dma.uart_base = mem->start;
	up->errata = omap_up_info->errata;

	if (omap_up_info->dma_enabled) {
		up->uart_dma.uart_dma_tx = dma_tx->start;
		up->uart_dma.uart_dma_rx = dma_rx->start;
		up->use_dma = 1;
		up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
		up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
		up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
		spin_lock_init(&(up->uart_dma.tx_lock));
		spin_lock_init(&(up->uart_dma.rx_lock));
		up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
		up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
	}

	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
		PM_QOS_CPU_DMA_LATENCY, up->latency);
	serial_omap_uart_wq = create_singlethread_workqueue(up->name);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->autosuspend_timeout);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto do_release_region;

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, up);
	return 0;

err:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
				pdev->id, __func__, ret);
do_release_region:
	release_mem_region(mem->start, resource_size(mem));
	return ret;
}
static ssize_t cyttsp5_get_idac_show(struct device *dev,
                struct cyttsp5_attribute *attr, char *buf)
{
        struct cyttsp5_device_access_data *dad =
                        cyttsp5_get_device_access_data(dev);
        int status = STATUS_FAIL;
        u8 cmd_status = 0;
        u8 data_format = 0;
        u16 act_length = 0;
        int length = 0;
        int size;
        int rc;

        mutex_lock(&dad->sysfs_lock);
        pm_runtime_get_sync(dev);

        rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
        if (rc < 0) {
                dev_err(dev, "%s: Error on request exclusive r=%d\n",
                        __func__, rc);
                goto put_pm_runtime;
        }

        rc = cyttsp5_suspend_scan_cmd_(dev);
        if (rc < 0) {
                dev_err(dev, "%s: Error on suspend scan r=%d\n",
                        __func__, rc);
                goto release_exclusive;
        }

        rc = cyttsp5_get_data_structure_cmd_(dev, 0, PIP_CMD_MAX_LENGTH,
                        dad->get_idac_data_id, &cmd_status, &data_format,
                        &act_length, &dad->ic_buf[5]);
        if (rc < 0) {
                dev_err(dev, "%s: Error on get data structure r=%d\n",
                        __func__, rc);
                goto resume_scan;
        }

        dad->ic_buf[0] = cmd_status;
        dad->ic_buf[1] = dad->get_idac_data_id;
        dad->ic_buf[2] = LOW_BYTE(act_length);
        dad->ic_buf[3] = HI_BYTE(act_length);
        dad->ic_buf[4] = data_format;
        length = 5 + act_length;

        status = STATUS_SUCCESS;

resume_scan:
        cyttsp5_resume_scan_cmd_(dev);
release_exclusive:
        cmd->release_exclusive(dev);
put_pm_runtime:
        pm_runtime_put(dev);

        if (status == STATUS_FAIL)
                length = 0;

        size = prepare_print_buffer(status, dad->ic_buf, length, buf);

        mutex_unlock(&dad->sysfs_lock);
        return size;
}
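/*
 * Illustrative sketch (assumption): the layout cyttsp5_get_idac_show()
 * assembles in ic_buf before prepare_print_buffer() formats it. Shown as a
 * packed struct purely for documentation; the driver builds the buffer
 * byte by byte as above.
 */
struct example_idac_response {
        u8 cmd_status;          /* ic_buf[0]: PIP command status */
        u8 data_id;             /* ic_buf[1]: dad->get_idac_data_id */
        u8 length_lo;           /* ic_buf[2]: LOW_BYTE(act_length) */
        u8 length_hi;           /* ic_buf[3]: HI_BYTE(act_length) */
        u8 data_format;         /* ic_buf[4]: element format from firmware */
        u8 data[];              /* ic_buf[5..]: act_length payload bytes */
} __packed;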
static void serial_omap_set_termios(struct uart_port *port,
                struct ktermios *termios, struct ktermios *old)
{
        struct uart_omap_port *up = (struct uart_omap_port *)port;
        unsigned char cval = 0;
        unsigned char efr = 0;
        unsigned long flags = 0;
        unsigned int baud, quot;

        switch (termios->c_cflag & CSIZE) {
        case CS5:
                cval = UART_LCR_WLEN5;
                break;
        case CS6:
                cval = UART_LCR_WLEN6;
                break;
        case CS7:
                cval = UART_LCR_WLEN7;
                break;
        default:
        case CS8:
                cval = UART_LCR_WLEN8;
                break;
        }

        if (termios->c_cflag & CSTOPB)
                cval |= UART_LCR_STOP;
        if (termios->c_cflag & PARENB)
                cval |= UART_LCR_PARITY;
        if (!(termios->c_cflag & PARODD))
                cval |= UART_LCR_EPAR;

        /*
         * Ask the core to calculate the divisor for us.
         */
        baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
        quot = serial_omap_get_divisor(port, baud);

        /* calculate wakeup latency constraint */
        up->calc_latency = (USEC_PER_SEC * up->port.fifosize) / (baud / 8);
        up->latency = up->calc_latency;
        schedule_work(&up->qos_work);

        up->dll = quot & 0xff;
        up->dlh = quot >> 8;
        up->mdr1 = UART_OMAP_MDR1_DISABLE;

        up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
                        UART_FCR_ENABLE_FIFO;
        if (up->use_dma)
                up->fcr |= UART_FCR_DMA_SELECT;

        /*
         * Ok, we're now changing the port state. Do it with
         * interrupts disabled.
         */
        pm_runtime_get_sync(&up->pdev->dev);
        spin_lock_irqsave(&up->port.lock, flags);

        /*
         * Update the per-port timeout.
         */
        uart_update_timeout(port, termios->c_cflag, baud);

        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
        if (termios->c_iflag & (BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;

        /*
         * Characters to ignore
         */
        up->port.ignore_status_mask = 0;
        if (termios->c_iflag & IGNPAR)
                up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
        if (termios->c_iflag & IGNBRK) {
                up->port.ignore_status_mask |= UART_LSR_BI;
                /*
                 * If we're ignoring parity and break indicators,
                 * ignore overruns too (for real raw support).
                 */
                if (termios->c_iflag & IGNPAR)
                        up->port.ignore_status_mask |= UART_LSR_OE;
        }

        /*
         * ignore all characters if CREAD is not set
         */
        if ((termios->c_cflag & CREAD) == 0)
                up->port.ignore_status_mask |= UART_LSR_DR;

        /*
         * Modem status interrupts
         */
        up->ier &= ~UART_IER_MSI;
        if (UART_ENABLE_MS(&up->port, termios->c_cflag))
                up->ier |= UART_IER_MSI;
        serial_out(up, UART_IER, up->ier);
        serial_out(up, UART_LCR, cval);         /* reset DLAB */
        up->lcr = cval;
        up->scr = OMAP_UART_SCR_TX_EMPTY;

        /* FIFOs and DMA Settings */

        /*
         * FCR can be changed only when the
         * baud clock is not running
         * DLL_REG and DLH_REG set to 0.
         */
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
        serial_out(up, UART_DLL, 0);
        serial_out(up, UART_DLM, 0);
        serial_out(up, UART_LCR, 0);

        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

        up->efr = serial_in(up, UART_EFR);
        serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);

        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
        up->mcr = serial_in(up, UART_MCR);
        serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
        /* FIFO ENABLE, DMA MODE */

        up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;

        if (up->use_dma) {
                serial_out(up, UART_TI752_TLR, 0);
                up->scr |= UART_FCR_TRIGGER_4;
        } else {
                /* Set receive FIFO threshold to 1 byte */
                up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
                up->fcr |= (0x1 << OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT);
        }

        serial_out(up, UART_FCR, up->fcr);
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

        serial_out(up, UART_OMAP_SCR, up->scr);

        serial_out(up, UART_EFR, up->efr);
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
        serial_out(up, UART_MCR, up->mcr);

        /* Protocol, Baud Rate, and Interrupt Settings */

        if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
                serial_omap_mdr1_errataset(up, up->mdr1);
        else
                serial_out(up, UART_OMAP_MDR1, up->mdr1);

        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

        up->efr = serial_in(up, UART_EFR);
        serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);

        serial_out(up, UART_LCR, 0);
        serial_out(up, UART_IER, 0);
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

        serial_out(up, UART_DLL, up->dll);      /* LS of divisor */
        serial_out(up, UART_DLM, up->dlh);      /* MS of divisor */

        serial_out(up, UART_LCR, 0);
        serial_out(up, UART_IER, up->ier);
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

        serial_out(up, UART_EFR, up->efr);
        serial_out(up, UART_LCR, cval);

        if (baud > 230400 && baud != 3000000)
                up->mdr1 = UART_OMAP_MDR1_13X_MODE;
        else
                up->mdr1 = UART_OMAP_MDR1_16X_MODE;

        if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
                serial_omap_mdr1_errataset(up, up->mdr1);
        else
                serial_out(up, UART_OMAP_MDR1, up->mdr1);

        /* Hardware Flow Control Configuration */

        if (termios->c_cflag & CRTSCTS) {
                efr |= (UART_EFR_CTS | UART_EFR_RTS);
                serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);

                up->mcr = serial_in(up, UART_MCR);
                serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);

                serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
                up->efr = serial_in(up, UART_EFR);
                serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);

                serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
                serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
                serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
                serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
                serial_out(up, UART_LCR, cval);
        }

        serial_omap_set_mctrl(&up->port, up->port.mctrl);
        /* Software Flow Control Configuration */
        serial_omap_configure_xonxoff(up, termios);

        spin_unlock_irqrestore(&up->port.lock, flags);
        pm_runtime_put(&up->pdev->dev);
        dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
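/*
 * Illustrative sketch (assumption): the divisor arithmetic implied by the
 * MDR1 mode selection in serial_omap_set_termios() above. Rates up to
 * 230400 baud (and exactly 3 Mbaud) use 16x oversampling, everything else
 * 13x. Example: with a 48 MHz uartclk at 115200 baud,
 * 48000000 / (16 * 115200) = 26, so DLL = 26 and DLH = 0.
 */
static unsigned int example_omap_divisor(unsigned int uartclk,
                unsigned int baud)
{
        unsigned int oversampling;

        if (baud > 230400 && baud != 3000000)
                oversampling = 13;      /* UART_OMAP_MDR1_13X_MODE */
        else
                oversampling = 16;      /* UART_OMAP_MDR1_16X_MODE */

        return uartclk / (oversampling * baud);
}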
static ssize_t cyttsp5_calibrate_show(struct device *dev,
                struct cyttsp5_attribute *attr, char *buf)
{
        struct cyttsp5_device_access_data *dad =
                        cyttsp5_get_device_access_data(dev);
        int status = STATUS_FAIL;
        int length = 0;
        int size;
        int rc;

        mutex_lock(&dad->sysfs_lock);
        pm_runtime_get_sync(dev);

        rc = cmd->request_exclusive(dev, CY_REQUEST_EXCLUSIVE_TIMEOUT);
        if (rc < 0) {
                dev_err(dev, "%s: Error on request exclusive r=%d\n",
                        __func__, rc);
                goto put_pm_runtime;
        }

        rc = cyttsp5_suspend_scan_cmd_(dev);
        if (rc < 0) {
                dev_err(dev, "%s: Error on suspend scan r=%d\n",
                        __func__, rc);
                goto release_exclusive;
        }

        rc = _cyttsp5_calibrate_idacs_cmd(dev, dad->calibrate_sensing_mode,
                        &dad->ic_buf[0]);
        if (rc < 0) {
                dev_err(dev, "%s: Error on calibrate idacs r=%d\n",
                        __func__, rc);
                goto resume_scan;
        }
        length = 1;

        /* Check if baseline initialization is requested */
        if (dad->calibrate_initialize_baselines) {
                /* Perform baseline initialization for all modes */
                rc = _cyttsp5_initialize_baselines_cmd(dev, CY_IB_SM_MUTCAP |
                                CY_IB_SM_SELFCAP | CY_IB_SM_BUTTON,
                                &dad->ic_buf[length]);
                if (rc < 0) {
                        dev_err(dev, "%s: Error on initialize baselines r=%d\n",
                                __func__, rc);
                        goto resume_scan;
                }
                length++;
        }

        status = STATUS_SUCCESS;

resume_scan:
        cyttsp5_resume_scan_cmd_(dev);
release_exclusive:
        cmd->release_exclusive(dev);
put_pm_runtime:
        pm_runtime_put(dev);

        if (status == STATUS_FAIL)
                length = 0;

        size = prepare_print_buffer(status, dad->ic_buf, length, buf);

        mutex_unlock(&dad->sysfs_lock);
        return size;
}
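/*
 * Illustrative sketch (assumption): how a consumer might interpret the
 * result cyttsp5_calibrate_show() prints. ic_buf[0] is the calibrate-IDACs
 * command status; ic_buf[1] exists only when calibrate_initialize_baselines
 * was set. Treating 0 (CY_CMD_STATUS_SUCCESS in the PIP protocol) as
 * success is an assumption here.
 */
static int example_parse_calibrate_result(const u8 *buf, int len)
{
        if (len < 1)
                return -EINVAL;
        if (buf[0] != 0)                /* calibrate IDACs failed */
                return -EIO;
        if (len > 1 && buf[1] != 0)     /* baseline init failed */
                return -EIO;
        return 0;
}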
static int stm_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret;
        void __iomem *base;
        unsigned long *guaranteed;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct stm_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct resource ch_res;
        size_t res_size, bitmap_size;
        struct coresight_desc desc = { 0 };
        struct device_node *np = adev->dev.of_node;

        if (np) {
                pdata = of_get_coresight_platform_data(dev, np);
                if (IS_ERR(pdata))
                        return PTR_ERR(pdata);
                adev->dev.platform_data = pdata;
        }
        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->dev = &adev->dev;
        drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
        if (!IS_ERR(drvdata->atclk)) {
                ret = clk_prepare_enable(drvdata->atclk);
                if (ret)
                        return ret;
        }
        dev_set_drvdata(dev, drvdata);

        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
        drvdata->base = base;

        ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res);
        if (ret)
                return ret;
        drvdata->chs.phys = ch_res.start;

        base = devm_ioremap_resource(dev, &ch_res);
        if (IS_ERR(base))
                return PTR_ERR(base);
        drvdata->chs.base = base;

        drvdata->write_bytes = stm_fundamental_data_size(drvdata);

        if (boot_nr_channel) {
                drvdata->numsp = boot_nr_channel;
                res_size = min((resource_size_t)(boot_nr_channel *
                                BYTES_PER_CHANNEL), resource_size(res));
        } else {
                drvdata->numsp = stm_num_stimulus_port(drvdata);
                res_size = min((resource_size_t)(drvdata->numsp *
                                BYTES_PER_CHANNEL), resource_size(res));
        }
        bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);

        guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
        if (!guaranteed)
                return -ENOMEM;
        drvdata->chs.guaranteed = guaranteed;

        spin_lock_init(&drvdata->spinlock);

        stm_init_default_data(drvdata);
        stm_init_generic_data(drvdata);

        if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) {
                dev_info(dev, "stm_register_device failed, probing deferred\n");
                return -EPROBE_DEFER;
        }

        desc.type = CORESIGHT_DEV_TYPE_SOURCE;
        desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
        desc.ops = &stm_cs_ops;
        desc.pdata = pdata;
        desc.dev = dev;
        desc.groups = coresight_stm_groups;
        drvdata->csdev = coresight_register(&desc);
        if (IS_ERR(drvdata->csdev)) {
                ret = PTR_ERR(drvdata->csdev);
                goto stm_unregister;
        }

        pm_runtime_put(&adev->dev);

        dev_info(dev, "%s initialized\n", (char *)id->data);
        return 0;

stm_unregister:
        stm_unregister_device(&drvdata->stm);
        return ret;
}
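/*
 * Illustrative sketch (assumption): the sizing arithmetic stm_probe()
 * above applies to the stimulus-port window. Each port decodes
 * BYTES_PER_CHANNEL bytes of the "stm-stimulus-base" resource (assumed to
 * be 256 here), the usable window is capped by the resource length, and
 * the guaranteed-channel bitmap reserves one bit per port.
 */
static void example_stm_sizing(unsigned int numsp, size_t res_len,
                size_t *res_size, size_t *bitmap_size)
{
        size_t want = (size_t)numsp * 256;      /* BYTES_PER_CHANNEL */

        *res_size = want < res_len ? want : res_len;    /* cap to resource */
        *bitmap_size = BITS_TO_LONGS(numsp) * sizeof(long); /* 1 bit/port */
}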