static int jz_nemc_probe(struct platform_device *pdev) { struct jz_nemc *nemc; struct jz_nand_params *nand_param = pdev->dev.platform_data; struct resource *res; int i; nemc = (struct jz_nemc *)devm_kzalloc(&pdev->dev, sizeof(struct jz_nemc), GFP_KERNEL); if (!nemc) return -ENOMEM; nemc->pdev = pdev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM , "nemc"); if (!res) return -ENOMEM; res = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), "nemc"); if (!res) return -ENOMEM; nemc->base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if(!nemc->base) return -ENOMEM; nemc->selected = -1; for (i = 0; i < MAX_CHIP_NUM; i++) { char name[20]; sprintf(name, "nemc-cs%d", i + 1); res = platform_get_resource_byname(pdev, IORESOURCE_MEM , name); if (!res) break; res = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), name); if (!res) return -ENOMEM; nemc->chips[i].io_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!nemc->chips[i].io_base) return -ENOMEM; nemc->chips[i].bank = i + 1; nemc->max_chip_num++; } if (nemc->max_chip_num == 0) return -ENOMEM; if (jz_nand_chip_init(nemc, nand_param)) return -EIO; platform_set_drvdata(pdev, nemc); dev_info(&pdev->dev, "Successfully registered External Nand Memory Controller driver\n"); return 0; }
/*
 * imx2_wdt_probe - probe the i.MX2+ watchdog.
 *
 * NOTE(review): this chunk originally ended in an unresolved
 * "<<<<<<< HEAD" merge-conflict marker, truncating the function. It has
 * been completed from the intact duplicate of the same function found
 * later in this file -- confirm against the intended merge result.
 */
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
	int ret;
	int res_size;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't get device resources\n");
		return -ENODEV;
	}

	res_size = resource_size(res);
	if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
				     res->name)) {
		dev_err(&pdev->dev,
			"can't allocate %d bytes at %d address\n",
			res_size, res->start);
		return -ENOMEM;
	}

	imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
	if (!imx2_wdt.base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	imx2_wdt.clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(imx2_wdt.clk)) {
		dev_err(&pdev->dev, "can't get Watchdog clock\n");
		return PTR_ERR(imx2_wdt.clk);
	}

	/* clamp the module-parameter timeout into the supported range */
	imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
	if (imx2_wdt.timeout != timeout)
		dev_warn(&pdev->dev, "Initial timeout out of range! "
			"Clamped from %u to %u\n", timeout, imx2_wdt.timeout);

	setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);

	imx2_wdt_miscdev.parent = &pdev->dev;
	ret = misc_register(&imx2_wdt_miscdev);
	if (ret)
		goto fail;

	dev_info(&pdev->dev,
		 "IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
		 imx2_wdt.timeout, nowayout);
	return 0;

fail:
	imx2_wdt_miscdev.parent = NULL;
	clk_put(imx2_wdt.clk);
	return ret;
}
static int msm_dbm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = pdev->dev.of_node; struct dbm *dbm; struct resource *res; int ret = 0; dbm_data = devm_kzalloc(dev, sizeof(*dbm_data), GFP_KERNEL); if (!dbm_data) return -ENOMEM; dbm_data->dbm_num_eps = DBM_1_5_NUM_EP; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "missing memory base resource\n"); ret = -ENODEV; goto free_dbm_data; } dbm_data->base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!dbm_data->base) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto free_dbm_data; } dbm = devm_kzalloc(dev, sizeof(*dbm), GFP_KERNEL); if (!dbm) { dev_err(&pdev->dev, "not enough memory\n"); ret = -ENOMEM; goto free_dbm_data; } dbm_data->dbm_reset_ep_after_lpm = of_property_read_bool(node, "qcom,reset-ep-after-lpm-resume"); dbm->dev = dev; dbm->soft_reset = soft_reset; dbm->ep_config = ep_config; dbm->ep_unconfig = ep_unconfig; dbm->get_num_of_eps_configured = get_num_of_eps_configured; dbm->event_buffer_config = event_buffer_config; dbm->data_fifo_config = data_fifo_config; dbm->set_speed = set_speed; dbm->enable = enable; dbm->ep_soft_reset = usb_ep_soft_reset; dbm->reset_ep_after_lpm = reset_ep_after_lpm; platform_set_drvdata(pdev, dbm); return usb_add_dbm(dbm); free_dbm_data: kfree(dbm_data); return ret; }
static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev, const char *name, int i) { struct device_node *np; struct resource r; int ret; np = of_parse_phandle(dev->of_node, name, 0); if (!np) { dev_err(dev, "No %s specified\n", name); return -EINVAL; } ret = of_address_to_resource(np, 0, &r); of_node_put(np); if (ret) return ret; smem->regions[i].aux_base = (u32)r.start; smem->regions[i].size = resource_size(&r); smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start, resource_size(&r)); if (!smem->regions[i].virt_base) return -ENOMEM; return 0; }
/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory region
 * and ioremaps it either as cacheable or as non-cacheable memory depending on
 * the resource's flags. All operations are managed and will be undone on
 * driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);
	/* fall back to the device name when the resource is anonymous */
	name = res->name ?: dev_name(dev);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	/* honour the resource's cacheability flag when mapping */
	if (res->flags & IORESOURCE_CACHEABLE)
		dest_ptr = devm_ioremap(dev, res->start, size);
	else
		dest_ptr = devm_ioremap_nocache(dev, res->start, size);

	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		/* release the region so a caller can retry differently */
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
static int sh_pfc_ioremap(struct sh_pfc *pfc, struct platform_device *pdev) { struct resource *res; int k; if (pdev->num_resources == 0) return -EINVAL; pfc->window = devm_kzalloc(pfc->dev, pdev->num_resources * sizeof(*pfc->window), GFP_NOWAIT); if (!pfc->window) return -ENOMEM; pfc->num_windows = pdev->num_resources; for (k = 0, res = pdev->resource; k < pdev->num_resources; k++, res++) { WARN_ON(resource_type(res) != IORESOURCE_MEM); pfc->window[k].phys = res->start; pfc->window[k].size = resource_size(res); pfc->window[k].virt = devm_ioremap_nocache(pfc->dev, res->start, resource_size(res)); if (!pfc->window[k].virt) return -ENOMEM; } return 0; }
static int meson_uart_request_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); struct resource *res; int size; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot obtain I/O memory region"); return -ENODEV; } size = resource_size(res); if (!devm_request_mem_region(port->dev, port->mapbase, size, dev_name(port->dev))) { dev_err(port->dev, "Memory region busy\n"); return -EBUSY; } if (port->flags & UPF_IOREMAP) { port->membase = devm_ioremap_nocache(port->dev, port->mapbase, size); if (port->membase == NULL) return -ENOMEM; } return 0; }
/*
 * ltq_wdt_probe - probe the Lantiq watchdog.
 *
 * Maps the watchdog register window, caches the I/O region clock rate and
 * registers the misc device. Two fixes: clk_get() reports failure with an
 * ERR_PTR (never NULL), so the old WARN_ON(!clk) could not fire and a bad
 * clock was dereferenced; and the garbled "&lt;" entity in the final
 * misc_register() call has been restored to "&ltq_wdt_miscdev".
 */
static int __init ltq_wdt_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct clk *clk;

	if (!res) {
		dev_err(&pdev->dev, "cannot obtain I/O memory region");
		return -ENOENT;
	}
	res = devm_request_mem_region(&pdev->dev, res->start,
				      resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "cannot request I/O memory region");
		return -EBUSY;
	}
	ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
					       resource_size(res));
	if (!ltq_wdt_membase) {
		dev_err(&pdev->dev, "cannot remap I/O memory region\n");
		return -ENOMEM;
	}

	/* we do not need to enable the clock as it is always running */
	clk = clk_get(&pdev->dev, "io");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "cannot get io clock\n");
		return PTR_ERR(clk);
	}
	ltq_io_region_clk_rate = clk_get_rate(clk);
	clk_put(clk);

	if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
		ltq_wdt_bootstatus = WDIOF_CARDRESET;

	return misc_register(&ltq_wdt_miscdev);
}
/*
 * Map the named (or first) MEM resource of @pdev.
 * Returns the mapping on success or an ERR_PTR() on failure.
 */
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname)
{
	struct resource *res;
	unsigned long sz;
	void __iomem *base;

	res = name ? platform_get_resource_byname(pdev, IORESOURCE_MEM, name)
		   : platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n",
			name);
		return ERR_PTR(-EINVAL);
	}

	sz = resource_size(res);
	base = devm_ioremap_nocache(&pdev->dev, res->start, sz);
	if (!base) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, base, sz);

	return base;
}
/*
 * Look up the MEM resource called @name on @pdev, request it and ioremap
 * it; the mapping is stored through @io_ptr. Returns 0 or a negative
 * errno. Both the region and the mapping are devm-managed.
 */
static int __devinit remap_named_resource(struct platform_device *pdev, char *name, void __iomem **io_ptr)
{
	struct resource *res;
	resource_size_t len;
	void __iomem *base;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to find resource [%s]\n", name);
		return -ENXIO;
	}

	len = resource_size(res);
	if (devm_request_mem_region(&pdev->dev, res->start, len, name) == NULL) {
		dev_err(&pdev->dev, "failed to request memory region "
			"[%s:0x%08x-0x%08x]\n", name, res->start, res->end);
		return -EBUSY;
	}

	base = devm_ioremap_nocache(&pdev->dev, res->start, len);
	if (base == NULL) {
		dev_err(&pdev->dev, "failed to remap memory region "
			"[%s:0x%08x-0x%08x]\n", name, res->start, res->end);
		return -ENOMEM;
	}

	*io_ptr = base;
	return 0;
}
/*
 * ltq_stp_probe - probe the Lantiq serial-to-parallel (STP) GPIO block.
 *
 * Fix: "gpiochip_add(<q_stp_chip)" was a mis-encoded "&lt;" HTML entity;
 * restored to "&ltq_stp_chip" so the call compiles again.
 */
static int __devinit ltq_stp_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int ret = 0;

	if (!res)
		return -ENOENT;

	res = devm_request_mem_region(&pdev->dev, res->start,
				      resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "failed to request STP memory\n");
		return -EBUSY;
	}

	ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
					       resource_size(res));
	if (!ltq_stp_membase) {
		dev_err(&pdev->dev, "failed to remap STP memory\n");
		return -ENOMEM;
	}

	ret = gpiochip_add(&ltq_stp_chip);
	if (!ret)
		ret = ltq_stp_hw_init();

	return ret;
}
static int ipmmu_probe(struct platform_device *pdev) { struct shmobile_ipmmu *ipmmu; struct resource *res; struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "cannot get platform resources\n"); return -ENOENT; } ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL); if (!ipmmu) { dev_err(&pdev->dev, "cannot allocate device data\n"); return -ENOMEM; } mutex_init(&ipmmu->flush_lock); ipmmu->dev = &pdev->dev; ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!ipmmu->ipmmu_base) { dev_err(&pdev->dev, "ioremap_nocache failed\n"); return -ENOMEM; } ipmmu->dev_names = pdata->dev_names; ipmmu->num_dev_names = pdata->num_dev_names; platform_set_drvdata(pdev, ipmmu); ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */ ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */ ipmmu_iommu_init(ipmmu); return 0; }
/**
 * Creates an MC I/O object
 *
 * @dev: device to be associated with the MC I/O object
 * @mc_portal_phys_addr: physical address of the MC portal to use
 * @mc_portal_size: size in bytes of the MC portal
 * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
 * object or NULL if none.
 * @flags: flags for the new MC I/O object
 * @new_mc_io: Area to return pointer to newly created MC I/O object
 *
 * Returns '0' on Success; Error code otherwise.
 */
int __must_check fsl_create_mc_io(struct device *dev,
				  phys_addr_t mc_portal_phys_addr,
				  u32 mc_portal_size,
				  struct fsl_mc_device *dpmcp_dev,
				  u32 flags, struct fsl_mc_io **new_mc_io)
{
	int error;
	struct fsl_mc_io *mc_io;
	void __iomem *mc_portal_virt_addr;
	struct resource *res;

	mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
	if (!mc_io)
		return -ENOMEM;

	mc_io->dev = dev;
	mc_io->flags = flags;
	mc_io->portal_phys_addr = mc_portal_phys_addr;
	mc_io->portal_size = mc_portal_size;
	/*
	 * Atomic-context users serialise with a spinlock; everyone else
	 * gets a (sleepable) mutex.
	 */
	if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
		spin_lock_init(&mc_io->spinlock);
	else
		mutex_init(&mc_io->mutex);

	res = devm_request_mem_region(dev,
				      mc_portal_phys_addr,
				      mc_portal_size,
				      "mc_portal");
	if (!res) {
		dev_err(dev,
			"devm_request_mem_region failed for MC portal %#llx\n",
			mc_portal_phys_addr);
		return -EBUSY;
	}

	mc_portal_virt_addr = devm_ioremap_nocache(dev,
						   mc_portal_phys_addr,
						   mc_portal_size);
	if (!mc_portal_virt_addr) {
		dev_err(dev,
			"devm_ioremap_nocache failed for MC portal %#llx\n",
			mc_portal_phys_addr);
		return -ENXIO;
	}

	mc_io->portal_virt_addr = mc_portal_virt_addr;
	if (dpmcp_dev) {
		error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
		if (error < 0)
			goto error_destroy_mc_io;
	}

	*new_mc_io = mc_io;
	return 0;

	/* fsl_destroy_mc_io() unwinds whatever was set up above */
error_destroy_mc_io:
	fsl_destroy_mc_io(mc_io);
	return error;
}
int custom_uart_driver_probe(struct platform_device *pdev) { // uart_am33x_port up; // int ret; // int irq; void __iomem *membase; struct resource *mem_res; void __iomem *cm_per; unsigned int uart5_clkctrl=0; cm_per = ioremap(CM_PER_START_ADDR, CM_PER_SIZE); if(!cm_per) { printk (KERN_ERR "HI: ERROR: Failed to remap memory for CM_PER.\n"); return 0; } uart5_clkctrl = ioread32(cm_per+CM_PER_UART4_CLKCTRL); printk("\n uart5_clkctrl=%x\n",uart5_clkctrl); iowrite32(0x02,cm_per+CM_PER_UART4_CLKCTRL); //Enable the clock for UART5 uart5_clkctrl = ioread32(cm_per+CM_PER_UART4_CLKCTRL); printk("\n uart5_clkctrl=%x\n",uart5_clkctrl); iounmap(cm_per); printk("in probe: of_get_uart_port_info\n"); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem_res) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } if (!devm_request_mem_region(&pdev->dev, mem_res->start, resource_size(mem_res), pdev->dev.driver->name)) { dev_err(&pdev->dev, "memory region already claimed\n"); return -EBUSY; } membase = devm_ioremap_nocache(&pdev->dev, mem_res->start,resource_size(mem_res)); if (!membase) return -ENODEV; printk("LCR=%x\n",ioread16(membase+0X0C)); iowrite16(0x00BF,membase+0X0C); printk("LCR=%x\n",ioread16(membase+0X0C)); iowrite16( (ioread16(membase+0X0C))^0x01,membase+0X0C); printk("LCR=%x\n",ioread16(membase+0X0C)); { // unsigned int reg_val=0; printk("MDR1=%x\n",ioread32(membase+0X20)); iowrite32((ioread32(membase+0X20))^0x01,membase+0X20); // FCR : TX_FIFO_CLEAR,RX_FIFO_CLEAR,FIFO_1byte printk("MDR1=%x\n",ioread32(membase+0X20)); } return 0; }
static int jz_dmic_platfrom_probe(struct platform_device *pdev) { struct jz_dmic *jz_dmic; struct resource *res = NULL; int i = 0, ret; jz_dmic = devm_kzalloc(&pdev->dev, sizeof(struct jz_dmic), GFP_KERNEL); if (!jz_dmic) return -ENOMEM; jz_dmic->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENOENT; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),pdev->name)) return -EBUSY; jz_dmic->res_start = res->start; jz_dmic->res_size = resource_size(res); jz_dmic->vaddr_base = devm_ioremap_nocache(&pdev->dev, jz_dmic->res_start, jz_dmic->res_size); if (!jz_dmic->vaddr_base) { dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); return -ENOMEM; } jz_dmic->dmic_mode = 0; jz_dmic->rx_dma_data.dma_addr = (dma_addr_t)jz_dmic->res_start + DMICDR; jz_dmic->vcc_dmic = regulator_get(&pdev->dev,"vcc_dmic"); platform_set_drvdata(pdev, (void *)jz_dmic); for (; i < ARRAY_SIZE(jz_dmic_sysfs_attrs); i++) { ret = device_create_file(&pdev->dev, &jz_dmic_sysfs_attrs[i]); if (ret) dev_warn(&pdev->dev,"attribute %s create failed %x", attr_name(jz_dmic_sysfs_attrs[i]), ret); } jz_dmic->clk_gate_dmic = clk_get(&pdev->dev, "dmic"); if (IS_ERR_OR_NULL(jz_dmic->clk_gate_dmic)) { ret = PTR_ERR(jz_dmic->clk_gate_dmic); jz_dmic->clk_gate_dmic = NULL; dev_err(&pdev->dev, "Failed to get clock: %d\n", ret); return ret; } ret = snd_soc_register_component(&pdev->dev, &jz_dmic_component, &jz_dmic_dai, 1); if (ret) goto err_register_cpu_dai; dev_dbg(&pdev->dev, "dmic platform probe success\n"); return ret; err_register_cpu_dai: platform_set_drvdata(pdev, NULL); return ret; }
static int pmc_probe(struct pci_dev *pdev, const struct pci_device_id *unused) { struct pmc_dev *pmc; int ret; ret = pci_enable_device(pdev); if (ret < 0) { dev_err(&pdev->dev, "error: could not enable device\n"); goto err_enable_device; } ret = pci_request_regions(pdev, DRIVER_NAME); if (ret) { dev_err(&pdev->dev, "error: could not request PCI region\n"); goto err_request_regions; } pmc = devm_kzalloc(&pdev->dev, sizeof(struct pmc_dev), GFP_KERNEL); if (!pmc) { ret = -ENOMEM; goto err_devm_kzalloc; } pmc->pdev = pci_dev_get(pdev); pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr); pmc->base_addr &= PMC_BASE_ADDR_MASK; pmc->regmap = devm_ioremap_nocache(&pdev->dev, pmc->base_addr, PMC_MMIO_REG_LEN); if (!pmc->regmap) { dev_err(&pdev->dev, "error: ioremap failed\n"); ret = -ENOMEM; goto err_devm_ioremap; } pci_set_drvdata(pdev, pmc); #ifdef CONFIG_DEBUG_FS pmc_dbgfs_register(pmc); #endif /* CONFIG_DEBUG_FS */ /* Install power off function */ pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr); acpi_base_addr &= ACPI_BASE_ADDR_MASK; if (acpi_base_addr != 0 && pm_power_off == NULL) pm_power_off = pmc_power_off; return 0; err_devm_ioremap: pci_dev_put(pdev); err_devm_kzalloc: pci_release_regions(pdev); err_request_regions: pci_disable_device(pdev); err_enable_device: dev_err(&pdev->dev, "error: probe failed\n"); return ret; }
static int au1xac97c_drvprobe(struct platform_device *pdev) { int ret; struct resource *iores, *dmares; struct au1xpsc_audio_data *ctx; ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mutex_init(&ctx->lock); iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iores) return -ENODEV; if (!devm_request_mem_region(&pdev->dev, iores->start, resource_size(iores), pdev->name)) return -EBUSY; ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start, resource_size(iores)); if (!ctx->mmio) return -EBUSY; dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!dmares) return -EBUSY; ctx->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; dmares = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!dmares) return -EBUSY; ctx->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; /* switch it on */ WR(ctx, AC97_ENABLE, EN_D | EN_CE); WR(ctx, AC97_ENABLE, EN_CE); ctx->cfg = CFG_RC(3) | CFG_XS(3); WR(ctx, AC97_CONFIG, ctx->cfg); platform_set_drvdata(pdev, ctx); ret = snd_soc_set_ac97_ops(&ac97c_bus_ops); if (ret) return ret; ret = snd_soc_register_component(&pdev->dev, &au1xac97c_component, &au1xac97c_dai_driver, 1); if (ret) return ret; ac97c_workdata = ctx; return 0; }
static int bcm63xx_rng_probe(struct platform_device *pdev) { struct resource *r; struct clk *clk; int ret; struct bcm63xx_rng_priv *priv; struct hwrng *rng; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "no iomem resource\n"); return -ENXIO; } priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->rng.name = pdev->name; priv->rng.init = bcm63xx_rng_init; priv->rng.cleanup = bcm63xx_rng_cleanup; prov->rng.data_present = bcm63xx_rng_data_present; priv->rng.data_read = bcm63xx_rng_data_read; priv->clk = devm_clk_get(&pdev->dev, "ipsec"); if (IS_ERR(priv->clk)) { error = PTR_ERR(priv->clk); dev_err(&pdev->dev, "no clock for device: %d\n", error); return error; } if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r), pdev->name)) { dev_err(&pdev->dev, "request mem failed"); return -EBUSY; } priv->regs = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r)); if (!priv->regs) { dev_err(&pdev->dev, "ioremap failed"); return -ENOMEM; } error = devm_hwrng_register(&pdev->dev, &priv->rng); if (error) { dev_err(&pdev->dev, "failed to register rng device: %d\n", error); return error; } dev_info(&pdev->dev, "registered RNG driver\n"); return 0; }
/*
 * imx2_wdt_probe - probe the i.MX2+ watchdog: map registers, grab the
 * clock, clamp the timeout and register the misc device.
 */
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
	int ret;
	int res_size;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't get device resources\n");
		return -ENODEV;
	}

	res_size = resource_size(res);
	if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
				     res->name)) {
		/*
		 * Bug fix: res->start is resource_size_t, which %d cannot
		 * print portably; use the kernel's %pa physical-address
		 * specifier (takes a pointer to the value).
		 */
		dev_err(&pdev->dev, "can't allocate %d bytes at %pa address\n",
			res_size, &res->start);
		return -ENOMEM;
	}

	imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
	if (!imx2_wdt.base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	imx2_wdt.clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(imx2_wdt.clk)) {
		dev_err(&pdev->dev, "can't get Watchdog clock\n");
		return PTR_ERR(imx2_wdt.clk);
	}

	/* clamp the module-parameter timeout into the supported range */
	imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
	if (imx2_wdt.timeout != timeout)
		dev_warn(&pdev->dev, "Initial timeout out of range! "
			"Clamped from %u to %u\n", timeout, imx2_wdt.timeout);

	setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);

	imx2_wdt_miscdev.parent = &pdev->dev;
	ret = misc_register(&imx2_wdt_miscdev);
	if (ret)
		goto fail;

	dev_info(&pdev->dev,
		 "IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
		 imx2_wdt.timeout, nowayout);
	return 0;

fail:
	imx2_wdt_miscdev.parent = NULL;
	clk_put(imx2_wdt.clk);
	return ret;
}
static int sti_tvout_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct sti_tvout *tvout; struct resource *res; struct device_node *child_np; struct component_match *match = NULL; DRM_INFO("%s\n", __func__); if (!node) return -ENODEV; tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL); if (!tvout) return -ENOMEM; tvout->dev = dev; /* get Memory ressources */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg"); if (!res) { DRM_ERROR("Invalid glue resource\n"); return -ENOMEM; } tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); if (!tvout->regs) return -ENOMEM; /* get reset resources */ tvout->reset = devm_reset_control_get(dev, "tvout"); /* take tvout out of reset */ if (!IS_ERR(tvout->reset)) reset_control_deassert(tvout->reset); platform_set_drvdata(pdev, tvout); of_platform_populate(node, NULL, NULL, dev); child_np = of_get_next_available_child(node, NULL); while (child_np) { component_match_add(dev, &match, compare_of, child_np); of_node_put(child_np); child_np = of_get_next_available_child(node, child_np); } component_master_add_with_match(dev, &sti_tvout_master_ops, match); return component_add(dev, &sti_tvout_ops); }
/*
 * pmc_core_probe - PCI probe for the Intel PMC Core device.
 *
 * Verifies the CPU is a supported model, enables the PCI device, reads
 * the PWRMBASE MMIO base from config space, maps the register window and
 * registers the debugfs interface. Returns 0 on success or a negative
 * errno.
 */
static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct device *ptr_dev = &dev->dev;
	/* single global instance; state lives in the file-scope "pmc" */
	struct pmc_dev *pmcdev = &pmc;
	const struct x86_cpu_id *cpu_id;
	/* per-generation register layout, carried in the PCI id table */
	const struct pmc_reg_map *map = (struct pmc_reg_map *)id->driver_data;
	int err;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id) {
		dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n");
		return -EINVAL;
	}

	/* pcim_*: device is auto-disabled on driver detach */
	err = pcim_enable_device(dev);
	if (err < 0) {
		dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n");
		return err;
	}

	err = pci_read_config_dword(dev, SPT_PMC_BASE_ADDR_OFFSET,
				    &pmcdev->base_addr);
	if (err < 0) {
		dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
		return err;
	}
	pmcdev->base_addr &= PMC_BASE_ADDR_MASK;
	dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);

	pmcdev->regbase = devm_ioremap_nocache(ptr_dev, pmcdev->base_addr,
					       SPT_PMC_MMIO_REG_LEN);
	if (!pmcdev->regbase) {
		dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n");
		return -ENOMEM;
	}

	mutex_init(&pmcdev->lock);
	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
	pmcdev->map = map;

	/* debugfs is best-effort: a failure only warns, probe still succeeds */
	err = pmc_core_dbgfs_register(pmcdev);
	if (err < 0)
		dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n");

	pmc.has_slp_s0_res = true;
	return 0;
}
static int bcm63xx_wdt_probe(struct platform_device *pdev) { int ret; struct resource *r; timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "failed to get resources\n"); return -ENODEV; } bcm63xx_wdt_device.regs = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r)); if (!bcm63xx_wdt_device.regs) { dev_err(&pdev->dev, "failed to remap I/O resources\n"); return -ENXIO; } ret = bcm63xx_timer_register(TIMER_WDT_ID, bcm63xx_wdt_isr, NULL); if (ret < 0) { dev_err(&pdev->dev, "failed to register wdt timer isr\n"); return ret; } if (bcm63xx_wdt_settimeout(wdt_time)) { bcm63xx_wdt_settimeout(WDT_DEFAULT_TIME); dev_info(&pdev->dev, ": wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time); } ret = misc_register(&bcm63xx_wdt_miscdev); if (ret < 0) { dev_err(&pdev->dev, "failed to register watchdog device\n"); goto unregister_timer; } dev_info(&pdev->dev, " started, timer margin: %d sec\n", WDT_DEFAULT_TIME); return 0; unregister_timer: bcm63xx_timer_unregister(TIMER_WDT_ID); return ret; }
/*
 * stm_gpio_irqmux_probe - probe the ST GPIO interrupt multiplexer.
 *
 * Maps the irqmux registers, installs the chained handler and initialises
 * interrupt support for every GPIO port routed through this mux.
 * Cleanup: the open-coded "end - start + 1" size computation is replaced
 * with the resource_size() helper.
 */
static int __devinit stm_gpio_irqmux_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm_plat_pio_irqmux_data *plat_data = dev->platform_data;
	struct stm_gpio_irqmux *irqmux;
	struct resource *memory;
	int irq;
	int port_no;

	BUG_ON(!plat_data);

	irqmux = devm_kzalloc(dev, sizeof(*irqmux), GFP_KERNEL);
	if (!irqmux)
		return -ENOMEM;

	memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!memory || irq < 0)
		return -EINVAL;

	if (!devm_request_mem_region(dev, memory->start,
				     resource_size(memory), pdev->name))
		return -EBUSY;

	irqmux->base = devm_ioremap_nocache(dev, memory->start,
					    resource_size(memory));
	if (!irqmux->base)
		return -ENOMEM;

	irqmux->port_first = plat_data->port_first;

	set_irq_chained_handler(irq, stm_gpio_irqmux_handler);
	set_irq_data(irq, irqmux);

	for (port_no = irqmux->port_first;
	     port_no < irqmux->port_first + plat_data->ports_num; port_no++) {
		BUG_ON(port_no >= stm_gpio_num);
		if (stm_gpio_irq_init(port_no) != 0) {
			printk(KERN_ERR "stm_gpio: Failed to init gpio "
					"interrupt for port %d!\n", port_no);
			return -EINVAL;
		}
	}

	return 0;
}
static int xgene_gpio_probe(struct platform_device *pdev) { struct resource *res; struct xgene_gpio *gpio; int err = 0; gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) { err = -ENOMEM; goto err; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); gpio->base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!gpio->base) { err = -ENOMEM; goto err; } gpio->chip.ngpio = XGENE_MAX_GPIOS; spin_lock_init(&gpio->lock); gpio->chip.parent = &pdev->dev; gpio->chip.direction_input = xgene_gpio_dir_in; gpio->chip.direction_output = xgene_gpio_dir_out; gpio->chip.get = xgene_gpio_get; gpio->chip.set = xgene_gpio_set; gpio->chip.label = dev_name(&pdev->dev); gpio->chip.base = -1; platform_set_drvdata(pdev, gpio); err = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio); if (err) { dev_err(&pdev->dev, "failed to register gpiochip.\n"); goto err; } dev_info(&pdev->dev, "X-Gene GPIO driver registered.\n"); return 0; err: dev_err(&pdev->dev, "X-Gene GPIO driver registration failed.\n"); return err; }
/*
 * request_and_map - request @res and ioremap it (both devm-managed).
 * Returns the mapping on success or NULL on failure.
 */
static void __iomem *request_and_map(struct device *dev, const struct resource *res)
{
	void __iomem *ptr;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     "denali-dt")) {
		dev_err(dev, "unable to request %s\n", res->name);
		return NULL;
	}

	ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
	/*
	 * Bug fix: the failure message tested "!res" (already known to be
	 * non-NULL here) instead of the mapping result.
	 */
	if (!ptr)
		dev_err(dev, "ioremap_nocache of %s failed!", res->name);

	return ptr;
}
static int scet_probe(struct platform_device *pdev) { int retval = 0; struct resource *r; pr_debug("Probing SCET device\n"); scd = devm_kzalloc(&pdev->dev, sizeof(struct scet_cd), GFP_KERNEL); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) { dev_dbg(&pdev->dev, "No resource\n"); retval = -ENODEV; goto out; } if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r), dev_name(&pdev->dev))) { dev_dbg(&pdev->dev, "Resource not available\n"); retval = -EBUSY; goto out; } scd->addr_reg = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r)); if (!scd->addr_reg) { retval = -ENOMEM; goto out; } scd->timer_freeze = 0; scd->timer_run = 0; pr_debug("Probed and resources allocated\n"); stop_timer(scd); reset_timer(scd); out: return retval; }
static int ltq_gpio_probe(struct platform_device *pdev) { struct resource *res; if (pdev->id >= MAX_PORTS) { dev_err(&pdev->dev, "invalid gpio port %d\n", pdev->id); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get memory for gpio port %d\n", pdev->id); return -ENOENT; } res = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), dev_name(&pdev->dev)); if (!res) { dev_err(&pdev->dev, "failed to request memory for gpio port %d\n", pdev->id); return -EBUSY; } ltq_gpio_port[pdev->id].membase = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!ltq_gpio_port[pdev->id].membase) { dev_err(&pdev->dev, "failed to remap memory for gpio port %d\n", pdev->id); return -ENOMEM; } ltq_gpio_port[pdev->id].chip.label = "ltq_gpio"; ltq_gpio_port[pdev->id].chip.direction_input = ltq_gpio_direction_input; ltq_gpio_port[pdev->id].chip.direction_output = ltq_gpio_direction_output; ltq_gpio_port[pdev->id].chip.get = ltq_gpio_get; ltq_gpio_port[pdev->id].chip.set = ltq_gpio_set; ltq_gpio_port[pdev->id].chip.request = ltq_gpio_req; ltq_gpio_port[pdev->id].chip.base = PINS_PER_PORT * pdev->id; ltq_gpio_port[pdev->id].chip.ngpio = PINS_PER_PORT; platform_set_drvdata(pdev, <q_gpio_port[pdev->id]); return gpiochip_add(<q_gpio_port[pdev->id].chip); }
static int sti_tvout_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct sti_tvout *tvout; struct resource *res; DRM_INFO("%s\n", __func__); if (!node) return -ENODEV; tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL); if (!tvout) return -ENOMEM; tvout->dev = dev; /* get memory resources */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg"); if (!res) { DRM_ERROR("Invalid glue resource\n"); return -ENOMEM; } tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); if (!tvout->regs) return -ENOMEM; /* get reset resources */ tvout->reset = devm_reset_control_get(dev, "tvout"); /* take tvout out of reset */ if (!IS_ERR(tvout->reset)) reset_control_deassert(tvout->reset); platform_set_drvdata(pdev, tvout); return component_add(dev, &sti_tvout_ops); }
static int ltq_ebu_probe(struct platform_device *pdev) { int ret = 0; struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get memory resource\n"); return -ENOENT; } res = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), dev_name(&pdev->dev)); if (!res) { dev_err(&pdev->dev, "failed to request memory resource\n"); return -EBUSY; } ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!ltq_ebu_gpio_membase) { dev_err(&pdev->dev, "Failed to ioremap mem region\n"); return -ENOMEM; } /* grab the default shadow value passed form the platform code */ ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data; /* tell the ebu controller which memory address we will be using */ ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1); /* write protect the region */ ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1); ret = gpiochip_add(<q_ebu_chip); if (!ret) ltq_ebu_apply(); return ret; }
static int au1xi2s_drvprobe(struct platform_device *pdev) { struct resource *iores, *dmares; struct au1xpsc_audio_data *ctx; ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iores) return -ENODEV; if (!devm_request_mem_region(&pdev->dev, iores->start, resource_size(iores), pdev->name)) return -EBUSY; ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start, resource_size(iores)); if (!ctx->mmio) return -EBUSY; dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!dmares) return -EBUSY; ctx->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; dmares = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!dmares) return -EBUSY; ctx->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; platform_set_drvdata(pdev, ctx); return snd_soc_register_component(&pdev->dev, &au1xi2s_component, &au1xi2s_dai_driver, 1); }