Example 1
static int jz_nemc_probe(struct platform_device *pdev)
{
	struct jz_nemc *nemc;
	struct jz_nand_params *nand_param = pdev->dev.platform_data;
	struct resource *res;
	int i;

	nemc = devm_kzalloc(&pdev->dev, sizeof(*nemc), GFP_KERNEL);
	if (!nemc)
		return -ENOMEM;
	nemc->pdev = pdev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nemc");
	if (!res)
		return -ENOMEM;
	res = devm_request_mem_region(&pdev->dev, res->start,
			resource_size(res), "nemc");
	if (!res)
		return -ENOMEM;
	nemc->base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
	if (!nemc->base)
		return -ENOMEM;
	nemc->selected = -1;
	for (i = 0; i < MAX_CHIP_NUM; i++) {
		char name[20];
		sprintf(name, "nemc-cs%d", i + 1);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
		if (!res)
			break;
		res = devm_request_mem_region(&pdev->dev, res->start,
				resource_size(res), name);
		if (!res)
			return -ENOMEM;
		nemc->chips[i].io_base = devm_ioremap_nocache(&pdev->dev,
				res->start, resource_size(res));
		if (!nemc->chips[i].io_base)
			return -ENOMEM;
		nemc->chips[i].bank = i + 1;
		nemc->max_chip_num++;
	}

	if (nemc->max_chip_num == 0)
		return -ENOMEM;

	if (jz_nand_chip_init(nemc, nand_param))
		return -EIO;

	platform_set_drvdata(pdev, nemc);

	dev_info(&pdev->dev, "Successfully registered External Nand Memory Controller driver\n");
	return 0;
}
Example 2
static int __devinit ltq_stp_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int ret = 0;

	if (!res)
		return -ENOENT;
	res = devm_request_mem_region(&pdev->dev, res->start,
		resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "failed to request STP memory\n");
		return -EBUSY;
	}
	ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
		resource_size(res));
	if (!ltq_stp_membase) {
		dev_err(&pdev->dev, "failed to remap STP memory\n");
		return -ENOMEM;
	}
	ret = gpiochip_add(&ltq_stp_chip);
	if (!ret)
		ret = ltq_stp_hw_init();

	return ret;
}
/**
 * devm_request_and_ioremap() - Check, request region, and ioremap resource
 * @dev: Generic device to handle the resource for
 * @res: resource to be handled
 *
 * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
 * everything is undone on driver detach. Checks arguments, so you can feed
 * it the result from e.g. platform_get_resource() directly. Returns the
 * remapped pointer or NULL on error. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_request_and_ioremap(&pdev->dev, res);
 *	if (!base)
 *		return -EADDRNOTAVAIL;
 */
void __iomem *devm_request_and_ioremap(struct device *dev,
			struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return NULL;
	}

	size = resource_size(res);
	name = res->name ?: dev_name(dev);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return NULL;
	}

	if (res->flags & IORESOURCE_CACHEABLE)
		dest_ptr = devm_ioremap(dev, res->start, size);
	else
		dest_ptr = devm_ioremap_nocache(dev, res->start, size);

	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
	}

	return dest_ptr;
}
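A minimal caller sketch for the helper above, mirroring the usage example in its kerneldoc; foo_probe and the platform device it handles are hypothetical, not taken from any of the examples in this collection.

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -EADDRNOTAVAIL;

	/* the mapping is managed: region and ioremap are undone on detach */
	return 0;
}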
Example 5
static int meson_uart_request_port(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *res;
	int size;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "cannot obtain I/O memory region");
		return -ENODEV;
	}
	size = resource_size(res);

	if (!devm_request_mem_region(port->dev, port->mapbase, size,
				     dev_name(port->dev))) {
		dev_err(port->dev, "Memory region busy\n");
		return -EBUSY;
	}

	if (port->flags & UPF_IOREMAP) {
		port->membase = devm_ioremap_nocache(port->dev,
						     port->mapbase,
						     size);
		if (port->membase == NULL)
			return -ENOMEM;
	}

	return 0;
}
Example 6
/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);
	name = res->name ?: dev_name(dev);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = devm_ioremap(dev, res->start, size);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
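For comparison with the open-coded probes elsewhere in this collection, here is a minimal sketch of the same three steps collapsed onto devm_ioremap_resource(); foo_probe is a hypothetical driver, and note that this helper reports failure through ERR_PTR() rather than NULL.

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* request and ioremap are both undone automatically on driver detach */
	return 0;
}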
static int __init
ltq_wdt_probe(struct platform_device *pdev)
{
    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    struct clk *clk;

    if (!res) {
        dev_err(&pdev->dev, "cannot obtain I/O memory region");
        return -ENOENT;
    }
    res = devm_request_mem_region(&pdev->dev, res->start,
                                  resource_size(res), dev_name(&pdev->dev));
    if (!res) {
        dev_err(&pdev->dev, "cannot request I/O memory region");
        return -EBUSY;
    }
    ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
                                           resource_size(res));
    if (!ltq_wdt_membase) {
        dev_err(&pdev->dev, "cannot remap I/O memory region\n");
        return -ENOMEM;
    }

    /* we do not need to enable the clock as it is always running */
    clk = clk_get(&pdev->dev, "io");
    WARN_ON(!clk);
    ltq_io_region_clk_rate = clk_get_rate(clk);
    clk_put(clk);

    if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
        ltq_wdt_bootstatus = WDIOF_CARDRESET;

    return misc_register(&ltq_wdt_miscdev);
}
/* Map the framebuffer from the card and configure the core */
static int mga_vram_init(struct mga_device *mdev)
{
	void __iomem *mem;
	struct apertures_struct *aper = alloc_apertures(1);
	if (!aper)
		return -ENOMEM;

	/* BAR 0 is VRAM */
	mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
	mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);

	aper->ranges[0].base = mdev->mc.vram_base;
	aper->ranges[0].size = mdev->mc.vram_window;

	remove_conflicting_framebuffers(aper, "mgafb", true);
	kfree(aper);

	if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
				"mgadrmfb_vram")) {
		DRM_ERROR("can't reserve VRAM\n");
		return -ENXIO;
	}

	mem = pci_iomap(mdev->dev->pdev, 0, 0);
	if (!mem)
		return -ENOMEM;

	mdev->mc.vram_size = mga_probe_vram(mdev, mem);

	pci_iounmap(mdev->dev->pdev, mem);

	return 0;
}
Example 9
static int __devinit remap_named_resource(struct platform_device *pdev,
				       char *name,
				       void __iomem **io_ptr)
{
	struct resource *res;
	resource_size_t size;
	void __iomem *p;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res) {
		dev_err(&pdev->dev, "failed to find resource [%s]\n", name);
		return -ENXIO;
	}

	size = resource_size(res);

	if (!devm_request_mem_region(&pdev->dev,
				     res->start, size, name)) {
		dev_err(&pdev->dev, "failed to request memory region "
			"[%s:0x%08x-0x%08x]\n", name, res->start, res->end);
		return -EBUSY;
	}

	p = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!p) {
		dev_err(&pdev->dev, "failed to remap memory region "
			"[%s:0x%08x-0x%08x]\n", name, res->start, res->end);
		return -ENOMEM;
	}

	*io_ptr = p;

	return 0;
}
static int ds1553_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	struct resource *res;
	unsigned int cen, sec;
	struct rtc_plat_data *pdata;
	void __iomem *ioaddr;
	int ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
			pdev->name))
		return -EBUSY;

	ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
	if (!ioaddr)
		return -ENOMEM;
	pdata->ioaddr = ioaddr;
	pdata->irq = platform_get_irq(pdev, 0);

	/* turn RTC on if it was not on */
	sec = readb(ioaddr + RTC_SECONDS);
	if (sec & RTC_STOP) {
		sec &= RTC_SECONDS_MASK;
		cen = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
		writeb(RTC_WRITE, ioaddr + RTC_CONTROL);
		writeb(sec, ioaddr + RTC_SECONDS);
		writeb(cen & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL);
	}
	if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF)
		dev_warn(&pdev->dev, "voltage-low detected.\n");

	spin_lock_init(&pdata->lock);
	pdata->last_jiffies = jiffies;
	platform_set_drvdata(pdev, pdata);
	if (pdata->irq > 0) {
		writeb(0, ioaddr + RTC_INTERRUPTS);
		if (devm_request_irq(&pdev->dev, pdata->irq,
				ds1553_rtc_interrupt,
				0, pdev->name, pdev) < 0) {
			dev_warn(&pdev->dev, "interrupt not available.\n");
			pdata->irq = 0;
		}
	}

	rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
				  &ds1553_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);
	pdata->rtc = rtc;

	ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);

	return ret;
}
Example 11
static int __init tx4938ide_probe(struct platform_device *pdev)
{
    struct ide_hw hw, *hws[] = { &hw };
    struct ide_host *host;
    struct resource *res;
    struct tx4938ide_platform_info *pdata = pdev->dev.platform_data;
    int irq, ret, i;
    unsigned long mapbase, mapctl;
    struct ide_port_info d = tx4938ide_port_info;

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return -ENODEV;
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -ENODEV;

    if (!devm_request_mem_region(&pdev->dev, res->start,
                                 resource_size(res), "tx4938ide"))
        return -EBUSY;
    mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
                                          8 << pdata->ioport_shift);
    mapctl = (unsigned long)devm_ioremap(&pdev->dev,
                                         res->start + 0x10000 +
                                         (6 << pdata->ioport_shift),
                                         1 << pdata->ioport_shift);
    if (!mapbase || !mapctl)
        return -EBUSY;

    memset(&hw, 0, sizeof(hw));
    if (pdata->ioport_shift) {
        unsigned long port = mapbase;
        unsigned long ctl = mapctl;

        hw.io_ports_array[0] = port;
#ifdef __BIG_ENDIAN
        port++;
        ctl++;
#endif
        for (i = 1; i <= 7; i++)
            hw.io_ports_array[i] =
                port + (i << pdata->ioport_shift);
        hw.io_ports.ctl_addr = ctl;
    } else
        ide_std_init_ports(&hw, mapbase, mapctl);
    hw.irq = irq;
    hw.dev = &pdev->dev;

    pr_info("TX4938 IDE interface (base %#lx, ctl %#lx, irq %d)\n",
            mapbase, mapctl, hw.irq);
    if (pdata->gbus_clock)
        tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0);
    else
        d.port_ops = NULL;
    ret = ide_host_add(&d, hws, 1, &host);
    if (!ret)
        platform_set_drvdata(pdev, host);
    return ret;
}
Example 12
/**
 * Creates an MC I/O object
 *
 * @dev: device to be associated with the MC I/O object
 * @mc_portal_phys_addr: physical address of the MC portal to use
 * @mc_portal_size: size in bytes of the MC portal
 * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
 * object or NULL if none.
 * @flags: flags for the new MC I/O object
 * @new_mc_io: Area to return pointer to newly created MC I/O object
 *
 * Returns '0' on Success; Error code otherwise.
 */
int __must_check fsl_create_mc_io(struct device *dev,
				  phys_addr_t mc_portal_phys_addr,
				  u32 mc_portal_size,
				  struct fsl_mc_device *dpmcp_dev,
				  u32 flags, struct fsl_mc_io **new_mc_io)
{
	int error;
	struct fsl_mc_io *mc_io;
	void __iomem *mc_portal_virt_addr;
	struct resource *res;

	mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
	if (!mc_io)
		return -ENOMEM;

	mc_io->dev = dev;
	mc_io->flags = flags;
	mc_io->portal_phys_addr = mc_portal_phys_addr;
	mc_io->portal_size = mc_portal_size;
	if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
		spin_lock_init(&mc_io->spinlock);
	else
		mutex_init(&mc_io->mutex);

	res = devm_request_mem_region(dev,
				      mc_portal_phys_addr,
				      mc_portal_size,
				      "mc_portal");
	if (!res) {
		dev_err(dev,
			"devm_request_mem_region failed for MC portal %#llx\n",
			mc_portal_phys_addr);
		return -EBUSY;
	}

	mc_portal_virt_addr = devm_ioremap_nocache(dev,
						   mc_portal_phys_addr,
						   mc_portal_size);
	if (!mc_portal_virt_addr) {
		dev_err(dev,
			"devm_ioremap_nocache failed for MC portal %#llx\n",
			mc_portal_phys_addr);
		return -ENXIO;
	}

	mc_io->portal_virt_addr = mc_portal_virt_addr;
	if (dpmcp_dev) {
		error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
		if (error < 0)
			goto error_destroy_mc_io;
	}

	*new_mc_io = mc_io;
	return 0;

error_destroy_mc_io:
	fsl_destroy_mc_io(mc_io);
	return error;
}
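A hedged caller fragment for fsl_create_mc_io(); the mc_portal_phys and mc_portal_size variables and the choice of flags are assumptions for illustration, not values taken from the function above.

	struct fsl_mc_io *mc_io;
	int error;

	/* passing NULL for dpmcp_dev means no DPMCP object is attached yet */
	error = fsl_create_mc_io(&pdev->dev, mc_portal_phys, mc_portal_size,
				 NULL, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
	if (error)
		return error;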
int custom_uart_driver_probe(struct platform_device *pdev)
{
	void __iomem *membase;
	struct resource *mem_res;
	void __iomem *cm_per;
	unsigned int uart5_clkctrl = 0;

	cm_per = ioremap(CM_PER_START_ADDR, CM_PER_SIZE);
	if (!cm_per) {
		dev_err(&pdev->dev, "failed to remap memory for CM_PER\n");
		return -ENOMEM;
	}
	uart5_clkctrl = ioread32(cm_per + CM_PER_UART4_CLKCTRL);
	printk("uart5_clkctrl=%x\n", uart5_clkctrl);
	iowrite32(0x02, cm_per + CM_PER_UART4_CLKCTRL); /* enable the clock for UART5 */
	uart5_clkctrl = ioread32(cm_per + CM_PER_UART4_CLKCTRL);
	printk("uart5_clkctrl=%x\n", uart5_clkctrl);
	iounmap(cm_per);

	printk("in probe: of_get_uart_port_info\n");

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}
	if (!devm_request_mem_region(&pdev->dev, mem_res->start,
				     resource_size(mem_res),
				     pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}
	membase = devm_ioremap_nocache(&pdev->dev, mem_res->start,
				       resource_size(mem_res));
	if (!membase)
		return -ENODEV;

	/* LCR register at offset 0x0C */
	printk("LCR=%x\n", ioread16(membase + 0x0C));
	iowrite16(0x00BF, membase + 0x0C);
	printk("LCR=%x\n", ioread16(membase + 0x0C));

	iowrite16(ioread16(membase + 0x0C) ^ 0x01, membase + 0x0C);
	printk("LCR=%x\n", ioread16(membase + 0x0C));

	/* MDR1 register at offset 0x20: toggle the lowest mode bit */
	printk("MDR1=%x\n", ioread32(membase + 0x20));
	iowrite32(ioread32(membase + 0x20) ^ 0x01, membase + 0x20);
	printk("MDR1=%x\n", ioread32(membase + 0x20));

	return 0;
}
Example 14
static int jz_dmic_platfrom_probe(struct platform_device *pdev)
{
	struct jz_dmic *jz_dmic;
	struct resource *res = NULL;
	int i = 0, ret;

	jz_dmic = devm_kzalloc(&pdev->dev, sizeof(struct jz_dmic), GFP_KERNEL);
	if (!jz_dmic)
		return -ENOMEM;

	jz_dmic->dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;
	if (!devm_request_mem_region(&pdev->dev,
				res->start, resource_size(res), pdev->name))
		return -EBUSY;

	jz_dmic->res_start = res->start;
	jz_dmic->res_size = resource_size(res);
	jz_dmic->vaddr_base = devm_ioremap_nocache(&pdev->dev,
			jz_dmic->res_start, jz_dmic->res_size);
	if (!jz_dmic->vaddr_base) {
		dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
		return -ENOMEM;
	}

	jz_dmic->dmic_mode = 0;
	jz_dmic->rx_dma_data.dma_addr = (dma_addr_t)jz_dmic->res_start + DMICDR;

	jz_dmic->vcc_dmic = regulator_get(&pdev->dev,"vcc_dmic");
	platform_set_drvdata(pdev, (void *)jz_dmic);

	for (; i < ARRAY_SIZE(jz_dmic_sysfs_attrs); i++) {
		ret = device_create_file(&pdev->dev, &jz_dmic_sysfs_attrs[i]);
		if (ret)
			dev_warn(&pdev->dev,"attribute %s create failed %x",
					attr_name(jz_dmic_sysfs_attrs[i]), ret);
	}
	jz_dmic->clk_gate_dmic = clk_get(&pdev->dev, "dmic");
	if (IS_ERR_OR_NULL(jz_dmic->clk_gate_dmic)) {
		ret = PTR_ERR(jz_dmic->clk_gate_dmic);
		jz_dmic->clk_gate_dmic = NULL;
		dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
		return ret;
	}
	ret = snd_soc_register_component(&pdev->dev, &jz_dmic_component,
					 &jz_dmic_dai, 1);
	if (ret)
		goto err_register_cpu_dai;
	dev_dbg(&pdev->dev, "dmic platform probe success\n");
	return ret;

err_register_cpu_dai:
	platform_set_drvdata(pdev, NULL);
	return ret;
}
Example 15
File: ac97c.c Project: 020gzh/linux
static int au1xac97c_drvprobe(struct platform_device *pdev)
{
	int ret;
	struct resource *iores, *dmares;
	struct au1xpsc_audio_data *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, iores->start,
				     resource_size(iores),
				     pdev->name))
		return -EBUSY;

	ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start,
					 resource_size(iores));
	if (!ctx->mmio)
		return -EBUSY;

	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmares)
		return -EBUSY;
	ctx->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start;

	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmares)
		return -EBUSY;
	ctx->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start;

	/* switch it on */
	WR(ctx, AC97_ENABLE, EN_D | EN_CE);
	WR(ctx, AC97_ENABLE, EN_CE);

	ctx->cfg = CFG_RC(3) | CFG_XS(3);
	WR(ctx, AC97_CONFIG, ctx->cfg);

	platform_set_drvdata(pdev, ctx);

	ret = snd_soc_set_ac97_ops(&ac97c_bus_ops);
	if (ret)
		return ret;

	ret = snd_soc_register_component(&pdev->dev, &au1xac97c_component,
					 &au1xac97c_dai_driver, 1);
	if (ret)
		return ret;

	ac97c_workdata = ctx;
	return 0;
}
Example 16
static int bcm63xx_rng_probe(struct platform_device *pdev)
{
	struct resource *r;
	struct clk *clk;
	int error;
	struct bcm63xx_rng_priv *priv;
	struct hwrng *rng;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no iomem resource\n");
		return -ENXIO;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->rng.name = pdev->name;
	priv->rng.init = bcm63xx_rng_init;
	priv->rng.cleanup = bcm63xx_rng_cleanup;
	priv->rng.data_present = bcm63xx_rng_data_present;
	priv->rng.data_read = bcm63xx_rng_data_read;

	priv->clk = devm_clk_get(&pdev->dev, "ipsec");
	if (IS_ERR(priv->clk)) {
		error = PTR_ERR(priv->clk);
		dev_err(&pdev->dev, "no clock for device: %d\n", error);
		return error;
	}

	if (!devm_request_mem_region(&pdev->dev, r->start,
					resource_size(r), pdev->name)) {
		dev_err(&pdev->dev, "request mem failed");
		return -EBUSY;
	}

	priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
					resource_size(r));
	if (!priv->regs) {
		dev_err(&pdev->dev, "ioremap failed");
		return -ENOMEM;
	}

	error = devm_hwrng_register(&pdev->dev, &priv->rng);
	if (error) {
		dev_err(&pdev->dev, "failed to register rng device: %d\n",
			error);
		return error;
	}

	dev_info(&pdev->dev, "registered RNG driver\n");

	return 0;
}
Example 17
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
	int ret;
	int res_size;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't get device resources\n");
		return -ENODEV;
	}

	res_size = resource_size(res);
	if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
		res->name)) {
		dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
			res_size, res->start);
		return -ENOMEM;
	}

	imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
	if (!imx2_wdt.base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	imx2_wdt.clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(imx2_wdt.clk)) {
		dev_err(&pdev->dev, "can't get Watchdog clock\n");
		return PTR_ERR(imx2_wdt.clk);
	}

	imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
	if (imx2_wdt.timeout != timeout)
		dev_warn(&pdev->dev, "Initial timeout out of range! "
			"Clamped from %u to %u\n", timeout, imx2_wdt.timeout);

	setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);

	imx2_wdt_miscdev.parent = &pdev->dev;
	ret = misc_register(&imx2_wdt_miscdev);
	if (ret)
		goto fail;

	dev_info(&pdev->dev,
		"IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
						imx2_wdt.timeout, nowayout);
	return 0;

fail:
	imx2_wdt_miscdev.parent = NULL;
	clk_put(imx2_wdt.clk);
	return ret;
}
static int spdif_out_probe(struct platform_device *pdev)
{
	struct spdif_out_dev *host;
	struct spear_spdif_platform_data *pdata;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				resource_size(res), pdev->name)) {
		dev_warn(&pdev->dev, "Failed to get memory resourse\n");
		return -ENOENT;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		dev_warn(&pdev->dev, "kzalloc fail\n");
		return -ENOMEM;
	}

	host->io_base = devm_ioremap(&pdev->dev, res->start,
				resource_size(res));
	if (!host->io_base) {
		dev_warn(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk))
		return PTR_ERR(host->clk);

	pdata = dev_get_platdata(&pdev->dev);

	host->dma_params.data = pdata->dma_params;
	host->dma_params.addr = res->start + SPDIF_OUT_FIFO_DATA;
	host->dma_params.max_burst = 16;
	host->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_params.filter = pdata->filter;

	dev_set_drvdata(&pdev->dev, host);

	ret = snd_soc_register_component(&pdev->dev, &spdif_out_component,
					 &spdif_out_dai, 1);
	if (ret != 0) {
		clk_put(host->clk);
		return ret;
	}

	return 0;
}
Example 19
static int __devinit
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;

	if (!devm_request_mem_region(&adev->dev, adev->res.start,
				resource_size(&adev->res), "sp805_wdt")) {
		dev_warn(&adev->dev, "Failed to get memory region resource\n");
		ret = -ENOENT;
		goto err;
	}

	wdt = devm_kzalloc(&adev->dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt) {
		dev_warn(&adev->dev, "Kzalloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	wdt->base = devm_ioremap(&adev->dev, adev->res.start,
			resource_size(&adev->res));
	if (!wdt->base) {
		ret = -ENOMEM;
		dev_warn(&adev->dev, "ioremap fail\n");
		goto err;
	}

	wdt->clk = clk_get(&adev->dev, NULL);
	if (IS_ERR(wdt->clk)) {
		dev_warn(&adev->dev, "Clock not found\n");
		ret = PTR_ERR(wdt->clk);
		goto err;
	}

	wdt->adev = adev;
	spin_lock_init(&wdt->lock);
	wdt_setload(DEFAULT_TIMEOUT);

	ret = misc_register(&sp805_wdt_miscdev);
	if (ret < 0) {
		dev_warn(&adev->dev, "cannot register misc device\n");
		goto err_misc_register;
	}

	dev_info(&adev->dev, "registration successful\n");
	return 0;

err_misc_register:
	clk_put(wdt->clk);
err:
	dev_err(&adev->dev, "Probe Failed!!!\n");
	return ret;
}
Example 20
static int __devinit virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return  -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (memcmp(&magic, "virt", 4) != 0) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version != 1) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	platform_set_drvdata(pdev, vm_dev);

	return register_virtio_device(&vm_dev->vdev);
}
Example 21
static int s5p_aes_probe(struct platform_device *pdev)
{
	int                 i, j, err = -ENODEV;
	struct s5p_aes_dev *pdata;
	struct device      *dev = &pdev->dev;
	struct resource    *res;

	if (s5p_dev)
		return -EEXIST;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (!devm_request_mem_region(dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	pdata->clk = clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	clk_enable(pdata->clk);

	spin_lock_init(&pdata->lock);
	pdata->ioaddr = devm_ioremap(dev, res->start,
				     resource_size(res));

	pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
	if (pdata->irq_hash < 0) {
		err = pdata->irq_hash;
		dev_warn(dev, "hash interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
			       IRQF_SHARED, pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "hash interrupt is not available.\n");
		goto err_irq;
	}

	pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not ava
Example 22
static int picoxcell_trng_probe(struct platform_device *pdev)
{
    int ret;
    struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    if (!mem) {
        dev_warn(&pdev->dev, "no memory resource\n");
        return -ENOMEM;
    }

    if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
                                 "picoxcell_trng")) {
        dev_warn(&pdev->dev, "unable to request io mem\n");
        return -EBUSY;
    }

    rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
    if (!rng_base) {
        dev_warn(&pdev->dev, "unable to remap io mem\n");
        return -ENOMEM;
    }

    rng_clk = clk_get(&pdev->dev, NULL);
    if (IS_ERR(rng_clk)) {
        dev_warn(&pdev->dev, "no clk\n");
        return PTR_ERR(rng_clk);
    }

    ret = clk_enable(rng_clk);
    if (ret) {
        dev_warn(&pdev->dev, "unable to enable clk\n");
        goto err_enable;
    }

    picoxcell_trng_start();
    ret = hwrng_register(&picoxcell_trng);
    if (ret)
        goto err_register;

    rng_dev = &pdev->dev;
    dev_info(&pdev->dev, "pixoxcell random number generator active\n");

    return 0;

err_register:
    clk_disable(rng_clk);
err_enable:
    clk_put(rng_clk);

    return ret;
}
Example 23
static int gpio_apu2_probe (struct platform_device *dev)
{
	int ret = 0;
	int i;
	struct pci_dev *pci_dev = NULL;

	/* Match the PCI device */
	for_each_pci_dev (pci_dev) {
		if (pci_match_id (gpio_apu2_pci_tbl, pci_dev) != NULL) {
			gpio_apu2_pci = pci_dev;
			break;
		}
	}

	if (!gpio_apu2_pci)
		return -ENODEV;

	pr_info ("%s: PCI Revision ID: 0x%x\n", DEVNAME, gpio_apu2_pci->revision);

	/* Determine type of southbridge chipset */
	if (gpio_apu2_pci->revision < 0x40) {
		return -EACCES;
	}

	/* Request memory region for GPIO's */
	if (!devm_request_mem_region (&dev->dev, FCH_GPIO_BASE,
		FCH_GPIO_SIZE, DEVNAME)){
		pr_err ("%s: request GPIO mem region failed\n", DEVNAME);
		return -ENXIO;
	}

	/* Map IO's for GPIO's */
	for (i = 0; i < APU_NUM_GPIO; i++) {
		gpio_addr[i] = devm_ioremap (&dev->dev,
			FCH_GPIO_BASE + (gpio_offset[i] * sizeof (u32)), sizeof (u32));
		if (!gpio_addr[i]) {
			pr_err ("%s: map GPIO%d address failed\n", DEVNAME, gpio_offset[i]);
			return -ENXIO;
		}
	}

	gpio_apu2_chip.dev = &dev->dev;
	ret = gpiochip_add (&gpio_apu2_chip);
	if (ret) {
		pr_err ("%s: adding gpiochip failed\n", DEVNAME);
	}

	return ret;
}
Example 24
static int __devinit stm_gpio_irqmux_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm_plat_pio_irqmux_data *plat_data = dev->platform_data;
	struct stm_gpio_irqmux *irqmux;
	struct resource *memory;
	int irq;
	int port_no;

	BUG_ON(!plat_data);

	irqmux = devm_kzalloc(dev, sizeof(*irqmux), GFP_KERNEL);
	if (!irqmux)
		return -ENOMEM;

	memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!memory || irq < 0)
		return -EINVAL;

	if (!devm_request_mem_region(dev, memory->start,
			memory->end - memory->start + 1, pdev->name))
		return -EBUSY;

	irqmux->base = devm_ioremap_nocache(dev, memory->start,
			memory->end - memory->start + 1);
	if (!irqmux->base)
		return -ENOMEM;

	irqmux->port_first = plat_data->port_first;

	set_irq_chained_handler(irq, stm_gpio_irqmux_handler);
	set_irq_data(irq, irqmux);

	for (port_no = irqmux->port_first;
			port_no < irqmux->port_first + plat_data->ports_num;
			port_no++) {
		BUG_ON(port_no >= stm_gpio_num);

		if (stm_gpio_irq_init(port_no) != 0) {
			printk(KERN_ERR "stm_gpio: Failed to init gpio "
					"interrupt for port %d!\n", port_no);
			return -EINVAL;
		}
	}

	return 0;
}
Example 25
static int octeon_gpio_probe(struct platform_device *pdev)
{
	struct octeon_gpio *gpio;
	struct gpio_chip *chip;
	struct resource *res_mem;
	int err = 0;

	gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
	if (!gpio)
		return -ENOMEM;
	chip = &gpio->chip;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mem == NULL) {
		dev_err(&pdev->dev, "found no memory resource\n");
		err = -ENXIO;
		goto out;
	}
	if (!devm_request_mem_region(&pdev->dev, res_mem->start,
					resource_size(res_mem),
				     res_mem->name)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		err = -ENXIO;
		goto out;
	}
	gpio->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
						resource_size(res_mem));
	if (!gpio->register_base) {
		err = -ENOMEM;
		goto out;
	}

	pdev->dev.platform_data = chip;
	chip->label = "octeon-gpio";
	chip->parent = &pdev->dev;
	chip->owner = THIS_MODULE;
	chip->base = 0;
	chip->can_sleep = false;
	chip->ngpio = 20;
	chip->direction_input = octeon_gpio_dir_in;
	chip->get = octeon_gpio_get;
	chip->direction_output = octeon_gpio_dir_out;
	chip->set = octeon_gpio_set;
	err = gpiochip_add_data(chip, gpio);
	if (err)
		goto out;

	dev_info(&pdev->dev, "OCTEON GPIO driver probed.\n");
out:
	return err;
}
Example 26
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
		base = 0;
	}

	return base;
}
Example 27
static void __iomem *request_and_map(struct device *dev,
				     const struct resource *res)
{
	void __iomem *ptr;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     "denali-dt")) {
		dev_err(dev, "unable to request %s\n", res->name);
		return NULL;
	}

	ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!ptr)
		dev_err(dev, "ioremap_nocache of %s failed!", res->name);

	return ptr;
}
Example 28
int dpa_bp_shared_port_seed(struct dpa_bp *bp)
{
	/* In MAC-less and Shared-MAC scenarios the physical
	 * address of the buffer pool in device tree is set
	 * to 0 to specify that another entity (USDPAA) will
	 * allocate and seed the buffers
	 */
	if (!bp->paddr)
		return 0;

	/* allocate memory region for buffers */
	if (!devm_request_mem_region(bp->dev, bp->paddr,
			bp->size * bp->config_count, KBUILD_MODNAME)) {
		pr_err("Could not request memory region for pool %d\n",
		       bp->bpid);
		return -EBUSY;
	}
	bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr,
			bp->size * bp->config_count, 0);
	if (bp->vaddr == NULL) {
		pr_err("Could not map memory for pool %d\n", bp->bpid);
		return -EIO;
	}

	/* seed pool with buffers from that memory region */
	if (bp->seed_pool) {
		int count = bp->target_count;
		size_t addr = bp->paddr;

		while (count) {
			struct bm_buffer bufs[8];
			int num_bufs = 0;

			do {
				BUG_ON(addr > 0xffffffffffffull);
				bufs[num_bufs].bpid = bp->bpid;
				bm_buffer_set64(&bufs[num_bufs++], addr);
				addr += bp->size;

			} while (--count && (num_bufs < 8));

			while (bman_release(bp->pool, bufs, num_bufs, 0))
				cpu_relax();
		}
	}

	return 0;
}
Example 29
static int __devinit dw_wdt_drv_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
				     "dw_wdt"))
		return -ENOMEM;

	dw_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!dw_wdt.regs)
		return -ENOMEM;

	dw_wdt.clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(dw_wdt.clk))
		return PTR_ERR(dw_wdt.clk);

	ret = clk_enable(dw_wdt.clk);
	if (ret)
		goto out_put_clk;

	spin_lock_init(&dw_wdt.lock);

	ret = misc_register(&dw_wdt_miscdev);
	if (ret)
		goto out_disable_clk;

	dw_wdt_set_next_heartbeat();
	setup_timer(&dw_wdt.timer, dw_wdt_ping, 0);
	mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);

	return 0;

out_disable_clk:
	clk_disable(dw_wdt.clk);
out_put_clk:
	clk_put(dw_wdt.clk);

	return ret;
}
Example 30
static int ep93xx_wdt_probe(struct platform_device *pdev)
{
    struct resource *res;
    unsigned long val;
    int err;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -ENXIO;

    if (!devm_request_mem_region(&pdev->dev, res->start,
                                 resource_size(res), pdev->name))
        return -EBUSY;

    mmio_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
    if (!mmio_base)
        return -ENXIO;

    if (timeout < 1 || timeout > 3600) {
        timeout = WDT_TIMEOUT;
        dev_warn(&pdev->dev,
                 "timeout value must be 1<=x<=3600, using %d\n",
                 timeout);
    }

    val = readl(mmio_base + EP93XX_WATCHDOG);
    ep93xx_wdt_wdd.bootstatus = (val & 0x01) ? WDIOF_CARDRESET : 0;
    ep93xx_wdt_wdd.timeout = timeout;

    watchdog_set_nowayout(&ep93xx_wdt_wdd, nowayout);

    setup_timer(&timer, ep93xx_wdt_timer_ping, 1);

    err = watchdog_register_device(&ep93xx_wdt_wdd);
    if (err)
        return err;

    dev_info(&pdev->dev,
             "EP93XX watchdog, driver version " WDT_VERSION "%s\n",
             (val & 0x08) ? " (nCS1 disable detected)" : "");

    return 0;
}