Example #1
static int __init landisk_devices_setup(void)
{
	pgprot_t prot;
	unsigned long paddrbase;
	void *cf_ide_base;

	/* open I/O area window */
	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
	cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
	if (!cf_ide_base) {
		printk("allocate_cf_area : can't open CF I/O window!\n");
		return -ENOMEM;
	}

	/* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */
	cf_ide_resources[0].start = (unsigned long)cf_ide_base + 0x40;
	cf_ide_resources[0].end   = (unsigned long)cf_ide_base + 0x40 + 0x0f;
	cf_ide_resources[0].flags = IORESOURCE_IO;
	cf_ide_resources[1].start = (unsigned long)cf_ide_base + 0x2c;
	cf_ide_resources[1].end   = (unsigned long)cf_ide_base + 0x2c + 0x03;
	cf_ide_resources[1].flags = IORESOURCE_IO;
	cf_ide_resources[2].start = IRQ_FATA;
	cf_ide_resources[2].flags = IORESOURCE_IRQ;

	return platform_add_devices(landisk_devices,
				    ARRAY_SIZE(landisk_devices));
}
Example #2
File: kiorpc.c  Project: 0-T-0/ps4-linux
/* Create kernel-VA-space MMIO mapping for an on-chip IO device. */
void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset,
			    unsigned long size)
{
	pgprot_t mmio_base, prot = { 0 };
	unsigned long pfn;
	int err;

	/* Look up the shim's lotar and base PA. */
	err = __iorpc_get_mmio_base(hv_fd, &mmio_base);
	if (err) {
		TRACE("get_mmio_base() failure: %d\n", err);
		return NULL;
	}

	/* Make sure the HV driver approves of our offset and size. */
	err = __iorpc_check_mmio_offset(hv_fd, offset, size);
	if (err) {
		TRACE("check_mmio_offset() failure: %d\n", err);
		return NULL;
	}

	/*
	 * mmio_base contains a base pfn and homing coordinates.  Turn
	 * it into an MMIO pgprot and offset pfn.
	 */
	prot = hv_pte_set_lotar(prot, hv_pte_get_lotar(mmio_base));
	pfn = pte_pfn(mmio_base) + PFN_DOWN(offset);

	return ioremap_prot(PFN_PHYS(pfn), size, prot);
}
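The pointer returned by iorpc_ioremap() is an ordinary __iomem mapping, so callers release it with iounmap(). A minimal caller might look like the sketch below; the register offset and the 64-bit readq() access are illustrative assumptions, not part of the driver above.

/* Sketch only: map a shim's MMIO window, read one register, unmap.
 * The 0x10 offset is a made-up placeholder. */
static u64 example_read_shim_reg(int hv_fd)
{
	void __iomem *base;
	u64 val;

	base = iorpc_ioremap(hv_fd, 0, PAGE_SIZE);
	if (!base)
		return 0;

	val = readq(base + 0x10);
	iounmap(base);
	return val;
}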
Example #3
File: setup.c  Project: CSCLOG/beaglebone
static int __init lboxre2_devices_setup(void)
{
	u32 cf0_io_base;	/* Boot CF base address */
	pgprot_t prot;
	unsigned long paddrbase, psize;

	/* open I/O area window */
	paddrbase = virt_to_phys((void*)PA_AREA5_IO);
	psize = PAGE_SIZE;
	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
	cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot));
	if (!cf0_io_base) {
		printk(KERN_ERR "%s: can't open CF I/O window!\n", __func__);
		return -ENOMEM;
	}

	cf_ide_resources[0].start += cf0_io_base;
	cf_ide_resources[0].end   += cf0_io_base;
	cf_ide_resources[1].start += cf0_io_base;
	cf_ide_resources[1].end   += cf0_io_base;

	return platform_add_devices(lboxre2_devices,
			ARRAY_SIZE(lboxre2_devices));
}
Example #4
File: spu.c  Project: 1314cc/linux
static int __init setup_areas(struct spu *spu)
{
	struct table {char* name; unsigned long addr; unsigned long size;};
	unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));

	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
					   sizeof(struct spe_shadow),
					   shadow_flags);
	if (!spu_pdata(spu)->shadow) {
		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
		LS_SIZE, pgprot_val(pgprot_noncached_wc(__pgprot(0))));

	if (!spu->local_store) {
		pr_debug("%s:%d: ioremap local_store failed\n",
			__func__, __LINE__);
		goto fail_ioremap;
	}

	spu->problem = ioremap(spu->problem_phys,
		sizeof(struct spu_problem));

	if (!spu->problem) {
		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
		sizeof(struct spu_priv2));

	if (!spu->priv2) {
		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
		spu->problem_phys, spu->local_store_phys,
		spu_pdata(spu)->shadow_addr);
	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
		(unsigned long)spu->problem, (unsigned long)spu->local_store,
		(unsigned long)spu_pdata(spu)->shadow);

	return 0;

fail_ioremap:
	spu_unmap(spu);

	return -ENOMEM;
}
Example #5
File: ioremap.c  Project: 0-T-0/ps4-linux
void __iomem *ioremap(unsigned long paddr, unsigned long size)
{
	unsigned long end;

	/* Don't allow wraparound or zero size */
	end = paddr + size - 1;
	if (!size || (end < paddr))
		return NULL;

	/* If the region is h/w uncached, avoid MMU mappings */
	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
		return (void __iomem *)paddr;

	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
}
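A typical caller of this ioremap() (or of ioremap_prot() directly) pairs the mapping with iounmap() once the device registers are no longer needed. The sketch below uses made-up addresses purely to illustrate the pattern; it is not taken from the ARC tree.

/* Sketch: map a device register block, read one register, unmap.
 * EXAMPLE_DEV_PHYS and EXAMPLE_DEV_SIZE are hypothetical values. */
#define EXAMPLE_DEV_PHYS	0xf0001000UL
#define EXAMPLE_DEV_SIZE	0x100UL

static int __init example_dev_init(void)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE);
	if (!regs)
		return -ENOMEM;

	id = readl(regs);
	pr_info("example_dev: id=%08x\n", id);

	iounmap(regs);
	return 0;
}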
Example #6
File: setup.c  Project: CSCLOG/beaglebone
static int __init sh03_devices_setup(void)
{
	pgprot_t prot;
	unsigned long paddrbase;
	void *cf_ide_base;

	/* open I/O area window */
	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
	cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
	if (!cf_ide_base) {
		printk("allocate_cf_area : can't open CF I/O window!\n");
		return -ENOMEM;
	}

	/* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */
	cf_ide_resources[0].start += (unsigned long)cf_ide_base;
	cf_ide_resources[0].end   += (unsigned long)cf_ide_base;
	cf_ide_resources[1].start += (unsigned long)cf_ide_base;
	cf_ide_resources[1].end   += (unsigned long)cf_ide_base;

	return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices));
}
Example #7
static int bman_portal_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct bm_portal_config *pcfg;
	struct resource *addr_phys[2];
	void __iomem *va;
	int irq, cpu;

	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
	if (!pcfg)
		return -ENOMEM;

	pcfg->dev = dev;

	addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CE);
	if (!addr_phys[0]) {
		dev_err(dev, "Can't get %s property 'reg::CE'\n",
			node->full_name);
		return -ENXIO;
	}

	addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
					     DPAA_PORTAL_CI);
	if (!addr_phys[1]) {
		dev_err(dev, "Can't get %s property 'reg::CI'\n",
			node->full_name);
		return -ENXIO;
	}

	pcfg->cpu = -1;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Can't get %s IRQ'\n", node->full_name);
		return -ENXIO;
	}
	pcfg->irq = irq;

	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
	if (!va) {
		dev_err(dev, "ioremap::CE failed\n");
		goto err_ioremap1;
	}

	pcfg->addr_virt[DPAA_PORTAL_CE] = va;

	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
			  _PAGE_GUARDED | _PAGE_NO_CACHE);
	if (!va) {
		dev_err(dev, "ioremap::CI failed\n");
		goto err_ioremap2;
	}

	pcfg->addr_virt[DPAA_PORTAL_CI] = va;

	spin_lock(&bman_lock);
	cpu = cpumask_next_zero(-1, &portal_cpus);
	if (cpu >= nr_cpu_ids) {
		/* unassigned portal, skip init */
		spin_unlock(&bman_lock);
		return 0;
	}

	cpumask_set_cpu(cpu, &portal_cpus);
	spin_unlock(&bman_lock);
	pcfg->cpu = cpu;

	if (!init_pcfg(pcfg)) {
		dev_err(dev, "portal init failed\n");
		goto err_portal_init;
	}

	/* clear irq affinity if assigned cpu is offline */
	if (!cpu_online(cpu))
		bman_offline_cpu(cpu);

	return 0;

err_portal_init:
	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
	return -ENXIO;
}
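Note that the two mappings use different protections: the cache-enabled (CE) portal is mapped with a prot value of 0, while the cache-inhibited (CI) portal is mapped _PAGE_GUARDED | _PAGE_NO_CACHE. On kernels that provide devm_ioremap_resource(), the CE mapping could also be obtained through the managed API, which removes the need for the explicit iounmap() in the error path; the sketch below is only an illustrative alternative with default mapping attributes, not what the driver above does.

/* Sketch: the CE region mapped via the managed API (default attributes,
 * so not equivalent to the explicit ioremap_prot() calls above).
 * Returns an ERR_PTR() on failure. */
static void __iomem *bman_map_ce_managed(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, DPAA_PORTAL_CE);
	return devm_ioremap_resource(&pdev->dev, res);
}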
Example #8
int __init instantiate_cache_sram(struct platform_device *dev,
		struct sram_parameters sram_params)
{
	int ret = 0;

	if (cache_sram) {
		dev_err(&dev->dev, "Already initialized cache-sram\n");
		return -EBUSY;
	}

	cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
	if (!cache_sram) {
		dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
		return -ENOMEM;
	}

	cache_sram->base_phys = sram_params.sram_offset;
	cache_sram->size = sram_params.sram_size;

	if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
						"fsl_85xx_cache_sram")) {
		dev_err(&dev->dev, "%s: request memory failed\n",
				dev->dev.of_node->full_name);
		ret = -ENXIO;
		goto out_free;
	}

	cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
				cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
	if (!cache_sram->base_virt) {
		dev_err(&dev->dev, "%s: ioremap_prot failed\n",
				dev->dev.of_node->full_name);
		ret = -ENOMEM;
		goto out_release;
	}

	cache_sram->rh = rh_create(sizeof(unsigned int));
	if (IS_ERR(cache_sram->rh)) {
		dev_err(&dev->dev, "%s: Unable to create remote heap\n",
				dev->dev.of_node->full_name);
		ret = PTR_ERR(cache_sram->rh);
		goto out_unmap;
	}

	rh_attach_region(cache_sram->rh, 0, cache_sram->size);
	spin_lock_init(&cache_sram->lock);

	dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
		(unsigned long long)cache_sram->base_phys, cache_sram->size);

	return 0;

out_unmap:
	iounmap(cache_sram->base_virt);

out_release:
	release_mem_region(cache_sram->base_phys, cache_sram->size);

out_free:
	kfree(cache_sram);
	return ret;
}
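The matching teardown undoes the successful steps above in reverse order. A sketch is given below; the function name is illustrative and the real driver's remove path may differ.

/* Sketch: release everything instantiate_cache_sram() set up. */
void remove_cache_sram(void)
{
	if (!cache_sram)
		return;

	rh_destroy(cache_sram->rh);
	iounmap(cache_sram->base_virt);
	release_mem_region(cache_sram->base_phys, cache_sram->size);
	kfree(cache_sram);
	cache_sram = NULL;
}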
Example #9
/**
 * axon_ram_probe - probe() method for platform driver
 * @device: see platform_driver method
 */
static int axon_ram_probe(struct platform_device *device)
{
	static int axon_ram_bank_id = -1;
	struct axon_ram_bank *bank;
	struct resource resource;
	int rc = 0;

	axon_ram_bank_id++;

	dev_info(&device->dev, "Found memory controller on %s\n",
			device->dev.of_node->full_name);

	bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
	if (bank == NULL) {
		dev_err(&device->dev, "Out of memory\n");
		rc = -ENOMEM;
		goto failed;
	}

	device->dev.platform_data = bank;

	bank->device = device;

	if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) {
		dev_err(&device->dev, "Cannot access device tree\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->size = resource_size(&resource);

	if (bank->size == 0) {
		dev_err(&device->dev, "No DDR2 memory found for %s%d\n",
				AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
		rc = -ENODEV;
		goto failed;
	}

	dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);

	bank->ph_addr = resource.start;
	bank->io_addr = (unsigned long) ioremap_prot(
			bank->ph_addr, bank->size, _PAGE_NO_CACHE);
	if (bank->io_addr == 0) {
		dev_err(&device->dev, "ioremap() failed\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK);
	if (bank->disk == NULL) {
		dev_err(&device->dev, "Cannot register disk\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk->major = azfs_major;
	bank->disk->first_minor = azfs_minor;
	bank->disk->fops = &axon_ram_devops;
	bank->disk->private_data = bank;
	bank->disk->driverfs_dev = &device->dev;

	sprintf(bank->disk->disk_name, "%s%d",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id);

	bank->disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (bank->disk->queue == NULL) {
		dev_err(&device->dev, "Cannot register disk queue\n");
		rc = -EFAULT;
		goto failed;
	}

	set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
	blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
	blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
	add_disk(bank->disk);

	bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
	if (bank->irq_id == NO_IRQ) {
		dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
		rc = -EFAULT;
		goto failed;
	}

	rc = request_irq(bank->irq_id, axon_ram_irq_handler,
			AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot register ECC interrupt handler\n");
		bank->irq_id = NO_IRQ;
		rc = -EFAULT;
		goto failed;
	}

	rc = device_create_file(&device->dev, &dev_attr_ecc);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot create sysfs file\n");
		rc = -EFAULT;
		goto failed;
	}

	azfs_minor += bank->disk->minors;

	return 0;

failed:
	if (bank != NULL) {
		if (bank->irq_id != NO_IRQ)
			free_irq(bank->irq_id, device);
		if (bank->disk != NULL) {
			if (bank->disk->major > 0)
				unregister_blkdev(bank->disk->major,
						bank->disk->disk_name);
			del_gendisk(bank->disk);
		}
		device->dev.platform_data = NULL;
		if (bank->io_addr != 0)
			iounmap((void __iomem *) bank->io_addr);
		kfree(bank);
	}

	return rc;
}
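A matching remove() would release these resources in reverse order. The sketch below reuses the field names from the probe above and is only an approximation of the driver's real teardown.

/* Sketch of a matching remove(); field names taken from the probe above. */
static int axon_ram_remove(struct platform_device *device)
{
	struct axon_ram_bank *bank = device->dev.platform_data;

	device_remove_file(&device->dev, &dev_attr_ecc);
	free_irq(bank->irq_id, device);
	del_gendisk(bank->disk);
	blk_cleanup_queue(bank->disk->queue);
	iounmap((void __iomem *)bank->io_addr);
	kfree(bank);

	return 0;
}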
Example #10
static int starlet_ioh_init(struct starlet_ioh *ioh, struct resource *mem)
{
	size_t size = mem->end - mem->start + 1;
	rh_info_t *rheap;
	int error = -ENOMEM;

	ioh->base = ioremap_prot(mem->start, size, _PAGE_GUARDED);
	if (!ioh->base) {
		drv_printk(KERN_ERR, "unable to ioremap ioh area\n");
		goto err;
	}
	ioh->base_phys = mem->start;
	ioh->size = size;

	{
		void *first = NULL, *last = NULL;
		u32 *p;

		p = ioh->base + size;
		do {
			p--;
			*p = 0xdeadbabe;
		} while (p != ioh->base);
		__dma_sync(ioh->base, size, DMA_TO_DEVICE);

		p = ioh->base + size;
		do {
			p--;
			if (*p != 0xdeadbabe) {
				if (!last)
					last = p;
				first = p;
			}
		} while (p != ioh->base);

		if (first)
			drv_printk(KERN_INFO, "unreliable writes from"
				   " %p to %p\n", first, last);
	}

	rheap = rh_create(STARLET_IOH_ALIGN+1);
	if (IS_ERR(rheap)) {
		error = PTR_ERR(rheap);
		goto err_rh_create;
	}
	ioh->rheap = rheap;

	error = rh_attach_region(rheap, 0, size);
	if (error)
		goto err_rh_attach_region;

	spin_lock_init(&ioh->lock);

	drv_printk(KERN_INFO, "ioh at 0x%08lx, mapped to 0x%p, size %uk\n",
		   ioh->base_phys, ioh->base, ioh->size / 1024);

	return 0;

err_rh_attach_region:
	rh_destroy(ioh->rheap);
err_rh_create:
	iounmap(ioh->base);
err:
	return error;
}
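Once the remote heap is attached, buffers are carved out of the ioh window with rh_alloc() under ioh->lock. A helper might look like the sketch below; the wrapper name and the exact field types are assumptions.

/* Sketch: allocate a buffer from the ioh window managed above.
 * starlet_ioh_alloc() is a made-up wrapper name. */
static void *starlet_ioh_alloc(struct starlet_ioh *ioh, size_t size)
{
	unsigned long offset;
	unsigned long flags;

	spin_lock_irqsave(&ioh->lock, flags);
	offset = rh_alloc(ioh->rheap, size, "ioh");
	spin_unlock_irqrestore(&ioh->lock, flags);

	if (IS_ERR_VALUE(offset))
		return NULL;

	return ioh->base + offset;
}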