int __init instantiate_cache_sram(struct of_device *dev,
		struct sram_parameters sram_params)
{
	int ret = 0;

	if (cache_sram) {
		dev_err(&dev->dev, "Already initialized cache-sram\n");
		return -EBUSY;
	}

	cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
	if (!cache_sram) {
		dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
		return -ENOMEM;
	}

	cache_sram->base_phys = sram_params.sram_offset;
	cache_sram->size = sram_params.sram_size;

	if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
				"fsl_85xx_cache_sram")) {
		dev_err(&dev->dev, "%s: request memory failed\n",
				dev->dev.of_node->full_name);
		ret = -ENXIO;
		goto out_free;
	}

	cache_sram->base_virt = ioremap_flags(cache_sram->base_phys,
			cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
	if (!cache_sram->base_virt) {
		dev_err(&dev->dev, "%s: ioremap_flags failed\n",
				dev->dev.of_node->full_name);
		ret = -ENOMEM;
		goto out_release;
	}

	cache_sram->rh = rh_create(sizeof(unsigned int));
	if (IS_ERR(cache_sram->rh)) {
		dev_err(&dev->dev, "%s: Unable to create remote heap\n",
				dev->dev.of_node->full_name);
		ret = PTR_ERR(cache_sram->rh);
		goto out_unmap;
	}

	rh_attach_region(cache_sram->rh, 0, cache_sram->size);

	spin_lock_init(&cache_sram->lock);

	dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
		(unsigned long long)cache_sram->base_phys, cache_sram->size);

	return 0;

out_unmap:
	iounmap(cache_sram->base_virt);

out_release:
	release_mem_region(cache_sram->base_phys, cache_sram->size);

out_free:
	kfree(cache_sram);
	/* reset so a later init attempt does not see a stale pointer */
	cache_sram = NULL;
	return ret;
}
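/*
 * A minimal sketch of how an allocation helper built on top of the rheap
 * created above might look. This is an illustration, not necessarily the
 * driver's exact code: the function name, the alignment checks and the use
 * of rh_alloc_align() under the spinlock are assumptions.
 */
void *mpc85xx_cache_sram_alloc(unsigned int size, phys_addr_t *phys,
			       unsigned int align)
{
	unsigned long offset;
	unsigned long flags;

	if (!cache_sram || !size || size > cache_sram->size)
		return NULL;

	/* rh_alloc_align() expects a power-of-two alignment */
	if (align < 2 || (align & (align - 1)))
		return NULL;

	spin_lock_irqsave(&cache_sram->lock, flags);
	offset = rh_alloc_align(cache_sram->rh, size, align, NULL);
	spin_unlock_irqrestore(&cache_sram->lock, flags);

	if (IS_ERR_VALUE(offset))
		return NULL;

	/* translate the rheap offset into physical and virtual addresses */
	*phys = cache_sram->base_phys + offset;
	return (unsigned char *)cache_sram->base_virt + offset;
}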
int bcom_sram_init(struct device_node *sram_node, char *owner)
{
	int rv;
	const u32 *regaddr_p;
	u64 regaddr64, size64;
	unsigned int psize;

	/* Create our state struct */
	if (bcom_sram) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Already initialized !\n", owner);
		return -EBUSY;
	}

	bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
	if (!bcom_sram) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Couldn't allocate internal state !\n", owner);
		return -ENOMEM;
	}

	/* Get address and size of the sram */
	regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
	if (!regaddr_p) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Invalid device node !\n", owner);
		rv = -EINVAL;
		goto error_free;
	}

	regaddr64 = of_translate_address(sram_node, regaddr_p);

	bcom_sram->base_phys = (phys_addr_t) regaddr64;
	bcom_sram->size = (unsigned int) size64;

	/* Request region */
	if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Couldn't request region !\n", owner);
		rv = -EBUSY;
		goto error_free;
	}

	/* Map SRAM */
	/* sram is not really __iomem */
	bcom_sram->base_virt = (void *)ioremap(bcom_sram->base_phys,
					       bcom_sram->size);
	if (!bcom_sram->base_virt) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Map error SRAM zone 0x%08lx (0x%0x)!\n",
			owner, (long)bcom_sram->base_phys, bcom_sram->size);
		rv = -ENOMEM;
		goto error_release;
	}

	/* Create an rheap (defaults to 32 bits word alignment) */
	bcom_sram->rh = rh_create(4);

	/* Attach the free zones */
#if 0
	/* Currently disabled ... for future use only */
	regaddr_p = of_get_property(sram_node, "available", &psize);
#else
	regaddr_p = NULL;
	psize = 0;
#endif

	if (!regaddr_p || !psize) {
		/* Attach the whole zone */
		rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
	} else {
		/* Attach each zone independently */
		while (psize >= 2 * sizeof(u32)) {
			phys_addr_t zbase = of_translate_address(sram_node,
								 regaddr_p);
			rh_attach_region(bcom_sram->rh,
					 zbase - bcom_sram->base_phys,
					 regaddr_p[1]);
			regaddr_p += 2;
			psize -= 2 * sizeof(u32);
		}
	}

	/* Init our spinlock */
	spin_lock_init(&bcom_sram->lock);

	return 0;

error_release:
	release_mem_region(bcom_sram->base_phys, bcom_sram->size);
error_free:
	kfree(bcom_sram);
	bcom_sram = NULL;

	return rv;
}
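/*
 * A minimal sketch of the allocator/free pair that would sit on top of the
 * rheap attached above, assuming allocations are serialized with the
 * bcom_sram->lock spinlock. The names and exact prototypes here are
 * assumptions for illustration, not necessarily the driver's own API.
 */
void *bcom_sram_alloc(int size, int align, phys_addr_t *phys)
{
	unsigned long offset;

	spin_lock(&bcom_sram->lock);
	offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
	spin_unlock(&bcom_sram->lock);

	if (IS_ERR_VALUE(offset))
		return NULL;

	/* return both views of the allocation: physical and virtual */
	*phys = bcom_sram->base_phys + offset;
	return (char *)bcom_sram->base_virt + offset;
}

void bcom_sram_free(void *ptr)
{
	unsigned long offset = (char *)ptr - (char *)bcom_sram->base_virt;

	spin_lock(&bcom_sram->lock);
	rh_free(bcom_sram->rh, offset);
	spin_unlock(&bcom_sram->lock);
}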
static int starlet_ioh_init(struct starlet_ioh *ioh, struct resource *mem)
{
	size_t size = mem->end - mem->start + 1;
	rh_info_t *rheap;
	int error = -ENOMEM;

	ioh->base = ioremap_prot(mem->start, size, _PAGE_GUARDED);
	if (!ioh->base) {
		drv_printk(KERN_ERR, "unable to ioremap ioh area\n");
		goto err;
	}
	ioh->base_phys = mem->start;
	ioh->size = size;

	{
		/* probe for unreliable writes by filling the area with a
		 * pattern, pushing it out and reading it back */
		void *first = NULL, *last = NULL;
		u32 *p;

		p = ioh->base + size;
		do {
			p--;
			*p = 0xdeadbabe;
		} while (p != ioh->base);
		__dma_sync(ioh->base, size, DMA_TO_DEVICE);

		p = ioh->base + size;
		do {
			p--;
			if (*p != 0xdeadbabe) {
				if (!last)
					last = p;
				first = p;
			}
		} while (p != ioh->base);

		if (first)
			drv_printk(KERN_INFO, "unreliable writes from"
				   " %p to %p\n", first, last);
	}

	rheap = rh_create(STARLET_IOH_ALIGN + 1);
	if (IS_ERR(rheap)) {
		error = PTR_ERR(rheap);
		goto err_rh_create;
	}
	ioh->rheap = rheap;

	error = rh_attach_region(rheap, 0, size);
	if (error)
		goto err_rh_attach_region;

	spin_lock_init(&ioh->lock);

	drv_printk(KERN_INFO, "ioh at 0x%08lx, mapped to 0x%p, size %uk\n",
		   ioh->base_phys, ioh->base, ioh->size / 1024);
	return 0;

err_rh_attach_region:
	rh_destroy(ioh->rheap);
err_rh_create:
	iounmap(ioh->base);
err:
	return error;
}
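/*
 * A hedged sketch of a matching teardown path, assuming the driver keeps a
 * starlet_ioh instance around and unwinds in the reverse order of
 * starlet_ioh_init(). The function name is an assumption for illustration;
 * no release_mem_region() is needed because the init path above does not
 * call request_mem_region().
 */
static void starlet_ioh_exit(struct starlet_ioh *ioh)
{
	if (!ioh)
		return;

	/* drop the rheap bookkeeping first, then the mapping itself */
	rh_destroy(ioh->rheap);
	iounmap(ioh->base);

	ioh->rheap = NULL;
	ioh->base = NULL;
}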