static void qe_muram_init(void)
{
	struct device_node *np;
	const u32 *address;
	u64 size;
	unsigned int flags;

	/* initialize the info header */
	rh_init(&qe_muram_info, 1,
		sizeof(qe_boot_muram_rh_block) /
		sizeof(qe_boot_muram_rh_block[0]),
		qe_boot_muram_rh_block);

	/* Attach the usable muram area */
	/* XXX: This is a subset of the available muram. It
	 * varies with the processor and the microcode patches activated.
	 */
	np = of_find_compatible_node(NULL, NULL, "fsl,qe-muram-data");
	if (!np) {
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			WARN_ON(1);
			return;
		}
	}

	address = of_get_address(np, 0, &size, &flags);
	WARN_ON(!address);
	of_node_put(np);
	if (address)
		rh_attach_region(&qe_muram_info, *address, (int)size);
}
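A minimal sketch of the consumer side, assuming the usual qe_muram_alloc()/qe_muram_addr()/qe_muram_free() wrappers that QE drivers use on top of this heap; the 64-byte size, the function name, and the IS_ERR_VALUE() error check style are illustrative, not taken from the code above:

/*
 * Illustrative consumer (not part of the init code above): carve a
 * 64-byte, 64-byte-aligned block out of the muram heap, turn the
 * returned offset into a virtual address, and release it again.
 */
static int qe_muram_example(void)
{
	unsigned long offset;
	void __iomem *pram;

	offset = qe_muram_alloc(64, 64);	/* heap hands back an offset */
	if (IS_ERR_VALUE(offset))
		return -ENOMEM;

	pram = qe_muram_addr(offset);		/* offset -> __iomem address */
	memset_io(pram, 0, 64);			/* device-specific setup */

	qe_muram_free(offset);
	return 0;
}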
int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	u32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	spin_lock_init(&cpm_muram_lock);
	/* initialize the info header */
	rh_init(&cpm_muram_info, 1,
		sizeof(cpm_boot_muram_rh_block) /
		sizeof(cpm_boot_muram_rh_block[0]),
		cpm_boot_muram_rh_block);

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			printk(KERN_ERR "Cannot find CPM muram data node\n");
			ret = -ENODEV;
			goto out;
		}
	}

	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		printk(KERN_ERR "Cannot translate zero through CPM muram node\n");
		ret = -ENODEV;
		goto out;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;
		rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
				 resource_size(&r));
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		printk(KERN_ERR "Cannot map CPM muram\n");
		ret = -ENOMEM;
	}

out:
	of_node_put(np);
	return ret;
}
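For context, the allocator this enables is a thin locked wrapper over rh_alloc(); a sketch along the lines of the kernel's companion cpm_muram_alloc():

/*
 * Sketch of the companion allocator: take the muram spinlock, set the
 * requested alignment on the heap descriptor, and let the rheap find a
 * free block. The return value is an offset into muram, usable with
 * the muram_vbase/muram_pbase mapping established above.
 */
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	cpm_muram_info.alignment = align;
	start = rh_alloc(&cpm_muram_info, size, "commproc");
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return start;
}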
static void cpm2_dpinit(void)
{
	spin_lock_init(&cpm_dpmem_lock);

	/* initialize the info header */
	rh_init(&cpm_dpmem_info, 1,
		sizeof(cpm_boot_dpmem_rh_block) /
		sizeof(cpm_boot_dpmem_rh_block[0]),
		cpm_boot_dpmem_rh_block);

	/* Attach the usable dpmem area */
	/* XXX: This is actually crap. CPM_DATAONLY_BASE and
	 * CPM_DATAONLY_SIZE are only a subset of the available dpram. It
	 * varies with the processor and the microcode patches activated.
	 * But the following should be at least safe.
	 */
	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}
void m8xx_cpm_dpinit(void)
{
	cpm8xx_t *cp = &((immap_t *)IMAP_ADDR)->im_cpm;

	spin_lock_init(&cpm_dpmem_lock);

	/* Initialize the info header */
	rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
		sizeof(cpm_boot_dpmem_rh_block) /
		sizeof(cpm_boot_dpmem_rh_block[0]),
		cpm_boot_dpmem_rh_block);

	/*
	 * Attach the usable dpmem area.
	 * XXX: This is actually crap. CPM_DATAONLY_BASE and
	 * CPM_DATAONLY_SIZE are a subset of the available dpram. It varies
	 * with the processor and the microcode patches applied / activated.
	 * But the following should be at least safe.
	 */
	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
			 CPM_DATAONLY_SIZE);
}
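Both dpinit variants above feed the same allocation path; a sketch of the classic cpm_dpalloc(), using the older rheap flavour in which block addresses are void * offsets (which is why the 8xx code attaches (void *)CPM_DATAONLY_BASE):

/*
 * Sketch: allocate a dual-port RAM block. With the older rheap API the
 * "address" handled by rh_alloc() is really a muram offset cast to
 * void *, so the result is cast back to a plain offset for the caller.
 */
uint cpm_dpalloc(uint size, uint align)
{
	void *start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
	start = rh_alloc(&cpm_dpmem_info, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return (uint)start;
}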
static void qe_muram_init(void)
{
	struct device_node *np;
	const u32 *address;
	u64 size;
	unsigned int flags;

	/* initialize the info header */
	rh_init(&qe_muram_info, 1,
		sizeof(qe_boot_muram_rh_block) /
		sizeof(qe_boot_muram_rh_block[0]),
		qe_boot_muram_rh_block);

	/* Attach the usable muram area */
	/* XXX: This is a subset of the available muram. It
	 * varies with the processor and the microcode patches activated.
	 */
	if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
		address = of_get_address(np, 0, &size, &flags);
		of_node_put(np);
		/* guard the dereference: of_get_address() can return NULL */
		if (address)
			rh_attach_region(&qe_muram_info, *address, (int)size);
	}
}
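When a block must live at a hardware-defined offset rather than wherever the heap finds room, the rheap also provides rh_alloc_fixed(); a hedged sketch, where EXAMPLE_PRAM_OFFSET and the function name are made up for illustration:

/*
 * Illustrative only: EXAMPLE_PRAM_OFFSET is a hypothetical,
 * device-fixed offset. rh_alloc_fixed() reserves exactly that range
 * out of the heap instead of letting the allocator choose a block.
 */
#define EXAMPLE_PRAM_OFFSET	0x1c00

static int qe_reserve_fixed_pram(void)
{
	unsigned long start;

	start = rh_alloc_fixed(&qe_muram_info, EXAMPLE_PRAM_OFFSET, 48,
			       "example");
	if (IS_ERR_VALUE(start))
		return -ENOMEM;

	return 0;
}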
int __init instantiate_cache_sram(struct of_device *dev,
				  struct sram_parameters sram_params)
{
	int ret = 0;

	if (cache_sram) {
		dev_err(&dev->dev, "Already initialized cache-sram\n");
		return -EBUSY;
	}

	cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
	if (!cache_sram) {
		dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
		return -ENOMEM;
	}

	cache_sram->base_phys = sram_params.sram_offset;
	cache_sram->size = sram_params.sram_size;

	if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
				"fsl_85xx_cache_sram")) {
		dev_err(&dev->dev, "%s: request memory failed\n",
			dev->dev.of_node->full_name);
		ret = -ENXIO;
		goto out_free;
	}

	cache_sram->base_virt = ioremap_flags(cache_sram->base_phys,
					      cache_sram->size,
					      _PAGE_COHERENT | PAGE_KERNEL);
	if (!cache_sram->base_virt) {
		dev_err(&dev->dev, "%s: ioremap_flags failed\n",
			dev->dev.of_node->full_name);
		ret = -ENOMEM;
		goto out_release;
	}

	cache_sram->rh = rh_create(sizeof(unsigned int));
	if (IS_ERR(cache_sram->rh)) {
		dev_err(&dev->dev, "%s: Unable to create remote heap\n",
			dev->dev.of_node->full_name);
		ret = PTR_ERR(cache_sram->rh);
		goto out_unmap;
	}

	rh_attach_region(cache_sram->rh, 0, cache_sram->size);
	spin_lock_init(&cache_sram->lock);

	dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
		 (unsigned long long)cache_sram->base_phys, cache_sram->size);

	return 0;

out_unmap:
	iounmap(cache_sram->base_virt);

out_release:
	release_mem_region(cache_sram->base_phys, cache_sram->size);

out_free:
	kfree(cache_sram);
	cache_sram = NULL;	/* don't leave a dangling pointer behind */
	return ret;
}
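A minimal usage sketch for the heap built above, via the driver's exported mpc85xx_cache_sram_alloc()/mpc85xx_cache_sram_free() pair; the function name, size, and alignment are arbitrary:

/*
 * Illustrative only: grab a 1 KiB, 32-byte-aligned buffer from the
 * cache-sram heap. The allocator returns a kernel virtual address and
 * reports the matching physical address for DMA-style users.
 */
static int cache_sram_example(void)
{
	phys_addr_t phys;
	void *buf;

	buf = mpc85xx_cache_sram_alloc(1024, &phys, 32);
	if (!buf)
		return -ENOMEM;

	memset(buf, 0, 1024);
	mpc85xx_cache_sram_free(buf);
	return 0;
}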
int bcom_sram_init(struct device_node *sram_node, char *owner)
{
	int rv;
	const u32 *regaddr_p;
	u64 regaddr64, size64;
	unsigned int psize;

	/* Create our state struct */
	if (bcom_sram) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Already initialized !\n", owner);
		return -EBUSY;
	}

	bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
	if (!bcom_sram) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Couldn't allocate internal state !\n", owner);
		return -ENOMEM;
	}

	/* Get address and size of the sram */
	regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
	if (!regaddr_p) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Invalid device node !\n", owner);
		rv = -EINVAL;
		goto error_free;
	}

	regaddr64 = of_translate_address(sram_node, regaddr_p);

	bcom_sram->base_phys = (phys_addr_t) regaddr64;
	bcom_sram->size = (unsigned int) size64;

	/* Request region */
	if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Couldn't request region !\n", owner);
		rv = -EBUSY;
		goto error_free;
	}

	/* Map SRAM */
	/* sram is not really __iomem */
	bcom_sram->base_virt = (void *) ioremap(bcom_sram->base_phys,
						bcom_sram->size);
	if (!bcom_sram->base_virt) {
		printk(KERN_ERR "%s: bcom_sram_init: "
			"Map error SRAM zone 0x%08lx (0x%0x)!\n",
			owner, (long)bcom_sram->base_phys, bcom_sram->size);
		rv = -ENOMEM;
		goto error_release;
	}

	/* Create an rheap (defaults to 32 bits word alignment) */
	bcom_sram->rh = rh_create(4);

	/* Attach the free zones */
#if 0
	/* Currently disabled ... for future use only */
	regaddr_p = of_get_property(sram_node, "available", &psize);
#else
	regaddr_p = NULL;
	psize = 0;
#endif

	if (!regaddr_p || !psize) {
		/* Attach the whole zone */
		rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
	} else {
		/* Attach each zone independently */
		while (psize >= 2 * sizeof(u32)) {
			phys_addr_t zbase = of_translate_address(sram_node,
								 regaddr_p);
			rh_attach_region(bcom_sram->rh,
					 zbase - bcom_sram->base_phys,
					 regaddr_p[1]);
			regaddr_p += 2;
			psize -= 2 * sizeof(u32);
		}
	}

	/* Init our spinlock */
	spin_lock_init(&bcom_sram->lock);

	return 0;

error_release:
	release_mem_region(bcom_sram->base_phys, bcom_sram->size);
error_free:
	kfree(bcom_sram);
	bcom_sram = NULL;

	return rv;
}
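A usage sketch, assuming the driver's bcom_sram_alloc()/bcom_sram_free() helpers, which wrap the rheap alloc/free calls under bcom_sram->lock; the function name and sizes are illustrative:

/*
 * Illustrative only: allocate a 256-byte, 4-byte-aligned chunk for a
 * BestComm task. The physical address reported through *pa is what the
 * DMA engine is given; the virtual address is for the CPU side.
 */
static int bcom_sram_example(void)
{
	phys_addr_t pa;
	void *va;

	va = bcom_sram_alloc(256, 4, &pa);
	if (!va)
		return -ENOMEM;

	memset(va, 0, 256);
	bcom_sram_free(va);
	return 0;
}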
static int starlet_ioh_init(struct starlet_ioh *ioh, struct resource *mem)
{
	size_t size = mem->end - mem->start + 1;
	rh_info_t *rheap;
	int error = -ENOMEM;

	ioh->base = ioremap_prot(mem->start, size, _PAGE_GUARDED);
	if (!ioh->base) {
		drv_printk(KERN_ERR, "unable to ioremap ioh area\n");
		goto err;
	}
	ioh->base_phys = mem->start;
	ioh->size = size;

	{
		void *first = NULL, *last = NULL;
		u32 *p;

		p = ioh->base + size;
		do {
			p--;
			*p = 0xdeadbabe;
		} while (p != ioh->base);
		__dma_sync(ioh->base, size, DMA_TO_DEVICE);

		p = ioh->base + size;
		do {
			p--;
			if (*p != 0xdeadbabe) {
				if (!last)
					last = p;
				first = p;
			}
		} while (p != ioh->base);

		if (first)
			drv_printk(KERN_INFO, "unreliable writes from"
				   " %p to %p\n", first, last);
	}

	rheap = rh_create(STARLET_IOH_ALIGN + 1);
	if (IS_ERR(rheap)) {
		error = PTR_ERR(rheap);
		goto err_rh_create;
	}
	ioh->rheap = rheap;

	error = rh_attach_region(rheap, 0, size);
	if (error)
		goto err_rh_attach_region;

	spin_lock_init(&ioh->lock);

	drv_printk(KERN_INFO, "ioh at 0x%08lx, mapped to 0x%p, size %uk\n",
		   ioh->base_phys, ioh->base, ioh->size / 1024);
	return 0;

err_rh_attach_region:
	/* unwind in reverse order: drop the heap, then the mapping */
	rh_destroy(ioh->rheap);
err_rh_create:
	iounmap(ioh->base);
err:
	return error;
}
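A sketch of an allocation from the ioh heap, assuming the unsigned-long-offset flavour of the rheap API; starlet_ioh_example() and the 32-byte size are made up for illustration:

/*
 * Illustrative only: take the ioh spinlock, ask the rheap for a block,
 * and convert the returned offset into an address within the mapping
 * set up by starlet_ioh_init() above.
 */
static void *starlet_ioh_example(struct starlet_ioh *ioh)
{
	unsigned long offset;
	unsigned long flags;

	spin_lock_irqsave(&ioh->lock, flags);
	offset = rh_alloc(ioh->rheap, 32, "starlet-ioh");
	spin_unlock_irqrestore(&ioh->lock, flags);

	if (IS_ERR_VALUE(offset))
		return NULL;

	return ioh->base + offset;	/* offset -> mapped virtual address */
}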