/*
 * of_amba_device_create() - allocate and register an AMBA (primecell) device
 * described by a device-tree node.
 *
 * @node:          device-tree node describing the primecell
 * @bus_id:        name for the device, or NULL to derive one from @node
 * @platform_data: platform data attached to the new device
 * @parent:        parent for the new struct device
 *
 * Returns the registered amba_device, or NULL on failure (node disabled,
 * allocation failure, missing address resource, or registration failure).
 */
static struct amba_device *of_amba_device_create(struct device_node *node, const char *bus_id, void *platform_data, struct device *parent)
{
	struct amba_device *dev;
	const void *prop;
	int i, ret;

	pr_debug("Creating amba device %s\n", node->full_name);

	/* nodes whose status is not "okay"/"ok" are skipped entirely */
	if (!of_device_is_available(node))
		return NULL;

	dev = amba_device_alloc(NULL, 0, 0);
	if (!dev) {
		pr_err("%s(): amba_device_alloc() failed for %s\n",
		       __func__, node->full_name);
		return NULL;
	}

	/* setup generic device info */
	dev->dev.coherent_dma_mask = ~0;
	dev->dev.of_node = of_node_get(node);	/* hold a node ref for the device's lifetime */
	dev->dev.parent = parent;
	dev->dev.platform_data = platform_data;
	if (bus_id)
		dev_set_name(&dev->dev, "%s", bus_id);
	else
		of_device_make_bus_id(&dev->dev);

	/* Allow the HW Peripheral ID to be overridden */
	prop = of_get_property(node, "arm,primecell-periphid", NULL);
	if (prop)
		dev->periphid = of_read_ulong(prop, 1);

	/* Decode the IRQs and address ranges */
	for (i = 0; i < AMBA_NR_IRQS; i++)
		dev->irq[i] = irq_of_parse_and_map(node, i);

	ret = of_address_to_resource(node, 0, &dev->res);
	if (ret) {
		pr_err("%s(): of_address_to_resource() failed (%d) for %s\n",
		       __func__, ret, node->full_name);
		goto err_free;
	}

	ret = amba_device_add(dev, &iomem_resource);
	if (ret) {
		pr_err("%s(): amba_device_add() failed (%d) for %s\n",
		       __func__, ret, node->full_name);
		goto err_free;
	}

	return dev;

err_free:
	/* drops the device's initial reference, which frees it */
	amba_device_put(dev);
	return NULL;
}
static struct amba_device *of_amba_device_create(struct device_node *node, const char *bus_id, void *platform_data, struct device *parent) { struct amba_device *dev; const void *prop; int i, ret; pr_debug("Creating amba device %s\n", node->full_name); if (!of_device_is_available(node)) return NULL; dev = amba_device_alloc(NULL, 0, 0); if (!dev) return NULL; dev->dev.coherent_dma_mask = ~0; dev->dev.of_node = of_node_get(node); dev->dev.parent = parent; dev->dev.platform_data = platform_data; if (bus_id) dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); dev->dma_mask = ~0; prop = of_get_property(node, "arm,primecell-periphid", NULL); if (prop) dev->periphid = of_read_ulong(prop, 1); for (i = 0; i < AMBA_NR_IRQS; i++) dev->irq[i] = irq_of_parse_and_map(node, i); ret = of_address_to_resource(node, 0, &dev->res); if (ret) goto err_free; ret = amba_device_add(dev, &iomem_resource); if (ret) goto err_free; return dev; err_free: amba_device_put(dev); return NULL; }
/*
 * Prepare a netX 51/52 DPM for use: pick up the optional initial window-0
 * offset from the device tree, then mask all interrupt sources.
 */
static void netx5152_init(struct uio_info *info)
{
	struct fsl_elbc_gpcm *priv = info->priv;
	unsigned long offset = DPM_HOST_WIN0_OFFSET;
	const void *prop;

	/* an optional property overrides the default win0 offset */
	prop = of_get_property(priv->dev->of_node,
			       "netx5152,init-win0-offset", NULL);
	if (prop)
		offset = of_read_ulong(prop, 1);

	/* disable interrupts */
	iowrite32(0, info->mem[0].internal_addr + offset + DPM_HOST_INT_EN0);
}
/**
 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
 * @dt: virtual address pointer to dt blob
 *
 * If a dtb was passed to the kernel, then use it to choose the correct
 * machine_desc and to setup the system.
 */
const struct machine_desc * __init setup_machine_fdt(void *dt)
{
	const struct machine_desc *mach;
	const void *freq_prop;
	unsigned long root;
	int prop_len;

	if (!early_init_dt_scan(dt))
		return NULL;

	mach = of_flat_dt_match_machine(NULL, arch_get_next_mach);
	if (!mach)
		machine_halt();

	/* pick up an optional clock-frequency from the root node */
	root = of_get_flat_dt_root();
	freq_prop = of_get_flat_dt_prop(root, "clock-frequency", &prop_len);
	if (freq_prop)
		arc_set_core_freq(of_read_ulong(freq_prop, prop_len / 4));

	return mach;
}
/*
 * Flat-DT scan callback: derive the KIO physical base from the "ranges"
 * property of a top-level "simple-bus" node.
 */
static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
				    int depth, void *data)
{
	const __be32 *ranges;
	int len;

	/* only top-level simple-bus nodes are of interest */
	if (depth > 1 || !of_flat_dt_is_compatible(node, "simple-bus"))
		return 0;

	ranges = of_get_flat_dt_prop(node, "ranges", &len);
	if (!ranges || !len)
		return 1;

	/* second cell, rounded down to the nearest 256MB boundary */
	xtensa_kio_paddr = of_read_ulong(ranges + 1, 1) & 0xf0000000;

	return 1;
}
int dev_ion_probe(struct platform_device *pdev) { int err; int i; struct resource *res; struct device_node *of_node = pdev->dev.of_node; const void *name; int offset,size; num_heaps = 1; my_ion_heap[0].type = ION_HEAP_TYPE_SYSTEM; my_ion_heap[0].id = ION_HEAP_TYPE_SYSTEM; my_ion_heap[0].name = "vmalloc_ion"; #if 0 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); #else res = &memobj; i = find_reserve_block(of_node->name,0); if(i < 0){ name = of_get_property(of_node, "share-memory-name", NULL); if(!name) { printk("\ndev_ion memory resource undefined1.\n"); return -EFAULT; } else { i= find_reserve_block_by_name((char *)name); if(i<0) { printk("\ndev_ion memory resource undefined2.\n"); return -EFAULT; } name = of_get_property(of_node, "share-memory-offset", NULL); if(name) offset = of_read_ulong(name,1); else { printk("\ndev_ion memory resource undefined3.\n"); return -EFAULT; } name = of_get_property(of_node, "share-memory-size", NULL); if(name) size = of_read_ulong(name,1); else { printk("\ndev_ion memory resource undefined4.\n"); return -EFAULT; } res->start = (phys_addr_t)get_reserve_block_addr(i)+offset; res->end = res->start+ size-1; } } else { res->start = (phys_addr_t)get_reserve_block_addr(i); res->end = res->start+ (phys_addr_t)get_reserve_block_size(i)-1; } #endif if (res) { num_heaps = 2; my_ion_heap[1].type = ION_HEAP_TYPE_CARVEOUT;//ION_HEAP_TYPE_CHUNK;//ION_HEAP_TYPE_CARVEOUT; my_ion_heap[1].id = ION_HEAP_TYPE_CARVEOUT; my_ion_heap[1].name = "carveout_ion"; my_ion_heap[1].base = (ion_phys_addr_t) res->start; my_ion_heap[1].size = res->end - res->start + 1; } heaps = kzalloc(sizeof(struct ion_heap *) * num_heaps, GFP_KERNEL); idev = ion_device_create(NULL); if (IS_ERR_OR_NULL(idev)) { kfree(heaps); panic(0); return PTR_ERR(idev); } /* create the heaps as specified in the board file */ for (i = 0; i < num_heaps; i++) { heaps[i] = ion_heap_create(&my_ion_heap[i]); if (IS_ERR_OR_NULL(heaps[i])) { err = PTR_ERR(heaps[i]); goto err; } 
ion_device_add_heap(idev, heaps[i]); dprintk(2, "add heap type:%d id:%d\n", my_ion_heap[i].type, my_ion_heap[i].id); } platform_set_drvdata(pdev, idev); return 0; err: for (i = 0; i < num_heaps; i++) { if (heaps[i]) ion_heap_destroy(heaps[i]); } kfree(heaps); panic(0); return err; }
/*
 * Scan the Firmware Assisted dump configuration details.
 *
 * Flat-DT scan callback for the "/rtas" node: records fadump support,
 * whether a dump is already active, and the per-section size requirements.
 *
 * Fix: 'of_read_ulong(&sections[1], 2)' had been corrupted to
 * 'of_read_ulong(§ions[1], 2)' (an '&sect;' HTML-entity mangling of
 * '&sections'), which does not compile; restored in both switch cases.
 */
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data)
{
	const __be32 *sections;
	int i, num_sections;
	int size;
	const __be32 *token;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	/*
	 * Check if Firmware Assisted dump is supported. if yes, check
	 * if dump has been initiated on last reboot.
	 */
	token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL);
	if (!token)
		return 1;

	fw_dump.fadump_supported = 1;
	fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);

	/*
	 * The 'ibm,kernel-dump' rtas node is present only if there is
	 * dump data waiting for us.
	 */
	fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL);
	if (fdm_active)
		fw_dump.dump_active = 1;

	/* Get the sizes required to store dump data for the firmware provided
	 * dump sections.
	 * For each dump section type supported, a 32bit cell which defines
	 * the ID of a supported section followed by two 32 bit cells which
	 * gives the size of the section in bytes.
	 */
	sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
				       &size);
	if (!sections)
		return 1;

	num_sections = size / (3 * sizeof(u32));

	for (i = 0; i < num_sections; i++, sections += 3) {
		u32 type = (u32)of_read_number(sections, 1);

		switch (type) {
		case FADUMP_CPU_STATE_DATA:
			/* the size is a 64-bit (2-cell) value at cells 1..2 */
			fw_dump.cpu_state_data_size = of_read_ulong(&sections[1], 2);
			break;
		case FADUMP_HPTE_REGION:
			fw_dump.hpte_region_size = of_read_ulong(&sections[1], 2);
			break;
		}
	}
	return 1;
}
int caam_secvio_startup(struct platform_device *pdev) { struct device *ctrldev, *svdev; struct caam_drv_private *ctrlpriv; struct caam_drv_private_secvio *svpriv; struct platform_device *svpdev; struct device_node *np; const void *prop; int i, error, secvio_inten_src; ctrldev = &pdev->dev; ctrlpriv = dev_get_drvdata(ctrldev); /* * Set up the private block for secure memory * Only one instance is possible */ svpriv = kzalloc(sizeof(struct caam_drv_private_secvio), GFP_KERNEL); if (svpriv == NULL) { dev_err(ctrldev, "can't alloc private mem for secvio\n"); return -ENOMEM; } svpriv->parentdev = ctrldev; /* Create the security violation dev */ #ifdef CONFIG_OF np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio"); if (!np) return -ENODEV; ctrlpriv->secvio_irq = of_irq_to_resource(np, 0, NULL); prop = of_get_property(np, "secvio_src", NULL); if (prop) secvio_inten_src = of_read_ulong(prop, 1); else secvio_inten_src = HP_SECVIO_INTEN_ALL; printk(KERN_ERR "secvio_inten_src = %x\n", secvio_inten_src); svpdev = of_platform_device_create(np, NULL, ctrldev); if (!svpdev) return -ENODEV; #else svpdev = platform_device_register_data(ctrldev, "caam_secvio", 0, svpriv, sizeof(struct caam_drv_private_secvio)); secvio_inten_src = HP_SECVIO_INTEN_ALL; #endif if (svpdev == NULL) { kfree(svpriv); return -EINVAL; } svdev = &svpdev->dev; dev_set_drvdata(svdev, svpriv); ctrlpriv->secviodev = svdev; svpriv->svregs = ctrlpriv->snvs; /* * Now we have all the dev data set up. 
Init interrupt * source descriptions */ for (i = 0; i < MAX_SECVIO_SOURCES; i++) { svpriv->intsrc[i].intname = violation_src_name[i]; svpriv->intsrc[i].handler = caam_secvio_default; } /* Connect main handler */ for_each_possible_cpu(i) tasklet_init(&svpriv->irqtask[i], caam_secvio_dispatch, (unsigned long)svdev); error = request_irq(ctrlpriv->secvio_irq, caam_secvio_interrupt, IRQF_SHARED, "caam_secvio", svdev); if (error) { dev_err(svdev, "can't connect secvio interrupt\n"); irq_dispose_mapping(ctrlpriv->secvio_irq); ctrlpriv->secvio_irq = 0; return -EINVAL; } /* Enable all sources */ wr_reg32(&svpriv->svregs->hp.secvio_int_ctl, secvio_inten_src); dev_info(svdev, "security violation service handlers armed\n"); return 0; }
/** * of_lm_device_create - Alloc, initialize and register an of_device * @np: pointer to node to create device for * @bus_id: name to assign device * @parent: Linux device model parent device. * * Returns pointer to created lm device, or NULL if a device was not * registered. Unavailable devices will not get registered. */ static struct lm_device *of_lm_device_create(struct device_node *node, const char *bus_id, void *platform_data, struct device *parent) { struct lm_device *dev; const void *prop; // struct resource *res, temp_res; //int ret,id,irq; int ret,id; pr_debug("Creating of lm device %s\n", node->full_name); if (!of_device_is_available(node)) return NULL; /* Allow the HW Peripheral ID to be overridden */ prop = of_get_property(node, "lm-periph-id", NULL); if (prop) id = of_read_ulong(prop, 1); else id = -1; // root dev dev = kzalloc(sizeof(struct lm_device),GFP_KERNEL); if (!dev){ printk(KERN_ERR "out of memory to alloc lm device\n"); return NULL; } /* prop = of_get_property(node, "irq", NULL); if (prop) irq = of_read_ulong(prop, 1); else{ irq = 0; } printk(KERN_ERR " --- irq: %d\n",irq); */ /* setup generic device info */ dev->id = id; // dev->irq = irq; dev->dev.coherent_dma_mask = ~0; dev->dev.of_node = of_node_get(node); dev->dev.parent = parent; dev->dev.platform_data = platform_data; dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if(id >= 0) dev_set_name(&dev->dev,"lm%d", dev->id); else dev_set_name(&dev->dev,"lm-root"); /* setup lm-specific device info */ dev->dma_mask_room = DMA_BIT_MASK(32); // ret = of_address_to_resource(node, 0, &dev->resource); // if (ret) // goto err_free; ret = lm_device_register(dev); if (ret) goto err_free; return dev; err_free: put_device(&dev->dev); return NULL; }