static void get_cells(struct device_node *dp, int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dp);
	if (sizec)
		*sizec = of_n_size_cells(dp);
}
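This helper simply hands the of_n_addr_cells()/of_n_size_cells() results to callers that then decode fixed-width properties with of_read_number(). Below is a minimal sketch of that pattern, assuming it sits in the same file; example_read_first_reg() is an illustrative name, not part of the original code.

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical caller: decode the first "reg" entry of @np using the
 * cell counts that apply to it (inherited from its parent bus node). */
static int example_read_first_reg(struct device_node *np, u64 *addr, u64 *size)
{
	int addrc, sizec, len;
	const __be32 *reg;

	get_cells(np, &addrc, &sizec);

	reg = of_get_property(np, "reg", &len);
	if (!reg || len < (addrc + sizec) * (int)sizeof(*reg))
		return -EINVAL;

	*addr = of_read_number(reg, addrc);
	*size = of_read_number(reg + addrc, sizec);
	return 0;
}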
Example #2
void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
		unsigned long *busno, unsigned long *phys, unsigned long *size)
{
	const u32 *dma_window;
	u32 cells;
	const unsigned char *prop;

	dma_window = dma_window_prop;

	/* busno is always one cell */
	*busno = *(dma_window++);

	prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
	if (!prop)
		prop = of_get_property(dn, "#address-cells", NULL);

	cells = prop ? *(u32 *)prop : of_n_addr_cells(dn);
	*phys = of_read_number(dma_window, cells);

	dma_window += cells;

	prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
	cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
	*size = of_read_number(dma_window, cells);
}
Example #3
static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}
Example #4
static void hwadp_dts_register_base_addr(void)
{
	struct device_node *node = NULL, *np = NULL;
	int retval = 0;
	const char * nmi_node = "hisilicon,mdrv_nmi_interrupt_regs";
	unsigned int nmi_regs_val[NMI_INT_CORE_NUM] = {0,};

	node = of_find_compatible_node(NULL, NULL, "hisilicon,hardware_adapt");
	if (!node) {
		hwadp_printf("dts node not found!\n");
		return;
	}

	for_each_available_child_of_node(node, np)
	{
		unsigned int ip_type = ~0U, int_type = ~0U, irq_num = ~0U;
		int na = 0, ns = 0, len = 0;
		unsigned long base_addr = 0;
		const __be32 *prop = NULL;

		if(!np->name || !strlen(np->name))
			continue;

		/* try to obtain ip base address */
		na = of_n_addr_cells(np);
		ns = of_n_size_cells(np);
		prop = of_get_property(np, "reg", &len);
		retval = of_property_read_u32(np, "ip_type", &ip_type);
		if(prop && (len == (na + ns) * (int)sizeof(*prop)))
		{
			base_addr = (unsigned long)of_read_number(prop, na);
			if(base_addr && !(base_addr & ((0x1U << 12) - 1)))
				(void)of_iomap(np, 0);
			else
				hwadp_printf("hwad:reg addr = 0x%X Address of zero or that ones not aligned for page are not map!\n",base_addr);
			if(retval)
				ip_type = ~0U;
			retval = bsp_hwadp_register_base_addr(ip_type, (const char *)np->name, (void *)base_addr);
			if(retval)
				hwadp_printf("hwadp:failed to register base addr!ip_type=%d base_addr=%p name=%s\n",(int)ip_type, (void*)base_addr,np->name);
			continue;
		}

		/* try to obtain irq number */
		retval = of_property_read_u32_index(np, "interrupts", 1, &irq_num);
		if(retval)
			continue;

		retval = of_property_read_u32(np, "int_type", &int_type);
		if(retval)
			int_type = ~0U;

		if(bsp_hwadp_register_irq_num(int_type, (const char *)np->name, irq_num))
			hwadp_printf("hwadp:failed to register irq number!int_type=%d irq_num=%d name=%s\n", (int)int_type, irq_num, np->name);
	}
}
Example #5
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
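The cell counts obtained here are then used to step through each memory node's "reg" property. A hedged sketch of that follow-up step is shown below, assuming the same file; example_walk_memory_regs() and its pr_debug output are illustrative, not the original NUMA code.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative only: iterate <base size> pairs in every /memory node
 * using the widths reported by get_n_mem_cells(). */
static void __init example_walk_memory_regs(void)
{
	struct device_node *memory;
	const __be32 *reg, *end;
	int n_addr_cells, n_size_cells, len;

	get_n_mem_cells(&n_addr_cells, &n_size_cells);

	for_each_node_by_type(memory, "memory") {
		reg = of_get_property(memory, "reg", &len);
		if (!reg)
			continue;
		end = reg + len / sizeof(*reg);

		while (reg + n_addr_cells + n_size_cells <= end) {
			u64 base = of_read_number(reg, n_addr_cells);
			u64 size = of_read_number(reg + n_addr_cells,
						  n_size_cells);

			reg += n_addr_cells + n_size_cells;
			pr_debug("memory: base=%llx size=%llx\n",
				 (unsigned long long)base,
				 (unsigned long long)size);
		}
	}
}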
Example #6
/**
 * of_get_dma_window - Parse *dma-window property and returns 0 if found.
 *
 * @dn: device node
 * @prefix: prefix for property name if any
 * @index: index to start to parse
 * @busno: Returns busno if supported. Otherwise pass NULL
 * @addr: Returns address that DMA starts
 * @size: Returns the range that DMA can handle
 *
 * This supports different formats flexibly. "prefix" can be
 * configured if any. "busno" and "index" are optionally
 * specified. Set 0(or NULL) if not used.
 */
int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
		      unsigned long *busno, dma_addr_t *addr, size_t *size)
{
	const __be32 *dma_window, *end;
	int bytes, cur_index = 0;
	char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];

	if (!dn || !addr || !size)
		return -EINVAL;

	if (!prefix)
		prefix = "";

	snprintf(propname, sizeof(propname), "%sdma-window", prefix);
	snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
	snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);

	dma_window = of_get_property(dn, propname, &bytes);
	if (!dma_window)
		return -ENODEV;
	end = dma_window + bytes / sizeof(*dma_window);

	while (dma_window < end) {
		u32 cells;
		const void *prop;

		/* busno is one cell if supported */
		if (busno)
			*busno = be32_to_cpup(dma_window++);

		prop = of_get_property(dn, addrname, NULL);
		if (!prop)
			prop = of_get_property(dn, "#address-cells", NULL);

		cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
		if (!cells)
			return -EINVAL;
		*addr = of_read_number(dma_window, cells);
		dma_window += cells;

		prop = of_get_property(dn, sizename, NULL);
		cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
		if (!cells)
			return -EINVAL;
		*size = of_read_number(dma_window, cells);
		dma_window += cells;

		if (cur_index++ == index)
			break;
	}
	return 0;
}
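A hedged sketch of a caller for the routine above follows; the "ibm,my-" property prefix and the wrapper name are invented for illustration, and the of_get_dma_window() prototype is assumed to be visible to the caller.

#include <linux/of.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative caller only: read the first window of a made-up
 * "ibm,my-dma-window" property on @dn. */
static int example_query_dma_window(struct device_node *dn)
{
	unsigned long busno = 0;
	dma_addr_t addr;
	size_t size;
	int ret;

	ret = of_get_dma_window(dn, "ibm,my-", 0, &busno, &addr, &size);
	if (ret)
		return ret;

	pr_info("dma-window: bus %lu addr %pad size %zu\n",
		busno, &addr, size);
	return 0;
}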
Example #7
static int parse_mc_ranges(struct device *dev,
			   int *paddr_cells,
			   int *mc_addr_cells,
			   int *mc_size_cells,
			   const __be32 **ranges_start,
			   u8 *num_ranges)
{
	const __be32 *prop;
	int range_tuple_cell_count;
	int ranges_len;
	int tuple_len;
	struct device_node *mc_node = dev->of_node;

	*ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
	if (!(*ranges_start) || !ranges_len) {
		dev_warn(dev,
			 "missing or empty ranges property for device tree node '%s'\n",
			 mc_node->name);

		*num_ranges = 0;
		return 0;
	}

	*paddr_cells = of_n_addr_cells(mc_node);

	prop = of_get_property(mc_node, "#address-cells", NULL);
	if (prop)
		*mc_addr_cells = be32_to_cpup(prop);
	else
		*mc_addr_cells = *paddr_cells;

	prop = of_get_property(mc_node, "#size-cells", NULL);
	if (prop)
		*mc_size_cells = be32_to_cpup(prop);
	else
		*mc_size_cells = of_n_size_cells(mc_node);

	range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
				 *mc_size_cells;

	tuple_len = range_tuple_cell_count * sizeof(__be32);
	if (ranges_len % tuple_len != 0) {
		dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
		return -EINVAL;
	}

	*num_ranges = ranges_len / tuple_len;
	return 0;
}
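The counts and the raw pointer produced above describe <mc-addr cpu-addr size> tuples. A minimal sketch of how a caller might walk them, assuming the same file; example_walk_mc_ranges() and its dev_dbg output are illustrative only.

#include <linux/device.h>
#include <linux/of.h>

/* Illustrative only: decode each range tuple with the cell widths
 * reported by parse_mc_ranges(). */
static int example_walk_mc_ranges(struct device *dev)
{
	const __be32 *ranges, *cell;
	int paddr_cells, mc_addr_cells, mc_size_cells;
	u8 num_ranges, i;
	int ret;

	ret = parse_mc_ranges(dev, &paddr_cells, &mc_addr_cells,
			      &mc_size_cells, &ranges, &num_ranges);
	if (ret)
		return ret;

	cell = ranges;
	for (i = 0; i < num_ranges; i++) {
		u64 mc_addr = of_read_number(cell, mc_addr_cells);
		u64 paddr = of_read_number(cell + mc_addr_cells, paddr_cells);
		u64 size = of_read_number(cell + mc_addr_cells + paddr_cells,
					  mc_size_cells);

		dev_dbg(dev, "range %u: mc 0x%llx -> cpu 0x%llx size 0x%llx\n",
			i, (unsigned long long)mc_addr,
			(unsigned long long)paddr,
			(unsigned long long)size);
		cell += mc_addr_cells + paddr_cells + mc_size_cells;
	}
	return 0;
}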
Example #8
static int read_phys_addr(struct device_node *np, char *prop_name,
			struct cxl_afu *afu)
{
	int i, len, entry_size, naddr, nsize, type;
	u64 addr, size;
	const __be32 *prop;

	naddr = of_n_addr_cells(np);
	nsize = of_n_size_cells(np);

	prop = of_get_property(np, prop_name, &len);
	if (prop) {
		entry_size = naddr + nsize;
		for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) {
			type = be32_to_cpu(prop[0]);
			addr = of_read_number(prop, naddr);
			size = of_read_number(&prop[naddr], nsize);
			switch (type) {
			case 0: /* unit address */
				afu->guest->handle = addr;
				break;
			case 1: /* p2 area */
				afu->guest->p2n_phys += addr;
				afu->guest->p2n_size = size;
				break;
			case 2: /* problem state area */
				afu->psn_phys += addr;
				afu->adapter->ps_size = size;
				break;
			default:
				pr_err("Invalid address type %d found in %s property of AFU\n",
					type, prop_name);
				return -EINVAL;
			}
			if (cxl_verbose)
				pr_info("%s: %#x %#llx (size %#llx)\n",
					prop_name, type, addr, size);
		}
	}
	return 0;
}
Example #9
static int mnemosyne_parse_dt(struct device_node *node)
{
	phys_addr_t phys = 0, size = 0;
	struct device_node *pnode = NULL;
	int ret;

	pr_info("%s: init from device tree.\n", MNEMOSYNE_MODULE_NAME);

	if (node == NULL) {
		pr_err("%s: Can't find device_node", MNEMOSYNE_MODULE_NAME);
		ret = -ENODEV;
		goto PARSE_DT_ERR_OUT;
	}

	pnode = of_parse_phandle(node, "linux,contiguous-region", 0);
	if (!pnode) {
		pr_err("%s: mnemosyne memory is not reserved.\n", MNEMOSYNE_MODULE_NAME);
	} else {
		const __be32 *prop;
		int addr_cells, size_cells;
		prop = of_get_address(pnode, 0, NULL, NULL);
		if (!prop) {
			pr_err("%s: Can't find register property", MNEMOSYNE_MODULE_NAME);
			ret = -ENODEV;
			of_node_put(pnode);
			goto PARSE_DT_ERR_OUT;
		}
		addr_cells = of_n_addr_cells(pnode);
		size_cells = of_n_size_cells(pnode);

		phys = of_read_number(prop, addr_cells);
		size = of_read_number(prop + addr_cells, size_cells);
		of_node_put(pnode);
	}

	ret = mnemosyne_setup(phys, size);

PARSE_DT_ERR_OUT:
	return ret;
}
Example #10
static int __devinit of_flash_probe(struct platform_device *dev,
				    const struct of_device_id *match)
{
#ifdef CONFIG_MTD_PARTITIONS
	const char **part_probe_types;
#endif
	struct device_node *dp = dev->dev.of_node;
	struct resource res;
	struct of_flash *info;
	const char *probe_type = match->data;
	const u32 *width;
	int err;
	int i;
	int count;
	const u32 *p;
	int reg_tuple_size;
	struct mtd_info **mtd_list = NULL;
	resource_size_t res_size;

	reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);

	/*
	 * Get number of "reg" tuples. Scan for MTD devices on area's
	 * described by each "reg" region. This makes it possible (including
	 * the concat support) to support the Intel P30 48F4400 chips which
	 * consists internally of 2 non-identical NOR chips on one die.
	 */
	p = of_get_property(dp, "reg", &count);
	if (!p || count % reg_tuple_size != 0) {
		dev_err(&dev->dev, "Malformed reg property on %s\n",
				dev->dev.of_node->full_name);
		err = -EINVAL;
		goto err_flash_remove;
	}
	count /= reg_tuple_size;

	err = -ENOMEM;
	info = kzalloc(sizeof(struct of_flash) +
		       sizeof(struct of_flash_list) * count, GFP_KERNEL);
	if (!info)
		goto err_flash_remove;

	dev_set_drvdata(&dev->dev, info);

	mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
	if (!mtd_list)
		goto err_flash_remove;

	for (i = 0; i < count; i++) {
		err = -ENXIO;
		if (of_address_to_resource(dp, i, &res)) {
			dev_err(&dev->dev, "Can't get IO address from device"
				" tree\n");
			goto err_out;
		}

		dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
			(unsigned long long)res.start,
			(unsigned long long)res.end);

		err = -EBUSY;
		res_size = resource_size(&res);
		info->list[i].res = request_mem_region(res.start, res_size,
						       dev_name(&dev->dev));
		if (!info->list[i].res)
			goto err_out;

		err = -ENXIO;
		width = of_get_property(dp, "bank-width", NULL);
		if (!width) {
			dev_err(&dev->dev, "Can't get bank width from device"
				" tree\n");
			goto err_out;
		}

		info->list[i].map.name = dev_name(&dev->dev);
		info->list[i].map.phys = res.start;
		info->list[i].map.size = res_size;
		info->list[i].map.bankwidth = *width;

		err = -ENOMEM;
		info->list[i].map.virt = ioremap(info->list[i].map.phys,
						 info->list[i].map.size);
		if (!info->list[i].map.virt) {
			dev_err(&dev->dev, "Failed to ioremap() flash"
				" region\n");
			goto err_out;
		}

		simple_map_init(&info->list[i].map);

		if (probe_type) {
			info->list[i].mtd = do_map_probe(probe_type,
							 &info->list[i].map);
		} else {
			info->list[i].mtd = obsolete_probe(dev,
							   &info->list[i].map);
		}
		mtd_list[i] = info->list[i].mtd;

		err = -ENXIO;
		if (!info->list[i].mtd) {
			dev_err(&dev->dev, "do_map_probe() failed\n");
			goto err_out;
		} else {
			info->list_size++;
		}
		info->list[i].mtd->owner = THIS_MODULE;
		info->list[i].mtd->dev.parent = &dev->dev;
	}

	err = 0;
	if (info->list_size == 1) {
		info->cmtd = info->list[0].mtd;
	} else if (info->list_size > 1) {
		/*
		 * We detected multiple devices. Concatenate them together.
		 */
#ifdef CONFIG_MTD_CONCAT
		info->cmtd = mtd_concat_create(mtd_list, info->list_size,
					       dev_name(&dev->dev));
		if (info->cmtd == NULL)
			err = -ENXIO;
#else
		printk(KERN_ERR "physmap_of: multiple devices "
		       "found but MTD concat support disabled.\n");
		err = -ENXIO;
#endif
	}
	if (err)
		goto err_out;

#ifdef CONFIG_MTD_PARTITIONS
	part_probe_types = of_get_probes(dp);
	err = parse_mtd_partitions(info->cmtd, part_probe_types,
				   &info->parts, 0);
	if (err < 0) {
		of_free_probes(part_probe_types);
		goto err_out;
	}
	of_free_probes(part_probe_types);

#ifdef CONFIG_MTD_OF_PARTS
	if (err == 0) {
		err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
		if (err < 0)
			goto err_out;
	}
#endif

	if (err == 0) {
		err = parse_obsolete_partitions(dev, info, dp);
		if (err < 0)
			goto err_out;
	}

	if (err > 0)
		add_mtd_partitions(info->cmtd, info->parts, err);
	else
#endif
		add_mtd_device(info->cmtd);

	kfree(mtd_list);

	return 0;

err_out:
	kfree(mtd_list);
err_flash_remove:
	of_flash_remove(dev);

	return err;
}
Example #11
static int parse_ofpart_partitions(struct mtd_info *master,
				   struct mtd_partition **pparts,
				   struct mtd_part_parser_data *data)
{
	struct device_node *node;
	const char *partname;
	struct device_node *pp;
	int nr_parts, i;


	if (!data)
		return 0;

	node = data->of_node;
	if (!node)
		return 0;

	/* First count the subnodes */
	nr_parts = 0;
	for_each_child_of_node(node,  pp) {
		if (node_has_compatible(pp))
			continue;

		nr_parts++;
	}

	if (nr_parts == 0)
		return 0;

	*pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
	if (!*pparts)
		return -ENOMEM;

	i = 0;
	for_each_child_of_node(node,  pp) {
		const __be32 *reg;
		int len;
		int a_cells, s_cells;

		if (node_has_compatible(pp))
			continue;

		reg = of_get_property(pp, "reg", &len);
		if (!reg) {
			nr_parts--;
			continue;
		}

		a_cells = of_n_addr_cells(pp);
		s_cells = of_n_size_cells(pp);
		(*pparts)[i].offset = of_read_number(reg, a_cells);
		(*pparts)[i].size = of_read_number(reg + a_cells, s_cells);

		partname = of_get_property(pp, "label", &len);
		if (!partname)
			partname = of_get_property(pp, "name", &len);
		(*pparts)[i].name = (char *)partname;

		if (of_get_property(pp, "read-only", &len))
			(*pparts)[i].mask_flags |= MTD_WRITEABLE;

		if (of_get_property(pp, "lock", &len))
			(*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;

		i++;
	}

	if (!i) {
		of_node_put(pp);
		pr_err("No valid partition found on %s\n", node->full_name);
		kfree(*pparts);
		*pparts = NULL;
		return -EINVAL;
	}

	return nr_parts;
}
Example #12
static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
	struct platform_device *pdev_parent = of_find_device_by_node(np);
	struct platform_device_info pdevinfo;
	struct of_phandle_args out_irq;
	struct device_node *child;
	struct resource *res = NULL;
	const __be32 *cell;
	int ret = 0, size, i, num;
	u64 addr, addr_size;

	for_each_available_child_of_node(np, child) {
		struct resource *res_iter;
		struct platform_device *new_pdev;

		cell = of_get_property(child, "reg", &size);
		if (!cell) {
			ret = -EINVAL;
			goto out;
		}

		size /= sizeof(*cell);
		num = size /
			(of_n_addr_cells(child) + of_n_size_cells(child)) + 1;

		/* allocate a resource array */
		res = kcalloc(num, sizeof(*res), GFP_KERNEL);
		if (!res) {
			ret = -ENOMEM;
			goto out;
		}

		/* read each reg value */
		i = 0;
		res_iter = res;
		while (i < size) {
			addr = of_read_number(&cell[i],
					      of_n_addr_cells(child));
			i += of_n_addr_cells(child);

			addr_size = of_read_number(&cell[i],
						   of_n_size_cells(child));
			i += of_n_size_cells(child);

			res_iter->start = addr;
			res_iter->end = res_iter->start + addr_size - 1;
			res_iter->flags = IORESOURCE_MEM;
			res_iter++;
		}

		ret = of_irq_parse_one(child, 0, &out_irq);
		if (ret)
			goto out;

		res_iter->start = irq_create_of_mapping(&out_irq);
		res_iter->name = "hidma event irq";
		res_iter->flags = IORESOURCE_IRQ;

		memset(&pdevinfo, 0, sizeof(pdevinfo));
		pdevinfo.fwnode = &child->fwnode;
		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
		pdevinfo.name = child->name;
		pdevinfo.id = object_counter++;
		pdevinfo.res = res;
		pdevinfo.num_res = num;
		pdevinfo.data = NULL;
		pdevinfo.size_data = 0;
		pdevinfo.dma_mask = DMA_BIT_MASK(64);
		new_pdev = platform_device_register_full(&pdevinfo);
		if (IS_ERR(new_pdev)) {
			ret = PTR_ERR(new_pdev);
			goto out;
		}
		of_node_get(child);
		new_pdev->dev.of_node = child;
		of_dma_configure(&new_pdev->dev, child);
		/*
		 * It is assumed that calling of_msi_configure is safe on
		 * platforms with or without MSI support.
		 */
		of_msi_configure(&new_pdev->dev, child);
		of_node_put(child);
		kfree(res);
		res = NULL;
	}
out:
	kfree(res);

	return ret;
}
Example #13
static int parse_ofpart_partitions(struct mtd_info *master,
				   struct mtd_partition **pparts,
				   struct mtd_part_parser_data *data)
{
	struct device_node *mtd_node;
	struct device_node *ofpart_node;
	const char *partname;
	struct device_node *pp;
	int nr_parts, i, ret = 0;
	bool dedicated = true;


	if (!data)
		return 0;

	mtd_node = data->of_node;
	if (!mtd_node)
		return 0;

	ofpart_node = of_get_child_by_name(mtd_node, "partitions");
	if (!ofpart_node) {
		pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
			master->name, mtd_node->full_name);
		ofpart_node = mtd_node;
		dedicated = false;
	}

	/* First count the subnodes */
	nr_parts = 0;
	for_each_child_of_node(ofpart_node,  pp) {
		if (!dedicated && node_has_compatible(pp))
			continue;

		nr_parts++;
	}

	if (nr_parts == 0)
		return 0;

	*pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
	if (!*pparts)
		return -ENOMEM;

	i = 0;
	for_each_child_of_node(ofpart_node,  pp) {
		const __be32 *reg;
		int len;
		int a_cells, s_cells;

		if (!dedicated && node_has_compatible(pp))
			continue;

		reg = of_get_property(pp, "reg", &len);
		if (!reg) {
			if (dedicated) {
				pr_debug("%s: ofpart partition %s (%s) missing reg property.\n",
					 master->name, pp->full_name,
					 mtd_node->full_name);
				goto ofpart_fail;
			} else {
				nr_parts--;
				continue;
			}
		}

		a_cells = of_n_addr_cells(pp);
		s_cells = of_n_size_cells(pp);
		if (len / 4 != a_cells + s_cells) {
			pr_debug("%s: ofpart partition %s (%s) error parsing reg property.\n",
				 master->name, pp->full_name,
				 mtd_node->full_name);
			goto ofpart_fail;
		}

		(*pparts)[i].offset = of_read_number(reg, a_cells);
		(*pparts)[i].size = of_read_number(reg + a_cells, s_cells);

		partname = of_get_property(pp, "label", &len);
		if (!partname)
			partname = of_get_property(pp, "name", &len);
		(*pparts)[i].name = partname;

		if (of_get_property(pp, "read-only", &len))
			(*pparts)[i].mask_flags |= MTD_WRITEABLE;

		if (of_get_property(pp, "lock", &len))
			(*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;

		i++;
	}

	if (!nr_parts)
		goto ofpart_none;

	return nr_parts;

ofpart_fail:
	pr_err("%s: error parsing ofpart partition %s (%s)\n",
	       master->name, pp->full_name, mtd_node->full_name);
	ret = -EINVAL;
ofpart_none:
	of_node_put(pp);
	kfree(*pparts);
	*pparts = NULL;
	return ret;
}
Example #14
struct dpa_bp * __cold __must_check /* __attribute__((nonnull)) */
dpa_bp_probe(struct platform_device *_of_dev, size_t *count)
{
	int			 i, lenp, na, ns;
	struct device		*dev;
	struct device_node	*dev_node;
	const phandle		*phandle_prop;
	const uint32_t		*bpid;
	const uint32_t		*bpool_cfg;
	struct dpa_bp		*dpa_bp;

	dev = &_of_dev->dev;

	/* The default is one, if there's no property */
	*count = 1;

	/* Get the buffer pools to be used */
	phandle_prop = of_get_property(dev->of_node,
					"fsl,bman-buffer-pools", &lenp);

	if (phandle_prop)
		*count = lenp / sizeof(phandle);
	else {
		dev_err(dev,
			"missing fsl,bman-buffer-pools device tree entry\n");
		return ERR_PTR(-EINVAL);
	}

	dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL);
	if (unlikely(dpa_bp == NULL)) {
		dev_err(dev, "devm_kzalloc() failed\n");
		return ERR_PTR(-ENOMEM);
	}

	dev_node = of_find_node_by_path("/");
	if (unlikely(dev_node == NULL)) {
		dev_err(dev, "of_find_node_by_path(/) failed\n");
		return ERR_PTR(-EINVAL);
	}

	na = of_n_addr_cells(dev_node);
	ns = of_n_size_cells(dev_node);

	for (i = 0; i < *count && phandle_prop; i++) {
		of_node_put(dev_node);
		dev_node = of_find_node_by_phandle(phandle_prop[i]);
		if (unlikely(dev_node == NULL)) {
			dev_err(dev, "of_find_node_by_phandle() failed\n");
			return ERR_PTR(-EFAULT);
		}

		if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) {
			dev_err(dev,
				"!of_device_is_compatible(%s, fsl,bpool)\n",
				dev_node->full_name);
			dpa_bp = ERR_PTR(-EINVAL);
			goto _return_of_node_put;
		}

		bpid = of_get_property(dev_node, "fsl,bpid", &lenp);
		if ((bpid == NULL) || (lenp != sizeof(*bpid))) {
			dev_err(dev, "fsl,bpid property not found.\n");
			dpa_bp = ERR_PTR(-EINVAL);
			goto _return_of_node_put;
		}
		dpa_bp[i].bpid = *bpid;

		bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg",
					&lenp);
		if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) {
			const uint32_t *seed_pool;

			dpa_bp[i].config_count =
				(int)of_read_number(bpool_cfg, ns);
			dpa_bp[i].size	= of_read_number(bpool_cfg + ns, ns);
			dpa_bp[i].paddr	=
				of_read_number(bpool_cfg + 2 * ns, na);

			seed_pool = of_get_property(dev_node,
					"fsl,bpool-ethernet-seeds", &lenp);
			dpa_bp[i].seed_pool = !!seed_pool;

		} else {
			dev_err(dev,
				"Missing/invalid fsl,bpool-ethernet-cfg device tree entry for node %s\n",
				dev_node->full_name);
			dpa_bp = ERR_PTR(-EINVAL);
			goto _return_of_node_put;
		}
	}

	sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL);

	return dpa_bp;

_return_of_node_put:
	if (dev_node)
		of_node_put(dev_node);

	return dpa_bp;
}
Example #15
static int of_flash_probe(struct platform_device *dev)
{
	const char * const *part_probe_types;
	const struct of_device_id *match;
	struct device_node *dp = dev->dev.of_node;
	struct resource res;
	struct of_flash *info;
	const char *probe_type;
	const __be32 *width;
	int err;
	int i;
	int count;
	const __be32 *p;
	int reg_tuple_size;
	struct mtd_info **mtd_list = NULL;
	resource_size_t res_size;
	struct mtd_part_parser_data ppdata;
	bool map_indirect;
	const char *mtd_name = NULL;

	match = of_match_device(of_flash_match, &dev->dev);
	if (!match)
		return -EINVAL;
	probe_type = match->data;

	reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);

	of_property_read_string(dp, "linux,mtd-name", &mtd_name);

	/*
	 * Get number of "reg" tuples. Scan for MTD devices on area's
	 * described by each "reg" region. This makes it possible (including
	 * the concat support) to support the Intel P30 48F4400 chips which
	 * consists internally of 2 non-identical NOR chips on one die.
	 */
	p = of_get_property(dp, "reg", &count);
	if (!p || count % reg_tuple_size != 0) {
		dev_err(&dev->dev, "Malformed reg property on %s\n",
				dev->dev.of_node->full_name);
		err = -EINVAL;
		goto err_flash_remove;
	}
	count /= reg_tuple_size;

	map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");

	err = -ENOMEM;
	info = devm_kzalloc(&dev->dev,
			    sizeof(struct of_flash) +
			    sizeof(struct of_flash_list) * count, GFP_KERNEL);
	if (!info)
		goto err_flash_remove;

	dev_set_drvdata(&dev->dev, info);

	mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
	if (!mtd_list)
		goto err_flash_remove;

	for (i = 0; i < count; i++) {
		err = -ENXIO;
		if (of_address_to_resource(dp, i, &res)) {
			/*
			 * Continue with next register tuple if this
			 * one is not mappable
			 */
			continue;
		}

		dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);

		err = -EBUSY;
		res_size = resource_size(&res);
		info->list[i].res = request_mem_region(res.start, res_size,
						       dev_name(&dev->dev));
		if (!info->list[i].res)
			goto err_out;

		err = -ENXIO;
		width = of_get_property(dp, "bank-width", NULL);
		if (!width) {
			dev_err(&dev->dev, "Can't get bank width from device"
				" tree\n");
			goto err_out;
		}

		info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
		info->list[i].map.phys = res.start;
		info->list[i].map.size = res_size;
		info->list[i].map.bankwidth = be32_to_cpup(width);
		info->list[i].map.device_node = dp;

		err = -ENOMEM;
		info->list[i].map.virt = ioremap(info->list[i].map.phys,
						 info->list[i].map.size);
		if (!info->list[i].map.virt) {
			dev_err(&dev->dev, "Failed to ioremap() flash"
				" region\n");
			goto err_out;
		}

		simple_map_init(&info->list[i].map);

		/*
		 * On some platforms (e.g. MPC5200) a direct 1:1 mapping
		 * may cause problems with JFFS2 usage, as the local bus (LPB)
		 * doesn't support unaligned accesses as implemented in the
		 * JFFS2 code via memcpy(). By setting NO_XIP, the
		 * flash will not be exposed directly to the MTD users
		 * (e.g. JFFS2) any more.
		 */
		if (map_indirect)
			info->list[i].map.phys = NO_XIP;

		if (probe_type) {
			info->list[i].mtd = do_map_probe(probe_type,
							 &info->list[i].map);
		} else {
			info->list[i].mtd = obsolete_probe(dev,
							   &info->list[i].map);
		}
		mtd_list[i] = info->list[i].mtd;

		err = -ENXIO;
		if (!info->list[i].mtd) {
			dev_err(&dev->dev, "do_map_probe() failed\n");
			goto err_out;
		} else {
			info->list_size++;
		}
		info->list[i].mtd->owner = THIS_MODULE;
		info->list[i].mtd->dev.parent = &dev->dev;
	}

	err = 0;
	info->cmtd = NULL;
	if (info->list_size == 1) {
		info->cmtd = info->list[0].mtd;
	} else if (info->list_size > 1) {
		/*
		 * We detected multiple devices. Concatenate them together.
		 */
		info->cmtd = mtd_concat_create(mtd_list, info->list_size,
					       dev_name(&dev->dev));
	}
	if (info->cmtd == NULL)
		err = -ENXIO;

	if (err)
		goto err_out;

	ppdata.of_node = dp;
	part_probe_types = of_get_probes(dp);
	mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
			NULL, 0);
	of_free_probes(part_probe_types);

	kfree(mtd_list);

	return 0;

err_out:
	kfree(mtd_list);
err_flash_remove:
	of_flash_remove(dev);

	return err;
}
Example #16
/**
 * of_dma_get_range - Get DMA range info
 * @np:		device node to get DMA range info
 * @dma_addr:	pointer to store initial DMA address of DMA range
 * @paddr:	pointer to store initial CPU address of DMA range
 * @size:	pointer to store size of DMA range
 *
 * Look in bottom up direction for the first "dma-ranges" property
 * and parse it.
 *  dma-ranges format:
 *	DMA addr (dma_addr)	: naddr cells
 *	CPU addr (phys_addr_t)	: pna cells
 *	size			: nsize cells
 *
 * It returns -ENODEV if "dma-ranges" property was not found
 * for this device in DT.
 */
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	int len, naddr, nsize, pna;
	int ret = 0;
	u64 dmaaddr;

	if (!node)
		return -EINVAL;

	while (1) {
		naddr = of_n_addr_cells(node);
		nsize = of_n_size_cells(node);
		node = of_get_next_parent(node);
		if (!node)
			break;

		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/*
		 * At least empty ranges has to be defined for parent node if
		 * DMA is supported
		 */
		if (!ranges)
			break;
	}

	if (!ranges) {
		pr_debug("no dma-ranges found for node(%pOF)\n", np);
		ret = -ENODEV;
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(node);

	/* dma-ranges format:
	 * DMA addr	: naddr cells
	 * CPU addr	: pna cells
	 * size		: nsize cells
	 */
	dmaaddr = of_read_number(ranges, naddr);
	*paddr = of_translate_dma_address(np, ranges);
	if (*paddr == OF_BAD_ADDR) {
		pr_err("translation of DMA address(%pad) to CPU address failed node(%pOF)\n",
		       dma_addr, np);
		ret = -EINVAL;
		goto out;
	}
	*dma_addr = dmaaddr;

	*size = of_read_number(ranges + naddr + pna, nsize);

	pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
		 *dma_addr, *paddr, *size);

out:
	of_node_put(node);

	return ret;
}
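A hedged sketch of how a consumer such as of_dma_configure() might use the parsed triple to derive a CPU-to-bus offset; example_dma_offset() is illustrative and assumes the of_dma_get_range() prototype is in scope.

#include <linux/of.h>

/* Illustrative only: turn the parsed dma-ranges entry into the offset
 * that maps CPU physical addresses to device-visible DMA addresses. */
static int example_dma_offset(struct device_node *np, u64 *offset)
{
	u64 dma_addr, paddr, size;
	int ret;

	ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
	if (ret < 0)
		return ret;

	/* dev_addr = phys_addr - *offset for addresses inside the range */
	*offset = paddr - dma_addr;
	return 0;
}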
Example #17
static int
oh_port_probe(struct platform_device *_of_dev)
{
	struct device		*dpa_oh_dev;
	struct device_node	*dpa_oh_node;
	int			 lenp, _errno = 0, fq_idx, n_size, i, ret;
	const phandle		*oh_port_handle, *bpool_handle;
	struct platform_device	*oh_of_dev;
	struct device_node	*oh_node, *bpool_node = NULL, *root_node;
	struct device		*oh_dev;
	struct dpa_oh_config_s	*oh_config = NULL;
	uint32_t		*oh_all_queues;
	uint32_t		*oh_tx_queues;
	uint32_t		 queues_count;
	uint32_t		 crt_fqid_base;
	uint32_t		 crt_fq_count;
	bool			frag_enabled = false;
	struct fm_port_params	oh_port_tx_params;
	struct fm_port_pcd_param	oh_port_pcd_params;
	struct dpa_buffer_layout_s buf_layout;
	/* True if the current partition owns the OH port. */
	bool init_oh_port;
	const struct of_device_id *match;
	uint32_t crt_ext_pools_count, ext_pool_size;
	const unsigned int *port_id;
	const unsigned int *channel_id;
	const uint32_t		*bpool_cfg;
	const uint32_t		*bpid;

	memset(&oh_port_tx_params, 0, sizeof(oh_port_tx_params));
	dpa_oh_dev = &_of_dev->dev;
	dpa_oh_node = dpa_oh_dev->of_node;
	BUG_ON(dpa_oh_node == NULL);

	match = of_match_device(oh_port_match_table, dpa_oh_dev);
	if (!match)
		return -EINVAL;

	dev_dbg(dpa_oh_dev, "Probing OH port...\n");

	/*
	 * Find the referenced OH node
	 */

	oh_port_handle = of_get_property(dpa_oh_node,
		"fsl,fman-oh-port", &lenp);
	if (oh_port_handle == NULL) {
		dev_err(dpa_oh_dev, "No OH port handle found in node %s\n",
			dpa_oh_node->full_name);
		return -EINVAL;
	}

	BUG_ON(lenp % sizeof(*oh_port_handle));
	if (lenp != sizeof(*oh_port_handle)) {
		dev_err(dpa_oh_dev, "Found %lu OH port bindings in node %s,"
			" only 1 phandle is allowed.\n",
			(unsigned long int)(lenp / sizeof(*oh_port_handle)),
			dpa_oh_node->full_name);
		return -EINVAL;
	}

	/* Read configuration for the OH port */
	oh_node = of_find_node_by_phandle(*oh_port_handle);
	if (oh_node == NULL) {
		dev_err(dpa_oh_dev, "Can't find OH node referenced from "
			"node %s\n", dpa_oh_node->full_name);
		return -EINVAL;
	}
	dev_info(dpa_oh_dev, "Found OH node handle compatible with %s.\n",
		match->compatible);

	port_id = of_get_property(oh_node, "cell-index", &lenp);

	if (port_id == NULL) {
		dev_err(dpa_oh_dev, "No port id found in node %s\n",
			dpa_oh_node->full_name);
		_errno = -EINVAL;
		goto return_kfree;
	}

	BUG_ON(lenp % sizeof(*port_id));

	oh_of_dev = of_find_device_by_node(oh_node);
	BUG_ON(oh_of_dev == NULL);
	oh_dev = &oh_of_dev->dev;

	/*
	 * The OH port must be initialized exactly once.
	 * The following scenarios are of interest:
	 *	- the node is Linux-private (will always initialize it);
	 *	- the node is shared between two Linux partitions
	 *	  (only one of them will initialize it);
	 *	- the node is shared between a Linux and a LWE partition
	 *	  (Linux will initialize it) - "fsl,dpa-oh-shared"
	 */

	/* Check if the current partition owns the OH port
	 * and ought to initialize it. It may be the case that we leave this
	 * to another (also Linux) partition. */
	init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared");

	/* If we aren't the "owner" of the OH node, we're done here. */
	if (!init_oh_port) {
		dev_dbg(dpa_oh_dev, "Not owning the shared OH port %s, "
			"will not initialize it.\n", oh_node->full_name);
		of_node_put(oh_node);
		return 0;
	}

	/* Allocate OH dev private data */
	oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL);
	if (oh_config == NULL) {
		dev_err(dpa_oh_dev, "Can't allocate private data for "
			"OH node %s referenced from node %s!\n",
			oh_node->full_name, dpa_oh_node->full_name);
		_errno = -ENOMEM;
		goto return_kfree;
	}

	/*
	 * Read FQ ids/nums for the DPA OH node
	 */
	oh_all_queues = (uint32_t *)of_get_property(dpa_oh_node,
		"fsl,qman-frame-queues-oh", &lenp);
	if (oh_all_queues == NULL) {
		dev_err(dpa_oh_dev, "No frame queues have been "
			"defined for OH node %s referenced from node %s\n",
			oh_node->full_name, dpa_oh_node->full_name);
		_errno = -EINVAL;
		goto return_kfree;
	}

	/* Check that the OH error and default FQs are there */
	BUG_ON(lenp % (2 * sizeof(*oh_all_queues)));
	queues_count = lenp / (2 * sizeof(*oh_all_queues));
	if (queues_count != 2) {
		dev_err(dpa_oh_dev, "Error and Default queues must be "
			"defined for OH node %s referenced from node %s\n",
			oh_node->full_name, dpa_oh_node->full_name);
		_errno = -EINVAL;
		goto return_kfree;
	}

	/* Read the FQIDs defined for this OH port */
	dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count);
	fq_idx = 0;

	/* Error FQID - must be present */
	crt_fqid_base = oh_all_queues[fq_idx++];
	crt_fq_count = oh_all_queues[fq_idx++];
	if (crt_fq_count != 1) {
		dev_err(dpa_oh_dev, "Only 1 Error FQ allowed in OH node %s "
			"referenced from node %s (read: %d FQIDs).\n",
			oh_node->full_name, dpa_oh_node->full_name,
			crt_fq_count);
		_errno = -EINVAL;
		goto return_kfree;
	}
	oh_config->error_fqid = crt_fqid_base;
	dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n",
		oh_config->error_fqid, oh_node->full_name);

	/* Default FQID - must be present */
	crt_fqid_base = oh_all_queues[fq_idx++];
	crt_fq_count = oh_all_queues[fq_idx++];
	if (crt_fq_count != 1) {
		dev_err(dpa_oh_dev, "Only 1 Default FQ allowed "
			"in OH node %s referenced from %s (read: %d FQIDs).\n",
			oh_node->full_name, dpa_oh_node->full_name,
			crt_fq_count);
		_errno = -EINVAL;
		goto return_kfree;
	}
	oh_config->default_fqid = crt_fqid_base;
	dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n",
		oh_config->default_fqid, oh_node->full_name);

	/* TX FQID - presence is optional */
	oh_tx_queues = (uint32_t *)of_get_property(dpa_oh_node,
		"fsl,qman-frame-queues-tx", &lenp);
	if (oh_tx_queues == NULL) {
		dev_dbg(dpa_oh_dev, "No tx queues have been "
		"defined for OH node %s referenced from node %s\n",
			oh_node->full_name, dpa_oh_node->full_name);
		goto config_port;
	}

	/* Check that queues-tx has only a base and a count defined */
	BUG_ON(lenp % (2 * sizeof(*oh_tx_queues)));
	queues_count = lenp / (2 * sizeof(*oh_tx_queues));
	if (queues_count != 1) {
		dev_err(dpa_oh_dev, "TX queues must be defined in"
			"only one <base count> tuple for OH node %s "
			"referenced from node %s\n",
			oh_node->full_name, dpa_oh_node->full_name);
		_errno = -EINVAL;
		goto return_kfree;
	}

	/* Read channel id for the queues */
	channel_id = of_get_property(oh_node, "fsl,qman-channel-id", &lenp);
	if (channel_id == NULL) {
		dev_err(dpa_oh_dev, "No channel id found in node %s\n",
			dpa_oh_node->full_name);
		_errno = -EINVAL;
		goto return_kfree;
	}
	BUG_ON(lenp % sizeof(*channel_id));

	fq_idx = 0;
	crt_fqid_base = oh_tx_queues[fq_idx++];
	crt_fq_count = oh_tx_queues[fq_idx++];
	oh_config->egress_cnt = crt_fq_count;

	/* Allocate TX queues */
	dev_dbg(dpa_oh_dev, "Allocating %d queues for TX...\n", crt_fq_count);
	oh_config->egress_fqs = devm_kzalloc(dpa_oh_dev,
		crt_fq_count * sizeof(struct qman_fq), GFP_KERNEL);
	if (oh_config->egress_fqs == NULL) {
		dev_err(dpa_oh_dev, "Can't allocate private data for "
			"TX queues for OH node %s referenced from node %s!\n",
			oh_node->full_name, dpa_oh_node->full_name);
		_errno = -ENOMEM;
		goto return_kfree;
	}

	/* Create TX queues */
	for (i = 0; i < crt_fq_count; i++) {
		ret = oh_fq_create(oh_config->egress_fqs + i,
			crt_fqid_base + i, *channel_id, 3);
		if (ret != 0) {
			dev_err(dpa_oh_dev, "Unable to create TX frame "
				"queue %d for OH node %s referenced "
				"from node %s!\n",
				crt_fqid_base + i, oh_node->full_name,
				dpa_oh_node->full_name);
			_errno = -EINVAL;
			goto return_kfree;
		}
	}

config_port:
	/* Get a handle to the fm_port so we can set
	 * its configuration params */
	oh_config->oh_port = fm_port_bind(oh_dev);
	if (oh_config->oh_port == NULL) {
		dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n",
			oh_node->full_name);
		_errno = -EINVAL;
		goto return_kfree;
	}

	oh_set_buffer_layout(oh_config->oh_port, &buf_layout);
	bpool_handle = of_get_property(dpa_oh_node,
			"fsl,bman-buffer-pools", &lenp);

	if (bpool_handle == NULL) {
		dev_info(dpa_oh_dev, "OH port %s has no buffer pool. Fragmentation will not be enabled\n",
			oh_node->full_name);
		goto init_port;
	}

	/* used for reading ext_pool_size*/
	root_node = of_find_node_by_path("/");
	if (root_node == NULL) {
		dev_err(dpa_oh_dev, "of_find_node_by_path(/) failed\n");
		_errno = -EINVAL;
		goto return_kfree;
	}

	n_size = of_n_size_cells(root_node);
	of_node_put(root_node);

	crt_ext_pools_count = lenp / sizeof(phandle);
	dev_dbg(dpa_oh_dev, "OH port number of pools = %u\n",
					crt_ext_pools_count);

	oh_port_tx_params.num_pools = crt_ext_pools_count;

	for (i = 0; i < crt_ext_pools_count; i++) {
		bpool_node = of_find_node_by_phandle(bpool_handle[i]);
		if (bpool_node == NULL) {
			dev_err(dpa_oh_dev, "Invalid Buffer pool node\n");
			_errno = -EINVAL;
			goto return_kfree;
		}

		bpid = of_get_property(bpool_node, "fsl,bpid", &lenp);
		if ((bpid == NULL) || (lenp != sizeof(*bpid))) {
			dev_err(dpa_oh_dev, "Invalid Buffer pool Id\n");
			_errno = -EINVAL;
			goto return_kfree;
		}

		oh_port_tx_params.pool_param[i].id = *bpid;
		dev_dbg(dpa_oh_dev, "OH port bpool id = %u\n", *bpid);

		bpool_cfg = of_get_property(bpool_node,
				"fsl,bpool-ethernet-cfg", &lenp);
		if (bpool_cfg == NULL) {
			dev_err(dpa_oh_dev, "Invalid Buffer pool config params\n");
			_errno = -EINVAL;
			goto return_kfree;
		}

		of_read_number(bpool_cfg, n_size);
		ext_pool_size = of_read_number(bpool_cfg + n_size, n_size);
		oh_port_tx_params.pool_param[i].size = ext_pool_size;
		dev_dbg(dpa_oh_dev, "OH port bpool size = %u\n",
			ext_pool_size);
		of_node_put(bpool_node);

	}

	if (buf_layout.data_align != FRAG_DATA_ALIGN ||
	    buf_layout.manip_extra_space != FRAG_MANIP_SPACE)
		goto init_port;

	frag_enabled = true;
	dev_info(dpa_oh_dev, "IP Fragmentation enabled for OH port %d",
		     *port_id);

init_port:
	of_node_put(oh_node);
	/* Set Tx params */
	dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params,
		oh_config->error_fqid, oh_config->default_fqid, (&buf_layout),
		frag_enabled);
	/* Set PCD params */
	oh_port_pcd_params.cba = oh_alloc_pcd_fqids;
	oh_port_pcd_params.cbf = oh_free_pcd_fqids;
	oh_port_pcd_params.dev = dpa_oh_dev;
	fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params);

	dev_set_drvdata(dpa_oh_dev, oh_config);

	/* Enable the OH port */
	_errno = fm_port_enable(oh_config->oh_port);
	if (_errno)
		goto return_kfree;

	dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name);

	return 0;

return_kfree:
	if (bpool_node)
		of_node_put(bpool_node);
	if (oh_node)
		of_node_put(oh_node);
	if (oh_config && oh_config->egress_fqs)
		devm_kfree(dpa_oh_dev, oh_config->egress_fqs);
	devm_kfree(dpa_oh_dev, oh_config);
	return _errno;
}