/*
 * cma_assign_device_from_dt - bind a CMA region to @dev from device tree.
 *
 * Resolves the "linux,contiguous-region" phandle on the device's OF node,
 * then looks the referenced CMA area up first by its "reg" base address and,
 * failing that, by its "region_name" string property.  On a successful match
 * the area becomes the device's CMA area via dev_set_cma_area().
 */
static void cma_assign_device_from_dt(struct device *dev)
{
	int ret;
	const char *name;
	struct device_node *node;
	struct cma *cma;
	u32 value;

	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
	if (!node)
		return;

	/*
	 * Only consult @value when the "reg" read succeeded.  The previous
	 * check ("ret && !value") inspected @value while it was still
	 * uninitialized on failure, and could hand garbage to cma_get_area();
	 * a failed read now falls through to the name-based lookup instead.
	 */
	ret = of_property_read_u32(node, "reg", &value);
	if (ret == 0) {
		cma = cma_get_area(value);
		if (cma) {
			pr_info("Assigned CMA region at %lx to %s device\n",
				(unsigned long)value, dev_name(dev));
			dev_set_cma_area(dev, cma);
			goto out;
		}
	}

	/* Fall back to matching the region by name. */
	ret = of_property_read_string(node, "region_name", &name);
	if (ret == 0) {
		cma = cma_get_area_by_name(name);
		if (cma) {
			pr_info("Assigned CMA region with name %s to %s device\n",
				name, dev_name(dev));
			dev_set_cma_area(dev, cma);
		}
	}

out:
	/* of_parse_phandle() takes a reference on @node; drop it on all paths. */
	of_node_put(node);
}
/*
 * cma_init_reserved_areas - turn early reservations into live CMA areas.
 *
 * Creates a CMA area for every entry in cma_areas[], resolves the default
 * contiguous area from its recorded base, applies the explicit device-to-area
 * map in cma_maps[], and (with CONFIG_OF) registers the platform-bus notifier
 * so device-tree devices get regions assigned as they appear.
 *
 * Always returns 0.
 */
static int __init cma_init_reserved_areas(void)
{
	struct cma *area;
	int idx;

	/* Materialize each reserved range as a CMA area. */
	for (idx = 0; idx < cma_area_count; idx++) {
		phys_addr_t pfn_base = PFN_DOWN(cma_areas[idx].base);
		unsigned int nr_pages = cma_areas[idx].size >> PAGE_SHIFT;

		area = cma_create_area(pfn_base, nr_pages);
		if (!IS_ERR(area))
			cma_areas[idx].cma = area;
	}

	/* Resolve the system-wide default area from its base address. */
	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);

	/* Apply the static device <-> area assignments. */
	for (idx = 0; idx < cma_map_count; idx++) {
		area = cma_get_area(cma_maps[idx].base);
		dev_set_cma_area(cma_maps[idx].dev, area);
	}

#ifdef CONFIG_OF
	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
#endif
	return 0;
}
/* Example #3 (0) */
/*
 * cma_init_reserved_areas - create a CMA area for each early reservation.
 *
 * Walks the cma_reserved[] table in order, creates an area covering each
 * reserved range, and attaches it to the device recorded at reservation
 * time.  Entries whose creation fails are silently skipped.
 *
 * Always returns 0.
 */
static int __init cma_init_reserved_areas(void)
{
	unsigned int idx;

	pr_debug("%s()\n", __func__);

	for (idx = 0; idx < cma_reserved_count; idx++) {
		struct cma_reserved *res = &cma_reserved[idx];
		struct cma *area = cma_create_area(PFN_DOWN(res->start),
						   res->size >> PAGE_SHIFT);

		if (!IS_ERR(area))
			dev_set_cma_area(res->dev, area);
	}
	return 0;
}
/*
 * hisi_cma_dev_init - bind HiSilicon CMA areas to their dummy devices.
 *
 * For every populated entry in hisi_cma_areas[], sets the area as the CMA
 * area of the matching hisi_cma_dev[] device.  Areas marked static
 * (dynamic == 0) are additionally pre-allocated in full; their pages are
 * tagged for kernel dump when CONFIG_HISI_KERNELDUMP is set, and the range
 * is remapped with device (nGnRE) attributes when sec_prot is set.
 */
static void __init hisi_cma_dev_init(void)
{
	struct cma *cma;
	struct page *page;
	int i;
#ifdef CONFIG_HISI_KERNELDUMP
	int k;
	struct page *tmp_page;
#endif

	for (i = 0; i < hisi_cma_area_count; i++) {
		cma = hisi_cma_areas[i].cma_area;
		if (cma == NULL)
			continue;
		dev_set_cma_area(&hisi_cma_dev[i], cma);
		hisi_cma_areas[i].dev = &hisi_cma_dev[i];

		/* dynamic == 0 means the area is static: allocate it all now. */
		if (hisi_cma_areas[i].dynamic != 0)
			continue;

		page = dma_alloc_from_contiguous(&hisi_cma_dev[i], cma->count,
						 SZ_1M);
		/*
		 * Bug fix: the original fed a possibly-NULL page straight to
		 * page_to_phys() below.  Report the failure and skip the rest
		 * of the static setup for this area.
		 */
		if (page == NULL) {
			pr_err("%s:%d contiguous alloc failed for area %d\n",
			       __func__, __LINE__, i);
			continue;
		}

#ifdef CONFIG_HISI_KERNELDUMP
		/* Tag every page of the region for the kernel-dump tool. */
		tmp_page = page;
		for (k = 0; k < cma->count; k++) {
			SetPageMemDump(tmp_page);
			tmp_page++;
		}
#endif
		if (hisi_cma_areas[i].sec_prot) {
			/* Remap the range with device (nGnRE) attributes. */
			create_mapping_late(__pfn_to_phys(cma->base_pfn),
					    __phys_to_virt(__pfn_to_phys(cma->base_pfn)),
					    cma->count * PAGE_SIZE,
					    __pgprot(PROT_DEVICE_nGnRE));
		}
		pr_err("%s:%d page addr 0x%llx size %lu\n", __func__,
		       __LINE__, page_to_phys(page),
		       (cma->count << PAGE_SHIFT) / SZ_1M);
	}
}