Example no. 1
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
    if (dev == NULL) {
        WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
        return NULL;
    }

    if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
        flags |= GFP_DMA;
    if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
        struct page *page;
        void *addr;

        page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                         get_order(size));
        if (!page)
            return NULL;

        *dma_handle = phys_to_dma(dev, page_to_phys(page));
        addr = page_address(page);
        memset(addr, 0, size);
        return addr;
    } else {
        return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
    }
}
Example no. 2
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;

	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;

		page = dma_alloc_from_contiguous(dev,
						PAGE_ALIGN(size) >> PAGE_SHIFT,
							get_order(size));
		if (page) {
			*dma_handle = phys_to_dma(dev, page_to_phys(page));
			return page_address(page);
		} else if (dev_get_cma_priv_area(dev)) {
			pr_err("%s: failed to allocate from dma-contiguous\n",
								__func__);
			return NULL;
		}
	}

	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
Example no. 3
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!page)
		return NULL;

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_from_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		__dma_direct_free_pages(dev, size, page);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}
Example no. 4
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted())
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}
Example no. 5
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}
Example no. 6
/*
 * Allocates/reserves the Platform memory resources early in the boot process.
 * This ignores any resources that are designated IORESOURCE_IO
 */
void __init platform_alloc_bootmem(void)
{
	int i;
	int total = 0;

	/* Get persistent memory data from command line before allocating
	 * resources. This needs to happen before normal command line parsing
	 * has been done */
	pmem_setup_resource();

	/* Loop through looking for resources that want a particular address */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			reserve_bootmem(dma_to_phys(gp_resources[i].start),
				size, 0);
			total += resource_size(&gp_resources[i]);
			pr_info("reserve resource %s at %08x (%u bytes)\n",
				gp_resources[i].name, gp_resources[i].start,
				resource_size(&gp_resources[i]));
		}
	}

	/* Loop through assigning addresses for those that are left */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start == 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			void *mem = alloc_bootmem_pages(size);

			if (mem == NULL)
				pr_err("Unable to allocate bootmem pages "
					"for %s\n", gp_resources[i].name);

			else {
				gp_resources[i].start =
					phys_to_dma(virt_to_phys(mem));
				gp_resources[i].end =
					gp_resources[i].start + size - 1;
				total += size;
				pr_info("allocate resource %s at %08x "
						"(%u bytes)\n",
					gp_resources[i].name,
					gp_resources[i].start, size);
			}
		}
	}

	pr_info("Total Platform driver memory allocation: 0x%08x\n", total);

	/* indicate resources that are platform I/O related */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
			pr_info("reserved platform resource %s at %08x\n",
				gp_resources[i].name, gp_resources[i].start);
		}
	}
}
Example no. 7
void __init platform_alloc_bootmem(void)
{
	int i;
	int total = 0;

	pmem_setup_resource();

	
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			reserve_bootmem(dma_to_phys(gp_resources[i].start),
				size, 0);
			total += resource_size(&gp_resources[i]);
			pr_info("reserve resource %s at %08x (%u bytes)\n",
				gp_resources[i].name, gp_resources[i].start,
				resource_size(&gp_resources[i]));
		}
	}

	
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start == 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			void *mem = alloc_bootmem_pages(size);

			if (mem == NULL)
				pr_err("Unable to allocate bootmem pages "
					"for %s\n", gp_resources[i].name);

			else {
				gp_resources[i].start =
					phys_to_dma(virt_to_phys(mem));
				gp_resources[i].end =
					gp_resources[i].start + size - 1;
				total += size;
				pr_info("allocate resource %s at %08x "
						"(%u bytes)\n",
					gp_resources[i].name,
					gp_resources[i].start, size);
			}
		}
	}

	pr_info("Total Platform driver memory allocation: 0x%08x\n", total);

	
	for (i = 0; gp_resources[i].flags != 0; i++) {
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
			pr_info("reserved platform resource %s at %08x\n",
				gp_resources[i].name, gp_resources[i].start);
		}
	}
}
Example no. 8
static void __init pmem_setup_resource(void)
{
	struct resource *resource;
	resource = asic_resource_get("DiagPersistentMemory");

	if (resource && pmemaddr && pmemlen) {
		resource->start = phys_to_dma(pmemaddr - 0x80000000);
		resource->end = resource->start + pmemlen - 1;

		pr_info("persistent memory: start=0x%x  end=0x%x\n",
			resource->start, resource->end);
	}
}
Example no. 9
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	void *ptr, *coherent_ptr;
	struct page *page;

	size = PAGE_ALIGN(size);

	if (!(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;

	}
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (page) {
			*dma_handle = phys_to_dma(dev, page_to_phys(page));
			ptr = page_address(page);
		} else if (dev_get_cma_priv_area(dev)) {
			pr_err("%s: failed to allocate from dma-contiguous\n",
								__func__);
			return NULL;
		} else {
			ptr = __dma_alloc_coherent(
					dev, size, dma_handle, flags, attrs);
		}
	} else {
Example no. 10
/*
 * Set up persistent memory. If we were given values, we patch the array of
 * resources. Otherwise, persistent memory may be allocated anywhere at all.
 */
static void __init pmem_setup_resource(void)
{
	struct resource *resource;
	resource = asic_resource_get("DiagPersistentMemory");

	if (resource && pmemaddr && pmemlen) {
		/* The address provided by bootloader is in kseg0. Convert to
		 * a bus address. */
		resource->start = phys_to_dma(pmemaddr - 0x80000000);
		resource->end = resource->start + pmemlen - 1;

		pr_info("persistent memory: start=0x%x  end=0x%x\n",
			resource->start, resource->end);
	}
}
Example no. 11
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;

	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag & __GFP_NOWARN);

	if (!page)
		page = alloc_pages(flag, get_order(size));

	if (!page)
		return NULL;

	*handle = phys_to_dma(dev, page_to_phys(page));

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
		return page;
	}

#ifdef CONFIG_MMU
	if (PageHighMem(page)) {
		void *p;

		p = dma_common_contiguous_remap(page, size, VM_MAP,
						pgprot_noncached(PAGE_KERNEL),
						__builtin_return_address(0));
		if (!p) {
			if (!dma_release_from_contiguous(dev, page, count))
				__free_pages(page, get_order(size));
		}
		return p;
	}
#endif
	BUG_ON(!platform_vaddr_cached(page_address(page)));
	__invalidate_dcache_range((unsigned long)page_address(page), size);
	return platform_vaddr_to_uncached(page_address(page));
}
Example no. 12
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_addr;
}
Example no. 13
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}
Example no. 14
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;
		void *addr;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);

		if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs) ||
		    dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs)) {
			/*
			 * flush the caches here because we can't later
			 */
			__dma_flush_range(addr, addr + size);
			__dma_remap(page, size, 0, true);
		}

		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}
Example no. 15
/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}
Example no. 16
static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
{
	return phys_to_dma(dev, virt_to_phys(pages));
}
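Taken together, the examples above share one pattern: obtain a CPU physical address (from page_to_phys(), virt_to_phys(), or a boot-supplied value) and translate it into a bus/DMA address with phys_to_dma() before handing it to a device. The sketch below is a minimal, self-contained user-space model of that translation arithmetic only; struct model_device, its dma_offset field, and all function names are illustrative assumptions rather than kernel API, and real phys_to_dma() implementations are architecture-specific (offset maps, encryption bits, and so on).

/*
 * Minimal user-space model of the physical-to-bus translation used in the
 * examples above.  "model_device" and "dma_offset" are hypothetical
 * stand-ins for the per-device translation state kept by real kernels;
 * only the arithmetic is illustrated.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

struct model_device {
	uint64_t dma_offset;	/* bus address = physical address - offset */
};

/* plays the role of phys_to_dma(dev, phys) in the snippets above (assumption) */
static dma_addr_t model_phys_to_dma(const struct model_device *dev,
				    phys_addr_t phys)
{
	return (dma_addr_t)(phys - dev->dma_offset);
}

int main(void)
{
	/* e.g. a platform whose bus sees RAM 2 GiB below the CPU's view */
	struct model_device dev = { .dma_offset = 0x80000000ULL };
	phys_addr_t page_phys = 0x90001000ULL;	/* pretend page_to_phys() result */

	printf("phys 0x%llx -> dma 0x%llx\n",
	       (unsigned long long)page_phys,
	       (unsigned long long)model_phys_to_dma(&dev, page_phys));
	return 0;
}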