Example #1
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t gfp)
{
	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
		gfp |= GFP_DMA;
	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}
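
For context, the release path on ia64 is an equally thin wrapper. A minimal sketch, assuming the swiotlb_free_coherent() prototype of the same kernel era:

static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_addr)
{
	/* Hand the buffer straight back to the swiotlb pool. */
	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}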
Example #2
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

	mb();

	return ret;
}
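
The #ifdef ladder above only maps a coherent DMA mask onto a GFP zone flag. The same decision reads more clearly when factored into a helper; a sketch with a hypothetical massage_gfp_flags(), behaviorally equivalent to the chain above (plus a NULL-device guard on the ZONE_DMA32 branch):

/* Hypothetical helper: derive the GFP zone flag from the device's
 * coherent DMA mask, defaulting to ZONE_DMA when no device is given. */
static gfp_t massage_gfp_flags(struct device *dev, gfp_t gfp)
{
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return gfp | __GFP_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
	if (dev && dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		return gfp | __GFP_DMA32;
#endif
	return gfp;
}

Example #4 below shows the direction mainline actually took: replacing the preprocessor conditionals with IS_ENABLED(), so the compiler sees and type-checks every branch.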
Example #3
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;

	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;

		page = dma_alloc_from_contiguous(dev,
						PAGE_ALIGN(size) >> PAGE_SHIFT,
							get_order(size));
		if (page) {
			*dma_handle = phys_to_dma(dev, page_to_phys(page));
			return page_address(page);
		} else if (dev_get_cma_priv_area(dev)) {
			pr_err("%s: failed to allocate from dma-contiguous\n",
								__func__);
			return NULL;
		}
	}

	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
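
Whether the CMA branch above is considered at all depends on the gfp flags the caller passes down: in this kernel era GFP_KERNEL includes __GFP_WAIT, while GFP_ATOMIC does not and falls straight through to swiotlb_alloc_coherent(). A hypothetical caller, for orientation:

/* Hypothetical driver-side caller: GFP_KERNEL makes the CMA branch
 * eligible; GFP_ATOMIC would bypass it. */
static int example_alloc_buffer(struct device *dev, void **cpu_addr,
				dma_addr_t *dma_addr)
{
	*cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, dma_addr, GFP_KERNEL);
	return *cpu_addr ? 0 : -ENOMEM;
}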
Example #4
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL)
		gfp |= __GFP_DMA;
	else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		 dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		 dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

	mb();

	return ret;
}
Example #5
static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flags,
					  struct dma_attrs *attrs)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA32;
	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
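
The mask comparison relies on DMA_BIT_MASK(); for reference, the kernel defines it in linux/dma-mapping.h as:

/* All-ones mask of n bits; the n == 64 case is special-cased to
 * avoid an undefined 64-bit shift. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))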
Example #6
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
Example #7
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	if (dev == NULL)
		gfp |= __GFP_DMA32;
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

#ifdef LOONGSON_BIGMEM_DEBUG
	if ((*dma_handle >= 0x90000000) || (*dma_handle < 0x80000000)) {
		printk("+++%s %s: *dma_handle(0x%llx)\n", __FILE__, __func__,
		       (unsigned long long)*dma_handle);
		dump_stack();
	}
#endif

	mb();

	return ret;
}
Example #8
void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags,
					unsigned long attrs)
{
	void *vaddr;

	/*
	 * Don't print a warning when the first allocation attempt fails.
	 * swiotlb_alloc_coherent() will print a warning when the DMA
	 * memory allocation ultimately failed.
	 */
	flags |= __GFP_NOWARN;

	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
					   attrs);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
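
The free side has to determine which allocator actually satisfied the request. A sketch of the conventional x86 counterpart in this era, which probes the handle with is_swiotlb_buffer() before choosing the release path:

void x86_swiotlb_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	/* Buffers carved from the swiotlb pool must go back to it;
	 * anything else came from dma_generic_alloc_coherent(). */
	if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
		swiotlb_free_coherent(dev, size, vaddr, dma_addr);
	else
		dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}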
Example #9
__init int sprd_debug_init(void)
{
	dma_addr_t addr;

	if (!sprd_debug_level.en.kernel_fault)
		return -1;

	sprd_debug_set_build_info();
#ifndef CONFIG_64BIT
	sprd_debug_last_regs_access = (struct sprd_debug_regs_access *)
		dma_alloc_coherent(NULL,
				   sizeof(struct sprd_debug_regs_access) * NR_CPUS,
				   &addr, GFP_KERNEL);
#else
	sprd_debug_last_regs_access = (struct sprd_debug_regs_access *)
		swiotlb_alloc_coherent(NULL,
				       sizeof(struct sprd_debug_regs_access) * NR_CPUS,
				       &addr, GFP_KERNEL);
#endif
	printk("*** %s, size:%u, sprd_debug_last_regs_access:%p *** \n",
		__func__, sizeof(struct sprd_debug_regs_access)*NR_CPUS, sprd_debug_last_regs_access);

	return 0;
}
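
Nothing in the snippet above ever frees the buffer (it lives for the kernel's lifetime), and the dma_addr_t is kept only in a local. If teardown were needed, a symmetric sketch, assuming the init path saved the handle in a hypothetical static:

/* Hypothetical teardown mirroring the #ifdef split above; assumes
 * sprd_debug_init() stored its dma handle here instead of a local. */
static dma_addr_t sprd_debug_regs_dma;

static void sprd_debug_exit(void)
{
	size_t sz = sizeof(struct sprd_debug_regs_access) * NR_CPUS;

#ifndef CONFIG_64BIT
	dma_free_coherent(NULL, sz, sprd_debug_last_regs_access,
			  sprd_debug_regs_dma);
#else
	swiotlb_free_coherent(NULL, sz, sprd_debug_last_regs_access,
			      sprd_debug_regs_dma);
#endif
}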
Example #10
static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t flags,
					    struct dma_attrs *attrs)
{
	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
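
Such a hook only takes effect once it is wired into the architecture's dma_map_ops. A sketch of the typical registration, with field names from the struct dma_map_ops of this kernel era and the remaining callbacks elided (unicore_swiotlb_free_coherent is the assumed free-side counterpart):

struct dma_map_ops swiotlb_dma_map_ops = {
	.alloc		= unicore_swiotlb_alloc_coherent,
	.free		= unicore_swiotlb_free_coherent,	/* assumed counterpart */
	.map_page	= swiotlb_map_page,
	.unmap_page	= swiotlb_unmap_page,
	.dma_supported	= swiotlb_dma_supported,
};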