/*
 * __dma_alloc - allocate a DMA-coherent buffer and map it for CPU access.
 * @dev:    device the buffer is for (may constrain the allocation mask)
 * @size:   requested length in bytes; rounded up to a page multiple
 * @handle: out: bus address of the buffer, or ~0 on failure
 * @gfp:    allocation flags
 * @prot:   page protection for the CPU-side mapping
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure
 * (in which case *handle is left as ~0 and the buffer is released).
 */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	/*
	 * Strip __GFP_COMP: the pages backing this buffer may later be
	 * handed to split_page(), which cannot deal with compound pages.
	 * Compound allocations are not usable for DMA buffers here
	 * (cf. CONFIG_HUGETLB_PAGE).
	 */
	gfp &= ~__GFP_COMP;

	/* Report failure until we actually have a mapped buffer. */
	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	/*
	 * Non-coherent hardware needs a dedicated remapped (uncached/
	 * write-combined) kernel mapping; coherent hardware can use the
	 * existing lowmem linear mapping directly.
	 */
	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);
	else
		__dma_free_buffer(page, size);

	return addr;
}
/*
 * Allocate a DMA buffer and, unless the architecture is cache-coherent,
 * remap it with the requested page protection.  On success the bus
 * address is stored through @handle and the CPU virtual address is
 * returned; on failure *handle holds ~0 and NULL is returned.
 */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *pg;
	void *vaddr;

	/*
	 * Work-around: clear __GFP_COMP so the allocation can never be a
	 * compound page, which split_page() cannot handle.  The flag
	 * arguably should be unsupported on this platform altogether
	 * (see CONFIG_HUGETLB_PAGE).
	 */
	gfp &= ~(__GFP_COMP);

	*handle = ~0;		/* invalid until the mapping succeeds */
	size = PAGE_ALIGN(size);

	pg = __dma_alloc_buffer(dev, size, gfp);
	if (!pg)
		return NULL;

	/* Coherent hardware reuses the linear map; otherwise remap. */
	vaddr = arch_is_coherent() ? page_address(pg)
				   : __dma_alloc_remap(pg, size, gfp, prot);

	if (!vaddr) {
		__dma_free_buffer(pg, size);
		return NULL;
	}

	*handle = pfn_to_dma(dev, page_to_pfn(pg));
	return vaddr;
}
/*
 * cma_map_kernel - create a kernel virtual mapping for a physical region.
 * @phys_addr: physical start of the region; the whole region must lie
 *             inside [mem_start, mem_start + mem_size)
 * @size:      length in bytes; rounded up to a page multiple
 *
 * Returns the kernel virtual address on success, NULL on failure.
 *
 * NOTE(review): assumes mem_start/mem_size describe the reserved CMA
 * pool — their definitions are outside this chunk; confirm.
 */
void *cma_map_kernel(u32 phys_addr, size_t size)
{
	pgprot_t prot = __get_dma_pgprot(NULL, pgprot_kernel);
	struct page *page;
	void *ptr;

	size = PAGE_ALIGN(size);

	/*
	 * Validate the whole [phys_addr, phys_addr + size) range.  The
	 * previous check ignored @size and accepted phys_addr equal to
	 * mem_start + mem_size (one byte past the end).  Written in
	 * subtraction form to avoid overflow in phys_addr + size.
	 */
	if (unlikely(phys_addr < mem_start ||
		     size > mem_size ||
		     phys_addr - mem_start > mem_size - size)) {
		pr_err("%s(%d) err: phys_addr 0x%x invalid!\n",
		       __func__, __LINE__, phys_addr);
		return NULL;
	}

	/* Only translate to a page once the address is known-valid. */
	page = phys_to_page(phys_addr);

	if (PageHighMem(page)) {
		/* Highmem has no permanent kernel mapping: create one. */
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
					__builtin_return_address(0));
		if (!ptr) {
			pr_err("%s(%d) err: __dma_alloc_remap failed!\n",
			       __func__, __LINE__);
			return NULL;
		}
	} else {
		/* Lowmem: retarget the linear mapping's attributes. */
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

	return ptr;
}