static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size),
						 gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
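For context, here is a minimal caller-side sketch (not part of the source above) of how a driver can end up in the two branches shown: an atomic allocation is served from the pool or by alloc_pages, while DMA_ATTR_FORCE_CONTIGUOUS from a blocking context takes the CMA branch. The device name my_dev and the example_alloc() helper are hypothetical; dma_alloc_attrs()/dma_free_attrs() are the real consumer-facing API that dispatches into this path on an IOMMU-backed arm64 device of this era.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_alloc(struct device *my_dev)
{
	dma_addr_t handle;
	void *cpu;

	/* atomic context: no remapping allowed, so the buffer comes from
	 * the atomic pool (non-coherent) or alloc_pages (coherent) */
	cpu = dma_alloc_attrs(my_dev, SZ_64K, &handle, GFP_ATOMIC, 0);
	if (cpu)
		dma_free_attrs(my_dev, SZ_64K, cpu, handle, 0);

	/* blocking context, forced physically contiguous via CMA */
	cpu = dma_alloc_attrs(my_dev, SZ_1M, &handle, GFP_KERNEL,
			      DMA_ATTR_FORCE_CONTIGUOUS);
	if (!cpu)
		return -ENOMEM;
	dma_free_attrs(my_dev, SZ_1M, cpu, handle,
		       DMA_ATTR_FORCE_CONTIGUOUS);
	return 0;
}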
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		     gfp_t flags, unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}
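A short usage sketch, assuming a non-coherent arm64 device without an IOMMU so that dma_alloc_coherent() is backed by arch_dma_alloc() above. The my_dev and example_coherent() names are hypothetical; the point is that the pointer handed back is the writecombine remap of the pages, not the cacheable linear-map alias that was flushed.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <linux/string.h>

static int example_coherent(struct device *my_dev)
{
	dma_addr_t handle;
	/* blocking: dma_direct_alloc_pages() plus the writecombine remap */
	void *buf = dma_alloc_coherent(my_dev, SZ_16K, &handle, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* CPU accesses go through the non-cacheable alias in buf;
	 * the device uses the bus address in handle */
	memset(buf, 0, SZ_16K);
	dma_free_coherent(my_dev, SZ_16K, buf, handle);
	return 0;
}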
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	void *ptr, *coherent_ptr;
	struct page *page;

	size = PAGE_ALIGN(size);

	if (!(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (page) {
			*dma_handle = phys_to_dma(dev, page_to_phys(page));
			ptr = page_address(page);
		} else if (dev_get_cma_priv_area(dev)) {
			pr_err("%s: failed to allocate from dma-contiguous\n",
			       __func__);
			return NULL;
		} else {
			ptr = __dma_alloc_coherent(dev, size, dma_handle,
						   flags, attrs);
		}
	} else {
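This oldest variant predates the unsigned long attrs encoding: callers passed attributes as a struct dma_attrs bitmap. A minimal sketch of that 3.x-era calling convention follows; example_old_api() and my_dev are hypothetical, while DEFINE_DMA_ATTRS(), dma_set_attr(), and the struct dma_attrs flavour of dma_alloc_attrs() are the historical API from <linux/dma-attrs.h>.

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static void *example_old_api(struct device *my_dev, dma_addr_t *handle)
{
	/* declare an on-stack attribute bitmap and set one attribute */
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

	/* blocking allocation, so the pool fast path above is skipped */
	return dma_alloc_attrs(my_dev, SZ_4K, handle, GFP_KERNEL, &attrs);
}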