static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests it can't by
		 * giving us back some translation, that bodes very badly...
		 */
		if (dma != __arm_lpae_dma_addr(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
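/*
 * Unmap and free a table previously obtained from __arm_lpae_alloc_pages().
 * The selftests allocate tables without a real IOMMU device behind them,
 * so the DMA unmap is skipped in that case, mirroring the alloc path.
 */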
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}
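/*
 * Install a PTE, then sync the updated entry towards the device so that a
 * non-coherent table walker observes the new value rather than stale data
 * still sitting in the CPU cache.
 */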
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}