/*
 * Allocate consistent (coherent) DMA memory for a PCI device.
 *
 * Strategy, as implemented below:
 *  1. Round the request up to whole pages and try a plain contiguous
 *     page allocation, pinning it to ZONE_DMA when the device cannot
 *     address the full 32-bit range or when there is neither an IOMMU
 *     nor a software IO-TLB available to remap for it.
 *  2. If the resulting buffer is directly reachable by the device
 *     (below 4GB and no forced remapping), return it as-is.
 *  3. Otherwise either bounce through the swiotlb (no_iommu case) or
 *     map the pages into the GART aperture via the GATT.
 *
 * Returns the CPU virtual address of the buffer and stores the bus
 * address the device should use in *dma_handle; returns NULL on
 * failure.
 *
 * Fix vs. previous version: `size` used to be shifted down to a page
 * count before the alloc_iommu() failure path ran
 * free_pages(..., get_order(size)) — get_order() expects bytes, so
 * multi-page allocations freed too small an order and leaked pages.
 * `size` now stays in bytes; the page count lives in `pages`.
 */
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			   dma_addr_t *dma_handle)
{
	void *memory;
	int gfp = GFP_ATOMIC;	/* may be called from atomic context */
	int i;
	unsigned long iommu_page;
	unsigned long pages;

	/*
	 * Restrict to ZONE_DMA when the device cannot address all of
	 * the low 32 bits, or when there is no IOMMU and no swiotlb to
	 * remap high memory.  hwdev == NULL is treated as a device with
	 * limited addressing.
	 */
	if (hwdev == NULL || hwdev->dma_mask < 0xffffffff || (no_iommu && !swiotlb))
		gfp |= GFP_DMA;

	/*
	 * First try to allocate continuous and use directly if already
	 * in lowmem.
	 */
	size = round_up(size, PAGE_SIZE);
	memory = (void *)__get_free_pages(gfp, get_order(size));
	if (memory == NULL) {
		return NULL;
	} else {
		int high = 0, mmu;

		/* "high": the buffer extends above the 4GB boundary. */
		if (((unsigned long)virt_to_bus(memory) + size) > 0xffffffffUL)
			high = 1;
		mmu = high;
		/*
		 * force_mmu: remap through the IOMMU even for low
		 * buffers, unless the allocation was pinned to ZONE_DMA.
		 */
		if (force_mmu && !(gfp & GFP_DMA))
			mmu = 1;
		if (no_iommu) {
#ifdef CONFIG_SWIOTLB
			/*
			 * No hardware IOMMU: bounce a high buffer
			 * through the software IO-TLB instead.
			 */
			if (swiotlb && high && hwdev) {
				unsigned long dma_mask = 0;

				/*
				 * Temporarily narrow an unlimited mask
				 * to 32 bits so swiotlb picks a
				 * reachable bounce buffer; restore it
				 * afterwards.
				 */
				if (hwdev->dma_mask == ~0UL) {
					hwdev->dma_mask = 0xffffffff;
					dma_mask = ~0UL;
				}
				*dma_handle = swiotlb_map_single(hwdev, memory, size,
								 PCI_DMA_FROMDEVICE);
				if (dma_mask)
					hwdev->dma_mask = dma_mask;

				/* Consistent memory starts out zeroed. */
				memset(phys_to_virt(*dma_handle), 0, size);
				/*
				 * The originally allocated pages are not
				 * used; the caller gets the bounce
				 * buffer's virtual address.
				 */
				free_pages((unsigned long)memory, get_order(size));
				return phys_to_virt(*dma_handle);
			}
#endif
			/*
			 * No IOMMU and no swiotlb: a high buffer is
			 * unreachable by the device.
			 */
			if (high)
				goto error;
			mmu = 0;
		}
		memset(memory, 0, size);
		if (!mmu) {
			/* Directly usable: bus address == physical. */
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	/* Map the buffer through the GART aperture, page by page. */
	pages = size >> PAGE_SHIFT;
	iommu_page = alloc_iommu(pages);
	if (iommu_page == (unsigned long)-1)
		goto error;

	/* Fill in the GATT, allocating pages as needed. */
	for (i = 0; i < pages; i++) {
		unsigned long phys_mem;
		void *mem = memory + i*PAGE_SIZE;

		/*
		 * Extra reference per additional mapped page.
		 * NOTE(review): presumably balanced by the free path —
		 * confirm against pci_free_consistent().
		 */
		if (i > 0)
			atomic_inc(&virt_to_page(mem)->count);
		phys_mem = virt_to_phys(mem);
		BUG_ON(phys_mem & ~PHYSICAL_PAGE_MASK);
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
	}
	flush_gart();

	*dma_handle = iommu_bus_base + (iommu_page << PAGE_SHIFT);
	return memory;

 error:
	/* `size` is still in bytes here, so the full order is freed. */
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
/*
 * Convenience wrapper: map a region identified by its physical address
 * through the swiotlb.  Converts the physical address to a kernel
 * virtual address and delegates to swiotlb_map_single().
 */
static dma_addr_t swiotlb_map_single_phys(struct device *hwdev,
					  phys_addr_t paddr,
					  size_t size, int direction)
{
	void *vaddr = phys_to_virt(paddr);

	return swiotlb_map_single(hwdev, vaddr, size, direction);
}