static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	bus_dmamap_unload(rdev->dmat, gtt->map);

	for (i = 0; i < ttm->num_pages; i++)
		gtt->ttm.dma_address[i] = 0;

	ttm_pool_unpopulate(ttm);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."
	};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
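/*
 * Hedged illustration, not from the vmwgfx sources: a minimal sketch of how a
 * probe path could react to vmw_dma_select_mode(), based only on the kernel-doc
 * above (a -EINVAL return means DMA is not possible and DMA-dependent features
 * should be disabled). The helper name vmw_example_probe_dma() and the field
 * dev_priv->has_dma are hypothetical.
 */
static int vmw_example_probe_dma(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_dma_select_mode(dev_priv);
	if (ret) {
		/* DMA is unavailable; continue without features that need it. */
		dev_priv->has_dma = false;
		return 0;
	}

	dev_priv->has_dma = true;
	return 0;
}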
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
	}
	/* Add ttm page pool to debugfs */
	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
	radeon_mem_types_list[i].driver_features = 0;
	radeon_mem_types_list[i++].data = NULL;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
		radeon_mem_types_list[i].driver_features = 0;
		radeon_mem_types_list[i++].data = NULL;
	}
#endif
#endif
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);

#endif
	return 0;
}
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r, seg;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
#ifdef notyet
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
#endif
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->segs[i].ds_addr = VM_PAGE_TO_PHYS(ttm->pages[i]);
		gtt->segs[i].ds_len = PAGE_SIZE;
	}

	if (bus_dmamap_load_raw(rdev->dmat, gtt->map, gtt->segs,
				ttm->num_pages,
				ttm->num_pages * PAGE_SIZE, 0)) {
		ttm_pool_unpopulate(ttm);
		return -EFAULT;
	}

	for (seg = 0, i = 0; seg < gtt->map->dm_nsegs; seg++) {
		bus_addr_t addr = gtt->map->dm_segs[seg].ds_addr;
		bus_size_t len = gtt->map->dm_segs[seg].ds_len;

		while (len > 0) {
			gtt->ttm.dma_address[i++] = addr;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
	}

	return 0;
}
static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *st;
	struct scatterlist *sg;
	int max_order;
	gfp_t gfp;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;
	return st;

err:
	sg_mark_end(sg);
	internal_free_pages(st);
	return ERR_PTR(-ENOMEM);
}
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
#ifdef DUMBBELL_WIP
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
#endif /* DUMBBELL_WIP */

	if (ttm->state != tt_unpopulated)
		return 0;

#ifdef DUMBBELL_WIP
	/*
	 * Maybe unneeded on FreeBSD.
	 *   -- dumbbell@
	 */
	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}
#endif /* DUMBBELL_WIP */

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
#ifdef DUMBBELL_WIP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif /* DUMBBELL_WIP */
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS(ttm->pages[i]);
#ifdef DUMBBELL_WIP
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
#endif /* DUMBBELL_WIP */
	}
	return 0;
}
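/*
 * Minimal sketch, not taken from any of the drivers above: the SWIOTLB check
 * that the populate paths share, written out on its own. It assumes the same
 * era of TTM helpers (ttm_dma_populate()/ttm_pool_populate()) and a caller that
 * embeds its struct ttm_tt inside a struct ttm_dma_tt, as the radeon wrappers
 * above do. The function name example_tt_populate() is hypothetical.
 */
static int example_tt_populate(struct ttm_dma_tt *dma_ttm, struct device *dev)
{
#ifdef CONFIG_SWIOTLB
	/* With a software IOTLB present, let the DMA-aware page pool
	 * allocate and map the pages in one step. */
	if (swiotlb_nr_tbl())
		return ttm_dma_populate(dma_ttm, dev);
#endif
	/* Otherwise take plain pages from the pool; the driver then maps
	 * them itself (pci_map_page() or bus_dmamap_load_raw() above). */
	return ttm_pool_populate(&dma_ttm->ttm);
}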