int
ttm_bus_dma_populate(struct ttm_dma_tt *ttm_dma)
{
	int ret;

	/* If it's already populated, nothing to do.  */
	if (ttm_dma->ttm.state != tt_unpopulated)
		return 0;

	/* Wire the pages, allocating them if necessary.  */
	ret = ttm_tt_swapin(&ttm_dma->ttm);
	if (ret)
		goto fail0;

	/* Load the DMA map.  */
	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load_pglist(ttm_dma->ttm.bdev->dmat,
	    ttm_dma->dma_address, &ttm_dma->ttm.pglist,
	    (ttm_dma->ttm.num_pages << PAGE_SHIFT), BUS_DMA_NOWAIT);
	if (ret)
		goto fail1;

	/* Success!  */
	ttm_dma->ttm.state = tt_unbound;
	return 0;

fail2: __unused
	bus_dmamap_unload(ttm_dma->ttm.bdev->dmat, ttm_dma->dma_address);
fail1:	ttm_tt_swapout(&ttm_dma->ttm, NULL);
fail0:	KASSERT(ret);
	return ret;
}
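As a usage illustration, here is a minimal sketch of how a driver could wire this into its TTM backend. The mydrv_ttm_tt_populate name is hypothetical, not part of the TTM API; the container_of recovery assumes, as in the code above, that the ttm_tt is embedded as the ttm member of struct ttm_dma_tt.

/* Hypothetical driver populate hook for a ttm_dma_tt-based backend. */
static int
mydrv_ttm_tt_populate(struct ttm_tt *ttm)
{
	/* Recover the containing ttm_dma_tt from its embedded ttm_tt. */
	struct ttm_dma_tt *const ttm_dma =
	    container_of(ttm, struct ttm_dma_tt, ttm);

	/* Wire the pages and load the DMA map as above. */
	return ttm_bus_dma_populate(ttm_dma);
}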
void
ttm_bus_dma_unpopulate(struct ttm_dma_tt *ttm_dma)
{
	struct uvm_object *const uobj = ttm_dma->ttm.swap_storage;
	const size_t size = (ttm_dma->ttm.num_pages << PAGE_SHIFT);

	/* Unload the DMA map.  */
	bus_dmamap_unload(ttm_dma->ttm.bdev->dmat, ttm_dma->dma_address);

	/* Unwire the pages.  */
	ttm_tt_swapout(&ttm_dma->ttm, NULL);

	/* We are using uvm_aobj, which had better have a pgo_put.  */
	KASSERT(uobj->pgops->pgo_put);

	/* Release the pages.  */
	mutex_enter(uobj->vmobjlock);
	(void)(*uobj->pgops->pgo_put)(uobj, 0, size, (PGO_CLEANIT | PGO_FREE));
	/* pgo_put unlocks uobj->vmobjlock.  */
}
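The matching unpopulate hook would be symmetric. Again, mydrv_ttm_tt_unpopulate is a hypothetical name used only for illustration:

/* Hypothetical driver unpopulate hook, pairing with the populate sketch. */
static void
mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *const ttm_dma =
	    container_of(ttm, struct ttm_dma_tt, ttm);

	/* Unload the DMA map, unwire, and release the pages as above. */
	ttm_bus_dma_unpopulate(ttm_dma);
}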
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			/* Re-take the LRU lock before retrying the loop. */
			spin_lock(&glob->lru_lock);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */
		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */
	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
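For reference, a sketch of how ttm_bo_swapout is typically installed in this era of TTM: ttm_bo_global_init hooks it up as the do_shrink callback of the global's ttm_mem_shrink and registers it with the memory-accounting global, which then invokes it whenever system memory must be freed. Error handling is elided here, so treat this as an outline rather than the exact upstream code.

/* Sketch, following ttm_bo_global_init: install ttm_bo_swapout as the
 * shrink callback and register it with the memory-accounting global. */
ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
/* On failure, ttm_bo_global_init unwinds its earlier initialization. */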