static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->push.buffer;
	struct nouveau_vma *vma;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
	u64 offset;

	vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
	BUG_ON(!vma);
	offset = vma->offset + delta;

	BUG_ON(chan->dma.ib_free < 1);

	/* Write the push buffer segment's GPU virtual address and length
	 * into the next free IB slot (two 32-bit words per entry). */
	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	mb();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	/* Bump the hardware PUT pointer so the GPU sees the new entry. */
	nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}
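/*
 * For illustration only: each IB (indirect buffer) slot written above holds
 * two 32-bit words describing one push buffer segment -- the low half of its
 * GPU virtual address, then the high half OR'd with the segment length
 * shifted into bits 8 and up.  A hedged, minimal sketch of that encoding,
 * mirroring the two nouveau_bo_wr32() calls; ib_entry_pack() is a
 * hypothetical helper, not part of the driver:
 */
static inline void
ib_entry_pack(u32 *slot, u64 gpuaddr, int length)
{
	slot[0] = lower_32_bits(gpuaddr);		/* VA, low word */
	slot[1] = upper_32_bits(gpuaddr) | length << 8;	/* VA high | size */
}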
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			/* pm_runtime_get_sync() raises the usage count even
			 * on failure, so drop the reference unconditionally
			 * rather than only on the success path. */
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
/* Fill in the GEM_INFO reply for userspace: placement domain, size, GPU
 * virtual address (when the client has a VM), mmap offset and tiling state. */
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nvkm_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
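/*
 * For illustration only: userspace reaches nouveau_gem_info() through the
 * DRM_NOUVEAU_GEM_INFO ioctl.  A hedged libdrm sketch, assuming an open DRM
 * fd and a valid GEM handle; query_bo() is a hypothetical helper, and the
 * nouveau_drm.h include path may differ between setups:
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/nouveau_drm.h>

static int
query_bo(int fd, uint32_t handle, struct drm_nouveau_gem_info *info)
{
	memset(info, 0, sizeof(*info));
	info->handle = handle;
	/* On success the kernel fills in domain, size, offset, map_handle,
	 * tile_mode and tile_flags. */
	return drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_INFO,
				   info, sizeof(*info));
}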
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			/* get_sync() raises the usage count even on failure;
			 * drop it, and free the vma we never mapped. */
			pm_runtime_put_autosuspend(dev);
			kfree(vma);
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		/* First handle for this BO in the client's VM: allocate and
		 * map a new virtual address range. */
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		/* Already mapped for this client; just take a reference. */
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
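/*
 * For illustration only: the open/close hooks above are wired into the DRM
 * core through the driver structure, so they run each time a GEM handle is
 * created in or dropped from a file's handle table.  A hedged sketch of the
 * relevant fields for kernels of this era; driver_nouveau is a hypothetical
 * name and only the GEM-related pieces are shown:
 */
static struct drm_driver driver_nouveau = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	/* Invoked when a handle is created (GEM_OPEN, flink, etc.). */
	.gem_open_object = nouveau_gem_object_open,
	/* Invoked on GEM_CLOSE and when the file is released. */
	.gem_close_object = nouveau_gem_object_close,
	/* ... fops, ioctl table and remaining callbacks elided ... */
};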