static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			/* last handle from this client: wake the device so
			 * the unmap can touch the GPU VM, then drop it */
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vma *vma;
	int ret;

	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES)
		goto out;

	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nvkm_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
					 struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
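/*
 * Hedged sketch, not from the source file: in kernels of this vintage the
 * exporter above was typically wired into the driver's PRIME hooks through
 * struct drm_driver.  The field names below follow the old (pre-4.13)
 * drm_driver layout and are an assumption about the surrounding
 * nouveau_drm.c code, not a verbatim excerpt:
 *
 *	static struct drm_driver driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME | ...,
 *		.gem_prime_export	= nouveau_gem_prime_export,
 *		.gem_prime_import	= nouveau_gem_prime_import,
 *		...
 *	};
 */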
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->offset = nvbo->bo.offset;
	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			/* get_sync raised the usage count even on failure;
			 * balance it and don't leak the fresh vma */
			pm_runtime_put_autosuspend(dev);
			kfree(vma);
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

#ifndef __NetBSD__		/* XXX drm prime */
	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);
#endif

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
#ifdef __NetBSD__		/* XXX Whattakludge! */
	gem->gemo_shm_uao = NULL;
#else
	gem->filp = NULL;
#endif
	ttm_bo_unref(&bo);
}
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}
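/*
 * Hedged usage sketch, not from the source: the pushbuf validate path is
 * the expected caller of the helper above, passing through the domain
 * masks userspace supplied in struct drm_nouveau_gem_pushbuf_bo so TTM can
 * steer the buffer toward a preferred placement before submission:
 *
 *	ret = nouveau_gem_set_domain(gem, b->read_domains,
 *				     b->write_domains, b->valid_domains);
 *
 * Here 'b' stands for one pushbuf_bo entry; the exact call site is an
 * assumption about the surrounding validate code.
 */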
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
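/*
 * Hedged sketch, not from the source: the open/close/del hooks in this
 * file are invoked by the DRM core whenever a client creates or destroys
 * a handle to the object, wired up through struct drm_driver.  The field
 * names below follow the pre-5.x drm_driver layout and are an assumption
 * about the surrounding driver code:
 *
 *	static struct drm_driver driver = {
 *		.gem_open_object	= nouveau_gem_object_open,
 *		.gem_close_object	= nouveau_gem_object_close,
 *		.gem_free_object	= nouveau_gem_object_del,
 *		...
 *	};
 */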
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, &cli->vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}
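/*
 * Hedged sketch, not from the source: nouveau_gem_info() is reached
 * through an ioctl wrapper that looks up the handle, fills the reply, and
 * drops the lookup reference.  nouveau_gem_ioctl_info() does exist in the
 * driver, but this body is an illustration against the modern DRM helper
 * signatures, not the driver's verbatim code.
 */
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);	/* drop the lookup reference */
	return ret;
}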