Example #1
0
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
    /* Drop one pin reference on @nvbo.  When the last reference goes
     * away, re-validate the buffer with no forced placement flags and
     * credit its size back to the aperture accounting for whichever
     * memory type it currently occupies.
     *
     * Returns 0 on success or a negative errno from reserve/validate.
     */
    struct ttm_buffer_object *bo = &nvbo->bo;
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    int ret;

    /* Other pin references remain: nothing to do yet.  NOTE(review):
     * an unbalanced unpin would drive the count negative — callers are
     * trusted to keep pin/unpin paired. */
    if (--nvbo->pin_refcnt != 0)
        return 0;

    ret = ttm_bo_reserve(bo, false, false, false, 0);
    if (ret != 0)
        return ret;

    /* Clear any placement restriction so the bo may move freely. */
    nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

    ret = nouveau_bo_validate(nvbo, false, false, false);
    if (!ret) {
        /* Return the buffer's size to the matching aperture pool. */
        if (bo->mem.mem_type == TTM_PL_VRAM)
            dev_priv->fb_aper_free += bo->mem.size;
        else if (bo->mem.mem_type == TTM_PL_TT)
            dev_priv->gart_info.aper_free += bo->mem.size;
    }

    ttm_bo_unreserve(bo);
    return ret;
}
Example #2
0
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
    /* Fault-time hook: ensure a buffer that is about to be CPU-mapped
     * ends up inside the BAR-mappable region of VRAM when necessary.
     *
     * Returns 0 when no move is needed, otherwise the result of
     * re-validating the buffer into mappable VRAM.
     */
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);

    /* A non-VRAM buffer only needs handling on NV50+ cards when it
     * carries a tiling layout; anything else maps as-is. */
    if (bo->mem.mem_type != TTM_PL_VRAM &&
        (dev_priv->card_type < NV_50 || !nouveau_bo_tile_layout(nvbo)))
        return 0;

    /* Already entirely within the mappable window? */
    if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
        return 0;

    /* Constrain placement to the mappable part of VRAM and move the
     * buffer there (interruptible validate). */
    nvbo->placement.fpfn = 0;
    nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
    nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
    return nouveau_bo_validate(nvbo, false, true, false);
}
Example #3
0
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
    /* Tell TTM where this buffer may be evicted to: VRAM contents are
     * pushed to GART (with system memory as fallback), everything else
     * goes straight to system memory.  The chosen placement is copied
     * out through @pl.
     */
    struct nouveau_bo *nvbo = nouveau_bo(bo);

    if (bo->mem.mem_type == TTM_PL_VRAM)
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                 TTM_PL_FLAG_SYSTEM);
    else
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);

    *pl = nvbo->placement;
}
Example #4
0
/* Allocate and initialise a new nouveau buffer object.
 *
 * @dev:        owning DRM device.
 * @chan:       optional channel; stashed on the bo only for the duration
 *              of ttm_bo_init() (presumably to accelerate the initial
 *              placement — confirm against nouveau_bo_move callers).
 * @size:       requested size in bytes; adjusted by fixup_align().
 * @align:      requested alignment in bytes; converted to pages below.
 * @flags:      TTM placement flags for the initial placement.
 * @tile_mode:  hardware tiling mode, stored on the bo.
 * @tile_flags: hardware tiling flags, stored on the bo.
 * @no_vm:      when true, skip allocating a channel-VM mapping.
 * @mappable:   caller's requirement that the bo be CPU-mappable.
 * @pnvbo:      out parameter; receives the new object on success.
 *
 * Returns 0 on success or a negative errno.  Ownership note: once
 * ttm_bo_init() has been called, TTM owns the object and destroys it
 * through nouveau_bo_del_ttm() on failure — no kfree() here on that path.
 */
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_bo *nvbo;
    int ret = 0, page_shift = 0;

    nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
    if (!nvbo)
        return -ENOMEM;
    INIT_LIST_HEAD(&nvbo->head);
    INIT_LIST_HEAD(&nvbo->entry);
    nvbo->mappable = mappable;
    nvbo->no_vm = no_vm;
    nvbo->tile_mode = tile_mode;
    nvbo->tile_flags = tile_flags;
    nvbo->bo.bdev = &dev_priv->ttm.bdev;

    /* Adjust size/align/page_shift for tiling and hardware rules. */
    nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
    align >>= PAGE_SHIFT;

    /* Reserve GPU virtual address space in the channel VM, unless the
     * caller opted out or no channel VM exists. */
    if (!nvbo->no_vm && dev_priv->chan_vm) {
        ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                             NV_MEM_ACCESS_RW, &nvbo->vma);
        if (ret) {
            /* ttm_bo_init() not reached yet, so we still own nvbo. */
            kfree(nvbo);
            return ret;
        }
    }

    nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
    nouveau_bo_placement_set(nvbo, flags, 0);

    /* Channel pointer is only valid across ttm_bo_init(). */
    nvbo->channel = chan;
    ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                      ttm_bo_type_device, &nvbo->placement, align, 0,
                      false, NULL, size, nouveau_bo_del_ttm);
    if (ret) {
        /* ttm will call nouveau_bo_del_ttm if it fails.. */
        return ret;
    }
    nvbo->channel = NULL;

    /* If a VM mapping was created and the bo landed in VRAM, report
     * the GPU virtual address as the bo offset. */
    if (nvbo->vma.node) {
        if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
            nvbo->bo.offset = nvbo->vma.offset;
    }

    *pnvbo = nvbo;
    return 0;
}
Example #5
0
/* Pin @nvbo into the memory type(s) described by @memtype.
 *
 * Takes a pin reference; only the first reference actually moves the
 * buffer and debits the relevant aperture accounting.  A request to pin
 * into a different memory type while references are outstanding fails
 * with -EINVAL.  Returns 0 on success or a negative errno; on failure
 * the reference taken here is dropped again.
 */
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
    struct ttm_buffer_object *bo = &nvbo->bo;
    int ret;

    /* A bo can only be pinned in one memory type at a time. */
    if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
        /* Use the dev_priv computed above rather than re-deriving it
         * from bo->bdev a second time. */
        NV_ERROR(dev_priv->dev,
                 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                 1 << bo->mem.mem_type, memtype);
        return -EINVAL;
    }

    /* Already pinned: just take another reference. */
    if (nvbo->pin_refcnt++)
        return 0;

    ret = ttm_bo_reserve(bo, false, false, false, 0);
    if (ret)
        goto out;

    /* Restrict placement to the requested type and move the buffer. */
    nouveau_bo_placement_set(nvbo, memtype, 0);

    ret = nouveau_bo_validate(nvbo, false, false, false);
    if (ret == 0) {
        /* Debit the pinned size from the matching aperture pool. */
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
            dev_priv->fb_aper_free -= bo->mem.size;
            break;
        case TTM_PL_TT:
            dev_priv->gart_info.aper_free -= bo->mem.size;
            break;
        default:
            break;
        }
    }
    ttm_bo_unreserve(bo);
out:
    /* Drop the reference taken above if anything failed. */
    if (unlikely(ret))
        nvbo->pin_refcnt--;
    return ret;
}
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	/* Translate the GEM domains requested by userspace into a TTM
	 * placement for the backing bo, preferring wherever the buffer
	 * already resides to avoid a needless migration.
	 *
	 * Returns 0 on success, -EINVAL when no requested domain is valid.
	 */
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t pref_flags = 0;
	uint32_t valid_flags = 0;
	uint32_t domains;

	/* Write domains take priority over read domains when both given. */
	domains = valid_domains & nvbo->valid_domains &
		  (write_domains ? write_domains : read_domains);
	if (domains == 0)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;
	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	/* Prefer the buffer's current location when it is an allowed
	 * domain; otherwise fall back to VRAM, then GART. */
	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags = TTM_PL_FLAG_VRAM;
	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags = TTM_PL_FLAG_TT;
	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags = TTM_PL_FLAG_VRAM;
	else
		pref_flags = TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
	return 0;
}