Example #1
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

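	/* Wake the device for the teardown; -EACCES (runtime PM disabled)
	 * is tolerated, anything else is a bug.
	 */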
	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
Example #2
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

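	/* Drop this client's reference on its mapping; the final unmap
	 * requires the device to be awake.
	 */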
	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
Example #3
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
    struct ttm_buffer_object *bo = &nvbo->bo;
    int ret;

    if (--nvbo->pin_refcnt)
        return 0;

    ret = ttm_bo_reserve(bo, false, false, false, 0);
    if (ret)
        return ret;

    nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

    ret = nouveau_bo_validate(nvbo, false, false, false);
    if (ret == 0) {
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
            dev_priv->fb_aper_free += bo->mem.size;
            break;
        case TTM_PL_TT:
            dev_priv->gart_info.aper_free += bo->mem.size;
            break;
        default:
            break;
        }
    }

    ttm_bo_unreserve(bo);
    return ret;
}
Example #4
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct drm_device *dev = dev_priv->dev;
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    uint64_t offset;

    if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
        /* Nothing to do. */
        *new_tile = NULL;
        return 0;
    }

    offset = new_mem->start << PAGE_SHIFT;

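    /* Cards with a channel VM map the BO into it; older NV10+ cards
     * use tile regions instead.
     */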
    if (dev_priv->chan_vm) {
        nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
    } else if (dev_priv->card_type >= NV_10) {
        *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                        nvbo->tile_mode,
                                        nvbo->tile_flags);
    }

    return 0;
}
Example #5
File: nouveau_bo.c  Project: Lyude/linux
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

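	/* Pre-Tesla chips need chipset-specific alignment for tiled buffers;
	 * Tesla and newer only align to the BO's GPU page size.
	 */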
	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 <<  nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}
Example #6
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    struct nouveau_channel *chan;
    int ret;

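    /* Prefer the BO's own channel; otherwise fall back to the kernel
     * channel, which must be locked explicitly.
     */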
    chan = nvbo->channel;
    if (!chan || nvbo->no_vm) {
        chan = dev_priv->channel;
        mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
    }

    if (dev_priv->card_type < NV_50)
        ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
    else if (dev_priv->card_type < NV_C0)
        ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
    else
        ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
    if (ret == 0) {
        ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                            no_wait_reserve,
                                            no_wait_gpu, new_mem);
    }

    if (chan == dev_priv->channel)
        mutex_unlock(&chan->mutex);
    return ret;
}
Example #7
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vma *vma;
	int ret;

	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

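	/* Keep the device awake while the new VMA is set up. */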
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES)
		goto out;

	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
Example #8
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);

    /* as long as the bo isn't in vram, and isn't tiled, we've got
     * nothing to do here.
     */
    if (bo->mem.mem_type != TTM_PL_VRAM) {
        if (dev_priv->card_type < NV_50 ||
                !nouveau_bo_tile_layout(nvbo))
            return 0;
    }

    /* make sure bo is in mappable vram */
    if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
        return 0;

    nvbo->placement.fpfn = 0;
    nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
    nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
    return nouveau_bo_validate(nvbo, false, true, false);
}
Example #9
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
		/*
		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
		 * to free nvbe here.
		 */
		return NULL;
	return &nvbe->ttm.ttm;
}
Example #10
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
    struct drm_device *dev = dev_priv->dev;

    switch (type) {
    case TTM_PL_SYSTEM:
        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
        break;
    case TTM_PL_VRAM:
        if (dev_priv->card_type >= NV_50) {
            man->func = &nouveau_vram_manager;
            man->io_reserve_fastpath = false;
            man->use_io_reserve_lru = true;
        } else {
            man->func = &ttm_bo_manager_func;
        }
        man->flags = TTM_MEMTYPE_FLAG_FIXED |
                     TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                 TTM_PL_FLAG_WC;
        man->default_caching = TTM_PL_FLAG_WC;
        break;
    case TTM_PL_TT:
        man->func = &ttm_bo_manager_func;
        switch (dev_priv->gart_info.type) {
        case NOUVEAU_GART_AGP:
            man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
            man->available_caching = TTM_PL_FLAG_UNCACHED |
                                     TTM_PL_FLAG_WC;
            man->default_caching = TTM_PL_FLAG_WC;
            break;
        case NOUVEAU_GART_SGDMA:
            man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                         TTM_MEMTYPE_FLAG_CMA;
            man->available_caching = TTM_PL_MASK_CACHING;
            man->default_caching = TTM_PL_FLAG_CACHED;
            man->gpu_offset = dev_priv->gart_info.aper_base;
            break;
        default:
            NV_ERROR(dev, "Unknown GART type: %d\n",
                     dev_priv->gart_info.type);
            return -EINVAL;
        }
        break;
    default:
        NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
        return -EINVAL;
    }
    return 0;
}
Example #11
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
    struct ttm_buffer_object *bo = &nvbo->bo;
    int ret;

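    /* A BO already pinned into one memory type cannot be pinned into
     * another.
     */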
    if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
        NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                 1 << bo->mem.mem_type, memtype);
        return -EINVAL;
    }

    if (nvbo->pin_refcnt++)
        return 0;

    ret = ttm_bo_reserve(bo, false, false, false, 0);
    if (ret)
        goto out;

    nouveau_bo_placement_set(nvbo, memtype, 0);

    ret = nouveau_bo_validate(nvbo, false, false, false);
    if (ret == 0) {
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
            dev_priv->fb_aper_free -= bo->mem.size;
            break;
        case TTM_PL_TT:
            dev_priv->gart_info.aper_free -= bo->mem.size;
            break;
        default:
            break;
        }
    }
    ttm_bo_unreserve(bo);
out:
    if (unlikely(ret))
        nvbo->pin_refcnt--;
    return ret;
}
Example #12
File: nouveau_bo.c  Project: Lyude/linux
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}
Example #13
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    struct ttm_mem_reg *old_mem = &bo->mem;
    struct nouveau_tile_reg *new_tile = NULL;
    int ret = 0;

    ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
    if (ret)
        return ret;

    /* Fake bo copy. */
    if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
        BUG_ON(bo->mem.mm_node != NULL);
        bo->mem = *new_mem;
        new_mem->mm_node = NULL;
        goto out;
    }

    /* Software copy if the card isn't up and running yet. */
    if (!dev_priv->channel) {
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        goto out;
    }

    /* Hardware assisted copy. */
    if (new_mem->mem_type == TTM_PL_SYSTEM)
        ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
    else if (old_mem->mem_type == TTM_PL_SYSTEM)
        ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
    else
        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

    if (!ret)
        goto out;

    /* Fallback to software copy. */
    ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
    if (ret)
        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
    else
        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

    return ret;
}
Example #14
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    u64 src_offset = old_mem->start << PAGE_SHIFT;
    u64 dst_offset = new_mem->start << PAGE_SHIFT;
    u32 page_count = new_mem->num_pages;
    int ret;

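    /* With per-BO virtual memory, VRAM is addressed through the BO's VMA;
     * GART addresses are relative to the aperture base.
     */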
    if (!nvbo->no_vm) {
        if (old_mem->mem_type == TTM_PL_VRAM)
            src_offset  = nvbo->vma.offset;
        else
            src_offset += dev_priv->gart_info.aper_base;

        if (new_mem->mem_type == TTM_PL_VRAM)
            dst_offset  = nvbo->vma.offset;
        else
            dst_offset += dev_priv->gart_info.aper_base;
    }

    while (page_count) {
        int line_count = (page_count > 2047) ? 2047 : page_count;

        ret = RING_SPACE(chan, 12);
        if (ret)
            return ret;

        BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
        OUT_RING  (chan, upper_32_bits(dst_offset));
        OUT_RING  (chan, lower_32_bits(dst_offset));
        BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
        OUT_RING  (chan, upper_32_bits(src_offset));
        OUT_RING  (chan, lower_32_bits(src_offset));
        OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
        OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
        OUT_RING  (chan, PAGE_SIZE); /* line_length */
        OUT_RING  (chan, line_count);
        BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
        OUT_RING  (chan, 0x00100110);

        page_count -= line_count;
        src_offset += (PAGE_SIZE * line_count);
        dst_offset += (PAGE_SIZE * line_count);
    }

    return 0;
}
Example #15
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct drm_device *dev = dev_priv->dev;
    struct nouveau_bo *nvbo = nouveau_bo(bo);

    if (unlikely(nvbo->gem))
        DRM_ERROR("bo %p still attached to GEM object\n", bo);

    nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
    nouveau_vm_put(&nvbo->vma);
    kfree(nvbo);
}
Example #16
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct drm_device *dev = dev_priv->dev;

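    /* Tile regions exist only on NV10 through pre-NV50 cards; release the
     * old region once the BO's current fence has signalled.
     */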
    if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
        nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
        *old_tile = new_tile;
    }
}
Example #17
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
    struct nouveau_vram *vram = mem->mm_node;

    if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
        return;

    if (!vram->bar_vma.node)
        return;

    nouveau_vm_unmap(&vram->bar_vma);
    nouveau_vm_put(&vram->bar_vma);
}
Example #18
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			kfree(vma);
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
Example #19
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}
Example #20
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
                       int *page_shift)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

    if (dev_priv->card_type < NV_50) {
        if (nvbo->tile_mode) {
            if (dev_priv->chipset >= 0x40) {
                *align = 65536;
                *size = roundup(*size, 64 * nvbo->tile_mode);

            } else if (dev_priv->chipset >= 0x30) {
                *align = 32768;
                *size = roundup(*size, 64 * nvbo->tile_mode);

            } else if (dev_priv->chipset >= 0x20) {
                *align = 16384;
                *size = roundup(*size, 64 * nvbo->tile_mode);

            } else if (dev_priv->chipset >= 0x10) {
                *align = 16384;
                *size = roundup(*size, 32 * nvbo->tile_mode);
            }
        }
    } else {
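        /* NV50+: choose large or small GPU pages by buffer size when a
         * channel VM exists, otherwise fall back to 4 KiB pages (shift 12).
         */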
        if (likely(dev_priv->chan_vm)) {
            if (*size > 256 * 1024)
                *page_shift = dev_priv->chan_vm->lpg_shift;
            else
                *page_shift = dev_priv->chan_vm->spg_shift;
        } else {
            *page_shift = 12;
        }

        *size = roundup(*size, (1 << *page_shift));
        *align = max((1 << *page_shift), *align);
    }

    *size = roundup(*size, PAGE_SIZE);
}
Example #21
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
    struct drm_device *dev = dev_priv->dev;

    switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
    case NOUVEAU_GART_AGP:
        return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
    case NOUVEAU_GART_SGDMA:
        return nouveau_sgdma_init_ttm(dev);
    default:
        NV_ERROR(dev, "Unknown GART type %d\n",
                 dev_priv->gart_info.type);
        break;
    }

    return NULL;
}
Example #22
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	if (nv_device(drm->device)->card_type < NV_50)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

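	/* As in the newer variant (Example #9), a failing ttm_dma_tt_init()
	 * tears the TT down through our destroy hook, so nvbe is not freed
	 * here.
	 */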
	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
		return NULL;
	return &nvbe->ttm.ttm;
}
Example #23
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
    int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

    if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 2) {
        /*
         * Make sure that the color and depth buffers are handled
         * by independent memory controller units. Up to a 9x
         * speed up when alpha-blending and depth-test are enabled
         * at the same time.
         */
        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
            nvbo->placement.fpfn = vram_pages / 2;
            nvbo->placement.lpfn = ~0;
        } else {
            nvbo->placement.fpfn = 0;
            nvbo->placement.lpfn = vram_pages / 2;
        }
    }
}
Example #24
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
    struct drm_device *dev = dev_priv->dev;
    int ret;

    mem->bus.addr = NULL;
    mem->bus.offset = 0;
    mem->bus.size = mem->num_pages << PAGE_SHIFT;
    mem->bus.base = 0;
    mem->bus.is_iomem = false;
    if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
        return -EINVAL;
    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:
        /* System memory */
        return 0;
    case TTM_PL_TT:
#if __OS_HAS_AGP
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
            mem->bus.offset = mem->start << PAGE_SHIFT;
            mem->bus.base = dev_priv->gart_info.aper_base;
            mem->bus.is_iomem = true;
        }
#endif
        break;
    case TTM_PL_VRAM:
    {
        struct nouveau_vram *vram = mem->mm_node;
        u8 page_shift;

        if (!dev_priv->bar1_vm) {
            mem->bus.offset = mem->start << PAGE_SHIFT;
            mem->bus.base = pci_resource_start(dev->pdev, 1);
            mem->bus.is_iomem = true;
            break;
        }

        if (dev_priv->card_type == NV_C0)
            page_shift = vram->page_shift;
        else
            page_shift = 12;

        ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                             page_shift, NV_MEM_ACCESS_RW,
                             &vram->bar_vma);
        if (ret)
            return ret;

        /* nouveau_vm_map() cannot fail, and ret is still zero here. */
        nouveau_vm_map(&vram->bar_vma, vram);

        mem->bus.offset = vram->bar_vma.offset;
        if (dev_priv->card_type == NV_50) /*XXX*/
            mem->bus.offset -= 0x0020000000ULL;
        mem->bus.base = pci_resource_start(dev->pdev, 1);
        mem->bus.is_iomem = true;
    }
    break;
    default:
        return -EINVAL;
    }
    return 0;
}
Example #25
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    u64 length = (new_mem->num_pages << PAGE_SHIFT);
    u64 src_offset, dst_offset;
    int ret;

    src_offset = old_mem->start << PAGE_SHIFT;
    dst_offset = new_mem->start << PAGE_SHIFT;
    if (!nvbo->no_vm) {
        if (old_mem->mem_type == TTM_PL_VRAM)
            src_offset  = nvbo->vma.offset;
        else
            src_offset += dev_priv->gart_info.aper_base;

        if (new_mem->mem_type == TTM_PL_VRAM)
            dst_offset  = nvbo->vma.offset;
        else
            dst_offset += dev_priv->gart_info.aper_base;
    }

    ret = RING_SPACE(chan, 3);
    if (ret)
        return ret;

    BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

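    /* Copy at most 4 MiB per pass, presented to the M2MF engine as
     * height lines of a 64-byte stride.
     */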
    while (length) {
        u32 amount, stride, height;

        amount  = min(length, (u64)(4 * 1024 * 1024));
        stride  = 16 * 4;
        height  = amount / stride;

        if (new_mem->mem_type == TTM_PL_VRAM &&
                nouveau_bo_tile_layout(nvbo)) {
            ret = RING_SPACE(chan, 8);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, stride);
            OUT_RING  (chan, height);
            OUT_RING  (chan, 1);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
        } else {
            ret = RING_SPACE(chan, 2);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
            OUT_RING  (chan, 1);
        }
        if (old_mem->mem_type == TTM_PL_VRAM &&
                nouveau_bo_tile_layout(nvbo)) {
            ret = RING_SPACE(chan, 8);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, stride);
            OUT_RING  (chan, height);
            OUT_RING  (chan, 1);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
        } else {
            ret = RING_SPACE(chan, 2);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
            OUT_RING  (chan, 1);
        }

        ret = RING_SPACE(chan, 14);
        if (ret)
            return ret;

        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
        OUT_RING  (chan, upper_32_bits(src_offset));
        OUT_RING  (chan, upper_32_bits(dst_offset));
        BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
        OUT_RING  (chan, lower_32_bits(src_offset));
        OUT_RING  (chan, lower_32_bits(dst_offset));
        OUT_RING  (chan, stride);
        OUT_RING  (chan, stride);
        OUT_RING  (chan, stride);
        OUT_RING  (chan, height);
        OUT_RING  (chan, 0x00000101);
        OUT_RING  (chan, 0x00000000);
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
        OUT_RING  (chan, 0);

        length -= amount;
        src_offset += amount;
        dst_offset += amount;
    }

    return 0;
}