static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* Drop any CPU mapping the driver still holds on this BO. */
	ttm_bo_kunmap(&nvbo->kmap);

	/* Any GEM object should have been detached by now; a remaining
	 * reference indicates a leak.
	 */
	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	/* Release any tiling region still held by this BO. */
	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	/* Unlink the BO from the device-wide list before freeing it. */
	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}
Example #2
static int
nv50_pre_pipebuffer_map(struct pipe_screen *pscreen, struct pipe_buffer *pb,
	unsigned usage)
{
	struct nv50_screen *screen = nv50_screen(pscreen);
	struct nv50_context *ctx = screen->cur_ctx;

	/* Only vertex buffers can be part of the current state. */
	if (!(pb->usage & PIPE_BUFFER_USAGE_VERTEX))
		return 0;

	/* Our vtxbuf got mapped; it can no longer be considered part of the
	 * current state, so remove it to avoid emitting reloc markers.
	 */
	if (ctx && ctx->state.vtxbuf && so_bo_is_reloc(ctx->state.vtxbuf,
			nouveau_bo(pb))) {
		so_ref(NULL, &ctx->state.vtxbuf);
		ctx->dirty |= NV50_NEW_ARRAYS;
	}

	return 0;
}
Example #3
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    u64 length = (new_mem->num_pages << PAGE_SHIFT);
    u64 src_offset, dst_offset;
    int ret;

    src_offset = old_mem->start << PAGE_SHIFT;
    dst_offset = new_mem->start << PAGE_SHIFT;
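    /* When the BO is mapped through a per-channel VM, use its VM offset
     * for VRAM and an aperture-relative address for GART.
     */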
    if (!nvbo->no_vm) {
        if (old_mem->mem_type == TTM_PL_VRAM)
            src_offset  = nvbo->vma.offset;
        else
            src_offset += dev_priv->gart_info.aper_base;

        if (new_mem->mem_type == TTM_PL_VRAM)
            dst_offset  = nvbo->vma.offset;
        else
            dst_offset += dev_priv->gart_info.aper_base;
    }

    ret = RING_SPACE(chan, 3);
    if (ret)
        return ret;

    /* Bind the source and destination DMA objects for the copy. */
    BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

    while (length) {
        u32 amount, stride, height;

        /* Move at most 4 MiB per iteration, expressed as a 2D transfer
         * of 64-byte lines.
         */
        amount  = min(length, (u64)(4 * 1024 * 1024));
        stride  = 16 * 4;
        height  = amount / stride;

        /* A destination in tiled VRAM needs the full tiling setup;
         * otherwise select linear layout.
         */
        if (new_mem->mem_type == TTM_PL_VRAM &&
                nouveau_bo_tile_layout(nvbo)) {
            ret = RING_SPACE(chan, 8);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, stride);
            OUT_RING  (chan, height);
            OUT_RING  (chan, 1);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
        } else {
            ret = RING_SPACE(chan, 2);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
            OUT_RING  (chan, 1);
        }
        /* Same layout selection for the source. */
        if (old_mem->mem_type == TTM_PL_VRAM &&
                nouveau_bo_tile_layout(nvbo)) {
            ret = RING_SPACE(chan, 8);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, stride);
            OUT_RING  (chan, height);
            OUT_RING  (chan, 1);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
        } else {
            ret = RING_SPACE(chan, 2);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
            OUT_RING  (chan, 1);
        }

        ret = RING_SPACE(chan, 14);
        if (ret)
            return ret;

        /* Program the 64-bit source/destination offsets, pitches and
         * line count, then submit this chunk of the copy.
         */
        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
        OUT_RING  (chan, upper_32_bits(src_offset));
        OUT_RING  (chan, upper_32_bits(dst_offset));
        BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
        OUT_RING  (chan, lower_32_bits(src_offset));
        OUT_RING  (chan, lower_32_bits(dst_offset));
        OUT_RING  (chan, stride);
        OUT_RING  (chan, stride);
        OUT_RING  (chan, stride);
        OUT_RING  (chan, height);
        OUT_RING  (chan, 0x00000101);
        OUT_RING  (chan, 0x00000000);
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
        OUT_RING  (chan, 0);

        length -= amount;
        src_offset += amount;
        dst_offset += amount;
    }

    return 0;
}
Example #4
static boolean
nv30_vbo_validate(struct nv30_context *nv30)
{
	struct nouveau_stateobj *vtxbuf, *vtxfmt, *sattr = NULL;
	struct nouveau_grobj *rankine = nv30->screen->rankine;
	struct pipe_buffer *ib = nv30->idxbuf;
	unsigned ib_format = nv30->idxbuf_format;
	unsigned vb_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;
	int hw;

	if (nv30->edgeflags) {
		/*nv30->fallback_swtnl |= NV30_NEW_ARRAYS;*/
		return FALSE;
	}

	vtxbuf = so_new(20, 18);
	so_method(vtxbuf, rankine, NV34TCL_VTXBUF_ADDRESS(0), nv30->vtxelt_nr);
	vtxfmt = so_new(17, 0);
	so_method(vtxfmt, rankine, NV34TCL_VTXFMT(0), nv30->vtxelt_nr);

	/* Build one buffer address and one format word per vertex element. */
	for (hw = 0; hw < nv30->vtxelt_nr; hw++) {
		struct pipe_vertex_element *ve;
		struct pipe_vertex_buffer *vb;
		unsigned type, ncomp;

		ve = &nv30->vtxelt[hw];
		vb = &nv30->vtxbuf[ve->vertex_buffer_index];

		/* Zero-stride elements are constant attributes; try to emit
		 * them as immediate data instead of a vertex array.
		 */
		if (!vb->stride) {
			if (!sattr)
				sattr = so_new(16 * 5, 0);

			if (nv30_vbo_static_attrib(nv30, sattr, hw, ve, vb)) {
				so_data(vtxbuf, 0);
				so_data(vtxfmt, NV34TCL_VTXFMT_TYPE_FLOAT);
				continue;
			}
		}

		/* Unsupported vertex format: drop the state objects so the
		 * caller can fall back.
		 */
		if (nv30_vbo_format_to_hw(ve->src_format, &type, &ncomp)) {
			/*nv30->fallback_swtnl |= NV30_NEW_ARRAYS;*/
			so_ref(NULL, &vtxbuf);
			so_ref(NULL, &vtxfmt);
			return FALSE;
		}

		so_reloc(vtxbuf, nouveau_bo(vb->buffer), vb->buffer_offset +
				 ve->src_offset, vb_flags | NOUVEAU_BO_LOW |
				 NOUVEAU_BO_OR, 0, NV34TCL_VTXBUF_ADDRESS_DMA1);
		so_data (vtxfmt, ((vb->stride << NV34TCL_VTXFMT_STRIDE_SHIFT) |
				  (ncomp << NV34TCL_VTXFMT_SIZE_SHIFT) | type));
	}

	/* Emit the index buffer address and format, with relocations. */
	if (ib) {
		struct nouveau_bo *bo = nouveau_bo(ib);

		so_method(vtxbuf, rankine, NV34TCL_IDXBUF_ADDRESS, 2);
		so_reloc (vtxbuf, bo, 0, vb_flags | NOUVEAU_BO_LOW, 0, 0);
		so_reloc (vtxbuf, bo, ib_format, vb_flags | NOUVEAU_BO_OR,
				  0, NV34TCL_IDXBUF_FORMAT_DMA1);
	}

	so_method(vtxbuf, rankine, 0x1710, 1);
	so_data  (vtxbuf, 0); /* believed to flush the vertex cache */

	/* Hand the built state objects to the context and mark them dirty
	 * for the next state emit.
	 */
	so_ref(vtxbuf, &nv30->state.hw[NV30_STATE_VTXBUF]);
	so_ref(NULL, &vtxbuf);
	nv30->state.dirty |= (1ULL << NV30_STATE_VTXBUF);
	so_ref(vtxfmt, &nv30->state.hw[NV30_STATE_VTXFMT]);
	so_ref(NULL, &vtxfmt);
	nv30->state.dirty |= (1ULL << NV30_STATE_VTXFMT);
	so_ref(sattr, &nv30->state.hw[NV30_STATE_VTXATTR]);
	so_ref(NULL, &sattr);
	nv30->state.dirty |= (1ULL << NV30_STATE_VTXATTR);
	return FALSE;
}
Example #5
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	/* Translate libdrm placement flags into GEM domain masks; the
	 * read/write masks are non-zero only when the matching access
	 * flag is set.
	 */
	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* If the buffer is referenced on another pushbuf owned by the same
	 * client, flush that pushbuf first to ensure correct command
	 * ordering.
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
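	/* If the BO is already referenced on this pushbuf, merge the new
	 * access domains into the existing entry.
	 */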
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains  & NOUVEAU_GEM_DOMAIN_GART) &&
		    (            domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;
			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains  |= domains_rd;
	} else {
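		/* First reference to this BO on the pushbuf: fail (so the
		 * caller can flush and retry) if the buffer table is full
		 * or the BO will not fit in its placement.
		 */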
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		/* Record the kref for this client and hold a reference on
		 * the BO until the pushbuf is submitted.
		 */
		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}