static int
nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
	u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
	struct nouveau_channel *chan;
	int ret, i;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + pgd,
				      chan->ramin->vinst + pgd,
				      0x4000, NVOBJ_FLAG_ZERO_ALLOC,
				      &chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	/* initialise the page directory with empty (unpopulated) entries */
	for (i = 0; i < 0x4000; i += 8) {
		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
	}

	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + fc,
				      chan->ramin->vinst + fc, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	*pchan = chan;
	return 0;
}
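
Taken together with nv50_channel_del() (called on every error path above), a caller might use this helper roughly as sketched below. This is an assumption-laden illustration, not driver code: the example_ function name and the 0x10000 instance-memory size are made up, and since both helpers are static a real caller would live in the same file.

static int
example_channel_setup(struct drm_device *dev, struct nouveau_vm *vm)
{
	struct nouveau_channel *chan = NULL;
	int ret;

	/* 0x10000 bytes of instance memory is an arbitrary example size */
	ret = nv50_channel_new(dev, 0x10000, vm, &chan);
	if (ret)
		return ret;

	/* ... program chan->ramfc, attach engine contexts, etc. ... */

	nv50_channel_del(&chan);
	return 0;
}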
Example #2
void
nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
{
	struct nouveau_bar *bar = nouveau_bar(ramht);
	nv_wo32(ramht, cookie + 0, 0x00000000);
	nv_wo32(ramht, cookie + 4, 0x00000000);
	if (bar)
		bar->flush(bar);
}
Example #3
static int
nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
	nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
	nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);

	nv_wo32(ctx, 0x70, 0x00801ec1);
	nv_wo32(ctx, 0x7c, 0x0000037c);
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}
Example #4
File: nv10.c, Project: 168519/linux
static int
nv10_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nv_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;	/* nvif_unpack() stores its unpack status in ret */

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
Example #5
static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	cur = pfifo->playlist[pfifo->cur_playlist];
	pfifo->cur_playlist = !pfifo->cur_playlist;

	/* We never schedule channel 0 or 127 */
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->channels.ptr[i] &&
		    dev_priv->channels.ptr[i]->ramfc) {
			nv_wo32(cur, (nr * 4), i);
			nr++;
		}
	}
	dev_priv->engine.instmem.flush(dev);

	nv_wr32(dev, 0x32f4, cur->vinst >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}
Example #6
int
nv40_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int ret;

	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
				     16, NVOBJ_FLAG_ZERO_ALLOC,
				     &chan->ramin_grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	dev_priv->engine.instmem.prepare_access(dev, true);
	if (!pgraph->ctxprog) {
		struct nouveau_grctx ctx = {};

		ctx.dev = chan->dev;
		ctx.mode = NOUVEAU_GRCTX_VALS;
		ctx.data = chan->ramin_grctx->gpuobj;
		nv40_grctx_init(&ctx);
	} else {
		nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
	}
	nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
		     chan->ramin_grctx->gpuobj->im_pramin->start);
	dev_priv->engine.instmem.finish_access(dev);
	return 0;
}
Example #7
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
              int delta, int length)
{
    struct nouveau_bo *pb = chan->push.buffer;
    struct nouveau_vma *vma;
    int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
    u64 offset;

    vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
    BUG_ON(!vma);
    offset = vma->offset + delta;

    BUG_ON(chan->dma.ib_free < 1);

    /* IB entry: first word holds the low 32 bits of the push buffer address,
     * second word packs the upper address bits together with the length (<< 8) */
    nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
    nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

    chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

    mb();
    /* Flush writes. */
    nouveau_bo_rd32(pb, 0);

    nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
    chan->dma.ib_free--;
}
Example #8
static int
nv84_fence_context_new(struct nouveau_channel *chan, int engine)
{
	struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
	struct nv84_fence_chan *fctx;
	struct nouveau_gpuobj *obj;
	int ret;

	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	nouveau_fence_context_new(&fctx->base);

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
				     priv->mem->vinst, priv->mem->size,
				     NV_MEM_ACCESS_RW,
				     NV_MEM_TARGET_VRAM, &obj);
	if (ret == 0) {
		ret = nouveau_ramht_insert(chan, NvSema, obj);
		nouveau_gpuobj_ref(NULL, &obj);
		nv_wo32(priv->mem, chan->id * 16, 0x00000000);
	}

	if (ret)
		nv84_fence_context_del(chan, engine);
	return ret;
}
Example #9
void
nv50_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramin)
		return;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);
	pgraph->fifo_access(dev, false);

	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);

	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	pgraph->fifo_access(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);

	atomic_dec(&chan->vm->pgraph_refs);
}
Example #10
int
nv40_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ctx;
	int ret;

	/* Allocate a 175KiB block of PRAMIN to store the context.  This
	 * is massive overkill for a lot of chipsets, but it should be safe
	 * until we're able to implement this properly (will happen at more
	 * or less the same time we're able to write our own context programs).
	 */
	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
					  NVOBJ_FLAG_ZERO_ALLOC,
					  &chan->ramin_grctx);
	if (ret)
		return ret;
	ctx = chan->ramin_grctx->gpuobj;

	/* Initialise default context values */
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv40_grctx_vals_load(dev, ctx);
	nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
	dev_priv->engine.instmem.finish_access(dev);

	return 0;
}
Example #11
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	unsigned long flags;
	u32 inst, i;

	if (!chan->ramin)
		return;

	inst  = chan->ramin->vinst >> 12;
	inst |= 0x80000000;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
	if (nv_rd32(dev, 0x00b318) == inst)
		nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	for (i = 0x00; i <= 0x14; i += 4)
		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
	nouveau_gpuobj_ref(NULL, &ctx);
	chan->engctx[engine] = NULL;
}
Example #12
static void
nv40_graph_construct_shader(struct nouveau_grctx *ctx)
{
	struct drm_device *dev = ctx->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = ctx->data;
	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
	int offset, i;

	vs_nr    = nv40_graph_vs_count(ctx->dev);
	vs_nr_b0 = 363;
	vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
	if (dev_priv->chipset == 0x40) {
		b0_offset = 0x2200/4; /* 33a0 */
		b1_offset = 0x55a0/4; /* 1500 */
		vs_len = 0x6aa0/4;
	} else
	if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
		b0_offset = 0x2200/4; /* 2200 */
		b1_offset = 0x4400/4; /* 0b00 */
		vs_len = 0x4f00/4;
	} else {
		b0_offset = 0x1d40/4; /* 2200 */
		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
		vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
	}

	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
	cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);

	offset = ctx->ctxvals_pos;
	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));

	if (ctx->mode != NOUVEAU_GRCTX_VALS)
		return;

	offset += 0x0280/4;
	for (i = 0; i < 16; i++, offset += 2)
		nv_wo32(obj, offset * 4, 0x3f800000);

	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
		for (i = 0; i < vs_nr_b0 * 6; i += 6)
			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
		for (i = 0; i < vs_nr_b1 * 4; i += 4)
			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
	}
}
Example #13
static void
nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte = pte * 4;
	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}
}
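
The same word-at-a-time pattern works for populating page-table slots. Below is a hedged sketch only: the example_ helper name and the idea of passing a single precomputed value are illustrative, since the real PTE encoding is chipset-specific and produced elsewhere in the driver.

static void
example_pgt_fill(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt, u32 val)
{
	pte = pte * 4;			/* each slot is one 32-bit word */
	while (cnt--) {
		nv_wo32(pgt, pte, val);
		pte += 4;
	}
}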
Example #14
void
nv20_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
	nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
}
Example #15
int
nv20_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
	unsigned int idoffs = 0x28;
	int ret;

	switch (dev_priv->chipset) {
	case 0x20:
		ctx_init = nv20_graph_context_init;
		idoffs = 0;
		break;
	case 0x25:
	case 0x28:
		ctx_init = nv25_graph_context_init;
		break;
	case 0x2a:
		ctx_init = nv2a_graph_context_init;
		idoffs = 0;
		break;
	case 0x30:
	case 0x31:
		ctx_init = nv30_31_graph_context_init;
		break;
	case 0x34:
		ctx_init = nv34_graph_context_init;
		break;
	case 0x35:
	case 0x36:
		ctx_init = nv35_36_graph_context_init;
		break;
	default:
		BUG_ON(1);
	}

	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	ctx_init(dev, chan->ramin_grctx);

	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
	nv_wo32(chan->ramin_grctx, idoffs,
		(chan->id << 24) | 0x1); /* CTX_USER */

	nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
	return 0;
}
Example #16
static int
nv84_crypt_object_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nouveau_gpuobj *obj;
	int ret;

	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
				    16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	return 0;
}
Example #17
File: nv50.c, Project: 168519/linux
int
nv50_mpeg_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_mpeg_chan *chan;
	int ret;

	ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
				       0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_wo32(chan, 0x0070, 0x00801ec1);
	nv_wo32(chan, 0x007c, 0x0000037c);
	bar->flush(bar);
	return 0;
}
Example #18
File: gf100.c, Project: 168519/linux
static int
gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
		  struct nvkm_gpuobj **pgpuobj)
{
	struct gf100_dmaobj_priv *priv = (void *)dmaobj;
	int ret;

	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
		switch (nv_mclass(parent->parent)) {
		case GT214_DISP_CORE_CHANNEL_DMA:
		case GT214_DISP_BASE_CHANNEL_DMA:
		case GT214_DISP_OVERLAY_CHANNEL_DMA:
			break;
		default:
			return -EINVAL;
		}
	} else
		return 0;

	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
					upper_32_bits(priv->base.start));
		nv_wo32(*pgpuobj, 0x10, 0x00000000);
		nv_wo32(*pgpuobj, 0x14, priv->flags5);
	}

	return ret;
}
Example #19
File: nv10.c, Project: 24hours/linux
static int
nv10_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
					  (1ULL << NVDEV_ENGINE_SW) |
					  (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
Example #20
static int
nva3_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, 0xc0, 0x00190000);
	nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, 0xc8, ctx->vinst);
	nv_wo32(ramin, 0xcc, 0x00000000);
	nv_wo32(ramin, 0xd0, 0x00000000);
	nv_wo32(ramin, 0xd4, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[engine]);
	chan->engctx[engine] = ctx;
	return 0;
}
Example #21
int
nv50_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
	struct nouveau_gpuobj *ctx;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
				     0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				     NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
	if (ret)
		return ret;
	ctx = chan->ramin_grctx->gpuobj;

	hdr = IS_G80 ? 0x200 : 0x20;
	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
					   pgraph->grctx_size - 1);
	nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
	nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
	dev_priv->engine.instmem.finish_access(dev);

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (!pgraph->ctxprog) {
		struct nouveau_grctx ctx = {};
		ctx.dev = chan->dev;
		ctx.mode = NOUVEAU_GRCTX_VALS;
		ctx.data = chan->ramin_grctx->gpuobj;
		nv50_grctx_init(&ctx);
	} else {
		nouveau_grctx_vals_load(dev, ctx);
	}
	nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
	if ((dev_priv->chipset & 0xf0) == 0xa0)
		nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
	else
		nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
	dev_priv->engine.instmem.finish_access(dev);

	return 0;
}
Example #22
static int
nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan,
		 struct nouveau_gpuobj *pgd, u64 vm_size)
{
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = nouveau_vm_ref(vm, &chan->vm, NULL);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));

	*pchan = chan;
	return 0;
}
Example #23
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	int i;

	for (i = 0x00; i <= 0x14; i += 4)
		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);

	nouveau_gpuobj_ref(NULL, &ctx);
	chan->engctx[engine] = NULL;
}
Example #24
static int
nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 256, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
				 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}
Example #25
int
nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
		     u32 handle, u32 context)
{
	struct nouveau_bar *bar = nouveau_bar(ramht);
	u32 co, ho;

	co = ho = nouveau_ramht_hash(ramht, chid, handle);
	do {
		if (!nv_ro32(ramht, co + 4)) {
			nv_wo32(ramht, co + 0, handle);
			nv_wo32(ramht, co + 4, context);
			if (bar)
				bar->flush(bar);
			return co;
		}

		co += 8;
		if (co >= nv_gpuobj(ramht)->size)
			co = 0;
	} while (co != ho);

	return -ENOMEM;
}
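
A sketch of how a caller might pair this with nouveau_ramht_remove() from Example #2, using the returned offset as the removal cookie. The example_ wrapper name is hypothetical:

static int
example_ramht_bind(struct nouveau_ramht *ramht, int chid,
		   u32 handle, u32 context)
{
	int cookie = nouveau_ramht_insert(ramht, chid, handle, context);
	if (cookie < 0)
		return cookie;		/* -ENOMEM: hash table is full */

	/* ... object is now reachable through the hash table ... */

	nouveau_ramht_remove(ramht, cookie);	/* clears both RAMHT words */
	return 0;
}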
Example #26
void
nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_ctxvals *cv = pgraph->ctxvals;
	int i;

	if (!cv)
		return;

	for (i = 0; i < le32_to_cpu(cv->length); i++)
		nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
			le32_to_cpu(cv->data[i].value));
}
Example #27
void
_nouveau_xtensa_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_xtensa *xtensa = (void *)subdev;
	u32 unk104 = nv_ro32(xtensa, 0xd04);
	u32 intr = nv_ro32(xtensa, 0xc20);
	u32 chan = nv_ro32(xtensa, 0xc28);
	u32 unk10c = nv_ro32(xtensa, 0xd0c);

	if (intr & 0x10)
		nv_warn(xtensa, "Watchdog interrupt, engine hung.\n");
	nv_wo32(xtensa, 0xc20, intr);
	intr = nv_ro32(xtensa, 0xc20);
	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
		nv_debug(xtensa, "Enabling FIFO_CTRL\n");
		nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
	}
}
Example #28
int
_nouveau_xtensa_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nouveau_xtensa *xtensa = (void *)object;
	const struct firmware *fw;
	char name[32];
	int i, ret;
	u32 tmp;

	ret = nouveau_engine_init(&xtensa->base);
	if (ret)
		return ret;

	if (!xtensa->gpu_fw) {
		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
			 xtensa->addr >> 12);

		ret = request_firmware(&fw, name, nv_device_base(device));
		if (ret) {
			nv_warn(xtensa, "unable to load firmware %s\n", name);
			return ret;
		}

		if (fw->size > 0x40000) {
			nv_warn(xtensa, "firmware %s too large\n", name);
			release_firmware(fw);
			return -EINVAL;
		}

		ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
					 &xtensa->gpu_fw);
		if (ret) {
			release_firmware(fw);
			return ret;
		}

		nv_debug(xtensa, "Loading firmware to address: 0x%"PRIxMAX"\n",
			 (uintmax_t)xtensa->gpu_fw->addr);

		for (i = 0; i < fw->size / 4; i++)
			nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
		release_firmware(fw);
	}

	/* ... remaining engine setup not included in this excerpt ... */
	return 0;
}
Example #29
void
nv50_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i, hdr = IS_G80 ? 0x200 : 0x20;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramin || !chan->ramin->gpuobj)
		return;

	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
	dev_priv->engine.instmem.finish_access(dev);

	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
}
Example #30
static int
nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_wr_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "wr size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
			 args->v0.version, args->v0.size, args->v0.addr,
			 args->v0.data);
		switch (args->v0.size) {
		case 1:
			/* comma operator: preset ret to -ENODEV, then check for a wr08 hook */
			if (ret = -ENODEV, ofuncs->wr08) {
				nv_wo08(object, args->v0.addr, args->v0.data);
				ret = 0;
			}
			break;
		case 2:
			if (ret = -ENODEV, ofuncs->wr16) {
				nv_wo16(object, args->v0.addr, args->v0.data);
				ret = 0;
			}
			break;
		case 4:
			if (ret = -ENODEV, ofuncs->wr32) {
				nv_wo32(object, args->v0.addr, args->v0.data);
				ret = 0;
			}
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}
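
For comparison, here is a sketch of what the matching read path could look like, restricted to the 32-bit case. The nvif_ioctl_rd_v0 layout and the rd32 ofuncs hook are assumed to mirror the write variant above rather than taken from this listing, and the example_ name is hypothetical.

static int
example_ioctl_rd32(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_rd_v0 v0;	/* assumed: version, size, addr, data */
	} *args = data;
	int ret;

	nv_ioctl(object, "rd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		ret = -ENODEV;
		if (args->v0.size == 4 && ofuncs->rd32) {
			args->v0.data = nv_ro32(object, args->v0.addr);
			ret = 0;
		}
	}

	return ret;
}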