static int
nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
	nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
	nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);

	nv_wo32(ctx, 0x70, 0x00801ec1);
	nv_wo32(ctx, 0x7c, 0x0000037c);
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}
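Two generations of the allocator appear in this listing: the DRM-side helper used above, which takes a drm_device plus an optional channel, and the later object-model variant (Examples #2, #6, #19) whose first two arguments are a nouveau_object parent and an optional parent gpuobj or heap to suballocate from. A minimal sketch of the DRM-era pattern follows; example_object_new is a hypothetical name, and the nouveau_gpuobj_new signature is inferred from the call sites in this listing rather than copied from a header:

/* Hypothetical caller following the pattern above: allocate a zeroed,
 * channel-bound object, program it with nv_wo32(), and hand the
 * reference back to the caller. */
static int
example_object_new(struct drm_device *dev, struct nouveau_channel *chan,
		   struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	/* 256 bytes, 16-byte alignment; zero on alloc and again on free */
	ret = nouveau_gpuobj_new(dev, chan, 256, 16,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, 0x00000000);		/* fill in object contents */
	dev_priv->engine.instmem.flush(dev);	/* make the writes visible */

	*pobj = obj;				/* caller owns the reference */
	return 0;
}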
Example #2
static int
nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;
	nv_subdev(priv)->intr = nv20_graph_intr;
	nv_engine(priv)->cclass = &nv25_graph_cclass;
	nv_engine(priv)->sclass = nv25_graph_sclass;
	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
	return 0;
}
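Example #3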
static int
nva3_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, 0xc0, 0x00190000);
	nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, 0xc8, ctx->vinst);
	nv_wo32(ramin, 0xcc, 0x00000000);
	nv_wo32(ramin, 0xd0, 0x00000000);
	nv_wo32(ramin, 0xd4, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[engine]);
	chan->engctx[engine] = ctx;
	return 0;
}
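Example #4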
int
nv84_fence_create(struct drm_device *dev)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv84_fence_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.engine.destroy = nv84_fence_destroy;
	priv->base.engine.init = nv84_fence_init;
	priv->base.engine.fini = nv84_fence_fini;
	priv->base.engine.context_new = nv84_fence_context_new;
	priv->base.engine.context_del = nv84_fence_context_del;
	priv->base.emit = nv84_fence_emit;
	priv->base.sync = nv84_fence_sync;
	priv->base.read = nv84_fence_read;
	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;

	ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
				 0x1000, 0, &priv->mem);
	if (ret)
		goto out;

out:
	if (ret)
		nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
	return ret;
}
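Example #5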
static int
nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
	u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
	struct nouveau_channel *chan;
	int ret, i;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + pgd,
				      chan->ramin->vinst + pgd,
				      0x4000, NVOBJ_FLAG_ZERO_ALLOC,
				      &chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

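	/* 0x4000 bytes / 8 bytes per PDE = 2048 page-directory entries;
	 * mark every entry invalid, poisoning the second word with
	 * 0xdeadcafe so stale PDEs stand out. */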
	for (i = 0; i < 0x4000; i += 8) {
		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
	}

	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + fc,
				      chan->ramin->vinst + fc, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	*pchan = chan;
	return 0;
}
Example #6
File: nv04.c Project: 24hours/linux
static int
nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nv04_instmem_priv *priv;
	int ret;

	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* PRAMIN aperture maps over the end of VRAM, reserve it */
	priv->base.reserved = 512 * 1024;

	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				&priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	/* 0x18800-0x18a00: reserve for RAMRO */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
				&priv->ramro);
	if (ret)
		return ret;

	return 0;
}
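Collecting the offsets from the comments above, the fixed PRAMIN layout this constructor reserves:

/* 0x00000-0x10000  vbios image  (64 KiB)
 * 0x10000-0x18000  RAMHT        (32 KiB)
 * 0x18000-0x18800  RAMFC        (2 KiB, enough for 32 nv30 channels)
 * 0x18800-0x18a00  RAMRO        (512 B)
 * total reserved: 512 KiB (priv->base.reserved); the remainder of the
 * heap stays free for later suballocations.
 */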
Example #7
int
nv20_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
	unsigned int idoffs = 0x28;
	int ret;

	switch (dev_priv->chipset) {
	case 0x20:
		ctx_init = nv20_graph_context_init;
		idoffs = 0;
		break;
	case 0x25:
	case 0x28:
		ctx_init = nv25_graph_context_init;
		break;
	case 0x2a:
		ctx_init = nv2a_graph_context_init;
		idoffs = 0;
		break;
	case 0x30:
	case 0x31:
		ctx_init = nv30_31_graph_context_init;
		break;
	case 0x34:
		ctx_init = nv34_graph_context_init;
		break;
	case 0x35:
	case 0x36:
		ctx_init = nv35_36_graph_context_init;
		break;
	default:
		BUG_ON(1);
	}

	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	ctx_init(dev, chan->ramin_grctx);

	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
	nv_wo32(chan->ramin_grctx, idoffs,
		(chan->id << 24) | 0x1); /* CTX_USER */

	nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4);
	return 0;
}
Example #8
static int
nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv04_vmmgr_priv *priv;
	int ret;

	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
	    !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
		return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
					   data, size, pobject);
	}

	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
				   "pciegart", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.create = nv04_vm_create;
	priv->base.limit = NV41_GART_SIZE;
	priv->base.dma_bits = 39;
	priv->base.pgt_bits = 32 - 12;
	priv->base.spg_shift = 12;
	priv->base.lpg_shift = 12;
	priv->base.map_sg = nv41_vm_map_sg;
	priv->base.unmap = nv41_vm_unmap;
	priv->base.flush = nv41_vm_flush;

	ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
				&priv->vm);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
				(NV41_GART_SIZE / NV41_GART_PAGE) * 4,
				 16, NVOBJ_FLAG_ZERO_ALLOC,
				 &priv->vm->pgt[0].obj[0]);
	priv->vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	return 0;
}
Example #9
static int
nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
		 struct nouveau_object *parent,
		 struct nouveau_gpuobj **pgpuobj)
{
	struct nv50_dmaobj_priv *priv = (void *)dmaobj;
	int ret;

	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
		switch (nv_mclass(parent->parent)) {
		case NV40_CHANNEL_DMA:
		case NV50_CHANNEL_GPFIFO:
		case G82_CHANNEL_GPFIFO:
		case NV50_DISP_CORE_CHANNEL_DMA:
		case G82_DISP_CORE_CHANNEL_DMA:
		case GT206_DISP_CORE_CHANNEL_DMA:
		case GT200_DISP_CORE_CHANNEL_DMA:
		case GT214_DISP_CORE_CHANNEL_DMA:
		case NV50_DISP_BASE_CHANNEL_DMA:
		case G82_DISP_BASE_CHANNEL_DMA:
		case GT200_DISP_BASE_CHANNEL_DMA:
		case GT214_DISP_BASE_CHANNEL_DMA:
		case NV50_DISP_OVERLAY_CHANNEL_DMA:
		case G82_DISP_OVERLAY_CHANNEL_DMA:
		case GT200_DISP_OVERLAY_CHANNEL_DMA:
		case GT214_DISP_OVERLAY_CHANNEL_DMA:
			break;
		default:
			return -EINVAL;
		}
	}

	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
					upper_32_bits(priv->base.start));
		nv_wo32(*pgpuobj, 0x10, 0x00000000);
		nv_wo32(*pgpuobj, 0x14, priv->flags5);
	}

	return ret;
}
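The six nv_wo32() writes build a 24-byte DMA object; the same layout recurs in nvc0_dmaobj_bind (Example #16) and in the bar3/bar1 objects of nv50_bar_ctor (Example #19). Reconstructed from those call sites (word meanings inferred from the writes, not taken from documentation):

/* 0x00: flags0 | object class
 * 0x04: limit[31:0]
 * 0x08: start[31:0]
 * 0x0c: limit[39:32] << 24 | start[39:32]
 * 0x10: zero
 * 0x14: flags5
 */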
Example #10
int
_nouveau_xtensa_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nouveau_xtensa *xtensa = (void *)object;
	const struct firmware *fw;
	char name[32];
	int i, ret;
	u32 tmp;

	ret = nouveau_engine_init(&xtensa->base);
	if (ret)
		return ret;

	if (!xtensa->gpu_fw) {
		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
			 xtensa->addr >> 12);

		ret = request_firmware(&fw, name, nv_device_base(device));
		if (ret) {
			nv_warn(xtensa, "unable to load firmware %s\n", name);
			return ret;
		}

		if (fw->size > 0x40000) {
			nv_warn(xtensa, "firmware %s too large\n", name);
			release_firmware(fw);
			return -EINVAL;
		}

		ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
					 &xtensa->gpu_fw);
		if (ret) {
			release_firmware(fw);
			return ret;
		}

		nv_debug(xtensa, "Loading firmware to address: 0x%"PRIxMAX"\n",
			 (uintmax_t)xtensa->gpu_fw->addr);

		for (i = 0; i < fw->size / 4; i++)
			nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
		release_firmware(fw);
	}
Example #11
static int
nv50_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *grctx = NULL;
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &grctx);
	if (ret)
		return ret;

	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
	nv_wo32(ramin, hdr + 0x08, grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = grctx;
	nv50_grctx_init(&ctx);

	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
	chan->engctx[NVOBJ_ENGINE_GR] = grctx;
	return 0;
}
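This routine and nv50_graph_create_context() in Example #13 below are two revisions of the same function: the older one keeps the context in chan->ramin_grctx and bumps chan->vm->pgraph_refs directly, while this engine-abstracted rewrite stores it in chan->engctx[]. Both write the same six-word context pointer header at hdr + 0x00 .. hdr + 0x14 that nv50_mpeg_context_new builds via CTX_PTR() in the first example, differing only in the class bits of the first word (0x00190002 here versus 0x80190002 for PMPEG).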
Example #12
static int
nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan,
		 struct nouveau_gpuobj *pgd, u64 vm_size)
{
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = nouveau_vm_ref(vm, &chan->vm, NULL);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

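	/* Instance-block header: physical address of the page directory
	 * and the last valid VM address; nvc0_bar_ctor (Example #21)
	 * writes the same four offsets for its BAR channels. */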
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));

	*pchan = chan;
	return 0;
}
Example #13
int
nv50_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
	if (ret)
		return ret;

	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
				   pgraph->grctx_size - 1);
	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = chan->ramin_grctx;
	nv50_grctx_init(&ctx);

	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);
	atomic_inc(&chan->vm->pgraph_refs);
	return 0;
}
Example #14
static int
nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 256, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
				 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}
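Unlike the nv50-era examples, which program a six-word context pointer header, the Fermi copy engine context pointer is just the 64-bit linear instance address (ctx->linst) split across two 32-bit writes at pcopy->ctx.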
Example #15
	nouveau_gpuobj_ref(NULL, &grctx);

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
}

static int
nv50_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class  = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
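nouveau_ramht_insert() hashes the object into the channel's RAMHT under the given handle; the RAMHT machinery appears to keep its own reference, since the local one is dropped immediately with nouveau_gpuobj_ref(NULL, &obj) and the insert's return code is passed straight back.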
Example #16
static int
nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
{
	u32 flags0 = nv_mclass(dmaobj);
	u32 flags5 = 0x00000000;
	int ret;

	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
		switch (nv_mclass(parent->parent)) {
		case NVA3_DISP_MAST_CLASS:
		case NVA3_DISP_SYNC_CLASS:
		case NVA3_DISP_OVLY_CLASS:
			break;
		default:
			return -EINVAL;
		}
	} else
		return 0;

	if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
		if (dmaobj->target == NV_MEM_TARGET_VM) {
			dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_VM;
			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
		} else {
			dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_US;
			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
			dmaobj->conf0 |= 0x00020000;
		}
	}

	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
	flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);

	switch (dmaobj->target) {
	case NV_MEM_TARGET_VM:
		flags0 |= 0x00000000;
		break;
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_MEM_ACCESS_VM:
		break;
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00040000;
		break;
	case NV_MEM_ACCESS_WO:
	case NV_MEM_ACCESS_RW:
		flags0 |= 0x00080000;
		break;
	}

	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, flags0);
		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
					upper_32_bits(dmaobj->start));
		nv_wo32(*pgpuobj, 0x10, 0x00000000);
		nv_wo32(*pgpuobj, 0x14, flags5);
	}

	return ret;
}
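Collected from the switches above, the bit assignments packed into the object's first word (values verbatim from this function):

/* target: VM 0x00000000, VRAM 0x00010000, PCI 0x00020000,
 *         PCI_NOSNOOP 0x00030000
 * access: RO 0x00040000, WO/RW 0x00080000 (VM access adds nothing)
 * plus (conf0 & NVC0_DMA_CONF0_TYPE) << 22 and the PRIV bits from conf0;
 * flags5 carries the NVC0_DMA_CONF0_UNKN bits.
 */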
Example #17
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_object *object;
	u32 arg0, arg1;
	int ret;

	if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
		return;

	/* initialise synchronisation routines */
	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
	else if (device->card_type < NV_11 ||
		 device->chipset   <  0x17) ret = nv10_fence_create(drm);
	else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
	else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
	else                                ret = nvc0_fence_create(drm);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type >= NV_E0) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1,
					  NVE0_CHANNEL_IND_ENGINE_CE0 |
					  NVE0_CHANNEL_IND_ENGINE_CE1, 0,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->chipset >= 0xa3 &&
	    device->chipset != 0xaa &&
	    device->chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
				  arg0, arg1, &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
				 nouveau_abi16_swclass(drm), NULL, 0, &object);
	if (ret == 0) {
		struct nouveau_software_chan *swch = (void *)object->parent;
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->card_type < NV_C0) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->card_type < NV_E0) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}
		swch->flip = nouveau_flip_complete;
		swch->flip_data = drm->channel;
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type < NV_C0) {
		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
					&drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nouveau_object_new(nv_object(drm),
					 drm->channel->handle, NvNotify0,
					 0x003d, &(struct nv_dma_class) {
						.flags = NV_DMA_TARGET_VRAM |
							 NV_DMA_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
						}, sizeof(struct nv_dma_class),
Example #18
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_object *object;
	u32 arg0, arg1;
	int ret;

	if (nouveau_noaccel)
		return;

	/* initialise synchronisation routines */
	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
	else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
	else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
	else                                ret = nvc0_fence_create(drm);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type >= NV_E0) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1,
					  NVE0_CHANNEL_IND_ENGINE_CE0 |
					  NVE0_CHANNEL_IND_ENGINE_CE1, 0,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
		arg1 = 0;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
				  arg0, arg1, &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type < NV_C0) {
		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
					&drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nouveau_object_new(nv_object(drm),
					 drm->channel->handle, NvNotify0,
					 0x003d, &(struct nv_dma_class) {
						.flags = NV_DMA_TARGET_VRAM |
							 NV_DMA_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
						}, sizeof(struct nv_dma_class),
Example #19
static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_object *heap;
	struct nouveau_vm *vm;
	struct nv50_bar_priv *priv;
	u64 start, limit;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_HEAP, &priv->mem);
	heap = nv_object(priv->mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				(device->chipset == 0x50) ? 0x1400 : 0x0200,
				 0, 0, &priv->pad);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
				 0, &priv->pgd);
	if (ret)
		return ret;

	/* BAR3 */
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nouveau_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

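	/* One 8-byte PTE per 4 KiB page covers the whole BAR3 range; the
	 * post-decrement turns the exclusive end address into the
	 * inclusive limit programmed into the DMA object below. */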
	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 ((limit-- - start) >> 12) * 8, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
	if (ret)
		return ret;

	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar3, 0x10, 0x00000000);
	nv_wo32(priv->bar3, 0x14, 0x00000000);

	/* BAR1 */
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nouveau_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
	if (ret)
		return ret;

	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar1, 0x10, 0x00000000);
	nv_wo32(priv->bar1, 0x14, 0x00000000);

	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nv50_bar_kmap;
	priv->base.umap = nv50_bar_umap;
	priv->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		priv->base.flush = nv50_bar_flush;
	else
		priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}
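Example #20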
int
nvc0_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *args, u32 size,
			struct nouveau_object **pobject)
{
	struct nouveau_vm *vm = nouveau_client(parent)->vm;
	struct nvc0_graph_priv *priv = (void *)engine;
	struct nvc0_graph_data *data = priv->mmio_data;
	struct nvc0_graph_mmio *mmio = priv->mmio_list;
	struct nvc0_graph_chan *chan;
	int ret, i;

	/* allocate memory for context, and fill with default values */
	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
					   priv->size, 0x100,
					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
				&chan->mmio);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
				    NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
				    &chan->mmio_vma);
	if (ret)
		return ret;

	/* allocate buffers referenced by mmio list */
	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
		ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
					 data->align, 0, &chan->data[i].mem);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
					   &chan->data[i].vma);
		if (ret)
			return ret;

		data++;
	}

	/* finally, fill in the mmio list and point the context at it */
	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
		u32 addr = mmio->addr;
		u32 data = mmio->data;

		if (mmio->shift) {
			u64 info = chan->data[mmio->buffer].vma.offset;
			data |= info >> mmio->shift;
		}

		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
		mmio++;
	}
Example #21
static int
nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct pci_dev *pdev = device->pdev;
	struct nvc0_bar_priv *priv;
	struct nouveau_gpuobj *mem;
	struct nouveau_vm *vm;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* BAR3 */
	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
	mem = priv->bar[0].mem;
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL,
				 (pci_resource_len(pdev, 3) >> 12) * 8,
				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
				 &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));

	/* BAR1 */
	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
	mem = priv->bar[1].mem;
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));

	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nvc0_bar_kmap;
	priv->base.umap = nvc0_bar_umap;
	priv->base.unmap = nvc0_bar_unmap;
	priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}
Example #22
int
nv20_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	uint32_t tmp, vramsz;
	int ret, i;

	switch (dev_priv->chipset) {
	case 0x20:
		pgraph->grctx_size = NV20_GRCTX_SIZE;
		break;
	case 0x25:
	case 0x28:
		pgraph->grctx_size = NV25_GRCTX_SIZE;
		break;
	case 0x2a:
		pgraph->grctx_size = NV2A_GRCTX_SIZE;
		break;
	default:
		NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
		pgraph->accel_blocked = true;
		return 0;
	}

	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

	if (!pgraph->ctx_table) {
		/* Create Context Pointer Table */
		ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
					 NVOBJ_FLAG_ZERO_ALLOC,
					 &pgraph->ctx_table);
		if (ret)
			return ret;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
		     pgraph->ctx_table->pinst >> 4);

	nv20_graph_rdi(dev);

	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
	nv_wr32(dev, 0x40009C           , 0x00000040);

	if (dev_priv->chipset >= 0x25) {
		nv_wr32(dev, 0x400890, 0x00080000);
		nv_wr32(dev, 0x400610, 0x304B1FB6);
		nv_wr32(dev, 0x400B80, 0x18B82880);
		nv_wr32(dev, 0x400B84, 0x44000000);
		nv_wr32(dev, 0x400098, 0x40000080);
		nv_wr32(dev, 0x400B88, 0x000000ff);
	} else {
		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
		nv_wr32(dev, 0x400094, 0x00000005);
		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
		nv_wr32(dev, 0x400B84, 0x24000000);
		nv_wr32(dev, 0x400098, 0x00000040);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);

	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
					nv_rd32(dev, 0x100300 + i * 4));
	}
	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	vramsz = pci_resource_len(dev->pdev, 0) - 1;
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, 0x400820, 0);
	nv_wr32(dev, 0x400824, 0);
	nv_wr32(dev, 0x400864, vramsz - 1);
	nv_wr32(dev, 0x400868, vramsz - 1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	nv_wr32(dev, 0x400B20, 0x00000000);
	nv_wr32(dev, 0x400B04, 0xFFFFFFFF);

	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);

	return 0;
}
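The context pointer table created here is 32 * 4 bytes: one 32-bit entry per channel. Its instance address is programmed shifted right by 4, matching the pinst >> 4 encoding nv20_graph_create_context() (Example #7) uses when it writes each channel's grctx pointer into the table.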
Example #23
int
nv30_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int ret, i;

	switch (dev_priv->chipset) {
	case 0x30:
	case 0x31:
		pgraph->grctx_size = NV30_31_GRCTX_SIZE;
		break;
	case 0x34:
		pgraph->grctx_size = NV34_GRCTX_SIZE;
		break;
	case 0x35:
	case 0x36:
		pgraph->grctx_size = NV35_36_GRCTX_SIZE;
		break;
	default:
		NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
		pgraph->accel_blocked = true;
		return 0;
	}

	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

	if (!pgraph->ctx_table) {
		/* Create Context Pointer Table */
		ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16,
					 NVOBJ_FLAG_ZERO_ALLOC,
					 &pgraph->ctx_table);
		if (ret)
			return ret;
	}

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
		     pgraph->ctx_table->pinst >> 4);

	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, 0x400890, 0x01b463ff);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
	nv_wr32(dev, 0x400B80, 0x1003d888);
	nv_wr32(dev, 0x400B84, 0x0c000000);
	nv_wr32(dev, 0x400098, 0x00000000);
	nv_wr32(dev, 0x40009C, 0x0005ad00);
	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
	nv_wr32(dev, 0x4000a0, 0x00000000);
	nv_wr32(dev, 0x4000a4, 0x00000008);
	nv_wr32(dev, 0x4008a8, 0xb784a400);
	nv_wr32(dev, 0x400ba0, 0x002f8685);
	nv_wr32(dev, 0x400ba4, 0x00231f3f);
	nv_wr32(dev, 0x4008a4, 0x40000020);

	if (dev_priv->chipset == 0x34) {
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
	}

	nv_wr32(dev, 0x4000c0, 0x00000016);

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
	nv_wr32(dev, 0x0040075c             , 0x00000001);

	/* begin RAM config */
	/* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	if (dev_priv->chipset != 0x34) {
		nv_wr32(dev, 0x400750, 0x00EA0000);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x400750, 0x00EA0004);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
	}

	return 0;
}
Example #24
int
nvc0_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct pci_dev *pdev = dev->pdev;
	struct nvc0_instmem_priv *priv;
	struct nouveau_vm *vm = NULL;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pinstmem->priv = priv;

	/* BAR3 VM */
	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
			     &dev_priv->bar3_vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL,
				 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
				 NVOBJ_FLAG_DONT_MAP |
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &dev_priv->bar3_vm->pgt[0].obj[0]);
	if (ret)
		goto error;
	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
	if (ret)
		goto error;

	ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
			       priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
	if (ret)
		goto error;

	/* BAR1 VM */
	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
	if (ret)
		goto error;

	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
			       priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
	if (ret)
		goto error;

	/* channel vm */
	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
	if (ret)
		goto error;

	nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
	nouveau_vm_ref(NULL, &vm, NULL);

	nvc0_instmem_resume(dev);
	return 0;
error:
	nvc0_instmem_takedown(dev);
	return ret;
}
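Example #25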
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

#ifdef __NetBSD__
	priv->iomemt = nv_device_resource_tag(device, bar);
	priv->iomemsz = nv_device_resource_len(device, bar);
	ret = bus_space_map(priv->iomemt,
	    nv_device_resource_start(device, bar),
	    priv->iomemsz, 0, &priv->iomemh);
	if (ret) {
		priv->iomemsz = 0;
		nv_error(priv, "unable to map PRAMIN BAR: %d\n", ret);
		return -EFAULT;
	}
#else
	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}
#endif

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/graph/nv40.c
	 */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
	else if (nv44_graph_class(priv))  priv->base.reserved = 0x4980 * vs;
	else				  priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

	priv->base.reserved = round_up(priv->base.reserved, 4096);

	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				&priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
			       &priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
				&priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}
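A worked instance of the reservation arithmetic above, assuming a post-0x43, non-nv44-class chip reporting vs = 8 (a hypothetical value for illustration): 0x4a40 * 8 = 0x25200 per context, plus 16 KiB gives 0x29200 per channel, times 32 channels is 0x524000, and the two 512 KiB regions bring the total to 0x624000 (about 6.1 MiB), which is already a multiple of 4096.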
Example #26
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->device;
	u32 arg0, arg1;
	u32 sclass[16];
	int ret, i;

	if (nouveau_noaccel)
		return;

	/* initialise synchronisation routines */
	/*XXX: this is crap, but the fence/channel stuff is a little
	 *     backwards in some places.  this will be fixed.
	 */
	ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
	if (ret < 0)
		return;

	for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
		switch (sclass[i]) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
					  0, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
					  NvDmaFB, NvDmaTT, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
				 &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
	if (ret == 0) {
		struct nouveau_software_chan *swch;
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}
		swch = (void *)nvkm_object(&drm->nvsw)->parent;
		swch->flip = nouveau_flip_complete;
		swch->flip_data = drm->channel;
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32,
					 0, 0, &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
				       NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),