Example #1
int pscnv_ramht_insert(struct pscnv_ramht *ramht, uint32_t handle, uint32_t context) {
	/* XXX: check if the object exists already... */
	struct drm_nouveau_private *dev_priv = ramht->vo->dev->dev_private;
	uint32_t hash = pscnv_ramht_hash(ramht, handle);
	uint32_t start = hash * 8;
	uint32_t pos = start;
	if (pscnv_ramht_debug >= 2)
		NV_INFO(ramht->vo->dev, "Handle %x hash %x\n", handle, hash);
	spin_lock(&ramht->lock);
	/* linear probing: scan from the hash slot, wrapping around, until a
	 * free entry (context word of 0) is found or we are back at the start */
	do {
		if (!nv_rv32(ramht->vo, ramht->offset + pos + 4)) {
			nv_wv32(ramht->vo, ramht->offset + pos, handle);
			nv_wv32(ramht->vo, ramht->offset + pos + 4, context);
			dev_priv->vm->bar_flush(ramht->vo->dev);
			spin_unlock(&ramht->lock);
			if (pscnv_ramht_debug >= 1)
				NV_INFO(ramht->vo->dev, "Adding RAMHT entry for object %x at %x, context %x\n", handle, pos, context);
			return 0;
		}
		pos += 8; /* each entry is 8 bytes: handle word + context word */
		if (pos == 8 << ramht->bits) /* wrap at the end of the table */
			pos = 0;
	} while (pos != start);
	spin_unlock(&ramht->lock);
	NV_ERROR(ramht->vo->dev, "No RAMHT space for object %x\n", handle);
	return -ENOMEM;
}
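
A lookup that walks the same probe sequence makes the table layout clearer. The helper below is a sketch, not the original source: the name pscnv_ramht_lookup_sketch is hypothetical, and it assumes, as the insert above does, that a zero context word marks a free slot.

/* Hypothetical lookup counterpart (sketch, not from the original source).
 * Scans the whole table so entries displaced past deleted slots are still
 * found; returns the context word, or 0 if the handle is absent. */
uint32_t pscnv_ramht_lookup_sketch(struct pscnv_ramht *ramht, uint32_t handle)
{
	uint32_t hash = pscnv_ramht_hash(ramht, handle);
	uint32_t start = hash * 8;
	uint32_t pos = start;
	uint32_t context;
	spin_lock(&ramht->lock);
	do {
		context = nv_rv32(ramht->vo, ramht->offset + pos + 4);
		if (context && nv_rv32(ramht->vo, ramht->offset + pos) == handle) {
			spin_unlock(&ramht->lock);
			return context;
		}
		pos += 8;
		if (pos == 8 << ramht->bits)
			pos = 0;
	} while (pos != start);
	spin_unlock(&ramht->lock);
	return 0;
}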
Example #2
void
nvc0_graph_chan_free(struct pscnv_engine *eng, struct pscnv_chan *ch)
{
	struct drm_device *dev = eng->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	struct nvc0_graph_engine *graph = NVC0_GRAPH(eng);
	struct nvc0_graph_chan *grch = ch->engdata[PSCNV_ENGINE_GRAPH];
	int i;
	
	/* detach the graph engine data from the channel before tearing it down */
	ch->engdata[PSCNV_ENGINE_GRAPH] = NULL;

	/* free the buffers referenced by the mmio list */
	for (i = 0; i < ARRAY_SIZE(graph->mmio_data); i++) {
		if (grch->data[i].mem) {
			pscnv_vspace_unmap(ch->vspace, grch->data[i].vm_base);
			pscnv_mem_free(grch->data[i].mem);
		}
	}
	/* free the mmio list itself */
	pscnv_vspace_unmap(ch->vspace, grch->mmio_vm_base);
	pscnv_mem_free(grch->mmio);

	/* free the per-channel context page */
	pscnv_vspace_unmap(ch->vspace, grch->grctx_vm_base);
	pscnv_mem_free(grch->grctx);
	
	kfree(grch);
	
	/* clear the context pointer registered in the channel's BO
	 * (counterpart of the 0x210/0x214 writes in nvc0_graph_chan_alloc) */
	nv_wv32(ch->bo, 0x210, 0);
	nv_wv32(ch->bo, 0x214, 0);
	dev_priv->vm->bar_flush(dev);
}
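
The four unmap-then-free pairs here (and their twins in the unwind path of Example #5) follow one pattern, so a small helper could factor them out. This is a sketch under the assumption that grch->mmio, grch->grctx and grch->data[i].mem are struct pscnv_bo pointers; the helper name is hypothetical.

/* Hypothetical helper (sketch): unmap and free one mapped allocation,
 * tolerating slots that were never allocated. */
static void pscnv_unmap_and_free_sketch(struct pscnv_vspace *vs,
					struct pscnv_bo *bo, uint64_t vm_base)
{
	if (!bo)
		return;
	pscnv_vspace_unmap(vs, vm_base);
	pscnv_mem_free(bo);
}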
Example #3
int nv50_chan_new (struct pscnv_chan *ch) {
    struct pscnv_vspace *vs = ch->vspace;
    struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
    uint64_t size;
    uint32_t chan_pd;
    int i;
    /* Determine the size of the underlying VO. Normal channels get 64 KiB
     * since they have to store the object heap; the fake BAR channel only
     * needs two objects, so keep it minimal.
     */
    if (!ch->isbar)
        size = 0x10000;
    else if (dev_priv->chipset == 0x50)
        size = 0x6000;
    else
        size = 0x5000;
    ch->vo = pscnv_vram_alloc(vs->dev, size, PSCNV_VO_CONTIG,
                              0, (ch->isbar ? 0xc5a2ba7 : 0xc5a2f1f0));
    if (!ch->vo)
        return -ENOMEM;

    if (!vs->isbar)
        dev_priv->vm->map_kernel(ch->vo);

    if (dev_priv->chipset == 0x50)
        chan_pd = NV50_CHAN_PD;
    else
        chan_pd = NV84_CHAN_PD;
    for (i = 0; i < NV50_VM_PDE_COUNT; i++) {
        if (nv50_vs(vs)->pt[i]) {
            nv_wv32(ch->vo, chan_pd + i * 8 + 4, nv50_vs(vs)->pt[i]->start >> 32);
            nv_wv32(ch->vo, chan_pd + i * 8, nv50_vs(vs)->pt[i]->start | 0x3);
        } else {
            /* no page table for this PDE: clear the entry
             * (assumed completion; the original excerpt breaks off here) */
            nv_wv32(ch->vo, chan_pd + i * 8, 0);
        }
    }
    /* ... remainder of nv50_chan_new truncated in this excerpt ... */
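
For reference, each page-directory entry the loop writes is 8 bytes. The sketch below restates the encoding; reading 0x3 as a present/valid marker is an assumption based only on the writes above.

/* Sketch (not original source): the PDE encoding used by the loop above. */
static void nv50_encode_pde_sketch(uint64_t pt_start, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)pt_start | 0x3;   /* low word: address bits + 0x3 flags */
    *hi = (uint32_t)(pt_start >> 32); /* high word: upper address bits */
}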
Example #4
static int
nvc0_fifo_chan_init_ib (struct pscnv_chan *ch, uint32_t pb_handle, uint32_t flags, uint32_t slimask, uint64_t ib_start, uint32_t ib_order) {
	struct drm_device *dev = ch->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_fifo_engine *fifo = nvc0_fifo_eng(dev_priv->fifo);
	struct nvc0_fifo_ctx *fifo_ctx;
	struct pscnv_bo *ib;
	unsigned long irqflags;
	enum pscnv_chan_state st;
	int ret;

	int i;
	uint64_t fifo_regs = nvc0_fifo_get_fifo_regs(ch);

	if (ib_order != 9) {
		NV_ERROR(dev, "nvc0_fifo_chan_init_ib: ib_order=%d requested, "
			"but only ib_order=9 supported atm\n", ib_order);
		return -EINVAL;
	}
	
	st = pscnv_chan_get_state(ch);
	if (st != PSCNV_CHAN_INITIALIZED) {
		NV_ERROR(dev, "nvc0_fifo_chan_init_ib: channel %d in unexpected"
			" state %s\n", ch->cid, pscnv_chan_state_str(st));
		return -EINVAL;
	}
	
	ib = pscnv_vspace_vm_addr_lookup(ch->vspace, ib_start);
	if (!ib) {
		NV_ERROR(dev, "nvc0_fifo_chan_init_ib: 0x%llx in vspace %d given"
			" as start address for indirect buffer of channel %d,"
			" but no BO mapped there\n", ib_start, ch->vspace->vid,
			ch->cid);
		return -EINVAL;
	}
	/* each IB entry is 8 bytes, so the BO must be exactly 8 * 2^ib_order */
	if (ib->size != 8*(1ULL << ib_order)) {
		NV_ERROR(dev, "nvc0_fifo_chan_init_ib: IB at BO %08x/%d has "
			"size 0x%llx, but expected 0x%llx\n",
			ib->cookie, ib->serial, ib->size, 8*(1ULL << ib_order));
		return -EINVAL;
	}
	
	ib->flags |= PSCNV_GEM_IB; /* mark this BO as an indirect buffer */
	
	fifo_ctx = kmalloc(sizeof(*fifo_ctx), GFP_KERNEL);
	if (!fifo_ctx) {
		NV_ERROR(dev, "nvc0_fifo_chan_init_ib: out of memory\n");
		return -ENOMEM;
	}
	
	fifo_ctx->ib = ib;
	if (pscnv_mem_debug >= 2) {
		NV_INFO(dev, "chan_init_ib: ref BO%08x/%d\n", ib->cookie, ib->serial);
	}
	
	pscnv_bo_ref(ib);

	spin_lock_irqsave(&dev_priv->context_switch_lock, irqflags);

	/* clear this channel's 4 KiB page in the FIFO control BO */
	for (i = 0; i < 0x1000; i += 4) {
		nv_wv32(fifo->ctrl_bo, (ch->cid << 12) + i, 0);
	}

	/* clear the beginning of the channel's instance block */
	for (i = 0; i < 0x100; i += 4)
		nv_wv32(ch->bo, i, 0);

	dev_priv->vm->bar_flush(dev);

	/* address of this channel's FIFO register page */
	nv_wv32(ch->bo, 0x08, fifo_regs);
	nv_wv32(ch->bo, 0x0c, fifo_regs >> 32);

	nv_wv32(ch->bo, 0x48, ib_start); /* IB address, low word */
	nv_wv32(ch->bo, 0x4c,
		(ib_start >> 32) | (ib_order << 16)); /* IB address high + size order */
	nv_wv32(ch->bo, 0x10, 0xface);
	nv_wv32(ch->bo, 0x54, 0x2);
	nv_wv32(ch->bo, 0x9c, 0x100);
	nv_wv32(ch->bo, 0x84, 0x20400000);
	nv_wv32(ch->bo, 0x94, 0x30000001);
	nv_wv32(ch->bo, 0xa4, 0x1f1f1f1f);
	nv_wv32(ch->bo, 0xa8, 0x1f1f1f1f);
	nv_wv32(ch->bo, 0xac, 0x1f);
	nv_wv32(ch->bo, 0x30, 0xfffff902);
	nv_wv32(ch->bo, 0xb8, 0xf8000000); /* previously omitted */
	nv_wv32(ch->bo, 0xf8, 0x10003080);
	nv_wv32(ch->bo, 0xfc, 0x10000010);
	dev_priv->vm->bar_flush(dev);

	/* point PFIFO at the channel's instance block and enable the channel */
	nv_wr32(dev, 0x3000 + ch->cid * 8, 0xc0000000 | (ch->bo->start >> 12));
	nv_wr32(dev, 0x3004 + ch->cid * 8, 0x1f0001);

	nvc0_fifo_playlist_update(dev);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, irqflags);

	ch->engdata[PSCNV_ENGINE_FIFO] = fifo_ctx;
	
	/* XXX: the chan_alloc return values are ignored here; an engine
	 * setup failure goes unnoticed */
	dev_priv->engines[PSCNV_ENGINE_GRAPH]->
		chan_alloc(dev_priv->engines[PSCNV_ENGINE_GRAPH], ch);
	if (dev_priv->engines[PSCNV_ENGINE_COPY0])
		dev_priv->engines[PSCNV_ENGINE_COPY0]->
			chan_alloc(dev_priv->engines[PSCNV_ENGINE_COPY0], ch);
	if (dev_priv->engines[PSCNV_ENGINE_COPY1])
		dev_priv->engines[PSCNV_ENGINE_COPY1]->
			chan_alloc(dev_priv->engines[PSCNV_ENGINE_COPY1], ch);

	pscnv_chan_set_state(ch, PSCNV_CHAN_RUNNING);
	
	fifo_ctx->ib_chan = pscnv_ib_chan_init(ch);
	if (!fifo_ctx->ib_chan) {
		NV_ERROR(dev, "nvc0_fifo_chan_init_ib: failed to allocate "
			"ib_chan on channel %d\n", ch->cid);
		pscnv_chan_fail(ch);
		return -EFAULT;
	}
	
	ret = pscnv_ib_add_fence(fifo_ctx->ib_chan);
	if (ret) {
		NV_ERROR(dev, "nvc0_fifo_chan_init_ib: failed to allocate "
			"fence on channel %d\n", ch->cid);
		pscnv_chan_fail(ch);
		return ret;
	}
	
	return 0;
}
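
The two checks near the top pin the indirect buffer size exactly: with ib_order fixed to 9 and 8 bytes per IB entry, the buffer must be 8 * 2^9 = 0x1000 bytes, one 4 KiB page of 512 entries. The helper below merely restates that relation; its name is hypothetical.

/* Sketch: the size relation enforced by nvc0_fifo_chan_init_ib above.
 * For ib_order == 9 this yields 8 * 512 == 0x1000 bytes. */
static inline uint64_t ib_size_for_order_sketch(uint32_t ib_order)
{
	return 8 * (1ULL << ib_order);
}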
Example #5
int
nvc0_graph_chan_alloc(struct pscnv_engine *eng, struct pscnv_chan *chan)
{
	struct drm_device *dev = eng->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	struct nvc0_graph_engine *graph = NVC0_GRAPH(eng);
	struct nvc0_graph_chan *grch; /* per channel graph data */
	
	uint32_t cookie = 0xcc000000 + (chan->cid << 8);
	int ret = 0;
	int i;
	
	NV_INFO(dev, "PGRAPH: adding to channel %d in vspace %d\n",
		chan->cid, chan->vspace->vid);
	
	grch = kzalloc(sizeof *grch, GFP_KERNEL);
	if (!grch) {
		ret = -ENOMEM;
		goto fail_kzalloc;
	}
	
	/* allocate the per-channel context page (grctx) */
	grch->grctx = pscnv_mem_alloc_and_map(chan->vspace, graph->grctx_size,
		PSCNV_GEM_CONTIG | PSCNV_GEM_NOUSER | PSCNV_ZEROFILL | PSCNV_MAP_KERNEL,
		cookie, &grch->grctx_vm_base);
	
	if (!grch->grctx) {
		ret = -ENOMEM;
		goto fail_grctx;
	}

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	grch->mmio = pscnv_mem_alloc_and_map(chan->vspace, 0x1000 /* size */,
		PSCNV_GEM_CONTIG | PSCNV_MAP_KERNEL,
		cookie + 1, &grch->mmio_vm_base);
	
	if (!grch->mmio) {
		ret = -ENOMEM;
		goto fail_mmio_list;
	}

	/* allocate buffers referenced by the mmio list;
	 * these buffers are the counterpart to obj08004, obj0800c, obj19848
	 * of the original pscnv */
	for (i = 0; i < ARRAY_SIZE(graph->mmio_data) && graph->mmio_data[i].size; i++) {
		grch->data[i].mem = pscnv_mem_alloc_and_map(chan->vspace,
			graph->mmio_data[i].size,
			PSCNV_GEM_CONTIG | PSCNV_MAP_KERNEL,
			cookie + 0x10 + i, &grch->data[i].vm_base);
	
		if (!grch->data[i].mem) {
			ret = -ENOMEM;
			goto fail_mmio_data;
		}
	}

	/* finally, fill in the mmio list and point the context at it */
	for (i = 0; i < ARRAY_SIZE(graph->mmio_list) && graph->mmio_list[i].addr; i++) {
		u32 addr = graph->mmio_list[i].addr;
		u32 data = graph->mmio_list[i].data;
		u32 shift = graph->mmio_list[i].shift;
		u32 buffer = graph->mmio_list[i].buffer;

		if (shift) {
			/* splice the referenced buffer's VM address into the value */
			u64 info = grch->data[buffer].vm_base;
			data |= info >> shift;
		}

		nv_wv32(grch->mmio, grch->mmio_nr++ * 4, addr);
		nv_wv32(grch->mmio, grch->mmio_nr++ * 4, data);
	}

	/* fill grctx with the initial values from the template channel */
	for (i = 0; i < graph->grctx_size; i += 4)
		nv_wv32(grch->grctx, i, graph->data[i / 4]);

	/* point the context at the mmio list: number of (addr, data) pairs
	 * at 0x00, list address in 256-byte units at 0x04 */
	nv_wv32(grch->grctx, 0x00, grch->mmio_nr / 2);
	nv_wv32(grch->grctx, 0x04, grch->mmio_vm_base >> 8);
	
	chan->engdata[PSCNV_ENGINE_GRAPH] = grch;
	
	/* register this engine's context with the channel */
	nv_wv32(chan->bo, 0x210, lower_32_bits(grch->grctx_vm_base) | 4);
	nv_wv32(chan->bo, 0x214, upper_32_bits(grch->grctx_vm_base));
	dev_priv->vm->bar_flush(dev);

	return 0;
	
fail_mmio_data:
	for (i = 0; i < ARRAY_SIZE(graph->mmio_data); i++) {
		if (grch->data[i].mem) {
			pscnv_vspace_unmap(chan->vspace, grch->data[i].vm_base);
			pscnv_mem_free(grch->data[i].mem);
		}
	}
	pscnv_vspace_unmap(chan->vspace, grch->mmio_vm_base);
	pscnv_mem_free(grch->mmio);
	
fail_mmio_list:
	pscnv_vspace_unmap(chan->vspace, grch->grctx_vm_base);
	pscnv_mem_free(grch->grctx);
	
fail_grctx:
	kfree(grch);

fail_kzalloc:
	NV_ERROR(dev, "PGRAPH: Couldn't allocate channel %d!\n", chan->cid);
	
	return ret;
}
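
The mmio list built above is a flat array of (register address, value) word pairs; grch->mmio_nr counts 32-bit words, which is why the pair count written to the context at 0x00 is mmio_nr / 2. A hypothetical append helper (a sketch; mmio_append_sketch is not part of the original source) makes the format explicit:

/* Hypothetical helper (sketch): append one (address, value) pair to the
 * mmio list, mirroring the two nv_wv32 calls in the loop above. Each
 * pair advances grch->mmio_nr by two 32-bit words. */
static void mmio_append_sketch(struct nvc0_graph_chan *grch, u32 addr, u32 data)
{
	nv_wv32(grch->mmio, grch->mmio_nr++ * 4, addr);
	nv_wv32(grch->mmio, grch->mmio_nr++ * 4, data);
}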