Example #1
int
nouveau_bar_create_(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, int length, void **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_bar *bar;
	int ret;

	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
				     "bar", length, pobject);
	bar = *pobject;
	if (ret)
		return ret;

	if (nv_device_resource_len(device, 3) != 0) {
		bar->iomem = ioremap(nv_device_resource_start(device, 3),
				     nv_device_resource_len(device, 3));
		if (!bar->iomem)
			nv_warn(bar, "PRAMIN ioremap failed\n");
	}

	return 0;
}
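Example #1 only establishes the PRAMIN mapping; on the teardown path the ioremap() has to be balanced with iounmap(). The helper below is purely illustrative (the name example_bar_unmap and its placement are assumptions, not code from the driver); it sketches the cleanup these constructors imply.

#include <linux/io.h>		/* ioremap()/iounmap() */
/* assumes the driver's subdev/bar.h for struct nouveau_bar */

/* Hypothetical cleanup counterpart to nouveau_bar_create_() above:
 * release the BAR 3 (PRAMIN) mapping if it was established. */
static void
example_bar_unmap(struct nouveau_bar *bar)
{
	if (bar->iomem) {
		iounmap(bar->iomem);
		bar->iomem = NULL;
	}
}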
Example #2
File: gf100.c  Project: 168519/linux
static int
gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
		  int bar_nr)
{
	struct nvkm_device *device = nv_device(&priv->base);
	struct nvkm_vm *vm;
	resource_size_t bar_len;
	int ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
			      &bar_vm->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
			      &bar_vm->pgd);
	if (ret)
		return ret;

	bar_len = nv_device_resource_len(device, bar_nr);

	ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	/*
	 * Bootstrap page table lookup.
	 */
	if (bar_nr == 3) {
		ret = nvkm_gpuobj_new(nv_object(priv), NULL,
				      (bar_len >> 12) * 8, 0x1000,
				      NVOBJ_FLAG_ZERO_ALLOC,
				      &vm->pgt[0].obj[0]);
		vm->pgt[0].refcount[0] = 1;
		if (ret)
			return ret;
	}

	/* publish the new VM through the BAR's page directory and drop the
	 * local reference */
	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	return 0;
}
Example #3
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

#ifdef __NetBSD__
	priv->iomemt = nv_device_resource_tag(device, bar);
	priv->iomemsz = nv_device_resource_len(device, bar);
	ret = bus_space_map(priv->iomemt,
	    nv_device_resource_start(device, bar),
	    priv->iomemsz, 0, &priv->iomemh);
	if (ret) {
		priv->iomemsz = 0;
		nv_error(priv, "unable to map PRAMIN BAR: %d\n", ret);
		return -EFAULT;
	}
#else
	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}
#endif

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/graph/nv40.c
	 */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
	else if (nv44_graph_class(priv))  priv->base.reserved = 0x4980 * vs;
	else				  priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

	priv->base.reserved = round_up(priv->base.reserved, 4096);

	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				&priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
			       &priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
				&priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}
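To put the reservation arithmetic in example #3 into numbers: assuming, for illustration, four enabled units reported in bits 15:8 of register 0x001540 (vs = 4) on a chipset at or above 0x43 whose graphics engine is not nv44-class, the reservation is (0x4a40 * 4 + 16 KiB) * 32 + 512 KiB + 512 KiB = 4,005,888 bytes, roughly 3.8 MiB of PRAMIN set aside before the heap is initialised; that figure is already 4 KiB aligned, so round_up() leaves it unchanged. The vs = 4 value is an assumed example, not taken from real hardware.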
Example #4
static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_object *heap;
	struct nouveau_vm *vm;
	struct nv50_bar_priv *priv;
	u64 start, limit;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_HEAP, &priv->mem);
	heap = nv_object(priv->mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				(device->chipset == 0x50) ? 0x1400 : 0x0200,
				 0, 0, &priv->pad);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
				 0, &priv->pgd);
	if (ret)
		return ret;

	/* BAR3 */
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nouveau_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 ((limit-- - start) >> 12) * 8, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
	if (ret)
		return ret;

	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar3, 0x10, 0x00000000);
	nv_wo32(priv->bar3, 0x14, 0x00000000);

	/* BAR1 */
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nouveau_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
	if (ret)
		return ret;

	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar1, 0x10, 0x00000000);
	nv_wo32(priv->bar1, 0x14, 0x00000000);

	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nv50_bar_kmap;
	priv->base.umap = nv50_bar_umap;
	priv->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		priv->base.flush = nv50_bar_flush;
	else
		priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}
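A note on the page-table sizing that recurs in examples #2, #4 and #5: the bootstrap allocation of ((limit - start) >> 12) * 8 bytes (or (bar_len >> 12) * 8 in the gf100 variant) corresponds to one 8-byte page-table entry per 4 KiB page of the BAR aperture; the object is also 0x1000-aligned and created with NVOBJ_FLAG_ZERO_ALLOC so the table starts out zeroed.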
Example #5
File: nvc0.c  Project: 24hours/linux
static int
nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nvc0_bar_priv *priv;
	struct nouveau_gpuobj *mem;
	struct nouveau_vm *vm;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* BAR3 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
				&priv->bar[0].mem);
	mem = priv->bar[0].mem;
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
				&priv->bar[0].pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 3), 0, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
				 (nv_device_resource_len(device, 3) >> 12) * 8,
				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
				 &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
	nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 3) - 1));
	nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 3) - 1));

	/* BAR1 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
				&priv->bar[1].mem);
	mem = priv->bar[1].mem;
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
				&priv->bar[1].pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_new(device, 0, nv_device_resource_len(device, 1), 0, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
	nv_wo32(mem, 0x0208, lower_32_bits(nv_device_resource_len(device, 1) - 1));
	nv_wo32(mem, 0x020c, upper_32_bits(nv_device_resource_len(device, 1) - 1));

	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nvc0_bar_kmap;
	priv->base.umap = nvc0_bar_umap;
	priv->base.unmap = nvc0_bar_unmap;
	priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}
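All five examples key off nv_device_resource_len() to size or map a PCI BAR. For a PCI-attached GPU this is, in effect, the BAR length reported by the PCI core; the sketch below expresses that idea with only generic kernel helpers (example_bar_len is a hypothetical name, not a driver function).

#include <linux/pci.h>

/* Sketch of the quantity the examples above consume for PCI devices.
 * pci_resource_len() yields 0 for an unimplemented BAR, which is why
 * callers such as nouveau_bar_create_() test the length before mapping. */
static resource_size_t
example_bar_len(struct pci_dev *pdev, unsigned int bar)
{
	return pci_resource_len(pdev, bar);
}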