/* TODO: Figure out tag memory details and drop the over-cautious allocation. */ static int nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) { u32 tag_size, tag_margin, tag_align; int ret; nv_wr32(priv, 0x17e8d8, priv->part_nr); if (nv_device(pfb)->card_type >= NV_E0) nv_wr32(priv, 0x17e000, priv->part_nr); /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ priv->num_tags = (pfb->ram->size >> 17) / 4; if (priv->num_tags > (1 << 17)) priv->num_tags = 1 << 17; /* we have 17 bits in PTE */ priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */ tag_align = priv->part_nr * 0x800; tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align; /* 4 part 4 sub: 0x2000 bytes for 56 tags */ /* 3 part 4 sub: 0x6000 bytes for 168 tags */ /* * About 147 bytes per tag. Let's be safe and allocate x2, which makes * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags. * * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %. */ tag_size = (priv->num_tags / 64) * 0x6000 + tag_margin; tag_size += tag_align; tag_size = (tag_size + 0xfff) >> 12; /* round up */ ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, &priv->tag_ram); if (ret) { priv->num_tags = 0; } else { u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin; tag_base += tag_align - 1; ret = do_div(tag_base, tag_align); nv_wr32(priv, 0x17e8d4, tag_base); } ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); return ret; }
/*
 * nv04_instmem_ctor - construct the NV04 instance-memory subdev.
 *
 * Reserves a fixed 512 KiB PRAMIN aperture at the end of VRAM and carves
 * out the fixed-layout objects the hardware expects there.  The offsets in
 * the comments below follow directly from the allocation order, so the
 * order of the nouveau_gpuobj_new()/nouveau_ramht_new() calls must not
 * change.
 */
static int
nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nv04_instmem_priv *priv;
	int ret;

	/* *pobject is set even on failure so the caller can destroy a
	 * partially-constructed object.
	 */
	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* PRAMIN aperture maps over the end of VRAM, reserve it */
	priv->base.reserved = 512 * 1024;

	/* heap allocator handing out sub-ranges of the reserved aperture */
	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				 &priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
				&priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	/* 0x18800-0x18a00: reserve for RAMRO */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
				 &priv->ramro);
	if (ret)
		return ret;

	return 0;
}
/*
 * nouveau_gpuobj_create_ - backend for gpuobj creation.
 *
 * Two allocation paths:
 *  - pargpu != NULL: sub-allocate from the heap of an ancestor gpuobj
 *    (walking up until a heap-bearing gpuobj is found);
 *  - pargpu == NULL: allocate fresh instance memory via the instmem
 *    subdev, optionally remapping it through BAR for CPU access.
 *
 * On success *pobject is the new object of 'length' bytes; on failure a
 * negative errno is returned.  NOTE(review): the refcount/ref handoff on
 * 'parent' is deliberate and order-sensitive — see comments inline.
 */
int
nouveau_gpuobj_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pclass,
		       struct nouveau_object *pargpu,
		       u32 size, u32 align, u32 flags,
		       int length, void **pobject)
{
	struct nouveau_instmem *imem = nouveau_instmem(parent);
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nouveau_gpuobj *gpuobj;
	struct nouveau_mm *heap = NULL;
	int ret, i;
	u64 addr;

	*pobject = NULL;

	if (pargpu) {
		/* walk up the gpuobj ancestry until we find one that owns a
		 * heap (block_size != 0) we can sub-allocate from
		 */
		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
			if (nv_gpuobj(pargpu)->heap.block_size)
				break;
			pargpu = pargpu->parent;
		}

		if (unlikely(pargpu == NULL)) {
			nv_error(parent, "no gpuobj heap\n");
			return -EINVAL;
		}

		addr = nv_gpuobj(pargpu)->addr;
		heap = &nv_gpuobj(pargpu)->heap;
		/* balance the unconditional ref drop after object creation
		 * below: on this path 'parent' was not newly referenced
		 */
		atomic_inc(&parent->refcount);
	} else {
		/* no parent gpuobj: get backing storage from instmem;
		 * imem->alloc() replaces 'parent' with the new instobj
		 */
		ret = imem->alloc(imem, parent, size, align, &parent);
		pargpu = parent;
		if (ret)
			return ret;

		addr = nv_memobj(pargpu)->addr;
		size = nv_memobj(pargpu)->size;

		if (bar && bar->alloc) {
			/* NOTE(review): relies on the instobj layout placing
			 * a struct nouveau_mem * immediately after the
			 * nouveau_instobj header — confirm against instmem
			 * implementations before changing either side.
			 */
			struct nouveau_instobj *iobj = (void *)parent;
			struct nouveau_mem **mem = (void *)(iobj + 1);
			struct nouveau_mem *node = *mem;
			/* on success the BAR object supersedes the raw
			 * instobj as the gpuobj's parent
			 */
			if (!bar->alloc(bar, parent, node, &pargpu)) {
				nouveau_object_ref(NULL, &parent);
				parent = pargpu;
			}
		}
	}

	ret = nouveau_object_create_(parent, engine, oclass, pclass |
				     NV_GPUOBJ_CLASS, length, pobject);
	/* drop the reference taken (or simulated via atomic_inc) above;
	 * object creation took its own reference on 'parent' if needed
	 */
	nouveau_object_ref(NULL, &parent);
	gpuobj = *pobject;
	if (ret)
		return ret;

	gpuobj->parent = pargpu;
	gpuobj->flags = flags;
	gpuobj->addr = addr;
	gpuobj->size = size;

	if (heap) {
		/* sub-allocate 'size' bytes out of the ancestor's heap;
		 * alignment of 0 is clamped to 1 for the allocator
		 */
		ret = nouveau_mm_head(heap, 0, 1, size, size,
				      max(align, (u32)1), &gpuobj->node);
		if (ret)
			return ret;

		gpuobj->addr += gpuobj->node->offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
		/* this gpuobj will itself serve as a heap for children */
		ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
		if (ret)
			return ret;
	}

	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
		/* zero the object 32 bits at a time through the wr path */
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	return ret;
}
/*
 * nv40_instmem_ctor - construct the NV40-family instance-memory subdev.
 *
 * Unlike NV04, PRAMIN is reached through a PCI BAR mapping and the size of
 * the reserved region is computed from the chipset's per-channel graphics
 * context size (magic constants from engine/graph/nv40.c).  As with NV04,
 * the fixed offsets documented below follow from allocation order, so the
 * sequence of carve-outs must not be reordered.
 */
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	/* *pobject is set even on failure so the caller can tear down a
	 * partially-constructed object.
	 */
	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar: prefer BAR2 when present, fall back to BAR3 */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

#ifdef __NetBSD__
	priv->iomemt = nv_device_resource_tag(device, bar);
	priv->iomemsz = nv_device_resource_len(device, bar);
	ret = bus_space_map(priv->iomemt,
	    nv_device_resource_start(device, bar),
	    priv->iomemsz, 0, &priv->iomemh);
	if (ret) {
		/* iomemsz doubles as the "is mapped" flag for the dtor */
		priv->iomemsz = 0;
		nv_error(priv, "unable to map PRAMIN BAR: %d\n", ret);
		return -EFAULT;
	}
#else
	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}
#endif

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/graph/nv40.c
	 */
	/* vs = number of enabled units from the 0x001540 strap register */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if (device->chipset == 0x40)
		priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset < 0x43)
		priv->base.reserved = 0x4f00 * vs;
	else if (nv44_graph_class(priv))
		priv->base.reserved = 0x4980 * vs;
	else
		priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

	priv->base.reserved = round_up(priv->base.reserved, 4096);

	/* heap allocator handing out sub-ranges of the reserved aperture */
	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				 &priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
				&priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
				 &priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}