/*
 * Initialise a gpuobj, backing it either by sub-allocating from a parent
 * gpuobj's heap, or (when no parent is given) by allocating fresh instance
 * memory from the device.
 *
 * align >= 0 requests a head (bottom-up) allocation with that alignment
 * (minimum 1); align < 0 requests a tail (top-down) allocation aligned to
 * -align.  When @zero is set, a sub-allocated object is cleared explicitly;
 * a memory-backed object is zeroed by nvkm_memory_new() itself.
 *
 * Returns 0 on success or a negative error code.
 */
static int
nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
	u32 off;
	int ret;

	if (!parent) {
		/* Standalone object: allocate backing instance memory. */
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
				      abs(align), zero, &gpuobj->memory);
		if (ret)
			return ret;

		gpuobj->func = &nvkm_gpuobj_heap;
		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
		gpuobj->size = nvkm_memory_size(gpuobj->memory);

		/* Heap for sub-allocating children out of this object. */
		return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
	}

	/* Sub-allocate a range from the parent's heap. */
	if (align >= 0)
		ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
				   max(align, 1), &gpuobj->node);
	else
		ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
				   -align, &gpuobj->node);
	if (ret)
		return ret;

	gpuobj->parent = parent;
	gpuobj->func = &nvkm_gpuobj_func;
	gpuobj->addr = parent->addr + gpuobj->node->offset;
	gpuobj->size = gpuobj->node->length;

	if (zero) {
		/* Clear the sub-allocation one 32-bit word at a time. */
		nvkm_kmap(gpuobj);
		for (off = 0; off < gpuobj->size; off += 4)
			nvkm_wo32(gpuobj, off, 0x00000000);
		nvkm_done(gpuobj);
	}

	/* Heap for sub-allocating children out of this object. */
	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}
/*
 * Constructor for the NV40-family instance-memory (PRAMIN) subsystem.
 *
 * Maps the PRAMIN BAR, computes how much of the aperture to reserve for
 * per-channel graphics contexts and fixed objects, initialises the
 * suballocation heap, and carves out the fixed regions (vbios, RAMHT,
 * RAMRO, RAMFC).
 *
 * On failure a negative error code is returned; *pobject is set before the
 * create-status check so the framework can tear down a partially
 * constructed object via the dtor (which presumably also iounmaps
 * priv->iomem — confirm against the matching dtor, not visible here).
 */
static int
nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar: prefer BAR2 when it exists, otherwise fall back to BAR3 */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/gr/nv40.c
	 */
	/* vs = number of enabled units in reg 0x001540 bits 15:8; scales the
	 * per-channel context size below */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if (device->chipset == 0x40)
		priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset < 0x43)
		priv->base.reserved = 0x4f00 * vs;
	else if (nv44_gr_class(priv))
		priv->base.reserved = 0x4980 * vs;
	else
		priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

	/* align total reservation to a page boundary */
	priv->base.reserved = round_up(priv->base.reserved, 4096);

	/* heap for suballocating objects out of the reserved region */
	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
			      &priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
			      &priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}