static int gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent, struct nvkm_gpuobj **pgpuobj) { struct gf100_dmaobj_priv *priv = (void *)dmaobj; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { case GT214_DISP_CORE_CHANNEL_DMA: case GT214_DISP_BASE_CHANNEL_DMA: case GT214_DISP_OVERLAY_CHANNEL_DMA: break; default: return -EINVAL; } } else return 0; ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj)); nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit)); nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start)); nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 | upper_32_bits(priv->base.start)); nv_wo32(*pgpuobj, 0x10, 0x00000000); nv_wo32(*pgpuobj, 0x14, priv->flags5); } return ret; }
/*
 * SCLASS ioctl: list the object classes supported beneath a handle.
 *
 * The caller supplies an nvif_ioctl_sclass_v0 whose payload must be
 * sized to hold exactly v0.count entries; on success v0.count is
 * rewritten to the number of classes actually filled in.  NOTE:
 * 'ret' is consumed/produced by the nvif_unpack() macro.
 */
static int
nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
{
	union {
		struct nvif_ioctl_sclass_v0 v0;
	} *args = data;
	struct nouveau_object *object = handle->object;
	int ret;

	/* only parent-class objects can expose child classes */
	if (!nv_iclass(object, NV_PARENT_CLASS)) {
		nv_debug(object, "cannot have children (sclass)\n");
		return -ENODEV;
	}

	nv_ioctl(object, "sclass size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(object, "sclass vers %d count %d\n",
			 args->v0.version, args->v0.count);
		if (size != args->v0.count * sizeof(args->v0.oclass[0])) {
			ret = -EINVAL;
		} else {
			ret = nouveau_parent_lclass(object, args->v0.oclass,
						    args->v0.count);
			if (ret >= 0) {
				/* lclass returned the number of entries */
				args->v0.count = ret;
				ret = 0;
			}
		}
	}

	return ret;
}
/*
 * Core logging backend.
 *
 * Builds a "nouveau <level>[subdev][device]" prefix around 'fmt' and
 * hands the result to vprintk().  Messages above the relevant debug
 * level (subdev's, or client's for client objects) are dropped.
 * 'pfx' is the printk severity prefix chosen by the caller; 'level'
 * indexes the single-character level tag table below.
 */
void
nv_printk_(struct nouveau_object *object, const char *pfx, int level,
	   const char *fmt, ...)
{
	/* one tag per NV_DBG_* level: fatal..spam */
	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
	char mfmt[256];
	va_list args;

	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
		struct nouveau_object *device = object;
		struct nouveau_object *subdev = object;
		char obuf[64], *ofmt = "";

		if (object->engine) {
			/* engine-bound object: tag message with its class
			 * and pointer, and attribute it to the engine
			 */
			snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
				 nv_hclass(object), object);
			ofmt = obuf;
			subdev = object->engine;
			device = object->engine;
		}

		/* NOTE(review): assumes subdev->parent (when present) is the
		 * device object — confirm against object-tree construction
		 */
		if (subdev->parent)
			device = subdev->parent;

		/* drop messages above the subdev's configured debug level */
		if (level > nv_subdev(subdev)->debug)
			return;

		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s",
			 pfx, name[level], nv_subdev(subdev)->name,
			 nv_device(device)->name, ofmt, fmt);
	} else
	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
		/* client object: filter on the client's own debug level */
		if (level > nv_client(object)->debug)
			return;

		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s",
			 pfx, name[level], nv_client(object)->name, fmt);
	} else {
		/* no object context at all */
		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
	}

	va_start(args, fmt);
	vprintk(mfmt, args);
	va_end(args);
}
/*
 * Resolve the channel id an object belongs to.
 *
 * Walks up the object's parent chain looking for an engine context
 * whose engine index matches this fifo's; the object directly below
 * that context is the channel.  Returns the chid, or -1 if the object
 * is not bound to a channel of this engine.
 */
static int
nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
{
	const int engidx = nv_hclass(priv) & 0xff;
	struct nouveau_object *cur;

	for (cur = object; cur && cur->parent; cur = cur->parent) {
		struct nouveau_object *up = cur->parent;

		if (nv_iclass(up, NV_ENGCTX_CLASS) &&
		    (nv_hclass(up) & 0xff) == engidx)
			return nouveau_fifo_chan(cur)->chid;
	}

	return -1;
}
/*
 * Find the class identified by 'handle' among those available under
 * 'parent'.
 *
 * Two-phase lookup: first the parent's own static sclass list, then
 * the class tables of every engine enabled in the parent's engine
 * mask.  On success *pengine/*poclass are filled in and 0 returned;
 * -EINVAL if no match exists.
 */
int
nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
		      struct nouveau_object **pengine,
		      struct nouveau_oclass **poclass)
{
	struct nouveau_sclass *sc;
	u64 mask;

	/* classes provided directly by the parent */
	for (sc = nv_parent(parent)->sclass; sc; sc = sc->sclass) {
		if ((sc->oclass->handle & 0xffff) == handle) {
			*pengine = parent->engine;
			*poclass = sc->oclass;
			return 0;
		}
	}

	/* classes provided by each engine the parent permits */
	for (mask = nv_parent(parent)->engine; mask; ) {
		const int i = __ffs64(mask);
		struct nouveau_engine *engine;
		struct nouveau_oclass *oclass;

		mask &= ~(1ULL << i);

		/* clients resolve classes against their device object */
		if (nv_iclass(parent, NV_CLIENT_CLASS))
			engine = nv_engine(nv_client(parent)->device);
		else
			engine = nouveau_engine(parent, i);

		if (!engine)
			continue;

		for (oclass = engine->sclass; oclass->ofuncs; oclass++) {
			if ((oclass->handle & 0xffff) == handle) {
				*pengine = nv_object(engine);
				*poclass = oclass;
				return 0;
			}
		}
	}

	return -EINVAL;
}
/*
 * Poll a register until (value & mask) != data, or 'nsec' nanoseconds
 * elapse.
 *
 * Subdev-class objects are read through the priv-reg accessor
 * (nv_rd32), anything else through object-relative reads (nv_ro32).
 * The register is always sampled at least once.  Returns true when
 * the condition was met, false on timeout.
 */
bool
nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
{
	struct nouveau_timer *ptimer = nouveau_timer(obj);
	const u64 start = ptimer->read(ptimer);

	do {
		u32 val;

		if (nv_iclass(obj, NV_SUBDEV_CLASS))
			val = nv_rd32(obj, addr);
		else
			val = nv_ro32(obj, addr);

		if ((val & mask) != data)
			return true;
	} while (ptimer->read(ptimer) - start < nsec);

	return false;
}
/*
 * Resolve an ioctl's object path and dispatch the requested ioctl type.
 *
 * 'path' holds 'nr' handles ordered leaf-first; they are consumed from
 * the end (path[nr-1] first), descending one handle per iteration from
 * 'parent'.  Ownership is enforced against the resolved handle's route
 * before *route/*token are returned and the type-indexed handler from
 * nvkm_ioctl_v0[] is invoked.
 */
static int
nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr, u32 *path,
		void *data, u32 size, u8 owner, u8 *route, u64 *token)
{
	struct nouveau_handle *handle = parent;
	struct nouveau_namedb *namedb;
	struct nouveau_object *object;
	int ret;

	/* comma expression: refresh 'object' from the current parent
	 * before testing/decrementing the remaining path depth
	 */
	while ((object = parent->object), nr--) {
		nv_ioctl(object, "path 0x%08x\n", path[nr]);
		if (!nv_iclass(object, NV_PARENT_CLASS)) {
			nv_debug(object, "cannot have children (path)\n");
			return -EINVAL;
		}
		/* look the next handle up in the nearest enclosing namedb */
		if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
		    !(handle = nouveau_namedb_get(namedb, path[nr]))) {
			nv_debug(object, "handle 0x%08x not found\n", path[nr]);
			return -ENOENT;
		}
		/* drop the namedb reference immediately; only the pointer
		 * is carried forward for the next iteration
		 */
		nouveau_namedb_put(handle);
		parent = handle;
	}

	/* unless the caller accepts any owner, it must match the route
	 * recorded on the resolved handle
	 */
	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) {
		nv_ioctl(object, "object route != owner\n");
		return -EACCES;
	}
	*route = handle->route;
	*token = handle->token;

	/* dispatch: default to -EINVAL for out-of-range/unsupported types */
	if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
		if (nvkm_ioctl_v0[type].version == 0) {
			ret = nvkm_ioctl_v0[type].func(handle, data, size);
		}
	}

	return ret;
}
/*
 * Allocate a new gpuobj of 'size' bytes with the given alignment and
 * flags, parented to 'pargpu'.
 *
 * The object is constructed against a subdev: if 'parent' itself is
 * not one, its engine is used instead (and must exist).  Returns the
 * result of nouveau_object_ctor(), storing the new object in *pgpuobj.
 */
int
nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
		   u32 size, u32 align, u32 flags,
		   struct nouveau_gpuobj **pgpuobj)
{
	struct nouveau_gpuobj_class args = {
		.pargpu = pargpu,
		.size = size,
		.align = align,
		.flags = flags,
	};
	struct nouveau_object *engine = parent;

	/* hop to the parent's engine when the parent isn't a subdev */
	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
		engine = engine->engine;
	BUG_ON(engine == NULL);

	return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
				   &args, sizeof(args),
				   (struct nouveau_object **)pgpuobj);
}
/*
 * Software method: set the vblank semaphore DMA object.
 *
 * The method argument (*(u32 *)args) is a handle looked up in the
 * channel's namedb; it must name a gpuobj, whose instance offset
 * (>>4) is stored in chan->vblank.ctxdma.  Returns 0 on success,
 * -ENOENT for an unknown handle, -EINVAL for a non-gpuobj.
 */
static int
nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
			      void *args, u32 size)
{
	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
	struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
	struct nouveau_handle *handle;
	int ret = -EINVAL;

	handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
	if (handle == NULL)
		return -ENOENT;

	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
		struct nouveau_gpuobj *obj = nv_gpuobj(handle->object);

		chan->vblank.ctxdma = obj->node->offset >> 4;
		ret = 0;
	}

	nouveau_namedb_put(handle);
	return ret;
}
static int nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { u32 flags0 = nv_mclass(dmaobj); u32 flags5 = 0x00000000; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { case NVA3_DISP_MAST_CLASS: case NVA3_DISP_SYNC_CLASS: case NVA3_DISP_OVLY_CLASS: break; default: return -EINVAL; } } else return 0; if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { if (dmaobj->target == NV_MEM_TARGET_VM) { dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; } else { dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; dmaobj->conf0 |= 0x00020000; } } flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22; flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); switch (dmaobj->target) { case NV_MEM_TARGET_VM: flags0 |= 0x00000000; break; case NV_MEM_TARGET_VRAM: flags0 |= 0x00010000; break; case NV_MEM_TARGET_PCI: flags0 |= 0x00020000; break; case NV_MEM_TARGET_PCI_NOSNOOP: flags0 |= 0x00030000; break; default: return -EINVAL; } switch (dmaobj->access) { case NV_MEM_ACCESS_VM: break; case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; case NV_MEM_ACCESS_WO: case NV_MEM_ACCESS_RW: flags0 |= 0x00080000; break; } ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { nv_wo32(*pgpuobj, 0x00, flags0); nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | upper_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x10, 0x00000000); nv_wo32(*pgpuobj, 0x14, flags5); } return ret; }
/*
 * NEW ioctl: construct a new object under 'parent' and bind it to a
 * client handle.
 *
 * Sequence: unpack args, validate the parent, resolve the requested
 * class, power up the owning engine, optionally interpose an engine
 * context, construct the object, then create and init its handle.
 * Errors unwind through the goto chain below in strict reverse order.
 * NOTE: 'ret' is consumed/produced by the nvif_unpack() macro.
 */
static int
nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
{
	union {
		struct nvif_ioctl_new_v0 v0;
	} *args = data;
	struct nouveau_client *client = nouveau_client(parent->object);
	struct nouveau_object *engctx = NULL;
	struct nouveau_object *object = NULL;
	struct nouveau_object *engine;
	struct nouveau_oclass *oclass;
	struct nouveau_handle *handle;
	u32 _handle, _oclass;
	int ret;

	nv_ioctl(client, "new size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		_handle = args->v0.handle;
		_oclass = args->v0.oclass;
	} else
		/* unpack failed; 'ret' was set by the macro */
		return ret;

	nv_ioctl(client, "new vers %d handle %08x class %08x "
			 "route %02x token %llx\n",
		 args->v0.version, _handle, _oclass,
		 args->v0.route, args->v0.token);

	if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
		nv_debug(parent->object, "cannot have children (ctor)\n");
		ret = -ENODEV;
		goto fail_class;
	}

	/* check that parent supports the requested subclass */
	ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass);
	if (ret) {
		nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
		goto fail_class;
	}

	/* make sure engine init has been completed *before* any objects
	 * it controls are created - the constructors may depend on
	 * state calculated at init (ie. default context construction)
	 */
	if (engine) {
		ret = nouveau_object_inc(engine);
		if (ret)
			goto fail_class;
	}

	/* if engine requires it, create a context object to insert
	 * between the parent and its children (eg. PGRAPH context)
	 */
	if (engine && nv_engine(engine)->cclass) {
		ret = nouveau_object_ctor(parent->object, engine,
					  nv_engine(engine)->cclass,
					  data, size, &engctx);
		if (ret)
			goto fail_engctx;
	} else {
		/* no context class: children hang directly off the parent */
		nouveau_object_ref(parent->object, &engctx);
	}

	/* finally, create new object and bind it to its handle */
	ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
	/* object pointer is recorded on the client even on ctor failure;
	 * NOTE(review): presumably consumed by the caller for debugging -
	 * confirm before reordering
	 */
	client->data = object;
	if (ret)
		goto fail_ctor;

	ret = nouveau_object_inc(object);
	if (ret)
		goto fail_init;

	ret = nouveau_handle_create(parent->object, parent->name,
				    _handle, object, &handle);
	if (ret)
		goto fail_handle;

	/* route/token are stamped regardless of init result; on failure
	 * the handle is destroyed again before falling into the unwind
	 */
	ret = nouveau_handle_init(handle);
	handle->route = args->v0.route;
	handle->token = args->v0.token;
	if (ret)
		nouveau_handle_destroy(handle);

	/* error unwind: each label releases what the step above acquired */
fail_handle:
	nouveau_object_dec(object, false);
fail_init:
	nouveau_object_ref(NULL, &object);
fail_ctor:
	nouveau_object_ref(NULL, &engctx);
fail_engctx:
	if (engine)
		nouveau_object_dec(engine, false);
fail_class:
	return ret;
}
/*
 * Core logging backend (nvkm variant).
 *
 * Maps the NV_DBG_* level onto a printk severity prefix, builds a
 * "nouveau <level>[subdev][device]" prefix around 'fmt', and hands
 * the result to vprintk().  Messages above the relevant debug level
 * (subdev's, or client's for client objects) are dropped.
 */
void
nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...)
{
	/* one tag per NV_DBG_* level: fatal..spam */
	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
	const char *pfx;
	char mfmt[256];
	va_list args;

	/* choose the kernel log severity for this debug level */
	switch (level) {
	case NV_DBG_FATAL:
		pfx = KERN_CRIT;
		break;
	case NV_DBG_ERROR:
		pfx = KERN_ERR;
		break;
	case NV_DBG_WARN:
		pfx = KERN_WARNING;
		break;
	case NV_DBG_INFO_NORMAL:
		pfx = KERN_INFO;
		break;
	case NV_DBG_DEBUG:
	case NV_DBG_PARANOIA:
	case NV_DBG_TRACE:
	case NV_DBG_SPAM:
	default:
		pfx = KERN_DEBUG;
		break;
	}

	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
		struct nvkm_object *device;
		struct nvkm_object *subdev;
		char obuf[64], *ofmt = "";

		if (object->engine == NULL) {
			/* engine-less object: walk up to the nearest subdev */
			subdev = object;
			while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
				subdev = subdev->parent;
		} else {
			subdev = &object->engine->subdev.object;
		}

		/* NOTE(review): assumes the subdev's parent (when present)
		 * is the device object — confirm against tree construction
		 */
		device = subdev;
		if (device->parent)
			device = device->parent;

		/* tag non-subdev objects with their class handle */
		if (object != subdev) {
			snprintf(obuf, sizeof(obuf), "[0x%08x]",
				 nv_hclass(object));
			ofmt = obuf;
		}

		/* drop messages above the subdev's configured debug level */
		if (level > nv_subdev(subdev)->debug)
			return;

		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s",
			 pfx, name[level], nv_subdev(subdev)->name,
			 nv_device(device)->name, ofmt, fmt);
	} else
	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
		/* client object: filter on the client's own debug level */
		if (level > nv_client(object)->debug)
			return;

		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s",
			 pfx, name[level], nv_client(object)->name, fmt);
	} else {
		/* no object context at all */
		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
	}

	va_start(args, fmt);
	vprintk(mfmt, args);
	va_end(args);
}