/* Destructor for a duplicated gpuobj: drop the reference taken on the
 * source object at dup time, then tear down the base object.
 */
static void
nouveau_gpudup_dtor(struct nouveau_object *object)
{
	struct nouveau_gpuobj *dup = (void *)object;

	nouveau_object_ref(NULL, &dup->parent);
	nouveau_object_destroy(&dup->base);
}
/* Common constructor for a client object: locates the device named by
 * @devname, allocates the client's namedb, installs the client's root
 * handle (the client mapped onto itself), and records name/debug state.
 *
 * @name:    human-readable client name, copied into client->name
 * @devname: identifier passed to nouveau_device_find()
 * @cfg:     device configuration string (not consumed here)
 * @dbg:     debug option string, parsed for the "CLIENT" key
 * @length:  size of the derived object to allocate
 * @pobject: receives the newly created client object
 *
 * Returns 0 on success or a negative error code.  Note *pobject is
 * assigned even on namedb-creation failure, matching caller expectations
 * elsewhere in the core (callers unref on error).
 */
int
nouveau_client_create_(const char *name, u64 devname, const char *cfg,
		       const char *dbg, int length, void **pobject)
{
	struct nouveau_object *device;
	struct nouveau_client *client;
	int ret;

	device = (void *)nouveau_device_find(devname);
	if (!device)
		return -ENODEV;

	ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
				     NV_CLIENT_CLASS, NULL,
				     (1ULL << NVDEV_ENGINE_DEVICE),
				     length, pobject);
	client = *pobject;
	if (ret)
		return ret;

	/* root handle: the client is its own namespace root (~0 ids) */
	ret = nouveau_handle_create(nv_object(client), ~0, ~0,
				    nv_object(client), &client->root);
	if (ret)
		return ret;

	/* prevent init/fini being called, os is in charge of this */
	atomic_set(&nv_object(client)->usecount, 2);

	nouveau_object_ref(device, &client->device);
	snprintf(client->name, sizeof(client->name), "%s", name);
	client->debug = nouveau_dbgopt(dbg, "CLIENT");
	return 0;
}
/* Destructor for an NV50 display DMA channel: release the reference on
 * the push-buffer DMA object before destroying the base channel state.
 */
void
nv50_disp_dmac_dtor(struct nouveau_object *object)
{
	struct nv50_disp_dmac *chan = (void *)object;

	nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
	nv50_disp_chan_destroy(&chan->base);
}
/* In-kernel client shutdown callback: suspend the client, force the
 * refcount back to one so the unref below is the final one, then drop
 * the reference to free the object.
 */
static void
nvkm_client_fini(void *priv)
{
	struct nouveau_object *obj = priv;

	nouveau_client_fini(nv_client(obj), false);
	atomic_set(&obj->refcount, 1);
	nouveau_object_ref(NULL, &obj);
}
/* Client destructor: drop the device reference taken at create time,
 * destroy the root handle, then tear down the namedb base.
 */
static void
nouveau_client_dtor(struct nouveau_object *object)
{
	struct nouveau_client *cli = (void *)object;

	nouveau_object_ref(NULL, &cli->device);
	nouveau_handle_destroy(cli->root);
	nouveau_namedb_destroy(&cli->base);
}
/* Destroy a DRM-side client wrapper: release its VM, suspend the core
 * client, then force the refcount to one so the final unref frees it
 * (same shutdown idiom as the in-kernel client path).
 */
static void
nouveau_cli_destroy(struct nouveau_cli *cli)
{
	struct nouveau_object *obj = nv_object(cli);

	nouveau_vm_ref(NULL, &cli->base.vm, NULL);
	nouveau_client_fini(&cli->base, false);
	atomic_set(&obj->refcount, 1);
	nouveau_object_ref(NULL, &obj);
}
/* Default framebuffer subdev destructor: tear down every configured
 * tiling region, release the tag/vram allocators, drop the RAM object
 * reference, and destroy the subdev base.
 */
void
_nouveau_fb_dtor(struct nouveau_object *object)
{
	struct nouveau_fb *fb = (void *)object;
	int idx;

	for (idx = 0; idx < fb->tile.regions; idx++)
		fb->tile.fini(fb, idx, &fb->tile.region[idx]);

	nouveau_mm_fini(&fb->tags);
	nouveau_mm_fini(&fb->vram);
	nouveau_object_ref(NULL, (struct nouveau_object **)&fb->ram);
	nouveau_subdev_destroy(&fb->base);
}
/* Create a lightweight duplicate of @base under @parent: the duplicate
 * holds a reference on the source object and mirrors its address/size
 * without owning any backing storage itself.
 *
 * Returns 0 on success or a negative error code; *pgpuobj is assigned
 * in either case (NULL-or-object per nouveau_object_create semantics).
 */
int
nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
		   struct nouveau_gpuobj **pgpuobj)
{
	struct nouveau_gpuobj *dup;
	int ret;

	ret = nouveau_object_create(parent, parent->engine,
				    &nouveau_gpudup_oclass, 0, &dup);
	*pgpuobj = dup;
	if (ret)
		return ret;

	nouveau_object_ref(nv_object(base), &dup->parent);
	dup->addr = base->addr;
	dup->size = base->size;
	return 0;
}
/* Bind a DMA object into @parent, producing the hardware representation
 * in *pgpuobj via the engine implementation's bind hook.
 *
 * Two cases are distinguished:
 *  - ctor bind (the dmaobj is being bound to itself during construction):
 *    if the grandparent is the device the binding is delayed or skipped;
 *    otherwise bind immediately and, on success, drop the parent
 *    reference the ctor path holds (the gpuobj now keeps it alive).
 *  - normal bind: forward straight to the implementation.
 */
static int
nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent,
		 struct nouveau_gpuobj **pgpuobj)
{
	const struct nvkm_dmaeng_impl *impl = (void *)
		nv_oclass(nv_object(dmaobj)->engine);
	int ret = 0;

	if (nv_object(dmaobj) == parent) { /* ctor bind */
		if (nv_mclass(parent->parent) == NV_DEVICE) {
			/* delayed, or no, binding */
			return 0;
		}
		ret = impl->bind(dmaobj, parent, pgpuobj);
		if (ret == 0)
			/* ref is consumed by the bound gpuobj */
			nouveau_object_ref(NULL, &parent);
		return ret;
	}

	return impl->bind(dmaobj, parent, pgpuobj);
}
void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan) { struct nouveau_fifo *priv = (void *)nv_object(chan)->engine; unsigned long flags; #ifdef __NetBSD__ if (chan->mapped) { bus_space_unmap(chan->bst, chan->bsh, chan->size); chan->mapped = false; } #else iounmap(chan->user); #endif spin_lock_irqsave(&priv->lock, flags); priv->channel[chan->chid] = NULL; spin_unlock_irqrestore(&priv->lock, flags); nouveau_gpuobj_ref(NULL, &chan->pushgpu); nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma); nouveau_namedb_destroy(&chan->base); }
/* Common constructor for GPU objects.
 *
 * Two allocation paths exist:
 *  - @pargpu given: walk up the object tree to the nearest gpuobj that
 *    owns a suballocation heap, and carve this object out of it.
 *  - @pargpu NULL: allocate backing storage from instmem, optionally
 *    remapping it through the BAR so it is CPU-visible.
 *
 * Reference-count choreography (subtle, do not reorder): the parent's
 * refcount is raised (atomic_inc, or via the ref instmem/bar paths)
 * before nouveau_object_create_ takes its own reference, and the extra
 * reference is always dropped immediately afterwards — on both the
 * success and failure paths — by the unconditional object_ref(NULL).
 *
 * @pclass is OR'd with NV_GPUOBJ_CLASS; @flags may include
 * NVOBJ_FLAG_HEAP (object hosts its own suballocation heap) and
 * NVOBJ_FLAG_ZERO_ALLOC (clear the object's contents).
 *
 * Returns 0 on success or a negative error code.
 */
int
nouveau_gpuobj_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pclass,
		       struct nouveau_object *pargpu,
		       u32 size, u32 align, u32 flags,
		       int length, void **pobject)
{
	struct nouveau_instmem *imem = nouveau_instmem(parent);
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nouveau_gpuobj *gpuobj;
	struct nouveau_mm *heap = NULL;
	int ret, i;
	u64 addr;

	*pobject = NULL;

	if (pargpu) {
		/* find the closest ancestor gpuobj with a heap */
		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
			if (nv_gpuobj(pargpu)->heap.block_size)
				break;
			pargpu = pargpu->parent;
		}

		if (unlikely(pargpu == NULL)) {
			nv_error(parent, "no gpuobj heap\n");
			return -EINVAL;
		}

		addr = nv_gpuobj(pargpu)->addr;
		heap = &nv_gpuobj(pargpu)->heap;
		/* balanced by the object_ref(NULL, &parent) below */
		atomic_inc(&parent->refcount);
	} else {
		/* no parent heap: back the object with instmem storage;
		 * note imem->alloc re-points 'parent' at the instobj
		 */
		ret = imem->alloc(imem, parent, size, align, &parent);
		pargpu = parent;
		if (ret)
			return ret;

		addr = nv_memobj(pargpu)->addr;
		size = nv_memobj(pargpu)->size;

		if (bar && bar->alloc) {
			/* NOTE(review): relies on the mem pointer being
			 * stored directly after the instobj header
			 */
			struct nouveau_instobj *iobj = (void *)parent;
			struct nouveau_mem **mem = (void *)(iobj + 1);
			struct nouveau_mem *node = *mem;
			if (!bar->alloc(bar, parent, node, &pargpu)) {
				/* BAR mapping replaces the raw instobj */
				nouveau_object_ref(NULL, &parent);
				parent = pargpu;
			}
		}
	}

	ret = nouveau_object_create_(parent, engine, oclass, pclass |
				     NV_GPUOBJ_CLASS, length, pobject);
	/* drop the extra parent reference taken above, error or not */
	nouveau_object_ref(NULL, &parent);
	gpuobj = *pobject;
	if (ret)
		return ret;

	gpuobj->parent = pargpu;
	gpuobj->flags = flags;
	gpuobj->addr = addr;
	gpuobj->size = size;

	if (heap) {
		/* suballocate out of the parent gpuobj's heap */
		ret = nouveau_mm_head(heap, 0, 1, size, size,
				      max(align, (u32)1), &gpuobj->node);
		if (ret)
			return ret;

		gpuobj->addr += gpuobj->node->offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
		ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
		if (ret)
			return ret;
	}

	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	return ret;
}
/* NVIF ioctl: create a new object as a child of @parent's object.
 *
 * Unpacks the nvif_ioctl_new_v0 arguments, validates that the parent
 * can have children and supports the requested subclass, brings the
 * owning engine up, optionally inserts an engine context object between
 * parent and child, constructs and initialises the new object, and
 * publishes it under a handle with the caller's route/token.
 *
 * Error handling is a strict goto ladder — each label undoes exactly
 * the steps that succeeded before the failure.  Returns 0 on success or
 * a negative error code.
 */
static int
nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
{
	union {
		struct nvif_ioctl_new_v0 v0;
	} *args = data;
	struct nouveau_client *client = nouveau_client(parent->object);
	struct nouveau_object *engctx = NULL;
	struct nouveau_object *object = NULL;
	struct nouveau_object *engine;
	struct nouveau_oclass *oclass;
	struct nouveau_handle *handle;
	u32 _handle, _oclass;
	int ret;

	nv_ioctl(client, "new size %d\n", size);
	/* nvif_unpack assigns 'ret' as a side effect of the macro */
	if (nvif_unpack(args->v0, 0, 0, true)) {
		_handle = args->v0.handle;
		_oclass = args->v0.oclass;
	} else
		return ret;

	nv_ioctl(client, "new vers %d handle %08x class %08x "
			 "route %02x token %llx\n",
		 args->v0.version, _handle, _oclass,
		 args->v0.route, args->v0.token);

	if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
		nv_debug(parent->object, "cannot have children (ctor)\n");
		ret = -ENODEV;
		goto fail_class;
	}

	/* check that parent supports the requested subclass */
	ret = nouveau_parent_sclass(parent->object, _oclass,
				    &engine, &oclass);
	if (ret) {
		nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
		goto fail_class;
	}

	/* make sure engine init has been completed *before* any objects
	 * it controls are created - the constructors may depend on
	 * state calculated at init (ie. default context construction)
	 */
	if (engine) {
		ret = nouveau_object_inc(engine);
		if (ret)
			goto fail_class;
	}

	/* if engine requires it, create a context object to insert
	 * between the parent and its children (eg. PGRAPH context)
	 */
	if (engine && nv_engine(engine)->cclass) {
		ret = nouveau_object_ctor(parent->object, engine,
					  nv_engine(engine)->cclass,
					  data, size, &engctx);
		if (ret)
			goto fail_engctx;
	} else {
		nouveau_object_ref(parent->object, &engctx);
	}

	/* finally, create new object and bind it to its handle */
	ret = nouveau_object_ctor(engctx, engine, oclass, data, size,
				  &object);
	client->data = object;
	if (ret)
		goto fail_ctor;

	ret = nouveau_object_inc(object);
	if (ret)
		goto fail_init;

	ret = nouveau_handle_create(parent->object, parent->name,
				    _handle, object, &handle);
	if (ret)
		goto fail_handle;

	ret = nouveau_handle_init(handle);
	handle->route = args->v0.route;
	handle->token = args->v0.token;
	if (ret)
		nouveau_handle_destroy(handle);

fail_handle:
	nouveau_object_dec(object, false);
fail_init:
	nouveau_object_ref(NULL, &object);
fail_ctor:
	nouveau_object_ref(NULL, &engctx);
fail_engctx:
	if (engine)
		nouveau_object_dec(engine, false);
fail_class:
	return ret;
}