static int
nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_client *client = nouveau_client(handle->object);
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_ntfy_new_v0 v0;
	} *args = data;
	struct nvkm_event *event;
	int ret;

	nv_ioctl(object, "ntfy new size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(object, "ntfy new vers %d event %02x\n",
			 args->v0.version, args->v0.event);
		/* ask the object which event source backs this notifier */
		if (ret = -ENODEV, ofuncs->ntfy)
			ret = ofuncs->ntfy(object, args->v0.event, &event);
		if (ret == 0) {
			/* register the notifier with the client; the index
			 * returned on success is handed back to userspace
			 */
			ret = nvkm_client_notify_new(client, event, data, size);
			if (ret >= 0) {
				args->v0.index = ret;
				ret = 0;
			}
		}
	}

	return ret;
}
const char *
nouveau_client_name(void *obj)
{
	const char *client_name = "unknown";
	struct nouveau_client *client = nouveau_client(obj);
	if (client)
		client_name = client->name;
	return client_name;
}
struct nouveau_pushbuf *
cli_push_get(struct nouveau_client *client, struct nouveau_bo *bo)
{
	struct nouveau_client_bo_map *bomap = &nouveau_client(client)->bomap;
	struct nouveau_client_bo_map_entry *ent = bo_map_lookup(bomap, bo);
	struct nouveau_pushbuf *push = NULL;
	if (ent)
		push = ent->push;
	return push;
}
struct drm_nouveau_gem_pushbuf_bo *
cli_kref_get(struct nouveau_client *client, struct nouveau_bo *bo)
{
	struct nouveau_client_bo_map *bomap = &nouveau_client(client)->bomap;
	struct nouveau_client_bo_map_entry *ent = bo_map_lookup(bomap, bo);
	struct drm_nouveau_gem_pushbuf_bo *kref = NULL;
	if (ent)
		kref = ent->kref;
	return kref;
}
void
cli_map_free(struct nouveau_client *client)
{
	struct nouveau_client_bo_map *bomap = &nouveau_client(client)->bomap;
	unsigned i;

	// Free all buckets, including the extra bucket of free entries
	for (i = 0; i < BO_MAP_NUM_BUCKETS + 1; i++) {
		struct nouveau_client_bo_map_entry *ent, *next;
		for (ent = bomap->buckets[i]; ent; ent = next) {
			next = ent->next;
			free(ent);
		}
	}
}
void
cli_kref_set(struct nouveau_client *client, struct nouveau_bo *bo,
	     struct drm_nouveau_gem_pushbuf_bo *kref,
	     struct nouveau_pushbuf *push)
{
	struct nouveau_client_bo_map *bomap = &nouveau_client(client)->bomap;
	struct nouveau_client_bo_map_entry *ent = bo_map_lookup(bomap, bo);

	TRACE("setting 0x%x <-- {%p,%p}\n", bo->handle, kref, push);

	if (!ent) {
		// Do nothing if the user wanted to free the entry anyway
		if (!kref && !push)
			return;

		// Try to get a free entry for this bo
		ent = bo_map_get_free(bomap);
		if (!ent) {
			// Shouldn't we panic here?
			TRACE("panic: out of memory\n");
			return;
		}

		// Add entry to bucket list
		unsigned hash = bo_map_hash(bo);
		ent->next = bomap->buckets[hash];
		if (ent->next)
			ent->next->prev_next = &ent->next;
		ent->prev_next = &bomap->buckets[hash];
		ent->bo_handle = bo->handle;
		bomap->buckets[hash] = ent;
	}

	if (kref || push) {
		// Update the entry
		ent->kref = kref;
		ent->push = push;
	} else {
		// Unlink the entry, and put it in the bucket of free entries
		*ent->prev_next = ent->next;
		if (ent->next)
			ent->next->prev_next = ent->prev_next;
		ent->next = bomap->buckets[BO_MAP_NUM_BUCKETS];
		bomap->buckets[BO_MAP_NUM_BUCKETS] = ent;
	}
}
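/*
 * For reference, a minimal sketch of the bucket helpers used by the bo map
 * code above (bo_map_hash() and bo_map_lookup()). This is inferred from how
 * cli_kref_set() links entries, not the actual implementation; it assumes
 * BO_MAP_NUM_BUCKETS is a power of two, with one extra bucket at index
 * BO_MAP_NUM_BUCKETS acting as the free list.
 */
static unsigned
bo_map_hash(struct nouveau_bo *bo)
{
	/* GEM handles are small integers, so masking them spreads entries
	 * across the buckets well enough */
	return bo->handle & (BO_MAP_NUM_BUCKETS - 1);
}

static struct nouveau_client_bo_map_entry *
bo_map_lookup(struct nouveau_client_bo_map *bomap, struct nouveau_bo *bo)
{
	struct nouveau_client_bo_map_entry *ent;
	/* walk the bucket this handle hashes to and match on bo_handle */
	for (ent = bomap->buckets[bo_map_hash(bo)]; ent; ent = ent->next) {
		if (ent->bo_handle == bo->handle)
			return ent;
	}
	return NULL;
}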
static int
nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_client *client = nouveau_client(handle->object);
	struct nouveau_object *object = handle->object;
	union {
		struct nvif_ioctl_ntfy_put_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "ntfy put size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "ntfy put vers %d index %d\n",
			 args->v0.version, args->v0.index);
		ret = nvkm_client_notify_put(client, args->v0.index);
	}

	return ret;
}
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
	struct nouveau_fifo_chan *fifo = (void *)chan->object;
	struct nouveau_client *client = nouveau_client(fifo);
	struct nv84_fence_priv *priv = chan->drm->fence;
	struct nv84_fence_chan *fctx;
	int ret, i;

	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	nouveau_fence_context_new(&fctx->base);
	fctx->base.emit = nv84_fence_emit;
	fctx->base.sync = nv84_fence_sync;
	fctx->base.read = nv84_fence_read;
	fctx->base.emit32 = nv84_fence_emit32;
	fctx->base.sync32 = nv84_fence_sync32;

	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
	if (ret == 0) {
		ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
					 &fctx->vma_gart);
	}

	/* map display semaphore buffers into channel's vm */
	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
	}

	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);

	if (ret)
		nv84_fence_context_del(chan);
	return ret;
}
static int
nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
{
	union {
		struct nvif_ioctl_new_v0 v0;
	} *args = data;
	struct nouveau_client *client = nouveau_client(parent->object);
	struct nouveau_object *engctx = NULL;
	struct nouveau_object *object = NULL;
	struct nouveau_object *engine;
	struct nouveau_oclass *oclass;
	struct nouveau_handle *handle;
	u32 _handle, _oclass;
	int ret;

	nv_ioctl(client, "new size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		_handle = args->v0.handle;
		_oclass = args->v0.oclass;
	} else
		return ret;

	nv_ioctl(client, "new vers %d handle %08x class %08x "
			 "route %02x token %llx\n",
		 args->v0.version, _handle, _oclass,
		 args->v0.route, args->v0.token);

	if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
		nv_debug(parent->object, "cannot have children (ctor)\n");
		ret = -ENODEV;
		goto fail_class;
	}

	/* check that parent supports the requested subclass */
	ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass);
	if (ret) {
		nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
		goto fail_class;
	}

	/* make sure engine init has been completed *before* any objects
	 * it controls are created - the constructors may depend on
	 * state calculated at init (ie. default context construction)
	 */
	if (engine) {
		ret = nouveau_object_inc(engine);
		if (ret)
			goto fail_class;
	}

	/* if engine requires it, create a context object to insert
	 * between the parent and its children (eg. PGRAPH context)
	 */
	if (engine && nv_engine(engine)->cclass) {
		ret = nouveau_object_ctor(parent->object, engine,
					  nv_engine(engine)->cclass,
					  data, size, &engctx);
		if (ret)
			goto fail_engctx;
	} else {
		nouveau_object_ref(parent->object, &engctx);
	}

	/* finally, create new object and bind it to its handle */
	ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
	client->data = object;
	if (ret)
		goto fail_ctor;

	ret = nouveau_object_inc(object);
	if (ret)
		goto fail_init;

	ret = nouveau_handle_create(parent->object, parent->name,
				    _handle, object, &handle);
	if (ret)
		goto fail_handle;

	ret = nouveau_handle_init(handle);
	handle->route = args->v0.route;
	handle->token = args->v0.token;
	if (ret)
		nouveau_handle_destroy(handle);

fail_handle:
	nouveau_object_dec(object, false);
fail_init:
	nouveau_object_ref(NULL, &object);
fail_ctor:
	nouveau_object_ref(NULL, &engctx);
fail_engctx:
	if (engine)
		nouveau_object_dec(engine, false);
fail_class:
	return ret;
}
int
nvc0_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *args, u32 size,
			struct nouveau_object **pobject)
{
	struct nouveau_vm *vm = nouveau_client(parent)->vm;
	struct nvc0_graph_priv *priv = (void *)engine;
	struct nvc0_graph_data *data = priv->mmio_data;
	struct nvc0_graph_mmio *mmio = priv->mmio_list;
	struct nvc0_graph_chan *chan;
	int ret, i;

	/* allocate memory for context, and fill with default values */
	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
					   priv->size, 0x100,
					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
				 &chan->mmio);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
				    NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
				    &chan->mmio_vma);
	if (ret)
		return ret;

	/* allocate buffers referenced by mmio list */
	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
		ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
					 data->align, 0, &chan->data[i].mem);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
					    &chan->data[i].vma);
		if (ret)
			return ret;

		data++;
	}

	/* finally, fill in the mmio list and point the context at it */
	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
		u32 addr = mmio->addr;
		u32 data = mmio->data;

		if (mmio->shift) {
			u64 info = chan->data[mmio->buffer].vma.offset;
			data |= info >> mmio->shift;
		}

		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
		mmio++;
	}

	return 0;
}
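/*
 * For context, a sketch of the mmio-list descriptor types consumed above.
 * The fields follow their usage in nvc0_graph_context_ctor() (size, align,
 * access, addr, data, shift, buffer); the exact layout lives in the driver's
 * nvc0 graph header, so treat this as an assumption rather than the
 * authoritative definition.
 */
struct nvc0_graph_data {
	u32 size;	/* size of the buffer to allocate */
	u32 align;	/* alignment requirement for the allocation */
	u32 access;	/* NV_MEM_ACCESS_* flags used when mapping into the vm */
};

struct nvc0_graph_mmio {
	u32 addr;	/* register offset written into the mmio list */
	u32 data;	/* value; may be or'd with a shifted buffer address */
	u32 shift;	/* right-shift applied to the buffer's vm offset */
	u32 buffer;	/* index into chan->data[] when shift is non-zero */
};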
int
nvkm_dmaobj_create_(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void **pdata, u32 *psize,
		    int length, void **pobject)
{
	union {
		struct nv_dma_v0 v0;
	} *args = *pdata;
	struct nouveau_instmem *instmem = nouveau_instmem(parent);
	struct nouveau_client *client = nouveau_client(parent);
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_fb *pfb = nouveau_fb(parent);
	struct nouveau_dmaobj *dmaobj;
	void *data = *pdata;
	u32 size = *psize;
	int ret;

	ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
	dmaobj = *pobject;
	if (ret)
		return ret;

	nv_ioctl(parent, "create dma size %d\n", *psize);
	if (nvif_unpack(args->v0, 0, 0, true)) {
		nv_ioctl(parent, "create dma vers %d target %d access %d "
				 "start %016llx limit %016llx\n",
			 args->v0.version, args->v0.target, args->v0.access,
			 args->v0.start, args->v0.limit);
		dmaobj->target = args->v0.target;
		dmaobj->access = args->v0.access;
		dmaobj->start = args->v0.start;
		dmaobj->limit = args->v0.limit;
	} else
		return ret;

	*pdata = data;
	*psize = size;

	if (dmaobj->start > dmaobj->limit)
		return -EINVAL;

	switch (dmaobj->target) {
	case NV_DMA_V0_TARGET_VM:
		dmaobj->target = NV_MEM_TARGET_VM;
		break;
	case NV_DMA_V0_TARGET_VRAM:
		if (!client->super) {
			if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
				return -EACCES;
			if (device->card_type >= NV_50)
				return -EACCES;
		}
		dmaobj->target = NV_MEM_TARGET_VRAM;
		break;
	case NV_DMA_V0_TARGET_PCI:
		if (!client->super)
			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI;
		break;
	case NV_DMA_V0_TARGET_PCI_US:
	case NV_DMA_V0_TARGET_AGP:
		if (!client->super)
			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_DMA_V0_ACCESS_VM:
		dmaobj->access = NV_MEM_ACCESS_VM;
		break;
	case NV_DMA_V0_ACCESS_RD:
		dmaobj->access = NV_MEM_ACCESS_RO;
		break;
	case NV_DMA_V0_ACCESS_WR:
		dmaobj->access = NV_MEM_ACCESS_WO;
		break;
	case NV_DMA_V0_ACCESS_RDWR:
		dmaobj->access = NV_MEM_ACCESS_RW;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
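/*
 * A rough sketch of the dmaobj state filled in above, inferred from the
 * fields nvkm_dmaobj_create_() assigns (target, access, start, limit).
 * The real structure embeds the base object and may carry additional
 * class-specific state; this is an assumption for illustration only.
 */
struct nouveau_dmaobj {
	struct nouveau_object base;
	u32 target;	/* NV_MEM_TARGET_* after translation from NV_DMA_V0_TARGET_* */
	u32 access;	/* NV_MEM_ACCESS_* after translation from NV_DMA_V0_ACCESS_* */
	u64 start;	/* first byte covered by the DMA object */
	u64 limit;	/* last byte covered; must be >= start */
};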