static int gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent, struct nvkm_gpuobj **pgpuobj) { struct gf100_dmaobj_priv *priv = (void *)dmaobj; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { case GT214_DISP_CORE_CHANNEL_DMA: case GT214_DISP_BASE_CHANNEL_DMA: case GT214_DISP_OVERLAY_CHANNEL_DMA: break; default: return -EINVAL; } } else return 0; ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj)); nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit)); nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start)); nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 | upper_32_bits(priv->base.start)); nv_wo32(*pgpuobj, 0x10, 0x00000000); nv_wo32(*pgpuobj, 0x14, priv->flags5); } return ret; }
/*
 * nv50_disp_data_ctor() - hand the display hardware context to a client.
 *
 * Channel-object parents do not need a context of their own: the parent is
 * re-referenced and returned directly (return value 1 appears to be the
 * "existing object reused" convention here -- confirm against the caller).
 * Otherwise, exactly one engine context may exist at a time; if the engine's
 * context list is non-empty the display is already owned and -EBUSY is
 * returned.
 */
static int nv50_disp_data_ctor(struct nouveau_object *parent,
			       struct nouveau_object *engine,
			       struct nouveau_oclass *oclass,
			       void *data, u32 size,
			       struct nouveau_object **pobject)
{
	struct nv50_disp_priv *priv = (void *)engine;
	struct nouveau_engctx *ectx;
	int ret = -EBUSY; /* default: hardware already allocated to a client */

	/* no context needed for channel objects... */
	if (nv_mclass(parent) != NV_DEVICE_CLASS) {
		atomic_inc(&parent->refcount);
		*pobject = parent;
		return 1;
	}

	/* allocate display hardware to client */
	/* mutex serialises the empty-check against concurrent ctors */
	mutex_lock(&nv_subdev(priv)->mutex);
	if (list_empty(&nv_engine(priv)->contexts)) {
		/* NOTE(review): ectx is only assigned on this path; it is
		 * presumably set (possibly to NULL on failure) by the
		 * nouveau_engctx_create() macro -- verify before touching
		 * this ordering. */
		ret = nouveau_engctx_create(parent, engine, oclass, NULL,
					    0x10000, 0x10000,
					    NVOBJ_FLAG_HEAP, &ectx);
		*pobject = nv_object(ectx);
	}
	mutex_unlock(&nv_subdev(priv)->mutex);
	return ret;
}
/*
 * nv50_disp_dmac_create_() - common constructor for NV50 display DMA
 * channels.
 *
 * Creates the base channel, resolves the client's push-buffer handle, and
 * derives the hardware push-buffer configuration word from its placement.
 * Only DMA-object classes 0x0002/0x003d with exactly a 4KiB window
 * (limit - start == 0xfff) in VRAM or non-snooped PCI memory are accepted.
 */
int
nv50_disp_dmac_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pushbuf,
		       int chid, int length, void **pobject)
{
	struct nv50_disp_dmac *chan;
	int ret;

	ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
				     length, pobject);
	chan = *pobject;
	if (ret)
		return ret;

	/* resolve the client's push buffer DMA object handle */
	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
	if (!chan->pushdma)
		return -ENOENT;

	if (nv_mclass(chan->pushdma) != 0x0002 &&
	    nv_mclass(chan->pushdma) != 0x003d)
		return -EINVAL;

	/* hardware requires exactly a 4KiB push buffer */
	if (chan->pushdma->limit - chan->pushdma->start != 0xfff)
		return -EINVAL;

	switch (chan->pushdma->target) {
	case NV_MEM_TARGET_VRAM:
		chan->push = 0x00000000 | chan->pushdma->start >> 8;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		chan->push = 0x00000003 | chan->pushdma->start >> 8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * nv84_crypt_object_ctor() - construct an NV84 crypt engine object.
 *
 * Allocates a 16-byte, 16-aligned GPU object and initialises it: the first
 * word carries the object class, the remaining three words are zeroed.
 */
static int
nv84_crypt_object_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nouveau_gpuobj *obj;
	u32 ofs;
	int ret;

	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
				    16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	/* word 0 holds the class id; zero-fill words 1..3 */
	nv_wo32(obj, 0x00, nv_mclass(obj));
	for (ofs = 0x04; ofs <= 0x0c; ofs += 4)
		nv_wo32(obj, ofs, 0x00000000);
	return 0;
}
/*
 * nvkm_dmaobj_bind() - dispatch a DMA object bind to the engine
 * implementation.
 *
 * Two cases are distinguished by whether the DMA object *is* the parent:
 *
 *  - ctor bind (dmaobj == parent): if the grandparent is the device class,
 *    binding is deferred (or never happens) and 0 is returned without any
 *    object being created.  Otherwise the implementation binds immediately
 *    and, on success, the reference held via 'parent' is dropped.
 *  - normal bind: forwarded straight to the implementation.
 */
static int nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
			    struct nouveau_object *parent,
			    struct nouveau_gpuobj **pgpuobj)
{
	/* bind() implementation hangs off the owning engine's oclass */
	const struct nvkm_dmaeng_impl *impl = (void *)
		nv_oclass(nv_object(dmaobj)->engine);
	int ret = 0;

	if (nv_object(dmaobj) == parent) { /* ctor bind */
		if (nv_mclass(parent->parent) == NV_DEVICE) {
			/* delayed, or no, binding */
			return 0;
		}
		ret = impl->bind(dmaobj, parent, pgpuobj);
		if (ret == 0)
			/* NOTE(review): drops the parent reference once the
			 * bound gpuobj exists -- presumably the gpuobj now
			 * carries the ownership; confirm against
			 * nouveau_object_ref() semantics before reordering. */
			nouveau_object_ref(NULL, &parent);
		return ret;
	}

	return impl->bind(dmaobj, parent, pgpuobj);
}
static int nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { u32 flags0 = nv_mclass(dmaobj); u32 flags5 = 0x00000000; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { case NVA3_DISP_MAST_CLASS: case NVA3_DISP_SYNC_CLASS: case NVA3_DISP_OVLY_CLASS: break; default: return -EINVAL; } } else return 0; if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { if (dmaobj->target == NV_MEM_TARGET_VM) { dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; } else { dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; dmaobj->conf0 |= 0x00020000; } } flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22; flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); switch (dmaobj->target) { case NV_MEM_TARGET_VM: flags0 |= 0x00000000; break; case NV_MEM_TARGET_VRAM: flags0 |= 0x00010000; break; case NV_MEM_TARGET_PCI: flags0 |= 0x00020000; break; case NV_MEM_TARGET_PCI_NOSNOOP: flags0 |= 0x00030000; break; default: return -EINVAL; } switch (dmaobj->access) { case NV_MEM_ACCESS_VM: break; case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; case NV_MEM_ACCESS_WO: case NV_MEM_ACCESS_RW: flags0 |= 0x00080000; break; } ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { nv_wo32(*pgpuobj, 0x00, flags0); nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | upper_32_bits(dmaobj->start)); nv_wo32(*pgpuobj, 0x10, 0x00000000); nv_wo32(*pgpuobj, 0x14, flags5); } return ret; }