/*
 * Initialize the NV50 PGRAPH context-control unit.
 *
 * Loads the external ctxprog firmware; if no ctxprog ends up loaded,
 * acceleration is disabled (the channel context cannot be switched
 * without it).  Always returns 0 so that the rest of graph init can
 * proceed even with acceleration blocked.
 *
 * NOTE(review): the register write order below (0x400320, then
 * CTXCTL_CUR, then CHANNEL_CTX_POINTER) looks hardware-mandated —
 * do not reorder.
 */
static int nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	/* Attempt to load the context-switching microcode from firmware. */
	nouveau_grctx_prog_load(dev);
	/* No ctxprog means no context switching: block acceleration. */
	if (!dev_priv->engine.graph.ctxprog)
		dev_priv->engine.graph.accel_blocked = true;

	/* Reset context-control state; 0x400320 value 4 — purpose not
	 * documented here, presumably a ctxctl enable/reset bit; verify
	 * against hardware docs. */
	nv_wr32(dev, 0x400320, 4);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}
static int nv50_graph_init_ctxctl(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; NV_DEBUG(dev, "\n"); if (nouveau_ctxfw) { nouveau_grctx_prog_load(dev); dev_priv->engine.graph.grctx_size = 0x70000; } if (!dev_priv->engine.graph.ctxprog) { struct nouveau_grctx ctx = {}; uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL); int i; if (!cp) { NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n"); dev_priv->engine.graph.accel_blocked = true; return 0; } ctx.dev = dev; ctx.mode = NOUVEAU_GRCTX_PROG; ctx.data = cp; ctx.ctxprog_max = 512; if (!nv50_grctx_init(&ctx)) { dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); for (i = 0; i < ctx.ctxprog_len; i++) nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); } else { dev_priv->engine.graph.accel_blocked = true; } kfree(cp); } nv_wr32(dev, 0x400320, 4); nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); return 0; }