/*
 * Tear down a channel's PGRAPH context.
 *
 * Quiesces the graphics engine (FIFO reassignment and PGRAPH FIFO access
 * disabled under context_switch_lock), evicts the context if it is the one
 * currently live on the hardware, scrubs the context pointer header in the
 * channel's RAMIN, then re-enables the engine and drops the final reference
 * on the context object.
 */
static void nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	/* Offset of the PGRAPH context header inside RAMIN differs by
	 * chipset: 0x200 on the original NV50, 0x20 on later chips. */
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Nothing to do if the channel never got instance memory. */
	if (!chan->ramin)
		return;

	/* Stop the world: no channel reassignment and no PGRAPH FIFO
	 * access while we unload/scrub the context. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);
	nv50_graph_fifo_access(dev, false);

	/* If this channel's context is resident on the hardware, save and
	 * deactivate it before freeing. */
	if (nv50_graph_channel(dev) == chan)
		nv50_graph_unload_context(dev);

	/* Zero the 24-byte context header so the hardware no longer sees a
	 * valid context pointer for this channel, then flush instmem so the
	 * writes reach the GPU. */
	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	/* Re-enable the engine in reverse order of the disable above. */
	nv50_graph_fifo_access(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Drop our reference; frees the context gpuobj if it was the last. */
	nouveau_gpuobj_ref(NULL, &grctx);

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
}
/*
 * NOTE(review): removed a stale two-argument nv50_graph_fini(dev, engine).
 * It duplicated the name of the three-argument suspend-aware version defined
 * below with an incompatible signature — two definitions of the same
 * identifier in one translation unit is a hard compile error in C.  The
 * suspend-aware variant supersedes it (it additionally quiesces PGRAPH via
 * 0x400500 and waits for idle on 0x400700 before unloading the context).
 */
/*
 * Shut down the PGRAPH engine, optionally failing if it will not idle.
 *
 * Disables PGRAPH FIFO access (0x400500 — presumably the FIFO access/enable
 * register; verify against the NV50 register docs), then waits for the
 * engine status register (0x400700) to read back all-zero.  If the engine
 * refuses to go idle and this is a suspend (where losing state would be
 * fatal), access is re-enabled and -EBUSY is returned so the suspend can be
 * aborted.  Otherwise the current context is unloaded and interrupt
 * generation (0x40013c — NOTE(review): looks like the PGRAPH INTR_EN
 * register; confirm) is masked off.
 *
 * Returns 0 on success, -EBUSY if the engine stayed busy during suspend.
 */
static int nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	/* Block further PGRAPH FIFO access so no new work arrives. */
	nv_mask(dev, 0x400500, 0x00010001, 0x00000000);

	/* Wait for the engine to drain; only treat a timeout as fatal when
	 * suspending, since resume needs the saved context to be coherent. */
	if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
		/* Roll back: re-enable FIFO access and report busy. */
		nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
		return -EBUSY;
	}

	nv50_graph_unload_context(dev);

	/* Mask PGRAPH interrupts now that the engine is down. */
	nv_wr32(dev, 0x40013c, 0x00000000);
	return 0;
}