int nouveau_channel_idle(struct nouveau_channel *chan) { struct nouveau_cli *cli = (void *)nvif_client(chan->object); struct nouveau_fence *fence = NULL; int ret; ret = nouveau_fence_new(chan, false, &fence); if (!ret) { ret = nouveau_fence_wait(fence, false, false); nouveau_fence_unref(&fence); } if (ret) NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n", chan->object->handle, nvkm_client(&cli->base)->name); return ret; }
/* Fence an accelerated buffer move and hand the fence to TTM so it can
 * finish the migration (evict/ghost-object bookkeeping) once the copy
 * completes.  On fence-allocation failure the move is not handed to TTM
 * and the error is returned directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret = nouveau_fence_new(chan, &fence, true);

	if (ret == 0) {
		/* TTM takes its own reference on the fence; drop ours after. */
		ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
						no_wait_reserve, no_wait_gpu,
						new_mem);
		nouveau_fence_unref(&fence);
	}
	return ret;
}
void nouveau_channel_idle(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct nouveau_fence *fence = NULL; int ret; nouveau_fence_update(chan); if (chan->fence.sequence != chan->fence.sequence_ack) { ret = nouveau_fence_new(chan, &fence, true); if (!ret) { ret = nouveau_fence_wait(fence, false, false); nouveau_fence_unref(&fence); } if (ret) NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); } }
int nouveau_channel_idle(struct nouveau_channel *chan) { if (likely(chan && chan->fence)) { struct nouveau_cli *cli = (void *)chan->user.client; struct nouveau_fence *fence = NULL; int ret; ret = nouveau_fence_new(chan, false, &fence); if (!ret) { ret = nouveau_fence_wait(fence, false, false); nouveau_fence_unref(&fence); } if (ret) { NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n", chan->chid, nvxx_client(&cli->base)->name); return ret; } } return 0; }
/* stops a fifo */
/* Tear down a channel completely: idle outstanding work, detach it from
 * the PGRAPH and PFIFO engines, release its GPU objects and push buffer,
 * and free the software state.  NOTE(review): the engine-call ordering
 * below (reassign off -> graph teardown -> fifo teardown -> reassign on)
 * looks deliberate — do not reorder without confirming against the hw
 * teardown requirements.
 */
void nouveau_channel_free(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	unsigned long flags;
	int ret;

	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

	nouveau_debugfs_channel_fini(chan);

	/* Give outstanding push buffers a chance to complete */
	spin_lock_irqsave(&chan->fence.lock, flags);
	nouveau_fence_update(chan);
	spin_unlock_irqrestore(&chan->fence.lock, flags);

	/* If fences are not fully acked yet, emit one more and wait on it so
	 * pending work drains before we rip the channel out from under it. */
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		/* Best-effort: on failure we log and continue the teardown. */
		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* Ensure all outstanding fences are signaled. They should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_fini(chan);

	/* Ensure the channel is no longer active on the GPU */
	pfifo->reassign(dev, false);
	pgraph->fifo_access(dev, false);

	/* Kick the channel's graphics context off the hardware before
	 * destroying it. */
	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);
	pgraph->destroy_context(chan);

	pgraph->fifo_access(dev, true);

	/* Same for the fifo context: disable, unload if resident, re-enable. */
	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}
	pfifo->destroy_context(chan);

	pfifo->reassign(dev, true);

	/* Release the channel's resources */
	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);

	/* Unmap the user-visible control window, if it was mapped. */
	if (chan->user)
		iounmap(chan->user);

	/* Drop the channel from the device table and free it. */
	dev_priv->fifos[chan->id] = NULL;
	dev_priv->fifo_alloc_count--;
	kfree(chan);
}