/* Does a CPU wait for the buffer's backing data to become reliably accessible
 * for write/read by waiting on the buffer's relevant fences.
 */
static inline bool
nouveau_buffer_sync(struct nouveau_context *nv,
                    struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence_wr));
      if (!nouveau_fence_wait(buf->fence_wr, &nv->debug))
         return false;
   } else {
      if (!buf->fence)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence));
      if (!nouveau_fence_wait(buf->fence, &nv->debug))
         return false;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return true;
}
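A minimal caller sketch of the helper above, not taken from the source: the map-helper name and the use of buf->data as a CPU-visible backing pointer are assumptions; only nouveau_buffer_sync (as declared above) and the standard PIPE_TRANSFER_READ/WRITE flags are relied on.

/* Hypothetical sketch: block until the requested CPU access is safe,
 * then hand back the buffer's CPU-visible backing pointer.
 */
static void *
example_buffer_map_cpu(struct nouveau_context *nv,
                       struct nv04_resource *buf, unsigned usage)
{
   /* A read-only mapping only has to wait for the last GPU write;
    * any write access must also wait for outstanding GPU reads. */
   unsigned rw = (usage & PIPE_TRANSFER_WRITE) ? PIPE_TRANSFER_WRITE
                                               : PIPE_TRANSFER_READ;

   if (!nouveau_buffer_sync(nv, buf, rw))
      return NULL; /* fence wait failed */

   return buf->data; /* assumed CPU-accessible backing store */
}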
static INLINE boolean
nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
{
   if (!mt->base.mm) {
      uint32_t access = (usage & PIPE_TRANSFER_WRITE) ?
         NOUVEAU_BO_WR : NOUVEAU_BO_RD;
      return !nouveau_bo_wait(mt->base.bo, access, nvc0->base.client);
   }
   if (usage & PIPE_TRANSFER_WRITE)
      return !mt->base.fence || nouveau_fence_wait(mt->base.fence);
   return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr);
}
static boolean
nouveau_screen_fence_finish(struct pipe_screen *screen,
                            struct pipe_fence_handle *pfence,
                            uint64_t timeout)
{
   return nouveau_fence_wait(nouveau_fence(pfence));
}
static void
nv30_screen_destroy(struct pipe_screen *pscreen)
{
   struct nv30_screen *screen = nv30_screen(pscreen);

   if (screen->base.fence.current &&
       screen->base.fence.current->state >= NOUVEAU_FENCE_STATE_EMITTED) {
      nouveau_fence_wait(screen->base.fence.current);
      nouveau_fence_ref (NULL, &screen->base.fence.current);
   }

   nouveau_object_del(&screen->query);
   nouveau_object_del(&screen->fence);
   nouveau_object_del(&screen->ntfy);
   nouveau_object_del(&screen->sifm);
   nouveau_object_del(&screen->swzsurf);
   nouveau_object_del(&screen->surf2d);
   nouveau_object_del(&screen->m2mf);
   nouveau_object_del(&screen->eng3d);
   nouveau_object_del(&screen->null);

   nouveau_screen_fini(&screen->base);
   FREE(screen);
}
static void
nv30_screen_destroy(struct pipe_screen *pscreen)
{
   struct nv30_screen *screen = nv30_screen(pscreen);

   if (!nouveau_drm_screen_unref(&screen->base))
      return;

   if (screen->base.fence.current) {
      struct nouveau_fence *current = NULL;

      /* nouveau_fence_wait will create a new current fence, so wait on the
       * _current_ one, and remove both.
       */
      nouveau_fence_ref(screen->base.fence.current, &current);
      nouveau_fence_wait(current);
      nouveau_fence_ref(NULL, &current);
      nouveau_fence_ref(NULL, &screen->base.fence.current);
   }

   nouveau_object_del(&screen->query);
   nouveau_object_del(&screen->fence);
   nouveau_object_del(&screen->ntfy);
   nouveau_object_del(&screen->sifm);
   nouveau_object_del(&screen->swzsurf);
   nouveau_object_del(&screen->surf2d);
   nouveau_object_del(&screen->m2mf);
   nouveau_object_del(&screen->eng3d);
   nouveau_object_del(&screen->null);

   nouveau_screen_fini(&screen->base);
   FREE(screen);
}
static boolean
nouveau_screen_fence_finish(struct pipe_screen *screen,
                            struct pipe_fence_handle *pfence,
                            uint64_t timeout)
{
   if (!timeout)
      return nouveau_fence_signalled(nouveau_fence(pfence));

   return nouveau_fence_wait(nouveau_fence(pfence), NULL);
}
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}
static void
nv50_screen_destroy(struct pipe_screen *pscreen)
{
   struct nv50_screen *screen = nv50_screen(pscreen);

   if (!nouveau_drm_screen_unref(&screen->base))
      return;

   if (screen->base.fence.current) {
      struct nouveau_fence *current = NULL;

      /* nouveau_fence_wait will create a new current fence, so wait on the
       * _current_ one, and remove both.
       */
      nouveau_fence_ref(screen->base.fence.current, &current);
      nouveau_fence_wait(current, NULL);
      nouveau_fence_ref(NULL, &current);
      nouveau_fence_ref(NULL, &screen->base.fence.current);
   }

   if (screen->base.pushbuf)
      screen->base.pushbuf->user_priv = NULL;

   if (screen->blitter)
      nv50_blitter_destroy(screen);
   if (screen->pm.prog) {
      screen->pm.prog->code = NULL; /* hardcoded, don't FREE */
      nv50_program_destroy(NULL, screen->pm.prog);
      FREE(screen->pm.prog);
   }

   nouveau_bo_ref(NULL, &screen->code);
   nouveau_bo_ref(NULL, &screen->tls_bo);
   nouveau_bo_ref(NULL, &screen->stack_bo);
   nouveau_bo_ref(NULL, &screen->txc);
   nouveau_bo_ref(NULL, &screen->uniforms);
   nouveau_bo_ref(NULL, &screen->fence.bo);

   nouveau_heap_destroy(&screen->vp_code_heap);
   nouveau_heap_destroy(&screen->gp_code_heap);
   nouveau_heap_destroy(&screen->fp_code_heap);

   FREE(screen->tic.entries);

   nouveau_object_del(&screen->tesla);
   nouveau_object_del(&screen->eng2d);
   nouveau_object_del(&screen->m2mf);
   nouveau_object_del(&screen->compute);
   nouveau_object_del(&screen->sync);

   nouveau_screen_fini(&screen->base);
   FREE(screen);
}
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence_wr));
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence));
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
        struct nouveau_cli *cli = (void *)nvif_client(chan->object);
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, false, &fence);
        if (!ret) {
                ret = nouveau_fence_wait(fence, false, false);
                nouveau_fence_unref(&fence);
        }

        if (ret)
                NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
                          chan->object->handle, nvkm_client(&cli->base)->name);
        return ret;
}
void
nouveau_channel_idle(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *fence = NULL;
        int ret;

        nouveau_fence_update(chan);

        if (chan->fence.sequence != chan->fence.sequence_ack) {
                ret = nouveau_fence_new(chan, &fence, true);
                if (!ret) {
                        ret = nouveau_fence_wait(fence, false, false);
                        nouveau_fence_unref(&fence);
                }

                if (ret)
                        NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
        }
}
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
        if (likely(chan && chan->fence)) {
                struct nouveau_cli *cli = (void *)chan->user.client;
                struct nouveau_fence *fence = NULL;
                int ret;

                ret = nouveau_fence_new(chan, false, &fence);
                if (!ret) {
                        ret = nouveau_fence_wait(fence, false, false);
                        nouveau_fence_unref(&fence);
                }

                if (ret) {
                        NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
                                  chan->chid, nvxx_client(&cli->base)->name);
                        return ret;
                }
        }
        return 0;
}
static void
nv50_screen_destroy(struct pipe_screen *pscreen)
{
   struct nv50_screen *screen = nv50_screen(pscreen);

   if (screen->base.fence.current) {
      nouveau_fence_wait(screen->base.fence.current);
      nouveau_fence_ref (NULL, &screen->base.fence.current);
   }
   if (screen->base.pushbuf)
      screen->base.pushbuf->user_priv = NULL;

   if (screen->blitctx)
      FREE(screen->blitctx);

   nouveau_bo_ref(NULL, &screen->code);
   nouveau_bo_ref(NULL, &screen->tls_bo);
   nouveau_bo_ref(NULL, &screen->stack_bo);
   nouveau_bo_ref(NULL, &screen->txc);
   nouveau_bo_ref(NULL, &screen->uniforms);
   nouveau_bo_ref(NULL, &screen->fence.bo);

   nouveau_heap_destroy(&screen->vp_code_heap);
   nouveau_heap_destroy(&screen->gp_code_heap);
   nouveau_heap_destroy(&screen->fp_code_heap);

   if (screen->tic.entries)
      FREE(screen->tic.entries);

   nouveau_object_del(&screen->tesla);
   nouveau_object_del(&screen->eng2d);
   nouveau_object_del(&screen->m2mf);
   nouveau_object_del(&screen->sync);

   nouveau_screen_fini(&screen->base);
   FREE(screen);
}
/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        unsigned long flags;
        int ret;

        NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);

        nouveau_debugfs_channel_fini(chan);

        /* Give outstanding push buffers a chance to complete */
        spin_lock_irqsave(&chan->fence.lock, flags);
        nouveau_fence_update(chan);
        spin_unlock_irqrestore(&chan->fence.lock, flags);

        if (chan->fence.sequence != chan->fence.sequence_ack) {
                struct nouveau_fence *fence = NULL;

                ret = nouveau_fence_new(chan, &fence, true);
                if (ret == 0) {
                        ret = nouveau_fence_wait(fence, NULL, false, false);
                        nouveau_fence_unref((void *)&fence);
                }

                if (ret)
                        NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
        }

        /* Ensure all outstanding fences are signaled. They should be if the
         * above attempts at idling were OK, but if we failed this'll tell TTM
         * we're done with the buffers.
         */
        nouveau_fence_fini(chan);

        /* Ensure the channel is no longer active on the GPU */
        pfifo->reassign(dev, false);

        pgraph->fifo_access(dev, false);
        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);
        pgraph->destroy_context(chan);
        pgraph->fifo_access(dev, true);

        if (pfifo->channel_id(dev) == chan->id) {
                pfifo->disable(dev);
                pfifo->unload_context(dev);
                pfifo->enable(dev);
        }
        pfifo->destroy_context(chan);

        pfifo->reassign(dev, true);

        /* Release the channel's resources */
        nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
        if (chan->pushbuf_bo) {
                nouveau_bo_unpin(chan->pushbuf_bo);
                nouveau_bo_ref(NULL, &chan->pushbuf_bo);
        }
        nouveau_gpuobj_channel_takedown(chan);
        nouveau_notifier_takedown_channel(chan);
        if (chan->user)
                iounmap(chan->user);

        dev_priv->fifos[chan->id] = NULL;
        dev_priv->fifo_alloc_count--;
        kfree(chan);
}