Example #1
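/* Tear down an nv50 screen: wait out the last fence, drop every buffer
 * and heap reference, delete the hardware objects, then free the screen. */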
static void
nv50_screen_destroy(struct pipe_screen *pscreen)
{
   struct nv50_screen *screen = nv50_screen(pscreen);

   if (screen->base.fence.current) {
      nouveau_fence_wait(screen->base.fence.current);
      nouveau_fence_ref (NULL, &screen->base.fence.current);
   }
   if (screen->base.pushbuf)
      screen->base.pushbuf->user_priv = NULL;

   if (screen->blitter)
      nv50_blitter_destroy(screen);

   nouveau_bo_ref(NULL, &screen->code);
   nouveau_bo_ref(NULL, &screen->tls_bo);
   nouveau_bo_ref(NULL, &screen->stack_bo);
   nouveau_bo_ref(NULL, &screen->txc);
   nouveau_bo_ref(NULL, &screen->uniforms);
   nouveau_bo_ref(NULL, &screen->fence.bo);

   nouveau_heap_destroy(&screen->vp_code_heap);
   nouveau_heap_destroy(&screen->gp_code_heap);
   nouveau_heap_destroy(&screen->fp_code_heap);

   FREE(screen->tic.entries);

   nouveau_object_del(&screen->tesla);
   nouveau_object_del(&screen->eng2d);
   nouveau_object_del(&screen->m2mf);
   nouveau_object_del(&screen->sync);

   nouveau_screen_fini(&screen->base);

   FREE(screen);
}
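
The repeated nouveau_bo_ref(NULL, &...) calls above rely on a release-by-NULL idiom: the ref function attaches its first argument to the pointer, dropping whatever the pointer previously held, so passing NULL means "unref and clear". A minimal sketch of that idiom with hypothetical names (the real nouveau_bo_ref also does kernel-side bookkeeping):

#include <stdlib.h>

struct obj {
   int refcount;
};

/* Hypothetical: attach 'o' (may be NULL) to '*ref', releasing whatever
 * '*ref' pointed at before. obj_ref(NULL, &p) acts as "unref and clear". */
static void
obj_ref(struct obj *o, struct obj **ref)
{
   if (o)
      o->refcount++;
   if (*ref && --(*ref)->refcount == 0)
      free(*ref);
   *ref = o;
}

The same shape explains the nouveau_fence_ref(NULL, ...) call at the top of the function.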
Example #2
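/* Remove a VMA from a GEM object. If the buffer is resident (not in
 * system memory), the GPU may still be using the mapping, so teardown is
 * deferred until the buffer's current fence signals. */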
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}
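
The deferred branch hands the vma to nouveau_fence_work() along with nouveau_gem_object_delete. Assuming the callback is simply invoked with that pointer once the fence signals, a plausible sketch of it mirrors the immediate path:

static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;

	/* The fence has signalled, so the GPU is done with the mapping. */
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}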
Example #3
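/* Finish an nv50 hardware query: emit the final counter write(s) for the
 * query type, and turn off sample counting when the last occlusion query
 * ends. */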
static void
nv50_hw_end_query(struct nv50_context *nv50, struct nv50_query *q)
{
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_hw_query *hq = nv50_hw_query(q);

   hq->state = NV50_HW_QUERY_STATE_ENDED;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      nv50_hw_query_get(push, q, 0, 0x0100f002);
      if (--nv50->screen->num_occlusion_queries_active == 0) {
         PUSH_SPACE(push, 2);
         BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
         PUSH_DATA (push, 0);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_hw_query_get(push, q, 0, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_hw_query_get(push, q, 0, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_hw_query_get(push, q, 0x00, 0x05805002);
      nv50_hw_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_hw_query_get(push, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
      nv50_hw_query_get(push, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
      nv50_hw_query_get(push, q, 0x20, 0x02802002); /* VP, LAUNCHES */
      nv50_hw_query_get(push, q, 0x30, 0x03806002); /* GP, LAUNCHES */
      nv50_hw_query_get(push, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nv50_hw_query_get(push, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nv50_hw_query_get(push, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_hw_query_get(push, q, 0x70, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIMESTAMP:
      hq->sequence++;
      /* fall through */
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_hw_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      hq->sequence++;
      nv50_hw_query_get(push, q, 0, 0x1000f010);
      break;
   case NVA0_HW_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      hq->sequence++;
      nv50_hw_query_get(push, q, 0, 0x0d005002 | (q->index << 5));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* This query is never issued to the GPU because disjoint is forced to false */
      hq->state = NV50_HW_QUERY_STATE_READY;
      break;
   default:
      assert(0);
      break;
   }
   if (hq->is64bit)
      nouveau_fence_ref(nv50->screen->base.fence.current, &hq->fence);
}
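
The helper doing the work in each case, nv50_hw_query_get(), is not shown here. A sketch of its likely shape, assuming it emits the 3D engine's QUERY_* methods with the target address inside the query buffer, the query's sequence number, and the 'get' descriptor selecting the counter (PUSH_DATAh/PUSH_REFN are assumed; the other macros appear above):

static void
nv50_hw_query_get(struct nouveau_pushbuf *push, struct nv50_query *q,
                  unsigned offset, uint32_t get)
{
   struct nv50_hw_query *hq = nv50_hw_query(q);

   offset += hq->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, hq->bo, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
   BEGIN_NV04(push, NV50_3D(QUERY_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, hq->bo->offset + offset); /* high 32 bits of address */
   PUSH_DATA (push, hq->bo->offset + offset); /* low 32 bits */
   PUSH_DATA (push, hq->sequence);            /* written back for sync */
   PUSH_DATA (push, get);                     /* counter/report selector */
}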
Example #4
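/* nvc0 counterpart: queries without an explicit begin (e.g. GPU_FINISHED)
 * get their buffer rotated and sequence bumped here before the final
 * value(s) are written. */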
static void
nvc0_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_query *q = nvc0_query(pq);

   if (q->state != NVC0_QUERY_STATE_ACTIVE) {
      /* some queries don't require 'begin' to be called (e.g. GPU_FINISHED) */
      if (q->rotate)
         nvc0_query_rotate(nvc0, q);
      q->sequence++;
   }
   q->state = NVC0_QUERY_STATE_ENDED;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      nvc0_query_get(push, q, 0, 0x0100f002);
      if (--nvc0->screen->num_occlusion_queries_active == 0) {
         PUSH_SPACE(push, 1);
         IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 0);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nvc0_query_get(push, q, 0, 0x09005002 | (q->index << 5));
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nvc0_query_get(push, q, 0, 0x05805002 | (q->index << 5));
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nvc0_query_get(push, q, 0x00, 0x05805002 | (q->index << 5));
      nvc0_query_get(push, q, 0x10, 0x06805002 | (q->index << 5));
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      /* TODO: How do we sum over all streams for render condition? */
      /* PRIMS_DROPPED doesn't write sequence, use a ZERO query to sync on */
      nvc0_query_get(push, q, 0x00, 0x03005002 | (q->index << 5));
      nvc0_query_get(push, q, 0x20, 0x00005002);
      break;
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
      nvc0_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      nvc0_query_get(push, q, 0, 0x1000f010);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nvc0_query_get(push, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
      nvc0_query_get(push, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
      nvc0_query_get(push, q, 0x20, 0x02802002); /* VP, LAUNCHES */
      nvc0_query_get(push, q, 0x30, 0x03806002); /* GP, LAUNCHES */
      nvc0_query_get(push, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nvc0_query_get(push, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nvc0_query_get(push, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nvc0_query_get(push, q, 0x70, 0x0980a002); /* ROP, PIXELS */
      nvc0_query_get(push, q, 0x80, 0x0d808002); /* TCP, LAUNCHES */
      nvc0_query_get(push, q, 0x90, 0x0e809002); /* TEP, LAUNCHES */
      break;
   case NVC0_QUERY_TFB_BUFFER_OFFSET:
      /* indexed by TFB buffer instead of by vertex stream */
      nvc0_query_get(push, q, 0x00, 0x0d005002 | (q->index << 5));
      break;
   default:
#ifdef NOUVEAU_ENABLE_DRIVER_STATISTICS
      if (q->type >= NVC0_QUERY_DRV_STAT(0) &&
          q->type <= NVC0_QUERY_DRV_STAT_LAST) {
         q->u.value = nvc0->screen->base.stats.v[q->index] - q->u.value;
         return;
      } else
#endif
      if (q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_LAST)
         nve4_mp_pm_query_end(nvc0, q);
      break;
   }
   if (q->is64bit)
      nouveau_fence_ref(nvc0->screen->base.fence.current, &q->fence);
}
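
Both drivers reach these functions through the gallium pipe_context hooks. A sketch of how a state tracker would drive the path end to end (assuming a contemporary interface where create_query() also takes a stream index; the helper name is hypothetical):

static uint64_t
count_passing_samples(struct pipe_context *pipe)
{
   /* Hypothetical helper: count samples that pass the depth test. */
   struct pipe_query *q =
      pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
   union pipe_query_result result;

   pipe->begin_query(pipe, q);
   /* ... emit draw calls ... */
   pipe->end_query(pipe, q);       /* lands in nvc0_query_end() above */
   pipe->get_query_result(pipe, q, TRUE, &result); /* TRUE = wait for GPU */
   pipe->destroy_query(pipe, q);
   return result.u64;
}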