Example #1
void
swr_finish(struct pipe_context *pipe)
{
   struct pipe_fence_handle *fence = nullptr;

   swr_flush(pipe, &fence, 0);
   swr_fence_finish(pipe->screen, NULL, fence, 0);
   swr_fence_reference(pipe->screen, &fence, NULL);
}
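For context, the same flush-and-wait pattern can be driven through the generic Gallium hooks that these swr_* helpers implement. A minimal sketch, assuming the four-argument fence_finish signature used by Example #1 (older Mesa releases omit the context argument):

#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "pipe/p_defines.h"

/* Hypothetical helper: flush a context and block until the work retires. */
static void
flush_and_wait(struct pipe_context *pipe)
{
   struct pipe_screen *screen = pipe->screen;
   struct pipe_fence_handle *fence = NULL;

   /* Ask the driver to flush; it hands back a fence for the submitted work. */
   pipe->flush(pipe, &fence, 0);

   if (fence) {
      /* Block until the fence signals (or the timeout expires). */
      screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
      /* Drop our reference so the fence can be freed. */
      screen->fence_reference(screen, &fence, NULL);
   }
}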
Example #2
static void
swr_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct swr_query *pq = swr_query(q);

   if (pq->fence) {
      if (!swr_is_fence_done(swr_fence(pq->fence))) {
         swr_fence_submit(swr_context(pipe), pq->fence);
         swr_fence_finish(pipe->screen, pq->fence, 0);
      }
      swr_fence_reference(pipe->screen, &pq->fence, NULL);
   }

   FREE(pq);
}
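The fence handling above reappears almost verbatim in swr_gather_stats and swr_get_query_result below: if the query still holds a fence that has not retired, the fence is submitted, waited on, and then unreferenced. A hypothetical helper capturing that idiom, assuming the driver-internal swr_fence helpers shown in these examples and the three-argument swr_fence_finish used here:

/* Hypothetical helper (not part of the driver): drain a pending fence and
 * drop the reference.  Uses the swr_fence_* helpers from swr_fence.h. */
static void
swr_drain_fence(struct pipe_context *pipe, struct pipe_fence_handle **fence)
{
   if (!*fence)
      return;

   if (!swr_is_fence_done(swr_fence(*fence))) {
      /* The fence was created but never handed to the core; submit it now. */
      swr_fence_submit(swr_context(pipe), *fence);
      swr_fence_finish(pipe->screen, *fence, 0);
   }

   /* Release the query's reference so the fence can be freed. */
   swr_fence_reference(pipe->screen, fence, NULL);
}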
Example #3
static void
swr_destroy_screen(struct pipe_screen *p_screen)
{
   struct swr_screen *screen = swr_screen(p_screen);
   struct sw_winsys *winsys = screen->winsys;

   fprintf(stderr, "SWR destroy screen!\n");

   swr_fence_finish(p_screen, screen->flush_fence, 0);
   swr_fence_reference(p_screen, &screen->flush_fence, NULL);

   JitDestroyContext(screen->hJitMgr);

   if (winsys->destroy)
      winsys->destroy(winsys);

   FREE(screen);
}
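swr_destroy_screen is reached through the generic pipe_screen::destroy hook once every context created from the screen has been destroyed. A rough sketch of that teardown path, assuming swr_create_screen() is the winsys entry point that installs the hook:

/* Sketch only: how the destroy hook above is normally invoked. */
static void
teardown_example(struct sw_winsys *winsys)
{
   /* Assumed entry point that fills in screen->destroy = swr_destroy_screen. */
   struct pipe_screen *screen = swr_create_screen(winsys);

   /* ... create pipe contexts, render, destroy all contexts ... */

   /* Dropping the screen ends up in swr_destroy_screen(), which waits on the
    * flush fence, tears down the JIT manager and then the winsys. */
   screen->destroy(screen);
}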
Example #4
static void
swr_flush(struct pipe_context *pipe,
          struct pipe_fence_handle **fence,
          unsigned flags)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_screen *screen = swr_screen(pipe->screen);
   struct pipe_surface *cb = ctx->framebuffer.cbufs[0];

   /* If the current renderTarget is the display surface, store tiles back to
    * the surface, in preparation for present (swr_flush_frontbuffer).
    * Other renderTargets get stored back when attachment changes or
    * swr_surface_destroy */
   if (cb && swr_resource(cb->texture)->display_target)
      swr_store_dirty_resource(pipe, cb->texture, SWR_TILE_RESOLVED);

   if (fence)
      swr_fence_reference(pipe->screen, fence, screen->flush_fence);
}
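In the display path, this flush is followed by a present: the context is flushed so the display render target is stored back to memory, and the screen is then asked to push that surface to the window system (swr_flush_frontbuffer in this driver). A hedged sketch, using the flush_frontbuffer prototype of the same Mesa era (newer trees use a different argument list):

#include "pipe/p_context.h"
#include "pipe/p_screen.h"

/* Sketch of the flush-then-present sequence this swr_flush prepares for. */
static void
present_example(struct pipe_context *pipe,
                struct pipe_resource *display_res,
                void *winsys_drawable)
{
   struct pipe_screen *screen = pipe->screen;
   struct pipe_fence_handle *fence = NULL;

   /* Stores the display render target back to the surface (see above). */
   pipe->flush(pipe, &fence, 0);
   screen->fence_reference(screen, &fence, NULL);

   /* Hand the resolved surface to the window system. */
   screen->flush_frontbuffer(screen, display_res, 0 /* level */,
                             0 /* layer */, winsys_drawable, NULL);
}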
Example #5
static void
swr_flush(struct pipe_context *pipe,
          struct pipe_fence_handle **fence,
          unsigned flags)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_screen *screen = swr_screen(pipe->screen);

   for (int i=0; i < ctx->framebuffer.nr_cbufs; i++) {
      struct pipe_surface *cb = ctx->framebuffer.cbufs[i];
      if (cb) {
         swr_store_dirty_resource(pipe, cb->texture, SWR_TILE_RESOLVED);
      }
   }
   if (ctx->framebuffer.zsbuf) {
      swr_store_dirty_resource(pipe, ctx->framebuffer.zsbuf->texture,
                               SWR_TILE_RESOLVED);
   }

   if (fence)
      swr_fence_reference(pipe->screen, fence, screen->flush_fence);
}
Example #6
// XXX Create a fence callback, rather than stalling SwrWaitForIdle
static void
swr_gather_stats(struct pipe_context *pipe, struct swr_query *pq)
{
   struct swr_context *ctx = swr_context(pipe);

   assert(pq->result);
   union pipe_query_result *result = pq->result;
   boolean enable_stats = pq->enable_stats;
   SWR_STATS swr_stats = {0};

   if (pq->fence) {
      if (!swr_is_fence_done(swr_fence(pq->fence))) {
         swr_fence_submit(ctx, pq->fence);
         swr_fence_finish(pipe->screen, pq->fence, 0);
      }
      swr_fence_reference(pipe->screen, &pq->fence, NULL);
   }

   /*
    * These queries don't need SWR Stats enabled in the core
    * Set and return.
    */
   switch (pq->type) {
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
      result->u64 = swr_get_timestamp(pipe->screen);
      return;
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* nothing to do here */
      return;
      break;
   case PIPE_QUERY_GPU_FINISHED:
      result->b = TRUE; /* XXX TODO Add an api func to SWR to compare drawId
                           vs LastRetiredId? */
      return;
      break;
   default:
      /* Any query that needs SwrCore stats */
      break;
   }

   /*
    * All other results are collected from SwrCore counters
    */

   /* XXX, Should turn this into a fence callback and skip the stall */
   SwrGetStats(ctx->swrContext, &swr_stats);
   /* SwrGetStats returns immediately, wait for collection */
   SwrWaitForIdle(ctx->swrContext);

   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_COUNTER:
      result->u64 = swr_stats.DepthPassCount;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      result->u64 = swr_stats.IaPrimitives;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = swr_stats.SoNumPrimsWritten[pq->index];
      break;
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE: {
      struct pipe_query_data_so_statistics *so_stats = &result->so_statistics;
      so_stats->num_primitives_written =
         swr_stats.SoNumPrimsWritten[pq->index];
      so_stats->primitives_storage_needed =
         swr_stats.SoPrimStorageNeeded[pq->index];
   } break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      struct pipe_query_data_pipeline_statistics *p_stats =
         &result->pipeline_statistics;
      p_stats->ia_vertices = swr_stats.IaVertices;
      p_stats->ia_primitives = swr_stats.IaPrimitives;
      p_stats->vs_invocations = swr_stats.VsInvocations;
      p_stats->gs_invocations = swr_stats.GsInvocations;
      p_stats->gs_primitives = swr_stats.GsPrimitives;
      p_stats->c_invocations = swr_stats.CPrimitives;
      p_stats->c_primitives = swr_stats.CPrimitives;
      p_stats->ps_invocations = swr_stats.PsInvocations;
      p_stats->hs_invocations = swr_stats.HsInvocations;
      p_stats->ds_invocations = swr_stats.DsInvocations;
      p_stats->cs_invocations = swr_stats.CsInvocations;
   } break;
   default:
      assert(0 && "Unsupported query");
      break;
   }

   /* Only change stat collection if there are no active queries */
   if (ctx->active_queries == 0)
      SwrEnableStats(ctx->swrContext, enable_stats);
}
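swr_gather_stats is invoked from the driver's query begin/end paths to snapshot the SWR core counters into pq->result. From the state-tracker side the same machinery is driven through the standard Gallium query hooks; a minimal, hypothetical usage sketch:

#include "pipe/p_context.h"
#include "pipe/p_defines.h"

/* Hypothetical sketch: count generated primitives across some draws using the
 * generic query hooks, which land in the swr_*_query functions shown here. */
static uint64_t
count_primitives(struct pipe_context *pipe)
{
   union pipe_query_result result = {0};
   struct pipe_query *q =
      pipe->create_query(pipe, PIPE_QUERY_PRIMITIVES_GENERATED, 0);

   pipe->begin_query(pipe, q);
   /* ... issue draw calls ... */
   pipe->end_query(pipe, q);

   /* wait = TRUE blocks until the result is available. */
   pipe->get_query_result(pipe, q, TRUE, &result);
   pipe->destroy_query(pipe, q);

   return result.u64;
}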
Example #7
static boolean
swr_get_query_result(struct pipe_context *pipe,
                     struct pipe_query *q,
                     boolean wait,
                     union pipe_query_result *result)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_query *pq = swr_query(q);

   if (pq->fence) {
      if (!swr_is_fence_done(swr_fence(pq->fence))) {
         swr_fence_submit(ctx, pq->fence);
         if (!wait)
            return FALSE;
         swr_fence_finish(pipe->screen, pq->fence, 0);
      }
      swr_fence_reference(pipe->screen, &pq->fence, NULL);
   }

   /* XXX: Need to handle counter rollover */

   switch (pq->type) {
   /* Booleans */
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      result->b = pq->end.u64 != pq->start.u64 ? TRUE : FALSE;
      break;
   case PIPE_QUERY_GPU_FINISHED:
      result->b = pq->end.b;
      break;
   /* Counters */
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = pq->end.u64 - pq->start.u64;
      break;
   /* Structures */
   case PIPE_QUERY_SO_STATISTICS: {
      struct pipe_query_data_so_statistics *so_stats = &result->so_statistics;
      struct pipe_query_data_so_statistics *start = &pq->start.so_statistics;
      struct pipe_query_data_so_statistics *end = &pq->end.so_statistics;
      so_stats->num_primitives_written =
         end->num_primitives_written - start->num_primitives_written;
      so_stats->primitives_storage_needed =
         end->primitives_storage_needed - start->primitives_storage_needed;
   } break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT: {
      /* os_get_time_nano returns nanoseconds */
      result->timestamp_disjoint.frequency = UINT64_C(1000000000);
      result->timestamp_disjoint.disjoint = FALSE;
   } break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      struct pipe_query_data_pipeline_statistics *p_stats =
         &result->pipeline_statistics;
      struct pipe_query_data_pipeline_statistics *start =
         &pq->start.pipeline_statistics;
      struct pipe_query_data_pipeline_statistics *end =
         &pq->end.pipeline_statistics;
      p_stats->ia_vertices = end->ia_vertices - start->ia_vertices;
      p_stats->ia_primitives = end->ia_primitives - start->ia_primitives;
      p_stats->vs_invocations = end->vs_invocations - start->vs_invocations;
      p_stats->gs_invocations = end->gs_invocations - start->gs_invocations;
      p_stats->gs_primitives = end->gs_primitives - start->gs_primitives;
      p_stats->c_invocations = end->c_invocations - start->c_invocations;
      p_stats->c_primitives = end->c_primitives - start->c_primitives;
      p_stats->ps_invocations = end->ps_invocations - start->ps_invocations;
      p_stats->hs_invocations = end->hs_invocations - start->hs_invocations;
      p_stats->ds_invocations = end->ds_invocations - start->ds_invocations;
      p_stats->cs_invocations = end->cs_invocations - start->cs_invocations;
   } break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE: {
      struct pipe_query_data_so_statistics *start = &pq->start.so_statistics;
      struct pipe_query_data_so_statistics *end = &pq->end.so_statistics;
      uint64_t num_primitives_written =
         end->num_primitives_written - start->num_primitives_written;
      uint64_t primitives_storage_needed =
         end->primitives_storage_needed - start->primitives_storage_needed;
      result->b = num_primitives_written > primitives_storage_needed;
   } break;
   default:
      assert(0 && "Unsupported query");
      break;
   }

   return TRUE;
}
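Because the function returns FALSE when wait is FALSE and the query's fence has not retired, callers can poll for results without stalling. A small hedged sketch of that polling pattern on top of the generic hook:

/* Sketch: try a non-blocking read first, then fall back to a blocking one. */
static boolean
try_get_result(struct pipe_context *pipe,
               struct pipe_query *q,
               union pipe_query_result *result)
{
   /* wait = FALSE: returns FALSE while the fence is still pending. */
   if (pipe->get_query_result(pipe, q, FALSE, result))
      return TRUE;

   /* ... do other useful work here, then block for the result ... */
   return pipe->get_query_result(pipe, q, TRUE, result);
}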