Example #1
bool
os_wait_until_zero(volatile int *var, uint64_t timeout)
{
   if (!p_atomic_read(var))
      return true;

   if (!timeout)
      return false;

   if (timeout == PIPE_TIMEOUT_INFINITE) {
      while (p_atomic_read(var)) {
#if defined(PIPE_OS_UNIX)
         sched_yield();
#endif
      }
      return true;
   }
   else {
      int64_t start_time = os_time_get_nano();
      int64_t end_time = start_time + timeout;

      while (p_atomic_read(var)) {
         if (os_time_timeout(start_time, end_time, os_time_get_nano()))
            return false;

#if defined(PIPE_OS_UNIX)
         sched_yield();
#endif
      }
      return true;
   }
}
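
A minimal usage sketch (hypothetical names, not from the sources above): the timeout argument is in nanoseconds, so a caller that polls a worker-busy counter for at most one millisecond could look like this.

static bool
wait_for_workers_idle(volatile int *busy_count)
{
   /* Give the workers at most 1 ms (1,000,000 ns) to drain. */
   return os_wait_until_zero(busy_count, 1000000);
}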
Example #2
/**
 * End the current occlusion query.
 * This is a bin command put in all bins.
 * Called per thread.
 */
static void
lp_rast_end_query(struct lp_rasterizer_task *task,
                  const union lp_rast_cmd_arg arg)
{
   struct llvmpipe_query *pq = arg.query_obj;
   assert(task->query[pq->type] == pq || pq->type == PIPE_QUERY_TIMESTAMP);

   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      pq->count[task->thread_index] += task->thread_data.vis_counter;
      break;
   case PIPE_QUERY_TIMESTAMP:
      pq->count[task->thread_index] = os_time_get_nano();
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_PIPELINE_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      break;
   default:
      assert(0);
      break;
   }

   if (task->query[pq->type] == pq) {
      task->query[pq->type] = NULL;
   }
}
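
Note how each rasterizer thread accumulates into its own count[thread_index] slot, so no atomics are needed on the hot path; Example #5 below sums the per-thread slots when the result is read back.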
Example #3
/**
 * End the current occlusion query.
 * This is a bin command put in all bins.
 * Called per thread.
 */
static void
lp_rast_end_query(struct lp_rasterizer_task *task,
                  const union lp_rast_cmd_arg arg)
{
   struct llvmpipe_query *pq = arg.query_obj;

   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      pq->end[task->thread_index] +=
         task->thread_data.vis_counter - pq->start[task->thread_index];
      pq->start[task->thread_index] = 0;
      break;
   case PIPE_QUERY_TIMESTAMP:
      pq->end[task->thread_index] = os_time_get_nano();
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      pq->end[task->thread_index] +=
         task->ps_invocations - pq->start[task->thread_index];
      pq->start[task->thread_index] = 0;
      break;
   default:
      assert(0);
      break;
   }
}
Example #4
static boolean r600_fence_finish(struct pipe_screen *screen,
				 struct pipe_context *ctx,
				 struct pipe_fence_handle *fence,
				 uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
	struct r600_common_context *rctx;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	ctx = threaded_context_unwrap_sync(ctx);
	rctx = ctx ? (struct r600_common_context*)ctx : NULL;

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (rctx &&
	    rfence->gfx_unflushed.ctx == rctx &&
	    rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
		rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
		rfence->gfx_unflushed.ctx = NULL;

		if (!timeout)
			return false;

		/* Recompute the timeout after all that. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	return rws->fence_wait(rws, rfence->gfx, timeout);
}
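
The two identical "recompute the timeout" blocks above are a common pattern: shrink the relative timeout to whatever time is left until the absolute deadline. A hedged helper sketch (not an actual Mesa function):

static uint64_t
remaining_timeout(uint64_t timeout, int64_t abs_timeout)
{
   /* 0 (poll) and PIPE_TIMEOUT_INFINITE pass through unchanged. */
   if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t now = os_time_get_nano();
      timeout = abs_timeout > now ? abs_timeout - now : 0;
   }
   return timeout;
}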
Example #5
static boolean
llvmpipe_get_query_result(struct pipe_context *pipe, 
                          struct pipe_query *q,
                          boolean wait,
                          union pipe_query_result *vresult)
{
   struct llvmpipe_query *pq = llvmpipe_query(q);
   uint64_t *result = (uint64_t *)vresult;
   int i;

   if (!pq->fence) {
      /* no fence because there was no scene, so the result is zero */
      *result = 0;
      return TRUE;
   }

   if (!lp_fence_signalled(pq->fence)) {
      if (!lp_fence_issued(pq->fence))
         llvmpipe_flush(pipe, NULL, __FUNCTION__);

      if (!wait)
         return FALSE;

      lp_fence_wait(pq->fence);
   }

   /* Sum the results from each of the threads:
    */
   *result = 0;

   switch (pq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      for (i = 0; i < LP_MAX_THREADS; i++) {
         *result += pq->count[i];
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      for (i = 0; i < LP_MAX_THREADS; i++) {
         if (pq->count[i] > *result) {
            *result = pq->count[i];
         }
         if (*result == 0)
            *result = os_time_get_nano();
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      *result = pq->num_primitives_generated;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      *result = pq->num_primitives_written;
      break;
   default:
      assert(0);
      break;
   }

   return TRUE;
}
Example #6
static boolean
softpipe_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct softpipe_context *softpipe = softpipe_context( pipe );
   struct softpipe_query *sq = softpipe_query(q);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      sq->start = softpipe->occlusion_count;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      sq->start = os_time_get_nano();
      break;
   case PIPE_QUERY_SO_STATISTICS:
      sq->so.num_primitives_written = softpipe->so_stats.num_primitives_written;
      sq->so.primitives_storage_needed = softpipe->so_stats.primitives_storage_needed;
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      sq->end = FALSE;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      sq->so.num_primitives_written = softpipe->so_stats.num_primitives_written;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      sq->so.primitives_storage_needed = softpipe->so_stats.primitives_storage_needed;
      break;
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_GPU_FINISHED:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* reset our cache */
      if (softpipe->active_statistics_queries == 0) {
         memset(&softpipe->pipeline_statistics, 0,
                sizeof(softpipe->pipeline_statistics));
      }
      memcpy(&sq->stats, &softpipe->pipeline_statistics,
             sizeof(sq->stats));
      softpipe->active_statistics_queries++;
      break;
   default:
      assert(0);
      break;
   }
   softpipe->active_query_count++;
   softpipe->dirty |= SP_NEW_QUERY;
   return true;
}
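
begin_query and end_query form a snapshot/delta pattern: begin records the running counter, and end subtracts the snapshot from the counter's final value (see Example #14 below). A standalone sketch of the idea, with illustrative names:

struct counter_query {
   uint64_t start;   /* counter value snapshotted at begin */
   uint64_t result;  /* delta computed at end */
};

static void
query_begin(struct counter_query *q, uint64_t counter_now)
{
   q->start = counter_now;
}

static void
query_end(struct counter_query *q, uint64_t counter_now)
{
   q->result = counter_now - q->start;
}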
Example #7
int64_t
os_time_get_absolute_timeout(uint64_t timeout)
{
   int64_t time, abs_timeout;

   /* Also check for the type upper bound. */
   if (timeout == PIPE_TIMEOUT_INFINITE || timeout > INT64_MAX)
      return PIPE_TIMEOUT_INFINITE;

   time = os_time_get_nano();
   abs_timeout = time + (int64_t)timeout;

   /* Check for overflow. */
   if (abs_timeout < time)
      return PIPE_TIMEOUT_INFINITE;

   return abs_timeout;
}
Example #8
bool
os_wait_until_zero_abs_timeout(volatile int *var, int64_t timeout)
{
   if (!p_atomic_read(var))
      return true;

   if (timeout == PIPE_TIMEOUT_INFINITE)
      return os_wait_until_zero(var, PIPE_TIMEOUT_INFINITE);

   while (p_atomic_read(var)) {
      if (os_time_get_nano() >= timeout)
         return false;

#if defined(PIPE_OS_UNIX)
      sched_yield();
#endif
   }
   return true;
}
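
Converting a relative timeout to an absolute deadline once, up front, keeps a sequence of waits from silently extending the caller's total budget. A hypothetical composition sketch using the two functions above:

static bool
wait_both_idle(volatile int *a, volatile int *b, uint64_t timeout)
{
   /* Both waits share one deadline, so the combined wait never
    * exceeds the original timeout. */
   int64_t deadline = os_time_get_absolute_timeout(timeout);
   return os_wait_until_zero_abs_timeout(a, deadline) &&
          os_wait_until_zero_abs_timeout(b, deadline);
}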
Example #9
/**
 * This is called just before issuing the buffer swap/present.
 * We query the current time and determine if we should sleep before
 * issuing the swap/present.
 * This is a bit of a hack and is certainly not very accurate but it
 * basically works.
 * This is for the WGL_ARB_swap_interval extension.
 */
static void
wait_swap_interval(struct stw_framebuffer *fb)
{
   /* Note: all time variables here are in units of microseconds */
   int64_t cur_time = os_time_get_nano() / 1000;

   if (fb->prev_swap_time != 0) {
      /* Compute time since previous swap */
      int64_t delta = cur_time - fb->prev_swap_time;
      int64_t min_swap_period =
         1.0e6 / stw_dev->refresh_rate * stw_dev->swap_interval;

      /* if time since last swap is less than wait period, wait */
      if (delta < min_swap_period) {
         float fudge = 1.75f;  /* empirical fudge factor */
         int64_t wait = (min_swap_period - delta) * fudge;
         os_time_sleep(wait);
      }
   }

   fb->prev_swap_time = cur_time;
}
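
As a worked example: with refresh_rate = 60 and swap_interval = 1, min_swap_period is 1.0e6 / 60 ≈ 16667 µs; if only 10000 µs have elapsed since the previous swap, the thread sleeps roughly (16667 − 10000) × 1.75 ≈ 11667 µs.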
Example #10
static boolean r600_fence_finish(struct pipe_screen *screen,
				 struct pipe_fence_handle *fence,
				 uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	return rws->fence_wait(rws, rfence->gfx, timeout);
}
Example #11
/**
 * Put an EndQuery command into all bins.
 */
void
lp_setup_end_query(struct lp_setup_context *setup, struct llvmpipe_query *pq)
{
   set_scene_state(setup, SETUP_ACTIVE, "end_query");

   assert(setup->scene);
   if (setup->scene) {
      /* pq->fence should be the fence of the *last* scene which
       * contributed to the query result.
       */
      lp_fence_reference(&pq->fence, setup->scene->fence);

      if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
          pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
          pq->type == PIPE_QUERY_PIPELINE_STATISTICS ||
          pq->type == PIPE_QUERY_TIMESTAMP) {
         if (pq->type == PIPE_QUERY_TIMESTAMP &&
               !(setup->scene->tiles_x | setup->scene->tiles_y)) {
            /*
             * If the framebuffer has zero width/height, there are no bins
             * and hence no rast task ever runs, so fill in something here
             * instead.
             */
            pq->end[0] = os_time_get_nano();
         }

         if (!lp_scene_bin_everywhere(setup->scene,
                                      LP_RAST_OP_END_QUERY,
                                      lp_rast_arg_query(pq))) {
            if (!lp_setup_flush_and_restart(setup))
               goto fail;

            if (!lp_scene_bin_everywhere(setup->scene,
                                         LP_RAST_OP_END_QUERY,
                                         lp_rast_arg_query(pq))) {
               goto fail;
            }
         }
         setup->scene->had_queries |= TRUE;
      }
   }
   else {
      lp_fence_reference(&pq->fence, setup->last_fence);
   }

fail:
   /* Need to do this now, not earlier: the query must still be marked
    * active while binning, in case binning causes a flush.
    */
   if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER ||
      pq->type == PIPE_QUERY_OCCLUSION_PREDICATE ||
      pq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
      unsigned i;

      /* remove from active binned query list */
      for (i = 0; i < setup->active_binned_queries; i++) {
         if (setup->active_queries[i] == pq)
            break;
      }
      assert(i < setup->active_binned_queries);
      if (i == setup->active_binned_queries)
         return;
      setup->active_binned_queries--;
      setup->active_queries[i] = setup->active_queries[setup->active_binned_queries];
      setup->active_queries[setup->active_binned_queries] = NULL;
   }
}
Example #12
static uint64_t
virgl_get_timestamp(struct pipe_screen *_screen)
{
   return os_time_get_nano();
}
Example #13
static uint64_t
etna_screen_get_timestamp(struct pipe_screen *pscreen)
{
   return os_time_get_nano();
}
Example #14
static void
softpipe_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct softpipe_context *softpipe = softpipe_context( pipe );
   struct softpipe_query *sq = softpipe_query(q);

   softpipe->active_query_count--;
   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      sq->end = softpipe->occlusion_count;
      break;
   case PIPE_QUERY_TIMESTAMP:
      sq->start = 0;
      /* fall through */
   case PIPE_QUERY_TIME_ELAPSED:
      sq->end = os_time_get_nano();
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      sq->so.num_primitives_written =
         softpipe->so_stats.num_primitives_written - sq->so.num_primitives_written;
      sq->so.primitives_storage_needed =
         softpipe->so_stats.primitives_storage_needed - sq->so.primitives_storage_needed;
      sq->end = sq->so.primitives_storage_needed > sq->so.num_primitives_written;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      sq->so.num_primitives_written =
         softpipe->so_stats.num_primitives_written - sq->so.num_primitives_written;
      sq->so.primitives_storage_needed =
         softpipe->so_stats.primitives_storage_needed - sq->so.primitives_storage_needed;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      sq->so.num_primitives_written =
         softpipe->so_stats.num_primitives_written - sq->so.num_primitives_written;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      sq->so.primitives_storage_needed =
         softpipe->so_stats.primitives_storage_needed - sq->so.primitives_storage_needed;
      break;
   case PIPE_QUERY_GPU_FINISHED:
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      sq->stats.ia_vertices =
         softpipe->pipeline_statistics.ia_vertices - sq->stats.ia_vertices;
      sq->stats.ia_primitives =
         softpipe->pipeline_statistics.ia_primitives - sq->stats.ia_primitives;
      sq->stats.vs_invocations =
         softpipe->pipeline_statistics.vs_invocations - sq->stats.vs_invocations;
      sq->stats.gs_invocations =
         softpipe->pipeline_statistics.gs_invocations - sq->stats.gs_invocations;
      sq->stats.gs_primitives =
         softpipe->pipeline_statistics.gs_primitives - sq->stats.gs_primitives;
      sq->stats.c_invocations =
         softpipe->pipeline_statistics.c_invocations - sq->stats.c_invocations;
      sq->stats.c_primitives =
         softpipe->pipeline_statistics.c_primitives - sq->stats.c_primitives;
      sq->stats.ps_invocations =
         softpipe->pipeline_statistics.ps_invocations - sq->stats.ps_invocations;

      softpipe->active_statistics_queries--;
      break;
   default:
      assert(0);
      break;
   }
   softpipe->dirty |= SP_NEW_QUERY;
}
Example #15
static void *amdgpu_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, 0, NULL);
            }
            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
               cs->flush_cs(cs->flush_data, 0, NULL);

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->rws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
       return bo->user_ptr;

   r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   return r ? NULL : cpu;
}
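
A hypothetical caller sketch (illustrative name, same winsys types as above): try a non-blocking mapping first, then fall back to a blocking one.

static void *
map_for_read(struct radeon_winsys_cs_handle *buf, struct radeon_winsys_cs *cs)
{
   /* With DONTBLOCK set, amdgpu_bo_map returns NULL instead of waiting
    * (flushing the CS asynchronously if needed); retry without it to block. */
   void *ptr = amdgpu_bo_map(buf, cs,
                             PIPE_TRANSFER_READ | PIPE_TRANSFER_DONTBLOCK);
   if (!ptr)
      ptr = amdgpu_bo_map(buf, cs, PIPE_TRANSFER_READ);
   return ptr;
}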