/* Poll (without blocking) whether an occlusion query's result is ready.
 * On kernels exposing DRM_RADEON_GEM_BUSY with the new memory manager we
 * can test the query BO for idleness; otherwise fall back to the blocking
 * wait path.  Sets q->Ready and harvests the result only when the BO is idle.
 */
static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __FUNCTION__, q->Id);

#ifdef DRM_RADEON_GEM_BUSY
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	if (!rmesa->radeonScreen->kernel_mm) {
		/* Legacy memory manager: no busy ioctl, so just block. */
		radeonWaitQuery(ctx, q);
		return;
	}

	struct radeon_query_object *query = (struct radeon_query_object *)q;
	uint32_t busy_domain;

	/* The ARB_occlusion_query spec requires that checking availability
	 * acts as a flush, so submit the CS if it still references the BO. */
	if (radeon_bo_is_referenced_by_cs(query->bo, rmesa->cmdbuf.cs)) {
		ctx->Driver.Flush(ctx);
	}

	/* Non-blocking: harvest the result only if the GPU is done with the BO. */
	if (radeon_bo_is_busy(query->bo, &busy_domain) == 0) {
		radeonQueryGetResult(ctx, q);
		query->Base.Ready = GL_TRUE;
	}
#else
	radeonWaitQuery(ctx, q);
#endif
}
/* Block until the query's result is available, read it back, and mark the
 * query object ready.  Used both directly and as the fallback for
 * radeonCheckQuery on kernels without a busy ioctl.
 */
static void radeonWaitQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	struct radeon_query_object *query = (struct radeon_query_object *)q;
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	/* The command stream holding this query's packets may not have been
	 * submitted yet; flush it so the result can ever become available. */
	if (radeon_bo_is_referenced_by_cs(query->bo, rmesa->cmdbuf.cs)) {
		ctx->Driver.Flush(ctx);
	}

	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__, q->Id, query->bo, query->curr_offset);

	radeonQueryGetResult(ctx, q);
	query->Base.Ready = GL_TRUE;
}