bool r600_common_context_init(struct r600_common_context *rctx,
			      struct r600_common_screen *rscreen,
			      unsigned context_flags)
{
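	/* Per-context slab pools for pipe_transfer objects, both children of
	 * the screen's shared parent pool. */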
	slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);
	slab_create_child(&rctx->pool_transfers_unsync, &rscreen->pool_transfers);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	rctx->b.invalidate_resource = r600_invalidate_resource;
	rctx->b.resource_commit = r600_resource_commit;
	rctx->b.transfer_map = u_transfer_map_vtbl;
	rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
	rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	rctx->b.texture_subdata = u_default_texture_subdata;
	rctx->b.memory_barrier = r600_memory_barrier;
	rctx->b.flush = r600_flush_from_st;
	rctx->b.set_debug_callback = r600_set_debug_callback;
	rctx->b.fence_server_sync = r600_fence_server_sync;
	rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;

	/* evergreen_compute.c has a special codepath for global buffers.
	 * Everything else can use the direct path.
	 */
	if ((rscreen->chip_class == EVERGREEN || rscreen->chip_class == CAYMAN) &&
	    (context_flags & PIPE_CONTEXT_COMPUTE_ONLY))
		rctx->b.buffer_subdata = u_default_buffer_subdata;
	else
		rctx->b.buffer_subdata = r600_buffer_subdata;

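	/* The GPU reset counter query needs radeon DRM v2.43 or later. */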
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
		rctx->b.get_device_reset_status = r600_get_reset_status;
		rctx->gpu_reset_counter =
			rctx->ws->query_value(rctx->ws,
					      RADEON_GPU_RESET_COUNTER);
	}

	rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

	r600_init_context_texture_functions(rctx);
	r600_init_viewport_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

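	/* Suballocator that hands out zero-initialized memory in units of the
	 * GART page size. */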
	rctx->allocator_zeroed_memory =
		u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
				      0, PIPE_USAGE_DEFAULT, 0, true);
	if (!rctx->allocator_zeroed_memory)
		return false;

	rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
						  0, PIPE_USAGE_STREAM);
	if (!rctx->b.stream_uploader)
		return false;

	rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
						 0, PIPE_USAGE_DEFAULT);
	if (!rctx->b.const_uploader)
		return false;

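	/* Winsys submission context; command streams are created against it. */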
	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
	if (!rctx->ctx)
		return false;

	if (rscreen->info.num_sdma_rings && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
						   r600_flush_dma_ring,
						   rctx);
		rctx->dma.flush = r600_flush_dma_ring;
	}

	return true;
}
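
A minimal sketch of how a derived driver could call r600_common_context_init
from its pipe_screen::context_create hook. The names prefixed with my_ are
hypothetical; in the real driver this happens in r600_create_context in
r600_pipe.c:

struct my_context {
	struct r600_common_context b; /* base class; must be the first member */
	/* driver-private state follows */
};

static struct pipe_context *
my_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
	struct my_context *ctx = CALLOC_STRUCT(my_context);
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	if (!ctx)
		return NULL;

	ctx->b.b.screen = screen; /* pipe_context::screen must point back */
	ctx->b.b.priv = priv;

	/* Shared init must succeed before any driver-specific setup. */
	if (!r600_common_context_init(&ctx->b, rscreen, flags)) {
		FREE(ctx);
		return NULL;
	}

	return &ctx->b.b;
}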
Example #2
/**
 * Create a context.
 *
 * This is where each context begins.
 */
struct pipe_context *
iris_create_context(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct iris_screen *screen = (struct iris_screen*)pscreen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_context *ice = rzalloc(NULL, struct iris_context);

   if (!ice)
      return NULL;

   struct pipe_context *ctx = &ice->ctx;

   ctx->screen = pscreen;
   ctx->priv = priv;

   ctx->stream_uploader = u_upload_create_default(ctx);
   if (!ctx->stream_uploader) {
      ralloc_free(ice);
      return NULL;
   }
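   /* iris uses a single uploader for both stream and constant data. */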
   ctx->const_uploader = ctx->stream_uploader;

   ctx->destroy = iris_destroy_context;
   ctx->set_debug_callback = iris_set_debug_callback;
   ctx->set_device_reset_callback = iris_set_device_reset_callback;
   ctx->get_device_reset_status = iris_get_device_reset_status;
   ctx->get_sample_position = iris_get_sample_position;

   ice->shaders.urb_size = devinfo->urb.size;

   iris_init_context_fence_functions(ctx);
   iris_init_blit_functions(ctx);
   iris_init_clear_functions(ctx);
   iris_init_program_functions(ctx);
   iris_init_resource_functions(ctx);
   iris_init_query_functions(ctx);
   iris_init_flush_functions(ctx);

   iris_init_program_cache(ice);
   iris_init_border_color_pool(ice);
   iris_init_binder(ice);

   slab_create_child(&ice->transfer_pool, &screen->transfer_pool);

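   /* Surface and dynamic state uploads carry memzone flags so the resources
    * land in their dedicated virtual-memory zones. */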
   ice->state.surface_uploader =
      u_upload_create(ctx, 16384, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_SURFACE_MEMZONE);
   ice->state.dynamic_uploader =
      u_upload_create(ctx, 16384, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
                      IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE);

   ice->query_buffer_uploader =
      u_upload_create(ctx, 4096, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING,
                      0);

   genX_call(devinfo, init_state, ice);
   genX_call(devinfo, init_blorp, ice);

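   /* Translate pipe context-priority flags into kernel context priority. */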
   int priority = 0;
   if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
      priority = GEN_CONTEXT_HIGH_PRIORITY;
   if (flags & PIPE_CONTEXT_LOW_PRIORITY)
      priority = GEN_CONTEXT_LOW_PRIORITY;

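   /* Under INTEL_DEBUG=batch, keep a size hash table for batch decoding. */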
   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      ice->state.sizes = _mesa_hash_table_u64_create(ice);

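   /* Create the render and compute batches; both submit to the render
    * engine (I915_EXEC_RENDER). */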
   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
      iris_init_batch(&ice->batches[i], screen, &ice->vtbl, &ice->dbg,
                      &ice->reset, ice->state.sizes,
                      ice->batches, (enum iris_batch_name) i,
                      I915_EXEC_RENDER, priority);
   }

   ice->vtbl.init_render_context(screen, &ice->batches[IRIS_BATCH_RENDER],
                                 &ice->vtbl, &ice->dbg);
   ice->vtbl.init_compute_context(screen, &ice->batches[IRIS_BATCH_COMPUTE],
                                  &ice->vtbl, &ice->dbg);

   return ctx;
}
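
genX_call above dispatches to the generation-specific implementation at run
time. A simplified sketch of the idea (the real macro, in the iris sources,
covers every supported generation):

#define genX_call(devinfo, func, ...)             \
   switch ((devinfo)->gen) {                      \
   case 11:                                       \
      gen11_##func(__VA_ARGS__);                  \
      break;                                      \
   case 9:                                        \
      gen9_##func(__VA_ARGS__);                   \
      break;                                      \
   default:                                       \
      unreachable("Unknown hardware generation"); \
   }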
Example #3
static struct pipe_context *si_create_context(struct pipe_screen *screen,
                                              unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen* sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	int shader, i;
	bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;

	if (!sctx)
		return NULL;

	sctx->has_graphics = sscreen->info.chip_class == SI ||
			     !(flags & PIPE_CONTEXT_COMPUTE_ONLY);

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.screen = screen; /* this must be set first */
	sctx->b.priv = NULL;
	sctx->b.destroy = si_destroy_context;
	sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	slab_create_child(&sctx->pool_transfers, &sscreen->pool_transfers);
	slab_create_child(&sctx->pool_transfers_unsync, &sscreen->pool_transfers);

	sctx->ws = sscreen->ws;
	sctx->family = sscreen->info.family;
	sctx->chip_class = sscreen->info.chip_class;

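	/* Snapshot the reset counter now so later queries can detect resets. */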
	if (sscreen->info.has_gpu_reset_counter_query) {
		sctx->gpu_reset_counter =
			sctx->ws->query_value(sctx->ws, RADEON_GPU_RESET_COUNTER);
	}
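	/* Scratch buffer for the end-of-pipe (EOP) hardware bug workaround on
	 * CIK/VI/GFX9: 16 bytes per render backend. */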
	if (sctx->chip_class == CIK ||
	    sctx->chip_class == VI ||
	    sctx->chip_class == GFX9) {
		sctx->eop_bug_scratch = si_resource(
			pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * sscreen->info.num_render_backends));
		if (!sctx->eop_bug_scratch)
			goto fail;
	}

	/* Initialize context allocators. */
	sctx->allocator_zeroed_memory =
		u_suballocator_create(&sctx->b, 128 * 1024,
				      0, PIPE_USAGE_DEFAULT,
				      SI_RESOURCE_FLAG_UNMAPPABLE |
				      SI_RESOURCE_FLAG_CLEAR, false);
	if (!sctx->allocator_zeroed_memory)
		goto fail;

	sctx->b.stream_uploader = u_upload_create(&sctx->b, 1024 * 1024,
						    0, PIPE_USAGE_STREAM,
						    SI_RESOURCE_FLAG_READ_ONLY);
	if (!sctx->b.stream_uploader)
		goto fail;

	sctx->cached_gtt_allocator = u_upload_create(&sctx->b, 16 * 1024,
						       0, PIPE_USAGE_STAGING, 0);
	if (!sctx->cached_gtt_allocator)
		goto fail;

	sctx->ctx = sctx->ws->ctx_create(sctx->ws);
	if (!sctx->ctx)
		goto fail;

	if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
		sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
						   (void*)si_flush_dma_cs,
						   sctx, stop_exec_on_failure);
	}

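	/* On dGPUs with a live SDMA queue, constant uploads go through SDMA
	 * and therefore need explicit flushes. */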
	bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->dma_cs;
	sctx->b.const_uploader = u_upload_create(&sctx->b, 256 * 1024,
						 0, PIPE_USAGE_DEFAULT,
						 SI_RESOURCE_FLAG_32BIT |
						 (use_sdma_upload ?
							  SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA :
							  (sscreen->cpdma_prefetch_writes_memory ?
								   0 : SI_RESOURCE_FLAG_READ_ONLY)));
	if (!sctx->b.const_uploader)
		goto fail;

	if (use_sdma_upload)
		u_upload_enable_flush_explicit(sctx->b.const_uploader);

	sctx->gfx_cs = ws->cs_create(sctx->ctx,
				     sctx->has_graphics ? RING_GFX : RING_COMPUTE,
				     (void*)si_flush_gfx_cs, sctx, stop_exec_on_failure);

	/* Border colors. */
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = si_resource(
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table)));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;

	/* Initialize context functions used by graphics and compute. */
	sctx->b.emit_string_marker = si_emit_string_marker;
	sctx->b.set_debug_callback = si_set_debug_callback;
	sctx->b.set_log_context = si_set_log_context;
	sctx->b.set_context_param = si_set_context_param;
	sctx->b.get_device_reset_status = si_get_reset_status;
	sctx->b.set_device_reset_callback = si_set_device_reset_callback;

	si_init_all_descriptors(sctx);
	si_init_buffer_functions(sctx);
	si_init_clear_functions(sctx);
	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_compute_blit_functions(sctx);
	si_init_debug_functions(sctx);
	si_init_fence_functions(sctx);
	si_init_state_compute_functions(sctx);

	if (sscreen->debug_flags & DBG(FORCE_DMA))
		sctx->b.resource_copy_region = sctx->dma_copy;

	/* Initialize graphics-only context functions. */
	if (sctx->has_graphics) {
		si_init_context_texture_functions(sctx);
		si_init_query_functions(sctx);
		si_init_msaa_functions(sctx);
		si_init_shader_functions(sctx);
		si_init_state_functions(sctx);
		si_init_streamout_functions(sctx);
		si_init_viewport_functions(sctx);

		sctx->blitter = util_blitter_create(&sctx->b);
		if (sctx->blitter == NULL)
			goto fail;
		sctx->blitter->skip_viewport_restore = true;

		si_init_draw_functions(sctx);
	}

	/* Initialize SDMA functions. */
	if (sctx->chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	sctx->sample_mask = 0xffff;

	/* Initialize multimedia functions. */
	if (sscreen->info.has_hw_decode) {
		sctx->b.create_video_codec = si_uvd_create_decoder;
		sctx->b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.create_video_codec = vl_create_decoder;
		sctx->b.create_video_buffer = vl_video_buffer_create;
	}

	if (sctx->chip_class >= GFX9) {
		sctx->wait_mem_scratch = si_resource(
			pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
		if (!sctx->wait_mem_scratch)
			goto fail;

		/* Initialize the memory. */
		si_cp_write_data(sctx, sctx->wait_mem_scratch, 0, 4,
				 V_370_MEM, V_370_ME, &sctx->wait_mem_number);
	}

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->chip_class == CIK) {
		sctx->null_const_buf.buffer =
			pipe_aligned_buffer_create(screen,
						   SI_RESOURCE_FLAG_32BIT,
						   PIPE_USAGE_DEFAULT, 16,
						   sctx->screen->info.tcc_cache_line_size);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		unsigned start_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
		for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.set_constant_buffer(&sctx->b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);
	}

	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
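	/* Example with hypothetical numbers: 40 CUs and a 1024-thread block
	 * give MAX2(32 * 40, 1024 / 64) = MAX2(1280, 16) = 1280 waves. */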
	sctx->scratch_waves = MAX2(32 * sscreen->info.num_good_compute_units,
				   max_threads_per_block / 64);

	si_init_compiler(sscreen, &sctx->compiler);

	/* Bindless handles. */
	sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);
	sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);

	util_dynarray_init(&sctx->resident_tex_handles, NULL);
	util_dynarray_init(&sctx->resident_img_handles, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

	sctx->sample_pos_buffer =
		pipe_buffer_create(sctx->b.screen, 0, PIPE_USAGE_DEFAULT,
				   sizeof(sctx->sample_positions));
	pipe_buffer_write(&sctx->b, sctx->sample_pos_buffer, 0,
			  sizeof(sctx->sample_positions), &sctx->sample_positions);

	/* this must be last */
	si_begin_new_gfx_cs(sctx);

	if (sctx->chip_class == CIK) {
		/* Clear the NULL constant buffer, because loads should return zeros.
		 * Note that this forces CP DMA to be used, because clover deadlocks
		 * for some reason when the compute codepath is used.
		 */
		uint32_t clear_value = 0;
		si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0,
				sctx->null_const_buf.buffer->width0,
				&clear_value, 4, SI_COHERENCY_SHADER, true);
	}
	return &sctx->b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b);
	return NULL;
}
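
In every driver above, the function is exposed through the screen's
context_create hook. si_create_context is static and lacks the priv
parameter, so the driver exports a wrapper with the hook's signature; in the
real driver that wrapper is si_pipe_create_context, which can also interpose
u_threaded_context. A minimal sketch with hypothetical my_ names:

static struct pipe_context *
my_pipe_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
{
	/* The real wrapper may hand the context to u_threaded_context here. */
	return si_create_context(screen, flags);
}

struct pipe_screen *my_si_screen_create(struct radeon_winsys *ws)
{
	struct si_screen *sscreen = CALLOC_STRUCT(si_screen);

	if (!sscreen)
		return NULL;

	sscreen->ws = ws;
	/* State trackers call this hook once per pipe_context they need. */
	sscreen->b.context_create = my_pipe_create_context;

	/* ...remaining screen setup omitted from this sketch... */
	return &sscreen->b;
}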