Example 1
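/* Unmap path for texture transfers: release the winsys mapping, write any
 * staging copy back to the real texture on PIPE_TRANSFER_WRITE, and free
 * the transfer object. */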
static void si_texture_transfer_unmap(struct pipe_context *ctx,
				      struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct radeon_winsys_cs_handle *buf;
	struct pipe_resource *texture = transfer->resource;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;

	if (rtransfer->staging) {
		buf = si_resource(rtransfer->staging)->cs_buf;
	} else {
		buf = si_resource(transfer->resource)->cs_buf;
	}
	rctx->ws->buffer_unmap(buf);

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &si_resource(rtransfer->staging)->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}
Example 2
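/* Map path for texture transfers: pick the staging texture, the flushed
 * depth copy, or the resource itself, map it, and return a pointer offset
 * to the requested box (the offset is only applied in the non-staging case). */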
static void* si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_transfer* transfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon_winsys_cs_handle *buf;
	enum pipe_format format = transfer->resource->format;
	unsigned offset = 0;
	char *map;

	if (rtransfer->staging_texture) {
		buf = si_resource(rtransfer->staging_texture)->cs_buf;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture)
			buf = rtex->flushed_depth_texture->resource.cs_buf;
		else
			buf = si_resource(transfer->resource)->cs_buf;

		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
	}

	if (!(map = rctx->ws->buffer_map(buf, rctx->cs, transfer->usage))) {
		return NULL;
	}

	return map + offset;
}
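
The offset computation above converts the transfer box into a byte offset within the linear layout: y is reduced to a row of format blocks and multiplied by the row stride, and x is reduced to whole blocks and multiplied by the block size. Below is a standalone sketch of that arithmetic under stated assumptions: the 4x4-pixel, 8-byte block parameters are stand-ins for what util_format_get_blockwidth/blockheight/blocksize would return for a DXT1-like format, and box_to_byte_offset is a made-up helper, not a driver function.

#include <stdio.h>

/* Stand-ins for the util_format block queries (DXT1-like format assumed). */
static const unsigned blockwidth  = 4;	/* pixels per block, x */
static const unsigned blockheight = 4;	/* pixels per block, y */
static const unsigned blocksize   = 8;	/* bytes per block */

/* Same arithmetic as in si_texture_transfer_map: byte offset of box origin
 * (x, y) inside a linearly laid-out level with the given row stride. */
static unsigned box_to_byte_offset(unsigned x, unsigned y, unsigned stride)
{
	return y / blockheight * stride +
	       x / blockwidth * blocksize;
}

int main(void)
{
	/* A 256-pixel-wide level has 256/4 = 64 blocks per row, so the
	 * stride is 64 * 8 = 512 bytes.  A box starting at (16, 8) begins
	 * 2 * 512 + 4 * 8 = 1056 bytes into the mapping. */
	printf("%u\n", box_to_byte_offset(16, 8, 512));
	return 0;
}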
Example 3
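/* Debug helper: point a buffer at GPU address 0 and then touch it through
 * CP DMA, SDMA, and a shader to provoke VM faults, exiting when done. */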
static void si_test_vmfault(struct si_screen *sscreen)
{
	struct pipe_context *ctx = sscreen->aux_context;
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_resource *buf =
		pipe_buffer_create_const0(&sscreen->b, 0, PIPE_USAGE_DEFAULT, 64);

	if (!buf) {
		puts("Buffer allocation failed.");
		exit(1);
	}

	si_resource(buf)->gpu_address = 0; /* cause a VM fault */

	if (sscreen->debug_flags & DBG(TEST_VMFAULT_CP)) {
		si_cp_dma_copy_buffer(sctx, buf, buf, 0, 4, 4, 0,
				      SI_COHERENCY_NONE, L2_BYPASS);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: CP - done.");
	}
	if (sscreen->debug_flags & DBG(TEST_VMFAULT_SDMA)) {
		si_sdma_clear_buffer(sctx, buf, 0, 4, 0);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: SDMA - done.");
	}
	if (sscreen->debug_flags & DBG(TEST_VMFAULT_SHADER)) {
		util_test_constant_buffer(ctx, buf);
		puts("VM fault test: Shader - done.");
	}
	exit(0);
}
Example 4
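/* Screen destroy hook for buffers: drop the winsys buffer reference and
 * free the resource wrapper. */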
static void r600_buffer_destroy(struct pipe_screen *screen,
                                struct pipe_resource *buf)
{
    struct si_resource *rbuffer = si_resource(buf);

    pb_reference(&rbuffer->buf, NULL);
    FREE(rbuffer);
}
Example 5
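/* Minimal unmap path: find whichever buffer was actually mapped (staging
 * texture, flushed depth copy, or the resource itself) and unmap it. */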
static void si_texture_transfer_unmap(struct pipe_context *ctx,
				      struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct radeon_winsys_cs_handle *buf;

	if (rtransfer->staging_texture) {
		buf = si_resource(rtransfer->staging_texture)->cs_buf;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture) {
			buf = rtex->flushed_depth_texture->resource.cs_buf;
		} else {
			buf = si_resource(transfer->resource)->cs_buf;
		}
	}
	rctx->ws->buffer_unmap(buf);
}
Example 6
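/* Buffer mapping is simpler than texture mapping: map the winsys buffer
 * and offset the returned pointer by the start of the box. */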
static void *r600_buffer_transfer_map(struct pipe_context *pipe,
                                      struct pipe_transfer *transfer)
{
    struct si_resource *rbuffer = si_resource(transfer->resource);
    struct r600_context *rctx = (struct r600_context*)pipe;
    uint8_t *data;

    data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
    if (!data)
        return NULL;

    return (uint8_t*)data + transfer->box.x;
}
Example 7
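/* Unmap path for the staging-texture variant: unmap, copy staging data back
 * on write, push flushed depth data back into the depth texture if needed,
 * and free the transfer. */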
static void si_texture_transfer_unmap(struct pipe_context *ctx,
				      struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct radeon_winsys_cs_handle *buf;
	struct pipe_resource *texture = transfer->resource;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;

	if (rtransfer->staging_texture) {
		buf = si_resource(rtransfer->staging_texture)->cs_buf;
	} else {
		if (rtex->flushed_depth_texture) {
			buf = rtex->flushed_depth_texture->resource.cs_buf;
		} else {
			buf = si_resource(transfer->resource)->cs_buf;
		}
	}
	rctx->ws->buffer_unmap(buf);

	if (rtransfer->staging_texture) {
		if (transfer->usage & PIPE_TRANSFER_WRITE) {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
		pipe_resource_reference(&rtransfer->staging_texture, NULL);
	}

	if (rtex->depth && !rtex->is_flushing_texture) {
		if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture)
			r600_blit_push_depth(ctx, rtex);
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}
Example 8
static void *si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     unsigned level,
				     unsigned usage,
				     const struct pipe_box *box,
				     struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct radeon_winsys_cs_handle *buf;
	enum pipe_format format = texture->format;
	unsigned offset = 0;
	char *map;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR)
		use_staging_texture = TRUE;

	/* XXX: Use a staging texture for uploads if the underlying BO
	 * is busy.  No interface for checking that currently, so do
	 * it eagerly whenever the transfer doesn't require a readback
	 * and might block.
	 */
	if ((usage & PIPE_TRANSFER_WRITE) &&
			!(usage & (PIPE_TRANSFER_READ |
					PIPE_TRANSFER_DONTBLOCK |
					PIPE_TRANSFER_UNSYNCHRONIZED)))
		use_staging_texture = TRUE;

	if (texture->flags & R600_RESOURCE_FLAG_TRANSFER)
		use_staging_texture = FALSE;

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
		return NULL;

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->is_depth) {
		/* XXX: only read back the rectangle which is being mapped? */
		/* XXX: when discard is true, there is no need to read back
		 * from the depth texture. */
		struct r600_resource_texture *staging_depth;

		if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}
		si_blit_uncompress_depth(ctx, rtex, staging_depth,
					 level, level,
					 box->z, box->z + box->depth - 1);
		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->offset = r600_texture_get_offset(staging_depth, level, box->z);

		trans->staging = &staging_depth->resource.b.b;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_resource_texture *staging;

		memset(&resource, 0, sizeof(resource));
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.array_size = 1;
		resource.usage = PIPE_USAGE_STAGING;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;

		/* We must set the correct texture target and dimensions if needed for a 3D transfer. */
		if (box->depth > 1 && util_max_layer(texture, level) > 0)
			resource.target = texture->target;
		else
			resource.target = PIPE_TEXTURE_2D;

		switch (resource.target) {
		case PIPE_TEXTURE_1D_ARRAY:
		case PIPE_TEXTURE_2D_ARRAY:
		case PIPE_TEXTURE_CUBE_ARRAY:
			resource.array_size = box->depth;
			break;
		case PIPE_TEXTURE_3D:
			resource.depth0 = box->depth;
			break;
		default:;
		}
		/* Create the temporary texture. */
		staging = (struct r600_resource_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->staging = &staging->resource.b.b;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			radeonsi_flush(ctx, NULL, 0);
		}
	} else {
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		trans->offset = r600_texture_get_offset(rtex, level, box->z);
	}

	if (trans->staging) {
		buf = si_resource(trans->staging)->cs_buf;
	} else {
		buf = rtex->resource.cs_buf;
	}

	if (rtex->is_depth || !trans->staging)
		offset = trans->offset +
			box->y / util_format_get_blockheight(format) * trans->transfer.stride +
			box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);

	if (!(map = rctx->ws->buffer_map(buf, rctx->cs, usage))) {
		pipe_resource_reference(&trans->staging, NULL);
		pipe_resource_reference(&trans->transfer.resource, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
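
Seen from the caller, the map/unmap pair above is used in the usual Gallium read-back pattern: map a box with PIPE_TRANSFER_READ, copy rows out honouring the stride the driver returned, then unmap. The sketch below is caller-side illustration only, not driver code; read_back_region and its arguments are hypothetical, and the include paths assume the Gallium utility headers of this era.

#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_box.h"
#include "util/u_format.h"

/* Read one 64x64 region from level 0 through the generic transfer hooks. */
static void read_back_region(struct pipe_context *ctx,
			     struct pipe_resource *tex,
			     uint8_t *dst, unsigned dst_stride)
{
	struct pipe_transfer *transfer;
	struct pipe_box box;
	uint8_t *map;

	u_box_2d(0, 0, 64, 64, &box);

	map = ctx->transfer_map(ctx, tex, 0, PIPE_TRANSFER_READ, &box,
				&transfer);
	if (!map)
		return;

	/* Copy row by row; the driver's stride may be larger than ours. */
	for (unsigned y = 0; y < 64; y++)
		memcpy(dst + y * dst_stride,
		       map + y * transfer->stride,
		       64 * util_format_get_blocksize(tex->format));

	ctx->transfer_unmap(ctx, transfer);
}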
Example 9
static void *si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     unsigned level,
				     unsigned usage,
				     const struct pipe_box *box,
				     struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;
	struct r600_transfer *trans;
	int r;
	boolean use_staging_texture = FALSE;
	struct radeon_winsys_cs_handle *buf;
	enum pipe_format format = texture->format;
	unsigned offset = 0;
	char *map;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR)
		use_staging_texture = TRUE;

	/* XXX: Use a staging texture for uploads if the underlying BO
	 * is busy.  No interface for checking that currently, so do
	 * it eagerly whenever the transfer doesn't require a readback
	 * and might block.
	 */
	if ((usage & PIPE_TRANSFER_WRITE) &&
			!(usage & (PIPE_TRANSFER_READ |
					PIPE_TRANSFER_DONTBLOCK |
					PIPE_TRANSFER_UNSYNCHRONIZED)))
		use_staging_texture = TRUE;

	if (!permit_hardware_blit(ctx->screen, texture) ||
		(texture->flags & R600_RESOURCE_FLAG_TRANSFER))
		use_staging_texture = FALSE;

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
		return NULL;

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->depth) {
		/* XXX: only read back the rectangle which is being mapped? */
		/* XXX: when discard is true, there is no need to read back
		 * from the depth texture. */
		r = r600_texture_depth_flush(ctx, texture, FALSE);
		if (r < 0) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}
		trans->transfer.stride = rtex->flushed_depth_texture->surface.level[level].pitch_bytes;
		trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z);
	} else if (use_staging_texture) {
		resource.target = PIPE_TEXTURE_2D;
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.array_size = 1;
		resource.last_level = 0;
		resource.nr_samples = 0;
		resource.usage = PIPE_USAGE_STAGING;
		resource.bind = 0;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;
		/* For texture reading, the temporary (detiled) texture is used as
		 * a render target when blitting from a tiled texture. */
		if (usage & PIPE_TRANSFER_READ) {
			resource.bind |= PIPE_BIND_RENDER_TARGET;
		}
		/* For texture writing, the temporary texture is used as a sampler
		 * when blitting into a tiled texture. */
		if (usage & PIPE_TRANSFER_WRITE) {
			resource.bind |= PIPE_BIND_SAMPLER_VIEW;
		}
		/* Create the temporary texture. */
		trans->staging_texture = ctx->screen->resource_create(ctx->screen, &resource);
		if (trans->staging_texture == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->transfer.stride = ((struct r600_resource_texture *)trans->staging_texture)
					->surface.level[0].pitch_bytes;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			radeonsi_flush(ctx, NULL, 0);
		}
	} else {
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		trans->offset = r600_texture_get_offset(rtex, level, box->z);
	}

	if (trans->staging_texture) {
		buf = si_resource(trans->staging_texture)->cs_buf;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;

		if (rtex->flushed_depth_texture)
			buf = rtex->flushed_depth_texture->resource.cs_buf;
		else
			buf = si_resource(texture)->cs_buf;

		offset = trans->offset +
			box->y / util_format_get_blockheight(format) * trans->transfer.stride +
			box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
	}

	if (!(map = rctx->ws->buffer_map(buf, rctx->cs, usage))) {
		pipe_resource_reference(&trans->staging_texture, NULL);
		pipe_resource_reference(&trans->transfer.resource, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
Example 10
static struct pipe_context *si_create_context(struct pipe_screen *screen,
                                              unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen* sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	int shader, i;
	bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;

	if (!sctx)
		return NULL;

	sctx->has_graphics = sscreen->info.chip_class == SI ||
			     !(flags & PIPE_CONTEXT_COMPUTE_ONLY);

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.screen = screen; /* this must be set first */
	sctx->b.priv = NULL;
	sctx->b.destroy = si_destroy_context;
	sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	slab_create_child(&sctx->pool_transfers, &sscreen->pool_transfers);
	slab_create_child(&sctx->pool_transfers_unsync, &sscreen->pool_transfers);

	sctx->ws = sscreen->ws;
	sctx->family = sscreen->info.family;
	sctx->chip_class = sscreen->info.chip_class;

	if (sscreen->info.has_gpu_reset_counter_query) {
		sctx->gpu_reset_counter =
			sctx->ws->query_value(sctx->ws, RADEON_GPU_RESET_COUNTER);
	}


	if (sctx->chip_class == CIK ||
	    sctx->chip_class == VI ||
	    sctx->chip_class == GFX9) {
		sctx->eop_bug_scratch = si_resource(
			pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * sscreen->info.num_render_backends));
		if (!sctx->eop_bug_scratch)
			goto fail;
	}

	/* Initialize context allocators. */
	sctx->allocator_zeroed_memory =
		u_suballocator_create(&sctx->b, 128 * 1024,
				      0, PIPE_USAGE_DEFAULT,
				      SI_RESOURCE_FLAG_UNMAPPABLE |
				      SI_RESOURCE_FLAG_CLEAR, false);
	if (!sctx->allocator_zeroed_memory)
		goto fail;

	sctx->b.stream_uploader = u_upload_create(&sctx->b, 1024 * 1024,
						    0, PIPE_USAGE_STREAM,
						    SI_RESOURCE_FLAG_READ_ONLY);
	if (!sctx->b.stream_uploader)
		goto fail;

	sctx->cached_gtt_allocator = u_upload_create(&sctx->b, 16 * 1024,
						       0, PIPE_USAGE_STAGING, 0);
	if (!sctx->cached_gtt_allocator)
		goto fail;

	sctx->ctx = sctx->ws->ctx_create(sctx->ws);
	if (!sctx->ctx)
		goto fail;

	if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
		sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
						   (void*)si_flush_dma_cs,
						   sctx, stop_exec_on_failure);
	}

	bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->dma_cs;
	sctx->b.const_uploader = u_upload_create(&sctx->b, 256 * 1024,
						 0, PIPE_USAGE_DEFAULT,
						 SI_RESOURCE_FLAG_32BIT |
						 (use_sdma_upload ?
							  SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA :
							  (sscreen->cpdma_prefetch_writes_memory ?
								   0 : SI_RESOURCE_FLAG_READ_ONLY)));
	if (!sctx->b.const_uploader)
		goto fail;

	if (use_sdma_upload)
		u_upload_enable_flush_explicit(sctx->b.const_uploader);

	sctx->gfx_cs = ws->cs_create(sctx->ctx,
				     sctx->has_graphics ? RING_GFX : RING_COMPUTE,
				     (void*)si_flush_gfx_cs, sctx, stop_exec_on_failure);

	/* Border colors. */
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = si_resource(
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table)));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;

	/* Initialize context functions used by graphics and compute. */
	sctx->b.emit_string_marker = si_emit_string_marker;
	sctx->b.set_debug_callback = si_set_debug_callback;
	sctx->b.set_log_context = si_set_log_context;
	sctx->b.set_context_param = si_set_context_param;
	sctx->b.get_device_reset_status = si_get_reset_status;
	sctx->b.set_device_reset_callback = si_set_device_reset_callback;

	si_init_all_descriptors(sctx);
	si_init_buffer_functions(sctx);
	si_init_clear_functions(sctx);
	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_compute_blit_functions(sctx);
	si_init_debug_functions(sctx);
	si_init_fence_functions(sctx);
	si_init_state_compute_functions(sctx);

	if (sscreen->debug_flags & DBG(FORCE_DMA))
		sctx->b.resource_copy_region = sctx->dma_copy;

	/* Initialize graphics-only context functions. */
	if (sctx->has_graphics) {
		si_init_context_texture_functions(sctx);
		si_init_query_functions(sctx);
		si_init_msaa_functions(sctx);
		si_init_shader_functions(sctx);
		si_init_state_functions(sctx);
		si_init_streamout_functions(sctx);
		si_init_viewport_functions(sctx);

		sctx->blitter = util_blitter_create(&sctx->b);
		if (sctx->blitter == NULL)
			goto fail;
		sctx->blitter->skip_viewport_restore = true;

		si_init_draw_functions(sctx);
	}

	/* Initialize SDMA functions. */
	if (sctx->chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	sctx->sample_mask = 0xffff;

	/* Initialize multimedia functions. */
	if (sscreen->info.has_hw_decode) {
		sctx->b.create_video_codec = si_uvd_create_decoder;
		sctx->b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.create_video_codec = vl_create_decoder;
		sctx->b.create_video_buffer = vl_video_buffer_create;
	}

	if (sctx->chip_class >= GFX9) {
		sctx->wait_mem_scratch = si_resource(
			pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
		if (!sctx->wait_mem_scratch)
			goto fail;

		/* Initialize the memory. */
		si_cp_write_data(sctx, sctx->wait_mem_scratch, 0, 4,
				 V_370_MEM, V_370_ME, &sctx->wait_mem_number);
	}

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->chip_class == CIK) {
		sctx->null_const_buf.buffer =
			pipe_aligned_buffer_create(screen,
						   SI_RESOURCE_FLAG_32BIT,
						   PIPE_USAGE_DEFAULT, 16,
						   sctx->screen->info.tcc_cache_line_size);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		unsigned start_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
		for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.set_constant_buffer(&sctx->b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);
	}

	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
	sctx->scratch_waves = MAX2(32 * sscreen->info.num_good_compute_units,
				   max_threads_per_block / 64);

	si_init_compiler(sscreen, &sctx->compiler);

	/* Bindless handles. */
	sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);
	sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);

	util_dynarray_init(&sctx->resident_tex_handles, NULL);
	util_dynarray_init(&sctx->resident_img_handles, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

	sctx->sample_pos_buffer =
		pipe_buffer_create(sctx->b.screen, 0, PIPE_USAGE_DEFAULT,
				   sizeof(sctx->sample_positions));
	pipe_buffer_write(&sctx->b, sctx->sample_pos_buffer, 0,
			  sizeof(sctx->sample_positions), &sctx->sample_positions);

	/* this must be last */
	si_begin_new_gfx_cs(sctx);

	if (sctx->chip_class == CIK) {
		/* Clear the NULL constant buffer, because loads should return zeros.
		 * Note that this forces CP DMA to be used, because clover deadlocks
		 * for some reason when the compute codepath is used.
		 */
		uint32_t clear_value = 0;
		si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0,
				sctx->null_const_buf.buffer->width0,
				&clear_value, 4, SI_COHERENCY_SHADER, true);
	}
	return &sctx->b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b);
	return NULL;
}
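
The scratch_waves value computed near the end is the one piece of non-obvious arithmetic in this function: take the larger of 32 waves per compute unit and the number of 64-thread waves a single maximal thread group needs. A standalone sketch of that calculation follows; the CU count and thread-group limit are hypothetical example values, not queried from hardware.

#include <stdio.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical device: 36 compute units, 2048 threads per block. */
	unsigned num_good_compute_units = 36;
	unsigned max_threads_per_block = 2048;

	/* Same formula as si_create_context: never fewer waves than one
	 * full thread group needs (64 threads per wave), otherwise 32
	 * waves per CU. */
	unsigned scratch_waves = MAX2(32 * num_good_compute_units,
				      max_threads_per_block / 64);

	printf("scratch_waves = %u\n", scratch_waves);	/* prints 1152 */
	return 0;
}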