Example #1
/* Helper for decompressing a portion of a color or depth resource before
 * blitting if any decompression is needed.
 * The driver doesn't decompress resources automatically while u_blitter is
 * rendering. */
static bool r600_decompress_subresource(struct pipe_context *ctx,
                                        struct pipe_resource *tex,
                                        unsigned level,
                                        unsigned first_layer, unsigned last_layer)
{
    struct r600_context *rctx = (struct r600_context *)ctx;
    struct r600_texture *rtex = (struct r600_texture*)tex;

    if (rtex->db_compatible) {
        if (r600_can_sample_zs(rtex, false)) {
            r600_blit_decompress_depth_in_place(rctx, rtex, false,
                                                level, level,
                                                first_layer, last_layer);
            if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
                r600_blit_decompress_depth_in_place(rctx, rtex, true,
                                                    level, level,
                                                    first_layer, last_layer);
            }
        } else {
            if (!r600_init_flushed_depth_texture(ctx, tex, NULL))
                return false; /* error */

            r600_blit_decompress_depth(ctx, rtex, NULL,
                                       level, level,
                                       first_layer, last_layer,
                                       0, u_max_sample(tex));
        }
    } else if (rtex->cmask.size) {
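        /* Color data carrying CMASK metadata (e.g. after a fast clear) has
         * to be expanded before the blitter can read it. */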
        r600_blit_decompress_color(ctx, rtex, level, level,
                                   first_layer, last_layer);
    }
    return true;
}
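For context, this helper is meant to be called from the driver's blit and copy entry points before u_blitter issues any draws. Below is a condensed, untested sketch of that call pattern, not a verbatim Mesa excerpt: example_blit is a hypothetical name, pipe_blit_info and util_blitter_blit come from standard gallium headers, r600_blitter_begin/r600_blitter_end appear in the other examples on this page, and R600_BLIT is assumed to be one of the blitter-op values alongside the R600_COPY_TEXTURE used elsewhere here.

/* Hypothetical caller: decompress the subresource range the blit will
 * sample, then let u_blitter perform the actual draw. */
static void example_blit(struct pipe_context *ctx,
                         const struct pipe_blit_info *info)
{
    struct r600_context *rctx = (struct r600_context *)ctx;

    if (!r600_decompress_subresource(ctx, info->src.resource, info->src.level,
                                     info->src.box.z,
                                     info->src.box.z + info->src.box.depth - 1))
        return; /* error */

    r600_blitter_begin(ctx, R600_BLIT);
    util_blitter_blit(rctx->blitter, info);
    r600_blitter_end(ctx);
}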
Example #2
File: r600_blit.c Project: iquiw/xsrc
/* Helper for decompressing a portion of a color or depth resource before
 * blitting if any decompression is needed.
 * The driver doesn't decompress resources automatically while u_blitter is
 * rendering. */
static bool r600_decompress_subresource(struct pipe_context *ctx,
					struct pipe_resource *tex,
					unsigned level,
					unsigned first_layer, unsigned last_layer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_texture *rtex = (struct r600_texture*)tex;

	if (rtex->is_depth && !rtex->is_flushing_texture) {
		if (rctx->b.chip_class >= EVERGREEN ||
		    r600_can_read_depth(rtex)) {
			r600_blit_decompress_depth_in_place(rctx, rtex,
						   level, level,
						   first_layer, last_layer);
		} else {
			if (!r600_init_flushed_depth_texture(ctx, tex, NULL))
				return false; /* error */

			r600_blit_decompress_depth(ctx, rtex, NULL,
						   level, level,
						   first_layer, last_layer,
						   0, u_max_sample(tex));
		}
	} else if (rtex->cmask.size) {
		r600_blit_decompress_color(ctx, rtex, level, level,
					   first_layer, last_layer);
	}
	return true;
}
Example #3
static void r600_copy_first_sample(struct pipe_context *ctx,
				   const struct pipe_resolve_info *info)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture*)info->src.res;
	struct pipe_surface *dst_view, dst_templ;
	struct pipe_sampler_view src_templ, *src_view;
	struct pipe_box box;

	if (rsrc->is_depth && !rsrc->is_flushing_texture) {
		if (!r600_init_flushed_depth_texture(ctx, info->src.res, NULL))
			return; /* error */

		/* Decompress the first sample only. */
		r600_blit_decompress_depth(ctx, rsrc, NULL,
					   0, 0,
					   info->src.layer, info->src.layer,
					   0, 0);
	}
	if (rctx->chip_class != CAYMAN && rsrc->fmask_size && rsrc->cmask_size) {
		r600_blit_decompress_color(ctx, rsrc,
					   0, 0,
					   info->src.layer, info->src.layer);
	}

	/* this is correct for upside-down blits too */
	u_box_2d(info->src.x0,
		 info->src.y0,
		 info->src.x1 - info->src.x0,
		 info->src.y1 - info->src.y0, &box);

	/* Initialize the surface. */
	util_blitter_default_dst_texture(&dst_templ, info->dst.res,
					 info->dst.level, info->dst.layer, &box);
	dst_view = ctx->create_surface(ctx, info->dst.res, &dst_templ);

	/* Initialize the sampler view. */
	util_blitter_default_src_texture(&src_templ, info->src.res, 0);
	src_view = ctx->create_sampler_view(ctx, info->src.res, &src_templ);

	/* Copy the first sample into dst. */
	r600_blitter_begin(ctx, R600_COPY_TEXTURE);
	util_blitter_copy_texture_view(rctx->blitter, dst_view, ~0, info->dst.x0,
				       info->dst.y0, src_view, 0, &box,
				       info->src.res->width0, info->src.res->height0,
				       info->mask);
	r600_blitter_end(ctx);

	pipe_surface_reference(&dst_view, NULL);
	pipe_sampler_view_reference(&src_view, NULL);
}
Example #4
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		use_staging_texture = TRUE;
	} else if ((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_MAP_DIRECTLY) &&
	    (rtex->resource.domains == RADEON_DOMAIN_VRAM)) {
		/* Untiled buffers in VRAM, which is slow for CPU reads */
		use_staging_texture = TRUE;
	} else if (!(usage & PIPE_TRANSFER_READ) &&
	    (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
	     !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
		/* Use a staging texture for uploads if the underlying BO is busy. */
		use_staging_texture = TRUE;
	}

	if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
		use_staging_texture = FALSE;
	}

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
		return NULL;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference((struct pipe_resource**)&temp, NULL);
			}
		}
		else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
		}
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
	}

	if (trans->staging) {
		buf = trans->staging;
		if (!rtex->is_depth && !(usage & PIPE_TRANSFER_READ))
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	} else {
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
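The branching at the top of this transfer_map boils down to one question: can the CPU touch the resource directly, or does the data have to go through a staging texture? The sketch below restates that decision with self-contained, hypothetical types; transfer_request and needs_staging are illustration only, not Mesa code.

#include <stdbool.h>

struct transfer_request {          /* hypothetical stand-in for the real state */
	bool tiled;                /* surface uses a 1D/2D tiled layout */
	bool read;                 /* PIPE_TRANSFER_READ requested */
	bool map_directly;         /* PIPE_TRANSFER_MAP_DIRECTLY requested */
	bool in_vram;              /* resource lives in uncached VRAM */
	bool bo_busy;              /* GPU is still using the buffer */
	bool is_transfer_resource; /* already a staging/transfer resource */
};

static bool needs_staging(const struct transfer_request *t)
{
	if (t->is_transfer_resource)
		return false;  /* never stage a staging resource */
	if (t->tiled)
		return true;   /* the CPU cannot address tiled layouts directly */
	if (t->read && !t->map_directly && t->in_vram)
		return true;   /* CPU reads from uncached VRAM are slow */
	if (!t->read && t->bo_busy)
		return true;   /* avoid stalling a write on a busy BO */
	return false;
}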
Example #5
File: r600_blit.c Project: dezelin/mesa
static void r600_resource_copy_region(struct pipe_context *ctx,
                                      struct pipe_resource *dst,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct pipe_resource *src,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
    struct r600_context *rctx = (struct r600_context *)ctx;
    struct r600_texture *rsrc = (struct r600_texture*)src;
    struct texture_orig_info orig_info[2];
    struct pipe_box sbox;
    const struct pipe_box *psbox = src_box;
    boolean restore_orig[2];
    unsigned last_sample, i;

    memset(orig_info, 0, sizeof(orig_info));

    /* Handle buffers first. */
    if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
        r600_copy_buffer(ctx, dst, dstx, src, src_box);
        return;
    }

    assert(u_max_sample(dst) == u_max_sample(src));
    last_sample = u_max_sample(dst);

    /* This must be done before entering u_blitter to avoid recursion. */
    if (rsrc->is_depth && !rsrc->is_flushing_texture) {
        if (!r600_init_flushed_depth_texture(ctx, src, NULL))
            return; /* error */

        r600_blit_uncompress_depth(ctx, rsrc, NULL,
                                   src_level, src_level,
                                   src_box->z, src_box->z + src_box->depth - 1,
                                   0, u_max_sample(src));
    }

    restore_orig[0] = restore_orig[1] = FALSE;

    if (util_format_is_compressed(src->format) &&
            util_format_is_compressed(dst->format)) {
        r600_compressed_to_blittable(src, src_level, &orig_info[0]);
        restore_orig[0] = TRUE;
        sbox.x = util_format_get_nblocksx(orig_info[0].format, src_box->x);
        sbox.y = util_format_get_nblocksy(orig_info[0].format, src_box->y);
        sbox.z = src_box->z;
        sbox.width = util_format_get_nblocksx(orig_info[0].format, src_box->width);
        sbox.height = util_format_get_nblocksy(orig_info[0].format, src_box->height);
        sbox.depth = src_box->depth;
        psbox = &sbox;

        r600_compressed_to_blittable(dst, dst_level, &orig_info[1]);
        restore_orig[1] = TRUE;
        /* translate the dst box as well */
        dstx = util_format_get_nblocksx(orig_info[1].format, dstx);
        dsty = util_format_get_nblocksy(orig_info[1].format, dsty);
    } else if (!util_blitter_is_copy_supported(rctx->blitter, dst, src,
               PIPE_MASK_RGBAZS)) {
        if (util_format_is_subsampled_2x1_32bpp(src->format) &&
                util_format_is_subsampled_2x1_32bpp(dst->format)) {
            r600_subsampled_2x1_32bpp_to_blittable(src, src_level, &orig_info[0]);
            r600_subsampled_2x1_32bpp_to_blittable(dst, dst_level, &orig_info[1]);

            sbox = *src_box;
            sbox.x = util_format_get_nblocksx(orig_info[0].format, src_box->x);
            sbox.width = util_format_get_nblocksx(orig_info[0].format, src_box->width);
            psbox = &sbox;

            dstx = util_format_get_nblocksx(orig_info[1].format, dstx);
        } else {
            unsigned blocksize = util_format_get_blocksize(src->format);

            switch (blocksize) {
            case 1:
                r600_change_format(src, src_level, &orig_info[0],
                                   PIPE_FORMAT_R8_UNORM);
                r600_change_format(dst, dst_level, &orig_info[1],
                                   PIPE_FORMAT_R8_UNORM);
                break;
            case 4:
                r600_change_format(src, src_level, &orig_info[0],
                                   PIPE_FORMAT_R8G8B8A8_UNORM);
                r600_change_format(dst, dst_level, &orig_info[1],
                                   PIPE_FORMAT_R8G8B8A8_UNORM);
                break;
            default:
                fprintf(stderr, "Unhandled format %s with blocksize %u\n",
                        util_format_short_name(src->format), blocksize);
                assert(0);
            }
        }
        restore_orig[0] = TRUE;
        restore_orig[1] = TRUE;
    }

    for (i = 0; i <= last_sample; i++) {
        r600_blitter_begin(ctx, R600_COPY_TEXTURE);
        util_blitter_copy_texture(rctx->blitter, dst, dst_level, 1 << i, dstx, dsty, dstz,
                                  src, src_level, i, psbox);
        r600_blitter_end(ctx);
    }

    if (restore_orig[0])
        r600_reset_blittable_to_orig(src, src_level, &orig_info[0]);

    if (restore_orig[1])
        r600_reset_blittable_to_orig(dst, dst_level, &orig_info[1]);
}
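The compressed-format path above reinterprets both surfaces in a per-block "blittable" format, so every coordinate and extent must be converted from pixels to blocks; that is what the util_format_get_nblocksx/nblocksy calls do. A minimal, self-contained arithmetic sketch for a hypothetical 4x4 block-compressed format (all numbers are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned block_w = 4, block_h = 4;      /* block size in pixels (DXT-style) */
	unsigned x = 16, y = 8, w = 62, h = 30; /* source box, in pixels */

	/* util_format_get_nblocksx/y round up, which also covers boxes whose
	 * size is not a multiple of the block size. */
	unsigned bx = (x + block_w - 1) / block_w; /* 4  (block-aligned x divides exactly) */
	unsigned by = (y + block_h - 1) / block_h; /* 2  */
	unsigned bw = (w + block_w - 1) / block_w; /* 16 */
	unsigned bh = (h + block_h - 1) / block_h; /* 8  */

	printf("block box: origin (%u,%u), size %ux%u\n", bx, by, bw, bh);
	return 0;
}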
Example #6
static void *si_texture_transfer_map(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     unsigned level,
				     unsigned usage,
				     const struct pipe_box *box,
				     struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct radeon_winsys_cs_handle *buf;
	enum pipe_format format = texture->format;
	unsigned offset = 0;
	char *map;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    rtex->surface.level[level].mode != RADEON_SURF_MODE_LINEAR)
		use_staging_texture = TRUE;

	/* XXX: Use a staging texture for uploads if the underlying BO
	 * is busy.  No interface for checking that currently? so do
	 * it eagerly whenever the transfer doesn't require a readback
	 * and might block.
	 */
	if ((usage & PIPE_TRANSFER_WRITE) &&
			!(usage & (PIPE_TRANSFER_READ |
					PIPE_TRANSFER_DONTBLOCK |
					PIPE_TRANSFER_UNSYNCHRONIZED)))
		use_staging_texture = TRUE;

	if (texture->flags & R600_RESOURCE_FLAG_TRANSFER)
		use_staging_texture = FALSE;

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY))
		return NULL;

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->is_depth) {
		/* XXX: only readback the rectangle which is being mapped?
		*/
		/* XXX: when discard is true, no need to read back from depth texture
		*/
		struct r600_resource_texture *staging_depth;

		if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}
		si_blit_uncompress_depth(ctx, rtex, staging_depth,
					 level, level,
					 box->z, box->z + box->depth - 1);
		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->offset = r600_texture_get_offset(staging_depth, level, box->z);

		trans->staging = &staging_depth->resource.b.b;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_resource_texture *staging;

		memset(&resource, 0, sizeof(resource));
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.array_size = 1;
		resource.usage = PIPE_USAGE_STAGING;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;

		/* We must set the correct texture target and dimensions if needed for a 3D transfer. */
		if (box->depth > 1 && util_max_layer(texture, level) > 0)
			resource.target = texture->target;
		else
			resource.target = PIPE_TEXTURE_2D;

		switch (resource.target) {
		case PIPE_TEXTURE_1D_ARRAY:
		case PIPE_TEXTURE_2D_ARRAY:
		case PIPE_TEXTURE_CUBE_ARRAY:
			resource.array_size = box->depth;
			break;
		case PIPE_TEXTURE_3D:
			resource.depth0 = box->depth;
			break;
		default:;
		}
		/* Create the temporary texture. */
		staging = (struct r600_resource_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->staging = &staging->resource.b.b;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			radeonsi_flush(ctx, NULL, 0);
		}
	} else {
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		trans->offset = r600_texture_get_offset(rtex, level, box->z);
	}

	if (trans->staging) {
		buf = si_resource(trans->staging)->cs_buf;
	} else {
		buf = rtex->resource.cs_buf;
	}

	if (rtex->is_depth || !trans->staging)
		offset = trans->offset +
			box->y / util_format_get_blockheight(format) * trans->transfer.stride +
			box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);

	if (!(map = rctx->ws->buffer_map(buf, rctx->cs, usage))) {
		pipe_resource_reference(&trans->staging, NULL);
		pipe_resource_reference(&trans->transfer.resource, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}
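Right before buffer_map, the code above adds a sub-rectangle offset to the start of the mip level: whole rows are skipped via the transfer stride and pixels within a row via the block size. A small worked example with made-up numbers for a linear RGBA8 surface (1x1 blocks, 4 bytes each):

#include <stdio.h>

int main(void)
{
	unsigned level_offset = 65536; /* trans->offset: start of the mip level */
	unsigned stride = 1024;        /* bytes per row (transfer.stride) */
	unsigned block_w = 1, block_h = 1, block_size = 4; /* RGBA8 */
	unsigned x = 10, y = 3;        /* box origin inside the level */

	unsigned offset = level_offset
	                + (y / block_h) * stride       /* skip 3 whole rows */
	                + (x / block_w) * block_size;  /* skip 10 pixels in the row */

	printf("map offset = %u bytes\n", offset);     /* 65536 + 3072 + 40 = 68648 */
	return 0;
}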
Example #7
File: si_blit.c Project: Kalamatee/mesa
static void
si_flush_depth_texture(struct si_context *sctx,
		       struct r600_texture *tex,
		       unsigned required_planes,
		       unsigned first_level, unsigned last_level,
		       unsigned first_layer, unsigned last_layer)
{
	unsigned inplace_planes = 0;
	unsigned copy_planes = 0;
	unsigned level_mask = u_bit_consecutive(first_level, last_level - first_level + 1);
	unsigned levels_z = 0;
	unsigned levels_s = 0;

	if (required_planes & PIPE_MASK_Z) {
		levels_z = level_mask & tex->dirty_level_mask;

		if (levels_z) {
			if (r600_can_sample_zs(tex, false))
				inplace_planes |= PIPE_MASK_Z;
			else
				copy_planes |= PIPE_MASK_Z;
		}
	}
	if (required_planes & PIPE_MASK_S) {
		levels_s = level_mask & tex->stencil_dirty_level_mask;

		if (levels_s) {
			if (r600_can_sample_zs(tex, true))
				inplace_planes |= PIPE_MASK_S;
			else
				copy_planes |= PIPE_MASK_S;
		}
	}

	/* We may have to allocate the flushed texture here when called from
	 * si_decompress_subresource.
	 */
	if (copy_planes &&
	    (tex->flushed_depth_texture ||
	     r600_init_flushed_depth_texture(&sctx->b.b, &tex->resource.b.b, NULL))) {
		struct r600_texture *dst = tex->flushed_depth_texture;
		unsigned fully_copied_levels;
		unsigned levels = 0;

		assert(tex->flushed_depth_texture);

		if (util_format_is_depth_and_stencil(dst->resource.b.b.format))
			copy_planes = PIPE_MASK_Z | PIPE_MASK_S;

		if (copy_planes & PIPE_MASK_Z) {
			levels |= levels_z;
			levels_z = 0;
		}
		if (copy_planes & PIPE_MASK_S) {
			levels |= levels_s;
			levels_s = 0;
		}

		fully_copied_levels = si_blit_dbcb_copy(
			sctx, tex, dst, copy_planes, levels,
			first_layer, last_layer,
			0, u_max_sample(&tex->resource.b.b));

		if (copy_planes & PIPE_MASK_Z)
			tex->dirty_level_mask &= ~fully_copied_levels;
		if (copy_planes & PIPE_MASK_S)
			tex->stencil_dirty_level_mask &= ~fully_copied_levels;
	}

	if (inplace_planes) {
		si_blit_decompress_zs_in_place(
			sctx, tex,
			levels_z, levels_s,
			first_layer, last_layer);
	}
}
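The level bookkeeping above relies on u_bit_consecutive() to turn the [first_level, last_level] range into a bit mask, which is then intersected with the texture's dirty-level masks so that only levels that are actually dirty get flushed. A self-contained sketch with made-up values; bit_consecutive is a local stand-in for gallium's u_bit_consecutive:

#include <stdio.h>

static unsigned bit_consecutive(unsigned start, unsigned count)
{
	/* same idea as gallium's u_bit_consecutive() */
	return (count == 32 ? ~0u : (1u << count) - 1) << start;
}

int main(void)
{
	unsigned first_level = 1, last_level = 3;
	unsigned dirty_level_mask = 0xd; /* levels 0, 2 and 3 are dirty */

	unsigned level_mask = bit_consecutive(first_level,
	                                      last_level - first_level + 1); /* 0b1110 */
	unsigned levels_to_flush = level_mask & dirty_level_mask;            /* 0b1100 */

	printf("flush levels mask = 0x%x\n", levels_to_flush); /* 0xc: levels 2 and 3 */
	return 0;
}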