/* Unmap a previously mapped transfer.
 *
 * For PIPE_TRANSFER_WRITE transfers that were not mapped in place, the
 * staging buffer (ptrans->buffer) is written back into the resource's BO:
 * tiled into place for uncompressed tiled layouts, or copied linearly for
 * linear / compressed layouts. The staging buffer is then freed and the
 * transfer object is returned to the context's slab pool.
 */
static void etna_pipe_transfer_unmap(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer_)
{
   struct etna_pipe_context *priv = etna_pipe_context(pipe);
   struct etna_transfer *ptrans = etna_transfer(transfer_);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer and free the old one using a fenced free.
    * The most tricky case to implement will be: tiled or supertiled surface,
    * partial write, target not aligned to 4/64
    */
   struct etna_resource *resource = etna_resource(ptrans->base.resource);
   assert(ptrans->base.level <= resource->base.last_level);

   if(ptrans->base.usage & PIPE_TRANSFER_WRITE)
   {
      /* write back: only needed when a separate staging buffer was used;
       * in-place maps wrote directly through the BO mapping. */
      if(unlikely(!ptrans->in_place))
      {
         /* map buffer object and offset to the mip level being updated */
         struct etna_resource_level *res_level = &resource->levels[ptrans->base.level];
         void *mapped = etna_bo_map(resource->bo) + res_level->offset;
         if(resource->layout == ETNA_LAYOUT_LINEAR || resource->layout == ETNA_LAYOUT_TILED)
         {
            if(resource->layout == ETNA_LAYOUT_TILED && !util_format_is_compressed(resource->base.format))
            {
               /* Re-tile the linear staging data into the tiled destination.
                * Note only one layer (box.z) is handled here; presumably the
                * tiled path is never used with depth > 1 — TODO confirm. */
               etna_texture_tile(mapped + ptrans->base.box.z * res_level->layer_stride,
                                 ptrans->buffer,
                                 ptrans->base.box.x, ptrans->base.box.y,
                                 res_level->stride,
                                 ptrans->base.box.width, ptrans->base.box.height,
                                 ptrans->base.stride,
                                 util_format_get_blocksize(resource->base.format));
            } else { /* non-tiled or compressed format: straight box copy */
               util_copy_box(mapped, resource->base.format,
                             res_level->stride, res_level->layer_stride,
                             ptrans->base.box.x, ptrans->base.box.y, ptrans->base.box.z,
                             ptrans->base.box.width, ptrans->base.box.height, ptrans->base.box.depth,
                             ptrans->buffer,
                             ptrans->base.stride, ptrans->base.layer_stride,
                             0, 0, 0 /* src x,y,z */);
            }
         } else
         {
            /* supertiled and multi-tiled layouts are not handled here */
            BUG("unsupported tiling %i", resource->layout);
         }
         /* staging buffer is owned by the transfer; release it now that its
          * contents have been written back */
         FREE(ptrans->buffer);
      }
      if(resource->base.bind & PIPE_BIND_SAMPLER_VIEW)
      {
         /* XXX do we need to flush the CPU cache too or start a write barrier
          * to make sure the GPU sees it?
          */
         priv->dirty_bits |= ETNA_STATE_TEXTURE_CACHES;
      }
   }

   util_slab_free(&priv->transfer_pool, ptrans);
}
/* Unmap a previously mapped transfer (newer etnaviv context path).
 *
 * Write-back strategy, in order:
 *  - If the map went through a temporary GPU resource (trans->rsc), finish
 *    CPU access on its BO first, then blit the modified box back into the
 *    base resource via etna_copy_resource_box.
 *  - Else if a CPU staging buffer was used (trans->staging), tile or copy
 *    its contents into the directly-mapped BO and free the staging buffer.
 * Writes bump the resource seqno and mark texture caches dirty for sampler
 * views. Finally, CPU access on the base BO is released (unless the map was
 * unsynchronized), references are dropped, and the transfer is returned to
 * the slab pool.
 */
static void etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer
    * and free the old one using a fenced free.
    * The most tricky case to implement will be: tiled or supertiled surface,
    * partial write, target not aligned to 4/64.
    */
   assert(ptrans->level <= rsc->base.last_level);

   /* If a linked texture-layout copy of this resource exists and is at least
    * as new as this one, operate on that copy instead. */
   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, must push them
    * back into GPU domain before the RS execs the blit to the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status.
          */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level,
                                &ptrans->box);
      } else if (trans->staging) {
         /* map buffer object and offset to the mip level being updated */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];
         void *mapped = etna_bo_map(rsc->bo) + res_level->offset;

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            /* re-tile the linear staging data into the tiled destination;
             * only layer box.z is handled here */
            etna_texture_tile(
               mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            /* linear destination: plain 3D box copy from staging */
            util_copy_box(mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            /* super/multi-tiled maps should have gone through trans->rsc */
            BUG("unsupported tiling %i", rsc->layout);
         }

         /* staging buffer is owned by the transfer; free after write-back */
         FREE(trans->staging);
      }

      /* bump sequence number so linked resources can detect staleness */
      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /*
    * Transfers without a temporary are only pulled into the CPU domain if they
    * are not mapped unsynchronized. If they are, must push them back into GPU
    * domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   /* drop references taken at map time before recycling the transfer */
   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}