Example #1
static void virgl_texture_transfer_unmap(struct pipe_context *ctx,
                                         struct pipe_transfer *transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_transfer *trans = virgl_transfer(transfer);
   struct virgl_texture *vtex = virgl_texture(transfer->resource);
   uint32_t l_stride;

   if (transfer->resource->target != PIPE_TEXTURE_3D &&
       transfer->resource->target != PIPE_TEXTURE_CUBE &&
       transfer->resource->target != PIPE_TEXTURE_1D_ARRAY &&
       transfer->resource->target != PIPE_TEXTURE_2D_ARRAY &&
       transfer->resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      l_stride = 0;
   else
      l_stride = trans->base.layer_stride;

   if (trans->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         struct virgl_screen *vs = virgl_screen(ctx->screen);
         vtex->base.clean = FALSE;
         vctx->num_transfers++;
         vs->vws->transfer_put(vs->vws, vtex->base.hw_res,
                               &transfer->box, trans->base.stride,
                               l_stride, trans->offset, transfer->level);
      }
   }

   if (trans->resolve_tmp)
      pipe_resource_reference((struct pipe_resource **)&trans->resolve_tmp, NULL);

   util_slab_free(&vctx->texture_transfer_pool, trans);
}
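Every example on this page returns a fixed-size object to a util_slab_mempool. For context, here is a minimal sketch of the matching pool lifecycle, assuming the old Mesa u_slab.h API (util_slab_create / util_slab_destroy); the helper names are hypothetical:

#include "util/u_slab.h"

/* Hypothetical setup: create the pool once per context with the object
 * size handed out by util_slab_alloc and recycled by util_slab_free,
 * then tear the pool down when the context is destroyed. */
static void example_create_transfer_pool(struct virgl_context *vctx)
{
   /* 16 transfers per slab block; single-threaded context assumed. */
   util_slab_create(&vctx->texture_transfer_pool,
                    sizeof(struct virgl_transfer),
                    16, UTIL_SLAB_SINGLETHREADED);
}

static void example_destroy_transfer_pool(struct virgl_context *vctx)
{
   /* Releases all blocks at once; any transfer still allocated from
    * the pool becomes invalid. */
   util_slab_destroy(&vctx->texture_transfer_pool);
}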
Example #2
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	util_slab_free(&ctx->transfer_pool, ptrans);
}
Example #3
static void
vc4_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
    struct vc4_context *vc4 = vc4_context(pctx);
    struct vc4_transfer *trans = vc4_transfer(ptrans);
    struct pipe_resource *prsc = ptrans->resource;
    struct vc4_resource *rsc = vc4_resource(prsc);
    struct vc4_resource_slice *slice = &rsc->slices[ptrans->level];

    if (trans->map) {
        if (ptrans->usage & PIPE_TRANSFER_WRITE) {
            vc4_store_tiled_image(rsc->bo->map + slice->offset +
                                  ptrans->box.z * rsc->cube_map_stride,
                                  slice->stride,
                                  trans->map, ptrans->stride,
                                  slice->tiling, rsc->cpp,
                                  &ptrans->box);
        }
        free(trans->map);
    }

    pipe_resource_reference(&ptrans->resource, NULL);
    util_slab_free(&vc4->transfer_pool, ptrans);
}
Example #4
void r600_compute_global_transfer_destroy(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	util_slab_free(&rctx->pool_transfers, transfer);
}
Example #5
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);
	struct fd_transfer *trans = fd_transfer(ptrans);

	if (trans->staging && !(ptrans->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
		struct pipe_box box;
		u_box_2d(0, 0, ptrans->box.width, ptrans->box.height, &box);
		fd_resource_flush(trans, &box);
	}

	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		fd_bo_cpu_fini(rsc->bo);
		if (rsc->stencil)
			fd_bo_cpu_fini(rsc->stencil->bo);
	}

	util_range_add(&rsc->valid_buffer_range,
				   ptrans->box.x,
				   ptrans->box.x + ptrans->box.width);

	pipe_resource_reference(&ptrans->resource, NULL);

	/* Free the staging copy before returning the transfer to the slab:
	 * trans aliases ptrans, so it must not be read after util_slab_free(). */
	free(trans->staging);

	util_slab_free(&ctx->transfer_pool, ptrans);
}
Example #6
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		if (rtransfer->transfer.usage & PIPE_TRANSFER_WRITE) {
			struct pipe_resource *dst, *src;
			unsigned soffset, doffset, size;
			struct pipe_box box;

			dst = transfer->resource;
			src = &rtransfer->staging->b.b;
			size = transfer->box.width;
			doffset = transfer->box.x;
			soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;

			u_box_1d(soffset, size, &box);

			/* Copy the staging buffer into the original one. */
			rctx->dma_copy(ctx, dst, 0, doffset, 0, 0, src, 0, &box);
		}
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
	}

	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
			       transfer->box.x + transfer->box.width);
	}
	util_slab_free(&rctx->pool_transfers, transfer);
}
Example #7
static void
i915_transfer_destroy(struct pipe_context *pipe,
                      struct pipe_transfer *transfer)
{
    struct i915_context *i915 = i915_context(pipe);
    util_slab_free(&i915->transfer_pool, transfer);
}
Example #8
static void r300_buffer_transfer_unmap( struct pipe_context *pipe,
                                        struct pipe_transfer *transfer )
{
    struct r300_context *r300 = r300_context(pipe);

    util_slab_free(&r300->pool_transfers, transfer);
}
Example #9
static void etna_pipe_transfer_unmap(struct pipe_context *pipe,
                      struct pipe_transfer *transfer_)
{
    struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_transfer *ptrans = etna_transfer(transfer_);

    /* XXX
     * When writing to a resource that is already in use, replace the resource with a completely new buffer
     * and free the old one using a fenced free.
     * The most tricky case to implement will be: tiled or supertiled surface, partial write, target not aligned to 4/64
     */
    struct etna_resource *resource = etna_resource(ptrans->base.resource);
    assert(ptrans->base.level <= resource->base.last_level);

    if(ptrans->base.usage & PIPE_TRANSFER_WRITE)
    {
        /* write back */
        if(unlikely(!ptrans->in_place))
        {
            /* map buffer object */
            struct etna_resource_level *res_level = &resource->levels[ptrans->base.level];
            void *mapped = etna_bo_map(resource->bo) + res_level->offset;
            if(resource->layout == ETNA_LAYOUT_LINEAR || resource->layout == ETNA_LAYOUT_TILED)
            {
                if(resource->layout == ETNA_LAYOUT_TILED && !util_format_is_compressed(resource->base.format))
                {
                    etna_texture_tile(mapped + ptrans->base.box.z * res_level->layer_stride, ptrans->buffer,
                            ptrans->base.box.x, ptrans->base.box.y, res_level->stride,
                            ptrans->base.box.width, ptrans->base.box.height, ptrans->base.stride,
                            util_format_get_blocksize(resource->base.format));
                } else { /* non-tiled or compressed format */
                    util_copy_box(mapped,
                      resource->base.format,
                      res_level->stride, res_level->layer_stride,
                      ptrans->base.box.x, ptrans->base.box.y, ptrans->base.box.z,
                      ptrans->base.box.width, ptrans->base.box.height, ptrans->base.box.depth,
                      ptrans->buffer,
                      ptrans->base.stride, ptrans->base.layer_stride,
                      0, 0, 0 /* src x,y,z */);
                }
            } else
            {
                BUG("unsupported tiling %i", resource->layout);
            }
            FREE(ptrans->buffer);
        }
        if(resource->base.bind & PIPE_BIND_SAMPLER_VIEW)
        {
            /* XXX do we need to flush the CPU cache too or start a write barrier
             * to make sure the GPU sees it? */
            priv->dirty_bits |= ETNA_STATE_TEXTURE_CACHES;
        }
    }

    util_slab_free(&priv->transfer_pool, ptrans);
}
Example #10
/**
 * Clean up the toy compiler.
 */
void
toy_compiler_cleanup(struct toy_compiler *tc)
{
   struct toy_inst *inst, *next;

   LIST_FOR_EACH_ENTRY_SAFE(inst, next, &tc->instructions, list)
      util_slab_free(&tc->mempool, inst);

   util_slab_destroy(&tc->mempool);
}
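The _SAFE iterator matters here: each node is freed inside the loop body, so the next pointer must be fetched before util_slab_free recycles the instruction. A hedged sketch of the allocation side this cleanup pairs with (the helper name is hypothetical, assuming the usual Mesa list helpers from u_double_list.h):

/* Hypothetical allocation path: instructions come from the same slab
 * pool and are chained onto tc->instructions, the list that
 * toy_compiler_cleanup() above walks and frees. */
static struct toy_inst *example_alloc_inst(struct toy_compiler *tc)
{
   struct toy_inst *inst = util_slab_alloc(&tc->mempool);
   if (!inst)
      return NULL;

   memset(inst, 0, sizeof(*inst));
   list_addtail(&inst->list, &tc->instructions);
   return inst;
}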
Example #11
static void
destroy_periods(struct fd_context *ctx, struct list_head *list)
{
	struct fd_hw_sample_period *period, *s;
	LIST_FOR_EACH_ENTRY_SAFE(period, s, list, list) {
		fd_hw_sample_reference(ctx, &period->start, NULL);
		fd_hw_sample_reference(ctx, &period->end, NULL);
		list_del(&period->list);
		util_slab_free(&ctx->sample_period_pool, period);
	}
}
Example #12
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);
	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
		fd_bo_cpu_fini(rsc->bo);
	pipe_resource_reference(&ptrans->resource, NULL);
	util_slab_free(&ctx->transfer_pool, ptrans);
}
Example #13
void *r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)resource;
	uint32_t* map;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);

	COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
			"width = %u, height = %u, depth = %u)\n", level, usage,
			box->x, box->y, box->z, box->width, box->height,
			box->depth);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;

	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	/* TODO: do this better; mapping is not possible if the pool is too big */

	if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
		util_slab_free(&rctx->pool_transfers, transfer);
		return NULL;
	}

	*ptransfer = transfer;

	COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
		"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}
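The map side above allocates the pipe_transfer from pool_transfers and the unmap side (Example #17) returns it, so a caller must pair the two on every successful map. A minimal caller sketch, assuming the Gallium transfer_map/transfer_unmap context hooks of this era; the helper is hypothetical:

/* Hypothetical upload helper: every transfer_map that succeeds must be
 * matched by a transfer_unmap, which is what hands the slab-allocated
 * transfer object back to the pool. */
static void example_buffer_upload(struct pipe_context *ctx,
                                  struct pipe_resource *res,
                                  const void *data, unsigned size)
{
   struct pipe_transfer *xfer;
   struct pipe_box box;
   void *map;

   u_box_1d(0, size, &box);
   map = ctx->transfer_map(ctx, res, 0, PIPE_TRANSFER_WRITE, &box, &xfer);
   if (!map)
      return;

   memcpy(map, data, size);
   ctx->transfer_unmap(ctx, xfer); /* frees xfer back to the slab pool */
}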
Example #14
static void r300_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf = r300_resource(buf);

    if (rbuf->constant_buffer)
        FREE(rbuf->constant_buffer);

    if (rbuf->buf)
        pb_reference(&rbuf->buf, NULL);

    util_slab_free(&r300screen->pool_buffers, rbuf);
}
Example #15
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	util_slab_free(&rctx->pool_transfers, transfer);
}
Example #16
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
					struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)pipe;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (rtransfer->staging) {
		struct pipe_box box;
		u_box_1d(transfer->box.x % R600_MAP_BUFFER_ALIGNMENT, transfer->box.width, &box);

		/* Copy the staging buffer into the original one. */
		r600_copy_buffer(pipe, transfer->resource, transfer->box.x,
				 &rtransfer->staging->b.b, &box);
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
	}
	util_slab_free(&rctx->pool_transfers, transfer);
}
Example #17
void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	struct r600_context *ctx = NULL;
	struct r600_resource_global* buffer = NULL;

	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	ctx = (struct r600_context *)ctx_;
	buffer = (struct r600_resource_global*)transfer->resource;

	COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
	util_slab_free(&ctx->pool_transfers, transfer);
}
Example #18
static void virgl_buffer_transfer_unmap(struct pipe_context *ctx,
                                        struct pipe_transfer *transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_transfer *trans = virgl_transfer(transfer);
   struct virgl_buffer *vbuf = virgl_buffer(transfer->resource);

   if (trans->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         struct virgl_screen *vs = virgl_screen(ctx->screen);
         vbuf->base.clean = FALSE;
         vctx->num_transfers++;
         vs->vws->transfer_put(vs->vws, vbuf->base.hw_res,
                               &transfer->box, trans->base.stride,
                               trans->base.layer_stride, trans->offset,
                               transfer->level);
      }
   }

   util_slab_free(&vctx->texture_transfer_pool, trans);
}
Example #19
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf;
    unsigned alignment = 16;

    rbuf = util_slab_alloc(&r300screen->pool_buffers);

    rbuf->b.b = *templ;
    rbuf->b.vtbl = &r300_buffer_vtbl;
    pipe_reference_init(&rbuf->b.b.reference, 1);
    rbuf->b.b.screen = screen;
    rbuf->b.b.user_ptr = NULL;
    rbuf->domain = RADEON_DOMAIN_GTT;
    rbuf->buf = NULL;
    rbuf->constant_buffer = NULL;

    /* Alloc constant buffers in RAM. */
    if (templ->bind & PIPE_BIND_CONSTANT_BUFFER) {
        rbuf->constant_buffer = MALLOC(templ->width0);
        return &rbuf->b.b;
    }

    rbuf->buf =
        r300screen->rws->buffer_create(r300screen->rws,
                                       rbuf->b.b.width0, alignment,
                                       rbuf->b.b.bind, rbuf->domain);
    if (!rbuf->buf) {
        util_slab_free(&r300screen->pool_buffers, rbuf);
        return NULL;
    }

    rbuf->cs_buf =
        r300screen->rws->buffer_get_cs_handle(rbuf->buf);

    return &rbuf->b.b;
}
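This create path leaves exactly one backing store set: either constant_buffer (plain CPU memory for constant buffers) or buf (a GTT winsys buffer), which is why r300_buffer_destroy in Example #14 checks both before returning the resource to pool_buffers. A hedged one-line invariant check, with a hypothetical name:

/* Hypothetical sanity check mirroring r300_buffer_create and
 * r300_buffer_destroy: a live r300_resource has exactly one of the
 * two backing stores. */
static boolean example_r300_buffer_backing_ok(struct r300_resource *rbuf)
{
   return (rbuf->constant_buffer != NULL) != (rbuf->buf != NULL);
}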
Example #20
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
					struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)pipe;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset, doffset, size;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		size = transfer->box.width;
		doffset = transfer->box.x;
		soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;
		/* Copy the staging buffer into the original one. */
		if (rctx->b.rings.dma.cs && !(size % 4) && !(doffset % 4) && !(soffset % 4)) {
			if (rctx->screen->b.chip_class >= EVERGREEN) {
				evergreen_dma_copy(rctx, dst, src, doffset, soffset, size);
			} else {
				r600_dma_copy(rctx, dst, src, doffset, soffset, size);
			}
		} else {
			struct pipe_box box;

			u_box_1d(soffset, size, &box);
			r600_copy_buffer(pipe, dst, doffset, src, &box);
		}
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
	}

	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
			       transfer->box.x + transfer->box.width);
	}
	util_slab_free(&rctx->pool_transfers, transfer);
}
Example #21
static void r600_transfer_destroy(struct pipe_context *ctx,
				  struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	util_slab_free(&rctx->pool_transfers, transfer);
}
Example #22
static void *
r300_buffer_transfer_map( struct pipe_context *context,
                          struct pipe_resource *resource,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer )
{
    struct r300_context *r300 = r300_context(context);
    struct radeon_winsys *rws = r300->screen->rws;
    struct r300_resource *rbuf = r300_resource(resource);
    struct pipe_transfer *transfer;
    uint8_t *map;

    transfer = util_slab_alloc(&r300->pool_transfers);
    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

    if (rbuf->malloced_buffer) {
        *ptransfer = transfer;
        return rbuf->malloced_buffer + box->x;
    }

    if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
        !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        assert(usage & PIPE_TRANSFER_WRITE);

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->cs_buf, RADEON_USAGE_READWRITE) ||
            !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
            unsigned i;
            struct pb_buffer *new_buf;

            /* Create a new one in the same pipe_resource. */
            new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
                                               R300_BUFFER_ALIGNMENT, TRUE,
                                               rbuf->domain, 0);
            if (new_buf) {
                /* Discard the old buffer. */
                pb_reference(&rbuf->buf, NULL);
                rbuf->buf = new_buf;
                rbuf->cs_buf = r300->rws->buffer_get_cs_handle(rbuf->buf);

                /* We changed the buffer, now we need to bind it where the old one was bound. */
                for (i = 0; i < r300->nr_vertex_buffers; i++) {
                    if (r300->vertex_buffer[i].buffer == &rbuf->b.b) {
                        r300->vertex_arrays_dirty = TRUE;
                        break;
                    }
                }
            }
        }
    }

    /* The GPU never writes to these buffers, therefore mapping for read
     * can be unsynchronized. */
    if (!(usage & PIPE_TRANSFER_WRITE)) {
       usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);

    if (map == NULL) {
        util_slab_free(&r300->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;
    return map + box->x;
}
Example #23
static void *etna_pipe_transfer_map(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,  /* a combination of PIPE_TRANSFER_x */
                         const struct pipe_box *box,
                         struct pipe_transfer **out_transfer)
{
    struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_transfer *ptrans = util_slab_alloc(&priv->transfer_pool);
    struct etna_resource *resource_priv = etna_resource(resource);
    enum pipe_format format = resource->format;
    if (!ptrans)
        return NULL;
    assert(level <= resource->last_level);

    /* PIPE_TRANSFER_READ always requires a sync. */
    if(usage & PIPE_TRANSFER_READ)
    {
        etna_finish(priv->ctx);
    }
    /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be ignored when mapping in-place,
     * but when not in place we need to fire off the copy operation in transfer_flush_region (currently
     * a no-op) instead of unmap. Need to handle this to support ARB_map_buffer_range extension at least.
     */
    /* XXX we don't take care of current operations on the resource; which can be, at some point in the pipeline
       which is not yet executed:

       - bound as surface
       - bound through vertex buffer
       - bound through index buffer
       - bound in sampler view
       - used in clear_render_target / clear_depth_stencil operation
       - used in blit
       - used in resource_copy_region

       How do other drivers record this information over course of the rendering pipeline?
       Is it necessary at all? Only in case we want to provide a fast path and map the resource directly
       (and for PIPE_TRANSFER_MAP_DIRECTLY) and we don't want to force a sync.
       We also need to know whether the resource is in use to determine if a sync is needed (or just do it
       always, but that comes at the expense of performance).

       A conservative approximation without too much overhead would be to mark all resources that have
       been bound at some point as busy. A drawback would be that accessing resources that have
       been bound but are no longer in use for a while still carry a performance penalty. On the other hand,
       the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or PIPE_TRANSFER_UNSYNCHRONIZED to
       avoid this in the first place...

       A) We use an in-pipe copy engine, and queue the copy operation after unmap so that the copy
          will be performed when all current commands have been executed.
          Using the RS is possible, not sure if always efficient. This can also do any kind of tiling for us.
          Only possible when PIPE_TRANSFER_DISCARD_RANGE is set.
       B) We discard the entire resource (or at least, the mipmap level) and allocate new memory for it.
          Only possible when mapping the entire resource or PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
     */

    /* No need to allocate a buffer for copying if the resource is not in use,
     * and no tiling is needed, can just return a direct pointer.
     */
    ptrans->in_place = resource_priv->layout == ETNA_LAYOUT_LINEAR ||
                       (resource_priv->layout == ETNA_LAYOUT_TILED && util_format_is_compressed(resource->format));
    ptrans->base.resource = resource;
    ptrans->base.level = level;
    ptrans->base.usage = usage;
    ptrans->base.box = *box;

    struct etna_resource_level *res_level = &resource_priv->levels[level];
    /* map buffer object */
    void *mapped = etna_bo_map(resource_priv->bo) + res_level->offset;
    if(likely(ptrans->in_place))
    {
        ptrans->base.stride = res_level->stride;
        ptrans->base.layer_stride = res_level->layer_stride;
        ptrans->buffer = mapped + etna_compute_offset(resource->format, box, res_level->stride, res_level->layer_stride);
    } else {
        unsigned divSizeX = util_format_get_blockwidth(format);
        unsigned divSizeY = util_format_get_blockheight(format);
        if(usage & PIPE_TRANSFER_MAP_DIRECTLY)
        {
            /* No in-place transfer possible */
            util_slab_free(&priv->transfer_pool, ptrans);
            return NULL;
        }

        ptrans->base.stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
        ptrans->base.layer_stride = align(box->height, divSizeY) * ptrans->base.stride;
        size_t size = ptrans->base.layer_stride * box->depth;
        ptrans->buffer = MALLOC(size);
        if (!ptrans->buffer)
        {
            util_slab_free(&priv->transfer_pool, ptrans);
            return NULL;
        }

        if(usage & PIPE_TRANSFER_READ)
        {
            /* untile or copy resource for reading */
            if(resource_priv->layout == ETNA_LAYOUT_LINEAR || resource_priv->layout == ETNA_LAYOUT_TILED)
            {
                if(resource_priv->layout == ETNA_LAYOUT_TILED && !util_format_is_compressed(resource_priv->base.format))
                {
                    etna_texture_untile(ptrans->buffer, mapped + ptrans->base.box.z * res_level->layer_stride,
                            ptrans->base.box.x, ptrans->base.box.y, res_level->stride,
                            ptrans->base.box.width, ptrans->base.box.height, ptrans->base.stride,
                            util_format_get_blocksize(resource_priv->base.format));
                } else { /* non-tiled or compressed format */
                    util_copy_box(ptrans->buffer,
                      resource_priv->base.format,
                      ptrans->base.stride, ptrans->base.layer_stride,
                      0, 0, 0, /* dst x,y,z */
                      ptrans->base.box.width, ptrans->base.box.height, ptrans->base.box.depth,
                      mapped,
                      res_level->stride, res_level->layer_stride,
                      ptrans->base.box.x, ptrans->base.box.y, ptrans->base.box.z);
                }
            } else /* TODO supertiling */
            {
                BUG("unsupported tiling %i for reading", resource_priv->layout);
            }
        }
    }

    *out_transfer = &ptrans->base;
    return ptrans->buffer;
}
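When the transfer cannot be done in place, the staging strides above are derived from the format's block layout. A worked instance of that arithmetic, for a hypothetical 67x33x3 RGBA8 box (1x1 blocks, 4 bytes per block):

/* stride       = align(67, 1) * 4   = 268 bytes per row
 * layer_stride = align(33, 1) * 268 = 8844 bytes per layer
 * size         = 8844 * 3           = 26532 bytes of staging memory,
 * which is what MALLOC(size) above has to provide. */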