Example #1
struct etna_resource *
etna_texture_handle_incompatible(struct pipe_context *pctx, struct pipe_resource *prsc)
{
   struct etna_resource *res = etna_resource(prsc);
   if (!etna_resource_sampler_compatible(res)) {
      /* The original resource is not compatible with the sampler.
       * Allocate an appropriately tiled texture. */
      if (!res->texture) {
         struct pipe_resource templat = *prsc;

         templat.bind &= ~(PIPE_BIND_DEPTH_STENCIL | PIPE_BIND_RENDER_TARGET |
                           PIPE_BIND_BLENDABLE);
         res->texture =
            etna_resource_alloc(pctx->screen, ETNA_LAYOUT_TILED,
                                ETNA_ADDRESSING_MODE_TILED,
                                DRM_FORMAT_MOD_LINEAR, &templat);
      }

      if (!res->texture) {
         return NULL;
      }
      res = etna_resource(res->texture);
   }
   return res;
}
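Note: Example #1 leans on etna_resource_sampler_compatible() to decide whether a shadow texture is needed. A minimal sketch of such a check, assuming the layout flags and feature macros seen elsewhere in these examples (the driver's actual version also special-cases HALIGN; treat this as illustrative):

static inline bool
etna_resource_sampler_compatible(struct etna_resource *res)
{
   /* Compressed formats are stored linearly and can always be sampled. */
   if (util_format_is_compressed(res->base.format))
      return true;

   struct etna_screen *screen = etna_screen(res->base.screen);

   /* Some GPUs can sample directly from supertiled layouts. */
   if (res->layout == ETNA_LAYOUT_SUPER_TILED &&
       VIV_FEATURE(screen, chipMinorFeatures2, SUPERTILED_TEXTURE))
      return true;

   /* Otherwise the sampler only understands the basic tiled layout. */
   return res->layout == ETNA_LAYOUT_TILED;
}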
Example #2
static void
etna_update_sampler_source(struct pipe_sampler_view *view, int num)
{
   struct etna_resource *base = etna_resource(view->texture);
   struct etna_resource *to = base, *from = base;
   struct etna_context *ctx = etna_context(view->context);
   bool enable_sampler_ts = false;

   if (base->external && etna_resource_newer(etna_resource(base->external), base))
      from = etna_resource(base->external);

   if (base->texture)
      to = etna_resource(base->texture);

   if ((to != from) && etna_resource_older(to, from)) {
      etna_copy_resource(view->context, &to->base, &from->base, 0,
                         view->texture->last_level);
      to->seqno = from->seqno;
   } else if ((to == from) && etna_resource_needs_flush(to)) {
      if (ctx->ts_for_sampler_view && etna_can_use_sampler_ts(view, num)) {
         enable_sampler_ts = true;
         /* Do not set flush_seqno because the resolve-to-self was bypassed */
      } else {
         /* Resolve TS if needed */
         etna_copy_resource(view->context, &to->base, &from->base, 0,
                            view->texture->last_level);
         to->flush_seqno = from->seqno;
      }
   }
   if (ctx->ts_for_sampler_view) {
      etna_configure_sampler_ts(ctx->ts_for_sampler_view(view), view, enable_sampler_ts);
   }
}
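The copies in Example #2 are driven by sequence-number bookkeeping: each write bumps the resource's seqno (Example #3 below shows the increment). A hedged sketch of the comparison helpers these examples assume; the driver's own definitions live in etnaviv_resource.h and may differ in detail:

static inline bool
etna_resource_older(struct etna_resource *a, struct etna_resource *b)
{
   return (int)(a->seqno - b->seqno) < 0; /* wrap-safe comparison */
}

static inline bool
etna_resource_newer(struct etna_resource *a, struct etna_resource *b)
{
   return (int)(a->seqno - b->seqno) > 0;
}

/* A resource with a TS needs a resolve flush if it has been written
 * since the last resolve-to-self recorded in flush_seqno. */
static inline bool
etna_resource_needs_flush(struct etna_resource *res)
{
   return res->ts_bo && (int)(res->seqno - res->flush_seqno) > 0;
}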
Example #3
static void
etna_resource_changed(struct pipe_screen *pscreen, struct pipe_resource *prsc)
{
   struct etna_resource *res = etna_resource(prsc);

   if (res->external)
      etna_resource(res->external)->seqno++;
   else
      res->seqno++;
}
Example #4
static void
etna_update_render_resource(struct pipe_context *pctx, struct pipe_resource *pres)
{
   struct etna_resource *res = etna_resource(pres);

   if (res->texture && etna_resource_older(res, etna_resource(res->texture))) {
      /* The render buffer is older than the texture buffer. Copy it over. */
      etna_copy_resource(pctx, pres, res->texture, 0, pres->last_level);
      res->seqno = etna_resource(res->texture)->seqno;
   }
}
Example #5
/* XXX this should use a blit or resource copy, when implemented, instead
 * of programming the RS directly.
 */
static void etna_screen_flush_frontbuffer( struct pipe_screen *screen,
                          struct pipe_resource *resource,
                          unsigned level, unsigned layer,
                          void *winsys_drawable_handle )
{
    struct etna_rs_target *drawable = (struct etna_rs_target *)winsys_drawable_handle;
    struct etna_resource *rt_resource = etna_resource(resource);
    struct etna_pipe_context *ectx = rt_resource->last_ctx;
    struct pipe_fence_handle **fence = 0;
    assert(level <= resource->last_level && layer < resource->array_size);
    assert(ectx);
    struct etna_ctx *ctx = ectx->ctx;

    /* release previous fence, make reference to fence if we need one */
    screen->fence_reference(screen, &drawable->fence, NULL);
    if(drawable->want_fence)
        fence = &drawable->fence;

    etna_set_state(ctx, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_COLOR);
    etna_stall(ctx, SYNC_RECIPIENT_RA, SYNC_RECIPIENT_PE);

    /* Set up color TS to source surface before blit, if needed */
    if(rt_resource->levels[level].ts_address != ectx->gpu3d.TS_COLOR_STATUS_BASE)
    {
        if(rt_resource->levels[level].ts_address)
        {
            etna_set_state_multi(ctx, VIVS_TS_MEM_CONFIG, 4, (uint32_t[]) {
              ectx->gpu3d.TS_MEM_CONFIG = VIVS_TS_MEM_CONFIG_COLOR_FAST_CLEAR, /* XXX |= VIVS_TS_MEM_CONFIG_MSAA | translate_msaa_format(cbuf->format) */
              ectx->gpu3d.TS_COLOR_STATUS_BASE = rt_resource->levels[level].ts_address,
              ectx->gpu3d.TS_COLOR_SURFACE_BASE = rt_resource->levels[level].address,
              ectx->gpu3d.TS_COLOR_CLEAR_VALUE = rt_resource->levels[level].clear_value
              });
        } else {
Example #6
static void
etna_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
      unsigned num_buffers, const struct pipe_vertex_buffer *vb)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_vertexbuf_state *so = &ctx->vertex_buffer;

   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, num_buffers);
   so->count = util_last_bit(so->enabled_mask);

   for (unsigned idx = start_slot; idx < start_slot + num_buffers; ++idx) {
      struct compiled_set_vertex_buffer *cs = &so->cvb[idx];
      struct pipe_vertex_buffer *vbi = &so->vb[idx];

      assert(!vbi->is_user_buffer); /* XXX support user_buffer using
                                       etna_usermem_map */

      if (vbi->buffer.resource) { /* GPU buffer */
         cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_resource(vbi->buffer.resource)->bo;
         cs->FE_VERTEX_STREAM_BASE_ADDR.offset = vbi->buffer_offset;
         cs->FE_VERTEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
         cs->FE_VERTEX_STREAM_CONTROL =
            FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(vbi->stride);
      } else {
         cs->FE_VERTEX_STREAM_BASE_ADDR.bo = NULL;
         cs->FE_VERTEX_STREAM_CONTROL = 0;
      }
   }

   ctx->dirty |= ETNA_DIRTY_VERTEX_BUFFERS;
}
Example #7
/* Generate clear command for a surface (non-fast clear case) */
void
etna_rs_gen_clear_surface(struct etna_context *ctx, struct etna_surface *surf,
                          uint32_t clear_value)
{
   struct etna_resource *dst = etna_resource(surf->base.texture);
   uint32_t format = translate_rs_format(surf->base.format);

   if (format == ETNA_NO_MATCH) {
      BUG("etna_rs_gen_clear_surface: Unhandled clear fmt %s", util_format_name(surf->base.format));
      format = RS_FORMAT_A8R8G8B8;
      assert(0);
   }

   /* Use a tiled clear when the padded width and height satisfy the RS alignment */
   bool tiled_clear = (surf->surf.padded_width & ETNA_RS_WIDTH_MASK) == 0 &&
                      (surf->surf.padded_height & ETNA_RS_HEIGHT_MASK) == 0;

   etna_compile_rs_state( ctx, &surf->clear_command, &(struct rs_state) {
      .source_format = format,
      .dest_format = format,
      .dest = dst->bo,
      .dest_offset = surf->surf.offset,
      .dest_stride = surf->surf.stride,
      .dest_padded_height = surf->surf.padded_height,
      .dest_tiling = tiled_clear ? dst->layout : ETNA_LAYOUT_LINEAR,
      .dither = {0xffffffff, 0xffffffff},
      .width = surf->surf.padded_width, /* These must be padded to 16x4 if !LINEAR, otherwise RS will hang */
      .height = surf->surf.padded_height,
      .clear_value = {clear_value},
      .clear_mode = VIVS_RS_CLEAR_CONTROL_MODE_ENABLED1,
      .clear_bits = 0xffff
   });
}
Example #8
static struct pipe_sampler_view *etna_pipe_create_sampler_view(struct pipe_context *pipe,
                                                 struct pipe_resource *texture,
                                                 const struct pipe_sampler_view *templat)
{
    struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_sampler_view *sv = CALLOC_STRUCT(etna_sampler_view);
    sv->base = *templat;
    sv->base.context = pipe;
    sv->base.texture = 0;
    pipe_resource_reference(&sv->base.texture, texture);
    sv->base.texture = texture;
    assert(sv->base.texture);

    struct compiled_sampler_view *cs = CALLOC_STRUCT(compiled_sampler_view);
    struct etna_resource *res = etna_resource(sv->base.texture);
    assert(res != NULL);

    cs->TE_SAMPLER_CONFIG0 =
                VIVS_TE_SAMPLER_CONFIG0_TYPE(translate_texture_target(res->base.target, false)) |
                VIVS_TE_SAMPLER_CONFIG0_FORMAT(translate_texture_format(sv->base.format, false));
                /* merged with sampler state */
    cs->TE_SAMPLER_CONFIG0_MASK = 0xffffffff;

    cs->TE_SAMPLER_CONFIG1 =
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_R(templat->swizzle_r) |
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_G(templat->swizzle_g) |
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_B(templat->swizzle_b) |
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_A(templat->swizzle_a) |
                VIVS_TE_SAMPLER_CONFIG1_HALIGN(res->halign);
    cs->TE_SAMPLER_SIZE =
            VIVS_TE_SAMPLER_SIZE_WIDTH(res->base.width0)|
            VIVS_TE_SAMPLER_SIZE_HEIGHT(res->base.height0);
    cs->TE_SAMPLER_LOG_SIZE =
            VIVS_TE_SAMPLER_LOG_SIZE_WIDTH(etna_log2_fixp55(res->base.width0)) |
            VIVS_TE_SAMPLER_LOG_SIZE_HEIGHT(etna_log2_fixp55(res->base.height0));

    /* Set up levels-of-detail */
    for(int lod=0; lod<=res->base.last_level; ++lod)
    {
        cs->TE_SAMPLER_LOD_ADDR[lod] = etna_bo_gpu_address(res->bo) + res->levels[lod].offset;
    }
    cs->min_lod = sv->base.u.tex.first_level << 5;
    cs->max_lod = MIN2(sv->base.u.tex.last_level, res->base.last_level) << 5;

    /* Workaround for npot textures -- it appears that only CLAMP_TO_EDGE is supported when the
     * appropriate capability is not set. */
    if(!priv->specs.npot_tex_any_wrap &&
        (!util_is_power_of_two(res->base.width0) || !util_is_power_of_two(res->base.height0)))
    {
        cs->TE_SAMPLER_CONFIG0_MASK = ~(VIVS_TE_SAMPLER_CONFIG0_UWRAP__MASK |
                                        VIVS_TE_SAMPLER_CONFIG0_VWRAP__MASK);
        cs->TE_SAMPLER_CONFIG0 |= VIVS_TE_SAMPLER_CONFIG0_UWRAP(TEXTURE_WRAPMODE_CLAMP_TO_EDGE) |
                                  VIVS_TE_SAMPLER_CONFIG0_VWRAP(TEXTURE_WRAPMODE_CLAMP_TO_EDGE);
    }

    sv->internal = cs;
    pipe_reference_init(&sv->base.reference, 1);
    return &sv->base;
}
Example #9
/* Associate a resource with this context when it is bound in any way
 * (vertex buffer, index buffer, texture, surface, blit).
 */
void etna_resource_touch(struct pipe_context *pipe, struct pipe_resource *resource_)
{
    struct etna_pipe_context *ectx = etna_pipe_context(pipe);
    struct etna_resource *resource = etna_resource(resource_);
    if(resource == NULL)
        return;
    resource->last_ctx = ectx;
}
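A hedged sketch of the bind points the comment describes; the surrounding variables are hypothetical:

    /* Hypothetical call sites for etna_resource_touch(): */
    etna_resource_touch(pipe, vertex_buf->buffer);    /* vertex buffer bind */
    etna_resource_touch(pipe, index_buf->buffer);     /* index buffer bind */
    etna_resource_touch(pipe, sampler_view->texture); /* sampler view bind */
    etna_resource_touch(pipe, surf->texture);         /* surface bind */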
Example #10
static struct pipe_surface *etna_pipe_create_surface(struct pipe_context *pipe,
                                      struct pipe_resource *resource_,
                                      const struct pipe_surface *templat)
{
    struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_surface *surf = CALLOC_STRUCT(etna_surface);
    struct etna_resource *resource = etna_resource(resource_);
    assert(templat->u.tex.first_layer == templat->u.tex.last_layer);
    unsigned layer = templat->u.tex.first_layer;
    unsigned level = templat->u.tex.level;
    assert(layer < resource->base.array_size);

    surf->base.context = pipe;

    pipe_reference_init(&surf->base.reference, 1);
    pipe_resource_reference(&surf->base.texture, &resource->base);

    /* Allocate a TS for the resource if there isn't one yet,
     * and it is allowed by the hw (width is a multiple of 16).
     */
    /* XXX for now, don't do TS for render textures as this path
     * is not stable.
     */
    if(!resource->ts && !(resource->base.bind & PIPE_BIND_SAMPLER_VIEW) &&
            (resource->levels[level].padded_width & ETNA_RS_WIDTH_MASK) == 0 &&
            (resource->levels[level].padded_height & ETNA_RS_HEIGHT_MASK) == 0)
        etna_screen_resource_alloc_ts(pipe->screen, resource);

    surf->base.texture = &resource->base;
    surf->base.format = resource->base.format;
    surf->base.width = resource->levels[level].width;
    surf->base.height = resource->levels[level].height;
    surf->base.writable = templat->writable; // what is this for anyway
    surf->base.u = templat->u;

    surf->layout = resource->layout;
    surf->surf = resource->levels[level];
    surf->surf.address += layer * surf->surf.layer_stride;
    surf->surf.logical += layer * surf->surf.layer_stride;
    surf->clear_value = 0; /* last clear value */

    if(surf->surf.ts_address)
    {
        /* This abuses the RS as a plain buffer memset().
           Currently uses a fixed row size of 64 bytes. Some benchmarking with different sizes may be in order.
         */
        etna_compile_rs_state(&surf->clear_command, &(struct rs_state){
                .source_format = RS_FORMAT_X8R8G8B8,
                .dest_format = RS_FORMAT_X8R8G8B8,
                .dest_addr = surf->surf.ts_address,
                .dest_stride = 0x40,
                .dither = {0xffffffff, 0xffffffff},
                .width = 16,
                .height = surf->surf.ts_size/0x40,
                .clear_value = {priv->specs.ts_clear_value},  /* XXX should be 0x11111111 for non-2BITPERTILE GPUs */
                .clear_mode = VIVS_RS_CLEAR_CONTROL_MODE_ENABLED1,
                .clear_bits = 0xffff
            });
Example #11
static void etna_pipe_transfer_unmap(struct pipe_context *pipe,
                      struct pipe_transfer *transfer_)
{
    struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_transfer *ptrans = etna_transfer(transfer_);

    /* XXX
     * When writing to a resource that is already in use, replace the
     * resource with a completely new buffer and free the old one using a
     * fenced free. The most tricky case to implement will be: tiled or
     * supertiled surface, partial write, target not aligned to 4/64.
     */
    struct etna_resource *resource = etna_resource(ptrans->base.resource);
    assert(ptrans->base.level <= resource->base.last_level);

    if(ptrans->base.usage & PIPE_TRANSFER_WRITE)
    {
        /* write back */
        if(unlikely(!ptrans->in_place))
        {
            /* map buffer object */
            struct etna_resource_level *res_level = &resource->levels[ptrans->base.level];
            void *mapped = etna_bo_map(resource->bo) + res_level->offset;
            if(resource->layout == ETNA_LAYOUT_LINEAR || resource->layout == ETNA_LAYOUT_TILED)
            {
                if(resource->layout == ETNA_LAYOUT_TILED && !util_format_is_compressed(resource->base.format))
                {
                    etna_texture_tile(mapped + ptrans->base.box.z * res_level->layer_stride, ptrans->buffer,
                            ptrans->base.box.x, ptrans->base.box.y, res_level->stride,
                            ptrans->base.box.width, ptrans->base.box.height, ptrans->base.stride,
                            util_format_get_blocksize(resource->base.format));
                } else { /* non-tiled or compressed format */
                    util_copy_box(mapped,
                      resource->base.format,
                      res_level->stride, res_level->layer_stride,
                      ptrans->base.box.x, ptrans->base.box.y, ptrans->base.box.z,
                      ptrans->base.box.width, ptrans->base.box.height, ptrans->base.box.depth,
                      ptrans->buffer,
                      ptrans->base.stride, ptrans->base.layer_stride,
                      0, 0, 0 /* src x,y,z */);
                }
            } else
            {
                BUG("unsupported tiling %i", resource->layout);
            }
            FREE(ptrans->buffer);
        }
        if(resource->base.bind & PIPE_BIND_SAMPLER_VIEW)
        {
            /* XXX do we need to flush the CPU cache too or start a write barrier
             * to make sure the GPU sees it? */
            priv->dirty_bits |= ETNA_STATE_TEXTURE_CACHES;
        }
    }

    util_slab_free(&priv->transfer_pool, ptrans);
}
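Example #11 writes tiled data through etna_texture_tile(). A reference sketch of what that helper does, assuming the 4x4 micro-tile layout used by Vivante GPUs (the driver's version is an optimized variant of this; the argument order matches the call above):

static void
texture_tile_sketch(void *dest, const void *src, unsigned basex, unsigned basey,
                    unsigned dst_stride, unsigned width, unsigned height,
                    unsigned src_stride, unsigned elmtsize)
{
   for (unsigned y = 0; y < height; y++) {
      for (unsigned x = 0; x < width; x++) {
         unsigned dx = basex + x, dy = basey + y;
         /* Start of the 4x4 tile containing (dx, dy)... */
         size_t tile_off = (size_t)(dy & ~3) * dst_stride +
                           (size_t)(dx & ~3) * 4 * elmtsize;
         /* ...plus the pixel's position within that tile. */
         size_t in_tile = ((dy & 3) * 4 + (dx & 3)) * elmtsize;

         memcpy((uint8_t *)dest + tile_off + in_tile,
                (const uint8_t *)src + (size_t)y * src_stride + x * elmtsize,
                elmtsize);
      }
   }
}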
Example #12
static boolean
etna_resource_get_handle(struct pipe_screen *pscreen,
                         struct pipe_context *pctx,
                         struct pipe_resource *prsc,
                         struct winsys_handle *handle, unsigned usage)
{
   struct etna_resource *rsc = etna_resource(prsc);

   if (renderonly_get_handle(rsc->scanout, handle))
      return TRUE;

   return etna_screen_bo_get_handle(pscreen, rsc->bo, rsc->levels[0].stride,
                                    handle);
}
Example #13
static void
etna_update_sampler_source(struct pipe_sampler_view *view)
{
   struct etna_resource *base = etna_resource(view->texture);
   struct etna_resource *to = base, *from = base;

   if (base->external && etna_resource_newer(etna_resource(base->external), base))
      from = etna_resource(base->external);

   if (base->texture)
      to = etna_resource(base->texture);

   if ((to != from) && etna_resource_older(to, from)) {
      etna_copy_resource(view->context, &to->base, &from->base, 0,
                         view->texture->last_level);
      to->seqno = from->seqno;
   } else if ((to == from) && etna_resource_needs_flush(to)) {
      /* Resolve TS if needed, remove when adding sampler TS */
      etna_copy_resource(view->context, &to->base, &from->base, 0,
                         view->texture->last_level);
      to->flush_seqno = from->seqno;
   }
}
Example #14
static boolean
etna_resource_get_handle(struct pipe_screen *pscreen,
                         struct pipe_context *pctx,
                         struct pipe_resource *prsc,
                         struct winsys_handle *handle, unsigned usage)
{
   struct etna_resource *rsc = etna_resource(prsc);
   /* Scanout is always attached to the base resource */
   struct renderonly_scanout *scanout = rsc->scanout;

   /*
    * External resources are preferred, so an import->export chain of
    * render/sampler incompatible buffers yields the same handle.
    */
   if (rsc->external)
      rsc = etna_resource(rsc->external);

   handle->stride = rsc->levels[0].stride;
   handle->modifier = layout_to_modifier(rsc->layout);

   if (handle->type == WINSYS_HANDLE_TYPE_SHARED) {
      return etna_bo_get_name(rsc->bo, &handle->handle) == 0;
   } else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
      if (renderonly_get_handle(scanout, handle)) {
         return TRUE;
      } else {
         handle->handle = etna_bo_handle(rsc->bo);
         return TRUE;
      }
   } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
      handle->handle = etna_bo_dmabuf(rsc->bo);
      return TRUE;
   } else {
      return FALSE;
   }
}
Example #15
static struct pipe_sampler_view *etna_pipe_create_sampler_view(struct pipe_context *pipe,
                                                 struct pipe_resource *texture,
                                                 const struct pipe_sampler_view *templat)
{
    //struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_sampler_view *sv = CALLOC_STRUCT(etna_sampler_view);
    sv->base = *templat;
    sv->base.context = pipe;
    sv->base.texture = 0;
    pipe_resource_reference(&sv->base.texture, texture);
    sv->base.texture = texture;
    assert(sv->base.texture);

    struct compiled_sampler_view *cs = CALLOC_STRUCT(compiled_sampler_view);
    struct etna_resource *res = etna_resource(sv->base.texture);
    assert(res != NULL);

    cs->TE_SAMPLER_CONFIG0 =
                VIVS_TE_SAMPLER_CONFIG0_TYPE(translate_texture_target(res->base.target, false)) |
                VIVS_TE_SAMPLER_CONFIG0_FORMAT(translate_texture_format(sv->base.format, false));
                /* merged with sampler state */
    cs->TE_SAMPLER_CONFIG1 =
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_R(templat->swizzle_r) |
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_G(templat->swizzle_g) |
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_B(templat->swizzle_b) |
                VIVS_TE_SAMPLER_CONFIG1_SWIZZLE_A(templat->swizzle_a) |
                VIVS_TE_SAMPLER_CONFIG1_HALIGN(res->halign);
    cs->TE_SAMPLER_SIZE =
            VIVS_TE_SAMPLER_SIZE_WIDTH(res->base.width0)|
            VIVS_TE_SAMPLER_SIZE_HEIGHT(res->base.height0);
    cs->TE_SAMPLER_LOG_SIZE =
            VIVS_TE_SAMPLER_LOG_SIZE_WIDTH(log2_fixp55(res->base.width0)) |
            VIVS_TE_SAMPLER_LOG_SIZE_HEIGHT(log2_fixp55(res->base.height0));
    /* XXX in principle we only have to define lods sv->first_level .. sv->last_level */
    for(int lod=0; lod<=res->base.last_level; ++lod)
    {
        cs->TE_SAMPLER_LOD_ADDR[lod] = res->levels[lod].address;
    }
    cs->min_lod = sv->base.u.tex.first_level << 5;
    cs->max_lod = MIN2(sv->base.u.tex.last_level, res->base.last_level) << 5;

    sv->internal = cs;
    pipe_reference_init(&sv->base.reference, 1);
    return &sv->base;
}
Example #16
void
etna_resource_used(struct etna_context *ctx, struct pipe_resource *prsc,
                   enum etna_resource_status status)
{
   struct etna_resource *rsc;

   if (!prsc)
      return;

   rsc = etna_resource(prsc);
   rsc->status |= status;

   /* TODO resources can actually be shared across contexts,
    * so I'm not sure a single list-head will do the trick? */
   debug_assert((rsc->pending_ctx == ctx) || !rsc->pending_ctx);
   list_delinit(&rsc->list);
   list_addtail(&rsc->list, &ctx->used_resources);
   rsc->pending_ctx = ctx;
}
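Example #21's transfer_map later consults rsc->status to decide whether a flush is needed before CPU access. A hedged sketch of draw-time marking, reusing the context fields from Examples #6 and #24 (ETNA_PENDING_READ is assumed to complement the ETNA_PENDING_WRITE flag that appears in Example #21):

   /* Hypothetical draw-time call sites for etna_resource_used(): */
   for (unsigned i = 0; i < ctx->vertex_buffer.count; i++)
      etna_resource_used(ctx, ctx->vertex_buffer.vb[i].buffer.resource,
                         ETNA_PENDING_READ);
   if (ctx->framebuffer_s.nr_cbufs > 0)
      etna_resource_used(ctx, ctx->framebuffer_s.cbufs[0]->texture,
                         ETNA_PENDING_WRITE);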
Example #17
static void
etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
{
   struct etna_resource *rsc = etna_resource(prsc);

   if (rsc->bo)
      etna_bo_del(rsc->bo);

   if (rsc->ts_bo)
      etna_bo_del(rsc->ts_bo);

   if (rsc->scanout)
      renderonly_scanout_destroy(rsc->scanout);

   list_delinit(&rsc->list);

   pipe_resource_reference(&rsc->texture, NULL);

   FREE(rsc);
}
Example #18
static void etna_screen_resource_destroy(struct pipe_screen *screen,
                        struct pipe_resource *resource_)
{
    struct etna_screen *priv = etna_screen(screen);
    struct etna_resource *resource = etna_resource(resource_);
    if(resource == NULL)
        return;
    if(resource->last_ctx != NULL)
    {
        /* XXX This could fail when multiple contexts share this resource,
         * (the last one to bind it will "own" it) or fail miserably if
         * the context was since destroyed.
         */
        struct etna_pipe_context *ectx = resource->last_ctx;
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource queued destroyed (%ix%ix%i)", resource, resource_->width0, resource_->height0, resource_->depth0);
        etna_vidmem_queue_free(ectx->ctx->queue, resource->surface);
        etna_vidmem_queue_free(ectx->ctx->queue, resource->ts);
    } else {
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource destroyed (%ix%ix%i)", resource, resource_->width0, resource_->height0, resource_->depth0);
        etna_vidmem_free(priv->dev, resource->surface);
        etna_vidmem_free(priv->dev, resource->ts);
    }
    FREE(resource);
}
Example #19
/* Return true if the GPU can use sampler TS with this sampler view.
 * Sampler TS is an optimization used when rendering to textures, where
 * a resolve-in-place can be avoided when rendering has left a (valid) TS.
 */
static bool
etna_can_use_sampler_ts(struct pipe_sampler_view *view, int num)
{
    /* Can use sampler TS when:
     * - the hardware supports sampler TS.
     * - the sampler view will be bound to a sampler < VIVS_TS_SAMPLER__LEN.
     *   HALTI5 adds a mapping from sampler to sampler TS unit, but this is AFAIK
     *   absent on earlier models.
     * - it is a texture, not a buffer.
     * - the sampler view has a supported format for sampler TS.
     * - the sampler will have one LOD, and it happens to be level 0.
     *   (it is not certain whether the hw supports it for other levels, but the
     *   available state strongly suggests only one at a time).
     * - the resource TS is valid for level 0.
     */
   struct etna_resource *rsc = etna_resource(view->texture);
   struct etna_screen *screen = etna_screen(rsc->base.screen);
   return VIV_FEATURE(screen, chipMinorFeatures2, TEXTURE_TILED_READ) &&
      num < VIVS_TS_SAMPLER__LEN &&
      rsc->base.target != PIPE_BUFFER &&
      translate_ts_sampler_format(rsc->base.format) != ETNA_NO_MATCH &&
      view->u.tex.first_level == 0 && MIN2(view->u.tex.last_level, rsc->base.last_level) == 0 &&
      rsc->levels[0].ts_valid;
}
Example #20
static void
etna_configure_sampler_ts(struct etna_sampler_ts *sts, struct pipe_sampler_view *pview, bool enable)
{
   assert(sts);
   sts->enable = enable;
   if (enable) {
      struct etna_resource *rsc = etna_resource(pview->texture);
      struct etna_resource_level *lev = &rsc->levels[0];
      assert(rsc->ts_bo && lev->ts_valid);

      sts->TS_SAMPLER_CONFIG =
         VIVS_TS_SAMPLER_CONFIG_ENABLE(1) |
         VIVS_TS_SAMPLER_CONFIG_FORMAT(translate_ts_sampler_format(rsc->base.format));
      sts->TS_SAMPLER_CLEAR_VALUE = lev->clear_value;
      sts->TS_SAMPLER_CLEAR_VALUE2 = lev->clear_value; /* To handle 64-bit formats this needs a different value */
      sts->TS_SAMPLER_STATUS_BASE.bo = rsc->ts_bo;
      sts->TS_SAMPLER_STATUS_BASE.offset = lev->ts_offset;
      sts->TS_SAMPLER_STATUS_BASE.flags = ETNA_RELOC_READ;
   } else {
      sts->TS_SAMPLER_CONFIG = 0;
      sts->TS_SAMPLER_STATUS_BASE.bo = NULL;
   }
   /* n.b.: relies on caller to mark ETNA_DIRTY_SAMPLER_VIEWS */
}
Example #21
static void etna_screen_resource_destroy(struct pipe_screen *screen,
                        struct pipe_resource *resource_)
{
    struct etna_screen *priv = etna_screen(screen);
    struct etna_resource *resource = etna_resource(resource_);
    if(resource == NULL)
        return;
    struct etna_queue *queue = NULL;
    if(resource->last_ctx != NULL)
    {
        /* XXX This could fail when multiple contexts share this resource,
         * (the last one to bind it will "own" it) or fail miserably if
         * the context was since destroyed.
         * Integrate this into etna_bo_del...
         */
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource queued destroyed (%ix%ix%i)", resource, resource_->width0, resource_->height0, resource_->depth0);
        queue = resource->last_ctx->ctx->queue;
    } else {
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource destroyed (%ix%ix%i)", resource, resource_->width0, resource_->height0, resource_->depth0);
    }
    etna_bo_del(priv->dev, resource->bo, queue);
    etna_bo_del(priv->dev, resource->ts_bo, queue);
    FREE(resource);
}
Example #22
static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the
    * resource with a completely new buffer and free the old one using a
    * fenced free. The most tricky case to implement will be: tiled or
    * supertiled surface, partial write, target not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, must push them
    * back into GPU domain before the RS execs the blit to the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* map buffer object */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];
         void *mapped = etna_bo_map(rsc->bo) + res_level->offset;

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_tile(
               mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /*
    * Transfers without a temporary are only pulled into the CPU domain if they
    * are not mapped unsynchronized. If they are, must push them back into GPU
    * domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}
Example #23
static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       DRM_FORMAT_MOD_LINEAR, &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      /* Need to align the transfer region to satisfy RS restrictions, as we
       * really want to hit the RS blit path here.
       */
      unsigned w_align, h_align;

      if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
         w_align = h_align = 64;
      } else {
         w_align = ETNA_RS_WIDTH_MASK + 1;
         h_align = ETNA_RS_HEIGHT_MASK + 1;
      }
      h_align *= ctx->screen->specs.pixel_pipes;

      ptrans->box.width += ptrans->box.x & (w_align - 1);
      ptrans->box.x = ptrans->box.x & ~(w_align - 1);
      ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
      ptrans->box.height += ptrans->box.y & (h_align - 1);
      ptrans->box.y = ptrans->box.y & ~(h_align - 1);
      ptrans->box.height = align(ptrans->box.height,
                                 (ETNA_RS_HEIGHT_MASK + 1) *
                                  ctx->screen->specs.pixel_pipes);

      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /*
    * Always flush if we have the temporary resource and have a copy to this
    * outstanding. Otherwise infer flush requirement from resource access and
    * current GPU usage (reads must wait for GPU writes, writes must have
    * exclusive access to the buffer).
    */
   if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
       (!trans->rsc &&
        (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
        ((usage & PIPE_TRANSFER_WRITE) && rsc->status))))
      pctx->flush(pctx, NULL, 0);

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be
    * ignored when mapping in-place, but when not in place we need to fire
    * off the copy operation in transfer_flush_region (currently a no-op)
    * instead of unmap. Need to handle this to support ARB_map_buffer_range
    * extension at least.
    */
   /* XXX we don't take care of current operations on the resource; which
      can be, at some point in the pipeline which is not yet executed:

      - bound as surface
      - bound through vertex buffer
      - bound through index buffer
      - bound in sampler view
      - used in clear_render_target / clear_depth_stencil operation
      - used in blit
      - used in resource_copy_region

      How do other drivers record this information over the course of the
      rendering pipeline? Is it necessary at all? Only in case we want to
      provide a fast path and map the resource directly (and for
      PIPE_TRANSFER_MAP_DIRECTLY) and we don't want to force a sync. We also
      need to know whether the resource is in use to determine if a sync is
      needed (or just do it always, but that comes at the expense of
      performance).

      A conservative approximation without too much overhead would be to
      mark all resources that have been bound at some point as busy. A
      drawback would be that accessing resources that have been bound but
      are no longer in use for a while still carries a performance penalty.
      On the other hand, the program could be using
      PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or PIPE_TRANSFER_UNSYNCHRONIZED
      to avoid this in the first place...

      A) We use an in-pipe copy engine, and queue the copy operation after
         unmap so that the copy will be performed when all current commands
         have been executed. Using the RS is possible, not sure if always
         efficient. This can also do any kind of tiling for us. Only
         possible when PIPE_TRANSFER_DISCARD_RANGE is set.
      B) We discard the entire resource (or at least, the mipmap level) and
         allocate new memory for it. Only possible when mapping the entire
         resource or PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      uint32_t prep_flags = 0;

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   void *mapped = etna_bo_map(rsc->bo);
   if (!mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      return mapped + res_level->offset +
             etna_compute_offset(prsc->format, box, res_level->stride,
                                 res_level->layer_stride);
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

      mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_untile(trans->staging,
                                mapped + ptrans->box.z * res_level->layer_stride,
                                ptrans->box.x, ptrans->box.y, res_level->stride,
                                ptrans->box.width, ptrans->box.height, ptrans->stride,
                                util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}
Example #24
static void
etna_set_framebuffer_state(struct pipe_context *pctx,
      const struct pipe_framebuffer_state *sv)
{
   struct etna_context *ctx = etna_context(pctx);
   struct compiled_framebuffer_state *cs = &ctx->framebuffer;
   int nr_samples_color = -1;
   int nr_samples_depth = -1;

   /* Set up TS as well. Warning: this state is used by both the RS and PE */
   uint32_t ts_mem_config = 0;

   if (sv->nr_cbufs > 0) { /* at least one color buffer? */
      struct etna_surface *cbuf = etna_surface(sv->cbufs[0]);
      struct etna_resource *res = etna_resource(cbuf->base.texture);
      bool color_supertiled = (res->layout & ETNA_LAYOUT_BIT_SUPER) != 0;

      assert(res->layout & ETNA_LAYOUT_BIT_TILE); /* Cannot render to linear surfaces */
      etna_update_render_resource(pctx, cbuf->base.texture);

      pipe_surface_reference(&cs->cbuf, &cbuf->base);
      cs->PE_COLOR_FORMAT =
         VIVS_PE_COLOR_FORMAT_FORMAT(translate_rs_format(cbuf->base.format)) |
         VIVS_PE_COLOR_FORMAT_COMPONENTS__MASK |
         VIVS_PE_COLOR_FORMAT_OVERWRITE |
         COND(color_supertiled, VIVS_PE_COLOR_FORMAT_SUPER_TILED) |
         COND(color_supertiled && ctx->specs.halti >= 5, VIVS_PE_COLOR_FORMAT_SUPER_TILED_NEW);
      /* VIVS_PE_COLOR_FORMAT_COMPONENTS() and
       * VIVS_PE_COLOR_FORMAT_OVERWRITE comes from blend_state
       * but only if we set the bits above. */
      /* merged with depth_stencil_alpha */
      if ((cbuf->surf.offset & 63) ||
          (((cbuf->surf.stride * 4) & 63) && cbuf->surf.height > 4)) {
         /* XXX Must make temporary surface here.
          * Need the same mechanism on gc2000 when we want to do mipmap
          * generation by rendering to levels > 1 due to multitiled / tiled
          * conversion. */
         BUG("Alignment error, trying to render to offset %08x with tile "
             "stride %i",
             cbuf->surf.offset, cbuf->surf.stride * 4);
      }

      if (ctx->specs.pixel_pipes == 1) {
         cs->PE_COLOR_ADDR = cbuf->reloc[0];
         cs->PE_COLOR_ADDR.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
      } else {
         /* Rendered textures must always be multi-tiled, or single-buffer mode must be supported */
         assert((res->layout & ETNA_LAYOUT_BIT_MULTI) || ctx->specs.single_buffer);
         for (int i = 0; i < ctx->specs.pixel_pipes; i++) {
            cs->PE_PIPE_COLOR_ADDR[i] = cbuf->reloc[i];
            cs->PE_PIPE_COLOR_ADDR[i].flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
         }
      }
      cs->PE_COLOR_STRIDE = cbuf->surf.stride;

      if (cbuf->surf.ts_size) {
         cs->TS_COLOR_CLEAR_VALUE = cbuf->level->clear_value;

         cs->TS_COLOR_STATUS_BASE = cbuf->ts_reloc;
         cs->TS_COLOR_STATUS_BASE.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;

         cs->TS_COLOR_SURFACE_BASE = cbuf->reloc[0];
         cs->TS_COLOR_SURFACE_BASE.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
      }

      /* MSAA */
      if (cbuf->base.texture->nr_samples > 1)
         ts_mem_config |=
            VIVS_TS_MEM_CONFIG_COLOR_COMPRESSION | translate_msaa_format(cbuf->base.format);

      nr_samples_color = cbuf->base.texture->nr_samples;
   } else {
      pipe_surface_reference(&cs->cbuf, NULL);
      /* Clearing VIVS_PE_COLOR_FORMAT_COMPONENTS__MASK and
       * VIVS_PE_COLOR_FORMAT_OVERWRITE prevents us from overwriting the
       * color target */
      cs->PE_COLOR_FORMAT = VIVS_PE_COLOR_FORMAT_OVERWRITE;
      cs->PE_COLOR_STRIDE = 0;
      cs->TS_COLOR_STATUS_BASE.bo = NULL;
      cs->TS_COLOR_SURFACE_BASE.bo = NULL;

      for (int i = 0; i < ETNA_MAX_PIXELPIPES; i++)
         cs->PE_PIPE_COLOR_ADDR[i].bo = NULL;
   }

   if (sv->zsbuf != NULL) {
      struct etna_surface *zsbuf = etna_surface(sv->zsbuf);
      struct etna_resource *res = etna_resource(zsbuf->base.texture);

      etna_update_render_resource(pctx, zsbuf->base.texture);

      pipe_surface_reference(&cs->zsbuf, &zsbuf->base);
      assert(res->layout & ETNA_LAYOUT_BIT_TILE); /* Cannot render to linear surfaces */

      uint32_t depth_format = translate_depth_format(zsbuf->base.format);
      unsigned depth_bits =
         depth_format == VIVS_PE_DEPTH_CONFIG_DEPTH_FORMAT_D16 ? 16 : 24;
      bool depth_supertiled = (res->layout & ETNA_LAYOUT_BIT_SUPER) != 0;

      cs->PE_DEPTH_CONFIG =
         depth_format |
         COND(depth_supertiled, VIVS_PE_DEPTH_CONFIG_SUPER_TILED) |
         VIVS_PE_DEPTH_CONFIG_DEPTH_MODE_Z |
         COND(ctx->specs.halti >= 5, VIVS_PE_DEPTH_CONFIG_DISABLE_ZS) /* Needs to be enabled on GC7000, otherwise depth writes hang w/ TS - apparently it does something else now */
         ;
      /* VIVS_PE_DEPTH_CONFIG_ONLY_DEPTH */
      /* merged with depth_stencil_alpha */

      if (ctx->specs.pixel_pipes == 1) {
         cs->PE_DEPTH_ADDR = zsbuf->reloc[0];
         cs->PE_DEPTH_ADDR.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
      } else {
         for (int i = 0; i < ctx->specs.pixel_pipes; i++) {
            cs->PE_PIPE_DEPTH_ADDR[i] = zsbuf->reloc[i];
            cs->PE_PIPE_DEPTH_ADDR[i].flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
         }
      }

      cs->PE_DEPTH_STRIDE = zsbuf->surf.stride;
      cs->PE_HDEPTH_CONTROL = VIVS_PE_HDEPTH_CONTROL_FORMAT_DISABLED;
      cs->PE_DEPTH_NORMALIZE = fui(exp2f(depth_bits) - 1.0f);

      if (zsbuf->surf.ts_size) {
         cs->TS_DEPTH_CLEAR_VALUE = zsbuf->level->clear_value;

         cs->TS_DEPTH_STATUS_BASE = zsbuf->ts_reloc;
         cs->TS_DEPTH_STATUS_BASE.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;

         cs->TS_DEPTH_SURFACE_BASE = zsbuf->reloc[0];
         cs->TS_DEPTH_SURFACE_BASE.flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;
      }

      ts_mem_config |= COND(depth_bits == 16, VIVS_TS_MEM_CONFIG_DEPTH_16BPP);

      /* MSAA */
      if (zsbuf->base.texture->nr_samples > 1)
         /* XXX VIVS_TS_MEM_CONFIG_DEPTH_COMPRESSION;
          * Disable without MSAA for now, as it causes corruption in glquake. */
         ts_mem_config |= VIVS_TS_MEM_CONFIG_DEPTH_COMPRESSION;

      nr_samples_depth = zsbuf->base.texture->nr_samples;
   } else {
      pipe_surface_reference(&cs->zsbuf, NULL);
      cs->PE_DEPTH_CONFIG = VIVS_PE_DEPTH_CONFIG_DEPTH_MODE_NONE;
      cs->PE_DEPTH_ADDR.bo = NULL;
      cs->PE_DEPTH_STRIDE = 0;
      cs->TS_DEPTH_STATUS_BASE.bo = NULL;
      cs->TS_DEPTH_SURFACE_BASE.bo = NULL;

      for (int i = 0; i < ETNA_MAX_PIXELPIPES; i++)
         cs->PE_PIPE_DEPTH_ADDR[i].bo = NULL;
   }

   /* MSAA setup */
   if (nr_samples_depth != -1 && nr_samples_color != -1 &&
       nr_samples_depth != nr_samples_color) {
      BUG("Number of samples in color and depth texture must match (%i and %i respectively)",
          nr_samples_color, nr_samples_depth);
   }

   switch (MAX2(nr_samples_depth, nr_samples_color)) {
   case 0:
   case 1: /* Are 0 and 1 samples allowed? */
      cs->GL_MULTI_SAMPLE_CONFIG =
         VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE;
      cs->msaa_mode = false;
      break;
   case 2:
      cs->GL_MULTI_SAMPLE_CONFIG = VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X;
      cs->msaa_mode = true; /* Add input to PS */
      cs->RA_MULTISAMPLE_UNK00E04 = 0x0;
      cs->RA_MULTISAMPLE_UNK00E10[0] = 0x0000aa22;
      cs->RA_CENTROID_TABLE[0] = 0x66aa2288;
      cs->RA_CENTROID_TABLE[1] = 0x88558800;
      cs->RA_CENTROID_TABLE[2] = 0x88881100;
      cs->RA_CENTROID_TABLE[3] = 0x33888800;
      break;
   case 4:
      cs->GL_MULTI_SAMPLE_CONFIG = VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X;
      cs->msaa_mode = true; /* Add input to PS */
      cs->RA_MULTISAMPLE_UNK00E04 = 0x0;
      cs->RA_MULTISAMPLE_UNK00E10[0] = 0xeaa26e26;
      cs->RA_MULTISAMPLE_UNK00E10[1] = 0xe6ae622a;
      cs->RA_MULTISAMPLE_UNK00E10[2] = 0xaaa22a22;
      cs->RA_CENTROID_TABLE[0] = 0x4a6e2688;
      cs->RA_CENTROID_TABLE[1] = 0x888888a2;
      cs->RA_CENTROID_TABLE[2] = 0x888888ea;
      cs->RA_CENTROID_TABLE[3] = 0x888888c6;
      cs->RA_CENTROID_TABLE[4] = 0x46622a88;
      cs->RA_CENTROID_TABLE[5] = 0x888888ae;
      cs->RA_CENTROID_TABLE[6] = 0x888888e6;
      cs->RA_CENTROID_TABLE[7] = 0x888888ca;
      cs->RA_CENTROID_TABLE[8] = 0x262a2288;
      cs->RA_CENTROID_TABLE[9] = 0x886688a2;
      cs->RA_CENTROID_TABLE[10] = 0x888866aa;
      cs->RA_CENTROID_TABLE[11] = 0x668888a6;
      break;
   }

   /* Scissor setup */
   cs->SE_SCISSOR_LEFT = 0; /* affected by rasterizer and scissor state as well */
   cs->SE_SCISSOR_TOP = 0;
   cs->SE_SCISSOR_RIGHT = (sv->width << 16) + ETNA_SE_SCISSOR_MARGIN_RIGHT;
   cs->SE_SCISSOR_BOTTOM = (sv->height << 16) + ETNA_SE_SCISSOR_MARGIN_BOTTOM;
   cs->SE_CLIP_RIGHT = (sv->width << 16) + ETNA_SE_CLIP_MARGIN_RIGHT;
   cs->SE_CLIP_BOTTOM = (sv->height << 16) + ETNA_SE_CLIP_MARGIN_BOTTOM;

   cs->TS_MEM_CONFIG = ts_mem_config;

   /* Single buffer setup. There is only one switch for this, not a separate
    * one per color buffer / depth buffer. To keep the logic simple always use
    * single buffer when this feature is available.
    */
   cs->PE_LOGIC_OP = VIVS_PE_LOGIC_OP_SINGLE_BUFFER(ctx->specs.single_buffer ? 2 : 0);

   ctx->framebuffer_s = *sv; /* keep copy of original structure */
   ctx->dirty |= ETNA_DIRTY_FRAMEBUFFER | ETNA_DIRTY_DERIVE_TS;
}
Example #25
static struct pipe_resource *
etna_resource_from_handle(struct pipe_screen *pscreen,
                          const struct pipe_resource *tmpl,
                          struct winsys_handle *handle, unsigned usage)
{
   struct etna_screen *screen = etna_screen(pscreen);
   struct etna_resource *rsc;
   struct etna_resource_level *level;
   struct pipe_resource *prsc;
   struct pipe_resource *ptiled = NULL;

   DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x",
       tmpl->target, util_format_name(tmpl->format), tmpl->width0,
       tmpl->height0, tmpl->depth0, tmpl->array_size, tmpl->last_level,
       tmpl->nr_samples, tmpl->usage, tmpl->bind, tmpl->flags);

   rsc = CALLOC_STRUCT(etna_resource);
   if (!rsc)
      return NULL;

   level = &rsc->levels[0];
   prsc = &rsc->base;

   *prsc = *tmpl;

   pipe_reference_init(&prsc->reference, 1);
   list_inithead(&rsc->list);
   prsc->screen = pscreen;

   rsc->bo = etna_screen_bo_from_handle(pscreen, handle, &level->stride);
   if (!rsc->bo)
      goto fail;

   rsc->seqno = 1;
   rsc->layout = modifier_to_layout(handle->modifier);
   rsc->halign = TEXTURE_HALIGN_FOUR;


   level->width = tmpl->width0;
   level->height = tmpl->height0;

   /* Determine padding of the imported resource. */
   unsigned paddingX = 0, paddingY = 0;
   etna_layout_multiple(rsc->layout, screen->specs.pixel_pipes,
                        VIV_FEATURE(screen, chipMinorFeatures1, TEXTURE_HALIGN),
                        &paddingX, &paddingY, &rsc->halign);

   if (!screen->specs.use_blt)
      etna_adjust_rs_align(screen->specs.pixel_pipes, NULL, &paddingY);
   level->padded_width = align(level->width, paddingX);
   level->padded_height = align(level->height, paddingY);

   level->layer_stride = level->stride * util_format_get_nblocksy(prsc->format,
                                                                  level->padded_height);
   level->size = level->layer_stride;

   /* The DDX must give us a BO which conforms to our padding size.
    * The stride of the BO must be greater than or equal to our padded
    * stride. The size of the BO must accommodate the padded height. */
   if (level->stride < util_format_get_stride(tmpl->format, level->padded_width)) {
      BUG("BO stride %u is too small for RS engine width padding (%zu, format %s)",
          level->stride, util_format_get_stride(tmpl->format, level->padded_width),
          util_format_name(tmpl->format));
      goto fail;
   }
   if (etna_bo_size(rsc->bo) < level->stride * level->padded_height) {
      BUG("BO size %u is too small for RS engine height padding (%u, format %s)",
          etna_bo_size(rsc->bo), level->stride * level->padded_height,
          util_format_name(tmpl->format));
      goto fail;
   }

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      /*
       * Both sampler and pixel pipes can't handle linear, create a compatible
       * base resource, where we can attach the imported buffer as an external
       * resource.
       */
      struct pipe_resource tiled_templat = *tmpl;

      /*
       * Remove BIND_SCANOUT to avoid recursion, as etna_resource_create uses
       * this function to import the scanout buffer and get a tiled resource.
       */
      tiled_templat.bind &= ~PIPE_BIND_SCANOUT;

      ptiled = etna_resource_create(pscreen, &tiled_templat);
      if (!ptiled)
         goto fail;

      etna_resource(ptiled)->external = prsc;

      return ptiled;
   }

   return prsc;

fail:
   etna_resource_destroy(pscreen, prsc);
   if (ptiled)
      etna_resource_destroy(pscreen, ptiled);

   return NULL;
}
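To make the stride/size validation concrete, a worked example with illustrative padding values (the real paddingX/paddingY come from etna_layout_multiple() above):

   /* Importing a 1920x1080 B8G8R8A8 (4 bytes/pixel) buffer, assuming
    * paddingX = 16 and paddingY = 8:
    *
    *   padded_width  = align(1920, 16) = 1920
    *   padded_height = align(1080,  8) = 1080
    *
    * The import is then rejected unless:
    *   level->stride         >= 1920 * 4    = 7680 bytes
    *   etna_bo_size(rsc->bo) >= 7680 * 1080 = 8294400 bytes
    */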
Example #26
/* Create a new resource object, using the given template info */
struct pipe_resource *
etna_resource_alloc(struct pipe_screen *pscreen, unsigned layout,
                    uint64_t modifier, const struct pipe_resource *templat)
{
   struct etna_screen *screen = etna_screen(pscreen);
   struct etna_resource *rsc;
   unsigned size;

   DBG_F(ETNA_DBG_RESOURCE_MSGS,
         "target=%d, format=%s, %ux%ux%u, array_size=%u, "
         "last_level=%u, nr_samples=%u, usage=%u, bind=%x, flags=%x",
         templat->target, util_format_name(templat->format), templat->width0,
         templat->height0, templat->depth0, templat->array_size,
         templat->last_level, templat->nr_samples, templat->usage,
         templat->bind, templat->flags);

   /* Determine scaling for antialiasing, allow override using debug flag */
   int nr_samples = templat->nr_samples;
   if ((templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
       !(templat->bind & PIPE_BIND_SAMPLER_VIEW)) {
      if (DBG_ENABLED(ETNA_DBG_MSAA_2X))
         nr_samples = 2;
      if (DBG_ENABLED(ETNA_DBG_MSAA_4X))
         nr_samples = 4;
   }

   int msaa_xscale = 1, msaa_yscale = 1;
   if (!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL)) {
      /* Number of samples not supported */
      return NULL;
   }

   /* Determine needed padding (alignment of height/width) */
   unsigned paddingX = 0, paddingY = 0;
   unsigned halign = TEXTURE_HALIGN_FOUR;
   if (!util_format_is_compressed(templat->format)) {
      /* If we have the TEXTURE_HALIGN feature, we can always align to the
       * resolve engine's width.  If not, we must not align resources used
       * only for textures. If this GPU uses the BLT engine, never do RS align.
       */
      bool rs_align = screen->specs.use_blt ? false : (
                         VIV_FEATURE(screen, chipMinorFeatures1, TEXTURE_HALIGN) ||
                         !etna_resource_sampler_only(templat));
      etna_layout_multiple(layout, screen->specs.pixel_pipes, rs_align, &paddingX,
                           &paddingY, &halign);
      assert(paddingX && paddingY);
   } else {
      /* Compressed textures are padded to their block size, but we don't have
       * to do anything special for that. */
      paddingX = 1;
      paddingY = 1;
   }

   if (!screen->specs.use_blt && templat->target != PIPE_BUFFER)
      etna_adjust_rs_align(screen->specs.pixel_pipes, NULL, &paddingY);

   if (templat->bind & PIPE_BIND_SCANOUT) {
      struct pipe_resource scanout_templat = *templat;
      struct renderonly_scanout *scanout;
      struct winsys_handle handle;

      /* pad scanout buffer size to be compatible with the RS */
      if (!screen->specs.use_blt && modifier == DRM_FORMAT_MOD_LINEAR)
         etna_adjust_rs_align(screen->specs.pixel_pipes, &paddingX, &paddingY);

      scanout_templat.width0 = align(scanout_templat.width0, paddingX);
      scanout_templat.height0 = align(scanout_templat.height0, paddingY);

      scanout = renderonly_scanout_for_resource(&scanout_templat,
                                                screen->ro, &handle);
      if (!scanout)
         return NULL;

      assert(handle.type == WINSYS_HANDLE_TYPE_FD);
      handle.modifier = modifier;
      rsc = etna_resource(pscreen->resource_from_handle(pscreen, templat,
                                                        &handle,
                                                        PIPE_HANDLE_USAGE_WRITE));
      close(handle.handle);
      if (!rsc)
         return NULL;

      rsc->scanout = scanout;

      return &rsc->base;
   }

   rsc = CALLOC_STRUCT(etna_resource);
   if (!rsc)
      return NULL;

   rsc->base = *templat;
   rsc->base.screen = pscreen;
   rsc->base.nr_samples = nr_samples;
   rsc->layout = layout;
   rsc->halign = halign;

   pipe_reference_init(&rsc->base.reference, 1);
   list_inithead(&rsc->list);

   size = setup_miptree(rsc, paddingX, paddingY, msaa_xscale, msaa_yscale);

   uint32_t flags = DRM_ETNA_GEM_CACHE_WC;
   if (templat->bind & PIPE_BIND_VERTEX_BUFFER)
      flags |= DRM_ETNA_GEM_FORCE_MMU;
   struct etna_bo *bo = etna_bo_new(screen->dev, size, flags);
   if (unlikely(bo == NULL)) {
      BUG("Problem allocating video memory for resource");
      goto free_rsc;
   }

   rsc->bo = bo;
   rsc->ts_bo = NULL; /* TS is only created when first bound to surface */

   if (DBG_ENABLED(ETNA_DBG_ZERO)) {
      void *map = etna_bo_map(bo);
      memset(map, 0, size);
   }

   return &rsc->base;

free_rsc:
   FREE(rsc);
   return NULL;
}
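
As a usage illustration for the allocation path above, the sketch below creates a 4x MSAA render target through the generic gallium screen hook. This is a minimal sketch, not driver code: the include paths and the BGRA format choice are assumptions, and whether 4x actually sticks depends on translate_samples_to_xyscale() accepting the sample count, exactly as in the code above.

#include "pipe/p_screen.h"
#include "pipe/p_state.h"

static struct pipe_resource *
create_msaa_rt_sketch(struct pipe_screen *screen, unsigned w, unsigned h)
{
   struct pipe_resource templat = {0};

   templat.target = PIPE_TEXTURE_2D;
   templat.format = PIPE_FORMAT_B8G8R8A8_UNORM;
   templat.width0 = w;
   templat.height0 = h;
   templat.depth0 = 1;
   templat.array_size = 1;
   templat.nr_samples = 4;                 /* request 2x2 supersampling */
   templat.bind = PIPE_BIND_RENDER_TARGET; /* no SAMPLER_VIEW, so MSAA is allowed */

   return screen->resource_create(screen, &templat);
}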
Example #27
static struct pipe_resource *
etna_resource_from_handle(struct pipe_screen *pscreen,
                          const struct pipe_resource *tmpl,
                          struct winsys_handle *handle, unsigned usage)
{
   struct etna_screen *screen = etna_screen(pscreen);
   struct etna_resource *rsc = CALLOC_STRUCT(etna_resource);
   struct etna_resource_level *level;
   struct pipe_resource *prsc;
   struct pipe_resource *ptiled = NULL;

   DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x",
       tmpl->target, util_format_name(tmpl->format), tmpl->width0,
       tmpl->height0, tmpl->depth0, tmpl->array_size, tmpl->last_level,
       tmpl->nr_samples, tmpl->usage, tmpl->bind, tmpl->flags);

   if (!rsc)
      return NULL;

   /* Only touch the allocation after the NULL check above */
   level = &rsc->levels[0];
   prsc = &rsc->base;
   *prsc = *tmpl;

   pipe_reference_init(&prsc->reference, 1);
   list_inithead(&rsc->list);
   prsc->screen = pscreen;

   rsc->bo = etna_screen_bo_from_handle(pscreen, handle, &level->stride);
   if (!rsc->bo)
      goto fail;

   level->width = tmpl->width0;
   level->height = tmpl->height0;

   /* We will be using the RS to copy with this resource, so we must
    * ensure that it is appropriately aligned for the RS requirements. */
   unsigned paddingX = ETNA_RS_WIDTH_MASK + 1;
   unsigned paddingY = (ETNA_RS_HEIGHT_MASK + 1) * screen->specs.pixel_pipes;

   level->padded_width = align(level->width, paddingX);
   level->padded_height = align(level->height, paddingY);
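   /* For example, assuming the usual RS units of 16 pixels by 4 lines and
    * two pixel pipes (paddingX = 16, paddingY = 8), a 1921x1081 buffer is
    * padded to 1936x1088. */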

   /* The DDX must give us a BO which conforms to our padding size.
    * The stride of the BO must be greater than or equal to our padded
    * stride. The size of the BO must accommodate the padded height. */
   if (level->stride < util_format_get_stride(tmpl->format, level->padded_width)) {
      BUG("BO stride is too small for RS engine width padding");
      goto fail;
   }
   if (etna_bo_size(rsc->bo) < level->stride * level->padded_height) {
      BUG("BO size is too small for RS engine height padding");
      goto fail;
   }

   if (handle->type == DRM_API_HANDLE_TYPE_SHARED && tmpl->bind & PIPE_BIND_RENDER_TARGET) {
      /* Render targets are linear in Xorg but must be tiled
       * here. It would be nice if dri_drawable_get_format()
       * set scanout for these buffers too. */
      struct etna_resource *tiled;

      ptiled = etna_resource_create(pscreen, tmpl);
      if (!ptiled)
         goto fail;

      tiled = etna_resource(ptiled);
      tiled->scanout = renderonly_scanout_for_prime(prsc, screen->ro);
      if (!tiled->scanout)
         goto fail;

      return ptiled;
   }

   return prsc;

fail:
   etna_resource_destroy(pscreen, prsc);
   if (ptiled)
      etna_resource_destroy(pscreen, ptiled);

   return NULL;
}
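
The padding contract that etna_resource_from_handle() enforces can be made concrete with a small helper. This is a sketch under stated assumptions, not the driver's API: the 16-pixel width unit and 4-line-per-pipe height unit stand in for ETNA_RS_WIDTH_MASK / ETNA_RS_HEIGHT_MASK, and power-of-two alignment units are assumed.

#include <stddef.h>

/* Minimum BO size an allocator must provide for the import above
 * (illustrative alignment units, see the lead-in). */
static size_t
min_bo_size_for_import(unsigned width, unsigned height, unsigned cpp,
                       unsigned pixel_pipes)
{
   const unsigned align_w = 16;              /* assumed RS width unit */
   const unsigned align_h = 4 * pixel_pipes; /* assumed RS height unit */
   unsigned padded_w = (width + align_w - 1) & ~(align_w - 1);
   unsigned padded_h = (height + align_h - 1) & ~(align_h - 1);

   return (size_t)padded_w * cpp * padded_h; /* stride * padded height */
}

For a 1921x1081 BGRA8888 buffer on a two-pipe GPU this gives a 1936x1088 footprint, matching the stride and size checks in the import path above.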
Example #28
static struct pipe_sampler_view *
etna_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                         const struct pipe_sampler_view *so)
{
   struct etna_sampler_view *sv = CALLOC_STRUCT(etna_sampler_view);
   struct etna_resource *res = etna_resource(prsc);
   struct etna_context *ctx = etna_context(pctx);
   const uint32_t format = translate_texture_format(so->format);
   const bool ext = !!(format & EXT_FORMAT);
   const uint32_t swiz = get_texture_swiz(so->format, so->swizzle_r,
                                          so->swizzle_g, so->swizzle_b,
                                          so->swizzle_a);

   if (!sv)
      return NULL;

   if (!etna_resource_sampler_compatible(res)) {
      /* The original resource is not compatible with the sampler.
       * Allocate an appropriately tiled texture. */
      if (!res->texture) {
         struct pipe_resource templat = *prsc;

         templat.bind &= ~(PIPE_BIND_DEPTH_STENCIL | PIPE_BIND_RENDER_TARGET |
                           PIPE_BIND_BLENDABLE);
         res->texture =
            etna_resource_alloc(pctx->screen, ETNA_LAYOUT_TILED,
                                DRM_FORMAT_MOD_LINEAR, &templat);
      }

      if (!res->texture) {
         FREE(sv);
         return NULL;
      }
      res = etna_resource(res->texture);
   }

   sv->base = *so;
   pipe_reference_init(&sv->base.reference, 1);
   sv->base.texture = NULL;
   pipe_resource_reference(&sv->base.texture, prsc);
   sv->base.context = pctx;

   /* merged with sampler state */
   sv->TE_SAMPLER_CONFIG0 = COND(!ext, VIVS_TE_SAMPLER_CONFIG0_FORMAT(format));
   sv->TE_SAMPLER_CONFIG0_MASK = 0xffffffff;

   switch (sv->base.target) {
   case PIPE_TEXTURE_1D:
      /* For 1D textures, we will have a height of 1, so we can use 2D
       * but set T wrap to repeat */
      sv->TE_SAMPLER_CONFIG0_MASK = ~VIVS_TE_SAMPLER_CONFIG0_VWRAP__MASK;
      sv->TE_SAMPLER_CONFIG0 |= VIVS_TE_SAMPLER_CONFIG0_VWRAP(TEXTURE_WRAPMODE_REPEAT);
      /* fallthrough */
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      sv->TE_SAMPLER_CONFIG0 |= VIVS_TE_SAMPLER_CONFIG0_TYPE(TEXTURE_TYPE_2D);
      break;
   case PIPE_TEXTURE_CUBE:
      sv->TE_SAMPLER_CONFIG0 |= VIVS_TE_SAMPLER_CONFIG0_TYPE(TEXTURE_TYPE_CUBE_MAP);
      break;
   default:
      BUG("Unhandled texture target");
      FREE(sv);
      return NULL;
   }

   sv->TE_SAMPLER_CONFIG1 = COND(ext, VIVS_TE_SAMPLER_CONFIG1_FORMAT_EXT(format)) |
                            VIVS_TE_SAMPLER_CONFIG1_HALIGN(res->halign) | swiz;
   sv->TE_SAMPLER_SIZE = VIVS_TE_SAMPLER_SIZE_WIDTH(res->base.width0) |
                         VIVS_TE_SAMPLER_SIZE_HEIGHT(res->base.height0);
   sv->TE_SAMPLER_LOG_SIZE =
      VIVS_TE_SAMPLER_LOG_SIZE_WIDTH(etna_log2_fixp55(res->base.width0)) |
      VIVS_TE_SAMPLER_LOG_SIZE_HEIGHT(etna_log2_fixp55(res->base.height0));

   /* Set up levels-of-detail */
   for (int lod = 0; lod <= res->base.last_level; ++lod) {
      sv->TE_SAMPLER_LOD_ADDR[lod].bo = res->bo;
      sv->TE_SAMPLER_LOD_ADDR[lod].offset = res->levels[lod].offset;
      sv->TE_SAMPLER_LOD_ADDR[lod].flags = ETNA_RELOC_READ;
   }
   sv->min_lod = sv->base.u.tex.first_level << 5;
   sv->max_lod = MIN2(sv->base.u.tex.last_level, res->base.last_level) << 5;
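   /* LOD bounds are programmed in 5.5 fixed point; << 5 shifts a whole mip
    * level number into the integer part (see the sketch after this
    * example). */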

   /* Workaround for npot textures -- it appears that only CLAMP_TO_EDGE is
    * supported when the appropriate capability is not set. */
   if (!ctx->specs.npot_tex_any_wrap &&
       (!util_is_power_of_two(res->base.width0) || !util_is_power_of_two(res->base.height0))) {
      sv->TE_SAMPLER_CONFIG0_MASK = ~(VIVS_TE_SAMPLER_CONFIG0_UWRAP__MASK |
                                      VIVS_TE_SAMPLER_CONFIG0_VWRAP__MASK);
      sv->TE_SAMPLER_CONFIG0 |=
         VIVS_TE_SAMPLER_CONFIG0_UWRAP(TEXTURE_WRAPMODE_CLAMP_TO_EDGE) |
         VIVS_TE_SAMPLER_CONFIG0_VWRAP(TEXTURE_WRAPMODE_CLAMP_TO_EDGE);
   }

   return &sv->base;
}
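
The LOG_SIZE and LOD registers above use a 5.5 fixed-point encoding (5 integer bits, 5 fractional bits). A minimal sketch of such an encoder follows; it is an assumption about the encoding, not the driver's actual etna_log2_fixp55 routine.

#include <math.h>
#include <stdint.h>

/* log2 of a texture dimension in 5.5 fixed point, i.e.
 * round(log2(x) * 32). For x = 256 this yields 8 << 5 = 0x100. */
static uint32_t
log2_fixp55_sketch(unsigned x)
{
   if (x <= 1)
      return 0;
   return (uint32_t)lrintf(log2f((float)x) * 32.0f);
}

With this encoding a whole mip level N is simply N << 5, which is exactly how min_lod and max_lod are derived from first_level and last_level above.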
Example #29
static void *etna_pipe_transfer_map(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,  /* a combination of PIPE_TRANSFER_x */
                         const struct pipe_box *box,
                         struct pipe_transfer **out_transfer)
{
    struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_transfer *ptrans = util_slab_alloc(&priv->transfer_pool);
    struct etna_resource *resource_priv = etna_resource(resource);
    enum pipe_format format = resource->format;
    if (!ptrans)
        return NULL;
    assert(level <= resource->last_level);

    /* PIPE_TRANSFER_READ always requires a sync. */
    if(usage & PIPE_TRANSFER_READ)
    {
        etna_finish(priv->ctx);
    }
    /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be ignored when mapping in-place,
     * but when not in place we need to fire off the copy operation in transfer_flush_region (currently
     * a no-op) instead of at unmap time. This needs to be handled to support at least the
     * ARB_map_buffer_range extension.
     */
    /* XXX we don't take pending operations on the resource into account; at some not-yet-executed
       point in the pipeline it may still be:

       - bound as surface
       - bound through vertex buffer
       - bound through index buffer
       - bound in sampler view
       - used in clear_render_target / clear_depth_stencil operation
       - used in blit
       - used in resource_copy_region

       How do other drivers record this information over the course of the rendering pipeline?
       Is it necessary at all? Only if we want to provide a fast path that maps the resource directly
       (and for PIPE_TRANSFER_MAP_DIRECTLY) without forcing a sync.
       We would also need to know whether the resource is in use to determine if a sync is needed (or
       just sync always, at the expense of performance).

       A conservative approximation without too much overhead would be to mark all resources that have
       been bound at some point as busy. The drawback is that accessing a resource that was bound once
       but has not been in use for a while would still carry a performance penalty. On the other hand,
       the application could use PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or PIPE_TRANSFER_UNSYNCHRONIZED to
       avoid this in the first place...

       A) We use an in-pipe copy engine, and queue the copy operation after unmap so that the copy
          will be performed when all current commands have been executed.
          Using the RS is possible, not sure if always efficient. This can also do any kind of tiling for us.
          Only possible when PIPE_TRANSFER_DISCARD_RANGE is set.
       B) We discard the entire resource (or at least, the mipmap level) and allocate new memory for it.
          Only possible when mapping the entire resource or PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
     */

    /* If no (un)tiling is needed we can skip the staging buffer entirely
     * and return a pointer directly into the mapped BO.
     */
    ptrans->in_place = resource_priv->layout == ETNA_LAYOUT_LINEAR ||
                       (resource_priv->layout == ETNA_LAYOUT_TILED && util_format_is_compressed(resource->format));
    ptrans->base.resource = resource;
    ptrans->base.level = level;
    ptrans->base.usage = usage;
    ptrans->base.box = *box;

    struct etna_resource_level *res_level = &resource_priv->levels[level];
    /* map buffer object */
    void *mapped = etna_bo_map(resource_priv->bo) + res_level->offset;
    if(likely(ptrans->in_place))
    {
        ptrans->base.stride = res_level->stride;
        ptrans->base.layer_stride = res_level->layer_stride;
        ptrans->buffer = mapped + etna_compute_offset(resource->format, box, res_level->stride, res_level->layer_stride);
    } else {
        unsigned divSizeX = util_format_get_blockwidth(format);
        unsigned divSizeY = util_format_get_blockheight(format);
        if(usage & PIPE_TRANSFER_MAP_DIRECTLY)
        {
            /* No in-place transfer possible */
            util_slab_free(&priv->transfer_pool, ptrans);
            return NULL;
        }

        ptrans->base.stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
        ptrans->base.layer_stride = align(box->height, divSizeY) * ptrans->base.stride;
        size_t size = ptrans->base.layer_stride * box->depth;
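        /* e.g. a 13x7x1 RGBA8 box: stride = 13 * 4 = 52 bytes,
         * layer_stride = 7 * 52 = 364 bytes, size = 364 bytes */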
        ptrans->buffer = MALLOC(size);
        if (!ptrans->buffer) {
            util_slab_free(&priv->transfer_pool, ptrans);
            return NULL;
        }

        if(usage & PIPE_TRANSFER_READ)
        {
            /* untile or copy resource for reading */
            if(resource_priv->layout == ETNA_LAYOUT_LINEAR || resource_priv->layout == ETNA_LAYOUT_TILED)
            {
                if(resource_priv->layout == ETNA_LAYOUT_TILED && !util_format_is_compressed(resource_priv->base.format))
                {
                    etna_texture_untile(ptrans->buffer, mapped + ptrans->base.box.z * res_level->layer_stride,
                            ptrans->base.box.x, ptrans->base.box.y, res_level->stride,
                            ptrans->base.box.width, ptrans->base.box.height, ptrans->base.stride,
                            util_format_get_blocksize(resource_priv->base.format));
                } else { /* non-tiled or compressed format */
                    util_copy_box(ptrans->buffer,
                      resource_priv->base.format,
                      ptrans->base.stride, ptrans->base.layer_stride,
                      0, 0, 0, /* dst x,y,z */
                      ptrans->base.box.width, ptrans->base.box.height, ptrans->base.box.depth,
                      mapped,
                      res_level->stride, res_level->layer_stride,
                      ptrans->base.box.x, ptrans->base.box.y, ptrans->base.box.z);
                }
            } else /* TODO supertiling */
            {
                BUG("unsupported tiling %i for reading", resource_priv->layout);
            }
        }
    }

    *out_transfer = &ptrans->base;
    return ptrans->buffer;
}
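
For symmetry, here is a hedged sketch of the matching unmap path; it is not the driver's actual code. For a staging (non in-place) transfer mapped for writing, the CPU copy has to be tiled back into the BO before the transfer is released. etna_texture_tile is assumed to be the inverse of the etna_texture_untile call above, and the linear staging case (which would use util_copy_box in the other direction) is omitted for brevity.

static void etna_pipe_transfer_unmap_sketch(struct pipe_context *pipe,
                                            struct pipe_transfer *transfer)
{
    struct etna_pipe_context *priv = etna_pipe_context(pipe);
    struct etna_transfer *ptrans = (struct etna_transfer *)transfer;
    struct etna_resource *rsc = etna_resource(transfer->resource);
    struct etna_resource_level *res_level = &rsc->levels[transfer->level];

    if (!ptrans->in_place) {
        if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
            rsc->layout == ETNA_LAYOUT_TILED &&
            !util_format_is_compressed(rsc->base.format)) {
            /* Write the staging copy back, tiling it on the way in */
            void *mapped = etna_bo_map(rsc->bo) + res_level->offset;
            etna_texture_tile(mapped + transfer->box.z * res_level->layer_stride,
                              ptrans->buffer,
                              transfer->box.x, transfer->box.y, res_level->stride,
                              transfer->box.width, transfer->box.height,
                              transfer->stride,
                              util_format_get_blocksize(rsc->base.format));
        }
        FREE(ptrans->buffer);
    }
    util_slab_free(&priv->transfer_pool, ptrans);
}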