/* Tear down a screen: destroy all pooled fences, the mutex that guards the
 * fence freelist, and finally the screen object itself. */
static void etna_screen_destroy(struct pipe_screen *screen)
{
    struct etna_screen *es = etna_screen(screen);

    etna_screen_destroy_fences(screen);
    pipe_mutex_destroy(es->fence_mutex);
    FREE(screen);
}
static bool etna_resource_sampler_compatible(struct etna_resource *res) { if (util_format_is_compressed(res->base.format)) return true; struct etna_screen *screen = etna_screen(res->base.screen); /* This GPU supports texturing from supertiled textures? */ if (res->layout == ETNA_LAYOUT_SUPER_TILED && VIV_FEATURE(screen, chipMinorFeatures2, SUPERTILED_TEXTURE)) return true; /* TODO: LINEAR_TEXTURE_SUPPORT */ /* Otherwise, only support tiled layouts */ if (res->layout != ETNA_LAYOUT_TILED) return false; /* If we have HALIGN support, we can allow for the RS padding */ if (VIV_FEATURE(screen, chipMinorFeatures1, TEXTURE_HALIGN)) return true; /* Non-HALIGN GPUs only accept 4x4 tile-aligned textures */ if (res->halign != TEXTURE_HALIGN_FOUR) return false; return true; }
/**
 * Reference or unreference a fence. Once the reference count falls to zero,
 * the fence will be destroyed or put in the free list to be reused.
 */
static void etna_screen_fence_reference(struct pipe_screen *screen_h,
                        struct pipe_fence_handle **ptr_h,
                        struct pipe_fence_handle *fence_h )
{
    struct etna_screen *screen = etna_screen(screen_h);
    struct etna_fence *fence = etna_fence(fence_h);
    struct etna_fence **ptr = (struct etna_fence **) ptr_h;
    /* Remember the outgoing fence before the refcount update:
     * pipe_reference_described() only reports that the old object must be
     * destroyed, it does not hand it back to us. */
    struct etna_fence *old_fence = *ptr;
    if (pipe_reference_described(&(*ptr)->reference, &fence->reference,
                                 (debug_reference_descriptor)debug_describe_fence))
    {
        /* Last reference to old_fence dropped: recycle it if possible. */
        if(etna_screen_fence_signalled(screen_h, (struct pipe_fence_handle*)old_fence))
        {
            /* If signalled, add old fence to free list, as it can be reused */
            pipe_mutex_lock(screen->fence_mutex);
            old_fence->next_free = screen->fence_freelist;
            screen->fence_freelist = old_fence;
            pipe_mutex_unlock(screen->fence_mutex);
        } else {
            /* If fence is still to be signalled, destroy it, to prevent it from being
             * reused. */
            etna_screen_destroy_fence(screen_h, old_fence);
        }
    }
    /* Install the new fence pointer unconditionally. */
    *ptr_h = fence_h;
}
struct etna_bo * etna_screen_bo_from_handle(struct pipe_screen *pscreen, struct winsys_handle *whandle, unsigned *out_stride) { struct etna_screen *screen = etna_screen(pscreen); struct etna_bo *bo; if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) { bo = etna_bo_from_name(screen->dev, whandle->handle); } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) { bo = etna_bo_from_dmabuf(screen->dev, whandle->handle); } else { DBG("Attempt to import unsupported handle type %d", whandle->type); return NULL; } if (!bo) { DBG("ref name 0x%08x failed", whandle->handle); return NULL; } *out_stride = whandle->stride; return bo; }
bool etna_screen_resource_alloc_ts(struct pipe_screen *screen, struct etna_resource *resource) { struct etna_screen *priv = etna_screen(screen); size_t rt_ts_size; assert(!resource->ts_bo); /* TS only for level 0 -- XXX is this formula correct? */ rt_ts_size = align(resource->levels[0].size*priv->specs.bits_per_tile/0x80, 0x100); if(rt_ts_size == 0) return true; DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: Allocating tile status of size %i", resource, rt_ts_size); struct etna_bo *rt_ts = 0; if(unlikely((rt_ts = etna_bo_new(priv->dev, rt_ts_size, DRM_ETNA_GEM_TYPE_TS)) == NULL)) { BUG("Problem allocating tile status for resource"); return false; } resource->ts_bo = rt_ts; resource->levels[0].ts_offset = 0; resource->levels[0].ts_size = etna_bo_size(resource->ts_bo); /* It is important to initialize the TS, as random pattern * can result in crashes. Do this on the CPU as this only happens once * per surface anyway and it's a small area, so it may not be worth * queuing this to the GPU. */ void *ts_map = etna_bo_map(rt_ts); memset(ts_map, priv->specs.ts_clear_value, rt_ts_size); return true; }
static void etna_screen_query_dmabuf_modifiers(struct pipe_screen *pscreen, enum pipe_format format, int max, uint64_t *modifiers, unsigned int *external_only, int *count) { struct etna_screen *screen = etna_screen(pscreen); int i, num_modifiers = 0; if (max > ARRAY_SIZE(supported_modifiers)) max = ARRAY_SIZE(supported_modifiers); if (!max) { modifiers = NULL; max = ARRAY_SIZE(supported_modifiers); } for (i = 0; num_modifiers < max; i++) { /* don't advertise split tiled formats on single pipe/buffer GPUs */ if ((screen->specs.pixel_pipes == 1 || screen->specs.single_buffer) && i >= 3) break; if (modifiers) modifiers[num_modifiers] = supported_modifiers[i]; if (external_only) external_only[num_modifiers] = util_format_is_yuv(format) ? 1 : 0; num_modifiers++; } *count = num_modifiers; }
bool etna_screen_resource_alloc_ts(struct pipe_screen *screen, struct etna_resource *resource) { struct etna_screen *priv = etna_screen(screen); size_t rt_ts_size; assert(!resource->ts); /* TS only for level 0 -- XXX is this formula correct? */ rt_ts_size = align(resource->levels[0].size*priv->specs.bits_per_tile/0x80, 0x100); if(rt_ts_size == 0) return true; DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: Allocating tile status of size %zi", resource, rt_ts_size); struct etna_vidmem *rt_ts = 0; if(unlikely(etna_vidmem_alloc_linear(priv->dev, &rt_ts, rt_ts_size, VIV_SURF_TILE_STATUS, VIV_POOL_DEFAULT, true)!=ETNA_OK)) { BUG("Problem allocating tile status for resource"); return false; } resource->ts = rt_ts; resource->levels[0].ts_address = resource->ts->address; resource->levels[0].ts_size = resource->ts->size; /* It is important to initialize the TS, as random pattern * can result in crashes. Do this on the CPU as this only happens once * per surface anyway and it's a small area, so it may not be worth * queuing this to the GPU. */ memset(rt_ts->logical, priv->specs.ts_clear_value, rt_ts_size); return true; }
/* Create a new rendering context on this screen. */
static struct pipe_context *
etna_screen_context_create(struct pipe_screen *screen, void *priv)
{
    struct etna_screen *es = etna_screen(screen);

    return etna_new_pipe_context(es->dev, &es->specs, screen, priv);
}
/* Destroy a fence: release its kernel user signal, then free the wrapper.
 * The fence is freed even if the signal destruction fails (logged as a bug). */
void etna_screen_destroy_fence(struct pipe_screen *screen_h, struct etna_fence *fence)
{
    struct etna_screen *screen = etna_screen(screen_h);
    int status = viv_user_signal_destroy(screen->dev, fence->signal);

    if (status != VIV_STATUS_OK)
        BUG("cannot destroy signal %i", fence->signal);

    FREE(fence);
}
/* Create a resource for the given template, choosing a memory layout
 * (linear / tiled / multi-tiled / super-tiled) appropriate for its usage
 * and the hardware's capabilities, then delegating the actual allocation
 * to etna_resource_alloc(). */
static struct pipe_resource *
etna_resource_create(struct pipe_screen *pscreen,
                     const struct pipe_resource *templat)
{
   struct etna_screen *screen = etna_screen(pscreen);

   /* Figure out what tiling to use -- for now, assume that texture cannot be linear.
    * there is a capability LINEAR_TEXTURE_SUPPORT (supported on gc880 and
    * gc2000 at least), but not sure how it works.
    * Buffers always have LINEAR layout. */
   unsigned layout = ETNA_LAYOUT_LINEAR;
   if (etna_resource_sampler_only(templat)) {
      /* The buffer is only used for texturing, so create something
       * directly compatible with the sampler.  Such a buffer can
       * never be rendered to. */
      layout = ETNA_LAYOUT_TILED;

      if (util_format_is_compressed(templat->format))
         layout = ETNA_LAYOUT_LINEAR;
   } else if (templat->target != PIPE_BUFFER) {
      bool want_multitiled = false;
      bool want_supertiled = screen->specs.can_supertile;

      /* When this GPU supports single-buffer rendering, don't ever enable
       * multi-tiling. This replicates the blob behavior on GC3000. */
      if (!screen->specs.single_buffer)
         want_multitiled = screen->specs.pixel_pipes > 1;

      /* Keep single byte blocksized resources as tiled, since we
       * are unable to use the RS blit to de-tile them. However,
       * if they're used as a render target or depth/stencil, they
       * must be multi-tiled for GPUs with multiple pixel pipes.
       * Ignore depth/stencil here, but it is an error for a render
       * target. */
      if (util_format_get_blocksize(templat->format) == 1 &&
          !(templat->bind & PIPE_BIND_DEPTH_STENCIL)) {
         assert(!(templat->bind & PIPE_BIND_RENDER_TARGET && want_multitiled));
         want_multitiled = want_supertiled = false;
      }

      layout = ETNA_LAYOUT_BIT_TILE;
      if (want_multitiled)
         layout |= ETNA_LAYOUT_BIT_MULTI;
      if (want_supertiled)
         layout |= ETNA_LAYOUT_BIT_SUPER;
   }

   /* 3D textures are forced to linear.  NOTE(review): presumably because the
    * tiling paths only handle 2D slices -- confirm against the blit code. */
   if (templat->target == PIPE_TEXTURE_3D)
      layout = ETNA_LAYOUT_LINEAR;

   /* modifier is only used for scanout surfaces, so safe to use LINEAR here */
   return etna_resource_alloc(pscreen, layout, DRM_FORMAT_MOD_LINEAR, templat);
}
/* Return a human-readable name for the GPU, e.g. "Vivante GC2000 rev 5108".
 * NOTE(review): the result lives in a function-local static buffer, so the
 * returned pointer is overwritten by the next call and is not thread-safe;
 * other gallium drivers use the same pattern -- confirm callers copy it. */
static const char *
etna_screen_get_name(struct pipe_screen *pscreen)
{
   struct etna_screen *priv = etna_screen(pscreen);
   static char buffer[128];

   util_snprintf(buffer, sizeof(buffer), "Vivante GC%x rev %04x", priv->model,
                 priv->revision);

   return buffer;
}
int etna_fence_new(struct pipe_screen *screen_h, struct etna_ctx *ctx, struct pipe_fence_handle **fence_p) { struct etna_fence *fence = NULL; struct etna_screen *screen = etna_screen(screen_h); int rv; /* XXX we do not release the fence_p reference here -- neither do the other drivers, * and clients don't seem to rely on this. */ if(fence_p == NULL) return ETNA_INVALID_ADDR; assert(*fence_p == NULL); /* re-use old fence, if available, and reset it first */ pipe_mutex_lock(screen->fence_mutex); if(screen->fence_freelist != NULL) { fence = screen->fence_freelist; screen->fence_freelist = fence->next_free; fence->next_free = NULL; } pipe_mutex_unlock(screen->fence_mutex); if(fence != NULL) { if((rv = viv_user_signal_signal(ctx->conn, fence->signal, 0)) != VIV_STATUS_OK) { BUG("Error: could not reset signal %i", fence->signal); etna_screen_destroy_fence(screen_h, fence); return rv; } fence->signalled = false; } else { fence = CALLOC_STRUCT(etna_fence); /* Create signal with manual reset; we want to be able to probe it * or wait for it without resetting it. */ if((rv = viv_user_signal_create(ctx->conn, /* manualReset */ true, &fence->signal)) != VIV_STATUS_OK) { FREE(fence); return rv; } } if((rv = etna_queue_signal(ctx->queue, fence->signal, VIV_WHERE_PIXEL)) != ETNA_OK) { BUG("error queueing signal %i", fence->signal); viv_user_signal_destroy(ctx->conn, fence->signal); FREE(fence); return rv; } pipe_reference_init(&fence->reference, 1); *fence_p = (struct pipe_fence_handle*)fence; return ETNA_OK; }
/* Create a resource for the given template, choosing a memory layout
 * (linear / tiled / multi-tiled / super-tiled) based on usage and hardware
 * capabilities; allocation itself is delegated to etna_resource_alloc(). */
static struct pipe_resource *
etna_resource_create(struct pipe_screen *pscreen,
                     const struct pipe_resource *templat)
{
   struct etna_screen *screen = etna_screen(pscreen);

   /* Figure out what tiling to use -- for now, assume that textures cannot be
    * supertiled, and cannot be linear.
    * There is a feature flag SUPERTILED_TEXTURE (not supported on any known hw)
    * that may allow this, as well
    * as LINEAR_TEXTURE_SUPPORT (supported on gc880 and gc2000 at least), but
    * not sure how it works.
    * Buffers always have LINEAR layout. */
   unsigned layout = ETNA_LAYOUT_LINEAR;
   if (etna_resource_sampler_only(templat)) {
      /* The buffer is only used for texturing, so create something
       * directly compatible with the sampler.  Such a buffer can
       * never be rendered to. */
      layout = ETNA_LAYOUT_TILED;

      if (util_format_is_compressed(templat->format))
         layout = ETNA_LAYOUT_LINEAR;
   } else if (templat->target != PIPE_BUFFER) {
      /* Multi-tiling for multiple pixel pipes; supertiling when the GPU can
       * do it and it is not disabled for debugging. */
      bool want_multitiled = screen->specs.pixel_pipes > 1;
      bool want_supertiled =
         screen->specs.can_supertile && !DBG_ENABLED(ETNA_DBG_NO_SUPERTILE);

      /* Keep single byte blocksized resources as tiled, since we
       * are unable to use the RS blit to de-tile them. However,
       * if they're used as a render target or depth/stencil, they
       * must be multi-tiled for GPUs with multiple pixel pipes.
       * Ignore depth/stencil here, but it is an error for a render
       * target. */
      if (util_format_get_blocksize(templat->format) == 1 &&
          !(templat->bind & PIPE_BIND_DEPTH_STENCIL)) {
         assert(!(templat->bind & PIPE_BIND_RENDER_TARGET && want_multitiled));
         want_multitiled = want_supertiled = false;
      }

      layout = ETNA_LAYOUT_BIT_TILE;
      if (want_multitiled)
         layout |= ETNA_LAYOUT_BIT_MULTI;
      if (want_supertiled)
         layout |= ETNA_LAYOUT_BIT_SUPER;
   }

   /* 3D textures are forced to linear.  NOTE(review): presumably the tiling
    * paths only handle 2D slices -- confirm. */
   if (templat->target == PIPE_TEXTURE_3D)
      layout = ETNA_LAYOUT_LINEAR;

   return etna_resource_alloc(pscreen, layout, templat);
}
/* Destroy every fence on the screen's freelist and empty the list.
 * Runs under the fence mutex so no new fences can be recycled concurrently. */
void etna_screen_destroy_fences(struct pipe_screen *screen_h)
{
    struct etna_screen *screen = etna_screen(screen_h);

    pipe_mutex_lock(screen->fence_mutex);
    struct etna_fence *cur = screen->fence_freelist;
    while (cur != NULL)
    {
        /* Grab the link before the node is freed. */
        struct etna_fence *tmp = cur->next_free;
        etna_screen_destroy_fence(screen_h, cur);
        cur = tmp;
    }
    screen->fence_freelist = NULL;
    pipe_mutex_unlock(screen->fence_mutex);
}
static boolean etna_screen_can_create_resource(struct pipe_screen *pscreen, const struct pipe_resource *templat) { struct etna_screen *screen = etna_screen(pscreen); if(!translate_samples_to_xyscale(templat->nr_samples, NULL, NULL, NULL)) return false; if(templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL | PIPE_BIND_SAMPLER_VIEW)) { uint max_size = (templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) ? screen->specs.max_rendertarget_size : screen->specs.max_texture_size; if(templat->width0 > max_size || templat->height0 > max_size) return false; } return true; }
static boolean etna_screen_can_create_resource(struct pipe_screen *pscreen, const struct pipe_resource *templat) { struct etna_screen *screen = etna_screen(pscreen); if (!translate_samples_to_xyscale(templat->nr_samples, NULL, NULL, NULL)) return false; /* templat->bind is not set here, so we must use the minimum sizes */ uint max_size = MIN2(screen->specs.max_rendertarget_size, screen->specs.max_texture_size); if (templat->width0 > max_size || templat->height0 > max_size) return false; return true; }
/* Tear down the screen: release the command-stream pipe, the GPU handle,
 * the renderonly wrapper and the device, then the screen itself.  Each
 * member may be NULL if screen creation failed part-way. */
static void
etna_screen_destroy(struct pipe_screen *pscreen)
{
   struct etna_screen *priv = etna_screen(pscreen);

   if (priv->pipe)
      etna_pipe_del(priv->pipe);

   if (priv->gpu)
      etna_gpu_del(priv->gpu);

   if (priv->ro)
      FREE(priv->ro);

   if (priv->dev)
      etna_device_del(priv->dev);

   FREE(pscreen);
}
/**
 * Wait until the fence has been signalled for the specified timeout in nanoseconds,
 * or PIPE_TIMEOUT_INFINITE.
 */
static boolean etna_screen_fence_finish(struct pipe_screen *screen_h,
                        struct pipe_fence_handle *fence_h,
                        uint64_t timeout )
{
    struct etna_screen *screen = etna_screen(screen_h);
    struct etna_fence *fence = etna_fence(fence_h);
    int rv;
    if(fence->signalled) /* avoid a kernel roundtrip */
        return true;
    /* nanoseconds to milliseconds */
    rv = viv_user_signal_wait(screen->dev, fence->signal,
            timeout == PIPE_TIMEOUT_INFINITE ? VIV_WAIT_INDEFINITE : (timeout / 1000000ULL));
    if(rv != VIV_STATUS_OK && rv != VIV_STATUS_TIMEOUT)
    {
        /* Anything other than OK or TIMEOUT is an unexpected kernel error. */
        BUG("error waiting for signal %i", fence->signal);
    }
    /* Cache the outcome so later queries can skip the kernel roundtrip.
     * Any non-timeout status (including errors) marks the fence signalled. */
    fence->signalled = (rv != VIV_STATUS_TIMEOUT);
    return fence->signalled;
}
/* A tile is 4x4 pixels, having 'screen->specs.bits_per_tile' of tile status. * So, in a buffer of N pixels, there are N / (4 * 4) tiles. * We need N * screen->specs.bits_per_tile / (4 * 4) bits of tile status, or * N * screen->specs.bits_per_tile / (4 * 4 * 8) bytes. */ bool etna_screen_resource_alloc_ts(struct pipe_screen *pscreen, struct etna_resource *rsc) { struct etna_screen *screen = etna_screen(pscreen); size_t rt_ts_size, ts_layer_stride, pixels; assert(!rsc->ts_bo); /* TS only for level 0 -- XXX is this formula correct? */ pixels = rsc->levels[0].layer_stride / util_format_get_blocksize(rsc->base.format); ts_layer_stride = align(pixels * screen->specs.bits_per_tile / 0x80, 0x100 * screen->specs.pixel_pipes); rt_ts_size = ts_layer_stride * rsc->base.array_size; if (rt_ts_size == 0) return true; DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: Allocating tile status of size %zu", rsc, rt_ts_size); struct etna_bo *rt_ts; rt_ts = etna_bo_new(screen->dev, rt_ts_size, DRM_ETNA_GEM_CACHE_WC); if (unlikely(!rt_ts)) { BUG("Problem allocating tile status for resource"); return false; } rsc->ts_bo = rt_ts; rsc->levels[0].ts_offset = 0; rsc->levels[0].ts_layer_stride = ts_layer_stride; rsc->levels[0].ts_size = rt_ts_size; /* It is important to initialize the TS, as random pattern * can result in crashes. Do this on the CPU as this only happens once * per surface anyway and it's a small area, so it may not be worth * queuing this to the GPU. */ void *ts_map = etna_bo_map(rt_ts); memset(ts_map, screen->specs.ts_clear_value, rt_ts_size); return true; }
/* Return float-valued capabilities (PIPE_CAPF_*) for this screen. */
static float
etna_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
{
   struct etna_screen *screen = etna_screen(pscreen);

   switch (param) {
   case PIPE_CAPF_MAX_LINE_WIDTH:
   case PIPE_CAPF_MAX_LINE_WIDTH_AA:
   case PIPE_CAPF_MAX_POINT_WIDTH:
   case PIPE_CAPF_MAX_POINT_WIDTH_AA:
      return 8192.0f;
   case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
      return 16.0f;
   case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
      /* LOD bias range tracks the mip chain length: log2 of the
       * maximum texture dimension. */
      return util_last_bit(screen->specs.max_texture_size);
   }

   /* Unhandled caps fall out of the switch: log and report 0. */
   debug_printf("unknown paramf %d", param);
   return 0;
}
static struct pipe_resource * etna_resource_create_modifiers(struct pipe_screen *pscreen, const struct pipe_resource *templat, const uint64_t *modifiers, int count) { struct etna_screen *screen = etna_screen(pscreen); struct pipe_resource tmpl = *templat; uint64_t modifier = select_best_modifier(screen, modifiers, count); if (modifier == DRM_FORMAT_MOD_INVALID) return NULL; /* * We currently assume that all buffers allocated through this interface * should be scanout enabled. */ tmpl.bind |= PIPE_BIND_SCANOUT; return etna_resource_alloc(pscreen, modifier_to_layout(modifier), modifier, &tmpl); }
static void etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc) { struct etna_resource *rsc = etna_resource(prsc); if (rsc->bo) etna_bo_del(rsc->bo); if (rsc->ts_bo) etna_bo_del(rsc->ts_bo); if (rsc->scanout) renderonly_scanout_destroy(rsc->scanout, etna_screen(pscreen)->ro); list_delinit(&rsc->list); pipe_resource_reference(&rsc->texture, NULL); pipe_resource_reference(&rsc->external, NULL); FREE(rsc); }
/* Destroy a resource.  If a context still references it, the video memory is
 * queued for release on that context's command queue (freed once the GPU is
 * done with it); otherwise it is freed immediately. */
static void etna_screen_resource_destroy(struct pipe_screen *screen,
                        struct pipe_resource *resource_)
{
    struct etna_screen *priv = etna_screen(screen);
    struct etna_resource *resource = etna_resource(resource_);
    if(resource == NULL)
        return;
    if(resource->last_ctx != NULL)
    {
        /* XXX This could fail when multiple contexts share this resource,
         * (the last one to bind it will "own" it) or fail miserably if
         * the context was since destroyed.
         */
        struct etna_pipe_context *ectx = resource->last_ctx;
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource queued destroyed (%ix%ix%i)",
                resource, resource_->width0, resource_->height0, resource_->depth0);
        /* Deferred free via the owning context's queue: both the surface
         * memory and the tile status buffer. */
        etna_vidmem_queue_free(ectx->ctx->queue, resource->surface);
        etna_vidmem_queue_free(ectx->ctx->queue, resource->ts);
    } else {
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource destroyed (%ix%ix%i)",
                resource, resource_->width0, resource_->height0, resource_->depth0);
        /* Never bound to a context: safe to free immediately. */
        etna_vidmem_free(priv->dev, resource->surface);
        etna_vidmem_free(priv->dev, resource->ts);
    }
    FREE(resource);
}
/* Destroy a resource.  If a context still references it, its BOs are deleted
 * through that context's queue (deferred until the GPU is done); otherwise
 * they are deleted immediately (queue == NULL). */
static void etna_screen_resource_destroy(struct pipe_screen *screen,
                        struct pipe_resource *resource_)
{
    struct etna_screen *priv = etna_screen(screen);
    struct etna_resource *resource = etna_resource(resource_);
    if(resource == NULL)
        return;
    struct etna_queue *queue = NULL;
    if(resource->last_ctx != NULL)
    {
        /* XXX This could fail when multiple contexts share this resource,
         * (the last one to bind it will "own" it) or fail miserably if
         * the context was since destroyed.
         * Integrate this into etna_bo_del...
         */
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource queued destroyed (%ix%ix%i)",
                resource, resource_->width0, resource_->height0, resource_->depth0);
        queue = resource->last_ctx->ctx->queue;
    } else {
        DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: resource destroyed (%ix%ix%i)",
                resource, resource_->width0, resource_->height0, resource_->depth0);
    }
    /* A NULL queue makes etna_bo_del free immediately; a non-NULL queue
     * defers the free until the GPU has passed this point. */
    etna_bo_del(priv->dev, resource->bo, queue);
    etna_bo_del(priv->dev, resource->ts_bo, queue);
    FREE(resource);
}
/* Return true if the GPU can use sampler TS with this sampler view. * Sampler TS is an optimization used when rendering to textures, where * a resolve-in-place can be avoided when rendering has left a (valid) TS. */ static bool etna_can_use_sampler_ts(struct pipe_sampler_view *view, int num) { /* Can use sampler TS when: * - the hardware supports sampler TS. * - the sampler view will be bound to sampler <VIVS_TS_SAMPLER__LEN. * HALTI5 adds a mapping from sampler to sampler TS unit, but this is AFAIK * absent on earlier models. * - it is a texture, not a buffer. * - the sampler view has a supported format for sampler TS. * - the sampler will have one LOD, and it happens to be level 0. * (it is not sure if the hw supports it for other levels, but available * state strongly suggests only one at a time). * - the resource TS is valid for level 0. */ struct etna_resource *rsc = etna_resource(view->texture); struct etna_screen *screen = etna_screen(rsc->base.screen); return VIV_FEATURE(screen, chipMinorFeatures2, TEXTURE_TILED_READ) && num < VIVS_TS_SAMPLER__LEN && rsc->base.target != PIPE_BUFFER && translate_ts_sampler_format(rsc->base.format) != ETNA_NO_MATCH && view->u.tex.first_level == 0 && MIN2(view->u.tex.last_level, rsc->base.last_level) == 0 && rsc->levels[0].ts_valid; }
/* Allocate 2D texture or render target resource */ static struct pipe_resource * etna_screen_resource_create(struct pipe_screen *screen, const struct pipe_resource *templat) { struct etna_screen *priv = etna_screen(screen); assert(templat); /* Check input */ if(templat->target == PIPE_TEXTURE_CUBE) { assert(templat->array_size == 6); } else if (templat->target == PIPE_BUFFER) { assert(templat->format == PIPE_FORMAT_R8_UNORM); /* bytes; want TYPELESS or similar */ assert(templat->array_size == 1); assert(templat->height0 == 1); assert(templat->depth0 == 1); assert(templat->array_size == 1); assert(templat->last_level == 0); } else { assert(templat->array_size == 1); } assert(templat->width0 != 0); assert(templat->height0 != 0); assert(templat->depth0 != 0); assert(templat->array_size != 0); /* Figure out what tiling to use -- for now, assume that textures cannot be supertiled, and cannot be linear. * There is a feature flag SUPERTILED_TEXTURE (not supported on any known hw) that may allow this, as well * as LINEAR_TEXTURE_SUPPORT (supported on gc880 and gc2000 at least), but not sure how it works. * Buffers always have LINEAR layout. 
*/ unsigned layout = ETNA_LAYOUT_LINEAR; if(templat->target != PIPE_BUFFER) { if(!(templat->bind & PIPE_BIND_SAMPLER_VIEW) && priv->specs.can_supertile && !DBG_ENABLED(ETNA_DBG_NO_SUPERTILE)) layout = ETNA_LAYOUT_SUPER_TILED; else layout = ETNA_LAYOUT_TILED; } /* XXX multi tiled formats */ /* Determine scaling for antialiasing, allow override using debug flag */ int nr_samples = templat->nr_samples; if((templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) && !(templat->bind & PIPE_BIND_SAMPLER_VIEW)) { if(DBG_ENABLED(ETNA_DBG_MSAA_2X)) nr_samples = 2; if(DBG_ENABLED(ETNA_DBG_MSAA_4X)) nr_samples = 4; } int msaa_xscale = 1, msaa_yscale = 1; if(!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL)) { /* Number of samples not supported */ return NULL; } /* Determine needed padding (alignment of height/width) */ unsigned paddingX = 0, paddingY = 0; unsigned halign = TEXTURE_HALIGN_FOUR; etna_layout_multiple(layout, priv->dev->chip.pixel_pipes, (templat->bind & PIPE_BIND_SAMPLER_VIEW) && !VIV_FEATURE(priv->dev, chipMinorFeatures1, TEXTURE_HALIGN), &paddingX, &paddingY, &halign); assert(paddingX && paddingY); /* compute mipmap level sizes and offsets */ struct etna_resource *resource = CALLOC_STRUCT(etna_resource); int max_mip_level = templat->last_level; if(unlikely(max_mip_level >= ETNA_NUM_LOD)) /* max LOD supported by hw */ max_mip_level = ETNA_NUM_LOD - 1; unsigned ix = 0; unsigned x = templat->width0, y = templat->height0; unsigned offset = 0; while(true) { struct etna_resource_level *mip = &resource->levels[ix]; mip->width = x; mip->height = y; mip->padded_width = align(x * msaa_xscale, paddingX); mip->padded_height = align(y * msaa_yscale, paddingY); mip->stride = util_format_get_stride(templat->format, mip->padded_width); mip->offset = offset; mip->layer_stride = mip->stride * util_format_get_nblocksy(templat->format, mip->padded_height); mip->size = templat->array_size * mip->layer_stride; offset += align(mip->size, 
ETNA_PE_ALIGNMENT); /* align mipmaps to 64 bytes to be able to render to them */ if(ix == max_mip_level || (x == 1 && y == 1)) break; // stop at last level x = u_minify(x, 1); y = u_minify(y, 1); ix += 1; } /* determine memory type */ uint32_t flags = 0; /* XXX DRM_ETNA_GEM_CACHE_xxx */ enum viv_surf_type memtype = VIV_SURF_UNKNOWN; if(templat->bind & PIPE_BIND_SAMPLER_VIEW) flags |= DRM_ETNA_GEM_TYPE_TEX; else if(templat->bind & PIPE_BIND_RENDER_TARGET) flags |= DRM_ETNA_GEM_TYPE_RT; else if(templat->bind & PIPE_BIND_DEPTH_STENCIL) flags |= DRM_ETNA_GEM_TYPE_ZS; else if(templat->bind & PIPE_BIND_INDEX_BUFFER) flags |= DRM_ETNA_GEM_TYPE_IDX; else if(templat->bind & PIPE_BIND_VERTEX_BUFFER) flags |= DRM_ETNA_GEM_TYPE_VTX; DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: Allocate surface of %ix%i (padded to %ix%i), %i layers, of format %s, size %08x flags %08x, memtype %i", resource, templat->width0, templat->height0, resource->levels[0].padded_width, resource->levels[0].padded_height, templat->array_size, util_format_name(templat->format), offset, templat->bind, memtype); struct etna_bo *bo = 0; if(unlikely((bo = etna_bo_new(priv->dev, offset, flags)) == NULL)) { BUG("Problem allocating video memory for resource"); return NULL; } resource->base = *templat; resource->base.last_level = ix; /* real last mipmap level */ resource->base.screen = screen; resource->base.nr_samples = nr_samples; resource->layout = layout; resource->halign = halign; resource->bo = bo; resource->ts_bo = 0; /* TS is only created when first bound to surface */ pipe_reference_init(&resource->base.reference, 1); if(DBG_ENABLED(ETNA_DBG_ZERO)) { void *map = etna_bo_map(bo); memset(map, 0, offset); } return &resource->base; }
/* Report which of the requested bind usages are supported for a given
 * format / target / sample-count combination.  Returns TRUE only when ALL
 * requested usages are supported; otherwise logs the mismatch. */
static boolean
etna_screen_is_format_supported(struct pipe_screen *pscreen,
                                enum pipe_format format,
                                enum pipe_texture_target target,
                                unsigned sample_count, unsigned usage)
{
   struct etna_screen *screen = etna_screen(pscreen);
   unsigned allowed = 0;

   if (target != PIPE_BUFFER &&
       target != PIPE_TEXTURE_1D &&
       target != PIPE_TEXTURE_2D &&
       target != PIPE_TEXTURE_3D &&
       target != PIPE_TEXTURE_CUBE &&
       target != PIPE_TEXTURE_RECT)
      return FALSE;

   if (usage & PIPE_BIND_RENDER_TARGET) {
      /* If render target, must be RS-supported format that is not rb swapped.
       * Exposing rb swapped (or other swizzled) formats for rendering would
       * involve swizzing in the pixel shader. */
      if (translate_rs_format(format) != ETNA_NO_MATCH &&
          !translate_rs_format_rb_swap(format)) {
         /* Validate MSAA; number of samples must be allowed, and render target
          * must have MSAA'able format. */
         if (sample_count > 1) {
            if (translate_samples_to_xyscale(sample_count, NULL, NULL, NULL) &&
                translate_msaa_format(format) != ETNA_NO_MATCH) {
               allowed |= PIPE_BIND_RENDER_TARGET;
            }
         } else {
            allowed |= PIPE_BIND_RENDER_TARGET;
         }
      }
   }

   if (usage & PIPE_BIND_DEPTH_STENCIL) {
      if (translate_depth_format(format) != ETNA_NO_MATCH)
         allowed |= PIPE_BIND_DEPTH_STENCIL;
   }

   if (usage & PIPE_BIND_SAMPLER_VIEW) {
      uint32_t fmt = translate_texture_format(format);

      /* The per-GPU feature check may veto formats the translation
       * table alone would allow. */
      if (!gpu_supports_texure_format(screen, fmt))
         fmt = ETNA_NO_MATCH;

      /* No multisampled texturing. */
      if (sample_count < 2 && fmt != ETNA_NO_MATCH)
         allowed |= PIPE_BIND_SAMPLER_VIEW;
   }

   if (usage & PIPE_BIND_VERTEX_BUFFER) {
      if (translate_vertex_format_type(format) != ETNA_NO_MATCH)
         allowed |= PIPE_BIND_VERTEX_BUFFER;
   }

   if (usage & PIPE_BIND_INDEX_BUFFER) {
      /* must be supported index format */
      if (format == PIPE_FORMAT_I8_UINT || format == PIPE_FORMAT_I16_UINT ||
          (format == PIPE_FORMAT_I32_UINT &&
           VIV_FEATURE(screen, chipFeatures, 32_BIT_INDICES))) {
         allowed |= PIPE_BIND_INDEX_BUFFER;
      }
   }

   /* Always allowed */
   allowed |= usage & (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT |
                       PIPE_BIND_SHARED);

   if (usage != allowed) {
      DBG("not supported: format=%s, target=%d, sample_count=%d, "
          "usage=%x, allowed=%x",
          util_format_name(format), target, sample_count, usage, allowed);
   }

   return usage == allowed;
}
/* Report per-shader-stage capabilities (PIPE_SHADER_CAP_*).  Only vertex and
 * fragment shaders are supported; all other stages report 0 for everything. */
static int
etna_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
                             enum pipe_shader_cap param)
{
   struct etna_screen *screen = etna_screen(pscreen);

   switch (shader) {
   case PIPE_SHADER_FRAGMENT:
   case PIPE_SHADER_VERTEX:
      break;
   case PIPE_SHADER_COMPUTE:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_TESS_CTRL:
   case PIPE_SHADER_TESS_EVAL:
      /* Unsupported stages: report no capabilities. */
      return 0;
   default:
      DBG("unknown shader type %d", shader);
      return 0;
   }

   switch (param) {
   case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
      return ETNA_MAX_TOKENS;
   case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
      return ETNA_MAX_DEPTH; /* XXX */
   case PIPE_SHADER_CAP_MAX_INPUTS:
      /* Maximum number of inputs for the vertex shader is the number
       * of vertex elements - each element defines one vertex shader
       * input register.  For the fragment shader, this is the number
       * of varyings. */
      return shader == PIPE_SHADER_FRAGMENT ? screen->specs.max_varyings
                                            : screen->specs.vertex_max_elements;
   case PIPE_SHADER_CAP_MAX_OUTPUTS:
      return 16; /* see VIVS_VS_OUTPUT */
   case PIPE_SHADER_CAP_MAX_TEMPS:
      return 64; /* Max native temporaries. */
   case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
      return 1;
   case PIPE_SHADER_CAP_MAX_PREDS:
      return 0; /* nothing uses this */
   case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
      return 1;
   case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      return 1;
   case PIPE_SHADER_CAP_SUBROUTINES:
      return 0;
   case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
      /* Native sqrt only when the GPU has the sqrt/trig unit. */
      return VIV_FEATURE(screen, chipMinorFeatures0, HAS_SQRT_TRIG);
   case PIPE_SHADER_CAP_INTEGERS:
      return 0;
   case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
   case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
      return shader == PIPE_SHADER_FRAGMENT
                ? screen->specs.fragment_sampler_count
                : screen->specs.vertex_sampler_count;
   case PIPE_SHADER_CAP_PREFERRED_IR:
      return PIPE_SHADER_IR_TGSI;
   case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
      return 4096;
   case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
      return false;
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
      return 0;
   case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
      return 32;
   case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
   case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
   case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
      return 0;
   }

   /* Unhandled caps fall out of the switch: log and report 0. */
   debug_printf("unknown shader param %d", param);
   return 0;
}
/**
 * Report global screen capabilities (PIPE_CAP_*) to the state tracker.
 * Grouped into: supported booleans, memory/alignment limits, unsupported
 * features, stream output, geometry output, texturing limits, render
 * target/viewport counts, queries and preferences.
 */
static int
etna_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
{
   struct etna_screen *screen = etna_screen(pscreen);

   switch (param) {
   /* Supported features (boolean caps). */
   case PIPE_CAP_TWO_SIDED_STENCIL:
   case PIPE_CAP_ANISOTROPIC_FILTER:
   case PIPE_CAP_POINT_SPRITE:
   case PIPE_CAP_TEXTURE_SHADOW_MAP:
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
   case PIPE_CAP_SM3:
   case PIPE_CAP_TEXTURE_BARRIER:
   case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
   case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_USER_CONSTANT_BUFFERS:
   case PIPE_CAP_TGSI_TEXCOORD:
   case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
      return 1;

   /* Memory */
   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 256;
   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return 4; /* XXX could easily be supported */
   case PIPE_CAP_GLSL_FEATURE_LEVEL:
      return 120;

   case PIPE_CAP_NPOT_TEXTURES:
      return true; /* VIV_FEATURE(priv->dev, chipMinorFeatures1, NON_POWER_OF_TWO); */

   case PIPE_CAP_PRIMITIVE_RESTART:
      /* primitive restart needs HALTI0-class hardware */
      return VIV_FEATURE(screen, chipMinorFeatures1, HALTI0);

   case PIPE_CAP_ENDIANNESS:
      return PIPE_ENDIAN_LITTLE; /* on most Viv hw this is configurable (feature ENDIANNESS_CONFIG) */

   /* Unsupported features. */
   case PIPE_CAP_SEAMLESS_CUBE_MAP:
   case PIPE_CAP_TEXTURE_SWIZZLE: /* XXX supported on gc2000 */
   case PIPE_CAP_COMPUTE: /* XXX supported on gc2000 */
   case PIPE_CAP_MIXED_COLORBUFFER_FORMATS: /* only one colorbuffer supported, so mixing makes no sense */
   case PIPE_CAP_CONDITIONAL_RENDER: /* no occlusion queries */
   case PIPE_CAP_TGSI_INSTANCEID: /* no idea, really */
   case PIPE_CAP_START_INSTANCE: /* instancing not supported AFAIK */
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR: /* instancing not supported AFAIK */
   case PIPE_CAP_SHADER_STENCIL_EXPORT: /* Fragment shader cannot export stencil value */
   case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS: /* no dual-source supported */
   case PIPE_CAP_TEXTURE_MULTISAMPLE: /* no texture multisample */
   case PIPE_CAP_TEXTURE_MIRROR_CLAMP: /* only mirrored repeat */
   case PIPE_CAP_INDEP_BLEND_ENABLE:
   case PIPE_CAP_INDEP_BLEND_FUNC:
   case PIPE_CAP_DEPTH_CLIP_DISABLE:
   case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
   case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS: /* Don't skip strict max uniform limit check */
   case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
   case PIPE_CAP_USER_VERTEX_BUFFERS:
   case PIPE_CAP_USER_INDEX_BUFFERS:
   case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
   case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
   case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES: /* TODO: test me out with piglit */
   case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
   case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
   case PIPE_CAP_TEXTURE_GATHER_SM5:
   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
   case PIPE_CAP_FAKE_SW_MSAA:
   case PIPE_CAP_TEXTURE_QUERY_LOD:
   case PIPE_CAP_SAMPLE_SHADING:
   case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
   case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
   case PIPE_CAP_DRAW_INDIRECT:
   case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
   case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
   case PIPE_CAP_SAMPLER_VIEW_TARGET:
   case PIPE_CAP_CLIP_HALFZ:
   case PIPE_CAP_VERTEXID_NOBASE:
   case PIPE_CAP_POLYGON_OFFSET_CLAMP:
   case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
   case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
   case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
   case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
   case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
   case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
   case PIPE_CAP_DEPTH_BOUNDS_TEST:
   case PIPE_CAP_TGSI_TXQS:
   case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
   case PIPE_CAP_SHAREABLE_SHADERS:
   case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
   case PIPE_CAP_CLEAR_TEXTURE:
   case PIPE_CAP_DRAW_PARAMETERS:
   case PIPE_CAP_TGSI_PACK_HALF_FLOAT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
   case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
   case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
   case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
   case PIPE_CAP_INVALIDATE_BUFFER:
   case PIPE_CAP_GENERATE_MIPMAP:
   case PIPE_CAP_STRING_MARKER:
   case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
   case PIPE_CAP_QUERY_BUFFER_OBJECT:
   case PIPE_CAP_QUERY_MEMORY_INFO:
   case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
   case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
   case PIPE_CAP_CULL_DISTANCE:
   case PIPE_CAP_PRIMITIVE_RESTART_FOR_PATCHES:
   case PIPE_CAP_TGSI_VOTE:
   case PIPE_CAP_MAX_WINDOW_RECTANGLES:
   case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
   case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
   case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
   case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
   case PIPE_CAP_TGSI_CAN_READ_OUTPUTS:
   case PIPE_CAP_NATIVE_FENCE_FD:
   case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
   case PIPE_CAP_TGSI_FS_FBFETCH:
   case PIPE_CAP_TGSI_MUL_ZERO_WINS:
   case PIPE_CAP_DOUBLES:
   case PIPE_CAP_INT64:
   case PIPE_CAP_INT64_DIVMOD:
      return 0;

   /* Stream output. */
   case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
   case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
   case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
   case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
      return 0;

   /* Geometry shader output, unsupported. */
   case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
   case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
   case PIPE_CAP_MAX_VERTEX_STREAMS:
      return 0;

   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
      return 128;

   /* Texturing. */
   case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
   {
      /* max mip level count derived from the max texture dimension */
      int log2_max_tex_size = util_last_bit(screen->specs.max_texture_size);
      assert(log2_max_tex_size > 0);
      return log2_max_tex_size;
   }
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS: /* 3D textures not supported - fake it */
      return 5;
   case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
      return 0;
   case PIPE_CAP_CUBE_MAP_ARRAY:
      return 0;
   case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
   case PIPE_CAP_MIN_TEXEL_OFFSET:
      return -8;
   case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
   case PIPE_CAP_MAX_TEXEL_OFFSET:
      return 7;
   case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
      return 0;
   case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
      return 65536;

   /* Render targets. */
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return 1; /* hardware has a single color buffer */

   /* Viewports and scissors. */
   case PIPE_CAP_MAX_VIEWPORTS:
      return 1;

   /* Timer queries. */
   case PIPE_CAP_QUERY_TIME_ELAPSED:
   case PIPE_CAP_OCCLUSION_QUERY:
      return 0;
   case PIPE_CAP_QUERY_TIMESTAMP:
      return 1;
   case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
      return 0;

   /* Preferences */
   case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
      return 0;

   case PIPE_CAP_PCI_GROUP:
   case PIPE_CAP_PCI_BUS:
   case PIPE_CAP_PCI_DEVICE:
   case PIPE_CAP_PCI_FUNCTION:
      return 0; /* not a PCI device */
   case PIPE_CAP_VENDOR_ID:
   case PIPE_CAP_DEVICE_ID:
      return 0xFFFFFFFF;
   case PIPE_CAP_ACCELERATED:
      return 1;
   case PIPE_CAP_VIDEO_MEMORY:
      return 0;
   case PIPE_CAP_UMA:
      return 1; /* GPU shares system memory */
   }

   /* unhandled cap enum value - return a safe default of 0 */
   debug_printf("unknown param %d", param);
   return 0;
}
/**
 * Import an externally-allocated buffer (e.g. from the display server) as
 * a pipe_resource.  The BO's stride and size are validated against the
 * driver's padding requirements.  For linear-layout imports - which
 * neither the sampler nor the pixel pipes can consume directly - a tiled
 * shadow resource is created and the imported buffer is attached to it as
 * an external resource; the tiled resource is returned in that case.
 *
 * Returns NULL on allocation/import failure or when the BO does not meet
 * the padding constraints.
 */
static struct pipe_resource *
etna_resource_from_handle(struct pipe_screen *pscreen,
                          const struct pipe_resource *tmpl,
                          struct winsys_handle *handle, unsigned usage)
{
   struct etna_screen *screen = etna_screen(pscreen);
   struct etna_resource *rsc;
   struct etna_resource_level *level;
   struct pipe_resource *prsc;
   struct pipe_resource *ptiled = NULL; /* shadow resource for linear imports */

   DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x",
       tmpl->target, util_format_name(tmpl->format), tmpl->width0,
       tmpl->height0, tmpl->depth0, tmpl->array_size, tmpl->last_level,
       tmpl->nr_samples, tmpl->usage, tmpl->bind, tmpl->flags);

   rsc = CALLOC_STRUCT(etna_resource);
   if (!rsc)
      return NULL;

   /* imported resources only have a single miplevel */
   level = &rsc->levels[0];
   prsc = &rsc->base;
   *prsc = *tmpl;
   pipe_reference_init(&prsc->reference, 1);
   list_inithead(&rsc->list);
   prsc->screen = pscreen;

   /* the winsys fills in the BO's stride as a side effect */
   rsc->bo = etna_screen_bo_from_handle(pscreen, handle, &level->stride);
   if (!rsc->bo)
      goto fail;

   rsc->seqno = 1;
   /* the DRM format modifier encodes the tiling layout of the import */
   rsc->layout = modifier_to_layout(handle->modifier);
   rsc->halign = TEXTURE_HALIGN_FOUR;

   level->width = tmpl->width0;
   level->height = tmpl->height0;

   /* Determine padding of the imported resource. */
   unsigned paddingX = 0, paddingY = 0;
   etna_layout_multiple(rsc->layout, screen->specs.pixel_pipes,
                        VIV_FEATURE(screen, chipMinorFeatures1, TEXTURE_HALIGN),
                        &paddingX, &paddingY, &rsc->halign);
   /* without the BLT engine, the RS engine imposes extra Y alignment */
   if (!screen->specs.use_blt)
      etna_adjust_rs_align(screen->specs.pixel_pipes, NULL, &paddingY);
   level->padded_width = align(level->width, paddingX);
   level->padded_height = align(level->height, paddingY);

   level->layer_stride = level->stride * util_format_get_nblocksy(prsc->format, level->padded_height);
   level->size = level->layer_stride;

   /* The DDX must give us a BO which conforms to our padding size.
    * The stride of the BO must be greater or equal to our padded
    * stride. The size of the BO must accomodate the padded height. */
   if (level->stride < util_format_get_stride(tmpl->format, level->padded_width)) {
      BUG("BO stride %u is too small for RS engine width padding (%zu, format %s)",
          level->stride, util_format_get_stride(tmpl->format, level->padded_width),
          util_format_name(tmpl->format));
      goto fail;
   }
   if (etna_bo_size(rsc->bo) < level->stride * level->padded_height) {
      BUG("BO size %u is too small for RS engine height padding (%u, format %s)",
          etna_bo_size(rsc->bo), level->stride * level->padded_height,
          util_format_name(tmpl->format));
      goto fail;
   }

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      /*
       * Both sampler and pixel pipes can't handle linear, create a compatible
       * base resource, where we can attach the imported buffer as an external
       * resource.
       */
      struct pipe_resource tiled_templat = *tmpl;

      /*
       * Remove BIND_SCANOUT to avoid recursion, as etna_resource_create uses
       * this function to import the scanout buffer and get a tiled resource.
       */
      tiled_templat.bind &= ~PIPE_BIND_SCANOUT;

      ptiled = etna_resource_create(pscreen, &tiled_templat);
      if (!ptiled)
         goto fail;

      /* tiled resource takes ownership of the imported linear resource */
      etna_resource(ptiled)->external = prsc;

      return ptiled;
   }

   return prsc;

fail:
   /* destroys rsc and, if already imported, its BO */
   etna_resource_destroy(pscreen, prsc);
   if (ptiled)
      etna_resource_destroy(pscreen, ptiled);

   return NULL;
}