static struct pipe_resource * etna_resource_create(struct pipe_screen *pscreen, const struct pipe_resource *templat) { struct etna_screen *screen = etna_screen(pscreen); /* Figure out what tiling to use -- for now, assume that textures cannot be * supertiled, and cannot be linear. * There is a feature flag SUPERTILED_TEXTURE (not supported on any known hw) * that may allow this, as well * as LINEAR_TEXTURE_SUPPORT (supported on gc880 and gc2000 at least), but * not sure how it works. * Buffers always have LINEAR layout. */ unsigned layout = ETNA_LAYOUT_LINEAR; if (etna_resource_sampler_only(templat)) { /* The buffer is only used for texturing, so create something * directly compatible with the sampler. Such a buffer can * never be rendered to. */ layout = ETNA_LAYOUT_TILED; if (util_format_is_compressed(templat->format)) layout = ETNA_LAYOUT_LINEAR; } else if (templat->target != PIPE_BUFFER) { bool want_multitiled = screen->specs.pixel_pipes > 1; bool want_supertiled = screen->specs.can_supertile && !DBG_ENABLED(ETNA_DBG_NO_SUPERTILE); /* Keep single byte blocksized resources as tiled, since we * are unable to use the RS blit to de-tile them. However, * if they're used as a render target or depth/stencil, they * must be multi-tiled for GPUs with multiple pixel pipes. * Ignore depth/stencil here, but it is an error for a render * target. */ if (util_format_get_blocksize(templat->format) == 1 && !(templat->bind & PIPE_BIND_DEPTH_STENCIL)) { assert(!(templat->bind & PIPE_BIND_RENDER_TARGET && want_multitiled)); want_multitiled = want_supertiled = false; } layout = ETNA_LAYOUT_BIT_TILE; if (want_multitiled) layout |= ETNA_LAYOUT_BIT_MULTI; if (want_supertiled) layout |= ETNA_LAYOUT_BIT_SUPER; } if (templat->target == PIPE_TEXTURE_3D) layout = ETNA_LAYOUT_LINEAR; return etna_resource_alloc(pscreen, layout, templat); }
/* Allocate 2D texture or render target resource */ static struct pipe_resource * etna_screen_resource_create(struct pipe_screen *screen, const struct pipe_resource *templat) { struct etna_screen *priv = etna_screen(screen); assert(templat); /* Check input */ if(templat->target == PIPE_TEXTURE_CUBE) { assert(templat->array_size == 6); } else if (templat->target == PIPE_BUFFER) { assert(templat->format == PIPE_FORMAT_R8_UNORM); /* bytes; want TYPELESS or similar */ assert(templat->array_size == 1); assert(templat->height0 == 1); assert(templat->depth0 == 1); assert(templat->array_size == 1); assert(templat->last_level == 0); } else { assert(templat->array_size == 1); } assert(templat->width0 != 0); assert(templat->height0 != 0); assert(templat->depth0 != 0); assert(templat->array_size != 0); /* Figure out what tiling to use -- for now, assume that textures cannot be supertiled, and cannot be linear. * There is a feature flag SUPERTILED_TEXTURE (not supported on any known hw) that may allow this, as well * as LINEAR_TEXTURE_SUPPORT (supported on gc880 and gc2000 at least), but not sure how it works. * Buffers always have LINEAR layout. 
*/ unsigned layout = ETNA_LAYOUT_LINEAR; if(templat->target != PIPE_BUFFER) { if(!(templat->bind & PIPE_BIND_SAMPLER_VIEW) && priv->specs.can_supertile && !DBG_ENABLED(ETNA_DBG_NO_SUPERTILE)) layout = ETNA_LAYOUT_SUPER_TILED; else layout = ETNA_LAYOUT_TILED; } /* XXX multi tiled formats */ /* Determine scaling for antialiasing, allow override using debug flag */ int nr_samples = templat->nr_samples; if((templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) && !(templat->bind & PIPE_BIND_SAMPLER_VIEW)) { if(DBG_ENABLED(ETNA_DBG_MSAA_2X)) nr_samples = 2; if(DBG_ENABLED(ETNA_DBG_MSAA_4X)) nr_samples = 4; } int msaa_xscale = 1, msaa_yscale = 1; if(!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL)) { /* Number of samples not supported */ return NULL; } /* Determine needed padding (alignment of height/width) */ unsigned paddingX = 0, paddingY = 0; unsigned halign = TEXTURE_HALIGN_FOUR; etna_layout_multiple(layout, priv->dev->chip.pixel_pipes, (templat->bind & PIPE_BIND_SAMPLER_VIEW) && !VIV_FEATURE(priv->dev, chipMinorFeatures1, TEXTURE_HALIGN), &paddingX, &paddingY, &halign); assert(paddingX && paddingY); /* compute mipmap level sizes and offsets */ struct etna_resource *resource = CALLOC_STRUCT(etna_resource); int max_mip_level = templat->last_level; if(unlikely(max_mip_level >= ETNA_NUM_LOD)) /* max LOD supported by hw */ max_mip_level = ETNA_NUM_LOD - 1; unsigned ix = 0; unsigned x = templat->width0, y = templat->height0; unsigned offset = 0; while(true) { struct etna_resource_level *mip = &resource->levels[ix]; mip->width = x; mip->height = y; mip->padded_width = align(x * msaa_xscale, paddingX); mip->padded_height = align(y * msaa_yscale, paddingY); mip->stride = util_format_get_stride(templat->format, mip->padded_width); mip->offset = offset; mip->layer_stride = mip->stride * util_format_get_nblocksy(templat->format, mip->padded_height); mip->size = templat->array_size * mip->layer_stride; offset += align(mip->size, 
ETNA_PE_ALIGNMENT); /* align mipmaps to 64 bytes to be able to render to them */ if(ix == max_mip_level || (x == 1 && y == 1)) break; // stop at last level x = u_minify(x, 1); y = u_minify(y, 1); ix += 1; } /* determine memory type */ uint32_t flags = 0; /* XXX DRM_ETNA_GEM_CACHE_xxx */ enum viv_surf_type memtype = VIV_SURF_UNKNOWN; if(templat->bind & PIPE_BIND_SAMPLER_VIEW) flags |= DRM_ETNA_GEM_TYPE_TEX; else if(templat->bind & PIPE_BIND_RENDER_TARGET) flags |= DRM_ETNA_GEM_TYPE_RT; else if(templat->bind & PIPE_BIND_DEPTH_STENCIL) flags |= DRM_ETNA_GEM_TYPE_ZS; else if(templat->bind & PIPE_BIND_INDEX_BUFFER) flags |= DRM_ETNA_GEM_TYPE_IDX; else if(templat->bind & PIPE_BIND_VERTEX_BUFFER) flags |= DRM_ETNA_GEM_TYPE_VTX; DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: Allocate surface of %ix%i (padded to %ix%i), %i layers, of format %s, size %08x flags %08x, memtype %i", resource, templat->width0, templat->height0, resource->levels[0].padded_width, resource->levels[0].padded_height, templat->array_size, util_format_name(templat->format), offset, templat->bind, memtype); struct etna_bo *bo = 0; if(unlikely((bo = etna_bo_new(priv->dev, offset, flags)) == NULL)) { BUG("Problem allocating video memory for resource"); return NULL; } resource->base = *templat; resource->base.last_level = ix; /* real last mipmap level */ resource->base.screen = screen; resource->base.nr_samples = nr_samples; resource->layout = layout; resource->halign = halign; resource->bo = bo; resource->ts_bo = 0; /* TS is only created when first bound to surface */ pipe_reference_init(&resource->base.reference, 1); if(DBG_ENABLED(ETNA_DBG_ZERO)) { void *map = etna_bo_map(bo); memset(map, 0, offset); } return &resource->base; }
/* Create a new resource object, using the given template info.
 *
 * 'layout' is an ETNA_LAYOUT_* value chosen by the caller; 'modifier' is a
 * DRM format modifier used for scanout buffers. Returns NULL on unsupported
 * sample counts or allocation failure.
 */
struct pipe_resource *
etna_resource_alloc(struct pipe_screen *pscreen, unsigned layout,
                    uint64_t modifier, const struct pipe_resource *templat)
{
   struct etna_screen *screen = etna_screen(pscreen);
   struct etna_resource *rsc;
   unsigned size;

   DBG_F(ETNA_DBG_RESOURCE_MSGS,
         "target=%d, format=%s, %ux%ux%u, array_size=%u, "
         "last_level=%u, nr_samples=%u, usage=%u, bind=%x, flags=%x",
         templat->target, util_format_name(templat->format), templat->width0,
         templat->height0, templat->depth0, templat->array_size,
         templat->last_level, templat->nr_samples, templat->usage,
         templat->bind, templat->flags);

   /* Determine scaling for antialiasing, allow override using debug flag */
   int nr_samples = templat->nr_samples;
   if ((templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
       !(templat->bind & PIPE_BIND_SAMPLER_VIEW)) {
      if (DBG_ENABLED(ETNA_DBG_MSAA_2X))
         nr_samples = 2;
      if (DBG_ENABLED(ETNA_DBG_MSAA_4X))
         nr_samples = 4;
   }

   int msaa_xscale = 1, msaa_yscale = 1;
   if (!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL)) {
      /* Number of samples not supported */
      return NULL;
   }

   /* Determine needed padding (alignment of height/width) */
   unsigned paddingX = 0, paddingY = 0;
   unsigned halign = TEXTURE_HALIGN_FOUR;
   if (!util_format_is_compressed(templat->format)) {
      /* If we have the TEXTURE_HALIGN feature, we can always align to the
       * resolve engine's width. If not, we must not align resources used
       * only for textures. If this GPU uses the BLT engine, never do RS align.
       */
      bool rs_align = screen->specs.use_blt ? false : (
                         VIV_FEATURE(screen, chipMinorFeatures1, TEXTURE_HALIGN) ||
                         !etna_resource_sampler_only(templat));
      etna_layout_multiple(layout, screen->specs.pixel_pipes, rs_align,
                           &paddingX, &paddingY, &halign);
      assert(paddingX && paddingY);
   } else {
      /* Compressed textures are padded to their block size, but we don't have
       * to do anything special for that.
       */
      paddingX = 1;
      paddingY = 1;
   }

   /* RS can only operate with a Y alignment tied to the pixel pipes. */
   if (!screen->specs.use_blt && templat->target != PIPE_BUFFER)
      etna_adjust_rs_align(screen->specs.pixel_pipes, NULL, &paddingY);

   if (templat->bind & PIPE_BIND_SCANOUT) {
      /* Scanout resources are allocated by the display (renderonly) device
       * and imported back here via a prime FD. */
      struct pipe_resource scanout_templat = *templat;
      struct renderonly_scanout *scanout;
      struct winsys_handle handle;

      /* pad scanout buffer size to be compatible with the RS */
      if (!screen->specs.use_blt && modifier == DRM_FORMAT_MOD_LINEAR)
         etna_adjust_rs_align(screen->specs.pixel_pipes, &paddingX, &paddingY);

      scanout_templat.width0 = align(scanout_templat.width0, paddingX);
      scanout_templat.height0 = align(scanout_templat.height0, paddingY);

      scanout = renderonly_scanout_for_resource(&scanout_templat, screen->ro,
                                                &handle);
      if (!scanout)
         return NULL;

      assert(handle.type == WINSYS_HANDLE_TYPE_FD);
      handle.modifier = modifier;
      rsc = etna_resource(pscreen->resource_from_handle(pscreen, templat,
                                                        &handle,
                                                        PIPE_HANDLE_USAGE_WRITE));
      /* the import dup'ed the FD; close our copy either way */
      close(handle.handle);
      if (!rsc)
         /* NOTE(review): 'scanout' is not destroyed on this path — possible
          * leak of the renderonly scanout object; verify against the
          * renderonly API. */
         return NULL;

      rsc->scanout = scanout;

      return &rsc->base;
   }

   rsc = CALLOC_STRUCT(etna_resource);
   if (!rsc)
      return NULL;

   rsc->base = *templat;
   rsc->base.screen = pscreen;
   rsc->base.nr_samples = nr_samples;
   rsc->layout = layout;
   rsc->halign = halign;

   pipe_reference_init(&rsc->base.reference, 1);
   list_inithead(&rsc->list);

   size = setup_miptree(rsc, paddingX, paddingY, msaa_xscale, msaa_yscale);

   uint32_t flags = DRM_ETNA_GEM_CACHE_WC;
   if (templat->bind & PIPE_BIND_VERTEX_BUFFER)
      flags |= DRM_ETNA_GEM_FORCE_MMU;

   struct etna_bo *bo = etna_bo_new(screen->dev, size, flags);
   if (unlikely(bo == NULL)) {
      BUG("Problem allocating video memory for resource");
      goto free_rsc;
   }

   rsc->bo = bo;
   rsc->ts_bo = 0; /* TS is only created when first bound to surface */

   if (DBG_ENABLED(ETNA_DBG_ZERO)) {
      void *map = etna_bo_map(bo);
      memset(map, 0, size);
   }

   return &rsc->base;

free_rsc:
   FREE(rsc);
   return NULL;
}
/* Create a new resource object, using the given template info */ struct pipe_resource * etna_resource_alloc(struct pipe_screen *pscreen, unsigned layout, const struct pipe_resource *templat) { struct etna_screen *screen = etna_screen(pscreen); unsigned size; DBG_F(ETNA_DBG_RESOURCE_MSGS, "target=%d, format=%s, %ux%ux%u, array_size=%u, " "last_level=%u, nr_samples=%u, usage=%u, bind=%x, flags=%x", templat->target, util_format_name(templat->format), templat->width0, templat->height0, templat->depth0, templat->array_size, templat->last_level, templat->nr_samples, templat->usage, templat->bind, templat->flags); /* Determine scaling for antialiasing, allow override using debug flag */ int nr_samples = templat->nr_samples; if ((templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) && !(templat->bind & PIPE_BIND_SAMPLER_VIEW)) { if (DBG_ENABLED(ETNA_DBG_MSAA_2X)) nr_samples = 2; if (DBG_ENABLED(ETNA_DBG_MSAA_4X)) nr_samples = 4; } int msaa_xscale = 1, msaa_yscale = 1; if (!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL)) { /* Number of samples not supported */ return NULL; } /* If we have the TEXTURE_HALIGN feature, we can always align to the * resolve engine's width. If not, we must not align resources used * only for textures. 
*/ bool rs_align = VIV_FEATURE(screen, chipMinorFeatures1, TEXTURE_HALIGN) || !etna_resource_sampler_only(templat); /* Determine needed padding (alignment of height/width) */ unsigned paddingX = 0, paddingY = 0; unsigned halign = TEXTURE_HALIGN_FOUR; etna_layout_multiple(layout, screen->specs.pixel_pipes, rs_align, &paddingX, &paddingY, &halign); assert(paddingX && paddingY); if (templat->bind != PIPE_BUFFER) { unsigned min_paddingY = 4 * screen->specs.pixel_pipes; if (paddingY < min_paddingY) paddingY = min_paddingY; } struct etna_resource *rsc = CALLOC_STRUCT(etna_resource); if (!rsc) return NULL; rsc->base = *templat; rsc->base.screen = pscreen; rsc->base.nr_samples = nr_samples; rsc->layout = layout; rsc->halign = halign; pipe_reference_init(&rsc->base.reference, 1); list_inithead(&rsc->list); size = setup_miptree(rsc, paddingX, paddingY, msaa_xscale, msaa_yscale); uint32_t flags = DRM_ETNA_GEM_CACHE_WC; if (templat->bind & PIPE_BIND_VERTEX_BUFFER) flags |= DRM_ETNA_GEM_FORCE_MMU; struct etna_bo *bo = etna_bo_new(screen->dev, size, flags); if (unlikely(bo == NULL)) { BUG("Problem allocating video memory for resource"); return NULL; } rsc->bo = bo; rsc->ts_bo = 0; /* TS is only created when first bound to surface */ if (templat->bind & PIPE_BIND_SCANOUT) rsc->scanout = renderonly_scanout_for_resource(&rsc->base, screen->ro); if (DBG_ENABLED(ETNA_DBG_ZERO)) { void *map = etna_bo_map(bo); memset(map, 0, size); } return &rsc->base; }
/* Create and initialize a pipe_screen for an etnaviv device.
 *
 * Queries GPU model/revision and all feature words, derives the specs,
 * applies debug overrides, and installs the screen vtable. Returns NULL on
 * any failure; the fail path tears down whatever was set up so far via
 * etna_screen_destroy().
 */
struct pipe_screen *
etna_screen_create(struct etna_device *dev, struct etna_gpu *gpu,
                   struct renderonly *ro)
{
   struct etna_screen *screen = CALLOC_STRUCT(etna_screen);
   struct pipe_screen *pscreen;
   drmVersionPtr version;
   uint64_t val;

   if (!screen)
      return NULL;

   pscreen = &screen->base;
   screen->dev = dev;
   screen->gpu = gpu;
   screen->ro = renderonly_dup(ro);
   screen->refcnt = 1;

   if (!screen->ro) {
      DBG("could not create renderonly object");
      goto fail;
   }

   version = drmGetVersion(screen->ro->gpu_fd);
   screen->drm_version = ETNA_DRM_VERSION(version->version_major,
                                          version->version_minor);
   drmFreeVersion(version);

   etna_mesa_debug = debug_get_option_etna_mesa_debug();

   /* Disable autodisable for correct rendering with TS */
   etna_mesa_debug |= ETNA_DBG_NO_AUTODISABLE;

   screen->pipe = etna_pipe_new(gpu, ETNA_PIPE_3D);
   if (!screen->pipe) {
      DBG("could not create 3d pipe");
      goto fail;
   }

   /* Query identity and all seven feature words from the kernel driver. */
   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_MODEL, &val)) {
      DBG("could not get ETNA_GPU_MODEL");
      goto fail;
   }
   screen->model = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_REVISION, &val)) {
      DBG("could not get ETNA_GPU_REVISION");
      goto fail;
   }
   screen->revision = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_FEATURES_0, &val)) {
      DBG("could not get ETNA_GPU_FEATURES_0");
      goto fail;
   }
   screen->features[0] = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_FEATURES_1, &val)) {
      DBG("could not get ETNA_GPU_FEATURES_1");
      goto fail;
   }
   screen->features[1] = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_FEATURES_2, &val)) {
      DBG("could not get ETNA_GPU_FEATURES_2");
      goto fail;
   }
   screen->features[2] = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_FEATURES_3, &val)) {
      DBG("could not get ETNA_GPU_FEATURES_3");
      goto fail;
   }
   screen->features[3] = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_FEATURES_4, &val)) {
      DBG("could not get ETNA_GPU_FEATURES_4");
      goto fail;
   }
   screen->features[4] = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_FEATURES_5, &val)) {
      DBG("could not get ETNA_GPU_FEATURES_5");
      goto fail;
   }
   screen->features[5] = val;

   if (etna_gpu_get_param(screen->gpu, ETNA_GPU_FEATURES_6, &val)) {
      DBG("could not get ETNA_GPU_FEATURES_6");
      goto fail;
   }
   screen->features[6] = val;

   if (!etna_get_specs(screen))
      goto fail;

   /* apply debug options that disable individual features */
   if (DBG_ENABLED(ETNA_DBG_NO_EARLY_Z))
      screen->features[viv_chipFeatures] |= chipFeatures_NO_EARLY_Z;
   if (DBG_ENABLED(ETNA_DBG_NO_TS))
      screen->features[viv_chipFeatures] &= ~chipFeatures_FAST_CLEAR;
   if (DBG_ENABLED(ETNA_DBG_NO_AUTODISABLE))
      screen->features[viv_chipMinorFeatures1] &= ~chipMinorFeatures1_AUTO_DISABLE;
   if (DBG_ENABLED(ETNA_DBG_NO_SUPERTILE))
      screen->specs.can_supertile = 0;
   if (DBG_ENABLED(ETNA_DBG_NO_SINGLEBUF))
      screen->specs.single_buffer = 0;

   /* Install screen vtable. */
   pscreen->destroy = etna_screen_destroy;
   pscreen->get_param = etna_screen_get_param;
   pscreen->get_paramf = etna_screen_get_paramf;
   pscreen->get_shader_param = etna_screen_get_shader_param;
   pscreen->get_name = etna_screen_get_name;
   pscreen->get_vendor = etna_screen_get_vendor;
   pscreen->get_device_vendor = etna_screen_get_device_vendor;
   pscreen->get_timestamp = etna_screen_get_timestamp;
   pscreen->context_create = etna_context_create;
   pscreen->is_format_supported = etna_screen_is_format_supported;
   pscreen->query_dmabuf_modifiers = etna_screen_query_dmabuf_modifiers;

   etna_fence_screen_init(pscreen);
   etna_query_screen_init(pscreen);
   etna_resource_screen_init(pscreen);

   util_dynarray_init(&screen->supported_pm_queries, NULL);
   slab_create_parent(&screen->transfer_pool, sizeof(struct etna_transfer), 16);

   if (screen->drm_version >= ETNA_DRM_VERSION_PERFMON)
      etna_pm_query_setup(screen);

   return pscreen;

fail:
   etna_screen_destroy(pscreen);
   return NULL;
}
/* Allocate 2D texture or render target resource */ static struct pipe_resource * etna_screen_resource_create(struct pipe_screen *screen, const struct pipe_resource *templat) { struct etna_screen *priv = etna_screen(screen); assert(templat); unsigned element_size = util_format_get_blocksize(templat->format); if(!element_size) return NULL; /* Check input */ if(templat->target == PIPE_TEXTURE_CUBE) { assert(templat->array_size == 6); } else if (templat->target == PIPE_BUFFER) { assert(templat->format == PIPE_FORMAT_R8_UNORM); /* bytes; want TYPELESS or similar */ assert(templat->array_size == 1); assert(templat->height0 == 1); assert(templat->depth0 == 1); assert(templat->array_size == 1); assert(templat->last_level == 0); } else { assert(templat->array_size == 1); } assert(templat->width0 != 0); assert(templat->height0 != 0); assert(templat->depth0 != 0); assert(templat->array_size != 0); /* Figure out what tiling to use -- for now, assume that textures cannot be supertiled, and cannot be linear. * There is a feature flag SUPERTILED_TEXTURE (not supported on any known hw) that may allow this, as well * as LINEAR_TEXTURE_SUPPORT (supported on gc880 and gc2000 at least), but not sure how it works. * Buffers always have LINEAR layout. 
*/ unsigned layout = ETNA_LAYOUT_LINEAR; if(templat->target != PIPE_BUFFER) { if(!(templat->bind & PIPE_BIND_SAMPLER_VIEW) && priv->specs.can_supertile && !DBG_ENABLED(ETNA_DBG_NO_SUPERTILE)) layout = ETNA_LAYOUT_SUPER_TILED; else layout = ETNA_LAYOUT_TILED; } /* XXX multi tiled formats */ /* Determine scaling for antialiasing, allow override using debug flag */ int nr_samples = templat->nr_samples; if((templat->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) && !(templat->bind & PIPE_BIND_SAMPLER_VIEW)) { if(DBG_ENABLED(ETNA_DBG_MSAA_2X)) nr_samples = 2; if(DBG_ENABLED(ETNA_DBG_MSAA_4X)) nr_samples = 4; } int msaa_xscale = 1, msaa_yscale = 1; if(!translate_samples_to_xyscale(nr_samples, &msaa_xscale, &msaa_yscale, NULL)) { /* Number of samples not supported */ assert(0); } /* Determine needed padding (alignment of height/width) */ unsigned paddingX = 0, paddingY = 0; unsigned halign = TEXTURE_HALIGN_FOUR; etna_layout_multiple(layout, priv->dev->chip.pixel_pipes, (templat->bind & PIPE_BIND_SAMPLER_VIEW) && !VIV_FEATURE(priv->dev, chipMinorFeatures1, TEXTURE_HALIGN), &paddingX, &paddingY, &halign); assert(paddingX && paddingY); /* determine mipmap levels */ struct etna_resource *resource = CALLOC_STRUCT(etna_resource); int max_mip_level = templat->last_level; if(unlikely(max_mip_level >= ETNA_NUM_LOD)) /* max LOD supported by hw */ max_mip_level = ETNA_NUM_LOD - 1; /* take care about DXTx formats, which have a divSize of non-1x1 * also: lower mipmaps are still 4x4 due to tiling. In as sense, compressed formats are already tiled. * XXX UYVY formats? 
*/ unsigned divSizeX = util_format_get_blockwidth(templat->format); unsigned divSizeY = util_format_get_blockheight(templat->format); unsigned ix = 0; unsigned x = templat->width0, y = templat->height0; unsigned offset = 0; while(true) { struct etna_resource_level *mip = &resource->levels[ix]; mip->width = x; mip->height = y; mip->padded_width = align(x * msaa_xscale, paddingX); mip->padded_height = align(y * msaa_yscale, paddingY); mip->stride = align(mip->padded_width, divSizeX)/divSizeX * element_size; mip->offset = offset; mip->layer_stride = align(mip->padded_width, divSizeX)/divSizeX * align(mip->padded_height, divSizeY)/divSizeY * element_size; mip->size = templat->array_size * mip->layer_stride; offset += align(mip->size, ETNA_PE_ALIGNMENT); /* align mipmaps to 64 bytes to be able to render to them */ if(ix == max_mip_level || (x == 1 && y == 1)) break; // stop at last level x = MAX2(x >> 1, 1); y = MAX2(y >> 1, 1); ix += 1; } /* Determine memory size, and whether to create a tile status */ size_t rt_size = offset; /* determine memory type */ enum viv_surf_type memtype = VIV_SURF_UNKNOWN; if(templat->bind & PIPE_BIND_SAMPLER_VIEW) memtype = VIV_SURF_TEXTURE; else if(templat->bind & PIPE_BIND_RENDER_TARGET) memtype = VIV_SURF_RENDER_TARGET; else if(templat->bind & PIPE_BIND_DEPTH_STENCIL) memtype = VIV_SURF_DEPTH; else if(templat->bind & PIPE_BIND_INDEX_BUFFER) memtype = VIV_SURF_INDEX; else if(templat->bind & PIPE_BIND_VERTEX_BUFFER) memtype = VIV_SURF_VERTEX; DBG_F(ETNA_DBG_RESOURCE_MSGS, "%p: Allocate surface of %ix%i (padded to %ix%i) of format %s (%i bpe %ix%i), size %08zx flags %08x, memtype %i", resource, templat->width0, templat->height0, resource->levels[0].padded_width, resource->levels[0].padded_height, util_format_name(templat->format), element_size, divSizeX, divSizeY, rt_size, templat->bind, memtype); struct etna_vidmem *rt = 0; if(unlikely(etna_vidmem_alloc_linear(priv->dev, &rt, rt_size, memtype, VIV_POOL_DEFAULT, true) != ETNA_OK)) { 
BUG("Problem allocating video memory for resource"); return NULL; } resource->base = *templat; resource->base.last_level = ix; /* real last mipmap level */ resource->base.screen = screen; resource->base.nr_samples = nr_samples; resource->layout = layout; resource->halign = halign; resource->surface = rt; resource->ts = 0; /* TS is only created when first bound to surface */ pipe_reference_init(&resource->base.reference, 1); if(DBG_ENABLED(ETNA_DBG_ZERO)) { memset(resource->surface->logical, 0, rt_size); } for(unsigned ix=0; ix<=resource->base.last_level; ++ix) { struct etna_resource_level *mip = &resource->levels[ix]; mip->address = resource->surface->address + mip->offset; mip->logical = resource->surface->logical + mip->offset; DBG_F(ETNA_DBG_RESOURCE_MSGS, " %08x level %i: %ix%i (%i) stride=%i layer_stride=%i", (int)mip->address, ix, (int)mip->width, (int)mip->height, (int)mip->size, (int)mip->stride, (int)mip->layer_stride); } return &resource->base; }
/* Link vs and fs together: fill in shader_state from vs and fs.
 * As this function is called every time a new fs or vs is bound, the goal is
 * to do as little processing as possible here, and to precompute as much as
 * possible in the vs/fs shader_object.
 * XXX we could cache the link result for a certain set of VS/PS; usually a pair
 * of VS and PS will be used together anyway.
 */
void
etna_link_shaders(struct pipe_context *pipe, struct compiled_shader_state *cs,
                  const struct etna_shader_object *vs,
                  const struct etna_shader_object *fs)
{
   assert(vs->processor == TGSI_PROCESSOR_VERTEX);
   assert(fs->processor == TGSI_PROCESSOR_FRAGMENT);
#ifdef DEBUG
   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS)) {
      etna_dump_shader_object(vs);
      etna_dump_shader_object(fs);
   }
#endif
   /* set last_varying_2x flag if the last varying has 1 or 2 components */
   bool last_varying_2x = false;
   if (fs->num_inputs > 0 && fs->inputs[fs->num_inputs - 1].num_components <= 2)
      last_varying_2x = true;

   cs->RA_CONTROL = VIVS_RA_CONTROL_UNK0 |
                    (last_varying_2x ? VIVS_RA_CONTROL_LAST_VARYING_2X : 0);

   cs->PA_ATTRIBUTE_ELEMENT_COUNT = VIVS_PA_ATTRIBUTE_ELEMENT_COUNT_COUNT(fs->num_inputs);
   for (int idx = 0; idx < fs->num_inputs; ++idx)
      cs->PA_SHADER_ATTRIBUTES[idx] = fs->inputs[idx].pa_attributes;

   cs->VS_END_PC = vs->code_size / 4;
   cs->VS_OUTPUT_COUNT = fs->num_inputs + 1; /* position + varyings */
   /* Number of vertex elements determines number of VS inputs.
    * Otherwise, the GPU crashes */
   cs->VS_INPUT_COUNT = VIVS_VS_INPUT_COUNT_UNK8(vs->input_count_unk8);
   cs->VS_TEMP_REGISTER_CONTROL = VIVS_VS_TEMP_REGISTER_CONTROL_NUM_TEMPS(vs->num_temps);

   /* link vs outputs to fs inputs */
   struct etna_shader_link_info link = {};
   if (etna_link_shader_objects(&link, vs, fs)) {
      assert(0); /* linking failed: some fs inputs do not have corresponding vs outputs */
   }
   DBG_F(ETNA_DBG_LINKER_MSGS, "link result:");
   for (int idx = 0; idx < fs->num_inputs; ++idx) {
      DBG_F(ETNA_DBG_LINKER_MSGS, " %i -> %i", link.varyings_vs_reg[idx], idx + 1);
   }

   /* vs outputs (varyings): position first, then the linked varyings, then
    * pointsize last.
    * NOTE(review): vs_output[] holds 16 entries; with position + num_inputs
    * varyings + optional pointsize this assumes fs->num_inputs stays small
    * enough — verify the hw/compiler limit. */
   uint32_t vs_output[16] = {0};
   int varid = 0;
   vs_output[varid++] = vs->vs_pos_out_reg;
   for (int idx = 0; idx < fs->num_inputs; ++idx)
      vs_output[varid++] = link.varyings_vs_reg[idx];
   if (vs->vs_pointsize_out_reg >= 0)
      vs_output[varid++] = vs->vs_pointsize_out_reg; /* pointsize is last */

   /* pack four 8-bit output register indices per VS_OUTPUT word */
   for (int idx = 0; idx < 4; ++idx) {
      cs->VS_OUTPUT[idx] = (vs_output[idx * 4 + 0] << 0) |
                           (vs_output[idx * 4 + 1] << 8) |
                           (vs_output[idx * 4 + 2] << 16) |
                           (vs_output[idx * 4 + 3] << 24);
   }

   if (vs->vs_pointsize_out_reg != -1) {
      /* vertex shader outputs point coordinate, provide extra output and make
       * sure PA config is not masked */
      cs->PA_CONFIG = ~0;
      cs->VS_OUTPUT_COUNT_PSIZE = cs->VS_OUTPUT_COUNT + 1;
   } else {
      /* vertex shader does not output point coordinate, make sure that
       * POINT_SIZE_ENABLE is masked and no extra output is given */
      cs->PA_CONFIG = ~VIVS_PA_CONFIG_POINT_SIZE_ENABLE;
      cs->VS_OUTPUT_COUNT_PSIZE = cs->VS_OUTPUT_COUNT;
   }

   /* vs inputs (attributes): pack four 8-bit input register indices per word */
   uint32_t vs_input[4] = {0};
   for (int idx = 0; idx < vs->num_inputs; ++idx)
      vs_input[idx / 4] |= vs->inputs[idx].reg << ((idx % 4) * 8);
   for (int idx = 0; idx < 4; ++idx)
      cs->VS_INPUT[idx] = vs_input[idx];

   cs->VS_LOAD_BALANCING = vs->vs_load_balancing;
   cs->VS_START_PC = 0;

   cs->PS_END_PC = fs->code_size / 4;
   cs->PS_OUTPUT_REG = fs->ps_color_out_reg;
   cs->PS_INPUT_COUNT = VIVS_PS_INPUT_COUNT_COUNT(fs->num_inputs + 1) | /* Number of inputs plus position */
                        VIVS_PS_INPUT_COUNT_UNK8(fs->input_count_unk8);
   cs->PS_TEMP_REGISTER_CONTROL =
      VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs->num_temps, fs->num_inputs + 1));
   cs->PS_CONTROL = VIVS_PS_CONTROL_UNK1; /* XXX when can we set BYPASS? */
   cs->PS_START_PC = 0;

   /* Precompute PS_INPUT_COUNT and TEMP_REGISTER_CONTROL in the case of MSAA
    * mode, avoids some fumbling in sync_context. */
   cs->PS_INPUT_COUNT_MSAA = VIVS_PS_INPUT_COUNT_COUNT(fs->num_inputs + 2) | /* MSAA adds another input */
                             VIVS_PS_INPUT_COUNT_UNK8(fs->input_count_unk8);
   cs->PS_TEMP_REGISTER_CONTROL_MSAA =
      VIVS_PS_TEMP_REGISTER_CONTROL_NUM_TEMPS(MAX2(fs->num_temps, fs->num_inputs + 2));

   /* Accumulate varying component counts and per-component usage flags.
    * NOTE(review): num_components packs 4 bits per input at (idx%8)*4 — for
    * more than 8 inputs positions would wrap into the same word; confirm the
    * input-count limit. */
   uint32_t total_components = 0;
   uint32_t num_components = 0;
   uint32_t component_use[2] = {0};
   for (int idx = 0; idx < fs->num_inputs; ++idx) {
      num_components |= fs->inputs[idx].num_components << ((idx % 8) * 4);
      for (int comp = 0; comp < fs->inputs[idx].num_components; ++comp) {
         unsigned use = VARYING_COMPONENT_USE_USED;
         if (fs->inputs[idx].semantic.Name == TGSI_SEMANTIC_PCOORD) {
            if (comp == 0)
               use = VARYING_COMPONENT_USE_POINTCOORD_X;
            else if (comp == 1)
               use = VARYING_COMPONENT_USE_POINTCOORD_Y;
         }
         /* 16 components per uint32 */
         component_use[total_components / 16] |= use << ((total_components % 16) * 2);
         total_components += 1;
      }
   }
   cs->GL_VARYING_TOTAL_COMPONENTS =
      VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(align(total_components, 2));
   cs->GL_VARYING_NUM_COMPONENTS = num_components;
   cs->GL_VARYING_COMPONENT_USE[0] = component_use[0];
   cs->GL_VARYING_COMPONENT_USE[1] = component_use[1];

   /* reference instruction memory */
   cs->vs_inst_mem_size = vs->code_size;
   cs->VS_INST_MEM = vs->code;
   cs->ps_inst_mem_size = fs->code_size;
   cs->PS_INST_MEM = fs->code;

   /* uniforms layout -- first constants, then immediates */
   cs->vs_uniforms_size = vs->const_size + vs->imm_size;
   memcpy(&cs->VS_UNIFORMS[vs->imm_base], vs->imm_data, vs->imm_size * 4);

   cs->ps_uniforms_size = fs->const_size + fs->imm_size;
   memcpy(&cs->PS_UNIFORMS[fs->imm_base], fs->imm_data, fs->imm_size * 4);

   /* fetch any previous uniforms from buffer */
   etna_fetch_uniforms(pipe, PIPE_SHADER_VERTEX);
   etna_fetch_uniforms(pipe, PIPE_SHADER_FRAGMENT);
}