static boolean
etna_screen_is_format_supported(struct pipe_screen *pscreen,
                                enum pipe_format format,
                                enum pipe_texture_target target,
                                unsigned sample_count, unsigned usage)
{
   struct etna_screen *screen = etna_screen(pscreen);
   unsigned allowed = 0;

   if (target != PIPE_BUFFER &&
       target != PIPE_TEXTURE_1D &&
       target != PIPE_TEXTURE_2D &&
       target != PIPE_TEXTURE_3D &&
       target != PIPE_TEXTURE_CUBE &&
       target != PIPE_TEXTURE_RECT)
      return FALSE;

   if (usage & PIPE_BIND_RENDER_TARGET) {
      /* If render target, must be an RS-supported format that is not rb
       * swapped. Exposing rb swapped (or otherwise swizzled) formats for
       * rendering would involve swizzling in the pixel shader. */
      if (translate_rs_format(format) != ETNA_NO_MATCH &&
          !translate_rs_format_rb_swap(format)) {
         /* Validate MSAA; the number of samples must be allowed, and the
          * render target must have an MSAA'able format. */
         if (sample_count > 1) {
            if (translate_samples_to_xyscale(sample_count, NULL, NULL, NULL) &&
                translate_msaa_format(format) != ETNA_NO_MATCH)
               allowed |= PIPE_BIND_RENDER_TARGET;
         } else {
            allowed |= PIPE_BIND_RENDER_TARGET;
         }
      }
   }

   if (usage & PIPE_BIND_DEPTH_STENCIL) {
      if (translate_depth_format(format) != ETNA_NO_MATCH)
         allowed |= PIPE_BIND_DEPTH_STENCIL;
   }

   if (usage & PIPE_BIND_SAMPLER_VIEW) {
      uint32_t fmt = translate_texture_format(format);

      if (!gpu_supports_texure_format(screen, fmt))
         fmt = ETNA_NO_MATCH;

      if (sample_count < 2 && fmt != ETNA_NO_MATCH)
         allowed |= PIPE_BIND_SAMPLER_VIEW;
   }

   if (usage & PIPE_BIND_VERTEX_BUFFER) {
      if (translate_vertex_format_type(format) != ETNA_NO_MATCH)
         allowed |= PIPE_BIND_VERTEX_BUFFER;
   }

   if (usage & PIPE_BIND_INDEX_BUFFER) {
      /* must be a supported index format */
      if (format == PIPE_FORMAT_I8_UINT ||
          format == PIPE_FORMAT_I16_UINT ||
          (format == PIPE_FORMAT_I32_UINT &&
           VIV_FEATURE(screen, chipFeatures, 32_BIT_INDICES)))
         allowed |= PIPE_BIND_INDEX_BUFFER;
   }

   /* Always allowed */
   allowed |= usage & (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT |
                       PIPE_BIND_SHARED);

   if (usage != allowed) {
      DBG("not supported: format=%s, target=%d, sample_count=%d, "
          "usage=%x, allowed=%x",
          util_format_name(format), target, sample_count, usage, allowed);
   }

   return usage == allowed;
}
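/* A minimal usage sketch, not part of the driver: this is how a Gallium
 * state tracker might probe the screen for a usable render-target format,
 * falling back from 8888 to 565. pick_rt_format() and its candidate list
 * are hypothetical; is_format_supported is the pipe_screen hook that the
 * function above implements. */
static enum pipe_format
pick_rt_format(struct pipe_screen *pscreen)
{
   static const enum pipe_format candidates[] = {
      PIPE_FORMAT_B8G8R8A8_UNORM,
      PIPE_FORMAT_B5G6R5_UNORM,
   };

   for (unsigned i = 0; i < ARRAY_SIZE(candidates); ++i) {
      /* sample_count 0 requests a single-sampled resource */
      if (pscreen->is_format_supported(pscreen, candidates[i],
                                       PIPE_TEXTURE_2D, 0,
                                       PIPE_BIND_RENDER_TARGET))
         return candidates[i];
   }

   return PIPE_FORMAT_NONE;
}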
/** Create vertex element states, which define a layout for fetching
 * vertices for rendering.
 */
static void *
etna_vertex_elements_state_create(struct pipe_context *pctx,
                                  unsigned num_elements,
                                  const struct pipe_vertex_element *elements)
{
   struct etna_context *ctx = etna_context(pctx);
   struct compiled_vertex_elements_state *cs = CALLOC_STRUCT(compiled_vertex_elements_state);

   if (!cs)
      return NULL;

   if (num_elements > ctx->specs.vertex_max_elements) {
      BUG("number of elements (%u) exceeds chip maximum (%u)",
          num_elements, ctx->specs.vertex_max_elements);
      FREE(cs);
      return NULL;
   }

   /* XXX we could minimize the number of consecutive stretches here by
    * sorting and permuting the inputs in the shader, or does Mesa do this
    * already? */

   /* Check that the vertex element binding is compatible with the hardware,
    * i.e. that every elements[idx].vertex_buffer_index is < stream_count.
    * If not, the binding uses more streams than the hardware supports, and
    * u_vbuf should have reorganized the buffers for compatibility. */
   /* TODO: does Mesa do this for us? */
   bool incompatible = false;

   for (unsigned idx = 0; idx < num_elements; ++idx) {
      if (elements[idx].vertex_buffer_index >= ctx->specs.stream_count ||
          elements[idx].instance_divisor > 0)
         incompatible = true;
   }

   cs->num_elements = num_elements;

   if (incompatible || num_elements == 0) {
      DBG("Error: zero vertex elements, unsupported instancing, or more "
          "vertex buffers used than supported");
      FREE(cs);
      return NULL;
   }

   unsigned start_offset = 0; /* start of current consecutive stretch */
   bool nonconsecutive = true; /* whether the previous element ended its stretch */

   for (unsigned idx = 0; idx < num_elements; ++idx) {
      unsigned element_size = util_format_get_blocksize(elements[idx].src_format);
      unsigned end_offset = elements[idx].src_offset + element_size;
      uint32_t format_type, normalize;

      if (nonconsecutive)
         start_offset = elements[idx].src_offset;

      /* maximum vertex size is 256 bytes */
      assert(element_size != 0 && end_offset <= 256);

      /* check whether the next element is consecutive to this one */
      nonconsecutive = (idx == (num_elements - 1)) ||
                       elements[idx + 1].vertex_buffer_index != elements[idx].vertex_buffer_index ||
                       end_offset != elements[idx + 1].src_offset;

      format_type = translate_vertex_format_type(elements[idx].src_format);
      normalize = translate_vertex_format_normalize(elements[idx].src_format);

      assert(format_type != ETNA_NO_MATCH);
      assert(normalize != ETNA_NO_MATCH);

      if (ctx->specs.halti < 5) {
         cs->FE_VERTEX_ELEMENT_CONFIG[idx] =
            COND(nonconsecutive, VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE) |
            format_type |
            VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(util_format_get_nr_components(elements[idx].src_format)) |
            normalize |
            VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(ENDIAN_MODE_NO_SWAP) |
            VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(elements[idx].vertex_buffer_index) |
            VIVS_FE_VERTEX_ELEMENT_CONFIG_START(elements[idx].src_offset) |
            VIVS_FE_VERTEX_ELEMENT_CONFIG_END(end_offset - start_offset);
      } else { /* HALTI5 spreads the vertex attribute config over two registers */
         cs->NFE_GENERIC_ATTRIB_CONFIG0[idx] =
            format_type |
            VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM(util_format_get_nr_components(elements[idx].src_format)) |
            normalize |
            VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN(ENDIAN_MODE_NO_SWAP) |
            VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM(elements[idx].vertex_buffer_index) |
            VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START(elements[idx].src_offset);
         cs->NFE_GENERIC_ATTRIB_CONFIG1[idx] =
            COND(nonconsecutive, VIVS_NFE_GENERIC_ATTRIB_CONFIG1_NONCONSECUTIVE) |
            VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END(end_offset - start_offset);
      }

      cs->NFE_GENERIC_ATTRIB_SCALE[idx] = 0x3f800000; /* 1 for integer, 1.0f for float */
   }
   return cs;
}
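/* A minimal usage sketch, not part of the driver: creating and binding a
 * two-attribute layout through the pipe_context hooks backed by the
 * function above. Both attributes share vertex buffer 0, and the texcoord
 * starts right where the 12-byte position ends, so the two elements form a
 * single consecutive stretch. bind_pos_texcoord_layout() is hypothetical;
 * pctx is assumed to be a valid context. */
static void
bind_pos_texcoord_layout(struct pipe_context *pctx)
{
   const struct pipe_vertex_element elems[2] = {
      {
         .src_offset = 0, /* position: 3 floats, bytes 0..11 */
         .instance_divisor = 0,
         .vertex_buffer_index = 0,
         .src_format = PIPE_FORMAT_R32G32B32_FLOAT,
      },
      {
         .src_offset = 12, /* texcoord: 2 floats, bytes 12..19 */
         .instance_divisor = 0,
         .vertex_buffer_index = 0,
         .src_format = PIPE_FORMAT_R32G32_FLOAT,
      },
   };
   void *velems = pctx->create_vertex_elements_state(pctx, 2, elems);

   if (velems)
      pctx->bind_vertex_elements_state(pctx, velems);
}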
static boolean
etna_screen_is_format_supported(struct pipe_screen *screen,
                                enum pipe_format format,
                                enum pipe_texture_target target,
                                unsigned sample_count, unsigned usage)
{
    struct etna_screen *priv = etna_screen(screen);
    unsigned allowed = 0;

    if (target >= PIPE_MAX_TEXTURE_TYPES)
        return FALSE;

    if (usage & PIPE_BIND_RENDER_TARGET) {
        /* if render target, must be RS-supported format */
        if (translate_rt_format(format, true) != ETNA_NO_MATCH) {
            /* Validate MSAA; the number of samples must be allowed, and the
             * render target must have an MSAA'able format. */
            if (sample_count > 1) {
                if (translate_samples_to_xyscale(sample_count, NULL, NULL, NULL) &&
                    translate_msaa_format(format, true) != ETNA_NO_MATCH) {
                    allowed |= PIPE_BIND_RENDER_TARGET;
                }
            } else {
                allowed |= PIPE_BIND_RENDER_TARGET;
            }
        }
    }

    if (usage & PIPE_BIND_DEPTH_STENCIL) {
        /* must be a supported depth format */
        if (translate_depth_format(format, true) != ETNA_NO_MATCH) {
            allowed |= PIPE_BIND_DEPTH_STENCIL;
        }
    }

    if (usage & PIPE_BIND_SAMPLER_VIEW) {
        /* must be a supported texture format */
        if (sample_count < 2 &&
            translate_texture_format(format, true) != ETNA_NO_MATCH) {
            allowed |= PIPE_BIND_SAMPLER_VIEW;
        }
    }

    if (usage & PIPE_BIND_VERTEX_BUFFER) {
        /* must be a supported vertex format */
        if (translate_vertex_format_type(format, true) != ETNA_NO_MATCH) {
            allowed |= PIPE_BIND_VERTEX_BUFFER;
        }
    }

    if (usage & PIPE_BIND_INDEX_BUFFER) {
        /* must be a supported index format */
        if (format == PIPE_FORMAT_I8_UINT ||
            format == PIPE_FORMAT_I16_UINT ||
            (format == PIPE_FORMAT_I32_UINT &&
             VIV_FEATURE(priv->dev, chipFeatures, 32_BIT_INDICES))) {
            allowed |= PIPE_BIND_INDEX_BUFFER;
        }
    }

    /* Always allowed */
    allowed |= usage & (PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT |
                        PIPE_BIND_SHARED | PIPE_BIND_TRANSFER_READ |
                        PIPE_BIND_TRANSFER_WRITE);

    return usage == allowed;
}
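/* A minimal usage sketch, not part of the driver: walking the sample counts
 * downwards to find the highest MSAA level the screen accepts for a color
 * format, as a state tracker might do before creating a multisampled render
 * target. max_msaa_samples() is hypothetical; a result of 1 means only
 * single-sampled rendering is available for this format. */
static unsigned
max_msaa_samples(struct pipe_screen *screen, enum pipe_format format)
{
    for (unsigned samples = 16; samples > 1; samples /= 2) {
        if (screen->is_format_supported(screen, format, PIPE_TEXTURE_2D,
                                        samples, PIPE_BIND_RENDER_TARGET))
            return samples;
    }

    return 1;
}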