/**
 * Translate the vertex formats of the vertex buffers selected by vb_mask
 * and upload the converted vertices into a new buffer bound at slot out_vb.
 *
 * If unroll_indices is true, the index buffer is read and the referenced
 * vertices are written out linearly ("unrolled"); min_index is subtracted
 * from each input buffer pointer so raw index values address correctly.
 *
 * \param mgr           vertex buffer manager
 * \param key           translate key describing the format conversion
 * \param vb_mask       bitmask of vertex buffer slots to translate
 * \param out_vb        real vertex buffer slot receiving the output
 * \param start_vertex  first vertex to translate (linear path)
 * \param num_vertices  number of vertices to translate (linear path)
 * \param start_index   first index to read (unroll path)
 * \param num_indices   number of indices to read (unroll path)
 * \param min_index     smallest index value referenced (unroll path)
 * \param unroll_indices  whether to unroll via the index buffer
 * \return PIPE_OK on success, PIPE_ERROR_OUT_OF_MEMORY if the upload
 *         buffer could not be allocated.
 */
static enum pipe_error
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
                         unsigned vb_mask, unsigned out_vb,
                         int start_vertex, unsigned num_vertices,
                         int start_index, unsigned num_indices, int min_index,
                         boolean unroll_indices)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned out_offset, mask;
   enum pipe_error ret = PIPE_OK;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   mask = vb_mask;
   while (mask) {
      struct pipe_vertex_buffer *vb;
      unsigned offset;
      uint8_t *map;
      unsigned i = u_bit_scan(&mask);

      vb = &mgr->vertex_buffer[i];
      offset = vb->buffer_offset + vb->stride * start_vertex;

      if (vb->user_buffer) {
         map = (uint8_t*)vb->user_buffer + offset;
      } else {
         /* A zero stride means a single constant element; sizeof(double)*4
          * covers the largest possible vertex element format. */
         unsigned size = vb->stride ? num_vertices * vb->stride
                                    : sizeof(double)*4;

         /* Clamp so the mapped range stays inside the resource. */
         if (offset + size > vb->buffer->width0) {
            size = vb->buffer->width0 - offset;
         }

         map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
                                     PIPE_TRANSFER_READ, &vb_transfer[i]);
      }

      /* Subtract min_index so that indexing with the index buffer works. */
      if (unroll_indices) {
         map -= (ptrdiff_t)vb->stride * min_index;
      }

      tr->set_buffer(tr, i, map, vb->stride, ~0);
   }

   /* Translate. */
   if (unroll_indices) {
      struct pipe_index_buffer *ib = &mgr->index_buffer;
      struct pipe_transfer *transfer = NULL;
      unsigned offset = ib->offset + start_index * ib->index_size;
      uint8_t *map;

      assert((ib->buffer || ib->user_buffer) && ib->index_size);

      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader, 0,
                     key->output_stride * num_indices, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer) {
         /* Bug fix: do not return directly here -- the vertex buffers
          * mapped above must be unmapped first, or their transfers leak. */
         ret = PIPE_ERROR_OUT_OF_MEMORY;
         goto unmap;
      }

      if (ib->user_buffer) {
         map = (uint8_t*)ib->user_buffer + offset;
      } else {
         map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
                                     num_indices * ib->index_size,
                                     PIPE_TRANSFER_READ, &transfer);
      }

      /* Run the element-indexed translation for the index size in use;
       * ib->index_size was asserted above to be non-zero. */
      switch (ib->index_size) {
      case 4:
         tr->run_elts(tr, (unsigned*)map, num_indices, 0, 0, out_map);
         break;
      case 2:
         tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, 0, out_map);
         break;
      case 1:
         tr->run_elts8(tr, map, num_indices, 0, 0, out_map);
         break;
      }

      if (transfer) {
         pipe_buffer_unmap(mgr->pipe, transfer);
      }
   } else {
      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader,
                     key->output_stride * start_vertex,
                     key->output_stride * num_vertices, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer) {
         /* Bug fix: unmap the mapped vertex buffers before returning. */
         ret = PIPE_ERROR_OUT_OF_MEMORY;
         goto unmap;
      }

      /* Bias the offset so that addressing with start_vertex works. */
      out_offset -= key->output_stride * start_vertex;

      tr->run(tr, 0, num_vertices, 0, 0, out_map);
   }

unmap:
   /* Unmap all buffers. */
   mask = vb_mask;
   while (mask) {
      unsigned i = u_bit_scan(&mask);

      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   if (ret != PIPE_OK)
      return ret;

   /* Setup the new vertex buffer. */
   mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(&mgr->real_vertex_buffer[out_vb].buffer, NULL);
   mgr->real_vertex_buffer[out_vb].buffer = out_buffer;

   return PIPE_OK;
}
/* Perform the fetch from API vertex elements & vertex buffers, to a
 * contiguous set of float[4] attributes as required for the
 * vertex_shader->run_linear() method.
 *
 * This is used in all cases except pure passthrough
 * (draw_pt_fetch_emit.c) which has its own version to translate
 * directly to hw vertices.
 *
 * Builds a translate_key (vertex header slot + one float[4] element per
 * shader input), then looks up / creates a matching translate object only
 * when the key differs from the cached one.
 */
void draw_pt_fetch_prepare( struct pt_fetch *fetch,
                            unsigned vs_input_count,
                            unsigned vertex_size )
{
   struct draw_context *draw = fetch->draw;
   unsigned nr_inputs;
   unsigned i, nr = 0;                 /* nr = number of key elements built */
   unsigned dst_offset = 0;            /* running byte offset in the output vertex */
   struct translate_key key;

   fetch->vertex_size = vertex_size;

   /* Always emit/leave space for a vertex header.
    *
    * It's worth considering whether the vertex headers should contain
    * a pointer to the 'data', rather than having it inline.
    * Something to look at after we've fully switched over to the pt
    * paths.
    */
   {
      /* Need to set header->vertex_id = 0xffff somehow.
       * This element reads a constant float from the extra buffer bound at
       * slot nr_vertex_buffers (see the static vertex_header below). */
      key.element[nr].input_format = PIPE_FORMAT_R32_FLOAT;
      key.element[nr].input_buffer = draw->pt.nr_vertex_buffers;
      key.element[nr].input_offset = 0;
      key.element[nr].output_format = PIPE_FORMAT_R32_FLOAT;
      key.element[nr].output_offset = dst_offset;
      dst_offset += 1 * sizeof(float);
      nr++;

      /* Just leave the clip[] array untouched.
       */
      dst_offset += 4 * sizeof(float);
   }

   assert( draw->pt.nr_vertex_elements >= vs_input_count );

   nr_inputs = MIN2( vs_input_count, draw->pt.nr_vertex_elements );

   /* One float[4] output element per vertex shader input. */
   for (i = 0; i < nr_inputs; i++) {
      key.element[nr].input_format = draw->pt.vertex_element[i].src_format;
      key.element[nr].input_buffer = draw->pt.vertex_element[i].vertex_buffer_index;
      key.element[nr].input_offset = draw->pt.vertex_element[i].src_offset;
      key.element[nr].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
      key.element[nr].output_offset = dst_offset;

      dst_offset += 4 * sizeof(float);
      nr++;
   }

   assert(dst_offset <= vertex_size);

   key.nr_elements = nr;
   key.output_stride = vertex_size;

   /* Only rebuild the translate object when the key actually changed. */
   if (!fetch->translate ||
       translate_key_compare(&fetch->translate->key, &key) != 0)
   {
      translate_key_sanitize(&key);
      fetch->translate = translate_cache_find(fetch->cache, &key);

      {
         /* Constant source for the vertex-header element above.
          * Stride 0 means every vertex reads the same data. */
         static struct vertex_header vh = { 0, 1, 0, UNDEFINED_VERTEX_ID,
                                            { .0f, .0f, .0f, .0f } };
         fetch->translate->set_buffer(fetch->translate,
                                      draw->pt.nr_vertex_buffers,
                                      &vh,
                                      0);
      }
   }

   /* Edge flags are only needed when either fill mode is non-FILL and the
    * application supplied an edgeflag array. */
   fetch->need_edgeflags = ((draw->rasterizer->fill_cw != PIPE_POLYGON_MODE_FILL ||
                             draw->rasterizer->fill_ccw != PIPE_POLYGON_MODE_FILL) &&
                            draw->pt.user.edgeflag);
}
/* Begin the vertex-translate fallback: convert all vertex elements whose
 * layout or buffer is incompatible with the driver into a single
 * driver-friendly interleaved vertex buffer, then bind a matching
 * fallback vertex elements state.
 *
 * min_index/max_index bound the vertex range that must be translated.
 *
 * NOTE(review): the u_upload_alloc result is used without a NULL check
 * here (unlike newer versions of this code) -- an allocation failure
 * would crash; confirm against the uploader's guarantees.
 */
static void u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                                   int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];  /* ve index -> key element index */
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   /* 0xff-fill so untranslated elements compare >= key.nr_elements below. */
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Get a new vertex buffer slot. */
   mgr->fallback_vb_slot = u_vbuf_get_free_real_vb_slot(mgr);

   if (mgr->fallback_vb_slot == ~0) {
      return; /* XXX error, not enough attribs */
   }

   /* Initialize the description of how vertices should be translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support: elements that are already compatible are left
       * alone and keep reading their original vertex buffer. */
      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[mgr->ve->ve[i].vertex_buffer_index]) {
         continue;
      }

      /* Workaround for translate: output floats instead of halfs. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element.
       */
      te = &key.element[key.nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         uint8_t *map = pipe_buffer_map(mgr->pipe, vb->buffer,
                                        PIPE_TRANSFER_READ, &vb_transfer[i]);

         /* Pre-advance by min_index so translate can index from 0. */
         tr->set_buffer(tr, i,
                        map + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   num_verts = max_index + 1 - min_index;

   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer, &upload_flushed,
                  (void**)&out_map);

   /* Bias the offset so addressing with the original indices works. */
   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer_offset = out_offset;
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].stride = key.output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer, NULL);
   mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer = out_buffer;
   out_buffer = NULL;

   /* Setup new vertex elements.
    * Translated elements point at the fallback buffer; untranslated ones
    * are copied through unchanged. */
   for (i = 0; i < mgr->ve->count; i++) {
      if (tr_elem_index[i] < key.nr_elements) {
         te = &key.element[tr_elem_index[i]];
         mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
         mgr->fallback_velems[i].src_format = te->output_format;
         mgr->fallback_velems[i].src_offset = te->output_offset;
         mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vb_slot;
      } else {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   mgr->fallback_ve =
      mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                              mgr->fallback_velems);

   /* Preserve saved_ve.  The lock keeps the bind callback from
    * overwriting the saved user state while we bind the fallback. */
   mgr->ve_binding_lock = TRUE;
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->ve_binding_lock = FALSE;
}
void draw_pt_emit_prepare(struct pt_emit *emit, unsigned prim, unsigned *max_vertices) { struct draw_context *draw = emit->draw; const struct vertex_info *vinfo; unsigned dst_offset; struct translate_key hw_key; unsigned i; /* XXX: need to flush to get prim_vbuf.c to release its allocation?? */ draw_do_flush( draw, DRAW_FLUSH_BACKEND ); /* XXX: may need to defensively reset this later on as clipping can * clobber this state in the render backend. */ emit->prim = prim; draw->render->set_primitive(draw->render, emit->prim); /* Must do this after set_primitive() above: */ emit->vinfo = vinfo = draw->render->get_vertex_info(draw->render); /* Translate from pipeline vertices to hw vertices. */ dst_offset = 0; for (i = 0; i < vinfo->num_attribs; i++) { unsigned emit_sz = 0; unsigned src_buffer = 0; unsigned output_format; unsigned src_offset = (vinfo->attrib[i].src_index * 4 * sizeof(float) ); output_format = draw_translate_vinfo_format(vinfo->attrib[i].emit); emit_sz = draw_translate_vinfo_size(vinfo->attrib[i].emit); /* doesn't handle EMIT_OMIT */ assert(emit_sz != 0); if (vinfo->attrib[i].emit == EMIT_1F_PSIZE) { src_buffer = 1; src_offset = 0; } hw_key.element[i].type = TRANSLATE_ELEMENT_NORMAL; hw_key.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT; hw_key.element[i].input_buffer = src_buffer; hw_key.element[i].input_offset = src_offset; hw_key.element[i].instance_divisor = 0; hw_key.element[i].output_format = output_format; hw_key.element[i].output_offset = dst_offset; dst_offset += emit_sz; } hw_key.nr_elements = vinfo->num_attribs; hw_key.output_stride = vinfo->size * 4; if (!emit->translate || translate_key_compare(&emit->translate->key, &hw_key) != 0) { translate_key_sanitize(&hw_key); emit->translate = translate_cache_find(emit->cache, &hw_key); } *max_vertices = (draw->render->max_vertex_buffer_bytes / (vinfo->size * 4)); }
/* Prepare the fetch-emit fast path: build a translate object that converts
 * the application's vertex buffers directly into hardware vertices,
 * skipping the pipeline_vertex intermediate step.
 */
static void fetch_emit_prepare( struct draw_pt_middle_end *middle,
                                unsigned prim,
                                unsigned opt,
                                unsigned *max_vertices )
{
   struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
   struct draw_context *draw = feme->draw;
   const struct vertex_info *vinfo;
   unsigned i, dst_offset;
   boolean ok;
   struct translate_key key;

   ok = draw->render->set_primitive( draw->render, prim );
   if (!ok) {
      assert(0);
      return;
   }

   /* Must do this after set_primitive() above:
    */
   vinfo = feme->vinfo = draw->render->get_vertex_info(draw->render);

   /* Transform from API vertices to HW vertices, skipping the
    * pipeline_vertex intermediate step.
    */
   dst_offset = 0;
   memset(&key, 0, sizeof(key));

   for (i = 0; i < vinfo->num_attribs; i++) {
      const struct pipe_vertex_element *src =
         &draw->pt.vertex_element[vinfo->attrib[i].src_index];
      unsigned emit_sz = 0;
      unsigned input_format = src->src_format;
      unsigned input_buffer = src->vertex_buffer_index;
      unsigned input_offset = src->src_offset;
      unsigned output_format;

      /* Map the emit token to an output format and size. */
      switch (vinfo->attrib[i].emit) {
      case EMIT_4UB:
         output_format = PIPE_FORMAT_R8G8B8A8_UNORM;
         emit_sz = 4 * sizeof(unsigned char);
         break;
      case EMIT_4F:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         emit_sz = 4 * sizeof(float);
         break;
      case EMIT_3F:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         emit_sz = 3 * sizeof(float);
         break;
      case EMIT_2F:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         emit_sz = 2 * sizeof(float);
         break;
      case EMIT_1F:
         output_format = PIPE_FORMAT_R32_FLOAT;
         emit_sz = 1 * sizeof(float);
         break;
      case EMIT_1F_PSIZE:
         /* Point size is read from the constant buffer bound below at
          * slot nr_vertex_buffers (feme->point_size). */
         input_format = PIPE_FORMAT_R32_FLOAT;
         input_buffer = draw->pt.nr_vertex_buffers;
         input_offset = 0;
         output_format = PIPE_FORMAT_R32_FLOAT;
         emit_sz = 1 * sizeof(float);
         break;
      case EMIT_OMIT:
         /* NOTE(review): this leaves key.element[i] zeroed while
          * nr_elements below still counts it -- presumably the zero
          * element is harmless to translate; confirm. */
         continue;
      default:
         assert(0);
         output_format = PIPE_FORMAT_NONE;
         emit_sz = 0;
         continue;
      }

      key.element[i].input_format = input_format;
      key.element[i].input_buffer = input_buffer;
      key.element[i].input_offset = input_offset;
      key.element[i].output_format = output_format;
      key.element[i].output_offset = dst_offset;

      dst_offset += emit_sz;
   }

   key.nr_elements = vinfo->num_attribs;
   key.output_stride = vinfo->size * 4;

   /* Don't bother with caching at this stage:
    */
   if (!feme->translate ||
       translate_key_compare(&feme->translate->key, &key) != 0)
   {
      translate_key_sanitize(&key);
      feme->translate = translate_cache_find(feme->cache, &key);

      /* Constant point-size source; stride 0 => same value per vertex. */
      feme->translate->set_buffer(feme->translate,
                                  draw->pt.nr_vertex_buffers,
                                  &feme->point_size,
                                  0);
   }

   feme->point_size = draw->rasterizer->point_size;

   /* Bind the application's vertex buffers as translate inputs. */
   for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
      feme->translate->set_buffer(feme->translate,
                                  i,
                                  ((char *)draw->pt.user.vbuffer[i] +
                                   draw->pt.vertex_buffer[i].buffer_offset),
                                  draw->pt.vertex_buffer[i].stride );
   }

   *max_vertices = (draw->render->max_vertex_buffer_bytes /
                    (vinfo->size * 4));

   /* Return an even number of verts.
    * This prevents "parity" errors when splitting long triangle strips which
    * can lead to front/back culling mix-ups.
    * Every other triangle in a strip has an alternate front/back orientation
    * so splitting at an odd position can cause the orientation of subsequent
    * triangles to get reversed.
    */
   *max_vertices = *max_vertices & ~1;
}
/* Prepare the fetch-emit fast path (geometry-shader-aware version):
 * build a translate object that converts the application's vertex buffers
 * directly into hardware vertices, skipping the pipeline_vertex step.
 */
static void fetch_emit_prepare( struct draw_pt_middle_end *middle,
                                unsigned prim,
                                unsigned opt,
                                unsigned *max_vertices )
{
   struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
   struct draw_context *draw = feme->draw;
   const struct vertex_info *vinfo;
   unsigned i, dst_offset;
   boolean ok;
   struct translate_key key;
   /* With a geometry shader bound, the backend must be given the GS
    * output primitive rather than the API primitive. */
   unsigned gs_out_prim = (draw->gs.geometry_shader ?
                           draw->gs.geometry_shader->output_primitive :
                           prim);

   ok = draw->render->set_primitive( draw->render, gs_out_prim );
   if (!ok) {
      assert(0);
      return;
   }

   /* Must do this after set_primitive() above:
    */
   vinfo = feme->vinfo = draw->render->get_vertex_info(draw->render);

   /* Transform from API vertices to HW vertices, skipping the
    * pipeline_vertex intermediate step.
    */
   dst_offset = 0;
   memset(&key, 0, sizeof(key));

   for (i = 0; i < vinfo->num_attribs; i++) {
      const struct pipe_vertex_element *src =
         &draw->pt.vertex_element[vinfo->attrib[i].src_index];
      unsigned emit_sz = 0;
      unsigned input_format = src->src_format;
      unsigned input_buffer = src->vertex_buffer_index;
      unsigned input_offset = src->src_offset;
      unsigned output_format;

      output_format = draw_translate_vinfo_format(vinfo->attrib[i].emit);
      emit_sz = draw_translate_vinfo_size(vinfo->attrib[i].emit);

      /* NOTE(review): skipping here leaves key.element[i] zeroed while
       * nr_elements below still counts it; presumably harmless -- confirm. */
      if (vinfo->attrib[i].emit == EMIT_OMIT)
         continue;

      if (vinfo->attrib[i].emit == EMIT_1F_PSIZE) {
         /* Point size is read from the constant buffer bound below at
          * slot nr_vertex_buffers (feme->point_size). */
         input_format = PIPE_FORMAT_R32_FLOAT;
         input_buffer = draw->pt.nr_vertex_buffers;
         input_offset = 0;
      }

      key.element[i].type = TRANSLATE_ELEMENT_NORMAL;
      key.element[i].input_format = input_format;
      key.element[i].input_buffer = input_buffer;
      key.element[i].input_offset = input_offset;
      key.element[i].instance_divisor = src->instance_divisor;
      key.element[i].output_format = output_format;
      key.element[i].output_offset = dst_offset;

      dst_offset += emit_sz;
   }

   key.nr_elements = vinfo->num_attribs;
   key.output_stride = vinfo->size * 4;

   /* Don't bother with caching at this stage:
    */
   if (!feme->translate ||
       translate_key_compare(&feme->translate->key, &key) != 0)
   {
      translate_key_sanitize(&key);
      feme->translate = translate_cache_find(feme->cache, &key);

      /* Constant point-size source; stride 0 => same value per vertex. */
      feme->translate->set_buffer(feme->translate,
                                  draw->pt.nr_vertex_buffers,
                                  &feme->point_size,
                                  0, ~0);
   }

   feme->point_size = draw->rasterizer->point_size;

   /* Bind the application's vertex buffers as translate inputs. */
   for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
      feme->translate->set_buffer(feme->translate,
                                  i,
                                  ((char *)draw->pt.user.vbuffer[i] +
                                   draw->pt.vertex_buffer[i].buffer_offset),
                                  draw->pt.vertex_buffer[i].stride,
                                  draw->pt.max_index);
   }

   *max_vertices = (draw->render->max_vertex_buffer_bytes /
                    (vinfo->size * 4));
}
/** * Perform the fetch from API vertex elements & vertex buffers, to a * contiguous set of float[4] attributes as required for the * vertex_shader->run_linear() method. * * This is used in all cases except pure passthrough * (draw_pt_fetch_emit.c) which has its own version to translate * directly to hw vertices. * */ void draw_pt_fetch_prepare(struct pt_fetch *fetch, unsigned vs_input_count, unsigned vertex_size, unsigned instance_id_index) { struct draw_context *draw = fetch->draw; unsigned nr_inputs; unsigned i, nr = 0, ei = 0; unsigned dst_offset = 0; unsigned num_extra_inputs = 0; struct translate_key key; fetch->vertex_size = vertex_size; /* Leave the clipmask/edgeflags/pad/vertex_id untouched */ dst_offset += 1 * sizeof(float); /* Just leave the clip[] and pre_clip_pos[] array untouched. */ dst_offset += 8 * sizeof(float); if (instance_id_index != ~0) { num_extra_inputs++; } assert(draw->pt.nr_vertex_elements + num_extra_inputs >= vs_input_count); nr_inputs = MIN2(vs_input_count, draw->pt.nr_vertex_elements + num_extra_inputs); for (i = 0; i < nr_inputs; i++) { if (i == instance_id_index) { key.element[nr].type = TRANSLATE_ELEMENT_INSTANCE_ID; key.element[nr].input_format = PIPE_FORMAT_R32_USCALED; key.element[nr].output_format = PIPE_FORMAT_R32_USCALED; key.element[nr].output_offset = dst_offset; dst_offset += sizeof(uint); } else if (util_format_is_pure_sint(draw->pt.vertex_element[i].src_format)) { key.element[nr].type = TRANSLATE_ELEMENT_NORMAL; key.element[nr].input_format = draw->pt.vertex_element[ei].src_format; key.element[nr].input_buffer = draw->pt.vertex_element[ei].vertex_buffer_index; key.element[nr].input_offset = draw->pt.vertex_element[ei].src_offset; key.element[nr].instance_divisor = draw->pt.vertex_element[ei].instance_divisor; key.element[nr].output_format = PIPE_FORMAT_R32G32B32A32_SINT; key.element[nr].output_offset = dst_offset; ei++; dst_offset += 4 * sizeof(int); } else if 
(util_format_is_pure_uint(draw->pt.vertex_element[i].src_format)) { key.element[nr].type = TRANSLATE_ELEMENT_NORMAL; key.element[nr].input_format = draw->pt.vertex_element[ei].src_format; key.element[nr].input_buffer = draw->pt.vertex_element[ei].vertex_buffer_index; key.element[nr].input_offset = draw->pt.vertex_element[ei].src_offset; key.element[nr].instance_divisor = draw->pt.vertex_element[ei].instance_divisor; key.element[nr].output_format = PIPE_FORMAT_R32G32B32A32_UINT; key.element[nr].output_offset = dst_offset; ei++; dst_offset += 4 * sizeof(unsigned); } else { key.element[nr].type = TRANSLATE_ELEMENT_NORMAL; key.element[nr].input_format = draw->pt.vertex_element[ei].src_format; key.element[nr].input_buffer = draw->pt.vertex_element[ei].vertex_buffer_index; key.element[nr].input_offset = draw->pt.vertex_element[ei].src_offset; key.element[nr].instance_divisor = draw->pt.vertex_element[ei].instance_divisor; key.element[nr].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT; key.element[nr].output_offset = dst_offset; ei++; dst_offset += 4 * sizeof(float); } nr++; } assert(dst_offset <= vertex_size); key.nr_elements = nr; key.output_stride = vertex_size; if (!fetch->translate || translate_key_compare(&fetch->translate->key, &key) != 0) { translate_key_sanitize(&key); fetch->translate = translate_cache_find(fetch->cache, &key); } }
/**
 * Set the prim type for subsequent vertices.
 * This may result in a new vertex size. The existing vbuffer (if any)
 * will be flushed if needed and a new one allocated.
 */
static void
vbuf_start_prim( struct vbuf_stage *vbuf, uint prim )
{
   struct translate_key hw_key;
   unsigned dst_offset;        /* running byte offset in the hw vertex */
   unsigned i;

   vbuf->render->set_primitive(vbuf->render, prim);

   /* Must do this after set_primitive() above:
    *
    * XXX: need some state managment to track when this needs to be
    * recalculated.  The driver should tell us whether there was a
    * state change.
    */
   vbuf->vinfo = vbuf->render->get_vertex_info(vbuf->render);
   vbuf->vertex_size = vbuf->vinfo->size * sizeof(float);

   /* Translate from pipeline vertices to hw vertices.
    */
   dst_offset = 0;
   for (i = 0; i < vbuf->vinfo->num_attribs; i++) {
      unsigned emit_sz = 0;
      unsigned src_buffer = 0;
      unsigned output_format;
      unsigned src_offset = (vbuf->vinfo->attrib[i].src_index * 4 * sizeof(float) );

      /* Map the emit token to an output format and size. */
      switch (vbuf->vinfo->attrib[i].emit) {
      case EMIT_4F:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         emit_sz = 4 * sizeof(float);
         break;
      case EMIT_3F:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         emit_sz = 3 * sizeof(float);
         break;
      case EMIT_2F:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         emit_sz = 2 * sizeof(float);
         break;
      case EMIT_1F:
         output_format = PIPE_FORMAT_R32_FLOAT;
         emit_sz = 1 * sizeof(float);
         break;
      case EMIT_1F_PSIZE:
         /* Point size is fed from the single-float buffer 1 bound below. */
         output_format = PIPE_FORMAT_R32_FLOAT;
         emit_sz = 1 * sizeof(float);
         src_buffer = 1;
         src_offset = 0;
         break;
      case EMIT_4UB:
         /* NOTE(review): B8G8R8A8 here (vs R8G8B8A8 in the fetch-emit
          * path) -- presumably the hw color component order; confirm. */
         output_format = PIPE_FORMAT_B8G8R8A8_UNORM;
         emit_sz = 4 * sizeof(ubyte);
         break;
      default:
         assert(0);
         output_format = PIPE_FORMAT_NONE;
         emit_sz = 0;
         break;
      }

      hw_key.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
      hw_key.element[i].input_buffer = src_buffer;
      hw_key.element[i].input_offset = src_offset;
      hw_key.element[i].output_format = output_format;
      hw_key.element[i].output_offset = dst_offset;
      dst_offset += emit_sz;
   }

   hw_key.nr_elements = vbuf->vinfo->num_attribs;
   hw_key.output_stride = vbuf->vinfo->size * 4;

   /* Don't bother with caching at this stage:
    */
   if (!vbuf->translate ||
       translate_key_compare(&vbuf->translate->key, &hw_key) != 0)
   {
      translate_key_sanitize(&hw_key);
      vbuf->translate = translate_cache_find(vbuf->cache, &hw_key);

      /* Constant point-size source; stride 0 => same value per vertex. */
      vbuf->translate->set_buffer(vbuf->translate, 1, &vbuf->point_size, 0);
   }

   vbuf->point_size = vbuf->stage.draw->rasterizer->point_size;

   /* Allocate new buffer?
    */
   assert(vbuf->vertices == NULL);
   vbuf_alloc_vertices(vbuf);
}
/**
 * Set the prim type for subsequent vertices.
 * This may result in a new vertex size. The existing vbuffer (if any)
 * will be flushed if needed and a new one allocated.
 */
static void
vbuf_start_prim( struct vbuf_stage *vbuf, uint prim )
{
   struct translate_key key;
   const struct vertex_info *vinfo;
   unsigned offset = 0;
   unsigned attr;

   vbuf->render->set_primitive(vbuf->render, prim);

   /* Must do this after set_primitive() above:
    *
    * XXX: need some state managment to track when this needs to be
    * recalculated.  The driver should tell us whether there was a
    * state change.
    */
   vbuf->vinfo = vbuf->render->get_vertex_info(vbuf->render);
   vinfo = vbuf->vinfo;
   vbuf->vertex_size = vinfo->size * sizeof(float);

   /* Translate from pipeline vertices to hw vertices: one translate
    * element per emitted attribute. */
   for (attr = 0; attr < vinfo->num_attribs; attr++) {
      struct translate_element *el = &key.element[attr];
      unsigned src_buffer = 0;
      unsigned src_offset = vinfo->attrib[attr].src_index * 4 * sizeof(float);
      enum pipe_format out_format =
         draw_translate_vinfo_format(vinfo->attrib[attr].emit);
      unsigned emit_sz = draw_translate_vinfo_size(vinfo->attrib[attr].emit);

      /* doesn't handle EMIT_OMIT */
      assert(emit_sz != 0);

      if (vinfo->attrib[attr].emit == EMIT_1F_PSIZE) {
         /* Point size is fed from the single-float buffer 1 bound below. */
         src_buffer = 1;
         src_offset = 0;
      }
      else if (vinfo->attrib[attr].src_index == DRAW_ATTR_NONEXIST) {
         /* elements which don't exist will get assigned zeros */
         src_buffer = 2;
         src_offset = 0;
      }

      el->type = TRANSLATE_ELEMENT_NORMAL;
      el->input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
      el->input_buffer = src_buffer;
      el->input_offset = src_offset;
      el->instance_divisor = 0;
      el->output_format = out_format;
      el->output_offset = offset;

      offset += emit_sz;
   }

   key.nr_elements = vinfo->num_attribs;
   key.output_stride = vbuf->vertex_size;

   /* Don't bother with caching at this stage:
    */
   if (!vbuf->translate ||
       translate_key_compare(&vbuf->translate->key, &key) != 0) {
      translate_key_sanitize(&key);
      vbuf->translate = translate_cache_find(vbuf->cache, &key);

      /* Buffer 1: constant point size; buffer 2: zeros for nonexistent
       * attributes.  Stride 0 => same data for every vertex. */
      vbuf->translate->set_buffer(vbuf->translate, 1, &vbuf->point_size, 0, ~0);
      vbuf->translate->set_buffer(vbuf->translate, 2, &vbuf->zero4[0], 0, ~0);
   }

   vbuf->point_size = vbuf->stage.draw->rasterizer->point_size;

   /* Allocate new buffer?
    */
   assert(vbuf->vertices == NULL);
   vbuf_alloc_vertices(vbuf);
}