/* Build and emit a VERTEX_ELEMENT state packet covering every input
 * consumed by the bound vertex program.  The pre-built per-input state
 * in brw->vb.inputs[] is copied into the packet, the header is filled
 * in, and the whole thing is routed through the cached-batch mechanism
 * so identical state is not re-emitted.
 *
 * NOTE(review): count is taken straight from the vertex program's
 * num_inputs with no clamp against the vep.ve[] capacity (the sibling
 * upload path checks BRW_VEP_MAX) — presumably guaranteed elsewhere;
 * worth confirming.
 */
boolean brw_upload_vertex_elements( struct brw_context *brw )
{
   struct brw_vertex_element_packet vep;
   unsigned count = brw->attribs.VertexProgram->info.num_inputs;
   unsigned payload;             /* bytes of per-element state after the header */
   unsigned j;

   memset(&vep, 0, sizeof(vep));

   for (j = 0; j < count; j++)
      vep.ve[j] = brw->vb.inputs[j];

   payload = count * sizeof(vep.ve[0]);

   /* Hardware length field counts dwords after the first two. */
   vep.header.length = (1 + payload/4) - 2;
   vep.header.opcode = CMD_VERTEX_ELEMENT;

   /* Total packet size: 4-byte header plus the element payload. */
   brw_cached_batch_struct(brw, &vep, 4 + payload);
   return TRUE;
}
/* Gather the vertex arrays referenced by the current vertex shader,
 * upload any user-space (non-VBO) arrays into VBOs — interleaved as a
 * single buffer when possible — and emit the VERTEX_BUFFER and
 * VERTEX_ELEMENT state packets describing them to the hardware.
 *
 * min_index/max_index bound the elements actually referenced by the
 * draw call, so only that range needs uploading.
 *
 * Returns GL_FALSE to request a fallback (position array not properly
 * enabled, or more enabled attributes than the VE packet can hold);
 * GL_TRUE on success.
 */
GLboolean brw_upload_vertices( struct brw_context *brw,
                               GLuint min_index,
                               GLuint max_index )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* Bitmask of vertex attributes the VS reads; consumed bit-by-bit below. */
   GLuint tmp = brw->vs.prog_data->inputs_read;
   struct brw_vertex_element_packet vep;
   struct brw_array_state vbp;
   GLuint i;
   /* Base pointer of the candidate interleaved user array (attr 0). */
   const void *ptr = NULL;
   /* Common stride if all uploads share one interleaved layout; 0 = not interleaved. */
   GLuint interleave = 0;

   /* All enabled inputs, in attribute order. */
   struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
   GLuint nr_enabled = 0;

   /* Subset of enabled inputs that live in client memory and need uploading. */
   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   memset(&vbp, 0, sizeof(vbp));
   memset(&vep, 0, sizeof(vep));

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   while (tmp) {
      /* Lowest set bit = next enabled attribute index. */
      GLuint i = _mesa_ffsll(tmp)-1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      tmp &= ~(1<<i);
      enabled[nr_enabled++] = input;

      input->index = i;
      input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
      /* Stride 0 means a single constant element; otherwise upload the
       * referenced range.
       */
      input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;

      /* BufferObj->Name == 0 means client-memory array, not a real VBO:
       * it must be copied into a VBO before the hardware can see it.
       */
      if (!input->glarray->BufferObj->Name) {
	 if (i == 0) {
	    /* Position array not properly enabled:
	     */
	    if (input->glarray->StrideB == 0)
	       return GL_FALSE;

	    /* Attribute 0 seeds the interleave candidate: its stride and
	     * base pointer define the tentative shared layout.
	     */
	    interleave = input->glarray->StrideB;
	    ptr = input->glarray->Ptr;
	 }
	 else if (interleave != input->glarray->StrideB ||
		  (const char *)input->glarray->Ptr - (const char *)ptr < 0 ||
		  (const char *)input->glarray->Ptr - (const char *)ptr > interleave)
	 {
	    /* Stride mismatch, or this array's pointer doesn't fall within
	     * one stride after the base — can't treat as interleaved.
	     */
	    interleave = 0;
	 }

	 upload[nr_uploads++] = input;

	 /* We rebase drawing to start at element zero only when
	  * varyings are not in vbos, which means we can end up
	  * uploading non-varying arrays (stride != 0) when min_index
	  * is zero.  This doesn't matter as the amount to upload is
	  * the same for these arrays whether the draw call is rebased
	  * or not - we just have to upload the one element.
	  */
	 assert(min_index == 0 || input->glarray->StrideB == 0);
      }
   }

   /* Upload interleaved arrays if all uploads are interleaved
    */
   if (nr_uploads > 1 &&
       interleave &&
       interleave <= 256) {
      /* Copy the whole interleaved block once via attribute 0, then point
       * the remaining attributes into that same VBO at their offsets.
       */
      struct brw_vertex_element *input0 = upload[0];

      input0->glarray = copy_array_to_vbo_array(brw, 0,
						input0->glarray,
						interleave,
						input0->count);

      for (i = 1; i < nr_uploads; i++) {
	 upload[i]->glarray = interleaved_vbo_array(brw,
						    i,
						    input0->glarray,
						    upload[i]->glarray,
						    ptr);
      }
   }
   else {
      /* Non-interleaved (or single) uploads: one tightly-packed VBO each. */
      for (i = 0; i < nr_uploads; i++) {
	 struct brw_vertex_element *input = upload[i];

	 input->glarray = copy_array_to_vbo_array(brw, i,
						  input->glarray,
						  input->element_size,
						  input->count);
      }
   }

   /* XXX: In the rare cases where this happens we fallback all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (nr_enabled >= BRW_VEP_MAX)
      return GL_FALSE;

   /* This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   for (i = 0; i < nr_enabled; i++) {
      struct brw_vertex_element *input = enabled[i];

      /* Fill in this input's VERTEX_ELEMENT entry: source format and a
       * packed dword-per-attribute destination offset in the VUE.
       */
      input->vep = &vep.ve[i];
      input->vep->ve0.src_format = get_surface_type(input->glarray->Type,
						    input->glarray->Size,
						    input->glarray->Normalized);
      input->vep->ve0.valid = 1;
      input->vep->ve1.dst_offset = (i) * 4;
      input->vep->ve1.vfcomponent3 = BRW_VFCOMPONENT_STORE_SRC;
      input->vep->ve1.vfcomponent2 = BRW_VFCOMPONENT_STORE_SRC;
      input->vep->ve1.vfcomponent1 = BRW_VFCOMPONENT_STORE_SRC;
      input->vep->ve1.vfcomponent0 = BRW_VFCOMPONENT_STORE_SRC;

      /* Components the array doesn't supply default to 0, except w which
       * defaults to 1.0.  Intentional fallthrough: each smaller Size also
       * applies every override below it.
       */
      switch (input->glarray->Size) {
      case 0: input->vep->ve1.vfcomponent0 = BRW_VFCOMPONENT_STORE_0;
      case 1: input->vep->ve1.vfcomponent1 = BRW_VFCOMPONENT_STORE_0;
      case 2: input->vep->ve1.vfcomponent2 = BRW_VFCOMPONENT_STORE_0;
      case 3: input->vep->ve1.vfcomponent3 = BRW_VFCOMPONENT_STORE_1_FLT;
	 break;
      }

      /* One hardware vertex buffer per element (buffer index == element
       * index), with the array's stride and starting offset.
       */
      input->vep->ve0.vertex_buffer_index = i;
      input->vep->ve0.src_offset = 0;

      vbp.vb[i].vb0.bits.pitch = input->glarray->StrideB;
      vbp.vb[i].vb0.bits.pad = 0;
      vbp.vb[i].vb0.bits.access_type = BRW_VERTEXBUFFER_ACCESS_VERTEXDATA;
      vbp.vb[i].vb0.bits.vb_index = i;
      /* For VBO-backed arrays Ptr is an offset into the buffer, hence the
       * cast — NOTE(review): truncates on 64-bit pointers; presumably safe
       * here since only buffer offsets reach this point.  Verify.
       */
      vbp.vb[i].offset = (GLuint)input->glarray->Ptr;
      vbp.vb[i].buffer = array_buffer(input->glarray);
      vbp.vb[i].max_index = max_index;
   }

   /* Now emit VB and VEP state packets:
    */
   /* Length field counts dwords after the first two; 4 dwords per VB. */
   vbp.header.bits.length = (1 + nr_enabled * 4) - 2;
   vbp.header.bits.opcode = CMD_VERTEX_BUFFER;

   BEGIN_BATCH(vbp.header.bits.length+2, 0);
   OUT_BATCH( vbp.header.dword );

   for (i = 0; i < nr_enabled; i++) {
      OUT_BATCH( vbp.vb[i].vb0.dword );
      /* Relocate: VBO's GPU offset plus the array's offset within it. */
      OUT_BATCH( bmBufferOffset(&brw->intel, vbp.vb[i].buffer) + vbp.vb[i].offset);
      OUT_BATCH( vbp.vb[i].max_index );
      OUT_BATCH( vbp.vb[i].instance_data_step_rate );
   }
   ADVANCE_BATCH();

   /* VERTEX_ELEMENT packet goes through the cached-batch path so it is
    * only re-emitted when it actually changes.
    */
   vep.header.length = (1 + nr_enabled * sizeof(vep.ve[0])/4) - 2;
   vep.header.opcode = CMD_VERTEX_ELEMENT;
   brw_cached_batch_struct(brw, &vep, 4 + nr_enabled * sizeof(vep.ve[0]));

   return GL_TRUE;
}