static const struct vertex_info* r300_render_get_vertex_info(struct vbuf_render* render) { struct r300_render* r300render = r300_render(render); struct r300_context* r300 = r300render->r300; return &r300->vertex_info; }
/* vbuf_render::set_primitive: record both the Gallium primitive and its
 * hardware translation for use by the later draw emission. */
static void r300_render_set_primitive(struct vbuf_render* render, unsigned prim)
{
    struct r300_render* r300r = r300_render(render);

    r300r->hwprim = r300_translate_primitive(prim);
    r300r->prim = prim;
}
/* vbuf_render::map_vertices: map the software-TCL vertex buffer for CPU
 * writes and return a pointer at the current append offset.  The buffer
 * is unmapped again in render_unmap_vertices. */
static void* r300_render_map_vertices(struct vbuf_render* render)
{
    struct r300_render* r300render = r300_render(render);
    struct pipe_screen* screen = r300render->r300->context.screen;

    r300render->vbo_ptr = pipe_buffer_map(screen, r300render->vbo,
                                          PIPE_BUFFER_USAGE_CPU_WRITE);

    /* NOTE(review): the map result is not NULL-checked before use, and if
     * vbo_ptr is void* this addition relies on the GNU void*-arithmetic
     * extension — confirm vbo_ptr's declared type. */
    return (r300render->vbo_ptr + r300render->vbo_offset);
}
static const struct vertex_info* r300_render_get_vertex_info(struct vbuf_render* render) { struct r300_render* r300render = r300_render(render); struct r300_context* r300 = r300render->r300; r300_update_derived_state(r300); return &r300->vertex_info->vinfo; }
/* vbuf_render::map_vertices: return a write pointer into the already
 * mapped vertex buffer at the current append offset. */
static void* r300_render_map_vertices(struct vbuf_render* render)
{
    struct r300_render* r300r = r300_render(render);
    struct r300_context* ctx = r300r->r300;

    DBG(ctx, DBG_DRAW, "r300: render_map_vertices\n");

    /* The buffer must have been mapped beforehand. */
    assert(r300r->vbo_ptr);

    return r300r->vbo_ptr + ctx->draw_vbo_offset;
}
/* vbuf_render::set_primitive: remember the Gallium primitive and its
 * hardware translation.  Always succeeds. */
static boolean r300_render_set_primitive(struct vbuf_render* render, unsigned prim)
{
    struct r300_render* r300r = r300_render(render);

    r300r->hwprim = r300_translate_primitive(prim);
    r300r->prim = prim;

    return TRUE;
}
/* vbuf_render::release_vertices: advance the VBO append offset past the
 * vertices consumed by this batch and reset the high-water mark. */
static void r300_render_release_vertices(struct vbuf_render* render)
{
    struct r300_render* r300r = r300_render(render);
    struct r300_context* ctx = r300r->r300;

    DBG(ctx, DBG_DRAW, "r300: render_release_vertices\n");

    ctx->draw_vbo_offset += r300r->vbo_max_used;
    r300r->vbo_max_used = 0;
}
/* vbuf_render::draw_arrays: emit a non-indexed draw of `count` vertices
 * from the software-TCL vertex buffer.  `start` is implied by the current
 * VBO offset and is not used here. */
static void r300_render_draw_arrays(struct vbuf_render* render, unsigned start,
                                    unsigned count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    uint8_t* ptr;
    unsigned i;
    unsigned dwords = 6;
    CS_LOCALS(r300);
    /* Only used by the disabled vertex-dump debug path below. */
    (void) i; (void) ptr;

    DBG(r300, DBG_DRAW, "r300: render_draw_arrays (count: %d)\n", count);

    if (r300->draw_first_emitted) {
        /* Subsequent draws: validate state and reserve CS space (may flush). */
        if (!r300_prepare_for_rendering(r300,
                PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL, NULL, 6, 0, 0))
            return;
    } else {
        /* First draw of the batch: just emit the states. */
        if (!r300_emit_states(r300,
                PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL, NULL, 0, 0))
            return;
    }

    /* Uncomment to dump all VBOs rendered through this interface.
     * Slow and noisy!
    ptr = pipe_buffer_map(&r300render->r300->context, r300render->vbo,
                          PIPE_TRANSFER_READ, &r300render->vbo_transfer);
    for (i = 0; i < count; i++) {
        printf("r300: Vertex %d\n", i);
        draw_dump_emitted_vertex(&r300->vertex_info, ptr);
        ptr += r300->vertex_info.size * 4;
        printf("\n");
    }
    pipe_buffer_unmap(&r300render->r300->context, r300render->vbo,
                      r300render->vbo_transfer);
    */

    BEGIN_CS(dwords);
    /* Provoking-vertex workaround depends on the primitive type. */
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
            r300_provoking_vertex_fixes(r300, r300render->prim));
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
           r300render->hwprim);
    END_CS;

    r300->draw_first_emitted = TRUE;
}
/* vbuf_render::unmap_vertices: record how far into the vertex buffer this
 * batch wrote, so release_vertices can advance the append offset.
 * `min` is unused; only the highest touched vertex matters. */
static void r300_render_unmap_vertices(struct vbuf_render* render,
                                       ushort min, ushort max)
{
    struct r300_render* r300r = r300_render(render);
    struct r300_context* ctx = r300r->r300;

    DBG(ctx, DBG_DRAW, "r300: render_unmap_vertices\n");

    /* High-water mark in bytes: vertices 0..max inclusive. */
    r300r->vbo_max_used = MAX2(r300r->vbo_max_used,
                               r300r->vertex_size * (max + 1));
}
/* vbuf_render::unmap_vertices: program the maximum fetchable vertex index,
 * record the batch's high-water mark, and unmap the vertex buffer. */
static void r300_render_unmap_vertices(struct vbuf_render* render,
                                       ushort min, ushort max)
{
    struct r300_render* r300render = r300_render(render);
    struct pipe_screen* screen = r300render->r300->context.screen;
    CS_LOCALS(r300render->r300);

    /* Tell the VAP the highest vertex index it may fetch. */
    BEGIN_CS(2);
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max);
    END_CS;

    /* High-water mark in bytes: vertices 0..max inclusive.  Used by
     * release_vertices to advance the VBO offset. */
    r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
                                    r300render->vertex_size * (max + 1));
    pipe_buffer_unmap(screen, r300render->vbo);
}
/* vbuf_render::draw_elements: upload the CPU-side ushort indices into a
 * temporary GPU index buffer and emit one indexed draw referencing it. */
static void r300_render_draw_elements(struct vbuf_render* render,
                                      const ushort* indices, uint count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    /* Highest vertex index that still fits in the remainder of the VBO
     * (vertex size is in dwords, hence the * 4). */
    unsigned max_index = (r300->vbo->size - r300->draw_vbo_offset) /
                         (r300render->r300->vertex_info.size * 4) - 1;
    struct pipe_resource *index_buffer = NULL;
    unsigned index_buffer_offset;
    CS_LOCALS(r300);

    DBG(r300, DBG_DRAW, "r300: render_draw_elements (count: %d)\n", count);

    /* Upload the indices (2 bytes each) into a GPU-visible buffer. */
    u_upload_data(r300->uploader, 0, count * 2, indices,
                  &index_buffer_offset, &index_buffer);
    if (!index_buffer) {
        return;
    }

    /* Validate state and reserve the 12 dwords this draw emits. */
    if (!r300_prepare_for_rendering(r300,
            PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
            index_buffer, 12, 0, 0, -1)) {
        pipe_resource_reference(&index_buffer, NULL);
        return;
    }

    BEGIN_CS(12);
    /* Provoking-vertex workaround depends on the primitive type. */
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
            r300_provoking_vertex_fixes(r300, r300render->prim));
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max_index);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
           r300render->hwprim);
    /* Point the hardware at the uploaded buffer; 16-bit indices pack two
     * per dword, so the dword count is (count + 1) / 2. */
    OUT_CS_PKT3(R300_PACKET3_INDX_BUFFER, 2);
    OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2));
    OUT_CS(index_buffer_offset);
    OUT_CS((count + 1) / 2);
    OUT_CS_RELOC(r300_resource(index_buffer));
    END_CS;

    /* Drop our reference; the CS now holds the buffer alive. */
    pipe_resource_reference(&index_buffer, NULL);
}
static void* r300_render_map_vertices(struct vbuf_render* render) { struct r300_render* r300render = r300_render(render); struct r300_context* r300 = r300render->r300; assert(!r300render->vbo_transfer); DBG(r300, DBG_DRAW, "r300: render_map_vertices\n"); r300render->vbo_ptr = pipe_buffer_map(&r300render->r300->context, r300->vbo, PIPE_TRANSFER_WRITE, &r300render->vbo_transfer); assert(r300render->vbo_ptr); return ((uint8_t*)r300render->vbo_ptr + r300->draw_vbo_offset); }
/* vbuf_render::unmap_vertices: record this batch's high-water mark and
 * unmap the vertex buffer.  `min` is unused; only the highest touched
 * vertex matters. */
static void r300_render_unmap_vertices(struct vbuf_render* render,
                                       ushort min, ushort max)
{
    struct r300_render* r300r = r300_render(render);
    struct r300_context* ctx = r300r->r300;

    assert(r300r->vbo_transfer);

    DBG(ctx, DBG_DRAW, "r300: render_unmap_vertices\n");

    /* High-water mark in bytes: vertices 0..max inclusive. */
    r300r->vbo_max_used = MAX2(r300r->vbo_max_used,
                               r300r->vertex_size * (max + 1));

    pipe_buffer_unmap(&ctx->context, r300r->vbo_transfer);
    r300r->vbo_transfer = NULL;
}
/* vbuf_render::draw_arrays: emit a non-indexed draw of `count` vertices.
 * `start` is unused here — the vertex fetch is implied by the current
 * vertex-buffer setup. */
static void r300_render_draw_arrays(struct vbuf_render* render, unsigned start,
                                    unsigned count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    CS_LOCALS(r300);

    /* Flush any pending state changes before the draw packet. */
    r300_emit_dirty_state(r300);

    DBG(r300, DBG_DRAW, "r300: Doing vbuf render, count %d\n", count);

    BEGIN_CS(2);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
           r300render->hwprim);
    END_CS;
}
/* vbuf_render::draw_arrays: emit a non-indexed draw of `count` vertices
 * from the software-TCL vertex arrays.  `start` is implied by the current
 * VBO offset and is not used here. */
static void r300_render_draw_arrays(struct vbuf_render* render, unsigned start,
                                    unsigned count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    uint8_t* ptr;
    unsigned i;
    unsigned dwords = 6;
    CS_LOCALS(r300);
    /* ptr/i are vestigial debug-dump locals, silenced here. */
    (void) i; (void) ptr;

    DBG(r300, DBG_DRAW, "r300: render_draw_arrays (count: %d)\n", count);

    if (r300->draw_first_emitted) {
        /* Subsequent draws: validate state and reserve CS space (may flush). */
        if (!r300_prepare_for_rendering(r300,
                PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL,
                NULL, dwords, 0, 0, -1))
            return;
    } else {
        /* First draw of the batch: just emit the states. */
        if (!r300_emit_states(r300,
                PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL,
                NULL, 0, 0, -1))
            return;
    }

    BEGIN_CS(dwords);
    /* Provoking-vertex workaround depends on the primitive type. */
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
            r300_provoking_vertex_fixes(r300, r300render->prim));
    /* NOTE(review): count == 0 would wrap here — presumably the vbuf
     * module never calls draw_arrays with zero vertices; confirm. */
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, count - 1);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
           r300render->hwprim);
    END_CS;

    r300->draw_first_emitted = TRUE;
}
/* vbuf_render::draw: emit an indexed draw, packing the 16-bit indices two
 * per dword directly into the command stream. */
static void r300_render_draw(struct vbuf_render* render, const ushort* indices,
                             uint count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    unsigned i;
    CS_LOCALS(r300);

    /* Flush any pending state changes before the draw packet. */
    r300_emit_dirty_state(r300);

    BEGIN_CS(2 + (count+1)/2);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, (count+1)/2);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
           r300render->hwprim);

    /* Pack index pairs.  Use `i + 1 < count` rather than the previous
     * `i < count - 1`: with a signed `i` and unsigned `count`, count == 0
     * made `count - 1` wrap to UINT_MAX and the loop read far past the
     * end of `indices`. */
    for (i = 0; i + 1 < count; i += 2) {
        OUT_CS(indices[i+1] << 16 | indices[i]);
    }
    /* An odd trailing index occupies the low half of the last dword. */
    if (count % 2) {
        OUT_CS(indices[count-1]);
    }
    END_CS;
}
/* vbuf_render::draw_elements: emit indexed draws with the indices packed
 * inline into the command stream, splitting into multiple draw packets
 * when the indices do not all fit into the remaining CS space. */
static void r300_render_draw_elements(struct vbuf_render* render,
                                      const ushort* indices, uint count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    int i;
    unsigned end_cs_dwords;
    /* Highest vertex index that still fits in the remainder of the VBO
     * (vertex size is in dwords, hence the * 4). */
    unsigned max_index = (r300->draw_vbo_size - r300->draw_vbo_offset) /
                         (r300render->r300->vertex_info.size * 4) - 1;
    unsigned short_count;
    unsigned free_dwords;
    CS_LOCALS(r300);

    DBG(r300, DBG_DRAW, "r300: render_draw_elements (count: %d)\n", count);

    if (r300->draw_first_emitted) {
        /* Subsequent draws: validate state and reserve CS space (may flush). */
        if (!r300_prepare_for_rendering(r300,
                PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
                NULL, 256, 0, 0, -1))
            return;
    } else {
        /* First draw of the batch: just emit the states. */
        if (!r300_emit_states(r300,
                PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
                NULL, 0, 0, -1))
            return;
    }

    /* Below we manage the CS space manually because there may be more
     * indices than it can fit in CS. */

    end_cs_dwords = r300_get_num_cs_end_dwords(r300);

    while (count) {
        free_dwords = RADEON_MAX_CMDBUF_DWORDS - r300->cs->cdw;

        /* 6 dwords of draw setup, then two 16-bit indices per dword. */
        short_count = MIN2(count, (free_dwords - end_cs_dwords - 6) * 2);

        BEGIN_CS(6 + (short_count+1)/2);
        /* Provoking-vertex workaround depends on the primitive type. */
        OUT_CS_REG(R300_GA_COLOR_CONTROL,
                r300_provoking_vertex_fixes(r300, r300render->prim));
        OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max_index);
        OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, (short_count+1)/2);
        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (short_count << 16) |
               r300render->hwprim);
        /* NOTE(review): signed `i` vs unsigned `short_count - 1` — if
         * short_count could ever be 0 the comparison wraps; presumably the
         * CS space reservation (256 dwords) guarantees short_count > 0. */
        for (i = 0; i < short_count-1; i += 2) {
            OUT_CS(indices[i+1] << 16 | indices[i]);
        }
        /* An odd trailing index occupies the low half of the last dword. */
        if (short_count % 2) {
            OUT_CS(indices[short_count-1]);
        }
        END_CS;

        /* OK now subtract the emitted indices and see if we need to emit
         * another draw packet. */
        indices += short_count;
        count -= short_count;
        if (count) {
            if (!r300_prepare_for_rendering(r300,
                    PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
                    NULL, 256, 0, 0, -1))
                return;

            end_cs_dwords = r300_get_num_cs_end_dwords(r300);
        }
    }

    r300->draw_first_emitted = TRUE;
}