/* Emit the vertex arrays for one primitive and fire it, either as an
 * indexed draw (element buffer) or as a plain vertex-list draw.
 *
 * rmesa:      r300 context holding the AOS/element state to emit
 * ctx:        GL context (needed for element emission)
 * start, end: range of vertices in the current vertex buffer
 * prim:       GL primitive type for this draw
 *
 * Silently returns when the primitive type is unsupported or the
 * range yields no complete primitive.
 */
static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
				   int start, int end, int prim)
{
	int type, num_verts;

	type = r300PrimitiveType(rmesa, ctx, prim);
	num_verts = r300NumVerts(rmesa, end - start, prim);

	if (type < 0 || num_verts <= 0)
		return;

	if (rmesa->state.VB.Elts) {
		/* The hardware index count field is only 16 bits wide, so
		 * bail out BEFORE emitting any packets.  (Previously the
		 * AOS packets were emitted first, leaving stray state in
		 * the command stream when this check failed.) */
		if (num_verts > 65535) {
			/* not implemented yet */
			WARN_ONCE("Too many elts\n");
			return;
		}
		r300EmitAOS(rmesa, rmesa->state.aos_count, start);
		r300EmitElts(ctx, rmesa->state.VB.Elts, num_verts,
			     rmesa->state.VB.elt_size);
		r300FireEB(rmesa, rmesa->state.elt_dma.aos_offset,
			   num_verts, type, rmesa->state.VB.elt_size);
	} else {
		r300EmitAOS(rmesa, rmesa->state.aos_count, start);
		r300FireAOS(rmesa, num_verts, type);
	}
}
/* Emit a single DRAW_VBUF_2 packet that walks the bound vertex arrays
 * as a flat vertex list.
 *
 * rmesa:     r300 context whose batch buffer receives the packet
 * primitive: GL primitive type to translate for the hardware
 * vertex_nr: number of vertices to draw
 */
static void r300EmitVbufPrim(r300ContextPtr rmesa, GLuint primitive, GLuint vertex_nr)
{
	BATCH_LOCALS(&rmesa->radeon);
	int hw_prim;
	int count;

	if (RADEON_DEBUG & RADEON_VERTS)
		fprintf(stderr, "%s\n", __func__);

	hw_prim = r300PrimitiveType(rmesa, primitive);
	count = r300NumVerts(rmesa, vertex_nr, primitive);

	/* One PACKET3 header plus the VAP_VF_CNTL dword: vertex count in
	 * the upper half, walk mode and primitive type in the lower. */
	BEGIN_BATCH(3);
	OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
	OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST
		  | (count << 16)
		  | hw_prim);
	END_BATCH();
}
/* Fire one primitive from the TNL vertex buffer, using the element
 * buffer when the VB is indexed and a plain AOS draw otherwise.
 *
 * rmesa:      r300 context
 * ctx:        GL context; provides the TNL vertex buffer
 * start, end: vertex range for this primitive
 * prim:       GL primitive type
 *
 * Returns silently when the primitive cannot be drawn (unsupported
 * type, empty range, or an index count beyond the 16-bit limit).
 */
static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
				   int start, int end, int prim)
{
	int hw_type;
	int nverts;
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	struct vertex_buffer *vb = &tnl->vb;

	hw_type = r300PrimitiveType(rmesa, prim);
	nverts = r300NumVerts(rmesa, end - start, prim);

	if (hw_type < 0 || nverts <= 0)
		return;

	if (vb->Elts) {
		if (nverts > 65535) {
			/* not implemented yet */
			WARN_ONCE("Too many elts\n");
			return;
		}

		/* Known deficiency (kept from the original author's note):
		 * the index array and the vertex arrays both live in DMA
		 * memory, and ideally LOAD_VBPNTR, DRAW_INDX and
		 * INDX_BUFFER would all be positioned and emitted as one
		 * unit.  Without a larger rework of DMA memory handling
		 * that isn't possible, and allocating the index array
		 * below may in fact evict the vertex arrays.  The call
		 * order here must therefore NOT be changed casually. */
		r300EmitElts(ctx, vb->Elts, nverts);
		r300EmitAOS(rmesa, rmesa->state.aos_count, start);
		r300FireEB(rmesa, rmesa->state.elt_dma.aos_offset,
			   nverts, hw_type);
	} else {
		r300EmitAOS(rmesa, rmesa->state.aos_count, start);
		r300FireAOS(rmesa, nverts, hw_type);
	}
}
/* Predict how many command-buffer dwords a TryDrawPrims call will need
 * and make sure the buffer has room, flushing it if necessary.
 *
 * ctx:      GL context
 * nr_prims: number of primitives about to be drawn
 * prim:     array of nr_prims primitive descriptors
 *
 * Returns the predicted dword count (including state re-emission when
 * the buffer was flushed).
 */
static GLuint r300PredictTryDrawPrimsSize(GLcontext *ctx,
					  GLuint nr_prims,
					  const struct _mesa_prim *prim)
{
	struct r300_context *r300 = R300_CONTEXT(ctx);
	struct r300_vertex_buffer *vbuf = &r300->vbuf;
	GLboolean flushed;
	GLuint dwords;
	GLuint state_size;
	/* GLuint, not int: nr_prims is unsigned, so a signed index would
	 * trigger a signed/unsigned comparison in the loop condition. */
	GLuint i;
	GLuint extra_prims = 0;

	/* Check for primitive splitting: each primitive larger than the
	 * hardware limit (65535 minus headroom) costs an extra draw. */
	for (i = 0; i < nr_prims; ++i) {
		const GLuint num_verts =
			r300NumVerts(r300, prim[i].count, prim[i].mode);
		extra_prims += num_verts / (65535 - 32);
	}
	nr_prims += extra_prims;

	dwords = 2 * CACHE_FLUSH_BUFSZ;
	dwords += PRE_EMIT_STATE_BUFSZ;
	dwords += (AOS_BUFSZ(vbuf->num_attribs)
		   + SCISSORS_BUFSZ * 2
		   + FIREAOS_BUFSZ) * nr_prims;

	state_size = radeonCountStateEmitSize(&r300->radeon);
	flushed = rcommonEnsureCmdBufSpace(&r300->radeon,
					   dwords + state_size,
					   __FUNCTION__);
	/* A flush invalidates the emitted state, so re-count it. */
	if (flushed)
		dwords += radeonCountStateEmitSize(&r300->radeon);
	else
		dwords += state_size;

	/* %u, not %d: dwords is a GLuint (unsigned). */
	radeon_print(RADEON_RENDER, RADEON_VERBOSE,
		     "%s: total prediction size is %u.\n",
		     __FUNCTION__, dwords);

	return dwords;
}