static void
nv50_draw_arrays_instanced(struct pipe_context *pipe,
                           unsigned mode, unsigned start, unsigned count,
                           unsigned startInstance, unsigned instanceCount)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_channel *chan = nv50->screen->tesla->channel;
   struct nouveau_grobj *tesla = nv50->screen->tesla;
   struct instance a[16];
   unsigned prim = nv50_prim(mode);

   instance_init(nv50, a, startInstance);
   if (!nv50_state_validate(nv50, 10 + 16*3))
      return;

   /* Fall back to pushing vertex data through the FIFO if any vertex
    * buffer cannot be read by the GPU directly.
    */
   if (nv50->vbo_fifo) {
      nv50_push_elements_instanced(pipe, NULL, 0, 0, mode, start,
                                   count, startInstance, instanceCount);
      return;
   }

   /* Write the starting instance ID at offset 24 of the AUX constant
    * buffer, where the vertex program reads it.
    */
   BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
   OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
   OUT_RING  (chan, startInstance);

   while (instanceCount--) {
      /* Make sure one full instance fits in the ring; if not, flush and
       * re-emit the state that the kickoff invalidated.
       */
      if (AVAIL_RING(chan) < (7 + 16*3)) {
         FIRE_RING(chan);
         if (!nv50_state_validate(nv50, 7 + 16*3)) {
            assert(0);
            return;
         }
      }
      /* Advance the array pointers of attributes with a non-zero
       * instance divisor.
       */
      instance_step(nv50, a);

      BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
      OUT_RING  (chan, prim);
      BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
      OUT_RING  (chan, start);
      OUT_RING  (chan, count);
      BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
      OUT_RING  (chan, 0);

      /* Every draw after the first sets bit 28 (instance-next), so the
       * hardware advances the instance counter instead of resetting it.
       */
      prim |= (1 << 28);
   }
}
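/* For reference, a minimal sketch of the per-attribute bookkeeping that
 * instance_init()/instance_step() maintain for the loop above. This is an
 * illustrative reconstruction, not this file's actual definitions: the
 * struct layout, the _sketch names and the VERTEX_ARRAY_START usage are
 * assumptions inferred from how the draw loop behaves.
 */
#if 0
struct instance {
   struct nouveau_bo *bo;  /* backing vertex buffer */
   unsigned delta;         /* current byte offset of the array start */
   unsigned stride;        /* per-instance advance in bytes */
   unsigned step;          /* instances emitted since the last advance */
   unsigned divisor;       /* 0 = ordinary per-vertex attribute */
};

static void
instance_step_sketch(struct nv50_context *nv50, struct instance *a)
{
   struct nouveau_channel *chan = nv50->screen->tesla->channel;
   struct nouveau_grobj *tesla = nv50->screen->tesla;
   int i;

   for (i = 0; i < nv50->vtxelt->num_elements; i++) {
      if (!a[i].divisor)
         continue;
      /* Re-point the hardware vertex array at this instance's slice,
       * then advance by one stride every "divisor" instances.
       */
      BEGIN_RING(chan, tesla, NV50TCL_VERTEX_ARRAY_START_HIGH(i), 2);
      OUT_RELOCh(chan, a[i].bo, a[i].delta,
                 NOUVEAU_BO_RD | NOUVEAU_BO_VRAM | NOUVEAU_BO_GART);
      OUT_RELOCl(chan, a[i].bo, a[i].delta,
                 NOUVEAU_BO_RD | NOUVEAU_BO_VRAM | NOUVEAU_BO_GART);
      if (++a[i].step == a[i].divisor) {
         a[i].step = 0;
         a[i].delta += a[i].stride;
      }
   }
}
#endif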
void
nv50_push_elements_instanced(struct pipe_context *pipe,
                             struct pipe_resource *idxbuf,
                             unsigned idxsize, int idxbias,
                             unsigned mode, unsigned start, unsigned count,
                             unsigned i_start, unsigned i_count)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_grobj *tesla = nv50->screen->tesla;
   struct nouveau_channel *chan = tesla->channel;
   struct push_context ctx;
   const unsigned p_overhead = 4 + /* begin/end */
                               4;  /* potential edgeflag enable/disable */
   const unsigned v_overhead = 1 + /* VERTEX_DATA packet header */
                               2;  /* potential edgeflag modification */
   struct util_split_prim s;
   unsigned vtx_size;
   boolean nzi = FALSE; /* non-zero instance: TRUE after the first one */
   int i;

   ctx.nv50 = nv50;
   ctx.attr_nr = 0;
   ctx.idxbuf = NULL;
   ctx.vtx_size = 0;
   ctx.edgeflag = 0.5f;
   ctx.edgeflag_attr = nv50->vertprog->vp.edgeflag;

   /* map vertex buffers, determine vertex size */
   for (i = 0; i < nv50->vtxelt->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
      struct pipe_vertex_buffer *vb = &nv50->vtxbuf[ve->vertex_buffer_index];
      struct nouveau_bo *bo = nv50_resource(vb->buffer)->bo;
      unsigned size, nr_components, n;

      /* Only attributes flagged for the FIFO path are pushed inline. */
      if (!(nv50->vbo_fifo & (1 << i)))
         continue;
      n = ctx.attr_nr++;

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD)) {
         assert(bo->map);
         return;
      }
      ctx.attr[n].map = (uint8_t *)bo->map + vb->buffer_offset +
                        ve->src_offset;
      nouveau_bo_unmap(bo);

      ctx.attr[n].stride = vb->stride;
      ctx.attr[n].divisor = ve->instance_divisor;
      if (ctx.attr[n].divisor) {
         ctx.attr[n].step = i_start % ve->instance_divisor;
         ctx.attr[n].map = (uint8_t *)ctx.attr[n].map + i_start * vb->stride;
      }

      size = util_format_get_component_bits(ve->src_format,
                                            UTIL_FORMAT_COLORSPACE_RGB, 0);
      nr_components = util_format_get_nr_components(ve->src_format);

      /* Pick an emit callback that pushes the attribute in as few 32-bit
       * words as possible; vtx_size accumulates those words.
       */
      switch (size) {
      case 8:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b08_1; break;
         case 2: ctx.attr[n].push = emit_b16_1; break;
         case 3: ctx.attr[n].push = emit_b08_3; break;
         case 4: ctx.attr[n].push = emit_b32_1; break;
         }
         ctx.vtx_size++;
         break;
      case 16:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b16_1; break;
         case 2: ctx.attr[n].push = emit_b32_1; break;
         case 3: ctx.attr[n].push = emit_b16_3; break;
         case 4: ctx.attr[n].push = emit_b32_2; break;
         }
         ctx.vtx_size += (nr_components + 1) >> 1;
         break;
      case 32:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b32_1; break;
         case 2: ctx.attr[n].push = emit_b32_2; break;
         case 3: ctx.attr[n].push = emit_b32_3; break;
         case 4: ctx.attr[n].push = emit_b32_4; break;
         }
         ctx.vtx_size += nr_components;
         break;
      default:
         assert(0);
         return;
      }
   }
   vtx_size = ctx.vtx_size + v_overhead;

   /* map index buffer, if present */
   if (idxbuf) {
      struct nouveau_bo *bo = nv50_resource(idxbuf)->bo;

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD)) {
         assert(bo->map);
         return;
      }
      ctx.idxbuf = bo->map;
      ctx.idxbias = idxbias;
      ctx.idxsize = idxsize;
      nouveau_bo_unmap(bo);
   }

   s.priv = &ctx;
   s.edge = emit_edgeflag;
   if (idxbuf) {
      if (idxsize == 1)
         s.emit = idxbias ? emit_elt08_biased : emit_elt08;
      else if (idxsize == 2)
         s.emit = idxbias ? emit_elt16_biased : emit_elt16;
      else
         s.emit = idxbias ? emit_elt32_biased : emit_elt32;
   } else
      s.emit = emit_verts;

   /* per-instance loop */
   BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
   OUT_RING  (chan, NV50_CB_AUX | (24 << 8));
   OUT_RING  (chan, i_start);

   while (i_count--) {
      unsigned max_verts;
      boolean done;

      /* Advance per-instance attribute pointers whose divisor period
       * has elapsed.
       */
      for (i = 0; i < ctx.attr_nr; i++) {
         if (!ctx.attr[i].divisor ||
             ctx.attr[i].divisor != ++ctx.attr[i].step)
            continue;
         ctx.attr[i].step = 0;
         ctx.attr[i].map = (uint8_t *)ctx.attr[i].map + ctx.attr[i].stride;
      }

      util_split_prim_init(&s, mode, start, count);
      do {
         if (AVAIL_RING(chan) < p_overhead + (6 * vtx_size)) {
            FIRE_RING(chan);
            if (!nv50_state_validate(nv50, p_overhead + (6 * vtx_size))) {
               assert(0);
               return;
            }
         }

         /* How many whole vertices still fit in the ring. */
         max_verts = AVAIL_RING(chan);
         max_verts -= p_overhead;
         max_verts /= vtx_size;

         BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
         OUT_RING  (chan, nv50_prim(s.mode) | (nzi ? (1 << 28) : 0));
         done = util_split_prim_next(&s, max_verts);
         BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
         OUT_RING  (chan, 0);
      } while (!done);

      nzi = TRUE;
   }
}
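/* A sketch of the emit callback contract consumed above. The real emit_b*
 * helpers are defined earlier in this file; the signature shown here
 * (channel plus a pointer to the attribute data) is an assumption, and the
 * _sketch names are hypothetical. Each callback must write exactly the
 * number of 32-bit words that was added to vtx_size when it was selected.
 */
#if 0
static void
emit_b32_1_sketch(struct nouveau_channel *chan, void *data)
{
   uint32_t *v = data; /* hypothetical signature, see comment above */

   OUT_RING(chan, v[0]); /* one 32-bit component -> one word */
}

static void
emit_b16_3_sketch(struct nouveau_channel *chan, void *data)
{
   uint16_t *v = data;

   /* three 16-bit components packed into two words ((3 + 1) >> 1) */
   OUT_RING(chan, v[0] | (v[1] << 16));
   OUT_RING(chan, v[2]);
}
#endif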