/* CPU push-buffer fallback for draws whose vertex data has to be uploaded
 * by the CPU: the translate module converts the user vertex arrays into a
 * scratch vertex array, which is then dispatched once per instance.
 */
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;

      if (info->primitive_restart) {
         BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
         PUSH_DATA (ctx.push, 1);
         PUSH_DATA (ctx.push, info->restart_index);
      } else if (nvc0->state.prim_restart) {
         IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
      }
      nvc0->state.prim_restart = info->primitive_restart;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         /* vertex count comes from a stream-output query, not draw info */
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      /* each instance is drawn separately; INSTANCE_NEXT makes the
       * hardware advance the instance ID on the next BEGIN */
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);

   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
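/* nvc0_push_vbo2 is an earlier take on the same fallback, written against
 * the legacy BEGIN_RING/OUT_RING channel API. Instead of going through the
 * translate module, it maps each vertex buffer that is flagged for the FIFO
 * path (nvc0->vbo_fifo), installs a per-attribute push callback, and streams
 * the raw attribute data into the command buffer once per instance.
 */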
void
nvc0_push_vbo2(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, n;
   unsigned inst = info->instance_count;
   unsigned prim = nvc0_prim_gl(info->mode);

   ctx.nvc0 = nvc0;
   ctx.vertex_size = nvc0->vertex->vtx_size;
   ctx.idxbuf = NULL;
   ctx.num_attrs = 0;
   ctx.edgeflag = 0.5f;
   ctx.edgeflag_input = 32;

   for (i = 0; i < nvc0->vertex->num_elements; ++i) {
      struct pipe_vertex_element *ve = &nvc0->vertex->element[i].pipe;
      struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
      struct nouveau_bo *bo = nvc0_resource(vb->buffer)->bo;
      unsigned nr_components;

      if (!(nvc0->vbo_fifo & (1 << i)))
         continue;
      n = ctx.num_attrs++;

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD))
         return;
      ctx.attr[n].map = (uint8_t *)bo->map + vb->buffer_offset + ve->src_offset;
      /* note: the pointer obtained here is still used after the unmap
       * below, so this relies on bo->map remaining valid */
      nouveau_bo_unmap(bo);

      ctx.attr[n].stride = vb->stride;
      ctx.attr[n].divisor = ve->instance_divisor;

      /* pick an emit callback by component width; judging by the emit_*
       * names, small attributes are packed into wider words (e.g. 2 x 8 bit
       * pushed as one 16 bit word, 4 x 8 bit as one 32 bit word) */
      nr_components = util_format_get_nr_components(ve->src_format);
      switch (util_format_get_component_bits(ve->src_format,
                                             UTIL_FORMAT_COLORSPACE_RGB, 0)) {
      case 8:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b08_1; break;
         case 2: ctx.attr[n].push = emit_b16_1; break;
         case 3: ctx.attr[n].push = emit_b08_3; break;
         case 4: ctx.attr[n].push = emit_b32_1; break;
         }
         break;
      case 16:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b16_1; break;
         case 2: ctx.attr[n].push = emit_b32_1; break;
         case 3: ctx.attr[n].push = emit_b16_3; break;
         case 4: ctx.attr[n].push = emit_b32_2; break;
         }
         break;
      case 32:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b32_1; break;
         case 2: ctx.attr[n].push = emit_b32_2; break;
         case 3: ctx.attr[n].push = emit_b32_3; break;
         case 4: ctx.attr[n].push = emit_b32_4; break;
         }
         break;
      default:
         assert(0);
         break;
      }
   }

   if (info->indexed) {
      struct nvc0_resource *res = nvc0_resource(nvc0->idxbuf.buffer);
      if (!res || nouveau_bo_map(res->bo, NOUVEAU_BO_RD))
         return;
      ctx.idxbuf = (uint8_t *)res->bo->map + nvc0->idxbuf.offset + res->offset;
      nouveau_bo_unmap(res->bo);
      ctx.idxsize = nvc0->idxbuf.index_size;
   } else {
      ctx.idxsize = 0;
   }

   while (inst--) {
      BEGIN_RING(nvc0->screen->base.channel, RING_3D(VERTEX_BEGIN_GL), 1);
      OUT_RING  (nvc0->screen->base.channel, prim);
      switch (ctx.idxsize) {
      case 0:
         emit_seq(&ctx, info->start, info->count);
         break;
      case 1:
         emit_elt08(&ctx, info->start, info->count);
         break;
      case 2:
         emit_elt16(&ctx, info->start, info->count);
         break;
      case 4:
         emit_elt32(&ctx, info->start, info->count);
         break;
      }
      IMMED_RING(nvc0->screen->base.channel, RING_3D(VERTEX_END_GL), 0);

      prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
}
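/* Later revision of nvc0_push_vbo. Compared to the version at the top, it
 * also clears VB_ELEMENT_BASE (translate has already applied
 * info->index_bias), programs primitive restart for non-indexed draws as
 * well (indexed draws use ~0 as the restart index, see the NOTE inside),
 * and omits VERTEX_ARRAY_FLUSH on GM107 and later classes.
 */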
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
   } else if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         /* vertex count comes from a stream-output query, not draw info */
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      /* VERTEX_ARRAY_FLUSH is not used on GM107 and later */
      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);

   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}