/* Point ctx->idxbuf at the index data: either a CPU mapping of the bound
 * index buffer resource, or the application's user pointer directly.
 */
static INLINE void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
{
   struct nv04_resource *res;

   if (!nvc0->idxbuf.buffer) {
      /* user index buffer: no mapping required */
      ctx->idxbuf = nvc0->idxbuf.user_buffer;
      return;
   }

   res = nv04_resource(nvc0->idxbuf.buffer);
   ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base, res,
                                             nvc0->idxbuf.offset,
                                             NOUVEAU_BO_RD);
}
static void nv30_emit_vtxattr(struct nv30_context *nv30, struct pipe_vertex_buffer *vb, struct pipe_vertex_element *ve, unsigned attr) { const unsigned nc = util_format_get_nr_components(ve->src_format); struct nouveau_pushbuf *push = nv30->base.pushbuf; struct nv04_resource *res = nv04_resource(vb->buffer); const struct util_format_description *desc = util_format_description(ve->src_format); const void *data; float v[4]; data = nouveau_resource_map_offset(&nv30->base, res, vb->buffer_offset + ve->src_offset, NOUVEAU_BO_RD); desc->unpack_rgba_float(v, 0, data, 0, 1, 1); switch (nc) { case 4: BEGIN_NV04(push, NV30_3D(VTX_ATTR_4F(attr)), 4); PUSH_DATAf(push, v[0]); PUSH_DATAf(push, v[1]); PUSH_DATAf(push, v[2]); PUSH_DATAf(push, v[3]); break; case 3: BEGIN_NV04(push, NV30_3D(VTX_ATTR_3F(attr)), 3); PUSH_DATAf(push, v[0]); PUSH_DATAf(push, v[1]); PUSH_DATAf(push, v[2]); break; case 2: BEGIN_NV04(push, NV30_3D(VTX_ATTR_2F(attr)), 2); PUSH_DATAf(push, v[0]); PUSH_DATAf(push, v[1]); break; case 1: BEGIN_NV04(push, NV30_3D(VTX_ATTR_1F(attr)), 1); PUSH_DATAf(push, v[0]); break; default: assert(0); break; } }
/* Set up ctx->edgeflag to read the edgeflag attribute directly: record the
 * buffer stride and a CPU pointer to the first element (user pointer or a
 * mapping of the backing resource), pre-offset by index_bias if non-zero.
 */
static INLINE void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   const unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer);

   ctx->edgeflag.stride = vb->stride;

   if (!buf) {
      ctx->edgeflag.data = (const uint8_t *)vb->user_buffer + ve->src_offset;
   } else {
      const unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                                                       buf, offset,
                                                       NOUVEAU_BO_RD);
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}
/* Hand every bound vertex buffer to the translate object as a CPU pointer,
 * mapping resource-backed buffers and using user pointers as-is.
 */
static INLINE void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned b;

   for (b = 0; b < nvc0->num_vtxbufs; ++b) {
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
      const uint8_t *base;

      if (unlikely(vb->buffer))
         base = nouveau_resource_map_offset(&nvc0->base,
                                            nv04_resource(vb->buffer),
                                            vb->buffer_offset, NOUVEAU_BO_RD);
      else
         base = (const uint8_t *)vb->user_buffer;

      /* Per-instance buffers are not advanced by the index bias. */
      if (index_bias && likely(!(nvc0->vertex->instance_bufs & (1 << b))))
         base += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, b, base, vb->stride, ~0);
   }
}
void nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info) { struct push_context ctx; unsigned i, index_size; boolean apply_bias = info->indexed && info->index_bias; ctx.push = nv30->base.pushbuf; ctx.translate = nv30->vertex->translate; ctx.packet_vertex_limit = nv30->vertex->vtx_per_packet_max; ctx.vertex_words = nv30->vertex->vtx_size; for (i = 0; i < nv30->num_vtxbufs; ++i) { uint8_t *data; struct pipe_vertex_buffer *vb = &nv30->vtxbuf[i]; struct nv04_resource *res = nv04_resource(vb->buffer); if (!vb->buffer && !vb->user_buffer) { continue; } data = nouveau_resource_map_offset(&nv30->base, res, vb->buffer_offset, NOUVEAU_BO_RD); if (apply_bias) data += info->index_bias * vb->stride; ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0); } if (info->indexed) { if (nv30->idxbuf.buffer) ctx.idxbuf = nouveau_resource_map_offset(&nv30->base, nv04_resource(nv30->idxbuf.buffer), nv30->idxbuf.offset, NOUVEAU_BO_RD); else ctx.idxbuf = nv30->idxbuf.user_buffer; if (!ctx.idxbuf) { nv30_state_release(nv30); return; } index_size = nv30->idxbuf.index_size; ctx.primitive_restart = info->primitive_restart; ctx.restart_index = info->restart_index; } else { ctx.idxbuf = NULL; index_size = 0; ctx.primitive_restart = FALSE; ctx.restart_index = 0; } if (nv30->screen->eng3d->oclass >= NV40_3D_CLASS) { BEGIN_NV04(ctx.push, NV40_3D(PRIM_RESTART_ENABLE), 2); PUSH_DATA (ctx.push, info->primitive_restart); PUSH_DATA (ctx.push, info->restart_index); nv30->state.prim_restart = info->primitive_restart; } ctx.prim = nv30_prim_gl(info->mode); PUSH_RESET(ctx.push, BUFCTX_IDXBUF); BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1); PUSH_DATA (ctx.push, ctx.prim); switch (index_size) { case 0: emit_vertices_seq(&ctx, info->start, info->count); break; case 1: emit_vertices_i08(&ctx, info->start, info->count); break; case 2: emit_vertices_i16(&ctx, info->start, info->count); break; case 4: emit_vertices_i32(&ctx, info->start, info->count); break; default: 
assert(0); break; } BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1); PUSH_DATA (ctx.push, NV30_3D_VERTEX_BEGIN_END_STOP); if (info->indexed) nouveau_resource_unmap(nv04_resource(nv30->idxbuf.buffer)); for (i = 0; i < nv30->num_vtxbufs; ++i) { if (nv30->vtxbuf[i].buffer) { nouveau_resource_unmap(nv04_resource(nv30->vtxbuf[i].buffer)); } } nv30_state_release(nv30); }
/* CPU push path for nv50: translate vertex data on the CPU and emit it
 * inline in the command stream, once per instance. The draw's command
 * ordering (restart state, BEGIN/END bracketing, INSTANCE_NEXT flag) is
 * significant and must not be rearranged.
 */
void
nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   boolean apply_bias = info->indexed && info->index_bias;

   ctx.push = nv50->base.pushbuf;
   ctx.translate = nv50->vertex->translate;
   ctx.packet_vertex_limit = nv50->vertex->packet_vertex_limit;
   ctx.vertex_words = nv50->vertex->vertex_size;

   /* Bind every vertex buffer to the translate object: resource-backed
    * buffers are mapped, user buffers are used directly.
    */
   for (i = 0; i < nv50->num_vtxbufs; ++i) {
      const struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
      const uint8_t *data;

      if (unlikely(vb->buffer))
         data = nouveau_resource_map_offset(&nv50->base,
                                            nv04_resource(vb->buffer),
                                            vb->buffer_offset, NOUVEAU_BO_RD);
      else
         data = vb->user_buffer;

      /* Index bias does not apply to per-instance buffers. */
      if (apply_bias && likely(!(nv50->vertex->instance_bufs & (1 << i))))
         data += (ptrdiff_t)info->index_bias * vb->stride;

      ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
   }

   if (info->indexed) {
      if (nv50->idxbuf.buffer) {
         ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
                                                  nv04_resource(nv50->idxbuf.buffer),
                                                  nv50->idxbuf.offset,
                                                  NOUVEAU_BO_RD);
      } else {
         ctx.idxbuf = nv50->idxbuf.user_buffer;
      }
      if (!ctx.idxbuf)
         return; /* mapping failed; nothing emitted */
      index_size = nv50->idxbuf.index_size;
      ctx.primitive_restart = info->primitive_restart;
      ctx.restart_index = info->restart_index;
   } else {
      /* Non-indexed draw: the vertex count may come from a stream-output
       * target's query instead of info->count.
       */
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nv50->base.pipe;
         struct nv50_so_target *targ;
         targ = nv50_so_target(info->count_from_stream_output);
         if (!targ->pq) {
            NOUVEAU_ERR("draw_stream_output not supported on pre-NVA0 cards\n");
            return;
         }
         /* blocking query; result is bytes written, convert to vertices */
         pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL;
      index_size = 0;
      ctx.primitive_restart = FALSE;
      ctx.restart_index = 0;
   }

   ctx.instance_id = info->start_instance;
   ctx.prim = nv50_prim_gl(info->mode);

   /* Only touch PRIM_RESTART_ENABLE when it changes (disable is emitted
    * only if it was previously enabled).
    */
   if (info->primitive_restart) {
      BEGIN_NV04(ctx.push, NV50_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->restart_index);
   } else
   if (nv50->state.prim_restart) {
      BEGIN_NV04(ctx.push, NV50_3D(PRIM_RESTART_ENABLE), 1);
      PUSH_DATA (ctx.push, 0);
   }
   nv50->state.prim_restart = info->primitive_restart;

   /* One BEGIN/emit/END bracket per instance. */
   while (inst_count--) {
      BEGIN_NV04(ctx.push, NV50_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, ctx.prim);
      switch (index_size) {
      case 0:
         emit_vertices_seq(&ctx, info->start, vert_count);
         break;
      case 1:
         emit_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         emit_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         emit_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(0);
         break;
      }
      BEGIN_NV04(ctx.push, NV50_3D(VERTEX_END_GL), 1);
      PUSH_DATA (ctx.push, 0);

      ctx.instance_id++;
      /* Subsequent BEGINs advance the instance ID on the GPU side. */
      ctx.prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
}