static void
nvc0_texture_barrier(struct pipe_context *pipe)
{
   struct nouveau_channel *chan = nvc0_context(pipe)->screen->base.channel;

   IMMED_RING(chan, RING_3D(SERIALIZE), 0);
   IMMED_RING(chan, RING_3D(TEX_CACHE_CTL), 0);
}
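/* Helper for the CPU vertex push path below: toggles fixed-function edge
 * flag state. Assumption: EDGEFLAG_ENABLE is latched and applies to the
 * vertices submitted after it.
 */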
static void
emit_edgeflag(struct push_context *ctx, boolean enabled)
{
   struct nouveau_channel *chan = ctx->nvc0->screen->base.channel;

   IMMED_RING(chan, RING_3D(EDGEFLAG_ENABLE), enabled);
}
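/* Validate the geometry program. Assumption about the magic values below:
 * GP_SELECT = 0x40 appears to bypass the geometry stage while 0x41 enables
 * it; a GP with no code is still accepted so it can carry stream output
 * state alone (see the comment in the function).
 */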
void
nvc0_gmtyprog_validate(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nvc0_program *gp = nvc0->gmtyprog;

   if (gp)
      nvc0_program_validate(nvc0, gp);

   /* we allow GPs with no code for specifying stream output state only */
   if (!gp || !gp->code_size) {
      BEGIN_RING(chan, RING_3D(GP_SELECT), 1);
      OUT_RING (chan, 0x40);
      IMMED_RING(chan, RING_3D(LAYER), 0);
      return;
   }
   nvc0_program_update_context_state(nvc0, gp, 3);

   BEGIN_RING(chan, RING_3D(GP_SELECT), 1);
   OUT_RING (chan, 0x41);
   BEGIN_RING(chan, RING_3D(SP_START_ID(4)), 1);
   OUT_RING (chan, gp->code_base);
   BEGIN_RING(chan, RING_3D(SP_GPR_ALLOC(4)), 1);
   OUT_RING (chan, gp->max_gpr);
   BEGIN_RING(chan, RING_3D(LAYER), 1);
   OUT_RING (chan, (gp->hdr[13] & (1 << 9)) ? NVC0_3D_LAYER_USE_GP : 0);

   nvc0_program_validate_clip(nvc0, gp);
}
void
nvc0_tctlprog_validate(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nvc0_program *tp = nvc0->tctlprog;

   if (!tp) {
      BEGIN_RING(chan, RING_3D(SP_SELECT(2)), 1);
      OUT_RING (chan, 0x20);
      return;
   }
   if (!nvc0_program_validate(nvc0, tp))
      return;
   nvc0_program_update_context_state(nvc0, tp, 1);

   if (tp->tp.tess_mode != ~0) {
      BEGIN_RING(chan, RING_3D(TESS_MODE), 1);
      OUT_RING (chan, tp->tp.tess_mode);
   }
   BEGIN_RING(chan, RING_3D(SP_SELECT(2)), 2);
   OUT_RING (chan, 0x21);
   OUT_RING (chan, tp->code_base);
   BEGIN_RING(chan, RING_3D(SP_GPR_ALLOC(2)), 1);
   OUT_RING (chan, tp->max_gpr);

   if (tp->tp.input_patch_size <= 32)
      IMMED_RING(chan, RING_3D(PATCH_VERTICES), tp->tp.input_patch_size);
}
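/* Sync clip distance state with the last vertex pipeline stage. If the
 * vertex program uses user clip planes (num_ucps), clip state is presumably
 * programmed along with those elsewhere, so nothing is done here.
 */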
static void
nvc0_program_validate_clip(struct nvc0_context *nvc0, struct nvc0_program *vp)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;

   if (nvc0->vertprog->vp.num_ucps)
      return;

   if (nvc0->state.clip_enable != vp->vp.clip_enable) {
      nvc0->state.clip_enable = vp->vp.clip_enable;
      IMMED_RING(chan, RING_3D(CLIP_DISTANCE_ENABLE), vp->vp.clip_enable);
   }
   if (nvc0->state.clip_mode != vp->vp.clip_mode) {
      nvc0->state.clip_mode = vp->vp.clip_mode;
      BEGIN_RING(chan, RING_3D(CLIP_DISTANCE_MODE), 1);
      OUT_RING (chan, vp->vp.clip_mode);
   }
}
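/* CPU-side vertex push fallback: attributes whose bit is set in vbo_fifo
 * are read from mapped buffers and written into the command stream instead
 * of being fetched by the hardware. Hedged note: the attribute pointers are
 * kept after nouveau_bo_unmap(); this relies on libdrm_nouveau caching the
 * CPU mapping, which the original code appears to assume as well.
 */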
void
nvc0_push_vbo2(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct push_context ctx;
   unsigned i, n;
   unsigned inst = info->instance_count;
   unsigned prim = nvc0_prim_gl(info->mode);

   ctx.nvc0 = nvc0;
   ctx.vertex_size = nvc0->vertex->vtx_size;
   ctx.idxbuf = NULL;
   ctx.num_attrs = 0;
   ctx.edgeflag = 0.5f;
   ctx.edgeflag_input = 32;

   for (i = 0; i < nvc0->vertex->num_elements; ++i) {
      struct pipe_vertex_element *ve = &nvc0->vertex->element[i].pipe;
      struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
      struct nouveau_bo *bo = nvc0_resource(vb->buffer)->bo;
      unsigned nr_components;

      if (!(nvc0->vbo_fifo & (1 << i)))
         continue;
      n = ctx.num_attrs++;

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD))
         return;
      ctx.attr[n].map = (uint8_t *)bo->map + vb->buffer_offset + ve->src_offset;

      nouveau_bo_unmap(bo); /* bo->map stays valid, see note above */

      ctx.attr[n].stride = vb->stride;
      ctx.attr[n].divisor = ve->instance_divisor;

      /* pick an emit callback by component size and count */
      nr_components = util_format_get_nr_components(ve->src_format);
      switch (util_format_get_component_bits(ve->src_format,
                                             UTIL_FORMAT_COLORSPACE_RGB, 0)) {
      case 8:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b08_1; break;
         case 2: ctx.attr[n].push = emit_b16_1; break;
         case 3: ctx.attr[n].push = emit_b08_3; break;
         case 4: ctx.attr[n].push = emit_b32_1; break;
         }
         break;
      case 16:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b16_1; break;
         case 2: ctx.attr[n].push = emit_b32_1; break;
         case 3: ctx.attr[n].push = emit_b16_3; break;
         case 4: ctx.attr[n].push = emit_b32_2; break;
         }
         break;
      case 32:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b32_1; break;
         case 2: ctx.attr[n].push = emit_b32_2; break;
         case 3: ctx.attr[n].push = emit_b32_3; break;
         case 4: ctx.attr[n].push = emit_b32_4; break;
         }
         break;
      default:
         assert(0);
         break;
      }
   }

   if (info->indexed) {
      struct nvc0_resource *res = nvc0_resource(nvc0->idxbuf.buffer);
      if (!res || nouveau_bo_map(res->bo, NOUVEAU_BO_RD))
         return;
      ctx.idxbuf = (uint8_t *)res->bo->map + nvc0->idxbuf.offset + res->offset;
      nouveau_bo_unmap(res->bo);
      ctx.idxsize = nvc0->idxbuf.index_size;
   } else {
      ctx.idxsize = 0;
   }

   while (inst--) {
      BEGIN_RING(chan, RING_3D(VERTEX_BEGIN_GL), 1);
      OUT_RING (chan, prim);
      switch (ctx.idxsize) {
      case 0:
         emit_seq(&ctx, info->start, info->count);
         break;
      case 1:
         emit_elt08(&ctx, info->start, info->count);
         break;
      case 2:
         emit_elt16(&ctx, info->start, info->count);
         break;
      case 4:
         emit_elt32(&ctx, info->start, info->count);
         break;
      }
      IMMED_RING(chan, RING_3D(VERTEX_END_GL), 0);

      prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT; /* next BEGIN starts a new instance */
   }
}
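/* Validate transform feedback state. The feedback varyings come from the
 * last enabled vertex pipeline stage (GP, then TEP, then VP). Hedged note:
 * TFB_VARYING_LOCS packs four byte-sized varying indices per method word,
 * which is why the index array is padded to a multiple of four below.
 */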
void
nvc0_tfb_validate(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nvc0_transform_feedback_state *tfb;
   unsigned b, n, i;

   if (nvc0->gmtyprog)
      tfb = nvc0->gmtyprog->tfb;
   else if (nvc0->tevlprog)
      tfb = nvc0->tevlprog->tfb;
   else
      tfb = nvc0->vertprog->tfb;

   IMMED_RING(chan, RING_3D(TFB_ENABLE), (tfb && nvc0->num_tfbbufs) ? 1 : 0);

   if (tfb && tfb != nvc0->state.tfb) {
      uint8_t var[128];

      for (n = 0, b = 0; b < 4; n += tfb->varying_count[b++]) {
         if (tfb->varying_count[b]) {
            BEGIN_RING(chan, RING_3D(TFB_STREAM(b)), 3);
            OUT_RING (chan, 0);
            OUT_RING (chan, tfb->varying_count[b]);
            OUT_RING (chan, tfb->stride[b]);

            for (i = 0; i < tfb->varying_count[b]; ++i)
               var[i] = tfb->varying_index[n + i];
            for (; i & 3; ++i)
               var[i] = 0; /* zero rest of method word bits */

            BEGIN_RING(chan, RING_3D(TFB_VARYING_LOCS(b, 0)), i / 4);
            OUT_RINGp (chan, var, i / 4);

            if (nvc0->tfbbuf[b])
               nvc0_so_target(nvc0->tfbbuf[b])->stride = tfb->stride[b];
         } else {
            IMMED_RING(chan, RING_3D(TFB_VARYING_COUNT(b)), 0);
         }
      }
   }
   nvc0->state.tfb = tfb;

   if (!(nvc0->dirty & NVC0_NEW_TFB_TARGETS))
      return;
   nvc0_bufctx_reset(nvc0, NVC0_BUFCTX_TFB);

   for (b = 0; b < nvc0->num_tfbbufs; ++b) {
      struct nvc0_so_target *targ = nvc0_so_target(nvc0->tfbbuf[b]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      if (tfb)
         targ->stride = tfb->stride[b];

      if (!(nvc0->tfbbuf_dirty & (1 << b)))
         continue;

      if (!targ->clean)
         nvc0_query_fifo_wait(chan, targ->pq);
      BEGIN_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 5);
      OUT_RING (chan, 1);
      OUT_RESRCh(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RESRCl(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RING (chan, targ->pipe.buffer_size);
      if (!targ->clean) {
         nvc0_query_pushbuf_submit(chan, targ->pq, 0x4);
      } else {
         OUT_RING(chan, 0); /* TFB_BUFFER_OFFSET */
         targ->clean = FALSE;
      }
      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_TFB, buf, NOUVEAU_BO_WR);
   }
   for (; b < 4; ++b)
      IMMED_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 0);
}