static void
nvc0_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, uint mode)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_query *q;
   uint32_t cond;
   boolean negated = FALSE;
   /* only the two NO_WAIT modes allow rendering before the result lands */
   boolean wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   nvc0->cond_query = pq;
   nvc0->cond_cond = condition;
   nvc0->cond_mode = mode;

   if (!pq) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(COND_MODE), NVC0_3D_COND_MODE_ALWAYS);
      return;
   }
   q = nvc0_query(pq);

   /* NOTE: comparison of 2 queries only works if both have completed */
   switch (q->type) {
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      cond = negated ? NVC0_3D_COND_MODE_EQUAL :
                       NVC0_3D_COND_MODE_NOT_EQUAL;
      wait = TRUE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (likely(!negated)) {
         if (unlikely(q->nesting))
            cond = wait ? NVC0_3D_COND_MODE_NOT_EQUAL :
                          NVC0_3D_COND_MODE_ALWAYS;
         else
            cond = NVC0_3D_COND_MODE_RES_NON_ZERO;
      } else {
         cond = wait ? NVC0_3D_COND_MODE_EQUAL : NVC0_3D_COND_MODE_ALWAYS;
      }
      break;
   default:
      assert(!"render condition query not a predicate");
      cond = NVC0_3D_COND_MODE_ALWAYS;
      break;
   }

   if (wait)
      nvc0_query_fifo_wait(push, pq);

   /* point the 3D engine at the query result it should evaluate */
   PUSH_SPACE(push, 4);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NVC0(push, NVC0_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);
}
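/* A minimal usage sketch, not driver code: how a state tracker might drive
 * render_condition() through the core Gallium interface. Only the
 * pipe_context members and PIPE_RENDER_COND_* modes are real; the function
 * name and flow here are illustrative assumptions.
 */
static void
example_conditional_draws(struct pipe_context *pipe, struct pipe_query *occ_q)
{
   /* Predicate subsequent draws on an occlusion query that has been ended;
    * NO_WAIT lets the GPU draw unconditionally if the result isn't ready. */
   pipe->render_condition(pipe, occ_q, FALSE, PIPE_RENDER_COND_NO_WAIT);

   /* ... predicated draw calls go here ... */

   /* A NULL query disables predication (COND_MODE_ALWAYS above). */
   pipe->render_condition(pipe, NULL, FALSE, 0);
}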
void
nvc0_tfb_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_transform_feedback_state *tfb;
   unsigned b;

   /* the TFB state comes from the last shader stage before rasterization */
   if (nvc0->gmtyprog)
      tfb = nvc0->gmtyprog->tfb;
   else
   if (nvc0->tevlprog)
      tfb = nvc0->tevlprog->tfb;
   else
      tfb = nvc0->vertprog->tfb;

   IMMED_NVC0(push, NVC0_3D(TFB_ENABLE), (tfb && nvc0->num_tfbbufs) ? 1 : 0);

   if (tfb && tfb != nvc0->state.tfb) {
      for (b = 0; b < 4; ++b) {
         if (tfb->varying_count[b]) {
            /* four 8-bit varying indices per 32-bit method word */
            unsigned n = (tfb->varying_count[b] + 3) / 4;

            BEGIN_NVC0(push, NVC0_3D(TFB_STREAM(b)), 3);
            PUSH_DATA (push, 0);
            PUSH_DATA (push, tfb->varying_count[b]);
            PUSH_DATA (push, tfb->stride[b]);
            BEGIN_NVC0(push, NVC0_3D(TFB_VARYING_LOCS(b, 0)), n);
            PUSH_DATAp(push, tfb->varying_index[b], n);

            if (nvc0->tfbbuf[b])
               nvc0_so_target(nvc0->tfbbuf[b])->stride = tfb->stride[b];
         } else {
            IMMED_NVC0(push, NVC0_3D(TFB_VARYING_COUNT(b)), 0);
         }
      }
   }
   nvc0->state.tfb = tfb;

   if (!(nvc0->dirty & NVC0_NEW_TFB_TARGETS))
      return;
   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TFB);

   for (b = 0; b < nvc0->num_tfbbufs; ++b) {
      struct nvc0_so_target *targ = nvc0_so_target(nvc0->tfbbuf[b]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      if (tfb)
         targ->stride = tfb->stride[b];

      if (!(nvc0->tfbbuf_dirty & (1 << b)))
         continue;

      if (!targ->clean)
         nvc0_query_fifo_wait(push, targ->pq);
      BEGIN_NVC0(push, NVC0_3D(TFB_BUFFER_ENABLE(b)), 5);
      PUSH_DATA (push, 1);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, targ->pipe.buffer_size);
      if (!targ->clean) {
         /* resume: restore the buffer offset saved in the target's query */
         nvc0_query_pushbuf_submit(push, targ->pq, 0x4);
      } else {
         PUSH_DATA(push, 0); /* TFB_BUFFER_OFFSET */
         targ->clean = FALSE;
      }
      BCTX_REFN(nvc0->bufctx_3d, TFB, buf, WR);
   }
   for (; b < 4; ++b)
      IMMED_NVC0(push, NVC0_3D(TFB_BUFFER_ENABLE(b)), 0);
}
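/* Sketch of the packing behind n = (varying_count + 3) / 4 above:
 * TFB_VARYING_LOCS takes four 8-bit varying indices per 32-bit method word,
 * and PUSH_DATAp() sends the byte array as whole words, which on a
 * little-endian host yields the layout built explicitly by this
 * hypothetical helper (it assumes padding indices are zero, matching the
 * zeroed tail of varying_index[]):
 */
static uint32_t
example_pack_varying_locs(const uint8_t *idx, unsigned count, unsigned word)
{
   uint32_t v = 0;
   unsigned i;

   /* first index in bits 7:0, second in 15:8, and so on */
   for (i = 0; i < 4; ++i)
      if (word * 4 + i < count)
         v |= (uint32_t)idx[word * 4 + i] << (i * 8);
   return v;
}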
void
nvc0_tfb_validate(struct nvc0_context *nvc0)
{
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nvc0_transform_feedback_state *tfb;
   unsigned b, n, i;

   if (nvc0->gmtyprog)
      tfb = nvc0->gmtyprog->tfb;
   else
   if (nvc0->tevlprog)
      tfb = nvc0->tevlprog->tfb;
   else
      tfb = nvc0->vertprog->tfb;

   IMMED_RING(chan, RING_3D(TFB_ENABLE), (tfb && nvc0->num_tfbbufs) ? 1 : 0);

   if (tfb && tfb != nvc0->state.tfb) {
      uint8_t var[128];

      for (n = 0, b = 0; b < 4; n += tfb->varying_count[b++]) {
         if (tfb->varying_count[b]) {
            BEGIN_RING(chan, RING_3D(TFB_STREAM(b)), 3);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, tfb->varying_count[b]);
            OUT_RING  (chan, tfb->stride[b]);

            for (i = 0; i < tfb->varying_count[b]; ++i)
               var[i] = tfb->varying_index[n + i];
            for (; i & 3; ++i)
               var[i] = 0; /* zero rest of method word bits */

            BEGIN_RING(chan, RING_3D(TFB_VARYING_LOCS(b, 0)), i / 4);
            OUT_RINGp (chan, var, i / 4);

            if (nvc0->tfbbuf[b])
               nvc0_so_target(nvc0->tfbbuf[b])->stride = tfb->stride[b];
         } else {
            IMMED_RING(chan, RING_3D(TFB_VARYING_COUNT(b)), 0);
         }
      }
   }
   nvc0->state.tfb = tfb;

   if (!(nvc0->dirty & NVC0_NEW_TFB_TARGETS))
      return;
   nvc0_bufctx_reset(nvc0, NVC0_BUFCTX_TFB);

   for (b = 0; b < nvc0->num_tfbbufs; ++b) {
      struct nvc0_so_target *targ = nvc0_so_target(nvc0->tfbbuf[b]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      if (tfb)
         targ->stride = tfb->stride[b];

      if (!(nvc0->tfbbuf_dirty & (1 << b)))
         continue;

      if (!targ->clean)
         nvc0_query_fifo_wait(chan, targ->pq);
      BEGIN_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 5);
      OUT_RING  (chan, 1);
      OUT_RESRCh(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RESRCl(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RING  (chan, targ->pipe.buffer_size);
      if (!targ->clean) {
         nvc0_query_pushbuf_submit(chan, targ->pq, 0x4);
      } else {
         OUT_RING(chan, 0); /* TFB_BUFFER_OFFSET */
         targ->clean = FALSE;
      }
      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_TFB, buf, NOUVEAU_BO_WR);
   }
   for (; b < 4; ++b)
      IMMED_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 0);
}
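/* Sketch, not driver code: the state-tracker-side sequence both variants of
 * nvc0_tfb_validate() serve, using the Gallium interface of this era. The
 * append bitmask marks targets that resume at their previously saved offset,
 * which is what the targ->clean / nvc0_query_pushbuf_submit() path above
 * implements; the function name is a hypothetical example.
 */
static void
example_stream_output(struct pipe_context *pipe,
                      struct pipe_stream_output_target *targ)
{
   /* Bind one target, starting capture at offset 0 (no append bits set). */
   pipe->set_stream_output_targets(pipe, 1, &targ, 0);

   /* ... draws captured into the buffer ... */

   /* Rebind with bit 0 set to append after the previously captured data. */
   pipe->set_stream_output_targets(pipe, 1, &targ, 1);

   /* Unbind all targets. */
   pipe->set_stream_output_targets(pipe, 0, NULL, 0);
}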