Example #1
void
nvc0_tfb_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_transform_feedback_state *tfb;
   unsigned b;

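   /* Transform feedback state comes from the last enabled shader stage
    * before rasterization: geometry if present, else tessellation
    * evaluation, else vertex.
    */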
   if (nvc0->gmtyprog) tfb = nvc0->gmtyprog->tfb;
   else
   if (nvc0->tevlprog) tfb = nvc0->tevlprog->tfb;
   else
      tfb = nvc0->vertprog->tfb;

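   /* Enable transform feedback only if the program captures varyings and
    * at least one target buffer is bound.
    */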
   IMMED_NVC0(push, NVC0_3D(TFB_ENABLE), (tfb && nvc0->num_tfbbufs) ? 1 : 0);

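   /* Re-emit the per-buffer stream, varying count, stride and varying
    * locations whenever the program's TFB layout changes.
    */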
   if (tfb && tfb != nvc0->state.tfb) {
      for (b = 0; b < 4; ++b) {
         if (tfb->varying_count[b]) {
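            /* byte-sized varying indices are packed 4 per 32-bit method word */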
            unsigned n = (tfb->varying_count[b] + 3) / 4;

            BEGIN_NVC0(push, NVC0_3D(TFB_STREAM(b)), 3);
            PUSH_DATA (push, 0);
            PUSH_DATA (push, tfb->varying_count[b]);
            PUSH_DATA (push, tfb->stride[b]);
            BEGIN_NVC0(push, NVC0_3D(TFB_VARYING_LOCS(b, 0)), n);
            PUSH_DATAp(push, tfb->varying_index[b], n);

            if (nvc0->tfbbuf[b])
               nvc0_so_target(nvc0->tfbbuf[b])->stride = tfb->stride[b];
         } else {
            IMMED_NVC0(push, NVC0_3D(TFB_VARYING_COUNT(b)), 0);
         }
      }
   }
   nvc0->state.tfb = tfb;

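   /* The buffer bindings below only need to be re-emitted when the set of
    * TFB targets changed.
    */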
   if (!(nvc0->dirty & NVC0_NEW_TFB_TARGETS))
      return;
   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TFB);

   for (b = 0; b < nvc0->num_tfbbufs; ++b) {
      struct nvc0_so_target *targ = nvc0_so_target(nvc0->tfbbuf[b]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      if (tfb)
         targ->stride = tfb->stride[b];

      if (!(nvc0->tfbbuf_dirty & (1 << b)))
         continue;

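      /* A target that has already been written to keeps its current offset
       * in a query object; wait for that result before resuming into it.
       */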
      if (!targ->clean)
         nvc0_query_fifo_wait(push, targ->pq);
      BEGIN_NVC0(push, NVC0_3D(TFB_BUFFER_ENABLE(b)), 5);
      PUSH_DATA (push, 1);
      PUSH_DATAh(push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, buf->address + targ->pipe.buffer_offset);
      PUSH_DATA (push, targ->pipe.buffer_size);
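      /* Fifth word is TFB_BUFFER_OFFSET: taken from the offset query when
       * resuming, zero for a clean target.
       */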
      if (!targ->clean) {
         nvc0_query_pushbuf_submit(push, targ->pq, 0x4);
      } else {
         PUSH_DATA(push, 0); /* TFB_BUFFER_OFFSET */
         targ->clean = FALSE;
      }
      BCTX_REFN(nvc0->bufctx_3d, TFB, buf, WR);
   }
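   /* Disable the remaining buffer slots. */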
   for (; b < 4; ++b)
      IMMED_NVC0(push, NVC0_3D(TFB_BUFFER_ENABLE(b)), 0);
}
Example #2
void
nvc0_tfb_validate(struct nvc0_context *nvc0)
{
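   /* Older revision of the same validation hook, written against the
    * nouveau_channel ring API (BEGIN_RING/OUT_RING) rather than
    * nouveau_pushbuf.
    */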
   struct nouveau_channel *chan = nvc0->screen->base.channel;
   struct nvc0_transform_feedback_state *tfb;
   unsigned b, n, i;

   if (nvc0->gmtyprog) tfb = nvc0->gmtyprog->tfb;
   else
   if (nvc0->tevlprog) tfb = nvc0->tevlprog->tfb;
   else
      tfb = nvc0->vertprog->tfb;

   IMMED_RING(chan, RING_3D(TFB_ENABLE), (tfb && nvc0->num_tfbbufs) ? 1 : 0);

   if (tfb && tfb != nvc0->state.tfb) {
      uint8_t var[128];

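      /* n is the running offset into the flat varying_index[] array */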
      for (n = 0, b = 0; b < 4; n += tfb->varying_count[b++]) {
         if (tfb->varying_count[b]) {
            BEGIN_RING(chan, RING_3D(TFB_STREAM(b)), 3);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, tfb->varying_count[b]);
            OUT_RING  (chan, tfb->stride[b]);

            for (i = 0; i < tfb->varying_count[b]; ++i)
               var[i] = tfb->varying_index[n + i];
            for (; i & 3; ++i)
               var[i] = 0; /* zero rest of method word bits */

            BEGIN_RING(chan, RING_3D(TFB_VARYING_LOCS(b, 0)), i / 4);
            OUT_RINGp (chan, var, i / 4);

            if (nvc0->tfbbuf[b])
               nvc0_so_target(nvc0->tfbbuf[b])->stride = tfb->stride[b];
         } else {
            IMMED_RING(chan, RING_3D(TFB_VARYING_COUNT(b)), 0);
         }
      }
   }
   nvc0->state.tfb = tfb;

   if (!(nvc0->dirty & NVC0_NEW_TFB_TARGETS))
      return;
   nvc0_bufctx_reset(nvc0, NVC0_BUFCTX_TFB);

   for (b = 0; b < nvc0->num_tfbbufs; ++b) {
      struct nvc0_so_target *targ = nvc0_so_target(nvc0->tfbbuf[b]);
      struct nv04_resource *buf = nv04_resource(targ->pipe.buffer);

      if (tfb)
         targ->stride = tfb->stride[b];

      if (!(nvc0->tfbbuf_dirty & (1 << b)))
         continue;

      if (!targ->clean)
         nvc0_query_fifo_wait(chan, targ->pq);
      BEGIN_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 5);
      OUT_RING  (chan, 1);
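      /* emit relocation-tracked high/low words of the buffer address */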
      OUT_RESRCh(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RESRCl(chan, buf, targ->pipe.buffer_offset, NOUVEAU_BO_WR);
      OUT_RING  (chan, targ->pipe.buffer_size);
      if (!targ->clean) {
         nvc0_query_pushbuf_submit(chan, targ->pq, 0x4);
      } else {
         OUT_RING(chan, 0); /* TFB_BUFFER_OFFSET */
         targ->clean = FALSE;
      }
      nvc0_bufctx_add_resident(nvc0, NVC0_BUFCTX_TFB, buf, NOUVEAU_BO_WR);
   }
   for (; b < 4; ++b)
      IMMED_RING(chan, RING_3D(TFB_BUFFER_ENABLE(b)), 0);
}