static void
nv50_so_target_destroy(struct pipe_context *pipe,
                       struct pipe_stream_output_target *ptarg)
{
   struct nv50_so_target *targ = nv50_so_target(ptarg);
   if (targ->pq)
      pipe->destroy_query(pipe, targ->pq);
   pipe_resource_reference(&targ->pipe.buffer, NULL);
   FREE(targ);
}

void
nva0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, boolean serialize)
{
   struct nv50_so_target *targ = nv50_so_target(ptarg);

   /* Optionally insert a serialization point in the 3D engine first so the
    * saved offset reflects all previously submitted stream-output work. */
   if (serialize) {
      struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   /* Tag the query with the buffer index, then end it so the hardware writes
    * back the current stream-output offset for a later resume. */
   nv50_query(targ->pq)->index = index;
   nv50_query_end(pipe, targ->pq);
}
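For orientation, here is a minimal sketch of the matching create hook, assuming the standard gallium pipe_context interface and the older boolean-based style used above. The query type constant and the create_query signature vary across Mesa versions, so treat this as an illustration rather than the driver's actual code.

static struct pipe_stream_output_target *
nv50_so_target_create(struct pipe_context *pipe, struct pipe_resource *res,
                      unsigned offset, unsigned size)
{
   struct nv50_so_target *targ = MALLOC_STRUCT(nv50_so_target);
   if (!targ)
      return NULL;

   /* Only NVA0+ can report the buffer offset, so only there is a query
    * allocated. The query type name is an assumption and differs between
    * Mesa versions; newer gallium also adds an index argument to
    * create_query. */
   if (nv50_context(pipe)->screen->base.class_3d >= NVA0_3D_CLASS)
      targ->pq = pipe->create_query(pipe, NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET);
   else
      targ->pq = NULL;
   targ->clean = TRUE;

   targ->pipe.buffer_size = size;
   targ->pipe.buffer_offset = offset;
   targ->pipe.context = pipe;
   targ->pipe.buffer = NULL;
   pipe_resource_reference(&targ->pipe.buffer, res);
   pipe_reference_init(&targ->pipe.reference, 1);

   return &targ->pipe;
}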
Example #3
static void
nv50_set_stream_output_targets(struct pipe_context *pipe,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   unsigned i;
   bool serialize = true;
   const bool can_resume = nv50->screen->base.class_3d >= NVA0_3D_CLASS;

   assert(num_targets <= 4);

   for (i = 0; i < num_targets; ++i) {
      const bool changed = nv50->so_target[i] != targets[i];
      /* An offset of -1 means "append": keep writing at the saved offset. */
      const bool append = (offsets[i] == (unsigned)-1);
      if (!changed && append)
         continue;
      nv50->so_targets_dirty |= 1 << i;

      /* NVA0+ can save the outgoing target's current offset so writing into
       * it can be resumed later. */
      if (can_resume && changed && nv50->so_target[i]) {
         nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
         serialize = false;
      }

      if (targets[i] && !append)
         nv50_so_target(targets[i])->clean = true;

      pipe_so_target_reference(&nv50->so_target[i], targets[i]);
   }
   for (; i < nv50->num_so_targets; ++i) {
      if (can_resume && nv50->so_target[i]) {
         nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
         serialize = false;
      }
      pipe_so_target_reference(&nv50->so_target[i], NULL);
      nv50->so_targets_dirty |= 1 << i;
   }
   nv50->num_so_targets = num_targets;

   if (nv50->so_targets_dirty) {
      nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_SO);
      nv50->dirty_3d |= NV50_NEW_3D_STRMOUT;
   }
}
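The hook above implements pipe_context::set_stream_output_targets. As a hypothetical caller-side illustration (names assumed, not taken from the driver), a state tracker could bind a single transform-feedback buffer and request append semantics by passing an offset of -1:

static void
bind_xfb_buffer(struct pipe_context *pipe, struct pipe_stream_output_target *so)
{
   /* An offset of -1 asks the driver to resume at the target's saved offset. */
   const unsigned append_offset = (unsigned)-1;
   pipe->set_stream_output_targets(pipe, 1, &so, &append_offset);
}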
Example #4
static void
nv50_set_stream_output_targets(struct pipe_context *pipe,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               unsigned append_mask)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   unsigned i;
   boolean serialize = TRUE;
   const boolean can_resume = nv50->screen->base.class_3d >= NVA0_3D_CLASS;

   assert(num_targets <= 4);

   for (i = 0; i < num_targets; ++i) {
      const boolean changed = nv50->so_target[i] != targets[i];
      if (!changed && (append_mask & (1 << i)))
         continue;
      nv50->so_targets_dirty |= 1 << i;

      if (can_resume && changed && nv50->so_target[i]) {
         nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
         serialize = FALSE;
      }

      if (targets[i] && !(append_mask & (1 << i)))
         nv50_so_target(targets[i])->clean = TRUE;

      pipe_so_target_reference(&nv50->so_target[i], targets[i]);
   }
   for (; i < nv50->num_so_targets; ++i) {
      if (can_resume && nv50->so_target[i]) {
         nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
         serialize = FALSE;
      }
      pipe_so_target_reference(&nv50->so_target[i], NULL);
      nv50->so_targets_dirty |= 1 << i;
   }
   nv50->num_so_targets = num_targets;

   if (nv50->so_targets_dirty)
      nv50->dirty |= NV50_NEW_STRMOUT;
}
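This older revision of the same hook takes a per-target append bit mask instead of an offsets array. A hypothetical caller-side illustration for that interface (function name assumed):

static void
bind_xfb_buffer_append(struct pipe_context *pipe,
                       struct pipe_stream_output_target *so)
{
   /* Bit i of the mask asks the driver to append to target i. */
   pipe->set_stream_output_targets(pipe, 1, &so, 1 << 0);
}
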
void
nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   boolean apply_bias = info->indexed && info->index_bias;

   ctx.push = nv50->base.pushbuf;
   ctx.translate = nv50->vertex->translate;
   ctx.packet_vertex_limit = nv50->vertex->packet_vertex_limit;
   ctx.vertex_words = nv50->vertex->vertex_size;

   for (i = 0; i < nv50->num_vtxbufs; ++i) {
      const struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
      const uint8_t *data;

      /* Vertex data either lives in a buffer object, which must be mapped for
       * CPU reads, or comes straight from a user pointer. */
      if (unlikely(vb->buffer))
         data = nouveau_resource_map_offset(&nv50->base,
            nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);
      else
         data = vb->user_buffer;

      /* Apply the index bias up front for per-vertex (non-instanced) buffers. */
      if (apply_bias && likely(!(nv50->vertex->instance_bufs & (1 << i))))
         data += (ptrdiff_t)info->index_bias * vb->stride;

      ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
   }

   if (info->indexed) {
      if (nv50->idxbuf.buffer) {
         ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
            nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset,
            NOUVEAU_BO_RD);
      } else {
         ctx.idxbuf = nv50->idxbuf.user_buffer;
      }
      if (!ctx.idxbuf)
         return;
      index_size = nv50->idxbuf.index_size;
      ctx.primitive_restart = info->primitive_restart;
      ctx.restart_index = info->restart_index;
   } else {
      /* The vertex count comes from a stream-output target: read back its
       * offset query and divide by the vertex stride. Pre-NVA0 targets have
       * no query, so this path is unsupported there. */
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nv50->base.pipe;
         struct nv50_so_target *targ;
         targ = nv50_so_target(info->count_from_stream_output);
         if (!targ->pq) {
            NOUVEAU_ERR("draw_stream_output not supported on pre-NVA0 cards\n");
            return;
         }
         pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL;
      index_size = 0;
      ctx.primitive_restart = FALSE;
      ctx.restart_index = 0;
   }

   ctx.instance_id = info->start_instance;
   ctx.prim = nv50_prim_gl(info->mode);

   /* Program primitive restart (and the restart index) when requested, or
    * disable it if a previous draw left it enabled. */
   if (info->primitive_restart) {
      BEGIN_NV04(ctx.push, NV50_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->restart_index);
   } else
   if (nv50->state.prim_restart) {
      BEGIN_NV04(ctx.push, NV50_3D(PRIM_RESTART_ENABLE), 1);
      PUSH_DATA (ctx.push, 0);
   }
   nv50->state.prim_restart = info->primitive_restart;

   /* Emit the vertices once per instance; after the first pass the
    * INSTANCE_NEXT bit makes the hardware advance the instance counter. */
   while (inst_count--) {
      BEGIN_NV04(ctx.push, NV50_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, ctx.prim);
      switch (index_size) {
      case 0:
         emit_vertices_seq(&ctx, info->start, vert_count);
         break;
      case 1:
         emit_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         emit_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         emit_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(0);
         break;
      }
      BEGIN_NV04(ctx.push, NV50_3D(VERTEX_END_GL), 1);
      PUSH_DATA (ctx.push, 0);

      ctx.instance_id++;
      ctx.prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
}
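For context, a hypothetical caller-side sketch of the count_from_stream_output path handled above (function name chosen for illustration; includes omitted, as in the surrounding snippets):

static void
draw_from_xfb(struct pipe_context *pipe, struct pipe_stream_output_target *so)
{
   struct pipe_draw_info info;

   memset(&info, 0, sizeof(info));
   info.mode = PIPE_PRIM_TRIANGLES;
   info.instance_count = 1;
   /* The vertex count is taken from the stream-output target; the driver
    * reads back targ->pq and divides it by the buffer stride. */
   info.count_from_stream_output = so;
   pipe->draw_vbo(pipe, &info);
}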