Example #1
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct svga_context *svga = svga_context( pipe );
   unsigned reduced_prim = u_reduced_prim( info->mode );
   unsigned count = info->count;
   enum pipe_error ret = PIPE_OK;
   boolean needed_swtnl;

   if (!u_trim_pipe_prim( info->mode, &count ))
      return;

   /*
    * Mark currently bound target surfaces as dirty; it doesn't really
    * matter that this is done before drawing.
    *
    * TODO: If we ever normally return something other than true, we
    * should not mark the surfaces as dirty in that case.
    */
   svga_mark_surfaces_dirty(svga_context(pipe));

   if (svga->curr.reduced_prim != reduced_prim) {
      svga->curr.reduced_prim = reduced_prim;
      svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
   }
   
   needed_swtnl = svga->state.sw.need_swtnl;

   svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

#ifdef DEBUG
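   /* Debug aid: skip the draw entirely when either the bound vertex or
    * fragment shader has been flagged via svga->debug.disable_shader.
    */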
   if (svga->curr.vs->base.id == svga->debug.disable_shader ||
       svga->curr.fs->base.id == svga->debug.disable_shader)
      return;
#endif

   if (svga->state.sw.need_swtnl) {
      if (!needed_swtnl) {
         /*
          * We're switching from HW to SW TNL.  SW TNL will require mapping all
          * currently bound vertex buffers, some of which may already be
          * referenced in the current command buffer as a result of previous HW
          * TNL.  So flush now, to prevent the context from flushing while a
          * referenced vertex buffer is mapped.
          */

         svga_context_flush(svga, NULL);
      }

      /* Avoid leaking the previous hwtnl bias to swtnl */
      svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
      ret = svga_swtnl_draw_vbo( svga, info );
   }
   else {
      if (info->indexed && svga->curr.ib.buffer) {
         unsigned offset;

         assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
         offset = svga->curr.ib.offset / svga->curr.ib.index_size;

         ret = retry_draw_range_elements( svga,
                                          svga->curr.ib.buffer,
                                          svga->curr.ib.index_size,
                                          info->index_bias,
                                          info->min_index,
                                          info->max_index,
                                          info->mode,
                                          info->start + offset,
                                          count,
                                          info->instance_count,
                                          TRUE );
      }
      else {
         ret = retry_draw_arrays( svga,
                                  info->mode,
                                  info->start,
                                   count,
                                  info->instance_count,
                                  TRUE );
      }
   }

   /* XXX: Silence warnings, do something sensible here? */
   (void)ret;

   svga_release_user_upl_buffers( svga );

   if (SVGA_DEBUG & DEBUG_FLUSH) {
      svga_hwtnl_flush_retry( svga );
      svga_context_flush(svga, NULL);
   }
}
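
Both code paths above run only after u_trim_pipe_prim() has clamped the vertex
count to a whole number of primitives and confirmed there is something left to
draw. The helper below is a minimal sketch of that idea for three primitive
types; it is illustrative only (the name and the switch are hypothetical), not
the real u_prim.h implementation, which covers every PIPE_PRIM_* value.

#include <stdbool.h>

/* Values as in Gallium: PIPE_PRIM_POINTS == 0, PIPE_PRIM_LINES == 1,
 * PIPE_PRIM_TRIANGLES == 4.
 */
enum { PRIM_POINTS = 0, PRIM_LINES = 1, PRIM_TRIANGLES = 4 };

/* Sketch: trim *count down to a whole number of primitives and report
 * whether anything remains to be drawn.
 */
bool
trim_prim_count_sketch(unsigned prim, unsigned *count)
{
   switch (prim) {
   case PRIM_LINES:
      *count -= *count % 2;      /* whole lines only */
      return *count >= 2;
   case PRIM_TRIANGLES:
      *count -= *count % 3;      /* whole triangles only */
      return *count >= 3;
   case PRIM_POINTS:
   default:
      return *count > 0;
   }
}

With this kind of trimming in place, a degenerate request such as a triangle
draw with a count of 2 is rejected up front, which is why svga_draw_vbo() can
return before touching any hardware state.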
Example #2
static enum pipe_error
emit_hw_vs_vdecl(struct svga_context *svga, unsigned dirty)
{
   const struct pipe_vertex_element *ve = svga->curr.velems->velem;
   SVGA3dVertexDecl decls[SVGA3D_INPUTREG_MAX];
   unsigned buffer_indexes[SVGA3D_INPUTREG_MAX];
   unsigned i;
   unsigned neg_bias = 0;

   assert(svga->curr.velems->count >=
          svga->curr.vs->base.info.file_count[TGSI_FILE_INPUT]);

   /**
    * We can't set the VDECL offset to something negative, so we
    * must calculate a common negative additional index bias, and modify
    * the VDECL offsets accordingly so they *all* end up positive.
    *
    * Note that the exact value of the negative index bias is not that
    * important, since we compensate for it when we calculate the vertex
    * buffer offset below. The important thing is that all vertex buffer
    * offsets remain positive.
    *
    * Note that we use a negative bias variable in order to make the
    * rounding maths easier to follow, and to avoid int / unsigned
    * confusion.
    */

   for (i = 0; i < svga->curr.velems->count; i++) {
      const struct pipe_vertex_buffer *vb =
         &svga->curr.vb[ve[i].vertex_buffer_index];
      struct svga_buffer *buffer;
      unsigned int offset = vb->buffer_offset + ve[i].src_offset;
      unsigned tmp_neg_bias = 0;

      if (!vb->buffer)
         continue;

      buffer = svga_buffer(vb->buffer);
      if (buffer->uploaded.start > offset) {
         tmp_neg_bias = buffer->uploaded.start - offset;
         if (vb->stride)
            tmp_neg_bias = (tmp_neg_bias + vb->stride - 1) / vb->stride;
         neg_bias = MAX2(neg_bias, tmp_neg_bias);
      }
   }

   for (i = 0; i < svga->curr.velems->count; i++) {
      const struct pipe_vertex_buffer *vb =
         &svga->curr.vb[ve[i].vertex_buffer_index];
      unsigned usage, index;
      struct svga_buffer *buffer;

      if (!vb->buffer)
         continue;

      buffer = svga_buffer(vb->buffer);
      svga_generate_vdecl_semantics( i, &usage, &index );

      /* SVGA_NEW_VELEMENT
       */
      decls[i].identity.type = svga->curr.velems->decl_type[i];
      decls[i].identity.method = SVGA3D_DECLMETHOD_DEFAULT;
      decls[i].identity.usage = usage;
      decls[i].identity.usageIndex = index;
      decls[i].array.stride = vb->stride;

      /* Compensate for partially uploaded vbo, and
       * for the negative index bias.
       */
      decls[i].array.offset = (vb->buffer_offset
                               + ve[i].src_offset
                               + neg_bias * vb->stride
                               - buffer->uploaded.start);

      assert(decls[i].array.offset >= 0);

      buffer_indexes[i] = ve[i].vertex_buffer_index;

      assert(!buffer->uploaded.buffer);
   }

   svga_hwtnl_vertex_decls(svga->hwtnl,
                           svga->curr.velems->count,
                           decls,
                           buffer_indexes,
                           svga->curr.velems->id);

   svga_hwtnl_vertex_buffers(svga->hwtnl,
                             svga->curr.num_vertex_buffers,
                             svga->curr.vb);

   svga_hwtnl_set_index_bias( svga->hwtnl, -(int) neg_bias );
   return PIPE_OK;
}
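
The two loops above first work out, per vertex buffer, how far the declaration
offset falls short of the uploaded range, round that shortfall up to whole
vertices, and keep the maximum as neg_bias. Every declaration offset is then
shifted forward by neg_bias * stride so it stays non-negative, and the shift is
undone by programming an index bias of -neg_bias. The standalone program below
runs that arithmetic once with made-up numbers (uploaded start 100, element
offset 40, stride 16); the variable names mirror the code above, but the
program itself is only a sketch.

#include <assert.h>
#include <stdio.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
   unsigned uploaded_start = 100;   /* buffer->uploaded.start */
   unsigned offset = 40;            /* vb->buffer_offset + ve[i].src_offset */
   unsigned stride = 16;            /* vb->stride */
   unsigned neg_bias = 0;

   /* First pass: shortfall in bytes, rounded up to whole vertices. */
   if (uploaded_start > offset) {
      unsigned tmp_neg_bias = uploaded_start - offset;        /* 60 bytes   */
      tmp_neg_bias = (tmp_neg_bias + stride - 1) / stride;    /* 4 vertices */
      neg_bias = MAX2(neg_bias, tmp_neg_bias);
   }

   /* Second pass: the shifted declaration offset is non-negative again. */
   unsigned decl_offset = offset + neg_bias * stride - uploaded_start;

   printf("neg_bias = %u, decl offset = %u, index bias = %d\n",
          neg_bias, decl_offset, -(int) neg_bias);            /* 4, 4, -4 */
   assert(decl_offset == 4);
   return 0;
}

Because the hwtnl index bias is -neg_bias, every fetched vertex index is pulled
back by the same number of vertices that the offsets were pushed forward, so
the vertices actually read from the buffer are unchanged.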
Example #3
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct svga_context *svga = svga_context( pipe );
   unsigned reduced_prim = u_reduced_prim( info->mode );
   unsigned count = info->count;
   enum pipe_error ret = PIPE_OK;
   boolean needed_swtnl;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);

   svga->hud.num_draw_calls++;  /* for SVGA_QUERY_NUM_DRAW_CALLS */

   if (u_reduced_prim(info->mode) == PIPE_PRIM_TRIANGLES &&
       svga->curr.rast->templ.cull_face == PIPE_FACE_FRONT_AND_BACK)
      goto done;

   /*
    * Mark currently bound target surfaces as dirty; it doesn't really
    * matter that this is done before drawing.
    *
    * TODO: If we ever normally return something other than true, we
    * should not mark the surfaces as dirty in that case.
    */
   svga_mark_surfaces_dirty(svga_context(pipe));

   if (svga->curr.reduced_prim != reduced_prim) {
      svga->curr.reduced_prim = reduced_prim;
      svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
   }

   if (need_fallback_prim_restart(svga, info)) {
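      /* The device cannot honour the requested restart index directly;
       * util_draw_vbo_without_prim_restart() instead splits the draw into
       * separate sub-draws around each restart index.
       */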
      enum pipe_error r;
      r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
      assert(r == PIPE_OK);
      (void) r;
      goto done;
   }

   if (!u_trim_pipe_prim( info->mode, &count ))
      goto done;

   needed_swtnl = svga->state.sw.need_swtnl;

   svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

   if (svga->state.sw.need_swtnl) {
      svga->hud.num_fallbacks++;  /* for SVGA_QUERY_NUM_FALLBACKS */
      if (!needed_swtnl) {
         /*
          * We're switching from HW to SW TNL.  SW TNL will require mapping all
          * currently bound vertex buffers, some of which may already be
          * referenced in the current command buffer as a result of previous HW
          * TNL.  So flush now, to prevent the context from flushing while a
          * referenced vertex buffer is mapped.
          */

         svga_context_flush(svga, NULL);
      }

      /* Avoid leaking the previous hwtnl bias to swtnl */
      svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
      ret = svga_swtnl_draw_vbo( svga, info );
   }
   else {
      if (info->indexed && svga->curr.ib.buffer) {
         unsigned offset;

         assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
         offset = svga->curr.ib.offset / svga->curr.ib.index_size;

         ret = retry_draw_range_elements( svga,
                                          svga->curr.ib.buffer,
                                          svga->curr.ib.index_size,
                                          info->index_bias,
                                          info->min_index,
                                          info->max_index,
                                          info->mode,
                                          info->start + offset,
                                          count,
                                          info->start_instance,
                                          info->instance_count,
                                          TRUE );
      }
      else {
         ret = retry_draw_arrays(svga, info->mode, info->start, count,
                                 info->start_instance, info->instance_count,
                                 TRUE);
      }
   }

   /* XXX: Silence warnings, do something sensible here? */
   (void)ret;

   if (SVGA_DEBUG & DEBUG_FLUSH) {
      svga_hwtnl_flush_retry( svga );
      svga_context_flush(svga, NULL);
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
}
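
Compared with the first version, this revision adds stats bookkeeping, a HUD
draw-call counter, a software fallback for primitive restart, and an early-out
when the rasterizer culls both faces: triangles can never produce fragments in
that state, while points and lines are unaffected by face culling. Below is a
hedged sketch of that early-out test; the PIPE_* values mirror Gallium's
enums, but the helper itself is hypothetical.

#include <stdbool.h>

/* Values as in Gallium: PIPE_PRIM_TRIANGLES == 4 and
 * PIPE_FACE_FRONT_AND_BACK == (PIPE_FACE_FRONT | PIPE_FACE_BACK) == 3.
 */
enum { PRIM_TRIANGLES = 4, FACE_FRONT_AND_BACK = 3 };

/* Hypothetical helper: true when the draw cannot reach the framebuffer
 * and may therefore be skipped entirely.
 */
bool
draw_is_trivially_culled(unsigned reduced_prim, unsigned cull_face)
{
   return reduced_prim == PRIM_TRIANGLES &&
          cull_face == FACE_FRONT_AND_BACK;
}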