/**
 * Submit the swtnl vertex declarations and vertex buffer to the
 * hardware TNL module.  Does nothing unless the declarations have
 * changed (svga->swtnl.new_vdecl).
 */
static void
svga_vbuf_submit_state( struct svga_vbuf_render *svga_render )
{
   struct svga_context *svga = svga_render->svga;
   SVGA3dVertexDecl vdecl[PIPE_MAX_ATTRIBS];
   enum pipe_error ret;
   unsigned i;   /* unsigned: compared against vdecl_count below */

   /* if the vdecl or vbuf hasn't changed do nothing */
   if (!svga->swtnl.new_vdecl)
      return;

   /* Work on a local copy so the cached declarations keep their
    * original (un-biased) offsets.
    */
   memcpy(vdecl, svga_render->vdecl, sizeof(vdecl));

   /* flush the hw state */
   ret = svga_hwtnl_flush(svga->hwtnl);
   if (ret != PIPE_OK) {
      /* First flush failed (presumably out of command-buffer space):
       * kick the context and retry once.
       */
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_flush(svga->hwtnl);
      /* if we hit this path we might become synced with hw */
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);   /* compare against the enum, not bare 0 */
   }

   svga_hwtnl_reset_vdecl(svga->hwtnl, svga_render->vdecl_count);

   for (i = 0; i < svga_render->vdecl_count; i++) {
      /* Bias each element offset by the start of the current vertex data. */
      vdecl[i].array.offset += svga_render->vdecl_offset;

      svga_hwtnl_vdecl( svga->hwtnl,
                        i,
                        &vdecl[i],
                        svga_render->vbuf );
   }

   /* We have already taken care of flatshading, so let the hwtnl
    * module use whatever is most convenient:
    */
   if (svga->state.sw.need_pipeline) {
      svga_hwtnl_set_flatshade(svga->hwtnl, FALSE, FALSE);
      svga_hwtnl_set_unfilled(svga->hwtnl, PIPE_POLYGON_MODE_FILL);
   }
   else {
      svga_hwtnl_set_flatshade( svga->hwtnl,
                                svga->curr.rast->templ.flatshade,
                                svga->curr.rast->templ.flatshade_first );

      svga_hwtnl_set_unfilled( svga->hwtnl,
                               svga->curr.rast->hw_unfilled );
   }

   svga->swtnl.new_vdecl = FALSE;
}
/* Example #2 */
/**
 * Flush pending hardware TNL commands.  If the first attempt fails
 * with PIPE_ERROR_OUT_OF_MEMORY, flush the whole context to free
 * command-buffer space and retry once; the retry is expected to
 * succeed.
 */
void svga_hwtnl_flush_retry( struct svga_context *svga )
{
   enum pipe_error ret = PIPE_OK;

   ret = svga_hwtnl_flush( svga->hwtnl );
   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      /* Kick the current command buffer out to the host, then retry. */
      svga_context_flush( svga, NULL );
      ret = svga_hwtnl_flush( svga->hwtnl );
   }

   assert(ret == PIPE_OK);   /* compare against the enum, not bare 0 */
}
/* Example #3 */
/**
 * Queue one primitive range for later submission to the hardware.
 * Flushes the pending batch first when the queue is full, and applies
 * the current index bias to the queued range.
 */
enum pipe_error
svga_hwtnl_prim(struct svga_hwtnl *hwtnl,
                const SVGA3dPrimitiveRange * range,
                unsigned min_index,
                unsigned max_index, struct pipe_resource *ib)
{
   enum pipe_error ret = PIPE_OK;
   unsigned slot;

#ifdef DEBUG
   check_draw_params(hwtnl, range, min_index, max_index, ib);
#endif

   /* Make room for one more primitive, flushing the batch if needed. */
   if (hwtnl->cmd.prim_count + 1 >= QSZ) {
      ret = svga_hwtnl_flush(hwtnl);
      if (ret != PIPE_OK)
         return ret;
   }

   slot = hwtnl->cmd.prim_count;

   /* min/max indices are relative to bias */
   hwtnl->cmd.min_index[slot] = min_index;
   hwtnl->cmd.max_index[slot] = max_index;

   hwtnl->cmd.prim[slot] = *range;
   hwtnl->cmd.prim[slot].indexBias += hwtnl->index_bias;

   /* Hold a reference on the index buffer until the batch is flushed. */
   pipe_resource_reference(&hwtnl->cmd.prim_ib[slot], ib);

   hwtnl->cmd.prim_count = slot + 1;

   return ret;
}
/* Example #4 */
/**
 * All drawing filters down into this function, either directly
 * on the hardware path or after doing software vertex processing.
 *
 * On VGPU10 devices the range is drawn immediately; on older devices
 * it is appended to the pending command batch (flushed first if full).
 * Returns PIPE_OK on success, or the error from the flush that made
 * queueing impossible.
 */
enum pipe_error
svga_hwtnl_prim(struct svga_hwtnl *hwtnl,
                const SVGA3dPrimitiveRange * range,
                unsigned vcount,
                unsigned min_index,
                unsigned max_index, struct pipe_resource *ib,
                unsigned start_instance, unsigned instance_count)
{
   enum pipe_error ret = PIPE_OK;

   /* Every exit path below must reach the POP at 'done'. */
   SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLPRIM);

   if (svga_have_vgpu10(hwtnl->svga)) {
      /* draw immediately */
      ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                        start_instance, instance_count);
      if (ret != PIPE_OK) {
         /* Presumably out of command-buffer space: flush the context
          * and retry once; the retry is expected to succeed.
          */
         svga_context_flush(hwtnl->svga, NULL);
         ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                           start_instance, instance_count);
         assert(ret == PIPE_OK);
      }
   }
   else {
      /* batch up drawing commands */
#ifdef DEBUG
      check_draw_params(hwtnl, range, min_index, max_index, ib);
      /* Instancing is not supported on the non-VGPU10 batching path. */
      assert(start_instance == 0);
      assert(instance_count <= 1);
#else
      /* Silence the unused-function warning in release builds. */
      (void) check_draw_params;
#endif

      /* Make room for one more primitive, flushing the batch if needed. */
      if (hwtnl->cmd.prim_count + 1 >= QSZ) {
         ret = svga_hwtnl_flush(hwtnl);
         if (ret != PIPE_OK)
            goto done;
      }

      /* min/max indices are relative to bias */
      hwtnl->cmd.min_index[hwtnl->cmd.prim_count] = min_index;
      hwtnl->cmd.max_index[hwtnl->cmd.prim_count] = max_index;

      hwtnl->cmd.prim[hwtnl->cmd.prim_count] = *range;
      hwtnl->cmd.prim[hwtnl->cmd.prim_count].indexBias += hwtnl->index_bias;

      /* Hold a reference on the index buffer until the batch is flushed. */
      pipe_resource_reference(&hwtnl->cmd.prim_ib[hwtnl->cmd.prim_count], ib);
      hwtnl->cmd.prim_count++;
   }

done:
   SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
   return ret;
}
/* Example #5 */
/**
 * Draw an indexed range of elements, bringing the hardware draw state
 * up to date first.  On any failure the context is flushed and the
 * whole sequence is retried exactly once (do_retry guards against
 * unbounded recursion).
 *
 * NOTE(review): unlike retry_draw_arrays(), this retries on *any*
 * error, not just PIPE_ERROR_OUT_OF_MEMORY -- confirm whether that
 * difference is intentional.
 */
static enum pipe_error
retry_draw_range_elements( struct svga_context *svga,
                           struct pipe_buffer *index_buffer,
                           unsigned index_size,
                           unsigned min_index,
                           unsigned max_index,
                           unsigned prim,
                           unsigned start,
                           unsigned count,
                           boolean do_retry )
{
   enum pipe_error ret = PIPE_OK;   /* was bare 0; use the enum */

   svga_hwtnl_set_unfilled( svga->hwtnl,
                            svga->curr.rast->hw_unfilled );

   svga_hwtnl_set_flatshade( svga->hwtnl,
                             svga->curr.rast->templ.flatshade,
                             svga->curr.rast->templ.flatshade_first );

   /* Validate/emit any dirty hardware state before drawing. */
   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_range_elements( svga->hwtnl,
                                         index_buffer, index_size,
                                         min_index, max_index,
                                         prim, start, count, 0 );
   if (ret != PIPE_OK)
      goto retry;

   /* Commands referencing user-space vertex buffers must be flushed
    * before the caller can modify those buffers again.
    */
   if (svga->curr.any_user_vertex_buffers) {
      ret = svga_hwtnl_flush( svga->hwtnl );
      if (ret != PIPE_OK)
         goto retry;
   }

   return PIPE_OK;

retry:
   svga_context_flush( svga, NULL );

   if (do_retry) {
      return retry_draw_range_elements( svga,
                                        index_buffer, index_size,
                                        min_index, max_index,
                                        prim, start, count,
                                        FALSE );
   }

   return ret;
}
/* Example #6 */
/**
 * Draw a range of vertex arrays, bringing the hardware draw state up
 * to date first.  If the draw fails with PIPE_ERROR_OUT_OF_MEMORY the
 * context is flushed and the whole sequence is retried exactly once
 * (do_retry guards against unbounded recursion).
 */
static enum pipe_error
retry_draw_arrays( struct svga_context *svga,
                   unsigned prim,
                   unsigned start,
                   unsigned count,
                   boolean do_retry )
{
   enum pipe_error ret;

   svga_hwtnl_set_unfilled( svga->hwtnl,
                            svga->curr.rast->hw_unfilled );

   svga_hwtnl_set_flatshade( svga->hwtnl,
                             svga->curr.rast->templ.flatshade,
                             svga->curr.rast->templ.flatshade_first );

   /* Validate/emit any dirty hardware state before drawing. */
   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_arrays( svga->hwtnl, prim,
                                 start, count );
   if (ret != PIPE_OK)
      goto retry;

   /* Commands referencing user-space vertex buffers must be flushed
    * before the caller can modify those buffers again.
    */
   if (svga->curr.any_user_vertex_buffers) {
      ret = svga_hwtnl_flush( svga->hwtnl );
      if (ret != PIPE_OK)
         goto retry;
   }

   return PIPE_OK;   /* was bare 0; use the enum */

retry:
   /* Only an out-of-memory failure is recoverable by flushing. */
   if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry) {
      svga_context_flush( svga, NULL );

      return retry_draw_arrays( svga,
                                prim,
                                start,
                                count,
                                FALSE );
   }

   return ret;
}
/* Example #7 */
/**
 * Run the update() callback of every state atom whose dirty bits
 * intersect the currently-dirty state flags.
 *
 * In debug builds this additionally verifies atom ordering: no atom
 * may generate (dirty) state that an earlier atom in the list has
 * already examined.
 *
 * \param svga   context being updated
 * \param atoms  NULL-terminated list of tracked state atoms
 * \param state  in/out bitmask of dirty state flags
 * \return PIPE_OK, or the first error reported by a flush or an atom.
 */
static enum pipe_error
update_state(struct svga_context *svga,
             const struct svga_tracked_state *atoms[],
             unsigned *state)
{
#ifdef DEBUG
   boolean debug = TRUE;
#else
   boolean debug = FALSE;
#endif
   enum pipe_error ret = PIPE_OK;
   unsigned i;

   /* Queued hardware commands depend on the *current* state, so flush
    * them before any atom changes it.
    */
   ret = svga_hwtnl_flush( svga->hwtnl );
   if (ret != PIPE_OK)
      return ret;

   if (debug) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      unsigned examined, prev;

      examined = 0;
      prev = *state;

      for (i = 0; atoms[i] != NULL; i++) {
         unsigned generated;

         assert(atoms[i]->dirty);
         assert(atoms[i]->update);

         if (check_state(*state, atoms[i]->dirty)) {
            if (0)   /* flip to 1 for per-atom update tracing */
               debug_printf("update: %s\n", atoms[i]->name);
            ret = atoms[i]->update( svga, *state );
            if (ret != PIPE_OK)
               return ret;
         }

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, prev, *state);
         if (check_state(examined, generated)) {
            debug_printf("state atom %s generated state already examined\n",
                         atoms[i]->name);
            assert(0);
         }

         prev = *state;
         accumulate_state(&examined, atoms[i]->dirty);
      }
   }
   else {
      /* Release version: just run the dirty atoms in order. */
      for (i = 0; atoms[i] != NULL; i++) {
         if (check_state(*state, atoms[i]->dirty)) {
            ret = atoms[i]->update( svga, *state );
            if (ret != PIPE_OK)
               return ret;
         }
      }
   }

   return PIPE_OK;
}
/* Example #8 */
#ifdef DEBUG
/**
 * Return the size in bytes of one vertex element of the given
 * declaration type (asserts and returns 0 for unknown types).
 */
static unsigned
svga_vdecl_element_size(unsigned type)
{
   switch (type) {
   case SVGA3D_DECLTYPE_FLOAT1:    return 4;
   case SVGA3D_DECLTYPE_FLOAT2:    return 4 * 2;
   case SVGA3D_DECLTYPE_FLOAT3:    return 4 * 3;
   case SVGA3D_DECLTYPE_FLOAT4:    return 4 * 4;
   case SVGA3D_DECLTYPE_D3DCOLOR:  return 4;
   case SVGA3D_DECLTYPE_UBYTE4:    return 1 * 4;
   case SVGA3D_DECLTYPE_SHORT2:    return 2 * 2;
   case SVGA3D_DECLTYPE_SHORT4:    return 2 * 4;
   case SVGA3D_DECLTYPE_UBYTE4N:   return 1 * 4;
   case SVGA3D_DECLTYPE_SHORT2N:   return 2 * 2;
   case SVGA3D_DECLTYPE_SHORT4N:   return 2 * 4;
   case SVGA3D_DECLTYPE_USHORT2N:  return 2 * 2;
   case SVGA3D_DECLTYPE_USHORT4N:  return 2 * 4;
   case SVGA3D_DECLTYPE_UDEC3:     return 4;
   case SVGA3D_DECLTYPE_DEC3N:     return 4;
   case SVGA3D_DECLTYPE_FLOAT16_2: return 2 * 2;
   case SVGA3D_DECLTYPE_FLOAT16_4: return 2 * 4;
   default:
      assert(0);
      return 0;
   }
}

/**
 * Return the number of indices consumed by a primitive range of the
 * given type and primitive count (asserts and returns 0 for unknown
 * primitive types).
 */
static unsigned
svga_prim_index_count(unsigned prim_type, unsigned prim_count)
{
   switch (prim_type) {
   case SVGA3D_PRIMITIVE_POINTLIST:     return prim_count;
   case SVGA3D_PRIMITIVE_LINELIST:      return prim_count * 2;
   case SVGA3D_PRIMITIVE_LINESTRIP:     return prim_count + 1;
   case SVGA3D_PRIMITIVE_TRIANGLELIST:  return prim_count * 3;
   case SVGA3D_PRIMITIVE_TRIANGLESTRIP: return prim_count + 2;
   case SVGA3D_PRIMITIVE_TRIANGLEFAN:   return prim_count + 2;
   default:
      assert(0);
      return 0;
   }
}
#endif /* DEBUG */

/**
 * Queue one primitive range for later submission to the hardware.
 * In debug builds, first sanity-check that the range and the current
 * vertex declarations stay within their buffers.
 */
enum pipe_error
svga_hwtnl_prim(struct svga_hwtnl *hwtnl,
                const SVGA3dPrimitiveRange * range,
                unsigned min_index,
                unsigned max_index, struct pipe_resource *ib)
{
   enum pipe_error ret = PIPE_OK;

#ifdef DEBUG
   {
      unsigned i;
      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         struct pipe_resource *vb = hwtnl->cmd.vdecl_vb[i];
         unsigned size = vb ? vb->width0 : 0;
         unsigned offset = hwtnl->cmd.vdecl[i].array.offset;
         unsigned stride = hwtnl->cmd.vdecl[i].array.stride;
         int index_bias = (int) range->indexBias + hwtnl->index_bias;
         unsigned width = svga_vdecl_element_size(hwtnl->cmd.vdecl[i].identity.type);

         assert(vb);
         assert(size);
         assert(offset < size);
         assert(min_index <= max_index);

         /* The first (biased) vertex read must fit in the buffer. */
         if (index_bias >= 0) {
            assert(offset + index_bias * stride + width <= size);
         }

         /*
          * min_index/max_index are merely conservative guesses, so we can't
          * make buffer overflow detection based on their values.
          */
      }

      assert(range->indexWidth == range->indexArray.stride);

      if (ib) {
         unsigned size = ib->width0;
         unsigned offset = range->indexArray.offset;
         unsigned stride = range->indexArray.stride;
         unsigned count = svga_prim_index_count(range->primType,
                                                range->primitiveCount);

         assert(size);
         assert(offset < size);
         assert(stride);

         /* Every index the range will fetch must fit in the buffer. */
         assert(offset + count * stride <= size);
      }
   }
#endif

   /* Make room for one more primitive, flushing the batch if needed. */
   if (hwtnl->cmd.prim_count + 1 >= QSZ) {
      ret = svga_hwtnl_flush(hwtnl);
      if (ret != PIPE_OK)
         return ret;
   }

   /* min/max indices are relative to bias */
   hwtnl->cmd.min_index[hwtnl->cmd.prim_count] = min_index;
   hwtnl->cmd.max_index[hwtnl->cmd.prim_count] = max_index;

   hwtnl->cmd.prim[hwtnl->cmd.prim_count] = *range;
   hwtnl->cmd.prim[hwtnl->cmd.prim_count].indexBias += hwtnl->index_bias;

   /* Hold a reference on the index buffer until the batch is flushed. */
   pipe_resource_reference(&hwtnl->cmd.prim_ib[hwtnl->cmd.prim_count], ib);
   hwtnl->cmd.prim_count++;

   return ret;
}
/**
 * Submit the swtnl vertex declarations and the (single) vertex buffer
 * to the hardware TNL module.  Does nothing unless the declarations
 * have changed (svga->swtnl.new_vdecl).
 */
static void
svga_vbuf_submit_state( struct svga_vbuf_render *svga_render )
{
   struct svga_context *svga = svga_render->svga;
   SVGA3dVertexDecl vdecl[PIPE_MAX_ATTRIBS];
   enum pipe_error ret;
   unsigned i;
   /* All-zero array passed to svga_hwtnl_vertex_decls() below --
    * presumably per-element buffer indexes (this path uses only one
    * vertex buffer); TODO confirm against the hwtnl interface.
    */
   static const unsigned zero[PIPE_MAX_ATTRIBS] = {0};

   /* if the vdecl or vbuf hasn't changed do nothing */
   if (!svga->swtnl.new_vdecl)
      return;

   /* Work on a copy so the cached declarations keep unbiased offsets. */
   memcpy(vdecl, svga_render->vdecl, sizeof(vdecl));

   /* flush the hw state */
   ret = svga_hwtnl_flush(svga->hwtnl);
   if (ret != PIPE_OK) {
      /* First flush failed (presumably out of command-buffer space):
       * kick the context and retry once.
       */
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_flush(svga->hwtnl);
      /* if we hit this path we might become synced with hw */
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   /* Bias every element offset by the start of the current vertex data. */
   for (i = 0; i < svga_render->vdecl_count; i++) {
      vdecl[i].array.offset += svga_render->vdecl_offset;
   }

   svga_hwtnl_vertex_decls(svga->hwtnl,
                           svga_render->vdecl_count,
                           vdecl,
                           zero,
                           svga_render->layout_id);

   /* Specify the vertex buffer (there's only ever one) */
   {
      struct pipe_vertex_buffer vb;
      vb.buffer = svga_render->vbuf;
      vb.buffer_offset = svga_render->vdecl_offset;
      /* NOTE(review): reads vdecl[0] -- assumes vdecl_count >= 1 and
       * that all elements share element 0's stride; confirm callers
       * never reach here with zero declarations.
       */
      vb.stride = vdecl[0].array.stride;
      vb.user_buffer = NULL;
      svga_hwtnl_vertex_buffers(svga->hwtnl, 1, &vb);
   }

   /* We have already taken care of flatshading, so let the hwtnl
    * module use whatever is most convenient:
    */
   if (svga->state.sw.need_pipeline) {
      svga_hwtnl_set_flatshade(svga->hwtnl, FALSE, FALSE);
      svga_hwtnl_set_fillmode(svga->hwtnl, PIPE_POLYGON_MODE_FILL);
   }
   else {
      svga_hwtnl_set_flatshade( svga->hwtnl,
                                svga->curr.rast->templ.flatshade ||
                                svga->state.hw_draw.fs->uses_flat_interp,
                                svga->curr.rast->templ.flatshade_first );

      svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);
   }

   svga->swtnl.new_vdecl = FALSE;
}