Example #1
static void svga_end_query(struct pipe_context *pipe, 
                           struct pipe_query *q)
{
   struct svga_context *svga = svga_context( pipe );
   struct svga_query *sq = svga_query( q );
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
   assert(svga->sq == sq);

   svga_hwtnl_flush_retry(svga);
   
   /* Set to PENDING before sending EndQuery. */
   sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;

   ret = SVGA3D_EndQuery( svga->swc, sq->type, sq->hwbuf);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_EndQuery( svga->swc, sq->type, sq->hwbuf);
      assert(ret == PIPE_OK);
   }
   
   /* TODO: Delay flushing. We don't really need to flush here, just ensure
    * that there is one flush before svga_get_query_result attempts to get the
    * result.
    */
   svga_context_flush(svga, NULL);

   svga->sq = NULL;
}
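The EndQuery path above shows the flush-and-retry idiom that recurs throughout these examples: a command that no longer fits in the current command buffer returns an error, the buffer is flushed, and the command is re-emitted into the now-empty buffer, where it is expected to succeed. As a rough sketch, the pattern could be factored into a hypothetical helper macro (not part of the driver code shown here):

/* Hypothetical sketch: emit a command expression; if the command buffer
 * is full, flush it and re-emit.  The retry should succeed because the
 * buffer is empty after the flush.
 */
#define SVGA_EMIT_RETRY(svga, cmd)            \
   do {                                       \
      enum pipe_error ret_ = (cmd);           \
      if (ret_ != PIPE_OK) {                  \
         svga_context_flush((svga), NULL);    \
         ret_ = (cmd);                        \
         assert(ret_ == PIPE_OK);             \
      }                                       \
   } while (0)

With such a helper, the retry block above would collapse to a single line:
SVGA_EMIT_RETRY(svga, SVGA3D_EndQuery(svga->swc, sq->type, sq->hwbuf));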
Example #2
/**
 * Clear the given surface to the specified value.
 * No masking, no scissor (clear entire buffer).
 */
void
svga_clear(struct pipe_context *pipe, unsigned buffers,
           const union pipe_color_union *color,
	   double depth, unsigned stencil)
{
   struct svga_context *svga = svga_context( pipe );
   enum pipe_error ret;

   if (buffers & PIPE_CLEAR_COLOR)
      SVGA_DBG(DEBUG_DMA, "clear sid %p\n",
               svga_surface(svga->curr.framebuffer.cbufs[0])->handle);

   /* flush any queued prims (don't want them to appear after the clear!) */
   svga_hwtnl_flush_retry(svga);

   ret = try_clear( svga, buffers, color, depth, stencil );

   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      /* Flush command buffer and retry:
       */
      svga_context_flush( svga, NULL );

      ret = try_clear( svga, buffers, color, depth, stencil );
   }

   /*
    * Mark target surfaces as dirty.
    * TODO: Mark only the cleared surfaces.
    */
   svga_mark_surfaces_dirty(svga);

   assert(ret == PIPE_OK);
}
Example #3
static void svga_delete_vs_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_vertex_shader *vs = (struct svga_vertex_shader *)shader;
   struct svga_shader_result *result, *tmp;
   enum pipe_error ret;

   svga_hwtnl_flush_retry( svga );

   draw_delete_vertex_shader(svga->swtnl.draw, vs->draw_shader);
   
   for (result = vs->base.results; result; result = tmp ) {
      tmp = result->next;

      ret = SVGA3D_DestroyShader(svga->swc, 
                                 result->id,
                                 SVGA3D_SHADERTYPE_VS );
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_DestroyShader(svga->swc, 
                                    result->id,
                                    SVGA3D_SHADERTYPE_VS );
         assert(ret == PIPE_OK);
      }

      svga_destroy_shader_result( result );
   }

   FREE((void *)vs->base.tokens);
   FREE(vs);
}
Example #4
/**
 * Flush the primitive queue if this buffer is referenced.
 *
 * Otherwise DMA commands on the referenced buffer would be emitted too late.
 */
void svga_hwtnl_flush_buffer( struct svga_context *svga,
                              struct pipe_resource *buffer )
{
   if (svga_hwtnl_is_buffer_referred(svga->hwtnl, buffer)) {
      svga_hwtnl_flush_retry(svga);
   }
}
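A hypothetical caller (for illustration only; not part of the driver code above) would invoke svga_hwtnl_flush_buffer() right before emitting DMA commands on a buffer, so the transfer cannot overtake queued draws that read from it:

/* Hypothetical caller sketch: flush any queued primitives that reference
 * `buffer` before emitting a DMA transfer on it, so the transfer is
 * ordered after those draws in the command stream.
 */
static void
upload_after_draws(struct svga_context *svga, struct pipe_resource *buffer)
{
   svga_hwtnl_flush_buffer(svga, buffer);
   /* ... emit SVGA3D DMA commands touching `buffer` here ... */
}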
Example #5
static void
svga_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_velems_state *velems = (struct svga_velems_state *) state;

   if (svga_have_vgpu10(svga)) {
      enum pipe_error ret;

      svga_hwtnl_flush_retry(svga);

      ret = SVGA3D_vgpu10_DestroyElementLayout(svga->swc, velems->id);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_DestroyElementLayout(svga->swc, velems->id);
         assert(ret == PIPE_OK);
      }

      if (velems->id == svga->state.hw_draw.layout_id)
         svga->state.hw_draw.layout_id = SVGA3D_INVALID_ID;

      util_bitmask_clear(svga->input_element_object_id_bm, velems->id);
      velems->id = SVGA3D_INVALID_ID;
   }

   FREE(velems);
}
Example #6
static void
svga_delete_gs_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_geometry_shader *gs = (struct svga_geometry_shader *)shader;
   struct svga_geometry_shader *next_gs;
   struct svga_shader_variant *variant, *tmp;
   enum pipe_error ret;

   svga_hwtnl_flush_retry(svga);

   /* Start deletion from the original geometry shader state */
   if (gs->base.parent != NULL)
      gs = (struct svga_geometry_shader *)gs->base.parent;

   /* Free the list of geometry shaders */
   while (gs) {
      next_gs = (struct svga_geometry_shader *)gs->base.next;

      if (gs->base.stream_output != NULL)
         svga_delete_stream_output(svga, gs->base.stream_output);

      draw_delete_geometry_shader(svga->swtnl.draw, gs->draw_shader);

      for (variant = gs->base.variants; variant; variant = tmp) {
         tmp = variant->next;

         /* Check if deleting currently bound shader */
         if (variant == svga->state.hw_draw.gs) {
            ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_GS, NULL);
            if (ret != PIPE_OK) {
               svga_context_flush(svga, NULL);
               ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_GS, NULL);
               assert(ret == PIPE_OK);
            }
            svga->state.hw_draw.gs = NULL;
         }

         ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_GS, variant);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_GS,
                                              variant);
            assert(ret == PIPE_OK);
         }
      }

      FREE((void *)gs->base.tokens);
      FREE(gs);
      gs = next_gs;
   }
}
Example #7
static void
svga_delete_vs_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_vertex_shader *vs = (struct svga_vertex_shader *)shader;
   struct svga_shader_variant *variant, *tmp;
   enum pipe_error ret;

   svga_hwtnl_flush_retry(svga);

   assert(vs->base.parent == NULL);

   /* Check if there is a generated geometry shader to go with this
    * vertex shader. If there is, then delete the geometry shader as well.
    */
   if (vs->gs != NULL) {
      svga->pipe.delete_gs_state(&svga->pipe, vs->gs);
   }

   if (vs->base.stream_output != NULL)
      svga_delete_stream_output(svga, vs->base.stream_output);

   draw_delete_vertex_shader(svga->swtnl.draw, vs->draw_shader);

   for (variant = vs->base.variants; variant; variant = tmp) {
      tmp = variant->next;

      /* Check if deleting currently bound shader */
      if (variant == svga->state.hw_draw.vs) {
         ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, NULL);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, NULL);
            assert(ret == PIPE_OK);
         }
         svga->state.hw_draw.vs = NULL;
      }

      ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
         assert(ret == PIPE_OK);
      }
   }

   FREE((void *)vs->base.tokens);
   FREE(vs);
}
Example #8
static void svga_begin_query(struct pipe_context *pipe, 
                             struct pipe_query *q)
{
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_context *svga = svga_context( pipe );
   struct svga_query *sq = svga_query( q );
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
   
   assert(!svga->sq);

   /* Need to flush out buffered drawing commands so that they don't
    * get counted in the query results.
    */
   svga_hwtnl_flush_retry(svga);
   
   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care about the pending query result. We
       * cannot let go of the existing buffer and just get a new one, because
       * its storage may be reused for other purposes and clobbered by the
       * host when it determines the query result. So the only option here is
       * to wait for the existing query's result -- not a big deal, given that
       * no sane application would do this.
       */
      uint64_t result;

      svga_get_query_result(pipe, q, TRUE, &result);
      
      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }
   
   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   ret = SVGA3D_BeginQuery(svga->swc, sq->type);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->type);
      assert(ret == PIPE_OK);
   }

   svga->sq = sq;
}
Example #9
/* Emit all operations pending on host surfaces.
 */ 
void svga_surfaces_flush(struct svga_context *svga)
{
   unsigned i;

   /* Emit buffered drawing commands.
    */
   svga_hwtnl_flush_retry( svga );

   /* Emit back-copy from render target view to texture.
    */
   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      if (svga->curr.framebuffer.cbufs[i])
         svga_propagate_surface(svga, svga->curr.framebuffer.cbufs[i]);
   }

   if (svga->curr.framebuffer.zsbuf)
      svga_propagate_surface(svga, svga->curr.framebuffer.zsbuf);

}
Example #10
/* Emit all operations pending on host surfaces.
 */ 
void svga_surfaces_flush(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   unsigned i;

   /* Emit buffered drawing commands.
    */
   svga_hwtnl_flush_retry( svga );

   /* Emit back-copy from render target view to texture.
    */
   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if (svga->curr.framebuffer.cbufs[i])
         svga_propagate_surface(svga, svga->curr.framebuffer.cbufs[i]);
   }

   if (svga->curr.framebuffer.zsbuf)
      svga_propagate_surface(svga, svga->curr.framebuffer.zsbuf);

}
Example #11
static
void svga_delete_fs_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_fragment_shader *fs = (struct svga_fragment_shader *) shader;
   struct svga_shader_result *result, *tmp;
   enum pipe_error ret;

   svga_hwtnl_flush_retry( svga );

   draw_delete_fragment_shader(svga->swtnl.draw, fs->draw_shader);

   for (result = fs->base.results; result; result = tmp ) {
      tmp = result->next;

      ret = SVGA3D_DestroyShader(svga->swc, 
                                 result->id,
                                 SVGA3D_SHADERTYPE_PS );
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_DestroyShader(svga->swc, 
                                    result->id,
                                    SVGA3D_SHADERTYPE_PS );
         assert(ret == PIPE_OK);
      }

      util_bitmask_clear( svga->fs_bm, result->id );

      svga_destroy_shader_result( result );

      /*
       * Remove stale references to this result to ensure a new result at
       * the same address will be detected as a change.
       */
      if (result == svga->state.hw_draw.fs)
         svga->state.hw_draw.fs = NULL;
   }

   FREE((void *)fs->base.tokens);
   FREE(fs);
}
Example #12
static void svga_set_framebuffer_state(struct pipe_context *pipe,
				       const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   unsigned i;

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
      if (dst->cbufs[i] && dst->cbufs[i] != s) {
         if (svga_surface_needs_propagation(dst->cbufs[i])) {
            propagate = TRUE;
            break;
         }
      }
   }

   if (propagate) {
      /* make sure that drawing calls come before propagation calls */
      svga_hwtnl_flush_retry( svga );
   
      for (i = 0; i < dst->nr_cbufs; i++) {
         struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
         if (dst->cbufs[i] && dst->cbufs[i] != s)
            svga_propagate_surface(svga, dst->cbufs[i]);
      }
   }

   /* XXX: Actually the virtual hardware may support render targets of
    * different sizes, depending on the host API and driver, but since we
    * cannot know that, we make no such assumption here. */
   for (i = 0; i < fb->nr_cbufs; ++i) {
      if (fb->zsbuf && fb->cbufs[i]) {
         assert(fb->zsbuf->width == fb->cbufs[i]->width); 
         assert(fb->zsbuf->height == fb->cbufs[i]->height); 
      }
   }

   util_copy_framebuffer_state(dst, fb);

   /* Set the rendered-to flags */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = dst->cbufs[i];
      if (s) {
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }

   if (svga->curr.framebuffer.zsbuf)
   {
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      case PIPE_FORMAT_Z24X8_UNORM:
      case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }

      /* Set rendered-to flag */
      {
         struct pipe_surface *s = dst->zsbuf;
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}
Example #13
/**
 * Define a vgpu10 sampler state.
 */
static void
define_sampler_state_object(struct svga_context *svga,
                            struct svga_sampler_state *ss,
                            const struct pipe_sampler_state *ps)
{
   uint8_t max_aniso = (uint8_t) 255; /* XXX fix me */
   boolean anisotropic;
   uint8 compare_func;
   SVGA3dFilter filter;
   SVGA3dRGBAFloat bcolor;
   unsigned try;
   float min_lod, max_lod;

   assert(svga_have_vgpu10(svga));

   anisotropic = ss->aniso_level > 1.0f;

   filter = translate_filter_mode(ps->min_mip_filter,
                                  ps->min_img_filter,
                                  ps->mag_img_filter,
                                  anisotropic,
                                  ss->compare_mode);

   compare_func = translate_comparison_func(ss->compare_func);

   COPY_4V(bcolor.value, ps->border_color.f);

   assert(ps->min_lod <= ps->max_lod);

   if (ps->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) {
      /* just use the base level image */
      min_lod = max_lod = 0.0f;
   }
   else {
      min_lod = ps->min_lod;
      max_lod = ps->max_lod;
   }

   /* If shadow comparisons are enabled, create two sampler states: one
    * with the given shadow compare mode, another with shadow comparison off.
    * We need the latter because in some cases we have to do the shadow
    * compare in the shader, and we don't want to do it twice.
    */
   STATIC_ASSERT(PIPE_TEX_COMPARE_NONE == 0);
   STATIC_ASSERT(PIPE_TEX_COMPARE_R_TO_TEXTURE == 1);
   ss->id[1] = SVGA3D_INVALID_ID;

   unsigned i;
   for (i = 0; i <= ss->compare_mode; i++) {
      ss->id[i] = util_bitmask_add(svga->sampler_object_id_bm);

      /* Loop in case command buffer is full and we need to flush and retry */
      for (try = 0; try < 2; try++) {
         enum pipe_error ret =
            SVGA3D_vgpu10_DefineSamplerState(svga->swc,
                                             ss->id[i],
                                             filter,
                                             ss->addressu,
                                             ss->addressv,
                                             ss->addressw,
                                             ss->lod_bias, /* float */
                                             max_aniso,
                                             compare_func,
                                             bcolor,
                                             min_lod,       /* float */
                                             max_lod);      /* float */
         if (ret == PIPE_OK)
            break;
         svga_context_flush(svga, NULL);
      }

      /* turn off the shadow compare option for second iteration */
      filter &= ~SVGA3D_FILTER_COMPARE;
   }
}


static void *
svga_create_sampler_state(struct pipe_context *pipe,
                          const struct pipe_sampler_state *sampler)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_sampler_state *cso = CALLOC_STRUCT( svga_sampler_state );

   if (!cso)
      return NULL;

   cso->mipfilter = translate_mip_filter(sampler->min_mip_filter);
   cso->magfilter = translate_img_filter( sampler->mag_img_filter );
   cso->minfilter = translate_img_filter( sampler->min_img_filter );
   cso->aniso_level = MAX2( sampler->max_anisotropy, 1 );
   if (sampler->max_anisotropy)
      cso->magfilter = cso->minfilter = SVGA3D_TEX_FILTER_ANISOTROPIC;
   cso->lod_bias = sampler->lod_bias;
   cso->addressu = translate_wrap_mode(sampler->wrap_s);
   cso->addressv = translate_wrap_mode(sampler->wrap_t);
   cso->addressw = translate_wrap_mode(sampler->wrap_r);
   cso->normalized_coords = sampler->normalized_coords;
   cso->compare_mode = sampler->compare_mode;
   cso->compare_func = sampler->compare_func;

   {
      uint32 r = float_to_ubyte(sampler->border_color.f[0]);
      uint32 g = float_to_ubyte(sampler->border_color.f[1]);
      uint32 b = float_to_ubyte(sampler->border_color.f[2]);
      uint32 a = float_to_ubyte(sampler->border_color.f[3]);

      cso->bordercolor = (a << 24) | (r << 16) | (g << 8) | b;
   }

   /* No SVGA3D support for:
    *    - min/max LOD clamping
    */
   cso->min_lod = 0;
   cso->view_min_lod = MAX2((int) (sampler->min_lod + 0.5), 0);
   cso->view_max_lod = MAX2((int) (sampler->max_lod + 0.5), 0);

   /* Use min_mipmap */
   if (svga->debug.use_min_mipmap) {
      if (cso->view_min_lod == cso->view_max_lod) {
         cso->min_lod = cso->view_min_lod;
         cso->view_min_lod = 0;
         cso->view_max_lod = 1000; /* Just a high number */
         cso->mipfilter = SVGA3D_TEX_FILTER_NONE;
      }
   }

   if (svga_have_vgpu10(svga)) {
      define_sampler_state_object(svga, cso, sampler);
   }

   SVGA_DBG(DEBUG_SAMPLERS,
            "New sampler: min %u, view(min %u, max %u) lod, mipfilter %s\n",
            cso->min_lod, cso->view_min_lod, cso->view_max_lod,
            cso->mipfilter == SVGA3D_TEX_FILTER_NONE ? "SVGA3D_TEX_FILTER_NONE" : "SOMETHING");

   svga->hud.num_sampler_objects++;
   SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
                        SVGA_STATS_COUNT_SAMPLER);

   return cso;
}


static void
svga_bind_sampler_states(struct pipe_context *pipe,
                         enum pipe_shader_type shader,
                         unsigned start,
                         unsigned num,
                         void **samplers)
{
   struct svga_context *svga = svga_context(pipe);
   unsigned i;
   boolean any_change = FALSE;

   assert(shader < PIPE_SHADER_TYPES);
   assert(start + num <= PIPE_MAX_SAMPLERS);

   /* Pre-VGPU10 only supports FS textures */
   if (!svga_have_vgpu10(svga) && shader != PIPE_SHADER_FRAGMENT)
      return;

   for (i = 0; i < num; i++) {
      if (svga->curr.sampler[shader][start + i] != samplers[i])
         any_change = TRUE;
      svga->curr.sampler[shader][start + i] = samplers[i];
   }

   if (!any_change) {
      return;
   }

   /* find highest non-null sampler[] entry */
   {
      unsigned j = MAX2(svga->curr.num_samplers[shader], start + num);
      while (j > 0 && svga->curr.sampler[shader][j - 1] == NULL)
         j--;
      svga->curr.num_samplers[shader] = j;
   }

   svga->dirty |= SVGA_NEW_SAMPLER;
}


static void
svga_delete_sampler_state(struct pipe_context *pipe, void *sampler)
{
   struct svga_sampler_state *ss = (struct svga_sampler_state *) sampler;
   struct svga_context *svga = svga_context(pipe);

   if (svga_have_vgpu10(svga)) {
      unsigned i;
      for (i = 0; i < 2; i++) {
         enum pipe_error ret;

         if (ss->id[i] != SVGA3D_INVALID_ID) {
            svga_hwtnl_flush_retry(svga);

            ret = SVGA3D_vgpu10_DestroySamplerState(svga->swc, ss->id[i]);
            if (ret != PIPE_OK) {
               svga_context_flush(svga, NULL);
               ret = SVGA3D_vgpu10_DestroySamplerState(svga->swc, ss->id[i]);
            }
            util_bitmask_clear(svga->sampler_object_id_bm, ss->id[i]);
         }
      }
   }

   FREE(sampler);
   svga->hud.num_sampler_objects--;
}


static struct pipe_sampler_view *
svga_create_sampler_view(struct pipe_context *pipe,
                         struct pipe_resource *texture,
                         const struct pipe_sampler_view *templ)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_pipe_sampler_view *sv = CALLOC_STRUCT(svga_pipe_sampler_view);

   if (!sv) {
      return NULL;
   }

   sv->base = *templ;
   sv->base.reference.count = 1;
   sv->base.texture = NULL;
   pipe_resource_reference(&sv->base.texture, texture);

   sv->base.context = pipe;
   sv->id = SVGA3D_INVALID_ID;

   svga->hud.num_samplerview_objects++;
   SVGA_STATS_COUNT_INC(svga_screen(svga->pipe.screen)->sws,
                        SVGA_STATS_COUNT_SAMPLERVIEW);

   return &sv->base;
}


static void
svga_sampler_view_destroy(struct pipe_context *pipe,
                          struct pipe_sampler_view *view)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_pipe_sampler_view *sv = svga_pipe_sampler_view(view);

   if (svga_have_vgpu10(svga) && sv->id != SVGA3D_INVALID_ID) {
      if (view->context != pipe) {
         /* The SVGA3D device will generate an error (and on Linux, cause
          * us to abort) if we try to destroy a shader resource view from
          * a context other than the one it was created with.  Skip the
          * SVGA3D_vgpu10_DestroyShaderResourceView() and leak the sampler
          * view for now.  This should only sometimes happen when a shared
          * texture is deleted.
          */
         _debug_printf("context mismatch in %s\n", __func__);
      }
      else {
         enum pipe_error ret;

         svga_hwtnl_flush_retry(svga); /* XXX is this needed? */

         ret = SVGA3D_vgpu10_DestroyShaderResourceView(svga->swc, sv->id);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_vgpu10_DestroyShaderResourceView(svga->swc, sv->id);
         }
         util_bitmask_clear(svga->sampler_view_id_bm, sv->id);
      }
   }

   pipe_resource_reference(&sv->base.texture, NULL);

   FREE(sv);
   svga->hud.num_samplerview_objects--;
}


static void
svga_set_sampler_views(struct pipe_context *pipe,
                       enum pipe_shader_type shader,
                       unsigned start,
                       unsigned num,
                       struct pipe_sampler_view **views)
{
   struct svga_context *svga = svga_context(pipe);
   unsigned flag_1d = 0;
   unsigned flag_srgb = 0;
   uint i;
   boolean any_change = FALSE;

   assert(shader < PIPE_SHADER_TYPES);
   assert(start + num <= ARRAY_SIZE(svga->curr.sampler_views[shader]));

   /* Pre-VGPU10 only supports FS textures */
   if (!svga_have_vgpu10(svga) && shader != PIPE_SHADER_FRAGMENT)
      return;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_SETSAMPLERVIEWS);

   /* This bit of code works around a quirk in the CSO module.
    * If start=num=0 it means all sampler views should be released.
    * Note that the CSO module treats sampler views for fragment shaders
    * differently than other shader types.
    */
   if (start == 0 && num == 0 && svga->curr.num_sampler_views[shader] > 0) {
      for (i = 0; i < svga->curr.num_sampler_views[shader]; i++) {
         pipe_sampler_view_release(pipe, &svga->curr.sampler_views[shader][i]);
      }
      any_change = TRUE;
   }

   for (i = 0; i < num; i++) {
      enum pipe_texture_target target;

      if (svga->curr.sampler_views[shader][start + i] != views[i]) {
         /* Note: we're using pipe_sampler_view_release() here to work around
          * a possible crash when the old view belongs to another context that
          * was already destroyed.
          */
         pipe_sampler_view_release(pipe, &svga->curr.sampler_views[shader][start + i]);
         pipe_sampler_view_reference(&svga->curr.sampler_views[shader][start + i],
                                     views[i]);
         any_change = TRUE;
      }

      if (!views[i])
         continue;

      if (util_format_is_srgb(views[i]->format))
         flag_srgb |= 1 << (start + i);

      target = views[i]->target;
      if (target == PIPE_TEXTURE_1D) {
         flag_1d |= 1 << (start + i);
      } else if (target == PIPE_TEXTURE_RECT) {
         /* If the size of the bound texture changes, we need to emit new
          * const buffer values.
          */
         svga->dirty |= SVGA_NEW_TEXTURE_CONSTS;
      } else if (target == PIPE_BUFFER) {
         /* If the size of the bound buffer changes, we need to emit new
          * const buffer values.
          */
         svga->dirty |= SVGA_NEW_TEXTURE_CONSTS;
      }
   }

   if (!any_change) {
      goto done;
   }

   /* find highest non-null sampler_views[] entry */
   {
      unsigned j = MAX2(svga->curr.num_sampler_views[shader], start + num);
      while (j > 0 && svga->curr.sampler_views[shader][j - 1] == NULL)
         j--;
      svga->curr.num_sampler_views[shader] = j;
   }

   svga->dirty |= SVGA_NEW_TEXTURE_BINDING;

   if (flag_srgb != svga->curr.tex_flags.flag_srgb ||
       flag_1d != svga->curr.tex_flags.flag_1d) {
      svga->dirty |= SVGA_NEW_TEXTURE_FLAGS;
      svga->curr.tex_flags.flag_1d = flag_1d;
      svga->curr.tex_flags.flag_srgb = flag_srgb;
   }

   /* Check if any of the sampler view resources collide with the framebuffer
    * color buffers or depth stencil resource. If so, set the NEW_FRAME_BUFFER
    * dirty bit so that emit_framebuffer can be invoked to create a backed
    * view for the conflicting surface view.
    */
   if (svga_check_sampler_framebuffer_resource_collision(svga, shader)) {
      svga->dirty |= SVGA_NEW_FRAME_BUFFER;
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
}

/**
 * Clean up sampler, sampler view state at context destruction time
 */
void
svga_cleanup_sampler_state(struct svga_context *svga)
{
   enum pipe_shader_type shader;

   for (shader = 0; shader <= PIPE_SHADER_GEOMETRY; shader++) {
      unsigned i;

      for (i = 0; i < svga->state.hw_draw.num_sampler_views[shader]; i++) {
         pipe_sampler_view_release(&svga->pipe,
                                   &svga->state.hw_draw.sampler_views[shader][i]);
      }
   }
   
   /* free polygon stipple state */
   if (svga->polygon_stipple.sampler) {
      svga->pipe.delete_sampler_state(&svga->pipe, svga->polygon_stipple.sampler);
   }

   if (svga->polygon_stipple.sampler_view) {
      svga->pipe.sampler_view_destroy(&svga->pipe,
                                      &svga->polygon_stipple.sampler_view->base);
   }
   pipe_resource_reference(&svga->polygon_stipple.texture, NULL);
}

void
svga_init_sampler_functions( struct svga_context *svga )
{
   svga->pipe.create_sampler_state = svga_create_sampler_state;
   svga->pipe.bind_sampler_states = svga_bind_sampler_states;
   svga->pipe.delete_sampler_state = svga_delete_sampler_state;
   svga->pipe.set_sampler_views = svga_set_sampler_views;
   svga->pipe.create_sampler_view = svga_create_sampler_view;
   svga->pipe.sampler_view_destroy = svga_sampler_view_destroy;
}
Example #14
static void svga_set_framebuffer_state(struct pipe_context *pipe,
				       const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   int i;

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      if (dst->cbufs[i] && dst->cbufs[i] != fb->cbufs[i])
         if (svga_surface_needs_propagation(dst->cbufs[i]))
            propagate = TRUE;
   }

   if (propagate) {
      /* make sure that drawing calls come before propagation calls */
      svga_hwtnl_flush_retry( svga );
   
      for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
         if (dst->cbufs[i] && dst->cbufs[i] != fb->cbufs[i])
            svga_propagate_surface(pipe, dst->cbufs[i]);
   }

   /* XXX: Actually the virtual hardware may support render targets of
    * different sizes, depending on the host API and driver, but since we
    * cannot know that, we make no such assumption here. */
   for (i = 0; i < fb->nr_cbufs; ++i) {
      if (fb->zsbuf && fb->cbufs[i]) {
         assert(fb->zsbuf->width == fb->cbufs[i]->width); 
         assert(fb->zsbuf->height == fb->cbufs[i]->height); 
      }
   }

   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
      pipe_surface_reference(&dst->cbufs[i], fb->cbufs[i]);
   pipe_surface_reference(&dst->zsbuf, fb->zsbuf);


   if (svga->curr.framebuffer.zsbuf)
   {
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_S8Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
      case PIPE_FORMAT_Z24S8_UNORM:
      case PIPE_FORMAT_Z24X8_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}
Example #15
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct svga_context *svga = svga_context( pipe );
   unsigned reduced_prim = u_reduced_prim( info->mode );
   unsigned count = info->count;
   enum pipe_error ret = 0;
   boolean needed_swtnl;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);

   svga->hud.num_draw_calls++;  /* for SVGA_QUERY_NUM_DRAW_CALLS */

   if (u_reduced_prim(info->mode) == PIPE_PRIM_TRIANGLES &&
       svga->curr.rast->templ.cull_face == PIPE_FACE_FRONT_AND_BACK)
      goto done;

   /*
    * Mark currently bound target surfaces as dirty; it doesn't really
    * matter that this is done before drawing.
    *
    * TODO: If we ever normally return something other than true, we
    * should not mark the surfaces as dirty in that case.
    */
   svga_mark_surfaces_dirty(svga_context(pipe));

   if (svga->curr.reduced_prim != reduced_prim) {
      svga->curr.reduced_prim = reduced_prim;
      svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
   }

   if (need_fallback_prim_restart(svga, info)) {
      enum pipe_error r;
      r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
      assert(r == PIPE_OK);
      (void) r;
      goto done;
   }

   if (!u_trim_pipe_prim( info->mode, &count ))
      goto done;

   needed_swtnl = svga->state.sw.need_swtnl;

   svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

   if (svga->state.sw.need_swtnl) {
      svga->hud.num_fallbacks++;  /* for SVGA_QUERY_NUM_FALLBACKS */
      if (!needed_swtnl) {
         /*
          * We're switching from HW to SW TNL.  SW TNL will require mapping all
          * currently bound vertex buffers, some of which may already be
          * referenced in the current command buffer as a result of previous HW
          * TNL. So flush now, to prevent the context from flushing while a
          * referenced vertex buffer is mapped.
          */

         svga_context_flush(svga, NULL);
      }

      /* Avoid leaking the previous hwtnl bias to swtnl */
      svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
      ret = svga_swtnl_draw_vbo( svga, info );
   }
   else {
      if (info->indexed && svga->curr.ib.buffer) {
         unsigned offset;

         assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
         offset = svga->curr.ib.offset / svga->curr.ib.index_size;

         ret = retry_draw_range_elements( svga,
                                          svga->curr.ib.buffer,
                                          svga->curr.ib.index_size,
                                          info->index_bias,
                                          info->min_index,
                                          info->max_index,
                                          info->mode,
                                          info->start + offset,
                                          count,
                                          info->start_instance,
                                          info->instance_count,
                                          TRUE );
      }
      else {
         ret = retry_draw_arrays(svga, info->mode, info->start, count,
                                 info->start_instance, info->instance_count,
                                 TRUE);
      }
   }

   /* XXX: Silence warnings, do something sensible here? */
   (void)ret;

   if (SVGA_DEBUG & DEBUG_FLUSH) {
      svga_hwtnl_flush_retry( svga );
      svga_context_flush(svga, NULL);
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
}
Example #16
static void svga_set_framebuffer_state(struct pipe_context *pipe,
				       const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   unsigned i;

   /* make sure any pending drawing calls are flushed before changing
    * the framebuffer state
    */
   svga_hwtnl_flush_retry(svga);

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
      if (dst->cbufs[i] && dst->cbufs[i] != s) {
         if (svga_surface_needs_propagation(dst->cbufs[i])) {
            propagate = TRUE;
            break;
         }
      }
   }

   if (propagate) {
      for (i = 0; i < dst->nr_cbufs; i++) {
         struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
         if (dst->cbufs[i] && dst->cbufs[i] != s)
            svga_propagate_surface(svga, dst->cbufs[i]);
      }
   }

   /* Check that all surfaces are the same size.
    * The virtual hardware may actually support render targets of different
    * sizes, depending on the host API and driver, so we only warn rather
    * than assert here.
    */
   {
      int width = 0, height = 0;
      if (fb->zsbuf) {
         width = fb->zsbuf->width;
         height = fb->zsbuf->height;
      }
      for (i = 0; i < fb->nr_cbufs; ++i) {
         if (fb->cbufs[i]) {
            if (width && height) {
               if (fb->cbufs[i]->width != width ||
                   fb->cbufs[i]->height != height) {
                  debug_warning("Mixed-size color and depth/stencil surfaces "
                                "may not work properly");
               }
            }
            else {
               width = fb->cbufs[i]->width;
               height = fb->cbufs[i]->height;
            }
         }
      }
   }

   util_copy_framebuffer_state(dst, fb);

   /* Set the rendered-to flags */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = dst->cbufs[i];
      if (s) {
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }

   if (svga->curr.framebuffer.zsbuf)
   {
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      case PIPE_FORMAT_Z24X8_UNORM:
      case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }

      /* Set rendered-to flag */
      {
         struct pipe_surface *s = dst->zsbuf;
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}
Example #17
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct svga_context *svga = svga_context( pipe );
   unsigned reduced_prim = u_reduced_prim( info->mode );
   unsigned count = info->count;
   enum pipe_error ret = 0;
   boolean needed_swtnl;

   if (!u_trim_pipe_prim( info->mode, &count ))
      return;

   /*
    * Mark currently bound target surfaces as dirty; it doesn't really
    * matter that this is done before drawing.
    *
    * TODO: If we ever normally return something other than true, we
    * should not mark the surfaces as dirty in that case.
    */
   svga_mark_surfaces_dirty(svga_context(pipe));

   if (svga->curr.reduced_prim != reduced_prim) {
      svga->curr.reduced_prim = reduced_prim;
      svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
   }
   
   needed_swtnl = svga->state.sw.need_swtnl;

   svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

#ifdef DEBUG
   if (svga->curr.vs->base.id == svga->debug.disable_shader ||
       svga->curr.fs->base.id == svga->debug.disable_shader)
      return;
#endif

   if (svga->state.sw.need_swtnl) {
      if (!needed_swtnl) {
         /*
          * We're switching from HW to SW TNL.  SW TNL will require mapping all
          * currently bound vertex buffers, some of which may already be
          * referenced in the current command buffer as a result of previous HW
          * TNL. So flush now, to prevent the context from flushing while a
          * referenced vertex buffer is mapped.
          */

         svga_context_flush(svga, NULL);
      }

      /* Avoid leaking the previous hwtnl bias to swtnl */
      svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
      ret = svga_swtnl_draw_vbo( svga, info );
   }
   else {
      if (info->indexed && svga->curr.ib.buffer) {
         unsigned offset;

         assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
         offset = svga->curr.ib.offset / svga->curr.ib.index_size;

         ret = retry_draw_range_elements( svga,
                                          svga->curr.ib.buffer,
                                          svga->curr.ib.index_size,
                                          info->index_bias,
                                          info->min_index,
                                          info->max_index,
                                          info->mode,
                                          info->start + offset,
                                          info->count,
                                          info->instance_count,
                                          TRUE );
      }
      else {
         ret = retry_draw_arrays( svga,
                                  info->mode,
                                  info->start,
                                  info->count,
                                  info->instance_count,
                                  TRUE );
      }
   }

   /* XXX: Silence warnings, do something sensible here? */
   (void)ret;

   svga_release_user_upl_buffers( svga );

   if (SVGA_DEBUG & DEBUG_FLUSH) {
      svga_hwtnl_flush_retry( svga );
      svga_context_flush(svga, NULL);
   }
}
Example #18
static enum pipe_error
emit_rss_vgpu10(struct svga_context *svga, unsigned dirty)
{
   enum pipe_error ret = PIPE_OK;

   svga_hwtnl_flush_retry(svga);

   if (dirty & (SVGA_NEW_BLEND | SVGA_NEW_BLEND_COLOR)) {
      const struct svga_blend_state *curr;
      float blend_factor[4];

      if (svga_has_any_integer_cbufs(svga)) {
         /* Blending is not supported in integer-valued render targets. */
         curr = svga->noop_blend;
         blend_factor[0] =
         blend_factor[1] =
         blend_factor[2] =
         blend_factor[3] = 0;
      }
      else {
         curr = svga->curr.blend;

         if (curr->blend_color_alpha) {
            blend_factor[0] =
            blend_factor[1] =
            blend_factor[2] =
            blend_factor[3] = svga->curr.blend_color.color[3];
         }
         else {
            blend_factor[0] = svga->curr.blend_color.color[0];
            blend_factor[1] = svga->curr.blend_color.color[1];
            blend_factor[2] = svga->curr.blend_color.color[2];
            blend_factor[3] = svga->curr.blend_color.color[3];
         }
      }

      /* Set/bind the blend state object */
      if (svga->state.hw_draw.blend_id != curr->id ||
          svga->state.hw_draw.blend_factor[0] != blend_factor[0] ||
          svga->state.hw_draw.blend_factor[1] != blend_factor[1] ||
          svga->state.hw_draw.blend_factor[2] != blend_factor[2] ||
          svga->state.hw_draw.blend_factor[3] != blend_factor[3] ||
          svga->state.hw_draw.blend_sample_mask != svga->curr.sample_mask) {
         ret = SVGA3D_vgpu10_SetBlendState(svga->swc, curr->id,
                                           blend_factor,
                                           svga->curr.sample_mask);
         if (ret != PIPE_OK)
            return ret;

         svga->state.hw_draw.blend_id = curr->id;
         svga->state.hw_draw.blend_factor[0] = blend_factor[0];
         svga->state.hw_draw.blend_factor[1] = blend_factor[1];
         svga->state.hw_draw.blend_factor[2] = blend_factor[2];
         svga->state.hw_draw.blend_factor[3] = blend_factor[3];
         svga->state.hw_draw.blend_sample_mask = svga->curr.sample_mask;
      }
   }

   if (dirty & (SVGA_NEW_DEPTH_STENCIL_ALPHA | SVGA_NEW_STENCIL_REF)) {
      const struct svga_depth_stencil_state *curr = svga->curr.depth;
      unsigned curr_ref = svga->curr.stencil_ref.ref_value[0];

      if (curr->id != svga->state.hw_draw.depth_stencil_id ||
          curr_ref != svga->state.hw_draw.stencil_ref) {
         /* Set/bind the depth/stencil state object */
         ret = SVGA3D_vgpu10_SetDepthStencilState(svga->swc, curr->id,
                                                  curr_ref);
         if (ret != PIPE_OK)
            return ret;

         svga->state.hw_draw.depth_stencil_id = curr->id;
         svga->state.hw_draw.stencil_ref = curr_ref;
      }
   }

   if (dirty & (SVGA_NEW_REDUCED_PRIMITIVE | SVGA_NEW_RAST)) {
      const struct svga_rasterizer_state *rast;

      if (svga->curr.reduced_prim == PIPE_PRIM_POINTS &&
          svga->curr.gs && svga->curr.gs->wide_point) {

         /* If we are drawing a point sprite, we will need to
          * bind a non-culling rasterizer state object
          */
         rast = get_no_cull_rasterizer_state(svga);
      }
      else {
         rast = svga->curr.rast;
      }

      if (svga->state.hw_draw.rasterizer_id != rast->id) {
         /* Set/bind the rasterizer state object */
         ret = SVGA3D_vgpu10_SetRasterizerState(svga->swc, rast->id);
         if (ret != PIPE_OK)
            return ret;
         svga->state.hw_draw.rasterizer_id = rast->id;
      }
   }
   return PIPE_OK;
}
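Besides the flush on entry, emit_rss_vgpu10() above illustrates the redundant-state filter used across these examples: each Set* command is emitted only when the cached copy in svga->state.hw_draw differs from the current state, and the cache is updated only after the command has been queued successfully. A stripped-down sketch of that idiom (hypothetical helper, built only from calls and fields that appear above):

/* Hypothetical sketch of the redundant-state filter in emit_rss_vgpu10():
 * emit the bind command only when the cached hardware state differs, and
 * update the cache only after the command has been queued successfully.
 */
static enum pipe_error
set_rasterizer_if_changed(struct svga_context *svga, unsigned id)
{
   enum pipe_error ret;

   if (svga->state.hw_draw.rasterizer_id == id)
      return PIPE_OK;   /* already bound; skip the command */

   ret = SVGA3D_vgpu10_SetRasterizerState(svga->swc, id);
   if (ret != PIPE_OK)
      return ret;       /* caller is expected to flush and retry */

   svga->state.hw_draw.rasterizer_id = id;
   return PIPE_OK;
}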
Example #19
static boolean
svga_draw_range_elements( struct pipe_context *pipe,
                          struct pipe_buffer *index_buffer,
                          unsigned index_size,
                          unsigned min_index,
                          unsigned max_index,
                          unsigned prim, unsigned start, unsigned count)
{
    struct svga_context *svga = svga_context( pipe );
    unsigned reduced_prim = u_reduced_prim(prim);
    enum pipe_error ret = 0;

    if (!u_trim_pipe_prim( prim, &count ))
        return TRUE;

    /*
     * Mark currently bound target surfaces as dirty; it doesn't really
     * matter that this is done before drawing.
     *
     * TODO: If we ever normally return something other than true, we
     * should not mark the surfaces as dirty in that case.
     */
    svga_mark_surfaces_dirty(svga_context(pipe));

    if (svga->curr.reduced_prim != reduced_prim) {
        svga->curr.reduced_prim = reduced_prim;
        svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
    }

    svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

#ifdef DEBUG
    if (svga->curr.vs->base.id == svga->debug.disable_shader ||
            svga->curr.fs->base.id == svga->debug.disable_shader)
        return 0;
#endif

    if (svga->state.sw.need_swtnl)
    {
        ret = svga_swtnl_draw_range_elements( svga,
                                              index_buffer,
                                              index_size,
                                              min_index, max_index,
                                              prim,
                                              start, count );
    }
    else {
        if (index_buffer) {
            ret = retry_draw_range_elements( svga,
                                             index_buffer,
                                             index_size,
                                             min_index,
                                             max_index,
                                             prim,
                                             start,
                                             count,
                                             TRUE );
        }
        else {
            ret = retry_draw_arrays( svga,
                                     prim,
                                     start,
                                     count,
                                     TRUE );
        }
    }

    if (SVGA_DEBUG & DEBUG_FLUSH) {
        static unsigned id;
        debug_printf("%s %d\n", __FUNCTION__, id++);
        svga_hwtnl_flush_retry( svga );
        svga_context_flush(svga, NULL);
    }

    return ret == PIPE_OK;
}