/**
 * Draw (non-indexed) vertex arrays through the hardware TNL path.
 *
 * Uploads any user-space vertex buffers, validates hardware draw state,
 * then emits the draw.  On PIPE_ERROR_OUT_OF_MEMORY the command buffer is
 * flushed and the whole sequence is retried exactly once (do_retry=FALSE
 * on the recursive call prevents infinite recursion).
 *
 * \param prim            pipe primitive type (PIPE_PRIM_x)
 * \param start           index of first vertex
 * \param count           number of vertices
 * \param instance_count  number of instances to draw
 * \param do_retry        whether to flush and retry once on OOM
 * \return PIPE_OK on success, or the error from the failing step
 */
static enum pipe_error
retry_draw_arrays( struct svga_context *svga,
                   unsigned prim,
                   unsigned start,
                   unsigned count,
                   unsigned instance_count,
                   boolean do_retry )
{
   enum pipe_error ret;

   svga_hwtnl_set_unfilled( svga->hwtnl,
                            svga->curr.rast->hw_unfilled );

   svga_hwtnl_set_flatshade( svga->hwtnl,
                             svga->curr.rast->templ.flatshade,
                             svga->curr.rast->templ.flatshade_first );

   ret = svga_upload_user_buffers( svga, start, count, instance_count );
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_arrays( svga->hwtnl, prim, start, count );
   if (ret != PIPE_OK)
      goto retry;

   /* Was 'return 0' — be explicit about the enum constant. */
   return PIPE_OK;

retry:
   if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry) {
      /* Flush to free up command-buffer/GMR space, then retry once. */
      svga_context_flush( svga, NULL );

      return retry_draw_arrays( svga, prim, start, count, instance_count,
                                FALSE );
   }

   return ret;
}
/**
 * Draw (non-indexed) vertex arrays through the hardware TNL path,
 * with instancing support and draw-time statistics.
 *
 * Validates hardware draw state, then emits the draw.  On
 * PIPE_ERROR_OUT_OF_MEMORY the command buffer is flushed and the sequence
 * is retried exactly once (do_retry=FALSE on the recursive call).
 * All exits funnel through 'done' so the SVGA_STATS_TIME_PUSH/POP pair
 * stays balanced.
 *
 * \param prim            primitive type
 * \param start           index of first vertex
 * \param count           number of vertices
 * \param start_instance  first instance index
 * \param instance_count  number of instances to draw
 * \param do_retry        whether to flush and retry once on OOM
 * \return PIPE_OK on success, else the error from the failing step
 */
static enum pipe_error
retry_draw_arrays( struct svga_context *svga,
                   enum pipe_prim_type prim,
                   unsigned start,
                   unsigned count,
                   unsigned start_instance,
                   unsigned instance_count,
                   boolean do_retry )
{
   enum pipe_error ret;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWARRAYS);

   svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);

   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret != PIPE_OK)
      goto retry;

   /** determine if flatshade is to be used after svga_update_state()
    * in case the fragment shader is changed.
    */
   svga_hwtnl_set_flatshade(svga->hwtnl,
                            svga->curr.rast->templ.flatshade ||
                            svga->state.hw_draw.fs->uses_flat_interp,
                            svga->curr.rast->templ.flatshade_first);

   ret = svga_hwtnl_draw_arrays(svga->hwtnl, prim, start, count,
                                start_instance, instance_count);
   if (ret != PIPE_OK)
      goto retry;

   /* success: skip the OOM-retry block but still pop the stats timer */
   goto done;

retry:
   if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry) {
      /* Flush to free up command-buffer space, then retry exactly once. */
      svga_context_flush( svga, NULL );
      ret = retry_draw_arrays(svga, prim, start, count,
                              start_instance, instance_count, FALSE);
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}
static void svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info) { struct svga_context *svga = svga_context( pipe ); unsigned reduced_prim = u_reduced_prim( info->mode ); unsigned count = info->count; enum pipe_error ret = 0; boolean needed_swtnl; if (!u_trim_pipe_prim( info->mode, &count )) return; /* * Mark currently bound target surfaces as dirty * doesn't really matter if it is done before drawing. * * TODO If we ever normaly return something other then * true we should not mark it as dirty then. */ svga_mark_surfaces_dirty(svga_context(pipe)); if (svga->curr.reduced_prim != reduced_prim) { svga->curr.reduced_prim = reduced_prim; svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE; } needed_swtnl = svga->state.sw.need_swtnl; svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL ); #ifdef DEBUG if (svga->curr.vs->base.id == svga->debug.disable_shader || svga->curr.fs->base.id == svga->debug.disable_shader) return; #endif if (svga->state.sw.need_swtnl) { if (!needed_swtnl) { /* * We're switching from HW to SW TNL. SW TNL will require mapping all * currently bound vertex buffers, some of which may already be * referenced in the current command buffer as result of previous HW * TNL. So flush now, to prevent the context to flush while a referred * vertex buffer is mapped. 
*/ svga_context_flush(svga, NULL); } /* Avoid leaking the previous hwtnl bias to swtnl */ svga_hwtnl_set_index_bias( svga->hwtnl, 0 ); ret = svga_swtnl_draw_vbo( svga, info ); } else { if (info->indexed && svga->curr.ib.buffer) { unsigned offset; assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0); offset = svga->curr.ib.offset / svga->curr.ib.index_size; ret = retry_draw_range_elements( svga, svga->curr.ib.buffer, svga->curr.ib.index_size, info->index_bias, info->min_index, info->max_index, info->mode, info->start + offset, info->count, info->instance_count, TRUE ); } else { ret = retry_draw_arrays( svga, info->mode, info->start, info->count, info->instance_count, TRUE ); } } /* XXX: Silence warnings, do something sensible here? */ (void)ret; svga_release_user_upl_buffers( svga ); if (SVGA_DEBUG & DEBUG_FLUSH) { svga_hwtnl_flush_retry( svga ); svga_context_flush(svga, NULL); } }
/**
 * pipe_context::draw_vbo entry point.
 *
 * Dispatches a draw either to the software TNL fallback
 * (svga_swtnl_draw_vbo) or to the hardware path
 * (retry_draw_range_elements / retry_draw_arrays).  Also handles the
 * primitive-restart fallback, an early-out for fully culled triangles,
 * and draw-call statistics.  All exits go through 'done' so the
 * SVGA_STATS_TIME_PUSH/POP pair stays balanced.
 */
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct svga_context *svga = svga_context( pipe );
   unsigned reduced_prim = u_reduced_prim( info->mode );
   unsigned count = info->count;
   enum pipe_error ret = 0;
   boolean needed_swtnl;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);

   svga->hud.num_draw_calls++;  /* for SVGA_QUERY_NUM_DRAW_CALLS */

   /* Trivially culled: triangles with both faces culled draw nothing. */
   if (u_reduced_prim(info->mode) == PIPE_PRIM_TRIANGLES &&
       svga->curr.rast->templ.cull_face == PIPE_FACE_FRONT_AND_BACK)
      goto done;

   /*
    * Mark currently bound target surfaces as dirty
    * doesn't really matter if it is done before drawing.
    *
    * TODO If we ever normaly return something other then
    * true we should not mark it as dirty then.
    */
   svga_mark_surfaces_dirty(svga_context(pipe));

   if (svga->curr.reduced_prim != reduced_prim) {
      svga->curr.reduced_prim = reduced_prim;
      svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
   }

   /* Emulate primitive restart by splitting into sub-draws when the
    * device can't do it natively for this draw.
    */
   if (need_fallback_prim_restart(svga, info)) {
      enum pipe_error r;
      r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
      assert(r == PIPE_OK);
      (void) r;
      goto done;
   }

   /* Nothing to draw if the primitive can't yield a complete prim. */
   if (!u_trim_pipe_prim( info->mode, &count ))
      goto done;

   /* Remember whether we were already on the SW path before validation. */
   needed_swtnl = svga->state.sw.need_swtnl;

   svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL );

   if (svga->state.sw.need_swtnl) {
      svga->hud.num_fallbacks++;  /* for SVGA_QUERY_NUM_FALLBACKS */
      if (!needed_swtnl) {
         /*
          * We're switching from HW to SW TNL.  SW TNL will require mapping all
          * currently bound vertex buffers, some of which may already be
          * referenced in the current command buffer as result of previous HW
          * TNL. So flush now, to prevent the context to flush while a referred
          * vertex buffer is mapped.
          */
         svga_context_flush(svga, NULL);
      }

      /* Avoid leaking the previous hwtnl bias to swtnl */
      svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
      ret = svga_swtnl_draw_vbo( svga, info );
   }
   else {
      if (info->indexed && svga->curr.ib.buffer) {
         unsigned offset;

         assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
         offset = svga->curr.ib.offset / svga->curr.ib.index_size;

         ret = retry_draw_range_elements( svga,
                                          svga->curr.ib.buffer,
                                          svga->curr.ib.index_size,
                                          info->index_bias,
                                          info->min_index,
                                          info->max_index,
                                          info->mode,
                                          info->start + offset,
                                          count,
                                          info->start_instance,
                                          info->instance_count,
                                          TRUE );
      }
      else {
         ret = retry_draw_arrays(svga, info->mode, info->start, count,
                                 info->start_instance, info->instance_count,
                                 TRUE);
      }
   }

   /* XXX: Silence warnings, do something sensible here? */
   (void)ret;

   if (SVGA_DEBUG & DEBUG_FLUSH) {
      svga_hwtnl_flush_retry( svga );
      svga_context_flush(svga, NULL);
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
;
}
static boolean svga_draw_range_elements( struct pipe_context *pipe, struct pipe_buffer *index_buffer, unsigned index_size, unsigned min_index, unsigned max_index, unsigned prim, unsigned start, unsigned count) { struct svga_context *svga = svga_context( pipe ); unsigned reduced_prim = u_reduced_prim(prim); enum pipe_error ret = 0; if (!u_trim_pipe_prim( prim, &count )) return TRUE; /* * Mark currently bound target surfaces as dirty * doesn't really matter if it is done before drawing. * * TODO If we ever normaly return something other then * true we should not mark it as dirty then. */ svga_mark_surfaces_dirty(svga_context(pipe)); if (svga->curr.reduced_prim != reduced_prim) { svga->curr.reduced_prim = reduced_prim; svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE; } svga_update_state_retry( svga, SVGA_STATE_NEED_SWTNL ); #ifdef DEBUG if (svga->curr.vs->base.id == svga->debug.disable_shader || svga->curr.fs->base.id == svga->debug.disable_shader) return 0; #endif if (svga->state.sw.need_swtnl) { ret = svga_swtnl_draw_range_elements( svga, index_buffer, index_size, min_index, max_index, prim, start, count ); } else { if (index_buffer) { ret = retry_draw_range_elements( svga, index_buffer, index_size, min_index, max_index, prim, start, count, TRUE ); } else { ret = retry_draw_arrays( svga, prim, start, count, TRUE ); } } if (SVGA_DEBUG & DEBUG_FLUSH) { static unsigned id; debug_printf("%s %d\n", __FUNCTION__, id++); svga_hwtnl_flush_retry( svga ); svga_context_flush(svga, NULL); } return ret == PIPE_OK; }