void svga_update_state_retry( struct svga_context *svga, unsigned max_level ) { enum pipe_error ret; ret = svga_update_state( svga, max_level ); if (ret == PIPE_ERROR_OUT_OF_MEMORY) { svga_context_flush(svga, NULL); ret = svga_update_state( svga, max_level ); } assert( ret == PIPE_OK ); }
/**
 * Issue an indexed, possibly instanced, draw call.  If any step fails
 * (e.g. the command buffer is full), flush the context and retry the
 * whole draw exactly once (do_retry guards against infinite recursion).
 */
static enum pipe_error
retry_draw_range_elements( struct svga_context *svga,
                           struct pipe_resource *index_buffer,
                           unsigned index_size, int index_bias,
                           unsigned min_index, unsigned max_index,
                           enum pipe_prim_type prim, unsigned start,
                           unsigned count, unsigned start_instance,
                           unsigned instance_count,
                           boolean do_retry )
{
   enum pipe_error ret;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWELEMENTS);

   svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);

   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret == PIPE_OK) {
      /* Flatshade selection must happen after svga_update_state() in
       * case the fragment shader was changed by it.
       */
      svga_hwtnl_set_flatshade(svga->hwtnl,
                               svga->curr.rast->templ.flatshade ||
                               svga->state.hw_draw.fs->uses_flat_interp,
                               svga->curr.rast->templ.flatshade_first);

      ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                           index_buffer, index_size,
                                           index_bias, min_index, max_index,
                                           prim, start, count,
                                           start_instance, instance_count);
   }

   if (ret != PIPE_OK) {
      /* Likely a full command buffer: flush, then retry once. */
      svga_context_flush( svga, NULL );

      if (do_retry) {
         ret = retry_draw_range_elements(svga,
                                         index_buffer, index_size,
                                         index_bias, min_index, max_index,
                                         prim, start, count,
                                         start_instance, instance_count,
                                         FALSE);
      }
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}
static enum pipe_error try_clear(struct svga_context *svga, unsigned buffers, const float *rgba, double depth, unsigned stencil) { int ret = PIPE_OK; SVGA3dRect rect = { 0, 0, 0, 0 }; boolean restore_viewport = FALSE; SVGA3dClearFlag flags = 0; struct pipe_framebuffer_state *fb = &svga->curr.framebuffer; unsigned color = 0; ret = svga_update_state(svga, SVGA_STATE_HW_CLEAR); if (ret) return ret; if ((buffers & PIPE_CLEAR_COLOR) && fb->cbufs[0]) { flags |= SVGA3D_CLEAR_COLOR; util_pack_color(rgba, PIPE_FORMAT_A8R8G8B8_UNORM, &color); rect.w = fb->cbufs[0]->width; rect.h = fb->cbufs[0]->height; } if ((buffers & PIPE_CLEAR_DEPTHSTENCIL) && fb->zsbuf) { flags |= SVGA3D_CLEAR_DEPTH; if (svga->curr.framebuffer.zsbuf->format == PIPE_FORMAT_Z24S8_UNORM) flags |= SVGA3D_CLEAR_STENCIL; rect.w = MAX2(rect.w, fb->zsbuf->width); rect.h = MAX2(rect.h, fb->zsbuf->height); } if (memcmp(&rect, &svga->state.hw_clear.viewport, sizeof(rect)) != 0) { restore_viewport = TRUE; ret = SVGA3D_SetViewport(svga->swc, &rect); if (ret) return ret; } ret = SVGA3D_ClearRect(svga->swc, flags, color, depth, stencil, rect.x, rect.y, rect.w, rect.h); if (ret != PIPE_OK) return ret; if (restore_viewport) { memcpy(&rect, &svga->state.hw_clear.viewport, sizeof rect); ret = SVGA3D_SetViewport(svga->swc, &rect); } return ret; }
/**
 * Issue an indexed draw after uploading any user-space vertex data.
 * On failure of any step the context is flushed and the whole draw is
 * retried exactly once (do_retry prevents further recursion).
 */
static enum pipe_error
retry_draw_range_elements( struct svga_context *svga,
                           struct pipe_resource *index_buffer,
                           unsigned index_size, int index_bias,
                           unsigned min_index, unsigned max_index,
                           unsigned prim, unsigned start, unsigned count,
                           unsigned instance_count,
                           boolean do_retry )
{
   enum pipe_error ret;

   svga_hwtnl_set_unfilled( svga->hwtnl, svga->curr.rast->hw_unfilled );
   svga_hwtnl_set_flatshade( svga->hwtnl,
                             svga->curr.rast->templ.flatshade,
                             svga->curr.rast->templ.flatshade_first );

   /* Upload only the range of vertices this draw actually references. */
   ret = svga_upload_user_buffers( svga, min_index + index_bias,
                                   max_index - min_index + 1,
                                   instance_count );
   if (ret == PIPE_OK)
      ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret == PIPE_OK)
      ret = svga_hwtnl_draw_range_elements( svga->hwtnl,
                                            index_buffer, index_size,
                                            index_bias,
                                            min_index, max_index,
                                            prim, start, count );
   if (ret == PIPE_OK)
      return PIPE_OK;

   /* Something failed: flush and possibly retry once. */
   svga_context_flush( svga, NULL );

   if (do_retry)
      return retry_draw_range_elements( svga, index_buffer, index_size,
                                        index_bias, min_index, max_index,
                                        prim, start, count,
                                        instance_count, FALSE );
   return ret;
}
/**
 * Issue an indexed draw through the hardware TNL module, flushing and
 * retrying the whole draw exactly once on failure (do_retry guards
 * against infinite recursion).
 *
 * \return PIPE_OK on success, else the error from the failing step
 *
 * Fix: initialize the `enum pipe_error` with its named constant
 * `PIPE_OK` rather than the raw literal `0`, and compare against
 * `PIPE_OK` explicitly for consistency (PIPE_OK == 0, so behavior is
 * unchanged).
 */
static enum pipe_error
retry_draw_range_elements( struct svga_context *svga,
                           struct pipe_buffer *index_buffer,
                           unsigned index_size,
                           unsigned min_index, unsigned max_index,
                           unsigned prim, unsigned start, unsigned count,
                           boolean do_retry )
{
   enum pipe_error ret = PIPE_OK;

   svga_hwtnl_set_unfilled( svga->hwtnl, svga->curr.rast->hw_unfilled );
   svga_hwtnl_set_flatshade( svga->hwtnl,
                             svga->curr.rast->templ.flatshade,
                             svga->curr.rast->templ.flatshade_first );

   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_range_elements( svga->hwtnl,
                                         index_buffer, index_size,
                                         min_index, max_index,
                                         prim, start, count, 0 );
   if (ret != PIPE_OK)
      goto retry;

   /* User vertex buffers must be flushed before they can be recycled. */
   if (svga->curr.any_user_vertex_buffers) {
      ret = svga_hwtnl_flush( svga->hwtnl );
      if (ret != PIPE_OK)
         goto retry;
   }

   return PIPE_OK;

retry:
   svga_context_flush( svga, NULL );

   if (do_retry) {
      return retry_draw_range_elements( svga, index_buffer, index_size,
                                        min_index, max_index,
                                        prim, start, count, FALSE );
   }

   return ret;
}
/**
 * Issue a non-indexed draw after uploading any user-space vertex
 * buffers.  If a step fails with an out-of-memory error, flush the
 * context and retry the whole draw exactly once (do_retry prevents
 * further recursion).
 *
 * \return PIPE_OK on success, else the error from the failing step
 *
 * Fix: the success path returned the raw literal `0` where every
 * other path in this function uses the named enum constant; return
 * `PIPE_OK` (== 0) for consistency.  Behavior is unchanged.
 */
static enum pipe_error
retry_draw_arrays( struct svga_context *svga,
                   unsigned prim, unsigned start, unsigned count,
                   unsigned instance_count,
                   boolean do_retry )
{
   enum pipe_error ret;

   svga_hwtnl_set_unfilled( svga->hwtnl, svga->curr.rast->hw_unfilled );
   svga_hwtnl_set_flatshade( svga->hwtnl,
                             svga->curr.rast->templ.flatshade,
                             svga->curr.rast->templ.flatshade_first );

   ret = svga_upload_user_buffers( svga, start, count, instance_count );
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_arrays( svga->hwtnl, prim, start, count );
   if (ret != PIPE_OK)
      goto retry;

   return PIPE_OK;

retry:
   /* Only an out-of-memory failure is worth a flush-and-retry. */
   if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry) {
      svga_context_flush( svga, NULL );

      return retry_draw_arrays( svga, prim, start, count,
                                instance_count, FALSE );
   }

   return ret;
}
/**
 * Issue a non-indexed, possibly instanced, draw call.  An
 * out-of-memory failure (typically a full command buffer) triggers a
 * flush followed by exactly one retry (do_retry prevents recursion).
 */
static enum pipe_error
retry_draw_arrays( struct svga_context *svga,
                   enum pipe_prim_type prim, unsigned start,
                   unsigned count, unsigned start_instance,
                   unsigned instance_count,
                   boolean do_retry )
{
   enum pipe_error ret;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWARRAYS);

   svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);

   ret = svga_update_state( svga, SVGA_STATE_HW_DRAW );
   if (ret == PIPE_OK) {
      /* Flatshade selection must happen after svga_update_state() in
       * case the fragment shader was changed by it.
       */
      svga_hwtnl_set_flatshade(svga->hwtnl,
                               svga->curr.rast->templ.flatshade ||
                               svga->state.hw_draw.fs->uses_flat_interp,
                               svga->curr.rast->templ.flatshade_first);

      ret = svga_hwtnl_draw_arrays(svga->hwtnl, prim, start, count,
                                   start_instance, instance_count);
   }

   if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry) {
      /* Free up command-buffer space, then try the draw once more. */
      svga_context_flush( svga, NULL );

      ret = retry_draw_arrays(svga, prim, start, count,
                              start_instance, instance_count,
                              FALSE);
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}
static enum pipe_error try_clear(struct svga_context *svga, unsigned buffers, const union pipe_color_union *color, double depth, unsigned stencil) { enum pipe_error ret = PIPE_OK; SVGA3dRect rect = { 0, 0, 0, 0 }; boolean restore_viewport = FALSE; SVGA3dClearFlag flags = 0; struct pipe_framebuffer_state *fb = &svga->curr.framebuffer; union util_color uc = {0}; ret = svga_update_state(svga, SVGA_STATE_HW_CLEAR); if (ret != PIPE_OK) return ret; if (svga->rebind.rendertargets) { ret = svga_reemit_framebuffer_bindings(svga); if (ret != PIPE_OK) { return ret; } } if (buffers & PIPE_CLEAR_COLOR) { flags |= SVGA3D_CLEAR_COLOR; util_pack_color(color->f, PIPE_FORMAT_B8G8R8A8_UNORM, &uc); rect.w = fb->width; rect.h = fb->height; } if ((buffers & PIPE_CLEAR_DEPTHSTENCIL) && fb->zsbuf) { if (buffers & PIPE_CLEAR_DEPTH) flags |= SVGA3D_CLEAR_DEPTH; if ((svga->curr.framebuffer.zsbuf->format == PIPE_FORMAT_S8_UINT_Z24_UNORM) && (buffers & PIPE_CLEAR_STENCIL)) flags |= SVGA3D_CLEAR_STENCIL; rect.w = MAX2(rect.w, fb->zsbuf->width); rect.h = MAX2(rect.h, fb->zsbuf->height); } if (memcmp(&rect, &svga->state.hw_clear.viewport, sizeof(rect)) != 0) { restore_viewport = TRUE; ret = SVGA3D_SetViewport(svga->swc, &rect); if (ret != PIPE_OK) return ret; } ret = SVGA3D_ClearRect(svga->swc, flags, uc.ui, (float) depth, stencil, rect.x, rect.y, rect.w, rect.h); if (ret != PIPE_OK) return ret; if (restore_viewport) { memcpy(&rect, &svga->state.hw_clear.viewport, sizeof rect); ret = SVGA3D_SetViewport(svga->swc, &rect); } return ret; }
/**
 * Software-TNL fallback: run the current draw through the gallium draw
 * module instead of the hardware TNL path.  Maps the bound vertex,
 * index, and vertex-shader constant buffers for CPU reading, hands
 * them to the draw module, issues the draw, then unmaps everything.
 *
 * \param info  the draw parameters passed down from the state tracker
 * \return the result of the SWTNL state update (the draw-module calls
 *         themselves return void)
 */
enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga,
                    const struct pipe_draw_info *info)
{
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
   struct pipe_transfer *ib_transfer = NULL;
   struct pipe_transfer *cb_transfer = NULL;
   struct draw_context *draw = svga->swtnl.draw;
   unsigned i;
   const void *map;
   enum pipe_error ret;

   assert(!svga->dirty);
   assert(svga->state.sw.need_swtnl);
   assert(draw);

   /* Make sure that the need_swtnl flag does not go away */
   svga->state.sw.in_swtnl_draw = TRUE;

   ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
   if (ret != PIPE_OK) {
      /* On failure, flush and retry the state update exactly once. */
      svga_context_flush(svga, NULL);
      ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   /*
    * Map vertex buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      /* NOTE(review): vb_transfer[i] is only written for non-NULL
       * buffers; the unmap loop below uses the same NULL check, so
       * unwritten slots are never read.
       */
      if (svga->curr.vb[i].buffer) {
         map = pipe_buffer_map(&svga->pipe,
                               svga->curr.vb[i].buffer,
                               PIPE_TRANSFER_READ,
                               &vb_transfer[i]);
         draw_set_mapped_vertex_buffer(draw, i, map);
      }
   }

   /* Map index buffer, if present */
   map = NULL;
   if (info->indexed && svga->curr.ib.buffer) {
      map = pipe_buffer_map(&svga->pipe, svga->curr.ib.buffer,
                            PIPE_TRANSFER_READ, &ib_transfer);
      draw_set_indexes(draw,
                       (const ubyte *) map + svga->curr.ib.offset,
                       svga->curr.ib.index_size);
   }

   /* Map the vertex-shader constant buffer, if bound. */
   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      map = pipe_buffer_map(&svga->pipe,
                            svga->curr.cb[PIPE_SHADER_VERTEX],
                            PIPE_TRANSFER_READ,
                            &cb_transfer);
      assert(map);
      draw_set_mapped_constant_buffer(
         draw, PIPE_SHADER_VERTEX, 0,
         map,
         svga->curr.cb[PIPE_SHADER_VERTEX]->width0);
   }

   draw_vbo(draw, info);

   draw_flush(svga->swtnl.draw);

   /* Ensure the draw module didn't touch this */
   assert(i == svga->curr.num_vertex_buffers);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      if (svga->curr.vb[i].buffer) {
         pipe_buffer_unmap(&svga->pipe, vb_transfer[i]);
         draw_set_mapped_vertex_buffer(draw, i, NULL);
      }
   }

   if (ib_transfer) {
      pipe_buffer_unmap(&svga->pipe, ib_transfer);
      draw_set_indexes(draw, NULL, 0);
   }

   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      pipe_buffer_unmap(&svga->pipe, cb_transfer);
   }

   /* Now safe to remove the need_swtnl flag in any update_state call */
   svga->state.sw.in_swtnl_draw = FALSE;

   /* The draw-module path invalidates the pipeline/vfetch state. */
   svga->dirty |= SVGA_NEW_NEED_PIPELINE | SVGA_NEW_NEED_SWVFETCH;

   return ret;
}
/**
 * Software-TNL fallback (older screen-based buffer-map API): run an
 * indexed draw through the gallium draw module.  Maps the bound
 * vertex, index, and vertex-shader constant buffers for CPU reading,
 * issues the draw, then unmaps everything.
 *
 * \param indexBuffer  index buffer, or NULL for a non-indexed draw
 * \param indexSize    size in bytes of each index
 * \param min_index    lowest index value referenced
 * \param max_index    highest index value referenced
 * \param prim         primitive type (PIPE_PRIM_x)
 * \param start        first vertex/index
 * \param count        number of vertices/indices
 * \return the result of the SWTNL state update (the draw-module calls
 *         themselves return void)
 */
enum pipe_error
svga_swtnl_draw_range_elements(struct svga_context *svga,
                               struct pipe_buffer *indexBuffer,
                               unsigned indexSize,
                               unsigned min_index,
                               unsigned max_index,
                               unsigned prim, unsigned start, unsigned count)
{
   struct draw_context *draw = svga->swtnl.draw;
   unsigned i;
   const void *map;
   enum pipe_error ret;

   assert(!svga->dirty);
   assert(svga->state.sw.need_swtnl);
   assert(draw);

   ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
   if (ret) {
      /* On failure, flush and retry the state update exactly once. */
      svga_context_flush(svga, NULL);
      ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   /*
    * Map vertex buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      map = pipe_buffer_map(svga->pipe.screen,
                            svga->curr.vb[i].buffer,
                            PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_vertex_buffer(draw, i, map);
   }

   /* Map index buffer, if present */
   if (indexBuffer) {
      map = pipe_buffer_map(svga->pipe.screen, indexBuffer,
                            PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_element_buffer_range(draw, indexSize,
                                           min_index, max_index, map);
   }

   /* Map the vertex-shader constant buffer, if bound. */
   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      map = pipe_buffer_map(svga->pipe.screen,
                            svga->curr.cb[PIPE_SHADER_VERTEX],
                            PIPE_BUFFER_USAGE_CPU_READ);
      assert(map);
      draw_set_mapped_constant_buffer(
         draw, PIPE_SHADER_VERTEX, 0,
         map,
         svga->curr.cb[PIPE_SHADER_VERTEX]->size);
   }

   /* NOTE(review): the index range was given to the draw module above,
    * so the draw itself goes through draw_arrays().
    */
   draw_arrays(svga->swtnl.draw, prim, start, count);

   draw_flush(svga->swtnl.draw);

   /* Ensure the draw module didn't touch this */
   assert(i == svga->curr.num_vertex_buffers);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      pipe_buffer_unmap(svga->pipe.screen, svga->curr.vb[i].buffer);
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }

   if (indexBuffer) {
      pipe_buffer_unmap(svga->pipe.screen, indexBuffer);
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }

   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      pipe_buffer_unmap(svga->pipe.screen,
                        svga->curr.cb[PIPE_SHADER_VERTEX]);
   }

   return ret;
}
/**
 * Try to issue a hardware clear of the bound color and/or
 * depth-stencil buffers, using either the VGPU10 per-view clear
 * commands or the legacy SVGA3D_ClearRect path.
 *
 * On the legacy path the clear rectangle covers the union of the
 * target sizes; the viewport is temporarily widened to match and
 * restored afterwards.  On the VGPU10 path, integer render targets
 * whose clear values do not fit in floats are cleared by drawing a
 * quad instead.
 *
 * \param buffers  mask of PIPE_CLEAR_x bits selecting what to clear
 * \param color    clear color
 * \param depth    depth clear value
 * \param stencil  stencil clear value
 * \return PIPE_OK on success, else the error of the failing command
 */
static enum pipe_error
try_clear(struct svga_context *svga, unsigned buffers,
          const union pipe_color_union *color, double depth,
          unsigned stencil)
{
   enum pipe_error ret = PIPE_OK;
   SVGA3dRect rect = { 0, 0, 0, 0 };
   boolean restore_viewport = FALSE;
   SVGA3dClearFlag flags = 0;
   struct pipe_framebuffer_state *fb = &svga->curr.framebuffer;
   union util_color uc = {0};

   ret = svga_update_state(svga, SVGA_STATE_HW_CLEAR);
   if (ret != PIPE_OK)
      return ret;

   /* Re-emit render-target bindings if they were invalidated. */
   if (svga->rebind.flags.rendertargets) {
      ret = svga_reemit_framebuffer_bindings(svga);
      if (ret != PIPE_OK) {
         return ret;
      }
   }

   if (buffers & PIPE_CLEAR_COLOR) {
      flags |= SVGA3D_CLEAR_COLOR;
      /* Packed color is only consumed by the legacy ClearRect path. */
      util_pack_color(color->f, PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
      rect.w = fb->width;
      rect.h = fb->height;
   }

   if ((buffers & PIPE_CLEAR_DEPTHSTENCIL) && fb->zsbuf) {
      if (buffers & PIPE_CLEAR_DEPTH)
         flags |= SVGA3D_CLEAR_DEPTH;
      if (buffers & PIPE_CLEAR_STENCIL)
         flags |= SVGA3D_CLEAR_STENCIL;
      rect.w = MAX2(rect.w, fb->zsbuf->width);
      rect.h = MAX2(rect.h, fb->zsbuf->height);
   }

   /* Legacy path only: widen the viewport to cover the clear rect. */
   if (!svga_have_vgpu10(svga) &&
       !svga_rects_equal(&rect, &svga->state.hw_clear.viewport)) {
      restore_viewport = TRUE;
      ret = SVGA3D_SetViewport(svga->swc, &rect);
      if (ret != PIPE_OK)
         return ret;
   }

   if (svga_have_vgpu10(svga)) {
      if (flags & SVGA3D_CLEAR_COLOR) {
         unsigned i;
         if (is_integer_target(fb, buffers) && !ints_fit_in_floats(color)) {
            /* Integer clear values that can't be represented as floats
             * must be written by rendering a quad.
             */
            clear_buffers_with_quad(svga, buffers, color, depth, stencil);
            /* We also cleared depth/stencil, so that's done */
            flags &= ~(SVGA3D_CLEAR_DEPTH | SVGA3D_CLEAR_STENCIL);
         }
         else {
            struct pipe_surface *rtv;

            /* Issue VGPU10 Clear commands */
            for (i = 0; i < fb->nr_cbufs; i++) {
               if ((fb->cbufs[i] == NULL) ||
                   !(buffers & (PIPE_CLEAR_COLOR0 << i)))
                  continue;
               rtv = svga_validate_surface_view(svga,
                                                svga_surface(fb->cbufs[i]));
               if (!rtv)
                  return PIPE_ERROR_OUT_OF_MEMORY;
               ret = SVGA3D_vgpu10_ClearRenderTargetView(svga->swc,
                                                         rtv, color->f);
               if (ret != PIPE_OK)
                  return ret;
            }
         }
      }
      if (flags & (SVGA3D_CLEAR_DEPTH | SVGA3D_CLEAR_STENCIL)) {
         struct pipe_surface *dsv =
            svga_validate_surface_view(svga, svga_surface(fb->zsbuf));
         if (!dsv)
            return PIPE_ERROR_OUT_OF_MEMORY;
         ret = SVGA3D_vgpu10_ClearDepthStencilView(svga->swc, dsv, flags,
                                                   stencil, (float) depth);
         if (ret != PIPE_OK)
            return ret;
      }
   }
   else {
      /* Legacy single-command clear. */
      ret = SVGA3D_ClearRect(svga->swc, flags, uc.ui[0], (float) depth,
                             stencil, rect.x, rect.y, rect.w, rect.h);
      if (ret != PIPE_OK)
         return ret;
   }

   if (restore_viewport) {
      ret = SVGA3D_SetViewport(svga->swc, &svga->state.hw_clear.viewport);
   }

   return ret;
}