static void
svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   assert(svga->sq == sq);

   svga_hwtnl_flush_retry(svga);

   /* Set to PENDING before sending EndQuery. */
   sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;

   ret = SVGA3D_EndQuery(svga->swc, sq->type, sq->hwbuf);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_EndQuery(svga->swc, sq->type, sq->hwbuf);
      assert(ret == PIPE_OK);
   }

   /* TODO: Delay flushing. We don't really need to flush here, just ensure
    * that there is one flush before svga_get_query_result attempts to get
    * the result.
    */
   svga_context_flush(svga, NULL);

   svga->sq = NULL;
}
static void
svga_delete_gs_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_geometry_shader *gs = (struct svga_geometry_shader *)shader;
   struct svga_geometry_shader *next_gs;
   struct svga_shader_variant *variant, *tmp;
   enum pipe_error ret;

   svga_hwtnl_flush_retry(svga);

   /* Start deletion from the original geometry shader state */
   if (gs->base.parent != NULL)
      gs = (struct svga_geometry_shader *)gs->base.parent;

   /* Free the list of geometry shaders */
   while (gs) {
      next_gs = (struct svga_geometry_shader *)gs->base.next;

      if (gs->base.stream_output != NULL)
         svga_delete_stream_output(svga, gs->base.stream_output);

      draw_delete_geometry_shader(svga->swtnl.draw, gs->draw_shader);

      for (variant = gs->base.variants; variant; variant = tmp) {
         tmp = variant->next;

         /* Check if deleting currently bound shader */
         if (variant == svga->state.hw_draw.gs) {
            ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_GS, NULL);
            if (ret != PIPE_OK) {
               svga_context_flush(svga, NULL);
               ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_GS, NULL);
               assert(ret == PIPE_OK);
            }
            svga->state.hw_draw.gs = NULL;
         }

         ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_GS,
                                           variant);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_GS,
                                              variant);
            assert(ret == PIPE_OK);
         }
      }

      FREE((void *)gs->base.tokens);
      FREE(gs);
      gs = next_gs;
   }
}
static boolean
svga_get_query_result(struct pipe_context *pipe,
                      struct pipe_query *q,
                      boolean wait,
                      uint64_t *result)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_query *sq = svga_query(q);
   SVGA3dQueryState state;

   /* The original format string had a %d with no matching argument;
    * pass 'wait' to it.
    */
   SVGA_DBG(DEBUG_QUERY, "%s wait: %d\n", __FUNCTION__, wait);

   /* The query status won't be updated by the host unless
    * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
    * a synchronous wait on the host.
    */
   if (!sq->fence) {
      enum pipe_error ret;

      ret = SVGA3D_WaitForQuery(svga->swc, sq->type, sq->hwbuf);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery(svga->swc, sq->type, sq->hwbuf);
         assert(ret == PIPE_OK);
      }
      svga_context_flush(svga, &sq->fence);
      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if (state == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      sws->fence_finish(sws, sq->fence, 0);
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
          state == SVGA3D_QUERYSTATE_FAILED);

   *result = (uint64_t)sq->queryResult->result32;

   SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, (unsigned)*result);

   return TRUE;
}
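/*
 * A minimal sketch of how a state tracker might drive the query hooks
 * defined here (begin_query/end_query/get_query_result). Illustrative
 * only: the exact pipe_context signatures vary across Gallium versions
 * (this assumes the older interface matching svga_get_query_result
 * above), and count_samples_passed is a hypothetical helper, not part
 * of the driver.
 */
static uint64_t
count_samples_passed(struct pipe_context *pipe)
{
   struct pipe_query *q =
      pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER);
   uint64_t result = 0;

   pipe->begin_query(pipe, q);
   /* ... emit the draw calls whose passing samples should be counted ... */
   pipe->end_query(pipe, q);

   /* wait = TRUE forces a sync with the host (see svga_get_query_result) */
   pipe->get_query_result(pipe, q, TRUE, &result);

   pipe->destroy_query(pipe, q);
   return result;
}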
static void
svga_delete_vs_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_vertex_shader *vs = (struct svga_vertex_shader *)shader;
   struct svga_shader_variant *variant, *tmp;
   enum pipe_error ret;

   svga_hwtnl_flush_retry(svga);

   assert(vs->base.parent == NULL);

   /* Check if there is a generated geometry shader to go with this
    * vertex shader. If there is, then delete the geometry shader as well.
    */
   if (vs->gs != NULL) {
      svga->pipe.delete_gs_state(&svga->pipe, vs->gs);
   }

   if (vs->base.stream_output != NULL)
      svga_delete_stream_output(svga, vs->base.stream_output);

   draw_delete_vertex_shader(svga->swtnl.draw, vs->draw_shader);

   for (variant = vs->base.variants; variant; variant = tmp) {
      tmp = variant->next;

      /* Check if deleting currently bound shader */
      if (variant == svga->state.hw_draw.vs) {
         ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, NULL);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, NULL);
            assert(ret == PIPE_OK);
         }
         svga->state.hw_draw.vs = NULL;
      }

      ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS, variant);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = svga_destroy_shader_variant(svga, SVGA3D_SHADERTYPE_VS,
                                           variant);
         assert(ret == PIPE_OK);
      }
   }

   FREE((void *)vs->base.tokens);
   FREE(vs);
}
/**
 * Clear the given surface to the specified value.
 * No masking, no scissor (clear entire buffer).
 */
void
svga_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
           double depth, unsigned stencil)
{
   struct svga_context *svga = svga_context(pipe);
   enum pipe_error ret;

   if (buffers & PIPE_CLEAR_COLOR)
      SVGA_DBG(DEBUG_DMA, "clear sid %p\n",
               svga_surface(svga->curr.framebuffer.cbufs[0])->handle);

   ret = try_clear(svga, buffers, rgba, depth, stencil);

   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      /* Flush command buffer and retry: */
      svga_context_flush(svga, NULL);

      ret = try_clear(svga, buffers, rgba, depth, stencil);
   }

   /*
    * Mark target surfaces as dirty
    * TODO Mark only cleared surfaces.
    */
   svga_mark_surfaces_dirty(svga);

   assert(ret == PIPE_OK);
}
/**
 * Clear the given surface to the specified value.
 * No masking, no scissor (clear entire buffer).
 */
void
svga_clear(struct pipe_context *pipe, unsigned buffers,
           const union pipe_color_union *color,
           double depth, unsigned stencil)
{
   struct svga_context *svga = svga_context(pipe);
   enum pipe_error ret;

   if (buffers & PIPE_CLEAR_COLOR)
      SVGA_DBG(DEBUG_DMA, "clear sid %p\n",
               svga_surface(svga->curr.framebuffer.cbufs[0])->handle);

   /* flush any queued prims (don't want them to appear after the clear!) */
   svga_hwtnl_flush_retry(svga);

   ret = try_clear(svga, buffers, color, depth, stencil);

   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      /* Flush command buffer and retry: */
      svga_context_flush(svga, NULL);

      ret = try_clear(svga, buffers, color, depth, stencil);
   }

   /*
    * Mark target surfaces as dirty
    * TODO Mark only cleared surfaces.
    */
   svga_mark_surfaces_dirty(svga);

   assert(ret == PIPE_OK);
}
enum pipe_error
svga_set_stream_output(struct svga_context *svga,
                       struct svga_stream_output *streamout)
{
   enum pipe_error ret = PIPE_OK;
   unsigned id = streamout ? streamout->id : SVGA3D_INVALID_ID;

   if (!svga_have_vgpu10(svga)) {
      return PIPE_OK;
   }

   /* Use %p for the pointer; the original %x truncates on 64-bit. */
   SVGA_DBG(DEBUG_STREAMOUT, "%s streamout=%p id=%d\n", __FUNCTION__,
            streamout, id);

   if (svga->current_so != streamout) {
      /* Save current SO state */
      svga->current_so = streamout;

      ret = SVGA3D_vgpu10_SetStreamOutput(svga->swc, id);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_SetStreamOutput(svga->swc, id);
      }
   }

   return ret;
}
static void
svga_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_velems_state *velems = (struct svga_velems_state *) state;

   if (svga_have_vgpu10(svga)) {
      enum pipe_error ret;

      svga_hwtnl_flush_retry(svga);

      ret = SVGA3D_vgpu10_DestroyElementLayout(svga->swc, velems->id);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_DestroyElementLayout(svga->swc, velems->id);
         assert(ret == PIPE_OK);
      }

      if (velems->id == svga->state.hw_draw.layout_id)
         svga->state.hw_draw.layout_id = SVGA3D_INVALID_ID;

      util_bitmask_clear(svga->input_element_object_id_bm, velems->id);
      velems->id = SVGA3D_INVALID_ID;
   }

   FREE(velems);
}
/**
 * \brief Clear render target pipe callback
 *
 * \param pipe[in]  The pipe context
 * \param dst[in]  The surface to clear
 * \param color[in]  Clear color
 * \param dstx[in]  Clear region left
 * \param dsty[in]  Clear region top
 * \param width[in]  Clear region width
 * \param height[in]  Clear region height
 * \param render_condition_enabled[in]  Whether to use conditional rendering
 *        to clear (if elsewhere enabled).
 */
static void
svga_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height,
                         bool render_condition_enabled)
{
   struct svga_context *svga = svga_context(pipe);

   svga_toggle_render_condition(svga, render_condition_enabled, FALSE);

   if (!svga_have_vgpu10(svga) || dstx != 0 || dsty != 0 ||
       width != dst->width || height != dst->height) {
      svga_blitter_clear_render_target(svga, dst, color, dstx, dsty,
                                       width, height);
   } else {
      enum pipe_error ret;

      ret = svga_try_clear_render_target(svga, dst, color);
      if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
         svga_context_flush(svga, NULL);
         ret = svga_try_clear_render_target(svga, dst, color);
      }
      assert(ret == PIPE_OK);
   }

   svga_toggle_render_condition(svga, render_condition_enabled, TRUE);
}
static void
svga_vbuf_render_draw_arrays(struct vbuf_render *render,
                             unsigned start, uint nr)
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   unsigned bias = (svga_render->vbuf_offset - svga_render->vdecl_offset)
      / svga_render->vertex_size;
   enum pipe_error ret = PIPE_OK;

   /* off to hardware */
   svga_vbuf_submit_state(svga_render);

   /* Need to call update_state() again as the draw module may have
    * altered some of our state behind our backs. Testcase:
    * redbook/polys.c
    */
   svga_update_state_retry(svga, SVGA_STATE_HW_DRAW);

   ret = svga_hwtnl_draw_arrays(svga->hwtnl, svga_render->prim,
                                start + bias, nr);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_draw_arrays(svga->hwtnl, svga_render->prim,
                                   start + bias, nr);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }
}
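/*
 * Worked example for the bias computation above (illustrative numbers,
 * not from the driver): with vertex_size = 32 bytes, vdecl_offset = 0 and
 * vbuf_offset = 6400, bias = (6400 - 0) / 32 = 200. The vertex
 * declarations were bound with offsets relative to vdecl_offset, so
 * draw-module vertex 0 lives 200 vertices into the hardware buffer and
 * the hardware draw must start at start + 200.
 */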
static void
svga_delete_vs_state(struct pipe_context *pipe, void *shader)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_vertex_shader *vs = (struct svga_vertex_shader *)shader;
   struct svga_shader_result *result, *tmp;
   enum pipe_error ret;

   svga_hwtnl_flush_retry(svga);

   draw_delete_vertex_shader(svga->swtnl.draw, vs->draw_shader);

   for (result = vs->base.results; result; result = tmp) {
      tmp = result->next;

      ret = SVGA3D_DestroyShader(svga->swc, result->id,
                                 SVGA3D_SHADERTYPE_VS);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_DestroyShader(svga->swc, result->id,
                                    SVGA3D_SHADERTYPE_VS);
         assert(ret == PIPE_OK);
      }

      svga_destroy_shader_result(result);
   }

   FREE((void *)vs->base.tokens);
   FREE(vs);
}
static boolean
svga_vbuf_render_allocate_vertices(struct vbuf_render *render,
                                   ushort vertex_size, ushort nr_vertices)
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   struct pipe_screen *screen = svga->pipe.screen;
   size_t size = (size_t)nr_vertices * (size_t)vertex_size;
   boolean new_vbuf = FALSE;
   boolean new_ibuf = FALSE;

   if (svga_render->vertex_size != vertex_size)
      svga->swtnl.new_vdecl = TRUE;
   svga_render->vertex_size = (size_t)vertex_size;

   if (svga->swtnl.new_vbuf)
      new_ibuf = new_vbuf = TRUE;
   svga->swtnl.new_vbuf = FALSE;

   if (svga_render->vbuf_size
       < svga_render->vbuf_offset + svga_render->vbuf_used + size)
      new_vbuf = TRUE;

   if (new_vbuf)
      pipe_resource_reference(&svga_render->vbuf, NULL);
   if (new_ibuf)
      pipe_resource_reference(&svga_render->ibuf, NULL);

   if (!svga_render->vbuf) {
      svga_render->vbuf_size = MAX2(size, svga_render->vbuf_alloc_size);
      svga_render->vbuf = pipe_buffer_create(screen,
                                             PIPE_BIND_VERTEX_BUFFER,
                                             PIPE_USAGE_STREAM,
                                             svga_render->vbuf_size);
      if (!svga_render->vbuf) {
         svga_context_flush(svga, NULL);
         assert(!svga_render->vbuf);
         svga_render->vbuf = pipe_buffer_create(screen,
                                                PIPE_BIND_VERTEX_BUFFER,
                                                PIPE_USAGE_STREAM,
                                                svga_render->vbuf_size);
         /* The buffer allocation may fail if we run out of memory.
          * The draw module's vbuf code should handle that without crashing.
          */
      }
      svga->swtnl.new_vdecl = TRUE;
      svga_render->vbuf_offset = 0;
   } else {
      svga_render->vbuf_offset += svga_render->vbuf_used;
   }

   svga_render->vbuf_used = 0;

   if (svga->swtnl.new_vdecl)
      svga_render->vdecl_offset = svga_render->vbuf_offset;

   return TRUE;
}
static enum pipe_error
retry_draw_range_elements(struct svga_context *svga,
                          struct pipe_resource *index_buffer,
                          unsigned index_size,
                          int index_bias,
                          unsigned min_index,
                          unsigned max_index,
                          enum pipe_prim_type prim,
                          unsigned start,
                          unsigned count,
                          unsigned start_instance,
                          unsigned instance_count,
                          boolean do_retry)
{
   enum pipe_error ret = PIPE_OK;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWELEMENTS);

   svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);

   ret = svga_update_state(svga, SVGA_STATE_HW_DRAW);
   if (ret != PIPE_OK)
      goto retry;

   /* Determine if flatshade is to be used after svga_update_state()
    * in case the fragment shader is changed.
    */
   svga_hwtnl_set_flatshade(svga->hwtnl,
                            svga->curr.rast->templ.flatshade ||
                            svga->state.hw_draw.fs->uses_flat_interp,
                            svga->curr.rast->templ.flatshade_first);

   ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                        index_buffer, index_size,
                                        index_bias,
                                        min_index, max_index,
                                        prim, start, count,
                                        start_instance, instance_count);
   if (ret != PIPE_OK)
      goto retry;

   goto done;

retry:
   svga_context_flush(svga, NULL);

   if (do_retry) {
      ret = retry_draw_range_elements(svga,
                                      index_buffer, index_size, index_bias,
                                      min_index, max_index,
                                      prim, start, count,
                                      start_instance, instance_count,
                                      FALSE);
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}
void
svga_texture_copy_handle(struct svga_context *svga,
                         struct svga_winsys_surface *src_handle,
                         unsigned src_x, unsigned src_y, unsigned src_z,
                         unsigned src_level, unsigned src_face,
                         struct svga_winsys_surface *dst_handle,
                         unsigned dst_x, unsigned dst_y, unsigned dst_z,
                         unsigned dst_level, unsigned dst_face,
                         unsigned width, unsigned height, unsigned depth)
{
   struct svga_surface dst, src;
   enum pipe_error ret;
   SVGA3dCopyBox box, *boxes;

   assert(svga);

   src.handle = src_handle;
   src.real_level = src_level;
   src.real_face = src_face;
   src.real_zslice = 0;

   dst.handle = dst_handle;
   dst.real_level = dst_level;
   dst.real_face = dst_face;
   dst.real_zslice = 0;

   box.x = dst_x;
   box.y = dst_y;
   box.z = dst_z;
   box.w = width;
   box.h = height;
   box.d = depth;
   box.srcx = src_x;
   box.srcy = src_y;
   box.srcz = src_z;

/*
   SVGA_DBG(DEBUG_VIEWS,
            "mipcopy src: %p %u (%ux%ux%u), dst: %p %u (%ux%ux%u)\n",
            src_handle, src_level, src_x, src_y, src_z,
            dst_handle, dst_level, dst_x, dst_y, dst_z);
*/

   ret = SVGA3D_BeginSurfaceCopy(svga->swc,
                                 &src.base,
                                 &dst.base,
                                 &boxes, 1);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginSurfaceCopy(svga->swc,
                                    &src.base,
                                    &dst.base,
                                    &boxes, 1);
      assert(ret == PIPE_OK);
   }

   *boxes = box;

   SVGA_FIFOCommitAll(svga->swc);
}
static boolean
svga_vbuf_render_allocate_vertices(struct vbuf_render *render,
                                   ushort vertex_size, ushort nr_vertices)
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   struct pipe_screen *screen = svga->pipe.screen;
   size_t size = (size_t)nr_vertices * (size_t)vertex_size;
   boolean new_vbuf = FALSE;
   boolean new_ibuf = FALSE;

   if (svga_render->vertex_size != vertex_size)
      svga->swtnl.new_vdecl = TRUE;
   svga_render->vertex_size = (size_t)vertex_size;

   if (svga->swtnl.new_vbuf)
      new_ibuf = new_vbuf = TRUE;
   svga->swtnl.new_vbuf = FALSE;

   if (svga_render->vbuf_size
       < svga_render->vbuf_offset + svga_render->vbuf_used + size)
      new_vbuf = TRUE;

   if (new_vbuf)
      pipe_buffer_reference(&svga_render->vbuf, NULL);
   if (new_ibuf)
      pipe_buffer_reference(&svga_render->ibuf, NULL);

   if (!svga_render->vbuf) {
      svga_render->vbuf_size = MAX2(size, svga_render->vbuf_alloc_size);
      svga_render->vbuf = pipe_buffer_create(screen, 16,
                                             PIPE_BUFFER_USAGE_VERTEX,
                                             svga_render->vbuf_size);
      if (!svga_render->vbuf) {
         svga_context_flush(svga, NULL);
         svga_render->vbuf = pipe_buffer_create(screen, 16,
                                                PIPE_BUFFER_USAGE_VERTEX,
                                                svga_render->vbuf_size);
         assert(svga_render->vbuf);
      }
      svga->swtnl.new_vdecl = TRUE;
      svga_render->vbuf_offset = 0;
   } else {
      svga_render->vbuf_offset += svga_render->vbuf_used;
   }

   svga_render->vbuf_used = 0;

   if (svga->swtnl.new_vdecl)
      svga_render->vdecl_offset = svga_render->vbuf_offset;

   return TRUE;
}
/**
 * Flush pending commands and wait for completion with a fence.
 */
void
svga_context_finish(struct svga_context *svga)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct pipe_fence_handle *fence = NULL;

   svga_context_flush(svga, &fence);
   screen->fence_finish(screen, fence, PIPE_TIMEOUT_INFINITE);
   screen->fence_reference(screen, &fence, NULL);
}
static enum pipe_error
retry_draw_range_elements(struct svga_context *svga,
                          struct pipe_resource *index_buffer,
                          unsigned index_size,
                          int index_bias,
                          unsigned min_index,
                          unsigned max_index,
                          unsigned prim,
                          unsigned start,
                          unsigned count,
                          unsigned instance_count,
                          boolean do_retry)
{
   enum pipe_error ret = PIPE_OK;

   svga_hwtnl_set_unfilled(svga->hwtnl, svga->curr.rast->hw_unfilled);

   svga_hwtnl_set_flatshade(svga->hwtnl,
                            svga->curr.rast->templ.flatshade,
                            svga->curr.rast->templ.flatshade_first);

   ret = svga_upload_user_buffers(svga, min_index + index_bias,
                                  max_index - min_index + 1,
                                  instance_count);
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_update_state(svga, SVGA_STATE_HW_DRAW);
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                        index_buffer, index_size,
                                        index_bias,
                                        min_index, max_index,
                                        prim, start, count);
   if (ret != PIPE_OK)
      goto retry;

   return PIPE_OK;

retry:
   svga_context_flush(svga, NULL);

   if (do_retry) {
      return retry_draw_range_elements(svga,
                                       index_buffer, index_size, index_bias,
                                       min_index, max_index,
                                       prim, start, count,
                                       instance_count, FALSE);
   }

   return ret;
}
static enum pipe_error
retry_draw_range_elements(struct svga_context *svga,
                          struct pipe_buffer *index_buffer,
                          unsigned index_size,
                          unsigned min_index,
                          unsigned max_index,
                          unsigned prim,
                          unsigned start,
                          unsigned count,
                          boolean do_retry)
{
   enum pipe_error ret = PIPE_OK;

   svga_hwtnl_set_unfilled(svga->hwtnl, svga->curr.rast->hw_unfilled);

   svga_hwtnl_set_flatshade(svga->hwtnl,
                            svga->curr.rast->templ.flatshade,
                            svga->curr.rast->templ.flatshade_first);

   ret = svga_update_state(svga, SVGA_STATE_HW_DRAW);
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                        index_buffer, index_size,
                                        min_index, max_index,
                                        prim, start, count, 0);
   if (ret != PIPE_OK)
      goto retry;

   if (svga->curr.any_user_vertex_buffers) {
      ret = svga_hwtnl_flush(svga->hwtnl);
      if (ret != PIPE_OK)
         goto retry;
   }

   return PIPE_OK;

retry:
   svga_context_flush(svga, NULL);

   if (do_retry) {
      return retry_draw_range_elements(svga,
                                       index_buffer, index_size,
                                       min_index, max_index,
                                       prim, start, count,
                                       FALSE);
   }

   return ret;
}
/**
 * All drawing filters down into this function, either directly
 * on the hardware path or after doing software vertex processing.
 */
enum pipe_error
svga_hwtnl_prim(struct svga_hwtnl *hwtnl,
                const SVGA3dPrimitiveRange *range,
                unsigned vcount,
                unsigned min_index, unsigned max_index,
                struct pipe_resource *ib,
                unsigned start_instance, unsigned instance_count)
{
   enum pipe_error ret = PIPE_OK;

   SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLPRIM);

   if (svga_have_vgpu10(hwtnl->svga)) {
      /* draw immediately */
      ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                        start_instance, instance_count);
      if (ret != PIPE_OK) {
         svga_context_flush(hwtnl->svga, NULL);
         ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                           start_instance, instance_count);
         assert(ret == PIPE_OK);
      }
   } else {
      /* batch up drawing commands */
#ifdef DEBUG
      check_draw_params(hwtnl, range, min_index, max_index, ib);
      assert(start_instance == 0);
      assert(instance_count <= 1);
#else
      (void) check_draw_params;
#endif

      if (hwtnl->cmd.prim_count + 1 >= QSZ) {
         ret = svga_hwtnl_flush(hwtnl);
         if (ret != PIPE_OK)
            goto done;
      }

      /* min/max indices are relative to bias */
      hwtnl->cmd.min_index[hwtnl->cmd.prim_count] = min_index;
      hwtnl->cmd.max_index[hwtnl->cmd.prim_count] = max_index;

      hwtnl->cmd.prim[hwtnl->cmd.prim_count] = *range;
      hwtnl->cmd.prim[hwtnl->cmd.prim_count].indexBias += hwtnl->index_bias;

      pipe_resource_reference(&hwtnl->cmd.prim_ib[hwtnl->cmd.prim_count], ib);
      hwtnl->cmd.prim_count++;
   }

done:
   SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
   return ret;
}
static void
svga_vbuf_submit_state(struct svga_vbuf_render *svga_render)
{
   struct svga_context *svga = svga_render->svga;
   SVGA3dVertexDecl vdecl[PIPE_MAX_ATTRIBS];
   enum pipe_error ret;
   int i;

   /* if the vdecl or vbuf hasn't changed do nothing */
   if (!svga->swtnl.new_vdecl)
      return;

   memcpy(vdecl, svga_render->vdecl, sizeof(vdecl));

   /* flush the hw state */
   ret = svga_hwtnl_flush(svga->hwtnl);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_flush(svga->hwtnl);
      /* if we hit this path we might become synced with hw */
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   svga_hwtnl_reset_vdecl(svga->hwtnl, svga_render->vdecl_count);

   for (i = 0; i < svga_render->vdecl_count; i++) {
      vdecl[i].array.offset += svga_render->vdecl_offset;
      svga_hwtnl_vdecl(svga->hwtnl, i, &vdecl[i], svga_render->vbuf);
   }

   /* We have already taken care of flatshading, so let the hwtnl
    * module use whatever is most convenient:
    */
   if (svga->state.sw.need_pipeline) {
      svga_hwtnl_set_flatshade(svga->hwtnl, FALSE, FALSE);
      svga_hwtnl_set_unfilled(svga->hwtnl, PIPE_POLYGON_MODE_FILL);
   } else {
      svga_hwtnl_set_flatshade(svga->hwtnl,
                               svga->curr.rast->templ.flatshade,
                               svga->curr.rast->templ.flatshade_first);
      svga_hwtnl_set_unfilled(svga->hwtnl, svga->curr.rast->hw_unfilled);
   }

   svga->swtnl.new_vdecl = FALSE;
}
void
svga_hwtnl_flush_retry(struct svga_context *svga)
{
   enum pipe_error ret = PIPE_OK;

   ret = svga_hwtnl_flush(svga->hwtnl);
   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      /* Flush the command buffer to free up space, then retry once. */
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_flush(svga->hwtnl);
   }

   assert(ret == PIPE_OK);
}
void
svga_update_state_retry(struct svga_context *svga, unsigned max_level)
{
   enum pipe_error ret;

   ret = svga_update_state(svga, max_level);

   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      svga_context_flush(svga, NULL);
      ret = svga_update_state(svga, max_level);
   }

   assert(ret == PIPE_OK);
}
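/*
 * The two helpers above are instances of the flush-and-retry idiom used
 * throughout this driver: command emission fails with
 * PIPE_ERROR_OUT_OF_MEMORY when the command buffer is full, and flushing
 * the context frees space for one more attempt. A minimal, self-contained
 * sketch of the pattern follows; emit_fn, flush_fn and the int return
 * convention are hypothetical stand-ins, not the driver's actual types.
 */
typedef int (*emit_fn)(void *ctx);      /* 0 on success, nonzero on "out of space" */
typedef void (*flush_fn)(void *ctx);    /* submits buffered commands to the host */

static int
emit_with_retry(void *ctx, emit_fn emit, flush_fn flush)
{
   int ret = emit(ctx);
   if (ret != 0) {
      /* Flushing submits the buffered commands, freeing command-buffer
       * space; after that, a single retry is expected to succeed.
       */
      flush(ctx);
      ret = emit(ctx);
   }
   return ret;
}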
static enum pipe_error
readback_image_vgpu9(struct svga_context *svga,
                     struct svga_winsys_surface *surf,
                     unsigned slice, unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   }
   return ret;
}
static INLINE void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned y, unsigned h, unsigned srcy,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;
   enum pipe_error ret;

   box.x = st->base.box.x;
   box.y = y;
   box.z = st->base.box.z;
   box.w = st->base.box.width;
   box.h = h;
   box.d = 1;
   box.srcx = 0;
   box.srcy = srcy;
   box.srcz = 0;

   if (st->base.resource->target == PIPE_TEXTURE_CUBE) {
      st->face = st->base.box.z;
      box.z = 0;
   }
   else
      st->face = 0;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->face,
            st->base.box.x,
            y,
            box.z,
            st->base.box.x + st->base.box.width,
            y + h,
            box.z + 1,
            util_format_get_blocksize(texture->b.b.format) * 8 /
            (util_format_get_blockwidth(texture->b.b.format) *
             util_format_get_blockheight(texture->b.b.format)));

   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
      assert(ret == PIPE_OK);
   }
}
static enum pipe_error
retry_draw_arrays(struct svga_context *svga,
                  unsigned prim,
                  unsigned start,
                  unsigned count,
                  unsigned instance_count,
                  boolean do_retry)
{
   enum pipe_error ret;

   svga_hwtnl_set_unfilled(svga->hwtnl, svga->curr.rast->hw_unfilled);

   svga_hwtnl_set_flatshade(svga->hwtnl,
                            svga->curr.rast->templ.flatshade,
                            svga->curr.rast->templ.flatshade_first);

   ret = svga_upload_user_buffers(svga, start, count, instance_count);
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_update_state(svga, SVGA_STATE_HW_DRAW);
   if (ret != PIPE_OK)
      goto retry;

   ret = svga_hwtnl_draw_arrays(svga->hwtnl, prim, start, count);
   if (ret != PIPE_OK)
      goto retry;

   return PIPE_OK;

retry:
   if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry) {
      svga_context_flush(svga, NULL);

      return retry_draw_arrays(svga, prim, start, count, instance_count,
                               FALSE);
   }

   return ret;
}
static enum pipe_error
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice, unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   }
   return ret;
}
static void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned x, unsigned y, unsigned z,
                       unsigned w, unsigned h, unsigned d,
                       unsigned srcx, unsigned srcy, unsigned srcz,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;
   enum pipe_error ret;

   assert(!st->use_direct_map);

   box.x = x;
   box.y = y;
   box.z = z;
   box.w = w;
   box.h = h;
   box.d = d;
   box.srcx = srcx;
   box.srcy = srcy;
   box.srcz = srcz;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->slice,
            x, y, z,
            x + w, y + h, z + 1,
            util_format_get_blocksize(texture->b.b.format) * 8 /
            (util_format_get_blockwidth(texture->b.b.format) *
             util_format_get_blockheight(texture->b.b.format)));

   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
      assert(ret == PIPE_OK);
   }
}
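/*
 * Worked example for the bits-per-pixel expression in the SVGA_DBG call
 * above: a DXT1 texture has a block size of 8 bytes over a 4x4 block of
 * texels, so 8 * 8 / (4 * 4) = 4 bpp; PIPE_FORMAT_B8G8R8A8_UNORM has a
 * 4-byte, 1x1 block, giving 4 * 8 / (1 * 1) = 32 bpp.
 */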
static enum pipe_error
retry_draw_arrays(struct svga_context *svga,
                  enum pipe_prim_type prim,
                  unsigned start,
                  unsigned count,
                  unsigned start_instance,
                  unsigned instance_count,
                  boolean do_retry)
{
   enum pipe_error ret;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWARRAYS);

   svga_hwtnl_set_fillmode(svga->hwtnl, svga->curr.rast->hw_fillmode);

   ret = svga_update_state(svga, SVGA_STATE_HW_DRAW);
   if (ret != PIPE_OK)
      goto retry;

   /* Determine if flatshade is to be used after svga_update_state()
    * in case the fragment shader is changed.
    */
   svga_hwtnl_set_flatshade(svga->hwtnl,
                            svga->curr.rast->templ.flatshade ||
                            svga->state.hw_draw.fs->uses_flat_interp,
                            svga->curr.rast->templ.flatshade_first);

   ret = svga_hwtnl_draw_arrays(svga->hwtnl, prim, start, count,
                                start_instance, instance_count);
   if (ret != PIPE_OK)
      goto retry;

   goto done;

retry:
   if (ret == PIPE_ERROR_OUT_OF_MEMORY && do_retry) {
      svga_context_flush(svga, NULL);
      ret = retry_draw_arrays(svga, prim, start, count,
                              start_instance, instance_count,
                              FALSE);
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}
static void
svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   assert(!svga->sq);

   /* Need to flush out buffered drawing commands so that they don't
    * get counted in the query results.
    */
   svga_hwtnl_flush_retry(svga);

   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care for the pending query result. We
       * cannot let go the existing buffer and just get a new one because
       * its storage may be reused for other purposes and clobbered by the
       * host when it determines the query result. So the only option here
       * is to wait for the existing query's result -- not a big deal,
       * given that no sane application would do this.
       */
      uint64_t result;

      svga_get_query_result(pipe, q, TRUE, &result);

      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }

   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   ret = SVGA3D_BeginQuery(svga->swc, sq->type);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->type);
      assert(ret == PIPE_OK);
   }

   svga->sq = sq;
}
static enum pipe_error
readback_image_vgpu10(struct svga_context *svga,
                      struct svga_winsys_surface *surf,
                      unsigned slice, unsigned level,
                      unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   }
   return ret;
}
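/*
 * The subResource computation above follows the usual D3D10-style
 * subresource layout: all mip levels of slice 0 first, then all mip
 * levels of slice 1, and so on. For example, with numMipLevels = 10,
 * slice = 2 and level = 3, subResource = 2 * 10 + 3 = 23.
 */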