/* Emit all operations pending on host surfaces. */ void svga_surfaces_flush(struct svga_context *svga) { unsigned i; /* Emit buffered drawing commands. */ svga_hwtnl_flush_retry( svga ); /* Emit back-copy from render target view to texture. */ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) { if (svga->curr.framebuffer.cbufs[i]) svga_propagate_surface(svga, svga->curr.framebuffer.cbufs[i]); } if (svga->curr.framebuffer.zsbuf) svga_propagate_surface(svga, svga->curr.framebuffer.zsbuf); }
/* Emit all operations pending on host surfaces. */ void svga_surfaces_flush(struct svga_context *svga) { struct svga_screen *svgascreen = svga_screen(svga->pipe.screen); unsigned i; /* Emit buffered drawing commands. */ svga_hwtnl_flush_retry( svga ); /* Emit back-copy from render target view to texture. */ for (i = 0; i < svgascreen->max_color_buffers; i++) { if (svga->curr.framebuffer.cbufs[i]) svga_propagate_surface(svga, svga->curr.framebuffer.cbufs[i]); } if (svga->curr.framebuffer.zsbuf) svga_propagate_surface(svga, svga->curr.framebuffer.zsbuf); }
/* XXX still have doubts about this... */ static void svga_surface_copy(struct pipe_context *pipe, struct pipe_resource* dst_tex, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource* src_tex, unsigned src_level, const struct pipe_box *src_box) { struct svga_context *svga = svga_context(pipe); struct svga_texture *stex, *dtex; /* struct pipe_screen *screen = pipe->screen; SVGA3dCopyBox *box; enum pipe_error ret; struct pipe_surface *srcsurf, *dstsurf;*/ unsigned dst_face, dst_z, src_face, src_z; /* Emit buffered drawing commands, and any back copies. */ svga_surfaces_flush( svga ); /* Fallback for buffers. */ if (dst_tex->target == PIPE_BUFFER && src_tex->target == PIPE_BUFFER) { util_resource_copy_region(pipe, dst_tex, dst_level, dstx, dsty, dstz, src_tex, src_level, src_box); return; } stex = svga_texture(src_tex); dtex = svga_texture(dst_tex); #if 0 srcsurf = screen->get_tex_surface(screen, src_tex, src_level, src_box->z, src_box->z, PIPE_BIND_SAMPLER_VIEW); dstsurf = screen->get_tex_surface(screen, dst_tex, dst_level, dst_box->z, dst_box->z, PIPE_BIND_RENDER_TARGET); SVGA_DBG(DEBUG_DMA, "blit to sid %p (%d,%d), from sid %p (%d,%d) sz %dx%d\n", svga_surface(dstsurf)->handle, dstx, dsty, svga_surface(srcsurf)->handle, src_box->x, src_box->y, width, height); ret = SVGA3D_BeginSurfaceCopy(svga->swc, srcsurf, dstsurf, &box, 1); if(ret != PIPE_OK) { svga_context_flush(svga, NULL); ret = SVGA3D_BeginSurfaceCopy(svga->swc, srcsurf, dstsurf, &box, 1); assert(ret == PIPE_OK); } box->x = dstx; box->y = dsty; box->z = 0; box->w = width; box->h = height; box->d = 1; box->srcx = src_box->x; box->srcy = src_box->y; box->srcz = 0; SVGA_FIFOCommitAll(svga->swc); svga_surface(dstsurf)->dirty = TRUE; svga_propagate_surface(pipe, dstsurf); pipe_surface_reference(&srcsurf, NULL); pipe_surface_reference(&dstsurf, NULL); #else if (src_tex->target == PIPE_TEXTURE_CUBE) { src_face = src_box->z; src_z = 0; assert(src_box->depth == 1); } else { src_face = 
0; src_z = src_box->z; } /* different src/dst type???*/ if (dst_tex->target == PIPE_TEXTURE_CUBE) { dst_face = dstz; dst_z = 0; assert(src_box->depth == 1); } else { dst_face = 0; dst_z = dstz; } svga_texture_copy_handle(svga, stex->handle, src_box->x, src_box->y, src_z, src_level, src_face, dtex->handle, dstx, dsty, dst_z, dst_level, dst_face, src_box->width, src_box->height, src_box->depth); #endif }
/**
 * Emit the framebuffer (render target + depth/stencil) state for VGPU10.
 *
 * Validates a surface view for each currently bound color buffer and the
 * depth/stencil buffer, issues the SetRenderTargets command, then updates
 * the driver's hardware-state mirror (svga->state.hw_clear.framebuffer),
 * taking care to propagate any backed views before they are unbound.
 *
 * Returns PIPE_OK on success, PIPE_ERROR_OUT_OF_MEMORY if a surface view
 * could not be validated, or the error from the SetRenderTargets command.
 */
static enum pipe_error
emit_fb_vgpu10(struct svga_context *svga)
{
   const struct svga_screen *ss = svga_screen(svga->pipe.screen);
   struct pipe_surface *rtv[SVGA3D_MAX_RENDER_TARGETS];
   struct pipe_surface *dsv;
   struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   const unsigned num_color = MAX2(curr->nr_cbufs, hw->nr_cbufs);
   unsigned i;
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));

   /* Setup render targets array.  Note that we loop over the max of the
    * number of previously bound buffers and the new buffers to unbind
    * any previously bound buffers when the new number of buffers is less
    * than the old number of buffers.
    */
   for (i = 0; i < num_color; i++) {
      if (curr->cbufs[i]) {
         rtv[i] = svga_validate_surface_view(svga,
                                             svga_surface(curr->cbufs[i]));
         if (rtv[i] == NULL) {
            return PIPE_ERROR_OUT_OF_MEMORY;
         }
         assert(svga_surface(rtv[i])->view_id != SVGA3D_INVALID_ID);
      }
      else {
         /* NULL entry explicitly unbinds this slot on the host. */
         rtv[i] = NULL;
      }
   }

   /* Setup depth stencil view */
   if (curr->zsbuf) {
      dsv = svga_validate_surface_view(svga, svga_surface(curr->zsbuf));
      if (!dsv) {
         return PIPE_ERROR_OUT_OF_MEMORY;
      }
   }
   else {
      dsv = NULL;
   }

   ret = SVGA3D_vgpu10_SetRenderTargets(svga->swc, num_color, rtv, dsv);
   if (ret != PIPE_OK)
      return ret;

   /* Sync the hw-state mirror with the new bindings.  Iterate over all
    * device-supported slots so stale references past curr->nr_cbufs are
    * released too.
    */
   for (i = 0; i < ss->max_color_buffers; i++) {
      if (hw->cbufs[i] != curr->cbufs[i]) {
         /* propagate the backed view surface before unbinding it */
         if (hw->cbufs[i] && svga_surface(hw->cbufs[i])->backed) {
            svga_propagate_surface(svga,
                                   &svga_surface(hw->cbufs[i])->backed->base);
         }
         pipe_surface_reference(&hw->cbufs[i], curr->cbufs[i]);
      }
   }
   hw->nr_cbufs = curr->nr_cbufs;

   if (hw->zsbuf != curr->zsbuf) {
      /* propagate the backed view surface before unbinding it */
      if (hw->zsbuf && svga_surface(hw->zsbuf)->backed) {
         svga_propagate_surface(svga,
                                &svga_surface(hw->zsbuf)->backed->base);
      }
      pipe_surface_reference(&hw->zsbuf, curr->zsbuf);
   }

   return ret;
}
/**
 * Bind a new framebuffer state (pipe_context::set_framebuffer_state).
 *
 * Ordering here is deliberate: pending draws are flushed first, then any
 * render targets that are being unbound and still need their view copied
 * back to the texture are propagated, and only then is the new state
 * copied in and derived state (rendered-to flags, depth-bias scale)
 * updated.
 */
static void
svga_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   unsigned i;

   /* make sure any pending drawing calls are flushed before changing
    * the framebuffer state
    */
   svga_hwtnl_flush_retry(svga);

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < dst->nr_cbufs; i++) {
      /* Slots past the new nr_cbufs are treated as being unbound. */
      struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
      if (dst->cbufs[i] && dst->cbufs[i] != s) {
         if (svga_surface_needs_propagation(dst->cbufs[i])) {
            propagate = TRUE;
            break;
         }
      }
   }

   if (propagate) {
      /* Back-copy each outgoing color buffer that still has pending
       * rendering in its view surface.
       */
      for (i = 0; i < dst->nr_cbufs; i++) {
         struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
         if (dst->cbufs[i] && dst->cbufs[i] != s)
            svga_propagate_surface(svga, dst->cbufs[i]);
      }
   }

   /* Check that all surfaces are the same size.
    * Actually, the virtual hardware may support rendertargets with
    * different size, depending on the host API and driver,
    */
   {
      int width = 0, height = 0;
      if (fb->zsbuf) {
         width = fb->zsbuf->width;
         height = fb->zsbuf->height;
      }
      for (i = 0; i < fb->nr_cbufs; ++i) {
         if (fb->cbufs[i]) {
            if (width && height) {
               if (fb->cbufs[i]->width != width ||
                   fb->cbufs[i]->height != height) {
                  /* Warn only; some hosts handle mixed sizes. */
                  debug_warning("Mixed-size color and depth/stencil surfaces "
                                "may not work properly");
               }
            }
            else {
               width = fb->cbufs[i]->width;
               height = fb->cbufs[i]->height;
            }
         }
      }
   }

   util_copy_framebuffer_state(dst, fb);

   /* Set the rendered-to flags */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = dst->cbufs[i];
      if (s) {
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }

   if (svga->curr.framebuffer.zsbuf) {
      /* Select the depth-bias scale factor from the Z buffer's precision. */
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      case PIPE_FORMAT_Z24X8_UNORM:
      case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         /* 23 bits of float mantissa */
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }

      /* Set rendered-to flag */
      {
         struct pipe_surface *s = dst->zsbuf;
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}
/**
 * Bind a new framebuffer state (pipe_context::set_framebuffer_state).
 *
 * Propagates any outgoing color buffers that still have pending rendering
 * in their view surfaces (flushing buffered draws first), then takes
 * references on the new surfaces and updates the depth-bias scale.
 */
static void
svga_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   int i;

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      if (dst->cbufs[i] && dst->cbufs[i] != fb->cbufs[i])
         if (svga_surface_needs_propagation(dst->cbufs[i]))
            propagate = TRUE;
   }

   if (propagate) {
      /* make sure that drawing calls comes before propagation calls */
      svga_hwtnl_flush_retry( svga );

      for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
         if (dst->cbufs[i] && dst->cbufs[i] != fb->cbufs[i])
            svga_propagate_surface(pipe, dst->cbufs[i]);
   }

   /* XXX: Actually the virtual hardware may support rendertargets with
    * different size, depending on the host API and driver, but since we cannot
    * know that make no such assumption here. */
   for(i = 0; i < fb->nr_cbufs; ++i) {
      if (fb->zsbuf && fb->cbufs[i]) {
         assert(fb->zsbuf->width == fb->cbufs[i]->width);
         assert(fb->zsbuf->height == fb->cbufs[i]->height);
      }
   }

   /* Take references on the incoming surfaces, releasing the old ones. */
   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
      pipe_surface_reference(&dst->cbufs[i], fb->cbufs[i]);
   pipe_surface_reference(&dst->zsbuf, fb->zsbuf);

   if (svga->curr.framebuffer.zsbuf) {
      /* Select the depth-bias scale factor from the Z buffer's precision. */
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_S8Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
      case PIPE_FORMAT_Z24S8_UNORM:
      case PIPE_FORMAT_Z24X8_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         /* 23 bits of float mantissa */
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}
/**
 * Bind a new framebuffer state (pipe_context::set_framebuffer_state).
 *
 * Propagates any outgoing color buffers that still have pending rendering
 * in their view surfaces (flushing buffered draws first), copies the new
 * state in, then updates derived state (rendered-to flags, depth-bias
 * scale).
 */
static void
svga_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   unsigned i;

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < dst->nr_cbufs; i++) {
      /* Slots past the new nr_cbufs are treated as being unbound. */
      struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
      if (dst->cbufs[i] && dst->cbufs[i] != s) {
         if (svga_surface_needs_propagation(dst->cbufs[i])) {
            propagate = TRUE;
            break;
         }
      }
   }

   if (propagate) {
      /* make sure that drawing calls comes before propagation calls */
      svga_hwtnl_flush_retry( svga );

      for (i = 0; i < dst->nr_cbufs; i++) {
         struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
         if (dst->cbufs[i] && dst->cbufs[i] != s)
            svga_propagate_surface(svga, dst->cbufs[i]);
      }
   }

   /* XXX: Actually the virtual hardware may support rendertargets with
    * different size, depending on the host API and driver, but since we cannot
    * know that make no such assumption here.
    */
   for(i = 0; i < fb->nr_cbufs; ++i) {
      if (fb->zsbuf && fb->cbufs[i]) {
         assert(fb->zsbuf->width == fb->cbufs[i]->width);
         assert(fb->zsbuf->height == fb->cbufs[i]->height);
      }
   }

   util_copy_framebuffer_state(dst, fb);

   /* Set the rendered-to flags */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = dst->cbufs[i];
      if (s) {
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }

   if (svga->curr.framebuffer.zsbuf) {
      /* Select the depth-bias scale factor from the Z buffer's precision. */
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      case PIPE_FORMAT_Z24X8_UNORM:
      case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         /* 23 bits of float mantissa */
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }

      /* Set rendered-to flag */
      {
         struct pipe_surface *s = dst->zsbuf;
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}