static void
fd_set_framebuffer_state(struct pipe_context *pctx,
                         const struct pipe_framebuffer_state *framebuffer)
{
   struct fd_context *ctx = fd_context(pctx);
   struct pipe_framebuffer_state *cso;

   DBG("%ux%u, %u layers, %u samples",
       framebuffer->width, framebuffer->height,
       framebuffer->layers, framebuffer->samples);

   cso = &ctx->framebuffer;

   if (util_framebuffer_state_equal(cso, framebuffer))
      return;

   util_copy_framebuffer_state(cso, framebuffer);
   cso->samples = util_framebuffer_get_num_samples(cso);

   if (ctx->screen->reorder) {
      struct fd_batch *old_batch = NULL;

      fd_batch_reference(&old_batch, ctx->batch);

      if (likely(old_batch))
         fd_batch_set_stage(old_batch, FD_STAGE_NULL);

      fd_batch_reference(&ctx->batch, NULL);
      fd_context_all_dirty(ctx);

      if (old_batch && old_batch->blit && !old_batch->back_blit) {
         /* for blits, there is not really much point in hanging on
          * to the uncommitted batch (ie. you probably don't blit
          * multiple times to the same surface), so we might as
          * well go ahead and flush this one:
          */
         fd_batch_flush(old_batch, false, false);
      }

      fd_batch_reference(&old_batch, NULL);
   } else {
      DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
          framebuffer->cbufs[0], framebuffer->zsbuf);
      fd_batch_flush(ctx->batch, false, false);
      util_copy_framebuffer_state(&ctx->batch->framebuffer, cso);
   }

   ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

   ctx->disabled_scissor.minx = 0;
   ctx->disabled_scissor.miny = 0;
   ctx->disabled_scissor.maxx = cso->width;
   ctx->disabled_scissor.maxy = cso->height;

   ctx->dirty |= FD_DIRTY_SCISSOR;
}
void
lp_setup_bind_framebuffer( struct lp_setup_context *setup,
                           const struct pipe_framebuffer_state *fb )
{
   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   /* Flush any old scene. */
   set_scene_state( setup, SETUP_FLUSHED, __FUNCTION__ );

   /*
    * Ensure the old scene is not reused.
    */
   assert(!setup->scene);

   /* Set new state.  This will be picked up later when we next need a
    * scene.
    */
   util_copy_framebuffer_state(&setup->fb, fb);
   setup->framebuffer.x0 = 0;
   setup->framebuffer.y0 = 0;
   setup->framebuffer.x1 = fb->width-1;
   setup->framebuffer.y1 = fb->height-1;
   setup->dirty |= LP_SETUP_NEW_SCISSOR;
}
void
cso_restore_framebuffer(struct cso_context *ctx)
{
   if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
      util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
      ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
      util_unreference_framebuffer_state(&ctx->fb_saved);
   }
}
void
cso_set_framebuffer(struct cso_context *ctx,
                    const struct pipe_framebuffer_state *fb)
{
   if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
      util_copy_framebuffer_state(&ctx->fb, fb);
      ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
   }
}
enum pipe_error
cso_set_framebuffer(struct cso_context *ctx,
                    const struct pipe_framebuffer_state *fb)
{
   if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
      util_copy_framebuffer_state(&ctx->fb, fb);
      ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
   }
   return PIPE_OK;
}
void
r300_decompress_zmask_locked(struct r300_context *r300)
{
   struct pipe_framebuffer_state saved_fb;

   memset(&saved_fb, 0, sizeof(saved_fb));
   util_copy_framebuffer_state(&saved_fb, r300->fb_state.state);
   r300_decompress_zmask_locked_unsafe(r300);
   r300->context.set_framebuffer_state(&r300->context, &saved_fb);
   util_unreference_framebuffer_state(&saved_fb);

   pipe_surface_reference(&r300->locked_zbuffer, NULL);
}
static void
nvc0_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);

   util_copy_framebuffer_state(&nvc0->framebuffer, fb);

   nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
}
void
lp_scene_begin_binning( struct lp_scene *scene,
                        struct pipe_framebuffer_state *fb )
{
   assert(lp_scene_is_empty(scene));

   util_copy_framebuffer_state(&scene->fb, fb);

   scene->tiles_x = align(fb->width, TILE_SIZE) / TILE_SIZE;
   scene->tiles_y = align(fb->height, TILE_SIZE) / TILE_SIZE;

   assert(scene->tiles_x <= TILES_X);
   assert(scene->tiles_y <= TILES_Y);
}
void
fd_context_destroy(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);
   unsigned i;

   DBG("");

   fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);

   if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
      util_queue_destroy(&ctx->flush_queue);

   util_copy_framebuffer_state(&ctx->framebuffer, NULL);
   fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
   fd_bc_invalidate_context(ctx);

   fd_prog_fini(pctx);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   if (ctx->clear_rs_state)
      pctx->delete_rasterizer_state(pctx, ctx->clear_rs_state);

   if (ctx->primconvert)
      util_primconvert_destroy(ctx->primconvert);

   slab_destroy_child(&ctx->transfer_pool);

   for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe); i++) {
      struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
      if (!pipe->bo)
         break;
      fd_bo_del(pipe->bo);
   }

   fd_device_del(ctx->dev);
   fd_pipe_del(ctx->pipe);

   if (fd_mesa_debug & (FD_DBG_BSTAT | FD_DBG_MSGS)) {
      printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, batch_restore=%u\n",
             (uint32_t)ctx->stats.batch_total,
             (uint32_t)ctx->stats.batch_sysmem,
             (uint32_t)ctx->stats.batch_gmem,
             (uint32_t)ctx->stats.batch_nondraw,
             (uint32_t)ctx->stats.batch_restore);
   }
}
static void
ilo_set_framebuffer_state(struct pipe_context *pipe,
                          const struct pipe_framebuffer_state *state)
{
   struct ilo_context *ilo = ilo_context(pipe);

   util_copy_framebuffer_state(&ilo->fb.state, state);

   if (state->nr_cbufs)
      ilo->fb.num_samples = state->cbufs[0]->texture->nr_samples;
   else if (state->zsbuf)
      ilo->fb.num_samples = state->zsbuf->texture->nr_samples;
   else
      ilo->fb.num_samples = 1;

   if (!ilo->fb.num_samples)
      ilo->fb.num_samples = 1;

   ilo->dirty |= ILO_DIRTY_FB;
}
/**
 * Set the framebuffer surface info: color buffers, zbuffer, stencil buffer.
 */
void
llvmpipe_set_framebuffer_state(struct pipe_context *pipe,
                               const struct pipe_framebuffer_state *fb)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);

   boolean changed = !util_framebuffer_state_equal(&lp->framebuffer, fb);

   assert(fb->width <= LP_MAX_WIDTH);
   assert(fb->height <= LP_MAX_HEIGHT);

   if (changed) {
      util_copy_framebuffer_state(&lp->framebuffer, fb);

      if (LP_PERF & PERF_NO_DEPTH) {
         pipe_surface_reference(&lp->framebuffer.zsbuf, NULL);
      }

      /* Tell draw module how deep the Z/depth buffer is */
      if (lp->framebuffer.zsbuf) {
         int depth_bits;
         double mrd;
         depth_bits = util_format_get_component_bits(lp->framebuffer.zsbuf->format,
                                                     UTIL_FORMAT_COLORSPACE_ZS,
                                                     0);
         if (depth_bits > 16) {
            mrd = 0.0000001;
         }
         else {
            mrd = 0.00002;
         }
         lp->mrd = mrd;
         draw_set_mrd(lp->draw, mrd);
      }

      lp_setup_bind_framebuffer( lp->setup, &lp->framebuffer );

      lp->dirty |= LP_NEW_FRAMEBUFFER;
   }
}
/**
 * Set the framebuffer surface info: color buffers, zbuffer, stencil buffer.
 */
void
llvmpipe_set_framebuffer_state(struct pipe_context *pipe,
                               const struct pipe_framebuffer_state *fb)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);

   boolean changed = !util_framebuffer_state_equal(&lp->framebuffer, fb);

   assert(fb->width <= LP_MAX_WIDTH);
   assert(fb->height <= LP_MAX_HEIGHT);

   if (changed) {
      util_copy_framebuffer_state(&lp->framebuffer, fb);

      if (LP_PERF & PERF_NO_DEPTH) {
         pipe_surface_reference(&lp->framebuffer.zsbuf, NULL);
      }

      /* Tell draw module how deep the Z/depth buffer is.
       *
       * If no depth buffer is bound, send the utility function the default
       * format for no bound depth (PIPE_FORMAT_NONE).
       *
       * FIXME: mrd constant isn't right should use a value derived from
       * current primitive not a constant (for float depth buffers)
       */
      lp->mrd = util_get_depth_format_mrd((lp->framebuffer.zsbuf) ?
                                          lp->framebuffer.zsbuf->format :
                                          PIPE_FORMAT_NONE);
      draw_set_mrd(lp->draw, lp->mrd);

      lp_setup_bind_framebuffer( lp->setup, &lp->framebuffer );

      lp->dirty |= LP_NEW_FRAMEBUFFER;
   }
}
void
cso_save_framebuffer(struct cso_context *ctx)
{
   util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
}
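The three cso_context helpers above (cso_save_framebuffer, cso_set_framebuffer, cso_restore_framebuffer) are typically used together to wrap a meta operation such as a blit. A minimal usage sketch under that assumption, where the caller's cso_context *cso and temporary pipe_framebuffer_state meta_fb are illustrative names, not taken from the sources above:

   /* Save the current framebuffer binding, point rendering at a temporary
    * target for the meta operation, then put the original state back.
    * 'cso' and 'meta_fb' are assumed to be set up by the caller; this is
    * only a usage sketch, not code from the listings above. */
   cso_save_framebuffer(cso);
   cso_set_framebuffer(cso, &meta_fb);

   /* ... emit the meta draw here ... */

   cso_restore_framebuffer(cso);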
static void
svga_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   unsigned i;

   /* make sure any pending drawing calls are flushed before changing
    * the framebuffer state
    */
   svga_hwtnl_flush_retry(svga);

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
      if (dst->cbufs[i] && dst->cbufs[i] != s) {
         if (svga_surface_needs_propagation(dst->cbufs[i])) {
            propagate = TRUE;
            break;
         }
      }
   }

   if (propagate) {
      for (i = 0; i < dst->nr_cbufs; i++) {
         struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
         if (dst->cbufs[i] && dst->cbufs[i] != s)
            svga_propagate_surface(svga, dst->cbufs[i]);
      }
   }

   /* Check that all surfaces are the same size.
    * Actually, the virtual hardware may support rendertargets with
    * different size, depending on the host API and driver.
    */
   {
      int width = 0, height = 0;
      if (fb->zsbuf) {
         width = fb->zsbuf->width;
         height = fb->zsbuf->height;
      }
      for (i = 0; i < fb->nr_cbufs; ++i) {
         if (fb->cbufs[i]) {
            if (width && height) {
               if (fb->cbufs[i]->width != width ||
                   fb->cbufs[i]->height != height) {
                  debug_warning("Mixed-size color and depth/stencil surfaces "
                                "may not work properly");
               }
            }
            else {
               width = fb->cbufs[i]->width;
               height = fb->cbufs[i]->height;
            }
         }
      }
   }

   util_copy_framebuffer_state(dst, fb);

   /* Set the rendered-to flags */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = dst->cbufs[i];
      if (s) {
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }

   if (svga->curr.framebuffer.zsbuf) {
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      case PIPE_FORMAT_Z24X8_UNORM:
      case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }

      /* Set rendered-to flag */
      {
         struct pipe_surface *s = dst->zsbuf;
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}
static void
svga_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_framebuffer_state *dst = &svga->curr.framebuffer;
   boolean propagate = FALSE;
   unsigned i;

   dst->width = fb->width;
   dst->height = fb->height;
   dst->nr_cbufs = fb->nr_cbufs;

   /* check if we need to propagate any of the target surfaces */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
      if (dst->cbufs[i] && dst->cbufs[i] != s) {
         if (svga_surface_needs_propagation(dst->cbufs[i])) {
            propagate = TRUE;
            break;
         }
      }
   }

   if (propagate) {
      /* make sure that drawing calls comes before propagation calls */
      svga_hwtnl_flush_retry( svga );

      for (i = 0; i < dst->nr_cbufs; i++) {
         struct pipe_surface *s = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;
         if (dst->cbufs[i] && dst->cbufs[i] != s)
            svga_propagate_surface(svga, dst->cbufs[i]);
      }
   }

   /* XXX: Actually the virtual hardware may support rendertargets with
    * different size, depending on the host API and driver, but since we
    * cannot know that make no such assumption here.
    */
   for (i = 0; i < fb->nr_cbufs; ++i) {
      if (fb->zsbuf && fb->cbufs[i]) {
         assert(fb->zsbuf->width == fb->cbufs[i]->width);
         assert(fb->zsbuf->height == fb->cbufs[i]->height);
      }
   }

   util_copy_framebuffer_state(dst, fb);

   /* Set the rendered-to flags */
   for (i = 0; i < dst->nr_cbufs; i++) {
      struct pipe_surface *s = dst->cbufs[i];
      if (s) {
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }

   if (svga->curr.framebuffer.zsbuf) {
      switch (svga->curr.framebuffer.zsbuf->format) {
      case PIPE_FORMAT_Z16_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D16;
         break;
      case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      case PIPE_FORMAT_Z24X8_UNORM:
      case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      case PIPE_FORMAT_X8Z24_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D24S8;
         break;
      case PIPE_FORMAT_Z32_UNORM:
         svga->curr.depthscale = 1.0f / DEPTH_BIAS_SCALE_FACTOR_D32;
         break;
      case PIPE_FORMAT_Z32_FLOAT:
         svga->curr.depthscale = 1.0f / ((float)(1<<23));
         break;
      default:
         svga->curr.depthscale = 0.0f;
         break;
      }

      /* Set rendered-to flag */
      {
         struct pipe_surface *s = dst->zsbuf;
         struct svga_texture *t = svga_texture(s->texture);
         svga_set_texture_rendered_to(t, s->u.tex.first_layer, s->u.tex.level);
      }
   }
   else {
      svga->curr.depthscale = 0.0f;
   }

   svga->dirty |= SVGA_NEW_FRAME_BUFFER;
}