/* Wrap the pipe surface backing an EGLImage in a gl_renderbuffer so it can
 * be used as a render target.
 */
static void
st_egl_image_target_renderbuffer_storage(struct gl_context *ctx,
                                         struct gl_renderbuffer *rb,
                                         GLeglImageOES image_handle)
{
   struct st_context *st = st_context(ctx);
   struct st_renderbuffer *strb = st_renderbuffer(rb);
   struct pipe_surface *ps;
   unsigned usage;

   usage = PIPE_BIND_RENDER_TARGET;
   ps = st_manager_get_egl_image_surface(st, (void *) image_handle, usage);
   if (ps) {
      strb->Base.Width = ps->width;
      strb->Base.Height = ps->height;
      strb->Base.Format = st_pipe_format_to_mesa_format(ps->format);
      strb->Base.DataType = st_format_datatype(ps->format);
      strb->Base._BaseFormat = st_pipe_format_to_base_format(ps->format);
      strb->Base.InternalFormat = strb->Base._BaseFormat;

      pipe_surface_reference(&strb->surface, ps);
      pipe_resource_reference(&strb->texture, ps->texture);

      pipe_surface_reference(&ps, NULL);
   }
}
/* Bind a range of surfaces for stage t, updating the valid and dirty
 * bitmasks accordingly.
 */
static void
nvc0_bind_surfaces_range(struct nvc0_context *nvc0, const unsigned t,
                         unsigned start, unsigned nr,
                         struct pipe_surface **psurfaces)
{
   const unsigned end = start + nr;
   const unsigned mask = ((1 << nr) - 1) << start;
   unsigned i;

   if (psurfaces) {
      for (i = start; i < end; ++i) {
         const unsigned p = i - start;
         if (psurfaces[p])
            nvc0->surfaces_valid[t] |= (1 << i);
         else
            nvc0->surfaces_valid[t] &= ~(1 << i);
         pipe_surface_reference(&nvc0->surfaces[t][i], psurfaces[p]);
      }
   } else {
      for (i = start; i < end; ++i)
         pipe_surface_reference(&nvc0->surfaces[t][i], NULL);
      nvc0->surfaces_valid[t] &= ~mask;
   }
   nvc0->surfaces_dirty[t] |= mask;

   if (t == 0)
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_SUF);
   else
      nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
}
void st_destroy_context( struct st_context *st )
{
   struct pipe_context *pipe = st->pipe;
   struct cso_context *cso = st->cso_context;
   GLcontext *ctx = st->ctx;
   GLuint i;

   /* need to unbind and destroy CSO objects before anything else */
   cso_release_all(st->cso_context);

   st_reference_fragprog(st, &st->fp, NULL);
   st_reference_vertprog(st, &st->vp, NULL);

   /* release framebuffer surfaces */
   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      pipe_surface_reference(&st->state.framebuffer.cbufs[i], NULL);
   }
   pipe_surface_reference(&st->state.framebuffer.zsbuf, NULL);

   _mesa_delete_program_cache(st->ctx, st->pixel_xfer.cache);

   _vbo_DestroyContext(st->ctx);

   _mesa_free_context_data(ctx);

   st_destroy_context_priv(st);

   cso_destroy_context(cso);

   pipe->destroy( pipe );

   free(ctx);
}
/* Bind a range of shader resource surfaces; afterwards, trim the tracked
 * count down to the last non-NULL slot.
 */
static void
ilo_set_shader_resources(struct pipe_context *pipe,
                         unsigned start, unsigned count,
                         struct pipe_surface **surfaces)
{
   struct ilo_state_vector *vec = &ilo_context(pipe)->state_vector;
   struct ilo_resource_state *dst = &vec->resource;
   unsigned i;

   assert(start + count <= Elements(dst->states));

   if (surfaces) {
      for (i = 0; i < count; i++)
         pipe_surface_reference(&dst->states[start + i], surfaces[i]);
   } else {
      for (i = 0; i < count; i++)
         pipe_surface_reference(&dst->states[start + i], NULL);
   }

   if (dst->count <= start + count) {
      if (surfaces)
         count += start;
      else
         count = start;

      while (count > 0 && !dst->states[count - 1])
         count--;

      dst->count = count;
   }

   vec->dirty |= ILO_DIRTY_RESOURCE;
}
void
vc4_flush(struct pipe_context *pctx)
{
   struct vc4_context *vc4 = vc4_context(pctx);
   struct pipe_surface *cbuf = vc4->framebuffer.cbufs[0];
   struct pipe_surface *zsbuf = vc4->framebuffer.zsbuf;

   if (!vc4->needs_flush)
      return;

   /* The RCL setup would choke if the draw bounds cause no drawing, so
    * just drop the drawing if that's the case.
    */
   if (vc4->draw_max_x <= vc4->draw_min_x ||
       vc4->draw_max_y <= vc4->draw_min_y) {
      vc4_job_reset(vc4);
      return;
   }

   /* Increment the semaphore indicating that binning is done and
    * unblocking the render thread.  Note that this doesn't act until the
    * FLUSH completes.
    */
   cl_ensure_space(&vc4->bcl, 8);
   struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
   cl_u8(&bcl, VC4_PACKET_INCREMENT_SEMAPHORE);
   /* The FLUSH caps all of our bin lists with a VC4_PACKET_RETURN. */
   cl_u8(&bcl, VC4_PACKET_FLUSH);
   cl_end(&vc4->bcl, bcl);

   if (cbuf && (vc4->resolve & PIPE_CLEAR_COLOR0)) {
      pipe_surface_reference(&vc4->color_write, cbuf);
      if (!(vc4->cleared & PIPE_CLEAR_COLOR0)) {
         pipe_surface_reference(&vc4->color_read, cbuf);
      } else {
         pipe_surface_reference(&vc4->color_read, NULL);
      }
   } else {
      pipe_surface_reference(&vc4->color_write, NULL);
      pipe_surface_reference(&vc4->color_read, NULL);
   }

   if (vc4->framebuffer.zsbuf &&
       (vc4->resolve & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
      pipe_surface_reference(&vc4->zs_write, zsbuf);
      if (!(vc4->cleared & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
         pipe_surface_reference(&vc4->zs_read, zsbuf);
      } else {
         pipe_surface_reference(&vc4->zs_read, NULL);
      }
   } else {
      pipe_surface_reference(&vc4->zs_write, NULL);
      pipe_surface_reference(&vc4->zs_read, NULL);
   }

   vc4_job_submit(vc4);
}
static void brw_destroy_context( struct pipe_context *pipe )
{
   struct brw_context *brw = brw_context(pipe);
   int i;

   brw_context_flush( brw );
   brw_batchbuffer_free( brw->batch );
   brw_destroy_state(brw);

   brw_draw_cleanup( brw );

   brw_pipe_blend_cleanup( brw );
   brw_pipe_depth_stencil_cleanup( brw );
   brw_pipe_framebuffer_cleanup( brw );
   brw_pipe_flush_cleanup( brw );
   brw_pipe_misc_cleanup( brw );
   brw_pipe_query_cleanup( brw );
   brw_pipe_rast_cleanup( brw );
   brw_pipe_sampler_cleanup( brw );
   brw_pipe_shader_cleanup( brw );
   brw_pipe_vertex_cleanup( brw );
   brw_pipe_clear_cleanup( brw );

   brw_hw_cc_cleanup( brw );

   FREE(brw->wm.compile_data);

   for (i = 0; i < brw->curr.fb.nr_cbufs; i++)
      pipe_surface_reference(&brw->curr.fb.cbufs[i], NULL);
   brw->curr.fb.nr_cbufs = 0;
   pipe_surface_reference(&brw->curr.fb.zsbuf, NULL);

   bo_reference(&brw->curbe.curbe_bo, NULL);
   bo_reference(&brw->vs.prog_bo, NULL);
   bo_reference(&brw->vs.state_bo, NULL);
   bo_reference(&brw->vs.bind_bo, NULL);
   bo_reference(&brw->gs.prog_bo, NULL);
   bo_reference(&brw->gs.state_bo, NULL);
   bo_reference(&brw->clip.prog_bo, NULL);
   bo_reference(&brw->clip.state_bo, NULL);
   bo_reference(&brw->clip.vp_bo, NULL);
   bo_reference(&brw->sf.prog_bo, NULL);
   bo_reference(&brw->sf.state_bo, NULL);
   bo_reference(&brw->sf.vp_bo, NULL);

   for (i = 0; i < Elements(brw->wm.sdc_bo); i++)
      bo_reference(&brw->wm.sdc_bo[i], NULL);
   bo_reference(&brw->wm.bind_bo, NULL);

   for (i = 0; i < Elements(brw->wm.surf_bo); i++)
      bo_reference(&brw->wm.surf_bo[i], NULL);
   bo_reference(&brw->wm.sampler_bo, NULL);
   bo_reference(&brw->wm.prog_bo, NULL);
   bo_reference(&brw->wm.state_bo, NULL);
}
static enum pipe_error
emit_framebuffer( struct svga_context *svga,
                  unsigned dirty )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   const struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   boolean reemit = svga->rebind.rendertargets;
   unsigned i;
   enum pipe_error ret;

   /*
    * We need to reemit non-null surface bindings, even when they are not
    * dirty, to ensure that the resources are paged in.
    */

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if (curr->cbufs[i] != hw->cbufs[i] || (reemit && hw->cbufs[i])) {
         if (svga->curr.nr_fbs++ > MAX_RT_PER_BATCH)
            return PIPE_ERROR_OUT_OF_MEMORY;

         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_COLOR0 + i,
                                      curr->cbufs[i]);
         if (ret != PIPE_OK)
            return ret;

         pipe_surface_reference(&hw->cbufs[i], curr->cbufs[i]);
      }
   }

   if (curr->zsbuf != hw->zsbuf || (reemit && hw->zsbuf)) {
      ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_DEPTH, curr->zsbuf);
      if (ret != PIPE_OK)
         return ret;

      if (curr->zsbuf &&
          curr->zsbuf->format == PIPE_FORMAT_S8_UINT_Z24_UNORM) {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL,
                                      curr->zsbuf);
         if (ret != PIPE_OK)
            return ret;
      }
      else {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL, NULL);
         if (ret != PIPE_OK)
            return ret;
      }

      pipe_surface_reference(&hw->zsbuf, curr->zsbuf);
   }

   svga->rebind.rendertargets = FALSE;

   return PIPE_OK;
}
/**
 * XXX this might get moved someday
 * Set the framebuffer surface info: color buffers, zbuffer, stencil buffer.
 * Here, we flush the old surfaces and update the tile cache to point to the
 * new surfaces.
 */
void
softpipe_set_framebuffer_state(struct pipe_context *pipe,
                               const struct pipe_framebuffer_state *fb)
{
   struct softpipe_context *sp = softpipe_context(pipe);
   uint i;

   draw_flush(sp->draw);

   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      struct pipe_surface *cb = i < fb->nr_cbufs ? fb->cbufs[i] : NULL;

      /* check if changing cbuf */
      if (sp->framebuffer.cbufs[i] != cb) {
         /* flush old */
         sp_flush_tile_cache(sp->cbuf_cache[i]);

         /* assign new */
         pipe_surface_reference(&sp->framebuffer.cbufs[i], cb);

         /* update cache */
         sp_tile_cache_set_surface(sp->cbuf_cache[i], cb);
      }
   }

   sp->framebuffer.nr_cbufs = fb->nr_cbufs;

   /* zbuf changing? */
   if (sp->framebuffer.zsbuf != fb->zsbuf) {
      /* flush old */
      sp_flush_tile_cache(sp->zsbuf_cache);

      /* assign new */
      pipe_surface_reference(&sp->framebuffer.zsbuf, fb->zsbuf);

      /* update cache */
      sp_tile_cache_set_surface(sp->zsbuf_cache, fb->zsbuf);

      /* Tell draw module how deep the Z/depth buffer is
       *
       * If no depth buffer is bound, send the utility function the
       * format for no bound depth (PIPE_FORMAT_NONE).
       */
      draw_set_zs_format(sp->draw,
                         (sp->framebuffer.zsbuf) ?
                            sp->framebuffer.zsbuf->format : PIPE_FORMAT_NONE);
   }

   sp->framebuffer.width = fb->width;
   sp->framebuffer.height = fb->height;
   sp->framebuffer.samples = fb->samples;
   sp->framebuffer.layers = fb->layers;

   sp->dirty |= SP_NEW_FRAMEBUFFER;
}
static void
swr_destroy(struct pipe_context *pipe)
{
   struct swr_context *ctx = swr_context(pipe);
   struct swr_screen *screen = swr_screen(pipe->screen);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      if (ctx->framebuffer.cbufs[i]) {
         struct swr_resource *res =
            swr_resource(ctx->framebuffer.cbufs[i]->texture);
         /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
         res->curr_pipe = NULL;
         pipe_surface_reference(&ctx->framebuffer.cbufs[i], NULL);
      }
   }

   if (ctx->framebuffer.zsbuf) {
      struct swr_resource *res =
         swr_resource(ctx->framebuffer.zsbuf->texture);
      /* NULL curr_pipe, so we don't have a reference to a deleted pipe */
      res->curr_pipe = NULL;
      pipe_surface_reference(&ctx->framebuffer.zsbuf, NULL);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
      pipe_sampler_view_reference(
         &ctx->sampler_views[PIPE_SHADER_FRAGMENT][i], NULL);
   }

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) {
      pipe_sampler_view_reference(
         &ctx->sampler_views[PIPE_SHADER_VERTEX][i], NULL);
   }

   if (ctx->pipe.stream_uploader)
      u_upload_destroy(ctx->pipe.stream_uploader);

   /* Idle core after destroying buffer resources, but before deleting
    * context.  Destroying resources has potentially called StoreTiles. */
   ctx->api.pfnSwrWaitForIdle(ctx->swrContext);

   if (ctx->swrContext)
      ctx->api.pfnSwrDestroyContext(ctx->swrContext);

   delete ctx->blendJIT;

   swr_destroy_scratch_buffers(ctx);

   /* Only update screen->pipe if current context is being destroyed */
   assert(screen);
   if (screen->pipe == pipe)
      screen->pipe = NULL;

   AlignedFree(ctx);
}
static enum pipe_error
emit_fb_vgpu9(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   const struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   boolean reemit = svga->rebind.flags.rendertargets;
   unsigned i;
   enum pipe_error ret;

   assert(!svga_have_vgpu10(svga));

   /*
    * We need to reemit non-null surface bindings, even when they are not
    * dirty, to ensure that the resources are paged in.
    */

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if ((curr->cbufs[i] != hw->cbufs[i]) || (reemit && hw->cbufs[i])) {
         if (svga->curr.nr_fbs++ > MAX_RT_PER_BATCH)
            return PIPE_ERROR_OUT_OF_MEMORY;

         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_COLOR0 + i,
                                      curr->cbufs[i]);
         if (ret != PIPE_OK)
            return ret;

         pipe_surface_reference(&hw->cbufs[i], curr->cbufs[i]);
      }
   }

   if ((curr->zsbuf != hw->zsbuf) || (reemit && hw->zsbuf)) {
      ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_DEPTH, curr->zsbuf);
      if (ret != PIPE_OK)
         return ret;

      if (curr->zsbuf &&
          util_format_is_depth_and_stencil(curr->zsbuf->format)) {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL,
                                      curr->zsbuf);
         if (ret != PIPE_OK)
            return ret;
      }
      else {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL, NULL);
         if (ret != PIPE_OK)
            return ret;
      }

      pipe_surface_reference(&hw->zsbuf, curr->zsbuf);
   }

   return PIPE_OK;
}
void
NineSurface9_dtor( struct NineSurface9 *This )
{
   if (This->transfer)
      NineSurface9_UnlockRect(This);

   pipe_surface_reference(&This->surface[0], NULL);
   pipe_surface_reference(&This->surface[1], NULL);

   /* Release system memory when we have to manage it (no parent) */
   if (!This->base.base.container && This->data)
      FREE(This->data);

   NineResource9_dtor(&This->base);
}
void svga_cleanup_framebuffer(struct svga_context *svga)
{
   struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   int i;

   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      pipe_surface_reference(&curr->cbufs[i], NULL);
      pipe_surface_reference(&hw->cbufs[i], NULL);
   }

   pipe_surface_reference(&curr->zsbuf, NULL);
   pipe_surface_reference(&hw->zsbuf, NULL);
}
void
ilo_state_vector_cleanup(struct ilo_state_vector *vec)
{
   unsigned i, sh;

   for (i = 0; i < Elements(vec->vb.states); i++) {
      if (vec->vb.enabled_mask & (1 << i))
         pipe_resource_reference(&vec->vb.states[i].buffer, NULL);
   }

   pipe_resource_reference(&vec->ib.buffer, NULL);
   pipe_resource_reference(&vec->ib.hw_resource, NULL);

   for (i = 0; i < vec->so.count; i++)
      pipe_so_target_reference(&vec->so.states[i], NULL);

   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      for (i = 0; i < vec->view[sh].count; i++) {
         struct pipe_sampler_view *view = vec->view[sh].states[i];
         pipe_sampler_view_reference(&view, NULL);
      }

      for (i = 0; i < Elements(vec->cbuf[sh].cso); i++) {
         struct ilo_cbuf_cso *cbuf = &vec->cbuf[sh].cso[i];
         pipe_resource_reference(&cbuf->resource, NULL);
      }
   }

   for (i = 0; i < vec->resource.count; i++)
      pipe_surface_reference(&vec->resource.states[i], NULL);

   for (i = 0; i < vec->fb.state.nr_cbufs; i++)
      pipe_surface_reference(&vec->fb.state.cbufs[i], NULL);

   if (vec->fb.state.zsbuf)
      pipe_surface_reference(&vec->fb.state.zsbuf, NULL);

   for (i = 0; i < vec->cs_resource.count; i++)
      pipe_surface_reference(&vec->cs_resource.states[i], NULL);

   for (i = 0; i < vec->global_binding.count; i++) {
      struct ilo_global_binding_cso *cso =
         util_dynarray_element(&vec->global_binding.bindings,
                               struct ilo_global_binding_cso, i);
      pipe_resource_reference(&cso->resource, NULL);
   }

   util_dynarray_fini(&vec->global_binding.bindings);
}
void svga_cleanup_framebuffer(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   unsigned i;

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      pipe_surface_reference(&curr->cbufs[i], NULL);
      pipe_surface_reference(&hw->cbufs[i], NULL);
   }

   pipe_surface_reference(&curr->zsbuf, NULL);
   pipe_surface_reference(&hw->zsbuf, NULL);
}
void
util_unreference_framebuffer_state(struct pipe_framebuffer_state *fb)
{
   unsigned i;

   for (i = 0; i < fb->nr_cbufs; i++) {
      pipe_surface_reference(&fb->cbufs[i], NULL);
   }

   pipe_surface_reference(&fb->zsbuf, NULL);

   fb->samples = fb->layers = 0;
   fb->width = fb->height = 0;
   fb->nr_cbufs = 0;
}
int
xa_ctx_srf_create(struct xa_context *ctx, struct xa_surface *dst)
{
   struct pipe_screen *screen = ctx->pipe->screen;
   struct pipe_surface srf_templ;

   /*
    * Cache surfaces unless we change render target
    */
   if (ctx->srf) {
      if (ctx->srf->texture == dst->tex)
         return XA_ERR_NONE;

      pipe_surface_reference(&ctx->srf, NULL);
   }

   if (!screen->is_format_supported(screen, dst->tex->format,
                                    PIPE_TEXTURE_2D, 0,
                                    PIPE_BIND_RENDER_TARGET))
      return -XA_ERR_INVAL;

   u_surface_default_template(&srf_templ, dst->tex);
   ctx->srf = ctx->pipe->create_surface(ctx->pipe, dst->tex, &srf_templ);
   if (!ctx->srf)
      return -XA_ERR_NORES;

   return XA_ERR_NONE;
}
/**
 * Display an attachment to the xlib_drawable of the framebuffer.
 */
static boolean
xmesa_st_framebuffer_display(struct st_framebuffer_iface *stfbi,
                             enum st_attachment_type statt)
{
   struct xmesa_st_framebuffer *xstfb = xmesa_st_framebuffer(stfbi);
   struct pipe_resource *ptex = xstfb->textures[statt];
   struct pipe_surface *psurf;

   if (!ptex)
      return TRUE;

   psurf = xstfb->display_surface;
   /* (re)allocate the surface for the texture to be displayed */
   if (!psurf || psurf->texture != ptex) {
      pipe_surface_reference(&xstfb->display_surface, NULL);

      psurf = xstfb->screen->get_tex_surface(xstfb->screen, ptex,
                                             0, 0, 0,
                                             PIPE_BIND_DISPLAY_TARGET);
      if (!psurf)
         return FALSE;

      xstfb->display_surface = psurf;
   }

   xstfb->screen->flush_frontbuffer(xstfb->screen, psurf, &xstfb->buffer->ws);

   return TRUE;
}
/**
 * Destroy a VdpOutputSurface.
 */
VdpStatus
vlVdpOutputSurfaceDestroy(VdpOutputSurface surface)
{
   vlVdpOutputSurface *vlsurface;
   struct pipe_context *pipe;

   vlsurface = vlGetDataHTAB(surface);
   if (!vlsurface)
      return VDP_STATUS_INVALID_HANDLE;

   pipe = vlsurface->device->context;

   pipe_mutex_lock(vlsurface->device->mutex);
   vlVdpResolveDelayedRendering(vlsurface->device, NULL, NULL);

   pipe_surface_reference(&vlsurface->surface, NULL);
   pipe_sampler_view_reference(&vlsurface->sampler_view, NULL);
   pipe->screen->fence_reference(pipe->screen, &vlsurface->fence, NULL);
   vl_compositor_cleanup_state(&vlsurface->cstate);
   pipe_mutex_unlock(vlsurface->device->mutex);

   vlRemoveDataHTAB(surface);
   FREE(vlsurface);

   return VDP_STATUS_OK;
}
XA_EXPORT void
xa_context_destroy(struct xa_context *r)
{
   struct pipe_resource **vsbuf = &r->vs_const_buffer;
   struct pipe_resource **fsbuf = &r->fs_const_buffer;

   if (*vsbuf)
      pipe_resource_reference(vsbuf, NULL);

   if (*fsbuf)
      pipe_resource_reference(fsbuf, NULL);

   if (r->shaders) {
      xa_shaders_destroy(r->shaders);
      r->shaders = NULL;
   }

   xa_ctx_sampler_views_destroy(r);
   if (r->srf)
      pipe_surface_reference(&r->srf, NULL);

   if (r->cso) {
      cso_destroy_context(r->cso);
      r->cso = NULL;
   }

   r->pipe->destroy(r->pipe);
}
void
vg_prepare_blend_surface_from_mask(struct vg_context *ctx)
{
   struct pipe_surface *dest_surface = NULL;
   struct pipe_context *pipe = ctx->pipe;
   struct st_framebuffer *stfb = ctx->draw_buffer;
   struct st_renderbuffer *strb = stfb->strb;

   vg_validate_state(ctx);

   /* first finish all pending rendering */
   vgFinish();

   dest_surface = pipe->screen->get_tex_surface(pipe->screen,
                                                stfb->blend_texture_view->texture,
                                                0, 0, 0,
                                                PIPE_BIND_RENDER_TARGET);

   /* flip it, because we want to use it as a sampler */
   util_blit_pixels_tex(ctx->blit,
                        stfb->alpha_mask_view,
                        0, strb->height,
                        strb->width, 0,
                        dest_surface,
                        0, 0,
                        strb->width, strb->height,
                        0.0, PIPE_TEX_MIPFILTER_NEAREST);

   /* make sure it's complete */
   vgFinish();

   if (dest_surface)
      pipe_surface_reference(&dest_surface, NULL);
}
/* Restore the context state that the internal blit temporarily replaced. */
static void
nv50_blitctx_post_blit(struct nv50_context *nv50, struct nv50_blitctx *blit)
{
   int s;

   pipe_surface_reference(&nv50->framebuffer.cbufs[0], NULL);

   nv50->framebuffer.width = blit->saved.fb.width;
   nv50->framebuffer.height = blit->saved.fb.height;
   nv50->framebuffer.nr_cbufs = blit->saved.fb.nr_cbufs;
   nv50->framebuffer.cbufs[0] = blit->saved.fb.cbufs[0];
   nv50->framebuffer.zsbuf = blit->saved.fb.zsbuf;

   nv50->vertprog = blit->saved.vp;
   nv50->gmtyprog = blit->saved.gp;
   nv50->fragprog = blit->saved.fp;

   nv50->clip.nr = blit->saved.clip_nr;

   pipe_sampler_view_reference(&nv50->textures[2][0], NULL);

   for (s = 0; s < 3; ++s) {
      nv50->num_textures[s] = blit->saved.num_textures[s];
      nv50->num_samplers[s] = blit->saved.num_samplers[s];
   }
   nv50->textures[2][0] = blit->saved.texture;
   nv50->samplers[2][0] = blit->saved.sampler;

   nv50->dirty = blit->saved.dirty |
      (NV50_NEW_FRAMEBUFFER | NV50_NEW_SCISSOR | NV50_NEW_SAMPLE_MASK |
       NV50_NEW_RASTERIZER | NV50_NEW_ZSA | NV50_NEW_BLEND |
       NV50_NEW_TEXTURES | NV50_NEW_SAMPLERS |
       NV50_NEW_VERTPROG | NV50_NEW_GMTYPROG | NV50_NEW_FRAGPROG);
}
/* Wrap a real surface in a trace proxy surface.  On failure, the reference
 * to the real surface is released.
 */
struct pipe_surface *
trace_surf_create(struct trace_resource *tr_tex,
                  struct pipe_surface *surface)
{
   struct trace_surface *tr_surf;

   if (!surface)
      goto error;

   assert(surface->texture == tr_tex->resource);

   tr_surf = CALLOC_STRUCT(trace_surface);
   if (!tr_surf)
      goto error;

   memcpy(&tr_surf->base, surface, sizeof(struct pipe_surface));

   pipe_reference_init(&tr_surf->base.reference, 1);
   tr_surf->base.texture = NULL;
   pipe_resource_reference(&tr_surf->base.texture, &tr_tex->base);
   tr_surf->surface = surface;

   return &tr_surf->base;

error:
   pipe_surface_reference(&surface, NULL);
   return NULL;
}
/* Wrap a real surface in a galahad proxy surface.  On failure, the reference
 * to the real surface is released.
 */
struct pipe_surface *
galahad_surface_create(struct galahad_context *glhd_context,
                       struct galahad_resource *glhd_resource,
                       struct pipe_surface *surface)
{
   struct galahad_surface *glhd_surface;

   if (!surface)
      goto error;

   assert(surface->texture == glhd_resource->resource);

   glhd_surface = CALLOC_STRUCT(galahad_surface);
   if (!glhd_surface)
      goto error;

   memcpy(&glhd_surface->base, surface, sizeof(struct pipe_surface));

   pipe_reference_init(&glhd_surface->base.reference, 1);
   glhd_surface->base.texture = NULL;
   pipe_resource_reference(&glhd_surface->base.texture, &glhd_resource->base);
   glhd_surface->surface = surface;

   return &glhd_surface->base;

error:
   pipe_surface_reference(&surface, NULL);
   return NULL;
}
void
galahad_surface_destroy(struct galahad_surface *glhd_surface)
{
   pipe_resource_reference(&glhd_surface->base.texture, NULL);
   pipe_surface_reference(&glhd_surface->surface, NULL);
   FREE(glhd_surface);
}
void
trace_surf_destroy(struct trace_surface *tr_surf)
{
   pipe_resource_reference(&tr_surf->base.texture, NULL);
   pipe_surface_reference(&tr_surf->surface, NULL);
   FREE(tr_surf);
}
/* Wrap a real surface in an rbug proxy surface.  On failure, the reference
 * to the real surface is released.
 */
struct pipe_surface *
rbug_surface_create(struct rbug_context *rb_context,
                    struct rbug_resource *rb_resource,
                    struct pipe_surface *surface)
{
   struct rbug_surface *rb_surface;

   if (!surface)
      goto error;

   assert(surface->texture == rb_resource->resource);

   rb_surface = CALLOC_STRUCT(rbug_surface);
   if (!rb_surface)
      goto error;

   memcpy(&rb_surface->base, surface, sizeof(struct pipe_surface));

   pipe_reference_init(&rb_surface->base.reference, 1);
   rb_surface->base.texture = NULL;
   pipe_resource_reference(&rb_surface->base.texture, &rb_resource->base);
   rb_surface->surface = surface;

   return &rb_surface->base;

error:
   pipe_surface_reference(&surface, NULL);
   return NULL;
}
/* Wrap a real surface in a trace proxy surface and add it to the trace
 * screen's surface list.  On failure, the reference to the real surface is
 * released.
 */
struct pipe_surface *
trace_surface_create(struct trace_texture *tr_tex,
                     struct pipe_surface *surface)
{
   struct trace_screen *tr_scr = trace_screen(tr_tex->base.screen);
   struct trace_surface *tr_surf;

   if (!surface)
      goto error;

   assert(surface->texture == tr_tex->texture);

   tr_surf = CALLOC_STRUCT(trace_surface);
   if (!tr_surf)
      goto error;

   memcpy(&tr_surf->base, surface, sizeof(struct pipe_surface));

   pipe_reference_init(&tr_surf->base.reference, 1);
   tr_surf->base.texture = NULL;
   pipe_texture_reference(&tr_surf->base.texture, &tr_tex->base);
   tr_surf->surface = surface;

   trace_screen_add_to_list(tr_scr, surfaces, tr_surf);

   return &tr_surf->base;

error:
   pipe_surface_reference(&surface, NULL);
   return NULL;
}
boolean xorg_composite_bind_state(struct exa_context *exa,
                                  int op,
                                  PicturePtr pSrcPicture,
                                  PicturePtr pMaskPicture,
                                  PicturePtr pDstPicture,
                                  struct exa_pixmap_priv *pSrc,
                                  struct exa_pixmap_priv *pMask,
                                  struct exa_pixmap_priv *pDst)
{
   struct pipe_surface *dst_surf = xorg_gpu_surface(exa->pipe, pDst);

   renderer_bind_destination(exa->renderer, dst_surf,
                             pDst->width, pDst->height);

   bind_blend_state(exa, op, pSrcPicture, pMaskPicture, pDstPicture);
   bind_shaders(exa, op, pSrcPicture, pMaskPicture, pDstPicture, pSrc, pMask);
   bind_samplers(exa, op, pSrcPicture, pMaskPicture,
                 pDstPicture, pSrc, pMask, pDst);
   setup_transforms(exa, pSrcPicture, pMaskPicture);

   if (exa->num_bound_samplers == 0) { /* solid fill */
      renderer_begin_solid(exa->renderer);
   } else {
      renderer_begin_textures(exa->renderer, exa->num_bound_samplers);
   }

   pipe_surface_reference(&dst_surf, NULL);
   return TRUE;
}
static boolean
vg_context_update_color_rb(struct vg_context *ctx, struct pipe_resource *pt)
{
   struct st_renderbuffer *strb = ctx->draw_buffer->strb;
   struct pipe_screen *screen = ctx->pipe->screen;

   if (strb->texture == pt) {
      pipe_resource_reference(&pt, NULL);
      return FALSE;
   }

   /* unreference existing ones */
   pipe_surface_reference(&strb->surface, NULL);
   pipe_resource_reference(&strb->texture, NULL);
   strb->width = strb->height = 0;

   strb->texture = pt;
   strb->surface = screen->get_tex_surface(screen, strb->texture, 0, 0, 0,
                                           PIPE_BIND_RENDER_TARGET);
   if (!strb->surface) {
      pipe_resource_reference(&strb->texture, NULL);
      return TRUE;
   }

   strb->width = pt->width0;
   strb->height = pt->height0;

   return TRUE;
}
static void
destroy_renderbuffer(struct st_renderbuffer *strb)
{
   pipe_surface_reference(&strb->surface, NULL);
   pipe_resource_reference(&strb->texture, NULL);
   FREE(strb);
}