/* Drop every reference this context holds on pipe objects so the context
 * can be destroyed without leaking: bufctx buffer lists, the framebuffer,
 * vertex/index buffers, sampler views, constant buffers, surfaces, shader
 * buffers/images, transform-feedback targets and compute global residents. */
static void nvc0_context_unreference_resources(struct nvc0_context *nvc0)
{
   unsigned s, i;

   /* delete the validation buffer contexts first; they pin resources */
   nouveau_bufctx_del(&nvc0->bufctx_3d);
   nouveau_bufctx_del(&nvc0->bufctx);
   nouveau_bufctx_del(&nvc0->bufctx_cp);

   util_unreference_framebuffer_state(&nvc0->framebuffer);

   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      pipe_resource_reference(&nvc0->vtxbuf[i].buffer, NULL);

   pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);

   /* per-stage state (loop bound 6 covers all shader stages tracked here) */
   for (s = 0; s < 6; ++s) {
      for (i = 0; i < nvc0->num_textures[s]; ++i)
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);

      for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i)
         /* user-pointer constbufs do not own a pipe_resource */
         if (!nvc0->constbuf[s][i].user)
            pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, NULL);
   }

   for (s = 0; s < 2; ++s) {
      for (i = 0; i < NVC0_MAX_SURFACE_SLOTS; ++i)
         pipe_surface_reference(&nvc0->surfaces[s][i], NULL);
   }

   for (s = 0; s < 6; ++s)
      for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
         pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);

   for (s = 0; s < 6; ++s)
      for (i = 0; i < NVC0_MAX_IMAGES; ++i)
         pipe_resource_reference(&nvc0->images[s][i].resource, NULL);

   for (i = 0; i < nvc0->num_tfbbufs; ++i)
      pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);

   /* drain the dynarray of compute global residents before freeing it */
   for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *);
        ++i) {
      struct pipe_resource **res = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, i);
      pipe_resource_reference(res, NULL);
   }
   util_dynarray_fini(&nvc0->global_residents);

   /* the empty TCS is a context-owned shader; delete it through the pipe */
   if (nvc0->tcp_empty)
      nvc0->base.pipe.delete_tcs_state(&nvc0->base.pipe, nvc0->tcp_empty);
}
static void vc5_set_stream_output_targets(struct pipe_context *pctx, unsigned num_targets, struct pipe_stream_output_target **targets, const unsigned *offsets) { struct vc5_context *ctx = vc5_context(pctx); struct vc5_streamout_stateobj *so = &ctx->streamout; unsigned i; assert(num_targets <= ARRAY_SIZE(so->targets)); for (i = 0; i < num_targets; i++) pipe_so_target_reference(&so->targets[i], targets[i]); for (; i < so->num_targets; i++) pipe_so_target_reference(&so->targets[i], NULL); so->num_targets = num_targets; ctx->dirty |= VC5_DIRTY_STREAMOUT; }
/* End transform feedback: unbind all stream output targets and record
 * the target that serves as the draw-count source for this object. */
static void
st_end_transform_feedback(struct gl_context *ctx,
                          struct gl_transform_feedback_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct st_transform_feedback_object *sobj =
      st_transform_feedback_object(obj);

   cso_set_stream_outputs(st->cso_context, 0, NULL, 0);

   pipe_so_target_reference(&sobj->draw_count,
                            st_transform_feedback_get_draw_target(obj));
}
/* Bind the given transform feedback targets.
 *
 * Per-slot dirty bits accumulate in tfbbuf_dirty.  A slot whose binding is
 * unchanged and which is bound in "append" mode (offset == -1) is skipped
 * entirely; all other changes save the outgoing target's offset first. */
static void
nvc0_set_transform_feedback_targets(struct pipe_context *pipe,
                                    unsigned num_targets,
                                    struct pipe_stream_output_target **targets,
                                    const unsigned *offsets)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   unsigned i;
   bool serialize = true; /* passed by address so the save-offset helper can
                           * clear it after the first use -- NOTE(review):
                           * presumed to limit serialization to once; confirm
                           * in nvc0_so_target_save_offset() */

   assert(num_targets <= 4);

   for (i = 0; i < num_targets; ++i) {
      const bool changed = nvc0->tfbbuf[i] != targets[i];
      /* offset of (unsigned)-1 requests appending at the current offset */
      const bool append = (offsets[i] == ((unsigned)-1));

      if (!changed && append)
         continue;
      nvc0->tfbbuf_dirty |= 1 << i;

      /* save the old target's offset before it is replaced */
      if (nvc0->tfbbuf[i] && changed)
         nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize);

      /* an explicit (non-append) offset marks the target as clean */
      if (targets[i] && !append)
         nvc0_so_target(targets[i])->clean = true;

      pipe_so_target_reference(&nvc0->tfbbuf[i], targets[i]);
   }

   /* unbind any slots beyond the new target count */
   for (; i < nvc0->num_tfbbufs; ++i) {
      if (nvc0->tfbbuf[i]) {
         nvc0->tfbbuf_dirty |= 1 << i;
         nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize);
         pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);
      }
   }
   nvc0->num_tfbbufs = num_targets;

   if (nvc0->tfbbuf_dirty)
      nvc0->dirty |= NVC0_NEW_TFB_TARGETS;
}
void ilo_state_vector_cleanup(struct ilo_state_vector *vec) { unsigned i, sh; for (i = 0; i < Elements(vec->vb.states); i++) { if (vec->vb.enabled_mask & (1 << i)) pipe_resource_reference(&vec->vb.states[i].buffer, NULL); } pipe_resource_reference(&vec->ib.buffer, NULL); pipe_resource_reference(&vec->ib.hw_resource, NULL); for (i = 0; i < vec->so.count; i++) pipe_so_target_reference(&vec->so.states[i], NULL); for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) { for (i = 0; i < vec->view[sh].count; i++) { struct pipe_sampler_view *view = vec->view[sh].states[i]; pipe_sampler_view_reference(&view, NULL); } for (i = 0; i < Elements(vec->cbuf[sh].cso); i++) { struct ilo_cbuf_cso *cbuf = &vec->cbuf[sh].cso[i]; pipe_resource_reference(&cbuf->resource, NULL); } } for (i = 0; i < vec->resource.count; i++) pipe_surface_reference(&vec->resource.states[i], NULL); for (i = 0; i < vec->fb.state.nr_cbufs; i++) pipe_surface_reference(&vec->fb.state.cbufs[i], NULL); if (vec->fb.state.zsbuf) pipe_surface_reference(&vec->fb.state.zsbuf, NULL); for (i = 0; i < vec->cs_resource.count; i++) pipe_surface_reference(&vec->cs_resource.states[i], NULL); for (i = 0; i < vec->global_binding.count; i++) { struct ilo_global_binding_cso *cso = util_dynarray_element(&vec->global_binding.bindings, struct ilo_global_binding_cso, i); pipe_resource_reference(&cso->resource, NULL); } util_dynarray_fini(&vec->global_binding.bindings); }
void r600_set_streamout_targets(struct pipe_context *ctx, unsigned num_targets, struct pipe_stream_output_target **targets, unsigned append_bitmask) { struct r600_common_context *rctx = (struct r600_common_context *)ctx; unsigned i; /* Stop streamout. */ if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) { r600_emit_streamout_end(rctx); } /* Set the new targets. */ for (i = 0; i < num_targets; i++) { pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]); r600_context_add_resource_size(ctx, targets[i]->buffer); } for (; i < rctx->streamout.num_targets; i++) { pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL); } rctx->streamout.enabled_mask = (num_targets >= 1 && targets[0] ? 1 : 0) | (num_targets >= 2 && targets[1] ? 2 : 0) | (num_targets >= 3 && targets[2] ? 4 : 0) | (num_targets >= 4 && targets[3] ? 8 : 0); rctx->streamout.num_targets = num_targets; rctx->streamout.append_bitmask = append_bitmask; if (num_targets) { r600_streamout_buffers_dirty(rctx); } else { rctx->streamout.begin_atom.dirty = false; } }
static void blitter_restore_vertex_states(struct blitter_context_priv *ctx) { struct pipe_context *pipe = ctx->base.pipe; unsigned i; /* Vertex buffers. */ pipe->set_vertex_buffers(pipe, ctx->base.saved_num_vertex_buffers, ctx->base.saved_vertex_buffers); for (i = 0; i < ctx->base.saved_num_vertex_buffers; i++) { if (ctx->base.saved_vertex_buffers[i].buffer) { pipe_resource_reference(&ctx->base.saved_vertex_buffers[i].buffer, NULL); } } ctx->base.saved_num_vertex_buffers = ~0; /* Vertex elements. */ pipe->bind_vertex_elements_state(pipe, ctx->base.saved_velem_state); ctx->base.saved_velem_state = INVALID_PTR; /* Vertex shader. */ pipe->bind_vs_state(pipe, ctx->base.saved_vs); ctx->base.saved_vs = INVALID_PTR; /* Geometry shader. */ if (ctx->has_geometry_shader) { pipe->bind_gs_state(pipe, ctx->base.saved_gs); ctx->base.saved_gs = INVALID_PTR; } /* Stream outputs. */ if (ctx->has_stream_out) { pipe->set_stream_output_targets(pipe, ctx->base.saved_num_so_targets, ctx->base.saved_so_targets, ~0); for (i = 0; i < ctx->base.saved_num_so_targets; i++) pipe_so_target_reference(&ctx->base.saved_so_targets[i], NULL); ctx->base.saved_num_so_targets = ~0; } /* Rasterizer. */ pipe->bind_rasterizer_state(pipe, ctx->base.saved_rs_state); ctx->base.saved_rs_state = INVALID_PTR; }
/**
 * Copy a buffer range on the GPU by drawing points through a pass-through
 * vertex shader into a stream output target bound to the destination.
 *
 * Falls back to util_resource_copy_region() when srcx/dstx are not 4-byte
 * aligned or size is not a multiple of 16 (or stream out is unavailable,
 * although such drivers should not call this -- see the assert).
 */
void util_blitter_copy_buffer(struct blitter_context *blitter,
                              struct pipe_resource *dst,
                              unsigned dstx,
                              struct pipe_resource *src,
                              unsigned srcx,
                              unsigned size)
{
   struct blitter_context_priv *ctx = (struct blitter_context_priv*)blitter;
   struct pipe_context *pipe = ctx->base.pipe;
   struct pipe_vertex_buffer vb;
   struct pipe_stream_output_target *so_target;

   /* Drivers not capable of Stream Out should not call this function
    * in the first place. */
   assert(ctx->has_stream_out);

   /* Some alignment is required. */
   if (srcx % 4 != 0 || dstx % 4 != 0 || size % 16 != 0 ||
       !ctx->has_stream_out) {
      struct pipe_box box;
      u_box_1d(srcx, size, &box);
      util_resource_copy_region(pipe, dst, 0, dstx, 0, 0, src, 0, &box);
      return;
   }

   blitter_set_running_flag(ctx);
   blitter_check_saved_vertex_states(ctx);

   /* Read the source as a vertex buffer with a stride of 4 bytes. */
   vb.buffer = src;
   vb.buffer_offset = srcx;
   vb.stride = 4;

   pipe->set_vertex_buffers(pipe, 1, &vb);
   pipe->bind_vertex_elements_state(pipe, ctx->velem_state_readbuf);
   pipe->bind_vs_state(pipe, ctx->vs_pos_only);
   if (ctx->has_geometry_shader)
      pipe->bind_gs_state(pipe, NULL);
   /* Rasterization is not needed; discard after stream out. */
   pipe->bind_rasterizer_state(pipe, ctx->rs_discard_state);

   so_target = pipe->create_stream_output_target(pipe, dst, dstx, size);
   pipe->set_stream_output_targets(pipe, 1, &so_target, 0);

   /* One point per 16 bytes of copied data -- NOTE(review): presumed each
    * vertex streams out 16 bytes; confirm against vs_pos_only /
    * velem_state_readbuf. */
   util_draw_arrays(pipe, PIPE_PRIM_POINTS, 0, size / 16);

   blitter_restore_vertex_states(ctx);
   blitter_unset_running_flag(ctx);
   pipe_so_target_reference(&so_target, NULL);
}
/* Save the currently bound stream output targets (taking a reference on
 * each) so they can be restored later.  No-op when the driver lacks
 * streamout support. */
void
cso_save_stream_outputs(struct cso_context *ctx)
{
   uint n;

   if (!ctx->has_streamout)
      return;

   ctx->nr_so_targets_saved = ctx->nr_so_targets;

   for (n = 0; n < ctx->nr_so_targets; n++) {
      /* the save slot must be empty; saves cannot nest */
      assert(!ctx->so_targets_saved[n]);
      pipe_so_target_reference(&ctx->so_targets_saved[n], ctx->so_targets[n]);
   }
}
void ilo_cleanup_states(struct ilo_context *ilo) { unsigned i, sh; for (i = 0; i < Elements(ilo->vb.states); i++) { if (ilo->vb.enabled_mask & (1 << i)) pipe_resource_reference(&ilo->vb.states[i].buffer, NULL); } pipe_resource_reference(&ilo->ib.buffer, NULL); pipe_resource_reference(&ilo->ib.hw_resource, NULL); for (i = 0; i < ilo->so.count; i++) pipe_so_target_reference(&ilo->so.states[i], NULL); for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) { for (i = 0; i < ilo->view[sh].count; i++) { struct pipe_sampler_view *view = ilo->view[sh].states[i]; pipe_sampler_view_reference(&view, NULL); } for (i = 0; i < Elements(ilo->cbuf[sh].cso); i++) { struct ilo_cbuf_cso *cbuf = &ilo->cbuf[sh].cso[i]; pipe_resource_reference(&cbuf->resource, NULL); } } for (i = 0; i < ilo->resource.count; i++) pipe_surface_reference(&ilo->resource.states[i], NULL); for (i = 0; i < ilo->fb.state.nr_cbufs; i++) pipe_surface_reference(&ilo->fb.state.cbufs[i], NULL); if (ilo->fb.state.zsbuf) pipe_surface_reference(&ilo->fb.state.zsbuf, NULL); for (i = 0; i < ilo->cs_resource.count; i++) pipe_surface_reference(&ilo->cs_resource.states[i], NULL); for (i = 0; i < ilo->global_binding.count; i++) pipe_resource_reference(&ilo->global_binding.resources[i], NULL); }
/* Drop every reference the context holds on pipe resources: bufctx slots,
 * vertex/index buffers, per-stage textures and constant buffers, and
 * transform feedback targets. */
static void
nvc0_context_unreference_resources(struct nvc0_context *nvc0)
{
   unsigned stage, idx;

   /* release the buffers tracked by each bufctx slot */
   for (idx = 0; idx < NVC0_BUFCTX_COUNT; ++idx)
      nvc0_bufctx_reset(nvc0, idx);

   for (idx = 0; idx < nvc0->num_vtxbufs; ++idx)
      pipe_resource_reference(&nvc0->vtxbuf[idx].buffer, NULL);

   pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);

   for (stage = 0; stage < 5; ++stage) {
      for (idx = 0; idx < nvc0->num_textures[stage]; ++idx)
         pipe_sampler_view_reference(&nvc0->textures[stage][idx], NULL);

      for (idx = 0; idx < 16; ++idx)
         pipe_resource_reference(&nvc0->constbuf[stage][idx], NULL);
   }

   for (idx = 0; idx < nvc0->num_tfbbufs; ++idx)
      pipe_so_target_reference(&nvc0->tfbbuf[idx], NULL);
}
/**
 * Prior to context destruction, this function unbinds all state objects.
 *
 * First NULLs out every binding on the driver (so the driver drops its
 * references), then releases the references held by the CSO context
 * itself, and finally frees the state-object cache.
 */
void cso_release_all( struct cso_context *ctx )
{
   unsigned i, shader;

   if (ctx->pipe) {
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* unbind samplers and sampler views on every stage, up to each
          * stage's advertised limits */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         unsigned sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* free fragment sampler views */
   for (shader = 0; shader < Elements(ctx->samplers); shader++) {
      struct sampler_info *info = &ctx->samplers[shader];
      for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
         pipe_sampler_view_reference(&info->views[i], NULL);
         pipe_sampler_view_reference(&info->views_saved[i], NULL);
      }
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   /* release the CSO-owned auxiliary buffers (current and saved copies) */
   pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer, NULL);
   pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }
}
/**
 * Free the CSO context.
 *
 * Unbinds all state from the driver (shaders, samplers, constant buffers,
 * vertex elements, streamout), then releases the references the CSO
 * context holds, destroys the cache and vbuf helper, and frees the
 * context itself.
 */
void cso_destroy_context( struct cso_context *ctx )
{
   unsigned i;

   if (ctx->pipe) {
      ctx->pipe->set_index_buffer(ctx->pipe, NULL);

      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* unbind samplers and sampler views on every stage, up to each
          * stage's advertised limits */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         unsigned sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
      /* optional stages are only unbound when the driver exposes them */
      if (ctx->has_geometry_shader) {
         ctx->pipe->bind_gs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL);
         ctx->pipe->bind_tes_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL);
      }
      if (ctx->has_compute_shader) {
         ctx->pipe->bind_compute_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_COMPUTE, 0, NULL);
      }
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* release the references the CSO context holds (current + saved) */
   for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer, NULL);
   pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);
   FREE( ctx );
}