static void st_destroy_context_priv( struct st_context *st ) { uint shader, i; st_destroy_atoms( st ); st_destroy_draw( st ); st_destroy_generate_mipmap(st); st_destroy_clear(st); st_destroy_bitmap(st); st_destroy_drawpix(st); st_destroy_drawtex(st); for (shader = 0; shader < Elements(st->state.sampler_views); shader++) { for (i = 0; i < Elements(st->state.sampler_views[0]); i++) { pipe_sampler_view_release(st->pipe, &st->state.sampler_views[shader][i]); } } if (st->default_texture) { st->ctx->Driver.DeleteTexture(st->ctx, st->default_texture); st->default_texture = NULL; } u_upload_destroy(st->uploader); if (st->indexbuf_uploader) { u_upload_destroy(st->indexbuf_uploader); } if (st->constbuf_uploader) { u_upload_destroy(st->constbuf_uploader); } free( st ); }
bool vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe) { assert(c); memset(c, 0, sizeof(*c)); c->pipe = pipe; c->upload = u_upload_create(pipe, 128 * 1024, 4, PIPE_BIND_VERTEX_BUFFER); if (!c->upload) return false; if (!init_pipe_state(c)) { u_upload_destroy(c->upload); return false; } if (!init_shaders(c)) { u_upload_destroy(c->upload); cleanup_pipe_state(c); return false; } if (!init_buffers(c)) { u_upload_destroy(c->upload); cleanup_shaders(c); cleanup_pipe_state(c); return false; } return true; }
static void svga_destroy( struct pipe_context *pipe ) { struct svga_context *svga = svga_context( pipe ); unsigned shader; svga_cleanup_framebuffer( svga ); svga_cleanup_tss_binding( svga ); svga_hwtnl_destroy( svga->hwtnl ); svga_cleanup_vertex_state(svga); svga->swc->destroy(svga->swc); svga_destroy_swtnl( svga ); u_upload_destroy( svga->upload_vb ); u_upload_destroy( svga->upload_ib ); util_bitmask_destroy( svga->vs_bm ); util_bitmask_destroy( svga->fs_bm ); for(shader = 0; shader < PIPE_SHADER_TYPES; ++shader) pipe_resource_reference( &svga->curr.cb[shader], NULL ); FREE( svga ); }
void r600_common_context_cleanup(struct r600_common_context *rctx) { if (rctx->query_result_shader) rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader); if (rctx->gfx.cs) rctx->ws->cs_destroy(rctx->gfx.cs); if (rctx->dma.cs) rctx->ws->cs_destroy(rctx->dma.cs); if (rctx->ctx) rctx->ws->ctx_destroy(rctx->ctx); if (rctx->b.stream_uploader) u_upload_destroy(rctx->b.stream_uploader); if (rctx->b.const_uploader) u_upload_destroy(rctx->b.const_uploader); slab_destroy_child(&rctx->pool_transfers); slab_destroy_child(&rctx->pool_transfers_unsync); if (rctx->allocator_zeroed_memory) { u_suballocator_destroy(rctx->allocator_zeroed_memory); } rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL); rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL); r600_resource_reference(&rctx->eop_bug_scratch, NULL); }
static void vc4_context_destroy(struct pipe_context *pctx) { struct vc4_context *vc4 = vc4_context(pctx); if (vc4->blitter) util_blitter_destroy(vc4->blitter); if (vc4->primconvert) util_primconvert_destroy(vc4->primconvert); if (vc4->uploader) u_upload_destroy(vc4->uploader); util_slab_destroy(&vc4->transfer_pool); pipe_surface_reference(&vc4->framebuffer.cbufs[0], NULL); pipe_surface_reference(&vc4->framebuffer.zsbuf, NULL); pipe_surface_reference(&vc4->color_write, NULL); pipe_surface_reference(&vc4->color_read, NULL); vc4_program_fini(pctx); ralloc_free(vc4); }
void u_vbuf_destroy(struct u_vbuf *mgr) { struct pipe_screen *screen = mgr->pipe->screen; unsigned i; unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_MAX_INPUTS); mgr->pipe->set_index_buffer(mgr->pipe, NULL); pipe_resource_reference(&mgr->index_buffer.buffer, NULL); mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL); for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL); } for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL); } pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL); translate_cache_destroy(mgr->translate_cache); u_upload_destroy(mgr->uploader); cso_cache_delete(mgr->cso_cache); FREE(mgr); }
/* Destroy an r600 context: release descriptors and buffer references,
 * delete the driver-internal flush DSA/blend states, free the blitter,
 * uploader and transfer slab, then run the common context cleanup. */
static void r600_destroy_context(struct pipe_context *context)
{
   struct r600_context *rctx = (struct r600_context *)context;
   int n;

   si_release_all_descriptors(rctx);

   pipe_resource_reference(&rctx->null_const_buf.buffer, NULL);
   r600_resource_reference(&rctx->border_color_table, NULL);

   if (rctx->dummy_pixel_shader)
      rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

   /* Delete the eight per-slot internal flush DSA states. */
   for (n = 0; n < 8; n++) {
      rctx->b.b.delete_depth_stencil_alpha_state(
         &rctx->b.b, rctx->custom_dsa_flush_depth_stencil[n]);
      rctx->b.b.delete_depth_stencil_alpha_state(
         &rctx->b.b, rctx->custom_dsa_flush_depth[n]);
      rctx->b.b.delete_depth_stencil_alpha_state(
         &rctx->b.b, rctx->custom_dsa_flush_stencil[n]);
   }
   rctx->b.b.delete_depth_stencil_alpha_state(
      &rctx->b.b, rctx->custom_dsa_flush_inplace);

   rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
   rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);

   util_unreference_framebuffer_state(&rctx->framebuffer);

   util_blitter_destroy(rctx->blitter);

   if (rctx->uploader)
      u_upload_destroy(rctx->uploader);

   util_slab_destroy(&rctx->pool_transfers);

   r600_common_context_cleanup(&rctx->b);
   FREE(rctx);
}
/* Destroy an NVC0 context.  If this is the screen's current context,
 * its state (minus the TFB pointer) is saved on the screen so the next
 * context can restore it. */
static void
nvc0_destroy(struct pipe_context *pipe)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   if (nvc0->screen->cur_ctx == nvc0) {
      nvc0->screen->cur_ctx = NULL;
      nvc0->screen->save_state = nvc0->state;
      nvc0->screen->save_state.tfb = NULL;
   }

   if (nvc0->base.pipe.stream_uploader)
      u_upload_destroy(nvc0->base.pipe.stream_uploader);

   /* Unset bufctx, we don't want to revalidate any resources after the flush.
    * Other contexts will always set their bufctx again on action calls.
    */
   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, NULL);
   nouveau_pushbuf_kick(nvc0->base.pushbuf, nvc0->base.pushbuf->channel);

   nvc0_context_unreference_resources(nvc0);
   nvc0_blitctx_destroy(nvc0);

   nouveau_context_destroy(&nvc0->base);
}
void r600_common_context_cleanup(struct r600_common_context *rctx) { unsigned i,j; /* Release DCC stats. */ for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) { assert(!rctx->dcc_stats[i].query_active); for (j = 0; j < ARRAY_SIZE(rctx->dcc_stats[i].ps_stats); j++) if (rctx->dcc_stats[i].ps_stats[j]) rctx->b.destroy_query(&rctx->b, rctx->dcc_stats[i].ps_stats[j]); r600_texture_reference(&rctx->dcc_stats[i].tex, NULL); } if (rctx->gfx.cs) rctx->ws->cs_destroy(rctx->gfx.cs); if (rctx->dma.cs) rctx->ws->cs_destroy(rctx->dma.cs); if (rctx->ctx) rctx->ws->ctx_destroy(rctx->ctx); if (rctx->uploader) { u_upload_destroy(rctx->uploader); } util_slab_destroy(&rctx->pool_transfers); if (rctx->allocator_zeroed_memory) { u_suballocator_destroy(rctx->allocator_zeroed_memory); } rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL); }
/* Destroy an ILO context: clean up bound states, unreference the last
 * command-parser BO, destroy each helper that was created, free the
 * transfer mempool and the context itself. */
static void
ilo_context_destroy(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);

   ilo_cleanup_states(ilo);

   if (ilo->last_cp_bo)
      intel_bo_unreference(ilo->last_cp_bo);

   if (ilo->uploader)
      u_upload_destroy(ilo->uploader);
   if (ilo->blitter)
      ilo_blitter_destroy(ilo->blitter);
   if (ilo->hw3d)
      ilo_3d_destroy(ilo->hw3d);
   if (ilo->shader_cache)
      ilo_shader_cache_destroy(ilo->shader_cache);
   if (ilo->cp)
      ilo_cp_destroy(ilo->cp);

   util_slab_destroy(&ilo->transfer_mempool);

   FREE(ilo);
}
/* Destroy an r300 context.  Teardown order matters here: HyperZ/CMASK
 * access is relinquished while the CS still exists, helpers are freed
 * before the CS/winsys-context are destroyed, and the atom state
 * structs are freed last (they were allocated in r300_setup_atoms()). */
static void r300_destroy_context(struct pipe_context* context)
{
    struct r300_context* r300 = r300_context(context);

    /* Give back the HyperZ / CMASK RAM access before the CS goes away. */
    if (r300->cs && r300->hyperz_enabled) {
        r300->rws->cs_request_feature(r300->cs, RADEON_FID_R300_HYPERZ_ACCESS, FALSE);
    }
    if (r300->cs && r300->cmask_access) {
        r300->rws->cs_request_feature(r300->cs, RADEON_FID_R300_CMASK_ACCESS, FALSE);
    }

    if (r300->blitter)
        util_blitter_destroy(r300->blitter);
    if (r300->draw)
        draw_destroy(r300->draw);

    if (r300->uploader)
        u_upload_destroy(r300->uploader);

    /* XXX: This function assumes r300->query_list was initialized */
    r300_release_referenced_objects(r300);

    if (r300->cs)
        r300->rws->cs_destroy(r300->cs);
    if (r300->ctx)
        r300->rws->ctx_destroy(r300->ctx);

    rc_destroy_regalloc_state(&r300->fs_regalloc_state);

    /* XXX: No way to tell if this was initialized or not? */
    util_slab_destroy(&r300->pool_transfers);

    /* Free the structs allocated in r300_setup_atoms() */
    /* aa_state.state is used as a sentinel: if it is set, all the atom
     * states below were allocated together and can be freed. */
    if (r300->aa_state.state) {
        FREE(r300->aa_state.state);
        FREE(r300->blend_color_state.state);
        FREE(r300->clip_state.state);
        FREE(r300->fb_state.state);
        FREE(r300->gpu_flush.state);
        FREE(r300->hyperz_state.state);
        FREE(r300->invariant_state.state);
        FREE(r300->rs_block_state.state);
        FREE(r300->sample_mask.state);
        FREE(r300->scissor_state.state);
        FREE(r300->textures_state.state);
        FREE(r300->vap_invariant_state.state);
        FREE(r300->viewport_state.state);
        FREE(r300->ztop_state.state);
        FREE(r300->fs_constants.state);
        FREE(r300->vs_constants.state);
        /* vertex_stream_state is only allocated when there is no HW TCL. */
        if (!r300->screen->caps.has_tcl) {
            FREE(r300->vertex_stream_state.state);
        }
    }
    FREE(r300);
}
/* Undo vl_compositor_init(): destroy the vertex upload buffer, then
 * the buffers, shaders and pipe state. */
void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   u_upload_destroy(c->upload);
   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}
/* Destroy the tegra wrapper context: free its stream uploader, forward
 * destruction to the wrapped GPU context, then free the wrapper. */
static void
tegra_destroy(struct pipe_context *pcontext)
{
   struct tegra_context *ctx = to_tegra_context(pcontext);

   if (ctx->base.stream_uploader)
      u_upload_destroy(ctx->base.stream_uploader);

   ctx->gpu->destroy(ctx->gpu);
   free(ctx);
}
static void swr_destroy(struct pipe_context *pipe) { struct swr_context *ctx = swr_context(pipe); struct swr_screen *screen = swr_screen(pipe->screen); if (ctx->blitter) util_blitter_destroy(ctx->blitter); for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; i++) { if (ctx->framebuffer.cbufs[i]) { struct swr_resource *res = swr_resource(ctx->framebuffer.cbufs[i]->texture); /* NULL curr_pipe, so we don't have a reference to a deleted pipe */ res->curr_pipe = NULL; pipe_surface_reference(&ctx->framebuffer.cbufs[i], NULL); } } if (ctx->framebuffer.zsbuf) { struct swr_resource *res = swr_resource(ctx->framebuffer.zsbuf->texture); /* NULL curr_pipe, so we don't have a reference to a deleted pipe */ res->curr_pipe = NULL; pipe_surface_reference(&ctx->framebuffer.zsbuf, NULL); } for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) { pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_FRAGMENT][i], NULL); } for (unsigned i = 0; i < ARRAY_SIZE(ctx->sampler_views[0]); i++) { pipe_sampler_view_reference(&ctx->sampler_views[PIPE_SHADER_VERTEX][i], NULL); } if (ctx->pipe.stream_uploader) u_upload_destroy(ctx->pipe.stream_uploader); /* Idle core after destroying buffer resources, but before deleting * context. Destroying resources has potentially called StoreTiles.*/ ctx->api.pfnSwrWaitForIdle(ctx->swrContext); if (ctx->swrContext) ctx->api.pfnSwrDestroyContext(ctx->swrContext); delete ctx->blendJIT; swr_destroy_scratch_buffers(ctx); /* Only update screen->pipe if current context is being destroyed */ assert(screen); if (screen->pipe == pipe) screen->pipe = NULL; AlignedFree(ctx); }
void fd_context_destroy(struct pipe_context *pctx) { struct fd_context *ctx = fd_context(pctx); unsigned i; DBG(""); fd_fence_ref(pctx->screen, &ctx->last_fence, NULL); if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue)) util_queue_destroy(&ctx->flush_queue); util_copy_framebuffer_state(&ctx->framebuffer, NULL); fd_batch_reference(&ctx->batch, NULL); /* unref current batch */ fd_bc_invalidate_context(ctx); fd_prog_fini(pctx); if (ctx->blitter) util_blitter_destroy(ctx->blitter); if (pctx->stream_uploader) u_upload_destroy(pctx->stream_uploader); if (ctx->clear_rs_state) pctx->delete_rasterizer_state(pctx, ctx->clear_rs_state); if (ctx->primconvert) util_primconvert_destroy(ctx->primconvert); slab_destroy_child(&ctx->transfer_pool); for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe); i++) { struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i]; if (!pipe->bo) break; fd_bo_del(pipe->bo); } fd_device_del(ctx->dev); fd_pipe_del(ctx->pipe); if (fd_mesa_debug & (FD_DBG_BSTAT | FD_DBG_MSGS)) { printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, batch_restore=%u\n", (uint32_t)ctx->stats.batch_total, (uint32_t)ctx->stats.batch_sysmem, (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw, (uint32_t)ctx->stats.batch_restore); } }
/** * Destroy a context, freeing any associated memory. */ static void iris_destroy_context(struct pipe_context *ctx) { struct iris_context *ice = (struct iris_context *)ctx; if (ctx->stream_uploader) u_upload_destroy(ctx->stream_uploader); ice->vtbl.destroy_state(ice); iris_destroy_program_cache(ice); iris_destroy_border_color_pool(ice); u_upload_destroy(ice->state.surface_uploader); u_upload_destroy(ice->state.dynamic_uploader); u_upload_destroy(ice->query_buffer_uploader); slab_destroy_child(&ice->transfer_pool); iris_batch_free(&ice->batches[IRIS_BATCH_RENDER]); iris_batch_free(&ice->batches[IRIS_BATCH_COMPUTE]); iris_destroy_binder(&ice->state.binder); ralloc_free(ice); }
/* Destroy an fd5 context: free the private BOs, the common blit VBOs
 * and the border color uploader, then run the common destroy path. */
static void
fd5_context_destroy(struct pipe_context *pctx)
{
   struct fd5_context *ctx5 = fd5_context(fd_context(pctx));

   fd_bo_del(ctx5->vs_pvt_mem);
   fd_bo_del(ctx5->fs_pvt_mem);
   fd_bo_del(ctx5->vsc_size_mem);
   fd_bo_del(ctx5->blit_mem);

   fd_context_cleanup_common_vbos(&ctx5->base);

   u_upload_destroy(ctx5->border_color_uploader);

   fd_context_destroy(pctx);
}
/* Destroy a u_vbuf manager: drop all held vertex buffer references and
 * free the translate cache, the uploader and the manager itself. */
void u_vbuf_destroy(struct u_vbuf *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned n;

   for (n = 0; n < mgr->b.nr_vertex_buffers; n++)
      pipe_resource_reference(&mgr->b.vertex_buffer[n].buffer, NULL);

   for (n = 0; n < mgr->b.nr_real_vertex_buffers; n++)
      pipe_resource_reference(&mgr->b.real_vertex_buffer[n].buffer, NULL);

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}
/* Destroy a u_vbuf manager: unbind vertex buffers from the pipe, drop
 * the held references, and free the caches and the manager itself. */
void u_vbuf_destroy(struct u_vbuf *mgr)
{
   unsigned n;

   mgr->pipe->set_vertex_buffers(mgr->pipe, 0, NULL);

   for (n = 0; n < mgr->nr_vertex_buffers; n++)
      pipe_resource_reference(&mgr->vertex_buffer[n].buffer, NULL);

   for (n = 0; n < mgr->nr_real_vertex_buffers; n++)
      pipe_resource_reference(&mgr->real_vertex_buffer[n].buffer, NULL);

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->uploader);
   cso_cache_delete(mgr->cso_cache);
   FREE(mgr);
}
/* Release the resources owned by the common context: the GFX and DMA
 * command streams, the winsys context, the uploader, the transfer slab
 * and the so_filled_size suballocator. */
void r600_common_context_cleanup(struct r600_common_context *rctx)
{
   if (rctx->rings.gfx.cs)
      rctx->ws->cs_destroy(rctx->rings.gfx.cs);
   if (rctx->rings.dma.cs)
      rctx->ws->cs_destroy(rctx->rings.dma.cs);
   if (rctx->ctx)
      rctx->ws->ctx_destroy(rctx->ctx);

   if (rctx->uploader)
      u_upload_destroy(rctx->uploader);

   util_slab_destroy(&rctx->pool_transfers);

   if (rctx->allocator_so_filled_size)
      u_suballocator_destroy(rctx->allocator_so_filled_size);
}
static void fd4_context_destroy(struct pipe_context *pctx) { struct fd4_context *fd4_ctx = fd4_context(fd_context(pctx)); fd_bo_del(fd4_ctx->vs_pvt_mem); fd_bo_del(fd4_ctx->fs_pvt_mem); fd_bo_del(fd4_ctx->vsc_size_mem); pctx->delete_vertex_elements_state(pctx, fd4_ctx->solid_vbuf_state.vtx); pctx->delete_vertex_elements_state(pctx, fd4_ctx->blit_vbuf_state.vtx); pipe_resource_reference(&fd4_ctx->solid_vbuf, NULL); pipe_resource_reference(&fd4_ctx->blit_texcoord_vbuf, NULL); u_upload_destroy(fd4_ctx->border_color_uploader); fd_context_destroy(pctx); }
void r600_common_context_cleanup(struct r600_common_context *rctx) { if (rctx->gfx.cs) rctx->ws->cs_destroy(rctx->gfx.cs); if (rctx->dma.cs) rctx->ws->cs_destroy(rctx->dma.cs); if (rctx->ctx) rctx->ws->ctx_destroy(rctx->ctx); if (rctx->uploader) { u_upload_destroy(rctx->uploader); } util_slab_destroy(&rctx->pool_transfers); if (rctx->allocator_so_filled_size) { u_suballocator_destroy(rctx->allocator_so_filled_size); } rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL); }
/* Destroy an NV50 context.  If this is the screen's current context,
 * its state is stashed on the screen so a later context can restore it. */
static void
nv50_destroy(struct pipe_context *pipe)
{
   struct nv50_context *nv50 = nv50_context(pipe);

   if (nv50->screen->cur_ctx == nv50) {
      nv50->screen->cur_ctx = NULL;
      /* Save off the state in case another context gets created */
      nv50->screen->save_state = nv50->state;
   }

   if (nv50->base.pipe.stream_uploader)
      u_upload_destroy(nv50->base.pipe.stream_uploader);

   /* Detach the bufctx and flush the pushbuf before resources go away. */
   nouveau_pushbuf_bufctx(nv50->base.pushbuf, NULL);
   nouveau_pushbuf_kick(nv50->base.pushbuf, nv50->base.pushbuf->channel);

   nv50_context_unreference_resources(nv50);

   FREE(nv50->blit);

   nouveau_context_destroy(&nv50->base);
}
/* Destroy an ILO context: clean up the state vector, destroy each
 * helper that was successfully created, free the transfer mempool and
 * finally the context itself. */
static void
ilo_context_destroy(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);

   ilo_state_vector_cleanup(&ilo->state_vector);

   if (ilo->uploader)
      u_upload_destroy(ilo->uploader);
   if (ilo->blitter)
      ilo_blitter_destroy(ilo->blitter);
   if (ilo->render)
      ilo_render_destroy(ilo->render);
   if (ilo->shader_cache)
      ilo_shader_cache_destroy(ilo->shader_cache);
   if (ilo->cp)
      ilo_cp_destroy(ilo->cp);

   slab_destroy(&ilo->transfer_mempool);

   FREE(ilo);
}
/**
 * Create an SVGA pipe context.  Each resource is acquired in order and
 * the goto-label chain at the bottom unwinds exactly the resources that
 * were created before the failing step, in reverse order.
 *
 * Returns the new pipe_context, or NULL on failure.
 */
struct pipe_context *svga_context_create( struct pipe_screen *screen,
                                          void *priv )
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_context *svga = NULL;
   enum pipe_error ret;

   svga = CALLOC_STRUCT(svga_context);
   if (svga == NULL)
      goto no_svga;

   svga->pipe.screen = screen;
   svga->pipe.priv = priv;
   svga->pipe.destroy = svga_destroy;
   svga->pipe.clear = svga_clear;

   /* Create the winsys context before anything that needs it. */
   svga->swc = svgascreen->sws->context_create(svgascreen->sws);
   if(!svga->swc)
      goto no_swc;

   /* Hook up the per-feature function tables (infallible). */
   svga_init_resource_functions(svga);
   svga_init_blend_functions(svga);
   svga_init_blit_functions(svga);
   svga_init_depth_stencil_functions(svga);
   svga_init_draw_functions(svga);
   svga_init_flush_functions(svga);
   svga_init_misc_functions(svga);
   svga_init_rasterizer_functions(svga);
   svga_init_sampler_functions(svga);
   svga_init_fs_functions(svga);
   svga_init_vs_functions(svga);
   svga_init_vertex_functions(svga);
   svga_init_constbuffer_functions(svga);
   svga_init_query_functions(svga);
   svga_init_surface_functions(svga);

   /* debug */
   svga->debug.no_swtnl = debug_get_option_no_swtnl();
   svga->debug.force_swtnl = debug_get_option_force_swtnl();
   svga->debug.use_min_mipmap = debug_get_option_use_min_mipmap();
   svga->debug.disable_shader = debug_get_option_disable_shader();
   svga->debug.no_line_width = debug_get_option_no_line_width();
   svga->debug.force_hw_line_stipple = debug_get_option_force_hw_line_stipple();

   /* Shader-id bitmasks. */
   svga->fs_bm = util_bitmask_create();
   if (svga->fs_bm == NULL)
      goto no_fs_bm;

   svga->vs_bm = util_bitmask_create();
   if (svga->vs_bm == NULL)
      goto no_vs_bm;

   /* Index and vertex upload managers. */
   svga->upload_ib = u_upload_create( &svga->pipe,
                                      32 * 1024,
                                      16,
                                      PIPE_BIND_INDEX_BUFFER );
   if (svga->upload_ib == NULL)
      goto no_upload_ib;

   svga->upload_vb = u_upload_create( &svga->pipe,
                                      128 * 1024,
                                      16,
                                      PIPE_BIND_VERTEX_BUFFER );
   if (svga->upload_vb == NULL)
      goto no_upload_vb;

   svga->hwtnl = svga_hwtnl_create( svga,
                                    svga->upload_ib,
                                    svga->swc );
   if (svga->hwtnl == NULL)
      goto no_hwtnl;

   if (!svga_init_swtnl(svga))
      goto no_swtnl;

   ret = svga_emit_initial_state( svga );
   if (ret != PIPE_OK)
      goto no_state;

   /* Avoid shortcircuiting state with initial value of zero. */
   memset(&svga->state.hw_clear, 0xcd, sizeof(svga->state.hw_clear));
   memset(&svga->state.hw_clear.framebuffer, 0x0,
          sizeof(svga->state.hw_clear.framebuffer));

   memset(&svga->state.hw_draw, 0xcd, sizeof(svga->state.hw_draw));
   memset(&svga->state.hw_draw.views, 0x0, sizeof(svga->state.hw_draw.views));
   svga->state.hw_draw.num_views = 0;

   svga->dirty = ~0;

   LIST_INITHEAD(&svga->dirty_buffers);

   return &svga->pipe;

   /* Error unwind: each label releases one more successfully created
    * resource, in reverse creation order. */
no_state:
   svga_destroy_swtnl(svga);
no_swtnl:
   svga_hwtnl_destroy( svga->hwtnl );
no_hwtnl:
   u_upload_destroy( svga->upload_vb );
no_upload_vb:
   u_upload_destroy( svga->upload_ib );
no_upload_ib:
   util_bitmask_destroy( svga->vs_bm );
no_vs_bm:
   util_bitmask_destroy( svga->fs_bm );
no_fs_bm:
   svga->swc->destroy(svga->swc);
no_swc:
   FREE(svga);
no_svga:
   return NULL;
}
/**
 * Create an NV50 pipe context.  Allocates the context, creates the
 * blit context, buffer contexts and stream uploader, installs the
 * pipe entry points and per-chipset video hooks, and pins the
 * permanently resident screen BOs into the buffer contexts.
 *
 * Returns the new pipe_context, or NULL on failure (with everything
 * created so far released on the out_err path).
 */
struct pipe_context *
nv50_create(struct pipe_screen *pscreen, void *priv, unsigned ctxflags)
{
   struct nv50_screen *screen = nv50_screen(pscreen);
   struct nv50_context *nv50;
   struct pipe_context *pipe;
   int ret;
   uint32_t flags;

   nv50 = CALLOC_STRUCT(nv50_context);
   if (!nv50)
      return NULL;
   pipe = &nv50->base.pipe;

   if (!nv50_blitctx_create(nv50))
      goto out_err;

   nv50->base.pushbuf = screen->base.pushbuf;
   nv50->base.client = screen->base.client;

   /* Chained creation: ret stays 0 only if all three bufctxs succeed. */
   ret = nouveau_bufctx_new(screen->base.client, 2, &nv50->bufctx);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NV50_BIND_3D_COUNT,
                               &nv50->bufctx_3d);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NV50_BIND_CP_COUNT,
                               &nv50->bufctx_cp);
   if (ret)
      goto out_err;

   nv50->base.screen = &screen->base;
   nv50->base.copy_data = nv50_m2mf_copy_linear;
   nv50->base.push_data = nv50_sifc_linear_u8;
   nv50->base.push_cb = nv50_cb_push;

   nv50->screen = screen;
   pipe->screen = pscreen;
   pipe->priv = priv;
   pipe->stream_uploader = u_upload_create_default(pipe);
   if (!pipe->stream_uploader)
      goto out_err;
   /* Constants share the stream uploader on this driver. */
   pipe->const_uploader = pipe->stream_uploader;

   pipe->destroy = nv50_destroy;

   pipe->draw_vbo = nv50_draw_vbo;
   pipe->clear = nv50_clear;
   pipe->launch_grid = nv50_launch_grid;

   pipe->flush = nv50_flush;
   pipe->texture_barrier = nv50_texture_barrier;
   pipe->memory_barrier = nv50_memory_barrier;
   pipe->get_sample_position = nv50_context_get_sample_position;
   pipe->emit_string_marker = nv50_emit_string_marker;

   if (!screen->cur_ctx) {
      /* Restore the last context's state here, normally handled during
       * context switch
       */
      nv50->state = screen->save_state;
      screen->cur_ctx = nv50;
      nouveau_pushbuf_bufctx(screen->base.pushbuf, nv50->bufctx);
   }
   nv50->base.pushbuf->kick_notify = nv50_default_kick_notify;

   nouveau_context_init(&nv50->base);
   nv50_init_query_functions(nv50);
   nv50_init_surface_functions(nv50);
   nv50_init_state_functions(nv50);
   nv50_init_resource_functions(pipe);

   nv50->base.invalidate_resource_storage = nv50_invalidate_resource_storage;

   /* Select the video decode path by chipset. */
   if (screen->base.device->chipset < 0x84 ||
       debug_get_bool_option("NOUVEAU_PMPEG", false)) {
      /* PMPEG */
      nouveau_context_init_vdec(&nv50->base);
   } else if (screen->base.device->chipset < 0x98 ||
              screen->base.device->chipset == 0xa0) {
      /* VP2 */
      pipe->create_video_codec = nv84_create_decoder;
      pipe->create_video_buffer = nv84_video_buffer_create;
   } else {
      /* VP3/4 */
      pipe->create_video_codec = nv98_create_decoder;
      pipe->create_video_buffer = nv98_video_buffer_create;
   }

   /* Pin the permanently resident screen BOs (read-only VRAM). */
   flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD;

   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->code);
   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->uniforms);
   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->txc);
   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->stack_bo);
   if (screen->compute) {
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->code);
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->txc);
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->stack_bo);
   }

   /* The fence BO is written through GART. */
   flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;

   BCTX_REFN_bo(nv50->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
   BCTX_REFN_bo(nv50->bufctx, FENCE, flags, screen->fence.bo);
   if (screen->compute)
      BCTX_REFN_bo(nv50->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);

   nv50->base.scratch.bo_size = 2 << 20;

   util_dynarray_init(&nv50->global_residents, NULL);

   return pipe;

out_err:
   /* Release whatever was created before the failure. */
   if (pipe->stream_uploader)
      u_upload_destroy(pipe->stream_uploader);
   if (nv50->bufctx_3d)
      nouveau_bufctx_del(&nv50->bufctx_3d);
   if (nv50->bufctx_cp)
      nouveau_bufctx_del(&nv50->bufctx_cp);
   if (nv50->bufctx)
      nouveau_bufctx_del(&nv50->bufctx);
   FREE(nv50->blit);
   FREE(nv50);
   return NULL;
}
/*
 * pipe_context
 */
/* Destroy a radeonsi context.  The teardown order is significant:
 * the shader compiler queues are drained first, the framebuffer is
 * unbound through the normal path so dependent logic runs, then all
 * internal states, queries, command streams, uploaders and allocators
 * are released before the context storage is freed. */
static void si_destroy_context(struct pipe_context *context)
{
	struct si_context *sctx = (struct si_context *)context;
	int i;

	/* Wait for any in-flight shader compiles touching this context. */
	util_queue_finish(&sctx->screen->shader_compiler_queue);
	util_queue_finish(&sctx->screen->shader_compiler_queue_low_priority);

	/* Unreference the framebuffer normally to disable related logic
	 * properly.
	 */
	struct pipe_framebuffer_state fb = {};
	if (context->set_framebuffer_state)
		context->set_framebuffer_state(context, &fb);

	si_release_all_descriptors(sctx);

	/* Drop the internal ring/buffer references. */
	pipe_resource_reference(&sctx->esgs_ring, NULL);
	pipe_resource_reference(&sctx->gsvs_ring, NULL);
	pipe_resource_reference(&sctx->tess_rings, NULL);
	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
	pipe_resource_reference(&sctx->sample_pos_buffer, NULL);
	si_resource_reference(&sctx->border_color_buffer, NULL);
	free(sctx->border_color_table);
	si_resource_reference(&sctx->scratch_buffer, NULL);
	si_resource_reference(&sctx->compute_scratch_buffer, NULL);
	si_resource_reference(&sctx->wait_mem_scratch, NULL);

	/* Free the PM4 init-config states. */
	si_pm4_free_state(sctx, sctx->init_config, ~0);
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	for (i = 0; i < ARRAY_SIZE(sctx->vgt_shader_config); i++)
		si_pm4_delete_state(sctx, vgt_shader_config, sctx->vgt_shader_config[i]);

	/* Delete the driver-internal CSO states (each may be NULL if its
	 * creation was never triggered). */
	if (sctx->fixed_func_tcs_shader.cso)
		sctx->b.delete_tcs_state(&sctx->b, sctx->fixed_func_tcs_shader.cso);
	if (sctx->custom_dsa_flush)
		sctx->b.delete_depth_stencil_alpha_state(&sctx->b, sctx->custom_dsa_flush);
	if (sctx->custom_blend_resolve)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_resolve);
	if (sctx->custom_blend_fmask_decompress)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_fmask_decompress);
	if (sctx->custom_blend_eliminate_fastclear)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_eliminate_fastclear);
	if (sctx->custom_blend_dcc_decompress)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_dcc_decompress);
	if (sctx->vs_blit_pos)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_pos);
	if (sctx->vs_blit_pos_layered)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_pos_layered);
	if (sctx->vs_blit_color)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_color);
	if (sctx->vs_blit_color_layered)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_color_layered);
	if (sctx->vs_blit_texcoord)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_texcoord);
	if (sctx->cs_clear_buffer)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_buffer);
	if (sctx->cs_copy_buffer)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_copy_buffer);
	if (sctx->cs_copy_image)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_copy_image);
	if (sctx->cs_copy_image_1d_array)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_copy_image_1d_array);
	if (sctx->cs_clear_render_target)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_render_target);
	if (sctx->cs_clear_render_target_1d_array)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_render_target_1d_array);
	if (sctx->cs_dcc_retile)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_dcc_retile);
	if (sctx->blitter)
		util_blitter_destroy(sctx->blitter);

	/* Release DCC stats. */
	for (int i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
		assert(!sctx->dcc_stats[i].query_active);

		for (int j = 0; j < ARRAY_SIZE(sctx->dcc_stats[i].ps_stats); j++)
			if (sctx->dcc_stats[i].ps_stats[j])
				sctx->b.destroy_query(&sctx->b,
						      sctx->dcc_stats[i].ps_stats[j]);

		si_texture_reference(&sctx->dcc_stats[i].tex, NULL);
	}

	if (sctx->query_result_shader)
		sctx->b.delete_compute_state(&sctx->b, sctx->query_result_shader);

	/* Destroy the command streams and the winsys context. */
	if (sctx->gfx_cs)
		sctx->ws->cs_destroy(sctx->gfx_cs);
	if (sctx->dma_cs)
		sctx->ws->cs_destroy(sctx->dma_cs);
	if (sctx->ctx)
		sctx->ws->ctx_destroy(sctx->ctx);

	/* Destroy the uploaders and allocators. */
	if (sctx->b.stream_uploader)
		u_upload_destroy(sctx->b.stream_uploader);
	if (sctx->b.const_uploader)
		u_upload_destroy(sctx->b.const_uploader);
	if (sctx->cached_gtt_allocator)
		u_upload_destroy(sctx->cached_gtt_allocator);

	slab_destroy_child(&sctx->pool_transfers);
	slab_destroy_child(&sctx->pool_transfers_unsync);

	if (sctx->allocator_zeroed_memory)
		u_suballocator_destroy(sctx->allocator_zeroed_memory);

	sctx->ws->fence_reference(&sctx->last_gfx_fence, NULL);
	sctx->ws->fence_reference(&sctx->last_sdma_fence, NULL);
	si_resource_reference(&sctx->eop_bug_scratch, NULL);

	si_destroy_compiler(&sctx->compiler);

	si_saved_cs_reference(&sctx->current_saved_cs, NULL);

	/* Free the bindless-handle tables and resident-handle arrays. */
	_mesa_hash_table_destroy(sctx->tex_handles, NULL);
	_mesa_hash_table_destroy(sctx->img_handles, NULL);

	util_dynarray_fini(&sctx->resident_tex_handles);
	util_dynarray_fini(&sctx->resident_img_handles);
	util_dynarray_fini(&sctx->resident_tex_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
	si_unref_sdma_uploads(sctx);
	FREE(sctx);
}
/**
 * Create an NVC0 pipe context.  Allocates the context, creates the
 * blit context, buffer contexts and stream uploader, installs the pipe
 * entry points, uploads the builtin shader library and the empty TCP
 * program, and pins the permanently resident screen BOs.
 *
 * Returns the new pipe_context, or NULL on failure (with everything
 * created so far released on the out_err path).
 */
struct pipe_context *
nvc0_create(struct pipe_screen *pscreen, void *priv, unsigned ctxflags)
{
   struct nvc0_screen *screen = nvc0_screen(pscreen);
   struct nvc0_context *nvc0;
   struct pipe_context *pipe;
   int ret;
   uint32_t flags;

   nvc0 = CALLOC_STRUCT(nvc0_context);
   if (!nvc0)
      return NULL;
   pipe = &nvc0->base.pipe;

   if (!nvc0_blitctx_create(nvc0))
      goto out_err;

   nvc0->base.pushbuf = screen->base.pushbuf;
   nvc0->base.client = screen->base.client;

   /* Chained creation: ret stays 0 only if all three bufctxs succeed. */
   ret = nouveau_bufctx_new(screen->base.client, 2, &nvc0->bufctx);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_3D_COUNT,
                               &nvc0->bufctx_3d);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_CP_COUNT,
                               &nvc0->bufctx_cp);
   if (ret)
      goto out_err;

   nvc0->screen = screen;
   nvc0->base.screen = &screen->base;

   pipe->screen = pscreen;
   pipe->priv = priv;
   pipe->stream_uploader = u_upload_create_default(pipe);
   if (!pipe->stream_uploader)
      goto out_err;
   /* Constants share the stream uploader on this driver. */
   pipe->const_uploader = pipe->stream_uploader;

   pipe->destroy = nvc0_destroy;

   pipe->draw_vbo = nvc0_draw_vbo;
   pipe->clear = nvc0_clear;
   /* NVE4+ uses its own grid-launch entry point. */
   pipe->launch_grid = (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) ?
      nve4_launch_grid : nvc0_launch_grid;

   pipe->flush = nvc0_flush;
   pipe->texture_barrier = nvc0_texture_barrier;
   pipe->memory_barrier = nvc0_memory_barrier;
   pipe->get_sample_position = nvc0_context_get_sample_position;
   pipe->emit_string_marker = nvc0_emit_string_marker;

   nouveau_context_init(&nvc0->base);
   nvc0_init_query_functions(nvc0);
   nvc0_init_surface_functions(nvc0);
   nvc0_init_state_functions(nvc0);
   nvc0_init_transfer_functions(nvc0);
   nvc0_init_resource_functions(pipe);

   nvc0->base.invalidate_resource_storage = nvc0_invalidate_resource_storage;

   pipe->create_video_codec = nvc0_create_decoder;
   pipe->create_video_buffer = nvc0_video_buffer_create;

   /* shader builtin library is per-screen, but we need a context for m2mf */
   nvc0_program_library_upload(nvc0);
   nvc0_program_init_tcp_empty(nvc0);
   if (!nvc0->tcp_empty)
      goto out_err;
   /* set the empty tctl prog on next draw in case one is never set */
   nvc0->dirty_3d |= NVC0_NEW_3D_TCTLPROG;

   /* Do not bind the COMPUTE driver constbuf at screen initialization because
    * CBs are aliased between 3D and COMPUTE, but make sure it will be bound if
    * a grid is launched later. */
   nvc0->dirty_cp |= NVC0_NEW_CP_DRIVERCONST;

   /* now that there are no more opportunities for errors, set the current
    * context if there isn't already one.
    */
   if (!screen->cur_ctx) {
      nvc0->state = screen->save_state;
      screen->cur_ctx = nvc0;
      nouveau_pushbuf_bufctx(screen->base.pushbuf, nvc0->bufctx);
   }
   screen->base.pushbuf->kick_notify = nvc0_default_kick_notify;

   /* add permanently resident buffers to bufctxts */
   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD;
   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_TEXT, flags, screen->text);
   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->uniform_bo);
   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->txc);
   if (screen->compute) {
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_TEXT, flags, screen->text);
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->uniform_bo);
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->txc);
   }
   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RDWR;
   if (screen->poly_cache)
      BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->poly_cache);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->tls);
   flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;
   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
   BCTX_REFN_bo(nvc0->bufctx, FENCE, flags, screen->fence.bo);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);

   nvc0->base.scratch.bo_size = 2 << 20;

   memset(nvc0->tex_handles, ~0, sizeof(nvc0->tex_handles));

   util_dynarray_init(&nvc0->global_residents);

   return pipe;

out_err:
   /* Release whatever was created before the failure. */
   if (nvc0) {
      if (pipe->stream_uploader)
         u_upload_destroy(pipe->stream_uploader);
      if (nvc0->bufctx_3d)
         nouveau_bufctx_del(&nvc0->bufctx_3d);
      if (nvc0->bufctx_cp)
         nouveau_bufctx_del(&nvc0->bufctx_cp);
      if (nvc0->bufctx)
         nouveau_bufctx_del(&nvc0->bufctx);
      FREE(nvc0->blit);
      FREE(nvc0);
   }
   return NULL;
}
struct pipe_context *svga_context_create(struct pipe_screen *screen, void *priv, unsigned flags) { struct svga_screen *svgascreen = svga_screen(screen); struct svga_context *svga = NULL; enum pipe_error ret; svga = CALLOC_STRUCT(svga_context); if (!svga) goto cleanup; LIST_INITHEAD(&svga->dirty_buffers); svga->pipe.screen = screen; svga->pipe.priv = priv; svga->pipe.destroy = svga_destroy; svga->pipe.clear = svga_clear; svga->swc = svgascreen->sws->context_create(svgascreen->sws); if (!svga->swc) goto cleanup; svga_init_resource_functions(svga); svga_init_blend_functions(svga); svga_init_blit_functions(svga); svga_init_depth_stencil_functions(svga); svga_init_draw_functions(svga); svga_init_flush_functions(svga); svga_init_misc_functions(svga); svga_init_rasterizer_functions(svga); svga_init_sampler_functions(svga); svga_init_fs_functions(svga); svga_init_vs_functions(svga); svga_init_gs_functions(svga); svga_init_vertex_functions(svga); svga_init_constbuffer_functions(svga); svga_init_query_functions(svga); svga_init_surface_functions(svga); svga_init_stream_output_functions(svga); /* init misc state */ svga->curr.sample_mask = ~0; /* debug */ svga->debug.no_swtnl = debug_get_option_no_swtnl(); svga->debug.force_swtnl = debug_get_option_force_swtnl(); svga->debug.use_min_mipmap = debug_get_option_use_min_mipmap(); svga->debug.disable_shader = debug_get_option_disable_shader(); svga->debug.no_line_width = debug_get_option_no_line_width(); svga->debug.force_hw_line_stipple = debug_get_option_force_hw_line_stipple(); if (!(svga->blend_object_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->ds_object_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->input_element_object_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->rast_object_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->sampler_object_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->sampler_view_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->shader_id_bm = 
util_bitmask_create())) goto cleanup; if (!(svga->surface_view_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->stream_output_id_bm = util_bitmask_create())) goto cleanup; if (!(svga->query_id_bm = util_bitmask_create())) goto cleanup; svga->hwtnl = svga_hwtnl_create(svga); if (svga->hwtnl == NULL) goto cleanup; if (!svga_init_swtnl(svga)) goto cleanup; ret = svga_emit_initial_state( svga ); if (ret != PIPE_OK) goto cleanup; svga->const0_upload = u_upload_create(&svga->pipe, CONST0_UPLOAD_DEFAULT_SIZE, CONST0_UPLOAD_ALIGNMENT, PIPE_BIND_CONSTANT_BUFFER); if (!svga->const0_upload) goto cleanup; /* Avoid shortcircuiting state with initial value of zero. */ memset(&svga->state.hw_clear, 0xcd, sizeof(svga->state.hw_clear)); memset(&svga->state.hw_clear.framebuffer, 0x0, sizeof(svga->state.hw_clear.framebuffer)); memset(&svga->state.hw_draw, 0xcd, sizeof(svga->state.hw_draw)); memset(&svga->state.hw_draw.views, 0x0, sizeof(svga->state.hw_draw.views)); memset(&svga->state.hw_draw.num_sampler_views, 0, sizeof(svga->state.hw_draw.num_sampler_views)); svga->state.hw_draw.num_views = 0; /* Initialize the shader pointers */ svga->state.hw_draw.vs = NULL; svga->state.hw_draw.gs = NULL; svga->state.hw_draw.fs = NULL; memset(svga->state.hw_draw.constbuf, 0, sizeof(svga->state.hw_draw.constbuf)); memset(svga->state.hw_draw.default_constbuf_size, 0, sizeof(svga->state.hw_draw.default_constbuf_size)); memset(svga->state.hw_draw.enabled_constbufs, 0, sizeof(svga->state.hw_draw.enabled_constbufs)); /* Create a no-operation blend state which we will bind whenever the * requested blend state is impossible (e.g. due to having an integer * render target attached). * * XXX: We will probably actually need 16 of these, one for each possible * RGBA color mask (4 bits). Then, we would bind the one with a color mask * matching the blend state it is replacing. 
*/ { struct pipe_blend_state noop_tmpl = {0}; unsigned i; for (i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) { // Set the color mask to all-ones. Later this may change. noop_tmpl.rt[i].colormask = PIPE_MASK_RGBA; } svga->noop_blend = svga->pipe.create_blend_state(&svga->pipe, &noop_tmpl); } svga->dirty = ~0; return &svga->pipe; cleanup: svga_destroy_swtnl(svga); if (svga->const0_upload) u_upload_destroy(svga->const0_upload); if (svga->hwtnl) svga_hwtnl_destroy(svga->hwtnl); if (svga->swc) svga->swc->destroy(svga->swc); util_bitmask_destroy(svga->blend_object_id_bm); util_bitmask_destroy(svga->ds_object_id_bm); util_bitmask_destroy(svga->input_element_object_id_bm); util_bitmask_destroy(svga->rast_object_id_bm); util_bitmask_destroy(svga->sampler_object_id_bm); util_bitmask_destroy(svga->sampler_view_id_bm); util_bitmask_destroy(svga->shader_id_bm); util_bitmask_destroy(svga->surface_view_id_bm); util_bitmask_destroy(svga->stream_output_id_bm); util_bitmask_destroy(svga->query_id_bm); FREE(svga); return NULL; }
/**
 * Destroy the svga rendering context (the pipe_context::destroy hook,
 * installed in svga_context_create).
 *
 * Releases every resource owned by the context: derived rasterizer and
 * polygon-stipple state, hardware constant buffers, the no-op blend
 * object, the blitter, framebuffer/TSS bindings, vertex state, the
 * software and hardware TNL paths, the winsys command stream, the ID
 * bitmask allocators, the constant-buffer upload manager, and the
 * user-supplied constant buffer references.  Finally frees the context
 * struct itself.  The teardown order mirrors the dependencies between
 * these objects, so it should not be reordered casually.
 */
static void svga_destroy( struct pipe_context *pipe )
{
   struct svga_context *svga = svga_context( pipe );
   unsigned shader, i;

   /* free any alternate rasterizer states used for point sprite */
   for (i = 0; i < Elements(svga->rasterizer_no_cull); i++) {
      if (svga->rasterizer_no_cull[i]) {
         pipe->delete_rasterizer_state(pipe, svga->rasterizer_no_cull[i]);
      }
   }

   /* free polygon stipple state */
   if (svga->polygon_stipple.sampler) {
      pipe->delete_sampler_state(pipe, svga->polygon_stipple.sampler);
   }
   if (svga->polygon_stipple.sampler_view) {
      pipe->sampler_view_destroy(pipe,
                                 &svga->polygon_stipple.sampler_view->base);
   }
   pipe_resource_reference(&svga->polygon_stipple.texture, NULL);

   /* free HW constant buffers */
   for (shader = 0; shader < Elements(svga->state.hw_draw.constbuf); shader++) {
      pipe_resource_reference(&svga->state.hw_draw.constbuf[shader], NULL);
   }

   pipe->delete_blend_state(pipe, svga->noop_blend);

   /* free query gb object */
   if (svga->gb_query) {
      /* NOTE(review): the NULL query argument looks deliberate — presumably
       * the driver's destroy_query hook treats NULL as "release the
       * guest-backed query buffer" rather than a per-query delete; confirm
       * against the svga destroy_query implementation.
       */
      pipe->destroy_query(pipe, NULL);
      svga->gb_query = NULL;
   }

   util_blitter_destroy(svga->blitter);

   svga_cleanup_framebuffer( svga );
   svga_cleanup_tss_binding( svga );
   svga_cleanup_vertex_state(svga);

   /* Tear down the software TNL path before the hardware TNL object,
    * then the winsys command stream they both submitted through. */
   svga_destroy_swtnl( svga );
   svga_hwtnl_destroy( svga->hwtnl );
   svga->swc->destroy(svga->swc);

   /* Destroy the per-namespace host object ID allocators. */
   util_bitmask_destroy(svga->blend_object_id_bm);
   util_bitmask_destroy(svga->ds_object_id_bm);
   util_bitmask_destroy(svga->input_element_object_id_bm);
   util_bitmask_destroy(svga->rast_object_id_bm);
   util_bitmask_destroy(svga->sampler_object_id_bm);
   util_bitmask_destroy(svga->sampler_view_id_bm);
   util_bitmask_destroy(svga->shader_id_bm);
   util_bitmask_destroy(svga->surface_view_id_bm);
   util_bitmask_destroy(svga->stream_output_id_bm);
   util_bitmask_destroy(svga->query_id_bm);
   u_upload_destroy(svga->const0_upload);

   /* free user's constant buffers */
   for (shader = 0; shader < PIPE_SHADER_TYPES; ++shader) {
      for (i = 0; i < Elements(svga->curr.constbufs[shader]); ++i) {
         pipe_resource_reference(&svga->curr.constbufs[shader][i].buffer,
                                 NULL);
      }
   }

   FREE( svga );
}