static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             struct pipe_resource *constants)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   unsigned size = constants ? constants->width0 : 0;
   const void *data = constants ? llvmpipe_resource_data(constants) : NULL;

   assert(shader < PIPE_SHADER_TYPES);
   assert(index < PIPE_MAX_CONSTANT_BUFFERS);

   if (llvmpipe->constants[shader][index] == constants)
      return;

   draw_flush(llvmpipe->draw);

   /* note: reference counting */
   pipe_resource_reference(&llvmpipe->constants[shader][index], constants);

   if (shader == PIPE_SHADER_VERTEX || shader == PIPE_SHADER_GEOMETRY) {
      draw_set_mapped_constant_buffer(llvmpipe->draw, shader, index,
                                      data, size);
   }

   llvmpipe->dirty |= LP_NEW_CONSTANTS;
}
void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             struct pipe_buffer *constants)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   unsigned size = constants ? constants->size : 0;
   const void *data = constants ? llvmpipe_buffer(constants)->data : NULL;

   assert(shader < PIPE_SHADER_TYPES);
   assert(index == 0);

   if (llvmpipe->constants[shader] == constants)
      return;

   draw_flush(llvmpipe->draw);

   /* note: reference counting */
   pipe_buffer_reference(&llvmpipe->constants[shader], constants);

   if (shader == PIPE_SHADER_VERTEX) {
      draw_set_mapped_constant_buffer(llvmpipe->draw, PIPE_SHADER_VERTEX, 0,
                                      data, size);
   }

   llvmpipe->dirty |= LP_NEW_CONSTANTS;
}
static void
llvmpipe_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
   struct llvmpipe_query *pq = llvmpipe_query(q);

   lp_setup_end_query(llvmpipe->setup, pq);

   if (pq->type == PIPE_QUERY_PRIMITIVES_EMITTED) {
      pq->num_primitives_written = llvmpipe->so_stats.num_primitives_written;
   }

   if (pq->type == PIPE_QUERY_PRIMITIVES_GENERATED) {
      pq->num_primitives_generated = llvmpipe->num_primitives_generated;
   }

   if (pq->type == PIPE_QUERY_SO_STATISTICS) {
      pq->num_primitives_written = llvmpipe->so_stats.num_primitives_written;
      pq->num_primitives_generated = llvmpipe->num_primitives_generated;
   }

   if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      assert(llvmpipe->active_occlusion_query);
      llvmpipe->active_occlusion_query = FALSE;
      llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
   }
}
/**
 * \param fence  if non-null, returns pointer to a fence which can be waited on
 */
void
llvmpipe_flush( struct pipe_context *pipe,
                struct pipe_fence_handle **fence,
                const char *reason)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   draw_flush(llvmpipe->draw);

   /* ask the setup module to flush */
   lp_setup_flush(llvmpipe->setup, fence, reason);

   /* Enable to dump BMPs of the color/depth buffers each frame */
   if (0) {
      static unsigned frame_no = 1;
      char filename[256];
      unsigned i;

      for (i = 0; i < llvmpipe->framebuffer.nr_cbufs; i++) {
         util_snprintf(filename, sizeof(filename), "cbuf%u_%u", i, frame_no);
         debug_dump_surface_bmp(&llvmpipe->pipe, filename,
                                llvmpipe->framebuffer.cbufs[i]);
      }

      if (0) {
         util_snprintf(filename, sizeof(filename), "zsbuf_%u", frame_no);
         debug_dump_surface_bmp(&llvmpipe->pipe, filename,
                                llvmpipe->framebuffer.zsbuf);
      }

      ++frame_no;
   }
}
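/*
 * Illustrative sketch only (not the driver's llvmpipe_finish() verbatim):
 * how a caller might use the fence returned by llvmpipe_flush() to block
 * until rasterization has completed.  Assumes lp_fence_wait() and the
 * screen's fence_reference() hook are available, and that the fence handle
 * can be cast to struct lp_fence as elsewhere in llvmpipe.
 */
static void
example_finish(struct pipe_context *pipe, const char *reason)
{
   struct pipe_fence_handle *fence = NULL;

   llvmpipe_flush(pipe, &fence, reason);

   if (fence) {
      /* block until all binned work guarded by the fence is done */
      lp_fence_wait((struct lp_fence *) fence);
      /* drop our reference */
      pipe->screen->fence_reference(pipe->screen, &fence, NULL);
   }
}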
static void *
llvmpipe_create_gs_state(struct pipe_context *pipe,
                         const struct pipe_shader_state *templ)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_geometry_shader *state;

   state = CALLOC_STRUCT(lp_geometry_shader);
   if (!state)
      goto no_state;

   /* debug */
   if (LP_DEBUG & DEBUG_TGSI) {
      debug_printf("llvmpipe: Create geometry shader %p:\n", (void *)state);
      tgsi_dump(templ->tokens, 0);
   }

   /* copy stream output info */
   state->no_tokens = !templ->tokens;
   memcpy(&state->stream_output, &templ->stream_output,
          sizeof state->stream_output);

   if (templ->tokens) {
      state->dgs = draw_create_geometry_shader(llvmpipe->draw, templ);
      if (state->dgs == NULL) {
         goto no_dgs;
      }
   }

   return state;

no_dgs:
   FREE( state );
no_state:
   return NULL;
}
void
llvmpipe_bind_vertex_sampler_states(struct pipe_context *pipe,
                                    unsigned num_samplers,
                                    void **samplers)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   unsigned i;

   assert(num_samplers <= PIPE_MAX_VERTEX_SAMPLERS);

   /* Check for no-op */
   if (num_samplers == llvmpipe->num_vertex_samplers &&
       !memcmp(llvmpipe->vertex_samplers, samplers,
               num_samplers * sizeof(void *)))
      return;

   draw_flush(llvmpipe->draw);

   for (i = 0; i < num_samplers; ++i)
      llvmpipe->vertex_samplers[i] = samplers[i];
   for (i = num_samplers; i < PIPE_MAX_VERTEX_SAMPLERS; ++i)
      llvmpipe->vertex_samplers[i] = NULL;

   llvmpipe->num_vertex_samplers = num_samplers;

   llvmpipe->dirty |= LP_NEW_SAMPLER;
}
void
llvmpipe_set_vertex_sampler_textures(struct pipe_context *pipe,
                                     unsigned num_textures,
                                     struct pipe_texture **textures)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   uint i;

   assert(num_textures <= PIPE_MAX_VERTEX_SAMPLERS);

   /* Check for no-op */
   if (num_textures == llvmpipe->num_vertex_textures &&
       !memcmp(llvmpipe->vertex_textures, textures,
               num_textures * sizeof(struct pipe_texture *))) {
      return;
   }

   draw_flush(llvmpipe->draw);

   for (i = 0; i < PIPE_MAX_VERTEX_SAMPLERS; i++) {
      struct pipe_texture *tex = i < num_textures ? textures[i] : NULL;

      pipe_texture_reference(&llvmpipe->vertex_textures[i], tex);
   }

   llvmpipe->num_vertex_textures = num_textures;

   llvmpipe->dirty |= LP_NEW_TEXTURE;
}
/**
 * Called during state validation when LP_NEW_VIEWPORT is set.
 */
void
lp_setup_set_viewports(struct lp_setup_context *setup,
                       unsigned num_viewports,
                       const struct pipe_viewport_state *viewports)
{
   struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num_viewports <= PIPE_MAX_VIEWPORTS);
   assert(viewports);

   /*
    * For use in lp_state_fs.c, propagate the viewport values for all
    * viewports.
    */
   for (i = 0; i < num_viewports; i++) {
      float min_depth;
      float max_depth;

      util_viewport_zmin_zmax(&viewports[i], lp->rasterizer->clip_halfz,
                              &min_depth, &max_depth);

      if (setup->viewports[i].min_depth != min_depth ||
          setup->viewports[i].max_depth != max_depth) {
         setup->viewports[i].min_depth = min_depth;
         setup->viewports[i].max_depth = max_depth;
         setup->dirty |= LP_SETUP_NEW_VIEWPORTS;
      }
   }
}
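/*
 * Illustrative sketch only: roughly what the depth-range computation above
 * amounts to.  This is an assumed restatement of util_viewport_zmin_zmax(),
 * not a copy of u_viewport.h.  With the conventional [-1,1] clip-space z the
 * range spans translate[2] +/- scale[2]; with clip_halfz ([0,1] clip z) it
 * spans translate[2] .. translate[2] + scale[2].  boolean/MIN2/MAX2 are the
 * usual gallium helpers.
 */
static void
example_viewport_zmin_zmax(const struct pipe_viewport_state *vp,
                           boolean halfz,
                           float *zmin, float *zmax)
{
   float a, b;

   if (halfz) {
      a = vp->translate[2];
      b = vp->translate[2] + vp->scale[2];
   }
   else {
      a = vp->translate[2] - vp->scale[2];
      b = vp->translate[2] + vp->scale[2];
   }

   /* a reversed depth range (negative scale[2]) swaps the endpoints */
   *zmin = MIN2(a, b);
   *zmax = MAX2(a, b);
}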
static void
llvmpipe_destroy( struct pipe_context *pipe )
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
   uint i;

   lp_print_counters();

   /* This will also destroy llvmpipe->setup:
    */
   if (llvmpipe->draw)
      draw_destroy( llvmpipe->draw );

   for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
      pipe_surface_reference(&llvmpipe->framebuffer.cbufs[i], NULL);
   }

   pipe_surface_reference(&llvmpipe->framebuffer.zsbuf, NULL);

   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      pipe_texture_reference(&llvmpipe->texture[i], NULL);
   }

   for (i = 0; i < PIPE_MAX_VERTEX_SAMPLERS; i++) {
      pipe_texture_reference(&llvmpipe->vertex_textures[i], NULL);
   }

   for (i = 0; i < Elements(llvmpipe->constants); i++) {
      if (llvmpipe->constants[i]) {
         pipe_buffer_reference(&llvmpipe->constants[i], NULL);
      }
   }

   align_free( llvmpipe );
}
void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             const struct pipe_constant_buffer *constants)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct pipe_buffer *buffer = constants ? constants->buffer : NULL;
   unsigned size = buffer ? buffer->size : 0;
   const void *data = buffer ? llvmpipe_buffer(buffer)->data : NULL;

   assert(shader < PIPE_SHADER_TYPES);
   assert(index == 0);

   if (shader == PIPE_SHADER_VERTEX)
      draw_flush(llvmpipe->draw);

   /* note: reference counting */
   pipe_buffer_reference(&llvmpipe->constants[shader].buffer, buffer);

   if (shader == PIPE_SHADER_FRAGMENT) {
      llvmpipe->jit_context.constants = data;
   }

   if (shader == PIPE_SHADER_VERTEX) {
      draw_set_mapped_constant_buffer(llvmpipe->draw, data, size);
   }

   llvmpipe->dirty |= LP_NEW_CONSTANTS;
}
/**
 * Draw vertex arrays, with optional indexing, optional instancing.
 * All the other drawing functions are implemented in terms of this function.
 * Basically, map the vertex buffers (and drawing surfaces), then hand off
 * the drawing to the 'draw' module.
 */
static void
llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);
   struct draw_context *draw = lp->draw;
   const void *mapped_indices = NULL;
   unsigned i;

   if (!llvmpipe_check_render_cond(lp))
      return;

   if (lp->dirty)
      llvmpipe_update_derived( lp );

   /*
    * Map vertex buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      const void *buf = lp->vertex_buffer[i].user_buffer;
      if (!buf)
         buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer);
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }

   /* Map index buffer, if present */
   if (info->indexed) {
      mapped_indices = lp->index_buffer.user_buffer;
      if (!mapped_indices)
         mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer);

      draw_set_indexes(draw,
                       (ubyte *) mapped_indices + lp->index_buffer.offset,
                       lp->index_buffer.index_size);
   }

   llvmpipe_prepare_vertex_sampling(lp,
                                    lp->num_sampler_views[PIPE_SHADER_VERTEX],
                                    lp->sampler_views[PIPE_SHADER_VERTEX]);

   /* draw! */
   draw_vbo(draw, info);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }
   if (mapped_indices) {
      draw_set_indexes(draw, NULL, 0);
   }
   llvmpipe_cleanup_vertex_sampling(lp);

   /*
    * TODO: Flush only when a user vertex/index buffer is present
    * (or even better, modify draw module to do this
    * internally when this condition is seen?)
    */
   draw_flush(draw);
}
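/*
 * Hedged usage sketch (not part of the driver): how a caller might fill in
 * pipe_draw_info for a simple non-indexed, single-instance draw and dispatch
 * it through the draw_vbo hook above.  Assumes util_draw_init_info() from
 * util/u_draw.h, and that vertex buffers, vertex elements and shaders are
 * already bound on the context.
 */
static void
example_draw_triangle(struct pipe_context *pipe)
{
   struct pipe_draw_info info;

   util_draw_init_info(&info);   /* sets sane defaults (instance_count = 1, etc.) */
   info.mode = PIPE_PRIM_TRIANGLES;
   info.start = 0;
   info.count = 3;
   info.indexed = FALSE;         /* no index buffer, see info->indexed above */

   pipe->draw_vbo(pipe, &info);
}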
void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_fragment_shader *shader = fs;
   struct lp_fragment_shader_variant *variant;

   assert(fs != llvmpipe->fs);

   variant = shader->variants;
   while (variant) {
      struct lp_fragment_shader_variant *next = variant->next;

      if (variant->function) {
         if (variant->jit_function)
            LLVMFreeMachineCodeForFunction(screen->engine, variant->function);
         LLVMDeleteFunction(variant->function);
      }

      FREE(variant);

      variant = next;
   }

   FREE((void *) shader->base.tokens);
   FREE(shader);
}
static void
llvmpipe_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
   struct llvmpipe_query *pq = llvmpipe_query(q);

   /* Check if the query is already in the scene.  If so, we need to
    * flush the scene now.  Real apps shouldn't re-use a query in a
    * frame of rendering.
    */
   if (pq->fence && !lp_fence_issued(pq->fence)) {
      llvmpipe_finish(pipe, __FUNCTION__);
   }

   memset(pq->count, 0, sizeof(pq->count));
   lp_setup_begin_query(llvmpipe->setup, pq);

   if (pq->type == PIPE_QUERY_PRIMITIVES_EMITTED) {
      pq->num_primitives_written = 0;
      llvmpipe->so_stats.num_primitives_written = 0;
   }

   if (pq->type == PIPE_QUERY_PRIMITIVES_GENERATED) {
      pq->num_primitives_generated = 0;
      llvmpipe->num_primitives_generated = 0;
   }

   if (pq->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      llvmpipe->active_occlusion_query = TRUE;
      llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
   }
}
static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_fragment_shader *shader = fs;
   struct lp_fs_variant_list_item *li;

   assert(fs != llvmpipe->fs);
   (void) llvmpipe;

   /*
    * XXX: we need to flush the context until we have some sort of reference
    * counting in fragment shaders as they may still be binned.
    * Flushing alone might not be sufficient; we need to wait on it too.
    */
   llvmpipe_finish(pipe, __FUNCTION__);

   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      struct lp_fs_variant_list_item *next = next_elem(li);
      remove_shader_variant(llvmpipe, li->base);
      li = next;
   }

   assert(shader->variants_cached == 0);
   FREE((void *) shader->base.tokens);
   FREE(shader);
}
static void
llvmpipe_set_vertex_sampler_views(struct pipe_context *pipe,
                                  unsigned num,
                                  struct pipe_sampler_view **views)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   uint i;

   assert(num <= PIPE_MAX_VERTEX_SAMPLERS);

   /* Check for no-op */
   if (num == llvmpipe->num_vertex_sampler_views &&
       !memcmp(llvmpipe->vertex_sampler_views, views,
               num * sizeof(struct pipe_sampler_view *))) {
      return;
   }

   draw_flush(llvmpipe->draw);

   for (i = 0; i < PIPE_MAX_VERTEX_SAMPLERS; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      pipe_sampler_view_reference(&llvmpipe->vertex_sampler_views[i], view);
   }

   llvmpipe->num_vertex_sampler_views = num;

   draw_set_sampler_views(llvmpipe->draw,
                          llvmpipe->vertex_sampler_views,
                          llvmpipe->num_vertex_sampler_views);

   llvmpipe->dirty |= LP_NEW_SAMPLER_VIEW;
}
static void
llvmpipe_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
   struct llvmpipe_query *pq = llvmpipe_query(q);

   lp_setup_end_query(llvmpipe->setup, pq);

   switch (pq->type) {
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      pq->num_primitives_written =
         llvmpipe->so_stats.num_primitives_written - pq->num_primitives_written;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      pq->num_primitives_generated =
         llvmpipe->so_stats.primitives_storage_needed - pq->num_primitives_generated;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      pq->num_primitives_written =
         llvmpipe->so_stats.num_primitives_written - pq->num_primitives_written;
      pq->num_primitives_generated =
         llvmpipe->so_stats.primitives_storage_needed - pq->num_primitives_generated;
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      pq->num_primitives_written =
         llvmpipe->so_stats.num_primitives_written - pq->num_primitives_written;
      pq->num_primitives_generated =
         llvmpipe->so_stats.primitives_storage_needed - pq->num_primitives_generated;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      pq->stats.ia_vertices =
         llvmpipe->pipeline_statistics.ia_vertices - pq->stats.ia_vertices;
      pq->stats.ia_primitives =
         llvmpipe->pipeline_statistics.ia_primitives - pq->stats.ia_primitives;
      pq->stats.vs_invocations =
         llvmpipe->pipeline_statistics.vs_invocations - pq->stats.vs_invocations;
      pq->stats.gs_invocations =
         llvmpipe->pipeline_statistics.gs_invocations - pq->stats.gs_invocations;
      pq->stats.gs_primitives =
         llvmpipe->pipeline_statistics.gs_primitives - pq->stats.gs_primitives;
      pq->stats.c_invocations =
         llvmpipe->pipeline_statistics.c_invocations - pq->stats.c_invocations;
      pq->stats.c_primitives =
         llvmpipe->pipeline_statistics.c_primitives - pq->stats.c_primitives;
      pq->stats.ps_invocations =
         llvmpipe->pipeline_statistics.ps_invocations - pq->stats.ps_invocations;
      llvmpipe->active_statistics_queries--;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      assert(llvmpipe->active_occlusion_queries);
      llvmpipe->active_occlusion_queries--;
      llvmpipe->dirty |= LP_NEW_OCCLUSION_QUERY;
      break;
   default:
      break;
   }
}
static void
llvmpipe_delete_vs_state(struct pipe_context *pipe, void *_vs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   struct draw_vertex_shader *vs = (struct draw_vertex_shader *)_vs;
   draw_delete_vertex_shader(llvmpipe->draw, vs);
}
static void
lp_blit(struct pipe_context *pipe, const struct pipe_blit_info *blit_info)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);
   struct pipe_blit_info info = *blit_info;

   if (info.src.resource->nr_samples > 1 &&
       info.dst.resource->nr_samples <= 1 &&
       !util_format_is_depth_or_stencil(info.src.resource->format) &&
       !util_format_is_pure_integer(info.src.resource->format)) {
      debug_printf("llvmpipe: color resolve unimplemented\n");
      return;
   }

   if (util_try_blit_via_copy_region(pipe, &info)) {
      return; /* done */
   }

   if (info.mask & PIPE_MASK_S) {
      debug_printf("llvmpipe: cannot blit stencil, skipping\n");
      info.mask &= ~PIPE_MASK_S;
   }

   if (!util_blitter_is_blit_supported(lp->blitter, &info)) {
      debug_printf("llvmpipe: blit unsupported %s -> %s\n",
                   util_format_short_name(info.src.resource->format),
                   util_format_short_name(info.dst.resource->format));
      return;
   }

   /* XXX turn off occlusion and streamout queries */

   util_blitter_save_vertex_buffers(lp->blitter, lp->num_vertex_buffers,
                                    lp->vertex_buffer);
   util_blitter_save_vertex_elements(lp->blitter, (void*)lp->velems);
   util_blitter_save_vertex_shader(lp->blitter, (void*)lp->vs);
   util_blitter_save_geometry_shader(lp->blitter, (void*)lp->gs);
   /*util_blitter_save_so_targets(lp->blitter, lp->num_so_targets,
                     (struct pipe_stream_output_target**)lp->so_targets);*/
   util_blitter_save_rasterizer(lp->blitter, (void*)lp->rasterizer);
   util_blitter_save_viewport(lp->blitter, &lp->viewport);
   util_blitter_save_scissor(lp->blitter, &lp->scissor);
   util_blitter_save_fragment_shader(lp->blitter, lp->fs);
   util_blitter_save_blend(lp->blitter, (void*)lp->blend);
   util_blitter_save_depth_stencil_alpha(lp->blitter, (void*)lp->depth_stencil);
   util_blitter_save_stencil_ref(lp->blitter, &lp->stencil_ref);
   /*util_blitter_save_sample_mask(sp->blitter, lp->sample_mask);*/
   util_blitter_save_framebuffer(lp->blitter, &lp->framebuffer);
   util_blitter_save_fragment_sampler_states(lp->blitter,
                                             lp->num_samplers[PIPE_SHADER_FRAGMENT],
                                             (void**)lp->samplers[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_fragment_sampler_views(lp->blitter,
                                            lp->num_sampler_views[PIPE_SHADER_FRAGMENT],
                                            lp->sampler_views[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_render_condition(lp->blitter, lp->render_cond_query,
                                      lp->render_cond_mode);
   util_blitter_blit(lp->blitter, &info);
}
void
llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   llvmpipe->fs = (struct lp_fragment_shader *) fs;

   llvmpipe->dirty |= LP_NEW_FS;
}
static void
llvmpipe_set_clip_state(struct pipe_context *pipe,
                        const struct pipe_clip_state *clip)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   /* pass the clip state to the draw module */
   draw_set_clip_state(llvmpipe->draw, clip);
}
static unsigned int
llvmpipe_is_texture_referenced( struct pipe_context *pipe,
                                struct pipe_texture *texture,
                                unsigned face, unsigned level)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );

   return lp_setup_is_texture_referenced(llvmpipe->setup, texture);
}
static void
llvmpipe_set_sampler_views(struct pipe_context *pipe,
                           unsigned shader,
                           unsigned start,
                           unsigned num,
                           struct pipe_sampler_view **views)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   uint i;

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   assert(shader < PIPE_SHADER_TYPES);
   assert(start + num <= Elements(llvmpipe->sampler_views[shader]));

   draw_flush(llvmpipe->draw);

   /* set the new sampler views */
   for (i = 0; i < num; i++) {
      /* Note: we're using pipe_sampler_view_release() here to work around
       * a possible crash when the old view belongs to another context that
       * was already destroyed.
       */
      pipe_sampler_view_release(pipe,
                                &llvmpipe->sampler_views[shader][start + i]);
      /*
       * Warn if someone tries to set a view created in a different context
       * (which is why we need the hack above in the first place).
       * An assert would be better but st/mesa relies on it...
       */
      if (views[i] && views[i]->context != pipe) {
         debug_printf("Illegal setting of sampler_view %d created in another "
                      "context\n", i);
      }
      pipe_sampler_view_reference(&llvmpipe->sampler_views[shader][start + i],
                                  views[i]);
   }

   /* find highest non-null sampler_views[] entry */
   {
      unsigned j = MAX2(llvmpipe->num_sampler_views[shader], start + num);
      while (j > 0 && llvmpipe->sampler_views[shader][j - 1] == NULL)
         j--;
      llvmpipe->num_sampler_views[shader] = j;
   }

   if (shader == PIPE_SHADER_VERTEX || shader == PIPE_SHADER_GEOMETRY) {
      draw_set_sampler_views(llvmpipe->draw, shader,
                             llvmpipe->sampler_views[shader],
                             llvmpipe->num_sampler_views[shader]);
   }
   else {
      llvmpipe->dirty |= LP_NEW_SAMPLER_VIEW;
   }
}
static void
llvmpipe_render_condition( struct pipe_context *pipe,
                           struct pipe_query *query,
                           uint mode )
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );

   llvmpipe->render_cond_query = query;
   llvmpipe->render_cond_mode = mode;
}
static void
llvmpipe_set_polygon_stipple(struct pipe_context *pipe,
                             const struct pipe_poly_stipple *stipple)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   draw_flush(llvmpipe->draw);

   llvmpipe->poly_stipple = *stipple; /* struct copy */

   llvmpipe->dirty |= LP_NEW_STIPPLE;
}
static void
llvmpipe_set_scissor_state(struct pipe_context *pipe,
                           const struct pipe_scissor_state *scissor)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   draw_flush(llvmpipe->draw);

   llvmpipe->scissor = *scissor; /* struct copy */

   llvmpipe->dirty |= LP_NEW_SCISSOR;
}
static void
lp_setup_so_info(struct vbuf_render *vbr,
                 uint primitives, uint vertices,
                 uint prim_generated)
{
   struct lp_setup_context *setup = lp_setup_context(vbr);
   struct llvmpipe_context *lp = llvmpipe_context(setup->pipe);

   lp->so_stats.num_primitives_written += primitives;
   lp->so_stats.primitives_storage_needed =
      vertices * 4 /*sizeof(float|int32)*/ * 4 /*x,y,z,w*/;
   lp->num_primitives_generated += prim_generated;
}
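/*
 * Worked example of the storage estimate above (following the function's own
 * comments, which assume one four-component 32-bit attribute per vertex):
 * a draw that emits 6 vertices sets primitives_storage_needed to
 * 6 * 4 bytes * 4 components = 96 bytes.
 */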
static void
llvmpipe_bind_gs_state(struct pipe_context *pipe, void *gs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   llvmpipe->gs = (struct lp_geometry_shader *)gs;

   draw_bind_geometry_shader(llvmpipe->draw,
                             (llvmpipe->gs ? llvmpipe->gs->draw_data : NULL));

   llvmpipe->dirty |= LP_NEW_GS;
}
static void
llvmpipe_set_sample_mask(struct pipe_context *pipe,
                         unsigned sample_mask)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (sample_mask != llvmpipe->sample_mask) {
      llvmpipe->sample_mask = sample_mask;

      llvmpipe->dirty |= LP_NEW_RASTERIZER;
   }
}
static void
llvmpipe_set_viewport_state(struct pipe_context *pipe,
                            const struct pipe_viewport_state *viewport)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   /* pass the viewport info to the draw module */
   draw_set_viewport_state(llvmpipe->draw, viewport);

   llvmpipe->viewport = *viewport; /* struct copy */
   llvmpipe->dirty |= LP_NEW_VIEWPORT;
}
static void
llvmpipe_delete_vs_state(struct pipe_context *pipe, void *vs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   struct lp_vertex_shader *state =
      (struct lp_vertex_shader *)vs;

   draw_delete_vertex_shader(llvmpipe->draw, state->draw_data);
   FREE( (void *)state->shader.tokens );
   FREE( state );
}