enum pipe_error cso_set_blend(struct cso_context *ctx, const struct pipe_blend_state *templ) { unsigned key_size, hash_key; struct cso_hash_iter iter; void *handle; key_size = templ->independent_blend_enable ? sizeof(struct pipe_blend_state) : (char *)&(templ->rt[1]) - (char *)templ; hash_key = cso_construct_key((void*)templ, key_size); iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND, (void*)templ, key_size); if (cso_hash_iter_is_null(iter)) { struct cso_blend *cso = MALLOC(sizeof(struct cso_blend)); if (!cso) return PIPE_ERROR_OUT_OF_MEMORY; memset(&cso->state, 0, sizeof cso->state); memcpy(&cso->state, templ, key_size); cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state); cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state; cso->context = ctx->pipe; iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso); if (cso_hash_iter_is_null(iter)) { FREE(cso); return PIPE_ERROR_OUT_OF_MEMORY; } handle = cso->data; } else { handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data; } if (ctx->blend != handle) { ctx->blend = handle; ctx->pipe->bind_blend_state(ctx->pipe, handle); } return PIPE_OK; }
/**
 * Look up a shader for \p key in \p hash, creating and caching it on a miss.
 * \p type selects vertex vs. fragment shader creation.
 */
static INLINE void *
shader_from_cache(struct pipe_context *pipe, unsigned type,
                  struct cso_hash *hash, unsigned key)
{
   struct cso_hash_iter iter = cso_hash_find(hash, key);

   if (!cso_hash_iter_is_null(iter))
      return (void *)cso_hash_iter_data(iter);

   /* Miss: build the shader for this key and remember it for next time. */
   {
      void *shader = (type == PIPE_SHADER_VERTEX) ? create_vs(pipe, key)
                                                  : create_fs(pipe, key);
      cso_hash_insert(hash, key, shader);
      return shader;
   }
}
enum pipe_error cso_single_sampler(struct cso_context *ctx, unsigned shader_stage, unsigned idx, const struct pipe_sampler_state *templ) { void *handle = NULL; if (templ) { unsigned key_size = sizeof(struct pipe_sampler_state); unsigned hash_key = cso_construct_key((void*)templ, key_size); struct cso_hash_iter iter = cso_find_state_template(ctx->cache, hash_key, CSO_SAMPLER, (void *) templ, key_size); if (cso_hash_iter_is_null(iter)) { struct cso_sampler *cso = MALLOC(sizeof(struct cso_sampler)); if (!cso) return PIPE_ERROR_OUT_OF_MEMORY; memcpy(&cso->state, templ, sizeof(*templ)); cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state); cso->delete_state = (cso_state_callback) ctx->pipe->delete_sampler_state; cso->context = ctx->pipe; iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso); if (cso_hash_iter_is_null(iter)) { FREE(cso); return PIPE_ERROR_OUT_OF_MEMORY; } handle = cso->data; } else { handle = ((struct cso_sampler *)cso_hash_iter_data(iter))->data; } } ctx->samplers[shader_stage].samplers[idx] = handle; return PIPE_OK; }
enum pipe_error cso_set_fragment_shader(struct cso_context *ctx, const struct pipe_shader_state *templ) { const struct tgsi_token *tokens = templ->tokens; unsigned num_tokens = tgsi_num_tokens(tokens); size_t tokens_size = num_tokens*sizeof(struct tgsi_token); unsigned hash_key = cso_construct_key((void*)tokens, tokens_size); struct cso_hash_iter iter = cso_find_state_template(ctx->cache, hash_key, CSO_FRAGMENT_SHADER, (void*)tokens, sizeof(*templ)); /* XXX correct? tokens_size? */ void *handle = NULL; if (cso_hash_iter_is_null(iter)) { struct cso_fragment_shader *cso = MALLOC(sizeof(struct cso_fragment_shader) + tokens_size); struct tgsi_token *cso_tokens = (struct tgsi_token *)((char *)cso + sizeof(*cso)); if (!cso) return PIPE_ERROR_OUT_OF_MEMORY; memcpy(cso_tokens, tokens, tokens_size); cso->state.tokens = cso_tokens; cso->data = ctx->pipe->create_fs_state(ctx->pipe, &cso->state); cso->delete_state = (cso_state_callback)ctx->pipe->delete_fs_state; cso->context = ctx->pipe; iter = cso_insert_state(ctx->cache, hash_key, CSO_FRAGMENT_SHADER, cso); if (cso_hash_iter_is_null(iter)) { FREE(cso); return PIPE_ERROR_OUT_OF_MEMORY; } handle = cso->data; } else { handle = ((struct cso_fragment_shader *)cso_hash_iter_data(iter))->data; } return cso_set_fragment_shader_handle( ctx, handle ); }
void * shaders_cache_fill(struct shaders_cache *sc, int shader_key) { VGint key = shader_key; struct cached_shader *cached; struct cso_hash_iter iter = cso_hash_find(sc->hash, key); if (cso_hash_iter_is_null(iter)) { cached = CALLOC_STRUCT(cached_shader); cached->driver_shader = create_shader(sc->pipe->pipe, key, &cached->state); cso_hash_insert(sc->hash, key, cached); return cached->driver_shader; } cached = (struct cached_shader *)cso_hash_iter_data(iter); assert(cached->driver_shader); return cached->driver_shader; }
enum pipe_error cso_set_rasterizer(struct cso_context *ctx, const struct pipe_rasterizer_state *templ) { unsigned key_size = sizeof(struct pipe_rasterizer_state); unsigned hash_key = cso_construct_key((void*)templ, key_size); struct cso_hash_iter iter = cso_find_state_template(ctx->cache, hash_key, CSO_RASTERIZER, (void*)templ, key_size); void *handle = NULL; if (cso_hash_iter_is_null(iter)) { struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer)); if (!cso) return PIPE_ERROR_OUT_OF_MEMORY; memcpy(&cso->state, templ, sizeof(*templ)); cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state); cso->delete_state = (cso_state_callback)ctx->pipe->delete_rasterizer_state; cso->context = ctx->pipe; iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso); if (cso_hash_iter_is_null(iter)) { FREE(cso); return PIPE_ERROR_OUT_OF_MEMORY; } handle = cso->data; } else { handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data; } if (ctx->rasterizer != handle) { ctx->rasterizer = handle; ctx->pipe->bind_rasterizer_state(ctx->pipe, handle); } return PIPE_OK; }
/* u_vbuf uses its own caching for vertex elements, because it needs to keep
 * its own preprocessed state per vertex element CSO. */
/*
 * Look up (or create and cache) the u_vbuf_elements object for the given
 * vertex-element array and bind its driver CSO if it differs from the one
 * currently bound.
 *
 * NOTE(review): MALLOC_STRUCT and cso_insert_state results are not checked;
 * an OOM here would crash rather than fail gracefully — presumably accepted
 * project-wide, verify against the other cso users in this file.
 */
static struct u_vbuf_elements *
u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count,
                                    const struct pipe_vertex_element *states)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   struct u_vbuf_elements *ve;
   struct cso_velems_state velems_state;

   /* need to include the count into the stored state data too. */
   /* Key covers the count field plus exactly `count` elements; trailing
    * unused velems slots are excluded from hashing and comparison.
    * NOTE(review): this assumes cso_velems_state lays out `count`
    * immediately followed by `velems` with no padding between — confirm
    * against the struct definition. */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(mgr->cso_cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      /* Miss: build the preprocessed vertex-element state and cache it. */
      struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
      memcpy(&cso->state, &velems_state, key_size);
      cso->data = u_vbuf_create_vertex_elements(mgr, count, states);
      cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements;
      cso->context = (void*)mgr;

      iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
      ve = cso->data;
   } else {
      ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   assert(ve);

   /* Rebind only when the vertex-element object actually changes. */
   if (ve != mgr->ve)
      pipe->bind_vertex_elements_state(pipe, ve->driver_cso);
   return ve;
}
/**
 * Invoke \p func on every cached state object of the given \p type.
 *
 * The iterator is advanced before calling \p func so the callback may
 * safely delete the node it is handed.
 */
void cso_for_each_state(struct cso_cache *sc, enum cso_cache_type type,
                        cso_state_callback func, void *user_data)
{
   struct cso_hash *hash = 0;
   struct cso_hash_iter iter;

   switch (type) {
   case CSO_BLEND:
      hash = sc->blend_hash;
      break;
   case CSO_SAMPLER:
      hash = sc->sampler_hash;
      break;
   case CSO_DEPTH_STENCIL_ALPHA:
      hash = sc->depth_stencil_hash;
      break;
   case CSO_RASTERIZER:
      hash = sc->rasterizer_hash;
      break;
   case CSO_FRAGMENT_SHADER:
      hash = sc->fs_hash;
      break;
   case CSO_VERTEX_SHADER:
      hash = sc->vs_hash;
      break;
   case CSO_VELEMENTS:
      hash = sc->velements_hash;
      break;
   default:
      /* Previously an unhandled enum value fell through with hash == 0
       * and cso_hash_first_node(NULL) dereferenced a null pointer. */
      assert(0);
      return;
   }

   iter = cso_hash_first_node(hash);
   while (!cso_hash_iter_is_null(iter)) {
      void *state = cso_hash_iter_data(iter);
      iter = cso_hash_iter_next(iter);
      if (state) {
         func(state, user_data);
      }
   }
}
enum pipe_error cso_set_depth_stencil_alpha(struct cso_context *ctx, const struct pipe_depth_stencil_alpha_state *templ) { unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state); unsigned hash_key = cso_construct_key((void*)templ, key_size); struct cso_hash_iter iter = cso_find_state_template(ctx->cache, hash_key, CSO_DEPTH_STENCIL_ALPHA, (void*)templ, key_size); void *handle; if (cso_hash_iter_is_null(iter)) { struct cso_depth_stencil_alpha *cso = MALLOC(sizeof(struct cso_depth_stencil_alpha)); if (!cso) return PIPE_ERROR_OUT_OF_MEMORY; memcpy(&cso->state, templ, sizeof(*templ)); cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe, &cso->state); cso->delete_state = (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state; cso->context = ctx->pipe; iter = cso_insert_state(ctx->cache, hash_key, CSO_DEPTH_STENCIL_ALPHA, cso); if (cso_hash_iter_is_null(iter)) { FREE(cso); return PIPE_ERROR_OUT_OF_MEMORY; } handle = cso->data; } else { handle = ((struct cso_depth_stencil_alpha *)cso_hash_iter_data(iter))->data; } if (ctx->depth_stencil != handle) { ctx->depth_stencil = handle; ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle); } return PIPE_OK; }
struct pipe_surface * util_surfaces_do_get(struct util_surfaces *us, unsigned surface_struct_size, struct pipe_screen *pscreen, struct pipe_resource *pt, unsigned face, unsigned level, unsigned zslice, unsigned flags) { struct pipe_surface *ps; if(pt->target == PIPE_TEXTURE_3D || pt->target == PIPE_TEXTURE_CUBE) { /* or 2D array */ if(!us->u.hash) us->u.hash = cso_hash_create(); ps = cso_hash_iter_data(cso_hash_find(us->u.hash, ((zslice + face) << 8) | level)); } else { if(!us->u.array) us->u.array = CALLOC(pt->last_level + 1, sizeof(struct pipe_surface *)); ps = us->u.array[level]; } if(ps) { p_atomic_inc(&ps->reference.count); return ps; } ps = (struct pipe_surface *)CALLOC(1, surface_struct_size); if(!ps) return NULL; pipe_surface_init(ps, pt, face, level, zslice, flags); ps->offset = ~0; if(pt->target == PIPE_TEXTURE_3D || pt->target == PIPE_TEXTURE_CUBE) cso_hash_insert(us->u.hash, ((zslice + face) << 8) | level, ps); else us->u.array[level] = ps; return ps; }
static INLINE void sanitize_hash(struct cso_hash *hash, enum cso_cache_type type, int max_size, void *user_data) { struct cso_context *ctx = (struct cso_context *)user_data; /* if we're approach the maximum size, remove fourth of the entries * otherwise every subsequent call will go through the same */ int hash_size = cso_hash_size(hash); int max_entries = (max_size > hash_size) ? max_size : hash_size; int to_remove = (max_size < max_entries) * max_entries/4; struct cso_hash_iter iter = cso_hash_first_node(hash); if (hash_size > max_size) to_remove += hash_size - max_size; while (to_remove) { /*remove elements until we're good */ /*fixme: currently we pick the nodes to remove at random*/ void *cso = cso_hash_iter_data(iter); if (delete_cso(ctx, cso, type)) { iter = cso_hash_erase(hash, iter); --to_remove; } else iter = cso_hash_iter_next(iter); } }
enum pipe_error cso_set_vertex_shader(struct cso_context *ctx, const struct pipe_shader_state *templ) { unsigned hash_key = cso_construct_key((void*)templ, sizeof(struct pipe_shader_state)); struct cso_hash_iter iter = cso_find_state_template(ctx->cache, hash_key, CSO_VERTEX_SHADER, (void*)templ, sizeof(*templ)); void *handle = NULL; if (cso_hash_iter_is_null(iter)) { struct cso_vertex_shader *cso = MALLOC(sizeof(struct cso_vertex_shader)); if (!cso) return PIPE_ERROR_OUT_OF_MEMORY; memcpy(cso->state, templ, sizeof(*templ)); cso->data = ctx->pipe->create_vs_state(ctx->pipe, &cso->state); cso->delete_state = (cso_state_callback)ctx->pipe->delete_vs_state; cso->context = ctx->pipe; iter = cso_insert_state(ctx->cache, hash_key, CSO_VERTEX_SHADER, cso); if (cso_hash_iter_is_null(iter)) { FREE(cso); return PIPE_ERROR_OUT_OF_MEMORY; } handle = cso->data; } else { handle = ((struct cso_vertex_shader *)cso_hash_iter_data(iter))->data; } return cso_set_vertex_shader_handle( ctx, handle ); }
/* Return the iterator's payload viewed as a keymap_item. */
static INLINE struct keymap_item *
hash_table_item(struct cso_hash_iter iter)
{
   void *data = cso_hash_iter_data(iter);
   return (struct keymap_item *) data;
}