static boolean
iter_immediate(
   struct tgsi_iterate_context *iter,
   struct tgsi_full_immediate *imm )
{
   struct sanity_check_ctx *ctx = (struct sanity_check_ctx *) iter;
   scan_register *reg;

   /* No immediates allowed after the first instruction.
    */
   if (ctx->num_instructions > 0)
      report_error( ctx, "Instruction expected but immediate found" );

   /* Mark the register as declared.
    */
   reg = MALLOC(sizeof(scan_register));
   fill_scan_register1d(reg, TGSI_FILE_IMMEDIATE, ctx->num_imms);
   cso_hash_insert(ctx->regs_decl, scan_register_key(reg), reg);
   ctx->num_imms++;

   /* Check data type validity.
    */
   if (imm->Immediate.DataType != TGSI_IMM_FLOAT32 &&
       imm->Immediate.DataType != TGSI_IMM_UINT32 &&
       imm->Immediate.DataType != TGSI_IMM_INT32) {
      report_error( ctx, "(%u): Invalid immediate data type",
                    imm->Immediate.DataType );
      return TRUE;
   }

   return TRUE;
}
struct cso_hash_iter
cso_insert_state(struct cso_cache *sc,
                 unsigned hash_key, enum cso_cache_type type,
                 void *state)
{
   struct cso_hash *hash = _cso_hash_for_type(sc, type);
   sanitize_hash(sc, hash, type, sc->max_size);

   return cso_hash_insert(hash, hash_key, state);
}
void vg_context_add_object(struct vg_context *ctx,
                           struct vg_object *obj)
{
   if (ctx) {
      struct cso_hash *hash = ctx->owned_objects[obj->type];
      if (!hash)
         return;
      cso_hash_insert(hash, (unsigned) obj->handle, obj);
   }
}
static boolean
check_register_usage(
   struct sanity_check_ctx *ctx,
   scan_register *reg,
   const char *name,
   boolean indirect_access )
{
   if (!check_file_name( ctx, reg->file )) {
      FREE(reg);
      return FALSE;
   }

   if (indirect_access) {
      /* Note that 'index' is an offset relative to the value of the
       * address register.  No range checking done here.
       */
      reg->indices[0] = 0;
      reg->indices[1] = 0;
      if (!is_any_register_declared( ctx, reg->file ))
         report_error( ctx, "%s: Undeclared %s register",
                       file_names[reg->file], name );
      if (!is_ind_register_used(ctx, reg))
         cso_hash_insert(ctx->regs_ind_used, reg->file, reg);
      else
         FREE(reg);
   }
   else {
      if (!is_register_declared( ctx, reg )) {
         if (reg->dimensions == 2) {
            report_error( ctx, "%s[%d][%d]: Undeclared %s register",
                          file_names[reg->file],
                          reg->indices[0], reg->indices[1], name );
         }
         else {
            report_error( ctx, "%s[%d]: Undeclared %s register",
                          file_names[reg->file],
                          reg->indices[0], name );
         }
      }
      if (!is_register_used( ctx, reg ))
         cso_hash_insert(ctx->regs_used, scan_register_key(reg), reg);
      else
         FREE(reg);
   }
   return TRUE;
}
void vg_context_add_object(struct vg_context *ctx,
                           enum vg_object_type type,
                           void *ptr)
{
   if (ctx) {
      struct cso_hash *hash = ctx->owned_objects[type];
      if (!hash)
         return;
      cso_hash_insert(hash, (unsigned) (long) ptr, ptr);
   }
}
static void
check_and_declare(struct sanity_check_ctx *ctx,
                  scan_register *reg)
{
   if (is_register_declared( ctx, reg ))
      report_error( ctx, "%s[%u]: The same register declared more than once",
                    file_names[reg->file], reg->indices[0] );
   cso_hash_insert(ctx->regs_decl,
                   scan_register_key(reg),
                   reg);
}
struct translate *
translate_cache_find(struct translate_cache *cache,
                     struct translate_key *key)
{
   unsigned hash_key = create_key(key);
   struct translate *translate = (struct translate*)
      cso_hash_find_data_from_template(cache->hash,
                                       hash_key,
                                       key, sizeof(*key));

   if (!translate) {
      /* create/insert */
      translate = translate_create(key);
      cso_hash_insert(cache->hash, hash_key, translate);
   }
   return translate;
}
boolean
util_surfaces_do_get(struct util_surfaces *us, unsigned surface_struct_size,
                     struct pipe_context *ctx, struct pipe_resource *pt,
                     unsigned level, unsigned layer,
                     struct pipe_surface **res)
{
   struct pipe_surface *ps;

   if (pt->target == PIPE_TEXTURE_3D || pt->target == PIPE_TEXTURE_CUBE) {
      /* or 2D array */
      if (!us->u.hash)
         us->u.hash = cso_hash_create();

      ps = cso_hash_iter_data(cso_hash_find(us->u.hash, (layer << 8) | level));
   }
   else {
      if (!us->u.array)
         us->u.array = CALLOC(pt->last_level + 1, sizeof(struct pipe_surface *));
      ps = us->u.array[level];
   }

   if (ps && ps->context == ctx) {
      p_atomic_inc(&ps->reference.count);
      *res = ps;
      return FALSE;
   }

   ps = (struct pipe_surface *)CALLOC(1, surface_struct_size);
   if (!ps) {
      *res = NULL;
      return FALSE;
   }

   pipe_surface_init(ctx, ps, pt, level, layer);

   if (pt->target == PIPE_TEXTURE_3D || pt->target == PIPE_TEXTURE_CUBE)
      cso_hash_insert(us->u.hash, (layer << 8) | level, ps);
   else
      us->u.array[level] = ps;

   *res = ps;
   return TRUE;
}
static INLINE void *
shader_from_cache(struct pipe_context *pipe,
                  unsigned type,
                  struct cso_hash *hash,
                  unsigned key)
{
   void *shader = 0;

   struct cso_hash_iter iter = cso_hash_find(hash, key);

   if (cso_hash_iter_is_null(iter)) {
      if (type == PIPE_SHADER_VERTEX)
         shader = create_vs(pipe, key);
      else
         shader = create_fs(pipe, key);
      cso_hash_insert(hash, key, shader);
   } else
      shader = (void *)cso_hash_iter_data(iter);

   return shader;
}
static void add_glyph(struct vg_font *font,
                      VGuint glyphIndex,
                      struct vg_object *obj,
                      VGboolean isHinted,
                      const VGfloat glyphOrigin[2],
                      const VGfloat escapement[2])
{
   struct vg_glyph *glyph;

   /* remove the existing one */
   del_glyph(font, glyphIndex);

   glyph = CALLOC_STRUCT(vg_glyph);
   glyph->object = obj;
   glyph->is_hinted = isHinted;
   memcpy(glyph->glyph_origin, glyphOrigin, sizeof(glyph->glyph_origin));
   memcpy(glyph->escapement, escapement, sizeof(glyph->escapement));

   cso_hash_insert(font->glyphs, (unsigned) glyphIndex, glyph);
}
void *
shaders_cache_fill(struct shaders_cache *sc,
                   int shader_key)
{
   VGint key = shader_key;
   struct cached_shader *cached;
   struct cso_hash_iter iter = cso_hash_find(sc->hash, key);

   if (cso_hash_iter_is_null(iter)) {
      cached = CALLOC_STRUCT(cached_shader);
      cached->driver_shader = create_shader(sc->pipe->pipe, key, &cached->state);

      cso_hash_insert(sc->hash, key, cached);

      return cached->driver_shader;
   }

   cached = (struct cached_shader *)cso_hash_iter_data(iter);

   assert(cached->driver_shader);
   return cached->driver_shader;
}
/**
 * Insert a new key + data pointer into the table.
 * Note: we create a copy of the key, but not the data!
 * If the key is already present in the table, replace the existing
 * entry (calling the delete callback on the previous entry).
 * If the maximum capacity of the map is reached an old entry
 * will be deleted (the delete callback will be called).
 */
boolean
util_keymap_insert(struct keymap *map,
                   const void *key, const void *data,
                   void *user)
{
   unsigned key_hash;
   struct keymap_item *item;
   struct cso_hash_iter iter;

   assert(map);
   if (!map)
      return FALSE;

   key_hash = hash(key, map->key_size);

   item = hash_table_find_item(map, key, key_hash);
   if (item) {
      /* call delete callback for old entry/item */
      map->delete_func(map, item->key, item->value, user);
      item->value = (void *) data;
      return TRUE;
   }

   item = MALLOC_STRUCT(keymap_item);
   if (!item)
      return FALSE;

   item->key = mem_dup(key, map->key_size);
   item->value = (void *) data;

   iter = cso_hash_insert(map->cso, key_hash, item);
   if (cso_hash_iter_is_null(iter)) {
      FREE(item);
      return FALSE;
   }

   map->num_entries++;

   return TRUE;
}
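/*
 * A minimal usage sketch for util_keymap_insert(), illustrating the
 * replace-with-delete-callback behavior described in the comment above.
 * The constructor name (util_new_keymap), the keymap_delete_func callback
 * signature, and the example key/value types are assumptions based on the
 * surrounding u_keymap interface and may not match the actual header exactly.
 */
static void
example_delete_value(const struct keymap *map, const void *key,
                     void *data, void *user)
{
   /* The keymap copied the key but not the data, so free the data here. */
   FREE(data);
}

static void
keymap_usage_example(void)
{
   /* 32-bit keys, at most 16 live entries, example_delete_value() invoked
    * on replacement or eviction (assumed constructor). */
   struct keymap *map = util_new_keymap(sizeof(unsigned), 16,
                                        example_delete_value);
   unsigned key = 42;

   /* First insert stores the value; a second insert with the same key
    * replaces it and runs the delete callback on the old value. */
   util_keymap_insert(map, &key, MALLOC(64), NULL);
   util_keymap_insert(map, &key, MALLOC(64), NULL);
}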
struct pipe_surface *
util_surfaces_do_get(struct util_surfaces *us, unsigned surface_struct_size,
                     struct pipe_screen *pscreen, struct pipe_resource *pt,
                     unsigned face, unsigned level, unsigned zslice,
                     unsigned flags)
{
   struct pipe_surface *ps;

   if (pt->target == PIPE_TEXTURE_3D || pt->target == PIPE_TEXTURE_CUBE) {
      /* or 2D array */
      if (!us->u.hash)
         us->u.hash = cso_hash_create();

      ps = cso_hash_iter_data(cso_hash_find(us->u.hash,
                                            ((zslice + face) << 8) | level));
   }
   else {
      if (!us->u.array)
         us->u.array = CALLOC(pt->last_level + 1, sizeof(struct pipe_surface *));
      ps = us->u.array[level];
   }

   if (ps) {
      p_atomic_inc(&ps->reference.count);
      return ps;
   }

   ps = (struct pipe_surface *)CALLOC(1, surface_struct_size);
   if (!ps)
      return NULL;

   pipe_surface_init(ps, pt, face, level, zslice, flags);
   ps->offset = ~0;

   if (pt->target == PIPE_TEXTURE_3D || pt->target == PIPE_TEXTURE_CUBE)
      cso_hash_insert(us->u.hash, ((zslice + face) << 8) | level, ps);
   else
      us->u.array[level] = ps;

   return ps;
}