static int main_relations_create_idlink_cb(void *user_data, ID *id_self, ID **id_pointer, int cb_flag)
{
  MainIDRelations *rel = user_data;

  if (*id_pointer) {
    MainIDRelationsEntry *entry, **entry_p;

    entry = BLI_mempool_alloc(rel->entry_pool);
    if (BLI_ghash_ensure_p(rel->id_user_to_used, id_self, (void ***)&entry_p)) {
      entry->next = *entry_p;
    }
    else {
      entry->next = NULL;
    }
    entry->id_pointer = id_pointer;
    entry->usage_flag = cb_flag;
    *entry_p = entry;

    entry = BLI_mempool_alloc(rel->entry_pool);
    if (BLI_ghash_ensure_p(rel->id_used_to_user, *id_pointer, (void ***)&entry_p)) {
      entry->next = *entry_p;
    }
    else {
      entry->next = NULL;
    }
    entry->id_pointer = (ID **)id_self;
    entry->usage_flag = cb_flag;
    *entry_p = entry;
  }

  return IDWALK_RET_NOP;
}
static void do_moviecache_put(MovieCache *cache, void *userkey, ImBuf *ibuf, int need_lock)
{
  MovieCacheKey *key;
  MovieCacheItem *item;

  if (!limitor)
    IMB_moviecache_init();

  IMB_refImBuf(ibuf);

  key = BLI_mempool_alloc(cache->keys_pool);
  key->cache_owner = cache;
  key->userkey = BLI_mempool_alloc(cache->userkeys_pool);
  memcpy(key->userkey, userkey, cache->keysize);

  item = BLI_mempool_alloc(cache->items_pool);

  PRINT("%s: cache '%s' put %p, item %p\n", __func__, cache->name, ibuf, item);

  item->ibuf = ibuf;
  item->cache_owner = cache;
  item->c_handle = NULL;
  item->priority_data = NULL;

  if (cache->getprioritydatafp) {
    item->priority_data = cache->getprioritydatafp(userkey);
  }

  BLI_ghash_remove(cache->hash, key, moviecache_keyfree, moviecache_valfree);
  BLI_ghash_insert(cache->hash, key, item);

  if (cache->last_userkey) {
    memcpy(cache->last_userkey, userkey, cache->keysize);
  }

  if (need_lock)
    BLI_mutex_lock(&limitor_lock);

  item->c_handle = MEM_CacheLimiter_insert(limitor, item);

  MEM_CacheLimiter_ref(item->c_handle);
  MEM_CacheLimiter_enforce_limits(limitor);
  MEM_CacheLimiter_unref(item->c_handle);

  if (need_lock)
    BLI_mutex_unlock(&limitor_lock);

  /* the cache limiter can't remove unused keys which point to destroyed values */
  check_unused_keys(cache);

  if (cache->points) {
    MEM_freeN(cache->points);
    cache->points = NULL;
  }
}
static void check_persistent(SpaceOops *soops, TreeElement *te, ID *id, short type, short nr)
{
  TreeStoreElem *tselem;

  if (soops->treestore == NULL) {
    /* if treestore was not created in readfile.c, create it here */
    soops->treestore = BLI_mempool_create(sizeof(TreeStoreElem), 1, 512, BLI_MEMPOOL_ALLOW_ITER);
  }
  if (soops->treehash == NULL) {
    soops->treehash = BKE_treehash_create_from_treestore(soops->treestore);
  }

  /* find any unused tree element in treestore and mark it as used
   * (note that there may be multiple unused elements in case of linked objects) */
  tselem = BKE_treehash_lookup_unused(soops->treehash, type, nr, id);
  if (tselem) {
    te->store_elem = tselem;
    tselem->used = 1;
    return;
  }

  /* add 1 element to treestore */
  tselem = BLI_mempool_alloc(soops->treestore);
  tselem->type = type;
  tselem->nr = type ? nr : 0;
  tselem->id = id;
  tselem->used = 0;
  tselem->flag = TSE_CLOSED;
  te->store_elem = tselem;
  BKE_treehash_add_element(soops->treehash, tselem);
}
void BLI_ghash_insert(GHash *gh, void *key, void *val)
{
  unsigned int hash = gh->hashfp(key) % gh->nbuckets;
  Entry *e = (Entry *)BLI_mempool_alloc(gh->entrypool);

  e->next = gh->buckets[hash];
  e->key = key;
  e->val = val;
  gh->buckets[hash] = e;

  if (UNLIKELY(++gh->nentries > gh->nbuckets / 2)) {
    Entry **old = gh->buckets;
    const unsigned nold = gh->nbuckets;
    unsigned int i;

    gh->nbuckets = hashsizes[++gh->cursize];
    gh->buckets = (Entry **)MEM_callocN(gh->nbuckets * sizeof(*gh->buckets), "buckets");

    for (i = 0; i < nold; i++) {
      Entry *e_next;
      for (e = old[i]; e; e = e_next) {
        e_next = e->next;
        hash = gh->hashfp(e->key) % gh->nbuckets;
        e->next = gh->buckets[hash];
        gh->buckets[hash] = e;
      }
    }

    MEM_freeN(old);
  }
}
void BLI_ghash_insert(GHash *gh, void *key, void *val)
{
  unsigned int hash = gh->hashfp(key) % gh->nbuckets;
  Entry *e = (Entry *)BLI_mempool_alloc(gh->entrypool);

  e->key = key;
  e->val = val;
  e->next = gh->buckets[hash];
  gh->buckets[hash] = e;

  if (++gh->nentries > (float)gh->nbuckets / 2) {
    Entry **old = gh->buckets;
    int i, nold = gh->nbuckets;

    gh->nbuckets = hashsizes[++gh->cursize];
    gh->buckets = (Entry **)MEM_mallocN(gh->nbuckets * sizeof(*gh->buckets), "buckets");
    memset(gh->buckets, 0, gh->nbuckets * sizeof(*gh->buckets));

    for (i = 0; i < nold; i++) {
      for (e = old[i]; e;) {
        Entry *n = e->next;

        hash = gh->hashfp(e->key) % gh->nbuckets;
        e->next = gh->buckets[hash];
        gh->buckets[hash] = e;

        e = n;
      }
    }

    MEM_freeN(old);
  }
}
/* Allocate and initialize a BMLogVert */
static BMLogVert *bm_log_vert_alloc(BMesh *bm, BMLog *log, BMVert *v)
{
  BMLogEntry *entry = log->current_entry;
  BMLogVert *lv = BLI_mempool_alloc(entry->pool_verts);

  bm_log_vert_bmvert_copy(bm, lv, v);

  return lv;
}
/* Allocate and initialize a BMLogVert */
static BMLogVert *bm_log_vert_alloc(BMLog *log, BMVert *v, const int cd_vert_mask_offset)
{
  BMLogEntry *entry = log->current_entry;
  BMLogVert *lv = BLI_mempool_alloc(entry->pool_verts);

  bm_log_vert_bmvert_copy(lv, v, cd_vert_mask_offset);

  return lv;
}
static void BME_CD_alloc_block(BME_CustomData *data, void **block)
{
  if (*block) {
    /* if we copy layers that have their own free functions like deformverts */
    BME_CD_free_block(data, block);
  }

  if (data->totsize > 0)
    *block = BLI_mempool_alloc(data->pool);
  else
    *block = NULL;
}
static void outliner_storage_cleanup(SpaceOops *soops)
{
  BLI_mempool *ts = soops->treestore;

  if (ts) {
    TreeStoreElem *tselem;
    int unused = 0;

    /* each element used once, for ID blocks with more users to have each a treestore */
    BLI_mempool_iter iter;

    BLI_mempool_iternew(ts, &iter);
    while ((tselem = BLI_mempool_iterstep(&iter))) {
      tselem->used = 0;
    }

    /* cleanup only after reading file or undo step, and always for
     * RNA datablocks view in order to save memory */
    if (soops->storeflag & SO_TREESTORE_CLEANUP) {
      BLI_mempool_iternew(ts, &iter);
      while ((tselem = BLI_mempool_iterstep(&iter))) {
        if (tselem->id == NULL)
          unused++;
      }

      if (unused) {
        if (BLI_mempool_count(ts) == unused) {
          BLI_mempool_destroy(ts);
          soops->treestore = NULL;
          if (soops->treehash) {
            BKE_treehash_free(soops->treehash);
            soops->treehash = NULL;
          }
        }
        else {
          TreeStoreElem *tsenew;
          BLI_mempool *new_ts = BLI_mempool_create(sizeof(TreeStoreElem), BLI_mempool_count(ts) - unused,
                                                   512, BLI_MEMPOOL_ALLOW_ITER);

          BLI_mempool_iternew(ts, &iter);
          while ((tselem = BLI_mempool_iterstep(&iter))) {
            if (tselem->id) {
              tsenew = BLI_mempool_alloc(new_ts);
              *tsenew = *tselem;
            }
          }
          BLI_mempool_destroy(ts);
          soops->treestore = new_ts;
          if (soops->treehash) {
            /* update hash table to fix broken pointers */
            BKE_treehash_rebuild_from_treestore(soops->treehash, soops->treestore);
          }
        }
      }
    }
  }
}
void seq_stripelem_cache_put(SeqRenderData context, struct Sequence *seq, float cfra,
                             seq_stripelem_ibuf_t type, struct ImBuf *i)
{
  seqCacheKey *key;
  seqCacheEntry *e;

  if (!i) {
    return;
  }

  ibufs_in++;

  if (!entrypool) {
    seq_stripelem_cache_init();
  }

  key = (seqCacheKey *)BLI_mempool_alloc(keypool);

  key->seq = seq;
  key->context = context;
  key->cfra = cfra - seq->start;
  key->type = type;

  /* Normally we want our own version, but start and end stills are
   * duplicates of the original. */
  if (ELEM(type, SEQ_STRIPELEM_IBUF_STARTSTILL, SEQ_STRIPELEM_IBUF_ENDSTILL) == 0)
    IMB_refImBuf(i);

  e = (seqCacheEntry *)BLI_mempool_alloc(entrypool);

  e->ibuf = i;
  e->c_handle = NULL;

  BLI_ghash_remove(hash, key, HashKeyFree, HashValFree);
  BLI_ghash_insert(hash, key, e);

  e->c_handle = MEM_CacheLimiter_insert(limitor, e);

  MEM_CacheLimiter_ref(e->c_handle);
  MEM_CacheLimiter_enforce_limits(limitor);
  MEM_CacheLimiter_unref(e->c_handle);
}
void seq_stripelem_cache_put(SeqRenderData context, struct Sequence *seq, float cfra,
                             seq_stripelem_ibuf_t type, struct ImBuf *i)
{
  seqCacheKey *key;
  seqCacheEntry *e;

  if (!i) {
    return;
  }

  ibufs_in++;

  if (!entrypool) {
    seq_stripelem_cache_init();
  }

  key = (seqCacheKey *)BLI_mempool_alloc(keypool);

  key->seq = seq;
  key->context = context;
  key->cfra = cfra - seq->start;
  key->type = type;

  IMB_refImBuf(i);

  e = (seqCacheEntry *)BLI_mempool_alloc(entrypool);

  e->ibuf = i;
  e->c_handle = NULL;

  BLI_ghash_remove(hash, key, HashKeyFree, HashValFree);
  BLI_ghash_insert(hash, key, e);

  e->c_handle = MEM_CacheLimiter_insert(limitor, e);

  MEM_CacheLimiter_ref(e->c_handle);
  MEM_CacheLimiter_enforce_limits(limitor);
  MEM_CacheLimiter_unref(e->c_handle);
}
/**
 * Insert function that doesn't set the value (use for GSet)
 */
BLI_INLINE void ghash_insert_ex_keyonly(GHash *gh, void *key, const unsigned int bucket_index)
{
  Entry *e = BLI_mempool_alloc(gh->entrypool);

  BLI_assert((gh->flag & GHASH_FLAG_ALLOW_DUPES) || (BLI_ghash_haskey(gh, key) == 0));
  BLI_assert((gh->flag & GHASH_FLAG_IS_GSET) != 0);

  e->next = gh->buckets[bucket_index];
  e->key = key;
  gh->buckets[bucket_index] = e;

  ghash_buckets_expand(gh, ++gh->nentries, false);
}
/**
 * Internal insert function.
 * Takes hash and bucket_index arguments to avoid calling #ghash_keyhash and #ghash_bucket_index multiple times.
 */
BLI_INLINE void ghash_insert_ex(GHash *gh, void *key, void *val, const unsigned int bucket_index)
{
  GHashEntry *e = BLI_mempool_alloc(gh->entrypool);

  BLI_assert((gh->flag & GHASH_FLAG_ALLOW_DUPES) || (BLI_ghash_haskey(gh, key) == 0));
  BLI_assert(!(gh->flag & GHASH_FLAG_IS_GSET));

  e->e.next = gh->buckets[bucket_index];
  e->e.key = key;
  e->val = val;
  gh->buckets[bucket_index] = (Entry *)e;

  ghash_buckets_expand(gh, ++gh->nentries, false);
}
/**
 * Ensure \a (v0, v1) exists in \a eh.
 *
 * This handles the common situation where the caller needs to ensure a key is added to \a eh,
 * constructing a new value in the case the key isn't found,
 * otherwise using the existing value.
 *
 * Such situations typically incur multiple lookups, however this function
 * avoids them by ensuring the key is added,
 * returning a pointer to the value so it can be used or initialized by the caller.
 *
 * \returns true when the value didn't need to be added.
 * (when false, the caller _must_ initialize the value).
 */
bool BLI_edgehash_ensure_p(EdgeHash *eh, uint v0, uint v1, void ***r_val)
{
  EDGE_ORD(v0, v1);
  const uint bucket_index = edgehash_bucket_index(eh, v0, v1);
  EdgeEntry *e = edgehash_lookup_entry_ex(eh, v0, v1, bucket_index);
  const bool haskey = (e != NULL);

  if (!haskey) {
    e = BLI_mempool_alloc(eh->epool);
    edgehash_insert_ex_keyonly_entry(eh, v0, v1, bucket_index, e);
  }

  *r_val = &e->val;
  return haskey;
}
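/*
 * A minimal usage sketch of the ensure-p contract documented above: the caller
 * initializes the value only when BLI_edgehash_ensure_p() returns false.
 * The wrapper function `edge_use_count_add` and the integer-in-pointer counting
 * are hypothetical illustrations, not part of the snippet above.
 */
static void edge_use_count_add(EdgeHash *eh, uint v0, uint v1)
{
  void **val_p;

  if (!BLI_edgehash_ensure_p(eh, v0, v1, &val_p)) {
    /* key was just added: the caller _must_ initialize the value */
    *val_p = (void *)(intptr_t)1;
  }
  else {
    /* key already existed: reuse the stored value */
    *val_p = (void *)((intptr_t)*val_p + 1);
  }
}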
/* Allocate and initialize a BMLogFace */
static BMLogFace *bm_log_face_alloc(BMLog *log, BMFace *f)
{
  BMLogEntry *entry = log->current_entry;
  BMLogFace *lf = BLI_mempool_alloc(entry->pool_faces);
  BMVert *v[3];

  BLI_assert(f->len == 3);

  // BM_iter_as_array(NULL, BM_VERTS_OF_FACE, f, (void **)v, 3);
  BM_face_as_array_vert_tri(f, v);

  lf->v_ids[0] = bm_log_vert_id_get(log, v[0]);
  lf->v_ids[1] = bm_log_vert_id_get(log, v[1]);
  lf->v_ids[2] = bm_log_vert_id_get(log, v[2]);

  return lf;
}
/**
 * \brief Add a new Walker State
 *
 * Allocate a new empty state and put it on the worklist.
 * A pointer to the new state is returned so that the caller
 * can fill in the state data. The new state will be inserted
 * at the front for depth-first walks, and at the end for
 * breadth-first walks.
 */
void *BMW_state_add(BMWalker *walker)
{
  BMwGenericWalker *newstate;
  newstate = BLI_mempool_alloc(walker->worklist);
  newstate->depth = walker->depth;
  switch (walker->order) {
    case BMW_DEPTH_FIRST:
      BLI_addhead(&walker->states, newstate);
      break;
    case BMW_BREADTH_FIRST:
      BLI_addtail(&walker->states, newstate);
      break;
    default:
      BLI_assert(0);
      break;
  }
  return newstate;
}
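/*
 * A hedged sketch of how a walker step function typically consumes
 * BMW_state_add(): the concrete state struct embeds BMwGenericWalker as its
 * first member and the caller fills in the remaining fields. The BMwDemoWalker
 * type and demo_walker_begin() are hypothetical; only BMW_state_add() itself
 * comes from the snippet above.
 */
typedef struct BMwDemoWalker {
  BMwGenericWalker header;
  BMEdge *curedge;
} BMwDemoWalker;

static void demo_walker_begin(BMWalker *walker, BMEdge *e)
{
  BMwDemoWalker *state = BMW_state_add(walker);
  /* header.depth was already filled in by BMW_state_add() */
  state->curedge = e;
}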
void BLI_edgehash_insert(EdgeHash *eh, unsigned int v0, unsigned int v1, void *val)
{
  unsigned int hash;
  EdgeEntry *e = BLI_mempool_alloc(eh->epool);

  /* this helps to track down errors with bad edge data */
  BLI_assert(v0 != v1);

  EDGE_ORD(v0, v1); /* ensure v0 is smaller */

  hash = EDGE_HASH(v0, v1) % eh->nbuckets;

  e->v0 = v0;
  e->v1 = v1;
  e->val = val;
  e->next = eh->buckets[hash];
  eh->buckets[hash] = e;

  if (++eh->nentries > eh->nbuckets * 3) {
    EdgeEntry **old = eh->buckets;
    int i, nold = eh->nbuckets;

    eh->nbuckets = _ehash_hashsizes[++eh->cursize];
    eh->buckets = MEM_mallocN(eh->nbuckets * sizeof(*eh->buckets), "eh buckets");
    memset(eh->buckets, 0, eh->nbuckets * sizeof(*eh->buckets));

    for (i = 0; i < nold; i++) {
      for (e = old[i]; e;) {
        EdgeEntry *n = e->next;

        hash = EDGE_HASH(e->v0, e->v1) % eh->nbuckets;
        e->next = eh->buckets[hash];
        eh->buckets[hash] = e;

        e = n;
      }
    }

    MEM_freeN(old);
  }
}
/**
 * Insert function that doesn't set the value (use for EdgeSet)
 */
BLI_INLINE void edgehash_insert_ex_keyonly(EdgeHash *eh, unsigned int v0, unsigned int v1, unsigned int hash)
{
  EdgeEntry *e = BLI_mempool_alloc(eh->epool);

  BLI_assert((eh->flag & EDGEHASH_FLAG_ALLOW_DUPES) || (BLI_edgehash_haskey(eh, v0, v1) == 0));

  /* this helps to track down errors with bad edge data */
  BLI_assert(v0 < v1);
  BLI_assert(v0 != v1);

  e->next = eh->buckets[hash];
  e->v0 = v0;
  e->v1 = v1;
  /* intentionally leave value unset */
  eh->buckets[hash] = e;

  if (UNLIKELY(edgehash_test_expand_buckets(++eh->nentries, eh->nbuckets))) {
    edgehash_resize_buckets(eh, _ehash_hashsizes[++eh->cursize]);
  }
}
/**
 * Internal insert function.
 * Takes a \a bucket_index argument to avoid calling #edgehash_bucket_index multiple times.
 */
BLI_INLINE void edgehash_insert_ex(EdgeHash *eh, uint v0, uint v1, void *val, const uint bucket_index)
{
  EdgeEntry *e = BLI_mempool_alloc(eh->epool);

  BLI_assert((eh->flag & EDGEHASH_FLAG_ALLOW_DUPES) || (BLI_edgehash_haskey(eh, v0, v1) == 0));
  IS_EDGEHASH_ASSERT(eh);

  /* this helps to track down errors with bad edge data */
  BLI_assert(v0 < v1);
  BLI_assert(v0 != v1);

  e->next = eh->buckets[bucket_index];
  e->v0 = v0;
  e->v1 = v1;
  e->val = val;
  eh->buckets[bucket_index] = e;

  if (UNLIKELY(edgehash_test_expand_buckets(++eh->nentries, eh->nbuckets))) {
    edgehash_resize_buckets(eh, _ehash_hashsizes[++eh->cursize]);
  }
}
void *BLI_mempool_calloc(BLI_mempool *pool)
{
  void *retval = BLI_mempool_alloc(pool);
  memset(retval, 0, pool->esize);
  return retval;
}
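/*
 * A hedged sketch of the pool lifecycle around BLI_mempool_calloc().
 * BLI_mempool_create(), BLI_MEMPOOL_NOP and BLI_mempool_destroy() appear in
 * other snippets in this section; BLI_mempool_free() and the DemoElem struct
 * are assumptions for illustration.
 */
typedef struct DemoElem {
  int id;
  float co[3];
} DemoElem;

static void demo_mempool_calloc_usage(void)
{
  /* element size, initial element count, per-chunk element count, flags */
  BLI_mempool *pool = BLI_mempool_create(sizeof(DemoElem), 0, 512, BLI_MEMPOOL_NOP);

  /* zero-initialized element: esize bytes cleared, as in BLI_mempool_calloc() above */
  DemoElem *elem = BLI_mempool_calloc(pool);
  elem->id = 1;

  BLI_mempool_free(pool, elem); /* return one element to the pool */
  BLI_mempool_destroy(pool);    /* or release the whole pool at once */
}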
/**
 * \note This function sets the edge indices to invalid values.
 */
void BM_mesh_beautify_fill(BMesh *bm, BMEdge **edge_array, const int edge_array_len,
                           const short flag, const short method,
                           const short oflag_edge, const short oflag_face)
{
  Heap *eheap;            /* edge heap */
  HeapNode **eheap_table; /* edge index aligned table pointing to the eheap */

  GSet **edge_state_arr = MEM_callocN((size_t)edge_array_len * sizeof(GSet *), __func__);
  BLI_mempool *edge_state_pool = BLI_mempool_create(sizeof(EdRotState), 0, 512, BLI_MEMPOOL_NOP);
  int i;

#ifdef DEBUG_TIME
  TIMEIT_START(beautify_fill);
#endif

  eheap = BLI_heap_new_ex((uint)edge_array_len);
  eheap_table = MEM_mallocN(sizeof(HeapNode *) * (size_t)edge_array_len, __func__);

  /* build heap */
  for (i = 0; i < edge_array_len; i++) {
    BMEdge *e = edge_array[i];
    const float cost = bm_edge_calc_rotate_beauty(e, flag, method);
    if (cost < 0.0f) {
      eheap_table[i] = BLI_heap_insert(eheap, cost, e);
    }
    else {
      eheap_table[i] = NULL;
    }

    BM_elem_index_set(e, i); /* set_dirty */
  }
  bm->elem_index_dirty |= BM_EDGE;

  while (BLI_heap_is_empty(eheap) == false) {
    BMEdge *e = BLI_heap_popmin(eheap);
    i = BM_elem_index_get(e);
    eheap_table[i] = NULL;

    BLI_assert(BM_edge_face_count_is_equal(e, 2));

    e = BM_edge_rotate(bm, e, false, BM_EDGEROT_CHECK_EXISTS);

    BLI_assert(e == NULL || BM_edge_face_count_is_equal(e, 2));

    if (LIKELY(e)) {
      GSet *e_state_set = edge_state_arr[i];

      /* add the new state into the set so we don't move into this state again,
       * note: we could add the previous state too, but this isn't essential
       * for avoiding eternal loops */
      EdRotState *e_state = BLI_mempool_alloc(edge_state_pool);
      erot_state_current(e, e_state);
      if (UNLIKELY(e_state_set == NULL)) {
        edge_state_arr[i] = e_state_set = erot_gset_new(); /* store previous state */
      }
      BLI_assert(BLI_gset_haskey(e_state_set, (void *)e_state) == false);
      BLI_gset_insert(e_state_set, e_state);

      // printf(" %d -> %d, %d\n", i, BM_elem_index_get(e->v1), BM_elem_index_get(e->v2));

      /* maintain the index array */
      edge_array[i] = e;
      BM_elem_index_set(e, i);

      /* recalculate faces connected on the heap */
      bm_edge_update_beauty_cost(e, eheap, eheap_table, edge_state_arr,
                                 (const BMEdge **)edge_array, edge_array_len,
                                 flag, method);

      /* update flags */
      if (oflag_edge) {
        BMO_edge_flag_enable(bm, e, oflag_edge);
      }
      if (oflag_face) {
        BMO_face_flag_enable(bm, e->l->f, oflag_face);
        BMO_face_flag_enable(bm, e->l->radial_next->f, oflag_face);
      }
    }
  }

  BLI_heap_free(eheap, NULL);
  MEM_freeN(eheap_table);

  for (i = 0; i < edge_array_len; i++) {
    if (edge_state_arr[i]) {
      BLI_gset_free(edge_state_arr[i], NULL);
    }
  }
  MEM_freeN(edge_state_arr);
  BLI_mempool_destroy(edge_state_pool);

#ifdef DEBUG_TIME
  TIMEIT_END(beautify_fill);
#endif
}