void *mc_realloc(void *ptr, size_t size) {
    void *new_ptr;
    mem_cache_ptr mem_cache;
    mem_cache_header_ptr mem_cache_header;

    if (NULL == ptr || 0 == size) {
        return NULL;
    }

    /* The cache header sits immediately before the user pointer. */
    mem_cache_header = (mem_cache_header_ptr)((char *)ptr - sizeof(mem_cache_header_t));
    mem_cache = mem_cache_header->mem_cache;

    /* The existing block already holds at least size bytes; reuse it. */
    if (size <= mem_cache_header->size) {
        return ptr;
    }

    /* Sanity checks: the block must belong to the redis cache and carry
     * a valid header before its recorded size can be trusted. */
    if (mem_cache != redis_mem_cache) {
        return NULL;
    }
    if (MEMCACHE_MAGIC_NUMBER != mem_cache->magic_number) {
        return NULL;
    }
    if (0 == mem_cache_header->size) {
        return NULL;
    }

    new_ptr = mem_cache_alloc(redis_mem_cache, size);
    if (NULL == new_ptr) {
        return NULL;
    }

    /* Copy the old payload (smaller than size at this point), then
     * release the old block. On allocation failure it is left intact. */
    memcpy(new_ptr, ptr, mem_cache_header->size);
    mem_cache_free(ptr);
    return new_ptr;
}
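/*
 * Usage sketch for mc_realloc, assuming a companion mc_malloc entry
 * point (not shown in these snippets; only mc_realloc and mc_free
 * appear above). It illustrates that, unlike the standard realloc,
 * mc_realloc leaves the old block valid when it returns NULL.
 */
void grow_demo(void) {
    char *buf = mc_malloc(64);      /* mc_malloc is an assumed companion allocator */
    if (buf == NULL)
        return;

    char *bigger = mc_realloc(buf, 128);
    if (bigger != NULL) {
        buf = bigger;               /* mc_realloc freed the old block on success */
    }                               /* on NULL, buf is still valid and still 64 bytes */

    mc_free(buf);
}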
struct mem_cache_t *mem_cache_create(size_t objsize, uint32_t capacity) {
    struct mem_cache_t *cachep;
    void *parray_mem;

    /* The cache descriptor itself is carved out of the bootstrap cache. */
    cachep = mem_cache_alloc(cache_cache);
    if (cachep == NULL)
        return NULL;

    /* Cache-line-aligned array that serves as a stack of free object pointers. */
    parray_mem = __dma_mem_memalign(L1_CACHE_BYTES, sizeof(void *) * capacity);
    if (parray_mem == NULL) {
        mem_cache_free(cache_cache, cachep);
        return NULL;
    }

    cachep->objsize = objsize;
    cachep->free_limit = capacity;
    cachep->ptr_stack = parray_mem;
    cachep->next_free = 0;
    cachep->obj_allocated = 0;
    mutex_init(&(cachep->mmlock));
    return cachep;
}
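/*
 * Usage sketch for mem_cache_create. The one-argument
 * mem_cache_alloc(cachep) and two-argument mem_cache_free(cachep, obj)
 * forms are inferred from the calls inside mem_cache_create itself;
 * struct conn is a made-up example object type.
 */
struct conn { int fd; };                    /* hypothetical object type */

void conn_pool_demo(void) {
    /* A cache holding up to 1024 fixed-size connection objects. */
    struct mem_cache_t *conn_cache = mem_cache_create(sizeof(struct conn), 1024);
    if (conn_cache == NULL)
        return;

    struct conn *c = mem_cache_alloc(conn_cache);   /* inferred signature */
    if (c != NULL) {
        c->fd = -1;
        mem_cache_free(conn_cache, c);              /* two-argument form, as above */
    }
}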
void item_free(item *it) {
    unsigned int ntotal = ITEM_ntotal(it);

    assert((it->it_flags & ITEM_LINKED) == 0);
    assert(it != heads[it->slabs_clsid]);
    assert(it != tails[it->slabs_clsid]);
    assert(it->refcount == 0);

    /* so slab size changer can tell later if item is already free or not */
    it->slabs_clsid = 0;
    it->it_flags |= ITEM_SLABBED;
    mem_cache_free(mem_cache, it, ntotal);
}
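/*
 * The comment above explains why slabs_clsid is zeroed and ITEM_SLABBED
 * set before the free: a later slab-rebalancing pass can then tell live
 * items from freed ones. A minimal sketch of that check, assuming the
 * memcached-style item and it_flags definitions used above; the helper
 * name is hypothetical.
 */
static int item_is_freed(const item *it) {
    /* Relies on item_free() zeroing slabs_clsid and setting ITEM_SLABBED. */
    return (it->it_flags & ITEM_SLABBED) != 0 && it->slabs_clsid == 0;
}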
int kfree(const void *mem) {
    int i;

    if (!mem)
        return 0;

    /* Try each pool's cache in turn until one accepts the pointer;
     * _pools and _pools_mem_cache are assumed to be parallel arrays. */
    for (i = 0; i < SIZEOFARRAY(_pools); i++)
        if (_pools_mem_cache[i])
            if (mem_cache_free(_pools_mem_cache[i], mem) == 0)
                return 0;

    return -1;
}
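/*
 * Sketch of the layout kfree appears to assume (everything besides
 * kfree, SIZEOFARRAY, _pools, and _pools_mem_cache is reconstructed
 * for illustration): one cache handle per pool descriptor, so indexing
 * _pools_mem_cache with a bound taken from _pools is safe only while
 * the two arrays stay the same length.
 */
#define SIZEOFARRAY(a) (sizeof(a) / sizeof((a)[0]))   /* assumed definition */

struct pool_desc { size_t objsize; };                 /* hypothetical descriptor */

static struct pool_desc _pools[] = {
    { 32 }, { 64 }, { 128 },
};
/* Parallel to _pools: one mem_cache handle per size class. */
static struct mem_cache_t *_pools_mem_cache[SIZEOFARRAY(_pools)];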
void mc_free(void *ptr) {
    mem_cache_free(ptr);
}
int mem_cache_free_wrapped(void *addr) {
    return mem_cache_free(this, addr);
}