/*
 * Initialize slab heap related info
 *
 * When prealloc is true, the slab allocator allocates the entire heap
 * upfront. Otherwise, memory for new slabs is allocated on demand. But once
 * a slab is allocated, it is never freed, though a slab could be
 * reused on eviction.
 */
static rstatus_i
_slab_heapinfo_setup(void)
{
    heapinfo.nslab = 0;
    heapinfo.max_nslab = slab_mem / slab_size;

    heapinfo.base = NULL;

    if (prealloc) {
        heapinfo.base = cc_alloc(heapinfo.max_nslab * slab_size);
        if (heapinfo.base == NULL) {
            log_error("pre-alloc %zu bytes for %"PRIu32" slabs failed: %s",
                      heapinfo.max_nslab * slab_size, heapinfo.max_nslab,
                      strerror(errno));
            return CC_ENOMEM;
        }

        log_info("pre-allocated %zu bytes for %"PRIu32" slabs", slab_mem,
                 heapinfo.max_nslab);
    }
    heapinfo.curr = heapinfo.base;

    heapinfo.slab_table = cc_alloc(sizeof(*heapinfo.slab_table) *
                                   heapinfo.max_nslab);
    if (heapinfo.slab_table == NULL) {
        log_error("create of slab table with %"PRIu32" entries failed: %s",
                  heapinfo.max_nslab, strerror(errno));
        return CC_ENOMEM;
    }
    TAILQ_INIT(&heapinfo.slab_lruq);

    log_vverb("created slab table with %"PRIu32" entries", heapinfo.max_nslab);

    return CC_OK;
}
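/*
 * Illustrative sketch of the matching teardown, assuming the same
 * heapinfo/prealloc globals as above; the helper name
 * _slab_heapinfo_teardown is hypothetical, not necessarily the module's own.
 */
static void
_slab_heapinfo_teardown(void)
{
    if (prealloc && heapinfo.base != NULL) {
        cc_free(heapinfo.base); /* releases the entire pre-allocated heap */
        heapinfo.base = NULL;
    }
    /* on-demand slabs are never freed individually (see comment above) */

    if (heapinfo.slab_table != NULL) {
        cc_free(heapinfo.slab_table);
        heapinfo.slab_table = NULL;
    }
    heapinfo.nslab = 0;
    heapinfo.curr = NULL;
}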
struct hash_table *
hashtable_create(uint32_t hash_power)
{
    struct hash_table *ht;
    uint64_t size;

    ASSERT(hash_power > 0);

    /* alloc struct */
    ht = cc_alloc(sizeof(struct hash_table));
    if (ht == NULL) {
        return NULL;
    }

    /* init members */
    ht->table = NULL;
    ht->hash_power = hash_power;
    ht->nhash_item = 0;
    size = HASHSIZE(ht->hash_power);

    /* alloc table */
    ht->table = _hashtable_alloc(size);
    if (ht->table == NULL) {
        cc_free(ht);
        return NULL;
    }

    return ht;
}
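/*
 * Illustrative sketch of the reverse path: free the bucket array before the
 * struct. The name and signature of hashtable_destroy are assumptions; it
 * also assumes no items remain chained in the buckets (or that the caller
 * has already removed them).
 */
void
hashtable_destroy(struct hash_table *ht)
{
    if (ht == NULL) {
        return;
    }

    if (ht->table != NULL) {
        cc_free(ht->table);
    }
    cc_free(ht);
}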
struct response *
admin_response_create(void)
{
    struct response *rsp = cc_alloc(sizeof(struct response));

    if (rsp == NULL) {
        return NULL;
    }

    admin_response_reset(rsp);

    return rsp;
}
static char *new_bindalloc_segment(void)
{
    if (bindsegcur >= bindsegcnt) {
        char *w = (char *)cc_alloc(SEGSIZE);
        if (bindsegcnt >= segmax) expand_segmax(segmax * SEGMAX_FACTOR);
        if (debugging(DEBUG_STORE))
            cc_msg("Binder store alloc %d size %ld at %p (%s in $r)\n",
                   (int)bindsegcnt, (long)SEGSIZE, w,
                   phasename, currentfunction.symstr);
        bindsegbase[bindsegcnt++] = w;
    }
    return bindsegbase[bindsegcur++];
}
static struct slab *
_slab_heap_create(void)
{
    struct slab *slab;

    if (prealloc) {
        slab = (struct slab *)heapinfo.curr;
        heapinfo.curr += slab_size;
    } else {
        slab = cc_alloc(slab_size);
    }

    return slab;
}
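/*
 * In prealloc mode the bump pointer above is not bounds-checked, so callers
 * are expected to verify capacity first. A minimal sketch of such a guard,
 * assuming heapinfo.nslab counts slabs already handed out and that
 * <stdbool.h> is available; the name _slab_heap_full is illustrative.
 */
static inline bool
_slab_heap_full(void)
{
    return heapinfo.nslab >= heapinfo.max_nslab;
}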
void
admin_process_setup(void)
{
    log_info("set up the %s module", DS_ADMIN_MODULE_NAME);
    if (admin_init) {
        log_warn("%s has already been setup, overwrite",
                 DS_ADMIN_MODULE_NAME);
    }

    cap = nmetric * METRIC_PRINT_LEN;
    buf = cc_alloc(cap);
    /* TODO: check return status of cc_alloc */

    admin_init = true;
}
struct response *
response_create(void)
{
    struct response *rsp = cc_alloc(sizeof(struct response));

    if (rsp == NULL) {
        return NULL;
    }

    response_reset(rsp);
    INCR(response_metrics, response_create);
    INCR(response_metrics, response_free);

    return rsp;
}
/*
 * Allocate table given size
 */
static struct item_slh *
_hashtable_alloc(uint64_t size)
{
    struct item_slh *table;
    uint32_t i;

    table = cc_alloc(sizeof(*table) * size);

    if (table != NULL) {
        for (i = 0; i < size; ++i) {
            SLIST_INIT(&table[i]);
        }
    }

    return table;
}
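/*
 * Illustrative bucket lookup against the table allocated above, assuming
 * HASHSIZE(p) expands to a power of two (e.g. 1 << p) so the hash value can
 * simply be masked; the helper name _hashtable_bucket is hypothetical.
 */
static struct item_slh *
_hashtable_bucket(struct hash_table *ht, uint32_t hv)
{
    return &ht->table[hv & (HASHSIZE(ht->hash_power) - 1)];
}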
void
admin_process_setup(admin_process_metrics_st *metrics)
{
    log_info("set up the %s module", PINGSERVER_ADMIN_MODULE_NAME);
    if (admin_init) {
        log_warn("%s has already been setup, overwrite",
                 PINGSERVER_ADMIN_MODULE_NAME);
    }

    admin_metrics = metrics;

    stats_len = METRIC_PRINT_LEN * nmetric;
    stats_buf = cc_alloc(stats_len + METRIC_END_LEN);
    /* TODO: check return status of cc_alloc */

    admin_init = true;
}
static char *new_synalloc_segment(void)
{
    char *w;
    if (synsegcnt >= segmax) expand_segmax(segmax * SEGMAX_FACTOR);
    if (bindsegcur < bindsegcnt) {
        w = bindsegbase[--bindsegcnt];
        if (debugging(DEBUG_2STORE) && synsegcnt > 0)
            cc_msg("Syntax store %d from binder size %ld at %p\n",
                   (int)synsegcnt, (long)SEGSIZE, w);
    } else {
        w = (char *)cc_alloc(SEGSIZE);
        if (debugging(DEBUG_STORE))
            cc_msg("Syntax store alloc %d size %ld at %p (%s in $r)\n",
                   (int)synsegcnt, (long)SEGSIZE, w,
                   phasename, currentfunction.symstr);
    }
    return synsegbase[synsegcnt++] = w;
}
struct request *
request_create(void)
{
    rstatus_i status;
    struct request *req = cc_alloc(sizeof(struct request));

    if (req == NULL) {
        return NULL;
    }

    status = array_create(&req->keys, MAX_BATCH_SIZE, sizeof(struct bstring));
    if (status != CC_OK) {
        cc_free(req); /* avoid leaking the request when key array creation fails */
        return NULL;
    }

    request_reset(req);
    INCR(request_metrics, request_create);

    return req;
}
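/*
 * Illustrative teardown mirroring request_create: release the key array
 * before the request itself. The name request_destroy and the signature of
 * array_destroy (taking &req->keys) are assumptions, not confirmed API.
 */
void
request_destroy(struct request **request)
{
    struct request *req = *request;

    if (req == NULL) {
        return;
    }

    array_destroy(&req->keys);
    cc_free(req);
    *request = NULL;
}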
VoidStar GlobAlloc(StoreUse t, int32 n)
{
    char *p = globallp;
    n = (n + 3) & ~(int32)3;   /* make n a multiple of sizeof(int) */
    if (n > SEGSIZE) {
        /* Big global store requests get a single oversize page. */
        p = (char *)cc_alloc(n);
        if (debugging(DEBUG_STORE))
            cc_msg("Global overlarge store alloc size %ld at %p (in $r)\n",
                   (long)n, p, currentfunction.symstr);
        globallxtra += n;
        /* could update globallcnt? */
    } else {
        if (p + n > globalltop)
            stuse_waste += globalltop - p, p = new_global_segment();
        globallp = p + n;
    }
    stuse[(int)t] += n;
    return p;
}
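/*
 * Quick check of the rounding used above: (n + 3) & ~3 rounds n up to the
 * next multiple of 4, leaving exact multiples unchanged. Assumes <assert.h>;
 * the function name is hypothetical and exists only to host the examples.
 */
static void check_globalloc_rounding(void)
{
    assert(((5 + 3) & ~3) == 8);    /*  5 -> 8  */
    assert(((8 + 3) & ~3) == 8);    /*  8 -> 8  */
    assert(((13 + 3) & ~3) == 16);  /* 13 -> 16 */
}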
struct ring_array *
ring_array_create(size_t elem_size, uint32_t cap)
{
    struct ring_array *arr;

    /* underlying array stores cap + 1 elements, since the array is full when
     * wpos is 1 element behind rpos */
    arr = cc_alloc(RING_ARRAY_HDR_SIZE + elem_size * (cap + 1));

    if (arr == NULL) {
        log_error("Could not allocate memory for ring array cap %u "
                  "elem_size %zu", cap, elem_size);
        return NULL;
    }

    arr->elem_size = elem_size;
    arr->cap = cap;
    arr->rpos = arr->wpos = 0;
    return arr;
}
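/*
 * The extra slot is what lets empty and full be distinguished. A minimal
 * sketch of the two occupancy checks under that layout, assuming
 * <stdbool.h>; the helper names are illustrative, not necessarily the
 * module's own API.
 */
static inline bool
ring_array_empty(const struct ring_array *arr)
{
    return arr->rpos == arr->wpos;
}

static inline bool
ring_array_full(const struct ring_array *arr)
{
    /* full when advancing wpos would land on rpos, i.e. wpos is one slot behind */
    return (arr->wpos + 1) % (arr->cap + 1) == arr->rpos;
}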
static char *new_global_segment(void)
{
    char *w;
    /* I will recycle a segment that had been used for local space if there */
    /* are any such available. */
    if (bindsegcur < bindsegcnt) {
        w = bindsegbase[--bindsegcnt];
        if (debugging(DEBUG_STORE))
            cc_msg("Global store %d from binder size %ld at %p\n",
                   (int)globallcnt, (long)SEGSIZE, w);
    } else {
        w = (char *)cc_alloc(SEGSIZE);
        if (debugging(DEBUG_STORE))
            cc_msg("Global store alloc %d size %ld at %p (in $r)\n",
                   (int)globallcnt, (long)SEGSIZE, w, currentfunction.symstr);
    }
    globallcnt++;
    globallp = w, globalltop = w + SEGSIZE;
    return w;
}