/*
 * Return the id of the slab which can store an item of a given size.
 *
 * Return SLABCLASS_INVALID_ID, for large items which cannot be stored in
 * any of the configured slabs.
 */
uint8_t
slab_id(size_t size)
{
    uint8_t id, imin, imax;

    ASSERT(size != 0);

    /*
     * Binary search over the slab classes, which are sorted by ascending
     * item size. We want the smallest class whose item size is >= size.
     */
    imin = SLABCLASS_MIN_ID;
    imax = profile_last_id;
    while (imax >= imin) {
        id = (imin + imax) / 2;
        if (size > slabclass[id].size) {
            /* item too big for this class; search larger classes */
            imin = id + 1;
        } else if (id > SLABCLASS_MIN_ID && size <= slabclass[id - 1].size) {
            /* item also fits the next smaller class; search smaller classes */
            imax = id - 1;
        } else {
            /* id is the smallest class that fits the item */
            break;
        }
    }

    if (imin > imax) {
        /* size too big for any slab */
        log_debug("slab_id: returning invalid");
        return SLABCLASS_INVALID_ID;
    }

    log_vverb("slab_id: returning %u", id);

    return id;
}
/*
 * Reallocate ptr to size bytes, guaranteeing that the returned block lives
 * at a *different* address than ptr (a "forced move").
 *
 * ptr is always consumed: on success the old block is freed and a new block
 * holding its contents is returned; on failure NULL is returned and the old
 * block is released. size == 0 frees ptr and returns NULL.
 *
 * name/line identify the call site for logging.
 */
void *
_cc_realloc_move(void *ptr, size_t size, const char *name, int line)
{
    void *p = NULL, *pr;

    if (size == 0) {
        free(ptr);
        log_debug("realloc(0) @ %s:%d", name, line);
        return NULL;
    }

    /*
     * Calling realloc then malloc allows us to force this function call to
     * change the address of the allocated memory block. realloc ensures we can
     * copy size bytes, and calling malloc before the realloc'd data is free'd
     * gives us a new address for the memory object.
     */
    if ((pr = realloc(ptr, size)) == NULL) {
        /*
         * Fix: on realloc failure the original block is untouched and was
         * previously leaked here (free(pr) with pr == NULL is a no-op).
         * Free ptr so the caller's block is consumed on every path.
         */
        log_error("realloc(%zu) failed @ %s:%d", size, name, line);
        free(ptr);
        return NULL;
    }

    if ((p = malloc(size)) == NULL) {
        log_error("realloc(%zu) failed @ %s:%d", size, name, line);
    } else {
        log_vverb("realloc(%zu) at %p @ %s:%d", size, p, name, line);
        memcpy(p, pr, size);
    }
    free(pr);

    return p;
}
/*
 * Initialize slab heap related info.
 *
 * When prealloc is true, the slab allocator allocates the entire heap
 * upfront. Otherwise, memory for new slabs is allocated on demand. But once
 * a slab is allocated, it is never freed, though a slab could be
 * reused on eviction.
 *
 * Returns CC_OK on success, CC_ENOMEM if either allocation fails.
 */
static rstatus_i
_slab_heapinfo_setup(void)
{
    heapinfo.nslab = 0;
    /* number of whole slabs that fit in the configured memory budget */
    heapinfo.max_nslab = slab_mem / slab_size;

    heapinfo.base = NULL;
    if (prealloc) {
        heapinfo.base = cc_alloc(heapinfo.max_nslab * slab_size);
        if (heapinfo.base == NULL) {
            log_error("pre-alloc %zu bytes for %"PRIu32" slabs failed: %s",
                      heapinfo.max_nslab * slab_size, heapinfo.max_nslab,
                      strerror(errno));
            return CC_ENOMEM;
        }
        log_info("pre-allocated %zu bytes for %"PRIu32" slabs", slab_mem,
                 heapinfo.max_nslab);
    }
    /* curr points at the next unused slab within the preallocated region */
    heapinfo.curr = heapinfo.base;

    /* one table entry per potential slab */
    heapinfo.slab_table = cc_alloc(sizeof(*heapinfo.slab_table) *
                                   heapinfo.max_nslab);
    if (heapinfo.slab_table == NULL) {
        /* NOTE(review): heapinfo.base is not released on this path —
         * presumably teardown handles it; confirm against the caller */
        log_error("create of slab table with %"PRIu32" entries failed: %s",
                  heapinfo.max_nslab, strerror(errno));
        return CC_ENOMEM;
    }
    TAILQ_INIT(&heapinfo.slab_lruq);

    log_vverb("created slab table with %"PRIu32" entries",
              heapinfo.max_nslab);

    return CC_OK;
}
static inline int _write_uint64(struct buf **buf, uint64_t val) { size_t n; struct buf *b; /* NOTE(yao): here we are being conservative on how many bytes wee need * to print a (64-bit) integer. The actual number might be smaller. * But since it is 21 bytes at most (including \0' while buffers usually * are KBs in size, it is unlikely to cause many extra expansions. */ if (_check_buf_size(buf, CC_UINT64_MAXLEN) != CC_OK) { return COMPOSE_ENOMEM; } b = *buf; /* always succeeds if we have enough space, which we checked above */ n = cc_print_uint64_unsafe((char *)b->wpos, val); b->wpos += n; log_vverb("wrote rsp uint %"PRIu64" to buf %p", val, b); return n; }
/*
 * Return a single request object to the free pool.
 *
 * *request is set to NULL so the caller cannot keep a dangling handle;
 * a NULL *request is a no-op.
 */
void
request_return(struct request **request)
{
    /* consistency fix: response_return asserts its argument before
     * dereferencing; do the same here */
    ASSERT(request != NULL);

    struct request *req = *request;

    if (req == NULL) {
        return;
    }

    INCR(request_metrics, request_free);
    INCR(request_metrics, request_return);

    log_vverb("return req %p", req);

    req->free = true;
    FREEPOOL_RETURN(req, &reqp, next);

    *request = NULL;
}
/*
 * malloc wrapper that logs the requesting call site (name/line).
 * Returns NULL for size == 0 or on allocation failure; the caller owns
 * the returned block and releases it with _cc_free.
 */
void *
_cc_alloc(size_t size, const char *name, int line)
{
    void *p;

    /* treat zero-byte requests as a no-op rather than calling malloc(0) */
    if (size == 0) {
        log_debug("malloc(0) @ %s:%d", name, line);
        return NULL;
    }

    p = malloc(size);
    if (p != NULL) {
        log_vverb("malloc(%zu) at %p @ %s:%d", size, p, name, line);
    } else {
        log_error("malloc(%zu) failed @ %s:%d", size, name, line);
    }

    return p;
}
/*
 * Borrow a request object from the free pool, creating one if the pool is
 * empty. Returns a reset request, or NULL on allocation failure.
 */
struct request *
request_borrow(void)
{
    struct request *req;

    FREEPOOL_BORROW(req, &reqp, next, request_create);
    if (req == NULL) {
        /* fix: format string had a stray %d with no matching argument,
         * which is undefined behavior */
        log_debug("borrow req failed: OOM");
        return NULL;
    }
    request_reset(req);

    DECR(request_metrics, request_free);
    INCR(request_metrics, request_borrow);

    log_vverb("borrowing req %p", req);

    return req;
}
/*
 * Borrow a response object from the free pool, creating one if the pool is
 * empty. Returns a reset response, or NULL on allocation failure.
 */
struct response *
response_borrow(void)
{
    struct response *rsp;

    FREEPOOL_BORROW(rsp, &rspp, next, response_create);
    if (rsp == NULL) {
        /* fix: format string had a stray %d with no matching argument,
         * which is undefined behavior */
        log_debug("borrow rsp failed: OOM");
        return NULL;
    }
    response_reset(rsp);

    DECR(response_metrics, response_free);
    INCR(response_metrics, response_borrow);

    log_vverb("borrowing rsp %p", rsp);

    return rsp;
}
/* * Return a single response object */ void response_return(struct response **response) { ASSERT(response != NULL); struct response *rsp = *response; if (rsp == NULL) { return; } INCR(response_metrics, response_free); INCR(response_metrics, response_return); log_vverb("return rsp %p", rsp); rsp->free = true; FREEPOOL_RETURN(rsp, &rspp, next); *response = NULL; }
/*
 * Unlink slab from the global slab LRU queue (heapinfo.slab_lruq).
 * The slab must currently be linked on the queue (TAILQ_REMOVE does not
 * tolerate entries that are not on the list).
 */
static void
_slab_lruq_remove(struct slab *slab)
{
    log_vverb("remove slab %p with id %d from lruq", slab, slab->id);
    TAILQ_REMOVE(&heapinfo.slab_lruq, slab, s_tqe);
}
/*
 * Link slab at the tail (most-recently-used end) of the global slab LRU
 * queue (heapinfo.slab_lruq).
 */
static void
_slab_lruq_append(struct slab *slab)
{
    /* fix: log message said "from lruq" (copy-paste from the remove
     * helper); this function appends *to* the queue */
    log_vverb("append slab %p with id %d to lruq", slab, slab->id);
    TAILQ_INSERT_TAIL(&heapinfo.slab_lruq, slab, s_tqe);
}
/*
 * free wrapper that logs the releasing call site (name/line).
 * ptr may be NULL, in which case free is a no-op.
 */
void
_cc_free(void *ptr, const char *name, int line)
{
    log_vverb("free(%p) @ %s:%d", ptr, name, line);
    free(ptr);
}