/* * Setup a thread's stats-related fields. * * Return success status as a boolean. */ static rstatus_t thread_setup_stats(struct thread_worker *t) { err_t err; t->stats_mutex = mc_alloc(sizeof(*t->stats_mutex)); if (t->stats_mutex == NULL) { return MC_ENOMEM; } err = pthread_mutex_init(t->stats_mutex, NULL); if (err != 0) { log_error("pthread mutex init failed: %s", strerror(err)); return MC_ERROR; } t->stats_thread = stats_thread_init(); if (t->stats_thread == NULL) { log_error("stats thread init failed: %s", strerror(errno)); pthread_mutex_destroy(t->stats_mutex); return MC_ERROR; } t->stats_slabs = stats_slabs_init(); if (t->stats_slabs == NULL) { log_error("stats slabs init failed: %s", strerror(errno)); pthread_mutex_destroy(t->stats_mutex); stats_thread_deinit(t->stats_thread); return MC_ERROR; } return MC_OK; }
/* * Create an object cache. * * The object cache will let you allocate objects of the same size. It is * fully MT safe, so you may allocate objects from multiple threads without * having to do any syncrhonization in the application code. * * @param name the name of the object cache. This name may be used for debug * purposes and may help you track down what kind of object you * have problems with (buffer overruns, leakage etc) * @param bufsize the size of each object in the cache * @param align the alignment requirements of the objects in the cache. * @return a handle to an object cache if successful, NULL otherwise. */ cache_t * cache_create(const char *name, size_t bufsize, size_t align) { cache_t *ret; char *name_new; void **ptr; ret = mc_calloc(1, sizeof(cache_t)); name_new = mc_alloc(strlen(name) + 1); ptr = mc_calloc(initial_pool_size, bufsize); if (ret == NULL || name_new == NULL || ptr == NULL || pthread_mutex_init(&ret->mutex, NULL) == -1) { mc_free(ret); mc_free(name_new); mc_free(ptr); return NULL; } strncpy(name_new, name, strlen(name) + 1); ret->name = name_new; ret->ptr = ptr; ret->freetotal = initial_pool_size; ret->bufsize = bufsize; return ret; }
/*
 * Allocate an object from the cache.
 *
 * Pops an object off the free list if one is available; otherwise a
 * new buffer is allocated. The free list is protected by the cache
 * mutex.
 *
 * @param cache the handle to the object cache to allocate from
 * @return a pointer to an object from the cache, or NULL if the
 *         allocation cannot be satisfied.
 */
void *
cache_alloc(cache_t *cache)
{
    void *obj;

    pthread_mutex_lock(&cache->mutex);
    obj = (cache->freecurr > 0) ? cache->ptr[--cache->freecurr]
                                : mc_alloc(cache->bufsize);
    pthread_mutex_unlock(&cache->mutex);

    return obj;
}
/* * Constructs a set of UDP headers and attaches them to the outgoing * messages. */ rstatus_t conn_build_udp_headers(struct conn *c) { int i; unsigned char *hdr; ASSERT(c != NULL); if (c->msg_used > c->udp_hsize) { void *new_udp_hbuf; if (c->udp_hbuf != NULL) { new_udp_hbuf = mc_realloc(c->udp_hbuf, c->msg_used * 2 * UDP_HEADER_SIZE); } else { new_udp_hbuf = mc_alloc(c->msg_used * 2 * UDP_HEADER_SIZE); } if (new_udp_hbuf == NULL) { return MC_ENOMEM; } c->udp_hbuf = (unsigned char *)new_udp_hbuf; c->udp_hsize = c->msg_used * 2; } hdr = c->udp_hbuf; for (i = 0; i < c->msg_used; i++) { c->msg[i].msg_iov[0].iov_base = (void*)hdr; c->msg[i].msg_iov[0].iov_len = UDP_HEADER_SIZE; *hdr++ = c->udp_rid / 256; *hdr++ = c->udp_rid % 256; *hdr++ = i / 256; *hdr++ = i % 256; *hdr++ = c->msg_used / 256; *hdr++ = c->msg_used % 256; *hdr++ = 0; *hdr++ = 0; ASSERT((void *) hdr == (caddr_t)c->msg[i].msg_iov[0].iov_base + UDP_HEADER_SIZE); } return MC_OK; }
/*
 * Initialize the hotkey counter table with room for 2x the expected
 * number of entries.
 *
 * @param size expected number of distinct keys to track
 * @return MC_OK on success, MC_ENOMEM if the table cannot be allocated
 *         (or if the requested size would overflow the allocation).
 */
rstatus_t
kc_map_init(size_t size)
{
    size_t i;

    /* refuse sizes whose byte count would overflow size_t */
    if (size > ((size_t)-1) / (2 * sizeof(*table))) {
        log_error("Could not allocate counter table for hotkey - OOM");
        return MC_ENOMEM;
    }

    /* allocate 2x the number of entries expected */
    table_size = 2 * size;
    table = mc_alloc(sizeof(*table) * table_size);
    if (table == NULL) {
        log_error("Could not allocate counter table for hotkey - OOM");
        return MC_ENOMEM;
    }

    /* size_t index avoids a signed/unsigned comparison with table_size */
    for (i = 0; i < table_size; ++i) {
        kc_map_entry_reset(table + i);
    }

    table_nkey = 0;

    return MC_OK;
}
/*
 * Acquire a conn for socket 'sd': reuse one from the free pool via
 * _conn_get() if available, otherwise allocate a fresh conn together
 * with all of its buffers. Either way, (re)initialize the conn's
 * per-connection state before returning it.
 *
 * @param sd       socket descriptor the conn wraps
 * @param state    initial connection state (CONN_LISTEN..CONN_SENTINEL)
 * @param ev_flags event flags to record on the conn
 * @param rsize    read buffer size in bytes (must be > 0); only used
 *                 when a fresh conn is allocated
 * @param udp      nonzero if this is a UDP connection
 * @return an initialized conn, or NULL on allocation failure.
 */
struct conn *
conn_get(int sd, conn_state_t state, int ev_flags, int rsize, int udp)
{
    struct conn *c;

    ASSERT(state >= CONN_LISTEN && state < CONN_SENTINEL);
    ASSERT(rsize > 0);

    c = _conn_get();
    if (c == NULL) {
        /* no pooled conn; allocate one plus all of its buffers */
        c = mc_zalloc(sizeof(*c));
        if (c == NULL) {
            return NULL;
        }
        c->rsize = rsize;
        c->rbuf = mc_alloc(c->rsize);
        c->wsize = TCP_BUFFER_SIZE;
        c->wbuf = mc_alloc(c->wsize);
        c->isize = ILIST_SIZE;
        c->ilist = mc_alloc(sizeof(*c->ilist) * c->isize);
        c->ssize = SLIST_SIZE;
        c->slist = mc_alloc(sizeof(*c->slist) * c->ssize);
        c->iov_size = IOV_SIZE;
        c->iov = mc_alloc(sizeof(*c->iov) * c->iov_size);
        c->msg_size = MSG_SIZE;
        c->msg = mc_alloc(sizeof(*c->msg) * c->msg_size);
        if (c->rbuf == NULL || c->wbuf == NULL || c->ilist == NULL ||
            c->iov == NULL || c->msg == NULL || c->slist == NULL) {
            /* conn_free releases whichever buffers were allocated */
            conn_free(c);
            return NULL;
        }
        stats_thread_incr(conn_struct);
    }

    /* reset per-connection state; buffers are retained across reuse */
    STAILQ_NEXT(c, c_tqe) = NULL;
    c->thread = NULL;
    c->sd = sd;
    c->state = state;
    /* c->event is initialized later */
    c->ev_flags = ev_flags;
    c->which = 0;

    ASSERT(c->rbuf != NULL && c->rsize > 0);
    c->rcurr = c->rbuf;
    c->rbytes = 0;

    ASSERT(c->wbuf != NULL && c->wsize > 0);
    c->wcurr = c->wbuf;
    c->wbytes = 0;
    c->write_and_go = state;
    c->write_and_free = NULL;

    c->ritem = NULL;
    c->rlbytes = 0;
    c->item = NULL;
    c->sbytes = 0;

    ASSERT(c->iov != NULL && c->iov_size > 0);
    c->iov_used = 0;

    ASSERT(c->msg != NULL && c->msg_size > 0);
    c->msg_used = 0;
    c->msg_curr = 0;
    c->msg_bytes = 0;

    ASSERT(c->ilist != NULL && c->isize > 0);
    c->icurr = c->ilist;
    c->ileft = 0;

    ASSERT(c->slist != NULL && c->ssize > 0);
    c->scurr = c->slist;
    c->sleft = 0;

    c->stats.buffer = NULL;
    c->stats.size = 0;
    c->stats.offset = 0;

    c->req_type = REQ_UNKNOWN;
    c->req = NULL;
    c->req_len = 0;

    c->udp = udp;
    c->udp_rid = 0;
    c->udp_hbuf = NULL;
    c->udp_hsize = 0;

    c->noreply = 0;

    stats_thread_incr(conn_total);
    stats_thread_incr(conn_curr);

    log_debug(LOG_VVERB, "get conn %p c %d", c, c->sd);

    return c;
}