/* Hash-table/LRU unlink for callers that already hold the cache lock.
 * (Still largely duplicates do_item_unlink(); the original FIXME about
 * the copy/paste stands.) */
void do_item_unlink_nolock(item *it, const uint32_t hv) {
    MEMCACHED_ITEM_UNLINK(ITEM_key(it), it->nkey, it->nbytes);
    if ((it->it_flags & ITEM_LINKED) == 0)
        return; /* already unlinked: nothing to do */
    it->it_flags &= ~ITEM_LINKED;
    /* Keep the global accounting in sync. */
    STATS_LOCK();
    stats.curr_bytes -= ITEM_ntotal(it);
    stats.curr_items -= 1;
    STATS_UNLOCK();
    assoc_delete(ITEM_key(it), it->nkey, hv); /* out of the hash table */
    item_unlink_q(it);                        /* out of the LRU queue */
    do_item_remove(it);                       /* drop our reference */
}
/* Unlink an item from the hash table and the LRU list and, if nobody
 * holds a reference, free it.  Counterpart of do_item_link().
 *
 * Fix: the MEMCACHED_ITEM_UNLINK trace probe was invoked with only
 * (key, nbytes); every sibling implementation in this file passes
 * (key, nkey, nbytes), so the probe received wrong arguments. */
void do_item_unlink(item *it) {
    MEMCACHED_ITEM_UNLINK(ITEM_key(it), it->nkey, it->nbytes);
    if ((it->it_flags & ITEM_LINKED) != 0) {
        it->it_flags &= ~ITEM_LINKED;
        /* Keep the global accounting in sync with the unlink. */
        STATS_LOCK();
        stats.curr_bytes -= ITEM_ntotal(it);
        stats.curr_items -= 1;
        STATS_UNLOCK();
        assoc_delete(ITEM_key(it), it->nkey); /* remove from hash table */
        item_unlink_q(it);                    /* remove from LRU queue */
        if (it->refcount == 0) item_free(it); /* no readers left: reclaim */
    }
}
/* Bump the per-prefix GET counter (and the hit counter when is_hit)
 * for the stats entry matching `key`. */
void stats_prefix_record_get(const char *key, const size_t nkey, const bool is_hit) {
    PREFIX_STATS *entry;
    STATS_LOCK();
    entry = stats_prefix_find(key, nkey);
    if (entry != NULL) {
        entry->num_gets++;
        if (is_hit)
            entry->num_hits++;
    }
    STATS_UNLOCK();
}
/* Finish a slab page move: hand the (now empty) page stolen from the
 * source class over to the destination class and reset rebalance state. */
static void slab_rebalance_finish(void) {
    slabclass_t *s_cls;
    slabclass_t *d_cls;
    pthread_mutex_lock(&cache_lock);
    pthread_mutex_lock(&slabs_lock);
    s_cls = &slabclass[slab_rebal.s_clsid];
    d_cls = &slabclass[slab_rebal.d_clsid];
    /* At this point the stolen slab is completely clear */
    /* Drop the moved page from the source's list by overwriting its
     * slot with the last page (effectively NULL-ing the pointer). */
    s_cls->slab_list[s_cls->killing - 1] = s_cls->slab_list[s_cls->slabs - 1];
    s_cls->slabs--; /* the source class now owns one page fewer */
    s_cls->killing = 0;
    /* Zero every byte of the page before reuse -- important. */
    memset(slab_rebal.slab_start, 0, (size_t)settings.item_size_max);
    /* Donate the page (taken from the source class) to the destination;
     * slab_rebal.slab_start points at the stolen page. */
    d_cls->slab_list[d_cls->slabs++] = slab_rebal.slab_start;
    /* Carve the page into items of the destination class's size and
     * push them onto the destination's free-item list. */
    split_slab_page_into_freelist(slab_rebal.slab_start, slab_rebal.d_clsid);
    /* Reset the rebalance bookkeeping. */
    slab_rebal.done = 0;
    slab_rebal.s_clsid = 0;
    slab_rebal.d_clsid = 0;
    slab_rebal.slab_start = NULL;
    slab_rebal.slab_end = NULL;
    slab_rebal.slab_pos = NULL;
    slab_rebalance_signal = 0; /* rebalance thread goes back to sleep */
    pthread_mutex_unlock(&slabs_lock);
    pthread_mutex_unlock(&cache_lock);
    STATS_LOCK();
    stats.slab_reassign_running = false;
    stats.slabs_moved++;
    STATS_UNLOCK();
    if (settings.verbose > 1) {
        fprintf(stderr, "finished a slab move\n");
    }
}
/* Set up state for moving one slab page out of class s_clsid and into
 * d_clsid (the destination may be the global page pool).  Returns 0 on
 * success or a negative "no go" code on validation failure. */
static int slab_rebalance_start(void) {
    slabclass_t *s_cls;
    int no_go = 0;
    pthread_mutex_lock(&slabs_lock);
    /* Validate the requested class ids. */
    if (slab_rebal.s_clsid < POWER_SMALLEST ||
            slab_rebal.s_clsid > power_largest ||
            slab_rebal.d_clsid < SLAB_GLOBAL_PAGE_POOL ||
            slab_rebal.d_clsid > power_largest ||
            slab_rebal.s_clsid == slab_rebal.d_clsid)
        no_go = -2;
    s_cls = &slabclass[slab_rebal.s_clsid];
    /* Ensure the destination's page list has room for one more page.
     * NOTE(review): this runs even when the id check above failed --
     * presumably the ids always index within slabclass[]; confirm. */
    if (!grow_slab_list(slab_rebal.d_clsid)) {
        no_go = -1;
    }
    /* The source must keep at least one page for itself. */
    if (s_cls->slabs < 2)
        no_go = -3;
    if (no_go != 0) {
        pthread_mutex_unlock(&slabs_lock);
        return no_go; /* Should use a wrapper function... */
    }
    /* Always kill the first available slab page as it is most likely to
     * contain the oldest items */
    slab_rebal.slab_start = s_cls->slab_list[0];
    slab_rebal.slab_end = (char *)slab_rebal.slab_start +
        (s_cls->size * s_cls->perslab);
    slab_rebal.slab_pos = slab_rebal.slab_start;
    slab_rebal.done = 0;
    /* Also tells do_item_get to search for items in this slab */
    slab_rebalance_signal = 2;
    if (settings.verbose > 1) {
        fprintf(stderr, "Started a slab rebalance\n");
    }
    pthread_mutex_unlock(&slabs_lock);
    STATS_LOCK();
    stats_state.slab_reassign_running = true;
    STATS_UNLOCK();
    return 0;
}
/* Background thread: periodically "juggles" each slab class's LRUs,
 * adapting its sleep interval to how much work it finds, and checks the
 * LRU crawler at most once per second. */
static void *lru_maintainer_thread(void *arg) {
    int i;
    useconds_t to_sleep = MIN_LRU_MAINTAINER_SLEEP;
    rel_time_t last_crawler_check = 0;
    struct crawler_expired_data cdata;
    memset(&cdata, 0, sizeof(struct crawler_expired_data));
    pthread_mutex_init(&cdata.lock, NULL);
    cdata.crawl_complete = true; // kick off the crawler.
    pthread_mutex_lock(&lru_maintainer_lock);
    if (settings.verbose > 2)
        fprintf(stderr, "Starting LRU maintainer background thread\n");
    while (do_run_lru_maintainer_thread) {
        int did_moves = 0;
        /* Sleep without holding the lock so others can signal us. */
        pthread_mutex_unlock(&lru_maintainer_lock);
        usleep(to_sleep);
        pthread_mutex_lock(&lru_maintainer_lock);
        STATS_LOCK();
        stats.lru_maintainer_juggles++;
        STATS_UNLOCK();
        /* We were asked to immediately wake up and poke a particular slab
         * class due to a low watermark being hit */
        if (lru_maintainer_check_clsid != 0) {
            did_moves = lru_maintainer_juggle(lru_maintainer_check_clsid);
            lru_maintainer_check_clsid = 0;
        } else {
            /* Otherwise sweep every slab class. */
            for (i = POWER_SMALLEST; i < MAX_NUMBER_OF_SLAB_CLASSES; i++) {
                did_moves += lru_maintainer_juggle(i);
            }
        }
        /* Back off (+1ms) when idle; halve the sleep when busy, clamped
         * to the configured minimum. */
        if (did_moves == 0) {
            if (to_sleep < MAX_LRU_MAINTAINER_SLEEP)
                to_sleep += 1000;
        } else {
            to_sleep /= 2;
            if (to_sleep < MIN_LRU_MAINTAINER_SLEEP)
                to_sleep = MIN_LRU_MAINTAINER_SLEEP;
        }
        /* Once per second at most */
        if (settings.lru_crawler && last_crawler_check != current_time) {
            lru_maintainer_crawler_check(&cdata);
            last_crawler_check = current_time;
        }
    }
    pthread_mutex_unlock(&lru_maintainer_lock);
    if (settings.verbose > 2)
        fprintf(stderr, "LRU maintainer thread stopping\n");
    return NULL;
}
static void *assoc_maintenance_thread(void *arg) { while (do_run_maintenance_thread) { int ii = 0; /* Lock the cache, and bulk move multiple buckets to the new * hash table. */ mutex_lock(&cache_lock); for (ii = 0; ii < hash_bulk_move && expanding; ++ii) { item *it, *next; int bucket; for (it = old_hashtable[expand_bucket]; NULL != it; it = next) { next = it->h_next; bucket = hash(ITEM_key(it), it->nkey, 0) & hashmask(hashpower); it->h_next = primary_hashtable[bucket]; primary_hashtable[bucket] = it; } old_hashtable[expand_bucket] = NULL; expand_bucket++; if (expand_bucket == hashsize(hashpower - 1)) { expanding = false; free(old_hashtable); STATS_LOCK(); stats.hash_bytes -= hashsize(hashpower - 1) * sizeof(void *); stats.hash_is_expanding = 0; STATS_UNLOCK(); if (settings.verbose > 1) fprintf(stderr, "Hash table expansion done\n"); } } if (!expanding) { // added by Bin: fprintf(stderr, "\nHash table expansion done\n"); //assoc_pre_bench(); //assoc_post_bench(); /* We are done expanding.. just wait for next invocation */ pthread_cond_wait(&maintenance_cond, &cache_lock); } pthread_mutex_unlock(&cache_lock); } return NULL; }
/* Prepare a slab page move from class s_clsid to d_clsid (both must be
 * regular slab classes here).  Returns 0 on success, or a negative
 * "no go" code on validation failure. */
static int slab_rebalance_start(void) {
    slabclass_t *s_cls;
    int no_go = 0;
    pthread_mutex_lock(&slabs_lock);
    /* Both ids must name valid, distinct slab classes. */
    if (slab_rebal.s_clsid < POWER_SMALLEST ||
            slab_rebal.s_clsid > power_largest ||
            slab_rebal.d_clsid < POWER_SMALLEST ||
            slab_rebal.d_clsid > power_largest ||
            slab_rebal.s_clsid == slab_rebal.d_clsid)
        no_go = -2;
    s_cls = &slabclass[slab_rebal.s_clsid];
    /* Ensure the destination's page list can hold one more page.
     * NOTE(review): runs even when the id check above failed --
     * presumably the ids always index within slabclass[]; confirm. */
    if (!grow_slab_list(slab_rebal.d_clsid)) {
        no_go = -1;
    }
    /* The source must keep at least one page for itself. */
    if (s_cls->slabs < 2)
        no_go = -3;
    if (no_go != 0) {
        pthread_mutex_unlock(&slabs_lock);
        return no_go; /* Should use a wrapper function... */
    }
    /* Mark page 1 of the source class as the victim page. */
    s_cls->killing = 1;
    slab_rebal.slab_start = s_cls->slab_list[s_cls->killing - 1];
    slab_rebal.slab_end = (char *)slab_rebal.slab_start +
        (s_cls->size * s_cls->perslab);
    slab_rebal.slab_pos = slab_rebal.slab_start;
    slab_rebal.done = 0;
    /* Also tells do_item_get to search for items in this slab */
    slab_rebalance_signal = 2;
    if (settings.verbose > 1) {
        fprintf(stderr, "Started a slab rebalance\n");
    }
    pthread_mutex_unlock(&slabs_lock);
    STATS_LOCK();
    stats.slab_reassign_running = true;
    STATS_UNLOCK();
    return 0;
}
/* Allocate the primary hash table.  A non-zero hashtable_init overrides
 * the default hashpower. */
void assoc_init(const int hashtable_init) {
    if (hashtable_init != 0)
        hashpower = hashtable_init;
    primary_hashtable = calloc(hashsize(hashpower), sizeof(void *));
    if (primary_hashtable == NULL) {
        /* The hash table is essential; bail out if we cannot get one. */
        fprintf(stderr, "Failed to init hashtable.\n");
        exit(EXIT_FAILURE);
    }
    /* Record the table's power level and byte footprint. */
    STATS_LOCK();
    stats.hash_power_level = hashpower;
    stats.hash_bytes = hashsize(hashpower) * sizeof(void *);
    STATS_UNLOCK();
}
/* FIXME: Is it necessary to keep this copy/pasted code? */
/* Caller-locked unlink variant, with entry tracing via syslog. */
void do_item_unlink_nolock(item *it, const uint32_t hv) {
    syslog(LOG_INFO, "[%s:%s:%d]", __FILE__, __func__, __LINE__);
    MEMCACHED_ITEM_UNLINK(ITEM_key(it), it->nkey, it->nbytes);
    if ((it->it_flags & ITEM_LINKED) == 0)
        return; /* not linked: nothing to undo */
    it->it_flags &= ~ITEM_LINKED;
    /* Keep the global accounting in sync. */
    STATS_LOCK();
    stats.curr_bytes -= ITEM_ntotal(it);
    stats.curr_items -= 1;
    STATS_UNLOCK();
    assoc_delete(ITEM_key(it), it->nkey, hv); /* out of the hash table */
    item_unlink_q(it);                        /* out of the LRU queue */
    do_item_remove(it);                       /* drop our reference */
}
//将item从hashtable和LRU链中移除,而且还释放掉 item 所占的内存 (其实只是把 item 放到空闲链表中),是do_item_link的逆操作 void do_item_unlink(item *it, const uint32_t hv) { MEMCACHED_ITEM_UNLINK(ITEM_key(it), it->nkey, it->nbytes); mutex_lock(&cache_lock);//执行同步 if ((it->it_flags & ITEM_LINKED) != 0) {//判断状态值,保证item还在LRU队列中 it->it_flags &= ~ITEM_LINKED;//修改状态值 STATS_LOCK();//更新统计信息 stats.curr_bytes -= ITEM_ntotal(it); stats.curr_items -= 1; STATS_UNLOCK(); assoc_delete(ITEM_key(it), it->nkey, hv);//从Hash表中删除 item_unlink_q(it);//将item从slabclass对应的LRU队列摘除 do_item_remove(it);//释放 item 所占的内存 } mutex_unlock(&cache_lock); }
/* Unlink the item from the hash table and the LRU list, with entry
 * tracing via syslog; takes the cache lock itself. */
void do_item_unlink(item *it, const uint32_t hv) {
    syslog(LOG_INFO, "[%s:%s:%d]", __FILE__, __func__, __LINE__);
    MEMCACHED_ITEM_UNLINK(ITEM_key(it), it->nkey, it->nbytes);
    mutex_lock(&cache_lock);
    if ((it->it_flags & ITEM_LINKED) != 0) {
        it->it_flags &= ~ITEM_LINKED; /* mark as no longer linked */
        STATS_LOCK();
        stats.curr_bytes -= ITEM_ntotal(it);
        stats.curr_items -= 1;
        STATS_UNLOCK();
        assoc_delete(ITEM_key(it), it->nkey, hv); /* remove from the hash table */
        item_unlink_q(it); /* remove from the LRU list */
        do_item_remove(it); /* drop the reference, possibly freeing */
    }
    mutex_unlock(&cache_lock);
}
/* Delete prefix-stats entries.  nprefix == 0 deletes the "null prefix"
 * entry; otherwise every entry equal to the given prefix, or nested
 * under it (separated by the configured delimiter), is removed.
 * Returns 0 if anything was deleted, -1 otherwise. */
int stats_prefix_delete(const char *prefix, const size_t nprefix) {
    PREFIX_STATS *curr, *next, *prev;
    int hidx;
    int ret = -1;
    STATS_LOCK();
    if (nprefix == 0) {
        /* The null prefix lives in a single known bucket. */
        hidx = mc_hash(prefix, nprefix, 0) % PREFIX_HASH_SIZE;
        prev = NULL;
        for (curr = prefix_stats[hidx]; curr != NULL; prev = curr, curr = curr->next) {
            if (curr->prefix_len == 0)
                break;
        }
        if (curr != NULL) { /* found */
            /* Unlink from the chain and release it. */
            if (prev == NULL) prefix_stats[hidx] = curr->next;
            else prev->next = curr->next;
            num_prefixes--;
            total_prefix_size -= strlen(null_prefix_str);
            free(curr->prefix);
            free(curr);
            ret = 0;
        }
    } else { /* nprefix > 0 */
        // Full scan for sub-prefixies (we would fix it in future)
        for (hidx = 0; hidx < PREFIX_HASH_SIZE; hidx++) {
            prev = NULL;
            for (curr = prefix_stats[hidx]; curr != NULL; curr = next) {
                next = curr->next;
                /* Match the prefix itself, or a sub-prefix that continues
                 * with the delimiter character. */
                if ((curr->prefix_len >= nprefix &&
                     strncmp(curr->prefix, prefix, nprefix) == 0) &&
                    (curr->prefix_len == nprefix ||
                     *(curr->prefix+nprefix)==settings.prefix_delimiter)) {
                    if (prev == NULL) prefix_stats[hidx] = curr->next;
                    else prev->next = curr->next;
                    num_prefixes--;
                    total_prefix_size -= curr->prefix_len;
                    free(curr->prefix);
                    free(curr);
                    ret = 0;
                } else {
                    prev = curr;
                }
            }
        }
    }
    STATS_UNLOCK();
    return ret;
}
//输出所有信息 char *stats_prefix_dump(int *length) { const char *format = "PREFIX %s get %llu hit %llu set %llu del %llu\r\n"; PREFIX_STATS *pfs; char *buf; int i, pos; size_t size = 0, written = 0, total_written = 0; /* * Figure out how big the buffer needs to be. This is the sum of the * lengths of the prefixes themselves, plus the size of one copy of * the per-prefix output with 20-digit values for all the counts, * plus space for the "END" at the end. */ STATS_LOCK(); //计算需要全部内存空间大小 size = strlen(format) + total_prefix_size + num_prefixes * (strlen(format) - 2 /* %s */ + 4 * (20 - 4)) /* %llu replaced by 20-digit num */ + sizeof("END\r\n"); buf = malloc(size); if (NULL == buf) { perror("Can't allocate stats response: malloc"); STATS_UNLOCK(); return NULL; } pos = 0; for (i = 0; i < PREFIX_HASH_SIZE; i++) { for (pfs = prefix_stats[i]; NULL != pfs; pfs = pfs->next) { //格式化后拷贝到指定指针处 written = snprintf(buf + pos, size-pos, format, pfs->prefix, pfs->num_gets, pfs->num_hits, pfs->num_sets, pfs->num_deletes); pos += written; total_written += written; //判断是否拷贝正确 assert(total_written < size); } } STATS_UNLOCK(); memcpy(buf + pos, "END\r\n", 6); *length = pos + 5; return buf; }
//初始化哈希表 void assoc_init(const int hashtable_init) { //如何设置了哈希表大小,用设置大小,否则使用默认参数。 if (hashtable_init) { hashpower = hashtable_init; } //申请哈希表空间 primary_hashtable = calloc(hashsize(hashpower), sizeof(void *)); if (! primary_hashtable) { fprintf(stderr, "Failed to init hashtable.\n"); exit(EXIT_FAILURE); } //记录哈希表大小和占用字节 STATS_LOCK(); stats.hash_power_level = hashpower; stats.hash_bytes = hashsize(hashpower) * sizeof(void *); STATS_UNLOCK(); }
//默认参数值为0。本函数由main函数调用,参数的默认值为0 void assoc_init(const int hashtable_init) { if (hashtable_init) { hashpower = hashtable_init; } //因为哈希表会慢慢增大,所以要使用动态内存分配。哈希表存储的数据是一个 //指针,这样更省空间。hashsize(hashpower)就是哈希表的长度了 primary_hashtable = calloc(hashsize(hashpower), sizeof(void *)); if (! primary_hashtable) { fprintf(stderr, "Failed to init hashtable.\n"); exit(EXIT_FAILURE);//哈希表是memcached工作的基础,如果失败只能退出运行 } STATS_LOCK(); stats.hash_power_level = hashpower; stats.hash_bytes = hashsize(hashpower) * sizeof(void *); STATS_UNLOCK(); }
/* Insert a freshly allocated item into the hash table and the LRU and
 * update the global accounting.  Returns 1. */
int do_item_link(item *it) {
    /* Must be neither already linked nor on a slab freelist. */
    assert((it->it_flags & (ITEM_LINKED|ITEM_SLABBED)) == 0);
    assert(it->nbytes < 1048576); /* 1MB value-size cap */
    it->it_flags |= ITEM_LINKED;
    it->time = current_time; /* stamp last-access time */
    assoc_insert(it);
    STATS_LOCK();
    stats.curr_bytes += ITEM_ntotal(it);
    stats.curr_items += 1;
    stats.total_items += 1;
    STATS_UNLOCK();
    item_link_q(it); /* add to the LRU queue */
    return 1;
}
//从哈希表和LRU中删除 void do_item_unlink(item *it, const uint32_t hv) { MEMCACHED_ITEM_UNLINK(ITEM_key(it), it->nkey, it->nbytes); mutex_lock(&cache_lock); if ((it->it_flags & ITEM_LINKED) != 0) { it->it_flags &= ~ITEM_LINKED; STATS_LOCK(); stats.curr_bytes -= ITEM_ntotal(it); stats.curr_items -= 1; STATS_UNLOCK(); //从哈希表中删除 assoc_delete(ITEM_key(it), it->nkey, hv); //从链表中删除 item_unlink_q(it); //向slab归还这个item do_item_remove(it); } mutex_unlock(&cache_lock); }
/* Initialize the hash table, optionally backing it with shared memory. */
void assoc_init(const int hashtable_init) {
    if (hashtable_init) {
        hashpower = hashtable_init;
    }
    if (settings.shared_malloc_assoc) {
        /* Map the table at a fixed address in shared memory.
         * NOTE(review): unlike calloc below, nothing here shows that
         * shared_malloc zeroes the memory -- the TODO still stands. */
        primary_hashtable = shared_malloc((void *)0x00007fa2fdf10000, hashsize(hashpower)*sizeof(void *), settings.shared_malloc_assoc_key, NO_LOCK);//TODO: check that no need to add zeroes (like in calloc)
    } else {
        primary_hashtable = calloc(hashsize(hashpower), sizeof(void *));
    }
    if (! primary_hashtable) {
        fprintf(stderr, "Failed to init hashtable.\n");
        exit(EXIT_FAILURE);
    }
    STATS_LOCK();
    stats.hash_power_level = hashpower;
    stats.hash_bytes = hashsize(hashpower) * sizeof(void *);
    STATS_UNLOCK();
}
//本函数采用了一些优化手段,并非每调用一次本函数就申请一块内存。这会导致 //内存碎片。这里采取的优化方法是,一次性分配64个CQ_ITEM大小的内存(即预分配) //下次调用本函数的时候,直接从之前分配64个中要一个即可。 //由于是为了防止内存碎片,所以不是以链表的形式存放着64个CQ_ITEM。而是数组的形式 //于是, cqi_free函数就有点特别,它并不真正释放,而是像内存池那样归还 static CQ_ITEM *cqi_new(void) {//CQ_ITEM是主线程accept后返回的已建立连接的fd的封装。 CQ_ITEM *item = NULL; //所有线程都会访问cqi_freelist,所以需要加锁 pthread_mutex_lock(&cqi_freelist_lock); if (cqi_freelist) { item = cqi_freelist; cqi_freelist = item->next; } pthread_mutex_unlock(&cqi_freelist_lock); //没有多余的CQ_ITEM了 if (NULL == item) { int i; /* Allocate a bunch of items at once to reduce fragmentation */ item = malloc(sizeof(CQ_ITEM) * ITEMS_PER_ALLOC); if (NULL == item) { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); return NULL; } /* * Link together all the new items except the first one * (which we'll return to the caller) for placement on * the freelist. */ //item[0]直接返回为调用者,不用next指针连在一起。调用者负责将 //item[0].next赋值为NULL //将这块内存分成一个个的item并且用next指针像链表一样连起来 for (i = 2; i < ITEMS_PER_ALLOC; i++) item[i - 1].next = &item[i]; pthread_mutex_lock(&cqi_freelist_lock); //因为主线程负责申请CQ_ITEM,子线程负责释放CQ_ITEM。所以cqi_freelist此刻 //可能并不等于NULL。由于使用头插法,所以无论cqi_freelist是否为NULL,都能把链表连起来的 item[ITEMS_PER_ALLOC - 1].next = cqi_freelist; cqi_freelist = &item[1]; pthread_mutex_unlock(&cqi_freelist_lock); } return item; }
static void slab_rebalance_finish(void) { slabclass_t *s_cls; slabclass_t *d_cls; pthread_mutex_lock(&cache_lock); pthread_mutex_lock(&slabs_lock); s_cls = &slabclass[slab_rebal.s_clsid]; d_cls = &slabclass[slab_rebal.d_clsid]; /* At this point the stolen slab is completely clear */ s_cls->slab_list[s_cls->killing - 1] = s_cls->slab_list[s_cls->slabs - 1]; s_cls->slabs--; s_cls->killing = 0; memset(slab_rebal.slab_start, 0, (size_t)settings.item_size_max); d_cls->slab_list[d_cls->slabs++] = slab_rebal.slab_start; split_slab_page_into_freelist(slab_rebal.slab_start, slab_rebal.d_clsid); slab_rebal.done = 0; slab_rebal.s_clsid = 0; slab_rebal.d_clsid = 0; slab_rebal.slab_start = NULL; slab_rebal.slab_end = NULL; slab_rebal.slab_pos = NULL; slab_rebalance_signal = 0; pthread_mutex_unlock(&slabs_lock); pthread_mutex_unlock(&cache_lock); STATS_LOCK(); stats.slab_reassign_running = false; stats.slabs_moved++; STATS_UNLOCK(); if (settings.verbose > 1) { fprintf(stderr, "finished a slab move\n"); } }
/* Link the item into the hash table and LRU, assigning a fresh CAS id,
 * and update the global accounting.  Returns 1. */
int do_item_link(item *it) {
    /* Must be neither already linked nor on a slab freelist. */
    assert((it->it_flags & (ITEM_LINKED|ITEM_SLABBED)) == 0);
    assert(it->nbytes < (1024 * 1024)); /* 1MB max size */
    it->it_flags |= ITEM_LINKED;
    it->time = current_time; /* stamp last-access time */
    assoc_insert(it);
    STATS_LOCK();
    stats.curr_bytes += ITEM_ntotal(it);
    stats.curr_items += 1;
    stats.total_items += 1;
    STATS_UNLOCK();
    /* Allocate a new CAS ID on link. */
    it->cas_id = get_cas_id();
    item_link_q(it); /* add to the LRU queue */
    return 1;
}
/* Link the item into the hash table and LRU (trace-probed variant),
 * setting its CAS value when CAS is enabled.  Returns 1. */
int do_item_link(item *it) {
    MEMCACHED_ITEM_LINK(ITEM_key(it), it->nkey, it->nbytes);
    /* Must be neither already linked nor on a slab freelist. */
    assert((it->it_flags & (ITEM_LINKED|ITEM_SLABBED)) == 0);
    it->it_flags |= ITEM_LINKED;
    it->time = current_time; /* stamp last-access time */
    assoc_insert(it);
    STATS_LOCK();
    stats.curr_bytes += ITEM_ntotal(it);
    stats.curr_items += 1;
    stats.total_items += 1;
    STATS_UNLOCK();
    /* Allocate a new CAS ID on link. */
    ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0);
    item_link_q(it); /* add to the LRU queue */
    return 1;
}
/* grows the hashtable to the next power of 2. */
static void assoc_expand(void) {
    old_hashtable = primary_hashtable;
    /* Allocate the bigger table; from here on primary_hashtable is the
     * new table and old_hashtable the one being migrated away from. */
    primary_hashtable = calloc(hashsize(hashpower + 1), sizeof(void *));
    if (primary_hashtable) {
        if (settings.verbose > 1)
            fprintf(stderr, "Hash table expansion starting\n");
        hashpower++;
        expanding = true;
        expand_bucket = 0;
        STATS_LOCK();
        stats.hash_power_level = hashpower;
        /* Both tables exist during migration, so ADD the new size. */
        stats.hash_bytes += hashsize(hashpower) * sizeof(void *);
        stats.hash_is_expanding = 1;
        STATS_UNLOCK();
    } else {
        primary_hashtable = old_hashtable;
        /* Bad news, but we can keep running. */
    }
}
/* Emit engine-level statistics through add_stats.  Only the general
 * (untyped) request is supported; any named stat_type is rejected. */
bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
    bool ret = true;
    if (add_stats == NULL)
        return ret;
    if (stat_type != NULL)
        return false; /* no named sub-stats supported here */
    /* prepare general statistics for the engine */
    STATS_LOCK();
    append_stat("curr_bytes", add_stats, c, "%llu", (unsigned long long)stats.curr_bytes);
    append_stat("curr_items", add_stats, c, "%u", stats.curr_items);
    append_stat("total_items", add_stats, c, "%u", stats.total_items);
    append_stat("num_displacements", add_stats, c, "%u", stats.num_displacements);
    STATS_UNLOCK();
    return ret;
}
//link item //主要操作包括: //1. 改变一些统计数据 //2. 把item加入hash表 //3. 把item加入相应的slabclass lru链表中 int do_item_link(item *it, const uint32_t hv) { MEMCACHED_ITEM_LINK(ITEM_key(it), it->nkey, it->nbytes); assert((it->it_flags & (ITEM_LINKED|ITEM_SLABBED)) == 0); mutex_lock(&cache_lock); it->it_flags |= ITEM_LINKED; it->time = current_time; STATS_LOCK(); stats.curr_bytes += ITEM_ntotal(it); stats.curr_items += 1; stats.total_items += 1; STATS_UNLOCK(); /* Allocate a new CAS ID on link. */ ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0); assoc_insert(it, hv); //插入hash表 item_link_q(it); //插入lru链表 refcount_incr(&it->refcount); mutex_unlock(&cache_lock); return 1; }
//将item加入到hashtable和LRU链中 int do_item_link(item *it, const uint32_t hv) { syslog(LOG_INFO, "[%s:%s:%d]", __FILE__, __func__, __LINE__); MEMCACHED_ITEM_LINK(ITEM_key(it), it->nkey, it->nbytes); assert((it->it_flags & (ITEM_LINKED|ITEM_SLABBED)) == 0); //判断状态,即没有在hash表LRU链中或被释放 mutex_lock(&cache_lock); it->it_flags |= ITEM_LINKED; //设置linked状态 it->time = current_time;//设置最近访问的时间 STATS_LOCK(); stats.curr_bytes += ITEM_ntotal(it); //增加每个item所需要的字节大小,包括item结构体和item内容大小 stats.curr_items += 1; stats.total_items += 1; STATS_UNLOCK(); /* Allocate a new CAS ID on link. */ ITEM_set_cas(it, (settings.use_cas) ? get_cas_id() : 0); //设置新CAS,CAS是memcache用来处理并发请求的一种机制 assoc_insert(it, hv);//插入hashtable item_link_q(it); //加入LRU链 refcount_incr(&it->refcount); mutex_unlock(&cache_lock); return 1; }
/* * Returns a fresh connection queue item. */ static CQ_ITEM *cqi_new(void) { CQ_ITEM *item = NULL; pthread_mutex_lock(&cqi_freelist_lock); if (cqi_freelist) { item = cqi_freelist; cqi_freelist = item->next; } pthread_mutex_unlock(&cqi_freelist_lock); if (NULL == item) { int i; /* Allocate a bunch of items at once to reduce fragmentation */ item = malloc(sizeof(CQ_ITEM) * ITEMS_PER_ALLOC); if (NULL == item) { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); return NULL; } /* * Link together all the new items except the first one * (which we'll return to the caller) for placement on * the freelist. */ for (i = 2; i < ITEMS_PER_ALLOC; i++) item[i - 1].next = &item[i]; pthread_mutex_lock(&cqi_freelist_lock); item[ITEMS_PER_ALLOC - 1].next = cqi_freelist; cqi_freelist = &item[1]; pthread_mutex_unlock(&cqi_freelist_lock); } return item; }
/* Dispatch a "stats [type]" request: a NULL stat_type emits the general
 * engine stats; known types delegate to their specific dumpers.
 * Returns false for unknown types or a NULL add_stats callback. */
bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
    bool ret = true;
    if (add_stats != NULL) {
        if (!stat_type) {
            /* prepare general statistics for the engine */
            STATS_LOCK();
            APPEND_STAT("bytes", "%llu", (unsigned long long)stats_state.curr_bytes);
            APPEND_STAT("curr_items", "%llu", (unsigned long long)stats_state.curr_items);
            APPEND_STAT("total_items", "%llu", (unsigned long long)stats.total_items);
            STATS_UNLOCK();
            /* Only report the global page pool when automove is on. */
            if (settings.slab_automove > 0) {
                pthread_mutex_lock(&slabs_lock);
                APPEND_STAT("slab_global_page_pool", "%u", slabclass[SLAB_GLOBAL_PAGE_POOL].slabs);
                pthread_mutex_unlock(&slabs_lock);
            }
            item_stats_totals(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "items") == 0) {
            item_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
            slabs_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
            item_stats_sizes(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "sizes_enable") == 0) {
            item_stats_sizes_enable(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "sizes_disable") == 0) {
            item_stats_sizes_disable(add_stats, c);
        } else {
            ret = false; /* unrecognized stat type */
        }
    } else {
        ret = false;
    }
    return ret;
}
/*@null@*/
/* Allocate an item large enough for key + suffix + value.  If the slab
 * class is out of memory, try to evict an unreferenced item from the
 * tail of that class's LRU (examining up to 50 candidates) and retry
 * the allocation once.  Returns NULL on failure; on success the caller
 * holds the item's single reference. */
item *do_item_alloc(char *key, const size_t nkey, const int flags, const rel_time_t exptime, const int nbytes) {
    uint8_t nsuffix;
    item *it;
    char suffix[40];
    size_t ntotal = item_make_header(nkey + 1, flags, nbytes, suffix, &nsuffix);
    unsigned int id = slabs_clsid(ntotal);
    if (id == 0)
        return 0; /* no slab class fits an item of this size */
    it = slabs_alloc(ntotal);
    if (it == 0) {
        int tries = 50;
        item *search;
        /* If requested to not push old items out of cache when memory runs out,
         * we're out of luck at this point... */
        if (settings.evict_to_free == 0)
            return NULL;
        /*
         * try to get one off the right LRU
         * don't necessariuly unlink the tail because it may be locked: refcount>0
         * search up from tail an item with refcount==0 and unlink it; give up after 50
         * tries
         */
        if (id > LARGEST_ID)
            return NULL;
        if (tails[id] == 0)
            return NULL;
        for (search = tails[id]; tries > 0 && search != NULL; tries--, search=search->prev) {
            if (search->refcount == 0) {
                /* Only count it as an eviction if it had not expired. */
                if (search->exptime == 0 || search->exptime > current_time) {
                    STATS_LOCK();
                    stats.evictions++;
                    STATS_UNLOCK();
                }
                do_item_unlink(search);
                break;
            }
        }
        it = slabs_alloc(ntotal); /* retry after the eviction attempt */
        if (it == 0)
            return NULL;
    }
    assert(it->slabs_clsid == 0);
    it->slabs_clsid = id;
    assert(it != heads[it->slabs_clsid]);
    it->next = it->prev = it->h_next = 0;
    it->refcount = 1; /* the caller will have a reference */
    DEBUG_REFCNT(it, '*');
    it->it_flags = 0;
    it->nkey = nkey;
    it->nbytes = nbytes;
    /* NOTE(review): strcpy assumes key is NUL-terminated; the header
     * reserved nkey + 1 bytes above -- confirm callers guarantee this. */
    strcpy(ITEM_key(it), key);
    it->exptime = exptime;
    memcpy(ITEM_suffix(it), suffix, (size_t)nsuffix);
    it->nsuffix = nsuffix;
    return it;
}