item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) { item *it; unsigned int oldbucket; //哈希表处理迁移数据状态,且还没有迁移到该桶。(在还没有迁移到该桶时,assoc_insert保证哈希到该区间的键插入到旧表) if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) { it = old_hashtable[oldbucket]; } else { //找到key所在桶 it = primary_hashtable[hv & hashmask(hashpower)]; } item *ret = NULL; int depth = 0; //遍历冲突链 while (it) { if ((nkey == it->nkey) && (memcmp(key, ITEM_key(it), nkey) == 0)) { ret = it; break; } it = it->h_next; ++depth; } MEMCACHED_ASSOC_FIND(key, nkey, depth); return ret; }
/*
 * Shadow-item variant of assoc_find(): look up (key, nkey) by its hash hv
 * and return the matching shadow_item, or NULL when absent.
 *
 * NOTE(review): this searches old_hashtable/primary_hashtable (the regular
 * item tables) while masking with shadow_hashpower, and gates on the item
 * table's expanding/expand_bucket state.  If shadow items live in their own
 * table, these should presumably be shadow-specific globals — confirm.
 */
shadow_item *shadow_assoc_find(const char *key, const size_t nkey, const uint32_t hv) {
    shadow_item *it;
    unsigned int oldbucket;

    if (expanding &&
        (oldbucket = (hv & hashmask(shadow_hashpower - 1))) >= expand_bucket) {
        /* bucket not migrated yet: search the old table */
        it = old_hashtable[oldbucket];
    } else {
        it = primary_hashtable[hv & hashmask(shadow_hashpower)];
    }

    shadow_item *ret = NULL;
    int depth = 0;  /* chain positions traversed, reported to the trace probe */
    while (it) {
        /* cheap length check first, then byte comparison */
        if ((nkey == it->nkey) && (memcmp(key, it->key, nkey) == 0)) {
            ret = it;
            break;
        }
        it = it->h_next;
        ++depth;
    }
    MEMCACHED_ASSOC_FIND(key, nkey, depth);
    return ret;
}
// Locate the Item for (key, nkey) using the precomputed hash hv.
// During an expansion, buckets at or past expand_bucket_ have not been
// migrated and are still searched in old_hashtable_.
// Returns NULL when no entry matches.
Item* AssocMaintainer::AssocFind(const char *key, const size_t nkey, const uint32_t hv) {
    unsigned int oldbucket;
    Item *head;

    if (expanding_ &&
        (oldbucket = (hv & hashmask(hashpower_ - 1))) >= expand_bucket_) {
        head = old_hashtable_[oldbucket];  // bucket not migrated yet
    } else {
        head = primary_hashtable_[hv & hashmask(hashpower_)];
    }

    // Scan the collision chain; the cheap length check precedes memcmp.
    for (Item *cur = head; cur != NULL; cur = cur->h_next) {
        if (nkey == cur->nkey && memcmp(key, ITEM_key(cur), nkey) == 0) {
            return cur;
        }
    }
    return NULL;
}
//由于哈希值只能确定是在哈希表中的哪个桶(bucket),但一个桶里面是有一条冲突链的 //此时需要用到具体的键值遍历并一一比较冲突链上的所有节点。虽然key是以'\0'结尾 //的字符串,但调用strlen还是有点耗时(需要遍历键值字符串)。所以需要另外一个参数 //nkey指明这个key的长度 item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) { item *it; unsigned int oldbucket; if (expanding &&//正在扩展哈希表 (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)//该item还在旧表里面 { it = old_hashtable[oldbucket]; } else { //由哈希值判断这个key是属于那个桶(bucket)的 it = primary_hashtable[hv & hashmask(hashpower)]; } //到这里,已经确定这个key是属于那个桶的。 遍历对应桶的冲突链即可 item *ret = NULL; int depth = 0; while (it) { //长度相同的情况下才调用memcmp比较,更高效 if ((nkey == it->nkey) && (memcmp(key, ITEM_key(it), nkey) == 0)) { ret = it; break; } it = it->h_next; ++depth; } MEMCACHED_ASSOC_FIND(key, nkey, depth); return ret; }
//hv是这个item键值的哈希值 int assoc_insert(item *it, const uint32_t hv) { unsigned int oldbucket; // assert(assoc_find(ITEM_key(it), it->nkey) == 0); /* shouldn't have duplicately named things defined */ //使用头插法 插入一个item ///第一次看本函数,直接看else部分 if (expanding &&//目前处于扩展hash表状态 (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)//数据迁移时还没迁移到这个桶 { //插入到旧表 it->h_next = old_hashtable[oldbucket]; old_hashtable[oldbucket] = it; } else { //使用头插法插入哈希表中 //插入到新表 it->h_next = primary_hashtable[hv & hashmask(hashpower)]; primary_hashtable[hv & hashmask(hashpower)] = it; } hash_items++;//哈希表的item数量加一 //当hash表的item数量到达了hash表容量的1.5倍时,就会进行扩展 //当然如果现在正处于扩展状态,是不会再扩展的 if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) { assoc_start_expand();//唤醒迁移线程,扩展哈希表 } MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items); return 1; }
//hv是这个item键值的哈希值,用于快速查找该key对应的item,见assoc_find item插入hash表函数assoc_insert int assoc_insert(item *it, const uint32_t hv) { unsigned int oldbucket; // 插入hash表函数为assoc_insert 插入lru队列的函数为item_link_q // assert(assoc_find(ITEM_key(it), it->nkey) == 0); /* shouldn't have duplicately named things defined */ //使用头插法,插入一个item //第一次看本函数,直接看else部分 if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) { it->h_next = old_hashtable[oldbucket]; old_hashtable[oldbucket] = it; } else { //使用头插法插入哈希表中 it->h_next = primary_hashtable[hv & hashmask(hashpower)]; primary_hashtable[hv & hashmask(hashpower)] = it; } hash_items++;//哈希表的item数量加一 if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) { assoc_start_expand(); } MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items); return 1; }
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(item *it) {
    unsigned int oldbucket;
    uint32_t hv;

    assert(assoc_find(ITEM_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    /* older variant: this function computes the key's hash itself */
    hv = hash(ITEM_key(it), it->nkey, 0);

    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) {
        /* bucket not migrated yet: prepend to the old table's chain */
        it->h_next = old_hashtable[oldbucket];
        old_hashtable[oldbucket] = it;
    } else {
        /* prepend to the head of the primary-table chain */
        unsigned int bucket = hv & hashmask(hashpower);
        it->h_next = primary_hashtable[bucket];
        primary_hashtable[bucket] = it;
    }

    hash_items++;
    /* grow once the load factor exceeds 1.5 and no expansion is running */
    if (!expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
        assoc_expand();
    }

    MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
    return 1;
}
item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) { item *it; unsigned int oldbucket; if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) { it = old_hashtable[oldbucket]; } else { it = primary_hashtable[hv & hashmask(hashpower)]; } item *ret = NULL; int depth = 0; while (it) { // [branch 009b] Switch to safe memcmp if ((nkey == it->nkey) && (tm_memcmp(key, ITEM_key(it), nkey) == 0)) { ret = it; break; } it = it->h_next; ++depth; } MEMCACHED_ASSOC_FIND(key, nkey, depth); return ret; }
//hash,插入元素,hv是这个item键值的哈希值 int assoc_insert(item *it, const uint32_t hv) { unsigned int oldbucket; // assert(assoc_find(ITEM_key(it), it->nkey) == 0); /* shouldn't have duplicately named things defined */ // 头插法 if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) { //目前处于扩容状态,但这个桶的数据还没有迁移,因此插入到旧表old_hashtable it->h_next = old_hashtable[oldbucket]; old_hashtable[oldbucket] = it; } else { //插入到新表,primary_hashtable it->h_next = primary_hashtable[hv & hashmask(hashpower)]; primary_hashtable[hv & hashmask(hashpower)] = it; } //元素数目+1 hash_items++; // 适时扩张,当元素个数超过hash容量的1.5倍时 if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) { //调用assoc_start_expand函数来唤醒扩容线程 assoc_start_expand(); } MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items); return 1; }
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(struct default_engine *engine, uint32_t hash, hash_item *it) {
    unsigned int oldbucket;

    assert(assoc_find(engine, hash, item_get_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    /* Head-insert the hash_item into the appropriate table. */
    if (engine->assoc.expanding &&
        (oldbucket = (hash & hashmask(engine->assoc.hashpower - 1))) >= engine->assoc.expand_bucket) {
        /* bucket still pending migration: keep it in the old table */
        it->h_next = engine->assoc.old_hashtable[oldbucket];
        engine->assoc.old_hashtable[oldbucket] = it;
    } else {
        uint32_t bucket = hash & hashmask(engine->assoc.hashpower);
        it->h_next = engine->assoc.primary_hashtable[bucket];
        engine->assoc.primary_hashtable[bucket] = it;
    }

    engine->assoc.hash_items++;
    /* expand once the item count exceeds 1.5x the table size */
    if (!engine->assoc.expanding &&
        engine->assoc.hash_items > (hashsize(engine->assoc.hashpower) * 3) / 2) {
        assoc_expand(engine);
    }

    MEMCACHED_ASSOC_INSERT(item_get_key(it), it->nkey, engine->assoc.hash_items);
    return 1;
}
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(item *it, const uint32_t hv) {
    unsigned int oldbucket;

//    assert(assoc_find(ITEM_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
    {
        /* expansion in progress but this bucket not yet migrated: old table */
        it->h_next = old_hashtable[oldbucket];
        old_hashtable[oldbucket] = it;
    } else {
        /* head-insert into the primary table */
        it->h_next = primary_hashtable[hv & hashmask(hashpower)];
        primary_hashtable[hv & hashmask(hashpower)] = it;
    }

    /* The item counter (and the decision to start expanding) has its own
     * mutex so inserts under different bucket locks don't race on it. */
    pthread_mutex_lock(&hash_items_counter_lock);
    hash_items++;
    if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
        assoc_start_expand();
    }
    pthread_mutex_unlock(&hash_items_counter_lock);

    /* NOTE(review): hash_items is read here outside the counter lock, so the
     * traced value can be slightly stale under concurrency — confirm this is
     * acceptable for the probe. */
    MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
    return 1;
}
item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) { item *it; unsigned int oldbucket; if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) {//这个槽位位于当前正在迁移的bucket之后,所以使用旧数据 it = old_hashtable[oldbucket]; } else {//使用新数据 it = primary_hashtable[hv & hashmask(hashpower)]; } item *ret = NULL; int depth = 0; while (it) {//需要遍历这个槽位里面的链表 if ((nkey == it->nkey) && (memcmp(key, ITEM_key(it), nkey) == 0)) { ret = it;//先看长度,否则后移 break; } it = it->h_next; ++depth; } MEMCACHED_ASSOC_FIND(key, nkey, depth); return ret; }
//由于哈希值只能确定是在哈希表中的哪个桶(bucket),但一个桶里面是有一条冲突链的 //此时需要用到具体的键值遍历并一一比较冲突链上的所有节点。虽然key是以'\0'结尾 //的字符串,但调用strlen还是有点耗时(需要遍历键值字符串)。所以需要另外一个参数 //nkey指明这个key的长度 //reference:http://blog.csdn.net/luotuo44/article/details/42773231 item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) { item *it; unsigned int oldbucket; // 得到相应的桶, bucket if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) { it = old_hashtable[oldbucket]; } else { //由哈希值判断这个key是属于那个桶(bucket)的 it = primary_hashtable[hv & hashmask(hashpower)]; } // 在桶里遍历搜索目标 item *ret = NULL; int depth = 0; while (it) { //调用memcmp来进行比较 if ((nkey == it->nkey) && (memcmp(key, ITEM_key(it), nkey) == 0)) { ret = it; break; } it = it->h_next; ++depth; } MEMCACHED_ASSOC_FIND(key, nkey, depth); return ret; }
hash_item *assoc_find(struct default_engine *engine, uint32_t hash, const char *key, const size_t nkey) { hash_item *it; unsigned int oldbucket; if (engine->assoc.expanding && (oldbucket = (hash & hashmask(engine->assoc.hashpower - 1))) >= engine->assoc.expand_bucket) { it = engine->assoc.old_hashtable[oldbucket]; } else { it = engine->assoc.primary_hashtable[hash & hashmask(engine->assoc.hashpower)]; } hash_item *ret = NULL; int depth = 0; while (it) { if ((nkey == it->nkey) && (memcmp(key, item_get_key(it), nkey) == 0)) { ret = it; break; } it = it->h_next; ++depth; } MEMCACHED_ASSOC_FIND(key, nkey, depth); return ret; }
// Head-insert a base_item into the bucket selected by hv and bump the item
// count.  Always returns 1.
// NOTE(review): a previous comment mentioned a lock added here, but no lock
// is visible in this function — confirm callers serialize access externally.
int HashTable::hash_insert(base_item* it, const uint32_t hv) {
    base_item** bucket = &primary_hashtable[hv & hashmask(hashpower)];
    it->h_next = *bucket;
    *bucket = it;
    hashitems++;
    return 1;
}
/* Head-insert prefix `pt` into the prefix hash table.  The prefix must not
 * already be present (asserted).  Bumps the parent's and the engine's
 * prefix-item counters.  Always returns 1. */
static int _prefix_insert(struct default_engine *engine, uint32_t hash, prefix_t *pt) {
    assert(assoc_prefix_find(engine, hash, _get_prefix(pt), pt->nprefix) == NULL);

    prefix_t **bucket =
        &engine->assoc.prefix_hashtable[hash & hashmask(DEFAULT_PREFIX_HASHPOWER)];
    pt->h_next = *bucket;
    *bucket = pt;

    assert(pt->parent_prefix != NULL);
    pt->parent_prefix->prefix_items++;
    engine->assoc.tot_prefix_items++;
    return 1;
}
/*
 * Remove the entry stored under (key, len) from the table, if present.
 * Takes the table lock (when configured), unlinks the node from its bucket
 * chain, decrements data_count, and frees it via hashtable_data_free().
 *
 * Fix: the old code matched keys with strncmp(idx->key, key, len) alone, so
 * a stored key that merely starts with `key` (e.g. stored "abcd", deleting
 * "abc") was treated as a match.  Compare stored lengths first, then bytes.
 */
void hashtable_del2(hashtable_t *table, const char *key, const HASH_KEY_LEN len) {
    if(table == NULL || key == NULL) return;
    if(table->lock != NULL) {
        pthread_mutex_lock(table->lock);
    }
    HASH_VAL hval = hash(key, (size_t)len, 0);
    hashtable_data_t *idx = table->idx[hval & hashmask(table->power)];
    hashtable_data_t *prev = NULL;
    while(idx != NULL) {
        /* exact match: same length AND same bytes */
        if(idx->len == len && strncmp(idx->key, key, (size_t)len) == 0) {
            // first item.
            if(prev == NULL) {
                table->idx[hval & hashmask(table->power)] = idx->next;
            }else {
                prev->next = idx->next;
            }
            table->data_count--;
            idx->next = NULL;
            /* release the lock before freeing so the free callback can't
             * deadlock against this table's lock */
            if(table->lock != NULL) {
                pthread_mutex_unlock(table->lock);
            }
            hashtable_data_free(table, idx);
            return;
        }
        prev = idx;
        idx = idx->next;
    }
    if(table->lock != NULL) {
        pthread_mutex_unlock(table->lock);
    }
    return;
}
/*
 * Walk hash-table buckets starting at startBkt, invoking cbFn on up to n
 * items; wraps around the table and stops after a full cycle, when the
 * callback returns -1, or when n items have been sampled.  Returns the
 * number of items sampled, or -1 if the table is currently expanding
 * (walking is not attempted during migration).
 */
int htWalk(itemCB *cbFn, int startBkt, int n, void *magic) {
    int stopBkt, bkt, maxBkt, walking, sampled;
    time_t flushTime;
    item *it;

    mutex_lock(&cache_lock);
    if(expanding) {
        sampled = -1;  /* refuse to walk while buckets are being migrated */
    } else {
        maxBkt = hashsize(hashpower) - 1;
        startBkt &= hashmask(hashpower);  /* clamp the start into table range */
        stopBkt = startBkt == 0 ? maxBkt : (startBkt - 1);  /* one full wrap */
        sampled = 0;
        flushTime = settings.oldest_live ? settings.oldest_live : current_time;
        walking = 1;
        for(bkt = startBkt; walking; ) {
            for(it = primary_hashtable[bkt]; walking && it; it = it->h_next) {
                // ignore items that are flushed or expired
                /* NOTE(review): `it->time <= flushTime` selects items stored
                 * at or BEFORE oldest_live, which memcached normally treats
                 * as flushed; this looks inverted relative to the comment
                 * above — confirm the intended semantics. */
                if(it->time <= flushTime &&
                   (it->exptime == 0 || it->exptime >= current_time)) {
                    if((*cbFn)(it, bkt, magic) == -1 || ++sampled == n) {
                        walking = 0;  /* callback aborted or quota reached */
                    }
                }
            }
            if(bkt == stopBkt) break;  /* completed the full cycle */
            if(bkt == maxBkt) bkt = 0; else bkt++;
        }
    }
    mutex_unlock(&cache_lock);
    return sampled;
}
void HashTable::hash_delete(const char* key, const size_t nkey, const uint32_t hv) { base_item* &p = primary_hashtable[hv & hashmask(hashpower)]; base_item** pos; //因为c++引用和指针处理不同所以要先处理一下 if (p && ((nkey != (p->nkey)) || memcmp(key, p->data, nkey))) { pos = &(p->h_next); while (*pos && ((nkey != ((*pos)->nkey)) || memcmp(key, (*pos)->data, nkey))) { pos = &((*pos)->h_next); } } else { p = 0; return; } //想改变的是指针的位置,所以必须用二级指针 if (*pos) { base_item* nxt; hashitems--; nxt = (*pos)->h_next; (*pos)->h_next = 0; *pos = nxt; return; } }
/* returns 0 on success, -1 if key was not found */
static int hashtable_do_del(hashtable_t *hashtable, const char *key, size_t hash) {
    pair_t *pair;
    bucket_t *bucket;
    size_t index;

    /* locate the bucket for this hash and search it for the key */
    index = hash & hashmask(hashtable->order);
    bucket = &hashtable->buckets[index];

    pair = hashtable_find_pair(hashtable, bucket, key, hash);
    if(!pair)
        return -1;

    /* Fix up the bucket's first/last markers before unlinking: a bucket
     * that becomes empty points both ends at the table's sentinel list. */
    if(&pair->list == bucket->first && &pair->list == bucket->last)
        bucket->first = bucket->last = &hashtable->list;
    else if(&pair->list == bucket->first)
        bucket->first = pair->list.next;
    else if(&pair->list == bucket->last)
        bucket->last = pair->list.prev;

    list_remove(&pair->list);
    json_decref(pair->value);  /* drop the table's reference to the value */
    jsonp_free(pair);

    hashtable->size--;
    return 0;
}
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(struct default_engine *engine, uint32_t hash, hash_item *it) {
    struct assoc *assoc = &engine->assoc;
    uint32_t bucket = GET_HASH_BUCKET(hash, assoc->hashmask);
    uint32_t tabidx;

    assert(assoc_find(engine, hash, item_get_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    /* If this bucket lags behind the current root power and no one is
     * traversing it (refcount == 0), migrate its chains to the new layout
     * before inserting. */
    if (assoc->infotable[bucket].curpower != assoc->rootpower &&
        assoc->infotable[bucket].refcount == 0) {
        redistribute(engine, bucket);
    }
    tabidx = GET_HASH_TABIDX(hash, assoc->hashpower,
                             hashmask(assoc->infotable[bucket].curpower));

    // inserting actual hash_item to appropriate assoc_t
    it->h_next = assoc->roottable[tabidx].hashtable[bucket];
    assoc->roottable[tabidx].hashtable[bucket] = it;

    assoc->hash_items++;
    /* grow the root table once the load factor exceeds 1.5 */
    if (assoc->hash_items > (hashsize(assoc->hashpower + assoc->rootpower) * 3) / 2) {
        assoc_expand(engine);
    }
    MEMCACHED_ASSOC_INSERT(item_get_key(it), it->nkey, assoc->hash_items);
    return 1;
}
/* Try to take the bucket-stripe lock covering hash value hv without
 * blocking.  Returns the lock pointer on success (caller passes it to the
 * matching unlock), or NULL when the lock is already held. */
void *item_trylock(uint32_t hv) {
    pthread_mutex_t *lock = &item_locks[hv & hashmask(item_lock_hashpower)];
    return (pthread_mutex_trylock(lock) == 0) ? (void *)lock : NULL;
}
/*
 * Initialize the engine's assoc (hash table) subsystem: the root table, the
 * per-bucket info table, and the prefix hash table.  Returns ENGINE_ENOMEM
 * (freeing anything already allocated) if any allocation fails.
 */
ENGINE_ERROR_CODE assoc_init(struct default_engine *engine) {
    struct assoc *assoc = &engine->assoc;

    logger = engine->server.log->get_logger();

    assoc->hashsize = hashsize(assoc->hashpower);
    assoc->hashmask = hashmask(assoc->hashpower);
    assoc->rootpower = 0;

    /* A single allocation of 2*hashsize pointer slots: the first half is the
     * roottable array, the second half serves as roottable[0]'s hashtable. */
    assoc->roottable = calloc(assoc->hashsize * 2, sizeof(void *));
    if (assoc->roottable == NULL) {
        return ENGINE_ENOMEM;
    }
    assoc->roottable[0].hashtable = (hash_item**)&assoc->roottable[assoc->hashsize];

    assoc->infotable = calloc(assoc->hashsize, sizeof(struct bucket_info));
    if (assoc->infotable == NULL) {
        free(assoc->roottable);
        return ENGINE_ENOMEM;
    }

    assoc->prefix_hashtable = calloc(hashsize(DEFAULT_PREFIX_HASHPOWER), sizeof(void *));
    if (assoc->prefix_hashtable == NULL) {
        free(assoc->roottable);
        free(assoc->infotable);
        return ENGINE_ENOMEM;
    }

    // initialize noprefix stats info
    memset(&assoc->noprefix_stats, 0, sizeof(prefix_t));
    root_pt = &assoc->noprefix_stats;
    return ENGINE_SUCCESS;
}
/*
 * Look up (key, len) in the table; returns the stored node or NULL.
 *
 * Fixes:
 *  - guard against a NULL table/key, matching hashtable_del2's checks
 *    (the old code dereferenced table unconditionally);
 *  - the old strncmp-only comparison matched any stored key that merely
 *    starts with `key`; require equal lengths before comparing bytes.
 */
hashtable_data_t* hashtable_get2(hashtable_t *table, const char *key, const HASH_KEY_LEN len) {
    if(table == NULL || key == NULL) return NULL;
    if(table->lock != NULL) {
        pthread_mutex_lock(table->lock);
    }
    HASH_VAL hval = hash(key, (size_t)len, 0);
    hashtable_data_t *data = table->idx[hval & hashmask(table->power)];
    while(data != NULL) {
        /* exact match: same length AND same bytes */
        if(data->len == len && strncmp(data->key, key, (size_t)len) == 0) {
            break;
        }
        data = data->next;
    }
    if(table->lock != NULL) {
        pthread_mutex_unlock(table->lock);
    }
    return data;
}
/*
 * Insert `data` into the table, rejecting duplicates.  Returns `data` on
 * success, or NULL on bad arguments or when a node with the same key already
 * exists.  The node is appended to the tail of its bucket chain.
 *
 * Fix: duplicate detection used strncmp(idx->key, data->key, data->len)
 * alone, so inserting a key that is a prefix of an existing key (e.g. stored
 * "abcd", inserting "abc") was wrongly rejected as a duplicate.  Require
 * equal lengths before comparing bytes.
 */
hashtable_data_t* hashtable_set(hashtable_t *table, hashtable_data_t *data) {
    if(table == NULL || data == NULL) return NULL;
    if(table->lock != NULL) {
        pthread_mutex_lock(table->lock);
    }
    HASH_VAL hval = hash(data->key, data->len, 0);
    hashtable_data_t *idx = table->idx[hval & hashmask(table->power)];
    hashtable_data_t *prev = NULL;
    while(idx != NULL) {
        /* exact duplicate: same length AND same bytes */
        if(idx->len == data->len &&
           strncmp(idx->key, data->key, data->len) == 0) {
            if(table->lock != NULL) {
                pthread_mutex_unlock(table->lock);
            }
            return NULL;
        }
        prev = idx;
        idx = idx->next;
    }
    if(prev == NULL) {
        /* empty bucket: the new node becomes the head */
        table->idx[hval & hashmask(table->power)] = data;
    }else {
        prev->next = data;
    }
    table->data_count++;
    if(table->lock != NULL) {
        pthread_mutex_unlock(table->lock);
    }
    return data;
}
/*
 * Background thread that incrementally migrates buckets from the old hash
 * table into the (doubled) primary table.  Each pass moves up to
 * hash_bulk_move buckets while holding the global item lock and the cache
 * lock, then releases both so worker threads can make progress.  When a
 * migration completes it switches all threads back to fine-grained item
 * locks and sleeps on maintenance_cond until the next expansion request.
 */
static void *assoc_maintenance_thread(void *arg) {

    while (do_run_maintenance_thread) {
        int ii = 0;

        /* Lock the cache, and bulk move multiple buckets to the new
         * hash table. */
        item_lock_global();
        mutex_lock(&cache_lock);

        for (ii = 0; ii < hash_bulk_move && expanding; ++ii) {
            item *it, *next;
            int bucket;

            /* rehash every chained item of the current old-table bucket
             * into the primary table (head insertion) */
            for (it = old_hashtable[expand_bucket]; NULL != it; it = next) {
                next = it->h_next;

                bucket = hash(ITEM_key(it), it->nkey, 0) & hashmask(hashpower);
                it->h_next = primary_hashtable[bucket];
                primary_hashtable[bucket] = it;
            }

            old_hashtable[expand_bucket] = NULL;

            expand_bucket++;
            /* all old buckets migrated: the expansion is complete */
            if (expand_bucket == hashsize(hashpower - 1)) {
                expanding = false;
                free(old_hashtable);
                STATS_LOCK();
                stats.hash_bytes -= hashsize(hashpower - 1) * sizeof(void *);
                stats.hash_is_expanding = 0;
                STATS_UNLOCK();
                if (settings.verbose > 1)
                    fprintf(stderr, "Hash table expansion done\n");
            }
        }

        mutex_unlock(&cache_lock);
        item_unlock_global();

        if (!expanding) {
            /* finished expanding. tell all threads to use fine-grained locks */
            switch_item_lock_type(ITEM_LOCK_GRANULAR);
            slabs_rebalancer_resume();
            /* We are done expanding.. just wait for next invocation */
            mutex_lock(&cache_lock);
            started_expanding = false;
            pthread_cond_wait(&maintenance_cond, &cache_lock);
            /* Before doing anything, tell threads to use a global lock */
            mutex_unlock(&cache_lock);
            slabs_rebalancer_pause();
            switch_item_lock_type(ITEM_LOCK_GLOBAL);
            mutex_lock(&cache_lock);
            assoc_expand();
            mutex_unlock(&cache_lock);
        }
    }
    return NULL;
}
/* Look up (key, nkey) by its precomputed hash hv; returns the item or NULL. */
item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) {
    item *it;
    unsigned int oldbucket;
    /* If the table is mid-expansion and the sought item is still in the old
     * table, search there; otherwise search the new (primary) table.
     * How to tell which table holds the key:
     * 1. compute the key's bucket index in the old table;
     * 2. if that index is >= the number of buckets already migrated
     *    (expand_bucket), the key is still in the old table; otherwise it
     *    has already moved to the new one.
     *
     * eg.
     * primary hashtable
     * [0]
     * [1] -> a -> b -> null
     * [2]
     * [3] -> x
     *
     * old hashtable
     * [0]
     * [1]
     * [2]
     * [3]
     * [4] -> y ->null
     * [5] -> p -> null   <--- hash(key)
     * ...
     */
    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
    {
        it = old_hashtable[oldbucket];
    } else {
        it = primary_hashtable[hv & hashmask(hashpower)];
    }

    item *ret = NULL;
    int depth = 0;  /* chain positions traversed, reported to the trace probe */
    while (it) {
        /* cheap length check first, then byte-wise comparison */
        if ((nkey == it->nkey) && (memcmp(key, ITEM_key(it), nkey) == 0)) {
            ret = it;
            break;
        }
        it = it->h_next;
        ++depth;
    }
    MEMCACHED_ASSOC_FIND(key, nkey, depth);
    return ret;
}
// Attempt to acquire the stripe lock covering hash value hv without
// blocking.  Returns the mutex on success; returns 0 when another thread
// already holds it (i.e. that region is currently being modified).
std::mutex* HashTable::item_trylock(uint32_t hv) {
    std::mutex* stripe = item_locks[(hv & hashmask(hashpower)) % item_lock_count];
    return stripe->try_lock() ? stripe : 0;
}
/* Release the lock taken for hash value hv.  The per-thread lock-type flag
 * decides which lock that was: the granular bucket-stripe lock normally, or
 * the single global lock while the hash table is migrating. */
void item_unlock(uint32_t hv) {
    uint8_t *lock_type = pthread_getspecific(item_lock_type_key);
    if (likely(*lock_type == ITEM_LOCK_GRANULAR)) {
        /* fine-grained mode: release this hash's stripe lock */
        mutex_unlock(&item_locks[hv & hashmask(item_lock_hashpower)]);
        return;
    }
    /* migration in progress: everything serializes on the global lock */
    mutex_unlock(&item_global_lock);
}
/*
 * Background thread that migrates the engine's hash table after an
 * expansion: moves up to hash_bulk_move buckets per pass from the old table
 * into the (doubled) primary table.  The cache lock is acquired lazily
 * (several trylock attempts with short sleeps before a blocking lock) so
 * normal worker threads keep priority; the loop also naps every 100 passes.
 * Returns NULL when the whole old table has been migrated.
 */
static void *assoc_maintenance_thread(void *arg) {
    struct default_engine *engine = arg;
    bool done = false;
    struct timespec sleep_time = {0, 1000};  /* 1000 ns between lock retries */
    int i, try_cnt = 9;
    long tot_execs = 0;
    EXTENSION_LOGGER_DESCRIPTOR *logger = engine->server.log->get_logger();

    if (engine->config.verbose) {
        logger->log(EXTENSION_LOG_INFO, NULL, "Hash table expansion start: %d => %d\n",
                    hashsize(engine->assoc.hashpower - 1), hashsize(engine->assoc.hashpower));
    }
    do {
        int ii;
        /* long-running background task.
         * hold the cache lock lazily in order to give priority to normal workers. */
        for (i = 0; i < try_cnt; i++) {
            if (pthread_mutex_trylock(&engine->cache_lock) == 0) break;
            nanosleep(&sleep_time, NULL);
        }
        if (i == try_cnt) pthread_mutex_lock(&engine->cache_lock);

        for (ii = 0; ii < hash_bulk_move && engine->assoc.expanding; ++ii) {
            hash_item *it, *next;
            int bucket;

            /* rehash every chained item of the current old bucket into the
             * primary table (head insertion) */
            for (it = engine->assoc.old_hashtable[engine->assoc.expand_bucket];
                 NULL != it; it = next) {
                next = it->h_next;
                bucket = engine->server.core->hash(item_get_key(it), it->nkey, 0)
                         & hashmask(engine->assoc.hashpower);
                it->h_next = engine->assoc.primary_hashtable[bucket];
                engine->assoc.primary_hashtable[bucket] = it;
            }

            engine->assoc.old_hashtable[engine->assoc.expand_bucket] = NULL;
            engine->assoc.expand_bucket++;
            /* all old buckets migrated: expansion finished */
            if (engine->assoc.expand_bucket == hashsize(engine->assoc.hashpower - 1)) {
                engine->assoc.expanding = false;
                free(engine->assoc.old_hashtable);
            }
        }
        if (!engine->assoc.expanding) {
            done = true;
        }
        pthread_mutex_unlock(&engine->cache_lock);

        /* yield periodically so this loop never monopolizes the CPU */
        if ((++tot_execs % 100) == 0) {
            nanosleep(&sleep_time, NULL);
        }
    } while (!done);
    if (engine->config.verbose) {
        logger->log(EXTENSION_LOG_INFO, NULL, "Hash table expansion done\n");
    }
    return NULL;
}