//hash,插入元素,hv是这个item键值的哈希值 int assoc_insert(item *it, const uint32_t hv) { unsigned int oldbucket; // assert(assoc_find(ITEM_key(it), it->nkey) == 0); /* shouldn't have duplicately named things defined */ // 头插法 if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) { //目前处于扩容状态,但这个桶的数据还没有迁移,因此插入到旧表old_hashtable it->h_next = old_hashtable[oldbucket]; old_hashtable[oldbucket] = it; } else { //插入到新表,primary_hashtable it->h_next = primary_hashtable[hv & hashmask(hashpower)]; primary_hashtable[hv & hashmask(hashpower)] = it; } //元素数目+1 hash_items++; // 适时扩张,当元素个数超过hash容量的1.5倍时 if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) { //调用assoc_start_expand函数来唤醒扩容线程 assoc_start_expand(); } MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items); return 1; }
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(item *it, const uint32_t hv) {
    unsigned int oldbucket;

//    assert(assoc_find(ITEM_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
    {
        /* An expansion is in progress and this bucket has not been migrated
         * yet: head-insert into the old table so the migration sweep will
         * still find this item. */
        it->h_next = old_hashtable[oldbucket];
        old_hashtable[oldbucket] = it;
    } else {
        /* Head-insert into the current (primary) table. */
        it->h_next = primary_hashtable[hv & hashmask(hashpower)];
        primary_hashtable[hv & hashmask(hashpower)] = it;
    }

    /* The item counter is shared; both the increment and the expansion
     * decision (count > 1.5x bucket count) happen under its lock so the
     * expansion trigger fires exactly once. */
    pthread_mutex_lock(&hash_items_counter_lock);
    hash_items++;
    if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
        assoc_start_expand();
    }
    pthread_mutex_unlock(&hash_items_counter_lock);

    /* NOTE(review): hash_items is read here after the counter lock is
     * released — presumably a benign race for the trace probe; confirm. */
    MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
    return 1;
}
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(struct default_engine *engine, uint32_t hash, hash_item *it) {
    struct assoc *assoc = &engine->assoc;
    uint32_t bucket = GET_HASH_BUCKET(hash, assoc->hashmask);
    uint32_t tabidx;

    assert(assoc_find(engine, hash, item_get_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    /* If this bucket's chain is still at an older power than the table's
     * current rootpower, and no reader currently holds it (refcount == 0),
     * redistribute its entries before inserting. */
    if (assoc->infotable[bucket].curpower != assoc->rootpower &&
        assoc->infotable[bucket].refcount == 0) {
        redistribute(engine, bucket);
    }
    /* Select which sub-table of the root table this hash maps to, based on
     * the bucket's (possibly just-updated) current power. */
    tabidx = GET_HASH_TABIDX(hash, assoc->hashpower,
                             hashmask(assoc->infotable[bucket].curpower));

    // inserting actual hash_item to appropriate assoc_t
    it->h_next = assoc->roottable[tabidx].hashtable[bucket];
    assoc->roottable[tabidx].hashtable[bucket] = it;

    assoc->hash_items++;
    /* Expand once the item count exceeds 1.5x the total bucket count
     * (hashpower + rootpower together determine capacity). */
    if (assoc->hash_items > (hashsize(assoc->hashpower + assoc->rootpower) * 3) / 2) {
        assoc_expand(engine);
    }
    MEMCACHED_ASSOC_INSERT(item_get_key(it), it->nkey, assoc->hash_items);
    return 1;
}
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(struct default_engine *engine, uint32_t hash, hash_item *it) {
    struct assoc *as = &engine->assoc;  /* shorthand for the engine's assoc state */
    unsigned int pending_bucket;

    assert(assoc_find(engine, hash, item_get_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    /* Head-insert into whichever table currently owns this bucket. */
    if (as->expanding &&
        (pending_bucket = (hash & hashmask(as->hashpower - 1))) >= as->expand_bucket) {
        /* Expansion in progress and this bucket not yet migrated: old table. */
        it->h_next = as->old_hashtable[pending_bucket];
        as->old_hashtable[pending_bucket] = it;
    } else {
        uint32_t idx = hash & hashmask(as->hashpower);
        it->h_next = as->primary_hashtable[idx];
        as->primary_hashtable[idx] = it;
    }

    as->hash_items++;
    /* Grow once the item count passes 1.5x the bucket count, unless an
     * expansion is already running. */
    if (!as->expanding && as->hash_items > (hashsize(as->hashpower) * 3) / 2) {
        assoc_expand(engine);
    }

    MEMCACHED_ASSOC_INSERT(item_get_key(it), it->nkey, as->hash_items);
    return 1;
}
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(item *it) {
    unsigned int unmigrated;

    assert(assoc_find(ITEM_key(it), it->nkey) == 0); /* shouldn't have duplicately named things defined */

    /* This older interface computes the key's hash itself. */
    const uint32_t hv = hash(ITEM_key(it), it->nkey, 0);

    if (expanding && (unmigrated = (hv & hashmask(hashpower - 1))) >= expand_bucket) {
        /* Expansion running and this bucket not migrated yet: head-insert
         * into the old table. */
        it->h_next = old_hashtable[unmigrated];
        old_hashtable[unmigrated] = it;
    } else {
        /* Head-insert into the primary table. */
        const uint32_t idx = hv & hashmask(hashpower);
        it->h_next = primary_hashtable[idx];
        primary_hashtable[idx] = it;
    }

    hash_items++;
    /* Expand in place once load exceeds 1.5x capacity (this variant expands
     * synchronously rather than waking a thread). */
    if (!expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
        assoc_expand();
    }

    MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
    return 1;
}
//hv是这个item键值的哈希值 int assoc_insert(item *it, const uint32_t hv) { unsigned int oldbucket; // assert(assoc_find(ITEM_key(it), it->nkey) == 0); /* shouldn't have duplicately named things defined */ //使用头插法 插入一个item ///第一次看本函数,直接看else部分 if (expanding &&//目前处于扩展hash表状态 (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)//数据迁移时还没迁移到这个桶 { //插入到旧表 it->h_next = old_hashtable[oldbucket]; old_hashtable[oldbucket] = it; } else { //使用头插法插入哈希表中 //插入到新表 it->h_next = primary_hashtable[hv & hashmask(hashpower)]; primary_hashtable[hv & hashmask(hashpower)] = it; } hash_items++;//哈希表的item数量加一 //当hash表的item数量到达了hash表容量的1.5倍时,就会进行扩展 //当然如果现在正处于扩展状态,是不会再扩展的 if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) { assoc_start_expand();//唤醒迁移线程,扩展哈希表 } MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items); return 1; }
//hv是这个item键值的哈希值,用于快速查找该key对应的item,见assoc_find item插入hash表函数assoc_insert int assoc_insert(item *it, const uint32_t hv) { unsigned int oldbucket; // 插入hash表函数为assoc_insert 插入lru队列的函数为item_link_q // assert(assoc_find(ITEM_key(it), it->nkey) == 0); /* shouldn't have duplicately named things defined */ //使用头插法,插入一个item //第一次看本函数,直接看else部分 if (expanding && (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket) { it->h_next = old_hashtable[oldbucket]; old_hashtable[oldbucket] = it; } else { //使用头插法插入哈希表中 it->h_next = primary_hashtable[hv & hashmask(hashpower)]; primary_hashtable[hv & hashmask(hashpower)] = it; } hash_items++;//哈希表的item数量加一 if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) { assoc_start_expand(); } MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items); return 1; }
/* Note: this isn't an assoc_update. The key must not already exist to call this */
int assoc_insert(item *it, const uint32_t hv) {
    unsigned int oldbucket;

//    assert(assoc_find(ITEM_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */
    // commented by Bin
    /* if (assoc_find(ITEM_key(it), it->nkey, hv) != 0) { */
    /*   printf("see duplicate keys"); */
    /* } */

    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
    {
        /* Expansion in progress and this bucket not migrated yet: old table. */
        it->h_next = old_hashtable[oldbucket];
        old_hashtable[oldbucket] = it;
    } else {
        /* Head-insert into the primary table. */
        it->h_next = primary_hashtable[hv & hashmask(hashpower)];
        primary_hashtable[hv & hashmask(hashpower)] = it;
#ifdef COUNT_LARGEST_BUCKET
        /* Optional instrumentation: track chain lengths and the longest
         * chain seen in the primary table. */
        bucket_size[hv & hashmask(hashpower)] ++;
        if (bucket_size[hv & hashmask(hashpower)] > largest_bucket) {
            largest_bucket = bucket_size[hv & hashmask(hashpower)];
        }
#endif
    }

    hash_items++;
    // added by Bin:
    /* if ((hash_items) && (hash_items % 1000000 == 0)) { */
    /*   printf("%u Million items inserted!\n", hash_items / 1000000); */
    /* } */

    if (! expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
        // commented by Bin
        //assoc_expand();
        // added by Bin
        /* perror("can not insert!\n"); */
        /* exit(1); */
        /* This fork disables expansion: once load exceeds 1.5x capacity it
         * reports failure instead of growing the table.
         * NOTE(review): the item was already head-inserted above, yet 0
         * (failure) is returned here — presumably intentional in this fork;
         * confirm callers do not double-insert on a 0 return. */
        printf("hash table is full (hashpower = %d, hash_items = %u,), need to increase hashpower\n", hashpower, hash_items);
        return 0;
    }

    MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
    return 1;
}
/* Note: this isn't an assoc_update. The key must not already exist to call
 * this. Head-inserts `it` into the table selected by hv; returns 1. */
int assoc_insert(item *it, const uint32_t hv) {
    unsigned int pending;

    // assert(assoc_find(ITEM_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    if (expanding && (pending = (hv & hashmask(hashpower - 1))) >= expand_bucket) {
        /* This slot is behind the migration cursor, so the expansion sweep
         * will still scan it: the item must go into the old table. */
        it->h_next = old_hashtable[pending];
        old_hashtable[pending] = it;
    } else {
        const uint32_t idx = hv & hashmask(hashpower);
        it->h_next = primary_hashtable[idx];
        primary_hashtable[idx] = it;
    }

    ++hash_items;
    /* Grow once the total item count passes 1.5x the bucket count (the table
     * only ever grows; there is no shrink path). */
    if (!expanding && hash_items > (hashsize(hashpower) * 3) / 2) {
        assoc_start_expand();
    }

    MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
    return 1;
}