/* Look up the node for p_key in the hash table.
 * Returns the matching node, or NULL (0) when no node with that key exists.
 */
struct hash_node*
hash_get_node_by_key(struct hash* p_hash, void* p_key)
{
  struct hash_node** p_bucket = hash_get_bucket(p_hash, p_key);
  struct hash_node* p_node = *p_bucket;
  /* The loop condition already handles an empty bucket (p_node == 0),
   * so the original's separate early-return NULL check was redundant
   * and has been removed.
   */
  while (p_node != 0 &&
         vsf_sysutil_memcmp(p_key, p_node->p_key, p_hash->key_size) != 0)
  {
    p_node = p_node->p_next;
  }
  return p_node;
}
/* Return the node whose key matches `key` (compared over key_size bytes),
 * or NULL when the key is not present in the table. */
hash_node_t* hash_get_node_by_key(hash_t *hash, void *key, unsigned int key_size)
{
    hash_node_t **head = hash_get_bucket(hash, key);
    hash_node_t *cursor;

    /* An empty bucket falls straight through the loop and returns NULL. */
    for (cursor = *head; cursor != NULL; cursor = cursor->next) {
        if (memcmp(cursor->key, key, key_size) == 0)
            break;
    }
    return cursor;
}
//根据key获取哈希表中的一个节点 hash_node_t *hash_get_node_by_key(hash_t *hash,void *key,unsigned int key_size) { hash_node_t** bucket_address=hash_get_bucket(hash,key); if((*bucket_address)==NULL)//链表头指针为空 { return NULL; } //链表中遍历查找 hash_node_t*cur = *bucket_address; while(cur != NULL && memcmp(key,cur->key,key_size)!=0) { cur=cur->next; } if(cur != NULL) { return cur; } return NULL; }
/*
 * Incremental garbage collector for the client database.
 * Processes ONE hash bucket per invocation (the static `bucket` cursor
 * keeps the scan position across calls), evicting ClientInfo entries that
 * are idle relative to their request volume, then re-schedules itself via
 * the event loop until the whole table has been swept.
 */
static void clientdbGC(void *unused)
{
    static int bucket = 0;  /* persists across invocations: scan cursor */
    hash_link *link_next;
    link_next = hash_get_bucket(client_table, bucket++);
    while (link_next != NULL) {
        ClientInfo *c = (ClientInfo *) link_next;
        int age = squid_curtime - c->last_seen;
        /* advance before a possible removal invalidates the current link */
        link_next = link_next->next;
        /* keep entries with established connections ... */
        if (c->n_established)
            continue;
        /* ... or enough recent activity for their age bracket:
         * the older the entry, the lower the activity bar for eviction */
        if (age < 24 * 3600 && c->Http.n_requests > 100)
            continue;
        if (age < 4 * 3600 && (c->Http.n_requests > 10 || c->Icp.n_requests > 10))
            continue;
        if (age < 5 * 60 && (c->Http.n_requests > 1 || c->Icp.n_requests > 1))
            continue;
        if (age < 60)
            continue;
        hash_remove_link(client_table, &c->hash);
        clientdbFreeItem(c);
        statCounter.client_http.clients--;
        cleanup_removed++;
    }
    if (bucket < CLIENT_DB_HASH_SIZE)
        /* more buckets left: continue shortly without hogging the event loop */
        eventAdd("client_db garbage collector", clientdbGC, NULL, 0.15, 0);
    else {
        /* full sweep complete: reset state and arm the next periodic run */
        bucket = 0;
        cleanup_running = 0;
        max_clients = statCounter.client_http.clients * 3 / 2;
        if (!cleanup_scheduled) {
            cleanup_scheduled = 1;
            eventAdd("client_db garbage collector", clientdbScheduledGC, NULL, 3 * 3600, 0);
        }
        debug(49, 2) ("clientdbGC: Removed %d entries\n", cleanup_removed);
    }
}
/* Remove the entry for `key` from the hash table and release its memory.
 * No-op when the key is not present. */
void hash_free_entry(hash_t *hash, void *key, unsigned int key_size)
{
    hash_node_t *victim = hash_get_node_by_key(hash, key, key_size);

    if (victim == NULL)
        return;

    /* Release the owned key/value buffers first. */
    free(victim->key);
    free(victim->value);

    /* Unlink from the doubly linked chain; a node with no predecessor
     * is the bucket head, so the bucket pointer itself must be updated. */
    if (victim->prev != NULL) {
        victim->prev->next = victim->next;
    } else {
        hash_node_t **bucket = hash_get_bucket(hash, key);
        *bucket = victim->next;
    }
    if (victim->next != NULL)
        victim->next->prev = victim->prev;

    free(victim);
}
/*
 * Open-addressing lookup: return the node for `key`, or NULL when absent.
 * Linear probing starts at the key's home bucket and wraps around the table.
 */
hash_node_t* hash_get_node_by_key(hash_t *hash, void *key, unsigned int key_size)
{
    unsigned int bucket = hash_get_bucket(hash, key);
    unsigned int i = bucket;
    /* Probe forward while the slot is occupied (not EMPTY) and its key
     * does not match ours. */
    while (hash->nodes[i].status != EMPTY && memcmp(key, hash->nodes[i].key, key_size) != 0)
    {
        i = (i + 1) % hash->buckets;  /* wrap around the table */
        if (i == bucket)  /* probed a full circle */
        {
            /* not found, and the table is full */
            return NULL;
        }
    }
    /* NOTE(review): if the loop stopped on a matching key in a non-ACTIVE
     * (deleted) slot, probing ends here and NULL is returned — this assumes
     * a key is never re-inserted later in the same probe chain; confirm
     * against the insert path. */
    if (hash->nodes[i].status == ACTIVE)
    {
        return &(hash->nodes[i]);
    }
    /* Reaching here means slot i is an empty position: key not present. */
    return NULL;
}
/**
 * hash_add() - add the given ptr to the given hash_array
 *
 * Appends a new list node holding `ptr` to the tail of the bucket chosen
 * by hash_get_bucket(), updating the array's bookkeeping counters
 * (allocations, collisions, and list-walk steps) exactly as before.
 */
void hash_add(HASH_ARRAY *array, void *ptr)
{
    int slot = hash_get_bucket(ptr);
    HASH_BLOCK *bucket = &(array->blocks[slot]);
    LIST_NODE *node;

    array->numallocs++;

    /* Build the node up front; both branches below link the same node. */
    node = MC_MALLOC(sizeof(LIST_NODE));
    node->ptr = ptr;
    node->next = NULL;

    if (bucket->depth == 0) {
        /* empty bucket: the node becomes the chain root */
        bucket->root = node;
    } else {
        /* occupied bucket: walk to the tail and append, counting steps */
        LIST_NODE *tail = bucket->root;
        array->collisions++;
        array->numliststeps++;
        while (tail->next != NULL) {
            array->numliststeps++;
            tail = tail->next;
        }
        tail->next = node;
    }

    bucket->depth++;
    array->n++;
}
/* recalculate a few hash buckets per invocation; schedules next step */
static void storeDigestRebuildStep(void *datanotused)
{
    /* number of buckets to process this round: a configured percentage
     * of the whole store hash table, rounded up */
    int bcount = (int) ceil((double) store_hash_buckets * (double) Config.digest.rebuild_chunk_percentage / 100.0);
    assert(sd_state.rebuild_lock);
    /* clamp so the scan never runs past the end of the table */
    if (sd_state.rebuild_offset + bcount > store_hash_buckets)
        bcount = store_hash_buckets - sd_state.rebuild_offset;
    debug(71, 3) ("storeDigestRebuildStep: buckets: %d offset: %d chunk: %d buckets\n",
        store_hash_buckets, sd_state.rebuild_offset, bcount);
    while (bcount--) {
        hash_link *link_ptr = hash_get_bucket(store_table, sd_state.rebuild_offset);
        /* fold every entry in this bucket's chain into the digest */
        for (; link_ptr; link_ptr = link_ptr->next) {
            storeDigestAdd((StoreEntry *) link_ptr);
        }
        sd_state.rebuild_offset++;
    }
    /* are we done ? */
    if (sd_state.rebuild_offset >= store_hash_buckets)
        storeDigestRebuildFinish();
    else
        /* yield to the event loop and continue on the next tick */
        eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1);
}
//往哈希表中添加一项 void hash_add_entry(hash_t *hash,void *key,unsigned int key_size,void *value,unsigned int value_size) { hash_node_t *node = malloc(sizeof(hash_node_t)); node->key = malloc(key_size); memcpy(node->key,key,key_size); node->value=malloc(value_size); memcpy(node->value,value,value_size); hash_node_t **bucketaddress=hash_get_bucket(hash,key); if(*bucketaddress == NULL) { *bucketaddress = node; } else { //将新节点插入 头插 node->next = *bucketaddress; (*bucketaddress)->prev=node; (*bucketaddress)=node; node->prev=NULL; } }
/*
 * mo_hash - HASH operator command handler.
 * Reports occupancy statistics (total entries, non-empty buckets, longest
 * chain) for each of the server's hash tables: clients, channels, reserved
 * channels, IDs, and user@host entries. One NOTICE per table is sent back
 * to the requesting operator. Each table uses its own chain link field
 * (hnext / hnextch / idhnext / next), hence the repeated loop bodies.
 */
static void mo_hash(struct Client *client_p, struct Client *source_p, int parc, char *parv[])
{
  int i;
  int max_chain = 0;  /* longest collision chain seen in the current table */
  int buckets = 0;    /* number of non-empty buckets */
  int count = 0;      /* total entries */
  struct Client *cl;
  struct Client *icl;
  struct Channel *ch;
  struct UserHost *ush;
  struct ResvChannel *rch;

  /* client table: chained via hnext */
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((cl = hash_get_bucket(HASH_TYPE_CLIENT, i)) != NULL)
    {
      int len = 0;
      ++buckets;
      for (; cl != NULL; cl = cl->hnext)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one(source_p, ":%s NOTICE %s :Client: entries: %d buckets: %d "
             "max chain: %d", me.name, source_p->name, count, buckets, max_chain);

  /* channel table: chained via hnextch */
  count = 0;
  buckets = 0;
  max_chain = 0;
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((ch = hash_get_bucket(HASH_TYPE_CHANNEL, i)) != NULL)
    {
      int len = 0;
      ++buckets;
      for (; ch != NULL; ch = ch->hnextch)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one(source_p, ":%s NOTICE %s :Channel: entries: %d buckets: %d "
             "max chain: %d", me.name, source_p->name, count, buckets, max_chain);

  /* reserved-channel table: chained via hnext */
  count = 0;
  buckets = 0;
  max_chain = 0;
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((rch = hash_get_bucket(HASH_TYPE_RESERVED, i)) != NULL)
    {
      int len = 0;
      ++buckets;
      for (; rch != NULL; rch = rch->hnext)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one(source_p, ":%s NOTICE %s :Resv: entries: %d buckets: %d "
             "max chain: %d", me.name, source_p->name, count, buckets, max_chain);

  /* ID table: chained via idhnext */
  count = 0;
  buckets = 0;
  max_chain = 0;
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((icl = hash_get_bucket(HASH_TYPE_ID, i)) != NULL)
    {
      int len = 0;
      ++buckets;
      for (; icl != NULL; icl = icl->idhnext)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one(source_p, ":%s NOTICE %s :Id: entries: %d buckets: %d "
             "max chain: %d", me.name, source_p->name, count, buckets, max_chain);

  /* user@host table: chained via next */
  count = 0;
  buckets = 0;
  max_chain = 0;
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((ush = hash_get_bucket(HASH_TYPE_USERHOST, i)) != NULL)
    {
      int len = 0;
      ++buckets;
      for (; ush != NULL; ush = ush->next)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one(source_p, ":%s NOTICE %s :UserHost: entries: %d buckets: %d "
             "max chain: %d", me.name, source_p->name, count, buckets, max_chain);
}
/*! \brief HASH command handler
 *
 * Reports occupancy statistics (total entries, non-empty buckets, longest
 * collision chain) for each of the server's hash tables — clients,
 * channels, IDs, and user\@host entries — sending one NOTICE per table
 * back to the requesting operator. Each table uses its own chain link
 * field (hnext / hnextch / idhnext / next), hence the repeated loop bodies.
 *
 * \param source_p Pointer to allocated Client struct from which the message
 *                 originally comes from. This can be a local or remote client.
 * \param parc     Integer holding the number of supplied arguments.
 * \param parv     Argument vector where parv[0] .. parv[parc-1] are non-NULL
 *                 pointers.
 * \return 0 always (command handled).
 * \note Valid arguments for this command are:
 *       - parv[0] = command
 */
static int mo_hash(struct Client *source_p, int parc, char *parv[])
{
  unsigned int i = 0;
  unsigned int max_chain = 0;  /* longest chain seen in the current table */
  unsigned int buckets = 0;    /* number of non-empty buckets */
  unsigned int count = 0;      /* total entries */
  const struct Client *cl = NULL;
  const struct Client *icl = NULL;
  const struct Channel *ch = NULL;
  const struct UserHost *ush = NULL;

  /* client table: chained via hnext */
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((cl = hash_get_bucket(HASH_TYPE_CLIENT, i)))
    {
      unsigned int len = 0;
      ++buckets;
      for (; cl; cl = cl->hnext)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one_notice(source_p, &me, ":Client: entries: %u buckets: %u "
                    "max chain: %u", count, buckets, max_chain);

  /* channel table: chained via hnextch */
  count = 0;
  buckets = 0;
  max_chain = 0;
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((ch = hash_get_bucket(HASH_TYPE_CHANNEL, i)))
    {
      unsigned int len = 0;
      ++buckets;
      for (; ch; ch = ch->hnextch)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one_notice(source_p, &me, ":Channel: entries: %u buckets: %u "
                    "max chain: %u", count, buckets, max_chain);

  /* ID table: chained via idhnext */
  count = 0;
  buckets = 0;
  max_chain = 0;
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((icl = hash_get_bucket(HASH_TYPE_ID, i)))
    {
      unsigned int len = 0;
      ++buckets;
      for (; icl; icl = icl->idhnext)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one_notice(source_p, &me, ":Id: entries: %u buckets: %u "
                    "max chain: %u", count, buckets, max_chain);

  /* user@host table: chained via next */
  count = 0;
  buckets = 0;
  max_chain = 0;
  for (i = 0; i < HASHSIZE; ++i)
  {
    if ((ush = hash_get_bucket(HASH_TYPE_USERHOST, i)))
    {
      unsigned int len = 0;
      ++buckets;
      for (; ush; ush = ush->next)
        ++len;
      if (len > max_chain)
        max_chain = len;
      count += len;
    }
  }
  sendto_one_notice(source_p, &me, ":UserHost: entries: %u buckets: %u "
                    "max chain: %u", count, buckets, max_chain);
  return 0;
}