void cmmjl_free(void *x){ // An instance has been deleted, so we need to go through and free all // of the data we allocated for it. t_cmmjl_obj *o = cmmjl_obj_get(x); int i; if(o->osc_address_methods){ linklist_clear(o->osc_address_methods); free(o->osc_address_methods); } cmmjl_obj_instance_mark_free(x, o->instance); if(o->osc_scheduler){ cmmjl_osc_schedule_free(o->osc_scheduler); free(o->osc_scheduler); } if(o->entrance_count_tab){ hashtab_clear(o->entrance_count_tab); free(o->entrance_count_tab); } hashtab_delete(_cmmjl_obj_tab, x); if(hashtab_getsize(_cmmjl_obj_tab) == 0){ free(_cmmjl_obj_tab); hashtab_clear(_cmmjl_instance_count); free(_cmmjl_instance_count); } }
static const char *my_remove(struct HashTab *h, int value) { struct MyNode tmp, *my; void **p; int key = value % cf_mod; tmp.value = value; p = hashtab_lookup(h, key, false, &tmp); if (!p) return "NEXIST"; my = *p; if (my->value != value) return "WRONG"; hashtab_delete(h, key, &tmp); free(my); p = hashtab_lookup(h, key, false, &tmp); if (p) return "EXISTS?"; return "OK"; }
/* TODO: Looks fishy */ int bpool_delete(struct bpool *pool, char *id, int write_back) { if(pool && id) { struct bpool_node *node; #ifdef BPOOL_ENABLE_LOCKS assert(0 == __bpool_lock(500, &pool->big_lock)); #endif node = hashtab_search(pool->table, id); if(node) { #ifdef BPOOL_ENABLE_LOCKS assert(0 == __bpool_lock(501, &node->page_lock)); #endif assert(!node->dirty); hashtab_delete(pool->table, id); /* TODO: add facility to reset blocks */ /*memset(node->id, 0, pool->PAGE_ID_SIZE); memset(node->buf, 0, pool->PAGE_SIZE);*/ node->refbit = 0; node->valid = 0; node->dirty = 0; #ifdef BPOOL_ENABLE_LOCKS assert(0 == __bpool_unlock(501, &node->page_lock)); #endif } #ifdef BPOOL_ENABLE_LOCKS assert(0 == __bpool_unlock(500, &pool->big_lock)); #endif } // if pool && id return -2; }
/* Interactive smoke test for the hashtab API: three REPL-style loops
 * (insert, search, delete), each terminated by typing "quit", with a
 * table dump after each phase. Optionally traces allocations via
 * mtrace() when built with -DMEMORY_TEST.
 *
 * NOTE(review): scanf("%s", ...) into p->key / p->data is unbounded --
 * a long token overflows the test_node buffers (sizes not visible
 * here); a width specifier matching the field size should be added.
 * scanf return values are also unchecked, so EOF loops forever.
 * "hashtab_destory" is the library's (misspelled) API name -- do not
 * "fix" it here. */
int main () {
	int i = 0;   /* NOTE(review): unused */
	int res = 0;
	char *pres = NULL;
	hashtab_node * node = NULL;
	struct test_node *p = NULL;
	hashtab *h = NULL;
#ifdef MEMORY_TEST
	/* Route glibc malloc tracing to 1.txt for leak checking. */
	setenv("MALLOC_TRACE","1.txt",1);
	mtrace();
#endif
	h = hashtab_create(5,hashtab_hvalue,hashtab_keycmp,hashtab_node_free);
	assert(h!= NULL);
	/* Phase 1: read key/value pairs and insert until "quit". */
	while(1) {
		p = (struct test_node*)malloc(sizeof(struct test_node));
		assert(p != NULL);
		printf("\r\n 请输入key 和value,当可以等于\"quit\"时退出");
		scanf("%s",p->key);
		scanf("%s",p->data);
		if(strcmp(p->key,"quit") == 0) {
			free(p);
			break;
		}
		res = hashtab_insert(h,p->key,p->data);
		if (res != 0) {
			/* Insert failed: table does not own p, free it here. */
			free(p);
			printf("\r\n key[%s],data[%s] insert failed %d",p->key,p->data,res);
		} else {
			/* Insert succeeded: ownership of p passes to the table. */
			printf("\r\n key[%s],data[%s] insert success %d",p->key,p->data,res);
		}
	}
	hashtab_dump(h);
	/* Phase 2: look keys up until "quit". p is only a scratch input buffer. */
	while(1) {
		p = (struct test_node*)malloc(sizeof(struct test_node));
		assert(p != NULL);
		printf("\r\n 请输入key 查询value的数值,当可以等于\"quit\"时退出");
		scanf("%s",p->key);
		if(strcmp(p->key,"quit") == 0) {
			free(p);
			break;
		}
		pres = hashtab_search(h,p->key);
		if (pres == NULL) {
			printf("\r\n key[%s] search data failed",p->key);
		} else {
			printf("\r\n key[%s],search data[%s] success",p->key,pres);
		}
		free(p);
	}
	hashtab_dump(h);
	/* Phase 3: delete keys until "quit"; detached nodes are released via
	 * the table's own free callback. */
	while(1) {
		p = (struct test_node*)malloc(sizeof(struct test_node));
		assert(p != NULL);
		printf("\r\n 请输入key 删除节点的数值,当可以等于\"quit\"时退出");
		scanf("%s",p->key);
		if(strcmp(p->key,"quit") == 0) {
			free(p);
			break;
		}
		node = hashtab_delete(h,p->key);
		if (node == NULL) {
			printf("\r\n key[%s] delete node failed ",p->key);
		} else {
			printf("\r\n key[%s],delete data[%s] success",node->key,node->data);
			h->hash_node_free(node);
		}
		free(p);
		hashtab_dump(h);
	}
	hashtab_destory(h);
#ifdef MEMORY_TEST
	muntrace();
#endif
	return 0;
}
/* CLOCK-style victim search: scan the clock hand for partition `part_id`
 * until a usable block is found. Returns a block with its page_lock held
 * (when BPOOL_ENABLE_LOCKS) AND with big_lock (lock id 4) still held --
 * the caller (__bpool_get) is responsible for releasing both.
 *
 * Selection rules per block under the hand:
 *  - under quota and invalid              -> take it immediately
 *  - over quota and invalid, or owned by
 *    a different partition                -> skip (advance hand)
 *  - valid with refbit set                -> clear refbit (second chance)
 *  - valid, no refbit, dirty              -> break out, flush it, retry
 *  - valid, no refbit, clean              -> evict: unhash, take it
 *
 * Aborts the process after 3 * num_blocks full passes without success.
 *
 * NOTE(review): out_loops is unsigned but printed with %d.
 * NOTE(review): the fourth stats increment below bumps
 * part_num_evicts[block->part_id] right after global_num_victims --
 * it looks like it was meant to be part_num_victims; confirm. */
static struct bpool_node * __bpool_find_free_node(struct bpool *cache, unsigned int rd_wr, unsigned part_id, void *tag)
{
	struct bpool_node *block;
	unsigned out_loops = 0;
	unsigned over_quota = BPOOL_FALSE;
	/* Find a free block */
	while(1) {
		out_loops++;
		unsigned dirty = 0;
		unsigned i = 0;
		if(out_loops > 3 * cache->num_blocks ) {
			fprintf(stdout, "Notice: Too many loops! [out_loops: %d]\n", out_loops );
			exit(-1);
		}
#ifdef BPOOL_ENABLE_LOCKS
		assert(0 == __bpool_lock(4, &cache->big_lock));
#endif
		/* One bounded sweep (64 positions) of the clock per big_lock hold. */
		for(i=0; i < 64; i++) {
			/* check quotas */
			if( cache->part_sizes[part_id] >= cache->part_quotas[part_id] ) {
				over_quota = BPOOL_TRUE;
			} else {
				over_quota = BPOOL_FALSE;
			}
			/* read block */
			block = &(cache->clock[ cache->clock_hand[part_id] ]);
			//fprintf(stdout, "Info: Clock hand %u\n", cache->clock_hand[part_id] );
			/* fast lock => fails if already locked */
#ifdef BPOOL_ENABLE_LOCKS
			if(0 == __bpool_trylock(5, &(block->page_lock))) {
#else
			if( 1 ) {
#endif
				/* an invalid block was previously removed */
				if(over_quota == BPOOL_FALSE) {
					if(!block->valid) {
						/* Free slot and we're under quota: claim it.
						 * mset presumably re-initializes id/buf storage. */
						cache->ops.mset(cache->environment, BPOOL_KEY, block->id);
						cache->ops.mset(cache->environment, BPOOL_BUF, block->buf);
						cache->clock_hand[part_id] = (cache->clock_hand[part_id] + 1)%(cache->num_blocks);
						return block;
					}
				}
				/* Not claimable: over quota with a free slot, or the block
				 * belongs to another partition -- skip it. */
				if( (over_quota == BPOOL_TRUE && !block->valid) || (block->valid && block->part_id != part_id) ) {
#ifdef BPOOL_ENABLE_LOCKS
					assert(0 == __bpool_unlock(5, &(block->page_lock)));
#endif
					cache->clock_hand[part_id] = (cache->clock_hand[part_id] + 1)%(cache->num_blocks);
					/*fprintf(stdout, "Notice: Continuing due to me:%u hand:%u valid:%u part_id: %u over_quota: %u\n", part_id, cache->clock_hand[part_id], block->valid, block->part_id, over_quota ); */
					continue;
				}
				G_ASSERT(block->valid, "ASSERT: block must be valid here!!\n", HARD_ASSERT);
				/* valid, refbit set => set refbit = 0 */
				if(block->refbit) {
					/* make refbit = 0, return any block with refbit = 0 */
					block->refbit = 0;
#ifdef BPOOL_ENABLE_LOCKS
					assert(0 == __bpool_unlock(5, &(block->page_lock)));
#endif
				}
				/* valid, !refbit, dirty block => must be cleaned */
				/* since, i came here first, i must clean it */
				else if(block->dirty) {
					//fprintf(stdout, "Info: break for cleaning\n");
					dirty = 1;
					break; /* leave for loop holding page_lock; flush below */
				}
				/* valid, !refbit, clean block, demoted => good for removal */
				else {
					G_ASSERT(block->valid && !block->refbit && !block->dirty, "ASSERT: block must be valid/!refbit/!dirty here!!\n", HARD_ASSERT);
					hashtab_delete(cache->table, (block->id)); /* remove from hashtable */
					/* update sizes */
					cache->global_size--;
					cache->part_sizes[block->part_id]--;
					block->valid = 0;
					cache->ops.mset(cache->environment, BPOOL_KEY, block->id);
					cache->ops.mset(cache->environment, BPOOL_BUF, block->buf);
					cache->clock_hand[part_id] = (cache->clock_hand[part_id] + 1)%(cache->num_blocks);
					/* stats: record evicts/victims */
#ifdef BPOOL_ENABLE_LOCKS
					__bpool_lock(0, &cache->stats_lock );
#endif
					cache->stats.global_num_evicts[rd_wr]++;
					cache->stats.part_num_evicts[part_id][rd_wr]++;
					cache->stats.global_num_victims[rd_wr]++;
					/* NOTE(review): suspected typo -- part_num_victims intended? */
					cache->stats.part_num_evicts[block->part_id][rd_wr]++;
#ifdef BPOOL_ENABLE_LOCKS
					__bpool_unlock(0, &cache->stats_lock );
#endif
					return block;
				}
			} /* end trylock() */
			else {
				/* fprintf(stdout, "Don't evict a locked page!!\n"); */
			}
			/* make the clock go around */
			cache->clock_hand[part_id] = (cache->clock_hand[part_id] + 1)%(cache->num_blocks);
		} /* end for loop */
#ifdef BPOOL_ENABLE_LOCKS
		/* release big lock */
		assert(0 == __bpool_unlock(4, &cache->big_lock));
#endif
		/* i exited because my block was dirty */
		/* i'm still holding the page_lock for this node */
		if(dirty) {
			int ret;
			assert(block);
			assert(block->dirty);
			assert(block->valid);
			double flush_start_time, flush_stop_time;
			flush_start_time = __bpool_time();
			cache->ops.flush(cache->environment, block, &ret, tag);
			flush_stop_time = __bpool_time();
			block->dirty = 0;
#ifdef BPOOL_ENABLE_LOCKS
			assert(0 == __bpool_unlock(5, &(block->page_lock)));
#endif
			/* stats: record flush */
#ifdef BPOOL_ENABLE_LOCKS
			__bpool_lock(0, &cache->stats_lock );
#endif
			cache->stats.global_num_flushes[rd_wr]++;
			cache->stats.part_num_flushes[part_id][rd_wr]++;
			cache->stats.global_lat_flushes[rd_wr] += (flush_stop_time-flush_start_time);
			cache->stats.part_lat_flushes[part_id][rd_wr] += (flush_stop_time - flush_start_time);
#ifdef BPOOL_ENABLE_LOCKS
			__bpool_unlock(0, &cache->stats_lock );
#endif
		}
	} /* end while loop */
} /* end find_free_node() */

/* Core lookup/fill routine behind bpool_read/bpool_write. Looks `id` up
 * in the hash table; on a hit, copies data between node->buf and `buf`
 * (direction chosen by rw) and returns CACHE_HIT. On a miss, grabs a
 * victim block via __bpool_find_free_node(), installs it under `id`,
 * calls ops.fill() to page the data in, and returns CACHE_MISS.
 * Returns -1 (or the negative fill() result) on error.
 *
 * NOTE(review): part_id is unsigned, so the `part_id < 0` test below is
 * always false -- only the upper-bound check is effective.
 * NOTE(review): `first` is assigned but never read. */
static int __bpool_get(struct bpool *pool, char *id, char *buf, int rw, void *tag)
{
	if(pool && id && buf) {
		struct bpool_node *node = NULL;
		unsigned first = 1;
		unsigned part_id = 0;
		/* Find the partition id */
		if(pool->ops.part) {
			part_id = pool->ops.part( pool->environment, tag, id );
			if(part_id < 0 || part_id >= pool->num_partitions ) {
				fprintf(stderr, "Warn: part_id was incorrect! I made it 0.\n");
				part_id = 0;
			}
		}
		/* stat: record get */
#ifdef BPOOL_ENABLE_LOCKS
		__bpool_lock(0, &pool->stats_lock );
#endif
		pool->stats.global_num_gets[rw]++;
		pool->stats.part_num_gets[part_id][rw]++;
#ifdef BPOOL_ENABLE_LOCKS
		__bpool_unlock(0, &pool->stats_lock );
#endif
		/* Access the buffer pool */
		while(1) {
			/* grab the pointer to the block */
#ifdef BPOOL_ENABLE_LOCKS
			assert(0 == __bpool_lock(1, &pool->big_lock));
#endif
			node = hashtab_search(pool->table, (void *) id);
#ifdef BPOOL_ENABLE_LOCKS
			assert(0 == __bpool_unlock(1, &pool->big_lock));
#endif
			/* node - cannot be deleted but identity can be changed by the time I lock it */
			if(node) {
#ifdef BPOOL_ENABLE_LOCKS
				assert(0 == __bpool_lock(2, &(node->page_lock)));
#endif
				/* doesn't match what I want */
				if(pool->ops.compare(pool->table, id, node->id) != 0) {
#ifdef BPOOL_ENABLE_LOCKS
					assert(0 == __bpool_unlock(2, &(node->page_lock)));
#endif
					first = 0;
					continue; /* pointer was bad -- try again */
				}
				/* yay!! this is what I want */
				else {
					unsigned old_part_id = node->part_id;
					node->part_id = part_id;
					node->refbit = 1; /* second-chance bit for the CLOCK scan */
					if(rw == BPOOL_READ) {
						pool->ops.mread(pool->environment, BPOOL_BUF, node->buf, buf);
					} else {
						pool->ops.mwrite(pool->environment, BPOOL_BUF, node->buf, buf);
						node->dirty = 1;
					}
#ifdef BPOOL_ENABLE_LOCKS
					assert(0 == __bpool_unlock(2, &(node->page_lock)));
#endif
					/* switched partitions -- then update counts */
					if(old_part_id != part_id) {
						G_ASSERT(old_part_id >=0 && old_part_id < pool->num_partitions, "ASSERT: old_part_id is invalid!!", HARD_ASSERT);
#ifdef BPOOL_ENABLE_LOCKS
						assert(0 == __bpool_lock(3, &(pool->big_lock)));
#endif
						pool->part_sizes[old_part_id]--;
						pool->part_sizes[part_id]++;
#ifdef BPOOL_ENABLE_LOCKS
						assert(0 == __bpool_unlock(3, &(pool->big_lock)));
#endif
					}
					/* stat: record hit */
#ifdef BPOOL_ENABLE_LOCKS
					__bpool_lock(0, &pool->stats_lock );
#endif
					pool->stats.global_num_hits[rw]++;
					pool->stats.part_num_hits[part_id][rw]++;
#ifdef BPOOL_ENABLE_LOCKS
					__bpool_unlock(0, &pool->stats_lock );
#endif
					return CACHE_HIT;
				}
			} /* end if(node) */
			/* too bad, the node is not even in hashtable */
			else {
				/* make sure everyone is behaving correctly */
				__bpool_behave(pool);
				/* searches clock for free node - returns with lock on hashtable and page */
				node = __bpool_find_free_node(pool, rw, part_id, tag);
				if(node) {
					/* someone else added it in the meantime */
					if(hashtab_search(pool->table, (void *) id)) {
						first = 0;
#ifdef BPOOL_ENABLE_LOCKS
						assert(0 == __bpool_unlock(4, &pool->big_lock));
						assert(0 == __bpool_unlock(5, &node->page_lock));
#endif
						continue;
					}
					/*else*/ {
#ifdef BPOOL_ENABLE_LOCKS
						__bpool_lock(0, &pool->stats_lock );
#endif
						pool->stats.global_num_misses[rw]++;
						pool->stats.part_num_misses[part_id][rw]++;
#ifdef BPOOL_ENABLE_LOCKS
						__bpool_unlock(0, &pool->stats_lock );
#endif
						/* copy key into node->id */
						pool->ops.mwrite(pool->environment, BPOOL_KEY, node->id, id);
						hashtab_insert(pool->table, (void *) node->id, (void *) node);
						node->part_id = part_id;
						node->valid = 1;
						/* update sizes */
						pool->global_size++;
						pool->part_sizes[part_id]++;
						/* unlock big_lock so other reads proceed */
#ifdef BPOOL_ENABLE_LOCKS
						assert(0 == __bpool_unlock(4, &pool->big_lock));
#endif
						/* page fault to fill the block */
						int ret=0;
						double fill_start_time, fill_stop_time;
						fill_start_time = __bpool_time();
						pool->ops.fill(pool->environment, node, &ret, tag);
						fill_stop_time = __bpool_time();
						/* stats: record fill */
#ifdef BPOOL_ENABLE_LOCKS
						__bpool_lock(0, &pool->stats_lock );
#endif
						pool->stats.global_num_fills[rw]++;
						pool->stats.part_num_fills[part_id][rw]++;
						pool->stats.global_lat_fills[rw] += (fill_stop_time - fill_start_time);
						pool->stats.part_lat_fills[part_id][rw] += (fill_stop_time - fill_start_time);
#ifdef BPOOL_ENABLE_LOCKS
						__bpool_unlock(0, &pool->stats_lock );
#endif
						if(ret >= 0) {
							if(!rw) {
								pool->ops.mread(pool->environment, BPOOL_BUF, node->buf, buf);
							} else {
								pool->ops.mwrite(pool->environment, BPOOL_BUF, node->buf, buf);
								node->dirty = 1;
							}
						} else {
							/* fill failed: undo the insert and propagate the error */
							G_ASSERT(0, "BPOOL: fill failed!!", HARD_ASSERT);
							bpool_delete(pool,id, 0);
							return ret;
						}
#ifdef BPOOL_ENABLE_LOCKS
						assert(0 == __bpool_unlock(5, &node->page_lock));
#endif
						return CACHE_MISS;
					}
				} /* end if(node) */
				/* no free node - ignore for now */
				G_ASSERT(0, "ERROR: bpool_get(): can't find any free nodes!\n", HARD_ASSERT);
				return -1;
			} /* end else - node == NULL */
		}
	}
	G_ASSERT(0, "ERROR: bpool_get(): some of the variables are NULL!\n", HARD_ASSERT);
	return -1;
}

/**
 * bpool_read
 * ----------
 * returns data from the buffer pool
 * calls fill() to fill empty block (if needed)
 *
 * Returns bytes read.
-1 on error * Only TRX thread use this function so do mining here **/ int bpool_read(struct bpool *pool, char *id, char *buf, void *tag) { int ret = 0; double start_time, stop_time; start_time = __bpool_time(); ret = __bpool_get(pool, id, buf, BPOOL_READ, tag); stop_time = __bpool_time(); #ifdef BPOOL_ENABLE_LOCKS __bpool_lock(0, &pool->stats_lock ); #endif /* global numbers */ pool->stats.global_lat_gets[BPOOL_STAT_READ] += (stop_time - start_time); if(ret == CACHE_HIT) { pool->stats.global_lat_hits[BPOOL_STAT_READ] += (stop_time - start_time); } if(ret == CACHE_MISS) { pool->stats.global_lat_misses[BPOOL_STAT_READ] += (stop_time - start_time); } /* partition numbers */ if( pool->ops.part != NULL ) { unsigned part_id = pool->ops.part( pool->environment, tag, id ); pool->stats.part_lat_gets[part_id][BPOOL_STAT_READ] += (stop_time - start_time); if(ret == CACHE_HIT) { pool->stats.part_lat_hits[part_id][BPOOL_STAT_READ] += (stop_time - start_time); } if(ret == CACHE_MISS) { pool->stats.part_lat_misses[part_id][BPOOL_STAT_READ] += (stop_time - start_time); } } #ifdef BPOOL_ENABLE_LOCKS __bpool_unlock(0, &pool->stats_lock ); #endif return ret; }