void * /* returns value associated with key */
hashtable_search(struct hashtable *h, void *k)
{
    struct entry *e;
    unsigned int hashvalue, index;
    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);
    e = h->table[index];
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k))) return e->v;
        e = e->next;
    }
    return NULL;
}
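/* A minimal sketch of indexFor(), which these excerpts use but never define.
 * In Christopher Clark's hashtable, from which several of these variants
 * appear to derive, it is a plain modulo of the hash value by the current
 * table length: */
static inline unsigned int
indexFor(unsigned int tablelength, unsigned int hashvalue)
{
    return (hashvalue % tablelength);
}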
void * /* returns value associated with key */
hashtable_remove(struct hashtable *h, void *k)
{
    /* TODO: consider compacting the table when the load factor drops enough,
     *       or provide a 'compact' method. */
    struct entry *e;
    struct entry **pE;
    void *v;
    unsigned int hashvalue, index;

    // Use global read lock for hashing/indexing
    rwlock_rdlock(&h->globallock);
    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue); /* reuse hashvalue rather than rehashing */
    rwlock_rdunlock(&h->globallock);

    // Use local write lock for removal
    rwlock_wrlock(&h->locks[index]);
    pE = &(h->table[index]);
    e = *pE;
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            *pE = e->next;
            // Use write lock for entry count decrement
            rwlock_wrlock(&h->entrycountlock);
            h->entrycount--;
            rwlock_wrunlock(&h->entrycountlock);
            v = e->v;
            freekey(e->k);
            free(e);
            rwlock_wrunlock(&h->locks[index]);
            return v;
        }
        pE = &(e->next);
        e = e->next;
    }
    rwlock_wrunlock(&h->locks[index]);
    return NULL;
}
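/* A minimal sketch of the rwlock_* wrappers assumed above; they are not
 * defined in these excerpts and are presumed here to be thin wrappers over
 * POSIX rwlocks: */
#include <pthread.h>

static inline void rwlock_rdlock(pthread_rwlock_t *l)   { pthread_rwlock_rdlock(l); }
static inline void rwlock_rdunlock(pthread_rwlock_t *l) { pthread_rwlock_unlock(l); }
static inline void rwlock_wrlock(pthread_rwlock_t *l)   { pthread_rwlock_wrlock(l); }
static inline void rwlock_wrunlock(pthread_rwlock_t *l) { pthread_rwlock_unlock(l); }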
bool RKModificationTracker::removeObject (RObject *object, RKEditor *editor, bool removed_in_workspace) {
	RK_TRACE (OBJECTS);
	// TODO: allow more than one editor per object
	// WARNING: This does not work, if a sub-object is being edited!
	RKEditor *ed = objectEditor (object);
	RK_ASSERT (object);
	RK_ASSERT (!((editor) && (!ed)));
	RK_ASSERT (!(removed_in_workspace && editor));

	if (removed_in_workspace) {
		if (ed) {
			if (KMessageBox::questionYesNo (0, i18n ("The object '%1' was removed from workspace or changed to a different type of object, but is currently opened for editing. Do you want to restore it?", object->getFullName ()), i18n ("Restore object?")) == KMessageBox::Yes) {
				ed->restoreObject (object);
				return false;
			}
		}
	} else {
		if (editor || ed) {
			if (KMessageBox::questionYesNo (0, i18n ("Do you really want to remove the object '%1'? The object is currently opened for editing, it will be removed in the editor, too. There's no way to get it back.", object->getFullName ()), i18n ("Remove object?")) != KMessageBox::Yes) {
				return false;
			}
		} else {
			// TODO: check for other editors editing this object
			if (KMessageBox::questionYesNo (0, i18n ("Do you really want to remove the object '%1'? There's no way to get it back.", object->getFullName ()), i18n ("Remove object?")) != KMessageBox::Yes) {
				return false;
			}
		}
	}

	RK_ASSERT (object);
	RK_ASSERT (object->getContainer ());

	if (!updates_locked) {
		QModelIndex object_index = indexFor (object->getContainer ());
		int object_row = object->getContainer ()->getIndexOf (object);
		RK_ASSERT (object_row >= 0);
		beginRemoveRows (object_index, object_row, object_row);
	}

	if (!updates_locked) sendListenerNotification (RObjectListener::ObjectRemoved, object, 0, 0, 0);

	object->remove (removed_in_workspace);

	if (!updates_locked) endRemoveRows ();
	return true;
}
int
chainhash_insert_only(struct chainhash *h, void *k, void *v)
{
    /* This method allows duplicate keys - but they shouldn't be used */
    uint32_t index;
    struct entry *e;

    e = (struct entry *)malloc(sizeof(struct entry));
    if (NULL == e)
        return -1;
    ++(h->entrycount);
    e->h = chainhash_hash(h, k);
    index = indexFor(h->tablelength, e->h);
    e->k = k;
    e->v = v;
    e->next = h->table[index];
    h->table[index] = e;
    return 0;
}
/**
 * chainhash_change
 *
 * function to change the value associated with a key, where there already
 * exists a value bound to the key in the chainhash.
 * Source due to Holger Schemel.
 *
 * @h: chainhash
 * @k: key for the value
 * @v: new value to use
 * @free_old: free the old value if it exists
 */
int
chainhash_change(struct chainhash *h, void *k, void *v, int free_old)
{
    struct entry *e;
    uint32_t hashvalue, index;

    hashvalue = chainhash_hash(h, k);
    index = indexFor(h->tablelength, hashvalue);
    e = h->table[index];
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            if (free_old)
                free(e->v);
            e->v = v;
            return 0;
        }
        e = e->next;
    }
    return -1;
}
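/* A hypothetical usage sketch (upsert() is illustrative, not part of the
 * source): chainhash_change succeeds only when the key is already bound
 * (returning 0), so a typical update-or-insert pairs it with
 * chainhash_insert_only: */
static int
upsert(struct chainhash *h, void *k, void *v)
{
    if (chainhash_change(h, k, v, 1) == 0) /* 0: value replaced in place */
        return 0;
    return chainhash_insert_only(h, k, v); /* -1 from change: key absent */
}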
int
stm_hashtable_insert(char *name, struct hashtable *h, unsigned long *k,
                     unsigned long *v, unsigned long v_size)
{
    unsigned int index;
    struct entry *e;

    hash_lock();
    e = (struct entry *)pos_malloc(name, sizeof(struct entry));
    if (e == NULL) PR_DEBUG("enull\n");
    e->v = (unsigned long *)pos_malloc(name, v_size);
    // TM_START(1, RW) ;
    if (e->v == NULL) PR_DEBUG("e->v null\n");
    hash_unlock();

    TM_START(1, RW);
    /* NOTE: the TM_STOREs below store the pointers k and v themselves; the
     * commented memcpy lines copied the bodies into pos-allocated buffers
     * instead. */
    // e->h = hash(h, k) ;
    // memcpy(e->k, k, sizeof(unsigned long)*KEY_SIZE) ;
    // memcpy(e->v, v, v_size) ;
    TM_STORE(&e->h, hash(h, k));
    TM_STORE(&e->k, k);
    TM_STORE(&e->v, v);

    // TM_START(1, RW) ;
    int entrycount = (int)TM_LOAD(&h->entrycount);
    int loadlimit = (int)TM_LOAD(&h->loadlimit);
    TM_STORE(&h->entrycount, entrycount + 1);
    if (entrycount + 1 > loadlimit) {
        /* Expansion is disabled for now:
         * h_lock(); shashtable_expand(name, h); h_unlock(); */
    }

    e->h = hash(h, k);
    index = indexFor((int)TM_LOAD(&h->tablelength), e->h);
    e->next = (struct entry *)TM_LOAD(&h->table[index]);
    pos_clflush_cache_range(e, sizeof(struct entry));
    pos_clflush_cache_range(e->v, v_size);
    TM_STORE(&h->table[index], e);
    pos_clflush_cache_range(&h->table[index], sizeof(unsigned long));
    TM_COMMIT;
    return 0;
}
/* hashtable_change
 *
 * function to change the value associated with a key, where there already
 * exists a value bound to the key in the hashtable.
 * Source due to Holger Schemel.
 *
 * Returns -1 if the value was changed, 0 if the key was not found.
 */
int
hashtable_change(struct hashtable *h, void *k, void *v)
{
    struct entry *e;
    unsigned int hashvalue, index_;
    hashvalue = hashtable_hash(h, k);
    index_ = indexFor(h->tablelength, hashvalue);
    e = h->table[index_];
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            FREE(e->v);
            e->v = v;
            return -1;
        }
        e = e->next;
    }
    return 0;
}
int
stm_hashtable_remove(char *name, struct hashtable *h, unsigned long *k)
{
    struct entry *prev, *b;
    unsigned long hashvalue, index;
    int result = 0; /* whether a matching node was found */

    TM_START(0, RW); /* transaction start */
    hashvalue = hash(h, k);
    int tablelength = (int)TM_LOAD(&h->tablelength);
    index = indexFor(tablelength, hashvalue);
    prev = b = (struct entry *)TM_LOAD(&h->table[index]);
    result = 0;
    while (b != NULL) {
        if ((hashvalue == b->h) && default_key_eq_fn(k, b->k)) {
            result = 1;
            goto ret;
        }
        prev = b;
        b = (struct entry *)TM_LOAD(&b->next);
    }
ret:
    if (result) { /* matching entry, so end normally */
        if (prev == b) { /* match was at the head of the chain */
            TM_STORE(&h->table[index], TM_LOAD(&b->next));
            pos_clflush_cache_range(&h->table[index], sizeof(struct entry *));
        } else {
            TM_STORE(&prev->next, TM_LOAD(&b->next));
            pos_clflush_cache_range(&prev->next, sizeof(struct entry *));
        }
        TM_STORE(&h->entrycount, (unsigned long)(h->entrycount - 1));
        pos_clflush_cache_range(&h->entrycount, sizeof(unsigned long));
        /* pos_free part. */
    }
    TM_COMMIT;
    return result;
}
int /* returns zero if not found */
hashtable_iterator_search(struct hashtable_itr *itr,
                          struct hashtable *h, void *k)
{
#ifdef HASHTABLE_THREADED
    pthread_mutex_lock(&h->mutex);
#endif
    struct entry *e, *parent;
    unsigned int hashvalue, index;
    int ret;

    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);

    e = h->table[index];
    parent = NULL;
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            itr->index = index;
            itr->e = e;
            itr->parent = parent;
            itr->h = h;
            ret = -1;
            goto egress;
        }
        parent = e;
        e = e->next;
    }
    ret = 0;

egress:
#ifdef HASHTABLE_THREADED
    pthread_mutex_unlock(&h->mutex);
#endif
    return ret;
}
void RKModificationTracker::moveObject (RContainerObject *parent, RObject* child, int old_index, int new_index) {
	RK_TRACE (OBJECTS);

	QModelIndex parent_index;

	if (!updates_locked) {
		parent_index = indexFor (parent);
		beginRemoveRows (parent_index, old_index, old_index);
	}
	RK_ASSERT (parent->findChildByIndex (old_index) == child);
	parent->removeChildNoDelete (child);
	if (!updates_locked) {
		endRemoveRows ();
		beginInsertRows (parent_index, new_index, new_index);
	}
	parent->insertChild (child, new_index);
	RK_ASSERT (parent->findChildByIndex (new_index) == child);
	if (!updates_locked) {
		endInsertRows ();
		sendListenerNotification (RObjectListener::ChildMoved, parent, old_index, new_index, 0);
	}
}
void UniformGrid::populateCells(const std::list<Model>& models) {
  // Populate the cells with the models
  std::cerr << "Populating the cells: " << cells.size() << std::endl;
  for (const auto& model : models) {
    auto bbox = model.getBoundingBox();
    Point3D min = bbox.front();
    Point3D max = bbox.front();
    getMinAndMax(bbox, &min, &max);

    auto minCoord = coordAt(min);
    auto maxCoord = coordAt(max);

    for (size_t x = minCoord[0]; x <= maxCoord[0]; ++x) {
      for (size_t y = minCoord[1]; y <= maxCoord[1]; ++y) {
        for (size_t z = minCoord[2]; z <= maxCoord[2]; ++z) {
          UniformGrid::CellCoord coord(x, y, z);
          if (intersectsCell(model, coord)) {
            cells[indexFor(coord)].models.push_back(&model);
          }
        }
      }
    }
  }
  std::cerr << "Done populating cells" << std::endl;
}
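/* indexFor(CellCoord) for the grid is not shown in these excerpts. Assuming a
 * cubic grid of sideLength cells per axis stored in row-major order, a
 * typical mapping would be (index_for_cell is illustrative): */
static size_t
index_for_cell(size_t x, size_t y, size_t z, size_t sideLength)
{
    return x + y * sideLength + z * sideLength * sideLength;
}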
int
hashtable_insert(struct hashtable *h, void *k, void *v)
{
    /* This method allows duplicate keys - but they shouldn't be used */
    unsigned int index;
    struct entry *e;

    if (++(h->entrycount) > h->loadlimit)
    {
        /* Ignore the return value. If expand fails, we should
         * still try cramming just this value into the existing table
         * -- we may not have memory for a larger table, but one more
         * element may be ok. Next time we insert, we'll try expanding again.*/
        hashtable_expand(h);
    }
    e = (struct entry *)malloc(sizeof(struct entry));
    if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
    e->h = hash(h,k);
    index = indexFor(h->tablelength,e->h);
    e->k = k;
    e->v = v;
    e->next = h->table[index];
    h->table[index] = e;
    return -1;
}
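/* A hypothetical usage sketch for this insert/search/remove family, assuming
 * the create_hashtable()/hashtable_destroy() API that customarily accompanies
 * it (as in Christopher Clark's hashtable); my_hash and my_key_eq are
 * illustrative. Note the convention: insert returns -1 on success, 0 on oom. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int my_hash(void *k)
{
    unsigned int h = 5381; /* djb2 string hash, illustrative only */
    for (unsigned char *p = k; *p; ++p) h = h * 33 + *p;
    return h;
}

static int my_key_eq(void *a, void *b) { return strcmp(a, b) == 0; }

int main(void)
{
    struct hashtable *h = create_hashtable(16, my_hash, my_key_eq);
    char *k = strdup("answer");
    int *v = malloc(sizeof *v);
    *v = 42;
    if (!hashtable_insert(h, k, v)) return 1; /* 0 means out of memory */
    int *found = hashtable_search(h, "answer");
    printf("%d\n", found ? *found : -1);      /* prints 42 */
    free(hashtable_remove(h, "answer"));      /* remove returns the value */
    hashtable_destroy(h, 1);                  /* 1: also free remaining values */
    return 0;
}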
errorCode hashtable_insert(struct hashtable *h, String key, Index value)
{
    /* This method allows duplicate keys - but they shouldn't be used */
    unsigned int index;
    struct entry *e;

    if (++(h->entrycount) > h->loadlimit)
    {
        /* Ignore the return value. If expand fails, we should
         * still try cramming just this value into the existing table
         * -- we may not have memory for a larger table, but one more
         * element may be ok. Next time we insert, we'll try expanding again.*/
        hashtable_expand(h);
    }
    e = (struct entry *)EXIP_MALLOC(sizeof(struct entry));
    if (NULL == e) { --(h->entrycount); return EXIP_MEMORY_ALLOCATION_ERROR; } /*oom*/
    e->hash = h->hashfn(key); // hash(h, k, len);
    index = indexFor(h->tablelength, e->hash);
    e->key = key;
    e->value = value;
    e->next = h->table[index];
    h->table[index] = e;
    return EXIP_OK;
}
void * /* returns value associated with key */
hashtable_remove(struct hashtable *h, void *k)
{
    /* TODO: consider compacting the table when the load factor drops enough,
     *       or provide a 'compact' method. */
#ifdef HASHTABLE_THREADED
    pthread_mutex_lock(&h->mutex);
#endif
    struct entry *e;
    struct entry **pE;
    void *v;
    unsigned int hashvalue, index;

    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue); /* reuse hashvalue rather than rehashing */
    pE = &(h->table[index]);
    e = *pE;
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            *pE = e->next;
            h->entrycount--;
            v = e->v;
            freekey(e->k);
            free(e);
#ifdef HASHTABLE_THREADED
            /* Unlock on the successful path too, not only before the
             * not-found return below */
            pthread_mutex_unlock(&h->mutex);
#endif
            return v;
        }
        pE = &(e->next);
        e = e->next;
    }
#ifdef HASHTABLE_THREADED
    pthread_mutex_unlock(&h->mutex);
#endif
    return NULL;
}
/**
 * Returns value associated with key
 */
void *
hashtable_search(struct hashtable *h, const void *k)
{
    struct entry *e;
    unsigned int hashvalue, index_;

    if (h == NULL) {
        /* Check that the hashtable does exist. */
        printf("Internal error: cannot search in a NULL hashtable!\n");
        exit(-1);
    }
    hashvalue = hash(h,k);
    index_ = indexFor(h->tablelength,hashvalue);
    e = h->table[index_];
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k))) {
            return e->v;
        }
        e = e->next;
    }
    return NULL;
}
std::set<const Model*> UniformGrid::getModels(const Ray& ray) const {
  std::set<const Model*> models;

  Point3D nextT(0, 0, 0);
  // The point *within* the grid where the ray first intersected it
  Point3D rayStartPoint(0, 0, 0);

  if (!inGrid(ray.start)) {
    const auto& sp = startPoint;
    // Not in the grid: We will use a cube the size of the whole grid to find
    // the point of entry into the grid
    auto gridCubeInverse =
        (translationMatrix(sp[0], sp[1], sp[2]) * gridSizeScaleMatrix).invert();
    HitRecord hr;
    if (!utilityCube.intersects(ray, &hr, gridCubeInverse)) {
      // Does not intersect the grid at all
      return models;
    }
    nextT[0] = hr.t;
    nextT[1] = hr.t;
    nextT[2] = hr.t;
    rayStartPoint = ray.at(hr.t);
  } else {
    rayStartPoint = ray.start;
  }

  // Place in the grid we are currently stepping through
  CellCoord gridCoord = coordAt(rayStartPoint);

  Vector3D dir(
      std::abs(ray.dir[0]), std::abs(ray.dir[1]), std::abs(ray.dir[2]));

  // These values are in units of t: how far we must go to travel a whole cell
  Vector3D dt(
      isZero(dir[0]) ? 0 : cellSize / dir[0],
      isZero(dir[1]) ? 0 : cellSize / dir[1],
      isZero(dir[2]) ? 0 : cellSize / dir[2]
  );

  {
    // The bottom left corner of the cell we are starting in
    Point3D gsp = pointAt(gridCoord); // "Grid start point"

    // Determine how far, in units of t, we have to go in any direction
    // to reach the next cell.
    // If we are going "forwards" in a coordinate then we need to travel to
    // gsp + cellSize. If we are going "backwards" in a coordinate then we
    // need to travel to only gsp.
    for (int i = 0; i < 3; ++i) {
      if (isZero(dir[i])) {
        nextT[i] = -1;
        continue;
      }
      if (ray.dir[i] < 0) {
        nextT[i] += (rayStartPoint[i] - gsp[i]) / dir[i];
      } else {
        nextT[i] += (gsp[i] + cellSize - rayStartPoint[i]) / dir[i];
      }
    }
  }

  // Which direction in the grid to move when we hit a "next" value
  CellCoord incs(
      (ray.dir[0] > 0) ? 1 : -1,
      (ray.dir[1] > 0) ? 1 : -1,
      (ray.dir[2] > 0) ? 1 : -1
  );

  // Check if a coord is still valid
  auto coordOk = [&] (int coord) -> bool {
    return 0 <= coord && coord < sideLength;
  };

  auto smaller = [] (double a, double b) -> bool {
    return (b < 0) || a <= b;
  };

  while (coordOk(gridCoord.x) && coordOk(gridCoord.y) && coordOk(gridCoord.z)) {
    for (const Model* model : cells[indexFor(gridCoord)].models) {
      models.insert(model);
    }

    for (int i = 0; i < 3; ++i) {
      if (nextT[i] < 0) continue;
      const auto a = nextT[(i + 1) % 3];
      const auto b = nextT[(i + 2) % 3];
      if (smaller(nextT[i], a) && smaller(nextT[i], b)) {
        nextT[i] += dt[i];
        gridCoord[i] += incs[i];
        break;
      }
    }
  }

  return models;
}
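/* The stepping loop above picks the axis whose nextT is smallest, with
 * negative entries meaning "never" (the ray is parallel to that axis). A
 * compact standalone restatement of that selection rule (next_axis is
 * illustrative, not part of the source): */
static int
next_axis(const double nextT[3])
{
    int best = -1;
    for (int i = 0; i < 3; ++i) {
        if (nextT[i] < 0) continue;          /* this axis never advances */
        if (best < 0 || nextT[i] <= nextT[best]) best = i;
    }
    return best; /* -1 if the ray advances along no axis */
}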
static int
hashtable_expand(struct hashtable *h)
{
    /* Double the size of the table to accommodate more entries */
    struct entry **newtable;
    struct entry *e;
    struct entry **pE;
    unsigned int newsize, i, index;
    /* Check we're not hitting max capacity */
    if (0 == (newsize = (h->tablelength << 1))) return 0;

    newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
    if (newtable != NULL)
    {
        memset(newtable, 0, newsize * sizeof(struct entry *));
        /* This algorithm is not 'stable'. ie. it reverses the list
         * when it transfers entries between the tables */
        for (i = 0; i < h->tablelength; i++) {
            while ((e = h->table[i]) != NULL) {
                h->table[i] = e->next;
                index = indexFor(newsize,e->h);
                e->next = newtable[index];
                newtable[index] = e;
            }
        }
        free(h->table);
        h->table = newtable;
    }
    else /* Plan B: realloc instead */
    {
        newtable = (struct entry **)
                   realloc(h->table, newsize * sizeof(struct entry *));
        if (newtable == NULL) return 0;
        h->table = newtable;
        for (i = h->tablelength; i < newsize; i++) newtable[i] = NULL;
        for (i = 0; i < h->tablelength; i++) {
            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
                index = indexFor(newsize,e->h);
                if (index == i) {
                    pE = &(e->next);
                } else {
                    *pE = e->next;
                    e->next = newtable[index];
                    newtable[index] = e;
                }
            }
        }
    }
    h->tablelength = newsize;
    h->loadlimit <<= 1;
    return -1;
}
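/* Because this variant grows by doubling (tablelength << 1), table sizes stay
 * powers of two, so indexFor() could replace the modulo with a mask, assuming
 * the hash function mixes its low bits well; a sketch (indexFor_pow2 is
 * illustrative): */
static inline unsigned int
indexFor_pow2(unsigned int tablelength, unsigned int hashvalue)
{
    return hashvalue & (tablelength - 1); /* valid only for power-of-two sizes */
}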
/*****************************************************************************/
SWITCH_DECLARE(void *) /* returns value associated with key */
switch_hashtable_remove(switch_hashtable_t *h, void *k) {
	unsigned int hashvalue = hash(h,k);

	return _switch_hashtable_remove(h, k, hashvalue, indexFor(h->tablelength,hashvalue));
}
static int
hashtable_expand(struct hashtable *h)
{
    /* Double the size of the table to accommodate more entries */
    struct entry **newtable;
    struct entry *e;
    struct entry **pE;
    unsigned int newsize, i, index;
    /* Check we're not hitting max capacity */
    if (h->primeindex == (prime_table_length - 1)) return 0;
    newsize = primes[++(h->primeindex)];

    newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
    if (NULL != newtable)
    {
        memset(newtable, 0, newsize * sizeof(struct entry *));
        /* This algorithm is not 'stable'. ie. it reverses the list
         * when it transfers entries between the tables */
        for (i = 0; i < h->tablelength; i++) {
            while (NULL != (e = h->table[i])) {
                h->table[i] = e->next;
                index = indexFor(newsize,e->h);
                e->next = newtable[index];
                newtable[index] = e;
            }
        }
        free(h->table);
        h->table = newtable;
    }
    /* Plan B: realloc instead */
    else
    {
        newtable = (struct entry **)
                   realloc(h->table, newsize * sizeof(struct entry *));
        if (NULL == newtable) { (h->primeindex)--; return 0; }
        h->table = newtable;
        /* Zero only the newly added buckets */
        memset(&newtable[h->tablelength], 0,
               (newsize - h->tablelength) * sizeof(struct entry *));
        for (i = 0; i < h->tablelength; i++) {
            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
                index = indexFor(newsize,e->h);
                if (index == i) {
                    pE = &(e->next);
                } else {
                    *pE = e->next;
                    e->next = newtable[index];
                    newtable[index] = e;
                }
            }
        }
    }
    h->tablelength = newsize;
    h->loadlimit = (unsigned int) ceil(newsize * max_load_factor);
    return -1;
}
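/* primes[], prime_table_length, and max_load_factor are referenced above but
 * not defined in these excerpts. In Christopher Clark's hashtable they are a
 * roughly-doubling sequence of primes and a 0.65 load factor: */
static const unsigned int primes[] = {
    53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593,
    49157, 98317, 196613, 393241, 786433, 1572869, 3145739,
    6291469, 12582917, 25165843, 50331653, 100663319,
    201326611, 402653189, 805306457, 1610612741
};
const unsigned int prime_table_length = sizeof(primes) / sizeof(primes[0]);
const float max_load_factor = 0.65;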
static int
hashtable_expand(struct hashtable *h)
{
    // Acquire global write lock for entire function
    rwlock_wrlock(&h->globallock);

    /* Double the size of the table to accommodate more entries */
    struct entry **newtable;
    struct entry *e;
    struct entry **pE;
    unsigned int newsize, i, index;
    /* Check we're not hitting max capacity */
    if (h->primeindex == (prime_table_length - 1)) {
        // Release global write lock for early return
        rwlock_wrunlock(&h->globallock);
        return 0;
    }
    newsize = primes[++(h->primeindex)];

    newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
    if (NULL != newtable)
    {
        memset(newtable, 0, newsize * sizeof(struct entry *));
        /* This algorithm is not 'stable'. ie. it reverses the list
         * when it transfers entries between the tables */
        for (i = 0; i < h->tablelength; i++) {
            while (NULL != (e = h->table[i])) {
                h->table[i] = e->next;
                index = indexFor(newsize,e->h);
                e->next = newtable[index];
                newtable[index] = e;
            }
        }
        free(h->table);
        h->table = newtable;
    }
    /* Plan B: realloc instead */
    else
    {
        newtable = (struct entry **)
                   realloc(h->table, newsize * sizeof(struct entry *));
        if (NULL == newtable) {
            (h->primeindex)--;
            // Release global write lock for early return
            rwlock_wrunlock(&h->globallock);
            return 0;
        }
        h->table = newtable;
        /* Zero only the newly added buckets */
        memset(&newtable[h->tablelength], 0,
               (newsize - h->tablelength) * sizeof(struct entry *));
        for (i = 0; i < h->tablelength; i++) {
            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
                index = indexFor(newsize,e->h);
                if (index == i) {
                    pE = &(e->next);
                } else {
                    *pE = e->next;
                    e->next = newtable[index];
                    newtable[index] = e;
                }
            }
        }
    }

#ifdef DEBUG
    printf("resizing fine-grained rwlock array to %u locks.\n", newsize);
#endif
    // Realloc more rwlocks for the newly resized table
    h->locks = (pthread_rwlock_t *)
               realloc(h->locks, sizeof(pthread_rwlock_t) * newsize);
    for (unsigned int j = h->num_locks; j < newsize; ++j) {
        if (pthread_rwlock_init(&h->locks[j], NULL)) {
            perror("pthread_rwlock_init");
            exit(1);
        }
    }
    h->num_locks = newsize;

    h->tablelength = newsize;
    h->loadlimit = (unsigned int) ceil(newsize * max_load_factor);
    // Release global write lock
    rwlock_wrunlock(&h->globallock);
    return -1;
}
//static int
//hashtable_expand(struct hashtable *h)
int
hashtable_expand(char *name, struct hashtable *h)
{
    /* Double the size of the table to accommodate more entries */
    struct entry **newtable;
    struct entry *e;
    struct entry **pE;
    unsigned int newsize, i, index;
    /* Check we're not hitting max capacity */
    if (h->primeindex == (prime_table_length - 1)) return 0;
    newsize = primes[++(h->primeindex)];

    //newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
    newtable = (struct entry **)pos_malloc(name, sizeof(struct entry*) * newsize);
    if (NULL != newtable)
    {
        memset(newtable, 0, newsize * sizeof(struct entry *));
#if CONSISTENCY == 1
        pos_clflush_cache_range(newtable, newsize * sizeof(struct entry *));
#endif
        /* This algorithm is not 'stable'. ie. it reverses the list
         * when it transfers entries between the tables */
        for (i = 0; i < h->tablelength; i++) {
            while (NULL != (e = h->table[i])) {
#if MODE == 2
                if (e != NULL) e += OFFSET_BASE;
#endif
#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
                pos_write_value(name, (unsigned long *)&h->table[i],
                                (unsigned long)e->next);
#else
                h->table[i] = e->next;
                pos_clflush_cache_range(&h->table[i], sizeof(unsigned long));
#endif
#else
                h->table[i] = e->next;
#endif
                index = indexFor(newsize, e->h);
#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
                pos_write_value(name, (unsigned long *)&e->next,
                                (unsigned long)newtable[index]);
#else
                e->next = newtable[index];
                pos_clflush_cache_range(&e->next, sizeof(unsigned long));
#endif
#else
                e->next = newtable[index];
#endif
                newtable[index] = e;
            }
        }
        //free(h->table);
#if CONSISTENCY != 1
        pos_free(name, h->table);
#endif
        h->table = newtable;
    }
    /* Plan B: realloc instead */
    /*else {
        //newtable = (struct entry **)
        //           realloc(h->table, newsize * sizeof(struct entry *));
        newtable = (struct entry **)
                   pos_realloc(name, h->table, newsize * sizeof(struct entry *));
        if (NULL == newtable) { (h->primeindex)--; return 0; }
        h->table = newtable;
        memset(newtable[h->tablelength], 0, newsize - h->tablelength);
        for (i = 0; i < h->tablelength; i++) {
            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
                index = indexFor(newsize, e->h);
                if (index == i) {
                    pE = &(e->next);
                } else {
                    *pE = e->next;
                    e->next = newtable[index];
                    newtable[index] = e;
                }
            }
        }
    }*/

#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
    pos_write_value(name, (unsigned long *)&h->tablelength, (unsigned long)newsize);
    pos_write_value(name, (unsigned long *)&h->loadlimit,
                    (unsigned long)ceil(newsize * max_load_factor));
    // Delayed flush...
#else
    h->tablelength = newsize;
    pos_clflush_cache_range(&h->tablelength, sizeof(unsigned long));
    h->loadlimit = (unsigned int)ceil(newsize * max_load_factor);
    pos_clflush_cache_range(&h->loadlimit, sizeof(unsigned long));
#endif
#else
    h->tablelength = newsize;
    h->loadlimit = (unsigned int) ceil(newsize * max_load_factor);
#endif
    return -1;
}
//int
//hashtable_insert(struct hashtable *h, void *k, void *v)
int
hashtable_insert(char *name, struct hashtable *h, unsigned long *k,
                 unsigned long *v, unsigned long v_size)
{
    /* This method allows duplicate keys - but they shouldn't be used */
    unsigned int index;
    struct entry *e;

#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
    //PR_DEBUG("Undo logging because UNDO_CONSISTENCY is 1\n");
    pos_transaction_start(name, POS_TS_INSERT);
#endif
#endif

    if (++(h->entrycount) > h->loadlimit) {
        /* Ignore the return value. If expand fails, we should
         * still try cramming just this value into the existing table
         * -- we may not have memory for a larger table, but one more
         * element may be ok. Next time we insert, we'll try expanding again.*/
        //hashtable_expand(h);
        // hashtable_expand(name, h);
    }

    //e = (struct entry *)malloc(sizeof(struct entry));
    //if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
    e = (struct entry *)pos_malloc(name, sizeof(struct entry));
    if (NULL == e) { --(h->entrycount); return -1; } /*oom*/

    e->h = hash(h,k);
    //#if CONSISTENCY == 1
    //pos_clflush_cache_range(&e->h, sizeof(e->h));
    //#endif
    index = indexFor(h->tablelength,e->h);
    //e->k = k;
    //e->v = v;
    memcpy(e->k, k, sizeof(unsigned long)*KEY_SIZE);
    //#if CONSISTENCY == 1
    //	pos_clflush_cache_range(e->k, sizeof(unsigned long)*KEY_SIZE);
    //#endif
    e->v = (unsigned long *)pos_malloc(name, v_size);
    memcpy(e->v, v, v_size);
#if CONSISTENCY == 1
    //pos_clflush_cache_range(&e->v, sizeof(e->v));
    pos_clflush_cache_range(e->v, v_size);
#endif
    e->next = h->table[index];
#if CONSISTENCY == 1
    //pos_clflush_cache_range(&e->next, sizeof(e->next));
    pos_clflush_cache_range(e, sizeof(struct entry)); // Delayed flush
#endif

#if MODE == 1
#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
    pos_write_value(name, (unsigned long *)&h->table[index], (unsigned long)e);
#else
    h->table[index] = e;
    pos_clflush_cache_range(&h->table[index], sizeof(unsigned long));
#endif
#else
    h->table[index] = e;
#endif
#elif MODE == 2
    h->table[index] = e - OFFSET_BASE;
#endif

#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
    pos_transaction_end(name);
#endif
#endif
    //return -1;
    return 0;
}
//void * /* returns value associated with key */
//hashtable_remove(struct hashtable *h, void *k)
int
hashtable_remove(char *name, struct hashtable *h, unsigned long *k)
{
    /* TODO: consider compacting the table when the load factor drops enough,
     *       or provide a 'compact' method. */
    struct entry *e;
    struct entry **pE;
    //void *v;
    unsigned long hashvalue, index;

#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
    pos_transaction_start(name, POS_TS_REMOVE);
#endif
#endif

    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue); /* reuse hashvalue rather than rehashing */
    pE = &(h->table[index]);
    e = *pE;
    while (NULL != e)
    {
#if MODE == 2
        e += OFFSET_BASE;
#endif
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
            pos_write_value(name, (unsigned long *)pE, (unsigned long)e->next);
            pos_write_value(name, (unsigned long *)&h->entrycount,
                            (unsigned long)(h->entrycount-1));
#else
            *pE = e->next;
            pos_clflush_cache_range(pE, sizeof(struct entry *));
            h->entrycount--;
            pos_clflush_cache_range(&h->entrycount, sizeof(unsigned long));
#endif
#else
            *pE = e->next;
            h->entrycount--;
            //v = e->v;
#endif
            //freekey(e->k);
            //free(e);
            pos_free(name, e->v);
            pos_free(name, e);
#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
            pos_transaction_end(name);
#endif
#endif
            //return v;
            return 0;
        }
        pE = &(e->next);
        e = e->next;
    }
#if CONSISTENCY == 1
#if UNDO_CONSISTENCY == 1
    pos_transaction_end(name);
#endif
#endif
    //return NULL;
    return -1;
}