int hash_swap(hash_t *hash, unsigned long key1, unsigned long key2)
{
    int bucket;
    int rc;
    unsigned long tmp;
    hash_entry_t *entry1, *entry2;

    rc = pthread_mutex_lock(&lock);

    /* find first value */
    bucket = hash_fn(hash, key1);
    entry1 = hash->table[bucket];
    for (;;) {
        /* first entry not found */
        if (entry1 == NULL) {
            rc = pthread_mutex_unlock(&lock);
            return -1;
        }
        /* first entry found */
        if (entry1->key == key1)
            break;
        entry1 = entry1->next;
    }

    /* find second value */
    bucket = hash_fn(hash, key2);
    entry2 = hash->table[bucket];
    for (;;) {
        /* second entry not found */
        if (entry2 == NULL) {
            rc = pthread_mutex_unlock(&lock);
            return -1;
        }
        /* second entry found */
        if (entry2->key == key2)
            break;
        entry2 = entry2->next;
    }

    /* do the swap */
    tmp = entry1->val;
    entry1->val = entry2->val;
    entry2->val = tmp;

    rc = pthread_mutex_unlock(&lock);
    return 0;
}
unsigned long hash_delete(hash_t *hash, unsigned long key)
{
    unsigned int bucket;
    hash_entry_t *curr, *prev;
    unsigned long ret;

    bucket = hash_fn(hash, key);
    curr = hash->table[bucket];
    prev = curr;
    for (;;) {
        /* entry does not exist */
        if (curr == NULL)
            return HASH_ENTRY_NOTFOUND;
        /* found entry */
        if (curr->key == key)
            break;
        prev = curr;
        curr = curr->next;
    }

    /* delete entry */
    ret = curr->val;
    if (curr == hash->table[bucket])
        hash->table[bucket] = curr->next;
    else
        prev->next = curr->next;
    free(curr);
    return ret;
}
vector<string> findRepeatedDnaSequences(string s) {
    unordered_map<size_t, int> map;
    hash<string> hash_fn;
    vector<string> v;
    for (int i = 0; i + 9 < s.size(); i++) {
        string t = s.substr(i, 10);
        size_t h = hash_fn(t);            // keep the full hash; truncating to int loses bits
        if (map.find(h) == map.end()) {
            map[h] = 1;
        } else {
            if (map[h] == 1)
                v.push_back(t);           // report each repeated sequence once
            map[h]++;
        }
    }
    return v;
}
void hash_insert(hash_t *hash, unsigned long key, unsigned long val)
{
    hash_entry_t *new_entry, *curr;
    unsigned int bucket;

    bucket = hash_fn(hash, key);

    /* lookup: if the key already exists, just update its value */
    curr = hash->table[bucket];
    while (curr) {
        /* found key */
        if (curr->key == key) {
            curr->val = val;
            return;
        }
        curr = curr->next;
    }

    /* key does not exist, allocate a new entry */
    new_entry = malloc(sizeof(hash_entry_t));
    new_entry->key = key;
    new_entry->val = val;

    /* put the new entry at the beginning of the bucket */
    new_entry->next = hash->table[bucket];
    hash->table[bucket] = new_entry;
}
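None of the chained-table snippets here show the hash_t/hash_entry_t types or hash_fn itself. Below is a minimal sketch of what they might look like; the field names size and table and the plain-modulo bucket function are assumptions made for illustration, not taken from the original code.

/* Hypothetical supporting declarations for the hash_* snippets above. */
typedef struct hash_entry {
    unsigned long      key;
    unsigned long      val;
    struct hash_entry *next;   /* next entry in the same bucket */
} hash_entry_t;

typedef struct hash {
    unsigned int   size;       /* number of buckets (assumed field name) */
    hash_entry_t **table;      /* array of bucket heads (assumed field name) */
} hash_t;

/* Sketch of a bucket function: any reasonable mixing of the key works;
 * a plain modulo by the bucket count is the simplest choice. */
static unsigned int hash_fn(hash_t *hash, unsigned long key)
{
    return (unsigned int)(key % hash->size);
}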
template <typename T>
inline size_t calculateHash(const T& msg) {
    // Serialize the value to text, then hash the text.
    std::stringstream ss;
    ss << msg;
    std::hash<std::string> hash_fn;
    return hash_fn(ss.str());
}
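A minimal usage sketch of the stream-based hasher above (the driver is illustrative and not part of the original snippet): any type with an operator<< overload goes through the same path, so two values with the same textual form hash identically.

#include <functional>
#include <iostream>
#include <sstream>
#include <string>

// Same idea as the calculateHash template above: serialize to text, hash the text.
template <typename T>
inline size_t calculateHash(const T& msg) {
    std::stringstream ss;
    ss << msg;
    std::hash<std::string> hash_fn;
    return hash_fn(ss.str());
}

int main() {
    std::cout << calculateHash(42) << "\n";                  // hash of the text "42"
    std::cout << calculateHash(std::string("42")) << "\n";   // same text, so same hash
}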
static void output_previous_word(void)
{
    char *p;
    int i, j;

    word[wordpos++] = 0;    /* word's trailing null */

    /* Don't bother putting single or double letter words in the table */
    if (wordpos <= 3) {
        p = word;
        while (*p)
            *b++ = *p++;
        return;
    }

    /* search table to see if word is already in it; */
    for (i = hash_fn(word, COMPRESS_HASH_MASK), j = 0;
         i < MAXTABLE && (words[i] || (i & 0xFF) == 0) && j < COLLISION_LIMIT;
         i++, j++)
        if (words[i])
            if (strcmp(word, words[i]) == 0) {
                *b++ = (i >> 8) | TABLE_FLAG;
                *b++ = i & 0xFF;
                return;
            }
unsigned long hash_lookup(hash_t *hash, unsigned long key)
{
    unsigned int bucket;
    int rc;
    hash_entry_t *curr;
    unsigned long return_val;

    rc = pthread_mutex_lock(&lock);

    bucket = hash_fn(hash, key);
    curr = hash->table[bucket];
    for (;;) {
        /* entry not found */
        if (curr == NULL) {
            rc = pthread_mutex_unlock(&lock);
            return HASH_ENTRY_NOTFOUND;
        }
        /* entry found */
        if (curr->key == key)
            break;
        curr = curr->next;
    }

    return_val = curr->val;
    rc = pthread_mutex_unlock(&lock);
    return return_val;
}
/* Find irq by its id. */
irq_t find_by_irqid(int id)
{
    irqhashtable_t bucket;
    irq_t p, link;

    spin_lock(&irqhashlock);
    bucket = &irqhash[hash_fn(id)];
    p = bucket->irq;

    /* guard against an empty bucket */
    if (p == NULL) {
        link = NULL;
        goto end;
    }

    /* optimize for average case. */
    if (p->id == id) {
        link = p;
        goto end;
    }

    /* walk the chain; stop on a match or at the end of the list */
    for (link = p->next; link != NULL && link->id != id; link = link->next)
        ;

end:
    spin_unlock(&irqhashlock);
    return link;
}
TileLoader::TileLoader(const std::string &service, double latitude,
                       double longitude, unsigned int zoom, unsigned int blocks,
                       QObject *parent)
    : QObject(parent), latitude_(latitude), longitude_(longitude), zoom_(zoom),
      blocks_(blocks), object_uri_(service) {
  assert(blocks_ >= 0);

  const std::string package_path = ros::package::getPath("rviz_satellite");
  if (package_path.empty()) {
    throw std::runtime_error("package 'rviz_satellite' not found");
  }

  std::hash<std::string> hash_fn;
  cache_path_ =
      QDir::cleanPath(QString::fromStdString(package_path) + QDir::separator() +
                      QString("mapscache") + QDir::separator() +
                      QString::number(hash_fn(object_uri_)));

  QDir dir(cache_path_);
  if (!dir.exists() && !dir.mkpath(".")) {
    throw std::runtime_error("Failed to create cache folder: " +
                             cache_path_.toStdString());
  }

  /// @todo: some kind of error checking of the URL

  // calculate center tile coordinates
  double x, y;
  latLonToTileCoords(latitude_, longitude_, zoom_, x, y);
  center_tile_x_ = std::floor(x);
  center_tile_y_ = std::floor(y);
  // fractional component
  origin_offset_x_ = x - center_tile_x_;
  origin_offset_y_ = y - center_tile_y_;
}
static bool node_hash_remove(fatfs_hash_table_t *tbl, fatfs_node_t *node)
{
    unsigned int  hval;
    fatfs_node_t *lnode, *pnode;

    // Calculate hash of name and get the first node in slot
    hval  = hash_fn(node->dentry.filename, strlen(node->dentry.filename)) % tbl->size;
    lnode = tbl->nodes[hval];

    // Now find the node in list and remove it
    pnode = NULL;
    while (lnode != NULL)
    {
        if (lnode == node)
        {
            if (pnode == NULL)
                tbl->nodes[hval] = lnode->hash_next;
            else
                pnode->hash_next = lnode->hash_next;

            node->hash_next = NULL;
            tbl->n--;

            // All done
            return true;
        }
        pnode = lnode;
        lnode = lnode->hash_next;
    }
    // Node not in list
    return false;
}
/* Find a process by its pid. */
process_t find_by_pid(pid_t pid)
{
    phashtable_t bucket;
    process_t p, link;

    spin_lock(&phashlock);
    bucket = &phash[hash_fn(pid)];
    p = bucket->proc;

    /* guard against an empty bucket */
    if (p == NULL)
        goto errp;

    /* optimize for average case. */
    if (p->pid == pid) {
        link = p;
        goto ok;
    }

    /* walk the chain; bail out if the pid is not found */
    for (link = p->next; link != NULL; link = link->next)
        if (link->pid == pid)
            goto ok;
    goto errp;

ok:
    spin_unlock(&phashlock);
    return link;

errp:
    spin_unlock(&phashlock);
    return NULL;
}
uint64_t TestingHarness::GetThreadId() {
  std::hash<std::thread::id> hash_fn;
  uint64_t id = hash_fn(std::this_thread::get_id());
  id = id % MAX_THREADS;
  return id;
}
#include <functional>
#include <iostream>
#include <string>

int main()
{
    std::string str = "Meet the new boss...";
    std::hash<std::string> hash_fn;
    std::size_t str_hash = hash_fn(str);
    std::cout << str_hash << '\n';
}
int wal_engine::insert(const statement& st) {
  LOG_INFO("Insert");
  record* after_rec = st.rec_ptr;
  table* tab = db->tables->at(st.table_id);
  plist<table_index*>* indices = tab->indices;

  unsigned int num_indices = tab->num_indices;
  unsigned int index_itr;

  std::string key_str = sr.serialize(after_rec, indices->at(0)->sptr);
  LOG_INFO("key_str :: %s", key_str.c_str());
  unsigned long key = hash_fn(key_str);

  // Check if key present
  if (indices->at(0)->pm_map->exists(key) != 0) {
    after_rec->clear_data();
    delete after_rec;
    return EXIT_SUCCESS;
  }

  // Add log entry
  std::string after_tuple = sr.serialize(after_rec, after_rec->sptr);

  entry_stream.str("");
  entry_stream << st.transaction_id << " " << st.op_type << " " << st.table_id
               << " " << after_tuple << "\n";
  entry_str = entry_stream.str();
  fs_log.push_back(entry_str);

  // Add to table
  tab->pm_data->push_back(after_rec);

  off_t storage_offset;
  storage_offset = tab->fs_data.push_back(after_tuple);

  // Add entry in indices
  for (index_itr = 0; index_itr < num_indices; index_itr++) {
    key_str = sr.serialize(after_rec, indices->at(index_itr)->sptr);
    key = hash_fn(key_str);

    indices->at(index_itr)->pm_map->insert(key, after_rec);
    indices->at(index_itr)->off_map->insert(key, storage_offset);
  }

  return EXIT_SUCCESS;
}
int main()
{
    S s;
    s.first_name = "Bender";
    s.last_name = "Rodriguez";

    std::hash<S> hash_fn;
    std::cout << "hash(s) = " << hash_fn(s) << "\n";
}
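The struct S and the std::hash<S> specialization that the snippet above relies on are not shown. A plausible sketch follows, with the member names taken from the usage above and a shift-and-xor combiner assumed for illustration; the original code may combine the hashes differently.

#include <functional>
#include <string>

// Assumed definition of S, inferred from the usage in main() above.
struct S {
    std::string first_name;
    std::string last_name;
};

// Hypothetical std::hash specialization: hash each member, then combine.
namespace std {
template <>
struct hash<S> {
    std::size_t operator()(const S& s) const noexcept {
        std::size_t h1 = std::hash<std::string>{}(s.first_name);
        std::size_t h2 = std::hash<std::string>{}(s.last_name);
        return h1 ^ (h2 << 1);   // shift-and-xor is just one common mixing choice
    }
};
}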
int wal_engine::remove(const statement& st) {
  LOG_INFO("Remove");
  record* rec_ptr = st.rec_ptr;
  table* tab = db->tables->at(st.table_id);
  plist<table_index*>* indices = tab->indices;

  unsigned int num_indices = tab->num_indices;
  unsigned int index_itr;
  record* before_rec = NULL;

  std::string key_str = sr.serialize(rec_ptr, indices->at(0)->sptr);
  unsigned long key = hash_fn(key_str);

  // Check if key does not exist
  if (indices->at(0)->pm_map->at(key, &before_rec) == false) {
    delete rec_ptr;
    return EXIT_SUCCESS;
  }

  // Add log entry
  entry_stream.str("");
  entry_stream << st.transaction_id << " " << st.op_type << " " << st.table_id
               << " " << sr.serialize(before_rec, before_rec->sptr) << "\n";
  entry_str = entry_stream.str();
  fs_log.push_back(entry_str);

  tab->pm_data->erase(before_rec);

  // Remove entry in indices
  for (index_itr = 0; index_itr < num_indices; index_itr++) {
    key_str = sr.serialize(rec_ptr, indices->at(index_itr)->sptr);
    key = hash_fn(key_str);

    indices->at(index_itr)->pm_map->erase(key);
    indices->at(index_itr)->off_map->erase(key);
  }

  before_rec->clear_data();
  delete before_rec;
  delete rec_ptr;

  return EXIT_SUCCESS;
}
/* Insert the process in the global hash table. */
static inline void phash_insert(process_t process)
{
    phashtable_t p;

    spin_lock(&phashlock);
    p = &phash[hash_fn(process->pid)];
    process->next = p->proc;
    p->proc = process;
    spin_unlock(&phashlock);
}
/* Insert the irq in the global hash table. */
static inline void irqhash_insert(irq_t irq)
{
    irqhashtable_t p;

    spin_lock(&irqhashlock);
    p = &irqhash[hash_fn(irq->id)];
    irq->next = p->irq;
    p->irq = irq;
    spin_unlock(&irqhashlock);
}
static inline void thash_insert(struct thread *thread)
{
    thashtable_t t;

    spin_lock(&hashlock);
    t = &thash[hash_fn(thread->tid)];
    thread->next = t->thr;
    t->thr = thread;
    spin_unlock(&hashlock);
}
static bool node_hash_add(fatfs_hash_table_t *tbl, fatfs_node_t *node)
{
    unsigned int hval;

    // Calculate hash of given node filename
    hval = hash_fn(node->dentry.filename, strlen(node->dentry.filename)) % tbl->size;

    CYG_TRACE2(TNC, "name='%s' hval=%d", node->dentry.filename, hval);

    if (tbl->nodes[hval] == NULL)
    {
        // First node in this slot
        node->hash_next  = NULL;
        tbl->nodes[hval] = node;
        tbl->n++;
        return true;
    }
    else
    {
        // More nodes in this slot
        fatfs_node_t *lnode, *pnode;

        pnode = NULL;
        lnode = tbl->nodes[hval];

        // Insert node into list so that it is sorted by filename
        while (lnode != NULL)
        {
            if (lnode == node)
                return false;

            if (strcasecmp(lnode->dentry.filename, node->dentry.filename) > 0)
            {
                if (pnode != NULL)
                    pnode->hash_next = node;   // Insert in the middle
                else
                    tbl->nodes[hval] = node;   // Insert at the beginning

                node->hash_next = lnode;
                tbl->n++;
                return true;
            }
            pnode = lnode;
            lnode = lnode->hash_next;
        }

        // Insert at the end
        pnode->hash_next = node;
        node->hash_next  = NULL;
        tbl->n++;
        return true;
    }
}
void Set::update_hash()
{
    std::hash<ParamsMap::value_type::first_type> hash_fn;
    hash = 0;
    /** Because xor is commutative and associative, iteration order does not matter */
    for (auto in = params.begin(); in != params.end(); ++in)
        hash ^= hash_fn(in->first);
    set_desc.put("@hash", hash);
}
vector<string> findRepeatedDnaSequences(string s) {
    unordered_map<size_t, int> MP;
    hash<string> hash_fn;
    vector<string> ret;
    for (int i = 0; i < int(s.size()) - 9; ++i)
        if (MP[hash_fn(s.substr(i, 10))]++ == 1)
            ret.push_back(s.substr(i, 10));
    return ret;
}
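A short, self-contained driver for the variant above (the DNA input string is only an illustrative example); it repeats the function so the block compiles on its own.

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
using namespace std;

// Repeats the second findRepeatedDnaSequences variant above for a standalone build.
vector<string> findRepeatedDnaSequences(string s) {
    unordered_map<size_t, int> MP;
    hash<string> hash_fn;
    vector<string> ret;
    for (int i = 0; i < int(s.size()) - 9; ++i)
        if (MP[hash_fn(s.substr(i, 10))]++ == 1)
            ret.push_back(s.substr(i, 10));
    return ret;
}

int main() {
    // Expected output for this input: AAAAACCCCC and CCCCCAAAAA
    for (const string& seq : findRepeatedDnaSequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"))
        cout << seq << "\n";
}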
void wal_engine::load(const statement& st) {
  //LOG_INFO("Load");
  record* after_rec = st.rec_ptr;
  table* tab = db->tables->at(st.table_id);
  plist<table_index*>* indices = tab->indices;

  unsigned int num_indices = tab->num_indices;
  unsigned int index_itr;

  std::string key_str = sr.serialize(after_rec, indices->at(0)->sptr);
  unsigned long key = hash_fn(key_str);
  std::string after_tuple = sr.serialize(after_rec, after_rec->sptr);

  // Add log entry
  if (!conf.recovery) {
    entry_stream.str("");
    entry_stream << st.transaction_id << " " << st.op_type << " " << st.table_id
                 << " " << after_tuple << "\n";
    entry_str = entry_stream.str();
    fs_log.push_back(entry_str);
  }

  tab->pm_data->push_back(after_rec);

  off_t storage_offset;
  storage_offset = tab->fs_data.push_back(after_tuple);

  // Add entry in indices
  for (index_itr = 0; index_itr < num_indices; index_itr++) {
    key_str = sr.serialize(after_rec, indices->at(index_itr)->sptr);
    key = hash_fn(key_str);

    indices->at(index_itr)->pm_map->insert(key, after_rec);
    indices->at(index_itr)->off_map->insert(key, storage_offset);
  }
}
int dcache_lookup(struct inode * dir, const char * name, int len, unsigned long * ino)
{
    struct hash_list * hash;
    struct dir_cache_entry *de;

    if (len > DCACHE_NAME_LEN)
        return 0;
    hash = hash_table + hash_fn(dir->i_dev, dir->i_ino, namehash(name, len));
    de = find_entry(dir, name, len, hash);
    if (!de)
        return 0;
    *ino = de->ino;
    move_to_level2(de, hash);
    return 1;
}
int wal_engine::update(const statement& st) {
  LOG_INFO("Update");
  record* rec_ptr = st.rec_ptr;
  table* tab = db->tables->at(st.table_id);
  plist<table_index*>* indices = db->tables->at(st.table_id)->indices;

  std::string key_str = sr.serialize(rec_ptr, indices->at(0)->sptr);
  unsigned long key = hash_fn(key_str);
  record* before_rec;

  // Check if key does not exist
  if (indices->at(0)->pm_map->at(key, &before_rec) == false) {
    rec_ptr->clear_data();
    delete rec_ptr;
    return EXIT_SUCCESS;
  }

  entry_stream.str("");
  entry_stream << st.transaction_id << " " << st.op_type << " " << st.table_id
               << " ";
  entry_stream << sr.serialize(before_rec, tab->sptr) << " ";

  // Update existing record
  for (int field_itr : st.field_ids) {
    if (rec_ptr->sptr->columns[field_itr].inlined == 0) {
      void* before_field = before_rec->get_pointer(field_itr);
      delete (char*) before_field;
    }
    before_rec->set_data(field_itr, rec_ptr);
  }

  std::string before_tuple;
  before_tuple = sr.serialize(before_rec, tab->sptr);
  entry_stream << before_tuple << "\n";

  // Add log entry
  entry_str = entry_stream.str();
  fs_log.push_back(entry_str);

  off_t storage_offset = 0;
  indices->at(0)->off_map->at(key, &storage_offset);
  tab->fs_data.update(storage_offset, before_tuple);

  delete rec_ptr;
  return EXIT_SUCCESS;
}
static void log_call(unsigned long src, unsigned long is_call, unsigned long dest)
{
    unsigned h = hash_fn(src);
    struct hash_entry *he;
    unsigned i;

    if (is_call) {
        src |= HE_IS_CALL;
    }
    tl_assert(h < NR_HASH_HEADS);
    for (he = hash_heads[h]; he && src != he->src; he = he->next) {
        ;
    }
    if (!he) {
        he = VG_(malloc)("hash_entry", sizeof(*he));
        he->next = hash_heads[h];
        he->src = src;
        he->nr_entries = 0;
        he->nr_entries_allocated = 7;
        he->dest = VG_(malloc)("hash_entry->entries",
                               sizeof(he->dest[0]) * he->nr_entries_allocated);
        hash_heads[h] = he;
    } else {
        for (i = 0; i < he->nr_entries; i++) {
            if (he->dest[i] == dest) {
                return;
            }
        }
        if (he->nr_entries == he->nr_entries_allocated) {
            unsigned long *t;
            he->nr_entries_allocated += 16;
            t = VG_(malloc)("hash_entry->entries2",
                            sizeof(he->dest[0]) * he->nr_entries_allocated);
            VG_(memcpy)(t, he->dest, sizeof(he->dest[0]) * he->nr_entries);
            VG_(free)(he->dest);
            he->dest = t;
        }
    }
    he->dest[he->nr_entries] = dest;
    he->nr_entries++;
}
static fatfs_node_t* node_hash_find(fatfs_hash_table_t *tbl,
                                    const char          *name,
                                    unsigned int         namelen,
                                    unsigned int         parent_cluster)
{
    unsigned int  hval;
    fatfs_node_t *node;

    // Calculate hash of name and get the first node in slot
    hval = hash_fn(name, namelen) % tbl->size;
    node = tbl->nodes[hval];

    CYG_TRACE2(TNC, "name='%s' hval=%d\n", name, hval);

    // Find the node in list which matches the
    // given name and parent_cluster
    while (node != NULL)
    {
        // First compare the parent cluster number and
        // check filename length since it is faster than
        // comparing filenames
        if (parent_cluster == node->dentry.parent_cluster &&
            '\0' == node->dentry.filename[namelen])
        {
            int i = strncasecmp(node->dentry.filename, name, namelen);
            if (i == 0)
                return node;
            else if (i > 0)
                return NULL;  // Stop searching - we have a
                              // sorted list so there can't be
                              // any matching filename further on
                              // if i > 0 - look at node_hash_add
        }
        node = node->hash_next;
    }
    // No such node found
    return NULL;
}
BigInt compute_x(const std::string& hash_id,
                 const std::string& identifier,
                 const std::string& password,
                 const MemoryRegion<byte>& salt)
{
    std::auto_ptr<HashFunction> hash_fn(
        global_state().algorithm_factory().make_hash_function(hash_id));

    hash_fn->update(identifier);
    hash_fn->update(":");
    hash_fn->update(password);

    SecureVector<byte> inner_h = hash_fn->final();

    hash_fn->update(salt);
    hash_fn->update(inner_h);

    SecureVector<byte> outer_h = hash_fn->final();

    return BigInt::decode(outer_h);
}
/* Remove the process from the global hash table. */
static inline void phash_remove(process_t process)
{
    phashtable_t bucket;
    process_t p, link;

    spin_lock(&phashlock);
    bucket = &phash[hash_fn(process->pid)];
    p = bucket->proc;

    if (p == process) {
        /* process is at the head of the chain */
        bucket->proc = process->next;
    } else {
        /* find the predecessor of process and unlink it */
        for (link = p; (link->next != process) && (link->next != NULL); link = link->next)
            ;
        if (link->next == process)
            link->next = process->next;
    }
    spin_unlock(&phashlock);
}
unsigned long hash_lookup(hash_t *hash, unsigned long key)
{
    unsigned int bucket;
    hash_entry_t *curr;

    bucket = hash_fn(hash, key);
    curr = hash->table[bucket];
    for (;;) {
        /* entry not found */
        if (curr == NULL)
            return HASH_ENTRY_NOTFOUND;
        /* entry found */
        if (curr->key == key)
            break;
        curr = curr->next;
    }
    return curr->val;
}