// Rebuild the uniform-grid neighbor structure for all particles:
// Phase 1: hash every particle position to a cell id.
// Phase 2: sort particle indices by cell id (thrust::sort_by_key).
// Phase 3: linear scan of the sorted ids, recording each cell's
//          [begin, end) particle range via reorderDataAndFindCellStart().
void neighborhood_cell::detection() {
	VEC3F_PTR pos = md->particleSystem()->position();
	VEC3I cell3d;
	// Phase 1: per-particle cell hash; body_id starts as the identity map.
	for (unsigned int i = 0; i < md->numParticle(); i++){
		cell3d = getCellNumber(pos[i].x, pos[i].y, pos[i].z);
		cell_id[i] = getHash(cell3d);
		body_id[i] = i;
	}
	// Phase 2: sort particle indices so equal cell ids become contiguous.
	thrust::sort_by_key(cell_id, cell_id + md->numParticle(), body_id);
	// Reset the per-cell range tables.
	// NOTE(review): memset uses only the low byte of its value argument,
	// so 0xffffffff fills every byte with 0xff (each uint reads as
	// UINT_MAX, presumably the "cell empty" marker - confirm).
	memset(cell_start, 0xffffffff, sizeof(unsigned int) * ng);
	memset(cell_end, 0, sizeof(unsigned int)*ng);
	unsigned int begin = 0, end = 0, id = 0;
	bool ispass;
	// Phase 3: flush a [begin, end) run whenever the cell id changes.
	// NOTE(review): cell_id[end] is read after `end` was post-incremented,
	// so the final iteration reads cell_id[numParticle()] - one past the
	// last sorted entry. Confirm the buffer has an extra sentinel slot,
	// otherwise this is an out-of-bounds read.
	while (end++ != md->numParticle()){
		ispass = true;
		id = cell_id[begin];
		if (id != cell_id[end]){
			// Runs of length 1 are flushed inline (begin advances via the
			// post-increment); longer runs set ispass=false and are flushed
			// by the branch below.
			end - begin > 1 ? ispass = false : reorderDataAndFindCellStart(id, begin++, end);
		}
		if (!ispass){
			reorderDataAndFindCellStart(id, begin, end);
			begin = end;
		}
	}
}
/* Build the on-disk path of a sleeping character's pet-pool file
 * ("<id>.pet"), bucketed under sleepchardir by getHash(id).
 * Leaves `output` untouched when `id` is empty. */
static void makeSleepCharPoolPetFileName( char *id,char *output, int outlen)
{
	char filename[256];

	if( strlen( id ) < 1 )
		return;
	snprintf( filename, sizeof( filename ), "%s.pet", id );
	makeDirFilename( output, outlen, sleepchardir, getHash( id ), filename );
}
/* Build the on-disk path of a sleeping character's item-pool file
 * ("<id>.item"), bucketed under sleepchardir by getHash(id).
 * Leaves `output` untouched when `id` is empty. */
static void makeSleepCharPoolItemFileName( char *id,char *output, int outlen)
{
	char filename[256];

	if( strlen( id ) < 1 )
		return;
	snprintf( filename, sizeof( filename ), "%s.item", id );
	makeDirFilename( output, outlen, sleepchardir, getHash( id ), filename );
}
/*
 * Demo driver: builds a hash table, inserts three named points and looks
 * one of them up again.
 *
 * Fixes vs. original:
 *  - main() returns 0 on success (it returned 1, the failure status);
 *  - redundant embedded "\0" terminators dropped from the string
 *    literals (string literals are already NUL-terminated, and strcpy
 *    stops at the first NUL anyway);
 *  - the lookup result is checked before being dereferenced.
 */
int main()
{
    struct test *dummy = NULL;
    size_t size = 13;

    /* NOTE(review): init_hash() receives `dummy` before it holds a valid
     * pointer - presumably only its type matters to the cast; confirm. */
    HASHTABLE init = init_hash(dummy, size, (void *)casting);

    strcpy(point_one.name, "entry");
    strcpy(point_sec.name, "hello");
    strcpy(point_thi.name, "12345");
    point_one.x = 5;   point_one.y = 20;
    point_sec.x = 17;  point_sec.y = 30;
    point_thi.x = 11;  point_thi.y = 2350;

    insertHash(init, point_one.name, &point_one);
    insertHash(init, point_sec.name, &point_sec);
    insertHash(init, point_thi.name, &point_thi);

    dummy = getHash(init, point_sec.name);
    if (dummy == NULL) {
        printf("Lookup failed\n");
        return 1;
    }
    printf("Output Name: %s, x: %d, y: %d\n", dummy->name, dummy->x, dummy->y);
    return 0;
}
// Look up the cached overlap pair for (proxy0, proxy1).
// Returns NULL when the pair is not present in the cache.
b3BroadphasePair* b3HashedOverlappingPairCache::findPair(int proxy0, int proxy1)
{
	b3g_findPairs++;

	// Canonicalize ordering so (a, b) and (b, a) hash identically.
	if (proxy0 > proxy1)
		b3Swap(proxy0, proxy1);

	const int proxyId1 = proxy0;
	const int proxyId2 = proxy1;

	// Mask the hash into the table range (capacity presumably a power of
	// two, so masking acts as the modulo - confirm).
	int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity() - 1));

	if (hash >= m_hashTable.size())
		return NULL;

	// Walk the collision chain until the ids match or it ends.
	int index = m_hashTable[hash];
	while (index != B3_NULL_PAIR && !equalsPair(m_overlappingPairArray[index], proxyId1, proxyId2))
		index = m_next[index];

	if (index == B3_NULL_PAIR)
		return NULL;

	b3Assert(index < m_overlappingPairArray.size());
	return &m_overlappingPairArray[index];
}
/*
 * Removes the first pair whose key equals `key` from the hashtable t,
 * freeing the node and its owned key/value buffers. No-op when the key
 * is absent.
 *
 * Fixes vs. original:
 *  - an empty bucket (NULL chain head) no longer crashes in strcmp();
 *  - the traversal stops after unlinking: the original kept walking
 *    through the freed node (use-after-free) and advanced `pNode`
 *    onto the deleted element.
 */
void removeFromHTable(Hashtable* t, char* key)
{
    int hash = getHash(t, key);
    HTNode* cNode = t->_tab[hash];
    HTNode* pNode = NULL;

    while (cNode != NULL) {
        if (strcmp(cNode->_key, key) == 0) {
            if (pNode == NULL)
                t->_tab[hash] = cNode->_next;   /* unlink the chain head */
            else
                pNode->_next = cNode->_next;    /* unlink an interior node */
            free(cNode->_key);
            free(cNode->_value);
            free(cNode);
            return;
        }
        pNode = cNode;
        cNode = cNode->_next;
    }
}
std::vector<uint64_t> getVocabIDs(const StringPiece &textin) { //Tokenize std::vector<uint64_t> output; util::TokenIter<util::SingleCharacter> itWord(textin, util::SingleCharacter(' ')); while (itWord) { StringPiece word = *itWord; uint64_t id = 0; util::TokenIter<util::SingleCharacter> itFactor(word, util::SingleCharacter('|')); while (itFactor) { StringPiece factor = *itFactor; //cerr << "factor=" << factor << endl; id += getHash(factor); itFactor++; } output.push_back(id); itWord++; } return output; }
// Convenience overload: initializes a fresh SHA-1 context and delegates
// to the context-taking getHash() overload, returning the updated context.
SHA_CTX Hash::getHash(char* block, unsigned char* digest, unsigned int blockLen)
{
	SHA_CTX ctx;
	SHA1_Init(&ctx);
	return getHash(block, digest, blockLen, ctx);
}
/**************check the login(client authentication part very important)**************/
/*
 * Authenticates `name`/`passwd` against the "shadow" file, which holds
 * lines of the form "<name>::<hash>". Returns 1 on success, 0 otherwise.
 * On success the caller's password buffer is scrubbed.
 *
 * Fixes vs. original:
 *  - fopen() failure no longer passes a NULL FILE* to fgets();
 *  - the FILE* is closed on every path (it leaked on both returns);
 *  - snprintf replaces sprintf to bound the name+hash composition.
 */
int check_login(char *name, char *passwd)
{
    FILE *fp;
    char str[256];
    char hash[256], chash[256];
    int authenticated = 0;

    fp = fopen("shadow", "r");
    if (fp == NULL)
        return 0;

    getHash(passwd, strlen(passwd), hash);
    hash[HASHLEN] = 0;
    snprintf(chash, sizeof(chash), "%s::%s\n", name, hash);

    while (fgets(str, 255, fp)) {
        /* Truncate the line to the comparison length before matching. */
        str[strlen(chash)] = 0;
        if (strcmp(str, chash) == 0) {
            authenticated = 1;
            break;
        }
    }
    fclose(fp);

    if (authenticated) {
        /* Authentication succeeded - scrub all credential material. */
        memset(chash, 0, sizeof(chash));
        memset(hash, 0, sizeof(hash));
        memset(str, 0, sizeof(str));
        memset(passwd, 0, strlen(passwd));
        return 1;
    }
    return 0;
}
// Get an actor from its Name physx::PxRigidActor* getActor(char* name){ unsigned long long l = getHash(std::string(name)); if (g_PhysXActors.find(l) != g_PhysXActors.end()){ return g_PhysXActors[l]; } return nullptr; }
// Upload `filename` into the GridFS `collection`, unless a file with the
// same MD5 digest is already stored there (deduplication by hash).
// Returns false when the local file does not exist, true otherwise.
//
// Fix vs. original: the local-file existence check now runs first. The
// original listed and hashed the entire collection before discovering
// the file was missing.
bool Uploader::upload(string filename, string collection)
{
    p = filename;
    if (!boost::filesystem::exists(p)) {
        cout << "No such file!" << endl;
        return false;
    }

    mongo::GridFS fs = mongo::GridFS(conn, dbName.c_str(), collection.c_str());

    // Collect the MD5 digests of every file already in the collection.
    files = fs.list();
    while (files.get()->more()) {
        mongo::GridFile file = fs.findFile(files.get()->next());
        hashlist.push_front(boost::algorithm::to_upper_copy(file.getMD5()));
    }

    getHash();  // presumably fills `digest` for the local file - confirm
    hashIter = find(hashlist.begin(), hashlist.end(), digest);

    // Store only when no stored file shares our digest.
    if (hashIter == hashlist.end())
        fs.storeFile(filename.c_str(), filename.c_str());
    return true;
}
/**
 * Inserts a data block into the hashtable, aggregating into an existing
 * bucket when an equal flow is already buffered there. Takes ownership
 * of `data`: it is either stored in a new bucket or freed after
 * aggregation.
 */
static void bufferDataBlock(Hashtable* ht, FieldData* data)
{
	uint16_t hash = getHash(ht, data);
	HashBucket* bucket = ht->bucket[hash];

	/* Empty slot: the new bucket becomes the chain head. */
	if (bucket == 0) {
		DPRINTF("bufferDataBlock: creating bucket\n");
		ht->bucket[hash] = createBucket(ht, data);
		return;
	}

	/* Slot occupied: walk the spill chain looking for an equal flow. */
	for (;;) {
		if (equalFlow(ht, bucket->data, data)) {
			DPRINTF("appending to bucket\n");
			aggregateFlow(ht, bucket->data, data);
			bucket->expireTime = time(0) + ht->minBufferTime;
			/* Aggregated in place - the incoming block is no longer needed. */
			free(data);
			return;
		}
		if (bucket->next == 0) {
			/* End of chain without a match: append a new bucket. */
			DPRINTF("creating bucket\n");
			bucket->next = createBucket(ht, data);
			return;
		}
		bucket = (HashBucket*)bucket->next;
	}
}
// Removes `element` from the hash set, if present. Thread-safe: the
// bucket's stripe lock is held for the duration of the removal, and the
// element counter is decremented under its own mutex.
//
// Fix vs. original: iteration used libstdc++'s private iterator member
// _M_next(), which is non-portable. A standard prev/curr iterator pair
// expresses the same "erase the element after the cursor" pattern.
void ThreadSafeHashSet<T, H>::remove(const T& element)
{
    // Shrink the table first if the load factor demands it (see header).
    condenseIfNeeded();

    // Locate the element's bucket and the stripe lock guarding it.
    unsigned int neededBucket = getHash(element) % getCurrentBucketsNumber();
    unsigned int neededMutex = neededBucket % mutexNumber_;

    stripesLocks_[neededMutex].writeLock();

    // forward_list can only erase *after* a cursor, so keep `prev` one
    // step behind `curr`.
    auto prev = buckets_[neededBucket].before_begin();
    auto curr = buckets_[neededBucket].begin();
    for (; curr != buckets_[neededBucket].end(); ++prev, ++curr) {
        if (*curr == element) {
            buckets_[neededBucket].erase_after(prev);
            // Account for the removal under the counter's own mutex.
            {
                std::unique_lock<std::mutex> decrementingLock(elementsCounterLock_);
                currentElementsNumber_--;
            }
            stripesLocks_[neededMutex].writeUnlock();
            return;
        }
    }
    stripesLocks_[neededMutex].writeUnlock();
}
// Re-buckets every element according to the new bucket count: walks all
// buckets and moves each element whose (hash % bucketsNumber) differs
// from its current bucket index.
//
// Fixes vs. original:
//  - iteration used libstdc++'s private _M_next(); replaced with a
//    portable prev/curr iterator pair;
//  - the bucket loop index is now unsigned (it was a signed int compared
//    against an unsigned size and bucket index).
void ThreadSafeHashSet<T, H>::reHash(unsigned int bucketsNumber)
{
    for (unsigned int i = 0; i < buckets_.size(); ++i) {
        // forward_list is singly linked and only supports erase-after,
        // so keep `prev` one step behind `curr`.
        auto prev = buckets_[i].before_begin();
        auto curr = buckets_[i].begin();
        while (curr != buckets_[i].end()) {
            unsigned int newBucket = getHash(*curr) % bucketsNumber;
            if (newBucket != i) {
                // Element belongs elsewhere: copy it to its new bucket,
                // then unlink it here. erase_after invalidates `curr`,
                // so re-derive it from `prev`.
                buckets_[newBucket].push_front(*curr);
                buckets_[i].erase_after(prev);
                curr = prev;
                ++curr;
            } else {
                // Element stays put - advance both cursors.
                ++prev;
                ++curr;
            }
        }
    }
}
// Return the index of the chained-hash slot holding (key1, key2),
// or -1 when the pair is not present.
int fint(int key1, int key2)
{
    int i = head[getHash(key1, key2)];
    while (i != -1) {
        if (ele[i].key1 == key1 && ele[i].key2 == key2)
            return i;
        i = ele[i].next;
    }
    return -1;
}
// Constructor: builds a small UI for verifying a file's hash.
// `Type` selects the hash algorithm (stored in algorithmType);
// `list` supplies the files - only the first entry is hashed here.
CheckFileHashWidget::CheckFileHashWidget(int Type, QFileInfoList list, QWidget *parent) :
    QWidget(parent), algorithmType(Type)
{
    QLabel *labelHash = new QLabel("File's hash:");
    QLabel *labelInput = new QLabel("Input hash for check:");
    labelIcon = new QLabel();
    compareWithFile = new QPushButton("Compare file");
    hashLine = new QLineEdit();
    inputLine = new QLineEdit();
    // The computed hash is display-only.
    hashLine->setReadOnly(true);
    QVBoxLayout *vbox = new QVBoxLayout();
    QHBoxLayout *hbox = new QHBoxLayout();
    hbox->addWidget(inputLine);
    hbox->addWidget(labelIcon);
    // Forward this widget's status messages to the parent's handler.
    connect(this, SIGNAL(statusChanged(QString)), this->parent(), SLOT(changeStatus(QString)));
    // Hash the first file immediately and show the result.
    // NOTE(review): list.at(0) asserts on an empty list - confirm callers
    // always pass at least one file.
    hashLine->setText(getHash(list.at(0).absoluteFilePath()));
    vbox->addWidget(labelHash);
    vbox->addWidget(hashLine);
    vbox->addWidget(labelInput);
    vbox->addLayout(hbox);
    vbox->addWidget(compareWithFile);
    setLayout(vbox);
    // Re-validate the typed hash on every edit; compare against another
    // file on button click.
    connect(inputLine, SIGNAL(textChanged(QString)), SLOT(checkLines(QString)));
    connect(compareWithFile, SIGNAL(clicked()), SLOT(compareFileAndLine()));
}
/*
 * Scans for a hash via getHash() and appends the result to OUTPUT_FILE.
 * Returns 0 on success, 1 on any failure.
 *
 * Fixes vs. original:
 *  - malloc() result is checked before use;
 *  - the `hash` buffer is freed on every exit path (it leaked);
 *  - the allocated-but-never-used `gesture` buffer was removed.
 */
int main()
{
    FILE *fh;
    char *hash;

    hash = (char *) malloc(41);
    if (hash == NULL) {
        printf("ERROR: Out of memory\n");
        return 1;
    }

    fh = fopen(OUTPUT_FILE, "a+");
    if (fh == NULL) {
        printf("ERROR: Unable to open output file for writing\n");
        free(hash);
        return 1;
    }
    fprintf(fh, "Beginning new scan ... \n");

    if (getHash(hash) != 0) {
        fprintf(fh, "Unable to find hash\n");
        fclose(fh);
        free(hash);
        return 1;
    }
    fprintf(fh, "Found hash: %s\n", hash);

    fprintf(fh, "Done!\n\n");
    fclose(fh);
    free(hash);
    return 0;
}
// Removes the entry whose key raw-compares equal to `key` from its hash
// chain, freeing the stored key copy and the table entry.
// Returns EB_FAILURE when no matching node exists.
EbResult EbHashTable::RemoveNode (void *key)
{
	EbUint32 hash = getHash (key);
	TableEntry *prev = 0;

	for (TableEntry *cur = (TableEntry *)array[hash]; cur != 0; prev = cur, cur = cur->next)
	{
		if (CompareRaw(key, cur->key) != EB_TRUE)
			continue;

		// Unlink: either the chain head or an interior node.
		if (prev == 0)
			array[hash] = cur->next;
		else
			prev->next = cur->next;

		delete [] ((EbUint8*)(cur->key));
		delete cur;
		return EB_SUCCESS;
	}
	return EB_FAILURE;
}
// Hash of the first `len` characters of the bracket sequence described
// by `f`: f.addOpen leading '(' characters, then the n-character base
// segment starting at f.pos, then f.addClose trailing ')' characters.
//
// Fix vs. original: when `len` exceeded the total length the function
// fell off the end after assert(false) - undefined behavior when NDEBUG
// disables the assert. It now returns the accumulated prefix hash after
// the assert so release builds have defined behavior.
hash getHash(answer const & f, int len) {
    // Entirely within the leading run of '(' characters.
    if (len <= f.addOpen) {
        return OPEN[len];
    }
    hash z = OPEN[f.addOpen];
    len -= f.addOpen;
    // Extends into the base segment of length n.
    if (len <= n) {
        return z + getHash(f.pos, f.pos + len);
    }
    z = z + getHash(f.pos, f.pos + n);
    len -= n;
    // Extends into the trailing run of ')' characters.
    if (len <= f.addClose) {
        return z + CLOSE[len];
    }
    // len exceeds the total sequence length - a caller bug.
    assert(false);
    return z;
}
// Prepends a new entry for (key, handle) onto the chain of key's hash
// bucket. No duplicate check is performed.
void EbHashTable::AddNode (void *key, EbDataHandle handle)
{
	EbUint32 hash = getHash (key);
	array[hash] = new TableEntry (handle, key, array[hash]);
}
void HashTableDb::set(const std::string& key, const std::string& value) { size_t hashCode = getHash(key) & hashMask; try { DbRecord rec = ht->get(hashCode, key.c_str()).getRecord(*db); if (rec.canShrink(key.c_str(), value.c_str())) { db->shrinkRecord(rec, key.c_str(), value.c_str()); } else { db->delRecord(rec); ht->del(hashCode, key.c_str()); DbRecord newRec = db->newRecord(key.c_str(), value.c_str()); ht->add(hashCode, key.c_str(), newRec.getPage()->getId(), newRec.getCursor()); } } catch (IndexFile::IndexNotFound) { DbRecord newRec = db->newRecord(key.c_str(), value.c_str()); ht->add(hashCode, key.c_str(), newRec.getPage()->getId(), newRec.getCursor()); } }
// Look up the cached broadphase pair for (proxy0, proxy1).
// Returns NULL when the pair is not present in the cache.
btBroadphasePair* btHashedOverlappingPairCache::findPair(btBroadphaseProxy* proxy0, btBroadphaseProxy* proxy1)
{
	gFindPairs++;

	// Canonicalize ordering by unique id so (a, b) and (b, a) hash alike.
	if (proxy0->m_uniqueId > proxy1->m_uniqueId)
		btSwap(proxy0, proxy1);

	const int proxyId1 = proxy0->getUid();
	const int proxyId2 = proxy1->getUid();

	// Mask the hash into the table range (capacity presumably a power of
	// two, so masking acts as the modulo - confirm).
	int hash = static_cast<int>(getHash(static_cast<unsigned int>(proxyId1), static_cast<unsigned int>(proxyId2)) & (m_overlappingPairArray.capacity() - 1));

	if (hash >= m_hashTable.size())
		return NULL;

	// Walk the collision chain until the ids match or it ends.
	int index = m_hashTable[hash];
	while (index != BT_NULL_PAIR && !equalsPair(m_overlappingPairArray[index], proxyId1, proxyId2))
		index = m_next[index];

	if (index == BT_NULL_PAIR)
		return NULL;

	btAssert(index < m_overlappingPairArray.size());
	return &m_overlappingPairArray[index];
}
// Lazily compute and cache the hash of the comm data's receiver object.
// -1 marks "not yet computed".
int BaseLB::LDStats::getRecvHash(LDCommData &cData)
{
	if (cData.recvHash != -1)
		return cData.recvHash;
	cData.recvHash = getHash(cData.receiver.get_destObj());
	return cData.recvHash;
}
std::string HashTableDb::get(const std::string& key) { size_t hashCode = getHash(key) & hashMask; try { IndexNode node = ht->get(hashCode, key.c_str()); DbRecord rec = node.getRecord(*db); if (rec.type() == Long) { std::string value = rec.value(); while (rec.hasNextExtendedRecord()) { rec = rec.nextExtendedRecord(); value += rec.value(); } return value; } else { return rec.value(); } } catch (IndexFile::IndexNotFound) { throw NotFound(); } }
// Lazily compute and cache the hash of the comm data's sender.
// -1 marks "not yet computed".
int BaseLB::LDStats::getSendHash(LDCommData &cData)
{
	if (cData.sendHash != -1)
		return cData.sendHash;
	cData.sendHash = getHash(cData.sender);
	return cData.sendHash;
}
/*
 * This method returns the DCE formatted encoded string value
 * along with the hashed class name of the CKUUID in a format
 * that's easily transported and then used to re-create the CKUUID
 * with the generator methods.
 *
 * Fix vs. original: the exception message named the wrong method
 * (getStringValueInDCEFormat), which misdirects anyone debugging the
 * throw; it now names this method.
 */
CKString CKUUID::getStringValueInDCEFormatWithClassHash() const
{
	CKString retval;

	// A UUID that was never generated has nothing to encode.
	if (!isGenerated()) {
		throw CKException(__FILE__, __LINE__,
			"CKUUID::getStringValueInDCEFormatWithClassHash()"
			" - this UUID has not yet been generated, and therefore there is "
			"nothing to return. Please make sure to properly generate the UUIDs "
			"with one of the class (static) generators for best success.");
	} else {
		// Layout: "<hash> <DCE-formatted uuid> <hashedClassName>".
		char buff[128];
		bzero(buff, 128);
		snprintf(buff, 127,
			"%08x %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x %08x",
			getHash(),
			mUUID.dce.time_low,
			mUUID.dce.time_mid,
			mUUID.dce.time_hi_and_version,
			mUUID.dce.clock_seq_hi_and_reserved,
			mUUID.dce.clock_seq_low,
			mUUID.dce.node[0], mUUID.dce.node[1], mUUID.dce.node[2],
			mUUID.dce.node[3], mUUID.dce.node[4], mUUID.dce.node[5],
			getHashedClassName());
		retval = buff;
	}
	return retval;
}
void CaViewerScanner::scanFolders(const QStringList& slFolder) { for(int countFolder = 0; countFolder < slFolder.size(); ++countFolder) { const QString sCurFolder = slFolder.at(countFolder); QDir dirFolder(sCurFolder); dirFolder.setFilter(QDir::NoDotAndDotDot | QDir::Dirs); if(dirFolder.exists()) { scanFolders(dirFolder.entryList()); QDir dirFile(sCurFolder); dirFile.setFilter(QDir::Files); const QFileInfoList fileList = dirFile.entryInfoList(); for(int countFile = 0; countFile < fileList.size(); ++countFile) { const QFileInfo info = fileList.at(countFile); const QString sFileName = info.fileName(); const QString sPath = info.absolutePath() + QChar('/'); QString sBaseName; const QStringList slSeries = getImageSeries(sPath, sFileName, sBaseName); const QString sMaster = slSeries.first(); const QString sMasterHash = getHash(sPath + sMaster); if(registerMasterImage(sPath, sMaster)) { for(int countSeries = 1; countSeries < slSeries.size(); ++countSeries) registerChainImage(sMasterHash, sPath, slSeries.at(countSeries)); } } } } }
/*
 * Inserts `key` into its hash bucket's cell list.
 *
 * Fix vs. original: on a failed hash (negative position) the process
 * exited with status 0 - the success status - masking the failure from
 * callers and scripts; it now exits with EXIT_FAILURE.
 */
void addToHashTable(HashTable *hashTable, StringXXX *key)
{
    int position = getHash(key);
    if (position < 0) {
        /* Report the offending key, then abort with a failure status. */
        print(key);
        exit(EXIT_FAILURE);
    }
    hashTable->cells[position] = addCell(hashTable->cells[position], key);
}
// Registers a member-function slot: stores, under a key built from the
// object's address and the hash of the member-function pointer, a lambda
// that invokes obj.*func with the supplied arguments.
// NOTE(review): the lambda captures `obj` by reference - the stored
// connection must not outlive `obj`, or invocation dangles. Confirm
// callers disconnect before destroying the object.
void connect(Class& obj, void (Class::*func)(Params...))
{
    m_functions.insert({
        Key {&obj, getHash(func)},
        [&obj, func](Params... params) { (obj.*func)(params...); }
    });
}
// Append a fresh (key1, key2) entry with value 0 at slot N, pushing it
// onto the front of its hash chain, then advance N.
void insert(int key1, int key2)
{
    int bucket = getHash(key1, key2);
    ele[N].key1 = key1;
    ele[N].key2 = key2;
    ele[N].val = 0;
    ele[N].next = head[bucket];
    head[bucket] = N++;
}