/* Look up szName in the global item_list by hash code.
 * Returns a pointer to the first item whose name hashes to the same value,
 * or NULL when no item matches.
 * NOTE(review): matching on hash code alone means two different names with
 * the same hash collide; the strcmp() confirmation appears to be commented
 * out deliberately for this hash-collision demo. */
_S_ITEM_ *findItem(const char *szName)
{
    int item_list_length = sizeof(item_list) / sizeof(_S_ITEM_);
    unsigned long hcode1 = getHashCode(szName);
    for (int i = 0; i < item_list_length; i++) {
        _S_ITEM_ *pitem = &(item_list[i]);
        //if(strcmp(szName,pitem->m_szName) == 0)
        if (getHashCode(pitem->m_szName) == hcode1) {
            return pitem;
        }
    }
    /* BUG FIX: the original fell off the end without returning a value,
       which is undefined behavior when no item matches. */
    return NULL;
}
// Remove `key` from the dictionary. Delegates to the bucket selected by the
// key's hash code and returns the key actually removed (NULL when absent).
K* NAHashDictionary<K,V>::remove(K* key)
{
  K* goneKey = (*hashTable_)[getHashCode(*key)]->remove(key);
  if (goneKey != NULL)
    entries_--;  // only decrement when something was actually removed
  return goneKey;
} // NAHashDictionary<K,V>::remove()
//统计单词词频,线性探测法 void countWords_linear(char input[][WORD_MAX_SIZE], Node *hasharray, const int *array_size, int *unique_words_num) { int i = 0; int pos; int flag = 1; while(i < *array_size) { if(flag == 1) { pos = getHashCode(input[i]); } if(hasharray[pos].word == NULL) { hasharray[pos].word = input[i]; hasharray[pos].count = 1; i++; (*unique_words_num)++; flag = 1; continue; } else if(strcmp(hasharray[pos].word, input[i]) == 0) { hasharray[pos].count++; i++; flag = 1; continue; } flag = 0; if(pos == ARRAY_MAX_SIZE - 1) { pos = 0; } else { pos = pos + 1; } } }
/* Hash demo: print the hash codes of two near-identical strings so their
   values can be compared by eye. */
int main()
{
    const char *samples[] = { "staff", "stafp" };
    for (int i = 0; i < 2; i++) {
        printf("%d\r\n", getHashCode(samples[i]));
    }
    return 0;
}
/* Hash demo: print the hash codes of two different words to inspect whether
   the hash function separates them. */
int main()
{
    const char *words[] = { "staff", "staeg" };
    for (int i = 0; i < 2; i++) {
        printf("%d \r\n", getHashCode(words[i]));
    }
    return 0;
}
// Insert (key, value). When uniqueness is enforced a duplicate key is
// rejected and NULL is returned; otherwise the key is returned on success.
K* NAHashDictionary<K,V>::insert(K* key, V* value)
{
  if (enforceUniqueness_) {
    if (contains(key)) {
      assert(enforceUniqueness_);
      return NULL; // don't insert a duplicate key
    }
  }
  // Add to the bucket addressed by the key's hash and count the entry.
  (*hashTable_)[getHashCode(*key)]->insert(key, value);
  entries_++;
  return key;
} // NAHashDictionary<K,V>::insert()
std::vector<size_t> RabinKarp::_findMatches(std::string text, std::string pattern, bool firstOnly) { size_t m = pattern.size(); size_t n = text.size(); std::vector<size_t> matches; if (m > n) return matches; // no match is possible int64_t hashPattern = 0; // hash value of the pattern int64_t hashSegment = 0; // hash value of the segment for (size_t i = 0; i < m; i++) { hashPattern = ((hashPattern * B) + getHashCode(pattern[i])) % M; hashSegment = ((hashSegment * B) + getHashCode(text[i])) % M; } if (hashSegment == hashPattern) { matches.push_back(0); if (firstOnly) return matches; } // pre-compute B ^ (m-1) % M size_t E = moduloExponentiation(B, m - 1, M); for (size_t i = m; i < n; i++) { // string to consider: [i-m + 1, m] hashSegment = mod(hashSegment - mod(E * text[i - m], M), M); hashSegment = mod(hashSegment * B, M); hashSegment = mod(hashSegment + getHashCode(text[i]), M); if (hashSegment == hashPattern) { matches.push_back(i - m + 1); if (firstOnly) return matches; } } return matches; }
/* Count word frequencies using separate chaining for collisions.
 *
 * input      - array of words (each at most WORD_MAX_SIZE chars)
 * hasharray  - table of chain heads; a head with word == NULL is empty
 * array_size - number of words in input
 *
 * BUG FIXES vs. the original (which was marked as broken):
 *  - the inner chain-walk used `continue`, which re-tested the loop
 *    condition instead of advancing the outer word loop, so a matched
 *    word deep in a chain was processed again;
 *  - the `if (cur == NULL)` tail-append branch was dead code (the walk
 *    stops while cur is still non-NULL), so new words were never appended
 *    to chains longer than one node;
 *  - newly malloc'd nodes never had their `next` pointer initialized.
 */
void countWords(char input[][WORD_MAX_SIZE], Node *hasharray, const int *array_size)
{
    for (int i = 0; i < *array_size; i++) {
        int pos = getHashCode(input[i]);
        Node *head = &hasharray[pos];

        /* Empty home slot: claim it. */
        if (head->word == NULL) {
            head->word = input[i];
            head->count = 1;
            continue;
        }

        /* Walk the chain; bump the count on a match, remember the tail. */
        Node *tail = head;
        int found = 0;
        for (Node *cur = head; cur != NULL; cur = cur->next) {
            if (strcmp(cur->word, input[i]) == 0) {
                cur->count++;
                found = 1;
                break;
            }
            tail = cur;
        }

        /* Not present anywhere in the chain: append a fresh node. */
        if (!found) {
            Node *p = (Node *)malloc(sizeof(Node));
            p->word = input[i];
            p->count = 1;
            p->next = NULL;
            tail->next = p;
        }
    }
}
/* Look up word c in the table.
 * Returns the keyword record when found, NULL when the bucket is empty or
 * the word is not in its chain. */
keyword * fetchFromHash(hashtable *table, char * c){
    int index = getHashCode(c) % table->size;

    /* Walk the bucket's chain (a NULL head simply means an empty scan). */
    node *cursor = table->buckets[index];
    while (cursor != 0) {
        if (strcmp(cursor->data->word, c) == 0) {
            return cursor->data;
        }
        cursor = cursor->next;
    }
    return NULL;
}
/**
 * Demonstrates the dataset, pool and cache reload functionality in a multi
 * threaded environment. When a workset is returned to the pool of worksets
 * a check is carried out to see if the pool is now inactive and all of the
 * worksets have been returned. If both conditions are met the pool is
 * freed along with the underlying dataset and cache.
 *
 * @param inputFile containing HTTP User-Agent strings.
 */
static void runRequests(void* inputFile) {
	fiftyoneDegreesWorkset *ws = NULL;
	unsigned long hashCode = 0;
	char userAgent[1000];
	FILE* fin = fopen((const char*)inputFile, "r");

	/* BUG FIX: guard against fopen failure; the original passed a NULL
	   stream straight to fgets, which crashes. */
	if (fin != NULL) {
		while (fgets(userAgent, sizeof(userAgent), fin) != NULL) {
			ws = fiftyoneDegreesProviderWorksetGet(&provider);
			fiftyoneDegreesMatch(ws, userAgent);
			/* XOR-fold each workset's hash so the accumulated value does
			   not depend on ordering between interleaved threads. */
			hashCode ^= getHashCode(ws);
			fiftyoneDegreesWorksetRelease(ws);
		}
		fclose(fin);
	}
	else {
		fprintf(stderr, "Failed to open input file '%s'.\n", (const char*)inputFile);
	}
	printf("Finished with hashcode '%lu'\r\n", hashCode);

	/* Always record completion — even on failure — so whatever waits on
	   threadsFinished does not hang. */
	FIFTYONEDEGREES_MUTEX_LOCK(&lock);
	threadsFinished++;
	FIFTYONEDEGREES_MUTEX_UNLOCK(&lock);
}
/**
 * Demonstrates the dataset, pool and cache reload functionality in a single
 * threaded environment. Since only one thread is available the reload will
 * be done as part of the program flow and detection will not be available for
 * the very short time that the dataset, pool and cache are being reloaded.
 *
 * The reload happens every 1000 requests (see the count check below; the
 * original comment said 500, which did not match the code). The total number
 * of dataset reloads is then returned.
 *
 * @param inputFile containing HTTP User-Agent strings to use with device
 * detection.
 * @return number of times the dataset, pool and cache were reloaded.
 */
static int runRequest(const char *inputFile) {
	fiftyoneDegreesWorkset *ws = NULL;
	unsigned long hashCode = 0;
	int count = 0, numberOfReloads = 0;
	char userAgent[1000];
	char *fileInMemory;
	char *pathToFileInMemory;
	long currentFileSize;
	FILE* fin = fopen(inputFile, "r");

	/* BUG FIX: the original never checked fopen's result and would have
	   passed a NULL stream to fgets. */
	if (fin == NULL) {
		fprintf(stderr, "Failed to open input file '%s'.\n", inputFile);
		return numberOfReloads;
	}

	// In this example the same data file is reloaded from.
	// Store a private copy of the path for use with reloads, because the
	// dataset (and its fileName) is replaced on each reload.
	size_t pathLength = strlen(provider.activePool->dataSet->fileName) + 1;
	pathToFileInMemory = (char*)malloc(pathLength);
	if (pathToFileInMemory == NULL) {
		fclose(fin);
		return numberOfReloads;
	}
	memcpy(pathToFileInMemory, provider.activePool->dataSet->fileName, pathLength);

	while (fgets(userAgent, sizeof(userAgent), fin) != NULL) {
		ws = fiftyoneDegreesProviderWorksetGet(&provider);
		fiftyoneDegreesMatch(ws, userAgent);
		hashCode ^= getHashCode(ws);
		fiftyoneDegreesWorksetRelease(ws);
		count++;
		if (count % 1000 == 0) {
			// Load file into memory.
			currentFileSize = loadFile(pathToFileInMemory, &fileInMemory);
			// Refresh the current dataset.
			fiftyoneDegreesProviderReloadFromMemory(&provider, (void*)fileInMemory, currentFileSize);
			fiftyoneDegreesDataSet *ds = (fiftyoneDegreesDataSet*)provider.activePool->dataSet;
			// Tell the API to free the memory occupied by the data file.
			ds->memoryToFree = (void*)fileInMemory;
			numberOfReloads++;
		}
	}
	fclose(fin);
	free(pathToFileInMemory);
	printf("Finished with hashcode '%lu'\r\n", hashCode);
	return numberOfReloads;
}
/* Search the table for a node whose type equals nodeType and whose content
 * string equals nodeContent.
 * Returns the matching node, or 0 (NULL) when absent. */
HASH_NODE *hashSeek(HASH_TABLE *hashTable, int nodeType, char *nodeContent)
{
    HASH_NODE *hashNode;
    int hashAddress;

    hashAddress = getHashCode(nodeContent);
    /* Walk the bucket's collision chain. */
    for (hashNode = hashTable->hashArray[hashAddress]; hashNode != NULL; hashNode = hashNode->next) {
        /* BUG FIX: the original compared hashNode->content (a string) to
           nodeType (an int); the node's type field is what must match. */
        if (hashNode->type == nodeType && (strcmp(hashNode->content, nodeContent) == 0))
            return hashNode;
    }
    return 0;
}
/* Add word c to the table, or bump its count if already present.
 * Returns 1 when a new word was added, 0 when an existing count was bumped. */
int addToHash(hashtable *table, char *c){
    int index = getHashCode(c) % table->size;

    /* Scan the bucket's chain for an existing entry, remembering the tail
       so a new node can be appended. An empty bucket skips the loop. */
    node *tail = 0;
    node *cursor;
    for (cursor = table->buckets[index]; cursor != 0; cursor = cursor->next) {
        if (strcmp(cursor->data->word, c) == 0) {
            (cursor->data->count)++;
            return 0; /* already counted */
        }
        tail = cursor;
    }

    /* Not found: build a fresh node holding the word with count 1. */
    node *fresh = (node *)malloc(sizeof(node));
    fresh->data = (keyword *)malloc(sizeof(keyword));
    fresh->data->word = c;
    fresh->data->count = 1;
    fresh->next = 0;
    if (tail == 0)
        table->buckets[index] = fresh; /* bucket was empty */
    else
        tail->next = fresh;            /* append to the chain */
    return 1;
}
/* Insert a (nodeType, nodeContent) pair into the table, returning the
 * existing node if the pair is already present, otherwise the new node.
 * NOTE(review): strcpy into hashNewNode->content assumes `content` is a
 * buffer large enough for nodeContent — TODO confirm the HASH_NODE layout. */
HASH_NODE *hashInsert(HASH_TABLE *hashTable, int nodeType, char *nodeContent)
{
    int hashAddress;
    HASH_NODE *hashNode, *hashNewNode;

    /* BUG FIX: the original called hashSeek twice and the second call used
       the typo `noteType`; it also assigned the undeclared `hashAdress`
       while indexing with `hashAddress` — neither compiled. */
    hashNode = hashSeek(hashTable, nodeType, nodeContent);
    if (hashNode != NULL)
        return hashNode;

    hashAddress = getHashCode(nodeContent);
    hashNode = hashTable->hashArray[hashAddress];

    hashNewNode = malloc(sizeof(HASH_NODE));
    hashNewNode->type = nodeType;
    strcpy(hashNewNode->content, nodeContent);
    /* Prepend the new node to the bucket's collision chain. */
    hashNewNode->next = hashNode;
    hashTable->hashArray[hashAddress] = hashNewNode;
    return hashNewNode;
}
// Two principals are considered equal when their hash codes match.
bool Principal::operator==(const Principal& p)
{
    const std::size_t mine = getHashCode();
    const std::size_t theirs = p.getHashCode();
    return mine == theirs;
}
// Principals differ when their hash codes differ.
bool Principal::operator!=(const Principal& p)
{
    return getHashCode() != p.getHashCode();
}
// Compare this principal directly against a precomputed hash code.
bool Principal::operator==(std::size_t hashCode)
{
    return hashCode == getHashCode();
}
// Hash the given name and compare it against this principal's hash code.
bool Principal::operator!=(const std::string& name)
{
    const std::size_t nameHash = std::hash<std::string>()(name);
    return getHashCode() != nameHash;
}
// Inverse of equality against a precomputed hash code.
bool Principal::operator!=(std::size_t hashCode)
{
    return getHashCode() != hashCode;
}