// Return the weight stored alongside `vertex` in the hash set.
// The vertex MUST be present; falling off the end of the chain is a
// programming error (asserted), and the trailing return only silences
// the compiler on NDEBUG builds.
int weightToVertex(VertexHashSet *set, Vertex *vertex) {
    BucketNode *cursor = set->buckets[hashVertex(vertex, set->bucketCount)];
    while (cursor) {
        if (cursor->value == vertex) return cursor->weight;
        cursor = cursor->next;
    }
    assert(false); // caller guarantees membership
    return 0;
}
uint32_t hashDfaOrigins(const DfaStates& nfasWithInput) { // Find the NFA states this dfa came from, uint32_t hash = 0; for (DfaStates::const_iterator nfaIt=nfasWithInput.begin(); nfaIt!=nfasWithInput.end(); ++nfaIt) { DfaVertex* nfaStatep = *nfaIt; hash ^= hashVertex(nfaStatep); } return hash; }
// Insert `value` (with its weight) into the hash set, a no-op if already present.
// Only externally-counted inserts (external == true) bump elementCount and can
// trigger a resize; internal inserts may therefore exceed the load factor.
void addElementResizeVertex(VertexHashSet *set, Vertex *value, int weight, bool external) {
    // Grow the table up front when this insert would push past LOAD_FACTOR.
    if (external) {
        int threshold = (int)((double)set->bucketCount * LOAD_FACTOR);
        if (set->elementCount + 1 > threshold) {
            allocateBucketsVertex(set, set->bucketCount << 1);
        }
    }
    // Walk the chain through a pointer-to-link so appending needs no
    // head/tail special case.
    BucketNode **link = set->buckets + hashVertex(value, set->bucketCount);
    for (; *link; link = &((*link)->next)) {
        if ((*link)->value == value) return; // duplicate: nothing to do
    }
    if (external) set->elementCount++;
    *link = makeNodeVertex(value, weight, *link); // *link is NULL here
}
// Remove `value` from the hash set if present; silently does nothing otherwise.
// NOTE(review): elementCount is decremented unconditionally here, while
// addElementResizeVertex only increments it for external inserts — confirm
// the two counters are meant to be asymmetric.
void removeElementVertex(VertexHashSet *set, Vertex *value) {
    // Pointer-to-link walk: *link is exactly the slot to rewrite on unlink.
    BucketNode **link = set->buckets + hashVertex(value, set->bucketCount);
    for (; *link; link = &((*link)->next)) {
        if ((*link)->value != value) continue;
        BucketNode *victim = *link;
        *link = victim->next;
        free(victim);
        set->elementCount--;
        return;
    }
}
uint32_t hashDfaOrigins(DfaVertex* dfaStatep) {
    // Find the NFA states this dfa came from,
    // Record a checksum, so we can search for it later by the list of nfa nodes.
    // The order of the nodes is not deterministic; the hash thus must not depend on order of edges
    uint32_t hash = 0;
    // Foreach NFA state (this DFA state was formed from)
    // Debug builds also verify no NFA state is reachable twice: bump the step
    // counter, then tag each visited state with it below.
    if (debug()) nextStep();
    for (V3GraphEdge* dfaEdgep = dfaStatep->outBeginp(); dfaEdgep; dfaEdgep=dfaEdgep->outNextp()) {
        // Only edges leading to NFA states contribute; other out-edges are skipped.
        if (nfaState(dfaEdgep->top())) {
            DfaVertex* nfaStatep = static_cast<DfaVertex*>(dfaEdgep->top());
            // XOR keeps the checksum independent of edge iteration order.
            hash ^= hashVertex(nfaStatep);
            if (debug()) {
                // A state already tagged with the current step was reached twice
                // from this DFA state — that is a structural error.
                if (nfaStatep->user()==m_step) v3fatalSrc("DFA state points to duplicate NFA state.");
                nfaStatep->user(m_step);
            }
        }
    }
    return hash;
}
// Membership test: true iff `value` is stored in the hash set.
bool containsVertex(VertexHashSet *set, Vertex *value) {
    BucketNode *cursor = set->buckets[hashVertex(value, set->bucketCount)];
    while (cursor) {
        if (cursor->value == value) return true;
        cursor = cursor->next;
    }
    return false;
}
void Collapser::collapse() { int numStreams = mStreams.size(); unsigned int lCurrent = 0; // first create an interleaved version of geometry // to compare vertices in one memcmp call // char *interleaved = (char*) malloc( mVertexSize * mNumVertices ); char *ptr = interleaved; for ( lCurrent = 0; lCurrent < mNumVertices; lCurrent++ ) { for ( int streamIndex = 0; streamIndex < numStreams; streamIndex++ ) { unsigned int csize = mStreams[streamIndex]->csize; memcpy( ptr, &mStreams[streamIndex]->bytes[lCurrent*csize], csize ); ptr += csize; } } // hash and split vertices into bucket // for faster compare/collapse // int ** buckets; hash *hashList = new hash[ mNumVertices ]; unsigned int *bucketCounts = new unsigned int[ NBUCKETS ]; memset( bucketCounts, 0, NBUCKETS * sizeof(unsigned int) ); // hash all vertices, store hashes and count vertices per buckets // for ( lCurrent = 0; lCurrent < mNumVertices; lCurrent++ ) { hash vHash = hashVertex( (char*) &interleaved[lCurrent * mVertexSize], mVertexSize ); vHash = vHash % NBUCKETS; hashList[lCurrent] = vHash; bucketCounts[vHash] ++; } // allocate buckets // buckets = (int **) malloc( NBUCKETS * sizeof(void *) ); for( hash i = 0; i < NBUCKETS; i++ ) { buckets[i] = new int[ bucketCounts[i] ]; } // for each buckets, create le list of corresponding vertex indices // memset( bucketCounts, 0, NBUCKETS * sizeof(unsigned int) ); for ( lCurrent = 0; lCurrent < mNumVertices; lCurrent++ ) { hash vHash = hashList[lCurrent]; buckets[vHash][ bucketCounts[vHash] ] = lCurrent; bucketCounts[vHash] ++; } // collapse each buckets // int bPerThreads = NBUCKETS / MAX_THREADS; pthread_t threads[MAX_THREADS]; COLLAPSE_PARAM* cParams = (COLLAPSE_PARAM*) malloc( MAX_THREADS * sizeof(COLLAPSE_PARAM)); for ( int i = 0; i < MAX_THREADS; i++ ) { cParams[i].buckets = buckets; cParams[i].bucketCounts = bucketCounts; cParams[i].boffset = i*bPerThreads; cParams[i].bcount = bPerThreads; cParams[i].interleaved = interleaved; cParams[i].mVertexSize = 
mVertexSize; cParams[i].mRemapTable = mRemapTable; int rc = pthread_create(&threads[i], NULL, collapseBucket, (void *) &cParams[i]); assert(0 == rc); } for (int i=0; i<MAX_THREADS; ++i) { // block until thread i completes int rc = pthread_join(threads[i], NULL); assert(0 == rc); } // free memory for( int i = 0; i < NBUCKETS; i++ ) { delete buckets[i]; } delete buckets; delete bucketCounts; delete hashList; // calculate new length // ===================== mCollapsedNumVertices = 0; for (int i = 0; i < mNumVertices; i++) { if( mRemapTable[i] == i ) mCollapsedNumVertices++; } // remap indices // ============= remap(); logStats(); }