// Print the key record and report whether the line terminator should be
// suppressed. Returns true only when the record was written directly as BAM.
bool RecordOutputMgr::printKeyAndTerminate(RecordKeyVector &keyList) {
	if (_context->getProgram() == ContextBase::MERGE) {
		//when printing merged records, we want to force the printing into
		//bed3 format, which is surprisingly difficult to do. Had to use the following:
		const Bed3Interval *bed3 = static_cast<const Bed3Interval *>(keyList.getKey());
		bed3->Bed3Interval::print(_outBuf);
		//in addition, if we're doing stranded merges, we need to print the strand sign.
		if (_context->getDesiredStrand() != FileRecordMergeMgr::ANY_STRAND) {
			_outBuf.append("\t");
			_outBuf.append(keyList.getKey()->getStrand());
		}
		return false;
	}
	printBamType bamCode = printBamRecord(keyList);
	switch (bamCode) {
	case BAM_AS_BAM:
		return true;
	case NOT_BAM:
		keyList.getKey()->print(_outBuf);
		return false;
	default:
		//BAM_AS_BED: the key was already printed by printBamRecord.
		return false;
	}
}
// Print a record, expanding BAM records into their blocks first so the
// two-argument overload can render them correctly.
void RecordOutputMgr::printRecord(RecordKeyVector &keyList) {
	bool keyIsBam = (keyList.getKey()->getType() == FileRecordTypeChecker::BAM_RECORD_TYPE);
	if (!keyIsBam) {
		printRecord(keyList, NULL);
		return;
	}
	RecordKeyVector blockList(keyList.getKey());
	bool deleteBlocks = false;
	_bamBlockMgr->getBlocks(blockList, deleteBlocks);
	printRecord(keyList, &blockList);
	if (deleteBlocks) {
		_bamBlockMgr->deleteBlocks(blockList);
	}
}
// Emit the complement interval that ends where this record begins, tracking
// chromosome changes against the genome file.
// BUGFIX: guard against the first record of a chromosome starting at
// position 0 — previously this emitted a zero-length interval (chrom 0 0).
void ComplementFile::processHits(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	_outputMgr = outputMgr;
	const Record *rec = hits.getKey();
	//test for chrom change.
	const QuickString &newChrom = rec->getChrName();
	if (_currChrom != newChrom) {
		outPutLastRecordInPrevChrom();
		//if record's chrom doesn't exist in the genome file, do
		//nothing
		if (!fastForward(newChrom)) return;
		//we've switched to a new chromosome that is in both the DB
		//and genome file.
		_currStartPos = 0;
		_currChrom = newChrom;
		_outRecord.setChrName(newChrom);
	}
	// safe guard against the first record for the chrom starting at 0,
	// which would otherwise print an empty complement interval.
	if (rec->getStartPos() != 0) {
		int endPos = rec->getStartPos();
		printRecord(endPos);
	}
	_currStartPos = rec->getEndPos();
}
// Split a BED12 record into its blocks (from the blockSizes / blockStarts
// columns) and append each block to keyList. Sets mustDelete true when new
// records were allocated, so the caller knows to free them.
void BlockMgr::getBlocksFromBed12(RecordKeyVector &keyList, bool &mustDelete) {
	const Bed12Interval *keyRecord = static_cast<const Bed12Interval *>(keyList.getKey());
	int blockCount = keyRecord->getBlockCount();
	// A record with no blocks contributes nothing and allocates nothing.
	if (blockCount <= 0) {
		mustDelete = false;
		return;
	}
	int sizeCount = _blockSizeTokens.tokenize(keyRecord->getBlockSizes(), ',');
	int startCount = _blockStartTokens.tokenize(keyRecord->getBlockStarts(), ',');
	// The three counts must agree or the record is malformed.
	if (blockCount != sizeCount || sizeCount != startCount) {
		fprintf(stderr, "Error: found wrong block counts while splitting entry.\n");
		exit(-1);
	}
	for (int blockIdx = 0; blockIdx < blockCount; blockIdx++) {
		int blockStart = keyRecord->getStartPos() + str2chrPos(_blockStartTokens.getElem(blockIdx).c_str());
		int blockEnd = blockStart + str2chrPos(_blockSizeTokens.getElem(blockIdx).c_str());
		keyList.push_back(allocateAndAssignRecord(keyRecord, blockStart, blockEnd));
	}
	mustDelete = true;
}
// Sum the number of bases by which each hit overlaps the key record,
// recording per-hit overlap sizes in _qsizes along the way.
unsigned long Fisher::getTotalIntersection(RecordKeyVector &recList) {
	unsigned long totalOverlap = 0;
	Record *key = recList.getKey();
	CHRPOS keyStart = key->getStartPos();
	CHRPOS keyEnd = key->getEndPos();
	_overlapCounts += recList.size();
	// note that we truncate to a max size of 2.1GB
	_qsizes.push_back((int)(keyEnd - keyStart));
	bool obeySplits = _context->getObeySplits();
	int hitIdx = 0;
	for (RecordKeyVector::iterator_type iter = recList.begin(); iter != recList.end(); iter = recList.next()) {
		CHRPOS overlapStart = max((*iter)->getStartPos(), keyStart);
		CHRPOS overlapEnd = min((*iter)->getEndPos(), keyEnd);
		_qsizes.push_back((int)(overlapEnd - overlapStart));
		if (obeySplits) {
			// Split mode: use the per-hit block overlap totals computed earlier.
			totalOverlap += upCast(_context)->getSplitBlockInfo()->getOverlapBases(hitIdx);
			hitIdx++;
		} else {
			totalOverlap += (unsigned long)(overlapEnd - overlapStart);
		}
	}
	_numIntersections += (int)recList.size();
	return totalOverlap;
}
// Report a depth histogram for the current query: for each observed depth,
// print the depth, the number of query bases at that depth, the query length,
// and the covered fraction. Also accumulates the whole-file histogram.
void CoverageFile::doHist(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	//make a map of depths to num bases with that depth
	_currDepthMap.clear();
	for (size_t pos = 0; pos < _queryLen; pos++) {
		size_t depthHere = _depthArray[pos];
		_currDepthMap[depthHere]++;
		_finalDepthMap[depthHere]++;
	}
	// One output row per distinct depth value.
	for (depthMapType::iterator iter = _currDepthMap.begin(); iter != _currDepthMap.end(); iter++) {
		size_t depth = iter->first;
		size_t basesAtDepth = iter->second;
		float coveredFraction = (float)basesAtDepth / (float)_queryLen;
		_finalOutput = depth;
		_finalOutput.append("\t");
		_finalOutput.append(basesAtDepth);
		_finalOutput.append("\t");
		_finalOutput.append(_queryLen);
		_finalOutput.append("\t");
		format(coveredFraction);
		outputMgr->printRecord(hits.getKey(), _finalOutput);
	}
}
// Emit the complement interval that ends where this record begins, handling
// chromosome transitions against the genome file and guarding degenerate cases.
void ComplementFile::processHits(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	_outputMgr = outputMgr;
	const Record *rec = hits.getKey();
	const string &chrom = rec->getChrName();
	// Handle a chromosome transition.
	if (_currChrom != chrom) {
		outPutLastRecordInPrevChrom();
		// A chrom absent from the genome file is skipped entirely.
		if (!fastForward(chrom)) {
			return;
		}
		// New chromosome present in both the DB and the genome file.
		_currStartPos = 0;
		_currChrom = chrom;
		_outRecord.setChrName(chrom);
	}
	// Warn if the record's interval extends past the chromosome length.
	checkCoordinatesAgainstChromLength(rec);
	// Don't emit an empty interval when the chrom's first record starts at 0.
	if (rec->getStartPos() != 0) {
		CHRPOS gapEnd = rec->getStartPos();
		printRecord(gapEnd);
	}
	_currStartPos = rec->getEndPos();
}
// Build the per-base depth array for the current query record: each hit
// increments depth over the portion of the query it overlaps.
// BUGFIX: the memset that zeroes the depth array was inside the resize
// branch, so whenever the existing capacity was already large enough the
// depths from the PREVIOUS query leaked into this one. The array must be
// cleared for every query. Also check the realloc result before use.
void CoverageFile::makeDepthCount(RecordKeyVector &hits) {
	const Record *key = hits.getKey();
	_queryOffset = key->getStartPos();
	_queryLen = (size_t)(key->getEndPos() - _queryOffset);
	_totalQueryLen += _queryLen;

	//resize depth array if needed
	if (_depthArrayCapacity < _queryLen) {
		_depthArray = (size_t*)realloc(_depthArray, sizeof(size_t) * _queryLen);
		if (_depthArray == NULL) {
			fprintf(stderr, "Error: unable to allocate coverage depth array.\n");
			exit(1);
		}
		_depthArrayCapacity = _queryLen;
	}
	// Clear the array for EVERY query, not only after a resize.
	memset(_depthArray, 0, sizeof(size_t) * _depthArrayCapacity);

	//loop through hits, which may not be in sorted order, due to
	//potential multiple databases, and increment the depth array as needed.
	for (RecordKeyVector::const_iterator_type iter = hits.begin(); iter != hits.end(); iter = hits.next()) {
		const Record *dbRec = *iter;
		int dbStart = dbRec->getStartPos();
		int dbEnd = dbRec->getEndPos();
		// Clip the hit to the query's bounds before counting.
		int maxStart = max(_queryOffset, dbStart);
		int minEnd = min(dbEnd, key->getEndPos());
		for (int i = maxStart; i < minEnd; i++) {
			_depthArray[i - _queryOffset]++;
		}
	}
}
// Populate keyList with the key record's blocks. Only BED12 and BAM records
// actually have sub-blocks; every other type is treated as one block.
void BlockMgr::getBlocks(RecordKeyVector &keyList, bool &mustDelete) {
	if (keyList.getKey()->getType() == FileRecordTypeChecker::BED12_RECORD_TYPE) {
		getBlocksFromBed12(keyList, mustDelete);
	} else if (keyList.getKey()->getType() == FileRecordTypeChecker::BAM_RECORD_TYPE) {
		getBlocksFromBam(keyList, mustDelete);
	} else {
		// Unblocked types: the record itself is its one and only "block",
		// and no new memory was allocated.
		keyList.push_back(keyList.getKey());
		mustDelete = false;
	}
}
// Compare the blocks of the query record (keyList's key) against the blocks
// of every database hit. Hits whose total block overlap meets _overlapFraction
// of the key's summed block length (and, in reciprocal mode, of the hit's
// summed block length too) are appended to resultList, with their overlap
// totals recorded in _overlapBases for later lookup via getOverlapBases().
// Returns the number of passing hits.
int BlockMgr::findBlockedOverlaps(RecordKeyVector &keyList, RecordKeyVector &hitList, RecordKeyVector &resultList) {
	bool deleteKeyBlocks = false;
	if (keyList.empty()) {
		//get all the blocks for the query record, put them in it's list.
		getBlocks(keyList, deleteKeyBlocks);
	}
	_overlapBases.clear();
	int keyBlocksSumLength = getTotalBlockLength(keyList);
	//Loop through every database record the query intersected with
	for (RecordKeyVector::const_iterator_type hitListIter = hitList.begin(); hitListIter != hitList.end(); hitListIter = hitList.next()) {
		RecordKeyVector hitBlocks(*hitListIter);
		bool deleteHitBlocks = false;
		getBlocks(hitBlocks, deleteHitBlocks); //get all blocks for the hit record.
		int hitBlockSumLength = getTotalBlockLength(hitBlocks); //get total length of the bocks for the hitRecord.
		int totalHitOverlap = 0;
		bool hitHasOverlap = false;
		//loop through every block of the database record.
		for (RecordKeyVector::const_iterator_type hitBlockIter = hitBlocks.begin(); hitBlockIter != hitBlocks.end(); hitBlockIter = hitBlocks.next()) {
			//loop through every block of the query record.
			for (RecordKeyVector::const_iterator_type keyListIter = keyList.begin(); keyListIter != keyList.end(); keyListIter = keyList.next()) {
				const Record *keyBlock = *keyListIter;
				const Record *hitBlock = *hitBlockIter;
				// Half-open interval intersection; overlap <= 0 means none.
				int maxStart = max(keyBlock->getStartPos(), hitBlock->getStartPos());
				int minEnd = min(keyBlock->getEndPos(), hitBlock->getEndPos());
				int overlap = minEnd - maxStart;
				if (overlap > 0) {
					hitHasOverlap = true;
					totalHitOverlap += overlap;
				}
			}
		}
		if (hitHasOverlap) {
			// The key-side fraction must always pass; the hit-side fraction
			// is additionally required only in reciprocal mode.
			if ((float) totalHitOverlap / (float)keyBlocksSumLength >= _overlapFraction) {
				if (_hasReciprocal && ((float)totalHitOverlap / (float)hitBlockSumLength >= _overlapFraction)) {
					_overlapBases.push_back(totalHitOverlap);
					resultList.push_back(*hitListIter);
				} else if (!_hasReciprocal) {
					_overlapBases.push_back(totalHitOverlap);
					resultList.push_back(*hitListIter);
				}
			}
		}
		if (deleteHitBlocks) {
			deleteBlocks(hitBlocks);
		}
	}
	if (deleteKeyBlocks) {
		deleteBlocks(keyList);
	}
	resultList.setKey(keyList.getKey());
	return (int)resultList.size();
}
// Report the mean coverage depth over the query: total depth across all
// query bases divided by the query length.
void CoverageFile::doMean(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	size_t depthTotal = 0;
	for (size_t pos = 0; pos < _queryLen; pos++) {
		depthTotal += _depthArray[pos];
	}
	format((float)depthTotal / (float)_queryLen);
	outputMgr->printRecord(hits.getKey(), _finalOutput);
}
// When obeying splits, replace the raw hit set with the block-aware result:
// the overlapping sub-intervals for coverage (so per-base depth is correct),
// or the filtered hit records for every other program.
void IntersectFile::checkSplits(RecordKeyVector &hitSet) {
	if (!upCast(_context)->getObeySplits()) {
		return;
	}
	RecordKeyVector keySet(hitSet.getKey());
	RecordKeyVector resultSet(hitSet.getKey());
	RecordKeyVector overlapSet(hitSet.getKey());
	upCast(_context)->getSplitBlockInfo()->findBlockedOverlaps(keySet, hitSet, resultSet, overlapSet);
	// when using coverage, we need a list of the sub-intervals of coverage
	// so that per-base depth can be properly calculated when obeying splits
	if (_context->getProgram() == ContextBase::COVERAGE) {
		hitSet.swap(overlapSet);
	} else {
		hitSet.swap(resultSet);
	}
}
// Print the key record; returns true only when it was written directly as
// BAM (in which case no text terminator is needed).
bool RecordOutputMgr::printKeyAndTerminate(RecordKeyVector &keyList) {
	if (_context->getProgram() == ContextBase::MERGE) {
		//merged records must be forced into bed3 format, which is surprisingly
		//difficult to do: the explicitly qualified call bypasses the record's
		//own virtual print.
		static_cast<const Bed3Interval *>(keyList.getKey())->Bed3Interval::print(_outBuf);
		return false;
	}
	printBamType bamCode = printBamRecord(keyList);
	if (bamCode == BAM_AS_BAM) {
		return true;
	}
	if (bamCode == NOT_BAM) {
		keyList.getKey()->print(_outBuf);
	}
	//BAM_AS_BED records were already printed by printBamRecord.
	return false;
}
// Read records until one with a usable key is found. Returns false at EOF.
bool ComplementFile::findNext(RecordKeyVector &hits) {
	for (;;) {
		if (_frm->eof()) {
			return false;
		}
		_frm->getNextRecord(&hits);
		// Skip records the reader could not produce a key for.
		if (hits.getKey() != NULL) {
			return true;
		}
	}
}
// Free every record in the list except the key (which may itself appear in
// the list), then clear the vector.
void FileRecordMergeMgr::deleteAllMergedItemsButKey(RecordKeyVector &recList) {
	const Record *keyRec = recList.getKey();
	for (RecordKeyVector::const_iterator_type iter = recList.begin(); iter != recList.end(); iter = recList.next()) {
		if (*iter != keyRec) {
			deleteRecord(*iter);
		}
	}
	recList.clearVector();
}
// Drive the merge: read every merged record set from the (single) input file
// and print each key with its column-op value.
bool MergeFile::merge() {
	RecordKeyVector hitSet;
	FileRecordMgr *frm = _context->getFile(0);
	while (!frm->eof()) {
		// A NULL key means no usable record was produced this round.
		if (frm->getNextRecord(&hitSet) == NULL) {
			continue;
		}
		_recordOutputMgr->printRecord(hitSet.getKey(), _context->getColumnOpsVal(hitSet));
	}
	return true;
}
// Print one output line per closest hit for the key record, or a single
// "null" line when no hit exists. `dists` (when non-NULL) holds per-hit
// distances parallel to keyList's hit vector.
void RecordOutputMgr::printClosest(RecordKeyVector &keyList, const vector<int> *dists) {
	//The first time we print a record is when we print any header, because the header
	//hasn't been read from the query file until after the first record has also been read.
	checkForHeader();
	const ContextClosest *context = static_cast<const ContextClosest *>(_context);
	bool deleteBlocks = false;
	const Record *keyRec = keyList.getKey();
	RecordKeyVector blockList(keyRec);
	// BAM keys need their blocks expanded before they can be printed as text.
	if (keyRec->getType() == FileRecordTypeChecker::BAM_RECORD_TYPE) {
		_bamBlockMgr->getBlocks(blockList, deleteBlocks);
		_currBamBlockList = &blockList;
	}
	if (!keyList.empty()) {
		int distCount = 0;
		for (RecordKeyVector::const_iterator_type iter = keyList.begin(); iter != keyList.end(); iter = keyList.next()) {
			const Record *hitRec = *iter;
			// key fields, then the hit's source-file id, then the hit fields.
			printKey(keyRec, keyRec->getStartPosStr(), keyRec->getEndPosStr());
			tab();
			addDbFileId(hitRec->getFileIdx());
			printKey(hitRec, hitRec->getStartPosStr(), hitRec->getEndPosStr());
			if (dists != NULL) {
				tab();
				int dist = (*dists)[distCount];
				//if not using sign distance, use absolute value instead.
				dist = context->signDistance() ? dist : abs(dist);
				_outBuf.append(dist);
				distCount++;
			}
			newline();
			if (needsFlush()) flush();
		}
	} else {
		// No hit found: print the key followed by a null placeholder record.
		printKey(keyRec, keyRec->getStartPosStr(), keyRec->getEndPosStr());
		tab();
		// need to add a dummy file id if multiple DB files are used
		if (_context->getNumInputFiles() > 2) {
			_outBuf.append('.');
			tab();
		}
		null(false, true);
		// Distance of -1 marks "no hit" when distance reporting is on.
		if (context->reportDistance()) {
			tab();
			_outBuf.append(-1);
		}
		newline();
	}
	// Release block storage and reset the shared block-list pointer.
	if (deleteBlocks) {
		_bamBlockMgr->deleteBlocks(blockList);
		_currBamBlockList = NULL;
	}
	return;
}
// Print the full query record plus 1-based position and depth for every
// base of the query.
void CoverageFile::doPerBase(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	const Record *queryRec = hits.getKey();
	for (size_t pos = 0; pos < _queryLen; pos++) {
		_finalOutput = pos + 1;
		_finalOutput.append("\t");
		_finalOutput.append(_depthArray[pos]);
		outputMgr->printRecord(queryRec, _finalOutput);
	}
}
// Print one output line per closest hit for the key record, or a "null"
// line when no hit exists. `dists` (when non-NULL) holds per-hit distances
// parallel to keyList's hit vector.
void RecordOutputMgr::printClosest(RecordKeyVector &keyList, const vector<int> *dists) {
	const ContextClosest *context = static_cast<const ContextClosest *>(_context);
	bool deleteBlocks = false;
	RecordKeyVector blockList(keyList.getKey());
	// BAM keys need their blocks expanded before they can be printed as text.
	if (keyList.getKey()->getType() == FileRecordTypeChecker::BAM_RECORD_TYPE) {
		_bamBlockMgr->getBlocks(blockList, deleteBlocks);
		_currBamBlockList = &blockList;
	}
	if (!keyList.empty()) {
		int distCount = 0;
		for (RecordKeyVector::const_iterator_type iter = keyList.begin(); iter != keyList.end(); iter = keyList.next()) {
			// key fields, the hit's source-file id, then the hit record itself.
			printKey(keyList.getKey());
			tab();
			addDbFileId((*iter)->getFileIdx());
			(*iter)->print(_outBuf);
			if (dists != NULL) {
				tab();
				_outBuf.append((*dists)[distCount]);
				distCount++;
			}
			newline();
			if (needsFlush()) flush();
		}
	} else {
		// No hit found: key, then a null placeholder; -1 marks "no distance".
		printKey(keyList.getKey());
		tab();
		null(true, false);
		if (context->reportDistance()) {
			tab();
			_outBuf.append(-1);
		}
		newline();
	}
	// Release block storage and reset the shared block-list pointer.
	if (deleteBlocks) {
		_bamBlockMgr->deleteBlocks(blockList);
		_currBamBlockList = NULL;
	}
	return;
}
// Default coverage report: hit count, number of covered (depth > 0) bases,
// query length, and the covered fraction.
void CoverageFile::doDefault(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	size_t coveredCount = _queryLen - countBasesAtDepth(0);
	float coveredFraction = (float)coveredCount / (float)_queryLen;
	_finalOutput = hits.size();
	_finalOutput.append("\t");
	_finalOutput.append(coveredCount);
	_finalOutput.append("\t");
	_finalOutput.append(_queryLen);
	_finalOutput.append("\t");
	format(coveredFraction);
	outputMgr->printRecord(hits.getKey(), _finalOutput);
}
bool Fisher::getFisher() { NewChromSweep sweep(_context); if (!sweep.init()) { return false; } RecordKeyVector hitSet; while (sweep.next(hitSet)) { if (_context->getObeySplits()) { RecordKeyVector keySet(hitSet.getKey()); RecordKeyVector resultSet(hitSet.getKey()); _blockMgr->findBlockedOverlaps(keySet, hitSet, resultSet); _intersectionVal += getTotalIntersection(resultSet); } else { _intersectionVal += getTotalIntersection(hitSet); } } sweep.closeOut(); _queryLen = sweep.getQueryTotalRecordLength(); _dbLen = sweep.getDatabaseTotalRecordLength(); _unionVal = _queryLen + _dbLen; return true; }
// Print the group-by result for one key: either the full record plus the
// column-op value, or only the grouping columns followed by that value.
void GroupBy::processHits(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	const Record *rec = hits.getKey();
	const QuickString &opVal = _context->getColumnOpsVal(hits);
	if (upCast(_context)->printFullCols()) {
		outputMgr->printRecord(rec, opVal);
		return;
	}
	// Build "<groupCol1>\t<groupCol2>...\t<opVal>" and print it without a record.
	QuickString outBuf;
	for (size_t col = 0; col < _groupCols.size(); col++) {
		outBuf.append(rec->getField(_groupCols[col]));
		outBuf.append('\t');
	}
	outBuf.append(opVal);
	outputMgr->printRecord(NULL, outBuf);
}
// In ALL_DBS mode with multiple databases, reduce retList to the hit(s)
// whose absolute distance equals the overall minimum, then apply the tie
// mode (first tie only, last tie only, or keep all ties).
void CloseSweep::checkMultiDbs(RecordKeyVector &retList) {
	ContextClosest::tieModeType tieMode = _context->getTieMode();
	if (_context->getMultiDbMode() == ContextClosest::ALL_DBS && _numDBs > 1) {
		_copyDists.clear();
		_copyRetList.clearAll();
		_copyRetList.setKey(retList.getKey());
		//loop through retList, find min dist
		int minDist = INT_MAX;
		int i = 0;
		for (; i < (int)_finalDistances.size(); i++) {
			if (abs(_finalDistances[i]) < minDist) {
				minDist = abs(_finalDistances[i]);
			}
		}
		i=0;
		// Copy every hit at the minimum absolute distance. NOTE(review):
		// _finalDistances is assumed parallel to retList here — confirm.
		for (RecordKeyVector::const_iterator_type iter = retList.begin(); iter != retList.end(); iter++) {
			int dist = _finalDistances[i];
			if (abs(dist) == minDist) {
				_copyDists.push_back(dist);
				_copyRetList.push_back(*iter);
			}
			i++;
		}
		// Rebuild retList/_finalDistances from the filtered copies per tie mode.
		retList.clearVector();
		_finalDistances.clear();
		if (_copyRetList.empty()) return;
		if (tieMode == ContextClosest::FIRST_TIE) {
			// Keep only the first tied hit and its distance.
			retList.push_back(*(_copyRetList.begin()));
			_finalDistances.push_back(_copyDists[0]);
		} else if (tieMode == ContextClosest::LAST_TIE) {
			// Keep only the last tied hit and its distance.
			retList.push_back(*(_copyRetList.begin() + _copyRetList.size() -1));
			_finalDistances.push_back(_copyDists[_copyDists.size()-1]);
		} else {
			// ALL_TIES (default): keep every tied hit.
			retList = _copyRetList;
			_finalDistances = _copyDists;
		}
	}
}
// Split a BAM record into aligned blocks using its CIGAR operations and
// append them to keyList. M/X/= extend the current block; D and N either
// extend it or close it depending on the break flags; I/S/P/H are skipped.
// mustDelete is set true: the blocks are freshly allocated records the
// caller must free.
void BlockMgr::getBlocksFromBam(RecordKeyVector &keyList, bool &mustDelete) {
	const BamRecord *keyRecord = static_cast<const BamRecord *>(keyList.getKey());
	const vector<BamTools::CigarOp> &cigarData = keyRecord->getCigarData();
	int currPos = keyRecord->getStartPos();
	int blockLength = 0;
	for (int i=0; i < (int)cigarData.size(); i++) {
		char opType = cigarData[i].Type;
		int opLen = (int)(cigarData[i].Length);
		switch(opType) {
		// Ops that are not counted toward a block here: insertion,
		// soft clip, padding, hard clip.
		case 'I': case 'S': case 'P': case 'H':
			break;
		// Match / mismatch / sequence-match all extend the current block.
		case 'M': case 'X': case '=':
			blockLength += opLen;
			break;
		case 'D': case 'N' :
			if ((opType == 'D' && !_breakOnDeletionOps) || (opType == 'N' && !_breakOnSkipOps)) {
				// Not configured to break on this op: absorb it into the block.
				blockLength += opLen;
			} else {
				// Close the current block, then advance past both the
				// emitted block and the gap.
				keyList.push_back(allocateAndAssignRecord(keyRecord, currPos, currPos + blockLength));
				currPos += opLen + blockLength;
				blockLength = 0;
			}
			break;
		default:
			fprintf(stderr, "ERROR: Found invalid Cigar operation: %c.\n", opType);
			exit(1);
			break;
		}
	}
	// Emit the final open block, if any.
	if (blockLength > 0) {
		keyList.push_back(allocateAndAssignRecord(keyRecord, currPos, currPos + blockLength));
	}
	mustDelete = true;
}
// Print a BAM key record. Returns BAM_AS_BAM when written to a BAM output
// file, BAM_AS_BED when converted to text, or NOT_BAM when the record isn't
// BAM at all (nothing is printed in that case).
RecordOutputMgr::printBamType RecordOutputMgr::printBamRecord(RecordKeyVector &keyList, bool bamOutputOnly) {
	const Record *record = keyList.getKey();
	if (record->getType() != FileRecordTypeChecker::BAM_RECORD_TYPE) {
		return NOT_BAM;
	}
	if (_context->getOutputFileType() == FileRecordTypeChecker::BAM_FILE_TYPE) {
		// BAM in, BAM out: write the alignment directly.
		_bamWriter->SaveAlignment(static_cast<const BamRecord *>(record)->getAlignment());
		return BAM_AS_BAM;
	}
	// BAM in, text out: render as BED unless the caller wants BAM output only.
	if (!bamOutputOnly) {
		if (record->isUnmapped()) {
			record->printUnmapped(_outBuf);
		} else {
			static_cast<const BamRecord *>(record)->print(_outBuf, _currBamBlockList);
		}
	}
	return BAM_AS_BED;
}
// Sum the number of bases by which each hit overlaps the key record.
unsigned long Fisher::getTotalIntersection(RecordKeyVector &recList) {
	unsigned long totalOverlap = 0;
	const Record *key = recList.getKey();
	int keyStart = key->getStartPos();
	int keyEnd = key->getEndPos();
	int hitIdx = 0;
	for (RecordKeyVector::const_iterator_type iter = recList.begin(); iter != recList.end(); iter = recList.next()) {
		int overlapStart = max((*iter)->getStartPos(), keyStart);
		int overlapEnd = min((*iter)->getEndPos(), keyEnd);
		if (_context->getObeySplits()) {
			// Split mode: use the per-hit block overlap totals computed earlier.
			totalOverlap += _blockMgr->getOverlapBases(hitIdx);
			hitIdx++;
		} else {
			totalOverlap += (unsigned long)(overlapEnd - overlapStart);
		}
	}
	_numIntersections += (int)recList.size();
	return totalOverlap;
}
// Sum the number of bases by which each hit overlaps the key record.
unsigned long Jaccard::getTotalIntersection(RecordKeyVector &hits) {
	unsigned long totalOverlap = 0;
	Record *key = hits.getKey();
	CHRPOS keyStart = key->getStartPos();
	CHRPOS keyEnd = key->getEndPos();
	int hitIdx = 0;
	for (RecordKeyVector::iterator_type iter = hits.begin(); iter != hits.end(); iter = hits.next()) {
		Record *hitRec = *iter;
		CHRPOS overlapStart = max(hitRec->getStartPos(), keyStart);
		CHRPOS overlapEnd = min(hitRec->getEndPos(), keyEnd);
		if (_context->getObeySplits()) {
			// Split mode: use the per-hit block overlap totals computed earlier.
			totalOverlap += upCast(_context)->getSplitBlockInfo()->getOverlapBases(hitIdx);
			hitIdx++;
		} else {
			totalOverlap += (unsigned long)(overlapEnd - overlapStart);
		}
	}
	_numIntersections += (int)hits.size();
	return totalOverlap;
}
// Report only the number of hits for this query record.
void CoverageFile::doCounts(RecordOutputMgr *outputMgr, RecordKeyVector &hits) {
	_finalOutput = hits.size();
	outputMgr->printRecord(hits.getKey(), _finalOutput);
}
// Block-aware overlap pass that edits hitList IN PLACE. When
// useOverlappingSubBlocks is false, hits failing the overlap-fraction (and,
// in reciprocal mode, hit-side fraction) tests are erased from hitList and
// passing hits get their overlap totals pushed to _overlapBases. When true,
// no filtering happens; instead each hit records the per-block overlap
// intervals into its block_starts/block_ends. Returns the surviving hit count.
// NOTE: the iterator is advanced via hitList.next() OR implicitly by
// hitList.erase() — the two must stay mutually exclusive per iteration.
int BlockMgr::findBlockedOverlaps(RecordKeyVector &hitList, bool useOverlappingSubBlocks) {
	RecordKeyVector keyList(hitList.getKey());
	bool deleteKeyBlocks = true;
	getBlocks(keyList, deleteKeyBlocks);
	_overlapBases.clear();
	int keyBlocksSumLength = getTotalBlockLength(keyList);
	//Loop through every database record the query intersected with
	RecordKeyVector::iterator_type hitListIter = hitList.begin();
	for (; hitListIter != hitList.end();) {
		RecordKeyVector hitBlocks(*hitListIter);
		bool deleteHitBlocks = false;
		getBlocks(hitBlocks, deleteHitBlocks); //get all blocks for the hit record.
		int hitBlockSumLength = getTotalBlockLength(hitBlocks); //get total length of the bocks for the hitRecord.
		int totalHitOverlap = 0;
		bool hitHasOverlap = false;
		//loop through every block of the database record.
		RecordKeyVector::iterator_type hitBlockIter = hitBlocks.begin();
		for (; hitBlockIter != hitBlocks.end(); hitBlockIter = hitBlocks.next()) {
			//loop through every block of the query record.
			RecordKeyVector::iterator_type keyListIter = keyList.begin();
			for (; keyListIter != keyList.end(); keyListIter = keyList.next()) {
				const Record *keyBlock = *keyListIter;
				const Record *hitBlock = *hitBlockIter;
				// Half-open interval intersection; overlap <= 0 means none.
				int maxStart = max(keyBlock->getStartPos(), hitBlock->getStartPos());
				int minEnd = min(keyBlock->getEndPos(), hitBlock->getEndPos());
				int overlap = minEnd - maxStart;
				if (overlap > 0) {
					hitHasOverlap = true;
					totalHitOverlap += overlap;
					if (useOverlappingSubBlocks == true) {
						// Record the overlapping sub-interval on the hit itself.
						(*hitListIter)->block_starts.push_back(maxStart);
						(*hitListIter)->block_ends.push_back(minEnd);
					}
				}
			}
		}
		if (hitHasOverlap && useOverlappingSubBlocks == false) {
			bool enoughKeyOverlap = (float) totalHitOverlap / (float) keyBlocksSumLength >= _overlapFraction;
			bool enoughHitOverlap = (float) totalHitOverlap / (float) hitBlockSumLength >= _overlapFraction;
			if (enoughKeyOverlap) {
				if (_hasReciprocal && enoughHitOverlap) {
					//(*hitListIter)->setValid(true);
					_overlapBases.push_back(totalHitOverlap);
					hitListIter = hitList.next();
				} else if (_hasReciprocal && !enoughHitOverlap) {
					// Reciprocal required but hit-side fraction fails: drop hit.
					hitList.erase(); //(*hitListIter)->setValid(false);
				} else if (!_hasReciprocal) {
					//(*hitListIter)->setValid(true);
					_overlapBases.push_back(totalHitOverlap);
					hitListIter = hitList.next();
				}
			} else {
				// Key-side fraction fails: drop hit.
				hitList.erase(); //(*hitListIter)->setValid(false);
			}
		} else if (!hitHasOverlap && useOverlappingSubBlocks == false) {
			// No block overlap at all: drop hit.
			hitList.erase(); //(*hitListIter)->setValid(false);
		} else {
			// Sub-block recording mode: keep everything, just advance.
			hitListIter = hitList.next();
		}
		if (deleteHitBlocks) {
			deleteBlocks(hitBlocks);
		}
	} // end for loop through main hits
	if (deleteKeyBlocks) {
		deleteBlocks(keyList);
	}
	return (int)hitList.size();
}
// Free a merged record set entirely: first the non-key items, then the key,
// finally nulling the key pointer so the list can't dangle.
void FileRecordMergeMgr::deleteMergedRecord(RecordKeyVector &recList) {
	deleteAllMergedItemsButKey(recList);
	deleteRecord(recList.getKey());
	recList.setKey(NULL);
}