// Collect every RecordId in the collection by walking a forward iterator.
void getLocs(set<RecordId>* out, Collection* coll) {
    RecordIterator* it = coll->getIterator(&_txn);
    while (!it->isEOF()) {
        RecordId nextLoc = it->getNext();
        out->insert(nextLoc);
    }
    delete it;
}
// DiskLoc variant of the same helper: collect every location via a forward scan.
void getLocs(set<DiskLoc>* out, Collection* coll) {
    RecordIterator* it = coll->getIterator(DiskLoc(), false, CollectionScanParams::FORWARD);
    while (!it->isEOF()) {
        DiskLoc nextLoc = it->getNext();
        out->insert(nextLoc);
    }
    delete it;
}
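// Both overloads snapshot every location in the collection, which is handy for
// diffing collection contents around an operation under test. A minimal usage
// sketch follows; 'coll' is assumed to come from the test fixture and
// 'performUpdate' is a hypothetical stand-in for the operation being exercised:
//
//     set<RecordId> before, after;
//     getLocs(&before, coll);
//     performUpdate(coll);            // placeholder: the operation under test
//     getLocs(&after, coll);
//     ASSERT_EQUALS(before.size(), after.size());  // no records added or dropped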
// Create multiple iterators over a nonempty record store.
TEST( RecordStoreTestHarness, GetManyIteratorsNonEmpty ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    const int nToInsert = 10;
    RecordId locs[nToInsert];
    for ( int i = 0; i < nToInsert; i++ ) {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
                                                         data.c_str(),
                                                         data.size() + 1,
                                                         false );
            ASSERT_OK( res.getStatus() );
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
    }

    set<RecordId> remain( locs, locs + nToInsert );
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );

        vector<RecordIterator*> v = rs->getManyIterators( opCtx.get() );
        for (vector<RecordIterator*>::iterator vIter = v.begin(); vIter != v.end(); vIter++) {
            RecordIterator *rIter = *vIter;
            while ( !rIter->isEOF() ) {
                RecordId loc = rIter->curr();
                ASSERT( 1 == remain.erase( loc ) );
                ASSERT_EQUALS( loc, rIter->getNext() );
            }

            ASSERT_EQUALS( RecordId(), rIter->curr() );
            ASSERT_EQUALS( RecordId(), rIter->getNext() );
            ASSERT( rIter->isEOF() );
            ASSERT_EQUALS( RecordId(), rIter->curr() );

            delete rIter;
        }
        ASSERT( remain.empty() );
    }
}
// Insert multiple records and create an iterator for repairing the record store,
// even though it has not been corrupted.
TEST( RecordStoreTestHarness, GetIteratorForRepairNonEmpty ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    const int nToInsert = 10;
    DiskLoc locs[nToInsert];
    for ( int i = 0; i < nToInsert; i++ ) {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                        data.c_str(),
                                                        data.size() + 1,
                                                        false );
            ASSERT_OK( res.getStatus() );
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) );
    }

    set<DiskLoc> remain( locs, locs + nToInsert );
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );

        RecordIterator *it = rs->getIteratorForRepair( opCtx.get() );
        while ( !it->isEOF() ) {
            DiskLoc loc = it->curr();
            ASSERT( 1 == remain.erase( loc ) );
            ASSERT_EQUALS( loc, it->getNext() );
        }
        ASSERT( remain.empty() );

        ASSERT_EQUALS( DiskLoc(), it->curr() );
        ASSERT_EQUALS( DiskLoc(), it->getNext() );
        ASSERT( it->isEOF() );
        ASSERT_EQUALS( DiskLoc(), it->curr() );

        delete it; // the repair iterator is owned by the caller
    }
}
// Create multiple iterators over an empty record store.
TEST( RecordStoreTestHarness, GetManyIteratorsEmpty ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );

        vector<RecordIterator*> v = rs->getManyIterators( opCtx.get() );
        for (vector<RecordIterator*>::iterator vIter = v.begin(); vIter != v.end(); vIter++) {
            RecordIterator *rIter = *vIter;

            ASSERT( rIter->isEOF() );
            ASSERT_EQUALS( RecordId(), rIter->curr() );
            ASSERT_EQUALS( RecordId(), rIter->getNext() );
            ASSERT( rIter->isEOF() );
            ASSERT_EQUALS( RecordId(), rIter->curr() );

            delete rIter;
        }
    }
}
// Create an iterator for repairing an empty record store.
TEST( RecordStoreTestHarness, GetIteratorForRepairEmpty ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );

        RecordIterator *it = rs->getIteratorForRepair( opCtx.get() );
        ASSERT( it->isEOF() );
        ASSERT_EQUALS( DiskLoc(), it->curr() );
        ASSERT_EQUALS( DiskLoc(), it->getNext() );
        ASSERT( it->isEOF() );
        ASSERT_EQUALS( DiskLoc(), it->curr() );

        delete it; // the repair iterator is owned by the caller
    }
}
Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
    WriteUnitOfWork wunit(_txn);

    invariant(_indexes.empty());
    _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));

    const string& ns = _collection->ns().ns();

    Status status = _collection->getIndexCatalog()->checkUnfinished();
    if ( !status.isOK() )
        return status;

    for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
        BSONObj info = indexSpecs[i];
        string pluginName = IndexNames::findPluginName( info["key"].Obj() );

        // CUSTOM: experimental plugin that builds an in-memory R-tree over the
        // "loc" field instead of going through the normal index catalog.
        if (pluginName == "test") {
            // Scan every record and derive a 2-D bounding box for each document
            // whose "loc" field is either a {lng, lat} point or a GeoJSON Polygon.
            RecordIterator* ri = _collection->getIterator(_txn);
            std::vector<Entry*> initialEntries;
            while (!ri->isEOF()) {
                RecordData recordData = ri->dataFor(ri->curr());
                BSONObj doc = recordData.toBson();

                std::vector<double> lower;
                std::vector<double> upper;
                bool foundOK = false;

                if (doc.getFieldDotted("loc")["lng"].ok()) {
                    // A point: the bounding box degenerates to the point itself.
                    log() << "ELE IS A POINT: "
                          << doc.getFieldDotted("loc")["lng"].Double() << " AND "
                          << doc.getFieldDotted("loc")["lat"].Double();
                    lower.push_back(doc.getFieldDotted("loc")["lng"].Double());
                    lower.push_back(doc.getFieldDotted("loc")["lat"].Double());
                    upper.push_back(doc.getFieldDotted("loc")["lng"].Double());
                    upper.push_back(doc.getFieldDotted("loc")["lat"].Double());
                    foundOK = true;
                }

                if (doc.getFieldDotted("loc")["type"].ok()) {
                    if (doc.getFieldDotted("loc")["type"].String() == "Polygon") {
                        // A polygon: take the first and third vertices of the outer
                        // ring as opposite corners of the bounding box.
                        std::vector<BSONElement> ring =
                            doc.getFieldDotted("loc")["coordinates"].Array().at(0).Array();
                        lower.push_back(ring.at(0).Array().at(0).Double());
                        lower.push_back(ring.at(0).Array().at(1).Double());
                        upper.push_back(ring.at(2).Array().at(0).Double());
                        upper.push_back(ring.at(2).Array().at(1).Double());
                        foundOK = true;
                    }
                }

                if (foundOK) {
                    std::unordered_map<int, std::string> newDoc;
                    BoundingBox I = BoundingBox(lower, upper);
                    Entry* myEnt = new Entry(I, newDoc);
                    initialEntries.push_back(myEnt);
                }
                ri->getNext();
            }
            delete ri;

            const int dimensions = 2;
            const int max = 6;
            const int min = 3;

            log() << "RTRee creation has begun!";

            // Create a new root node, then the R-tree around it.
            std::vector<Entry*> newV;
            Node* R = new Node(dimensions, newV, max, min, true);
            RTree myIndex = RTree(dimensions, R, max, min);

            // Insert the entries gathered above into the R-tree.
            for (size_t i = 0; i < initialEntries.size(); i++) {
                Entry* current = initialEntries.at(i);
                myIndex.insert(current);
            }
            log() << "inserted all initial entries!";

            myIndex.theTree = &myIndex;

            // Sanity-check search over a window covering (-100,-100) to (100,100).
            double rand1 = -100;
            double rand2 = -100;
            double rand3 = 100;
            double rand4 = 100;
            std::vector<double> lowerBB;
            lowerBB.push_back(rand1);
            lowerBB.push_back(rand2);
            std::vector<double> upperBB;
            upperBB.push_back(rand3);
            upperBB.push_back(rand4);
            // The bounding box we will be searching for; results are currently unused.
            BoundingBox* IBB = new BoundingBox(lowerBB, upperBB);
            std::vector<Entry*> overlapping = myIndex.search(IBB);

            // Note: returning here skips the normal index-catalog setup below, so
            // the WriteUnitOfWork is never committed for "test" indexes.
            return Status::OK();
        }
        // CUSTOM

        if ( pluginName.size() ) {
            Status s = _collection->getIndexCatalog()
                ->_upgradeDatabaseMinorVersionIfNeeded(_txn, pluginName);
            if ( !s.isOK() )
                return s;
        }

        // Any foreground indexes make all indexes be built in the foreground.
        _buildInBackground = (_buildInBackground && info["background"].trueValue());
    }

    for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
        BSONObj info = indexSpecs[i];
        StatusWith<BSONObj> statusWithInfo =
            _collection->getIndexCatalog()->prepareSpecForCreate( _txn, info );
        Status status = statusWithInfo.getStatus();
        if ( !status.isOK() )
            return status;
        info = statusWithInfo.getValue();

        IndexToBuild index;
        index.block = boost::make_shared<IndexCatalog::IndexBuildBlock>(_txn,
                                                                        _collection,
                                                                        info);
        status = index.block->init();
        if ( !status.isOK() )
            return status;

        index.real = index.block->getEntry()->accessMethod();
        status = index.real->initializeAsEmpty(_txn);
        if ( !status.isOK() )
            return status;

        if (!_buildInBackground) {
            // Bulk build process requires foreground building as it assumes nothing is
            // changing under it.
            index.bulk.reset(index.real->initiateBulk(_txn));
        }

        const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();

        index.options.logIfError = false; // logging happens elsewhere if needed.
        index.options.dupsAllowed = !descriptor->unique()
                                 || _ignoreUnique
                                 || repl::getGlobalReplicationCoordinator()
                                        ->shouldIgnoreUniqueIndex(descriptor);

        log() << "build index on: " << ns << " properties: " << descriptor->toString();
        if (index.bulk)
            log() << "\t building index using bulk method";

        // TODO SERVER-14888 Suppress this in cases we don't want to audit.
        audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);

        _indexes.push_back( index );
    }

    // this is so that operations examining the list of indexes know there are more
    // keys to look at when doing things like in place updates, etc...
    _collection->infoCache()->addedIndex(_txn);

    if (_buildInBackground)
        _backgroundOperation.reset(new BackgroundOperation(ns));

    wunit.commit();
    return Status::OK();
}
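// The point and polygon branches above repeat the same getFieldDotted("loc")
// chains several times. A possible refactoring is sketched below under the same
// assumptions the loop makes (a {lng, lat} point, or a GeoJSON Polygon whose
// outer ring holds opposite corners at indices 0 and 2). extractLocBounds is a
// hypothetical helper, not part of this patch:
//
//     // Hypothetical helper: derive the lower/upper corners of a 2-D bounding
//     // box from a document's "loc" field; returns false if "loc" is neither a
//     // point nor a Polygon.
//     static bool extractLocBounds(const BSONObj& doc,
//                                  std::vector<double>* lower,
//                                  std::vector<double>* upper) {
//         BSONElement loc = doc.getFieldDotted("loc");
//         if (loc["lng"].ok()) {
//             // A point: both corners collapse onto the point itself.
//             double lng = loc["lng"].Double();
//             double lat = loc["lat"].Double();
//             lower->push_back(lng); lower->push_back(lat);
//             upper->push_back(lng); upper->push_back(lat);
//             return true;
//         }
//         if (loc["type"].ok() && loc["type"].String() == "Polygon") {
//             // Outer ring; vertices 0 and 2 assumed to be opposite corners.
//             std::vector<BSONElement> ring = loc["coordinates"].Array().at(0).Array();
//             lower->push_back(ring.at(0).Array().at(0).Double());
//             lower->push_back(ring.at(0).Array().at(1).Double());
//             upper->push_back(ring.at(2).Array().at(0).Double());
//             upper->push_back(ring.at(2).Array().at(1).Double());
//             return true;
//         }
//         return false;
//     }
//
// The scan loop would then reduce to calling extractLocBounds on each document
// and pushing a new Entry whenever it returns true.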
bool RecordIterator::operator==(const RecordIterator& iter) const {
    return (_current == iter._current && _source == iter._source);
}
RecordIterator::RecordIterator(const RecordIterator& iter)
    : _source(iter._source),
      _current(iter._current) {}
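// Together, the copy constructor and operator== make this RecordIterator
// copyable and comparable by position. A minimal usage sketch, assuming an
// iterator 'it' obtained from a collection as above (names are illustrative):
//
//     RecordIterator bookmark(*it);   // copy: same _source, same _current
//     // ... advance 'it' with getNext() ...
//     if (*it == bookmark) {
//         // the iterator has returned to the remembered record
//     }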