/* Step one of adding keys to index idxNo for a new record.
   Fills 'keys' with every key generated for the object and begins insertion of the first
   key only; if more than one key was generated, the index is multikey and the caller has
   more work to do. */
void fetchIndexInserters(BSONObjSet & /*out*/keys,
                         IndexInterface::IndexInserter &inserter,
                         NamespaceDetails *d,
                         int idxNo,
                         const BSONObj& obj,
                         DiskLoc recordLoc) {
    IndexDetails &idx = d->idx(idxNo);
    idx.getKeysFromObject(obj, keys);
    if( keys.empty() )
        return;
    bool dupsAllowed = !idx.unique();
    Ordering ordering = Ordering::make(idx.keyPattern());

    try {
        // We can't do the two-step method with multiple keys, as insertion of one key changes
        // the index's structure.  However we can do the first key of the set, so we go ahead
        // and do that FWIW.
        inserter.addInsertionContinuation(
            idx.idxInterface().beginInsertIntoIndex(
                idxNo, idx, recordLoc, *keys.begin(), ordering, dupsAllowed));
    }
    catch (AssertionException& e) {
        if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
            DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
        }
        else {
            throw;
        }
    }
}
/* Add keys to index idxNo for a new record. */
static void addKeysToIndex(const char *ns, NamespaceDetails *d, int idxNo,
                           BSONObj& obj, DiskLoc recordLoc, bool dupsAllowed) {
    IndexDetails& idx = d->idx(idxNo);
    BSONObjSet keys;
    idx.getKeysFromObject(obj, keys);
    if( keys.empty() )
        return;
    BSONObj order = idx.keyPattern();
    IndexInterface& ii = idx.idxInterface();
    Ordering ordering = Ordering::make(order);
    int n = 0;
    for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); i++ ) {
        if( ++n == 2 ) {
            d->setIndexIsMultikey(ns, idxNo);
        }
        verify( !recordLoc.isNull() );
        try {
            ii.bt_insert(idx.head, recordLoc, *i, ordering, dupsAllowed, idx);
        }
        catch (AssertionException& e) {
            if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
                DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
                continue;
            }
            if( !dupsAllowed ) {
                // dup key exception, presumably.
                throw;
            }
            problem() << " caught assertion addKeysToIndex "
                      << idx.indexNamespace() << " " << obj["_id"] << endl;
        }
    }
}
BSONObjSet DocumentSource::truncateSortSet(const BSONObjSet& sorts,
                                           const std::set<std::string>& fields) {
    BSONObjSet out = SimpleBSONObjComparator::kInstance.makeBSONObjSet();

    for (auto&& sort : sorts) {
        BSONObjBuilder outputSort;

        for (auto&& key : sort) {
            auto keyName = key.fieldNameStringData();

            bool shouldAppend = true;
            for (auto&& field : fields) {
                if (keyName == field || keyName.startsWith(field + '.')) {
                    shouldAppend = false;
                    break;
                }
            }

            if (!shouldAppend) {
                break;
            }

            outputSort.append(key);
        }

        BSONObj outSortObj = outputSort.obj();
        if (!outSortObj.isEmpty()) {
            out.insert(outSortObj);
        }
    }

    return out;
}
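/* Illustrative sketch only (not MongoDB code): the truncation rule implemented by
   truncateSortSet() above, applied to plain strings so the effect is easy to see.  A sort
   pattern such as {a: 1, b: 1, c: 1} with 'fields' containing "b" is cut down to {a: 1},
   because the first affected component and everything after it can no longer be trusted.
   The function name 'truncateSortSketch' is hypothetical. */
#include <set>
#include <string>
#include <vector>

std::vector<std::string> truncateSortSketch(const std::vector<std::string>& sortFields,
                                            const std::set<std::string>& fields) {
    std::vector<std::string> out;
    for (const auto& sortField : sortFields) {
        // Stop at the first sort component that equals one of 'fields' or is a dotted
        // child of one of them; components after it do not survive truncation.
        bool affected = false;
        for (const auto& f : fields) {
            if (sortField == f || sortField.rfind(f + '.', 0) == 0) {
                affected = true;
                break;
            }
        }
        if (affected)
            break;
        out.push_back(sortField);
    }
    return out;  // e.g. {"a", "b", "c"} with fields {"b"} -> {"a"}
}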
void NamespaceDetails::ColdIndexer::build() {
    Lock::assertWriteLocked(_d->_ns);

    if (_isSecondaryIndex) {
        IndexDetails::Builder builder(*_idx);

        const int indexNum = _d->idxNo(*_idx);
        for (shared_ptr<Cursor> cursor(BasicCursor::make(_d));
             cursor->ok(); cursor->advance()) {
            BSONObj pk = cursor->currPK();
            BSONObj obj = cursor->current();
            BSONObjSet keys;
            _idx->getKeysFromObject(obj, keys);
            if (keys.size() > 1) {
                _d->setIndexIsMultikey(indexNum);
            }
            for (BSONObjSet::const_iterator ki = keys.begin(); ki != keys.end(); ++ki) {
                builder.insertPair(*ki, &pk, obj);
            }
            killCurrentOp.checkForInterrupt(); // uasserts if we should stop
        }

        builder.done();

        // If the index is unique, check all adjacent keys for a duplicate.
        if (_idx->unique()) {
            _d->checkIndexUniqueness(*_idx);
        }
    }
}
BSONObjSet DocumentSourceLookUp::getOutputSorts() {
    BSONObjSet out;

    BSONObjSet inputSort = pSource->getOutputSorts();
    std::string asPath = _as.getPath(false);

    for (auto&& sortObj : inputSort) {
        // Truncate each sortObj at the '_as' path.
        BSONObjBuilder outputSort;

        for (BSONElement fieldSort : sortObj) {
            if (fieldSort.fieldNameStringData() == asPath) {
                break;
            }
            outputSort.append(fieldSort);
        }

        BSONObj outSortObj = outputSort.obj();
        if (!outSortObj.isEmpty()) {
            out.insert(outSortObj);
        }
    }

    return out;
}
RecordId IndexAccessMethod::findSingle(OperationContext* opCtx, const BSONObj& requestedKey) const {
    // Generate the key for this index.
    BSONObj actualKey;
    if (_btreeState->getCollator()) {
        // For performance, call get keys only if there is a non-simple collation.
        BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
        MultikeyPaths* multikeyPaths = nullptr;
        getKeys(requestedKey, GetKeysMode::kEnforceConstraints, &keys, multikeyPaths);
        invariant(keys.size() == 1);
        actualKey = *keys.begin();
    } else {
        actualKey = requestedKey;
    }

    std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(opCtx));
    const auto requestedInfo = kDebugBuild ? SortedDataInterface::Cursor::kKeyAndLoc
                                           : SortedDataInterface::Cursor::kWantLoc;
    if (auto kv = cursor->seekExact(actualKey, requestedInfo)) {
        // StorageEngine should guarantee these.
        dassert(!kv->loc.isNull());
        dassert(kv->key.woCompare(actualKey,
                                  /*order*/ BSONObj(),
                                  /*considerFieldNames*/ false) == 0);

        return kv->loc;
    }

    return RecordId();
}
Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
                                              const BSONObj& obj,
                                              const RecordId& loc,
                                              const InsertDeleteOptions& options,
                                              int64_t* numInserted) {
    BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
    MultikeyPaths multikeyPaths;
    _real->getKeys(obj, &keys, &multikeyPaths);

    _everGeneratedMultipleKeys = _everGeneratedMultipleKeys || (keys.size() > 1);

    if (!multikeyPaths.empty()) {
        if (_indexMultikeyPaths.empty()) {
            _indexMultikeyPaths = multikeyPaths;
        } else {
            invariant(_indexMultikeyPaths.size() == multikeyPaths.size());
            for (size_t i = 0; i < multikeyPaths.size(); ++i) {
                _indexMultikeyPaths[i].insert(multikeyPaths[i].begin(), multikeyPaths[i].end());
            }
        }
    }

    for (BSONObjSet::iterator it = keys.begin(); it != keys.end(); ++it) {
        _sorter->add(*it, loc);
        _keysInserted++;
    }

    if (NULL != numInserted) {
        *numInserted += keys.size();
    }

    return Status::OK();
}
/** Add index keys for a newly inserted record.
    Done in two steps/phases to allow potential deferral of the write-lock portion in the future. */
void indexRecordUsingTwoSteps(const char *ns, NamespaceDetails *d, BSONObj obj,
                              DiskLoc loc, bool shouldBeUnlocked) {
    vector<int> multi;
    vector<BSONObjSet> multiKeys;

    IndexInterface::IndexInserter inserter;

    // Step 1, read phase.
    int n = d->nIndexesBeingBuilt();
    {
        BSONObjSet keys;
        for ( int i = 0; i < n; i++ ) {
            // This call throws on unique constraint violation.  We haven't done any writes yet
            // so that is fine.
            fetchIndexInserters(/*out*/keys, inserter, d, i, obj, loc);
            if( keys.size() > 1 ) {
                multi.push_back(i);
                multiKeys.push_back(BSONObjSet());
                multiKeys[multiKeys.size()-1].swap(keys);
            }
            keys.clear();
        }
    }

    inserter.finishAllInsertions();  // Step 2, write phase.

    // Now finish adding the multikeys.
    for( unsigned j = 0; j < multi.size(); j++ ) {
        unsigned i = multi[j];
        BSONObjSet& keys = multiKeys[j];
        IndexDetails& idx = d->idx(i);
        IndexInterface& ii = idx.idxInterface();
        Ordering ordering = Ordering::make(idx.keyPattern());
        d->setIndexIsMultikey(ns, i);
        for( BSONObjSet::iterator k = ++keys.begin()/*skip 1*/; k != keys.end(); k++ ) {
            try {
                ii.bt_insert(idx.head, loc, *k, ordering, !idx.unique(), idx);
            }
            catch (AssertionException& e) {
                if( e.getCode() == 10287 && (int) i == d->nIndexes ) {
                    DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
                }
                else {
                    /* Roll back previously added index entries.
                       Note we must do this index itself too, as it is multikey and could
                       require some cleanup of its own. */
                    for( int j = 0; j < n; j++ ) {
                        try {
                            _unindexRecord(d->idx(j), obj, loc, false);
                        }
                        catch(...) {
                            log(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
                        }
                    }
                    throw;
                }
            }
        }
    }
}
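/* Illustrative sketch only (not MongoDB code): the read-phase/write-phase split used by
   indexRecordUsingTwoSteps() above, reduced to generic C++.  'StagedInsert', 'checkUnique',
   and 'write' are hypothetical stand-ins for beginInsertIntoIndex()/finishAllInsertions():
   validate and stage every insertion first, so failures happen before any write, then
   apply the staged work. */
#include <functional>
#include <string>
#include <vector>

struct StagedInsert {
    std::function<void()> apply;  // deferred write prepared during the read phase
};

void insertAllTwoSteps(const std::vector<std::string>& keys,
                       const std::function<void(const std::string&)>& checkUnique,
                       const std::function<void(const std::string&)>& write) {
    // Phase 1 (read): validate every key and stage its write.  A unique-constraint failure
    // throws here, before a single write has happened, so there is nothing to roll back.
    std::vector<StagedInsert> staged;
    for (const auto& k : keys) {
        checkUnique(k);                                   // may throw
        staged.push_back({[&write, k] { write(k); }});    // defer the actual insertion
    }

    // Phase 2 (write): apply all staged insertions.
    for (const auto& s : staged)
        s.apply();
}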
// Find the keys for obj, put them in the tree pointing to loc.
Status BtreeBasedAccessMethod::insert(OperationContext* txn,
                                      const BSONObj& obj,
                                      const DiskLoc& loc,
                                      const InsertDeleteOptions& options,
                                      int64_t* numInserted) {
    *numInserted = 0;

    BSONObjSet keys;
    // Delegate to the subclass.
    getKeys(obj, &keys);

    Status ret = Status::OK();
    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        Status status = _newInterface->insert(txn, *i, loc, options.dupsAllowed);

        // Everything's OK, carry on.
        if (status.isOK()) {
            ++*numInserted;
            continue;
        }

        // Error cases.

        if (ErrorCodes::KeyTooLong == status.code()) {
            // Ignore this error if we're on a secondary.
            if (!txn->isPrimaryFor(_btreeState->ns())) {
                continue;
            }

            // The user set a parameter to ignore key too long errors.
            if (!failIndexKeyTooLong) {
                continue;
            }
        }

        if (ErrorCodes::UniqueIndexViolation == status.code()) {
            // We ignore it for some reason in BG indexing.
            if (!_btreeState->isReady()) {
                DEV log() << "info: key already in index during bg indexing (ok)\n";
                continue;
            }
        }

        // Clean up after ourselves.
        for (BSONObjSet::const_iterator j = keys.begin(); j != i; ++j) {
            removeOneKey(txn, *j, loc);
            *numInserted = 0;
        }

        return status;
    }

    if (*numInserted > 1) {
        _btreeState->setMultikey( txn );
    }

    return ret;
}
Status AbstractIndexAccessMethod::insertKeys(OperationContext* opCtx,
                                             const BSONObjSet& keys,
                                             const BSONObjSet& multikeyMetadataKeys,
                                             const MultikeyPaths& multikeyPaths,
                                             const RecordId& loc,
                                             const InsertDeleteOptions& options,
                                             InsertResult* result) {
    bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);

    // Add all new data keys, and all new multikey metadata keys, into the index. When iterating
    // over the data keys, each of them should point to the doc's RecordId. When iterating over
    // the multikey metadata keys, they should point to the reserved 'kMultikeyMetadataKeyId'.
    for (const auto keySet : {&keys, &multikeyMetadataKeys}) {
        const auto& recordId = (keySet == &keys ? loc : kMultikeyMetadataKeyId);
        for (const auto& key : *keySet) {
            Status status = checkIndexKeySize ? checkKeySize(key) : Status::OK();
            if (status.isOK()) {
                bool unique = _descriptor->unique();
                StatusWith<SpecialFormatInserted> ret =
                    _newInterface->insert(opCtx, key, recordId, !unique /* dupsAllowed */);
                status = ret.getStatus();

                // When duplicates are encountered and allowed, retry with dupsAllowed. Add the
                // key to the output vector so callers know which duplicate keys were inserted.
                if (ErrorCodes::DuplicateKey == status.code() && options.dupsAllowed) {
                    invariant(unique);
                    ret = _newInterface->insert(opCtx, key, recordId, true /* dupsAllowed */);
                    status = ret.getStatus();

                    // This is speculative in that the 'dupsInserted' vector is not used by any
                    // code today. It is currently in place to test detecting duplicate key
                    // errors during hybrid index builds. Duplicate detection in the future will
                    // likely not take place in this insert() method.
                    if (status.isOK() && result) {
                        result->dupsInserted.push_back(key);
                    }
                }

                if (status.isOK() && ret.getValue() == SpecialFormatInserted::LongTypeBitsInserted)
                    _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
            }
            if (isFatalError(opCtx, status, key)) {
                return status;
            }
        }
    }

    if (result) {
        result->numInserted += keys.size() + multikeyMetadataKeys.size();
    }

    if (shouldMarkIndexAsMultikey(keys, multikeyMetadataKeys, multikeyPaths)) {
        _btreeState->setMultikey(opCtx, multikeyPaths);
    }

    return Status::OK();
}
AllowedIndicesFilter::AllowedIndicesFilter(const BSONObjSet& indexKeyPatterns,
                                           const stdx::unordered_set<std::string>& indexNames)
    : indexKeyPatterns(SimpleBSONObjComparator::kInstance.makeBSONObjSet()),
      indexNames(indexNames) {
    for (BSONObjSet::const_iterator i = indexKeyPatterns.begin(); i != indexKeyPatterns.end();
         ++i) {
        const BSONObj& indexKeyPattern = *i;
        this->indexKeyPatterns.insert(indexKeyPattern.getOwned());
    }
}
// Page in the pages needed for all index lookups on a given object.
void prefetchIndexPages(Collection* collection,
                        const repl::ReplSetImpl::IndexPrefetchConfig& prefetchConfig,
                        const BSONObj& obj) {
    DiskLoc unusedDl; // unused
    BSONObjSet unusedKeys;

    // Do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured
    // per op type?  One might want PREFETCH_NONE for updates, but it's more rare that it is
    // a bad idea for inserts.  For #3 (per op), a big issue would be "too many knobs".
    switch (prefetchConfig) {
    case repl::ReplSetImpl::PREFETCH_NONE:
        return;
    case repl::ReplSetImpl::PREFETCH_ID_ONLY: {
        TimerHolder timer( &prefetchIndexStats );
        // On the update op case, the call to prefetchRecordPages will touch the _id index,
        // thus perhaps this option isn't very useful?
        try {
            IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex();
            if ( !desc )
                return;
            IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
            verify( iam );
            iam->touch(obj);
        }
        catch (const DBException& e) {
            LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
        }
        break;
    }
    case repl::ReplSetImpl::PREFETCH_ALL: {
        // indexCount includes all indexes, including ones in the process of being built.
        IndexCatalog::IndexIterator ii =
            collection->getIndexCatalog()->getIndexIterator( true );
        while ( ii.more() ) {
            TimerHolder timer( &prefetchIndexStats );
            // This will page in all index pages for the given object.
            try {
                IndexDescriptor* desc = ii.next();
                IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex( desc );
                verify( iam );
                iam->touch(obj);
            }
            catch (const DBException& e) {
                LOG(2) << "ignoring exception in prefetchIndexPages(): " << e.what() << endl;
            }
            unusedKeys.clear();
        }
        break;
    }
    default:
        fassertFailed(16427);
    }
}
virtual Status insert(const BSONObj& obj,
                      const DiskLoc& loc,
                      const InsertDeleteOptions& options,
                      int64_t* numInserted) {
    BSONObjSet keys;
    _real->getKeys(obj, &keys);
    _phase1.addKeys(keys, loc, false);
    if ( numInserted )
        *numInserted += keys.size();
    return Status::OK();
}
void getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
    if ( _spec._indexType.get() ) { // plugin (eg geo)
        _spec._indexType->getKeys( obj , keys );
        return;
    }
    vector<const char*> fieldNames( _spec._fieldNames );
    vector<BSONElement> fixed( _spec._fixed );
    _getKeys( fieldNames , fixed , obj, keys );
    if ( keys.empty() && ! _spec._sparse )
        keys.insert( _spec._nullKey );
}
Status BtreeBasedAccessMethod::touch(const BSONObj& obj) {
    BSONObjSet keys;
    getKeys(obj, &keys);

    boost::scoped_ptr<BtreeInterface::Cursor> cursor(_newInterface->newCursor(1));
    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        cursor->locate(*i, DiskLoc());
    }

    return Status::OK();
}
Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
    BSONObjSet keys;
    getKeys(obj, &keys);

    std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        cursor->seekExact(*i);
    }

    return Status::OK();
}
// static
Status SetFilter::set(OperationContext* opCtx,
                      QuerySettings* querySettings,
                      PlanCache* planCache,
                      const string& ns,
                      const BSONObj& cmdObj) {
    // indexes - required
    BSONElement indexesElt = cmdObj.getField("indexes");
    if (indexesElt.eoo()) {
        return Status(ErrorCodes::BadValue, "required field indexes missing");
    }
    if (indexesElt.type() != mongo::Array) {
        return Status(ErrorCodes::BadValue, "required field indexes must be an array");
    }
    vector<BSONElement> indexesEltArray = indexesElt.Array();
    if (indexesEltArray.empty()) {
        return Status(ErrorCodes::BadValue,
                      "required field indexes must contain at least one index");
    }
    BSONObjSet indexes = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
    stdx::unordered_set<std::string> indexNames;
    for (vector<BSONElement>::const_iterator i = indexesEltArray.begin();
         i != indexesEltArray.end();
         ++i) {
        const BSONElement& elt = *i;
        if (elt.type() == BSONType::Object) {
            BSONObj obj = elt.Obj();
            if (obj.isEmpty()) {
                return Status(ErrorCodes::BadValue, "index specification cannot be empty");
            }
            indexes.insert(obj.getOwned());
        } else if (elt.type() == BSONType::String) {
            indexNames.insert(elt.String());
        } else {
            return Status(ErrorCodes::BadValue, "each item in indexes must be an object or string");
        }
    }

    auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
    if (!statusWithCQ.isOK()) {
        return statusWithCQ.getStatus();
    }
    unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

    // Add allowed indices to query settings, overriding any previous entries.
    querySettings->setAllowedIndices(*cq, planCache->computeKey(*cq), indexes, indexNames);

    // Remove entry from plan cache.
    planCache->remove(*cq).transitional_ignore();

    LOG(0) << "Index filter set on " << ns << " " << redact(cq->toStringShort()) << " "
           << indexesElt;

    return Status::OK();
}
Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
    BSONObjSet keys;
    getKeys(obj, &keys);

    boost::scoped_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn, 1));
    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        cursor->locate(*i, RecordId());
    }

    return Status::OK();
}
Status BtreeBasedAccessMethod::touch(const BSONObj& obj) {
    BSONObjSet keys;
    getKeys(obj, &keys);

    DiskLoc loc;
    int keyPos;
    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        _newInterface->locate(*i, DiskLoc(), 1, &loc, &keyPos);
    }

    return Status::OK();
}
void DocumentSourceGraphLookUp::doBreadthFirstSearch() {
    long long depth = 0;
    bool shouldPerformAnotherQuery;
    do {
        shouldPerformAnotherQuery = false;

        // Check whether each key in the frontier exists in the cache or needs to be queried.
        BSONObjSet cached = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
        auto matchStage = makeMatchStageFromFrontier(&cached);

        ValueUnorderedSet queried = pExpCtx->getValueComparator().makeUnorderedValueSet();
        _frontier->swap(queried);
        _frontierUsageBytes = 0;

        // Process cached values, populating '_frontier' for the next iteration of search.
        while (!cached.empty()) {
            auto it = cached.begin();
            shouldPerformAnotherQuery =
                addToVisitedAndFrontier(*it, depth) || shouldPerformAnotherQuery;
            cached.erase(it);
            checkMemoryUsage();
        }

        if (matchStage) {
            // Query for all keys that were in the frontier and not in the cache, populating
            // '_frontier' for the next iteration of search.

            // We've already allocated space for the trailing $match stage in '_fromPipeline'.
            _fromPipeline.back() = *matchStage;
            auto pipeline = uassertStatusOK(_mongod->makePipeline(_fromPipeline, _fromExpCtx));
            while (auto next = pipeline->output()->getNext()) {
                uassert(40271,
                        str::stream()
                            << "Documents in the '" << _from.ns()
                            << "' namespace must contain an _id for de-duplication in $graphLookup",
                        !(*next)["_id"].missing());

                BSONObj result = next->toBson();
                shouldPerformAnotherQuery =
                    addToVisitedAndFrontier(result.getOwned(), depth) || shouldPerformAnotherQuery;
                addToCache(result, queried);
            }
            checkMemoryUsage();
        }

        ++depth;
    } while (shouldPerformAnotherQuery && depth < std::numeric_limits<long long>::max() &&
             (!_maxDepth || depth <= *_maxDepth));

    _frontier->clear();
    _frontierUsageBytes = 0;
}
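/* Illustrative sketch only (not MongoDB code): the frontier-based breadth-first search used
   above, reduced to plain C++.  Node ids are strings, the graph is an adjacency map, and the
   names 'Graph' and 'bfsDepths' are hypothetical; it keeps only the visited-set de-duplication
   and layer-by-layer frontier replacement, not the cache or the $match query. */
#include <map>
#include <set>
#include <string>
#include <vector>

using Graph = std::map<std::string, std::vector<std::string>>;

// Returns the depth at which each reachable node is first visited.
std::map<std::string, long long> bfsDepths(const Graph& graph,
                                           const std::string& start,
                                           long long maxDepth) {
    std::map<std::string, long long> visited;   // plays the role of the visited set
    std::set<std::string> frontier = {start};   // plays the role of '_frontier'

    long long depth = 0;
    while (!frontier.empty() && depth <= maxDepth) {
        std::set<std::string> next;
        for (const auto& node : frontier) {
            if (visited.count(node))
                continue;                       // de-duplicate, like the _id check above
            visited[node] = depth;
            auto it = graph.find(node);
            if (it != graph.end())
                next.insert(it->second.begin(), it->second.end());
        }
        frontier.swap(next);                    // replace the frontier with the next layer
        ++depth;
    }
    return visited;
}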
TEST( FTSIndexFormat, Simple1 ) {
    FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "data" << "text" ) ) ) );
    BSONObjSet keys;
    FTSIndexFormat::getKeys( spec, BSON( "data" << "cat sat" ), &keys );

    ASSERT_EQUALS( 2U, keys.size() );
    for ( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
        BSONObj key = *i;
        ASSERT_EQUALS( 2, key.nFields() );
        ASSERT_EQUALS( String, key.firstElement().type() );
    }
}
BSONObj keyTooLong(const BSONObj& a, void* data) {
    BSONObj index = a[0]["index"].Obj();
    BSONObj doc = a[0]["doc"].Obj();

    BSONObjSet keys;
    getKeysForUpgradeChecking(index, doc, &keys);

    for (BSONObjSet::const_iterator key = keys.begin(); key != keys.end(); ++key) {
        if (key->objsize() > 1024) {
            return BSON("" << true);
        }
    }

    return BSON("" << false);
}
bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc) {
    BSONObjSet keys;
    getKeysForUpgradeChecking(index, doc, &keys);

    int largestKeySize = 0;
    for (BSONObjSet::const_iterator it = keys.begin(); it != keys.end(); ++it) {
        largestKeySize = std::max(largestKeySize, keyV1Size(*it));
    }

    // BtreeData_V1::KeyMax is 1024.
    return largestKeySize > 1024;
}
Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
    BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
    // There's no need to compute the prefixes of the indexed fields that cause the index to be
    // multikey when paging a document's index entries into memory.
    MultikeyPaths* multikeyPaths = nullptr;
    getKeys(obj, &keys, multikeyPaths);

    std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        cursor->seekExact(*i);
    }

    return Status::OK();
}
BSONObjSet DocumentSource::allPrefixes(BSONObj obj) {
    BSONObjSet out = SimpleBSONObjComparator::kInstance.makeBSONObjSet();

    BSONObj last = {};
    for (auto&& field : obj) {
        BSONObjBuilder builder(last.objsize() + field.size());
        builder.appendElements(last);
        builder.append(field);
        last = builder.obj();
        out.insert(last);
    }

    return out;
}
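/* Illustrative sketch only (not MongoDB code): the same prefix expansion as allPrefixes()
   above, applied to a plain vector of field names so the output is easy to inspect.  For an
   input pattern {a: 1, b: 1, c: 1}, allPrefixes() yields {a: 1}, {a: 1, b: 1}, and
   {a: 1, b: 1, c: 1}; 'allPrefixesSketch' is a hypothetical name. */
#include <iostream>
#include <string>
#include <vector>

std::vector<std::vector<std::string>> allPrefixesSketch(const std::vector<std::string>& fields) {
    std::vector<std::vector<std::string>> out;
    std::vector<std::string> last;     // running prefix, like 'last' above
    for (const auto& f : fields) {
        last.push_back(f);             // extend the prefix by one field
        out.push_back(last);           // emit each prefix as it is built
    }
    return out;
}

int main() {
    for (const auto& prefix : allPrefixesSketch({"a", "b", "c"})) {
        for (const auto& f : prefix)
            std::cout << f << ' ';
        std::cout << '\n';             // prints: "a", "a b", "a b c"
    }
}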
TEST( FTSIndexFormat, ExtraFront1 ) {
    FTSSpec spec( FTSSpec::fixSpec( BSON( "key" << BSON( "x" << 1 << "data" << "text" ) ) ) );
    BSONObjSet keys;
    FTSIndexFormat::getKeys( spec, BSON( "data" << "cat" << "x" << 5 ), &keys );

    ASSERT_EQUALS( 1U, keys.size() );
    BSONObj key = *(keys.begin());
    ASSERT_EQUALS( 3, key.nFields() );

    BSONObjIterator i( key );
    ASSERT_EQUALS( 5, i.next().numberInt() );
    ASSERT_EQUALS( StringData("cat"), i.next().valuestr() );
    ASSERT( i.next().numberDouble() > 0 );
}
void HashedIndexType::getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
    string hashedFieldCopy = string( _hashedField );
    const char* hashedFieldCopyPtr = hashedFieldCopy.c_str();
    BSONElement fieldVal = obj.getFieldDottedOrArray( hashedFieldCopyPtr );

    uassert( 16244,
             "Error: hashed indexes do not currently support array values",
             fieldVal.type() != Array );

    if ( ! fieldVal.eoo() ) {
        BSONObj key = BSON( "" << makeSingleKey( fieldVal , _seed , _hashVersion ) );
        keys.insert( key );
    }
    else if ( ! _isSparse ) {
        keys.insert( _missingKey.copy() );
    }
}
void run() {
    BSONObj spec( BSON( "key" << BSON( "a" << "hashed" ) ) );
    BSONObj nullObj = BSON( "a" << BSONNULL );

    // Call getKeys on the nullObj.
    BSONObjSet nullFieldKeySet;
    ExpressionKeysPrivate::getHashKeys(nullObj, "a", 0, 0, false, &nullFieldKeySet);
    BSONElement nullFieldFromKey = nullFieldKeySet.begin()->firstElement();

    ASSERT_EQUALS( ExpressionKeysPrivate::makeSingleHashKey( nullObj.firstElement(), 0, 0 ),
                   nullFieldFromKey.Long() );

    BSONObj missingField = IndexLegacy::getMissingField(NULL, spec);
    ASSERT_EQUALS( NumberLong, missingField.firstElement().type() );
    ASSERT_EQUALS( nullFieldFromKey, missingField.firstElement() );
}
// Remove the provided doc from the index.
Status IndexAccessMethod::remove(OperationContext* txn,
                                 const BSONObj& obj,
                                 const RecordId& loc,
                                 const InsertDeleteOptions& options,
                                 int64_t* numDeleted) {
    BSONObjSet keys;
    getKeys(obj, &keys);
    *numDeleted = 0;

    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        removeOneKey(txn, *i, loc, options.dupsAllowed);
        ++*numDeleted;
    }

    return Status::OK();
}
// Find the keys for obj, put them in the tree pointing to loc.
Status BtreeBasedAccessMethod::insert(const BSONObj& obj,
                                      const DiskLoc& loc,
                                      const InsertDeleteOptions& options,
                                      int64_t* numInserted) {
    *numInserted = 0;

    BSONObjSet keys;
    // Delegate to the subclass.
    getKeys(obj, &keys);

    Status ret = Status::OK();
    for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
        try {
            _interface->bt_insert(_btreeState, _btreeState->head(), loc, *i,
                                  options.dupsAllowed, true);
            ++*numInserted;
        } catch (AssertionException& e) {
            if (10287 == e.getCode() && !_btreeState->isReady()) {
                // This is the duplicate key exception.  We ignore it for some reason in BG
                // indexing.
                DEV log() << "info: key already in index during bg indexing (ok)\n";
            } else if (!options.dupsAllowed) {
                // Assuming it's a duplicate key exception.  Clean up any inserted keys.
                for (BSONObjSet::const_iterator j = keys.begin(); j != i; ++j) {
                    removeOneKey(*j, loc);
                }
                *numInserted = 0;
                return Status(ErrorCodes::DuplicateKey, e.what(), e.getCode());
            } else {
                problem() << " caught assertion addKeysToIndex "
                          << _descriptor->indexNamespace() << obj["_id"] << endl;
                ret = Status(ErrorCodes::InternalError, e.what(), e.getCode());
            }
        }
    }

    if (*numInserted > 1) {
        _btreeState->setMultikey();
    }

    return ret;
}