Status NativeSaslAuthenticationSession::step(const StringData& inputData, std::string* outputData) {
    // Run one step of the SASL exchange via the underlying conversation object.
    // On success, record whether the conversation has completed; in all cases the
    // step's status is propagated to the caller unchanged.
    StatusWith<bool> swDone = _saslConversation->step(inputData, outputData);
    if (swDone.isOK()) {
        _done = swDone.getValue();
    }
    return swDone.getStatus();
}
// Insert a record, then update it in place with a longer payload and verify the
// same DiskLoc returns the new data afterwards.
TEST( RocksRecordStoreTest, Update1 ) {
    unittest::TempDir td( _rocksRecordStoreTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
    {
        RocksRecordStore rs( "foo.bar", db.get(), db->DefaultColumnFamily(),
                             db->DefaultColumnFamily() );
        string s1 = "eliot1";
        string s2 = "eliot2 and more"; // longer than s1, so the record must grow on update
        DiskLoc loc;
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                // +1 stores the NUL terminator so dataFor().data() compares as a C string
                StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s1.c_str(), s1.size() + 1, -1 );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
            }
            ASSERT_EQUALS( s1, rs.dataFor( loc ).data() );
        }
        {
            MyOperationContext opCtx( db.get() );
            {
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                StatusWith<DiskLoc> res = rs.updateRecord( &opCtx, loc, s2.c_str(), s2.size() + 1, -1, NULL );
                ASSERT_OK( res.getStatus() );
                // update is expected to leave the record at its original location
                ASSERT( loc == res.getValue() );
            }
            ASSERT_EQUALS( s2, rs.dataFor( loc ).data() );
        }
    }
}
// Insert multiple records and create a random iterator for the record store TEST(RecordStoreTestHarness, GetRandomIteratorNonEmpty) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } const unsigned nToInsert = 5000; // should be non-trivial amount, so we get multiple btree levels RecordId locs[nToInsert]; for (unsigned i = 0; i < nToInsert; i++) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { stringstream ss; ss << "record " << i; string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); ASSERT_OK(res.getStatus()); locs[i] = res.getValue(); uow.commit(); } } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get())); } set<RecordId> remain(locs, locs + nToInsert); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getRandomCursor(opCtx.get()); // returns NULL if getRandomCursor is not supported if (!cursor) { return; } // Iterate documents and mark those visited, but let at least one remain for (unsigned i = 0; i < nToInsert - 1; i++) { // Get a new cursor once in a while, shouldn't affect things if (i % (nToInsert / 8) == 0) { cursor = rs->getRandomCursor(opCtx.get()); } remain.erase(cursor->next()->id); // can happen more than once per doc } ASSERT(!remain.empty()); ASSERT(cursor->next()); // We should have at least visited a quarter of the items if we're any random at all // The expected fraction of visited records is 62.3%. ASSERT_LT(remain.size(), nToInsert * 3 / 4); } }
// Schedules a moveChunk command for 'migration' against 'targetHost', acquiring the
// collection distributed lock first if this is the first active migration for the
// namespace. On scheduling failure, completes the migration inline (outside _mutex).
void MigrationManager::_scheduleWithDistLock(OperationContext* txn,
                                             const HostAndPort& targetHost,
                                             Migration migration) {
    const NamespaceString nss(migration.nss);
    executor::TaskExecutor* const executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();
    stdx::unique_lock<stdx::mutex> lock(_mutex);
    auto it = _activeMigrationsWithDistLock.find(nss);
    if (it == _activeMigrationsWithDistLock.end()) {
        // Acquire the collection distributed lock (blocking call)
        // NOTE(review): this blocks while _mutex is held — presumably intentional so no
        // concurrent scheduling for the namespace can race; confirm against callers.
        auto distLockHandleStatus = acquireDistLock(txn, nss);
        if (!distLockHandleStatus.isOK()) {
            migration.completionNotification->set(distLockHandleStatus.getStatus());
            return;
        }
        it = _activeMigrationsWithDistLock
                 .insert(std::make_pair(
                     nss, CollectionMigrationsState(std::move(distLockHandleStatus.getValue()))))
                 .first;
    }
    auto collectionMigrationState = &it->second;
    // Add ourselves to the list of migrations on this collection
    // (std::list iterators stay valid across later insertions, so 'itMigration' may be
    // captured by the callback below).
    collectionMigrationState->migrations.push_front(std::move(migration));
    auto itMigration = collectionMigrationState->migrations.begin();
    const RemoteCommandRequest remoteRequest(
        targetHost, NamespaceString::kAdminDb.toString(), itMigration->moveChunkCmdObj, txn);
    StatusWith<executor::TaskExecutor::CallbackHandle> callbackHandleWithStatus =
        executor->scheduleRemoteCommand(
            remoteRequest,
            [this, collectionMigrationState, itMigration](
                const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
                // Runs on an executor thread, so it needs its own Client/OperationContext.
                Client::initThread(getThreadName().c_str());
                ON_BLOCK_EXIT([&] { Client::destroy(); });
                auto txn = cc().makeOperationContext();
                _completeWithDistLock(
                    txn.get(),
                    itMigration,
                    extractMigrationStatusFromRemoteCommandResponse(args.response));
            });
    if (callbackHandleWithStatus.isOK()) {
        itMigration->callbackHandle = std::move(callbackHandleWithStatus.getValue());
        return;
    }
    // The completion routine takes its own lock
    lock.unlock();
    _completeWithDistLock(txn, itMigration, std::move(callbackHandleWithStatus.getStatus()));
}
// Background thread body: periodically polls the config servers' user cache
// generation and invalidates the local user cache when it changes (or when the
// generation cannot be determined).
void UserCacheInvalidator::run() {
    Client::initThread("UserCacheInvalidator");
    lastInvalidationTime = Date_t::now();
    while (true) {
        stdx::unique_lock<stdx::mutex> lock(invalidationIntervalMutex);
        Date_t sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
        Date_t now = Date_t::now();
        // Re-check the deadline after every wakeup: the interval parameter can be
        // changed at runtime (signalled via the condition variable) and waits can
        // wake spuriously.
        while (now < sleepUntil) {
            invalidationIntervalChangedCondition.wait_for(lock, sleepUntil - now);
            sleepUntil = lastInvalidationTime + Seconds(userCacheInvalidationIntervalSecs);
            now = Date_t::now();
        }
        lastInvalidationTime = now;
        lock.unlock();
        if (inShutdown()) {
            break;
        }
        auto txn = cc().makeOperationContext();
        StatusWith<OID> currentGeneration = getCurrentCacheGeneration(txn.get());
        if (!currentGeneration.isOK()) {
            if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
                warning() << "_getUserCacheGeneration command not found on config server(s), "
                             "this most likely means you are running an outdated version of mongod "
                             "on the config servers" << std::endl;
            } else {
                warning() << "An error occurred while fetching current user cache generation "
                             "to check if user cache needs invalidation: "
                          << currentGeneration.getStatus() << std::endl;
            }
            // When in doubt, invalidate the cache
            _authzManager->invalidateUserCache();
            continue;
        }
        if (currentGeneration.getValue() != _previousCacheGeneration) {
            log() << "User cache generation changed from " << _previousCacheGeneration << " to "
                  << currentGeneration.getValue() << "; invalidating user cache" << std::endl;
            _authzManager->invalidateUserCache();
            _previousCacheGeneration = currentGeneration.getValue();
        }
    }
}
// Background thread body: waits on a shared invalidation interval, then compares
// the config servers' user cache generation against the last seen one and
// invalidates the local user cache on change (or on any fetch error).
void UserCacheInvalidator::run() {
    Client::initThread("UserCacheInvalidator");
    auto interval = globalInvalidationInterval();
    interval->start();
    while (true) {
        interval->wait();
        if (globalInShutdownDeprecated()) {
            break;
        }
        auto opCtx = cc().makeOperationContext();
        StatusWith<OID> currentGeneration = getCurrentCacheGeneration(opCtx.get());
        if (!currentGeneration.isOK()) {
            if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
                warning() << "_getUserCacheGeneration command not found on config server(s), "
                             "this most likely means you are running an outdated version of mongod "
                             "on the config servers";
            } else {
                warning() << "An error occurred while fetching current user cache generation "
                             "to check if user cache needs invalidation: "
                          << currentGeneration.getStatus();
            }
            // When in doubt, invalidate the cache
            // Invalidation failures are logged but must not kill this thread.
            try {
                _authzManager->invalidateUserCache(opCtx.get());
            } catch (const DBException& e) {
                warning() << "Error invalidating user cache: " << e.toStatus();
            }
            continue;
        }
        if (currentGeneration.getValue() != _previousCacheGeneration) {
            log() << "User cache generation changed from " << _previousCacheGeneration << " to "
                  << currentGeneration.getValue() << "; invalidating user cache";
            try {
                _authzManager->invalidateUserCache(opCtx.get());
            } catch (const DBException& e) {
                warning() << "Error invalidating user cache: " << e.toStatus();
            }
            _previousCacheGeneration = currentGeneration.getValue();
        }
    }
}
// Verify updateWithDamages() can patch bytes of a stored record in place:
// "aaa111bbb" with damage {source "222" -> target offset 3, size 3} becomes "aaa222bbb".
TEST( RecordStoreTestHarness, UpdateInPlace1 ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
    // Nothing to test for engines without in-place damage support.
    if (!rs->updateWithDamagesSupported())
        return;
    string s1 = "aaa111bbb";
    string s2 = "aaa222bbb";
    RecordId loc;
    const RecordData s1Rec(s1.c_str(), s1.size() + 1);
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), s1Rec.data(), s1Rec.size(), -1 );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }
    }
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
    }
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            const char* damageSource = "222";
            mutablebson::DamageVector dv;
            dv.push_back( mutablebson::DamageEvent() );
            dv[0].sourceOffset = 0;  // copy from start of damageSource
            dv[0].targetOffset = 3;  // into byte 3 of the record ("111")
            dv[0].size = 3;
            Status res = rs->updateWithDamages( opCtx.get(), loc, s1Rec, damageSource, dv );
            ASSERT_OK( res );
            uow.commit();
        }
    }
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
    }
}
// Insert a record and try to perform an in-place update on it with a DamageVector // containing overlapping DamageEvents. The changes should be applied in the order // specified by the DamageVector, and not -- for instance -- by the targetOffset. TEST( RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } string data = "00010111"; DiskLoc loc; const RecordData rec(data.c_str(), data.size() + 1); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), rec.data(), rec.size(), false ); ASSERT_OK( res.getStatus() ); loc = res.getValue(); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { mutablebson::DamageVector dv( 2 ); dv[0].sourceOffset = 0; dv[0].targetOffset = 3; dv[0].size = 5; dv[1].sourceOffset = 3; dv[1].targetOffset = 0; dv[1].size = 5; WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) ); uow.commit(); } } data = "10111010"; { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { RecordData record = rs->dataFor( opCtx.get(), loc ); ASSERT_EQUALS( data, record.data() ); } } }
StatusWithFunc SharedLibrary::getFunction(StringData name) {
    // Look up the raw symbol address; propagate any lookup failure as-is.
    StatusWith<void*> swSymbol = getSymbol(name);
    if (!swSymbol.isOK()) {
        return StatusWithFunc(swSymbol.getStatus());
    }
    // Convert the object pointer returned by the loader into a generic
    // function pointer for the caller to cast to its real signature.
    return StatusWithFunc(reinterpret_cast<void (*)()>(swSymbol.getValue()));
}
Status WiredTigerRecordStore::oplogDiskLocRegister(OperationContext* txn, const Timestamp& opTime) {
    // Derive the oplog RecordId from the optime; fail fast if it cannot be encoded.
    auto swLoc = oploghack::keyForOptime(opTime);
    if (!swLoc.isOK()) {
        return swLoc.getStatus();
    }
    // Track the not-yet-committed location under the mutex guarding the uncommitted set.
    stdx::lock_guard<stdx::mutex> lock(_uncommittedDiskLocsMutex);
    _addUncommitedDiskLoc_inlock(txn, swLoc.getValue());
    return Status::OK();
}
// Verify snapshot isolation between two concurrent write units of work: once t1 has
// written a record, a conflicting write from t2 must throw WriteConflictException.
TEST(RocksRecordStoreTest, Isolation1 ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );
    RecordId loc1;
    RecordId loc2;
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc1 = res.getValue();
            res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc2 = res.getValue();
            uow.commit();
        }
    }
    {
        scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
        scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );
        scoped_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
        scoped_ptr<WriteUnitOfWork> w2( new WriteUnitOfWork( t2.get() ) );
        // Reads establish each transaction's snapshot before the writes below.
        rs->dataFor( t1.get(), loc1 );
        rs->dataFor( t2.get(), loc1 );
        ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
        ASSERT_OK( rs->updateRecord( t1.get(), loc2, "B", 2, false, NULL ).getStatus() );
        // this should throw
        ASSERT_THROWS(rs->updateRecord(t2.get(), loc1, "c", 2, false, NULL),
                      WriteConflictException);
        w1->commit(); // this should succeed
        // w2 is never committed; its destructor rolls the unit of work back.
    }
}
// Runs 'cmdObj' against 'dbName' on the host selected by 'targeter', synchronously:
// schedules the remote command on the addShard executor and blocks until it
// completes, then packages the response into a Shard::CommandResponse.
StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAddShard(
    OperationContext* txn,
    RemoteCommandTargeter* targeter,
    const std::string& dbName,
    const BSONObj& cmdObj) {
    auto host = targeter->findHost(ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                   RemoteCommandTargeter::selectFindHostMaxWaitTime(txn));
    if (!host.isOK()) {
        return host.getStatus();
    }
    executor::RemoteCommandRequest request(
        host.getValue(), dbName, cmdObj, rpc::makeEmptyMetadata(), Seconds(30));
    StatusWith<executor::RemoteCommandResponse> swResponse =
        Status(ErrorCodes::InternalError, "Internal error running command");
    // The callback captures swResponse by reference; this is safe only because we
    // wait() on the callback handle below before swResponse goes out of scope.
    auto callStatus = _executorForAddShard->scheduleRemoteCommand(
        request, [&swResponse](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
            swResponse = args.response;
        });
    if (!callStatus.isOK()) {
        return callStatus.getStatus();
    }
    // Block until the command is carried out
    _executorForAddShard->wait(callStatus.getValue());
    if (!swResponse.isOK()) {
        if (swResponse.getStatus().compareCode(ErrorCodes::ExceededTimeLimit)) {
            LOG(0) << "Operation for addShard timed out with status " << swResponse.getStatus();
        }
        return swResponse.getStatus();
    }
    // Own the BSON before the response object goes away.
    BSONObj responseObj = swResponse.getValue().data.getOwned();
    BSONObj responseMetadata = swResponse.getValue().metadata.getOwned();
    Status commandStatus = getStatusFromCommandResult(responseObj);
    Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseObj);
    return Shard::CommandResponse(std::move(responseObj),
                                  std::move(responseMetadata),
                                  std::move(commandStatus),
                                  std::move(writeConcernStatus));
}
// Smoke test of basic record store operations: insert, dataFor, findRecord (both
// hit and miss), and numRecords across separate operation contexts.
TEST(RecordStoreTestHarness, Simple1) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }
    string s = "eliot was here";
    RecordId loc1;
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
            ASSERT_OK(res.getStatus());
            loc1 = res.getValue();
            uow.commit();
        }
        ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
    }
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
        ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
        RecordData rd;
        // A lookup of a nonexistent id must fail and leave 'rd' empty.
        ASSERT(!rs->findRecord(opCtx.get(), RecordId(111, 17), &rd));
        ASSERT(rd.data() == NULL);
        ASSERT(rs->findRecord(opCtx.get(), loc1, &rd));
        ASSERT_EQUALS(s, rd.data());
    }
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
            ASSERT_OK(res.getStatus());
            uow.commit();
        }
    }
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(2, rs->numRecords(opCtx.get()));
    }
}
// Inserts 'keys' (pointing at 'loc') and 'multikeyMetadataKeys' (pointing at the
// reserved kMultikeyMetadataKeyId) into the index, retrying DuplicateKey errors on
// unique indexes when 'options.dupsAllowed'. Returns the first fatal error, else OK.
// On success, updates 'result' counters and marks the index multikey if needed.
Status AbstractIndexAccessMethod::insertKeys(OperationContext* opCtx,
                                             const BSONObjSet& keys,
                                             const BSONObjSet& multikeyMetadataKeys,
                                             const MultikeyPaths& multikeyPaths,
                                             const RecordId& loc,
                                             const InsertDeleteOptions& options,
                                             InsertResult* result) {
    bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);
    // Add all new data keys, and all new multikey metadata keys, into the index. When iterating
    // over the data keys, each of them should point to the doc's RecordId. When iterating over
    // the multikey metadata keys, they should point to the reserved 'kMultikeyMetadataKeyId'.
    for (const auto keySet : {&keys, &multikeyMetadataKeys}) {
        const auto& recordId = (keySet == &keys ? loc : kMultikeyMetadataKeyId);
        for (const auto& key : *keySet) {
            // Oversized keys fail the size check and skip the insert, but are only
            // returned as an error if isFatalError() says so below.
            Status status = checkIndexKeySize ? checkKeySize(key) : Status::OK();
            if (status.isOK()) {
                bool unique = _descriptor->unique();
                StatusWith<SpecialFormatInserted> ret =
                    _newInterface->insert(opCtx, key, recordId, !unique /* dupsAllowed */);
                status = ret.getStatus();
                // When duplicates are encountered and allowed, retry with dupsAllowed. Add the
                // key to the output vector so callers know which duplicate keys were inserted.
                if (ErrorCodes::DuplicateKey == status.code() && options.dupsAllowed) {
                    invariant(unique);
                    ret = _newInterface->insert(opCtx, key, recordId, true /* dupsAllowed */);
                    status = ret.getStatus();
                    // This is speculative in that the 'dupsInserted' vector is not used by any code
                    // today. It is currently in place to test detecting duplicate key errors during
                    // hybrid index builds. Duplicate detection in the future will likely not take
                    // place in this insert() method.
                    if (status.isOK() && result) {
                        result->dupsInserted.push_back(key);
                    }
                }
                if (status.isOK() && ret.getValue() == SpecialFormatInserted::LongTypeBitsInserted)
                    _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
            }
            if (isFatalError(opCtx, status, key)) {
                return status;
            }
        }
    }
    if (result) {
        result->numInserted += keys.size() + multikeyMetadataKeys.size();
    }
    if (shouldMarkIndexAsMultikey(keys, multikeyMetadataKeys, multikeyPaths)) {
        _btreeState->setMultikey(opCtx, multikeyPaths);
    }
    return Status::OK();
}
// Synchronously runs 'cmdObj' on 'shard' (host chosen via 'readPref'), updating the
// ReplSetMonitor from both the transport-level and command-level status, and
// extracting the last visible optime from any replica-set metadata in the response.
StatusWith<ShardRegistry::CommandResponse> ShardRegistry::_runCommandWithMetadata(
    TaskExecutor* executor,
    const std::shared_ptr<Shard>& shard,
    const ReadPreferenceSetting& readPref,
    const std::string& dbName,
    const BSONObj& cmdObj,
    const BSONObj& metadata) {
    auto targeter = shard->getTargeter();
    auto host = targeter->findHost(readPref);
    if (!host.isOK()) {
        return host.getStatus();
    }
    executor::RemoteCommandRequest request(
        host.getValue(), dbName, cmdObj, metadata, kConfigCommandTimeout);
    StatusWith<executor::RemoteCommandResponse> responseStatus =
        Status(ErrorCodes::InternalError, "Internal error running command");
    // Captures responseStatus by reference; safe because we wait() on the handle
    // below before it can go out of scope.
    auto callStatus =
        executor->scheduleRemoteCommand(request, [&responseStatus](const RemoteCommandCallbackArgs& args) {
            responseStatus = args.response;
        });
    if (!callStatus.isOK()) {
        return callStatus.getStatus();
    }
    // Block until the command is carried out
    executor->wait(callStatus.getValue());
    updateReplSetMonitor(targeter, host.getValue(), responseStatus.getStatus());
    if (!responseStatus.isOK()) {
        return responseStatus.getStatus();
    }
    auto response = responseStatus.getValue();
    // Also feed the command-level outcome to the monitor (e.g. NotMaster errors).
    updateReplSetMonitor(targeter, host.getValue(), getStatusFromCommandResult(response.data));
    CommandResponse cmdResponse;
    cmdResponse.response = response.data;
    cmdResponse.metadata = response.metadata;
    if (response.metadata.hasField(rpc::kReplSetMetadataFieldName)) {
        auto replParseStatus = rpc::ReplSetMetadata::readFromMetadata(response.metadata);
        if (!replParseStatus.isOK()) {
            return replParseStatus.getStatus();
        }
        const auto& replMetadata = replParseStatus.getValue();
        cmdResponse.visibleOpTime = replMetadata.getLastOpVisible();
    }
    return cmdResponse;
}
// Reads the WiredTiger metadata for 'uri', parses its 'app_metadata' struct, and
// appends each key/value to 'bob'. Missing or empty app_metadata is not an error;
// duplicate keys and malformed structs are.
Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
                                              StringData uri,
                                              BSONObjBuilder* bob) {
    StatusWith<std::string> metadataResult = getMetadata(opCtx, uri);
    if (!metadataResult.isOK()) {
        return metadataResult.getStatus();
    }
    WiredTigerConfigParser topParser(metadataResult.getValue());
    WT_CONFIG_ITEM appMetadata;
    // No app_metadata entry at all: nothing to append.
    if (topParser.get("app_metadata", &appMetadata) != 0) {
        return Status::OK();
    }
    if (appMetadata.len == 0) {
        return Status::OK();
    }
    if (appMetadata.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT) {
        return Status(ErrorCodes::FailedToParse,
                      str::stream() << "app_metadata must be a nested struct. Actual value: "
                                    << StringData(appMetadata.str, appMetadata.len));
    }
    WiredTigerConfigParser parser(appMetadata);
    WT_CONFIG_ITEM keyItem;
    WT_CONFIG_ITEM valueItem;
    int ret;
    auto keysSeen = SimpleStringDataComparator::kInstance.makeStringDataUnorderedSet();
    while ((ret = parser.next(&keyItem, &valueItem)) == 0) {
        const StringData key(keyItem.str, keyItem.len);
        if (keysSeen.count(key)) {
            return Status(ErrorCodes::DuplicateKey,
                          str::stream() << "app_metadata must not contain duplicate keys. "
                                        << "Found multiple instances of key '" << key << "'.");
        }
        keysSeen.insert(key);
        // Preserve the WT-reported type where BSON has an equivalent; everything
        // else is appended as a string.
        switch (valueItem.type) {
            case WT_CONFIG_ITEM::WT_CONFIG_ITEM_BOOL:
                bob->appendBool(key, valueItem.val);
                break;
            case WT_CONFIG_ITEM::WT_CONFIG_ITEM_NUM:
                bob->appendIntOrLL(key, valueItem.val);
                break;
            default:
                bob->append(key, StringData(valueItem.str, valueItem.len));
                break;
        }
    }
    // WT_NOTFOUND is the normal end-of-iteration sentinel; anything else is an error.
    if (ret != WT_NOTFOUND) {
        return wtRCToStatus(ret);
    }
    return Status::OK();
}
// Computes the sort key for 'member' (from its document, or from its index key when
// no document is present). When the sort spec contains $meta fields, merges metadata
// values (currently text score) into the key in sort-spec order.
Status SortKeyGenerator::getSortKey(const WorkingSetMember& member, BSONObj* objOut) const {
    StatusWith<BSONObj> sortKey = BSONObj();
    if (member.hasObj()) {
        sortKey = getSortKeyFromObject(member);
    } else {
        sortKey = getSortKeyFromIndexKey(member);
    }
    if (!sortKey.isOK()) {
        return sortKey.getStatus();
    }
    if (!_sortHasMeta) {
        *objOut = sortKey.getValue();
        return Status::OK();
    }
    BSONObjBuilder mergedKeyBob;
    // Merge metadata into the key.
    // Walk the raw spec and the computed key in parallel: numeric spec elements
    // consume the next computed key element; $meta elements inject metadata.
    BSONObjIterator it(_rawSortSpec);
    BSONObjIterator sortKeyIt(sortKey.getValue());
    while (it.more()) {
        BSONElement elt = it.next();
        if (elt.isNumber()) {
            // Merge btree key elt.
            mergedKeyBob.append(sortKeyIt.next());
        } else if (LiteParsedQuery::isTextScoreMeta(elt)) {
            // Add text score metadata
            double score = 0.0;  // defaults to 0 when no score was computed
            if (member.hasComputed(WSM_COMPUTED_TEXT_SCORE)) {
                const TextScoreComputedData* scoreData = static_cast<const TextScoreComputedData*>(
                    member.getComputed(WSM_COMPUTED_TEXT_SCORE));
                score = scoreData->getScore();
            }
            mergedKeyBob.append("$metaTextScore", score);
        }
    }
    *objOut = mergedKeyBob.obj();
    return Status::OK();
}
// Kicks off the scatter-gather: schedules one remote command per request from the
// algorithm and returns an event that is signaled once sufficient responses have
// been received. May signal immediately if no commands could be scheduled or the
// algorithm is already satisfied.
StatusWith<ReplicationExecutor::EventHandle> ScatterGatherRunner::start(
    ReplicationExecutor* executor,
    const stdx::function<void ()>& onCompletion) {
    invariant(!_started);
    _started = true;
    _actualResponses = 0;
    _onCompletion = onCompletion;
    StatusWith<ReplicationExecutor::EventHandle> evh = executor->makeEvent();
    if (!evh.isOK()) {
        return evh;
    }
    _sufficientResponsesReceived = evh.getValue();
    // Guard ensures the event is signaled even on early error returns below, so
    // callers waiting on it are never left hanging.
    ScopeGuard earlyReturnGuard = MakeGuard(
        &ScatterGatherRunner::_signalSufficientResponsesReceived,
        this,
        executor);
    const ReplicationExecutor::RemoteCommandCallbackFn cb = stdx::bind(
        &ScatterGatherRunner::_processResponse,
        stdx::placeholders::_1,
        this);
    std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
    for (size_t i = 0; i < requests.size(); ++i) {
        const StatusWith<ReplicationExecutor::CallbackHandle> cbh =
            executor->scheduleRemoteCommand(requests[i], cb);
        // Shutdown is the only scheduling failure tolerated; anything else is fatal.
        if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
            return StatusWith<ReplicationExecutor::EventHandle>(cbh.getStatus());
        }
        fassert(18743, cbh.getStatus());
        _callbacks.push_back(cbh.getValue());
    }
    if (_callbacks.empty() || _algorithm->hasReceivedSufficientResponses()) {
        invariant(_algorithm->hasReceivedSufficientResponses());
        _signalSufficientResponsesReceived(executor);
    }
    earlyReturnGuard.Dismiss();
    return evh;
}
StatusWith<DiskLoc> HeapRecordStore::extractAndCheckLocForOplog(const char* data, int len) const {
    // Parse the optime-based key out of the record payload.
    auto swLoc = oploghack::extractKey(data, len);
    if (!swLoc.isOK())
        return swLoc;

    // Oplog locations must be strictly increasing: reject anything that does not
    // sort after the current highest record.
    if (!_data->records.empty()) {
        const DiskLoc& highest = _data->records.rbegin()->first;
        if (swLoc.getValue() <= highest)
            return StatusWith<DiskLoc>(ErrorCodes::BadValue, "ts not higher than highest");
    }
    return swLoc;
}
// Applies a precomputed UpdateTicket: unindexes removed keys, then inserts added
// data keys (at ticket.loc) and new multikey metadata keys (at the reserved id),
// updating multikey state and the in/out counters. Returns the first fatal error.
Status AbstractIndexAccessMethod::update(OperationContext* opCtx,
                                         const UpdateTicket& ticket,
                                         int64_t* numInserted,
                                         int64_t* numDeleted) {
    invariant(!_btreeState->isBuilding());
    // The new key set must be consistent with old keys plus/minus the delta.
    invariant(ticket.newKeys.size() ==
              ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size());
    invariant(numInserted);
    invariant(numDeleted);
    *numInserted = 0;
    *numDeleted = 0;
    if (!ticket._isValid) {
        return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
    }
    for (const auto& remKey : ticket.removed) {
        _newInterface->unindex(opCtx, remKey, ticket.loc, ticket.dupsAllowed);
    }
    bool checkIndexKeySize = shouldCheckIndexKeySize(opCtx);
    // Add all new data keys, and all new multikey metadata keys, into the index. When iterating
    // over the data keys, each of them should point to the doc's RecordId. When iterating over
    // the multikey metadata keys, they should point to the reserved 'kMultikeyMetadataKeyId'.
    const auto newMultikeyMetadataKeys = asVector(ticket.newMultikeyMetadataKeys);
    for (const auto keySet : {&ticket.added, &newMultikeyMetadataKeys}) {
        const auto& recordId = (keySet == &ticket.added ? ticket.loc : kMultikeyMetadataKeyId);
        for (const auto& key : *keySet) {
            Status status = checkIndexKeySize ? checkKeySize(key) : Status::OK();
            if (status.isOK()) {
                StatusWith<SpecialFormatInserted> ret =
                    _newInterface->insert(opCtx, key, recordId, ticket.dupsAllowed);
                status = ret.getStatus();
                if (status.isOK() && ret.getValue() == SpecialFormatInserted::LongTypeBitsInserted)
                    _btreeState->setIndexKeyStringWithLongTypeBitsExistsOnDisk(opCtx);
            }
            if (isFatalError(opCtx, status, key)) {
                return status;
            }
        }
    }
    if (shouldMarkIndexAsMultikey(
            ticket.newKeys, ticket.newMultikeyMetadataKeys, ticket.newMultikeyPaths)) {
        _btreeState->setMultikey(opCtx, ticket.newMultikeyPaths);
    }
    *numDeleted = ticket.removed.size();
    *numInserted = ticket.added.size();
    return Status::OK();
}
// Sanity check TEST(FTDCCompressor, TestBasic) { FTDCConfig config; FTDCCompressor c(&config); auto st = c.addSample(BSON("name" << "joe" << "key1" << 33 << "key2" << 42)); ASSERT_HAS_SPACE(st); st = c.addSample(BSON("name" << "joe" << "key1" << 34 << "key2" << 45)); ASSERT_HAS_SPACE(st); StatusWith<ConstDataRange> swBuf = c.getCompressedSamples(); ASSERT_TRUE(swBuf.isOK()); ASSERT_TRUE(swBuf.getValue().length() > 0); ASSERT_TRUE(swBuf.getValue().data() != nullptr); }
TEST(StatusWithTest, Fib1) {
    // A negative argument is rejected with an error status.
    StatusWith<int> bad = fib(-2);
    ASSERT(!bad.isOK());

    // fib(0..3) succeed and produce the expected sequence values.
    const int expected[] = {1, 1, 2, 3};
    for (int i = 0; i < 4; i++) {
        StatusWith<int> ok = fib(i);
        ASSERT(ok.isOK());
        ASSERT(expected[i] == ok.getValue());
    }
}
// Reads 'formatVersion' from the app_metadata of 'uri' and validates that it falls
// within [minimumVersion, maximumVersion]. A missing 'formatVersion' is treated as
// version 1 (pre-release metadata). Returns the version on success.
StatusWith<int64_t> WiredTigerUtil::checkApplicationMetadataFormatVersion(OperationContext* opCtx,
                                                                          StringData uri,
                                                                          int64_t minimumVersion,
                                                                          int64_t maximumVersion) {
    StatusWith<std::string> result = getMetadata(opCtx, uri);
    if (result.getStatus().code() == ErrorCodes::NoSuchKey) {
        return result.getStatus();
    }
    // Any failure other than NoSuchKey is unexpected here and fatal.
    invariantOK(result.getStatus());
    WiredTigerConfigParser topParser(result.getValue());
    WT_CONFIG_ITEM metadata;
    if (topParser.get("app_metadata", &metadata) != 0)
        return Status(ErrorCodes::UnsupportedFormat,
                      str::stream() << "application metadata for " << uri << " is missing ");
    if (metadata.type != WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT) {
        return Status(ErrorCodes::FailedToParse,
                      str::stream()
                          << "application metadata must be enclosed in parentheses. Actual value: "
                          << StringData(metadata.str, metadata.len));
    }
    WiredTigerConfigParser parser(metadata);
    int64_t version = 0;
    WT_CONFIG_ITEM versionItem;
    if (parser.get("formatVersion", &versionItem) != 0) {
        // If 'formatVersion' is missing, this metadata was introduced by
        // one of the RC versions (where the format version is 1).
        version = 1;
    } else if (versionItem.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_NUM) {
        version = versionItem.val;
    } else {
        return Status(ErrorCodes::UnsupportedFormat,
                      str::stream() << "'formatVersion' in application metadata for " << uri
                                    << " must be a number. Current value: "
                                    << StringData(versionItem.str, versionItem.len));
    }
    if (version < minimumVersion || version > maximumVersion) {
        return Status(ErrorCodes::UnsupportedFormat,
                      str::stream() << "Application metadata for " << uri
                                    << " has unsupported format version: " << version << ".");
    }
    LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
           << " uri: " << uri << " ok range " << minimumVersion << " -> " << maximumVersion
           << " current: " << version;
    return version;
}
// Registers an existing on-disk ident as an orphaned collection named
// local.orphan.<ident> (with '-' mapped to '_'), generating a fresh UUID for it and
// persisting the catalog entry. Returns the namespace the ident was registered under.
StatusWith<std::string> KVCatalog::newOrphanedIdent(OperationContext* opCtx, std::string ident) {
    // The collection will be named local.orphan.xxxxx.
    std::string identNs = ident;
    std::replace(identNs.begin(), identNs.end(), '-', '_');
    std::string ns = NamespaceString(NamespaceString::kOrphanCollectionDb,
                                     NamespaceString::kOrphanCollectionPrefix + identNs)
                         .ns();
    stdx::lock_guard<stdx::mutex> lk(_identsLock);
    Entry& old = _idents[ns];
    if (!old.ident.empty()) {
        return Status(ErrorCodes::NamespaceExists,
                      str::stream() << ns << " already exists in the catalog");
    }
    // Registering the change makes the in-memory entry roll back if the unit of work aborts.
    opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, ns));
    // Generate a new UUID for the orphaned collection.
    CollectionOptions optionsWithUUID;
    optionsWithUUID.uuid.emplace(CollectionUUID::gen());
    BSONObj obj;
    {
        BSONObjBuilder b;
        b.append("ns", ns);
        b.append("ident", ident);
        BSONCollectionCatalogEntry::MetaData md;
        md.ns = ns;
        // Default options with newly generated UUID.
        md.options = optionsWithUUID;
        // Not Prefixed.
        md.prefix = KVPrefix::kNotPrefixed;
        b.append("md", md.toBSON());
        obj = b.obj();
    }
    StatusWith<RecordId> res = _rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), Timestamp());
    if (!res.isOK())
        return res.getStatus();
    old = Entry(ident, res.getValue());
    LOG(1) << "stored meta data for orphaned collection " << ns << " @ " << res.getValue();
    return StatusWith<std::string>(std::move(ns));
}
// Creates a catalog entry for a new collection 'ns' with 'options': allocates a
// unique ident, persists the metadata record, and tracks the entry in _idents.
// Requires the database to be locked in MODE_X (when a lock manager is present).
Status KVCatalog::newCollection(OperationContext* opCtx,
                                StringData ns,
                                const CollectionOptions& options) {
    invariant(opCtx->lockState() == NULL ||
              opCtx->lockState()->isDbLockedForMode(nsToDatabaseSubstring(ns), MODE_X));
    // On engines that are not thread-safe for catalog access, serialize via a
    // dedicated metadata resource lock.
    std::unique_ptr<Lock::ResourceLock> rLk;
    if (!_isRsThreadSafe && opCtx->lockState()) {
        rLk.reset(new Lock::ResourceLock(opCtx->lockState(), resourceIdCatalogMetadata, MODE_X));
    }
    const string ident = _newUniqueIdent(ns, "collection");
    stdx::lock_guard<stdx::mutex> lk(_identsLock);
    Entry& old = _idents[ns.toString()];
    if (!old.ident.empty()) {
        return Status(ErrorCodes::NamespaceExists, "collection already exists");
    }
    // Registering the change makes the in-memory entry roll back if the unit of work aborts.
    opCtx->recoveryUnit()->registerChange(new AddIdentChange(this, ns));
    BSONObj obj;
    {
        BSONObjBuilder b;
        b.append("ns", ns);
        b.append("ident", ident);
        BSONCollectionCatalogEntry::MetaData md;
        md.ns = ns.toString();
        md.options = options;
        b.append("md", md.toBSON());
        obj = b.obj();
    }
    StatusWith<RecordId> res = _rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), false);
    if (!res.isOK())
        return res.getStatus();
    old = Entry(ident, res.getValue());
    LOG(1) << "stored meta data for " << ns << " @ " << res.getValue();
    return Status::OK();
}
// Goes over the request and preprocesses normalized versions of all the inserts in the request static void normalizeInserts( const BatchedCommandRequest& request, vector<StatusWith<BSONObj> >* normalizedInserts, vector<PregeneratedKeys>* pregen ) { normalizedInserts->reserve(request.sizeWriteOps()); for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) { BSONObj insertDoc = request.getInsertRequest()->getDocumentsAt( i ); StatusWith<BSONObj> normalInsert = fixDocumentForInsert( insertDoc ); normalizedInserts->push_back( normalInsert ); if ( request.getOrdered() && !normalInsert.isOK() ) break; if ( !normalInsert.getValue().isEmpty() ) insertDoc = normalInsert.getValue(); pregen->push_back( PregeneratedKeys() ); GeneratorHolder::getInstance()->prepare( request.getTargetingNS(), insertDoc, &pregen->back() ); } }
// Insert a single record. Create a random iterator pointing to that single record. // Then check we'll retrieve the record. TEST(RecordStoreTestHarness, GetRandomIteratorSingleton) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQ(0, rs->numRecords(opCtx.get())); } // Insert one record. RecordId idToRetrieve; { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "some data", 10, false); ASSERT_OK(res.getStatus()); idToRetrieve = res.getValue(); uow.commit(); } // Double-check that the record store has one record in it now. { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQ(1, rs->numRecords(opCtx.get())); } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getRandomCursor(opCtx.get()); // returns NULL if getRandomCursor is not supported if (!cursor) { return; } // We should be pointing at the only record in the store. // Check deattaching / reattaching cursor->save(); cursor->detachFromOperationContext(); opCtx = harnessHelper->newOperationContext(); cursor->reattachToOperationContext(opCtx.get()); ASSERT_TRUE(cursor->restore()); auto record = cursor->next(); ASSERT_EQUALS(record->id, idToRetrieve); // Iterator should either be EOF now, or keep returning the single existing document for (int i = 0; i < 10; i++) { record = cursor->next(); ASSERT(!record || record->id == idToRetrieve); } } }
RecordId _oplogOrderInsertOplog( OperationContext* txn, scoped_ptr<RecordStore>& rs, int inc ) {
    // Build the optime for this entry and register its location with the store first,
    // as the Rocks oplog implementation requires before inserting.
    const Timestamp opTime( 5, inc );
    RocksRecordStore* rocksStore = dynamic_cast<RocksRecordStore*>( rs.get() );
    ASSERT_OK( rocksStore->oplogDiskLocRegister( txn, opTime ) );

    // Insert a minimal oplog-shaped document carrying the timestamp and return its id.
    const BSONObj doc = BSON( "ts" << opTime );
    StatusWith<RecordId> swLoc = rs->insertRecord( txn, doc.objdata(), doc.objsize(), false );
    ASSERT_OK( swLoc.getStatus() );
    return swLoc.getValue();
}
Status checkAuthForCommand(ClientBasic* client, const std::string& dbname, const BSONObj& cmdObj) override {
    // A getMore request must parse cleanly before authorization can be evaluated.
    auto swRequest = GetMoreRequest::parseFromBSON(dbname, cmdObj);
    if (!swRequest.isOK()) {
        return swRequest.getStatus();
    }
    // Authorization is keyed on the cursor's namespace and id.
    const GetMoreRequest& req = swRequest.getValue();
    return AuthorizationSession::get(client)->checkAuthForGetMore(req.nss, req.cursorid);
}
// Insert a record and try to call updateWithDamages() with an empty DamageVector. TEST( RecordStoreTestHarness, UpdateWithNoDamages ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); if (!rs->updateWithDamagesSupported()) return; { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } string data = "my record"; RecordId loc; const RecordData rec(data.c_str(), data.size() + 1); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), rec.data(), rec.size(), false ); ASSERT_OK( res.getStatus() ); loc = res.getValue(); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { mutablebson::DamageVector dv; WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, "", dv ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { RecordData record = rs->dataFor( opCtx.get(), loc ); ASSERT_EQUALS( data, record.data() ); } } }