TEST_F(KeysManagerShardedTest, ShouldStillBeAbleToUpdateCacheEvenIfItCantCreateKeys) {
    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));

    // Set the cluster time far ahead so the updater will be forced to create new keys.
    const LogicalTime fakeTime(Timestamp(20000, 0));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(fakeTime);

    FailPointEnableBlock failWriteBlock("failCollectionInserts");

    {
        FailPointEnableBlock failQueryBlock("planExecutorAlwaysFails");
        keyManager()->startMonitoring(getServiceContext());
        keyManager()->enableKeyGenerator(operationContext(), true);
    }

    auto keyStatus = keyManager()->getKeyForValidation(
        operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(keyStatus.getStatus());

    auto key = keyStatus.getValue();
    ASSERT_EQ(1, key.getKeyId());
    ASSERT_EQ(origKey1.getKey(), key.getKey());
    ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
}
TEST_F(CacheReaderTest, GetKeyShouldReturnCorrectKeyAfterRefresh) {
    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
    KeysCollectionCacheReader reader("test", catalogClient);

    KeysCollectionDocument origKey1(
        1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));

    auto refreshStatus = reader.refresh(operationContext());
    ASSERT_OK(refreshStatus.getStatus());

    {
        auto key = refreshStatus.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }

    auto status = reader.getKey(LogicalTime(Timestamp(1, 0)));
    ASSERT_OK(status.getStatus());

    {
        auto key = status.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }
}
TEST_F(KeysManagerShardedTest, GetKeyForSigningShouldReturnRightOldKey) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
    KeysCollectionDocument origKey2(
        2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));

    keyManager()->refreshNow(operationContext());

    {
        auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 0)));
        ASSERT_OK(keyStatus.getStatus());

        auto key = keyStatus.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }

    {
        auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(105, 0)));
        ASSERT_OK(keyStatus.getStatus());

        auto key = keyStatus.getValue();
        ASSERT_EQ(2, key.getKeyId());
        ASSERT_EQ(origKey2.getKey(), key.getKey());
        ASSERT_EQ(Timestamp(110, 0), key.getExpiresAt().asTimestamp());
    }
}
TEST_F(KeysManagerShardedTest, GetKeyWithMultipleKeys) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
    KeysCollectionDocument origKey2(
        2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(205, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));

    auto keyStatus = keyManager()->getKeyForValidation(
        operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(keyStatus.getStatus());

    auto key = keyStatus.getValue();
    ASSERT_EQ(1, key.getKeyId());
    ASSERT_EQ(origKey1.getKey(), key.getKey());
    ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());

    keyStatus = keyManager()->getKeyForValidation(
        operationContext(), 2, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(keyStatus.getStatus());

    key = keyStatus.getValue();
    ASSERT_EQ(2, key.getKeyId());
    ASSERT_EQ(origKey2.getKey(), key.getKey());
    ASSERT_EQ(Timestamp(205, 0), key.getExpiresAt().asTimestamp());
}
TEST_F(KeysManagerShardedTest, HasSeenKeysIsFalseUntilKeysAreFound) {
    const LogicalTime currentTime(Timestamp(100, 0));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    ASSERT_EQ(false, keyManager()->hasSeenKeys());

    {
        FailPointEnableBlock failKeyGenerationBlock("disableKeyGeneration");

        keyManager()->startMonitoring(getServiceContext());
        keyManager()->enableKeyGenerator(operationContext(), true);

        keyManager()->refreshNow(operationContext());
        auto keyStatus = keyManager()->getKeyForValidation(
            operationContext(), 1, LogicalTime(Timestamp(100, 0)));
        ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());

        ASSERT_EQ(false, keyManager()->hasSeenKeys());
    }

    // Once the failpoint is disabled, the generator can make keys again.
    keyManager()->refreshNow(operationContext());
    auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(keyStatus.getStatus());

    ASSERT_EQ(true, keyManager()->hasSeenKeys());
}
TEST_F(KeyGeneratorUpdateTest, ShouldNotCreateNewKeyIfThereAre2UnexpiredKeys) {
    KeyGenerator generator("dummy", catalogClient(), Seconds(5));

    const LogicalTime currentTime(LogicalTime(Timestamp(100, 2)));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));
    KeysCollectionDocument origKey2(
        2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));

    {
        auto allKeys = getKeys(operationContext());

        ASSERT_EQ(2u, allKeys.size());

        const auto& key1 = allKeys.front();
        ASSERT_EQ(1, key1.getKeyId());
        ASSERT_EQ("dummy", key1.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key1.getExpiresAt().asTimestamp());

        const auto& key2 = allKeys.back();
        ASSERT_EQ(2, key2.getKeyId());
        ASSERT_EQ("dummy", key2.getPurpose());
        ASSERT_EQ(Timestamp(110, 0), key2.getExpiresAt().asTimestamp());
    }

    auto generateStatus = generator.generateNewKeysIfNeeded(operationContext());
    ASSERT_OK(generateStatus);

    auto allKeys = getKeys(operationContext());

    ASSERT_EQ(2u, allKeys.size());

    auto citer = allKeys.cbegin();

    {
        const auto& key = *citer;
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ("dummy", key.getPurpose());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }

    {
        ++citer;
        const auto& key = *citer;
        ASSERT_EQ(2, key.getKeyId());
        ASSERT_EQ("dummy", key.getPurpose());
        ASSERT_EQ(origKey2.getKey(), key.getKey());
        ASSERT_EQ(Timestamp(110, 0), key.getExpiresAt().asTimestamp());
    }
}
TEST_F(CacheReaderTest, RefreshCanIncrementallyGetNewKeys) {
    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
    KeysCollectionCacheReader reader("test", catalogClient);

    KeysCollectionDocument origKey0(
        0, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));

    {
        auto refreshStatus = reader.refresh(operationContext());
        ASSERT_OK(refreshStatus.getStatus());

        auto key = refreshStatus.getValue();
        ASSERT_EQ(0, key.getKeyId());
        ASSERT_EQ(origKey0.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(100, 0), key.getExpiresAt().asTimestamp());

        auto keyStatus = reader.getKey(LogicalTime(Timestamp(112, 1)));
        ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());
    }

    KeysCollectionDocument origKey1(
        1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));
    KeysCollectionDocument origKey2(
        2, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey2.toBSON()));

    {
        auto refreshStatus = reader.refresh(operationContext());
        ASSERT_OK(refreshStatus.getStatus());

        auto key = refreshStatus.getValue();
        ASSERT_EQ(2, key.getKeyId());
        ASSERT_EQ(origKey2.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(110, 0), key.getExpiresAt().asTimestamp());
    }

    {
        auto keyStatus = reader.getKey(LogicalTime(Timestamp(108, 1)));
        ASSERT_OK(keyStatus.getStatus());

        auto key = keyStatus.getValue();
        ASSERT_EQ(2, key.getKeyId());
        ASSERT_EQ(origKey2.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(110, 0), key.getExpiresAt().asTimestamp());
    }
}
TEST_F(KeysManagerShardedTest, GetKeyShouldErrorIfKeyIdMismatchKey) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));

    auto keyStatus = keyManager()->getKeyForValidation(
        operationContext(), 2, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());
}
TEST_F(KeysManagerShardedTest, ShouldCreateKeysIfKeyGeneratorEnabled) {
    keyManager()->startMonitoring(getServiceContext());

    const LogicalTime currentTime(LogicalTime(Timestamp(100, 0)));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    keyManager()->enableKeyGenerator(operationContext(), true);
    keyManager()->refreshNow(operationContext());

    auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 100)));
    ASSERT_OK(keyStatus.getStatus());

    auto key = keyStatus.getValue();
    ASSERT_EQ(Timestamp(101, 0), key.getExpiresAt().asTimestamp());
}
TEST_F(KeyGeneratorUpdateTest, ShouldCreate2KeysFromEmpty) {
    KeyGenerator generator("dummy", catalogClient(), Seconds(5));

    const LogicalTime currentTime(LogicalTime(Timestamp(100, 2)));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    auto generateStatus = generator.generateNewKeysIfNeeded(operationContext());
    ASSERT_OK(generateStatus);

    auto allKeys = getKeys(operationContext());

    ASSERT_EQ(2u, allKeys.size());

    const auto& key1 = allKeys.front();
    ASSERT_EQ(currentTime.asTimestamp().asLL(), key1.getKeyId());
    ASSERT_EQ("dummy", key1.getPurpose());
    ASSERT_EQ(Timestamp(105, 0), key1.getExpiresAt().asTimestamp());

    const auto& key2 = allKeys.back();
    ASSERT_EQ(currentTime.asTimestamp().asLL() + 1, key2.getKeyId());
    ASSERT_EQ("dummy", key2.getPurpose());
    ASSERT_EQ(Timestamp(110, 0), key2.getExpiresAt().asTimestamp());

    ASSERT_NE(key1.getKey(), key2.getKey());
}
TEST_F(KeysManagerShardedTest, GetKeyForValidationErrorsIfKeyDoesntExist) {
    keyManager()->startMonitoring(getServiceContext());

    auto keyStatus = keyManager()->getKeyForValidation(
        operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());
}
TEST_F(CacheReaderTest, ErrorsIfCacheIsEmpty) {
    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
    KeysCollectionCacheReader reader("test", catalogClient);

    auto status = reader.getKey(LogicalTime(Timestamp(1, 0))).getStatus();
    ASSERT_EQ(ErrorCodes::KeyNotFound, status.code());
    ASSERT_FALSE(status.reason().empty());
}
Status LogicalTimeMetadataHook::readReplyMetadata(OperationContext* opCtx,
                                                  StringData replySource,
                                                  const BSONObj& metadataObj) {
    auto parseStatus = LogicalTimeMetadata::readFromMetadata(metadataObj);
    if (!parseStatus.isOK()) {
        return parseStatus.getStatus();
    }

    auto& signedTime = parseStatus.getValue().getSignedTime();

    // LogicalTimeMetadata is default constructed if no cluster time metadata was sent, so a
    // default constructed SignedLogicalTime should be ignored.
    if (signedTime.getTime() == LogicalTime::kUninitialized ||
        !LogicalClock::get(_service)->isEnabled()) {
        return Status::OK();
    }

    if (opCtx) {
        auto timeTracker = OperationTimeTracker::get(opCtx);
        auto operationTime = metadataObj[kOperationTimeFieldName];
        if (!operationTime.eoo()) {
            invariant(operationTime.type() == BSONType::bsonTimestamp);
            timeTracker->updateOperationTime(LogicalTime(operationTime.timestamp()));
        }
    }

    return LogicalClock::get(_service)->advanceClusterTime(signedTime.getTime());
}
TEST_F(KeyGeneratorUpdateTest, ShouldCreateAnotherKeyIfOnlyOneKeyExists) {
    KeyGenerator generator("dummy", catalogClient(), Seconds(5));

    LogicalClock::get(operationContext())
        ->setClusterTimeFromTrustedSource(LogicalTime(Timestamp(100, 2)));

    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));

    {
        auto allKeys = getKeys(operationContext());

        ASSERT_EQ(1u, allKeys.size());

        const auto& key1 = allKeys.front();
        ASSERT_EQ(1, key1.getKeyId());
        ASSERT_EQ("dummy", key1.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key1.getExpiresAt().asTimestamp());
    }

    auto currentTime = LogicalClock::get(operationContext())->getClusterTime();

    auto generateStatus = generator.generateNewKeysIfNeeded(operationContext());
    ASSERT_OK(generateStatus);

    {
        auto allKeys = getKeys(operationContext());

        ASSERT_EQ(2u, allKeys.size());

        const auto& key1 = allKeys.front();
        ASSERT_EQ(1, key1.getKeyId());
        ASSERT_EQ("dummy", key1.getPurpose());
        ASSERT_EQ(origKey1.getKey(), key1.getKey());
        ASSERT_EQ(Timestamp(105, 0), key1.getExpiresAt().asTimestamp());

        const auto& key2 = allKeys.back();
        ASSERT_EQ(currentTime.asTimestamp().asLL(), key2.getKeyId());
        ASSERT_EQ("dummy", key2.getPurpose());
        ASSERT_EQ(Timestamp(110, 0), key2.getExpiresAt().asTimestamp());

        ASSERT_NE(key1.getKey(), key2.getKey());
    }
}
TEST_F(KeysManagerShardedTest, GetKeyForValidationTimesOutIfRefresherIsNotRunning) {
    operationContext()->setDeadlineAfterNowBy(Microseconds(250 * 1000),
                                              ErrorCodes::ExceededTimeLimit);

    ASSERT_THROWS(
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0))),
        DBException);
}
TEST_F(KeysManagerShardedTest, GetKeyForValidationTimesOutIfRefresherIsNotRunning) {
    operationContext()->setDeadlineAfterNowBy(Microseconds(250 * 1000));

    ASSERT_THROWS(keyManager()
                      ->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)))
                      .status_with_transitional_ignore(),
                  DBException);
}
TEST_F(KeysManagerShardedTest, ShouldNotReturnKeysInFeatureCompatibilityVersion34) {
    serverGlobalParams.featureCompatibility.version.store(
        ServerGlobalParams::FeatureCompatibility::Version::k34);

    keyManager()->startMonitoring(getServiceContext());
    keyManager()->enableKeyGenerator(operationContext(), true);

    KeysCollectionDocument origKey(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey.toBSON()));

    keyManager()->refreshNow(operationContext());

    auto keyStatus = keyManager()->getKeyForValidation(
        operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());
}
TEST_F(CacheReaderTest, RefreshShouldNotGetKeysForOtherPurpose) {
    auto catalogClient = Grid::get(operationContext())->catalogClient(operationContext());
    KeysCollectionCacheReader reader("test", catalogClient);

    KeysCollectionDocument origKey0(
        0, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey0.toBSON()));

    {
        auto refreshStatus = reader.refresh(operationContext());
        ASSERT_EQ(ErrorCodes::KeyNotFound, refreshStatus.getStatus());

        auto emptyKeyStatus = reader.getKey(LogicalTime(Timestamp(50, 0)));
        ASSERT_EQ(ErrorCodes::KeyNotFound, emptyKeyStatus.getStatus());
    }

    KeysCollectionDocument origKey1(
        1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));

    {
        auto refreshStatus = reader.refresh(operationContext());
        ASSERT_OK(refreshStatus.getStatus());

        auto key = refreshStatus.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }

    auto keyStatus = reader.getKey(LogicalTime(Timestamp(60, 1)));
    ASSERT_OK(keyStatus.getStatus());

    {
        auto key = keyStatus.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }
}
TEST_F(KeyGeneratorUpdateTest, ShouldPropagateWriteError) {
    KeyGenerator generator("dummy", catalogClient(), Seconds(5));

    const LogicalTime currentTime(LogicalTime(Timestamp(100, 2)));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    FailPointEnableBlock failWriteBlock("failCollectionInserts");

    auto generateStatus = generator.generateNewKeysIfNeeded(operationContext());
    ASSERT_EQ(ErrorCodes::FailPointEnabled, generateStatus);
}
LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {
    invariant(nTicks > 0 && nTicks < (1U << 31));

    stdx::lock_guard<stdx::mutex> lock(_mutex);

    LogicalTime clusterTime = _clusterTime;

    const unsigned wallClockSecs =
        durationCount<Seconds>(_service->getFastClockSource()->now().toDurationSinceEpoch());
    unsigned clusterTimeSecs = clusterTime.asTimestamp().getSecs();

    // Synchronize clusterTime with wall clock time, if clusterTime was behind in seconds.
    if (clusterTimeSecs < wallClockSecs) {
        clusterTime = LogicalTime(Timestamp(wallClockSecs, 0));
    }
    // If reserving 'nTicks' would force the cluster timestamp's increment field to exceed
    // (2^31-1), overflow by moving to the next second. We use the signed integer maximum as an
    // overflow point in order to preserve compatibility with potentially signed or unsigned
    // integral Timestamp increment types. It is also unlikely to apply more than 2^31 oplog
    // entries in the span of one second.
    else if (clusterTime.asTimestamp().getInc() >= ((1U << 31) - nTicks)) {
        log() << "Exceeded maximum allowable increment value within one second. Moving "
                 "clusterTime forward to the next second.";

        // Move time forward to the next second.
        clusterTime = LogicalTime(Timestamp(clusterTime.asTimestamp().getSecs() + 1, 0));
    }

    // Save the next cluster time.
    clusterTime.addTicks(1);
    _clusterTime = clusterTime;

    // Add the rest of the requested ticks if needed.
    if (nTicks > 1) {
        _clusterTime.addTicks(nTicks - 1);
    }

    return clusterTime;
}
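// The increment-overflow branch of reserveTicks() above can be illustrated with a small
// test-style sketch. This is a hedged sketch, not a test from the source: the
// LogicalClockTest fixture name and its mocked, near-epoch wall clock are assumptions,
// chosen so that only the overflow path (and not the wall-clock catch-up path) runs.
TEST_F(LogicalClockTest, ReserveTicksRollsOverToNextSecondOnIncrementOverflow) {
    auto clock = LogicalClock::get(getServiceContext());

    // Park the increment field just below the signed 32-bit overflow point.
    clock->setClusterTimeFromTrustedSource(LogicalTime(Timestamp(100, (1U << 31) - 1)));

    // Reserving one more tick cannot fit in the current second, so the clock should move
    // to the next second and hand out increment 1.
    auto reserved = clock->reserveTicks(1);
    ASSERT_EQ(Timestamp(101, 1), reserved.asTimestamp());
    ASSERT_EQ(Timestamp(101, 1), clock->getClusterTime().asTimestamp());
}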
TEST_F(KeysManagerShardedTest, EnableModeFlipFlopStressTest) {
    keyManager()->startMonitoring(getServiceContext());

    const LogicalTime currentTime(LogicalTime(Timestamp(100, 0)));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    bool doEnable = true;

    for (int x = 0; x < 10; x++) {
        keyManager()->enableKeyGenerator(operationContext(), doEnable);
        keyManager()->refreshNow(operationContext());

        auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 100)));
        ASSERT_OK(keyStatus.getStatus());

        auto key = keyStatus.getValue();
        ASSERT_EQ(Timestamp(101, 0), key.getExpiresAt().asTimestamp());

        doEnable = !doEnable;
    }
}
TEST_F(CacheTest, GetKeyShouldReturnOldestKeyPossible) {
    KeysCollectionCache cache("test", catalogClient());

    KeysCollectionDocument origKey0(
        0, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey0.toBSON()));

    KeysCollectionDocument origKey1(
        1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));

    KeysCollectionDocument origKey2(
        2, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey2.toBSON()));

    auto refreshStatus = cache.refresh(operationContext());
    ASSERT_OK(refreshStatus.getStatus());

    {
        auto key = refreshStatus.getValue();
        ASSERT_EQ(2, key.getKeyId());
        ASSERT_EQ(origKey2.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(110, 0), key.getExpiresAt().asTimestamp());
    }

    auto keyStatus = cache.getKey(LogicalTime(Timestamp(103, 1)));
    ASSERT_OK(keyStatus.getStatus());

    {
        auto key = keyStatus.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }
}
TEST_F(CacheTest, GetKeyShouldReturnErrorIfNoKeyIsValidForGivenTime) {
    KeysCollectionCache cache("test", catalogClient());

    KeysCollectionDocument origKey1(
        1, "test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));

    auto refreshStatus = cache.refresh(operationContext());
    ASSERT_OK(refreshStatus.getStatus());

    {
        auto key = refreshStatus.getValue();
        ASSERT_EQ(1, key.getKeyId());
        ASSERT_EQ(origKey1.getKey(), key.getKey());
        ASSERT_EQ("test", key.getPurpose());
        ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
    }

    auto status = cache.getKey(LogicalTime(Timestamp(110, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, status.getStatus());
}
void ClusterCommandTestFixture::testSnapshotReadConcernWithAfterClusterTime(
    BSONObj targetedCmd, BSONObj scatterGatherCmd) {
    auto containsAtClusterTimeNoAfterClusterTime =
        [&](const executor::RemoteCommandRequest& request) {
            ASSERT(!request.cmdObj["readConcern"]["atClusterTime"].eoo());
            ASSERT(request.cmdObj["readConcern"]["afterClusterTime"].eoo());

            // The chosen atClusterTime should be greater than or equal to the request's
            // afterClusterTime.
            ASSERT_GTE(LogicalTime(request.cmdObj["readConcern"]["atClusterTime"].timestamp()),
                       LogicalTime(kAfterClusterTime));
        };

    // Target one shard.
    runCommandInspectRequests(
        _makeCmd(targetedCmd, true), containsAtClusterTimeNoAfterClusterTime, true);

    // Target all shards.
    if (!scatterGatherCmd.isEmpty()) {
        runCommandInspectRequests(
            _makeCmd(scatterGatherCmd, true), containsAtClusterTimeNoAfterClusterTime, false);
    }
}
TimeProofService::TimeProof TimeProofService::getProof(LogicalTime time, const Key& key) {
    stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
    auto timeCeil = LogicalTime(Timestamp(time.asTimestamp().asULL() | kRangeMask));
    if (_cache && _cache->hasProof(timeCeil, key)) {
        return _cache->_proof;
    }

    auto unsignedTimeArray = timeCeil.toUnsignedArray();
    // Update the cache.
    _cache =
        CacheEntry(SHA1Block::computeHmac(
                       key.data(), key.size(), unsignedTimeArray.data(), unsignedTimeArray.size()),
                   timeCeil,
                   key);
    return _cache->_proof;
}
StatusWith<LogicalTimeMetadata> LogicalTimeMetadata::readFromMetadata(
    const BSONElement& metadataElem) {
    if (metadataElem.eoo()) {
        return LogicalTimeMetadata();
    }

    const auto& obj = metadataElem.Obj();

    Timestamp ts;
    Status status = bsonExtractTimestampField(obj, kClusterTimeFieldName, &ts);
    if (!status.isOK()) {
        return status;
    }

    BSONElement signatureElem;
    status = bsonExtractTypedField(obj, kSignatureFieldName, Object, &signatureElem);
    if (!status.isOK()) {
        return status;
    }

    const auto& signatureObj = signatureElem.Obj();

    // Extract BinData type signature hash and construct a SHA1Block instance from it.
    BSONElement hashElem;
    status = bsonExtractTypedField(signatureObj, kSignatureHashFieldName, BinData, &hashElem);
    if (!status.isOK()) {
        return status;
    }

    int hashLength = 0;
    auto rawBinSignature = hashElem.binData(hashLength);
    BSONBinData proofBinData(rawBinSignature, hashLength, hashElem.binDataType());
    auto proofStatus = SHA1Block::fromBinData(proofBinData);
    if (!proofStatus.isOK()) {
        return proofStatus.getStatus();
    }

    long long keyId;
    status = bsonExtractIntegerField(signatureObj, kSignatureKeyIdFieldName, &keyId);
    if (!status.isOK()) {
        return status;
    }

    return LogicalTimeMetadata(
        SignedLogicalTime(LogicalTime(ts), std::move(proofStatus.getValue()), keyId));
}
TEST_F(KeyGeneratorUpdateTest, ShouldNotCreateKeysWithDisableKeyGenerationFailPoint) {
    KeyGenerator generator("dummy", catalogClient(), Seconds(5));

    const LogicalTime currentTime(LogicalTime(Timestamp(100, 0)));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    {
        FailPointEnableBlock failKeyGenerationBlock("disableKeyGeneration");

        auto generateStatus = generator.generateNewKeysIfNeeded(operationContext());
        ASSERT_EQ(ErrorCodes::FailPointEnabled, generateStatus);
    }

    auto allKeys = getKeys(operationContext());
    ASSERT_EQ(0U, allKeys.size());
}
Status CommittedOpTimeMetadataHook::readReplyMetadata(OperationContext* opCtx,
                                                      StringData replySource,
                                                      const BSONObj& metadataObj) {
    auto lastCommittedOpTimeField = metadataObj[kLastCommittedOpTimeFieldName];
    if (lastCommittedOpTimeField.eoo()) {
        return Status::OK();
    }

    invariant(lastCommittedOpTimeField.type() == BSONType::bsonTimestamp);

    // replySource is the HostAndPort of a single server, except when this hook is triggered
    // through DBClientReplicaSet, when it will be a replica set connection string. The
    // shardRegistry stores connection strings and hosts in its lookup table, in addition to shard
    // ids, so replySource can be correctly passed on to ShardRegistry::getShardNoReload.
    auto shard = Grid::get(_service)->shardRegistry()->getShardNoReload(replySource.toString());
    if (shard) {
        shard->updateLastCommittedOpTime(LogicalTime(lastCommittedOpTimeField.timestamp()));
    }

    return Status::OK();
}
Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
    invariant(isEmpty());  // Only legal to call on an uninitialized object.

    if (readConcernElem.eoo()) {
        return Status::OK();
    }

    dassert(readConcernElem.fieldNameStringData() == kReadConcernFieldName);

    if (readConcernElem.type() != Object) {
        return Status(ErrorCodes::FailedToParse,
                      str::stream() << kReadConcernFieldName << " field should be an object");
    }

    BSONObj readConcernObj = readConcernElem.Obj();
    for (auto&& field : readConcernObj) {
        auto fieldName = field.fieldNameStringData();
        if (fieldName == kAfterOpTimeFieldName) {
            OpTime opTime;
            // TODO pass field in rather than scanning again.
            auto opTimeStatus =
                bsonExtractOpTimeField(readConcernObj, kAfterOpTimeFieldName, &opTime);
            if (!opTimeStatus.isOK()) {
                return opTimeStatus;
            }
            _opTime = opTime;
        } else if (fieldName == kAfterClusterTimeFieldName) {
            Timestamp afterClusterTime;
            auto afterClusterTimeStatus = bsonExtractTimestampField(
                readConcernObj, kAfterClusterTimeFieldName, &afterClusterTime);
            if (!afterClusterTimeStatus.isOK()) {
                return afterClusterTimeStatus;
            }
            _afterClusterTime = LogicalTime(afterClusterTime);
        } else if (fieldName == kAtClusterTimeFieldName) {
            Timestamp atClusterTime;
            auto atClusterTimeStatus = bsonExtractTimestampField(
                readConcernObj, kAtClusterTimeFieldName, &atClusterTime);
            if (!atClusterTimeStatus.isOK()) {
                return atClusterTimeStatus;
            }
            _atClusterTime = LogicalTime(atClusterTime);
        } else if (fieldName == kLevelFieldName) {
            std::string levelString;
            // TODO pass field in rather than scanning again.
            auto readCommittedStatus =
                bsonExtractStringField(readConcernObj, kLevelFieldName, &levelString);
            if (!readCommittedStatus.isOK()) {
                return readCommittedStatus;
            }

            if (levelString == kLocalReadConcernStr) {
                _level = ReadConcernLevel::kLocalReadConcern;
            } else if (levelString == kMajorityReadConcernStr) {
                _level = ReadConcernLevel::kMajorityReadConcern;
            } else if (levelString == kLinearizableReadConcernStr) {
                _level = ReadConcernLevel::kLinearizableReadConcern;
            } else if (levelString == kAvailableReadConcernStr) {
                _level = ReadConcernLevel::kAvailableReadConcern;
            } else if (levelString == kSnapshotReadConcernStr) {
                _level = ReadConcernLevel::kSnapshotReadConcern;
            } else {
                return Status(ErrorCodes::FailedToParse,
                              str::stream() << kReadConcernFieldName << '.' << kLevelFieldName
                                            << " must be either 'local', 'majority', "
                                               "'linearizable', 'available', or 'snapshot'");
            }
            _originalLevel = _level;
        } else {
            return Status(ErrorCodes::InvalidOptions,
                          str::stream() << "Unrecognized option in " << kReadConcernFieldName
                                        << ": " << fieldName);
        }
    }

    if (_afterClusterTime && _opTime) {
        return Status(ErrorCodes::InvalidOptions,
                      str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
                                    << " and " << kAfterOpTimeFieldName);
    }

    if (_afterClusterTime && _atClusterTime) {
        return Status(ErrorCodes::InvalidOptions,
                      str::stream() << "Can not specify both " << kAfterClusterTimeFieldName
                                    << " and " << kAtClusterTimeFieldName);
    }

    // Note: 'available' should not be used with after cluster time, as cluster time can wait for
    // replication whereas the premise of 'available' is to avoid waiting. 'linearizable' should
    // not be used with after cluster time, since linearizable reads are inherently causally
    // consistent.
    if (_afterClusterTime && getLevel() != ReadConcernLevel::kMajorityReadConcern &&
        getLevel() != ReadConcernLevel::kLocalReadConcern &&
        getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
        return Status(ErrorCodes::InvalidOptions,
                      str::stream() << kAfterClusterTimeFieldName << " field can be set only if "
                                    << kLevelFieldName << " is equal to "
                                    << kMajorityReadConcernStr << ", " << kLocalReadConcernStr
                                    << ", or " << kSnapshotReadConcernStr);
    }

    if (_opTime && getLevel() == ReadConcernLevel::kSnapshotReadConcern) {
        return Status(ErrorCodes::InvalidOptions,
                      str::stream() << kAfterOpTimeFieldName << " field cannot be set if "
                                    << kLevelFieldName << " is equal to "
                                    << kSnapshotReadConcernStr);
    }

    if (_atClusterTime && getLevel() != ReadConcernLevel::kSnapshotReadConcern) {
        return Status(ErrorCodes::InvalidOptions,
                      str::stream() << kAtClusterTimeFieldName << " field can be set only if "
                                    << kLevelFieldName << " is equal to "
                                    << kSnapshotReadConcernStr);
    }

    if (_afterClusterTime && _afterClusterTime == LogicalTime::kUninitialized) {
        return Status(ErrorCodes::InvalidOptions,
                      str::stream() << kAfterClusterTimeFieldName
                                    << " cannot be a null timestamp");
    }

    if (_atClusterTime && _atClusterTime == LogicalTime::kUninitialized) {
        return Status(ErrorCodes::InvalidOptions,
                      str::stream() << kAtClusterTimeFieldName << " cannot be a null timestamp");
    }

    return Status::OK();
}
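// A hedged usage sketch for ReadConcernArgs::initialize(): it only illustrates the
// combination rules enforced above and is not a test from the source. The literal field
// spellings ("level", "atClusterTime", "afterClusterTime", "afterOpTime"), the OpTime
// sub-document shape {ts, t}, and the test name are assumptions.
TEST(ReadConcernArgsSketch, ParsesSnapshotAndRejectsConflictingFields) {
    // A snapshot read concern with atClusterTime satisfies every check above.
    ReadConcernArgs snapshotArgs;
    auto snapshotObj = BSON("readConcern" << BSON("level"
                                                  << "snapshot"
                                                  << "atClusterTime" << Timestamp(100, 1)));
    ASSERT_OK(snapshotArgs.initialize(snapshotObj["readConcern"]));
    ASSERT(ReadConcernLevel::kSnapshotReadConcern == snapshotArgs.getLevel());

    // Combining afterClusterTime with afterOpTime is rejected as InvalidOptions.
    ReadConcernArgs conflictingArgs;
    auto conflictingObj =
        BSON("readConcern" << BSON("afterClusterTime" << Timestamp(100, 1) << "afterOpTime"
                                                      << BSON("ts" << Timestamp(100, 1) << "t"
                                                                   << 1LL)));
    ASSERT_EQ(ErrorCodes::InvalidOptions,
              conflictingArgs.initialize(conflictingObj["readConcern"]));
}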
TEST_F(CacheTest, ErrorsIfCacheIsEmpty) {
    KeysCollectionCache cache("test", catalogClient());

    auto status = cache.getKey(LogicalTime(Timestamp(1, 0))).getStatus();
    ASSERT_EQ(ErrorCodes::KeyNotFound, status.code());
    ASSERT_FALSE(status.reason().empty());
}