Example #1
TEST_F(KeyGeneratorUpdateTest, ShouldCreate2KeysFromEmpty) {
    KeyGenerator generator("dummy", catalogClient(), Seconds(5));

    // Advance the cluster time; key ids and expirations are derived from this timestamp.
    const LogicalTime currentTime(Timestamp(100, 2));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    auto generateStatus = generator.generateNewKeysIfNeeded(operationContext());
    ASSERT_OK(generateStatus);

    auto allKeys = getKeys(operationContext());

    // Starting from an empty keys collection, exactly two keys should be generated,
    // covering consecutive validity intervals.
    ASSERT_EQ(2u, allKeys.size());

    const auto& key1 = allKeys.front();
    ASSERT_EQ(currentTime.asTimestamp().asLL(), key1.getKeyId());
    ASSERT_EQ("dummy", key1.getPurpose());
    ASSERT_EQ(Timestamp(105, 0), key1.getExpiresAt().asTimestamp());

    const auto& key2 = allKeys.back();
    ASSERT_EQ(currentTime.asTimestamp().asLL() + 1, key2.getKeyId());
    ASSERT_EQ("dummy", key2.getPurpose());
    ASSERT_EQ(Timestamp(110, 0), key2.getExpiresAt().asTimestamp());

    ASSERT_NE(key1.getKey(), key2.getKey());
}
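
The assertions above follow directly from the generator's inputs: with the cluster time at Timestamp(100, 2) and a validity interval of Seconds(5), the two keys carry consecutive logical-time ids and expire at the start of seconds 105 and 110. A minimal sketch of that arithmetic follows; expectedExpiration is a hypothetical helper, not part of the test fixture or the KeyGenerator API.

// Illustrative only: mirrors the expiration math asserted in the test above.
Timestamp expectedExpiration(const LogicalTime& currentTime, Seconds keyValidFor, unsigned nthKey) {
    const unsigned intervalSecs = static_cast<unsigned>(durationCount<Seconds>(keyValidFor));
    // The nth key expires n validity intervals past the current second, with the increment reset.
    return Timestamp(currentTime.asTimestamp().getSecs() + nthKey * intervalSecs, 0);
}
// expectedExpiration(LogicalTime(Timestamp(100, 2)), Seconds(5), 1) == Timestamp(105, 0)
// expectedExpiration(LogicalTime(Timestamp(100, 2)), Seconds(5), 2) == Timestamp(110, 0)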
Example #2
LogicalTime LogicalClock::reserveTicks(uint64_t nTicks) {

    invariant(nTicks > 0 && nTicks < (1U << 31));

    stdx::lock_guard<stdx::mutex> lock(_mutex);

    LogicalTime clusterTime = _clusterTime;

    const unsigned wallClockSecs =
        durationCount<Seconds>(_service->getFastClockSource()->now().toDurationSinceEpoch());
    unsigned clusterTimeSecs = clusterTime.asTimestamp().getSecs();

    // Synchronize clusterTime with wall clock time, if clusterTime was behind in seconds.
    if (clusterTimeSecs < wallClockSecs) {
        clusterTime = LogicalTime(Timestamp(wallClockSecs, 0));
    }
    // If reserving 'nTicks' would force the cluster timestamp's increment field to exceed (2^31-1),
    // overflow by moving to the next second. We use the signed integer maximum as an overflow point
    // in order to preserve compatibility with potentially signed or unsigned integral Timestamp
    // increment types. It is also unlikely that more than 2^31 oplog entries will be applied within
    // a single second.
    else if (clusterTime.asTimestamp().getInc() >= ((1U << 31) - nTicks)) {

        log() << "Exceeded maximum allowable increment value within one second. Moving clusterTime "
                 "forward to the next second.";

        // Move time forward to the next second
        clusterTime = LogicalTime(Timestamp(clusterTime.asTimestamp().getSecs() + 1, 0));
    }

    // Save the next cluster time.
    clusterTime.addTicks(1);
    _clusterTime = clusterTime;

    // Add the rest of the requested ticks if needed.
    if (nTicks > 1) {
        _clusterTime.addTicks(nTicks - 1);
    }

    return clusterTime;
}
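
reserveTicks hands out a block of nTicks consecutive logical times: the returned value is the first reserved time, and _clusterTime is left at the last one, so any later reservation starts after the block. A small caller-side sketch, assuming clock is the LogicalClock attached to the current service context (serviceContext is an assumed name):

// Sketch only: consuming a reserved block of three ticks.
LogicalClock* clock = LogicalClock::get(serviceContext);

LogicalTime first = clock->reserveTicks(3);  // first reserved time
LogicalTime second = first;
second.addTicks(1);                          // second reserved time
LogicalTime third = first;
third.addTicks(2);                           // third reserved time; equals the clock's new cluster time
// No other caller of this clock can be handed a time within [first, third].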
TimeProofService::TimeProof TimeProofService::getProof(LogicalTime time, const Key& key) {
    stdx::lock_guard<stdx::mutex> lk(_cacheMutex);
    auto timeCeil = LogicalTime(Timestamp(time.asTimestamp().asULL() | kRangeMask));
    if (_cache && _cache->hasProof(timeCeil, key)) {
        return _cache->_proof;
    }

    auto unsignedTimeArray = timeCeil.toUnsignedArray();
    // Update the cache with a proof computed over the rounded-up (ceiling) time.
    _cache =
        CacheEntry(SHA1Block::computeHmac(
                       key.data(), key.size(), unsignedTimeArray.data(), unsignedTimeArray.size()),
                   timeCeil,
                   key);
    return _cache->_proof;
}
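
getProof rounds the requested time up to a range ceiling before computing the HMAC, so every time within the same masked range reuses one cached proof and the signing cost is paid at most once per range and key. A sketch of that rounding; the concrete kRangeMask value is an assumption here (a low-bit mask such as 0xFFFF), not taken from the source above.

// Assumed mask for illustration; the real kRangeMask is defined by TimeProofService.
const uint64_t kAssumedRangeMask = 0xFFFF;

uint64_t proofCeiling(const LogicalTime& time) {
    // All times sharing the same high bits map to one ceiling, and therefore one cached HMAC.
    return time.asTimestamp().asULL() | kAssumedRangeMask;
}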
Example #4
Status LogicalClock::_passesRateLimiter_inlock(LogicalTime newTime) {
    const unsigned wallClockSecs =
        durationCount<Seconds>(_service->getFastClockSource()->now().toDurationSinceEpoch());
    auto maxAcceptableDrift = static_cast<const unsigned>(maxAcceptableLogicalClockDrift);
    auto newTimeSecs = newTime.asTimestamp().getSecs();

    // Both values are unsigned, so compare them first to avoid wrap-around.
    if ((newTimeSecs > wallClockSecs) && (newTimeSecs - wallClockSecs) > maxAcceptableDrift) {
        return Status(ErrorCodes::ClusterTimeFailsRateLimiter,
                      str::stream() << "New cluster time, " << newTimeSecs
                                    << ", is too far from this node's wall clock time, "
                                    << wallClockSecs
                                    << ".");
    }

    return Status::OK();
}
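
The rate limiter only rejects cluster times that run ahead of this node's wall clock by more than the configured drift; times at or behind the wall clock always pass, because the first comparison already fails. A condensed sketch of the same check, with hypothetical names:

// Illustrative restatement of the check above; 'maxDrift' stands in for the configured
// maxAcceptableLogicalClockDrift value.
bool wouldPassRateLimiter(unsigned newTimeSecs, unsigned wallClockSecs, unsigned maxDrift) {
    // Reject only when the new time is ahead of the wall clock by more than maxDrift seconds.
    return !(newTimeSecs > wallClockSecs && (newTimeSecs - wallClockSecs) > maxDrift);
}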
StatusWith<KeysCollectionDocument> KeysCollectionManagerDirect::getKeyForSigning(
    OperationContext* opCtx, const LogicalTime& forThisTime) {
    // Search through the cache for active keys.
    {
        stdx::lock_guard<stdx::mutex> lk(_mutex);
        for (auto& it : _cache) {
            auto keyDoc = it.second;
            auto expiration = keyDoc.getExpiresAt();
            if (expiration > forThisTime) {
                return keyDoc;
            }
        }
    }

    // Query admin.system.keys for active keys.
    DBDirectClient client(opCtx);

    BSONObjBuilder queryBuilder;
    queryBuilder.append("purpose", _purpose);
    queryBuilder.append("expiresAt", BSON("$gt" << forThisTime.asTimestamp()));

    auto cursor = client.query(KeysCollectionDocument::ConfigNS, queryBuilder.obj());

    if (!cursor->more()) {
        return {ErrorCodes::KeyNotFound, "Could not find an active key for signing"};
    }

    // Parse and return the key.
    auto res = KeysCollectionDocument::fromBSON(cursor->next());
    if (!res.isOK()) {
        return res.getStatus();
    }

    auto keyDoc = res.getValue();

    // Add to our cache.
    {
        stdx::lock_guard<stdx::mutex> lk(_mutex);
        _cache.add(keyDoc.getKeyId(), keyDoc);
    }

    return keyDoc;
}
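
A caller-side sketch of the lookup above, assuming keyManager is an initialized KeysCollectionManagerDirect and now is the current cluster time (both names are assumptions for illustration):

StatusWith<KeysCollectionDocument> keyStatus = keyManager.getKeyForSigning(opCtx, now);
if (!keyStatus.isOK()) {
    // KeyNotFound means no document in admin.system.keys with this manager's purpose
    // has an 'expiresAt' later than 'now'.
}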
StatusWith<KeysCollectionDocument> KeysCollectionManagerDirect::getKeyForValidation(
    OperationContext* opCtx, long long keyId, const LogicalTime& forThisTime) {
    // First, attempt to find the key in our cache.
    {
        stdx::lock_guard<stdx::mutex> lk(_mutex);
        auto it = _cache.find(keyId);
        if (it != _cache.end()) {
            return it->second;
        }
    }

    // Query admin.system.keys for an active key with this id.
    DBDirectClient client(opCtx);

    BSONObjBuilder queryBuilder;
    queryBuilder.append("purpose", _purpose);
    queryBuilder.append("_id", keyId);
    queryBuilder.append("expiresAt", BSON("$gt" << forThisTime.asTimestamp()));

    auto cursor = client.query(KeysCollectionDocument::ConfigNS, queryBuilder.obj());

    if (!cursor->more()) {
        return {ErrorCodes::KeyNotFound, "Could not find matching key"};
    }

    // Parse the key.
    auto res = KeysCollectionDocument::fromBSON(cursor->next());
    if (!res.isOK()) {
        return res.getStatus();
    }

    // Add to our cache.
    {
        stdx::lock_guard<stdx::mutex> lk(_mutex);
        _cache.add(keyId, res.getValue());
    }

    return res.getValue();
}
StatusWith<std::vector<KeysCollectionDocument>> ShardingCatalogClientImpl::getNewKeys(
    OperationContext* opCtx,
    StringData purpose,
    const LogicalTime& newerThanThis,
    repl::ReadConcernLevel readConcernLevel) {
    auto config = Grid::get(opCtx)->shardRegistry()->getConfigShard();

    BSONObjBuilder queryBuilder;
    queryBuilder.append("purpose", purpose);
    queryBuilder.append("expiresAt", BSON("$gt" << newerThanThis.asTimestamp()));

    auto findStatus = config->exhaustiveFindOnConfig(opCtx,
                                                     kConfigReadSelector,
                                                     readConcernLevel,
                                                     KeysCollectionDocument::ConfigNS,
                                                     queryBuilder.obj(),
                                                     BSON("expiresAt" << 1),
                                                     boost::none);

    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& keyDocs = findStatus.getValue().docs;
    std::vector<KeysCollectionDocument> keys;
    for (auto&& keyDoc : keyDocs) {
        auto parseStatus = KeysCollectionDocument::fromBSON(keyDoc);
        if (!parseStatus.isOK()) {
            return parseStatus.getStatus();
        }

        keys.push_back(std::move(parseStatus.getValue()));
    }

    return keys;
}
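
An illustrative consumer of getNewKeys: fetch every key for the "dummy" purpose expiring after the newest key already cached locally. catalogClient and latestCachedExpiration are assumed names, not part of the code above.

auto newKeysStatus = catalogClient->getNewKeys(
    opCtx, "dummy", latestCachedExpiration, repl::ReadConcernLevel::kMajorityReadConcern);
if (newKeysStatus.isOK()) {
    // Results are sorted ascending by 'expiresAt', so the last element (if any) is the
    // longest-lived key; a refresher would typically cache each document by its keyId.
    const std::vector<KeysCollectionDocument>& newKeys = newKeysStatus.getValue();
}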