Example #1
ChunkPtr ChunkManager::findIntersectingChunk(const BSONObj& shardKey) const {
    {
        BSONObj chunkMin;
        ChunkPtr chunk;
        {
            ChunkMap::const_iterator it = _chunkMap.upper_bound(shardKey);
            if (it != _chunkMap.end()) {
                chunkMin = it->first;
                chunk = it->second;
            }
        }

        if (chunk) {
            if (chunk->containsKey(shardKey)) {
                return chunk;
            }

            log() << chunkMin;
            log() << *chunk;
            log() << shardKey;

            reload();
            msgasserted(13141, "Chunk map pointed to incorrect chunk");
        }
    }

    msgasserted(8070,
                str::stream() << "couldn't find a chunk intersecting: " << shardKey
                              << " for ns: " << _ns << " at version: " << _version.toString()
                              << ", number of chunks: " << _chunkMap.size());
}
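Across these examples, msgasserted() is MongoDB's non-returning assertion helper: it takes a unique numeric error code plus a message, throws a MsgAssertionException (a DBException subclass), and never returns, which is why it can legally appear as the last statement of a function with a non-void return type, as above. A minimal standalone sketch of that shape (the signature here is an assumption for illustration, not MongoDB's exact declaration):

#include <sstream>
#include <stdexcept>
#include <string>

// Sketch: throw on a failed invariant, carrying a stable numeric code so the
// failure site can be located from logs. Never returns to the caller.
[[noreturn]] void msgasserted(int code, const std::string& msg) {
    std::ostringstream ss;
    ss << "assertion " << code << ": " << msg;
    throw std::runtime_error(ss.str());  // MongoDB throws MsgAssertionException here
}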
Example #2
BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
    OplogReader missingObjReader;  // why are we using OplogReader to run a non-oplog query?
    const char* ns = o.getStringField("ns");

    // capped collections
    Collection* collection = db->getCollection(ns);
    if (collection && collection->isCapped()) {
        log() << "missing doc, but this is okay for a capped collection (" << ns << ")";
        return BSONObj();
    }

    const int retryMax = 3;
    for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
        if (retryCount != 1) {
            // if we are retrying, sleep a bit to let the network possibly recover
            sleepsecs(retryCount * retryCount);
        }
        try {
            bool ok = missingObjReader.connect(HostAndPort(_hostname));
            if (!ok) {
                warning() << "network problem detected while connecting to the "
                          << "sync source, attempt " << retryCount << " of " << retryMax << endl;
                continue;  // try again
            }
        } catch (const SocketException&) {
            warning() << "network problem detected while connecting to the "
                      << "sync source, attempt " << retryCount << " of " << retryMax << endl;
            continue;  // try again
        }

        // get _id from oplog entry to create query to fetch document.
        const BSONElement opElem = o.getField("op");
        const bool isUpdate = !opElem.eoo() && opElem.str() == "u";
        const BSONElement idElem = o.getObjectField(isUpdate ? "o2" : "o")["_id"];

        if (idElem.eoo()) {
            severe() << "cannot fetch missing document without _id field: " << o.toString();
            fassertFailedNoTrace(28742);
        }

        BSONObj query = BSONObjBuilder().append(idElem).obj();
        BSONObj missingObj;
        try {
            missingObj = missingObjReader.findOne(ns, query);
        } catch (const SocketException&) {
            warning() << "network problem detected while fetching a missing document from the "
                      << "sync source, attempt " << retryCount << " of " << retryMax << endl;
            continue;  // try again
        } catch (DBException& e) {
            error() << "assertion fetching missing object: " << e.what() << endl;
            throw;
        }

        // success!
        return missingObj;
    }
    // retry count exceeded
    msgasserted(15916,
                str::stream() << "Can no longer connect to initial sync source: " << _hostname);
}
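Examples #2 and #16 (and Example #30 further down) share a bounded-retry shape: attempt, sleep with a growing backoff, and fall through to msgasserted() once the budget is spent. A generic sketch of that loop, with illustrative names and the same quadratic sleepsecs(retryCount * retryCount) backoff:

#include <chrono>
#include <stdexcept>
#include <thread>

// Sketch: run op() up to maxAttempts times, sleeping attempt^2 seconds between
// tries; rethrow the last failure once the retry budget is exhausted.
template <typename Op>
auto retryWithBackoff(Op op, int maxAttempts = 3) -> decltype(op()) {
    for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
        if (attempt != 1) {
            std::this_thread::sleep_for(std::chrono::seconds(attempt * attempt));
        }
        try {
            return op();
        } catch (const std::exception&) {
            if (attempt == maxAttempts) throw;  // budget spent: surface the error
        }
    }
    throw std::logic_error("unreachable");  // loop always returns or throws
}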
Example #3
    DiskLoc MmapV1ExtentManager::_createExtent( OperationContext* txn,
                                                int size,
                                                bool enforceQuota ) {
        size = quantizeExtentSize( size );

        if ( size > maxSize() )
            size = maxSize();

        verify( size < DataFile::maxSize() );

        for ( int i = numFiles() - 1; i >= 0; i-- ) {
            DataFile* f = _getOpenFile(i);
            invariant(f);

            if ( f->getHeader()->unusedLength >= size ) {
                return _createExtentInFile( txn, i, f, size, enforceQuota );
            }
        }

        _checkQuota( enforceQuota, numFiles() );

        // no space in an existing file
        // allocate files until we either get one big enough or hit maxSize
        for ( int i = 0; i < 8; i++ ) {
            DataFile* f = _addAFile( txn, size, false );

            if ( f->getHeader()->unusedLength >= size ) {
                return _createExtentInFile( txn, numFiles() - 1, f, size, enforceQuota );
            }

        }

        // callers don't check for null return code, so assert
        msgasserted(14810, "couldn't allocate space for a new extent" );
    }
Example #4
void LogFile::synchronousAppend(const void* _buf, size_t _len) {
    const size_t BlockSize = 8 * 1024 * 1024;
    verify(_fd);
    verify(_len % minDirectIOSizeBytes == 0);
    const char* buf = (const char*)_buf;
    size_t left = _len;
    while (left) {
        size_t toWrite = std::min(left, BlockSize);
        DWORD written;
        if (!WriteFile(_fd, buf, toWrite, &written, NULL)) {
            DWORD e = GetLastError();
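            // Error 87 is ERROR_INVALID_PARAMETER; for a handle opened with
            // FILE_FLAG_NO_BUFFERING this usually means a length or buffer that
            // is not sector-aligned (hence the minDirectIOSizeBytes check above).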
            if (e == 87)
                msgasserted(13519, "error 87 appending to file - invalid parameter");
            else
                uasserted(13517,
                          str::stream() << "error appending to file " << _name << ' ' << _len << ' '
                                        << toWrite
                                        << ' '
                                        << errnoWithDescription(e));
        } else {
            dassert(written == toWrite);
        }
        left -= written;
        buf += written;
    }
}
Example #5
    // Open the dictionary. Creates it if necessary.
    bool IndexDetails::open(const bool may_create) {
        const string dname = indexNamespace();
        if (may_create) {
            addNewNamespaceToCatalog(dname);
        }

        TOKULOG(1) << "Opening IndexDetails " << dname << endl;
        try {
            _db.reset(new storage::Dictionary(dname, _info, *_descriptor, may_create,
                                              _info["background"].trueValue()));
            return true;
        } catch (storage::Dictionary::NeedsCreate) {
            if (cc().upgradingSystemUsers() &&
                isSystemUsersCollection(parentNS()) &&
                keyPattern() == oldSystemUsersKeyPattern) {
                // We're upgrading the system.users collection, and we are missing the old index.
                // That's ok, we'll signal the caller about this by returning a NULL pointer from
                // IndexDetails::make.  See #673
                return false;
            }
            // Unlike for NamespaceIndex, this dictionary must exist on disk if we think it should
            // exist.  This error only gets thrown if may_create is false, which happens when we're
            // trying to open a collection for which we have serialized info.  Therefore, this is a
            // fatal non-user error.
            msgasserted(16988, mongoutils::str::stream() << "dictionary " << dname
                               << " should exist, but we got ENOENT");
        }
    }
Example #6
    Extent* ExtentManager::createExtent(const char *ns, int size, bool newCapped, bool enforceQuota ) {
        size = quantizeExtentSize( size );

        for ( int i = numFiles() - 1; i >= 0; i-- ) {
            DataFile* f = getFile( i );
            if ( f->getHeader()->unusedLength >= size ) {
                return _createExtentInFile( i, f, ns, size, newCapped, enforceQuota );
            }
        }

        // no space in an existing file
        // allocate files until we either get one big enough or hit maxSize
        for ( int i = 0; i < 8; i++ ) {
            DataFile* f = addAFile( size, false );

            if ( f->getHeader()->unusedLength >= size ||
                 f->getHeader()->fileLength >= DataFile::maxSize() ) {
                return _createExtentInFile( numFiles() - 1, f, ns, size, newCapped, enforceQuota );
            }

        }

        // callers don't check for null return code, so assert
        msgasserted(14810, "couldn't allocate space for a new extent" );
    }
Example #7
File: file.cpp Project: ANTco/mongo
 void File::read(fileofs o, char* data, unsigned len) {
     LARGE_INTEGER li;
     li.QuadPart = o;
     if (SetFilePointerEx(_handle, li, NULL, FILE_BEGIN) == 0) {
         _bad = true;
         DWORD dosError = GetLastError();
         log() << "In File::read(), SetFilePointerEx for '" << _name
               << "' tried to set the file pointer to " << o
               << " but failed with " << errnoWithDescription(dosError) << std::endl;
         return;
     }
     DWORD bytesRead;
     if (!ReadFile(_handle, data, len, &bytesRead, 0)) {
         _bad = true;
         DWORD dosError = GetLastError();
         log() << "In File::read(), ReadFile for '" << _name
               << "' failed with " << errnoWithDescription(dosError) << std::endl;
     }
     else if (bytesRead != len) {
         _bad = true;
         msgasserted(10438,
                     mongoutils::str::stream() << "In File::read(), ReadFile for '" << _name
                                               << "' read " << bytesRead
                                               << " bytes while trying to read " << len
                                               << " bytes starting at offset " << o
                                               << ", truncated file?");
     }
 }
Example #8
    // Open the dictionary. Creates it if necessary.
    bool IndexDetailsBase::open(const bool may_create, const bool use_memcmp_magic) {
        const string dname = indexNamespace();

        TOKULOG(1) << "Opening IndexDetails " << dname << endl;
        try {
            // We use the memcmp magic API only for single-key, ascending _id indexes,
            // because the _id field is always unique (and therefore we can simply
            // compare the OID fields if they exist and that will be sufficient)
            if (use_memcmp_magic) {
                verify(_unique);
            }
            _db.reset(new storage::Dictionary(dname, _info, *_descriptor, may_create,
                                              _info["background"].trueValue(), use_memcmp_magic));
            return true;
        } catch (storage::Dictionary::NeedsCreate) {
            if (cc().upgradingSystemUsers() &&
                isSystemUsersCollection(parentNS()) &&
                keyPattern() == oldSystemUsersKeyPattern) {
                // We're upgrading the system.users collection, and we are missing the old index.
                // That's ok, we'll signal the caller about this by returning a NULL pointer from
                // IndexDetailsBase::make.  See #673
                return false;
            }
            // This dictionary must exist on disk if we think it should exist.
            // This error only gets thrown if may_create is false, which happens when we're
            // trying to open a collection for which we have serialized info.
            // Therefore, this is a fatal non-user error.
            msgasserted(16988, mongoutils::str::stream() << "dictionary " << dname
                               << " should exist, but we got ENOENT");
        }
    }
Example #9
// todo : we stop once a datafile dne.
//        if one datafile were missing we should keep going for
//        repair purposes yet we do not.
void Database::openAllFiles() {
    verify(this);
    Status s = _extentManager.init();
    if ( !s.isOK() ) {
        msgasserted( 16966, str::stream() << "_extentManager.init failed: " << s.toString() );
    }
}
Example #10
    void LogFile::truncate() {
        verify(_fd != INVALID_HANDLE_VALUE);

        if (!SetEndOfFile(_fd)){
            msgasserted(15871, "Couldn't truncate file: " + errnoWithDescription());
        }
    }
Example #11
 void LockerImpl::assertWriteLocked(const StringData& ns) const {
     if (!isWriteLocked(ns)) {
         dump();
         msgasserted(
             16105, mongoutils::str::stream() << "expected to be write locked for " << ns);
     }
 }
Example #12
std::string toUtf8String(const std::wstring& wide) {
    if (wide.size() > boost::integer_traits<int>::const_max)
        throw std::length_error(
            "Wide string cannot be more than INT_MAX characters long.");
    if (wide.size() == 0)
        return "";

    // Calculate necessary buffer size
    int len = ::WideCharToMultiByte(
                  CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
                  NULL, 0, NULL, NULL);

    // Perform actual conversion
    if (len > 0) {
        std::vector<char> buffer(len);
        len = ::WideCharToMultiByte(
                  CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
                  &buffer[0], static_cast<int>(buffer.size()), NULL, NULL);
        if (len > 0) {
            assert(len == static_cast<int>(buffer.size()));
            return std::string(&buffer[0], buffer.size());
        }
    }

    msgasserted( 16091 ,
                 mongoutils::str::stream() << "can't wstring to utf8: " << ::GetLastError() );
}
Example #13
 // todo : we stop once a datafile dne.
 //        if one datafile were missing we should keep going for
 //        repair purposes yet we do not.
 void Database::openAllFiles(TransactionExperiment* txn) {
     verify(this);
     Status s = _extentManager->init(txn);
     if ( !s.isOK() ) {
         msgasserted( 16966, str::stream() << "_extentManager.init failed: " << s.toString() );
     }
 }
Example #14
 void Lock::assertAtLeastReadLocked(const StringData& ns) { 
     if( !atLeastReadLocked(ns) ) { 
         LockState &ls = lockState();
         log() << "error expected " << ns << " to be locked " << endl;
         ls.dump();
         msgasserted(16104, str::stream() << "expected to be read locked for " << ns);
     }
 }
Example #15
 void LockerImpl::assertAtLeastReadLocked(const StringData& ns) const {
     if (!isAtLeastReadLocked(ns)) {
         log() << "error expected " << ns << " to be locked " << std::endl;
         dump();
         msgasserted(
             16104, mongoutils::str::stream() << "expected to be read locked for " << ns);
     }
 }
Example #16
File: sync.cpp Project: wjin/mongo
    BSONObj Sync::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
        OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
        const char *ns = o.getStringField("ns");

        // capped collections
        Collection* collection = db->getCollection(ns);
        if ( collection && collection->isCapped() ) {
            log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
            return BSONObj();
        }

        const int retryMax = 3;
        for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
            if (retryCount != 1) {
                // if we are retrying, sleep a bit to let the network possibly recover
                sleepsecs(retryCount * retryCount);
            }
            try {
                bool ok = missingObjReader.connect(HostAndPort(hn));
                if (!ok) {
                    warning() << "network problem detected while connecting to the "
                              << "sync source, attempt " << retryCount << " of "
                              << retryMax << endl;
                    continue;  // try again
                }
            } 
            catch (const SocketException&) {
                warning() << "network problem detected while connecting to the "
                          << "sync source, attempt " << retryCount << " of "
                          << retryMax << endl;
                continue; // try again
            }

            // might be more than just _id in the update criteria
            BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
            BSONObj missingObj;
            try {
                missingObj = missingObjReader.findOne(ns, query);
            } 
            catch (const SocketException&) {
                warning() << "network problem detected while fetching a missing document from the "
                          << "sync source, attempt " << retryCount << " of "
                          << retryMax << endl;
                continue; // try again
            } 
            catch (DBException& e) {
                log() << "replication assertion fetching missing object: " << e.what() << endl;
                throw;
            }

            // success!
            return missingObj;
        }
        // retry count exceeded
        msgasserted(15916, 
                    str::stream() << "Can no longer connect to initial sync source: " << hn);
    }
Example #17
 CollectionMap::~CollectionMap() {
     for (CollectionStringMap::const_iterator it = _collections.begin(); it != _collections.end(); ++it) {
         shared_ptr<Collection> cl = it->second;
         try {
             cl->close();
         }
         catch (DBException &e) {
             // shouldn't throw in destructor
             msgasserted(16779, mongoutils::str::stream() << "caught exception while closing " << (string) it->first << " to close CollectionMap " << _database << ": " << e.what());
         }
     }
     if (_metadb != NULL) {
         TOKULOG(1) << "Closing CollectionMap " << _database << endl;
         const int r = _metadb->close();
         if (r != 0) {
             msgasserted(16920, mongoutils::str::stream() << "failed to close metadb for CollectionMap " << _database);
         }
     }
 }
Example #18
 NamespaceIndex::~NamespaceIndex() {
     for (NamespaceDetailsMap::const_iterator it = _namespaces.begin(); it != _namespaces.end(); ++it) {
         shared_ptr<NamespaceDetails> d = it->second;
         try {
             d->close();
         }
         catch (DBException &e) {
             // shouldn't throw in destructor
             msgasserted(16779, mongoutils::str::stream() << "caught exception while closing " << (string) it->first << " to close NamespaceIndex " << _database << ": " << e.what());
         }
     }
     if (_nsdb != NULL) {
         TOKULOG(1) << "Closing NamespaceIndex " << _database << endl;
         const int r = _nsdb->close();
         if (r != 0) {
             msgasserted(16920, mongoutils::str::stream() << "failed to close nsdb for NamespaceIndex " << _database);
         }
     }
 }
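A side note on the two destructors above (Examples #17 and #18): they catch DBException from close() precisely because letting an exception escape a destructor is unsafe, yet the catch block then calls msgasserted(), which throws. Under C++11 rules that would terminate the process, since destructors are noexcept by default. A minimal sketch of the hazard (illustrative code, not from MongoDB):

#include <stdexcept>

struct Holder {
    // Destructors are implicitly noexcept(true) in C++11; without the explicit
    // noexcept(false), a throw escaping here calls std::terminate().
    ~Holder() noexcept(false) {
        throw std::runtime_error("close failed");
    }
};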
Example #19
    void LogFile::truncate() {
        verify(_fd >= 0);

        BOOST_STATIC_ASSERT(sizeof(off_t) == 8); // we don't want overflow here
        const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
        if (ftruncate(_fd, pos) != 0){
            msgasserted(15873, "Couldn't truncate file: " + errnoWithDescription());
        }

        fsync(_fd);
    }
Example #20
    void ShardingState::gotShardName( const string& name ) {
        if ( setShardName( name ) )
            return;

        string clientAddr = cc().clientAddress(true);
        stringstream ss;

        // Same error as above, to match for reporting
        ss << "remote client " << clientAddr << " tried to initialize this host as shard " << name
           << ", but shard name was previously initialized as " << _shardName;
        msgasserted( 13298 , ss.str() );
    }
Example #21
void ChunkManager::createFirstChunks(OperationContext* txn,
                                     const ShardId& primaryShardId,
                                     const vector<BSONObj>* initPoints,
                                     const set<ShardId>* initShardIds) {
    // TODO distlock?
    // TODO: Race condition if we shard the collection and insert data while we split across
    // the non-primary shard.

    vector<BSONObj> splitPoints;
    vector<ShardId> shardIds;
    calcInitSplitsAndShards(txn, primaryShardId, initPoints, initShardIds, &splitPoints, &shardIds);


    // this is the first chunk; start the versioning from scratch
    ChunkVersion version;
    version.incEpoch();
    version.incMajor();

    log() << "going to create " << splitPoints.size() + 1 << " chunk(s) for: " << _ns
          << " using new epoch " << version.epoch();

    for (unsigned i = 0; i <= splitPoints.size(); i++) {
        BSONObj min = i == 0 ? _keyPattern.getKeyPattern().globalMin() : splitPoints[i - 1];
        BSONObj max =
            i < splitPoints.size() ? splitPoints[i] : _keyPattern.getKeyPattern().globalMax();

        Chunk temp(this, min, max, shardIds[i % shardIds.size()], version);

        BSONObjBuilder chunkBuilder;
        temp.serialize(chunkBuilder);

        BSONObj chunkObj = chunkBuilder.obj();

        Status result = grid.catalogManager(txn)->update(txn,
                                                         ChunkType::ConfigNS,
                                                         BSON(ChunkType::name(temp.genID())),
                                                         chunkObj,
                                                         true,
                                                         false,
                                                         NULL);

        version.incMinor();

        if (!result.isOK()) {
            string ss = str::stream()
                << "creating first chunks failed. result: " << result.reason();
            error() << ss;
            msgasserted(15903, ss);
        }
    }

    _version = ChunkVersion(0, 0, version.epoch());
}
Example #22
    vector<string> getMyAddrs() {
        vector<string> out;
        ifaddrs * addrs;
        
        if (!serverGlobalParams.bind_ip.empty()) {
            boost::split(out, serverGlobalParams.bind_ip, boost::is_any_of(", "));
            return out;
        }

        int status = getifaddrs(&addrs);
        massert(13469, "getifaddrs failure: " + errnoWithDescription(errno), status == 0);

        // based on example code from linux getifaddrs manpage
        for (ifaddrs * addr = addrs; addr != NULL; addr = addr->ifa_next) {
            if ( addr->ifa_addr == NULL ) continue;
            int family = addr->ifa_addr->sa_family;
            char host[NI_MAXHOST];

            if (family == AF_INET || family == AF_INET6) {
                status = getnameinfo(addr->ifa_addr,
                                     (family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)),
                                     host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
                if ( status != 0 ) {
                    freeifaddrs( addrs );
                    addrs = NULL;
                    msgasserted( 13470, string("getnameinfo() failed: ") + gai_strerror(status) );
                }

                out.push_back(host);
            }

        }

        freeifaddrs( addrs );
        addrs = NULL;

        if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
            LogstreamBuilder builder(logger::globalLogDomain(),
                                     getThreadName(),
                                     logger::LogSeverity::Debug(1));
            builder << "getMyAddrs():";
            for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
                builder << " [" << *it << ']';
            }
            builder << endl;
        }

        return out;
    }
Example #23
void DocumentSourceSort::populate() {
    if (_mergingPresorted) {
        typedef DocumentSourceMergeCursors DSCursors;
        if (DSCursors* castedSource = dynamic_cast<DSCursors*>(pSource)) {
            populateFromCursors(castedSource->getCursors());
        } else {
            msgasserted(17196, "can only mergePresorted from MergeCursors");
        }
    } else {
        while (boost::optional<Document> next = pSource->getNext()) {
            loadDocument(std::move(*next));
        }
        loadingDone();
    }
}
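The dynamic_cast in Example #23 is a downcast-or-assert gate: the merge-presorted path is only valid when the upstream source really is a DocumentSourceMergeCursors, so any other type trips msgasserted(17196). A standalone sketch of the idiom (the types here are illustrative):

#include <stdexcept>

struct Source { virtual ~Source() = default; };
struct MergeCursors : Source { void merge() { /* ... */ } };

void populate(Source* src) {
    if (auto* mc = dynamic_cast<MergeCursors*>(src)) {
        mc->merge();  // downcast verified at runtime
    } else {
        // mirrors msgasserted(17196, ...): reject any other source type
        throw std::runtime_error("can only mergePresorted from MergeCursors");
    }
}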
Example #24
void File::read(fileofs o, char* data, unsigned len) {
    ssize_t bytesRead = ::pread(_fd, data, len, o);
    if (bytesRead == -1) {
        _bad = true;
        log() << "In File::read(), ::pread for '" << _name << "' failed with "
              << errnoWithDescription() << std::endl;
    } else if (bytesRead != static_cast<ssize_t>(len)) {
        _bad = true;
        msgasserted(16569,
                    mongoutils::str::stream()
                        << "In File::read(), ::pread for '" << _name << "' read " << bytesRead
                        << " bytes while trying to read " << len << " bytes starting at offset "
                        << o << ", truncated file?");
    }
}
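Example #24 treats a short ::pread() as a fatal truncation, which is the right call when the expected byte count is known up front. Where short reads are expected, the usual alternative is a loop; a hypothetical readFully() helper for contrast (not MongoDB code):

#include <cerrno>
#include <unistd.h>

// Sketch: retry short reads until len bytes arrive or a real error/EOF occurs.
bool readFully(int fd, char* data, size_t len, off_t offset) {
    while (len > 0) {
        ssize_t n = ::pread(fd, data, len, offset);
        if (n < 0) {
            if (errno == EINTR) continue;  // interrupted by a signal: retry
            return false;                  // genuine I/O error
        }
        if (n == 0) return false;          // EOF before len bytes: truncated file
        data += n;
        offset += n;
        len -= static_cast<size_t>(n);
    }
    return true;
}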
Example #25
 bool DurableMappedFile::finishOpening() {
     LOG(3) << "mmf finishOpening " << (void*) _view_write << ' ' << filename() << " len:" << length() << endl;
     if( _view_write ) {
         if (storageGlobalParams.dur) {
             _view_private = createPrivateMap();
             if( _view_private == 0 ) {
                 msgasserted(13636, str::stream() << "file " << filename() << " open/create failed in createPrivateMap (look in log for more information)");
             }
             privateViews.add(_view_private, this); // note that testIntent builds use this, even though it points to view_write then...
         }
         else {
             _view_private = _view_write;
         }
         return true;
     }
     return false;
 }
Example #26
TEST(AssertUtils, MassertTypedExtraInfoWorks) {
    try {
        msgasserted(ErrorExtraInfoExample(123), "");
    } catch (const DBException& ex) {
        ASSERT(ex.extraInfo());
        ASSERT(ex.extraInfo<ErrorExtraInfoExample>());
        ASSERT_EQ(ex.extraInfo<ErrorExtraInfoExample>()->data, 123);
    }

    try {
        massert(ErrorExtraInfoExample(123), "", false);
    } catch (const ExceptionFor<ErrorCodes::ForTestingErrorExtraInfo>& ex) {
        ASSERT(ex.extraInfo());
        ASSERT(ex.extraInfo<ErrorExtraInfoExample>());
        ASSERT_EQ(ex.extraInfo<ErrorExtraInfoExample>()->data, 123);
        ASSERT_EQ(ex->data, 123);
    }
}
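Example #26 exercises the typed extra-info overloads: msgasserted() and massert() can carry a structured payload (here ErrorExtraInfoExample) that the catch site retrieves with extraInfo<T>() instead of parsing the message string. A rough standalone analogue of that shape (the types below are illustrative, not MongoDB's):

#include <cassert>
#include <stdexcept>

// Sketch: an exception that carries structured data alongside its message.
struct ExtraInfoException : std::runtime_error {
    int data;
    ExtraInfoException(int d, const char* msg) : std::runtime_error(msg), data(d) {}
};

int main() {
    try {
        throw ExtraInfoException(123, "typed assertion");
    } catch (const ExtraInfoException& ex) {
        assert(ex.data == 123);  // branch on the payload, as the test's ASSERT_EQ does
    }
}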
Example #27
void ShardingState::gotShardName( const string& name ) {
    scoped_lock lk(_mutex);
    if ( _shardName.size() == 0 ) {
        // TODO SERVER-2299 verify the name is sound w.r.t IPs
        _shardName = name;
        return;
    }

    if ( _shardName == name )
        return;

    stringstream ss;
    ss << "gotShardName different than what i had before "
       << " before [" << _shardName << "] "
       << " got [" << name << "] "
       ;
    msgasserted( 13298 , ss.str() );
}
Example #28
 static string compressionMethodToString(TOKU_COMPRESSION_METHOD c) {
     switch (c) {
         case TOKU_SMALL_COMPRESSION_METHOD:
         case TOKU_LZMA_METHOD:
             return "lzma";
         case TOKU_DEFAULT_COMPRESSION_METHOD:
         case TOKU_FAST_COMPRESSION_METHOD:
         case TOKU_QUICKLZ_METHOD:
             return "quicklz";
         case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD:
         case TOKU_ZLIB_METHOD:
             return "zlib";
         case TOKU_NO_COMPRESSION:
             return "none";
         default:
             msgasserted(17233, mongoutils::str::stream() << "invalid compression method " << c);
     }
 }
Example #29
    /* static */
    BSONObj WriteBackListener::waitFor( const ConnectionIdent& ident, const OID& oid ) {

        Timer t;
        Timer lastMessageTimer;

        while ( t.minutes() < 60 ) {
            {
                scoped_lock lk( _seenWritebacksLock );
                WBStatus s = _seenWritebacks[ident];

                if ( oid < s.id ) {
                    // this means we're waiting for a GLE that already passed.
                    // it should be impossible because once we call GLE, no other
                    // writebacks should happen with that connection id

                    msgasserted( 14041 , str::stream() << "got writeback waitfor for older id " <<
                                 " oid: " << oid << " s.id: " << s.id << " ident: " << ident.toString() );
                }
                else if ( oid == s.id ) {
                    return s.gle;
                }

                // Stay in lock so we can use the status
                if( lastMessageTimer.seconds() > 10 ){

                    warning() << "waiting for writeback " << oid
                              << " from connection " << ident.toString()
                              << " for " << t.seconds() << " secs"
                              << ", currently at id " << s.id << endl;

                    lastMessageTimer.reset();
                }
            }

            sleepmillis( 10 );
        }

        uasserted( 13403 , str::stream() << "didn't get writeback for: " << oid
                                         << " after: " << t.millis() << " ms"
                                         << " from connection " << ident.toString() );

        throw 1; // never gets here
    }
Example #30
    void ChunkManager::loadExistingRanges(const ChunkManager* oldManager) {
        int tries = 3;

        while (tries--) {
            ChunkMap chunkMap;
            set<ShardId> shardIds;
            ShardVersionMap shardVersions;

            Timer t;

            bool success = _load(chunkMap, shardIds, &shardVersions, oldManager);
            if (success) {
                log() << "ChunkManager: time to load chunks for " << _ns << ": "
                      << t.millis() << "ms"
                      << " sequenceNumber: " << _sequenceNumber
                      << " version: " << _version.toString()
                      << " based on: "
                            << (oldManager ? oldManager->getVersion().toString() : "(empty)");

                // TODO: Merge into diff code above, so we validate in one place
                if (isChunkMapValid(chunkMap)) {
                    _chunkMap.swap(chunkMap);
                    _shardIds.swap(shardIds);
                    _shardVersions.swap(shardVersions);
                    _chunkRanges.reloadAll(_chunkMap);

                    return;
                }
            }

            if (_chunkMap.size() < 10) {
                _printChunks();
            }

            warning() << "ChunkManager loaded an invalid config for " << _ns << ", trying again";

            sleepmillis(10 * (3 - tries));
        }

        // This will abort construction so we should never have a reference to an invalid config
        msgasserted(13282, str::stream() << "Couldn't load a valid config for " << _ns
                                         << " after 3 attempts. Please try again.");
    }