Code Example #1
File: dbclient.cpp Project: RyokoAkizuki/freshcity
    void DBClientWithCommands::reIndex( const string& ns ) {
        // Save an owned copy of every index spec currently defined on the namespace.
        list<BSONObj> all;
        auto_ptr<DBClientCursor> cursor = getIndexes( ns );
        while ( cursor->more() ) {
            all.push_back( cursor->next().getOwned() );
        }

        // Drop all indexes, then recreate each one by re-inserting its saved spec
        // into the database's system.indexes collection.
        dropIndexes( ns );

        for ( list<BSONObj>::iterator i = all.begin(); i != all.end(); i++ ) {
            BSONObj o = *i;
            insert( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , o );
        }
    }
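The reIndex call above saves each index spec, drops all indexes with dropIndexes(ns), and recreates them by re-inserting the saved specs into system.indexes. For context, here is a minimal sketch of exercising these client-side calls through the legacy C++ driver; the header path, connection string, and the "test.people" namespace are assumptions, not taken from the project above.

    #include <iostream>
    #include "mongo/client/dbclient.h"   // header path varies by driver version

    using namespace mongo;

    int main() {
        DBClientConnection conn;
        conn.connect( "localhost" );                  // throws on connection failure

        const std::string ns = "test.people";         // hypothetical namespace
        conn.insert( ns, BSON( "name" << "alice" ) );
        conn.ensureIndex( ns, BSON( "name" << 1 ) );  // create an index to work with

        conn.reIndex( ns );                           // drop and rebuild all indexes (as above)
        conn.dropIndexes( ns );                       // or drop every index except _id for good

        return 0;
    }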
Code Example #2
File: database.cpp Project: ryannutley/mongo
    Status Database::dropCollection( const StringData& fullns ) {
        LOG(1) << "dropCollection: " << fullns << endl;

        CollectionTemp* collection = getCollectionTemp( fullns );
        if ( !collection ) {
            // collection doesn't exist
            return Status::OK();
        }

        _initForWrites();

        BackgroundOperation::assertNoBgOpInProgForNs( fullns );

        if ( collection->_details->getTotalIndexCount() > 0 ) {
            try {
                string errmsg;
                BSONObjBuilder result;

                if ( !dropIndexes( collection->_details, fullns, "*", errmsg, result, true) ) {
                    warning() << "could not drop collection: " << fullns
                              << " because of " << errmsg << endl;
                    return Status( ErrorCodes::InternalError, errmsg );
                }
            }
            catch( DBException& e ) {
                stringstream ss;
                ss << "drop: dropIndexes for collection failed - consider trying repair ";
                ss << " cause: " << e.what();
                warning() << ss.str() << endl;
                return Status( ErrorCodes::InternalError, ss.str() );
            }
            verify( collection->_details->getTotalIndexCount() == 0 );
        }
        LOG(1) << "\t dropIndexes done" << endl;

        ClientCursor::invalidate( fullns );
        Top::global.collectionDropped( fullns );
        NamespaceDetailsTransient::eraseCollection( fullns.toString() );
        dropNS( fullns.toString() );

        scoped_lock lk( _collectionLock );
        _collections.erase( fullns.toString() );

        return Status::OK();
    }
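Database::dropCollection above drops every index (name "*") via dropIndexes before removing the namespace itself. A hedged sketch of how a client could reach these paths with the legacy C++ driver's command helpers follows; the connection string and the "test.people" namespace are placeholders.

    #include <iostream>
    #include "mongo/client/dbclient.h"   // header path varies by driver version

    using namespace mongo;

    int main() {
        DBClientConnection conn;
        conn.connect( "localhost" );     // throws on connection failure

        // Drop all secondary indexes with the deleteIndexes command; "*" selects
        // every index except _id, mirroring the dropIndexes(..., "*", ...) calls above.
        BSONObj info;
        conn.runCommand( "test", BSON( "deleteIndexes" << "people" << "index" << "*" ), info );
        std::cout << info.jsonString() << std::endl;

        // Dropping the whole collection makes the server drop its indexes first,
        // as in Database::dropCollection above.
        conn.dropCollection( "test.people" );
        return 0;
    }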
Code Example #3
File: compact.cpp Project: adi2188/mongo
    bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) { 
        //int les = d->lastExtentSize;

        // this is a big job, so might as well make things tidy before we start just to be nice.
        getDur().commitNow();

        list<DiskLoc> extents;
        for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext ) 
            extents.push_back(L);
        log() << "compact " << extents.size() << " extents" << endl;

        ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) );

        // same data, but might perform a little differently after compact?
        NamespaceDetailsTransient::get(ns).clearQueryCache();

        int nidx = d->nIndexes;
        scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] );
        scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] );
        {
            NamespaceDetails::IndexIterator ii = d->ii(); 
            int x = 0;
            while( ii.more() ) { 
                BSONObjBuilder b;
                IndexDetails& idx = ii.next();
                BSONObj::iterator i(idx.info.obj());
                while( i.more() ) { 
                    BSONElement e = i.next();
                    if( !str::equals(e.fieldName(), "v") && !str::equals(e.fieldName(), "background") ) {
                        b.append(e);
                    }
                }
                BSONObj o = b.obj().getOwned();
                phase1[x].sorter.reset( new BSONObjExternalSorter( idx.idxInterface(), o.getObjectField("key") ) );
                phase1[x].sorter->hintNumObjects( d->stats.nrecords );
                indexSpecs[x++].reset(o);
            }
        }

        log() << "compact orphan deleted lists" << endl;
        for( int i = 0; i < Buckets; i++ ) { 
            d->deletedList[i].writing().Null();
        }

        // before dropping indexes, at least make sure we can allocate one extent!
        uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());

        // note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
        log() << "compact dropping indexes" << endl;
        BSONObjBuilder b;
        if( !dropIndexes(d, ns, "*", errmsg, b, true) ) { 
            errmsg = "compact drop indexes failed";
            log() << errmsg << endl;
            return false;
        }

        getDur().commitNow();

        long long skipped = 0;
        int n = 0;
        for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) { 
            skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb);
            pm.hit();
        }

        if( skipped ) {
            result.append("invalidObjects", skipped);
        }

        assert( d->firstExtent.ext()->xprev.isNull() );

        // indexes will do their own progress meter?
        pm.finished();

        // build indexes
        NamespaceString s(ns);
        string si = s.db + ".system.indexes";
        for( int i = 0; i < nidx; i++ ) {
            killCurrentOp.checkForInterrupt(false);
            BSONObj info = indexSpecs[i].info;
            log() << "compact create index " << info["key"].Obj().toString() << endl;
            try {
                precalced = &phase1[i];
                theDataFileMgr.insert(si.c_str(), info.objdata(), info.objsize());
            }
            catch(...) { 
                precalced = 0;
                throw;
            }
            precalced = 0;
        }

        return true;
    }
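_compact above snapshots the index specs, clears the deleted-record lists, drops all indexes, rewrites each extent, and finally rebuilds the indexes from the saved specs. From the client side this sequence is triggered by the compact command; a minimal hedged sketch (connection string and the "test.people" namespace are placeholders):

    #include <iostream>
    #include "mongo/client/dbclient.h"   // header path varies by driver version

    using namespace mongo;

    int main() {
        DBClientConnection conn;
        conn.connect( "localhost" );     // throws on connection failure

        // Run the compact command, whose server-side handling ends up in _compact() above.
        BSONObj info;
        bool ok = conn.runCommand( "test", BSON( "compact" << "people" ), info );
        std::cout << ( ok ? "compact ok: " : "compact failed: " )
                  << info.jsonString() << std::endl;
        return 0;
    }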
Code Example #4
File: database.cpp Project: DylanTong/mongo
    Status Database::dropCollection( const StringData& fullns ) {
        LOG(1) << "dropCollection: " << fullns << endl;

        Collection* collection = getCollection( fullns );
        if ( !collection ) {
            // collection doesn't exist
            return Status::OK();
        }

        _initForWrites();

        {
            NamespaceString s( fullns );
            verify( s.db() == _name );

            if( s.isSystem() ) {
                if( s.coll() == "system.profile" ) {
                    if ( _profile != 0 )
                        return Status( ErrorCodes::IllegalOperation,
                                       "turn off profiling before dropping system.profile collection" );
                }
                else {
                    return Status( ErrorCodes::IllegalOperation, "can't drop system ns" );
                }
            }
        }

        BackgroundOperation::assertNoBgOpInProgForNs( fullns );

        audit::logDropCollection( currentClient.get(), fullns );

        if ( collection->_details->getTotalIndexCount() > 0 ) {
            try {
                string errmsg;
                BSONObjBuilder result;

                if ( !dropIndexes( collection->_details, fullns, "*", errmsg, result, true) ) {
                    warning() << "could not drop collection: " << fullns
                              << " because of " << errmsg << endl;
                    return Status( ErrorCodes::InternalError, errmsg );
                }
            }
            catch( DBException& e ) {
                stringstream ss;
                ss << "drop: dropIndexes for collection failed - consider trying repair ";
                ss << " cause: " << e.what();
                warning() << ss.str() << endl;
                return Status( ErrorCodes::InternalError, ss.str() );
            }
            verify( collection->_details->getTotalIndexCount() == 0 );
        }
        LOG(1) << "\t dropIndexes done" << endl;

        ClientCursor::invalidate( fullns );
        Top::global.collectionDropped( fullns );

        Status s = _dropNS( fullns );

        _clearCollectionCache( fullns ); // we want to do this always

        if ( !s.isOK() )
            return s;

        DEV {
            // check all index collection entries are gone
            string nstocheck = fullns.toString() + ".$";
            scoped_lock lk( _collectionLock );
            for ( CollectionMap::iterator i = _collections.begin();
                  i != _collections.end();
                  ++i ) {
                string temp = i->first;
                if ( temp.find( nstocheck ) != 0 )
                    continue;
                log() << "after drop, bad cache entries for: "
                      << fullns << " have " << temp;
                verify(0);
            }
        }

        return Status::OK();
    }
Code Example #5
    void NamespaceDetails::emptyCappedCollection( const char *ns ) {
        DEV verify( this == nsdetails(ns) );
        massert( 13424, "collection must be capped", isCapped() );
        massert( 13425, "background index build in progress", !_indexBuildsInProgress );

        vector<BSONObj> indexes = Helpers::findAll( Namespace( ns ).getSisterNS( "system.indexes" ) , BSON( "ns" << ns ) );
        for ( unsigned i=0; i<indexes.size(); i++ ) {
            indexes[i] = indexes[i].copy();
        }

        if ( _nIndexes ) {
            string errmsg;
            BSONObjBuilder note;
            bool res = dropIndexes( this , ns , "*" , errmsg , note , true );
            massert( 13426 , str::stream() << "failed during index drop: " << errmsg , res );
        }

        // Clear all references to this namespace.
        ClientCursor::invalidate( ns );
        NamespaceDetailsTransient::resetCollection( ns );

        // Get a writeable reference to 'this' and reset all pertinent
        // attributes.
        NamespaceDetails *t = writingWithoutExtra();

        t->cappedLastDelRecLastExtent() = DiskLoc();
        t->cappedListOfAllDeletedRecords() = DiskLoc();

        // preserve firstExtent/lastExtent
        t->_capExtent = _firstExtent;
        t->_stats.datasize = _stats.nrecords = 0;
        // lastExtentSize preserve
        // nIndexes preserve 0
        // capped preserve true
        // max preserve
        t->_paddingFactor = 1.0;
        t->_systemFlags = 0;
        t->_capFirstNewRecord = DiskLoc();
        t->_capFirstNewRecord.setInvalid();
        t->cappedLastDelRecLastExtent().setInvalid();
        // dataFileVersion preserve
        // indexFileVersion preserve
        t->_multiKeyIndexBits = 0;
        t->_reservedA = 0;
        t->_extraOffset = 0;
        // indexBuildInProgress preserve 0
        memset(t->_reserved, 0, sizeof(t->_reserved));

        // Reset all existing extents and recreate the deleted list.
        for( DiskLoc ext = _firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
            DiskLoc prev = ext.ext()->xprev;
            DiskLoc next = ext.ext()->xnext;
            DiskLoc empty = ext.ext()->reuse( ns, true );
            ext.ext()->xprev.writing() = prev;
            ext.ext()->xnext.writing() = next;
            addDeletedRec( empty.drec(), empty );
        }

        for ( unsigned i=0; i<indexes.size(); i++ ) {
            theDataFileMgr.insertWithObjMod(Namespace( ns ).getSisterNS( "system.indexes" ).c_str(),
                                            indexes[i],
                                            false,
                                            true);
        }
        
    }
Code Example #6
File: compact.cpp Project: nosqldb/mongo
    bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) { 
        // this is a big job, so might as well make things tidy before we start just to be nice.
        getDur().commitIfNeeded();

        list<DiskLoc> extents;
        for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext ) 
            extents.push_back(L);
        log() << "compact " << extents.size() << " extents" << endl;

        ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) );

        // same data, but might perform a little differently after compact?
        NamespaceDetailsTransient::get(ns).clearQueryCache();

        int nidx = d->nIndexes;
        scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] );
        scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] );
        {
            NamespaceDetails::IndexIterator ii = d->ii(); 
            // For each existing index...
            for( int idxNo = 0; ii.more(); ++idxNo ) {
                // Build a new index spec based on the old index spec.
                BSONObjBuilder b;
                BSONObj::iterator i(ii.next().info.obj());
                while( i.more() ) { 
                    BSONElement e = i.next();
                    if ( str::equals( e.fieldName(), "v" ) ) {
                        // Drop any preexisting index version spec.  The default index version will
                        // be used instead for the new index.
                        continue;
                    }
                    if ( str::equals( e.fieldName(), "background" ) ) {
                        // Create the new index in the foreground.
                        continue;
                    }
                    // Pass the element through to the new index spec.
                    b.append(e);
                }
                // Add the new index spec to 'indexSpecs'.
                BSONObj o = b.obj().getOwned();
                indexSpecs[idxNo].reset(o);
                // Create an external sorter.
                phase1[idxNo].sorter.reset
                        ( new BSONObjExternalSorter
                           // Use the default index interface, since the new index will be created
                           // with the default index version.
                         ( IndexInterface::defaultVersion(),
                           o.getObjectField("key") ) );
                phase1[idxNo].sorter->hintNumObjects( d->stats.nrecords );
            }
        }

        log() << "compact orphan deleted lists" << endl;
        for( int i = 0; i < Buckets; i++ ) { 
            d->deletedList[i].writing().Null();
        }



        // Start over from scratch with our extent sizing and growth
        d->lastExtentSize=0;

        // before dropping indexes, at least make sure we can allocate one extent!
        uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());

        // note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
        log() << "compact dropping indexes" << endl;
        BSONObjBuilder b;
        if( !dropIndexes(d, ns, "*", errmsg, b, true) ) { 
            errmsg = "compact drop indexes failed";
            log() << errmsg << endl;
            return false;
        }

        getDur().commitIfNeeded();

        long long skipped = 0;
        int n = 0;

        // reset data size and record counts to 0 for this namespace
        // as we're about to tally them up again for each new extent
        {
            NamespaceDetails::Stats *s = getDur().writing(&d->stats);
            s->datasize = 0;
            s->nrecords = 0;
        }

        for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) { 
            skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb);
            pm.hit();
        }

        if( skipped ) {
            result.append("invalidObjects", skipped);
        }

        verify( d->firstExtent.ext()->xprev.isNull() );

        // indexes will do their own progress meter?
        pm.finished();

        // build indexes
        NamespaceString s(ns);
        string si = s.db + ".system.indexes";
        for( int i = 0; i < nidx; i++ ) {
            killCurrentOp.checkForInterrupt(false);
            BSONObj info = indexSpecs[i].info;
            log() << "compact create index " << info["key"].Obj().toString() << endl;
            try {
                precalced = &phase1[i];
                theDataFileMgr.insert(si.c_str(), info.objdata(), info.objsize());
            }
            catch(...) { 
                precalced = 0;
                throw;
            }
            precalced = 0;
        }

        return true;
    }