Example #1
File: db.cpp Project: zhuk/mongo
    void testTheDb() {
        stringstream ss;

        setClient("sys.unittest.pdfile");

        /* this is not validly formatted, if you query this namespace bad things will happen */
        theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
        theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);

        BSONObj j1((const char *) &js1);
        deleteObjects("sys.unittest.delete", j1, false);
        theDataFileMgr.insert("sys.unittest.delete", &js1, sizeof(js1));
        deleteObjects("sys.unittest.delete", j1, false);
        updateObjects("sys.unittest.delete", j1, j1, true,ss);
        updateObjects("sys.unittest.delete", j1, j1, false,ss);

        auto_ptr<Cursor> c = theDataFileMgr.findAll("sys.unittest.pdfile");
        while ( c->ok() ) {
            c->_current();
            c->advance();
        }
        out() << endl;

        database = 0;
    }
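Across these examples, deleteObjects takes a namespace, a BSON query pattern, and a justOne flag, with logOp/god flags added in later versions. A minimal sketch of that call pattern, assuming the legacy mongod internals used in the examples (Client::Context, the BSON macro, deleteObjects) are in scope; the namespace and field name below are purely illustrative:

    // Hedged sketch: "test.sessions" and the "expired" field are hypothetical.
    long long removeExpiredSessions() {
        Client::Context ctx("test.sessions");          // establish the db context, as in later examples
        BSONObj pattern = BSON("expired" << true);     // criteria for the documents to remove
        // justOne=false deletes every match; logOp=true records the delete in the oplog
        return deleteObjects("test.sessions", pattern, false, true);
    }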
Example #2
CleanupHandler::CleanupHandler()
    : m_hasStartedDeleting(false)
{
    moveToThread(qApp->thread()); // Ensure that we are acting on the main thread.
    connect(qApp, SIGNAL(aboutToQuit()), SLOT(deleteObjects()), Qt::DirectConnection);
    signal(SIGTERM, &CleanupHandler::sigTermHandler);
}
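A hedged usage sketch for the Qt handler above: it must be constructed after QApplication exists so that qApp and its aboutToQuit() signal are valid; apart from QApplication itself, everything here is assumed from the snippet above.

#include <QApplication>

int main(int argc, char** argv)
{
    QApplication app(argc, argv);
    CleanupHandler cleanup;   // connects its deleteObjects() slot to app's aboutToQuit()
    return app.exec();        // deleteObjects() runs (and SIGTERM is trapped) at shutdown
}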
Example #3
    void getMe(BSONObj& me) {
        const string myname = getHostName();
        Client::Transaction transaction(0);            

        // local.me is an identifier for a server for getLastError w:2+
        if (!Collection::findOne("local.me", BSONObj(), me) ||
                !me.hasField("host") || me["host"].String() != myname) {
        
            // cleaning out local.me requires write
            // lock. This is a rare operation, so it should
            // be ok
            if (!Lock::isWriteLocked("local")) {
                throw RetryWithWriteLock();
            }

            // clean out local.me
            deleteObjects("local.me", BSONObj(), false, false);
        
            // repopulate
            BSONObjBuilder b;
            b.appendOID( "_id" , 0 , true );
            b.append( "host", myname );
            me = b.obj();
            updateObjects("local.me", me, BSONObj(), true, false);
        }
        transaction.commit(0);
    }
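getMe() above throws RetryWithWriteLock when local.me has to be rewritten but the write lock is not held, so the caller is expected to retry under that lock. A hedged sketch of such a caller; the retry policy and the use of Lock::DBWrite (seen in Example #16) are assumptions, not code from the original project:

    BSONObj me;
    try {
        getMe(me);                    // fast path: local.me is already valid
    }
    catch (RetryWithWriteLock&) {
        Lock::DBWrite lk("local");    // take the write lock, then retry (assumed policy)
        getMe(me);
    }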
Example #4
    void receivedDelete(Message& m, CurOp& op) {
        DbMessage d(m);
        const char *ns = d.getns();
        assert(*ns);
        uassert( 10056 ,  "not master", isMasterNs( ns ) );
        op.debug().str << ns << ' ';
        int flags = d.pullInt();
        bool justOne = flags & RemoveOption_JustOne;
        bool broadcast = flags & RemoveOption_Broadcast;
        assert( d.moreJSObjs() );
        BSONObj pattern = d.nextJsObj();
        {
            string s = pattern.toString();
            op.debug().str << " query: " << s;
            op.setQuery(pattern);
        }        

        writelock lk(ns);
        // if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
        if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
            return;
        
        Client::Context ctx(ns);
        
        long long n = deleteObjects(ns, pattern, justOne, true);
        lastError.getSafe()->recordDelete( n );
    }
Example #5
void ActorCriticOnPolicyOnStateTest::testNormalDistributionWithEligibility()
{
  srand(0); // Consistent
  lambda = 0.2;
  criticE = new ATrace<double>(projector->dimension());
  alpha_v = 0.5 / projector->vectorNorm();
  critic = new TDLambda<double>(alpha_v, gamma, lambda, criticE);

  actorMuE = new ATrace<double>(projector->dimension());
  actorSigmaE = new ATrace<double>(projector->dimension());
  actorTraces = new Traces<double>();
  actorTraces->push_back(actorMuE);
  actorTraces->push_back(actorSigmaE);

  alpha_u = 0.1 / projector->vectorNorm();

  policyDistribution = new NormalDistribution<double>(random, problem->getContinuousActions(), 0.5,
      1.0, projector->dimension());

  actor = new ActorLambda<double>(alpha_u, gamma, lambda, policyDistribution, actorTraces);
  control = new ActorCritic<double>(critic, actor, projector, toStateAction);

  agent = new LearnerAgent<double>(control);
  sim = new RLRunner<double>(agent, problem, 1000, 1, 1);
  sim->run();
  double discReward = sim->episodeR / sim->timeStep;
  std::cout << discReward << std::endl;
  ASSERT(discReward > rewardRequired);
  deleteObjects();
}
Example #6
 int removeFromSysIndexes(const char *ns, const char *idxName) {
     string system_indexes = cc().database()->name + ".system.indexes";
     BSONObjBuilder b;
     b.append("ns", ns);
     b.append("name", idxName); // e.g.: { name: "ts_1", ns: "foo.coll" }
     BSONObj cond = b.done();
     return (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
 }
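A brief usage sketch for removeFromSysIndexes above, reusing the illustrative names from its own comment ({ name: "ts_1", ns: "foo.coll" }); the collection and index are hypothetical.

     // Remove the system.indexes entry for index "ts_1" on collection "foo.coll";
     // the return value is the number of metadata documents deleted (0 or 1 expected).
     int n = removeFromSysIndexes("foo.coll", "ts_1");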
Example #7
extern "C" JNIEXPORT void JNICALL Java_net_osmand_plus_render_NativeOsmandLibrary_deleteSearchResult(JNIEnv* ienv,
		jobject obj, jint searchResult) {
	SearchResult* result = (SearchResult*) searchResult;
	if(result != NULL){
		deleteObjects(result->result);
		delete result;
	}
}
Example #8
void project_close()
//
//  Input:   none
//  Output:  none
//  Purpose: closes a SWMM project.
//
{
    deleteObjects();
    deleteHashTables();
}
Example #9
    Status Database::renameCollection( const StringData& fromNS, const StringData& toNS,
                                       bool stayTemp ) {

        // move data namespace
        Status s = _renameSingleNamespace( fromNS, toNS, stayTemp );
        if ( !s.isOK() )
            return s;

        NamespaceDetails* details = _namespaceIndex.details( toNS );
        verify( details );

        // move index namespaces
        string indexName = _name + ".system.indexes";
        BSONObj oldIndexSpec;
        while( Helpers::findOne( indexName, BSON( "ns" << fromNS ), oldIndexSpec ) ) {
            oldIndexSpec = oldIndexSpec.getOwned();

            BSONObj newIndexSpec;
            {
                BSONObjBuilder b;
                BSONObjIterator i( oldIndexSpec );
                while( i.more() ) {
                    BSONElement e = i.next();
                    if ( strcmp( e.fieldName(), "ns" ) != 0 )
                        b.append( e );
                    else
                        b << "ns" << toNS;
                }
                newIndexSpec = b.obj();
            }

            DiskLoc newIndexSpecLoc = theDataFileMgr.insert( indexName.c_str(),
                                                             newIndexSpec.objdata(),
                                                             newIndexSpec.objsize(),
                                                             false,
                                                             true,
                                                             false );
            int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
            IndexDetails &indexDetails = details->idx(indexI);
            string oldIndexNs = indexDetails.indexNamespace();
            indexDetails.info = newIndexSpecLoc;
            string newIndexNs = indexDetails.indexNamespace();

            Status s = _renameSingleNamespace( oldIndexNs, newIndexNs, false );
            if ( !s.isOK() )
                return s;

            deleteObjects( indexName.c_str(), oldIndexSpec, true, false, true );
        }

        Top::global.collectionDropped( fromNS.toString() );

        return Status::OK();
    }
Example #10
bool Model::loadObject(string name)
{
    filename = name;

    ifstream istr(filename.data());

    if(!istr)
        return false;

    deleteObjects();

    displayList = 0;
    objectLoaded = false;

    GroupObject* defaultObject = new GroupObject;
    GroupObject* currentObject = defaultObject;

    objects.push_back(defaultObject);

    // some playing with strings to get just the file path
    char path[256];
    strcpy(path, filename.data());
    for(int i = (int)filename.length(); path[i] != '\\' && path[i] != '/'; i--)
        path[i] = 0;

    Material* currentMaterial = NULL;
    char line[256];

    // loop through every line of the .obj file
    while(istr.getline(line, 256))
    {
        // some .obj exporters put a space at the end of the line - if so, we trim it here
        string temp = string(line);
        if(temp.size() > 0 && temp.at(temp.length() - 1) == ' ')
            temp.erase(temp.size() - 1, 1);

        istringstream newLine(temp, istringstream::in);

        string firstWord;
        newLine >> firstWord;

        if(firstWord == "#")
        {
            // do nothing for comments
        }
        else if(firstWord == "mtllib")
        {
            string materialFilename;
            while(newLine >> materialFilename)
            {
                loadMaterials(string(path) + materialFilename);
            }
        }
        else if(firstWord == "usemtl")
Example #11
 /* this is just an attempt to clean up old orphaned stuff on a delete all indexes
    call. repair database is the clean solution, but this gives one a lighter weight
    partial option.  see dropIndexes()
 */
 void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
     string system_indexes = cc().database()->name + ".system.indexes";
     BSONObjBuilder b;
     b.append("ns", ns);
     if( idIndex ) {
         b.append("name", BSON( "$ne" << idIndex->indexName().c_str() ));
     }
     BSONObj cond = b.done();
     int n = (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
     if( n ) {
         log() << "info: assureSysIndexesEmptied cleaned up " << n << " entries" << endl;
     }
 }
Example #12
 void DbSet::set( const BSONObj &obj, bool val ) {
     Client::Context c( name_.c_str() );
     if ( val ) {
         try {
             BSONObj k = obj;
             theDataFileMgr.insertWithObjMod( name_.c_str(), k, false );
         } catch ( DBException& ) {
             // dup key - already in set
         }
     } else {
         deleteObjects( name_.c_str(), obj, true, false, false );
     }                        
 }
Example #13
void ActorCriticOnPolicyOnStateTest::checkDistribution(
    PolicyDistribution<double>* policyDistribution)
{
  alpha_v = 0.1 / projector->vectorNorm();
  alpha_u = 0.01 / projector->vectorNorm();

  critic = new TD<double>(alpha_v, gamma, projector->dimension());
  actor = new Actor<double>(alpha_u, policyDistribution);
  control = new ActorCritic<double>(critic, actor, projector, toStateAction);

  agent = new LearnerAgent<double>(control);
  sim = new RLRunner<double>(agent, problem, 10000, 1, 1);
  sim->run();
  double discReward = sim->episodeR / sim->timeStep;
  std::cout << discReward << std::endl;
  ASSERT(discReward > rewardRequired);
  deleteObjects();
}
Example #14
ActorCriticOnPolicyOnStateTest::~ActorCriticOnPolicyOnStateTest()
{
  delete random;
  delete problem;
  delete projector;
  delete toStateAction;
  if (criticE)
    delete criticE;
  if (policyDistribution)
    delete policyDistribution;
  if (actorMuE)
    delete actorMuE;
  if (actorSigmaE)
    delete actorSigmaE;
  if (actorTraces)
    delete actorTraces;
  deleteObjects();
}
Example #15
    void receivedDelete(Message& m, CurOp& op) {
        DbMessage d(m);
        const char *ns = d.getns();

        Status status = cc().getAuthorizationManager()->checkAuthForDelete(ns);
        uassert(16542, status.reason(), status.isOK());

        op.debug().ns = ns;
        int flags = d.pullInt();
        verify(d.moreJSObjs());
        BSONObj pattern = d.nextJsObj();

        op.debug().query = pattern;
        op.setQuery(pattern);

        const bool justOne = flags & RemoveOption_JustOne;
        const bool broadcast = flags & RemoveOption_Broadcast;

        OpSettings settings;
        settings.setQueryCursorMode(WRITE_LOCK_CURSOR);
        settings.setJustOne(justOne);
        cc().setOpSettings(settings);

        Client::ShardedOperationScope sc;
        if (!broadcast && sc.handlePossibleShardedMessage(m, 0)) {
            return;
        }

        LOCK_REASON(lockReason, "delete");
        Lock::DBRead lk(ns, lockReason);

        // writelock is used to synchronize stepdowns w/ writes
        uassert(10056, "not master", isMasterNs(ns));

        Client::Context ctx(ns);
        long long n;
        scoped_ptr<Client::AlternateTransactionStack> altStack(opNeedsAltTxn(ns) ? new Client::AlternateTransactionStack : NULL);
        Client::Transaction transaction(DB_SERIALIZABLE);
        n = deleteObjects(ns, pattern, justOne, true);
        transaction.commit();

        lastError.getSafe()->recordDelete( n );
        op.debug().ndeleted = n;
    }
Example #16
    void receivedDelete(Message& m, CurOp& op) {
        DbMessage d(m);
        const char *ns = d.getns();

        Status status = cc().getAuthorizationManager()->checkAuthForDelete(ns);
        uassert(16542, status.reason(), status.isOK());

        op.debug().ns = ns;
        int flags = d.pullInt();
        bool justOne = flags & RemoveOption_JustOne;
        bool broadcast = flags & RemoveOption_Broadcast;
        verify( d.moreJSObjs() );
        BSONObj pattern = d.nextJsObj();
        
        op.debug().query = pattern;
        op.setQuery(pattern);

        PageFaultRetryableSection s;
        while ( 1 ) {
            try {
                Lock::DBWrite lk(ns);
                
                // writelock is used to synchronize stepdowns w/ writes
                uassert( 10056 ,  "not master", isMasterNs( ns ) );
                
                // if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
                if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
                    return;
                
                Client::Context ctx(ns);
                
                long long n = deleteObjects(ns, pattern, justOne, true);
                lastError.getSafe()->recordDelete( n );
                op.debug().ndeleted = n;
                break;
            }
            catch ( PageFaultException& e ) {
                LOG(2) << "recordDelete got a PageFaultException" << endl;
                e.touch();
            }
        }
    }
Example #17
 void receivedDelete(Message& m, stringstream &ss) {
     DbMessage d(m);
     const char *ns = d.getns();
     assert(*ns);
     uassert( "not master", isMasterNs( ns ) );
     setClient(ns);
     Client& client = cc();
     client.top.setWrite();
     int flags = d.pullInt();
     bool justOne = flags & 1;
     assert( d.moreJSObjs() );
     BSONObj pattern = d.nextJsObj();
     {
         string s = pattern.toString();
         ss << " query: " << s;
         CurOp& currentOp = *client.curop();
         strncpy(currentOp.query, s.c_str(), sizeof(currentOp.query)-2);
     }        
     int n = deleteObjects(ns, pattern, justOne, true);
     recordDelete( n );
 }
Example #18
    /**
     * Perform a remove operation, which might remove multiple documents.  Dispatches to remove code
     * currently to do most of this.
     *
     * Might fault or error, otherwise populates the result.
     */
    static void multiRemove( const BatchItemRef& removeItem,
                             WriteOpResult* result ) {

        Lock::assertWriteLocked( removeItem.getRequest()->getNS() );

        try {
            long long n = deleteObjects( removeItem.getRequest()->getNS(),
                                         removeItem.getDelete()->getQuery(),
                                         removeItem.getDelete()->getLimit() == 1, // justOne
                                         true, // logOp
                                         false // god
                                         );

            result->stats.n = n;
        }
        catch ( const PageFaultException& ex ) {
            // TODO: An actual data structure that's not an exception for this
            result->fault = new PageFaultException( ex );
        }
        catch ( const DBException& ex ) {
            result->error = toWriteError( ex.toStatus() );
        }
    }
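A hedged sketch of how a caller might inspect the WriteOpResult that multiRemove() above populates; the field names (stats.n, fault, error) come from the function body, while removeItem and the handling policy are assumptions.

    WriteOpResult result;
    multiRemove( removeItem, &result );        // removeItem: a BatchItemRef prepared elsewhere
    if ( result.fault ) {
        // a page fault was captured as data; touch the page and retry the remove (assumed policy)
    }
    else if ( result.error ) {
        // the DBException was converted to a per-item write error for the batch response
    }
    else {
        // result.stats.n documents were removed
    }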
Example #19
    bool WriteBatchExecutor::applyDelete(const string& ns,
                                         const WriteBatch::WriteItem& writeItem,
                                         CurOp* currentOp) {
        OpDebug& opDebug = currentOp->debug();

        _opCounters->gotDelete();

        BSONObj queryObj;

        string errMsg;
        bool ret = writeItem.parseDeleteItem(&errMsg, &queryObj);
        verify(ret); // writeItem should have been already validated by WriteBatch::parse().

        currentOp->setQuery(queryObj);
        opDebug.op = dbDelete;
        opDebug.query = queryObj;

        long long n;

        try {
            n = deleteObjects(ns.c_str(),
                              queryObj,
                              /*justOne*/false,
                              /*logOp*/true,
                              /*god*/false,
                              /*rs*/NULL);
        }
        catch (UserException& e) {
            opDebug.exceptionInfo = e.getInfo();
            return false;
        }

        _le->recordDelete(n);
        opDebug.ndeleted = n;

        return true;
    }
Example #20
    bool WriteBatchExecutor::doDelete( const string& ns,
                                       const BatchedDeleteDocument& deleteOp,
                                       CurOp* currentOp,
                                       WriteStats* stats,
                                       BatchedErrorDetail* error ) {
        OpDebug& opDebug = currentOp->debug();

        _opCounters->gotDelete();

        BSONObj queryObj = deleteOp.getQuery();

        currentOp->setQuery( queryObj );
        opDebug.op = dbDelete;
        opDebug.query = queryObj;

        long long n;

        try {
            n = deleteObjects( ns, queryObj, // ns, query
                               deleteOp.getLimit() == 1, // justOne
                               true, // logOp
                               false // god
                               );
            stats->numDeleted += n;
        }
        catch ( const UserException& ex ) {
            opDebug.exceptionInfo = ex.getInfo();
            toBatchedError( ex, error );
            return false;
        }

        _le->recordDelete( n );
        opDebug.ndeleted = n;

        return true;
    }
Example #21
int IndieLib()
{
    // ----- IndieLib initialization -----

    CIndieLib *mI = CIndieLib::instance();
    if (!mI->init()) return 0;

    // ----- Get Window Dimensions -----

    int winWidth = mI->_window->getWidth();
    int winHeight = mI->_window->getHeight();

    srand(static_cast<unsigned int>(time(0)));

    // ----- Surface loading -----

    IND_Surface *mSurfaceBack = IND_Surface::newSurface();
    if (!mI->_surfaceManager->add(mSurfaceBack, "../SpaceGame/resources/Backgrounds/18.jpg", IND_OPAQUE, IND_32)) return 0;

    /*IND_Animation* mTestA = IND_Animation::newAnimation();
    if (!mI->_animationManager->addToSurface(mTestA, "resources/animations/dust.xml", IND_ALPHA, IND_32, 255, 0, 255)) return 0;
    mTestA->getActualFramePos(0);*/

    // Loading 2D Entities

    // Background
    IND_Entity2d* mBack = IND_Entity2d::newEntity2d();
    mI->_entity2dManager->add(mBack);
    mBack->setSurface(mSurfaceBack);
    mBack->setScale((float)winWidth / mSurfaceBack->getWidth(), (float)winHeight / mSurfaceBack->getHeight());

    Controls* controls = new Controls();
    controls->loadSettings();

    ErrorHandler* error = new ErrorHandler();
    error->initialize(mI);

    Hud* mHud = new Hud();
    mHud->createHud(mI);

    Menu* mMenu = new Menu();
    mMenu->createMenu(mI);

    Save* quickSave = new Save();

    if (!SoundEngine::initialize())
    {
        error->writeError(200, 100, "Error", "SoundEngine");
    }

    vector<Planet*> mPlanets;
    Ship* mShip = NULL;

    bool loadSave = false;
    float mDelta = 0.0f;

    IND_Timer* mTimer = new IND_Timer;
    mTimer->start();

    while (!mI->_input->onKeyPress(IND_ESCAPE) && !mI->_input->quit() && !mMenu->isExit())
    {
        // get delta time
        mDelta = mI->_render->getFrameTime() / 1000.0f;

        if (mI->_input->isKeyPressed(controls->getMenu()))
        {
            mMenu->show();
            SoundEngine::getSoundEngine()->setAllSoundsPaused(true);
        }
        if (!mMenu->isHidden())
        {
            mMenu->updateMenu(mHud, quickSave, mPlanets, mShip);
            loadSave = mHud->getLoadingText()->isShow();
        }
        else
        {
            if (loadSave)
            {
                mDelta = 0.0;
                loadSave = false;
                SoundEngine::getSoundEngine()->setAllSoundsPaused(true);
                mHud->getLoadingText()->setShow(false);
                quickSave->loadSave(mI, mShip, mPlanets);
                mHud->showHud();
            }

            if (mShip != NULL)
            {
                if (mI->_input->onKeyPress(controls->getQuickSave()))
                {
                    quickSave->makeSave(mI, mShip, mPlanets);
                }

                mHud->updateHud(mShip);

                if (mI->_input->onKeyPress(controls->getQuickLoad()))
                {
                    deleteObjects(mHud, mShip, mPlanets);
                    loadSave = true;
                }
                if (mShip->isDestroyed())
                {
                    SoundEngine::getSoundEngine()->setAllSoundsPaused(true);
                    mHud->updateGameOverText(mShip->getScore());
                    deleteObjects(mHud, mShip, mPlanets);
                    mHud->getLoadingText()->setShow(false);
                    mHud->getGameOverText()->setShow(true);
                    mMenu->show();
                }
                if(mShip!=NULL)
                {
                    //----- Collisions -----
                    checkShipPlanetsCollisions(mI, mPlanets, mShip);
                    checkBulletsPlanetsCollisions(mI, mPlanets, mShip);
                    checkBulletsShipCollisions(mI, mPlanets, mShip);

                    //----- Movement update -----
                    mShip->updateShip(controls, mDelta);
                    if ((mTimer->getTicks() / 1000.0f) >= 3.0f)
                    {
                        mTimer->start();
                        mPlanets.at(rand() % mPlanets.size())->addSatellite();
                    }
                    for (vector<Planet*>::iterator it = mPlanets.begin(); it != mPlanets.end(); ++it)
                    {
                        (*it)->updatePlanet(mDelta, (mShip->getPosX() + 0.25f * mShip->getHeight() * cos(mShip->getAngleZRadian())), (mShip->getPosY() - 0.25f * mShip->getHeight() * sin(mShip->getAngleZRadian())));
                    }
                }
            }
        }

        //mI->_render->showFpsInWindowTitle();
        mI->_input->update();
        mI->_render->beginScene();
        mI->_entity2dManager->renderEntities2d();
        mI->_render->endScene();
    }

    // ----- Free -----
    delete controls;
    delete error;
    delete mHud;
    delete mMenu;
    delete quickSave;
    mI->_surfaceManager->remove(mSurfaceBack);
    mI->_entity2dManager->remove(mBack);
    deleteObjects(mHud, mShip, mPlanets);
    mI->end();
    return 0;
}
Example #22
        bool runNoDirectClient( const string& ns , 
                                const BSONObj& queryOriginal , const BSONObj& fields , const BSONObj& update , 
                                bool upsert , bool returnNew , bool remove ,
                                BSONObjBuilder& result , string& errmsg ) {
            
            
            Lock::DBWrite lk( ns );
            Client::Context cx( ns );

            BSONObj doc;
            
            bool found = Helpers::findOne( ns.c_str() , queryOriginal , doc );

            BSONObj queryModified = queryOriginal;
            if ( found && doc["_id"].type() && ! isSimpleIdQuery( queryOriginal ) ) {
                // we're going to re-write the query to be more efficient
                // we have to be a little careful because of positional operators
                // maybe we can pass this all through eventually, but right now isn't an easy way
                
                bool hasPositionalUpdate = false;
                {
                    // if the update has a positional piece ($)
                    // then we need to pull all query parts in
                    // so here we check for $
                    // a little hacky
                    BSONObjIterator i( update );
                    while ( i.more() ) {
                        const BSONElement& elem = i.next();
                        
                        if ( elem.fieldName()[0] != '$' || elem.type() != Object )
                            continue;

                        BSONObjIterator j( elem.Obj() );
                        while ( j.more() ) {
                            if ( str::contains( j.next().fieldName(), ".$" ) ) {
                                hasPositionalUpdate = true;
                                break;
                            }
                        }
                    }
                }

                BSONObjBuilder b( queryOriginal.objsize() + 10 );
                b.append( doc["_id"] );
                
                bool addedAtomic = false;

                BSONObjIterator i( queryOriginal );
                while ( i.more() ) {
                    const BSONElement& elem = i.next();

                    if ( str::equals( "_id" , elem.fieldName() ) ) {
                        // we already do _id
                        continue;
                    }
                    
                    if ( ! hasPositionalUpdate ) {
                        // if there is a dotted field, accept we may need more query parts
                        continue;
                    }
                    
                    if ( ! addedAtomic ) {
                        b.appendBool( "$atomic" , true );
                        addedAtomic = true;
                    }

                    b.append( elem );
                }
                queryModified = b.obj();
            }

            if ( remove ) {
                _appendHelper( result , doc , found , fields );
                if ( found ) {
                    deleteObjects( ns.c_str() , queryModified , true , true );
                    BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
                    le.appendNumber( "n" , 1 );
                    le.done();
                }
            }
            else {
                // update
                if ( ! found && ! upsert ) {
                    // didn't have it, and am not upserting
                    _appendHelper( result , doc , found , fields );
                }
                else {
                    // we found it or we're updating
                    
                    if ( ! returnNew ) {
                        _appendHelper( result , doc , found , fields );
                    }
                    
                    UpdateResult res = updateObjects( ns.c_str() , update , queryModified , upsert , false , true , cc().curop()->debug() );
                    
                    if ( returnNew ) {
                        if ( res.upserted.isSet() ) {
                            queryModified = BSON( "_id" << res.upserted );
                        }
                        else if ( queryModified["_id"].type() ) {
                            // we do this so that if the update changes the fields, it still matches
                            queryModified = queryModified["_id"].wrap();
                        }
                        if ( ! Helpers::findOne( ns.c_str() , queryModified , doc ) ) {
                            errmsg = str::stream() << "can't find object after modification  " 
                                                   << " ns: " << ns 
                                                   << " queryModified: " << queryModified 
                                                   << " queryOriginal: " << queryOriginal;
                            log() << errmsg << endl;
                            return false;
                        }
                        _appendHelper( result , doc , true , fields );
                    }
                    
                    BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
                    le.appendBool( "updatedExisting" , res.existing );
                    le.appendNumber( "n" , res.num );
                    if ( res.upserted.isSet() )
                        le.append( "upserted" , res.upserted );
                    le.done();
                    
                }
            }
            
            return true;
        }
Example #23
    bool Cloner::startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, string &errmsg, bool logForRepl, bool copyIndexes, int logSizeMb, long long &cursorId ) {
        char db[256];
        nsToClient( ns, db );

        NamespaceDetails *nsd = nsdetails( ns );
        if ( nsd ){
            /** note: its ok to clone into a collection, but only if the range you're copying 
                doesn't exist on this server */
            string err;
            if ( runCount( ns , BSON( "query" << query ) , err ) > 0 ){
                log() << "WARNING: data already exists for: " << ns << " in range : " << query << " deleting..." << endl;
                deleteObjects( ns , query , false , logForRepl , false );
            }
        }

        {
            dbtemprelease r;
            auto_ptr< DBClientConnection > c( new DBClientConnection() );
            if ( !c->connect( fromhost, errmsg ) )
                return false;
            if( !replAuthenticate(c.get()) )
                return false;
            conn = c;

            // Start temporary op log
            BSONObjBuilder cmdSpec;
            cmdSpec << "logCollection" << ns << "start" << 1;
            if ( logSizeMb != INT_MIN )
                cmdSpec << "logSizeMb" << logSizeMb;
            BSONObj info;
            if ( !conn->runCommand( db, cmdSpec.done(), info ) ) {
                errmsg = "logCollection failed: " + (string)info;
                return false;
            }
        }
        
        if ( ! nsd ) {
            BSONObj spec = conn->findOne( string( db ) + ".system.namespaces", BSON( "name" << ns ) );
            if ( !userCreateNS( ns, spec.getObjectField( "options" ), errmsg, true ) )
                return false;
        }

        copy( ns, ns, false, logForRepl, false, false, query );

        if ( copyIndexes ) {
            string indexNs = string( db ) + ".system.indexes";
            copy( indexNs.c_str(), indexNs.c_str(), true, logForRepl, false, false, BSON( "ns" << ns << "name" << NE << "_id_" ) );
        }
        
        auto_ptr< DBClientCursor > c;
        {
            dbtemprelease r;
            string logNS = "local.temp.oplog." + string( ns );
            c = conn->query( logNS.c_str(), Query(), 0, 0, 0, Option_CursorTailable );
        }
        if ( c->more() ) {
            replayOpLog( c.get(), query );
            cursorId = c->getCursorId();
            massert( "Expected valid tailing cursor", cursorId != 0 );
        } else {
            massert( "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
            cursorId = 0;
        }
        c->decouple();
        return true;
    }
Example #24
 void Helpers::emptyCollection(const char *ns) {
     Client::Context context(ns);
     deleteObjects(ns, BSONObj(), false);
 }
Example #25
        static bool runImpl(OperationContext* txn,
                            const string& dbname,
                            const string& ns,
                            const BSONObj& query,
                            const BSONObj& fields,
                            const BSONObj& update,
                            const BSONObj& sort,
                            bool upsert,
                            bool returnNew,
                            bool remove ,
                            BSONObjBuilder& result,
                            string& errmsg) {

            AutoGetOrCreateDb autoDb(txn, dbname, MODE_IX);
            Lock::CollectionLock collLock(txn->lockState(), ns, MODE_IX);
            Client::Context ctx(txn, ns, autoDb.getDb(), autoDb.justCreated());

            if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
                return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
                    << "Not primary while running findAndModify in " << ns));
            }

            Collection* collection = ctx.db()->getCollection(ns);

            const WhereCallbackReal whereCallback(txn, StringData(ns));

            if ( !collection ) {
                if ( !upsert ) {
                    // no collection and no upsert, so can't possibly do anything
                    _appendHelper( result, BSONObj(), false, fields, whereCallback );
                    return true;
                }
                // no collection, but upsert, so we want to create it
                // problem is we only have IX on db and collection :(
                // so we tell our caller who can do it
                errmsg = "no-collection";
                return false;
            }

            Snapshotted<BSONObj> snapshotDoc;
            RecordId loc;
            bool found = false;
            {
                CanonicalQuery* cq;
                const BSONObj projection;
                const long long skip = 0;
                const long long limit = -1; // 1 document requested; negative indicates hard limit.
                uassertStatusOK(CanonicalQuery::canonicalize(ns,
                                                             query,
                                                             sort,
                                                             projection,
                                                             skip,
                                                             limit,
                                                             &cq,
                                                             whereCallback));

                PlanExecutor* rawExec;
                uassertStatusOK(getExecutor(txn,
                                            collection,
                                            cq,
                                            PlanExecutor::YIELD_AUTO,
                                            &rawExec,
                                            QueryPlannerParams::DEFAULT));

                scoped_ptr<PlanExecutor> exec(rawExec);

                PlanExecutor::ExecState state = exec->getNextSnapshotted(&snapshotDoc, &loc);
                if (PlanExecutor::ADVANCED == state) {
                    found = true;
                }
                else if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
                    if (PlanExecutor::FAILURE == state &&
                        WorkingSetCommon::isValidStatusMemberObject(snapshotDoc.value())) {
                        const Status errorStatus =
                            WorkingSetCommon::getMemberObjectStatus(snapshotDoc.value());
                        invariant(!errorStatus.isOK());
                        uasserted(errorStatus.code(), errorStatus.reason());
                    }
                    uasserted(ErrorCodes::OperationFailed,
                              str::stream() << "executor returned " << PlanExecutor::statestr(state)
                                            << " while finding document to update");
                }
                else {
                    invariant(PlanExecutor::IS_EOF == state);
                }
            }

            WriteUnitOfWork wuow(txn);
            if (found) {
                // We found a doc, but it might not be associated with the active snapshot.
                // If the doc has changed or is no longer in the collection, we will throw a
                // write conflict exception and start again from the beginning.
                if (txn->recoveryUnit()->getSnapshotId() != snapshotDoc.snapshotId()) {
                    BSONObj oldObj = snapshotDoc.value();
                    if (!collection->findDoc(txn, loc, &snapshotDoc)) {
                        // Got deleted in the new snapshot.
                        throw WriteConflictException();
                    }

                    if (!oldObj.binaryEqual(snapshotDoc.value())) {
                        // Got updated in the new snapshot.
                        throw WriteConflictException();
                    }
                }

                // If we get here without throwing, then we should have the copy of the doc from
                // the latest snapshot.
                invariant(txn->recoveryUnit()->getSnapshotId() == snapshotDoc.snapshotId());
            }

            BSONObj doc = snapshotDoc.value();
            BSONObj queryModified = query;
            if (found && !doc["_id"].eoo() && !CanonicalQuery::isSimpleIdQuery(query)) {
                // we're going to re-write the query to be more efficient
                // we have to be a little careful because of positional operators
                // maybe we can pass this all through eventually, but right now isn't an easy way
                
                bool hasPositionalUpdate = false;
                {
                    // if the update has a positional piece ($)
                    // then we need to pull all query parts in
                    // so here we check for $
                    // a little hacky
                    BSONObjIterator i( update );
                    while ( i.more() ) {
                        const BSONElement& elem = i.next();
                        
                        if ( elem.fieldName()[0] != '$' || elem.type() != Object )
                            continue;

                        BSONObjIterator j( elem.Obj() );
                        while ( j.more() ) {
                            if ( str::contains( j.next().fieldName(), ".$" ) ) {
                                hasPositionalUpdate = true;
                                break;
                            }
                        }
                    }
                }

                BSONObjBuilder b(query.objsize() + 10);
                b.append( doc["_id"] );
                
                bool addedAtomic = false;

                BSONObjIterator i(query);
                while ( i.more() ) {
                    const BSONElement& elem = i.next();

                    if ( str::equals( "_id" , elem.fieldName() ) ) {
                        // we already do _id
                        continue;
                    }
                    
                    if ( ! hasPositionalUpdate ) {
                        // if there is a dotted field, accept we may need more query parts
                        continue;
                    }
                    
                    if ( ! addedAtomic ) {
                        b.appendBool( "$atomic" , true );
                        addedAtomic = true;
                    }

                    b.append( elem );
                }

                queryModified = b.obj();
            }

            if ( remove ) {
                _appendHelper(result, doc, found, fields, whereCallback);
                if ( found ) {
                    deleteObjects(txn, ctx.db(), ns, queryModified, PlanExecutor::YIELD_MANUAL,
                                  true, true);
                    BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
                    le.appendNumber( "n" , 1 );
                    le.done();
                }
            }
            else {
                // update
                if ( ! found && ! upsert ) {
                    // didn't have it, and am not upserting
                    _appendHelper(result, doc, found, fields, whereCallback);
                }
                else {
                    // we found it or we're updating
                    
                    if ( ! returnNew ) {
                        _appendHelper(result, doc, found, fields, whereCallback);
                    }
                    
                    const NamespaceString requestNs(ns);
                    UpdateRequest request(requestNs);

                    request.setQuery(queryModified);
                    request.setUpdates(update);
                    request.setUpsert(upsert);
                    request.setUpdateOpLog();
                    request.setStoreResultDoc(returnNew);

                    request.setYieldPolicy(PlanExecutor::YIELD_MANUAL);

                    // TODO(greg) We need to send if we are ignoring
                    // the shard version below, but for now no
                    UpdateLifecycleImpl updateLifecycle(false, requestNs);
                    request.setLifecycle(&updateLifecycle);
                    UpdateResult res = mongo::update(txn,
                                                     ctx.db(),
                                                     request,
                                                     &txn->getCurOp()->debug());

                    if (!found && res.existing) {
                        // No match was found during the read part of this find and modify, which
                        // means that we're here doing an upsert. But the update also told us that
                        // we modified an *already existing* document. This probably means that
                        // the query reported EOF based on an out-of-date snapshot. This should be
                        // a rare event, so we handle it by throwing a write conflict.
                        throw WriteConflictException();
                    }

                    if ( !collection ) {
                        // collection created by an upsert
                        collection = ctx.db()->getCollection(ns);
                    }

                    LOG(3) << "update result: "  << res ;
                    if (returnNew) {
                        dassert(!res.newObj.isEmpty());
                        _appendHelper(result, res.newObj, true, fields, whereCallback);
                    }

                    BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
                    le.appendBool( "updatedExisting" , res.existing );
                    le.appendNumber( "n" , res.numMatched );
                    if ( !res.upserted.isEmpty() ) {
                        le.append( res.upserted[kUpsertedFieldName] );
                    }
                    le.done();
                }
            }

            // Committing the WUOW can close the current snapshot. Until this happens, the
            // snapshot id should not have changed.
            if (found) {
                invariant(txn->recoveryUnit()->getSnapshotId() == snapshotDoc.snapshotId());
            }
            wuow.commit();

            return true;
        }
Example #26
    void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
        assertInWriteLock();
        LOG(6) << "applying op: " << op << endl;

        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        const char *names[] = { "o", "ns", "op", "b" };
        BSONElement fields[4];
        op.getFields(4, names, fields);

        BSONObj o;
        if( fields[0].isABSONObj() )
            o = fields[0].embeddedObject();
            
        const char *ns = fields[1].valuestrsafe();

        // operation type -- see logOp() comments for types
        const char *opType = fields[2].valuestrsafe();

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                // updates aren't allowed for indexes -- so we will do a regular insert. if index already
                // exists, that is ok.
                theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                OpDebug debug;
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;
                    updateObjects(ns, o, o, true, false, false, debug );
                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    /* erh 10/16/2009 - this is probably not relevant any more since its auto-created, but not worth removing */
                    RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    BSONObjBuilder b;
                    b.append(_id);
                    updateObjects(ns, o, b.done(), true, false, false , debug );
                }
            }
        }
        else if ( *opType == 'u' ) {
            opCounters->gotUpdate();
            RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
            OpDebug debug;
            updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ fields[3].booleanSafe(), /*multi*/ false, /*logop*/ false , debug );
        }
        else if ( *opType == 'd' ) {
            opCounters->gotDelete();
            if ( opType[1] == 0 )
                deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
            else
                assert( opType[1] == 'b' ); // "db" advertisement
        }
        else if ( *opType == 'c' ) {
            opCounters->gotCommand();
            BufBuilder bb;
            BSONObjBuilder ob;
            _runCommands(ns, o, bb, ob, true, 0);
        }
        else if ( *opType == 'n' ) {
            // no op
        }
        else {
            throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
        }

    }
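For the 'd' branch above, a hedged sketch of the shape of a delete oplog entry that applyOperation_inlock would consume; the field names come from the names[] array the function reads ("o", "ns", "op", "b"), while the namespace and _id value are illustrative.

    // Illustrative delete entry: op "d", the target namespace, the criteria object o,
    // and b=true meaning justOne (matches the deleteObjects call in the 'd' branch).
    // The caller must already hold the write lock (see assertInWriteLock above).
    BSONObj deleteOp = BSON( "op" << "d"
                          << "ns" << "test.coll"
                          << "o"  << BSON( "_id" << 42 )
                          << "b"  << true );
    applyOperation_inlock( deleteOp, /*fromRepl*/ true );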
Example #27
    Status Database::_renameSingleNamespace( const StringData& fromNS, const StringData& toNS,
                                             bool stayTemp ) {

        // TODO: make it so we don't need to do this
        string fromNSString = fromNS.toString();
        string toNSString = toNS.toString();

        // some sanity checking
        NamespaceDetails* fromDetails = _namespaceIndex.details( fromNS );
        if ( !fromDetails )
            return Status( ErrorCodes::BadValue, "from namespace doesn't exist" );

        if ( _namespaceIndex.details( toNS ) )
            return Status( ErrorCodes::BadValue, "to namespace already exists" );

        // remove anything cached
        {
            scoped_lock lk( _collectionLock );
            _collections.erase( fromNSString );
            _collections.erase( toNSString );
        }

        ClientCursor::invalidate( fromNSString.c_str() );
        ClientCursor::invalidate( toNSString.c_str() );
        NamespaceDetailsTransient::eraseCollection( fromNSString ); // XXX
        NamespaceDetailsTransient::eraseCollection( toNSString ); // XXX

        // at this point, we haven't done anything destructive yet

        // ----
        // actually start moving
        // ----

        // this could throw, but if it does we're ok
        _namespaceIndex.add_ns( toNS, fromDetails );
        NamespaceDetails* toDetails = _namespaceIndex.details( toNS );

        try {
            toDetails->copyingFrom(toNSString.c_str(), fromDetails); // fixes extraOffset
        }
        catch( DBException& ) {
            // could end up here if .ns is full - if so try to clean up / roll back a little
            _namespaceIndex.kill_ns(toNSString.c_str());
            throw;
        }

        // at this point, code .ns stuff moved

        _namespaceIndex.kill_ns( fromNSString.c_str() );
        fromDetails = NULL;

        // fix system.namespaces
        BSONObj newSpec;
        {

            BSONObj oldSpec;
            if ( !Helpers::findOne( _namespacesName, BSON( "name" << fromNS ), oldSpec ) )
                return Status( ErrorCodes::InternalError, "can't find system.namespaces entry" );

            BSONObjBuilder b;
            BSONObjIterator i( oldSpec.getObjectField( "options" ) );
            while( i.more() ) {
                BSONElement e = i.next();
                if ( strcmp( e.fieldName(), "create" ) != 0 ) {
                    if (stayTemp || (strcmp(e.fieldName(), "temp") != 0))
                        b.append( e );
                }
                else {
                    b << "create" << toNS;
                }
            }
            newSpec = b.obj();
        }
        addNewNamespaceToCatalog( toNSString.c_str(), newSpec.isEmpty() ? 0 : &newSpec );

        deleteObjects( _namespacesName.c_str(), BSON( "name" << fromNS ), false, false, true );

        return Status::OK();
    }
Example #28
    void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        if( logLevel >= 6 )
            log() << "applying op: " << op << endl;

        assertInWriteLock();

        OpDebug debug;
        BSONObj o = op.getObjectField("o");
        const char *ns = op.getStringField("ns");
        // operation type -- see logOp() comments for types
        const char *opType = op.getStringField("op");

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                // updates aren't allowed for indexes -- so we will do a regular insert. if index already
                // exists, that is ok.
                theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;
                    updateObjects(ns, o, o, true, false, false , debug );
                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    BSONObjBuilder b;
                    b.append(_id);

                    /* erh 10/16/2009 - this is probably not relevant any more since its auto-created, but not worth removing */
                    RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    updateObjects(ns, o, b.done(), true, false, false , debug );
                }
            }
        }
        else if ( *opType == 'u' ) {
            opCounters->gotUpdate();

            RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
            updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ op.getBoolField("b"), /*multi*/ false, /*logop*/ false , debug );
        }
        else if ( *opType == 'd' ) {
            opCounters->gotDelete();

            if ( opType[1] == 0 )
                deleteObjects(ns, o, op.getBoolField("b"));
            else
                assert( opType[1] == 'b' ); // "db" advertisement
        }
        else if ( *opType == 'n' ) {
            // no op
        }
        else if ( *opType == 'c' ) {
            opCounters->gotCommand();

            BufBuilder bb;
            BSONObjBuilder ob;
            _runCommands(ns, o, bb, ob, true, 0);
        }
        else {
            stringstream ss;
            ss << "unknown opType [" << opType << "]";
            throw MsgAssertionException( 13141 , ss.str() );
        }

    }
Example #29
void Plume::setSettings(){
    deleteObjects();
    initArrays();
}
Example #30
        void doTTLForDB( const string& dbName ) {

            //check isMaster before becoming god
            bool isMaster = isMasterNs( dbName.c_str() );

            Client::GodScope god;

            vector<BSONObj> indexes;
            {
                auto_ptr<DBClientCursor> cursor =
                                db.query( dbName + ".system.indexes" ,
                                          BSON( secondsExpireField << BSON( "$exists" << true ) ) ,
                                          0 , /* default nToReturn */
                                          0 , /* default nToSkip */
                                          0 , /* default fieldsToReturn */
                                          QueryOption_SlaveOk ); /* perform on secondaries too */
                if ( cursor.get() ) {
                    while ( cursor->more() ) {
                        indexes.push_back( cursor->next().getOwned() );
                    }
                }
            }
            
            for ( unsigned i=0; i<indexes.size(); i++ ) {
                BSONObj idx = indexes[i];
                

                BSONObj key = idx["key"].Obj();
                if ( key.nFields() != 1 ) {
                    error() << "key for ttl index can only have 1 field" << endl;
                    continue;
                }

                BSONObj query;
                {
                    BSONObjBuilder b;
                    b.appendDate( "$lt" , curTimeMillis64() - ( 1000 * idx[secondsExpireField].numberLong() ) );
                    query = BSON( key.firstElement().fieldName() << b.obj() );
                }
                
                LOG(1) << "TTL: " << key << " \t " << query << endl;
                
                long long n = 0;
                {
                    string ns = idx["ns"].String();
                    Client::WriteContext ctx( ns );
                    NamespaceDetails* nsd = nsdetails( ns );
                    if ( ! nsd ) {
                        // collection was dropped
                        continue;
                    }
                    if ( nsd->setUserFlag( NamespaceDetails::Flag_UsePowerOf2Sizes ) ) {
                        nsd->syncUserFlags( ns );
                    }
                    // only do deletes if on master
                    if ( ! isMaster ) {
                        continue;
                    }

                    n = deleteObjects( ns.c_str() , query , false , true );
                    ttlDeletedDocuments.increment( n );
                }

                LOG(1) << "\tTTL deleted: " << n << endl;
            }
            
            
        }