Example #1
    /** @param fromRepl false if from ApplyOpsCmd
        @return true if it was an update that should have happened and the document DNE.  see replset initial sync code.
     */
    bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
        LOG(6) << "applying op: " << op << endl;
        bool failedUpdate = false;

        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        const char *names[] = { "o", "ns", "op", "b" };
        BSONElement fields[4];
        op.getFields(4, names, fields);

        BSONObj o;
        if( fields[0].isABSONObj() )
            o = fields[0].embeddedObject();
            
        const char *ns = fields[1].valuestrsafe();

        Lock::assertWriteLocked(ns);

        NamespaceDetails *nsd = nsdetails(ns);

        // operation type -- see logOp() comments for types
        const char *opType = fields[2].valuestrsafe();

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                // updates aren't allowed for indexes -- so we will do a regular insert. if index already
                // exists, that is ok.
                theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                OpDebug debug;
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;
                    updateObjectsForReplication(ns, o, o, true, false, false, debug, false,
                                                QueryPlanSelectionPolicy::idElseNatural() );
                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    // probably don't need this since all replicated colls have _id indexes now
                    // but keep it just in case
                    RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns); }

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    BSONObjBuilder b;
                    b.append(_id);
                    updateObjectsForReplication(ns, o, b.done(), true, false, false , debug, false,
                                                QueryPlanSelectionPolicy::idElseNatural() );
                }
            }
        }
Example #2
/** @param fromRepl false if from ApplyOpsCmd
    @return true if it was an update that should have happened and the document DNE.  see replset initial sync code.
 */
bool applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
    assertInWriteLock();
    LOG(6) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();
    NamespaceDetails *nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert. if index already
            // exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id.  This will be very slow. */
                Timer t;
                updateObjects(ns, o, o, true, false, false, debug );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                /* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
                RARELY if (nsd && !nsd->capped) {
                    ensureHaveIdIndex(ns);    // otherwise updates will be slow
                }

                /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                          then.  very few upserts will not be inserts...
                          */
                BSONObjBuilder b;
                b.append(_id);
                updateObjects(ns, o, b.done(), true, false, false , debug );
            }
        }
    }
Example #3
 void applyOperationFromOplog(const BSONObj& op) {
     LOG(6) << "applying op: " << op << endl;
     OpCounters* opCounters = &replOpCounters;
     const char *names[] = { 
         KEY_STR_NS, 
         KEY_STR_OP_NAME
         };
     BSONElement fields[2];
     op.getFields(2, names, fields);
     const char* ns = fields[0].valuestrsafe();
     const char* opType = fields[1].valuestrsafe();
     if (strcmp(opType, OP_STR_INSERT) == 0) {
         opCounters->gotInsert();
         runInsertFromOplog(ns, op);
     }
     else if (strcmp(opType, OP_STR_UPDATE) == 0) {
         opCounters->gotUpdate();
         runUpdateFromOplog(ns, op, false);
     }
     else if (strcmp(opType, OP_STR_DELETE) == 0) {
         opCounters->gotDelete();
         runDeleteFromOplog(ns, op);
     }
     else if (strcmp(opType, OP_STR_COMMAND) == 0) {
         opCounters->gotCommand();
         runCommandFromOplog(ns, op);
     }
     else if (strcmp(opType, OP_STR_COMMENT) == 0) {
         // no-op
     }
     else if (strcmp(opType, OP_STR_CAPPED_INSERT) == 0) {
         opCounters->gotInsert();
         runCappedInsertFromOplog(ns, op);
     }
     else if (strcmp(opType, OP_STR_CAPPED_DELETE) == 0) {
         opCounters->gotDelete();
         runCappedDeleteFromOplog(ns, op);
     }
     else {
         throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
     }
 }
Example #4
    void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
        assertInWriteLock();
        LOG(6) << "applying op: " << op << endl;

        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        const char *names[] = { "o", "ns", "op", "b" };
        BSONElement fields[4];
        op.getFields(4, names, fields);

        BSONObj o;
        if( fields[0].isABSONObj() )
            o = fields[0].embeddedObject();
            
        const char *ns = fields[1].valuestrsafe();

        // operation type -- see logOp() comments for types
        const char *opType = fields[2].valuestrsafe();

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                // updates aren't allowed for indexes -- so we will do a regular insert. if index already
                // exists, that is ok.
                theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                OpDebug debug;
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;
                    updateObjects(ns, o, o, true, false, false, debug );
                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    /* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
                    RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    BSONObjBuilder b;
                    b.append(_id);
                    updateObjects(ns, o, b.done(), true, false, false , debug );
                }
            }
        }
        else if ( *opType == 'u' ) {
            opCounters->gotUpdate();
            RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
            OpDebug debug;
            updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ fields[3].booleanSafe(), /*multi*/ false, /*logop*/ false , debug );
        }
        else if ( *opType == 'd' ) {
            opCounters->gotDelete();
            if ( opType[1] == 0 )
                deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
            else
                assert( opType[1] == 'b' ); // "db" advertisement
        }
        else if ( *opType == 'c' ) {
            opCounters->gotCommand();
            BufBuilder bb;
            BSONObjBuilder ob;
            _runCommands(ns, o, bb, ob, true, 0);
        }
        else if ( *opType == 'n' ) {
            // no op
        }
        else {
            throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
        }

    }
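
A note on the entry format, with a sketch below: the dispatcher above reads its input through the field names "o", "ns", "op" and "b" (plus the embedded "o2" object for updates) and branches on the single-character op type ('i', 'u', 'd', 'c', 'n'). The following snippet is an illustrative sketch, not MongoDB source: it builds synthetic insert and update entries with the BSON macro and feeds them to applyOperation_inlock. The namespace test.foo, the document contents, and the helper name are assumptions, and the caller is assumed to already hold the write lock required by the _inlock / assertInWriteLock contract.

    // Illustrative sketch only: assumes the usual mongo headers (BSONObj, the BSON
    // macro) and that the caller already holds the write lock that
    // applyOperation_inlock asserts on entry.
    void replaySyntheticOps_sketch() {
        // Insert entry: { op: "i", ns: "test.foo", o: { _id: 1, x: 1 } }
        BSONObj insertOp = BSON( "op" << "i"
                                      << "ns" << "test.foo"
                                      << "o"  << BSON( "_id" << 1 << "x" << 1 ) );
        applyOperation_inlock( insertOp, /*fromRepl*/ true );

        // Update entry: { op: "u", ns: "test.foo", o2: { _id: 1 },
        //                 o: { $set: { x: 2 } }, b: true }
        // "o2" supplies the match criteria and "b" the upsert flag that the
        // dispatcher reads via fields[3].booleanSafe().
        BSONObj updateOp = BSON( "op" << "u"
                                      << "ns" << "test.foo"
                                      << "o2" << BSON( "_id" << 1 )
                                      << "o"  << BSON( "$set" << BSON( "x" << 2 ) )
                                      << "b"  << true );
        applyOperation_inlock( updateOp, /*fromRepl*/ true );
    }
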
Example #5
    /** @param fromRepl false if from ApplyOpsCmd
        @return true if it was an update that should have happened and the document DNE.  see replset initial sync code.
     */
    bool applyOperation_inlock(OperationContext* txn,
                               Database* db,
                               const BSONObj& op,
                               bool fromRepl,
                               bool convertUpdateToUpsert) {
        LOG(3) << "applying op: " << op << endl;
        bool failedUpdate = false;

        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        const char *names[] = { "o", "ns", "op", "b", "o2" };
        BSONElement fields[5];
        op.getFields(5, names, fields);
        BSONElement& fieldO = fields[0];
        BSONElement& fieldNs = fields[1];
        BSONElement& fieldOp = fields[2];
        BSONElement& fieldB = fields[3];
        BSONElement& fieldO2 = fields[4];

        BSONObj o;
        if( fieldO.isABSONObj() )
            o = fieldO.embeddedObject();

        const char *ns = fieldNs.valuestrsafe();

        BSONObj o2;
        if (fieldO2.isABSONObj())
            o2 = fieldO2.Obj();

        bool valueB = fieldB.booleanSafe();

        txn->lockState()->assertWriteLocked(ns);

        Collection* collection = db->getCollection( txn, ns );
        IndexCatalog* indexCatalog = collection == NULL ? NULL : collection->getIndexCatalog();

        // operation type -- see logOp() comments for types
        const char *opType = fieldOp.valuestrsafe();

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && nsToCollectionSubstring( p ) == "system.indexes" ) {
                if (o["background"].trueValue()) {
                    IndexBuilder* builder = new IndexBuilder(o);
                    // This spawns a new thread and returns immediately.
                    builder->go();
                }
                else {
                    IndexBuilder builder(o);
                    Status status = builder.buildInForeground(txn, db);
                    if ( status.isOK() ) {
                        // yay
                    }
                    else if ( status.code() == ErrorCodes::IndexOptionsConflict ||
                              status.code() == ErrorCodes::IndexKeySpecsConflict ) {
                        // SERVER-13206, SERVER-13496
                        // 2.4 (and earlier) will add an ensureIndex to the oplog whether it's ok or not,
                        // so in 2.6+, where we do stricter validation, it will fail,
                        // but we shouldn't care as the primary is responsible
                        warning() << "index creation attempted on secondary that conflicts, "
                                  << "skipping: " << status;
                    }
                    else {
                        uassertStatusOK( status );
                    }
                }
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                OpDebug debug;
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;

                    const NamespaceString requestNs(ns);
                    UpdateRequest request(txn, requestNs);

                    request.setQuery(o);
                    request.setUpdates(o);
                    request.setUpsert();
                    request.setFromReplication();
                    UpdateLifecycleImpl updateLifecycle(true, requestNs);
                    request.setLifecycle(&updateLifecycle);

                    update(db, request, &debug);

                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    // probably don't need this since all replicated colls have _id indexes now
                    // but keep it just in case
                    RARELY if ( indexCatalog
                                 && !collection->isCapped()
                                 && !indexCatalog->haveIdIndex(txn) ) {
                        try {
                            Helpers::ensureIndex(txn, collection, BSON("_id" << 1), true, "_id_");
                        }
                        catch (const DBException& e) {
                            warning() << "Ignoring error building id index on " << collection->ns()
                                      << ": " << e.toString();
                        }
                    }

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    BSONObjBuilder b;
                    b.append(_id);

                    const NamespaceString requestNs(ns);
                    UpdateRequest request(txn, requestNs);

                    request.setQuery(b.done());
                    request.setUpdates(o);
                    request.setUpsert();
                    request.setFromReplication();
                    UpdateLifecycleImpl updateLifecycle(true, requestNs);
                    request.setLifecycle(&updateLifecycle);

                    update(db, request, &debug);
                }
            }
        }
Example #6
    void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        if( logLevel >= 6 )
            log() << "applying op: " << op << endl;

        assertInWriteLock();

        OpDebug debug;
        BSONObj o = op.getObjectField("o");
        const char *ns = op.getStringField("ns");
        // operation type -- see logOp() comments for types
        const char *opType = op.getStringField("op");

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                // updates aren't allowed for indexes -- so we will do a regular insert. if index already
                // exists, that is ok.
                theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;
                    updateObjects(ns, o, o, true, false, false , debug );
                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    BSONObjBuilder b;
                    b.append(_id);

                    /* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
                    RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    updateObjects(ns, o, b.done(), true, false, false , debug );
                }
            }
        }
        else if ( *opType == 'u' ) {
            opCounters->gotUpdate();

            RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
            updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ op.getBoolField("b"), /*multi*/ false, /*logop*/ false , debug );
        }
        else if ( *opType == 'd' ) {
            opCounters->gotDelete();

            if ( opType[1] == 0 )
                deleteObjects(ns, o, op.getBoolField("b"));
            else
                assert( opType[1] == 'b' ); // "db" advertisement
        }
        else if ( *opType == 'n' ) {
            // no op
        }
        else if ( *opType == 'c' ) {
            opCounters->gotCommand();

            BufBuilder bb;
            BSONObjBuilder ob;
            _runCommands(ns, o, bb, ob, true, 0);
        }
        else {
            stringstream ss;
            ss << "unknown opType [" << opType << "]";
            throw MsgAssertionException( 13141 , ss.str() );
        }

    }
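
The remaining branches of the version above consume the same entry shape: 'd' carries the match document in "o" with "b" as the justOne flag, 'c' carries a command object in "o" against the database's $cmd namespace, and 'n' is a no-op. A hedged sketch follows (again not MongoDB source; the helper name, the namespace, and the drop command are made up for illustration):

    // Illustrative sketch only -- same caveats as the earlier sketch (mongo headers
    // available, write lock already held by the caller).
    void replayDeleteAndCommand_sketch() {
        // Delete entry: { op: "d", ns: "test.foo", o: { _id: 1 }, b: true }
        BSONObj deleteOp = BSON( "op" << "d"
                                      << "ns" << "test.foo"
                                      << "o"  << BSON( "_id" << 1 )
                                      << "b"  << true );   // justOne
        applyOperation_inlock( deleteOp, /*fromRepl*/ true );

        // Command entry: { op: "c", ns: "test.$cmd", o: { drop: "foo" } }
        // The 'c' branch hands "o" to _runCommands against the $cmd namespace.
        BSONObj commandOp = BSON( "op" << "c"
                                       << "ns" << "test.$cmd"
                                       << "o"  << BSON( "drop" << "foo" ) );
        applyOperation_inlock( commandOp, /*fromRepl*/ true );
    }
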
Example #7
    /** @param fromRepl false if from ApplyOpsCmd
        @return true if it was an update that should have happened and the document DNE.  see replset initial sync code.
     */
    bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
        LOG(3) << "applying op: " << op << endl;
        bool failedUpdate = false;

        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        const char *names[] = { "o", "ns", "op", "b" };
        BSONElement fields[4];
        op.getFields(4, names, fields);

        BSONObj o;
        if( fields[0].isABSONObj() )
            o = fields[0].embeddedObject();
            
        const char *ns = fields[1].valuestrsafe();

        Lock::assertWriteLocked(ns);

        NamespaceDetails *nsd = nsdetails(ns);

        // operation type -- see logOp() comments for types
        const char *opType = fields[2].valuestrsafe();

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                if (o["background"].trueValue()) {
                    IndexBuilder* builder = new IndexBuilder(ns, o);
                    // This spawns a new thread and returns immediately.
                    builder->go();
                }
                else {
                    IndexBuilder builder(ns, o);
                    // Finish the foreground build before returning
                    builder.build();
                }
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                OpDebug debug;
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;

                    const NamespaceString requestNs(ns);
                    UpdateRequest request(
                        requestNs, debug,
                        QueryPlanSelectionPolicy::idElseNatural());

                    request.setQuery(o);
                    request.setUpdates(o);
                    request.setUpsert();
                    request.setFromReplication();

                    update(request);

                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    // probably don't need this since all replicated colls have _id indexes now
                    // but keep it just in case
                    RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns, false); }

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    BSONObjBuilder b;
                    b.append(_id);

                    const NamespaceString requestNs(ns);
                    UpdateRequest request(
                        requestNs, debug,
                        QueryPlanSelectionPolicy::idElseNatural());

                    request.setQuery(b.done());
                    request.setUpdates(o);
                    request.setUpsert();
                    request.setFromReplication();

                    update(request);
                }
            }
        }