// Applies a single insert operation from the oplog on a secondary.
// 'ns' is the namespace named by the oplog entry; 'op' is the entry itself,
// whose KEY_STR_ROW field carries the document that was inserted upstream.
static void runInsertFromOplog(const char* ns, BSONObj op) {
    BSONObj row = op[KEY_STR_ROW].Obj();
    // handle add index case: an insert into *.system.indexes is an index build
    if (mongoutils::str::endsWith(ns, ".system.indexes")) {
        // do not build the index if the user has disabled index builds on
        // this replica set member
        if (theReplSet->buildIndexes()) {
            Client::WriteContext ctx(ns);
            NamespaceDetails* nsd = nsdetails(ns);
            // 'row' is the index spec; its "ns" field names the collection
            // the index belongs to.
            const string &coll = row["ns"].String();
            // NOTE(review): collNsd is assumed non-NULL here — the collection
            // should already exist when its index build is replayed, but
            // nsdetails can return NULL; confirm callers guarantee existence.
            NamespaceDetails* collNsd = nsdetails(coll);
            const bool ok = collNsd->ensureIndex(row);
            if (!ok) {
                // the index already exists, so this is a no-op
                // Note that for create index and drop index, we
                // are tolerant of the fact that the operation may
                // have already been done
                return;
            }
            // Record the spec in system.indexes. Unique checks are skipped
            // because we are replaying work the primary already validated.
            insertOneObject(nsd, row, NamespaceDetails::NO_UNIQUE_CHECKS);
        }
    } else {
        // Ordinary collection insert. Optimistically run under a read lock;
        // if the insert discovers it needs more (e.g. the collection must be
        // created), it signals that by throwing RetryWithWriteLock and we
        // rerun the whole operation under a write lock.
        try {
            Client::ReadContext ctx(ns);
            runNonSystemInsertFromOplogWithLock(ns, row);
        } catch (RetryWithWriteLock &e) {
            Client::WriteContext ctx(ns);
            runNonSystemInsertFromOplogWithLock(ns, row);
        }
    }
}
static void runNonSystemInsertFromOplogWithLock( const char* ns, BSONObj row ) { NamespaceDetails* nsd = nsdetails(ns); // overwrite set to true because we are running on a secondary uint64_t flags = (NamespaceDetails::NO_UNIQUE_CHECKS | NamespaceDetails::NO_LOCKTREE); insertOneObject(nsd, row, flags); }
void insertObjects(const char *ns, const vector<BSONObj> &objs, bool keepGoing, uint64_t flags, bool logop ) { if (mongoutils::str::contains(ns, "system.")) { massert(16748, "need transaction to run insertObjects", cc().txnStackSize() > 0); uassert(10095, "attempt to insert in reserved database name 'system'", !mongoutils::str::startsWith(ns, "system.")); massert(16750, "attempted to insert multiple objects into a system namspace at once", objs.size() == 1); if (handle_system_collection_insert(ns, objs[0], logop) != 0) { return; } } NamespaceDetails *details = getAndMaybeCreateNS(ns, logop); NamespaceDetailsTransient *nsdt = &NamespaceDetailsTransient::get(ns); for (size_t i = 0; i < objs.size(); i++) { const BSONObj &obj = objs[i]; try { uassert( 10059 , "object to insert too large", obj.objsize() <= BSONObjMaxUserSize); BSONObjIterator i( obj ); while ( i.more() ) { BSONElement e = i.next(); uassert( 13511 , "document to insert can't have $ fields" , e.fieldName()[0] != '$' ); } uassert( 16440 , "_id cannot be an array", obj["_id"].type() != Array ); BSONObj objModified = obj; BSONElementManipulator::lookForTimestamps(objModified); if (details->isCapped() && logop) { // unfortunate hack we need for capped collections // we do this because the logic for generating the pk // and what subsequent rows to delete are buried in the // namespace details object. There is probably a nicer way // to do this, but this works. details->insertObjectIntoCappedAndLogOps(objModified, flags); if (nsdt != NULL) { nsdt->notifyOfWriteOp(); } } else { insertOneObject(details, nsdt, objModified, flags); // may add _id field if (logop) { OpLogHelpers::logInsert(ns, objModified, &cc().txn()); } } } catch (const UserException &) { if (!keepGoing || i == objs.size() - 1) { throw; } } } }
static void insertAndLog(const char *ns, NamespaceDetails *d, BSONObj &newObj, bool logop, bool fromMigrate) { checkNoMods( newObj ); TOKULOG(3) << "insertAndLog for upsert: " << newObj << endl; // We cannot pass NamespaceDetails::NO_UNIQUE_CHECKS because we still need to check secondary indexes. // We know if we are in this function that we did a query for the object and it didn't exist yet, so the unique check on the PK won't fail. // To prove this to yourself, look at the callers of insertAndLog and see that they return an UpdateResult that says the object didn't exist yet. checkBulkLoad(ns); insertOneObject(d, newObj); if (logop) { OpLogHelpers::logInsert(ns, newObj, &cc().txn()); } }
// Does not check magic system collection inserts. void _insertObjects(const char *ns, const vector<BSONObj> &objs, bool keepGoing, uint64_t flags, bool logop ) { NamespaceDetails *details = getAndMaybeCreateNS(ns, logop); for (size_t i = 0; i < objs.size(); i++) { const BSONObj &obj = objs[i]; try { uassert( 10059 , "object to insert too large", obj.objsize() <= BSONObjMaxUserSize); BSONObjIterator i( obj ); while ( i.more() ) { BSONElement e = i.next(); // check no $ modifiers. note we only check top level. // (scanning deep would be quite expensive) uassert( 13511 , "document to insert can't have $ fields" , e.fieldName()[0] != '$' ); // check no regexp for _id (SERVER-9502) if (str::equals(e.fieldName(), "_id")) { uassert(17033, "can't use a regex for _id", e.type() != RegEx); } } uassert( 16440 , "_id cannot be an array", obj["_id"].type() != Array ); BSONObj objModified = obj; BSONElementManipulator::lookForTimestamps(objModified); if (details->isCapped() && logop) { // unfortunate hack we need for capped collections // we do this because the logic for generating the pk // and what subsequent rows to delete are buried in the // namespace details object. There is probably a nicer way // to do this, but this works. details->insertObjectIntoCappedAndLogOps(objModified, flags); details->notifyOfWriteOp(); } else { insertOneObject(details, objModified, flags); // may add _id field if (logop) { OpLogHelpers::logInsert(ns, objModified); } } } catch (const UserException &) { if (!keepGoing || i == objs.size() - 1) { throw; } } } }
// on input, conn is a connection for which the caller has created a multi-statement // mvcc transaction over it. Reads the document from the remote server and // applies it locally void applySnapshotOfDocsMap(shared_ptr<DBClientConnection> conn) { size_t numDocs = 0; log() << "Applying documents to collections for rollback." << rsLog; for (RollbackDocsMapIterator it; it.ok(); it.advance()){ numDocs++; DocID curr = it.current(); LOCK_REASON(lockReason, "repl: appling snapshot of doc during rollback"); Client::ReadContext ctx(curr.ns, lockReason); Collection* cl = getCollection(curr.ns); if (cl->isPKHidden()) { log() << "Collection " << curr.ns << " has a hidden PK, yet it has \ a document for which we want to apply a snapshot of: " << \ curr.pk << rsLog; throw RollbackOplogException("Collection for which we are applying a document has a hidden PK"); } BSONObj pkWithFields = cl->fillPKWithFields(curr.pk); BSONObj remoteImage = findOneFromConn(conn.get(), curr.ns, Query(pkWithFields)); if (!remoteImage.isEmpty()) { const uint64_t flags = Collection::NO_UNIQUE_CHECKS | Collection::NO_LOCKTREE; insertOneObject(cl, remoteImage, flags); } }
// Does not check magic system collection inserts. void _insertObjects(const char *ns, const vector<BSONObj> &objs, bool keepGoing, uint64_t flags, bool logop ) { Collection *cl = getOrCreateCollection(ns, logop); for (size_t i = 0; i < objs.size(); i++) { const BSONObj &obj = objs[i]; try { BSONObj objModified = obj; BSONElementManipulator::lookForTimestamps(objModified); if (cl->isCapped()) { if (cc().txnStackSize() > 1) { // This is a nightmare to maintain transactionally correct. // Capped collections will be deprecated one day anyway. // They are an anathma. uasserted(17228, "Cannot insert into a capped collection in a multi-statement transaction."); } if (logop) { // special case capped colletions until all oplog writing // for inserts is handled in the collection class, not here. validateInsert(obj); CappedCollection *cappedCl = cl->as<CappedCollection>(); cappedCl->insertObjectAndLogOps(objModified, flags); cappedCl->notifyOfWriteOp(); } } else { insertOneObject(cl, objModified, flags); // may add _id field if (logop) { OpLogHelpers::logInsert(ns, objModified); } } } catch (const UserException &) { if (!keepGoing || i == objs.size() - 1) { throw; } } } }