DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ) {
    LogIndentLevel lil;

    if ( eLoc.getOfs() <= 0 ) {
        error() << "invalid extent ofs: " << eLoc.getOfs() << endl;
        return DiskLoc();
    }

    MongoDataFile * mdf = db->getFile( eLoc.a() );

    Extent * e = mdf->debug_getExtent( eLoc );
    if ( ! e->isOk() ) {
        warning() << "Extent not ok magic: " << e->magic << " going to try to continue" << endl;
    }

    log() << "length:" << e->length << endl;

    LogIndentLevel lil2;

    set<DiskLoc> seen;

    DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
    while ( ! loc.isNull() ) {
        if ( ! seen.insert( loc ).second ) {
            error() << "infinite loop in extent, seen: " << loc << " before" << endl;
            break;
        }

        if ( loc.getOfs() <= 0 ) {
            error() << "offset is 0 for record which should be impossible" << endl;
            break;
        }
        log(1) << loc << endl;
        Record* rec = loc.rec();
        BSONObj obj;
        try {
            obj = loc.obj();
            assert( obj.valid() );
            LOG(1) << obj << endl;
            w( obj );
        }
        catch ( std::exception& e ) {
            log() << "found invalid document @ " << loc << " " << e.what() << endl;
            if ( ! obj.isEmpty() ) {
                try {
                    BSONElement e = obj.firstElement();
                    stringstream ss;
                    ss << "first element: " << e;
                    log() << ss.str();
                }
                catch ( std::exception& ) {
                }
            }
        }
        loc = forward ? rec->getNext( loc ) : rec->getPrev( loc );
    }

    return forward ? e->xnext : e->xprev;
}
/**
 * Actually applies a reduce to a list of tuples (key, value).
 * After the call, tuples will hold a single tuple {"0": key, "1": value}.
 */
void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
    uassert( 10074 , "need values" , tuples.size() );

    int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;

    // need to build the reduce args: ( key, [values] )
    BSONObjBuilder reduceArgs( sizeEstimate );
    boost::scoped_ptr<BSONArrayBuilder> valueBuilder;
    int sizeSoFar = 0;
    unsigned n = 0;
    for ( ; n<tuples.size(); n++ ) {
        BSONObjIterator j(tuples[n]);
        BSONElement keyE = j.next();
        if ( n == 0 ) {
            reduceArgs.append( keyE );
            key = keyE.wrap();
            sizeSoFar = 5 + keyE.size();
            valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
        }

        BSONElement ee = j.next();

        uassert( 13070 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );

        if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
            assert( n > 1 ); // if not, inf. loop
            break;
        }

        valueBuilder->append( ee );
        sizeSoFar += ee.size();
    }
    assert(valueBuilder);
    valueBuilder->done();
    BSONObj args = reduceArgs.obj();

    Scope * s = _func.scope();

    s->invokeSafe( _func.func() , &args, 0 );
    ++numReduces;

    if ( s->type( "return" ) == Array ) {
        uasserted( 10075 , "reduce -> multiple not supported yet");
        return;
    }

    endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() );

    if ( n == tuples.size() )
        return;

    // the input list was too large, add the rest of elmts to new tuples and reduce again
    // note: would be better to use loop instead of recursion to avoid stack overflow
    BSONList x;
    for ( ; n < tuples.size(); n++ ) {
        x.push_back( tuples[n] );
    }
    BSONObjBuilder temp( endSizeEstimate );
    temp.append( key.firstElement() );
    s->append( temp , "1" , "return" );
    x.push_back( temp.obj() );
    _reduce( x , key , endSizeEstimate );
}
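// The subtle part of _reduce() above is the size-capped rebatching: when the
// accumulated values would exceed BSONObjMaxUserSize, it reduces what fits and
// feeds the partial result back in as one more tuple. A minimal standalone
// sketch of that strategy (hypothetical reduceBatch/reduceAll helpers, plain
// integers instead of BSON; maxBatch must be >= 2, mirroring assert( n > 1 )):
#include <numeric>
#include <vector>

// Stand-in for a reducer over a batch that fits under the size cap.
static long long reduceBatch( const std::vector<long long>& vals ) {
    return std::accumulate( vals.begin(), vals.end(), 0LL );
}

// Reduce an arbitrarily large list under a per-batch cap by pushing each
// partial result back into the remaining input, like _reduce()'s recursion.
long long reduceAll( std::vector<long long> vals, size_t maxBatch ) {
    while ( vals.size() > maxBatch ) {
        std::vector<long long> batch( vals.begin(), vals.begin() + maxBatch );
        std::vector<long long> rest( vals.begin() + maxBatch, vals.end() );
        rest.push_back( reduceBatch( batch ) ); // partial result becomes a new tuple
        vals.swap( rest );
    }
    return reduceBatch( vals );
}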
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
    Timer t;
    string ns = dbname + '.' + cmdObj.firstElement().valuestr();

    string key = cmdObj["key"].valuestrsafe();
    BSONObj keyPattern = BSON( key << 1 );

    BSONObj query = getQuery( cmdObj );

    int bufSize = BSONObjMaxUserSize - 4096;
    BufBuilder bb( bufSize );
    char * start = bb.buf();

    BSONArrayBuilder arr( bb );
    BSONElementSet values;

    long long nscanned = 0;        // locations looked at
    long long nscannedObjects = 0; // full objects looked at
    long long n = 0;               // matches
    MatchDetails md;

    NamespaceDetails * d = nsdetails( ns.c_str() );

    if ( ! d ) {
        result.appendArray( "values" , BSONObj() );
        result.append( "stats" , BSON( "n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0 ) );
        return true;
    }

    shared_ptr<Cursor> cursor;
    if ( ! query.isEmpty() ) {
        cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
    }
    else {
        // query is empty, so lets see if we can find an index
        // with the key so we don't have to hit the raw data
        NamespaceDetails::IndexIterator ii = d->ii();
        while ( ii.more() ) {
            IndexDetails& idx = ii.next();

            if ( d->isMultikey( ii.pos() - 1 ) )
                continue;

            if ( idx.inKeyPattern( key ) ) {
                cursor = NamespaceDetailsTransient::bestGuessCursor( ns.c_str() , BSONObj() , idx.keyPattern() );
                if( cursor.get() ) break;
            }
        }

        if ( ! cursor.get() )
            cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
    }

    verify( cursor );
    string cursorName = cursor->toString();

    auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));

    while ( cursor->ok() ) {
        nscanned++;
        bool loadedRecord = false;

        if ( cursor->currentMatches( &md ) && !cursor->getsetdup( cursor->currLoc() ) ) {
            n++;
            BSONObj holder;
            BSONElementSet temp;
            loadedRecord = ! cc->getFieldsDotted( key , temp, holder );

            for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ) {
                BSONElement e = *i;
                if ( values.count( e ) )
                    continue;

                int now = bb.len();

                uassert(10044, "distinct too big, 16mb cap", ( now + e.size() + 1024 ) < bufSize );

                arr.append( e );
                BSONElement x( start + now );

                values.insert( x );
            }
        }

        if ( loadedRecord || md.hasLoadedRecord() )
            nscannedObjects++;

        cursor->advance();

        if (!cc->yieldSometimes( ClientCursor::MaybeCovered )) {
            cc.release();
            break;
        }

        RARELY killCurrentOp.checkForInterrupt();
    }

    verify( start == bb.buf() );

    result.appendArray( "values" , arr.done() );

    {
        BSONObjBuilder b;
        b.appendNumber( "n" , n );
        b.appendNumber( "nscanned" , nscanned );
        b.appendNumber( "nscannedObjects" , nscannedObjects );
        b.appendNumber( "timems" , t.millis() );
        b.append( "cursor" , cursorName );
        result.append( "stats" , b.obj() );
    }

    return true;
}
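// For context, a caller reaches the distinct handler above through a command
// document. A hedged sketch with the legacy C++ driver (host, database,
// collection, and field names are illustrative assumptions):
#include <iostream>
#include "mongo/client/dbclient.h"

void runDistinctExample() {
    mongo::DBClientConnection conn;
    conn.connect( "localhost:27017" );

    mongo::BSONObj cmd = BSON( "distinct" << "users"                // collection
                            << "key" << "city"                      // field to collect
                            << "query" << BSON( "active" << true ) );
    mongo::BSONObj res;
    if ( conn.runCommand( "test", cmd, res ) ) {
        // res["values"] holds the array built above; res["stats"] the counters.
        std::cout << res["values"] << std::endl;
    }
}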
/* ns:      namespace, e.g. <database>.<collection>
   pattern: the "where" clause / criteria
   justOne: stop after 1 match
   god:     allow access to system namespaces, and don't yield
*/
long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop, bool god, RemoveSaver * rs ) {
    if( !god ) {
        if ( strstr(ns, ".system.") ) {
            /* note a delete from system.indexes would corrupt the db if done here, as there are
               pointers into those objects in NamespaceDetails.
            */
            uassert(12050, "cannot delete from system namespace", legalClientSystemNS( ns , true ) );
        }

        if ( strchr( ns , '$' ) ) {
            log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
            uassert( 10100 , "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
        }
    }

    {
        NamespaceDetails *d = nsdetails( ns );
        if ( ! d )
            return 0;
        uassert( 10101 , "can't remove from a capped collection" , ! d->isCapped() );
    }

    long long nDeleted = 0;

    shared_ptr< Cursor > creal = NamespaceDetailsTransient::getCursor( ns, pattern );

    if( !creal->ok() )
        return nDeleted;

    shared_ptr< Cursor > cPtr = creal;
    auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cPtr, ns) );
    cc->setDoingDeletes( true );

    CursorId id = cc->cursorid();

    bool canYield = !god && !(creal->matcher() && creal->matcher()->docMatcher().atomic());

    do {
        // TODO: we can generalize this I believe
        //
        bool willNeedRecord = (creal->matcher() && creal->matcher()->needRecord()) ||
                              pattern.isEmpty() ||
                              isSimpleIdQuery( pattern );
        if ( ! willNeedRecord ) {
            // TODO: this is a total hack right now
            // check if the index full encompasses query

            if ( pattern.nFields() == 1 &&
                 str::equals( pattern.firstElement().fieldName() , creal->indexKeyPattern().firstElement().fieldName() ) )
                willNeedRecord = true;
        }

        if ( canYield && ! cc->yieldSometimes( willNeedRecord ? ClientCursor::WillNeed : ClientCursor::MaybeCovered ) ) {
            cc.release(); // has already been deleted elsewhere
            // TODO should we assert or something?
            break;
        }
        if ( !cc->ok() ) {
            break; // if we yielded, could have hit the end
        }

        // this way we can avoid calling prepareToYield() every time (expensive)
        // as well as some other nuances handled
        cc->setDoingDeletes( true );

        DiskLoc rloc = cc->currLoc();
        BSONObj key = cc->currKey();

        bool match = creal->currentMatches();

        cc->advance();

        if ( ! match )
            continue;

        // SERVER-5198 Advance past the document to be modified, but see SERVER-5725.
        while( cc->ok() && rloc == cc->currLoc() ) {
            cc->advance();
        }

        bool foundAllResults = ( justOne || !cc->ok() );

        if ( !foundAllResults ) {
            // NOTE: Saving and restoring a btree cursor's position was historically described
            // as slow here.
            cc->c()->prepareToTouchEarlierIterate();
        }

        {
            BSONElement e;
            if( BSONObj::make( rloc.rec() ).getObjectID( e ) ) {
                BSONObjBuilder b;
                b.append( e );
                bool replJustOne = true;
                if(logop)
                    logOp( "d", ns, b.done(), 0, &replJustOne );
                postNotification("d",ns,b.done(),0);
            }
            else {
                problem() << "deleted object without id, not logging" << endl;
            }
        }

        theDataFileMgr.deleteRecord(ns, rloc.rec(), rloc);
        nDeleted++;

        if ( foundAllResults ) {
            break;
        }

        cc->c()->recoverFromTouchingEarlierIterate();

        if( !god )
            getDur().commitIfNeeded();

        if( debug && god && nDeleted == 100 )
            log() << "warning high number of deletes with god=true which could use significant memory" << endl;
    }
    while ( cc->ok() );

    if ( cc.get() && ClientCursor::find( id , false ) == 0 ) {
        // TODO: remove this and the id declaration above if this doesn't trigger
        // if it does, then i'm very confused (ERH 06/2011)
        error() << "this should be impossible" << endl;
        printStackTrace();
        cc.release();
    }

    return nDeleted;
}
void run(){
    Scope * s = globalScriptEngine->newScope();

    { // date
        BSONObj o;
        {
            BSONObjBuilder b;
            b.appendDate( "d" , 123456789 );
            o = b.obj();
        }
        s->setObject( "x" , o );

        s->invoke( "return x.d.getTime() != 12;" , BSONObj() );
        ASSERT_EQUALS( true, s->getBoolean( "return" ) );

        s->invoke( "z = x.d.getTime();" , BSONObj() );
        ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );

        s->invoke( "z = { z : x.d }" , BSONObj() );
        BSONObj out = s->getObject( "z" );
        ASSERT( out["z"].type() == Date );
    }

    { // regex
        BSONObj o;
        {
            BSONObjBuilder b;
            b.appendRegex( "r" , "^a" , "i" );
            o = b.obj();
        }
        s->setObject( "x" , o );

        s->invoke( "z = x.r.test( 'b' );" , BSONObj() );
        ASSERT_EQUALS( false , s->getBoolean( "z" ) );

        s->invoke( "z = x.r.test( 'a' );" , BSONObj() );
        ASSERT_EQUALS( true , s->getBoolean( "z" ) );

        s->invoke( "z = x.r.test( 'ba' );" , BSONObj() );
        ASSERT_EQUALS( false , s->getBoolean( "z" ) );

        s->invoke( "z = { a : x.r };" , BSONObj() );
        BSONObj out = s->getObject("z");
        ASSERT_EQUALS( (string)"^a" , out["a"].regex() );
        ASSERT_EQUALS( (string)"i" , out["a"].regexFlags() );
    }

    // array
    {
        BSONObj o = fromjson( "{r:[1,2,3]}" );
        s->setObject( "x", o, false );
        BSONObj out = s->getObject( "x" );
        ASSERT_EQUALS( Array, out.firstElement().type() );

        s->setObject( "x", o, true );
        out = s->getObject( "x" );
        ASSERT_EQUALS( Array, out.firstElement().type() );
    }

    delete s;
}
bool wrappedRun(OperationContext* txn,
                const string& dbname,
                BSONObj& jsobj,
                string& errmsg,
                BSONObjBuilder& anObjBuilder) {
    BSONElement e = jsobj.firstElement();
    const string toDeleteNs = dbname + '.' + e.valuestr();
    if (!serverGlobalParams.quiet) {
        LOG(0) << "CMD: dropIndexes " << toDeleteNs << endl;
    }

    Client::Context ctx(toDeleteNs);
    Database* db = ctx.db();

    Collection* collection = db->getCollection( txn, toDeleteNs );
    if ( ! collection ) {
        errmsg = "ns not found";
        return false;
    }

    stopIndexBuilds(txn, db, jsobj);

    IndexCatalog* indexCatalog = collection->getIndexCatalog();
    anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal() );

    BSONElement f = jsobj.getField("index");
    if ( f.type() == String ) {
        string indexToDelete = f.valuestr();

        if ( indexToDelete == "*" ) {
            Status s = indexCatalog->dropAllIndexes(txn, false);
            if ( !s.isOK() ) {
                appendCommandStatus( anObjBuilder, s );
                return false;
            }
            anObjBuilder.append("msg", "non-_id indexes dropped for collection");
            return true;
        }

        IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName( indexToDelete );
        if ( desc == NULL ) {
            errmsg = str::stream() << "index not found with name [" << indexToDelete << "]";
            return false;
        }

        if ( desc->isIdIndex() ) {
            errmsg = "cannot drop _id index";
            return false;
        }

        Status s = indexCatalog->dropIndex(txn, desc);
        if ( !s.isOK() ) {
            appendCommandStatus( anObjBuilder, s );
            return false;
        }

        return true;
    }

    if ( f.type() == Object ) {
        IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByKeyPattern( f.embeddedObject() );
        if ( desc == NULL ) {
            errmsg = "can't find index with key:";
            errmsg += f.embeddedObject().toString();
            return false;
        }

        if ( desc->isIdIndex() ) {
            errmsg = "cannot drop _id index";
            return false;
        }

        Status s = indexCatalog->dropIndex(txn, desc);
        if ( !s.isOK() ) {
            appendCommandStatus( anObjBuilder, s );
            return false;
        }

        return true;
    }

    errmsg = "invalid index name spec";
    return false;
}
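// The handler above accepts an index name, a key pattern, or "*"; illustrative
// command documents (collection and index names invented):
//   { dropIndexes: "coll", index: "a_1" }      -> drop by name
//   { dropIndexes: "coll", index: { a: 1 } }   -> drop by key pattern
//   { dropIndexes: "coll", index: "*" }        -> drop all non-_id indexes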
TEST(MatchExpressionParserTest, ParseIntegerElementToLongRejectsLargestDecimal) {
    BSONObj query = BSON("" << Decimal128(Decimal128::kLargestPositive));
    ASSERT_NOT_OK(MatchExpressionParser::parseIntegerElementToLong(query.firstElement()));
}

TEST(MatchExpressionParserTest, ParseIntegerElementToNonNegativeLongRejectsNegative) {
    BSONObj query = BSON("" << -2LL);
    ASSERT_NOT_OK(
        MatchExpressionParser::parseIntegerElementToNonNegativeLong(query.firstElement()));
}

TEST(MatchExpressionParserTest, ParseIntegerElementToLongRejectsString) {
    BSONObj query = BSON(""
                         << "1");
    ASSERT_NOT_OK(MatchExpressionParser::parseIntegerElementToLong(query.firstElement()));
}

TEST(MatchExpressionParserTest, ParseIntegerElementToLongRejectsNonIntegralDecimal) {
    BSONObj query = BSON("" << Decimal128("2.5"));
    ASSERT_NOT_OK(MatchExpressionParser::parseIntegerElementToLong(query.firstElement()));
}

TEST(MatchExpressionParserTest, ParseIntegerElementToLongRejectsTooLargeNegativeDouble) {
    // std::numeric_limits<double>::min() is the smallest positive double, not a large
    // negative one; lowest() matches the test's name and is out of long long range.
    BSONObj query = BSON("" << std::numeric_limits<double>::lowest());
    ASSERT_NOT_OK(MatchExpressionParser::parseIntegerElementToLong(query.firstElement()));
}

TEST(MatchExpressionParserTest, ParseIntegerElementToLongRejectsTooLargeDouble) {
    BSONObj query = BSON("" << MatchExpressionParser::kLongLongMaxPlusOneAsDouble);
    ASSERT_NOT_OK(MatchExpressionParser::parseIntegerElementToLong(query.firstElement()));
}
virtual bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result,
                 bool fromRepl) {
    if ( cmdObj.firstElement().type() != Array ) {
        errmsg = "ops has to be an array";
        return false;
    }

    BSONObj ops = cmdObj.firstElement().Obj();

    {
        // check input
        BSONObjIterator i( ops );
        while ( i.more() ) {
            BSONElement e = i.next();
            if (!_checkOperation(e, errmsg)) {
                return false;
            }
        }
    }

    // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
    // ns used so locking individually requires more analysis
    ScopedTransaction scopedXact(txn, MODE_X);
    Lock::GlobalWrite globalWriteLock(txn->lockState());

    if (!fromRepl &&
        !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
        return appendCommandStatus(result,
                                   Status(ErrorCodes::NotMaster,
                                          str::stream()
                                          << "Not primary while applying ops to database "
                                          << dbname));
    }

    // Preconditions check reads the database state, so needs to be done locked
    if ( cmdObj["preCondition"].type() == Array ) {
        BSONObjIterator i( cmdObj["preCondition"].Obj() );
        while ( i.more() ) {
            BSONObj f = i.next().Obj();

            DBDirectClient db( txn );
            BSONObj realres = db.findOne( f["ns"].String() , f["q"].Obj() );

            // Apply-ops would never have a $where matcher, so use the default callback,
            // which will throw an error if $where is found.
            Matcher m(f["res"].Obj());
            if ( ! m.matches( realres ) ) {
                result.append( "got" , realres );
                result.append( "whatFailed" , f );
                errmsg = "pre-condition failed";
                return false;
            }
        }
    }

    // apply
    int num = 0;
    int errors = 0;

    BSONObjIterator i( ops );
    BSONArrayBuilder ab;
    const bool alwaysUpsert = cmdObj.hasField("alwaysUpsert") ?
                              cmdObj["alwaysUpsert"].trueValue() : true;

    while ( i.more() ) {
        BSONElement e = i.next();
        const BSONObj& temp = e.Obj();

        // Ignore 'n' operations.
        const char *opType = temp["op"].valuestrsafe();
        if (*opType == 'n') continue;

        const string ns = temp["ns"].String();

        // Run operations under a nested lock as a hack to prevent yielding.
        //
        // The list of operations is supposed to be applied atomically; yielding
        // would break atomicity by allowing an interruption or a shutdown to occur
        // after only some operations are applied.  We are already locked globally
        // at this point, so taking a DBLock on the namespace creates a nested lock,
        // and yields are disallowed for operations that hold a nested lock.
        //
        // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
        // commit to happen with a subset of ops applied.
        // TODO figure out what to do about this.
        Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());

        // Ensures that yielding will not happen (see the comment above).
        DEV {
            Locker::LockSnapshot lockSnapshot;
            invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
        };

        OldClientContext ctx(txn, ns);

        Status status(ErrorCodes::InternalError, "");
        while (true) {
            try {
                // We assume that in the WriteConflict retry case, either the op rolls back
                // any changes it makes or is otherwise safe to rerun.
                status = repl::applyOperation_inlock(txn, ctx.db(), temp, false, alwaysUpsert);
                break;
            }
            catch (const WriteConflictException& wce) {
                LOG(2) << "WriteConflictException in applyOps command, retrying.";
                txn->recoveryUnit()->commitAndRestart();
                continue;
            }
        }

        ab.append(status.isOK());
        if (!status.isOK()) {
            errors++;
        }

        num++;

        WriteUnitOfWork wuow(txn);
        logOpForDbHash(txn, ns.c_str());
        wuow.commit();
    }

    result.append( "applied" , num );
    result.append( "results" , ab.arr() );

    if ( ! fromRepl ) {
        // We want this applied atomically on slaves
        // so we re-wrap without the pre-condition for speed

        string tempNS = str::stream() << dbname << ".$cmd";

        // TODO: possibly use mutable BSON to remove preCondition field
        // once it is available
        BSONObjIterator iter(cmdObj);
        BSONObjBuilder cmdBuilder;

        while (iter.more()) {
            BSONElement elem(iter.next());
            if (strcmp(elem.fieldName(), "preCondition") != 0) {
                cmdBuilder.append(elem);
            }
        }

        const BSONObj cmdRewritten = cmdBuilder.done();

        // We currently always logOp the command regardless of whether the individual ops
        // succeeded and rely on any failures to also happen on secondaries. This isn't
        // perfect, but it's what the command has always done and is part of its "correct"
        // behavior.
        while (true) {
            try {
                WriteUnitOfWork wunit(txn);
                getGlobalEnvironment()->getOpObserver()->onApplyOps(txn, tempNS, cmdRewritten);
                wunit.commit();
                break;
            }
            catch (const WriteConflictException& wce) {
                LOG(2) << "WriteConflictException while logging applyOps command, retrying.";
                txn->recoveryUnit()->commitAndRestart();
                continue;
            }
        }
    }

    if (errors != 0) {
        return false;
    }

    return true;
}
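// An illustrative applyOps command for the handler above (namespaces and
// values invented; preCondition is checked before any op is applied):
//   { applyOps: [ { op: "u", ns: "test.coll", o2: { _id: 1 }, o: { $set: { a: 2 } } },
//                 { op: "d", ns: "test.coll", o: { _id: 2 } } ],
//     preCondition: [ { ns: "test.coll", q: { _id: 1 }, res: { a: 1 } } ] }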
ProjectionExec::ProjectionExec(const BSONObj& spec, const MatchExpression* queryExpression)
    : _include(true),
      _special(false),
      _source(spec),
      _includeID(true),
      _skip(0),
      _limit(-1),
      _arrayOpType(ARRAY_OP_NORMAL),
      _hasNonSimple(false),
      _hasDottedField(false),
      _queryExpression(queryExpression),
      _hasReturnKey(false) {
    // Are we including or excluding fields?
    // -1 when we haven't initialized it.
    // 1 when we're including
    // 0 when we're excluding.
    int include_exclude = -1;

    BSONObjIterator it(_source);
    while (it.more()) {
        BSONElement e = it.next();

        if (!e.isNumber() && !e.isBoolean()) {
            _hasNonSimple = true;
        }

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            verify(1 == obj.nFields());

            BSONElement e2 = obj.firstElement();

            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    int i = e2.numberInt();
                    if (i < 0) {
                        add(e.fieldName(), i, -i); // limit is now positive
                    }
                    else {
                        add(e.fieldName(), 0, i);
                    }
                }
                else {
                    verify(e2.type() == Array);
                    BSONObj arr = e2.embeddedObject();
                    verify(2 == arr.nFields());

                    BSONObjIterator it(arr);
                    int skip = it.next().numberInt();
                    int limit = it.next().numberInt();

                    verify(limit > 0);

                    add(e.fieldName(), skip, limit);
                }
            }
            else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                _arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());
                _elemMatchObjs.push_back(elemMatchObj);
                StatusWithMatchExpression swme = MatchExpressionParser::parse(elemMatchObj);
                verify(swme.isOK());
                // And store it in _matchers.
                _matchers[mongoutils::str::before(e.fieldName(), '.').c_str()] = swme.getValue();

                add(e.fieldName(), true);
            }
            else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                verify(String == e2.type());

                if (mongoutils::str::equals(e2.valuestr(), "text")) {
                    _meta[e.fieldName()] = META_TEXT;
                }
                else if (mongoutils::str::equals(e2.valuestr(), "diskloc")) {
                    _meta[e.fieldName()] = META_DISKLOC;
                }
                else if (mongoutils::str::equals(e2.valuestr(), "indexKey")) {
                    _hasReturnKey = true;
                    // The index key clobbers everything so just stop parsing here.
                    return;
                }
                else {
                    // This shouldn't happen, should be caught by parsing.
                    verify(0);
                }
            }
            else {
                verify(0);
            }
        }
        else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            _includeID = false;
        }
        else {
            add(e.fieldName(), e.trueValue());

            // Projections of dotted fields aren't covered.
            if (mongoutils::str::contains(e.fieldName(), '.')) {
                _hasDottedField = true;
            }

            // Validate input.
            if (include_exclude == -1) {
                // If we haven't specified an include/exclude, initialize include_exclude.
                // We expect further include/excludes to match it.
                include_exclude = e.trueValue();
                _include = !e.trueValue();
            }
        }

        if (mongoutils::str::contains(e.fieldName(), ".$")) {
            _arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }
}
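// Illustrative projection specs the constructor above parses (field names invented):
//   { a: 1, "b.c": 1 }                -> include mode; dotted field sets _hasDottedField
//   { a: 0, _id: 0 }                  -> exclude mode with _id suppressed
//   { arr: { $slice: [ 2, 3 ] } }     -> skip 2 elements, return 3
//   { arr: { $elemMatch: { x: 1 } } } -> ARRAY_OP_ELEM_MATCH with a parsed matcher
//   { score: { $meta: "text" } }      -> META_TEXT entry in _meta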
TEST(MatchExpressionParserTest, ParseIntegerElementToLongAcceptsNegative) {
    BSONObj query = BSON("" << -2LL);
    auto result = MatchExpressionParser::parseIntegerElementToLong(query.firstElement());
    ASSERT_OK(result.getStatus());
    ASSERT_EQ(-2LL, result.getValue());
}
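// A hedged companion test (not in the original source): an integral double in
// range should parse, consistent with the rejection of non-integral 2.5 above.
TEST(MatchExpressionParserTest, ParseIntegerElementToLongAcceptsIntegralDouble) {
    BSONObj query = BSON("" << 2.0);
    auto result = MatchExpressionParser::parseIntegerElementToLong(query.firstElement());
    ASSERT_OK(result.getStatus());
    ASSERT_EQ(2LL, result.getValue());
}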
StatusWith<BSONObj> fixDocumentForInsert( const BSONObj& doc ) {
    if ( doc.objsize() > BSONObjMaxUserSize )
        return StatusWith<BSONObj>( ErrorCodes::BadValue,
                                    str::stream()
                                    << "object to insert too large"
                                    << doc.objsize() );

    bool firstElementIsId = doc.firstElement().fieldNameStringData() == "_id";
    bool hasTimestampToFix = false;
    {
        BSONObjIterator i( doc );
        while ( i.more() ) {
            BSONElement e = i.next();

            if ( e.type() == Timestamp && e.timestampValue() == 0 ) {
                // we replace Timestamp(0,0) at the top level with a correct value
                // in the fast pass, we just mark that we want to swap
                hasTimestampToFix = true;
                break;
            }

            const char* fieldName = e.fieldName();

            if ( fieldName[0] == '$' ) {
                return StatusWith<BSONObj>( ErrorCodes::BadValue,
                                            str::stream()
                                            << "Document can't have $ prefixed field names: "
                                            << e.fieldName() );
            }

            // check no regexp for _id (SERVER-9502)
            // also, disallow undefined and arrays
            if ( str::equals( fieldName, "_id") ) {
                if ( e.type() == RegEx ) {
                    return StatusWith<BSONObj>( ErrorCodes::BadValue,
                                                "can't use a regex for _id" );
                }
                if ( e.type() == Undefined ) {
                    return StatusWith<BSONObj>( ErrorCodes::BadValue,
                                                "can't use an undefined for _id" );
                }
                if ( e.type() == Array ) {
                    return StatusWith<BSONObj>( ErrorCodes::BadValue,
                                                "can't use an array for _id" );
                }
                if ( e.type() == Object ) {
                    BSONObj o = e.Obj();
                    Status s = o.storageValidEmbedded();
                    if ( !s.isOK() )
                        return StatusWith<BSONObj>( s );
                }
            }
        }
    }

    if ( firstElementIsId && !hasTimestampToFix )
        return StatusWith<BSONObj>( BSONObj() );

    bool hadId = firstElementIsId;

    BSONObjIterator i( doc );

    BSONObjBuilder b( doc.objsize() + 16 );
    if ( firstElementIsId ) {
        b.append( doc.firstElement() );
        i.next();
    }
    else {
        BSONElement e = doc["_id"];
        if ( e.type() ) {
            b.append( e );
            hadId = true;
        }
        else {
            b.appendOID( "_id", NULL, true );
        }
    }

    while ( i.more() ) {
        BSONElement e = i.next();
        if ( hadId && e.fieldNameStringData() == "_id" ) {
            // no-op
        }
        else if ( e.type() == Timestamp && e.timestampValue() == 0 ) {
            mutex::scoped_lock lk(OpTime::m);
            b.append( e.fieldName(), OpTime::now(lk) );
        }
        else {
            b.append( e );
        }
    }

    return StatusWith<BSONObj>( b.obj() );
}
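// Behavior of the pass above on a few illustrative documents (values invented):
//   { _id: 1, a: 2 }              -> empty BSONObj returned, meaning "use the original"
//   { a: 2 }                      -> { _id: ObjectId("<generated>"), a: 2 }
//   { _id: 1, t: Timestamp(0,0) } -> { _id: 1, t: <current OpTime> }
//   { $a: 1 }                     -> BadValue: $ prefixed field name
//   { _id: /x/ }                  -> BadValue: can't use a regex for _id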
void operator()( DBClientCursorBatchIterator &i ) {
    mongolock l( true );
    if ( context ) {
        context->relocked();
    }

    while( i.moreInCurrentBatch() ) {
        if ( n % 128 == 127 /*yield some*/ ) {
            time_t now = time(0);
            if( now - lastLog >= 60 ) {
                // report progress
                if( lastLog )
                    log() << "clone " << to_collection << ' ' << n << endl;
                lastLog = now;
            }
            mayInterrupt( _mayBeInterrupted );
            dbtempreleaseif t( _mayYield );
        }

        BSONObj tmp = i.nextSafe();

        /* assure object is valid.  note this will slow us down a little. */
        if ( !tmp.valid() ) {
            stringstream ss;
            ss << "Cloner: skipping corrupt object from " << from_collection;
            BSONElement e = tmp.firstElement();
            try {
                e.validate();
                ss << " firstElement: " << e;
            }
            catch( ... ) {
                ss << " firstElement corrupt";
            }
            out() << ss.str() << endl;
            continue;
        }

        ++n;

        BSONObj js = tmp;
        if ( isindex ) {
            assert( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
            storedForLater->push_back( js.getOwned() );
            continue;
        }

        try {
            theDataFileMgr.insertWithObjMod(to_collection, js);
            if ( logForRepl )
                logOp("i", to_collection, js);
            getDur().commitIfNeeded();
        }
        catch( UserException& e ) {
            log() << "warning: exception cloning object in " << from_collection
                  << ' ' << e.what() << " obj:" << js.toString() << '\n';
        }

        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }
}
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& jsobj,
         int,
         string& errmsg,
         BSONObjBuilder& result,
         bool /*fromRepl*/) {
    DBDirectClient db;

    BSONElement e = jsobj.firstElement();
    string toDeleteNs = dbname + '.' + e.valuestr();

    LOG(0) << "CMD: reIndex " << toDeleteNs << endl;

    Lock::DBWrite dbXLock(txn->lockState(), dbname);
    Client::Context ctx(toDeleteNs);

    Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );

    if ( !collection ) {
        errmsg = "ns not found";
        return false;
    }

    BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );

    std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), jsobj);

    list<BSONObj> all;
    auto_ptr<DBClientCursor> i = db.query( dbname + ".system.indexes" ,
                                           BSON( "ns" << toDeleteNs ) ,
                                           0 , 0 , 0 , QueryOption_SlaveOk );
    BSONObjBuilder b;
    while ( i->more() ) {
        const BSONObj spec = i->next().removeField("v").getOwned();
        const BSONObj key = spec.getObjectField("key");
        const Status keyStatus = validateKeyPattern(key);
        if (!keyStatus.isOK()) {
            errmsg = str::stream()
                << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
                << " For more info see http://dochub.mongodb.org/core/index-validation";
            return false;
        }

        b.append( BSONObjBuilder::numStr( all.size() ) , spec );
        all.push_back( spec );
    }

    result.appendNumber( "nIndexesWas", collection->getIndexCatalog()->numIndexesTotal() );

    Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
    if ( !s.isOK() ) {
        errmsg = "dropIndexes failed";
        return appendCommandStatus( result, s );
    }

    for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
        BSONObj o = *i;
        LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
        Status s = collection->getIndexCatalog()->createIndex(txn, o, false);
        if ( !s.isOK() )
            return appendCommandStatus( result, s );
    }

    result.append( "nIndexes" , (int)all.size() );
    result.appendArray( "indexes" , b.obj() );

    IndexBuilder::restoreIndexes(indexesInProg);
    return true;
}
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
    if ( ! okForConfigChanges( errmsg ) )
        return false;

    ShardConnection::sync();

    string ns = cmdObj.firstElement().valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "no ns";
        return false;
    }

    DBConfigPtr config = grid.getDBConfig( ns );
    if ( ! config->isSharded( ns ) ) {
        config->reload();
        if ( ! config->isSharded( ns ) ) {
            errmsg = "ns not sharded.  have to shard before can split";
            return false;
        }
    }

    BSONObj find = cmdObj.getObjectField( "find" );
    if ( find.isEmpty() ) {
        find = cmdObj.getObjectField( "middle" );

        if ( find.isEmpty() ) {
            errmsg = "need to specify find or middle";
            return false;
        }
    }

    ChunkManagerPtr info = config->getChunkManager( ns );
    ChunkPtr chunk = info->findChunk( find );
    BSONObj middle = cmdObj.getObjectField( "middle" );

    assert( chunk.get() );
    log() << "splitting: " << ns << "  shard: " << chunk << endl;

    BSONObj res;
    bool worked;
    if ( middle.isEmpty() ) {
        BSONObj ret = chunk->singleSplit( true /* force a split even if not enough data */ , res );
        worked = !ret.isEmpty();
    }
    else {
        // sanity check if the key provided is a valid split point
        if ( ( middle == chunk->getMin() ) || ( middle == chunk->getMax() ) ) {
            errmsg = "cannot split on initial or final chunk's key";
            return false;
        }

        if (!fieldsMatch(middle, info->getShardKey().key())){
            errmsg = "middle has different fields (or different order) than shard key";
            return false;
        }

        vector<BSONObj> splitPoints;
        splitPoints.push_back( middle );
        worked = chunk->multiSplit( splitPoints , res );
    }

    if ( !worked ) {
        errmsg = "split failed";
        result.append( "cause" , res );
        return false;
    }
    config->getChunkManager( ns , true );
    return true;
}
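// Illustrative command documents for the split handler above (names invented):
//   { split: "db.coll", middle: { x: 5 } } -> split exactly at { x: 5 } (multiSplit)
//   { split: "db.coll", find: { x: 5 } }   -> split the chunk owning { x: 5 } at a
//                                             server-chosen point (singleSplit)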
namespace engine
{
   #define IXM_MAX_PREALLOCATED_UNDEFKEY ( 10 )

   /* IXM Tool functions */
   BSONObj ixmGetUndefineKeyObj( INT32 fieldNum )
   {
      static BSONObj s_undefineKeys[ IXM_MAX_PREALLOCATED_UNDEFKEY ] ;
      static BOOLEAN s_init = FALSE ;
      static ossSpinXLatch s_latch ;

      if ( FALSE == s_init )
      {
         s_latch.get() ;
         if ( FALSE == s_init )
         {
            for ( SINT32 i = 0; i < IXM_MAX_PREALLOCATED_UNDEFKEY ; ++i )
            {
               BSONObjBuilder b ;
               for ( SINT32 j = 0; j <= i; ++j )
               {
                  b.appendUndefined("") ;
               }
               s_undefineKeys[i] = b.obj() ;
            }
            s_init = TRUE ;
         }
         s_latch.release() ;
      }

      if ( fieldNum > 0 && fieldNum <= IXM_MAX_PREALLOCATED_UNDEFKEY )
      {
         return s_undefineKeys[ fieldNum - 1 ] ;
      }
      else
      {
         BSONObjBuilder b ;
         for ( INT32 i = 0; i < fieldNum; ++i )
         {
            b.appendUndefined("") ;
         }
         return b.obj() ;
      }
   }

   /* IXM Global opt var */
   const static BSONObj gUndefinedObj =
      BSONObjBuilder().appendUndefined("").obj() ;
   const static BSONElement gUndefinedElt = gUndefinedObj.firstElement() ;

   class _ixmKeyGenerator
   {
   protected:
      const _ixmIndexKeyGen *_keygen ;
      mutable vector<BSONObj *> _objs ;

   public:
      _ixmKeyGenerator ( const _ixmIndexKeyGen *keygen )
      {
         _keygen = keygen ;
      }

      ~_ixmKeyGenerator()
      {
         vector<BSONObj *>::iterator itr = _objs.begin() ;
         for ( ; itr != _objs.end(); itr++ )
         {
            SDB_OSS_DEL *itr ;
         }
      }

      // PD_TRACE_DECLARE_FUNCTION ( SDB__IXMKEYGEN_GETKEYS, "_ixmKeyGenerator::getKeys" )
      INT32 getKeys ( const BSONObj &obj, BSONObjSet &keys, BSONElement *pArrEle ) const
      {
         INT32 rc = SDB_OK ;
         PD_TRACE_ENTRY ( SDB__IXMKEYGEN_GETKEYS );
         SDB_ASSERT( _keygen, "spec can't be NULL" ) ;
         SDB_ASSERT( !_keygen->_fieldNames.empty(), "can not be empty" ) ;
         vector<const CHAR*> fieldNames ( _keygen->_fieldNames ) ;
         BSONElement arrEle ;
         try
         {
            rc = _getKeys( fieldNames, obj, keys, &arrEle ) ;
         }
         catch ( std::exception &e )
         {
            PD_LOG( PDERROR, "unexpected err:%s", e.what() ) ;
            rc = SDB_INVALIDARG ;
            goto error ;
         }

         if ( SDB_OK != rc )
         {
            PD_LOG ( PDERROR, "Failed to generate key from object: %s",
                     obj.toString().c_str() ) ;
            goto error ;
         }

         if ( keys.empty() )
         {
            keys.insert ( _keygen->_undefinedKey ) ;
         }

         if ( NULL != pArrEle && !arrEle.eoo() )
         {
            *pArrEle = arrEle ;
         }

      done :
         PD_TRACE_EXITRC ( SDB__IXMKEYGEN_GETKEYS, rc );
         return rc ;
      error :
         goto done ;
      }

   protected:
      // PD_TRACE_DECLARE_FUNCTION ( SDB__IXMKEYGEN__GETKEYS, "_ixmKeyGenerator::_getKeys" )
      INT32 _getKeys( vector<const CHAR *> &fieldNames,
                      const BSONObj &obj,
                      BSONObjSet &keys,
                      BSONElement *arrEle ) const
      {
         INT32 rc = SDB_OK ;
         PD_TRACE_ENTRY ( SDB__IXMKEYGEN__GETKEYS );
   #define IXM_DEFAULT_FIELD_NUM 3
         BSONElement eleOnStack[IXM_DEFAULT_FIELD_NUM] ;
         BSONElement *keyEles = NULL ;
         const CHAR *arrEleName = NULL ;
         UINT32 arrElePos = 0 ;
         UINT32 eooNum = 0 ;

         if ( IXM_DEFAULT_FIELD_NUM < fieldNames.size() )
         {
            keyEles = new(std::nothrow) BSONElement[fieldNames.size()] ;
            if ( NULL == keyEles )
            {
               PD_LOG( PDERROR, "failed to allocate mem." ) ;
               rc = SDB_OOM ;
               goto error ;
            }
         }
         else
         {
            keyEles = ( BSONElement* )eleOnStack ;
         }

         for ( UINT32 i = 0; i < fieldNames.size(); i++ )
         {
            const CHAR *name = fieldNames.at( i ) ;
            SDB_ASSERT( '\0' != name[0], "can not be empty" ) ;
            BSONElement &e = keyEles[i] ;
            e = obj.getFieldDottedOrArray( name ) ;
            if ( e.eoo() )
            {
               ++eooNum ;
               continue ;
            }
            else if ( Array == e.type() )
            {
               if ( !arrEle->eoo() )
               {
                  PD_LOG( PDERROR, "At most one array can be in the key: %s, %s",
                          arrEle->fieldName(), e.fieldName() ) ;
                  rc = SDB_IXM_MULTIPLE_ARRAY ;
                  goto error ;
               }
               else
               {
                  *arrEle = e ;
                  arrEleName = name ;
                  arrElePos = i ;
               }
            }
            else
            {
               continue ;
            }
         }

         if ( fieldNames.size() == eooNum )
         {
            rc = SDB_OK ;
            goto done ;
         }
         else if ( !arrEle->eoo() )
         {
            rc = _genKeyWithArrayEle( keyEles, fieldNames.size(),
                                      arrEle, arrEleName, arrElePos,
                                      keys ) ;
            if ( SDB_OK != rc )
            {
               PD_LOG( PDERROR, "failed to gen keys with array element:%d", rc ) ;
               goto error ;
            }
         }
         else
         {
            rc = _genKeyWithNormalEle( keyEles, fieldNames.size(), keys ) ;
            if ( SDB_OK != rc )
            {
               PD_LOG( PDERROR, "failed to gen keys with normal element:%d", rc ) ;
               goto error ;
            }
         }

      done:
         if ( IXM_DEFAULT_FIELD_NUM < fieldNames.size() &&
              NULL != keyEles )
         {
            delete []keyEles ;
         }
         PD_TRACE_EXITRC ( SDB__IXMKEYGEN__GETKEYS, rc );
         return rc ;
      error:
         goto done ;
      }

      // PD_TRACE_DECLARE_FUNCTION ( SDB__IXMKEYGEN__GENKEYSWITHARRELE, "_ixmKeyGenerator::_genKeyWithArrayEle" )
      INT32 _genKeyWithArrayEle( BSONElement *keyEles,
                                 UINT32 eleNum,
                                 const BSONElement *arrElement,
                                 const CHAR *arrEleName,
                                 UINT32 arrElePos,
                                 BSONObjSet &keys ) const
      {
         PD_TRACE_ENTRY ( SDB__IXMKEYGEN__GENKEYSWITHARRELE );
         INT32 rc = SDB_OK ;
         BSONObj arrObj = arrElement->embeddedObject() ;

         if ( arrObj.firstElement().eoo() )
         {
            keyEles[arrElePos] = *arrElement ;
            rc = _genKeyWithNormalEle( keyEles, eleNum, keys ) ;
            if ( SDB_OK != rc )
            {
               goto error ;
            }
         }

         if ( '\0' == *arrEleName )
         {
            BSONObjIterator itr( arrObj ) ;
            BSONElement &e = keyEles[arrElePos] ;
            while ( itr.more() )
            {
               e = itr.next() ;
               rc = _genKeyWithNormalEle( keyEles, eleNum, keys ) ;
               if ( SDB_OK != rc )
               {
                  goto error ;
               }
            }
         }
         else
         {
            BSONObjIterator itr( arrObj ) ;
            while ( itr.more() )
            {
               const CHAR *dottedName = arrEleName ;
               BSONElement next = itr.next() ;
               if ( Object == next.type() )
               {
                  BSONElement e = next.embeddedObject()
                                  .getFieldDottedOrArray( dottedName ) ;
                  if ( Array == e.type() )
                  {
                     rc = _genKeyWithArrayEle(keyEles, eleNum, &e,
                                              dottedName, arrElePos, keys) ;
                     if ( SDB_OK != rc )
                     {
                        goto error ;
                     }
                     else
                     {
                        continue ;
                     }
                  }
                  else
                  {
                     keyEles[arrElePos] = e ;
                  }
               }
               else
               {
                  keyEles[arrElePos] = BSONElement() ;
               }

               rc = _genKeyWithNormalEle( keyEles, eleNum, keys ) ;
               if ( SDB_OK != rc )
               {
                  goto error ;
               }
            }
         }

      done:
         PD_TRACE_EXITRC( SDB__IXMKEYGEN__GENKEYSWITHARRELE, rc ) ;
         return rc ;
      error:
         goto done ;
      }

      // PD_TRACE_DECLARE_FUNCTION ( SDB__IXMKEYGEN__GENKEYSWITHNORMALELE, "_ixmKeyGenerator::_genKeyWithNormalEle" )
      INT32 _genKeyWithNormalEle( BSONElement *keyELes,
                                  UINT32 eleNum,
                                  BSONObjSet &keys ) const
      {
         PD_TRACE_ENTRY ( SDB__IXMKEYGEN__GENKEYSWITHNORMALELE );
         INT32 rc = SDB_OK ;
         BSONObjBuilder builder ;
         for ( UINT32 i = 0; i < eleNum; i++ )
         {
            BSONElement &e = keyELes[i] ;
            if ( e.eoo() )
            {
               builder.appendAs( gUndefinedElt, "" ) ;
            }
            else
            {
               builder.appendAs( e, "" ) ;
            }
         }
         keys.insert( builder.obj() ) ;
         PD_TRACE_EXITRC ( SDB__IXMKEYGEN__GENKEYSWITHNORMALELE, rc );
         return rc ;
      }
   } ;
   typedef class _ixmKeyGenerator ixmKeyGenerator ;

   _ixmIndexKeyGen::_ixmIndexKeyGen ( const _ixmIndexCB *indexCB,
                                      IXM_KEYGEN_TYPE genType )
   {
      SDB_ASSERT ( indexCB, "details can't be NULL" ) ;
      _keyPattern = indexCB->keyPattern() ;
      _info = indexCB->_infoObj ;
      _type = indexCB->getIndexType() ;
      _keyGenType = genType ;
      _init() ;
   }

   _ixmIndexKeyGen::_ixmIndexKeyGen ( const BSONObj &keyDef,
                                      IXM_KEYGEN_TYPE genType )
   {
      _keyPattern = keyDef.copy () ;
      _type = IXM_EXTENT_TYPE_NONE ;
      _keyGenType = genType ;
      _init () ;
   }

   // PD_TRACE_DECLARE_FUNCTION ( SDB__IXMINXKEYGEN__INIT, "_ixmIndexKeyGen::_init" )
   void _ixmIndexKeyGen::_init()
   {
      PD_TRACE_ENTRY ( SDB__IXMINXKEYGEN__INIT );
      _nFields = _keyPattern.nFields () ;
      INT32 fieldNum = 0 ;
      {
         BSONObjIterator i(_keyPattern) ;
         while ( i.more())
         {
            BSONElement e = i.next() ;
            _fieldNames.push_back(e.fieldName()) ;
            _fixedElements.push_back(BSONElement()) ;
            ++fieldNum ;
         }
         _undefinedKey = ixmGetUndefineKeyObj( fieldNum ) ;
      }
      PD_TRACE_EXIT ( SDB__IXMINXKEYGEN__INIT );
   }

   INT32 _ixmIndexKeyGen::getKeys ( const BSONObj &obj, BSONObjSet &keys,
                                    BSONElement *pArrEle ) const
   {
      ixmKeyGenerator g (this) ;
      if ( pArrEle )
      {
         *pArrEle = BSONElement() ;
      }
      return g.getKeys ( obj, keys, pArrEle ) ;
   }

   static BOOLEAN anyElementNamesMatch( const BSONObj& a , const BSONObj& b )
   {
      BSONObjIterator x(a);
      while ( x.more() )
      {
         BSONElement e = x.next();
         BSONObjIterator y(b);
         while ( y.more() )
         {
            BSONElement f = y.next();
            FieldCompareResult res = compareDottedFieldNames( e.fieldName(),
                                                              f.fieldName() ) ;
            if ( res == SAME || res == LEFT_SUBFIELD || res == RIGHT_SUBFIELD )
               return TRUE;
         }
      }
      return FALSE;
   }

   IndexSuitability ixmIndexKeyGen::suitability( const BSONObj &query ,
                                                 const BSONObj &order ) const
   {
      return _suitability( query , order );
   }

   IndexSuitability ixmIndexKeyGen::_suitability( const BSONObj& query ,
                                                  const BSONObj& order ) const
   {
      if ( anyElementNamesMatch( _keyPattern , query ) == 0 &&
           anyElementNamesMatch( _keyPattern , order ) == 0 )
         return USELESS;
      return HELPFUL;
   }

   // PD_TRACE_DECLARE_FUNCTION ( SDB_IXMINXKEYGEN, "ixmIndexKeyGen::reset" )
   INT32 ixmIndexKeyGen::reset ( const BSONObj & info )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY ( SDB_IXMINXKEYGEN );
      _info = info ;

      try
      {
         _keyPattern = _info[IXM_KEY_FIELD].embeddedObjectUserCheck() ;
      }
      catch ( std::exception &e )
      {
         PD_LOG ( PDERROR, "Unable to locate valid key in index: %s",
                  e.what() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

      if ( _keyPattern.objsize() == 0 )
      {
         PD_LOG ( PDERROR, "Empty key" ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }
      _init() ;

   done :
      PD_TRACE_EXITRC ( SDB_IXMINXKEYGEN, rc );
      return rc ;
   error :
      goto done ;
   }

   INT32 ixmIndexKeyGen::reset ( const _ixmIndexCB *indexCB )
   {
      SDB_ASSERT ( indexCB, "details can't be NULL" ) ;
      return reset ( indexCB->_infoObj ) ;
   }

   BSONElement ixmIndexKeyGen::missingField() const
   {
      return gUndefinedElt ;
   }

   BOOLEAN _ixmIndexKeyGen::validateKeyDef ( const BSONObj &keyDef )
   {
      BSONObjIterator i ( keyDef ) ;
      INT32 count = 0 ;
      while ( i.more () )
      {
         ++count ;
         BSONElement ie = i.next () ;
         if ( ie.type() != NumberInt ||
              ( ie.numberInt() != -1 && ie.numberInt() != 1 ) )
         {
            return FALSE ;
         }
      }
      return 0 != count ;
   }
}
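// A hedged usage sketch of the helpers above (values invented; assumes
// validateKeyDef is callable as a static member, as its definition suggests):
void ixmExample()
{
   BSONObj def = BSON( "a" << 1 << "b" << -1 ) ;
   BOOLEAN ok = engine::_ixmIndexKeyGen::validateKeyDef( def ) ; // TRUE: each value is +/-1
   BSONObj undef = engine::ixmGetUndefineKeyObj( 2 ) ;           // { "": undefined, "": undefined }
}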
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
    if ( ! okForConfigChanges( errmsg ) )
        return false;

    ShardConnection::sync();

    Timer t;
    string ns = cmdObj.firstElement().valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "no ns";
        return false;
    }

    DBConfigPtr config = grid.getDBConfig( ns );
    if ( ! config->isSharded( ns ) ) {
        config->reload();
        if ( ! config->isSharded( ns ) ) {
            errmsg = "ns not sharded.  have to shard before we can move a chunk";
            return false;
        }
    }

    BSONObj find = cmdObj.getObjectField( "find" );
    if ( find.isEmpty() ) {
        errmsg = "need to specify find.  see help";
        return false;
    }

    string toString = cmdObj["to"].valuestrsafe();
    if ( ! toString.size() ) {
        errmsg = "you have to specify where you want to move the chunk";
        return false;
    }

    Shard to = Shard::make( toString );

    // so far, chunk size serves test purposes; it may or may not become a supported parameter
    long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
    if ( maxChunkSizeBytes == 0 ) {
        maxChunkSizeBytes = Chunk::MaxChunkSize;
    }

    tlog() << "CMD: movechunk: " << cmdObj << endl;

    ChunkManagerPtr info = config->getChunkManager( ns );
    ChunkPtr c = info->findChunk( find );
    const Shard& from = c->getShard();

    if ( from == to ) {
        errmsg = "that chunk is already on that shard";
        return false;
    }

    BSONObj res;
    if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ) {
        errmsg = "move failed";
        result.append( "cause" , res );
        return false;
    }

    // preemptively reload the config to get new version info
    config->getChunkManager( ns , true );

    result.append( "millis" , t.millis() );
    return true;
}
// static
void ExpressionKeysPrivate::get2DKeys(const BSONObj &obj,
                                      const TwoDIndexingParams& params,
                                      BSONObjSet* keys,
                                      std::vector<BSONObj>* locs) {
    BSONElementMSet bSet;

    // Get all the nested location fields, but don't return individual elements from
    // the last array, if it exists.
    obj.getFieldsDotted(params.geo.c_str(), bSet, false);

    if (bSet.empty())
        return;

    for (BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI) {
        BSONElement geo = *setI;

        if (geo.eoo() || !geo.isABSONObj())
            continue;

        //
        // Grammar for location lookup:
        // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc,...,<k>:loc}|loc
        // loc  ::= { <k1> : #, <k2> : # }|[#, #]|{}
        //
        // Empty locations are ignored, preserving single-location semantics
        //

        BSONObj embed = geo.embeddedObject();
        if (embed.isEmpty())
            continue;

        // Differentiate between location arrays and locations
        // by seeing if the first element value is a number
        bool singleElement = embed.firstElement().isNumber();

        BSONObjIterator oi(embed);

        while (oi.more()) {
            BSONObj locObj;

            if (singleElement) {
                locObj = embed;
            } else {
                BSONElement locElement = oi.next();

                uassert(16804,
                        mongoutils::str::stream()
                            << "location object expected, location array not in correct format",
                        locElement.isABSONObj());

                locObj = locElement.embeddedObject();
                if(locObj.isEmpty())
                    continue;
            }

            BSONObjBuilder b(64);

            // Remember the actual location object if needed
            if (locs)
                locs->push_back(locObj);

            // Stop if we don't need to get anything but location objects
            if (!keys) {
                if (singleElement) break;
                else continue;
            }

            params.geoHashConverter->hash(locObj, &obj).appendHashMin(&b, "");

            // Go through all the other index keys
            for (vector<pair<string, int> >::const_iterator i = params.other.begin();
                 i != params.other.end(); ++i) {
                // Get *all* fields for the index key
                BSONElementSet eSet;
                obj.getFieldsDotted(i->first, eSet);

                if (eSet.size() == 0)
                    b.appendNull("");
                else if (eSet.size() == 1)
                    b.appendAs(*(eSet.begin()), "");
                else {
                    // If we have more than one key, store as an array of the objects
                    BSONArrayBuilder aBuilder;

                    for (BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end(); ++ei) {
                        aBuilder.append(*ei);
                    }

                    b.append("", aBuilder.arr());
                }
            }

            keys->insert(b.obj());

            if(singleElement) break;
        }
    }
}
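// Documents matching the location grammar above, for illustration (field names invented):
//   { loc: [ 40.2, -73.9 ] }                      -> one point, array form
//   { loc: { lng: 40.2, lat: -73.9 } }            -> one point, object form
//   { loc: [ [ 40.2, -73.9 ], [ 41.0, -72.1 ] ] } -> array of points, one key per point
//   { loc: { home: [ 40.2, -73.9 ], work: [ 41.0, -72.1 ] } } -> keyed points
//   { loc: {} }                                   -> empty location, ignored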
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
    string target = cmdObj.firstElement().valuestrsafe();
    Shard s = Shard::make( target );
    if ( ! grid.knowAboutShard( s.getConnString() ) ) {
        errmsg = "unknown shard";
        return false;
    }

    ScopedDbConnection conn( configServer.getPrimary() );

    // If the server is not yet draining chunks, put it in draining mode.
    BSONObj searchDoc = BSON( "_id" << s.getName() );
    BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
    BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
    if ( shardDoc.isEmpty() ) {
        // TODO prevent move chunks to this shard.

        log() << "going to start draining shard: " << s.getName() << endl;
        BSONObj newStatus = BSON( "$set" << BSON( ShardFields::draining(true) ) );
        conn->update( "config.shards" , searchDoc , newStatus, false /* do no upsert */);

        errmsg = conn->getLastError();
        if ( errmsg.size() ) {
            log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
            return false;
        }

        Shard::reloadShardInfo();

        result.append( "msg"   , "draining started successfully" );
        result.append( "state" , "started" );
        result.append( "shard" , s.getName() );
        conn.done();
        return true;
    }

    // If the server has been completely drained, remove it from the ConfigDB.
    // Check not only for chunks but also databases.
    BSONObj shardIDDoc = BSON( "shard" << shardDoc[ "_id" ].str() );
    long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
    BSONObj primaryDoc = BSON( "primary" << shardDoc[ "_id" ].str() );
    long long dbCount = conn->count( "config.databases" , primaryDoc );
    if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
        log() << "going to remove shard: " << s.getName() << endl;
        conn->remove( "config.shards" , searchDoc );

        errmsg = conn->getLastError();
        if ( errmsg.size() ) {
            log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
            return false;
        }

        Shard::removeShard( shardDoc[ "_id" ].str() );
        Shard::reloadShardInfo();

        result.append( "msg"   , "removeshard completed successfully" );
        result.append( "state" , "completed" );
        result.append( "shard" , s.getName() );
        conn.done();
        return true;
    }

    // If the server is already in draining mode, just report on its progress.
    // Report on databases (not just chunks) that are left too.
    result.append( "msg"   , "draining ongoing" );
    result.append( "state" , "ongoing" );
    BSONObjBuilder inner;
    inner.append( "chunks" , chunkCount );
    inner.append( "dbs" , dbCount );
    result.append( "remaining" , inner.obj() );

    conn.done();
    return true;
}
void run() {
    Scope * s = globalScriptEngine->newScope();
    s->localConnect( "blah" );

    BSONObjBuilder b;
    long long val = (long long)( 0xbabadeadbeefbaddULL );
    b.append( "a", val );
    BSONObj in = b.obj();
    s->setObject( "a", in );
    BSONObj out = s->getObject( "a" );
    ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );

    ASSERT( s->exec( "printjson( a ); b = {b:a.a}", "foo", false, true, false ) );
    out = s->getObject( "b" );
    ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
    if( val != out.firstElement().numberLong() ) {
        cout << val << endl;
        cout << out.firstElement().numberLong() << endl;
        cout << out.toString() << endl;
        ASSERT_EQUALS( val, out.firstElement().numberLong() );
    }

    ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
    out = s->getObject( "c" );
    stringstream ss;
    ss << "NumberLong(\"" << val << "\")";
    ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );

    ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
    out = s->getObject( "d" );
    ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
    ASSERT_EQUALS( double( val ), out.firstElement().number() );

    ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
    out = s->getObject( "e" );
    ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
    ASSERT_EQUALS( double( val ), out.firstElement().number() );

    ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
    out = s->getObject( "f" );
    ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );

    s->setObject( "z", BSON( "z" << (long long)( 4 ) ) );
    ASSERT( s->exec( "y = {y:z.z.top}", "foo", false, true, false ) );
    out = s->getObject( "y" );
    ASSERT_EQUALS( Undefined, out.firstElement().type() );

    ASSERT( s->exec( "x = {x:z.z.floatApprox}", "foo", false, true, false ) );
    out = s->getObject( "x" );
    ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
    ASSERT_EQUALS( double( 4 ), out.firstElement().number() );

    ASSERT( s->exec( "w = {w:z.z}", "foo", false, true, false ) );
    out = s->getObject( "w" );
    ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
    ASSERT_EQUALS( 4, out.firstElement().numberLong() );
}
ProgramRunner::ProgramRunner( const BSONObj &args ) {
    verify( !args.isEmpty() );

    string program( args.firstElement().valuestrsafe() );
    verify( !program.empty() );
    boost::filesystem::path programPath = findProgram(program);

    string prefix( "mongod-" );
    bool isMongodProgram =
        string("mongod") == program || program.compare( 0, prefix.size(), prefix ) == 0;
    prefix = "mongos-";
    bool isMongosProgram =
        string("mongos") == program || program.compare( 0, prefix.size(), prefix ) == 0;

#if 0
    if (isMongosProgram) {
        _argv.push_back("valgrind");
        _argv.push_back("--log-file=/tmp/mongos-%p.valgrind");
        _argv.push_back("--leak-check=yes");
        _argv.push_back("--suppressions=valgrind.suppressions");
        //_argv.push_back("--error-exitcode=1");
        _argv.push_back("--");
    }
#endif

    _argv.push_back( programPath.string() );

    _port = -1;

    BSONObjIterator j( args );
    j.next(); // skip program name (handled above)
    while(j.more()) {
        BSONElement e = j.next();
        string str;
        if ( e.isNumber() ) {
            stringstream ss;
            ss << e.number();
            str = ss.str();
        }
        else {
            verify( e.type() == mongo::String );
            str = e.valuestr();
        }
        if ( str == "--port" )
            _port = -2;
        else if ( _port == -2 )
            _port = strtol( str.c_str(), 0, 10 );
        _argv.push_back(str);
    }

    if ( ! isMongodProgram && ! isMongosProgram && program != "mongobridge" )
        _port = 0;
    else {
        if ( _port <= 0 )
            log() << "error: a port number is expected when running " << program
                  << " from the shell" << endl;
        verify( _port > 0 );
    }
    if ( _port > 0 ) {
        bool haveDbForPort = registry.isPortRegistered( _port );
        if ( haveDbForPort ) {
            log() << "already have db for port: " << _port << endl;
            verify( !haveDbForPort );
        }
    }
}
Config::Config( const string& _dbname , const BSONObj& cmdObj ) {
    dbname = _dbname;
    ns = dbname + "." + cmdObj.firstElement().valuestr();

    verbose = cmdObj["verbose"].trueValue();
    jsMode = cmdObj["jsMode"].trueValue();

    jsMaxKeys = 500000;
    reduceTriggerRatio = 2.0;
    maxInMemSize = 5 * 1024 * 1024;

    uassert( 13602 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );

    if ( cmdObj["out"].type() == String ) {
        finalShort = cmdObj["out"].String();
        outType = REPLACE;
    }
    else if ( cmdObj["out"].type() == Object ) {
        BSONObj o = cmdObj["out"].embeddedObject();

        BSONElement e = o.firstElement();
        string t = e.fieldName();

        if ( t == "normal" || t == "replace" ) {
            outType = REPLACE;
            finalShort = e.String();
        }
        else if ( t == "merge" ) {
            outType = MERGE;
            finalShort = e.String();
        }
        else if ( t == "reduce" ) {
            outType = REDUCE;
            finalShort = e.String();
        }
        else if ( t == "inline" ) {
            outType = INMEMORY;
        }
        else {
            uasserted( 13522 , str::stream() << "unknown out specifier [" << t << "]" );
        }

        if (o.hasElement("db")) {
            outDB = o["db"].String();
        }
    }
    else {
        uasserted( 13606 , "'out' has to be a string or an object" );
    }

    if ( outType != INMEMORY ) { // setup names
        tempLong = str::stream() << (outDB.empty() ? dbname : outDB)
                                 << ".tmp.mr." << cmdObj.firstElement().String()
                                 << "_" << JOB_NUMBER++;
        incLong = tempLong + "_inc";
        finalLong = str::stream() << (outDB.empty() ? dbname : outDB) << "." << finalShort;
    }

    { // scope and code
        if ( cmdObj["scope"].type() == Object )
            scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();

        mapper.reset( new JSMapper( cmdObj["map"] ) );
        reducer.reset( new JSReducer( cmdObj["reduce"] ) );
        if ( cmdObj["finalize"].type() && cmdObj["finalize"].trueValue() )
            finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) );

        if ( cmdObj["mapparams"].type() == Array ) {
            mapParams = cmdObj["mapparams"].embeddedObjectUserCheck();
        }
    }

    { // query options
        BSONElement q = cmdObj["query"];
        if ( q.type() == Object )
            filter = q.embeddedObjectUserCheck();
        else
            uassert( 13608 , "query has to be blank or an Object" , ! q.trueValue() );

        BSONElement s = cmdObj["sort"];
        if ( s.type() == Object )
            sort = s.embeddedObjectUserCheck();
        else
            uassert( 13609 , "sort has to be blank or an Object" , ! s.trueValue() );

        if ( cmdObj["limit"].isNumber() )
            limit = cmdObj["limit"].numberLong();
        else
            limit = 0;
    }
}
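// Illustrative 'out' specifiers accepted by the parser above (names invented):
//   { out: "coll" }                      -> REPLACE
//   { out: { merge: "coll" } }           -> MERGE
//   { out: { reduce: "coll", db: "d" } } -> REDUCE into another database
//   { out: { inline: 1 } }               -> INMEMORY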
Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
    clear();

    // Check if the update expression is a full object replacement.
    if (*updateExpr.firstElementFieldName() != '$') {
        if (multi) {
            return Status(ErrorCodes::FailedToParse, "multi update only works with $ operators");
        }

        // Modifiers expect BSONElements as input. But the input to object replace is, by
        // definition, an object. We wrap the 'updateExpr' as the mod is expecting. Note
        // that the wrapper is temporary so the object replace mod should make a copy of
        // the object.
        unique_ptr<ModifierObjectReplace> mod(new ModifierObjectReplace);
        BSONObj wrapper = BSON("dummy" << updateExpr);
        Status status = mod->init(wrapper.firstElement(), _modOptions);
        if (!status.isOK()) {
            return status;
        }

        _mods.push_back(mod.release());

        // Register the fact that this driver will only do full object replacements.
        _replacementMode = true;

        return Status::OK();
    }

    // The update expression is made of mod operators, that is
    // { <$mod>: {...}, <$mod>: {...}, ... }
    BSONObjIterator outerIter(updateExpr);
    while (outerIter.more()) {
        BSONElement outerModElem = outerIter.next();

        // Check whether this is a valid mod type.
        modifiertable::ModifierType modType = modifiertable::getType(outerModElem.fieldName());
        if (modType == modifiertable::MOD_UNKNOWN) {
            return Status(ErrorCodes::FailedToParse,
                          str::stream() << "Unknown modifier: " << outerModElem.fieldName());
        }

        // Check whether there is indeed a list of mods under this modifier.
        if (outerModElem.type() != Object) {
            return Status(ErrorCodes::FailedToParse,
                          str::stream() << "Modifiers operate on fields but we found type "
                                        << typeName(outerModElem.type())
                                        << " instead. For example: {$mod: {<field>: ...}}"
                                        << " not {" << outerModElem.toString() << "}");
        }

        // Check whether there are indeed mods under this modifier.
        if (outerModElem.embeddedObject().isEmpty()) {
            return Status(ErrorCodes::FailedToParse,
                          str::stream() << "'" << outerModElem.fieldName()
                                        << "' is empty. You must specify a field like so: "
                                           "{" << outerModElem.fieldName()
                                        << ": {<field>: ...}}");
        }

        BSONObjIterator innerIter(outerModElem.embeddedObject());
        while (innerIter.more()) {
            BSONElement innerModElem = innerIter.next();

            Status status = addAndParse(modType, innerModElem);
            if (!status.isOK()) {
                return status;
            }
        }
    }

    // Register the fact that there will be only $mod's in this driver -- no object
    // replacement.
    _replacementMode = false;

    return Status::OK();
}
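// Illustrative update expressions for parse() above (field names invented):
//   { $set: { a: 1 }, $inc: { b: 2 } } -> modifier mode; one mod per inner field
//   { a: 1, b: 2 }                     -> replacement mode (rejected when multi is true)
//   { $bogus: { a: 1 } }               -> FailedToParse: unknown modifier
//   { $set: 5 }                        -> FailedToParse: payload must be an object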
bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
    string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();

    MRSetup mr( dbname , cmdObj.firstElement().embeddedObjectUserCheck() , false );

    set<ServerAndQuery> servers;

    BSONObjBuilder shardCounts;
    map<string,long long> counts;

    BSONObj shards = cmdObj["shards"].embeddedObjectUserCheck();
    vector< auto_ptr<DBClientCursor> > shardCursors;

    { // parse per shard results
        BSONObjIterator i( shards );
        while ( i.more() ){
            BSONElement e = i.next();
            string shard = e.fieldName();

            BSONObj res = e.embeddedObjectUserCheck();

            uassert( 10078 , "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
            servers.insert( shard );
            shardCounts.appendAs( res["counts"] , shard );

            BSONObjIterator j( res["counts"].embeddedObjectUserCheck() );
            while ( j.more() ){
                BSONElement temp = j.next();
                counts[temp.fieldName()] += temp.numberLong();
            }
        }
    }

    DBDirectClient db;

    { // reduce from each stream
        BSONObj sortKey = BSON( "_id" << 1 );

        ParallelSortClusteredCursor cursor( servers , dbname + "." + shardedOutputCollection ,
                                            Query().sort( sortKey ) );
        cursor.init();

        auto_ptr<Scope> s = globalScriptEngine->getPooledScope( dbname );
        s->localConnect( dbname.c_str() );
        ScriptingFunction reduceFunction = s->createFunction( mr.reduceCode.c_str() );
        ScriptingFunction finalizeFunction = 0;
        if ( mr.finalizeCode.size() )
            finalizeFunction = s->createFunction( mr.finalizeCode.c_str() );

        BSONList values;

        result.append( "result" , mr.finalShort );

        while ( cursor.more() ){
            BSONObj t = cursor.next().getOwned();

            if ( values.size() == 0 ){
                values.push_back( t );
                continue;
            }

            if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ){
                values.push_back( t );
                continue;
            }

            db.insert( mr.tempLong , reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
            values.clear();
            values.push_back( t );
        }

        if ( values.size() )
            db.insert( mr.tempLong , reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
    }

    long long finalCount = mr.renameIfNeeded( db );
    log(0) << " mapreducefinishcommand " << mr.finalLong << " " << finalCount << endl;

    for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ){
        ScopedDbConnection conn( i->_server );
        conn->dropCollection( dbname + "." + shardedOutputCollection );
        conn.done();
    }

    result.append( "shardCounts" , shardCounts.obj() );

    {
        BSONObjBuilder c;
        for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); i++ ){
            c.append( i->first , i->second );
        }
        result.append( "counts" , c.obj() );
    }

    return 1;
}
void operator()( DBClientCursorBatchIterator &i ) {
    Lock::GlobalWrite lk;
    if ( context ) {
        context->relocked();
    }

    while( i.moreInCurrentBatch() ) {
        if ( n % 128 == 127 /*yield some*/ ) {
            time_t now = time(0);
            if( now - lastLog >= 60 ) {
                // report progress
                if( lastLog )
                    log() << "clone " << to_collection << ' ' << n << endl;
                lastLog = now;
            }
            mayInterrupt( _mayBeInterrupted );
            dbtempreleaseif t( _mayYield );
        }

        BSONObj tmp = i.nextSafe();

        /* assure object is valid.  note this will slow us down a little. */
        if ( !tmp.valid() ) {
            stringstream ss;
            ss << "Cloner: skipping corrupt object from " << from_collection;
            BSONElement e = tmp.firstElement();
            try {
                e.validate();
                ss << " firstElement: " << e;
            }
            catch( ... ) {
                ss << " firstElement corrupt";
            }
            out() << ss.str() << endl;
            continue;
        }

        ++n;

        BSONObj js = tmp;
        if ( isindex ) {
            verify( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
            storedForLater->push_back( js.getOwned() );
            continue;
        }

        try {
            // add keys for presorting
            DiskLoc loc = theDataFileMgr.insertWithObjMod(to_collection, js);
            loc.assertOk();
            if (_sortersForIndex != NULL) {
                // add key to SortersForNS
                for (SortersForIndex::iterator iSorter = _sortersForIndex->begin();
                     iSorter != _sortersForIndex->end();
                     ++iSorter) {
                    iSorter->second.preSortPhase.addKeys(iSorter->second.spec, js, loc, false);
                }
            }
            if ( logForRepl )
                logOp("i", to_collection, js);
            getDur().commitIfNeeded();
        }
        catch( UserException& e ) {
            error() << "error: exception cloning object in " << from_collection
                    << ' ' << e.what() << " obj:" << js.toString() << '\n';
            throw;
        }

        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }
}