void ReplSource::forceResyncDead( const char *requester ) {
    if ( !replAllDead )
        return;
    SourceVector sources;
    ReplSource::loadAll(sources);
    for( SourceVector::iterator i = sources.begin(); i != sources.end(); ++i ) {
        (*i)->forceResync( requester );
    }
    replAllDead = 0;
}
void ReplSource::forceResyncDead( const char *requester ) {
    if ( !replAllDead )
        return;
    SourceVector sources;
    ReplSource::loadAll(sources);
    for( SourceVector::iterator i = sources.begin(); i != sources.end(); ++i ) {
        log() << requester << " forcing resync from " << (*i)->hostName << endl;
        (*i)->forceResync( requester );
    }
    replAllDead = 0;
}
int main(int argc, char **argv) {
    try {
        osgAudio::AudioEnvironment::instance()->init();

        typedef std::vector<osg::ref_ptr<Source> > SourceVector;
        SourceVector sourceVector;

        for (int i = 0; i < 8; i++) {
            osg::ref_ptr<FileStream> fstream;
            int n = 0;
            if (i % 2) {
                fstream = new FileStream("right.ogg");
                n = 20;
            }
            else {
                fstream = new FileStream("left.ogg");
            }
            osg::ref_ptr<Source> source = new Source(fstream.get());
            source->setLooping(true);
            source->setPosition(-10 + n + i * 1, 0, 0);
            sourceVector.push_back(source);
        }

        SourceVector::iterator it;
        for (it = sourceVector.begin(); it != sourceVector.end(); it++) {
            (*it)->play();
        }

        std::cout << "Press return to exit" << std::endl;
        std::cin.get();
    }
    catch (osgAudio::Error e) {
        std::cerr << e << "\n";
    }
    catch (...) {
        std::cerr << "Unknown error!\n";
    }
    return 0;
}
BlockDiagonal(Kernel k, SourceVector& sources)
    : plan(k, sources, BlockDiagonal::local_options()),
      context(sources.size(), 50),
      M() {
    options.residual = 1e-1;
    options.variable_p = false;
    options.max_iters = 1;
    context.output = false;
}
/* we reuse our existing objects so that we can keep our existing connection
   and cursor in effect.
*/
void ReplSource::loadAll(SourceVector &v) {
    Client::Context ctx("local.sources");
    SourceVector old = v;
    v.clear();

    if ( !cmdLine.source.empty() ) {
        // --source <host> specified.
        // check that no items are in sources other than that
        // add if missing
        shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
        int n = 0;
        while ( c->ok() ) {
            n++;
            ReplSource tmp(c->current());
            if ( tmp.hostName != cmdLine.source ) {
                log() << "repl: --source " << cmdLine.source << " != " << tmp.hostName
                      << " from local.sources collection" << endl;
                log() << "repl: for instructions on changing this slave's source, see:" << endl;
                log() << "http://dochub.mongodb.org/core/masterslave" << endl;
                log() << "repl: terminating mongod after 30 seconds" << endl;
                sleepsecs(30);
                dbexit( EXIT_REPLICATION_ERROR );
            }
            if ( tmp.only != cmdLine.only ) {
                log() << "--only " << cmdLine.only << " != " << tmp.only
                      << " from local.sources collection" << endl;
                log() << "terminating after 30 seconds" << endl;
                sleepsecs(30);
                dbexit( EXIT_REPLICATION_ERROR );
            }
            c->advance();
        }
        uassert( 10002 , "local.sources collection corrupt?", n<2 );
        if ( n == 0 ) {
            // source missing.  add.
            ReplSource s;
            s.hostName = cmdLine.source;
            s.only = cmdLine.only;
            s.save();
        }
    }
    else {
        try {
            massert( 10384 , "--only requires use of --source", cmdLine.only.empty());
        }
        catch ( ... ) {
            dbexit( EXIT_BADOPTIONS );
        }
    }

    shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
    while ( c->ok() ) {
        ReplSource tmp(c->current());
        if ( tmp.syncedTo.isNull() ) {
            DBDirectClient c;
            if ( c.exists( "local.oplog.$main" ) ) {
                BSONObj op = c.findOne( "local.oplog.$main",
                                        QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
                if ( !op.isEmpty() ) {
                    tmp.syncedTo = op[ "ts" ].date();
                }
            }
        }
        addSourceToList(v, tmp, old);
        c->advance();
    }
}
intrusive_ptr<Pipeline> Pipeline::parseCommand(
    string &errmsg, BSONObj &cmdObj,
    const intrusive_ptr<ExpressionContext> &pCtx) {
    intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
    vector<BSONElement> pipeline;

    /* gather the specification for the aggregation */
    for(BSONObj::iterator cmdIterator = cmdObj.begin(); cmdIterator.more(); ) {
        BSONElement cmdElement(cmdIterator.next());
        const char *pFieldName = cmdElement.fieldName();

        /* look for the aggregation command */
        if (!strcmp(pFieldName, commandName)) {
            pPipeline->collectionName = cmdElement.String();
            continue;
        }

        /* check for the collection name */
        if (!strcmp(pFieldName, pipelineName)) {
            pipeline = cmdElement.Array();
            continue;
        }

        /* check for explain option */
        if (!strcmp(pFieldName, explainName)) {
            pPipeline->explain = cmdElement.Bool();
            continue;
        }

        /* if the request came from the router, we're in a shard */
        if (!strcmp(pFieldName, fromRouterName)) {
            pCtx->setInShard(cmdElement.Bool());
            continue;
        }

        /* check for debug options */
        if (!strcmp(pFieldName, splitMongodPipelineName)) {
            pPipeline->splitMongodPipeline = true;
            continue;
        }

        /* Ignore $auth information sent along with the command. The authentication system will
         * use it, it's not a part of the pipeline.
         */
        if (!strcmp(pFieldName, AuthenticationTable::fieldName.c_str())) {
            continue;
        }

        /* we didn't recognize a field in the command */
        ostringstream sb;
        sb << "unrecognized field \"" << cmdElement.fieldName();
        errmsg = sb.str();
        return intrusive_ptr<Pipeline>();
    }

    /*
      If we get here, we've harvested the fields we expect for a pipeline.

      Set up the specified document source pipeline.
    */
    SourceVector *pSourceVector = &pPipeline->sourceVector; // shorthand

    /* iterate over the steps in the pipeline */
    const size_t nSteps = pipeline.size();
    for(size_t iStep = 0; iStep < nSteps; ++iStep) {
        /* pull out the pipeline element as an object */
        BSONElement pipeElement(pipeline[iStep]);
        uassert(15942, str::stream() << "pipeline element " <<
                iStep << " is not an object",
                pipeElement.type() == Object);
        BSONObj bsonObj(pipeElement.Obj());

        // Parse a pipeline stage from 'bsonObj'.
        uassert(16435, "A pipeline stage specification object must contain exactly one field.",
                bsonObj.nFields() == 1);
        BSONElement stageSpec = bsonObj.firstElement();
        const char* stageName = stageSpec.fieldName();

        // Create a DocumentSource pipeline stage from 'stageSpec'.
        StageDesc key;
        key.pName = stageName;
        const StageDesc* pDesc = (const StageDesc*)
                bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc), stageDescCmp);

        uassert(16436,
                str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",
                pDesc);
        intrusive_ptr<DocumentSource> stage = (*pDesc->pFactory)(&stageSpec, pCtx);
        verify(stage);
        stage->setPipelineStep(iStep);
        pSourceVector->push_back(stage);
    }

    /* if there aren't any pipeline stages, there's nothing more to do */
    if (!pSourceVector->size())
        return pPipeline;

    /*
      Move filters up where possible.

      CW TODO -- move filter past projections where possible, and noting
      corresponding field renaming.
    */

    /*
      Wherever there is a match immediately following a sort, swap them.
      This means we sort fewer items.  Neither changes the documents in
      the stream, so this transformation shouldn't affect the result.

      We do this first, because then when we coalesce operators below,
      any adjacent matches will be combined.
    */
    for(size_t srcn = pSourceVector->size(), srci = 1; srci < srcn; ++srci) {
        intrusive_ptr<DocumentSource> &pSource = pSourceVector->at(srci);
        if (dynamic_cast<DocumentSourceMatch *>(pSource.get())) {
            intrusive_ptr<DocumentSource> &pPrevious = pSourceVector->at(srci - 1);
            if (dynamic_cast<DocumentSourceSort *>(pPrevious.get())) {
                /* swap this item with the previous */
                intrusive_ptr<DocumentSource> pTemp(pPrevious);
                pPrevious = pSource;
                pSource = pTemp;
            }
        }
    }

    /*
      Coalesce adjacent filters where possible.  Two adjacent filters are
      equivalent to one filter whose predicate is the conjunction of the
      two original filters' predicates.  For now, capture this by giving
      any DocumentSource the option to absorb its successor; this will
      also allow adjacent projections to coalesce when possible.

      Run through the DocumentSources, and give each one the opportunity
      to coalesce with its successor.  If successful, remove the successor.

      Move all document sources to a temporary list.
    */
    SourceVector tempVector(*pSourceVector);
    pSourceVector->clear();

    /* move the first one to the final list */
    pSourceVector->push_back(tempVector[0]);

    /* run through the sources, coalescing them or keeping them */
    for(size_t tempn = tempVector.size(), tempi = 1; tempi < tempn; ++tempi) {
        /*
          If we can't coalesce the source with the last, then move it to
          the final list, and make it the new last.  (If we succeeded,
          then we're still on the same last, and there's no need to move
          or do anything with the source -- the destruction of tempVector
          will take care of the rest.)
        */
        intrusive_ptr<DocumentSource> &pLastSource = pSourceVector->back();
        intrusive_ptr<DocumentSource> &pTemp = tempVector.at(tempi);
        verify(pTemp && pLastSource);
        if (!pLastSource->coalesce(pTemp))
            pSourceVector->push_back(pTemp);
    }

    /* optimize the elements in the pipeline */
    for(SourceVector::iterator iter(pSourceVector->begin()),
            listEnd(pSourceVector->end()); iter != listEnd; ++iter) {
        if (!*iter) {
            errmsg = "Pipeline received empty document as argument";
            return intrusive_ptr<Pipeline>();
        }
        (*iter)->optimize();
    }

    return pPipeline;
}
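/*
  A minimal standalone sketch (not part of the pipeline code above) of the
  property the coalescing pass relies on: applying two filters one after the
  other selects exactly the documents selected by a single filter over the
  conjunction of the two predicates. The predicates and container below are
  illustrative only.
*/
#include <cassert>
#include <vector>

static bool evenPred(int x) { return x % 2 == 0; }
static bool smallPred(int x) { return x < 10; }

int main() {
    std::vector<int> docs;
    for (int i = 0; i < 20; ++i)
        docs.push_back(i);

    // Apply the two filters one after the other.
    std::vector<int> firstPass;
    for (std::size_t i = 0; i < docs.size(); ++i)
        if (evenPred(docs[i]))
            firstPass.push_back(docs[i]);
    std::vector<int> chained;
    for (std::size_t i = 0; i < firstPass.size(); ++i)
        if (smallPred(firstPass[i]))
            chained.push_back(firstPass[i]);

    // Apply a single filter whose predicate is the conjunction.
    std::vector<int> coalesced;
    for (std::size_t i = 0; i < docs.size(); ++i)
        if (evenPred(docs[i]) && smallPred(docs[i]))
            coalesced.push_back(docs[i]);

    // Both orderings select the same documents.
    assert(chained == coalesced);
    return 0;
}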
/* we reuse our existing objects so that we can keep our existing connection
   and cursor in effect.
*/
void ReplSource::loadAll(OperationContext* txn, SourceVector& v) {
    const char* localSources = "local.sources";
    Client::Context ctx(txn, localSources);
    SourceVector old = v;
    v.clear();

    const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
    if (!replSettings.source.empty()) {
        // --source <host> specified.
        // check that no items are in sources other than that
        // add if missing
        int n = 0;

        auto_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
            txn, localSources, ctx.db()->getCollection(localSources)));
        BSONObj obj;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
            n++;
            ReplSource tmp(txn, obj);
            if (tmp.hostName != replSettings.source) {
                log() << "repl: --source " << replSettings.source << " != " << tmp.hostName
                      << " from local.sources collection" << endl;
                log() << "repl: for instructions on changing this slave's source, see:" << endl;
                log() << "http://dochub.mongodb.org/core/masterslave" << endl;
                log() << "repl: terminating mongod after 30 seconds" << endl;
                sleepsecs(30);
                dbexit(EXIT_REPLICATION_ERROR);
            }
            if (tmp.only != replSettings.only) {
                log() << "--only " << replSettings.only << " != " << tmp.only
                      << " from local.sources collection" << endl;
                log() << "terminating after 30 seconds" << endl;
                sleepsecs(30);
                dbexit(EXIT_REPLICATION_ERROR);
            }
        }
        uassert(17065, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
        uassert(10002, "local.sources collection corrupt?", n < 2);
        if (n == 0) {
            // source missing.  add.
            ReplSource s(txn);
            s.hostName = replSettings.source;
            s.only = replSettings.only;
            s.save(txn);
        }
    } else {
        try {
            massert(10384, "--only requires use of --source", replSettings.only.empty());
        } catch (...) {
            dbexit(EXIT_BADOPTIONS);
        }
    }

    auto_ptr<PlanExecutor> exec(
        InternalPlanner::collectionScan(txn, localSources, ctx.db()->getCollection(localSources)));
    BSONObj obj;
    PlanExecutor::ExecState state;
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
        ReplSource tmp(txn, obj);
        if (tmp.syncedTo.isNull()) {
            DBDirectClient c(txn);
            BSONObj op = c.findOne("local.oplog.$main",
                                   QUERY("op" << NE << "n").sort(BSON("$natural" << -1)));
            if (!op.isEmpty()) {
                tmp.syncedTo = op["ts"].date();
            }
        }
        addSourceToList(txn, v, tmp, old);
    }
    uassert(17066, "Internal error reading from local.sources", PlanExecutor::IS_EOF == state);
}
intrusive_ptr<Pipeline> Pipeline::parseCommand(
    string &errmsg, BSONObj &cmdObj,
    const intrusive_ptr<ExpressionContext> &pCtx) {
    intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
    vector<BSONElement> pipeline;

    /* gather the specification for the aggregation */
    for(BSONObj::iterator cmdIterator = cmdObj.begin(); cmdIterator.more(); ) {
        BSONElement cmdElement(cmdIterator.next());
        const char *pFieldName = cmdElement.fieldName();

        /* look for the aggregation command */
        if (!strcmp(pFieldName, commandName)) {
            pPipeline->collectionName = cmdElement.String();
            continue;
        }

        /* check for the collection name */
        if (!strcmp(pFieldName, pipelineName)) {
            pipeline = cmdElement.Array();
            continue;
        }

        /* check for explain option */
        if (!strcmp(pFieldName, explainName)) {
            pPipeline->explain = cmdElement.Bool();
            continue;
        }

        /* if the request came from the router, we're in a shard */
        if (!strcmp(pFieldName, fromRouterName)) {
            pCtx->setInShard(cmdElement.Bool());
            continue;
        }

        /* check for debug options */
        if (!strcmp(pFieldName, splitMongodPipelineName)) {
            pPipeline->splitMongodPipeline = true;
            continue;
        }

        /* Ignore $auth information sent along with the command. The authentication system will
         * use it, it's not a part of the pipeline.
         */
        if (!strcmp(pFieldName, AuthenticationTable::fieldName.c_str())) {
            continue;
        }

        /* we didn't recognize a field in the command */
        ostringstream sb;
        sb << "unrecognized field \"" << cmdElement.fieldName();
        errmsg = sb.str();
        return intrusive_ptr<Pipeline>();
    }

    /*
      If we get here, we've harvested the fields we expect for a pipeline.

      Set up the specified document source pipeline.
    */
    SourceVector *pSourceVector = &pPipeline->sourceVector; // shorthand

    /* iterate over the steps in the pipeline */
    const size_t nSteps = pipeline.size();
    for(size_t iStep = 0; iStep < nSteps; ++iStep) {
        /* pull out the pipeline element as an object */
        BSONElement pipeElement(pipeline[iStep]);
        uassert(15942, str::stream() << "pipeline element " <<
                iStep << " is not an object",
                pipeElement.type() == Object);
        BSONObj bsonObj(pipeElement.Obj());

        intrusive_ptr<DocumentSource> pSource;

        /* use the object to add a DocumentSource to the processing chain */
        BSONObjIterator bsonIterator(bsonObj);
        while(bsonIterator.more()) {
            BSONElement bsonElement(bsonIterator.next());
            const char *pFieldName = bsonElement.fieldName();

            /* select the appropriate operation and instantiate */
            StageDesc key;
            key.pName = pFieldName;
            const StageDesc *pDesc = (const StageDesc *)
                bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc), stageDescCmp);
            if (pDesc) {
                pSource = (*pDesc->pFactory)(&bsonElement, pCtx);
                pSource->setPipelineStep(iStep);
            }
            else {
                ostringstream sb;
                sb << "Pipeline::run(): unrecognized pipeline op \"" << pFieldName;
                errmsg = sb.str();
                return intrusive_ptr<Pipeline>();
            }
        }

        pSourceVector->push_back(pSource);
    }

    /* if there aren't any pipeline stages, there's nothing more to do */
    if (!pSourceVector->size())
        return pPipeline;

    /*
      Move filters up where possible.

      CW TODO -- move filter past projections where possible, and noting
      corresponding field renaming.
    */

    /*
      Wherever there is a match immediately following a sort, swap them.
      This means we sort fewer items.  Neither changes the documents in
      the stream, so this transformation shouldn't affect the result.

      We do this first, because then when we coalesce operators below,
      any adjacent matches will be combined.
    */
    for(size_t srcn = pSourceVector->size(), srci = 1; srci < srcn; ++srci) {
        intrusive_ptr<DocumentSource> &pSource = pSourceVector->at(srci);
        if (dynamic_cast<DocumentSourceMatch *>(pSource.get())) {
            intrusive_ptr<DocumentSource> &pPrevious = pSourceVector->at(srci - 1);
            if (dynamic_cast<DocumentSourceSort *>(pPrevious.get())) {
                /* swap this item with the previous */
                intrusive_ptr<DocumentSource> pTemp(pPrevious);
                pPrevious = pSource;
                pSource = pTemp;
            }
        }
    }

    /* Move limits in front of skips. This is more optimal for sharding
     * since currently, we can only split the pipeline at a single source
     * and it is better to limit the results coming from each shard
     */
    for(int i = pSourceVector->size() - 1; i >= 1 /* not looking at 0 */; i--) {
        DocumentSourceLimit* limit =
            dynamic_cast<DocumentSourceLimit*>((*pSourceVector)[i].get());
        DocumentSourceSkip* skip =
            dynamic_cast<DocumentSourceSkip*>((*pSourceVector)[i-1].get());
        if (limit && skip) {
            // Increase limit by skip since the skipped docs now pass through the $limit
            limit->setLimit(limit->getLimit() + skip->getSkip());
            swap((*pSourceVector)[i], (*pSourceVector)[i-1]);

            // Start at back again. This is needed to handle cases with more than 1 $limit
            // (S means skip, L means limit)
            //
            // These two would work without second pass (assuming back to front ordering)
            // SL   -> LS
            // SSL  -> LSS
            //
            // The following cases need a second pass to handle the second limit
            // SLL  -> LLS
            // SSLL -> LLSS
            // SLSL -> LLSS
            i = pSourceVector->size(); // decremented before next pass
        }
    }

    /*
      Coalesce adjacent filters where possible.  Two adjacent filters are
      equivalent to one filter whose predicate is the conjunction of the
      two original filters' predicates.  For now, capture this by giving
      any DocumentSource the option to absorb its successor; this will
      also allow adjacent projections to coalesce when possible.

      Run through the DocumentSources, and give each one the opportunity
      to coalesce with its successor.  If successful, remove the successor.

      Move all document sources to a temporary list.
    */
    SourceVector tempVector(*pSourceVector);
    pSourceVector->clear();

    /* move the first one to the final list */
    pSourceVector->push_back(tempVector[0]);

    /* run through the sources, coalescing them or keeping them */
    for(size_t tempn = tempVector.size(), tempi = 1; tempi < tempn; ++tempi) {
        /*
          If we can't coalesce the source with the last, then move it to
          the final list, and make it the new last.  (If we succeeded,
          then we're still on the same last, and there's no need to move
          or do anything with the source -- the destruction of tempVector
          will take care of the rest.)
        */
        intrusive_ptr<DocumentSource> &pLastSource = pSourceVector->back();
        intrusive_ptr<DocumentSource> &pTemp = tempVector.at(tempi);
        if (!pTemp || !pLastSource) {
            errmsg = "Pipeline received empty document as argument";
            return intrusive_ptr<Pipeline>();
        }

        if (!pLastSource->coalesce(pTemp))
            pSourceVector->push_back(pTemp);
    }

    /* optimize the elements in the pipeline */
    for(SourceVector::iterator iter(pSourceVector->begin()),
            listEnd(pSourceVector->end()); iter != listEnd; ++iter) {
        if (!*iter) {
            errmsg = "Pipeline received empty document as argument";
            return intrusive_ptr<Pipeline>();
        }
        (*iter)->optimize();
    }

    return pPipeline;
}
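/*
  A minimal standalone sketch (not part of the pipeline code above) of the
  identity the limit/skip rewrite depends on: skipping s documents and then
  taking l of them yields the same result as taking the first l + s documents
  and then skipping s -- which is why setLimit(limit + skip) is applied before
  the swap. The container and helper names below are illustrative only.
*/
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// skip s elements, then keep at most l -- the original S,L ordering.
static std::vector<int> skipThenLimit(const std::vector<int>& in, std::size_t s, std::size_t l) {
    std::vector<int> out;
    for (std::size_t i = s; i < in.size() && out.size() < l; ++i)
        out.push_back(in[i]);
    return out;
}

// keep at most l elements, then skip s -- the rewritten L,S ordering.
static std::vector<int> limitThenSkip(const std::vector<int>& in, std::size_t l, std::size_t s) {
    std::vector<int> limited(in.begin(), in.begin() + std::min(l, in.size()));
    return std::vector<int>(limited.begin() + std::min(s, limited.size()), limited.end());
}

int main() {
    std::vector<int> docs;
    for (int i = 0; i < 20; ++i)
        docs.push_back(i);

    const std::size_t skip = 5, limit = 3;
    // skip 5 then limit 3 == limit (3 + 5) then skip 5.
    assert(skipThenLimit(docs, skip, limit) == limitThenSkip(docs, limit + skip, skip));
    return 0;
}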