void SyncClusterConnection::say( Message &toSend ) {
    // A write may only proceed after a successful cluster-wide prepare().
    string errmsg;
    if ( ! prepare( errmsg ) )
        throw UserException( 13397 , (string)"SyncClusterConnection::say prepare failed: " + errmsg );

    // Fan the message out to every node, then verify each acknowledged it.
    for ( size_t node = 0; node < _conns.size(); ++node )
        _conns[node]->say( toSend );

    _checkLast();
}
/**
 * Returns the key stored at keyOffset within the given btree bucket, or an
 * empty BSONObj when keyOffset is past the bucket's last key.
 * @throws UserException(deletedBucketCode) if the bucket has been deleted
 *         (its key count equals the INVALID_N_SENTINEL marker).
 */
virtual BSONObj keyAt(const IndexCatalogEntry* btreeState, DiskLoc bucket, int keyOffset) const {
    verify(!bucket.isNull());
    const BtreeBucket<Version> *b = getBucket(btreeState,bucket);
    int n = b->getN();
    if (n == b->INVALID_N_SENTINEL) {
        throw UserException(deletedBucketCode, "keyAt bucket deleted");
    }
    // Sanity bound on the key count; 10000 is a "clearly corrupt" ceiling.
    dassert( n >= 0 && n < 10000 );
    return keyOffset >= n ? BSONObj() : b->keyNode(keyOffset).key.toBson();
}
Chunk& ChunkManager::findChunk( const BSONObj & obj ){
    // Linear scan: the first chunk whose range contains the key owns the doc.
    vector<Chunk*>::iterator it = _chunks.begin();
    for ( ; it != _chunks.end(); ++it ){
        if ( (*it)->contains( obj ) )
            return **it;
    }
    // Every shard-key value must map to some chunk; reaching this point
    // means the chunk set itself is inconsistent.
    stringstream ss;
    ss << "couldn't find a chunk which should be impossible extracted: " << _key.extractKey( obj );
    throw UserException( ss.str() );
}
void ClientInfo::newPeerRequest( const HostAndPort& peer ) {
    if ( ! _remote.hasPort() ) {
        // First request from this client: remember its endpoint.
        _remote = peer;
    }
    else if ( _remote != peer ) {
        // A ClientInfo is bound to a single remote endpoint for its lifetime.
        stringstream ss;
        ss << "remotes don't match old [" << _remote.toString() << "] new [" << peer.toString() << "]";
        throw UserException( 13134 , ss.str() );
    }
    newRequest();
}
/**
 * Appends the contents of "staged_scheme_<i>.txt" to the result stream r,
 * then deletes the staged file.
 * @param r  open output stream receiving the staged lines
 * @param i  index of the staged scheme file to consume
 * Reports the error and terminates the process if the file cannot be
 * opened or removed.
 */
void AddData(ofstream & r, int i){
    try{
        string resFileName = "staged_scheme_" + to_string((long long)i) + ".txt";
        ifstream res(resFileName);
        if (res.fail())
            throw UserException("AddData(): unable to open res file");
        string s;
        // BUGFIX: loop on getline() itself instead of !res.eof(); the old
        // form appended one spurious empty line after a file that ended in
        // a newline (eof is only set once the extra getline fails).
        while (getline(res, s)){
            r << s << endl;
        }
        res.close();
        if (remove(resFileName.c_str()) != 0)
            throw UserException("AddData: unable to remove file");
    }
    catch (UserException& e){
        cout << "error : " << e.what() << endl;
        std::system("pause");
        exit(EXIT_FAILURE);
    }
}
void SyncClusterConnection::remove( const string &ns , Query query, int flags ) {
    // A remove is a write: prepare() must succeed on the whole cluster first.
    string errmsg;
    if ( ! prepare( errmsg ) )
        throw UserException( 8020 , (string)"SyncClusterConnection::remove prepare failed: " + errmsg );

    // Issue the remove on every node, then verify each one succeeded.
    for ( size_t node = 0; node < _conns.size(); ++node )
        _conns[node]->remove( ns , query , flags );

    _checkLast();
}
// Routes writes against <db>.system.indexes in a sharded cluster.
// Index inserts fan out to every shard holding a chunk of the target
// collection; updates and deletes of index metadata are rejected.
void handleIndexWrite( int op , Request& r ) {
    DbMessage& d = r.d();

    if ( op == dbInsert ) {
        while( d.moreJSObjs() ) {
            BSONObj o = d.nextJsObj();
            // The collection the index targets is embedded in the spec.
            const char * ns = o["ns"].valuestr();
            if ( r.getConfig()->isSharded( ns ) ) {
                BSONObj newIndexKey = o["key"].embeddedObjectUserCheck();
                // A unique index is only allowed when it is the _id index,
                // not actually unique, or compatible with the shard key.
                uassert( 10205 , (string)"can't use unique indexes with sharding  ns:" + ns + " key: " + o["key"].embeddedObjectUserCheck().toString() ,
                         IndexDetails::isIdIndexPattern( newIndexKey ) ||
                         ! o["unique"].trueValue() ||
                         r.getConfig()->getChunkManager( ns )->getShardKey().uniqueAllowd( newIndexKey ) );
                ChunkManager * cm = r.getConfig()->getChunkManager( ns );
                assert( cm );
                // The index must exist on every shard that owns a chunk.
                for ( int i=0; i<cm->numChunks(); i++)
                    doWrite( op , r , cm->getChunk(i)->getShard() );
            }
            else {
                doWrite( op , r , r.primaryShard() );
            }
            r.gotInsert();
        }
    }
    else if ( op == dbUpdate ) {
        throw UserException( 8050 , "can't update system.indexes" );
    }
    else if ( op == dbDelete ) {
        // TODO
        throw UserException( 8051 , "can't delete indexes on sharded collection yet" );
    }
    else {
        log() << "handleIndexWrite invalid write op: " << op << endl;
        throw UserException( 8052 , "handleIndexWrite invalid write op" );
    }
}
// Deprecated, will move to the strategy itself Shard Request::primaryShard() const { assert( _didInit ); if ( _chunkManager ) { if ( _chunkManager->numChunks() > 1 ) throw UserException( 8060 , "can't call primaryShard on a sharded collection" ); return _chunkManager->findChunk( _chunkManager->getShardKey().globalMin() )->getShard(); } Shard s = _config->getShard( getns() ); uassert( 10194 , "can't call primaryShard on a sharded collection!" , s.ok() ); return s; }
// Inserts every document in the message, routing each to the chunk that owns
// its shard key. Auto-fills a missing _id when _id is part of the shard key,
// and retries up to maxTries times when the chunk configuration goes stale.
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    while ( d.moreJSObjs() ) {
        BSONObj o = d.nextJsObj();

        if ( ! manager->hasShardKey( o ) ) {
            bool bad = true;
            // If _id participates in the shard key, generating one may make
            // the document routable after all.
            if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }
            if ( bad ) {
                log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        // Many operations benefit from having the shard key early in the object
        o = manager->getShardKey().moveToFront(o);

        const int maxTries = 10;

        bool gotThrough = false;
        for ( int i=0; i<maxTries; i++ ) {
            try {
                ChunkPtr c = manager->findChunk( o );
                log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );
                r.gotInsert();
                if ( r.getClientInfo()->autoSplitOk() )
                    c->splitIfShould( o.objsize() );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& e ) {
                // Routing info went stale: reload the manager and retry.
                // Log loudly only once half the attempts have been burned.
                log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                r.reset();
                manager = r.getChunkManager();
                uassert(14804, "collection no longer sharded", manager);
            }
            // Linear backoff between attempts.
            sleepmillis( i * 200 );
        }

        assert( inShutdown() || gotThrough );
    }
}
double Workflow::GetExecTime ( int pNum, int type, int cores) const { try{ if (pNum < 0 || pNum > packages.size()-1) throw UserException("Workflow::GetExecTime() error. Wrong packageNum" + to_string(pNum)); return packages[pNum].GetExecTime(type, cores); } catch (UserException& e){ std::cout<<"error : " << e.what() <<endl; std::system("pause"); exit(EXIT_FAILURE); } }
// Turns on sharding for ns within this database and builds its first
// ChunkManager. Returns the existing manager when ns is already managed.
// @throws 8042 when sharding is not enabled on the db, 8043 when the
//         collection is already marked sharded.
ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ){
    if ( ! _shardingEnabled )
        throw UserException( 8042 , "db doesn't have sharding enabled" );

    scoped_lock lk( _lock );

    // NOTE: map operator[] default-inserts an empty ChunkManagerPtr on miss.
    ChunkManagerPtr info = _shards[ns];
    if ( info )
        return info;

    if ( _isSharded( ns ) )
        throw UserException( 8043 , "already sharded" );

    log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;

    // Record the collection metadata, then create and publish the chunk
    // manager that will own routing for this ns.
    _sharded[ns] = CollectionInfo( fieldsAndOrder , unique );

    info.reset( new ChunkManager( this , ns , fieldsAndOrder , unique ) );
    _shards[ns] = info;
    return info;
}
void ClusteredCursor::_checkCursor( DBClientCursor * cursor ) {
    assert( cursor );

    // A stale-config flag means our shard version is outdated; the caller
    // handles the retry after refreshing.
    if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) )
        throw StaleConfigException( _ns , "ClusteredCursor::query" );

    // An error flag means the first document describes the failure.
    if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ) {
        BSONObj err = cursor->next();
        throw UserException( err["code"].numberInt() , err["$err"].String() );
    }
}
/**
 * \brief Function to return the generic script convertor
 * \param batchType The type of the batch scheduler
 * \param scriptGenContent The content of the script to convert
 * \return The generic script convertor
 */
boost::shared_ptr<ScriptGenConvertor>
vishnuScriptGenConvertor(const int batchType, const std::string& scriptGenContent) {
  boost::shared_ptr<ScriptGenConvertor> convertor(new ScriptGenConvertor(batchType, scriptGenContent));
  std::string parse_error;
  // A negative parse result means the generic script cannot be converted.
  if (convertor->parseFile(parse_error) == -1) {
    throw UserException(ERRCODE_INVALID_PARAM,
                        "Can't generate this generic script content \n" + parse_error);
  }
  return convertor;
}
void SyncClusterConnection::say( Message &toSend, bool isRetry , string * actualServer ) {
    // Writes require a successful cluster-wide prepare() first.
    string errmsg;
    if ( ! prepare( errmsg ) )
        throw UserException( 13397 , (string)"SyncClusterConnection::say prepare failed: " + errmsg );

    // Broadcast to every node; isRetry has no effect since all nodes
    // receive the message either way.
    for ( size_t node = 0; node < _conns.size(); ++node )
        _conns[node]->say( toSend );

    // TODO: should we set actualServer??

    _checkLast();
}
/**
 * \brief Function to check the job nbNodesAndCpuPerNode
 * \param nbNodesAndNbCpuPerNode the number of nodes and cpu per node
 * \return raises an exception on error
 */
void vishnu::checkJobNbNodesAndNbCpuPerNode(const std::string& nbNodesAndCpuPerNode) {
  if (nbNodesAndCpuPerNode.empty()) {
    return; // nothing to validate
  }
  try {
    // Expected format is "<nbNodes>:<cpuPerNode>", both sides numeric.
    size_t sep = nbNodesAndCpuPerNode.find(":");
    if (sep == std::string::npos) {
      throw UserException(ERRCODE_INVALID_PARAM, ("Invalid NbNodesAndNbCpuPerNode value: "+nbNodesAndCpuPerNode));
    }
    isNumericalValue(nbNodesAndCpuPerNode.substr(0, sep));
    isNumericalValue(nbNodesAndCpuPerNode.substr(sep+1));
  } catch(UserException& ue) {
    // Normalize any validation failure into one uniform error message.
    throw UserException(ERRCODE_INVALID_PARAM, ("Invalid NbNodesAndNbCpuPerNode value: "+nbNodesAndCpuPerNode));
  }
}
/**
 * Runs cmd against db on this shard and returns the (owned) result.
 * @throws UserException(13136) when the command reports failure.
 */
BSONObj Shard::runCommand( const string& db , const BSONObj& cmd ) const {
    ScopedDbConnection conn( this );
    BSONObj res;
    bool ok = conn->runCommand( db , cmd , res );
    if ( ! ok ) {
        stringstream ss;
        ss << "runCommand (" << cmd << ") on shard (" << _name << ") failed : " << res;
        // BUGFIX: return the connection to the pool before throwing, matching
        // the scoped_ptr variant of runCommand in this file; previously the
        // pooled connection was discarded on every command failure.
        conn.done();
        throw UserException( 13136 , ss.str() );
    }
    res = res.getOwned();
    conn.done();
    return res;
}
// Routes an update against a sharded collection (legacy ShardManager path).
// The query — and, for upserts, the replacement document — must contain the
// shard key, and the update must not change which shard owns the document.
void _update( Request& r , DbMessage& d, ShardManager* manager ){
    // Wire format: int flags, then query object, then update object.
    int flags = d.pullInt();

    BSONObj query = d.nextJsObj();
    uassert( "invalid update" , d.moreJSObjs() );
    BSONObj toupdate = d.nextJsObj();

    bool upsert = flags & 1; // bit 0 of the wire flags is "upsert"

    if ( upsert && ! manager->hasShardKey( toupdate ) )
        throw UserException( "can't upsert something without shard key" );

    if ( ! manager->hasShardKey( query ) )
        throw UserException( "can't do update with query that doesn't have the shard key" );

    // A non-zero compare means the update would give the document a
    // different shard-key value, i.e. force a migration — not supported.
    if ( manager->hasShardKey( toupdate ) && manager->getShardKey().compare( query , toupdate ) )
        throw UserException( "change would move shards!" );

    Shard& s = manager->findShard( toupdate );
    doWrite( dbUpdate , r , s.getServer() );

    // Opportunistically split a shard that has grown past its threshold.
    s.splitIfShould( d.msg().data->dataLen() );
}
// findOne with special handling for write-style $cmd invocations: those are
// run on every node in the sync cluster and every node must report success.
// Reads and non-write commands fall through to the normal single-node path.
BSONObj SyncClusterConnection::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {

    if ( ns.find( ".$cmd" ) != string::npos ) {
        string cmdName = query.obj.firstElementFieldName();
        int lockType = _lockType( cmdName );

        if ( lockType > 0 ) { // write $cmd
            string errmsg;
            if ( ! prepare( errmsg ) )
                throw UserException( PrepareConfigsFailedCode , (string)"SyncClusterConnection::findOne prepare failed: " + errmsg );

            // Run the command on every node, keeping each reply (owned, so
            // it outlives the connection's buffer).
            vector<BSONObj> all;
            for ( size_t i=0; i<_conns.size(); i++ ) {
                all.push_back( _conns[i]->findOne( ns , query , 0 , queryOptions ).getOwned() );
            }

            _checkLast();

            // all[i] corresponds to _conns[i]; fail if any node errored.
            for ( size_t i=0; i<all.size(); i++ ) {
                BSONObj temp = all[i];
                if ( isOk( temp ) )
                    continue;
                stringstream ss;
                ss << "write $cmd failed on a node: " << temp.jsonString();
                ss << " " << _conns[i]->toString();
                ss << " ns: " << ns;
                ss << " cmd: " << query.toString();
                throw UserException( 13105 , ss.str() );
            }

            return all[0];
        }
    }

    return DBClientBase::findOne( ns , query , fieldsToReturn , queryOptions );
}
void SyncClusterConnection::insert( const string &ns, BSONObj obj ){ uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() , ns.find( ".system.indexes" ) != string::npos || obj["_id"].type() ); string errmsg; if ( ! prepare( errmsg ) ) throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg ); for ( size_t i=0; i<_conns.size(); i++ ){ _conns[i]->insert( ns , obj ); } _checkLast(); }
double Package::GetExecTime(int type, int cores){ try{ std::pair<int,int> typeCore = make_pair(type,cores); auto it = execTimes.find(typeCore); if (it==execTimes.end()) throw UserException("Package::GetExecTime() : combination of type " + to_string(type) + " and cores " + to_string(cores) + " not found"); return execTimes[std::make_pair(type,cores)]; } catch (UserException& e){ cout<<"error : " << e.what() <<endl; std::system("pause"); exit(EXIT_FAILURE); } }
void Model::remove( bool safe ){ uassert( 10016 , "_id isn't set - needed for remove()" , _id["_id"].type() ); ScopedDbConnection conn( modelServer() ); conn->remove( getNS() , _id ); string errmsg = ""; if ( safe ) errmsg = conn->getLastError(); conn.done(); if ( safe && errmsg.size() ) throw UserException( 9002 , (string)"error on Model::remove: " + errmsg ); }
// Builds the scheduling method identified by uid. Reports the error and
// terminates the process when uid names no known algorithm.
unique_ptr<SchedulingMethod> SchedulingFactory::GetMethod(DataInfo &d, int uid, int wfNum){
    try{
        if (uid == BELLMAN)
            return unique_ptr<SchedulingMethod>(new BellmanScheme(d, uid, wfNum));
        throw UserException("SchedulingFactory::GetMethod() error. No valid algorithm found. Current algorithm uid = " + to_string(uid));
    }
    catch (UserException& e){
        cout << "error : " << e.what() << endl;
        std::system("pause");
        exit(EXIT_FAILURE);
    }
}
// return vector with packages from which pNum depends void Workflow::GetInput(int pNum, vector<int>& in) const{ try{ if (pNum < 0 || pNum > matrix.size()-1) throw UserException("Workflow::GetInput() error. Wrong package number"); for (int i = 0; i < matrix.size(); i++){ if (matrix[i][pNum]==1) in.push_back(i); } } catch (UserException& e){ std::cout<<"error : " << e.what() <<endl; std::system("pause"); exit(EXIT_FAILURE); } }
BSONObj Shard::runCommand( const string& db , const BSONObj& cmd ) const {
    scoped_ptr<ScopedDbConnection> conn(
        ScopedDbConnection::getScopedDbConnection( getConnString() ) );

    BSONObj res;
    if ( ! conn->get()->runCommand( db , cmd , res ) ) {
        stringstream ss;
        ss << "runCommand (" << cmd << ") on shard (" << _name << ") failed : " << res;
        // Return the connection to the pool before raising the failure.
        conn->done();
        throw UserException( 13136 , ss.str() );
    }

    // Own the result before the connection (and its buffer) goes away.
    res = res.getOwned();
    conn->done();
    return res;
}
void _insert( Request& r , DbMessage& d, ShardManager* manager ){
    while ( d.moreJSObjs() ){
        BSONObj obj = d.nextJsObj();

        // Without a shard key the document cannot be routed at all.
        if ( ! manager->hasShardKey( obj ) ){
            log() << "tried to insert object without shard key: " << r.getns() << "  " << obj << endl;
            throw UserException( "tried to insert object without shard key" );
        }

        // Route the document to the shard whose range contains its key.
        Shard& target = manager->findShard( obj );
        log(4) << "  server:" << target.getServer() << " " << obj << endl;
        insert( target.getServer() , r.getns() , obj );

        // Growing shards are split opportunistically as data arrives.
        target.splitIfShould( obj.objsize() );
    }
}
void SyncClusterConnection::insert( const string &ns, BSONObj obj , int flags) { uassert(13119, (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString(), NamespaceString(ns).coll == "system.indexes" || obj["_id"].type()); string errmsg; if ( ! prepare( errmsg ) ) throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg ); for ( size_t i=0; i<_conns.size(); i++ ) { _conns[i]->insert( ns , obj , flags); } _checkLast(); }
// Legacy sharded insert: routes each document to the chunk owning its shard
// key, auto-generating _id when it is part of the shard key, and retrying
// up to 10 times when the chunk configuration goes stale.
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    while ( d.moreJSObjs() ){
        BSONObj o = d.nextJsObj();

        if ( ! manager->hasShardKey( o ) ){
            bool bad = true;
            // If _id participates in the shard key, generating one may make
            // the document routable after all.
            if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }
            if ( bad ){
                log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        bool gotThrough = false;
        for ( int i=0; i<10; i++ ){
            try {
                ChunkPtr c = manager->findChunk( o );
                log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );
                r.gotInsert();
                c->splitIfShould( o.objsize() );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& ){
                // Routing info went stale mid-insert: reload and retry with
                // the linear backoff below.
                log(1) << "retrying insert because of StaleConfigException: " << o << endl;
                r.reset();
                manager = r.getChunkManager();
            }
            sleepmillis( i * 200 );
        }
        assert( gotThrough );
    }
}
// Fetches both the key and the record location at keyOffset within a btree
// bucket. Past-the-end offsets yield an empty key and a null DiskLoc.
// Throws UserException(deletedBucketCode) if the bucket has been deleted
// (its key count equals the INVALID_N_SENTINEL marker).
virtual void keyAndRecordAt(DiskLoc bucket, int keyOffset, BSONObj* keyOut, DiskLoc* recordOut) const {
    verify(!bucket.isNull());
    const BtreeBucket<Version> *b = bucket.btree<Version>();

    int n = b->getN();
    if (n == b->INVALID_N_SENTINEL) {
        throw UserException(deletedBucketCode, "keyAt bucket deleted");
    }
    // Sanity bound on the key count; 10000 is a "clearly corrupt" ceiling.
    dassert( n >= 0 && n < 10000 );

    if (keyOffset >= n) {
        *keyOut = BSONObj();
        *recordOut = DiskLoc();
    }
    else {
        *keyOut = b->keyNode(keyOffset).key.toBson();
        *recordOut = b->keyNode(keyOffset).recordLoc;
    }
}
// Tries the query against each node in turn and returns the first cursor
// obtained. Per-node failures are deliberately swallowed (best-effort
// failover across the cluster); 8002 is thrown only if every node fails.
auto_ptr<DBClientCursor> SyncClusterConnection::_queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
    for ( size_t i=0; i<_conns.size(); i++ ){
        try {
            auto_ptr<DBClientCursor> cursor =
                _conns[i]->query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
            if ( cursor.get() )
                return cursor;
            log() << "query failed to: " << _conns[i]->toString() << " no data" << endl;
        }
        catch ( ... ){
            // Intentional catch-all: a down node must not abort the scan of
            // the remaining nodes.
            log() << "query failed to: " << _conns[i]->toString() << " exception" << endl;
        }
    }
    throw UserException( 8002 , "all servers down!" );
}
/**
 * Authenticates the tool's connection with the stored credentials. Tries the
 * requested db (defaulting to _db) first, then falls back to "admin".
 * No-op when no username/password was supplied.
 * @throws UserException(9997) on failure, carrying the error from the
 *         admin-db attempt.
 */
void Tool::auth( string dbname ){
    if ( ! dbname.size() )
        dbname = _db;

    // No credentials supplied: nothing to do.
    if ( ! ( _username.size() || _password.size() ) )
        return;

    string errmsg;
    if ( _conn->auth( dbname , _username , _password , errmsg ) )
        return;

    // try against the admin db
    // (removed unused local `err2`; errmsg is intentionally reused so the
    // thrown message reflects the final, admin-db attempt)
    if ( _conn->auth( "admin" , _username , _password , errmsg ) )
        return;

    throw UserException( 9997 , (string)"auth failed: " + errmsg );
}