S2NearCursor::S2NearCursor(const BSONObj &keyPattern, const IndexDetails *details,
                           const BSONObj &query, const vector<GeoQueryField> &fields,
                           const S2IndexingParams &params, int numWanted,
                           double maxDistance)
    : _details(details), _fields(fields), _params(params), _keyPattern(keyPattern),
      _numToReturn(numWanted), _maxDistance(maxDistance) {
    BSONObjBuilder geoFieldsToNuke;
    for (size_t i = 0; i < _fields.size(); ++i) {
        geoFieldsToNuke.append(_fields[i].field, "");
    }
    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    _filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false);
    _matcher.reset(new CoveredIndexMatcher(_filteredQuery, keyPattern));

    // More indexing machinery.
    BSONObjBuilder specBuilder;
    BSONObjIterator specIt(_keyPattern);
    while (specIt.more()) {
        BSONElement e = specIt.next();
        specBuilder.append(e.fieldName(), 1);
    }
    BSONObj spec = specBuilder.obj();
    _specForFRV = IndexSpec(spec);

    // Start with a conservative _radiusIncrement.
    _radiusIncrement = S2::kAvgEdge.GetValue(_params.finestIndexedLevel) * _params.radius;
    _innerRadius = _outerRadius = 0;
    // Set up _outerRadius with proper checks (maybe maxDistance is really small?)
    nextAnnulus();
}
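// A minimal sketch of the exclusion call above, assuming the classic BSONObj
// API; the field names and values are illustrative only. With a false second
// argument, filterFieldsUndotted() drops the listed top-level fields and keeps
// the rest, so only the residual (non-geo) predicate reaches the matcher.
static BSONObj stripGeoFieldsExample() {
    BSONObj query = BSON("geo" << BSON("$near" << BSON_ARRAY(0 << 0))
                         << "category" << "cafe");
    BSONObj toNuke = BSON("geo" << "");
    // Result: { category: "cafe" }
    return query.filterFieldsUndotted(toNuke, false);
}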
// Entry point for a search.
virtual shared_ptr<Cursor> newCursor(const BSONObj& query, const BSONObj& order,
                                     int numWanted) const {
    vector<QueryGeometry> regions;
    double maxDistance = DBL_MAX;
    bool isNear = false;
    bool isIntersect = false;

    // Go through the fields that we index, and for each geo one, make a QueryGeometry
    // object for the S2Cursor class to do intersection testing/cover generating with.
    for (size_t i = 0; i < _fields.size(); ++i) {
        const IndexedField &field = _fields[i];
        if (IndexedField::GEO != field.type) { continue; }

        BSONElement e = query.getFieldDotted(field.name);
        if (e.eoo()) { continue; }
        if (!e.isABSONObj()) { continue; }
        BSONObj obj = e.Obj();

        QueryGeometry geoQueryField(field.name);
        if (parseLegacy(obj, &geoQueryField, &isNear, &isIntersect, &maxDistance)) {
            regions.push_back(geoQueryField);
        } else if (parseQuery(obj, &geoQueryField, &isNear, &isIntersect, &maxDistance)) {
            regions.push_back(geoQueryField);
        } else {
            uasserted(16535, "can't parse query for *2d geo search: " + obj.toString());
        }
    }

    if (isNear && isIntersect) {
        uasserted(16474, "Can't do both near and intersect, query: " + query.toString());
    }

    // I copied this from 2d.cpp.  Guard against perversion.
    if (numWanted < 0) numWanted *= -1;
    if (0 == numWanted) numWanted = INT_MAX;

    BSONObjBuilder geoFieldsToNuke;
    for (size_t i = 0; i < _fields.size(); ++i) {
        const IndexedField &field = _fields[i];
        if (IndexedField::GEO != field.type) { continue; }
        geoFieldsToNuke.append(field.name, "");
    }
    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    BSONObj filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false);

    if (isNear) {
        S2NearCursor *cursor = new S2NearCursor(keyPattern(), getDetails(), filteredQuery,
                                                regions, _params, numWanted, maxDistance);
        return shared_ptr<Cursor>(cursor);
    } else {
        // Default to intersect.
        S2Cursor *cursor = new S2Cursor(keyPattern(), getDetails(), filteredQuery,
                                        regions, _params, numWanted);
        return shared_ptr<Cursor>(cursor);
    }
}
void S2SimpleCursor::seek(const BSONObj& query, const vector<GeoQuery>& regions) {
    _nscanned = 0;
    _matchTested = 0;
    _geoTested = 0;
    _fields = regions;
    _seen = unordered_set<DiskLoc, DiskLoc::Hasher>();

    BSONObjBuilder geoFieldsToNuke;
    for (size_t i = 0; i < _fields.size(); ++i) {
        geoFieldsToNuke.append(_fields[i].getField(), "");
    }
    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    _filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false);

    BSONObjBuilder specBuilder;
    BSONObjIterator specIt(_descriptor->keyPattern());
    while (specIt.more()) {
        BSONElement e = specIt.next();
        // Checked in AccessMethod already, so we know this spec has only numbers and 2dsphere
        if (e.type() == String) {
            specBuilder.append(e.fieldName(), 1);
        } else {
            specBuilder.append(e.fieldName(), e.numberInt());
        }
    }
    BSONObj spec = specBuilder.obj();

    BSONObj frsObj;
    BSONObjBuilder frsObjBuilder;
    frsObjBuilder.appendElements(_filteredQuery);

    S2RegionCoverer coverer;
    for (size_t i = 0; i < _fields.size(); ++i) {
        vector<S2CellId> cover;
        double area = _fields[i].getRegion().GetRectBound().Area();
        S2SearchUtil::setCoverLimitsBasedOnArea(area, &coverer, _params.coarsestIndexedLevel);
        coverer.GetCovering(_fields[i].getRegion(), &cover);
        uassert(16759, "No cover ARGH?!", cover.size() > 0);
        _cellsInCover = cover.size();
        BSONObj fieldRange = S2SearchUtil::coverAsBSON(cover, _fields[i].getField(),
                                                       _params.coarsestIndexedLevel);
        frsObjBuilder.appendElements(fieldRange);
    }
    frsObj = frsObjBuilder.obj();

    FieldRangeSet frs(_descriptor->parentNS().c_str(), frsObj, false, false);
    shared_ptr<FieldRangeVector> frv(new FieldRangeVector(frs, spec, 1));
    _btreeCursor.reset(BtreeCursor::make(nsdetails(_descriptor->parentNS()),
                                         _descriptor->getOnDisk(), frv, 0, 1));
    next();
}
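// A hedged sketch of the covering step above, using the old S2 geometry API
// this codebase vendors. The cap location, radius, and cell limits here are
// illustrative assumptions; the real limits come from
// S2SearchUtil::setCoverLimitsBasedOnArea().
static void coverSketch() {
    S2Cap cap = S2Cap::FromAxisAngle(S2LatLng::FromDegrees(40.74, -73.99).ToPoint(),
                                     S1Angle::Degrees(0.5));
    S2RegionCoverer coverer;
    coverer.set_min_level(2);    // coarsest cell allowed in the cover
    coverer.set_max_level(12);   // finest cell allowed in the cover
    coverer.set_max_cells(8);    // keep the cover small
    vector<S2CellId> cover;
    coverer.GetCovering(cap, &cover);
    // Each S2CellId in 'cover' becomes a range on the 2dsphere index key,
    // which is what coverAsBSON() encodes for the FieldRangeSet.
}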
void S2NearIndexCursor::seek(const BSONObj& query, const NearQuery& nearQuery,
                             const vector<GeoQuery>& regions) {
    _indexedGeoFields = regions;
    _nearQuery = nearQuery;
    _returnedDistance = 0;
    _nearFieldIndex = 0;
    _stats = Stats();
    _returned = unordered_set<DiskLoc, DiskLoc::Hasher>();
    _results = priority_queue<Result>();

    BSONObjBuilder geoFieldsToNuke;
    for (size_t i = 0; i < _indexedGeoFields.size(); ++i) {
        geoFieldsToNuke.append(_indexedGeoFields[i].getField(), "");
    }
    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    _filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false);

    // More indexing machinery.
    BSONObjBuilder specBuilder;
    BSONObjIterator specIt(_descriptor->keyPattern());
    while (specIt.more()) {
        BSONElement e = specIt.next();
        // Checked in AccessMethod already, so we know this spec has only numbers and 2dsphere
        if (e.type() == String) {
            specBuilder.append(e.fieldName(), 1);
        } else {
            specBuilder.append(e.fieldName(), e.numberInt());
        }
    }
    _specForFRV = specBuilder.obj();

    specIt = BSONObjIterator(_descriptor->keyPattern());
    while (specIt.more()) {
        if (specIt.next().fieldName() == _nearQuery.field) { break; }
        ++_nearFieldIndex;
    }

    _minDistance = max(0.0, _nearQuery.minDistance);
    // _outerRadius can't be greater than (pi * r) or we wrap around the opposite
    // side of the world.
    _maxDistance = min(M_PI * _params.radius, _nearQuery.maxDistance);
    uassert(16892, "$minDistance too large", _minDistance < _maxDistance);

    // Start with a conservative _radiusIncrement.
    _radiusIncrement = 5 * S2::kAvgEdge.GetValue(_params.finestIndexedLevel) * _params.radius;
    _innerRadius = _outerRadius = _minDistance;
    // We might want to adjust the sizes of our coverings if our search
    // isn't local to the start point.
    // Set up _outerRadius with proper checks (maybe maxDistance is really small?)
    nextAnnulus();
    fillResults();
}
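// A simplified sketch of the annulus strategy that seek() primes, expressed as
// a free function over copies of the relevant members; this is an assumption
// about the shape of the search loop, not the actual body of fillResults().
// Each pass scans the ring between the inner and outer radii, then both radii
// grow by the increment until results are buffered or the ring passes the cap.
static void annulusSketch(double innerRadius, double outerRadius,
                          double radiusIncrement, double maxDistance) {
    while (innerRadius < maxDistance) {
        // Cover the annulus [innerRadius, outerRadius], walk the btree over
        // that cover, and buffer hits in a distance-ordered priority queue...
        innerRadius = outerRadius;
        outerRadius = std::min(outerRadius + radiusIncrement, maxDistance);
    }
}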
CoveredIndexMatcher::CoveredIndexMatcher(const BSONObj &jsobj, const BSONObj &indexKeyPattern)
    : _keyMatcher(jsobj.filterFieldsUndotted(indexKeyPattern, true), indexKeyPattern),
      _docMatcher(jsobj) {
    // We only need to fetch the full record when some part of the query can't
    // be answered from the index key alone: the doc matcher wants fields the
    // key matcher doesn't cover, or the query uses $exists (an index key may
    // store null for a missing field, so the key can't distinguish the two).
    _needRecord = !(_docMatcher.keyMatch() &&
                    _keyMatcher.jsobj.nFields() == _docMatcher.jsobj.nFields() &&
                    !_keyMatcher.hasType(BSONObj::opEXISTS));
}
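// A hedged example of the covered-matching split above, with illustrative
// values: for an index { a: 1 } and query { a: 5, b: 3 }, the key matcher
// receives only the indexed part (filterFieldsUndotted with true keeps just
// the fields named in the key pattern), while the doc matcher keeps the whole
// predicate, so the matcher must fetch the record: { b: 3 } has no key data.
static void coveredMatcherExample() {
    BSONObj keyPattern = BSON("a" << 1);
    BSONObj query = BSON("a" << 5 << "b" << 3);
    BSONObj keyPart = query.filterFieldsUndotted(keyPattern, true);  // { a: 5 }
    CoveredIndexMatcher m(query, keyPattern);
    // Here _needRecord ends up true: the key alone can't check b.
}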
S2Cursor::S2Cursor(const BSONObj &keyPattern, const IndexDetails *details,
                   const BSONObj &query, const vector<GeoQuery> &fields,
                   const S2IndexingParams &params, int numWanted)
    : _details(details), _fields(fields), _params(params), _keyPattern(keyPattern),
      _numToReturn(numWanted), _nscanned(0), _matchTested(0), _geoTested(0) {
    BSONObjBuilder geoFieldsToNuke;
    for (size_t i = 0; i < _fields.size(); ++i) {
        geoFieldsToNuke.append(_fields[i].getField(), "");
    }
    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    _filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false);
    _matcher.reset(new CoveredIndexMatcher(_filteredQuery, keyPattern));
}
S2NearCursor::S2NearCursor(const BSONObj &keyPattern, const IndexDetails *details,
                           const BSONObj &query, const NearQuery &nearQuery,
                           const vector<GeoQuery> &indexedGeoFields,
                           const S2IndexingParams &params)
    : _details(details), _nearQuery(nearQuery), _indexedGeoFields(indexedGeoFields),
      _params(params), _keyPattern(keyPattern), _nearFieldIndex(0), _returnedDistance(0) {
    BSONObjBuilder geoFieldsToNuke;
    for (size_t i = 0; i < _indexedGeoFields.size(); ++i) {
        geoFieldsToNuke.append(_indexedGeoFields[i].getField(), "");
    }
    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    _filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false);
    _matcher.reset(new CoveredIndexMatcher(_filteredQuery, keyPattern));

    // More indexing machinery.
    BSONObjBuilder specBuilder;
    BSONObjIterator specIt(_keyPattern);
    while (specIt.more()) {
        BSONElement e = specIt.next();
        // Checked in AccessMethod already, so we know this spec has only numbers and 2dsphere
        if (e.type() == String) {
            specBuilder.append(e.fieldName(), 1);
        } else {
            specBuilder.append(e.fieldName(), e.numberInt());
        }
    }
    BSONObj spec = specBuilder.obj();
    _specForFRV = IndexSpec(spec);

    specIt = BSONObjIterator(_keyPattern);
    while (specIt.more()) {
        if (specIt.next().fieldName() == _nearQuery.field) { break; }
        ++_nearFieldIndex;
    }

    // _outerRadius can't be greater than (pi * r) or we wrap around the opposite
    // side of the world.
    _maxDistance = min(M_PI * _params.radius, _nearQuery.maxDistance);

    // Start with a conservative _radiusIncrement.
    _radiusIncrement = 5 * S2::kAvgEdge.GetValue(_params.finestIndexedLevel) * _params.radius;
    _innerRadius = _outerRadius = 0;
    // We might want to adjust the sizes of our coverings if our search
    // isn't local to the start point.
    // Set up _outerRadius with proper checks (maybe maxDistance is really small?)
    nextAnnulus();
}
Status S2IndexCursor::seek(const BSONObj &position) {
    vector<GeoQuery> regions;
    bool isNearQuery = false;
    NearQuery nearQuery;

    // Go through the fields that we index, and for each geo one, make
    // a GeoQuery object for the S2*Cursor class to do intersection
    // testing/cover generating with.
    BSONObjIterator keyIt(_descriptor->keyPattern());
    while (keyIt.more()) {
        BSONElement keyElt = keyIt.next();

        if (keyElt.type() != String || IndexNames::GEO_2DSPHERE != keyElt.valuestr()) {
            continue;
        }

        BSONElement e = position.getFieldDotted(keyElt.fieldName());
        if (e.eoo()) { continue; }
        if (!e.isABSONObj()) { continue; }
        BSONObj obj = e.Obj();

        if (nearQuery.parseFrom(obj, _params.radius)) {
            if (isNearQuery) {
                return Status(ErrorCodes::BadValue,
                              "Only one $near clause allowed: " + position.toString(),
                              16685);
            }
            isNearQuery = true;
            nearQuery.field = keyElt.fieldName();
            continue;
        }

        GeoQuery geoQueryField(keyElt.fieldName());
        if (!geoQueryField.parseFrom(obj)) {
            return Status(ErrorCodes::BadValue,
                          "can't parse query (2dsphere): " + obj.toString(),
                          16535);
        }
        if (!geoQueryField.hasS2Region()) {
            return Status(ErrorCodes::BadValue,
                          "Geometry unsupported: " + obj.toString(),
                          16684);
        }
        regions.push_back(geoQueryField);
    }

    // Remove all the indexed geo regions from the query. The s2*cursor will
    // instead create a covering for that key to speed up the search.
    //
    // One thing to note is that we create coverings for indexed geo keys during
    // a near search to speed it up further.
    BSONObjBuilder geoFieldsToNuke;
    if (isNearQuery) {
        geoFieldsToNuke.append(nearQuery.field, "");
    }
    for (size_t i = 0; i < regions.size(); ++i) {
        geoFieldsToNuke.append(regions[i].getField(), "");
    }
    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    BSONObj filteredQuery = position.filterFieldsUndotted(geoFieldsToNuke.obj(), false);

    if (isNearQuery) {
        S2NearIndexCursor* nearCursor = new S2NearIndexCursor(_descriptor, _params);
        _underlyingCursor.reset(nearCursor);
        nearCursor->seek(filteredQuery, nearQuery, regions);
    } else {
        S2SimpleCursor* simpleCursor = new S2SimpleCursor(_descriptor, _params);
        _underlyingCursor.reset(simpleCursor);
        simpleCursor->seek(filteredQuery, regions);
    }

    return Status::OK();
}
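// An illustrative pair of query shapes this seek() dispatches on; the field
// name "loc" and the coordinates are assumptions. The first parses as a
// NearQuery and takes the S2NearIndexCursor path; the second parses as a
// GeoQuery and takes the S2SimpleCursor path.
static void seekDispatchExample(S2IndexCursor* cursor) {
    BSONObj nearQuery = fromjson(
        "{ loc: { $near: { $geometry: { type: 'Point', coordinates: [ -73.99, 40.74 ] } } } }");
    BSONObj intersectQuery = fromjson(
        "{ loc: { $geoIntersects: { $geometry: { type: 'Point', coordinates: [ -73.99, 40.74 ] } } } }");
    Status nearStatus = cursor->seek(nearQuery);            // near path
    Status intersectStatus = cursor->seek(intersectQuery);  // intersect path
}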
void WriteBackListener::run() {
    int secsToSleep = 0;
    scoped_ptr<ChunkVersion> lastNeededVersion;
    int lastNeededCount = 0;
    bool needsToReloadShardInfo = false;

    while (!inShutdown()) {
        if (!Shard::isAShardNode(_addr)) {
            LOG(1) << _addr << " is not a shard node" << endl;
            sleepsecs(60);
            continue;
        }

        try {
            if (needsToReloadShardInfo) {
                // It's possible this shard was removed
                Shard::reloadShardInfo();
                needsToReloadShardInfo = false;
            }

            scoped_ptr<ScopedDbConnection> conn(
                    ScopedDbConnection::getInternalScopedDbConnection(_addr));

            BSONObj result;
            {
                BSONObjBuilder cmd;
                cmd.appendOID("writebacklisten", &serverID);  // Command will block for data
                if (!conn->get()->runCommand("admin", cmd.obj(), result)) {
                    result = result.getOwned();
                    log() << "writebacklisten command failed! " << result << endl;
                    conn->done();
                    continue;
                }
            }
            conn->done();

            LOG(1) << "writebacklisten result: " << result << endl;

            BSONObj data = result.getObjectField("data");
            if (data.getBoolField("writeBack")) {
                string ns = data["ns"].valuestrsafe();

                ConnectionIdent cid("", 0);
                OID wid;
                if (data["connectionId"].isNumber() && data["id"].type() == jstOID) {
                    string s = "";
                    if (data["instanceIdent"].type() == String)
                        s = data["instanceIdent"].String();
                    cid = ConnectionIdent(s, data["connectionId"].numberLong());
                    wid = data["id"].OID();
                } else {
                    warning() << "mongos/mongod version mismatch (1.7.5 is the split)" << endl;
                }

                int len;  // not used, but needed for next call
                Message msg((void*)data["msg"].binData(len), false);
                massert(10427, "invalid writeback message", msg.header()->valid());

                DBConfigPtr db = grid.getDBConfig(ns);
                ChunkVersion needVersion = ChunkVersion::fromBSON(data, "version");

                //
                // TODO: Refactor the sharded strategy to correctly handle all sharding state changes itself,
                // we can't rely on WBL to do this for us b/c anything could reset our state in-between.
                // We should always reload here for efficiency when possible, but staleness is also caught in the
                // loop below.
                //

                ChunkManagerPtr manager;
                ShardPtr primary;
                db->getChunkManagerOrPrimary(ns, manager, primary);

                ChunkVersion currVersion;
                if (manager) currVersion = manager->getVersion();

                LOG(1) << "connectionId: " << cid << " writebackId: " << wid
                       << " needVersion : " << needVersion.toString()
                       << " mine : " << currVersion.toString() << endl;

                LOG(1) << msg.toString() << endl;

                //
                // We should reload only if we need to update our version to be compatible *and* we
                // haven't already done so. This avoids lots of reloading when we remove/add a sharded collection
                //

                bool alreadyReloaded = lastNeededVersion &&
                                       lastNeededVersion->isEquivalentTo(needVersion);

                if (alreadyReloaded) {
                    LOG(1) << "wbl already reloaded config information for version "
                           << needVersion << ", at version " << currVersion << endl;
                } else if (lastNeededVersion) {
                    log() << "new version change detected to " << needVersion.toString()
                          << ", " << lastNeededCount << " writebacks processed at "
                          << lastNeededVersion->toString() << endl;
                    lastNeededCount = 0;
                }

                //
                // Set our lastNeededVersion for next time
                //

                lastNeededVersion.reset(new ChunkVersion(needVersion));
                lastNeededCount++;

                //
                // Determine if we should reload, if so, reload
                //

                bool shouldReload = !needVersion.isWriteCompatibleWith(currVersion) &&
                                    !alreadyReloaded;

                if (shouldReload && currVersion.isSet() && needVersion.isSet() &&
                    currVersion.hasCompatibleEpoch(needVersion)) {
                    //
                    // If we disagree about versions only, reload the chunk manager
                    //
                    db->getChunkManagerIfExists(ns, true);
                } else if (shouldReload) {
                    //
                    // If we disagree about anything else, reload the full db
                    //
                    warning() << "reloading config data for " << db->getName() << ", "
                              << "wanted version " << needVersion.toString()
                              << " but currently have version " << currVersion.toString() << endl;
                    db->reload();
                }

                // do request and then call getLastError
                // we have to call getLastError so we can return the right fields to
                // the user if they decide to call getLastError

                BSONObj gle;
                int attempts = 0;
                while (true) {
                    attempts++;

                    try {
                        Request r(msg, 0);
                        r.init();
                        r.d().reservedField() |= Reserved_FromWriteback;

                        ClientInfo* ci = r.getClientInfo();
                        if (!noauth) {
                            ci->getAuthorizationManager()->grantInternalAuthorization(
                                    "_writebackListener");
                        }
                        ci->noAutoSplit();

                        r.process(attempts);

                        ci->newRequest();  // this so we flip prev and cur shards

                        BSONObjBuilder b;
                        string errmsg;
                        if (!ci->getLastError("admin", BSON("getLastError" << 1), b,
                                              errmsg, true)) {
                            b.appendBool("commandFailed", true);

                            if (!b.hasField("errmsg")) {
                                b.append("errmsg", errmsg);
                                gle = b.obj();
                            } else if (errmsg.size() > 0) {
                                // Rebuild GLE object with errmsg
                                // TODO: Make this less clumsy by improving GLE interface
                                gle = b.obj();

                                if (gle["errmsg"].type() == String) {
                                    BSONObj gleNoErrmsg =
                                            gle.filterFieldsUndotted(BSON("errmsg" << 1), false);
                                    BSONObjBuilder bb;
                                    bb.appendElements(gleNoErrmsg);
                                    bb.append("errmsg", gle["errmsg"].String() +
                                                        " ::and:: " + errmsg);
                                    gle = bb.obj().getOwned();
                                }
                            }
                        } else {
                            gle = b.obj();
                        }

                        if (gle["code"].numberInt() == 9517) {
                            log() << "new version change detected, " << lastNeededCount
                                  << " writebacks processed previously" << endl;

                            lastNeededVersion.reset();
                            lastNeededCount = 1;

                            log() << "writeback failed because of stale config, retrying attempts: "
                                  << attempts << endl;
                            LOG(1) << "writeback error : " << gle << endl;

                            //
                            // Bringing this in line with the similar retry logic elsewhere
                            //
                            // TODO: Reloading the chunk manager may not help if we dropped a
                            // collection, but we don't actually have that info in the writeback
                            // error
                            //

                            if (attempts <= 2) {
                                db->getChunkManagerIfExists(ns, true);
                            } else {
                                versionManager.forceRemoteCheckShardVersionCB(ns);
                                sleepsecs(attempts - 1);
                            }

                            uassert(15884,
                                    str::stream() << "Could not reload chunk manager after "
                                                  << attempts << " attempts.",
                                    attempts <= 4);

                            continue;
                        }

                        ci->clearSinceLastGetError();
                    } catch (DBException& e) {
                        error() << "error processing writeback: " << e << endl;
                        BSONObjBuilder b;
                        e.getInfo().append(b, "err", "code");
                        gle = b.obj();
                    }

                    break;
                }

                {
                    scoped_lock lk(_seenWritebacksLock);
                    WBStatus& s = _seenWritebacks[cid];
                    s.id = wid;
                    s.gle = gle;
                }
            } else if (result["noop"].trueValue()) {
                // no-op
            } else {
                log() << "unknown writeBack result: " << result << endl;
            }

            secsToSleep = 0;
            continue;
        } catch (std::exception& e) {
            // Attention! Do not call any method that would throw an exception
            // (or assert) in this block.
            if (inShutdown()) {
                // we're shutting down, so just clean up
                return;
            }

            log() << "WriteBackListener exception : " << e.what() << endl;
            needsToReloadShardInfo = true;
        } catch (...) {
            log() << "WriteBackListener uncaught exception!" << endl;
        }

        secsToSleep++;
        sleepsecs(secsToSleep);
        if (secsToSleep > 10) secsToSleep = 0;
    }

    log() << "WriteBackListener exiting : address no longer in cluster " << _addr;
}
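// A minimal sketch of the GLE errmsg merge in the retry loop above, with
// illustrative values: filterFieldsUndotted strips the old errmsg, and the
// rebuilt object carries both messages joined by " ::and:: ".
static BSONObj mergeErrmsgExample() {
    BSONObj gle = BSON("ok" << 1 << "errmsg" << "stale config");
    string errmsg = "writeback failed";
    BSONObj gleNoErrmsg = gle.filterFieldsUndotted(BSON("errmsg" << 1), false);
    BSONObjBuilder bb;
    bb.appendElements(gleNoErrmsg);
    bb.append("errmsg", gle["errmsg"].String() + " ::and:: " + errmsg);
    // Result: { ok: 1, errmsg: "stale config ::and:: writeback failed" }
    return bb.obj().getOwned();
}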
void omRemoveBusinessCommand::_generateResultInfo( list<BSONObj> &configList,
                                                   BSONArray &resultInfo )
{
   BSONObj filter ;
   BSONArrayBuilder resultInfoBuilder ;
   list<BSONObj>::iterator iter ;

   if ( OM_BUSINESS_SEQUOIADB == _businessType )
   {
      filter = BSON( OM_BSON_SVCNAME << "" <<
                     OM_BSON_ROLE << "" <<
                     OM_BSON_DATAGROUPNAME << "" ) ;
   }
   else if ( OM_BUSINESS_ZOOKEEPER == _businessType )
   {
      filter = BSON( OM_ZOO_CONF_DETAIL_ZOOID << "" ) ;
   }
   else if ( OM_BUSINESS_SEQUOIASQL_OLAP == _businessType )
   {
      filter = BSON( OM_SSQL_OLAP_CONF_ROLE << "" ) ;
   }
   else if ( OM_BUSINESS_SEQUOIASQL_OLTP == _businessType )
   {
      filter = BSON( OM_BSON_PORT << "" ) ;
   }

   for ( iter = configList.begin(); iter != configList.end(); ++iter )
   {
      string hostName ;
      BSONObj configInfo ;

      hostName = iter->getStringField( OM_CONFIGURE_FIELD_HOSTNAME ) ;
      configInfo = iter->getObjectField( OM_CONFIGURE_FIELD_CONFIG ) ;

      {
         BSONObjIterator configIter( configInfo ) ;

         while ( configIter.more() )
         {
            BSONObjBuilder resultEleBuilder ;
            BSONElement ele = configIter.next() ;
            BSONObj tmpNodeInfo = ele.embeddedObject() ;
            // TRUE means keep only the fields named in 'filter'
            BSONObj nodeInfo = tmpNodeInfo.filterFieldsUndotted( filter, TRUE ) ;

            resultEleBuilder.append( OM_TASKINFO_FIELD_HOSTNAME, hostName ) ;
            resultEleBuilder.appendElements( nodeInfo ) ;
            resultEleBuilder.append( OM_TASKINFO_FIELD_STATUS,
                                     OM_TASK_STATUS_INIT ) ;
            resultEleBuilder.append( OM_TASKINFO_FIELD_STATUS_DESC,
                                     getTaskStatusStr( OM_TASK_STATUS_INIT ) ) ;
            resultEleBuilder.append( OM_REST_RES_RETCODE, SDB_OK ) ;
            resultEleBuilder.append( OM_REST_RES_DETAIL, "" ) ;
            {
               BSONArrayBuilder tmpEmptyBuilder ;
               resultEleBuilder.append( OM_TASKINFO_FIELD_FLOW,
                                        tmpEmptyBuilder.arr() ) ;
            }

            resultInfoBuilder.append( resultEleBuilder.obj() ) ;
         }
      }
   }

   resultInfo = resultInfoBuilder.arr() ;
}
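// A minimal sketch of the inclusion semantics used above, with illustrative
// field names and values: passing TRUE keeps only the fields named in the
// filter, the mirror image of the exclusion calls ( false ) in the snippets
// earlier in this section.
static BSONObj keepOnlyFilteredExample()
{
   BSONObj node = BSON( "svcname" << "11810" <<
                        "role" << "data" <<
                        "dbpath" << "/opt/sequoiadb/database" ) ;
   BSONObj filter = BSON( "svcname" << "" << "role" << "" ) ;

   // Result: { svcname: "11810", role: "data" }
   return node.filterFieldsUndotted( filter, TRUE ) ;
}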
INT32 omRemoveBusinessCommand::_generateTaskConfig( list<BSONObj> &configList,
                                                    BSONObj &taskConfig )
{
   INT32 rc = SDB_OK ;
   BSONObj filter ;
   BSONObjBuilder taskConfigBuilder ;
   BSONArrayBuilder configBuilder ;
   list<BSONObj>::iterator iter ;
   omDatabaseTool dbTool( _cb ) ;

   filter = BSON( OM_HOST_FIELD_NAME << "" <<
                  OM_HOST_FIELD_IP << "" <<
                  OM_HOST_FIELD_CLUSTERNAME << "" <<
                  OM_HOST_FIELD_USER << "" <<
                  OM_HOST_FIELD_PASSWD << "" <<
                  OM_HOST_FIELD_SSHPORT << "" ) ;

   taskConfigBuilder.append( OM_BSON_CLUSTER_NAME, _clusterName ) ;
   taskConfigBuilder.append( OM_BSON_BUSINESS_TYPE, _businessType ) ;
   taskConfigBuilder.append( OM_BSON_BUSINESS_NAME, _businessName ) ;
   taskConfigBuilder.append( OM_BSON_DEPLOY_MOD, _deployMod ) ;

   if ( OM_BUSINESS_SEQUOIADB == _businessType )
   {
      string authUser ;
      string authPasswd ;

      rc = dbTool.getAuth( _businessName, authUser, authPasswd ) ;
      if ( rc )
      {
         _errorMsg.setError( TRUE, "failed to get business auth: "
                                   "name=%s, rc=%d",
                             _businessName.c_str(), rc ) ;
         PD_LOG( PDERROR, _errorMsg.getError() ) ;
         goto error ;
      }

      taskConfigBuilder.append( OM_TASKINFO_FIELD_AUTH_USER, authUser ) ;
      taskConfigBuilder.append( OM_TASKINFO_FIELD_AUTH_PASSWD, authPasswd ) ;
   }
   else if ( OM_BUSINESS_ZOOKEEPER == _businessType ||
             OM_BUSINESS_SEQUOIASQL_OLAP == _businessType )
   {
      string sdbUser ;
      string sdbPasswd ;
      string sdbUserGroup ;
      BSONObj clusterInfo ;

      rc = dbTool.getClusterInfo( _clusterName, clusterInfo ) ;
      if ( rc )
      {
         _errorMsg.setError( TRUE, "failed to get cluster info: "
                                   "name=%s, rc=%d",
                             _clusterName.c_str(), rc ) ;
         PD_LOG( PDERROR, _errorMsg.getError() ) ;
         goto error ;
      }

      sdbUser = clusterInfo.getStringField( OM_CLUSTER_FIELD_SDBUSER ) ;
      sdbPasswd = clusterInfo.getStringField( OM_CLUSTER_FIELD_SDBPASSWD ) ;
      sdbUserGroup = clusterInfo.getStringField( OM_CLUSTER_FIELD_SDBUSERGROUP ) ;

      taskConfigBuilder.append( OM_TASKINFO_FIELD_SDBUSER, sdbUser ) ;
      taskConfigBuilder.append( OM_TASKINFO_FIELD_SDBPASSWD, sdbPasswd ) ;
      taskConfigBuilder.append( OM_TASKINFO_FIELD_SDBUSERGROUP, sdbUserGroup ) ;
   }
   else if ( OM_BUSINESS_SEQUOIASQL_OLTP == _businessType )
   {
   }

   for ( iter = configList.begin(); iter != configList.end(); ++iter )
   {
      string hostName ;
      string installPath ;
      BSONObj hostInfo ;
      BSONObj tmpHostInfo ;
      BSONObj configInfo ;
      BSONObj packages ;

      hostName = iter->getStringField( OM_CONFIGURE_FIELD_HOSTNAME ) ;
      configInfo = iter->getObjectField( OM_CONFIGURE_FIELD_CONFIG ) ;

      rc = dbTool.getHostInfoByAddress( hostName, tmpHostInfo ) ;
      if ( rc )
      {
         _errorMsg.setError( TRUE, "failed to get host info: name=%s, rc=%d",
                             hostName.c_str(), rc ) ;
         PD_LOG( PDERROR, _errorMsg.getError() ) ;
         goto error ;
      }

      hostInfo = tmpHostInfo.filterFieldsUndotted( filter, TRUE ) ;
      packages = tmpHostInfo.getObjectField( OM_HOST_FIELD_PACKAGES ) ;

      {
         BSONObjIterator pkgIter( packages ) ;
         while ( pkgIter.more() )
         {
            BSONElement ele = pkgIter.next() ;
            BSONObj pkgInfo = ele.embeddedObject() ;
            string pkgName = pkgInfo.getStringField( OM_HOST_FIELD_PACKAGENAME ) ;

            if ( pkgName == _businessType )
            {
               installPath = pkgInfo.getStringField( OM_HOST_FIELD_INSTALLPATH ) ;
               break ;
            }
         }
      }

      {
         BSONObjIterator configIter( configInfo ) ;

         while ( configIter.more() )
         {
            BSONObjBuilder configInfoBuilder ;
            BSONElement ele = configIter.next() ;
            BSONObj nodeInfo = ele.embeddedObject() ;

            if ( OM_BUSINESS_SEQUOIADB == _businessType &&
                 0 == ossStrlen( nodeInfo.getStringField(
                                              OM_CONF_DETAIL_CATANAME ) ) )
            {
               CHAR catName[ OM_INT32_LENGTH + 1 ] = { 0 } ;
               string svcName = nodeInfo.getStringField( OM_CONF_DETAIL_SVCNAME ) ;
               INT32 iSvcName = ossAtoi( svcName.c_str() ) ;
               INT32 iCatName = iSvcName + MSG_ROUTE_CAT_SERVICE ;

               ossItoa( iCatName, catName, OM_INT32_LENGTH ) ;
               configInfoBuilder.append( OM_CONF_DETAIL_CATANAME, catName ) ;
            }

            configInfoBuilder.appendElements( nodeInfo ) ;
            configInfoBuilder.appendElements( hostInfo ) ;
            configInfoBuilder.append( OM_BSON_INSTALL_PATH, installPath ) ;

            configBuilder.append( configInfoBuilder.obj() ) ;
         }
      }
   }

   taskConfigBuilder.append( OM_TASKINFO_FIELD_CONFIG, configBuilder.arr() ) ;

   taskConfig = taskConfigBuilder.obj() ;

done:
   return rc ;
error:
   goto done ;
}
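// A worked example of the catalog-name derivation above, assuming the usual
// SequoiaDB route-service offsets (an assumption; the real value is whatever
// MSG_ROUTE_CAT_SERVICE is defined to): a node with svcname "11810" and no
// explicit catalogname gets catalogname = 11810 + MSG_ROUTE_CAT_SERVICE,
// i.e. "11813" if the offset is 3, so the generated task config always
// carries a usable catalog address.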