// TODO: remove after MongoDB 3.2
bool Strategy::handleSpecialNamespaces(OperationContext* txn, Request& request, QueryMessage& q) {
    const char* ns = strstr(request.getns(), ".$cmd.sys.");
    if (!ns)
        return false;
    ns += 10;

    BSONObjBuilder reply;

    const auto upgradeToRealCommand = [txn, &q, &reply](StringData commandName) {
        BSONObjBuilder cmdBob;
        cmdBob.append(commandName, 1);
        cmdBob.appendElements(q.query);  // fields are validated by Commands
        auto interposedCmd = cmdBob.done();

        // Rewrite upgraded pseudoCommands to run on the 'admin' database.
        NamespaceString interposedNss("admin", "$cmd");
        Command::runAgainstRegistered(
            txn, interposedNss.ns().c_str(), interposedCmd, reply, q.queryOptions);
    };

    if (strcmp(ns, "inprog") == 0) {
        upgradeToRealCommand("currentOp");
    } else if (strcmp(ns, "killop") == 0) {
        upgradeToRealCommand("killOp");
    } else if (strcmp(ns, "unlock") == 0) {
        reply.append("err", "can't do unlock through mongos");
    } else {
        warning() << "unknown sys command [" << ns << "]";
        return false;
    }

    BSONObj x = reply.done();
    replyToQuery(0, request.p(), request.m(), x);
    return true;
}
Status AuthzManagerExternalStateMongod::remove(const NamespaceString& collectionName,
                                               const BSONObj& query,
                                               const BSONObj& writeConcern,
                                               int* numRemoved) {
    try {
        DBDirectClient client;
        client.remove(collectionName, query);

        // Handle write concern
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string errstr = client.getLastErrorString(res);
        if (!errstr.empty()) {
            return Status(ErrorCodes::UnknownError, errstr);
        }
        *numRemoved = res["n"].numberInt();
        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
static void noteInCriticalSection( WriteErrorDetail* staleError ) {
    BSONObjBuilder builder;
    if ( staleError->isErrInfoSet() )
        builder.appendElements( staleError->getErrInfo() );
    builder.append( "inCriticalSection", true );
    staleError->setErrInfo( builder.obj() );
}
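// A minimal standalone sketch (not from the sources above) of the pattern that
// noteInCriticalSection() and most of the snippets below rely on: BSONObj is
// immutable, so "adding" a field means rebuilding the object by copying the
// existing elements with appendElements() and then appending the new field.
// The helper name withExtraFlag() is hypothetical; only the public BSON
// builder API is assumed.
#include "mongo/bson/bsonobjbuilder.h"

mongo::BSONObj withExtraFlag(const mongo::BSONObj& original) {
    mongo::BSONObjBuilder builder;
    builder.appendElements(original);   // copy every existing element verbatim
    builder.append("extraFlag", true);  // then append the new field at the end
    return builder.obj();               // builder hands ownership to the result
}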
Status AuthzManagerExternalStateMongod::insert(const NamespaceString& collectionName,
                                               const BSONObj& document,
                                               const BSONObj& writeConcern) {
    try {
        DBDirectClient client;
        client.insert(collectionName, document);

        // Handle write concern
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string errstr = client.getLastErrorString(res);
        if (errstr.empty()) {
            return Status::OK();
        }
        if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
            return Status(ErrorCodes::DuplicateKey, errstr);
        }
        return Status(ErrorCodes::UnknownError, errstr);
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
Status AuthzManagerExternalStateMongod::update(OperationContext* txn,
                                               const NamespaceString& collectionName,
                                               const BSONObj& query,
                                               const BSONObj& updatePattern,
                                               bool upsert,
                                               bool multi,
                                               const BSONObj& writeConcern,
                                               int* nMatched) {
    try {
        DBDirectClient client(txn);
        client.update(collectionName, query, updatePattern, upsert, multi);

        // Handle write concern
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string err = client.getLastErrorString(res);
        if (!err.empty()) {
            return Status(ErrorCodes::UnknownError, err);
        }

        *nMatched = res["n"].numberInt();
        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
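// The write-then-getLastError sequence is repeated nearly verbatim in the
// remove(), insert(), and update() methods above. A hedged standalone
// reduction of that shared pattern, assuming only the legacy DBClientBase
// API; checkLastError() is a hypothetical helper name.
#include "mongo/client/dbclientinterface.h"

mongo::Status checkLastError(mongo::DBClientBase& client, const mongo::BSONObj& writeConcern) {
    mongo::BSONObjBuilder gleBuilder;
    gleBuilder.append("getLastError", 1);
    gleBuilder.appendElements(writeConcern);  // e.g. { w: 2, wtimeout: 5000 }

    mongo::BSONObj res;
    client.runCommand("admin", gleBuilder.done(), res);

    std::string errstr = client.getLastErrorString(res);
    if (!errstr.empty()) {
        return mongo::Status(mongo::ErrorCodes::UnknownError, errstr);
    }
    return mongo::Status::OK();
}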
Status AuthzManagerExternalStateMock::insert(OperationContext* txn,
                                             const NamespaceString& collectionName,
                                             const BSONObj& document,
                                             const BSONObj&) {
    BSONObj toInsert;
    if (document["_id"].eoo()) {
        BSONObjBuilder docWithIdBuilder;
        docWithIdBuilder.append("_id", OID::gen());
        docWithIdBuilder.appendElements(document);
        toInsert = docWithIdBuilder.obj();
    } else {
        toInsert = document.copy();
    }
    _documents[collectionName].push_back(toInsert);

    if (_authzManager) {
        _authzManager->logOp("i", collectionName.ns().c_str(), toInsert, NULL, NULL);
    }

    return Status::OK();
}
BSONObj IndexCursor::current() {
    // If the index is clustering, the full document is always stored in _currObj.
    // If the index is not clustering, _currObj starts as empty and gets filled
    // with the full document on the first call to current().
    if ( _currObj.isEmpty() ) {
        _nscannedObjects++;
        bool found = _cl->findByPK( _currPK, _currObj );
        if ( !found ) {
            // If we didn't find the associated object, we must be either:
            // - a snapshot transaction whose context deleted the current pk
            // - a read uncommitted cursor with stale data
            // In either case, we may advance and try again exactly once.
            TOKULOG(4) << "current() did not find associated object for pk " << _currPK << endl;
            advance();
            if ( ok() ) {
                found = _cl->findByPK( _currPK, _currObj );
                uassert( 16741, str::stream() << toString()
                                << ": could not find associated document with pk " << _currPK
                                << ", index key " << _currKey, found );
            }
        }
    }
    bool shouldAppendPK = _cl->isCapped() && cc().opSettings().shouldCappedAppendPK();
    if (shouldAppendPK) {
        BSONObjBuilder b;
        b.appendElements(_currObj);
        b.append("$_", _currPK);
        return b.obj();
    }
    return _currObj;
}
bool WriteCmd::run(OperationContext* txn,
                   const string& dbName,
                   BSONObj& cmdObj,
                   int options,
                   string& errMsg,
                   BSONObjBuilder& result) {
    // Can't be run on secondaries.
    dassert(txn->writesAreReplicated());
    BatchedCommandRequest request(_writeType);
    BatchedCommandResponse response;

    if (!request.parseBSON(dbName, cmdObj, &errMsg) || !request.isValid(&errMsg)) {
        return appendCommandStatus(result, Status(ErrorCodes::FailedToParse, errMsg));
    }

    StatusWith<WriteConcernOptions> wcStatus = extractWriteConcern(cmdObj);
    if (!wcStatus.isOK()) {
        return appendCommandStatus(result, wcStatus.getStatus());
    }
    txn->setWriteConcern(wcStatus.getValue());

    WriteBatchExecutor writeBatchExecutor(
        txn, &globalOpCounters, &LastError::get(txn->getClient()));
    writeBatchExecutor.executeBatch(request, &response);

    result.appendElements(response.toBSON());
    return response.getOk();
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    while ( d.moreJSObjs() ){
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ){
            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ){
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        ChunkPtr c = manager->findChunk( o );
        log(4) << " server:" << c->getShard().toString() << " " << o << endl;
        insert( c->getShard() , r.getns() , o );

        r.gotInsert();
        c->splitIfShould( o.objsize() );
    }
}
void getKeys(const BSONObj& obj, BSONObjSet& keys) const {
    verify(_fields.size() >= 1);
    BSONObjSet keysToAdd;
    // We output keys in the same order as the fields we index.
    for (size_t i = 0; i < _fields.size(); ++i) {
        const IndexedField& field = _fields[i];

        // First, we get the keys that this field adds.  Either they're added literally from
        // the value of the field, or they're transformed if the field is geo.
        BSONElementSet fieldElements;
        // false means Don't expand the last array, duh.
        obj.getFieldsDotted(field.name, fieldElements, false);

        BSONObjSet keysForThisField;
        if (IndexedField::GEO == field.type) {
            getGeoKeys(fieldElements, &keysForThisField);
        } else if (IndexedField::LITERAL == field.type) {
            getLiteralKeys(fieldElements, &keysForThisField);
        } else {
            verify(0);
        }

        // We expect there to be _spec->_missingField() present in the keys if data is
        // missing.  So, this should be non-empty.
        verify(!keysForThisField.empty());

        // We take the Cartesian product of all of the keys.  This requires that we have
        // some keys to take the Cartesian product with.  If keysToAdd.empty(), we
        // initialize it.
        if (keysToAdd.empty()) {
            keysToAdd = keysForThisField;
            continue;
        }

        BSONObjSet updatedKeysToAdd;
        for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
            for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
                 newIt != keysForThisField.end(); ++newIt) {
                BSONObjBuilder b;
                b.appendElements(*it);
                b.append(newIt->firstElement());
                updatedKeysToAdd.insert(b.obj());
            }
        }
        keysToAdd = updatedKeysToAdd;
    }

    if (keysToAdd.size() > _params.maxKeysPerInsert) {
        warning() << "insert of geo object generated lots of keys (" << keysToAdd.size()
                  << ") consider creating larger buckets. obj=" << obj;
    }

    for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
        keys.insert(*it);
    }
}
// static
StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
    // If the spec object has the field "2dsphereIndexVersion", validate it.  If it doesn't, add
    // {2dsphereIndexVersion: 3}, which is the default for newly-built indexes.
    BSONElement indexVersionElt = specObj[kIndexVersionFieldName];
    if (indexVersionElt.eoo()) {
        BSONObjBuilder bob;
        bob.appendElements(specObj);
        bob.append(kIndexVersionFieldName, S2_INDEX_VERSION_3);
        return bob.obj();
    }

    if (!indexVersionElt.isNumber()) {
        return {ErrorCodes::CannotCreateIndex,
                str::stream() << "Invalid type for geo index version { " << kIndexVersionFieldName
                              << " : " << indexVersionElt << " }, only versions: ["
                              << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
                              << S2_INDEX_VERSION_3 << "] are supported"};
    }

    if (indexVersionElt.type() == BSONType::NumberDouble &&
        !std::isnormal(indexVersionElt.numberDouble())) {
        return {ErrorCodes::CannotCreateIndex,
                str::stream() << "Invalid value for geo index version { " << kIndexVersionFieldName
                              << " : " << indexVersionElt << " }, only versions: ["
                              << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
                              << S2_INDEX_VERSION_3 << "] are supported"};
    }

    const auto indexVersion = indexVersionElt.numberLong();
    if (indexVersion != S2_INDEX_VERSION_1 && indexVersion != S2_INDEX_VERSION_2 &&
        indexVersion != S2_INDEX_VERSION_3) {
        return {ErrorCodes::CannotCreateIndex,
                str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
                              << " : " << indexVersionElt << " }, only versions: ["
                              << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
                              << S2_INDEX_VERSION_3 << "] are supported"};
    }

    return specObj;
}
bool passthrough( DBConfig * conf, const BSONObj& cmdObj , BSONObjBuilder& result ){
    ScopedDbConnection conn( conf->getPrimary() );
    BSONObj res;
    bool ok = conn->runCommand( conf->getName() , cmdObj , res );
    result.appendElements( res );
    conn.done();
    return ok;
}
// Make the object that describes all keys that are within our current search annulus.
BSONObj S2NearIndexCursor::makeFRSObject() {
    BSONObjBuilder frsObjBuilder;
    frsObjBuilder.appendElements(_filteredQuery);

    S2RegionCoverer coverer;

    // Step 1: Make the BSON'd covering for our search annulus.
    BSONObj inExpr;
    // Caps are inclusive and inverting a cap includes the border.  This means that our
    // initial _innerRadius of 0 is OK -- we'll still find a point that is exactly at
    // the start of our search.
    _innerCap = S2Cap::FromAxisAngle(_nearQuery.centroid,
                                     S1Angle::Radians(_innerRadius / _params.radius));
    _outerCap = S2Cap::FromAxisAngle(_nearQuery.centroid,
                                     S1Angle::Radians(_outerRadius / _params.radius));
    double area = _outerCap.area() - _innerCap.area();
    _innerCap = _innerCap.Complement();

    vector<S2Region*> regions;
    regions.push_back(&_innerCap);
    regions.push_back(&_outerCap);

    _annulus.Release(NULL);
    _annulus.Init(&regions);

    vector<S2CellId> cover;
    S2SearchUtil::setCoverLimitsBasedOnArea(area, &coverer, _params.coarsestIndexedLevel);
    coverer.GetCovering(_annulus, &cover);
    LOG(2) << "annulus cover size is " << cover.size()
           << ", params (" << coverer.min_level() << ", " << coverer.max_level() << ")"
           << endl;

    inExpr = S2SearchUtil::coverAsBSON(cover, _nearQuery.field, _params.coarsestIndexedLevel);
    frsObjBuilder.appendElements(inExpr);

    _params.configureCoverer(&coverer);

    // Cover the indexed geo components of the query.
    for (size_t i = 0; i < _indexedGeoFields.size(); ++i) {
        vector<S2CellId> cover;
        coverer.GetCovering(_indexedGeoFields[i].getRegion(), &cover);
        uassert(16761,
                "Couldn't generate index keys for geo field " + _indexedGeoFields[i].getField(),
                cover.size() > 0);
        BSONObj fieldRange = S2SearchUtil::coverAsBSON(cover, _indexedGeoFields[i].getField(),
                                                       _params.coarsestIndexedLevel);
        frsObjBuilder.appendElements(fieldRange);
    }

    return frsObjBuilder.obj();
}
virtual bool run(OperationContext* txn,
                 const string&,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result,
                 bool fromRepl) {
    BSONObj configObj;
    if (cmdObj["replSetInitiate"].type() == Object) {
        configObj = cmdObj["replSetInitiate"].Obj();
    }

    if (configObj.isEmpty()) {
        result.append("info2", "no configuration explicitly specified -- making one");
        log() << "replSet info initiate : no configuration specified. "
                 "Using a default configuration for the set";

        ReplicationCoordinatorExternalStateImpl externalState;
        std::string name;
        std::vector<HostAndPort> seeds;
        std::set<HostAndPort> seedSet;
        parseReplSetSeedList(&externalState,
                             getGlobalReplicationCoordinator()->getSettings().replSet,
                             name,
                             seeds,
                             seedSet);  // may throw...

        BSONObjBuilder b;
        b.append("_id", name);
        b.append("version", 1);
        BSONObjBuilder members;
        HostAndPort me = someHostAndPortForMe();
        members.append("0", BSON("_id" << 0 << "host" << me.toString()));
        result.append("me", me.toString());
        for (unsigned i = 0; i < seeds.size(); i++) {
            members.append(BSONObjBuilder::numStr(i + 1),
                           BSON("_id" << i + 1 << "host" << seeds[i].toString()));
        }
        b.appendArray("members", members.obj());
        configObj = b.obj();
        log() << "replSet created this configuration for initiation : " << configObj.toString();
    }

    if (configObj.getField("version").eoo()) {
        // Missing version field defaults to version 1.
        BSONObjBuilder builder;
        builder.appendElements(configObj);
        builder.append("version", 1);
        configObj = builder.obj();
    }

    Status status =
        getGlobalReplicationCoordinator()->processReplSetInitiate(txn, configObj, &result);
    return appendCommandStatus(result, status);
}
BSONObj InsertWriteOperation::_ensureId(const BSONObj& doc) {
    if (doc.hasField("_id"))
        return doc;

    BSONObjBuilder bob;
    bob.append("_id", OID::gen());
    bob.appendElements(doc);
    return bob.obj();
}
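// Usage sketch for the _ensureId() pattern above: appending "_id" before
// appendElements() puts the generated id first in the rebuilt document,
// matching the layout the server produces for inserted documents. The header
// paths and values here are assumptions for illustration.
#include <iostream>
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/oid.h"

int main() {
    mongo::BSONObj doc = BSON("x" << 1 << "y" << 2);  // no _id present

    mongo::BSONObjBuilder bob;
    bob.append("_id", mongo::OID::gen());
    bob.appendElements(doc);

    std::cout << bob.obj() << std::endl;  // { _id: ObjectId(...), x: 1, y: 2 }
    return 0;
}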
void S2AccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) {
    BSONObjSet keysToAdd;
    // We output keys in the same order as the fields we index.
    BSONObjIterator i(_descriptor->keyPattern());
    while (i.more()) {
        BSONElement e = i.next();

        // First, we get the keys that this field adds.  Either they're added literally from
        // the value of the field, or they're transformed if the field is geo.
        BSONElementSet fieldElements;
        // false means Don't expand the last array, duh.
        obj.getFieldsDotted(e.fieldName(), fieldElements, false);

        BSONObjSet keysForThisField;
        if (IndexNames::GEO_2DSPHERE == e.valuestr()) {
            // We can't ever return documents that don't have geometry so don't bother indexing
            // them.
            if (fieldElements.empty()) {
                return;
            }
            getGeoKeys(obj, fieldElements, &keysForThisField);
        } else {
            getLiteralKeys(fieldElements, &keysForThisField);
        }

        // We expect there to be the missing field element present in the keys if data is
        // missing.  So, this should be non-empty.
        verify(!keysForThisField.empty());

        // We take the Cartesian product of all of the keys.  This requires that we have
        // some keys to take the Cartesian product with.  If keysToAdd.empty(), we
        // initialize it.
        if (keysToAdd.empty()) {
            keysToAdd = keysForThisField;
            continue;
        }

        BSONObjSet updatedKeysToAdd;
        for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
            for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
                 newIt != keysForThisField.end(); ++newIt) {
                BSONObjBuilder b;
                b.appendElements(*it);
                b.append(newIt->firstElement());
                updatedKeysToAdd.insert(b.obj());
            }
        }
        keysToAdd = updatedKeysToAdd;
    }

    if (keysToAdd.size() > _params.maxKeysPerInsert) {
        warning() << "insert of geo object generated lots of keys (" << keysToAdd.size()
                  << ") consider creating larger buckets. obj=" << obj;
    }

    *keys = keysToAdd;
}
BSONObj ReplSetConfig::addProtocolVersionIfMissing(const BSONObj& config) {
    if (config["protocolVersion"].ok()) {
        return config;
    }
    BSONObjBuilder b;
    b.appendElements(config);
    b.append("protocolVersion", CURRENT_PROTOCOL_VERSION);
    return b.obj();
}
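// addProtocolVersionIfMissing() above and S2AccessMethod::fixSpec() earlier
// share the same check-then-rebuild shape: return the object unchanged when
// the field exists, otherwise rebuild it with a default appended. A hedged
// generalization, with withDefault() as a hypothetical helper name:
#include "mongo/bson/bsonobjbuilder.h"

mongo::BSONObj withDefault(const mongo::BSONObj& config,
                           mongo::StringData field,
                           long long defaultValue) {
    if (config.hasField(field)) {
        return config;  // caller already set the field; return the object untouched
    }
    mongo::BSONObjBuilder b;
    b.appendElements(config);       // keep everything the caller provided
    b.append(field, defaultValue);  // and fill in the default
    return b.obj();
}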
BSONObj MergeChunkRequest::toConfigCommandBSON(const BSONObj& writeConcern) {
    BSONObjBuilder cmdBuilder;
    appendAsConfigCommand(&cmdBuilder);

    // Tack on passed-in writeConcern
    cmdBuilder.appendElements(writeConcern);

    return cmdBuilder.obj();
}
BSONObj BalanceChunkRequest::serializeToRebalanceCommandForConfig(const ChunkType& chunk) {
    invariantOK(chunk.validate());

    BSONObjBuilder cmdBuilder;
    cmdBuilder.append(kConfigSvrMoveChunk, 1);
    cmdBuilder.appendElements(chunk.toBSON());

    return cmdBuilder.obj();
}
intrusive_ptr<DocumentSource> DocumentSourceSample::getMergeSource() {
    // Just need to merge the pre-sorted documents by their random values.
    BSONObjBuilder randMergeSortSpec;
    randMergeSortSpec.appendElements(randSortSpec);
    randMergeSortSpec.append("$mergePresorted", true);
    return DocumentSourceSort::create(pExpCtx, randMergeSortSpec.obj(), _size);
}
Query& Query::where(const string& jscode, BSONObj scope) {
    /* use where() before sort() and hint() and explain(), else this will assert. */
    assert( !obj.hasField("query") );
    BSONObjBuilder b;
    b.appendElements(obj);
    b.appendWhere(jscode, scope);
    obj = b.obj();
    return *this;
}
Status DBClientSafeWriter::safeWrite( DBClientBase* conn,
                                      const BatchItemRef& itemRef,
                                      const BSONObj& writeConcern,
                                      BSONObj* gleResponse ) {

    const BatchedCommandRequest* request = itemRef.getRequest();

    try {
        // Default settings for checkShardVersion
        const bool authoritative = false;
        const int tryNum = 1;

        // We need to set our version using setShardVersion, managed by checkShardVersionCB
        versionManager.checkShardVersionCB( conn,
                                            request->getTargetingNS(),
                                            authoritative,
                                            tryNum );

        if ( request->getBatchType() == BatchedCommandRequest::BatchType_Insert ) {
            conn->insert( request->getNS(),
                          request->getInsertRequest()->getDocumentsAt( itemRef.getItemIndex() ),
                          0 );
        }
        else if ( request->getBatchType() == BatchedCommandRequest::BatchType_Update ) {
            const BatchedUpdateDocument* update =
                request->getUpdateRequest()->getUpdatesAt( itemRef.getItemIndex() );
            conn->update( request->getNS(),
                          update->getQuery(),
                          update->getUpdateExpr(),
                          update->getUpsert(),
                          update->getMulti() );
        }
        else {
            dassert( request->getBatchType() == BatchedCommandRequest::BatchType_Delete );
            const BatchedDeleteDocument* deleteDoc =
                request->getDeleteRequest()->getDeletesAt( itemRef.getItemIndex() );
            conn->remove( request->getNS(),
                          deleteDoc->getQuery(),
                          deleteDoc->getLimit() == 1 /*just one*/ );
        }

        const StringData& dbName = NamespaceString( request->getNS() ).db();

        BSONObjBuilder gleCmdB;
        gleCmdB.append( "getLastError", true );
        gleCmdB.appendElements( writeConcern );
        conn->runCommand( dbName.toString(), gleCmdB.obj(), *gleResponse );
    }
    catch ( const DBException& ex ) {
        return ex.toStatus();
    }

    return Status::OK();
}
StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* opCtx,
                                               const BSONObj& indexSpec,
                                               const CollatorInterface* defaultCollator) {
    if (auto collationElem = indexSpec[IndexDescriptor::kCollationFieldName]) {
        // validateIndexSpec() should have already verified that 'collationElem' is an object.
        invariant(collationElem.type() == BSONType::Object);

        auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
                            ->makeFromBSON(collationElem.Obj());
        if (!collator.isOK()) {
            return collator.getStatus();
        }

        if (collator.getValue()) {
            // If the collator factory returned a non-null collator, then inject the entire
            // collation specification into the index specification. This is necessary to fill
            // in any options that the user omitted.
            BSONObjBuilder bob;

            for (auto&& indexSpecElem : indexSpec) {
                if (IndexDescriptor::kCollationFieldName != indexSpecElem.fieldNameStringData()) {
                    bob.append(indexSpecElem);
                }
            }
            bob.append(IndexDescriptor::kCollationFieldName,
                       collator.getValue()->getSpec().toBSON());

            return bob.obj();
        } else {
            // If the collator factory returned a null collator (representing the "simple"
            // collation), then we simply omit the "collation" from the index specification.
            // This is desirable to make the representation for the "simple" collation
            // consistent between v=1 and v=2 indexes.
            return indexSpec.removeField(IndexDescriptor::kCollationFieldName);
        }
    } else if (defaultCollator) {
        // validateIndexSpec() should have added the "v" field if it was not present and
        // verified that 'versionElem' is a number.
        auto versionElem = indexSpec[IndexDescriptor::kIndexVersionFieldName];
        invariant(versionElem.isNumber());

        if (IndexVersion::kV2 <= static_cast<IndexVersion>(versionElem.numberInt())) {
            // The user did not specify an explicit collation for this index and the collection
            // has a default collator. If we're building a v=2 index, then we should inherit the
            // collection default. However, if we're building a v=1 index, then we're implicitly
            // building an index that's using the "simple" collation.
            BSONObjBuilder bob;
            bob.appendElements(indexSpec);
            bob.append(IndexDescriptor::kCollationFieldName, defaultCollator->getSpec().toBSON());
            return bob.obj();
        }
    }
    return indexSpec;
}
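// validateIndexSpecCollation() above shows the complement of appendElements():
// when a field must be replaced rather than added, the loop copies every
// element except the one being swapped out. Reduced to a hedged standalone
// helper (stripField() is a hypothetical name):
#include "mongo/bson/bsonobjbuilder.h"

mongo::BSONObj stripField(const mongo::BSONObj& obj, mongo::StringData name) {
    mongo::BSONObjBuilder bob;
    for (auto&& elem : obj) {
        if (elem.fieldNameStringData() != name) {
            bob.append(elem);  // keep every field except the one being removed
        }
    }
    return bob.obj();
}
// (For plain removal with no replacement, the source above uses
// BSONObj::removeField() directly.)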
/**
 * Creates a create collection oplog entry with given optime.
 */
OplogEntry makeCreateCollectionOplogEntry(OpTime opTime,
                                          const NamespaceString& nss = NamespaceString("test.t")) {
    BSONObjBuilder bob;
    bob.appendElements(opTime.toBSON());
    bob.append("h", 1LL);
    bob.append("op", "c");
    bob.append("ns", nss.getCommandNS());
    bob.append("o", BSON("create" << nss.coll()));
    return OplogEntry(bob.obj());
}
Query& Query::explain() {
    BSONObjBuilder b;
    if( obj.hasElement("query") )
        b.appendElements(obj);
    else
        b.append("query", obj);
    b.append("$explain", true);
    obj = b.obj();
    return *this;
}
Query& Query::hint(BSONObj keyPattern) {
    BSONObjBuilder b;
    if( obj.hasElement("query") )
        b.appendElements(obj);
    else
        b.append("query", obj);
    b.append("$hint", keyPattern);
    obj = b.obj();
    return *this;
}
Query& Query::sort(const BSONObj& s) {
    BSONObjBuilder b;
    if( obj.hasElement("query") )
        b.appendElements(obj);
    else
        b.append("query", obj);
    b.append("orderby", s);
    obj = b.obj();
    return *this;
}
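// Usage sketch for the legacy Query helpers above. Each modifier wraps the
// filter under "query" on first use and then appends itself, so the calls
// chain; the field names and criteria here are made up for illustration.
Query q = Query(BSON("age" << GT << 21))
              .sort(BSON("name" << 1))
              .hint(BSON("age" << 1));
// q.obj is now:
// { query: { age: { $gt: 21 } }, orderby: { name: 1 }, $hint: { age: 1 } }
// Note that where() must come first: it asserts that "query" is not yet set.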
Status DeferredWriter::_makeCollection(OperationContext* opCtx) {
    BSONObjBuilder builder;
    builder.append("create", _nss.coll());
    builder.appendElements(_collectionOptions.toBSON());
    try {
        return createCollection(opCtx, _nss.db().toString(), builder.obj().getOwned());
    } catch (const DBException& exception) {
        return exception.toStatus();
    }
}
bool CatalogManagerReplicaSet::runReadCommand(OperationContext* txn,
                                              const std::string& dbname,
                                              const BSONObj& cmdObj,
                                              BSONObjBuilder* result) {
    BSONObjBuilder cmdBuilder;
    cmdBuilder.appendElements(cmdObj);
    _appendReadConcern(&cmdBuilder);

    return _runReadCommand(txn, dbname, cmdBuilder.done(), kConfigReadSelector, result);
}
INT32 rtnCoordDelete::buildOpMsg( const CoordCataInfoPtr &cataInfo,
                                  const CoordSubCLlist &subCLList,
                                  CHAR *pSrcMsg,
                                  CHAR *&pDstMsg,
                                  INT32 &bufferSize )
{
   INT32 rc = SDB_OK;
   INT32 flag;
   CHAR *pCollectionName = NULL;
   CHAR *pDeletor = NULL;
   CHAR *pHint = NULL;
   BSONObj boDeletor;
   BSONObj boHint;

   rc = msgExtractDelete( pSrcMsg, &flag, &pCollectionName,
                          &pDeletor, &pHint );
   PD_RC_CHECK( rc, PDERROR, "failed to parse delete request(rc=%d)", rc );

   try
   {
      boDeletor = BSONObj( pDeletor );
      boHint = BSONObj( pHint );

      BSONArrayBuilder babSubCL;
      CoordSubCLlist::const_iterator iterCL = subCLList.begin();
      while( iterCL != subCLList.end() )
      {
         babSubCL.append( *iterCL );
         ++iterCL;
      }

      BSONObjBuilder bobNewDeletor;
      bobNewDeletor.appendElements( boDeletor );
      bobNewDeletor.appendArray( CAT_SUBCL_NAME, babSubCL.arr() );
      BSONObj boNewDeletor = bobNewDeletor.obj();

      rc = msgBuildDeleteMsg( &pDstMsg, &bufferSize, pCollectionName,
                              flag, 0, &boNewDeletor, &boHint );
      PD_RC_CHECK( rc, PDERROR, "failed to build delete request(rc=%d)", rc );

      {
         MsgOpDelete *pReqMsg = (MsgOpDelete *)pDstMsg;
         MsgOpDelete *pSrcReq = (MsgOpDelete *)pSrcMsg;
         pReqMsg->version = cataInfo->getVersion();
         pReqMsg->w = pSrcReq->w;
      }
   }
   catch ( std::exception &e )
   {
      PD_RC_CHECK( SDB_INVALIDARG, PDERROR,
                   "occur unexpected error:%s", e.what() );
   }

done:
   return rc;
error:
   goto done;
}