OpMsgRequest upconvertRequest(StringData db, BSONObj cmdObj, int queryFlags) {
    cmdObj = cmdObj.getOwned();  // Usually this is a no-op since it is already owned.

    auto readPrefContainer = BSONObj();
    const StringData firstFieldName = cmdObj.firstElementFieldName();
    if (firstFieldName == "$query" || firstFieldName == "query") {
        // Commands sent over OP_QUERY specify read preference by putting it at the top level and
        // putting the command in a nested field called either query or $query.

        // Check if legacyCommand has an invalid $maxTimeMS option.
        uassert(ErrorCodes::InvalidOptions,
                "cannot use $maxTimeMS query option with commands; use maxTimeMS command option "
                "instead",
                !cmdObj.hasField("$maxTimeMS"));

        if (auto readPref = cmdObj["$readPreference"])
            readPrefContainer = readPref.wrap();

        cmdObj = cmdObj.firstElement().Obj().shareOwnershipWith(cmdObj);
    } else if (auto queryOptions = cmdObj["$queryOptions"]) {
        // Mongos rewrites commands with $readPreference to put it in a field nested inside of
        // $queryOptions. Its command implementations often forward commands in that format to
        // shards. This function is responsible for rewriting it to a format that the shards
        // understand.
        readPrefContainer = queryOptions.Obj().shareOwnershipWith(cmdObj);
        cmdObj = cmdObj.removeField("$queryOptions");
    }

    if (!readPrefContainer.isEmpty()) {
        cmdObj = BSONObjBuilder(std::move(cmdObj)).appendElements(readPrefContainer).obj();
    } else if (!cmdObj.hasField("$readPreference") && (queryFlags & QueryOption_SlaveOk)) {
        BSONObjBuilder bodyBuilder(std::move(cmdObj));
        ReadPreferenceSetting(ReadPreference::SecondaryPreferred).toContainingBSON(&bodyBuilder);
        cmdObj = bodyBuilder.obj();
    }

    // Try to move supported array fields into document sequences.
    auto docSequenceIt = docSequenceFieldsForCommands.find(cmdObj.firstElementFieldName());
    auto docSequenceElem = docSequenceIt == docSequenceFieldsForCommands.end()
        ? BSONElement()
        : cmdObj[docSequenceIt->second];
    if (!isArrayOfObjects(docSequenceElem))
        return OpMsgRequest::fromDBAndBody(db, std::move(cmdObj));

    auto docSequenceName = docSequenceElem.fieldNameStringData();

    // Note: removing field before adding "$db" to avoid the need to copy the potentially large
    // array.
    auto out = OpMsgRequest::fromDBAndBody(db, cmdObj.removeField(docSequenceName));
    out.sequences.push_back({docSequenceName.toString()});
    for (auto elem : docSequenceElem.Obj()) {
        out.sequences[0].objs.push_back(elem.Obj().shareOwnershipWith(cmdObj));
    }
    return out;
}
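// --- Usage sketch (not from the original source) ---
// A minimal illustration of what upconvertRequest() does to a legacy
// OP_QUERY-style body: the command is unwrapped from $query and the top-level
// $readPreference is folded into the body. The header paths and the exact
// shape of the result are assumptions based on the function above.
#include "mongo/db/json.h"
#include "mongo/rpc/op_msg.h"

void upconvertExample() {
    mongo::BSONObj legacy = mongo::fromjson(
        "{$query: {count: 'coll', query: {x: 1}},"
        " $readPreference: {mode: 'secondaryPreferred'}}");
    mongo::OpMsgRequest req = upconvertRequest("test", legacy, /*queryFlags=*/0);
    // req.body should now look roughly like:
    //   {count: 'coll', query: {x: 1},
    //    $readPreference: {mode: 'secondaryPreferred'}, $db: 'test'}
}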
void IndexRebuilder::retryIndexBuild(const std::string& dbName,
                                     NamespaceDetails* nsd,
                                     const int index) {
    // details.info is always a valid system.indexes entry because DataFileMgr::insert journals
    // creating the index doc and then insert_makeIndex durably assigns its DiskLoc to info.
    // indexBuildsInProgress is set after that, so if it is set, info must be set.
    IndexDetails& details = nsd->idx(index);

    // First, clean up the in progress index build. Save the system.indexes entry so that we
    // can add it again afterwards.
    BSONObj indexObj = details.info.obj().getOwned();

    // Clean up the in-progress index build
    getDur().writingInt(nsd->indexBuildsInProgress) -= 1;
    details.kill_idx();

    // The index has now been removed from system.indexes, so the only record of it is in-
    // memory. If there is a journal commit between now and when insert() rewrites the entry and
    // the db crashes before the new system.indexes entry is journalled, the index will be lost
    // forever. Thus, we're assuming no journaling will happen between now and the entry being
    // re-written.

    // We need to force a foreground index build to prevent replication from replaying an
    // incompatible op (like a drop) during a yield.
    // TODO: once commands can interrupt/wait for index builds, this can be removed.
    indexObj = indexObj.removeField("background");

    try {
        const std::string ns = dbName + ".system.indexes";
        theDataFileMgr.insert(ns.c_str(), indexObj.objdata(), indexObj.objsize(), false, true);
    } catch (const DBException& e) {
        log() << "Rebuilding index failed: " << e.what() << " (" << e.getCode() << ")" << endl;
    }
}
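// --- Usage sketch (not from the original source; field values invented) ---
// removeField() does not mutate the object it is called on; it builds a new
// BSONObj without the named field, which is why the code above has to
// reassign indexObj. A minimal, self-contained illustration:
#include "mongo/db/json.h"

void removeBackgroundExample() {
    mongo::BSONObj spec = mongo::fromjson(
        "{name: 'a_1', ns: 'test.c', key: {a: 1}, background: true}");
    mongo::BSONObj foreground = spec.removeField("background");
    // spec still contains "background"; foreground does not.
}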
StatusWith<BSONObj> validateIndexSpecCollation(OperationContext* opCtx,
                                               const BSONObj& indexSpec,
                                               const CollatorInterface* defaultCollator) {
    if (auto collationElem = indexSpec[IndexDescriptor::kCollationFieldName]) {
        // validateIndexSpec() should have already verified that 'collationElem' is an object.
        invariant(collationElem.type() == BSONType::Object);

        auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
                            ->makeFromBSON(collationElem.Obj());
        if (!collator.isOK()) {
            return collator.getStatus();
        }

        if (collator.getValue()) {
            // If the collator factory returned a non-null collator, then inject the entire
            // collation specification into the index specification. This is necessary to fill
            // in any options that the user omitted.
            BSONObjBuilder bob;

            for (auto&& indexSpecElem : indexSpec) {
                if (IndexDescriptor::kCollationFieldName != indexSpecElem.fieldNameStringData()) {
                    bob.append(indexSpecElem);
                }
            }
            bob.append(IndexDescriptor::kCollationFieldName,
                       collator.getValue()->getSpec().toBSON());

            return bob.obj();
        } else {
            // If the collator factory returned a null collator (representing the "simple"
            // collation), then we simply omit the "collation" from the index specification.
            // This is desirable to make the representation for the "simple" collation
            // consistent between v=1 and v=2 indexes.
            return indexSpec.removeField(IndexDescriptor::kCollationFieldName);
        }
    } else if (defaultCollator) {
        // validateIndexSpec() should have added the "v" field if it was not present and
        // verified that 'versionElem' is a number.
        auto versionElem = indexSpec[IndexDescriptor::kIndexVersionFieldName];
        invariant(versionElem.isNumber());

        if (IndexVersion::kV2 <= static_cast<IndexVersion>(versionElem.numberInt())) {
            // The user did not specify an explicit collation for this index and the collection
            // has a default collator. If we're building a v=2 index, then we should inherit the
            // collection default. However, if we're building a v=1 index, then we're implicitly
            // building an index that's using the "simple" collation.
            BSONObjBuilder bob;
            bob.appendElements(indexSpec);
            bob.append(IndexDescriptor::kCollationFieldName, defaultCollator->getSpec().toBSON());
            return bob.obj();
        }
    }
    return indexSpec;
}
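// --- Illustrative helper (not from the original source) ---
// The non-null-collator branch above uses a common BSON pattern: rebuild the
// object while skipping one field, then append a replacement value for it.
// A hypothetical standalone version of that pattern:
#include "mongo/bson/bsonobjbuilder.h"

mongo::BSONObj replaceObjectField(const mongo::BSONObj& obj,
                                  mongo::StringData fieldName,
                                  const mongo::BSONObj& replacement) {
    mongo::BSONObjBuilder bob;
    for (auto&& elem : obj) {
        if (elem.fieldNameStringData() != fieldName) {
            bob.append(elem);  // copy every field except the one being replaced
        }
    }
    bob.append(fieldName, replacement);
    return bob.obj();
}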
void CDivisionController::SaveDivisionDefenseInfo(const string& strTableName,
                                                  CDivisionDefenseInfoModel* pDivisionDefenseModel) {
    BSONObj boDivisionDefenseInfo;
    BSONObj boCondition;

    boDivisionDefenseInfo = pDivisionDefenseModel->GetDivisionDefenseInfo();
    cout << "Save Data:" << boDivisionDefenseInfo.toString();

    boCondition = boDivisionDefenseInfo;
    boCondition = boCondition.removeField("clock");

    Insert(strTableName, boDivisionDefenseInfo, boCondition);
}
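// --- Usage sketch (not from the original source; field names invented) ---
// Assuming Insert() treats boCondition as a match/upsert filter, the
// condition here is the saved document minus its volatile "clock" field, so
// repeated saves of the same defense info (differing only in clock) update
// one record instead of accumulating duplicates:
void SaveConditionExample() {
    BSONObj doc = BSON("division" << 3 << "level" << 2 << "clock" << 1700000000);
    BSONObj cond = doc.removeField("clock");  // {division: 3, level: 2}
}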
std::list<mongo::BSONObj> TransformComputable::compute_transform(mongo::BSONObj query,
                                                                 std::string collection)
{
    // get positions in other frames
    BSONObjBuilder query_other_frames;
    query_other_frames.appendElements(query.removeField("frame").removeField("allow_tf"));
    query_other_frames.append("frame", fromjson("{$exists:true}"));
    QResCursor cur = robot_memory_->query(query_other_frames.obj(), collection);

    // transform them if possible
    std::list<mongo::BSONObj> res;
    std::string target_frame = query.getField("frame").String();
    while (cur->more()) {
        BSONObj pos = cur->next();
        if (pos.hasField("frame") && pos.hasField("translation") && pos.hasField("rotation")) {
            std::string src_frame = pos.getField("frame").String();
            Time now(0, 0);
            if (tf_->can_transform(target_frame.c_str(), src_frame.c_str(), now)) {
                BSONObjBuilder res_pos;
                std::vector<BSONElement> src_trans = pos.getField("translation").Array();
                std::vector<BSONElement> src_rot = pos.getField("rotation").Array();
                fawkes::tf::Transform pose_tf(
                    fawkes::tf::Quaternion(src_rot[0].Double(),
                                           src_rot[1].Double(),
                                           src_rot[2].Double(),
                                           src_rot[3].Double()),
                    fawkes::tf::Vector3(src_trans[0].Double(),
                                        src_trans[1].Double(),
                                        src_trans[2].Double()));
                fawkes::tf::Stamped<fawkes::tf::Pose> src_stamped_pose(
                    pose_tf, Time(0, 0), src_frame.c_str());
                fawkes::tf::Stamped<fawkes::tf::Pose> res_stamped_pose;
                tf_->transform_pose(target_frame.c_str(), src_stamped_pose, res_stamped_pose);

                res_pos.appendElements(pos.removeField("frame")
                                           .removeField("translation")
                                           .removeField("rotation")
                                           .removeField("_id"));
                res_pos.append("frame", target_frame);
                res_pos.append("allow_tf", true);
                BSONArrayBuilder arrb_trans;
                arrb_trans.append(res_stamped_pose.getOrigin().x());
                arrb_trans.append(res_stamped_pose.getOrigin().y());
                arrb_trans.append(res_stamped_pose.getOrigin().z());
                res_pos.append("translation", arrb_trans.arr());
                BSONArrayBuilder arrb_rot;
                arrb_rot.append(res_stamped_pose.getRotation().x());
                arrb_rot.append(res_stamped_pose.getRotation().y());
                arrb_rot.append(res_stamped_pose.getRotation().z());
                arrb_rot.append(res_stamped_pose.getRotation().w());
                res_pos.append("rotation", arrb_rot.arr());
                res.push_back(res_pos.obj());
            }
            // else
            // {
            //     logger_->log_info(name_, "Can't transform %s to %s",
            //                       src_frame.c_str(), target_frame.c_str());
            // }
        }
    }
    return res;
}
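// --- Usage sketch (not from the original source; values invented) ---
// A stored document compute_transform() can consume needs a 'frame' string,
// a 3-element 'translation' array, and a 4-element 'rotation' quaternion
// (x, y, z, w), matching the hasField() checks in the loop above:
mongo::BSONObj example_pose()
{
    BSONArrayBuilder trans;
    trans.append(0.1).append(0.0).append(0.3);
    BSONArrayBuilder rot;
    rot.append(0.0).append(0.0).append(0.0).append(1.0);
    return BSON("frame" << "base_link"
                << "translation" << trans.arr()
                << "rotation" << rot.arr());
}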
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& jsobj,
         int,
         string& errmsg,
         BSONObjBuilder& result) {
    DBDirectClient db(txn);

    const NamespaceString toReIndexNs = parseNsCollectionRequired(dbname, jsobj);

    LOG(0) << "CMD: reIndex " << toReIndexNs;

    ScopedTransaction transaction(txn, MODE_IX);
    Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
    OldClientContext ctx(txn, toReIndexNs.ns());

    Collection* collection = ctx.db()->getCollection(toReIndexNs.ns());
    if (!collection) {
        if (ctx.db()->getViewCatalog()->lookup(txn, toReIndexNs.ns()))
            return appendCommandStatus(
                result, {ErrorCodes::CommandNotSupportedOnView, "can't re-index a view"});
        else
            return appendCommandStatus(
                result, {ErrorCodes::NamespaceNotFound, "collection does not exist"});
    }

    BackgroundOperation::assertNoBgOpInProgForNs(toReIndexNs.ns());

    vector<BSONObj> all;
    {
        vector<string> indexNames;
        collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
        for (size_t i = 0; i < indexNames.size(); i++) {
            const string& name = indexNames[i];
            BSONObj spec = collection->getCatalogEntry()->getIndexSpec(txn, name);
            all.push_back(spec.removeField("v").getOwned());

            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                errmsg = str::stream()
                    << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
                    << " For more info see http://dochub.mongodb.org/core/index-validation";
                return false;
            }
        }
    }

    result.appendNumber("nIndexesWas", all.size());

    {
        WriteUnitOfWork wunit(txn);
        Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
        if (!s.isOK()) {
            errmsg = "dropIndexes failed";
            return appendCommandStatus(result, s);
        }
        wunit.commit();
    }

    MultiIndexBlock indexer(txn, collection);
    // do not want interruption as that will leave us without indexes.

    Status status = indexer.init(all);
    if (!status.isOK())
        return appendCommandStatus(result, status);

    status = indexer.insertAllDocumentsInCollection();
    if (!status.isOK())
        return appendCommandStatus(result, status);

    {
        WriteUnitOfWork wunit(txn);
        indexer.commit();
        wunit.commit();
    }

    // Do not allow majority reads from this collection until all original indexes are visible.
    // This was also done when dropAllIndexes() committed, but we need to ensure that no one
    // tries to read in the intermediate state where all indexes are newer than the current
    // snapshot so are unable to be used.
    auto replCoord = repl::ReplicationCoordinator::get(txn);
    auto snapshotName = replCoord->reserveSnapshotName(txn);
    replCoord->forceSnapshotCreation();  // Ensures a newer snapshot gets created even if idle.
    collection->setMinimumVisibleSnapshot(snapshotName);

    result.append("nIndexes", (int)all.size());
    result.append("indexes", all);

    return true;
}
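// --- Usage sketch (not from the original source) ---
// How a test might drive this command through DBDirectClient (assumes a
// valid OperationContext* txn and an existing collection "test.c"):
void reIndexExample(OperationContext* txn) {
    DBDirectClient client(txn);
    BSONObj info;
    bool ok = client.runCommand("test", BSON("reIndex" << "c"), info);
    // On success, info contains nIndexesWas, nIndexes, and the rebuilt index
    // specs (with their "v" field stripped, as in the loop above).
}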
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& jsobj,
         int,
         string& errmsg,
         BSONObjBuilder& result,
         bool /*fromRepl*/) {
    DBDirectClient db(txn);

    BSONElement e = jsobj.firstElement();
    string toDeleteNs = dbname + '.' + e.valuestr();

    LOG(0) << "CMD: reIndex " << toDeleteNs << endl;

    Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
    Client::Context ctx(txn, toDeleteNs);

    Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );

    if ( !collection ) {
        errmsg = "ns not found";
        return false;
    }

    BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );

    std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), jsobj);

    vector<BSONObj> all;
    {
        vector<string> indexNames;
        collection->getCatalogEntry()->getAllIndexes( txn, &indexNames );
        for ( size_t i = 0; i < indexNames.size(); i++ ) {
            const string& name = indexNames[i];
            BSONObj spec = collection->getCatalogEntry()->getIndexSpec( txn, name );
            all.push_back(spec.removeField("v").getOwned());

            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                errmsg = str::stream()
                    << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
                    << " For more info see http://dochub.mongodb.org/core/index-validation";
                return false;
            }
        }
    }

    result.appendNumber( "nIndexesWas", all.size() );

    {
        WriteUnitOfWork wunit(txn);
        Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
        if ( !s.isOK() ) {
            errmsg = "dropIndexes failed";
            return appendCommandStatus( result, s );
        }
        wunit.commit();
    }

    MultiIndexBlock indexer(txn, collection);
    // do not want interruption as that will leave us without indexes.

    Status status = indexer.init(all);
    if (!status.isOK())
        return appendCommandStatus( result, status );

    status = indexer.insertAllDocumentsInCollection();
    if (!status.isOK())
        return appendCommandStatus( result, status );

    {
        WriteUnitOfWork wunit(txn);
        indexer.commit();
        wunit.commit();
    }

    result.append( "nIndexes", (int)all.size() );
    result.append( "indexes", all );

    IndexBuilder::restoreIndexes(indexesInProg);

    return true;
}