// Build an index on the test collection with the given key pattern.
void addIndex(const BSONObj& obj) {
    _client.ensureIndex(ns(), obj);
}
// Parse a raw query object into a CanonicalQuery for this namespace.
// Asserts on parse failure; the caller owns the returned object.
CanonicalQuery* canonicalize(const BSONObj& query) {
    CanonicalQuery* rawCq = NULL;
    const Status parseStatus = CanonicalQuery::canonicalize(ns(), query, &rawCq);
    ASSERT_OK(parseStatus);
    return rawCq;
}
// Teardown: drop the test namespace, but only if it was ever created.
virtual ~Base() {
    if (nsd()) {
        string nsName(ns());
        dropNS(nsName);
    }
}
// Teardown: drop the test collection under a write lock and commit.
virtual ~QueryStageUpdateBase() {
    Client::WriteContext ctx(&_txn, ns());
    _client.dropCollection(ns());
    ctx.commit();
}
// Remove documents matching 'obj' from the test collection.
void remove(const BSONObj& obj) {
    _client.remove(ns(), obj);
}
bool cbmc_parseoptionst::process_goto_program( const optionst &options, goto_functionst &goto_functions) { try { namespacet ns(context); if(cmdline.isset("string-abstraction")) string_instrumentation( context, get_message_handler(), goto_functions); status("Function Pointer Removal"); remove_function_pointers(ns, goto_functions, cmdline.isset("pointer-check")); status("Partial Inlining"); // do partial inlining goto_partial_inline(goto_functions, ns, ui_message_handler); status("Generic Property Instrumentation"); // add generic checks goto_check(ns, options, goto_functions); if(cmdline.isset("string-abstraction")) { status("String Abstraction"); string_abstraction(context, get_message_handler(), goto_functions); } // add failed symbols // needs to be done before pointer analysis add_failed_symbols(context); if(cmdline.isset("pointer-check") || cmdline.isset("show-value-sets")) { status("Pointer Analysis"); value_set_analysist value_set_analysis(ns); value_set_analysis(goto_functions); // show it? if(cmdline.isset("show-value-sets")) { show_value_sets(get_ui(), goto_functions, value_set_analysis); return true; } status("Adding Pointer Checks"); // add pointer checks pointer_checks( goto_functions, context, options, value_set_analysis); } // recalculate numbers, etc. goto_functions.update(); // add loop ids goto_functions.compute_loop_numbers(); // if we aim to cover, replace // all assertions by false to prevent simplification if(cmdline.isset("cover-assertions")) make_assertions_false(goto_functions); // show it? if(cmdline.isset("show-loops")) { show_loop_numbers(get_ui(), goto_functions); return true; } // show it? if(cmdline.isset("show-goto-functions")) { goto_functions.output(ns, std::cout); return true; } } catch(const char *e) { error(e); return true; } catch(const std::string e) { error(e); return true; } catch(int) { return true; } catch(std::bad_alloc) { error("Out of memory"); return true; } return false; }
// Command entry point for "compact": compacts a single collection in place.
// Refuses to run on an active replica set primary unless force:true (slow,
// blocking operation), rejects system.* namespaces (other records point into
// system.indexes entries), and validates padding options: preservePadding is
// mutually exclusive with paddingFactor/paddingBytes; paddingFactor must be
// in [1,4] and paddingBytes in [0, 1MB]. It then takes a DB write lock,
// stops in-progress index builds for the namespace, runs
// Collection::compact(), reports corrupt documents (if any) in the result,
// and restores the stopped index builds. Capped collections cannot be
// compacted. Returns false with 'errmsg' set on any validation failure.
virtual bool run(OperationContext* txn, const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { string coll = cmdObj.firstElement().valuestr(); if( coll.empty() || db.empty() ) { errmsg = "no collection name specified"; return false; } repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator(); if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet && replCoord->getCurrentMemberState().primary() && !cmdObj["force"].trueValue()) { errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force"; return false; } NamespaceString ns(db,coll); if ( !ns.isNormal() ) { errmsg = "bad namespace name"; return false; } if ( ns.isSystem() ) { // items in system.* cannot be moved as there might be pointers to them // i.e. system.indexes entries are pointed to from NamespaceDetails errmsg = "can't compact a system namespace"; return false; } CompactOptions compactOptions; if ( cmdObj["preservePadding"].trueValue() ) { compactOptions.paddingMode = CompactOptions::PRESERVE; if ( cmdObj.hasElement( "paddingFactor" ) || cmdObj.hasElement( "paddingBytes" ) ) { errmsg = "cannot mix preservePadding and paddingFactor|paddingBytes"; return false; } } else if ( cmdObj.hasElement( "paddingFactor" ) || cmdObj.hasElement( "paddingBytes" ) ) { compactOptions.paddingMode = CompactOptions::MANUAL; if ( cmdObj.hasElement("paddingFactor") ) { compactOptions.paddingFactor = cmdObj["paddingFactor"].Number(); if ( compactOptions.paddingFactor < 1 || compactOptions.paddingFactor > 4 ){ errmsg = "invalid padding factor"; return false; } } if ( cmdObj.hasElement("paddingBytes") ) { compactOptions.paddingBytes = cmdObj["paddingBytes"].numberInt(); if ( compactOptions.paddingBytes < 0 || compactOptions.paddingBytes > ( 1024 * 1024 ) ) { errmsg = "invalid padding bytes"; return false; } } } if ( cmdObj.hasElement("validate") ) 
compactOptions.validateDocuments = cmdObj["validate"].trueValue(); Lock::DBWrite lk(txn->lockState(), ns.ns()); // SERVER-14085: The following will have to go as we push down WOUW WriteUnitOfWork wunit(txn); BackgroundOperation::assertNoBgOpInProgForNs(ns.ns()); Client::Context ctx(txn, ns); Collection* collection = ctx.db()->getCollection(txn, ns.ns()); if( ! collection ) { errmsg = "namespace does not exist"; return false; } if ( collection->isCapped() ) { errmsg = "cannot compact a capped collection"; return false; } log() << "compact " << ns << " begin, options: " << compactOptions.toString(); std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), cmdObj); StatusWith<CompactStats> status = collection->compact( txn, &compactOptions ); if ( !status.isOK() ) return appendCommandStatus( result, status.getStatus() ); if ( status.getValue().corruptDocuments > 0 ) result.append("invalidObjects", status.getValue().corruptDocuments ); log() << "compact " << ns << " end"; IndexBuilder::restoreIndexes(indexesInProg); wunit.commit(); return true; }
// Returns a copy of 's' with all trailing whitespace removed.
//
// FIX: the original used std::not1(std::ptr_fun<int,int>(std::isspace)),
// which was deprecated in C++11/14 and removed entirely in C++17, so it no
// longer compiles. It also passed a plain (possibly negative) char to
// std::isspace, which is undefined behavior; the lambda below takes
// unsigned char to make the call well-defined for all byte values.
std::string rtrim(const std::string& s) {
    std::string ns(s);
    const auto firstTrailingSpace =
        std::find_if(ns.rbegin(), ns.rend(),
                     [](unsigned char ch) { return !std::isspace(ch); })
            .base();
    ns.erase(firstTrailingSpace, ns.end());
    return ns;
}
// Loads locale-specific number-formatting symbols into this instance.
// The data is resolved in fallback stages:
//   1. hard-coded last-resort symbols (initialize());
//   2. the locale's numbering system digits, but only when the system is
//      decimal (radix 10) and non-algorithmic -- otherwise Latin is used;
//   3. resource-bundle symbol data via a data sink: the native numbering
//      system first (a missing-resource error is silently cleared), then
//      Latin for any symbols still unseen;
//   4. currency data: currency symbol/name via the ucurr_* API (failures
//      here are deliberately not propagated), then monetary separators and
//      the currency pattern from the CLDR "Currencies" resource when the
//      per-currency entry carries the optional third element, and finally
//      currency-spacing data.
// If the locale's resource bundle cannot be opened at all, the last-resort
// symbols are kept only when 'useLastResortData' is set (status becomes
// U_USING_DEFAULT_WARNING); otherwise the failure status is returned as-is.
void DecimalFormatSymbols::initialize(const Locale& loc, UErrorCode& status, UBool useLastResortData) { if (U_FAILURE(status)) { return; } *validLocale = *actualLocale = 0; currPattern = NULL; // First initialize all the symbols to the fallbacks for anything we can't find initialize(); // // Next get the numbering system for this locale and set zero digit // and the digit string based on the numbering system for the locale // LocalPointer<NumberingSystem> ns(NumberingSystem::createInstance(loc, status)); const char *nsName; if (U_SUCCESS(status) && ns->getRadix() == 10 && !ns->isAlgorithmic()) { nsName = ns->getName(); UnicodeString digitString(ns->getDescription()); int32_t digitIndex = 0; UChar32 digit = digitString.char32At(0); fSymbols[kZeroDigitSymbol].setTo(digit); for (int32_t i = kOneDigitSymbol; i <= kNineDigitSymbol; ++i) { digitIndex += U16_LENGTH(digit); digit = digitString.char32At(digitIndex); fSymbols[i].setTo(digit); } } else { nsName = gLatn; } // Open resource bundles const char* locStr = loc.getName(); LocalUResourceBundlePointer resource(ures_open(NULL, locStr, &status)); LocalUResourceBundlePointer numberElementsRes( ures_getByKeyWithFallback(resource.getAlias(), gNumberElements, NULL, &status)); if (U_FAILURE(status)) { if ( useLastResortData ) { status = U_USING_DEFAULT_WARNING; initialize(); } return; } // Set locale IDs // TODO: Is there a way to do this without depending on the resource bundle instance? U_LOCALE_BASED(locBased, *this); locBased.setLocaleIDs( ures_getLocaleByType( numberElementsRes.getAlias(), ULOC_VALID_LOCALE, &status), ures_getLocaleByType( numberElementsRes.getAlias(), ULOC_ACTUAL_LOCALE, &status)); // Now load the rest of the data from the data sink. // Start with loading this nsName if it is not Latin. 
DecFmtSymDataSink sink(*this); if (uprv_strcmp(nsName, gLatn) != 0) { CharString path; path.append(gNumberElements, status) .append('/', status) .append(nsName, status) .append('/', status) .append(gSymbols, status); ures_getAllItemsWithFallback(resource.getAlias(), path.data(), sink, status); // If no symbols exist for the given nsName and resource bundle, silently ignore // and fall back to Latin. if (status == U_MISSING_RESOURCE_ERROR) { status = U_ZERO_ERROR; } else if (U_FAILURE(status)) { return; } } // Continue with Latin if necessary. if (!sink.seenAll()) { ures_getAllItemsWithFallback(resource.getAlias(), gNumberElementsLatnSymbols, sink, status); if (U_FAILURE(status)) { return; } } // Let the monetary number separators equal the default number separators if necessary. sink.resolveMissingMonetarySeparators(fSymbols); // Obtain currency data from the currency API. This is strictly // for backward compatibility; we don't use DecimalFormatSymbols // for currency data anymore. UErrorCode internalStatus = U_ZERO_ERROR; // don't propagate failures out UChar curriso[4]; UnicodeString tempStr; ucurr_forLocale(locStr, curriso, 4, &internalStatus); uprv_getStaticCurrencyName(curriso, locStr, tempStr, internalStatus); if (U_SUCCESS(internalStatus)) { fSymbols[kIntlCurrencySymbol].setTo(curriso, -1); fSymbols[kCurrencySymbol] = tempStr; } /* else use the default values. 
*/ //load the currency data UChar ucc[4]={0}; //Currency Codes are always 3 chars long int32_t uccLen = 4; const char* locName = loc.getName(); UErrorCode localStatus = U_ZERO_ERROR; uccLen = ucurr_forLocale(locName, ucc, uccLen, &localStatus); if(U_SUCCESS(localStatus) && uccLen > 0) { char cc[4]={0}; u_UCharsToChars(ucc, cc, uccLen); /* An explicit currency was requested */ LocalUResourceBundlePointer currencyResource(ures_open(U_ICUDATA_CURR, locStr, &localStatus)); LocalUResourceBundlePointer currency( ures_getByKeyWithFallback(currencyResource.getAlias(), "Currencies", NULL, &localStatus)); ures_getByKeyWithFallback(currency.getAlias(), cc, currency.getAlias(), &localStatus); if(U_SUCCESS(localStatus) && ures_getSize(currency.getAlias())>2) { // the length is 3 if more data is present ures_getByIndex(currency.getAlias(), 2, currency.getAlias(), &localStatus); int32_t currPatternLen = 0; currPattern = ures_getStringByIndex(currency.getAlias(), (int32_t)0, &currPatternLen, &localStatus); UnicodeString decimalSep = ures_getUnicodeStringByIndex(currency.getAlias(), (int32_t)1, &localStatus); UnicodeString groupingSep = ures_getUnicodeStringByIndex(currency.getAlias(), (int32_t)2, &localStatus); if(U_SUCCESS(localStatus)){ fSymbols[kMonetaryGroupingSeparatorSymbol] = groupingSep; fSymbols[kMonetarySeparatorSymbol] = decimalSep; //pattern.setTo(TRUE, currPattern, currPatternLen); status = localStatus; } } /* else An explicit currency was requested and is unknown or locale data is malformed. */ /* ucurr_* API will get the correct value later on. */ } // else ignore the error if no currency // Currency Spacing. LocalUResourceBundlePointer currencyResource(ures_open(U_ICUDATA_CURR, locStr, &status)); CurrencySpacingSink currencySink(*this); ures_getAllItemsWithFallback(currencyResource.getAlias(), gCurrencySpacingTag, currencySink, status); currencySink.resolveMissing(); if (U_FAILURE(status)) { return; } }
// Teardown: drop the test collection.
virtual ~QueryStageFetchBase() {
    _client.dropCollection(ns());
}
// Insert a single document into the test collection.
void insert(const BSONObj& obj) {
    _client.insert(ns(), obj);
}
// Regression test for AndHashStage under mid-plan invalidation: a result
// already buffered in the hash table that gets invalidated during a yield
// must be flagged for review and excluded from the final AND output.
// Setup: 50 docs with foo == bar == i, indexed on foo and bar. The AND
// intersects foo <= 20 (descending scan, read first into the hash table)
// with bar >= 10. After consuming half of the first child, the stage
// yields, the doc with foo == 15 is invalidated and removed, and on
// recovery the test expects exactly one flagged OWNED_OBJ member holding
// foo == 15, then 10 (not 11) final results, each with foo <= 20,
// foo != 15, and bar >= 10.
void run() { Client::WriteContext ctx(ns()); for (int i = 0; i < 50; ++i) { insert(BSON("foo" << i << "bar" << i)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL)); // Foo <= 20 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1)); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSONObj(); params.bounds.endKeyInclusive = true; params.direction = -1; ah->addChild(new IndexScan(params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1)); params.bounds.startKey = BSON("" << 10); params.bounds.endKey = BSONObj(); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // ah reads the first child into its hash table. // ah should read foo=20, foo=19, ..., foo=0 in that order. // Read half of them... for (int i = 0; i < 10; ++i) { WorkingSetID out; PlanStage::StageState status = ah->work(&out); ASSERT_EQUALS(PlanStage::NEED_TIME, status); } // ...yield ah->prepareToYield(); // ...invalidate one of the read objects set<DiskLoc> data; getLocs(&data); for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) { if (it->obj()["foo"].numberInt() == 15) { ah->invalidate(*it); remove(it->obj()); break; } } ah->recoverFromYield(); // And expect to find foo==15 it flagged for review. const vector<WorkingSetID>& flagged = ws.getFlagged(); ASSERT_EQUALS(size_t(1), flagged.size()); // Expect to find the right value of foo in the flagged item. WorkingSetMember* member = ws.get(flagged[0]); ASSERT_TRUE(NULL != member); ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state); BSONElement elt; ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(15, elt.numberInt()); // Now, finish up the AND. Since foo == bar, we would have 11 results, but we subtract // one because of a mid-plan invalidation, so 10. 
int count = 0; while (!ah->isEOF()) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } ++count; member = ws.get(id); ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_LESS_THAN_OR_EQUALS(elt.numberInt(), 20); ASSERT_NOT_EQUALS(15, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10); } ASSERT_EQUALS(10, count); }
// Collect the DiskLoc of every record in the test collection into 'out'.
void getLocs(set<DiskLoc>* out) {
    boost::shared_ptr<Cursor> cursor = theDataFileMgr.findAll(ns());
    while (cursor->ok()) {
        out->insert(cursor->currLoc());
        cursor->advance();
    }
}
// Look up the descriptor for the index with the given key pattern on the
// test collection.
IndexDescriptor* getIndex(const BSONObj& obj) {
    NamespaceDetails* details = nsdetails(ns());
    const int indexNo = details->findIndexByKeyPattern(obj);
    return CatalogHack::getDescriptor(details, indexNo);
}
// Teardown: drop the test collection.
virtual ~CollectionBase() {
    client().dropCollection(ns());
}
// Namespace-aware (NS/ns macro) variants of the expat encoding tables.
//
// XmlGetUtf16InternalEncoding returns the UTF-16 encoding matching the
// host byte order: resolved at compile time when XML_BYTE_ORDER is 12
// (little-endian) or 21 (big-endian), otherwise detected at run time by
// inspecting the first byte of a short with value 1.
//
// NS(encodings) is an encoding lookup table; big2_encoding appears twice
// on purpose (consecutive slots -- presumably UTF-16BE and byte-order-
// unspecified UTF-16 per the XML_*_ENC indices; confirm against xmltok.h),
// and the final utf8 slot is the NO_ENC fallback.
//
// NS(initScanProlog) kicks off scanning in the prolog state using this
// encoding table for auto-detection.
const ENCODING *NS(XmlGetUtf16InternalEncoding)() { #if XML_BYTE_ORDER == 12 return &ns(internal_little2_encoding).enc; #elif XML_BYTE_ORDER == 21 return &ns(internal_big2_encoding).enc; #else const short n = 1; return *(const char *)&n ? &ns(internal_little2_encoding).enc : &ns(internal_big2_encoding).enc; #endif } static const ENCODING *NS(encodings)[] = { &ns(latin1_encoding).enc, &ns(ascii_encoding).enc, &ns(utf8_encoding).enc, &ns(big2_encoding).enc, &ns(big2_encoding).enc, &ns(little2_encoding).enc, &ns(utf8_encoding).enc /* NO_ENC */ }; static int NS(initScanProlog)(const ENCODING *enc, const char *ptr, const char *end, const char **nextTokPtr) { return initScan(NS(encodings), (const INIT_ENCODING *)enc, XML_PROLOG_STATE, ptr, end, nextTokPtr); }
// Record in the per-namespace transient info that this plan's index
// (indexKey()) answered a query with this bounds/ordering pattern, along
// with its nScanned cost, so later similar queries can reuse the choice.
void QueryPlan::registerSelf( long long nScanned ) const { NamespaceDetailsTransient::get( ns() ).registerIndexForPattern( fbs_.pattern( order_ ), indexKey(), nScanned ); }
// Returns the namespace-aware internal UTF-8 encoding table (NS/ns macro
// variant of the expat encodings).
const ENCODING *NS(XmlGetUtf8InternalEncoding)() { return &ns(internal_utf8_encoding).enc; }
// Fetch the test collection from the current context's database.
Collection* collection() {
    Database* db = _ctx->ctx().db();
    return db->getCollection(&_opCtx, ns());
}
// Handles a legacy OP_QUERY request on mongos. After authorization and
// audit logging it rejects commands and the 'exhaust' option, then follows
// one of two paths:
//  (a) when the 'useClusterClientCursor' spigot is on, the query is
//      canonicalized and run through ClusterFind::runQuery, which returns
//      the first batch and a cursor id (0 == exhausted; otherwise the
//      cursor lives in the ClusterCursorManager);
//      NOTE(review): the inner `auto txn = cc().makeOperationContext()`
//      shadows the OperationContext* parameter -- looks intentional for
//      the new code path, but confirm.
//  (b) otherwise a ParallelSortClusteredCursor is built across the
//      targeted shards. Explain requests reply immediately with timing.
//      Multi-shard (or zero-shard, for returnPartial) results go through a
//      ShardedClientCursor, which is stored in the cursor cache only when
//      more batches remain, with the leftover $maxTimeMS budget
//      (0 == no limit). Single-shard results are relayed directly from the
//      shard's own cursor, which is decoupled so it is not killed remotely.
void Strategy::queryOp(OperationContext* txn, Request& r) { verify(!NamespaceString(r.getns()).isCommand()); Timer queryTimer; globalOpCounters.gotQuery(); QueryMessage q(r.d()); NamespaceString ns(q.ns); ClientBasic* client = txn->getClient(); AuthorizationSession* authSession = AuthorizationSession::get(client); Status status = authSession->checkAuthForQuery(ns, q.query); audit::logQueryAuthzCheck(client, ns, q.query, status.code()); uassertStatusOK(status); LOG(3) << "query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << " options: " << q.queryOptions; if (q.ntoreturn == 1 && strstr(q.ns, ".$cmd")) throw UserException(8010, "something is wrong, shouldn't see a command here"); if (q.queryOptions & QueryOption_Exhaust) { uasserted(18526, string("the 'exhaust' query option is invalid for mongos queries: ") + q.ns + " " + q.query.toString()); } // Spigot which controls whether OP_QUERY style find on mongos uses the new ClusterClientCursor // code path. // TODO: Delete the spigot and always use the new code. if (useClusterClientCursor) { auto txn = cc().makeOperationContext(); ReadPreferenceSetting readPreference(ReadPreference::PrimaryOnly, TagSet::primaryOnly()); BSONElement rpElem; auto readPrefExtractStatus = bsonExtractTypedField( q.query, LiteParsedQuery::kFindCommandReadPrefField, mongo::Object, &rpElem); if (readPrefExtractStatus.isOK()) { auto parsedRps = ReadPreferenceSetting::fromBSON(rpElem.Obj()); uassertStatusOK(parsedRps.getStatus()); readPreference = parsedRps.getValue(); } else if (readPrefExtractStatus != ErrorCodes::NoSuchKey) { uassertStatusOK(readPrefExtractStatus); } auto canonicalQuery = CanonicalQuery::canonicalize(q, WhereCallbackNoop()); uassertStatusOK(canonicalQuery.getStatus()); // Do the work to generate the first batch of results. This blocks waiting to get responses // from the shard(s). 
std::vector<BSONObj> batch; // 0 means the cursor is exhausted and // otherwise we assume that a cursor with the returned id can be retrieved via the // ClusterCursorManager auto cursorId = ClusterFind::runQuery(txn.get(), *canonicalQuery.getValue(), readPreference, &batch); uassertStatusOK(cursorId.getStatus()); // Build the response document. // TODO: this constant should be shared between mongos and mongod, and should // not be inside ShardedClientCursor. BufBuilder buffer(ShardedClientCursor::INIT_REPLY_BUFFER_SIZE); int numResults = 0; for (const auto& obj : batch) { buffer.appendBuf((void*)obj.objdata(), obj.objsize()); numResults++; } replyToQuery(0, // query result flags r.p(), r.m(), buffer.buf(), buffer.len(), numResults, 0, // startingFrom cursorId.getValue()); return; } QuerySpec qSpec((string)q.ns, q.query, q.fields, q.ntoskip, q.ntoreturn, q.queryOptions); // Parse "$maxTimeMS". StatusWith<int> maxTimeMS = LiteParsedQuery::parseMaxTimeMSQuery(q.query); uassert(17233, maxTimeMS.getStatus().reason(), maxTimeMS.isOK()); if (_isSystemIndexes(q.ns) && doShardedIndexQuery(txn, r, qSpec)) { return; } ParallelSortClusteredCursor* cursor = new ParallelSortClusteredCursor(qSpec, CommandInfo()); verify(cursor); // TODO: Move out to Request itself, not strategy based try { cursor->init(txn); if (qSpec.isExplain()) { BSONObjBuilder explain_builder; cursor->explain(explain_builder); explain_builder.appendNumber("executionTimeMillis", static_cast<long long>(queryTimer.millis())); BSONObj b = explain_builder.obj(); replyToQuery(0, r.p(), r.m(), b); delete (cursor); return; } } catch (...) { delete cursor; throw; } // TODO: Revisit all of this when we revisit the sharded cursor cache if (cursor->getNumQueryShards() != 1) { // More than one shard (or zero), manage with a ShardedClientCursor // NOTE: We may also have *zero* shards here when the returnPartial flag is set. // Currently the code in ShardedClientCursor handles this. 
ShardedClientCursorPtr cc(new ShardedClientCursor(q, cursor)); BufBuilder buffer(ShardedClientCursor::INIT_REPLY_BUFFER_SIZE); int docCount = 0; const int startFrom = cc->getTotalSent(); bool hasMore = cc->sendNextBatch(q.ntoreturn, buffer, docCount); if (hasMore) { LOG(5) << "storing cursor : " << cc->getId(); int cursorLeftoverMillis = maxTimeMS.getValue() - queryTimer.millis(); if (maxTimeMS.getValue() == 0) { // 0 represents "no limit". cursorLeftoverMillis = kMaxTimeCursorNoTimeLimit; } else if (cursorLeftoverMillis <= 0) { cursorLeftoverMillis = kMaxTimeCursorTimeLimitExpired; } cursorCache.store(cc, cursorLeftoverMillis); } replyToQuery(0, r.p(), r.m(), buffer.buf(), buffer.len(), docCount, startFrom, hasMore ? cc->getId() : 0); } else { // Only one shard is used // Remote cursors are stored remotely, we shouldn't need this around. unique_ptr<ParallelSortClusteredCursor> cursorDeleter(cursor); ShardPtr shard = cursor->getQueryShard(); verify(shard.get()); DBClientCursorPtr shardCursor = cursor->getShardCursor(shard->getId()); // Implicitly stores the cursor in the cache r.reply(*(shardCursor->getMessage()), shardCursor->originalHost()); // We don't want to kill the cursor remotely if there's still data left shardCursor->decouple(); } }
// Tests that a multi-update survives mid-plan document invalidation.
// Setup: 10 docs {_id: i, foo: i}; the update sets bar=3 on every doc with
// foo < 5 via an UpdateStage over a forward collection scan. After 3 docs
// have been modified, the stage saves its state, the doc at
// locs[targetDocIndex] (_id: 3) is invalidated and removed, and the stage
// is restored. On completion the stage must have skipped the deleted doc:
// nModified == nMatched == 4, the collection holds 9 docs, and the
// assertions below verify that _id 0,1,2,4 got bar=3 while 5,6 are
// untouched (_id 3 is gone).
void run() { // Run the update. { Client::WriteContext ctx(&_txn, ns()); // Populate the collection. for (int i = 0; i < 10; ++i) { insert(BSON("_id" << i << "foo" << i)); } ASSERT_EQUALS(10U, count(BSONObj())); Client& c = cc(); CurOp& curOp = *c.curop(); OpDebug* opDebug = &curOp.debug(); UpdateDriver driver( (UpdateDriver::Options()) ); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(&_txn, ns()); // Get the DiskLocs that would be returned by an in-order scan. vector<DiskLoc> locs; getLocs(coll, CollectionScanParams::FORWARD, &locs); UpdateRequest request(&_txn, nsString()); UpdateLifecycleImpl updateLifecycle(false, nsString()); request.setLifecycle(&updateLifecycle); // Update is a multi-update that sets 'bar' to 3 in every document // where foo is less than 5. BSONObj query = fromjson("{foo: {$lt: 5}}"); BSONObj updates = fromjson("{$set: {bar: 3}}"); request.setMulti(); request.setQuery(query); request.setUpdates(updates); ASSERT_OK(driver.parse(request.getUpdates(), request.isMulti())); // Configure the scan. CollectionScanParams collScanParams; collScanParams.collection = coll; collScanParams.direction = CollectionScanParams::FORWARD; collScanParams.tailable = false; // Configure the update. 
UpdateStageParams updateParams(&request, &driver, opDebug); scoped_ptr<CanonicalQuery> cq(canonicalize(query)); updateParams.canonicalQuery = cq.get(); scoped_ptr<WorkingSet> ws(new WorkingSet()); auto_ptr<CollectionScan> cs( new CollectionScan(&_txn, collScanParams, ws.get(), cq->root())); scoped_ptr<UpdateStage> updateStage( new UpdateStage(updateParams, ws.get(), db, cs.release())); const UpdateStats* stats = static_cast<const UpdateStats*>(updateStage->getSpecificStats()); const size_t targetDocIndex = 3; while (stats->nModified < targetDocIndex) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = updateStage->work(&id); ASSERT_EQUALS(PlanStage::NEED_TIME, state); } // Remove locs[targetDocIndex]; updateStage->saveState(); updateStage->invalidate(locs[targetDocIndex], INVALIDATION_DELETION); BSONObj targetDoc = coll->docFor(&_txn, locs[targetDocIndex]); ASSERT(!targetDoc.isEmpty()); remove(targetDoc); updateStage->restoreState(&_txn); // Do the remaining updates. while (!updateStage->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = updateStage->work(&id); ASSERT(PlanStage::NEED_TIME == state || PlanStage::IS_EOF == state); } ctx.commit(); // 4 of the 5 matching documents should have been modified (one was deleted). ASSERT_EQUALS(4U, stats->nModified); ASSERT_EQUALS(4U, stats->nMatched); } // Check the contents of the collection. { Client::ReadContext ctx(&_txn, ns()); Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns()); vector<BSONObj> objs; getCollContents(collection, &objs); // Verify that the collection now has 9 docs (one was deleted). ASSERT_EQUALS(9U, objs.size()); // Make sure that the collection has certain documents. 
assertHasDoc(objs, fromjson("{_id: 0, foo: 0, bar: 3}")); assertHasDoc(objs, fromjson("{_id: 1, foo: 1, bar: 3}")); assertHasDoc(objs, fromjson("{_id: 2, foo: 2, bar: 3}")); assertHasDoc(objs, fromjson("{_id: 4, foo: 4, bar: 3}")); assertHasDoc(objs, fromjson("{_id: 5, foo: 5}")); assertHasDoc(objs, fromjson("{_id: 6, foo: 6}")); } }
void ServerLobbyRoomProtocol::checkRaceFinished() { assert(RaceEventManager::getInstance()->isRunning()); assert(World::getWorld()); // if race is over, give the final score to everybody if (RaceEventManager::getInstance()->isRaceOver()) { // calculate karts ranks : int num_players = race_manager->getNumberOfKarts(); std::vector<int> karts_results; std::vector<float> karts_times; for (int j = 0; j < num_players; j++) { float kart_time = race_manager->getKartRaceTime(j); for (unsigned int i = 0; i < karts_times.size(); i++) { if (kart_time < karts_times[i]) { karts_times.insert(karts_times.begin()+i, kart_time); karts_results.insert(karts_results.begin()+i, j); break; } } } const std::vector<STKPeer*> &peers = STKHost::get()->getPeers(); NetworkString queue(karts_results.size()*2); for (unsigned int i = 0; i < karts_results.size(); i++) { queue.ai8(1).ai8(karts_results[i]); // kart pos = i+1 Log::info("ServerLobbyRoomProtocol", "Kart %d finished #%d", karts_results[i], i + 1); } for (unsigned int i = 0; i < peers.size(); i++) { NetworkString ns(6); ns.ai8(LE_RACE_FINISHED).ai8(4).ai32(peers[i]->getClientServerToken()); NetworkString total = ns + queue; sendSynchronousMessage(peers[i], total, true); } Log::info("ServerLobbyRoomProtocol", "End of game message sent"); m_in_race = false; // stop race protocols Protocol* protocol = ProtocolManager::getInstance() ->getProtocol(PROTOCOL_CONTROLLER_EVENTS); if (protocol) protocol->requestTerminate(); else Log::error("ClientLobbyRoomProtocol", "No controller events protocol registered."); protocol = ProtocolManager::getInstance() ->getProtocol(PROTOCOL_KART_UPDATE); if (protocol) protocol->requestTerminate(); else Log::error("ClientLobbyRoomProtocol", "No kart update protocol registered."); protocol = ProtocolManager::getInstance() ->getProtocol(PROTOCOL_GAME_EVENTS); if (protocol) protocol->requestTerminate(); else Log::error("ClientLobbyRoomProtocol", "No game events protocol registered."); // notify the network world that 
it is stopped RaceEventManager::getInstance()->stop(); // exit the race now race_manager->exitRace(); race_manager->setAIKartOverride(""); } } // checkRaceFinished
// Insert a single document into the test collection.
void insert(const BSONObj& doc) {
    _client.insert(ns(), doc);
}
// Command that splits a full collection scan into up to 'numCursors'
// independent cursors, mimicking the aggregation cursor reply shape
// (each entry has "cursor" with an empty firstBatch, ns, id, plus "ok").
// Validates numCursors is in [1, 10000], caps it at the number of
// RecordIterators the collection actually provides, distributes the
// iterators round-robin across MultiIteratorRunners, and registers each
// runner under a ClientCursor (which then owns the runner's lifetime).
// NOTE(review): cursor.append( "ns", ns ) passes the NamespaceString
// object itself rather than ns.ns() -- presumably an appropriate overload
// exists; verify against BSONObjBuilder.
virtual bool run( const string& dbname, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl = false ) { NamespaceString ns( dbname, cmdObj[name].String() ); Client::ReadContext ctx(ns.ns()); Database* db = ctx.ctx().db(); Collection* collection = db->getCollection( ns ); if ( !collection ) return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound, str::stream() << "ns does not exist: " << ns.ns() ) ); size_t numCursors = static_cast<size_t>( cmdObj["numCursors"].numberInt() ); if ( numCursors == 0 || numCursors > 10000 ) return appendCommandStatus( result, Status( ErrorCodes::BadValue, str::stream() << "numCursors has to be between 1 and 10000" << " was: " << numCursors ) ); OwnedPointerVector<RecordIterator> iterators(collection->getManyIterators()); if (iterators.size() < numCursors) { numCursors = iterators.size(); } OwnedPointerVector<MultiIteratorRunner> runners; for ( size_t i = 0; i < numCursors; i++ ) { runners.push_back(new MultiIteratorRunner(ns.ns(), collection)); } // transfer iterators to runners using a round-robin distribution. // TODO consider using a common work queue once invalidation issues go away. for (size_t i = 0; i < iterators.size(); i++) { runners[i % runners.size()]->addIterator(iterators.releaseAt(i)); } { BSONArrayBuilder bucketsBuilder; for (size_t i = 0; i < runners.size(); i++) { // transfer ownership of a runner to the ClientCursor (which manages its own // lifetime). 
ClientCursor* cc = new ClientCursor( collection, runners.releaseAt(i) ); // we are mimicking the aggregation cursor output here // that is why there are ns, ok and empty firstBatch BSONObjBuilder threadResult; { BSONObjBuilder cursor; cursor.appendArray( "firstBatch", BSONObj() ); cursor.append( "ns", ns ); cursor.append( "id", cc->cursorid() ); threadResult.append( "cursor", cursor.obj() ); } threadResult.appendBool( "ok", 1 ); bucketsBuilder.append( threadResult.obj() ); } result.appendArray( "cursors", bucketsBuilder.obj() ); } return true; }
// Count the documents in the test collection that match 'query'.
size_t count(const BSONObj& query) {
    return _client.count(ns(), query, 0, 0, 0);
}
// Expat start-element callback: resolves XML namespaces for the element
// and its attributes, then creates and attaches the XmlElement node.
// First pass collects xmlns / xmlns:prefix declarations made on this very
// element. The element's own name is then resolved: a prefixed name checks
// those local declarations first and falls back to the ancestors via
// lookupNamespaceURI; an unprefixed name (with no local default xmlns)
// inherits the parent element's default namespace. Second pass resolves
// each attribute's prefix the same way (unprefixed attributes get no
// namespace) and sets it on the new element via setAttributeNS or
// setAttribute accordingly.
void XmlParser::OnStartElement(const XML_Char *name, const XML_Char **atts) { const XML_Char **p = atts; std::map<exlib::string, exlib::string> nss; exlib::string def_ns; bool has_def = false; while (p[0] && p[1]) { const XML_Char *ns = p[0]; if (!qstrcmp(ns, "xmlns", 5)) { if (ns[5] == ':') nss.insert(std::pair<exlib::string, exlib::string>(ns + 6, p[1])); else if (!ns[5]) { def_ns = p[1]; has_def = true; } } p += 2; } obj_ptr<XmlElement> el; const char *str = qstrchr(name, ':'); if (str) { exlib::string prefix(name, str - name); exlib::string qname(str + 1); std::map<exlib::string, exlib::string>::iterator it; it = nss.find(prefix); if (it != nss.end()) def_ns = it->second; else m_now->lookupNamespaceURI(prefix, def_ns); } else if (!has_def) { int32_t type; m_now->get_nodeType(type); if (type == xml_base::_ELEMENT_NODE) ((XmlElement *)(XmlNode_base *)m_now)->get_defaultNamespace(def_ns); } if (!def_ns.empty()) el = new XmlElement(m_document, def_ns, name, m_isXml); else el = new XmlElement(m_document, name, m_isXml); newNode(el, true); while (atts[0] && atts[1]) { name = atts[0]; str = qstrchr(name, ':'); if (str && str[1]) { exlib::string ns(name, str - name); exlib::string qname(str + 1); std::map<exlib::string, exlib::string>::iterator it; it = nss.find(ns); if (it != nss.end()) def_ns = it->second; else m_now->lookupNamespaceURI(ns, def_ns); } else def_ns.clear(); if (!def_ns.empty()) el->setAttributeNS(def_ns, name, atts[1]); else el->setAttribute(name, atts[1]); atts += 2; } }
// Point the client at this test's namespace before the test body runs.
Base() {
    setClient(ns());
}
// Removes internal symbols from the table via mark-and-sweep: mark
// everything reachable from an "exported" root -- a fixed list of
// CPROVER-internal specials, non-file-local functions with a body, and
// non-file-local variables with a (non-zero) initializer -- then erase
// every unmarked symbol.
void remove_internal_symbols(
  symbol_tablet &symbol_table)
{
  namespacet ns(symbol_table);
  find_symbols_sett exported;

  // Roots that are always retained.
  find_symbols_sett special;
  special.insert("c::argc'");
  special.insert("c::argv'");
  special.insert("c::envp'");
  special.insert("c::envp_size'");
  special.insert("c::__CPROVER_memory");
  special.insert("c::__CPROVER_initialize");
  special.insert("c::__CPROVER_malloc_size");
  special.insert("c::__CPROVER_deallocated");
  special.insert("c::__CPROVER_rounding_mode");

  // Mark phase.
  for(symbol_tablet::symbolst::const_iterator
      s_it=symbol_table.symbols.begin();
      s_it!=symbol_table.symbols.end();
      s_it++)
  {
    // Already reached through some other root?
    if(exported.find(s_it->first)!=exported.end())
      continue;

    const symbolt &symbol=s_it->second;

    if(special.find(symbol.name)!=special.end())
    {
      get_symbols_rec(ns, symbol, exported);
      continue;
    }

    const bool is_function=symbol.type.id()==ID_code;
    const bool is_file_local=symbol.is_file_local;
    const bool is_type=symbol.is_type;
    const bool has_body=symbol.value.is_not_nil();
    const bool has_initializer=
      symbol.value.is_not_nil() &&
      !symbol.value.get_bool(ID_C_zero_initializer);

    // Types are never exported by themselves.
    if(!is_type)
    {
      if(is_function)
      {
        // Functions: exported when they have a body and are not file-local.
        if(has_body && !is_file_local)
          get_symbols_rec(ns, symbol, exported);
      }
      else
      {
        // Variables: exported when initialized and not file-local.
        if(has_initializer && !is_file_local)
          get_symbols_rec(ns, symbol, exported);
      }
    }
  }

  // Sweep phase: erase everything that was not marked.
  symbol_tablet::symbolst::iterator s_it=
    symbol_table.symbols.begin();
  while(s_it!=symbol_table.symbols.end())
  {
    if(exported.find(s_it->first)==exported.end())
    {
      symbol_tablet::symbolst::iterator dead=s_it;
      ++s_it;
      symbol_table.symbols.erase(dead);
    }
    else
      ++s_it;
  }
}
// Shortcut: look up the NamespaceDetails for this test's namespace.
static NamespaceDetails *nsd() {
    return nsdetails(ns());
}
// Tests AndSortedStage behavior under mid-plan invalidation. Relies on the
// stated assumption that, into a freshly cleared collection, increasing
// inserts get increasing DiskLocs. Two invalidations are exercised:
//  (1) the very first DiskLoc -- the stage's current target after one
//      work() call -- must end up flagged as an OWNED_OBJ member with the
//      expected field values;
//  (2) a DiskLoc the stage has not reached yet is simply dropped from the
//      output without being flagged.
// With 50 matching docs and those two removed, the test expects 48 results
// total and exactly one flagged member.
void run() { Client::WriteContext ctx(ns()); // Insert a bunch of data for (int i = 0; i < 50; ++i) { insert(BSON("foo" << 1 << "bar" << 1)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL)); // Scan over foo == 1 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1)); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // Scan over bar == 1 params.descriptor = getIndex(BSON("bar" << 1)); ah->addChild(new IndexScan(params, &ws, NULL)); // Get the set of disklocs in our collection to use later. set<DiskLoc> data; getLocs(&data); // We're making an assumption here that happens to be true because we clear out the // collection before running this: increasing inserts have increasing DiskLocs. // This isn't true in general if the collection is not dropped beforehand. WorkingSetID id; // Sorted AND looks at the first child, which is an index scan over foo==1. ah->work(&id); // The first thing that the index scan returns (due to increasing DiskLoc trick) is the // very first insert, which should be the very first thing in data. Let's invalidate it // and make sure it shows up in the flagged results. ah->prepareToYield(); ah->invalidate(*data.begin()); remove(data.begin()->obj()); ah->recoverFromYield(); // Make sure the nuked obj is actually in the flagged data. ASSERT_EQUALS(ws.getFlagged().size(), size_t(1)); WorkingSetMember* member = ws.get(ws.getFlagged()[0]); ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state); BSONElement elt; ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_EQUALS(1, elt.numberInt()); set<DiskLoc>::iterator it = data.begin(); // Proceed along, AND-ing results. 
int count = 0; while (!ah->isEOF() && count < 10) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } ++count; ++it; member = ws.get(id); ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_EQUALS(member->loc, *it); } // Move 'it' to a result that's yet to show up. for (int i = 0; i < count + 10; ++i) { ++it; } // Remove a result that's coming up. It's not the 'target' result of the AND so it's // not flagged. ah->prepareToYield(); ah->invalidate(*it); remove(it->obj()); ah->recoverFromYield(); // Get all results aside from the two we killed. while (!ah->isEOF()) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } ++count; member = ws.get(id); ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_EQUALS(1, elt.numberInt()); } ASSERT_EQUALS(count, 48); ASSERT_EQUALS(size_t(1), ws.getFlagged().size()); }