// Constructor: records the order id, zeroes all paging/result state and
// file handles, registers the four column-file names, then immediately
// executes the lookup via Query().
query::query(int orderid) {
    orderkey = orderid;
    numOfPage = 0;
    offsetOfKey = 0;
    result2 = 0;
    page_int = NULL;
    fIndex = NULL;
    fDec = NULL;
    // result[] and fIn[] are independent; reset them in a single pass.
    for (int idx = 0; idx < 4; ++idx) {
        result[idx] = 0;
        fIn[idx] = NULL;
    }
    strcpy(file_name[0], "orderkey.fjl");
    strcpy(file_name[1], "custkey.fjl");
    strcpy(file_name[2], "totalprice.fjl");
    strcpy(file_name[3], "shippriority.fjl");
    Query();
}
void Dialog_Mail::SlotSave() { if (Subj->text().trimmed().isEmpty()) { GuiWarning("Предмет сообщения не может быть пустыми!"); return; } QSqlQuery Query(Global.DataBase); if (Mode == DialogEdit) { Query.prepare("UPDATE public.\"Mail\" " "SET \"Sender\"=:Sender,\"Receiver\"=:Receiver,\"Subj\"=:Subj,\"Body\"=:Body " "WHERE \"Id\"=:Idx;"); Query.bindValue(":Idx",*Idx); } else { Query.prepare("INSERT INTO public.\"Mail\" " "(\"Sender\",\"Receiver\",\"Subj\",\"Body\") " "VALUES(:Sender,:Receiver,:Subj,:Body)"); } Query.bindValue(":Sender",Global.UserId); Query.bindValue(":Receiver",Users[Receiver->currentText()]); Query.bindValue(":Subj",Subj->text().trimmed()); Query.bindValue(":Body",Message->toHtml().trimmed()); if (!Query.exec()) { GuiSqlError("Ошибка при работе с БД!"); } accept(); }
void Browse_User::UpdateFunc() { QSqlQuery Query(Global.DataBase); QString SearchString = QuoteString(FilterBy->text().trimmed()); QString Where = ""; QString QueryString; QStringList SearchList; if (Mode == buWidget) { Where = SearchPrepare( " POSITION (LOWER(?) IN LOWER(f.\"Name\")) > 0 OR " " POSITION (LOWER(?) IN LOWER(f.\"Role\")) > 0 OR " " POSITION (LOWER(?) IN LOWER(to_char(f.\"StartDate\"::date ,'DD.MM.YYYY' ))) > 0 OR " " POSITION (LOWER(?) IN LOWER(f.\"StopDate\")) > 0 OR " " POSITION (LOWER(?) IN LOWER(f.\"Position\")) > 0 ", &SearchList, SearchString); QString QueryString = "SELECT * FROM (" " SELECT " " u.\"Id\" AS \"Id\", " " u.\"Name\" AS \"Name\", " " COALESCE(r.\"Name\",'-') AS \"Role\", " " u.\"StartDate\" AS \"StartDate\", " " CASE WHEN (u.\"StopDate\" = to_date('1980-01-01', 'YYYY-MM-DD')) THEN 'в штате' ELSE to_char(u.\"StopDate\",'DD-MM-YYYY') END AS \"StopDate\", " " COALESCE(d.\"Name\",'-') AS \"Position\", " " u.\"LongName\" AS \"LongName\" " " FROM public.\"Users\" as u " " LEFT OUTER JOIN public.\"Rubricator\" AS r ON (r.\"GroupId\" = 1 AND r.\"Id\" = u.\"Role\") " " LEFT OUTER JOIN public.\"Rubricator\" AS d ON (d.\"GroupId\" = 2 AND d.\"Id\" = u.\"Position\") " " GROUP BY u.\"Id\", r.\"Name\", d.\"Name\" " ") AS f " +Where +OrderBy; Query.prepare(QueryString); SearchBind(SearchList,&Query,5); if (!Query.exec()) { GuiSqlError("Ошибка при работе с БД!"); return; } Model->setQuery(Query); Model->setHeaderData(0,Qt::Horizontal,tr("Ид")); Model->setHeaderData(1,Qt::Horizontal,tr("ФИО")); Model->setHeaderData(2,Qt::Horizontal,tr("Права")); Model->setHeaderData(3,Qt::Horizontal,tr("Прием")); Model->setHeaderData(4,Qt::Horizontal,tr("Увольнение")); Model->setHeaderData(5,Qt::Horizontal,tr("Подразделение")); Model->setHeaderData(6,Qt::Horizontal,tr("ФИО(полностью)")); Table->setModel(Model); Table->setItemDelegate(new NotEditableDelegate()); Table->setColumnHidden(0,true); Table->setColumnHidden(6,true); TuneTable(Table); 
Table->setSortingEnabled(true); Table->horizontalHeader()->setSortIndicator(SortColumn, SortOrder); Table->resizeColumnsToContents(); Table->horizontalHeader()->setSectionResizeMode(1,QHeaderView::Stretch); } if (Mode == buService) { Where = SearchPrepare( " POSITION (LOWER(?) IN LOWER(f.\"Name\")) > 0", &SearchList, SearchString); QueryString = "SELECT * FROM ( " " SELECT u.\"Id\" AS \"Id\", " " u.\"Name\" AS \"Name\" " " FROM public.\"Users\" AS u, public.\"Service2Users\" AS s " " WHERE u.\"Id\" = s.\"User\" AND s.\"Service\" ="+(*Ref)["Id"].toString()+" " ") AS f " +Where +OrderBy; Query.prepare(QueryString); SearchBind(SearchList,&Query,1); if (!Query.exec()) { GuiSqlError("Ошибка при работе с БД!"); return; } Model->setQuery(Query); Model->setHeaderData(0,Qt::Horizontal,tr("Ид")); Model->setHeaderData(1,Qt::Horizontal,tr("ФИО")); Table->setModel(Model); Table->setColumnHidden(0,true); TuneTable(Table); Table->setSortingEnabled(true); Table->horizontalHeader()->setSortIndicator(SortColumn, SortOrder); Table->resizeColumnsToContents(); Table->horizontalHeader()->setSectionResizeMode(1,QHeaderView::Stretch); } if (Mode == buSelect) { Where = SearchPrepare( " POSITION (LOWER(?) 
IN LOWER(f.\"Name\")) > 0", &SearchList, SearchString); QueryString = " SELECT * FROM ( " " SELECT u.\"Id\", u.\"Name\" FROM public.\"Users\" AS u " " WHERE u.\"Id\" NOT IN ( " " SELECT u1.\"Id\" FROM public.\"Users\" AS u1, public.\"Service2Users\" AS s " " WHERE s.\"Service\" = 1 AND s.\"User\" = u1.\"Id\" " " )) AS f " +Where +OrderBy; Query.prepare(QueryString); SearchBind(SearchList,&Query,1); if (!Query.exec()) { GuiSqlError("Ошибка при работе с БД!"); return; } Model->setQuery(Query); Model->setHeaderData(0,Qt::Horizontal,tr("Ид")); Model->setHeaderData(1,Qt::Horizontal,tr("ФИО")); Table->setModel(Model); Table->setColumnHidden(0,true); TuneTable(Table); Table->setSortingEnabled(true); Table->horizontalHeader()->setSortIndicator(SortColumn, SortOrder); Table->resizeColumnsToContents(); Table->horizontalHeader()->setSectionResizeMode(1,QHeaderView::Stretch); } connect(Table->selectionModel(),SIGNAL(currentRowChanged(QModelIndex,QModelIndex)),this,SLOT(SlotCurChanged(QModelIndex,QModelIndex))); int SelRow = FindRow((QSqlQueryModel*)(Table->model()),Cur); Table->selectRow(SelRow); }
// Reports whether the underlying connection is usable by issuing the
// probe Query(); never throws.
bool IsConnected() const throw() {
    const bool alive = Query();
    return alive;
}
virtual void run(){ client().update( "", Query(), BSON( "$set" << BSON( "x" << 1 )) ); ASSERT( !client().getLastError().empty() ); }
// Loads the split-algorithm DLL |lpFileName|, validates its exports
// (GetIndex/Split/Merge), and prepends a descriptor node to the static
// module list. Returns TRUE on success or when a module with the same
// index is already registered (the duplicate handle is released);
// FALSE on any failure (the handle is released as well).
BOOL CSplitLayer::Insert(IN LPTSTR lpFileName)
{
    BOOL bResult = FALSE;
    HMODULE hModule = NULL;
    LPSPLIT_MOD_INFO pModInfoAddr = NULL;
    GETINDEX fpGetIndex;
    SPLIT fpSplit;
    MERGE fpMerge;
    do {
        hModule = ::LoadLibrary(lpFileName);
        if (!hModule) {
            // 193 == ERROR_BAD_EXE_FORMAT (not a loadable module) is
            // expected for stray files and deliberately not logged.
            if (GetLastError() != 193) {
                //LOG(ErrorLevel, _SPLIT_LAYER_MOD_, _T("加载破碎算法文件 %S 失败 (%d)"), lpFileName, ::GetLastError());
                LOG(_T("插入破入碎算法文件失败!"));
            }
            break;
        }
        // Resolve the exported functions; their presence validates the DLL.
        fpGetIndex = (GETINDEX)GetProcAddress(hModule, "GetIndex");
        fpSplit = (SPLIT)GetProcAddress(hModule, "Split");
        fpMerge = (MERGE)GetProcAddress(hModule, "Merge");
        if (!fpGetIndex || !fpSplit || !fpMerge) {
            break;
        }
        // A node with this index already exists: nothing to add, release
        // the duplicate handle and report success.
        pModInfoAddr = Query(fpGetIndex());
        if (pModInfoAddr) {
            FreeLibrary(hModule);
            bResult = TRUE;
            break;
        }
        // Allocate the zero-initialized list node from the class heap.
        pModInfoAddr = (LPSPLIT_MOD_INFO)::HeapAlloc(ms_Heap, HEAP_ZERO_MEMORY, sizeof(SPLIT_MOD_INFO));
        if(!pModInfoAddr) {
            //LOG(ErrorLevel, _SPLIT_LAYER_MOD_, _T("申请内存失败 (%d)"), ::GetLastError());
            LOG(_T("申请内存失败!"));
            break;
        }
        // Fill in the node with the resolved entry points.
        pModInfoAddr->Index = fpGetIndex();
        pModInfoAddr->fpSplit = fpSplit;
        pModInfoAddr->fpMerge = fpMerge;
        pModInfoAddr->hMod = hModule;
        // Prepend the node to the shared singly-linked list.
        // NOTE(review): ms_hSynObject is waited on and then re-signalled with
        // SetEvent — presumably an auto-reset event acting as a mutex; confirm.
        ::WaitForSingleObject(ms_hSynObject, INFINITE);
        pModInfoAddr->NextNode = ms_ListHead;
        ms_ListHead = pModInfoAddr;
        ::SetEvent(ms_hSynObject);
        // All operations succeeded.
        bResult = TRUE;
    } while (FALSE);
    // On any failure after LoadLibrary succeeded, release the module handle.
    if(!bResult && hModule)
        ::FreeLibrary(hModule);
    return bResult;
}
//使用Class查询 bool AnalyseClassCallRelation::QuerybyClass(const string &str, deque<ClassResourceRelation> &qCallPath) { return Query(str, true, qCallPath); }
// Tailable-cursor test: a tailable query over the empty capped collection
// must not leave a server-side cursor open (cursor id 0), while the same
// style of query after an insert must return a live (non-zero) cursor id.
void run() {
    const char *ns = "querytests.EmptyTail";
    // Empty collection: expect no lingering cursor.
    ASSERT_EQUALS( 0, client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, Option_CursorTailable )->getCursorId() );
    insert( ns, BSON( "a" << 0 ) );
    // Non-empty collection: the tailable cursor must stay open even though
    // the filter {a:1} matches nothing yet.
    ASSERT( 0 != client().query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, Option_CursorTailable )->getCursorId() );
}
// Creates the oplog collection at startup if missing; when it already
// exists, validates --oplogSize against the on-disk size and (master/slave
// only) seeds the in-memory last-op time from the newest entry.
void createOplog() {
    Lock::GlobalWrite lk;
    const char * ns = "local.oplog.$main";
    // Replica sets use a different oplog namespace than master/slave.
    bool rs = !cmdLine._replSet.empty();
    if( rs )
        ns = rsoplog;
    Client::Context ctx(ns);
    NamespaceDetails * nsd = nsdetails( ns );
    if ( nsd ) {
        // Oplog exists: a mismatching --oplogSize is fatal, since resizing
        // requires a manual procedure (see the linked doc).
        if ( cmdLine.oplogSize != 0 ) {
            int o = (int)(nsd->storageSize() / ( 1024 * 1024 ) );
            int n = (int)(cmdLine.oplogSize / ( 1024 * 1024 ) );
            if ( n != o ) {
                stringstream ss;
                ss << "cmdline oplogsize (" << n << ") different than existing (" << o << ") see: http://dochub.mongodb.org/core/increase-oplog";
                log() << ss.str() << endl;
                throw UserException( 13257 , ss.str() );
            }
        }
        if( rs ) return;
        // Master/slave: seed OpTime from the newest (reverse natural) entry.
        DBDirectClient c;
        BSONObj lastOp = c.findOne( ns, Query().sort(reverseNaturalObj) );
        if ( !lastOp.isEmpty() ) {
            OpTime::setLast( lastOp[ "ts" ].date() );
        }
        return;
    }
    /* create an oplog collection, if it doesn't yet exist. */
    BSONObjBuilder b;
    double sz;
    if ( cmdLine.oplogSize != 0 )
        sz = (double)cmdLine.oplogSize;
    else {
        /* not specified. pick a default size */
        sz = 50.0 * 1000 * 1000;
        if ( sizeof(int *) >= 8 ) {
#if defined(__APPLE__)
            // typically these are desktops (dev machines), so keep it smallish
            sz = (256-64) * 1000 * 1000;
#else
            sz = 990.0 * 1000 * 1000;
            boost::intmax_t free = File::freeSpace(dbpath); //-1 if call not supported.
            double fivePct = free * 0.05;
            // 64-bit non-Apple: use 5% of free disk when that exceeds 990MB.
            if ( fivePct > sz )
                sz = fivePct;
#endif
        }
    }
    log() << "******" << endl;
    log() << "creating replication oplog of size: " << (int)( sz / ( 1024 * 1024 ) ) << "MB..." << endl;
    b.append("size", sz);
    b.appendBool("capped", 1);
    b.appendBool("autoIndexId", false);
    string err;
    BSONObj o = b.done();
    userCreateNS(ns, o, err, false);
    // Master/slave: write a no-op so the new oplog is non-empty.
    if( !rs )
        logOp( "n", "", BSONObj() );
    /* sync here so we don't get any surprising lag later when we try to sync */
    MemoryMappedFile::flushAll(true);
    log() << "******" << endl;
}
void AlertLog::LoadList() { QSqlDatabase db = QSqlDatabase::database(); QString TextLine, SelectStatement, TimeFilter, NameFilter, PageFilter, LevelFilter; QSqlQuery Query(db); SelectStatement = "SELECT * FROM Alerts"; TimeFilter = "Timestamp >= '" + ui->dateTimeEdit_From->text() + "' AND Timestamp <= '" + ui->dateTimeEdit_To->text() + "'"; if(ui->lineEdit_Name->text().length()>0) { NameFilter = "Name LIKE '%" + ui->lineEdit_Name->text() + "%'"; } if(ui->comboBox_Level->currentIndex() != 3) { LevelFilter = "Alert = '" + QString::number(ui->comboBox_Level->currentIndex() - 3) + "'"; } if(ui->comboBox_Page->currentIndex() > 0) { PageFilter = "Page = '" + ui->comboBox_Page->currentText() + "'"; } QueryString = SelectStatement + " WHERE " + TimeFilter; if(NameFilter.length() > 0) { QueryString = QueryString + " AND " + NameFilter; } if(PageFilter.length() > 0) { QueryString = QueryString + " AND " + PageFilter; } if(LevelFilter.length() > 0) { QueryString = QueryString + " AND " + LevelFilter; } Query.exec(QueryString); if(!Query.isActive()) { ui->listWidget->clear(); ui->listWidget->addItem("Failed to open log"); ui->listWidget->addItem(QueryString); } else { ui->listWidget->clear(); TextLine = "Date/Time"; TextLine = TextLine.leftJustified(26, ' '); TextLine = TextLine + "Page"; TextLine = TextLine.leftJustified(46, ' '); TextLine = TextLine + "Name"; TextLine = TextLine.leftJustified(66, ' '); TextLine = TextLine + "L"; TextLine = TextLine.leftJustified(71, ' '); TextLine = TextLine + "Description"; TextLine = TextLine.leftJustified(99, ' '); ui->listWidget->addItem(TextLine); while(Query.next() and ui->listWidget->count() < 10000) { TextLine = Query.value(0).toString(); TextLine.truncate(25); TextLine = TextLine.leftJustified(26, ' '); TextLine = TextLine + Query.value(1).toString(); TextLine.truncate(45); TextLine = TextLine.leftJustified(46, ' '); TextLine = TextLine + Query.value(2).toString(); TextLine.truncate(65); TextLine = TextLine.leftJustified(66, ' '); 
TextLine = TextLine + Query.value(3).toString(); TextLine.truncate(70); TextLine = TextLine.leftJustified(71, ' '); TextLine = TextLine + Query.value(4).toString(); TextLine = TextLine.leftJustified(99, ' '); ui->listWidget->addItem(TextLine); } if(ui->listWidget->count() >= 10000)ui->listWidget->addItem("Stopped at 10000 records, you might want to filter that a little"); } }
// Reloads the shard registry (_lookup/_rsLookup) from the config server's
// shard collection. The "config" pseudo-shard entry is preserved across the
// reload because it is not stored in ShardNS::shard.
void reload() {
    // First snapshot all shard documents while holding only the connection.
    list<BSONObj> all;
    {
        scoped_ptr<ScopedDbConnection> conn(
            ScopedDbConnection::getScopedDbConnection( configServer.getPrimary().getConnString() ) );
        auto_ptr<DBClientCursor> c = conn->get()->query( ShardNS::shard , Query() );
        massert( 13632 , "couldn't get updated shard list from config server" , c.get() );
        while ( c->more() ) {
            all.push_back( c->next().getOwned() );
        }
        conn->done();
    }

    // Rebuild the tables under the registry mutex.
    scoped_lock lk( _mutex );

    // We use the _lookup table for all shards and for the primary config DB. The config DB info,
    // however, does not come from the ShardNS::shard. So when cleaning the _lookup table we leave
    // the config state intact. The rationale is that this way we could drop shards that
    // were removed without reinitializing the config DB information.
    ShardMap::iterator i = _lookup.find( "config" );
    if ( i != _lookup.end() ) {
        ShardPtr config = i->second;
        _lookup.clear();
        _lookup[ "config" ] = config;
    }
    else {
        _lookup.clear();
    }
    _rsLookup.clear();

    for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); ++i ) {
        BSONObj o = *i;
        string name = o["_id"].String();
        string host = o["host"].String();

        // maxSize / draining are optional fields on the shard document.
        long long maxSize = 0;
        BSONElement maxSizeElem = o[ ShardFields::maxSize.name() ];
        if ( ! maxSizeElem.eoo() ) {
            maxSize = maxSizeElem.numberLong();
        }

        bool isDraining = false;
        BSONElement isDrainingElem = o[ ShardFields::draining.name() ];
        if ( ! isDrainingElem.eoo() ) {
            isDraining = isDrainingElem.Bool();
        }

        ShardPtr s( new Shard( name , host , maxSize , isDraining ) );

        if ( o["tags"].type() == Array ) {
            vector<BSONElement> v = o["tags"].Array();
            for ( unsigned j=0; j<v.size(); j++ ) {
                s->addTag( v[j].String() );
            }
        }

        _lookup[name] = s;
        _installHost( host , s );
    }
}
bool MySQLConnection::Query(const char* sql, ResultSet* & ref_ptr_set, bool commit_flags) { if (NULL == this->mysql_ || NULL == sql) { return false; } else { //如果查询成功,返回0。如果出现错误,返回非0值 long len = strlen(sql); if (mysql_real_query(this->mysql_, sql, len)) { uint32_t last_errno = mysql_errno(this->mysql_); if (HandleMySQLErrno(last_errno)) // If it returns true, an error was handled successfully (i.e. reconnection) { return Query(sql, ref_ptr_set, commit_flags); // Try again } return false; } else { MYSQL_RES* ptr_mysql_res = mysql_store_result(this->mysql_); if(ptr_mysql_res != NULL) { my_ulonglong num_rows = mysql_num_rows(ptr_mysql_res); if( num_rows > 0x00 ) { ref_ptr_set = new ResultSet(ptr_mysql_res); } else { if(ptr_mysql_res != NULL) { mysql_free_result(ptr_mysql_res); } } if(commit_flags) { mysql_commit(this->mysql_); } } else // mysql_store_result() returned nothing; should it have? { uint32_t query_field_count = mysql_field_count(this->mysql_); if(0 == query_field_count) { // query does not return data // (it was not a SELECT) my_ulonglong affected_rows = mysql_affected_rows(this->mysql_); //std::cout << "affected_rows : " << affected_rows << std::endl; } else // mysql_store_result() should have returned data { std::cout << "Error: " << mysql_error(this->mysql_) << std::endl; return false; } } } } return true; }
// Records a login/logout event in the loginlog table for the current channel.
void LogManager::LoginLog(bool isLogin, DWORD dwAccountID, DWORD dwPID, int bLevel, BYTE bJob, DWORD dwPlayTime)
{
    const char* eventType = isLogin ? "'LOGIN'" : "'LOGOUT'";
    Query("INSERT DELAYED INTO loginlog%s (type, time, channel, account_id, pid, level, job, playtime) "
          "VALUES (%s, NOW(), %d, %u, %u, %d, %d, %u)",
          get_table_postfix(),
          eventType,
          g_bChannel,
          dwAccountID, dwPID,
          bLevel, bJob,
          dwPlayTime);
}
// Records a dragon-slay event (guild, dragon vnum, start/end unix times).
void LogManager::DragonSlayLog(DWORD dwGuildID, DWORD dwDragonVnum, DWORD dwStartTime, DWORD dwEndTime)
{
    // FIX: all four arguments are DWORD (unsigned); use %u instead of %d so
    // values above INT_MAX are not formatted as negative numbers.
    Query("INSERT INTO dragon_slay_log%s VALUES( %u, %u, FROM_UNIXTIME(%u), FROM_UNIXTIME(%u) )",
          get_table_postfix(),
          dwGuildID,
          dwDragonVnum,
          dwStartTime,
          dwEndTime);
}
// Records a vcard transfer: card id, map position, host, and the giving
// and taking players (name + ip each).
void LogManager::VCardLog(DWORD vcard_id, DWORD x, DWORD y, const char * hostname, const char * giver_name, const char * giver_ip, const char * taker_name, const char * taker_ip)
{
    Query("INSERT DELAYED INTO vcard_log "
          "(vcard_id, x, y, hostname, giver_name, giver_ip, taker_name, taker_ip) "
          "VALUES(%u, %u, %u, '%s', '%s', '%s', '%s', '%s')",
          vcard_id,
          x, y,
          hostname,
          giver_name, giver_ip,
          taker_name, taker_ip);
}
// findAndModify with a sort specification, implemented on top of
// DBDirectClient (findOne + update/remove + getLastError). Handles four
// cases: no match + no upsert (null value), upsert, remove, and update —
// optionally returning the post-modification document when "new" is set.
virtual bool run(const string& dbname, BSONObj& cmdObj, int x, string& errmsg, BSONObjBuilder& result, bool y) {
    static DBDirectClient db;

    // Without a sort the simpler direct-client-free path is used.
    if ( cmdObj["sort"].eoo() )
        return runNoDirectClient( dbname , cmdObj , x, errmsg , result, y );

    string ns = dbname + '.' + cmdObj.firstElement().valuestr();

    BSONObj origQuery = cmdObj.getObjectField("query"); // defaults to {}
    Query q (origQuery);
    BSONElement sort = cmdObj["sort"];
    if (!sort.eoo())
        q.sort(sort.embeddedObjectUserCheck());

    bool upsert = cmdObj["upsert"].trueValue();

    BSONObj fieldsHolder (cmdObj.getObjectField("fields"));
    const BSONObj* fields = (fieldsHolder.isEmpty() ? NULL : &fieldsHolder);

    // If the projection excludes _id we can't use it on the server (the _id
    // is needed below to re-target the modified document), so it is applied
    // in post-processing instead.
    Projection projection;
    if (fields) {
        projection.init(fieldsHolder);
        if (!projection.includeID())
            fields = NULL; // do projection in post-processing
    }

    BSONObj out = db.findOne(ns, q, fields);
    if (out.isEmpty()) {
        // Nothing matched: report null, or perform the upsert.
        if (!upsert) {
            result.appendNull("value");
            return true;
        }

        BSONElement update = cmdObj["update"];
        uassert(13329, "upsert mode requires update field", !update.eoo());
        uassert(13330, "upsert mode requires query field", !origQuery.isEmpty());
        db.update(ns, origQuery, update.embeddedObjectUserCheck(), true);

        BSONObj gle = db.getLastErrorDetailed(dbname);
        result.append("lastErrorObject", gle);
        if (gle["err"].type() == String) {
            errmsg = gle["err"].String();
            return false;
        }

        if (cmdObj["new"].trueValue()) {
            // Fetch the upserted doc by the server-assigned or query-provided _id.
            BSONElement _id = gle["upserted"];
            if (_id.eoo())
                _id = origQuery["_id"];
            out = db.findOne(ns, QUERY("_id" << _id), fields);
        }
    }
    else {
        if (cmdObj["remove"].trueValue()) {
            uassert(12515, "can't remove and update", cmdObj["update"].eoo());
            db.remove(ns, QUERY("_id" << out["_id"]), 1);

            BSONObj gle = db.getLastErrorDetailed(dbname);
            result.append("lastErrorObject", gle);
            if (gle["err"].type() == String) {
                errmsg = gle["err"].String();
                return false;
            }
        }
        else { // update
            BSONElement queryId = origQuery["_id"];
            if (queryId.eoo() || getGtLtOp(queryId) != BSONObj::Equality) {
                // need to include original query for $ positional operator:
                // rebuild the criteria as {_id: <found>, ...rest of query...}
                BSONObjBuilder b;
                b.append(out["_id"]);
                BSONObjIterator it(origQuery);
                while (it.more()) {
                    BSONElement e = it.next();
                    if (strcmp(e.fieldName(), "_id"))
                        b.append(e);
                }
                q = Query(b.obj());
            }

            if (q.isComplex()) // update doesn't work with complex queries
                q = Query(q.getFilter().getOwned());

            BSONElement update = cmdObj["update"];
            uassert(12516, "must specify remove or update", !update.eoo());
            db.update(ns, q, update.embeddedObjectUserCheck());

            BSONObj gle = db.getLastErrorDetailed(dbname);
            result.append("lastErrorObject", gle);
            if (gle["err"].type() == String) {
                errmsg = gle["err"].String();
                return false;
            }

            if (cmdObj["new"].trueValue())
                out = db.findOne(ns, QUERY("_id" << out["_id"]), fields);
        }
    }

    if (!fieldsHolder.isEmpty() && !fields){
        // we need to run projection but haven't yet
        out = projection.transform(out);
    }

    result.append("value", out);
    return true;
}
static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) { client_.update( ns, Query( q ), o, upsert ); }
/**
 * Checks if the oplog given is too far ahead to read from.
 *
 * @param r the oplog
 * @param hn the hostname (for log messages)
 *
 * @return if we are stale compared to the oplog on hn
 */
bool ReplSetImpl::_isStale(OplogReader& r, const string& hn) {
    // First document in natural order = oldest entry still present in the
    // remote (capped) oplog.
    BSONObj remoteOldestOp = r.findOne(rsoplog, Query());
    OpTime ts = remoteOldestOp["ts"]._opTime();
    // DEV runs only in debug builds; the paired `else` supplies the
    // release-build (verbosity-3) logging path.
    DEV log() << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
    else log(3) << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
    // NOTE(review): the function body appears truncated at this point (no
    // return statement or closing brace in this chunk) — verify against the
    // complete source before editing further.
// Begins an incremental collection clone from |fromhost|: deletes any local
// data overlapping |query|, starts a temporary op log on the source via the
// logCollection command, copies the matching range (and optionally the
// indexes), then returns — through |cursorId| — a tailable cursor over that
// temp oplog for later catch-up. Returns false on any connection/command
// failure, with |errmsg| populated.
bool Cloner::startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, string &errmsg, bool logForRepl, bool copyIndexes, int logSizeMb, long long &cursorId ) {
    char db[256];
    nsToClient( ns, db );

    NamespaceDetails *nsd = nsdetails( ns );
    if ( nsd ){
        /** note: its ok to clone into a collection, but only if the range you're copying doesn't exist on this server */
        string err;
        if ( runCount( ns , BSON( "query" << query ) , err ) > 0 ){
            log() << "WARNING: data already exists for: " << ns << " in range : " << query << " deleting..." << endl;
            deleteObjects( ns , query , false , logForRepl , false );
        }
    }

    {
        // Temporarily release the db lock while talking to the remote host.
        dbtemprelease r;
        auto_ptr< DBClientConnection > c( new DBClientConnection() );
        if ( !c->connect( fromhost, errmsg ) )
            return false;
        if( !replAuthenticate(c.get()) )
            return false;
        conn = c;

        // Start temporary op log
        BSONObjBuilder cmdSpec;
        cmdSpec << "logCollection" << ns << "start" << 1;
        if ( logSizeMb != INT_MIN ) // INT_MIN is the "not specified" sentinel
            cmdSpec << "logSizeMb" << logSizeMb;
        BSONObj info;
        if ( !conn->runCommand( db, cmdSpec.done(), info ) ) {
            errmsg = "logCollection failed: " + (string)info;
            return false;
        }
    }

    if ( ! nsd ) {
        // Collection doesn't exist locally: create it with the source's options.
        BSONObj spec = conn->findOne( string( db ) + ".system.namespaces", BSON( "name" << ns ) );
        if ( !userCreateNS( ns, spec.getObjectField( "options" ), errmsg, true ) )
            return false;
    }

    copy( ns, ns, false, logForRepl, false, false, query );

    if ( copyIndexes ) {
        // Copy all indexes for this ns except the implicit _id index.
        string indexNs = string( db ) + ".system.indexes";
        copy( indexNs.c_str(), indexNs.c_str(), true, logForRepl, false, false, BSON( "ns" << ns << "name" << NE << "_id_" ) );
    }

    auto_ptr< DBClientCursor > c;
    {
        dbtemprelease r;
        string logNS = "local.temp.oplog." + string( ns );
        c = conn->query( logNS.c_str(), Query(), 0, 0, 0, Option_CursorTailable );
    }
    if ( c->more() ) {
        // Apply ops accumulated during the copy, then keep the cursor id for
        // the caller's later catch-up pass.
        replayOpLog( c.get(), query );
        cursorId = c->getCursorId();
        massert( "Expected valid tailing cursor", cursorId != 0 );
    }
    else {
        massert( "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
        cursorId = 0;
    }
    // Detach the cursor from the connection so it stays valid for the caller.
    c->decouple();
    return true;
}
// Builds the config.chunks query used to refresh our view of the chunk
// distribution: every chunk with a version above the current max, plus —
// when the clause count stays small — per-shard version bumps and the extra
// minor versions the caller marked as interesting. The result is sorted by
// ascending version, which is required for consistency if the cursor yields
// while chunks are migrating.
Query ConfigDiffTracker<ValType,ShardType>::
configDiffQuery( const set<ChunkVersion>& extraMinorVersions ) const
{
    verifyAttached();

    //
    // Basic idea behind the query is to find all the chunks $gt the current max version, and
    // then also update chunks that we need minor versions - splits and (2.0) max chunks on
    // shards
    //

    static const int maxMinorVersionClauses = 50;
    BSONObjBuilder queryB;

    int numStaleMinorClauses = extraMinorVersions.size() + _maxShardVersions->size();

#ifdef _DEBUG
    // In debug builds, randomly trigger full reloads to exercise both codepaths
    if( rand() % 2 ) numStaleMinorClauses = maxMinorVersionClauses;
#endif

    queryB.append(ChunkType::ns(), _ns);

    //
    // If we have only a few minor versions to refresh, we can be more selective in our query
    //
    if( numStaleMinorClauses < maxMinorVersionClauses ) {

        //
        // Get any version changes higher than we know currently
        //
        BSONArrayBuilder queryOrB( queryB.subarrayStart( "$or" ) );
        {
            BSONObjBuilder queryNewB( queryOrB.subobjStart() );
            {
                BSONObjBuilder ts(queryNewB.subobjStart(ChunkType::DEPRECATED_lastmod()));
                // We should *always* pull at least a single chunk back, this lets us quickly
                // detect if our collection was unsharded (and most of the time if it was
                // resharded) in the meantime
                ts.appendTimestamp( "$gte", _maxVersion->toLong() );
                ts.done();
            }
            queryNewB.done();
        }

        // Get any shard version changes higher than we know currently
        // Needed since there could have been a split of the max version chunk of any shard
        // TODO: Ideally, we shouldn't care about these
        for( typename map<ShardType, ChunkVersion>::const_iterator it = _maxShardVersions->begin(); it != _maxShardVersions->end(); it++ ) {
            BSONObjBuilder queryShardB( queryOrB.subobjStart() );
            queryShardB.append(ChunkType::shard(), nameFrom( it->first ) );
            {
                BSONObjBuilder ts(queryShardB.subobjStart(ChunkType::DEPRECATED_lastmod()));
                ts.appendTimestamp( "$gt", it->second.toLong() );
                ts.done();
            }
            queryShardB.done();
        }

        // Get any minor version changes we've marked as interesting
        // TODO: Ideally we shouldn't care about these
        for( set<ChunkVersion>::const_iterator it = extraMinorVersions.begin(); it != extraMinorVersions.end(); it++ ) {
            BSONObjBuilder queryShardB( queryOrB.subobjStart() );
            {
                BSONObjBuilder ts(queryShardB.subobjStart(ChunkType::DEPRECATED_lastmod()));
                ts.appendTimestamp( "$gt", it->toLong() );
                ts.appendTimestamp( "$lt", ChunkVersion( it->majorVersion() + 1, 0, OID() ).toLong() );
                ts.done();
            }
            queryShardB.done();
        }

        queryOrB.done();
    }

    BSONObj query = queryB.obj();

    LOG(2) << "major version query from " << *_maxVersion << " and over "
           << _maxShardVersions->size() << " shards is " << query << endl;

    //
    // NOTE: IT IS IMPORTANT FOR CONSISTENCY THAT WE SORT BY ASC VERSION, TO HANDLE
    // CURSOR YIELDING BETWEEN CHUNKS BEING MIGRATED.
    //
    // This ensures that changes to chunk version (which will always be higher) will always
    // come *after* our current position in the chunk cursor.
    //
    Query queryObj(query);
    queryObj.sort(BSON( "lastmod" << 1 ));

    // BUG FIX: return the sorted query object. The previous code returned
    // Query( query ), silently discarding the ascending "lastmod" sort that
    // the NOTE above declares essential for cursor-yield consistency.
    return queryObj;
}
// Creates the master/slave oplog (local.oplog.$main) if it does not exist;
// when it does, validates --oplogSize against the on-disk size and seeds
// the in-memory last-op time from the newest entry.
void createOplog() {
    dblock lk;
    const char * ns = "local.oplog.$main";
    Client::Context ctx(ns);
    NamespaceDetails * nsd = nsdetails( ns );
    if ( nsd ) {
        // Existing oplog: a mismatching --oplogSize is a fatal misconfiguration
        // (resizing requires the manual procedure in the linked doc).
        if ( cmdLine.oplogSize != 0 ){
            int o = (int)(nsd->storageSize() / ( 1024 * 1024 ) );
            int n = (int)(cmdLine.oplogSize / ( 1024 * 1024 ) );
            if ( n != o ){
                stringstream ss;
                ss << "cmdline oplogsize (" << n << ") different than existing (" << o << ") see: http://dochub.mongodb.org/core/increase-oplog";
                log() << ss.str() << endl;
                throw UserException( 13257 , ss.str() );
            }
        }
        // Seed OpTime from the newest ($natural descending) oplog entry.
        DBDirectClient c;
        BSONObj lastOp = c.findOne( ns, Query().sort( BSON( "$natural" << -1 ) ) );
        if ( !lastOp.isEmpty() ) {
            OpTime::setLast( lastOp[ "ts" ].date() );
        }
        return;
    }
    /* create an oplog collection, if it doesn't yet exist. */
    BSONObjBuilder b;
    double sz;
    if ( cmdLine.oplogSize != 0 )
        sz = (double)cmdLine.oplogSize;
    else {
        /* not specified. pick a default size */
        sz = 50.0 * 1000 * 1000;
        if ( sizeof(int *) >= 8 ) {
#if defined(__APPLE__)
            // typically these are desktops (dev machines), so keep it smallish
            sz = (256-64) * 1000 * 1000;
#else
            sz = 990.0 * 1000 * 1000;
            boost::intmax_t free = freeSpace(); //-1 if call not supported.
            double fivePct = free * 0.05;
            // 64-bit non-Apple: use 5% of free disk when that exceeds 990MB.
            if ( fivePct > sz )
                sz = fivePct;
#endif
        }
    }
    log() << "******\n";
    log() << "creating replication oplog of size: " << (int)( sz / ( 1024 * 1024 ) ) << "MB (use --oplogSize to change)\n";
    log() << "******" << endl;
    b.append("size", sz);
    b.appendBool("capped", 1);
    b.appendBool("autoIndexId", false);
    string err;
    BSONObj o = b.done();
    userCreateNS(ns, o, err, false);
    // Write a no-op so the fresh oplog contains at least one entry.
    logOp( "n", "dummy", BSONObj() );
}
// Fixture setup: seed 100000 documents {a: i}, then open a cursor over the
// whole collection for the getMore tests.
GetMore() : ns_( testNs( this ) ) {
    const int kDocCount = 100000;
    for( int n = 0; n < kDocCount; ++n )
        client_->insert( ns_.c_str(), BSON( "a" << n ) );
    c_ = client_->query( ns_.c_str(), Query() );
}
virtual void run(){ auto_ptr<DBClientCursor> cursor = client().query( "", Query(), 1 ); BSONObj result = cursor->next().getOwned(); ASSERT( result.hasField( "$err" )); }
// Chooses and connects to a sync source whose oplog still contains our last
// fetched optime. On success this reader's connection points at the chosen
// member. On failure we either return with no connection (no candidates at
// all) or, when every candidate is too far ahead, record the minimum valid
// optime and transition to RECOVERING.
void OplogReader::connectToSyncSource(OperationContext* txn, const OpTime& lastOpTimeFetched, ReplicationCoordinator* replCoord) {
    // Sentinel "newer than anything" optime: while oldestOpTimeSeen still
    // equals it, no candidate's oplog has been successfully examined.
    const Timestamp sentinelTimestamp(duration_cast<Seconds>(Milliseconds(curTimeMillis64())), 0);
    const OpTime sentinel(sentinelTimestamp, std::numeric_limits<long long>::max());
    OpTime oldestOpTimeSeen = sentinel;

    invariant(conn() == NULL);

    while (true) {
        HostAndPort candidate = replCoord->chooseNewSyncSource(lastOpTimeFetched.getTimestamp());

        if (candidate.empty()) {
            if (oldestOpTimeSeen == sentinel) {
                // If, in this invocation of connectToSyncSource(), we did not successfully
                // connect to any node ahead of us,
                // we apparently have no sync sources to connect to.
                // This situation is common; e.g. if there are no writes to the primary at
                // the moment.
                return;
            }

            // Connected to at least one member, but in all cases we were too stale to use them
            // as a sync source.
            error() << "too stale to catch up";
            log() << "our last optime : " << lastOpTimeFetched;
            log() << "oldest available is " << oldestOpTimeSeen;
            log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
            setMinValid(txn, oldestOpTimeSeen);
            bool worked = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
            if (!worked) {
                warning() << "Failed to transition into " << MemberState(MemberState::RS_RECOVERING)
                          << ". Current state: " << replCoord->getMemberState();
            }
            return;
        }

        if (!connect(candidate)) {
            LOG(2) << "can't connect to " << candidate.toString() << " to read operations";
            resetConnection();
            // Short blacklist: connection problems are often transient.
            replCoord->blacklistSyncSource(candidate, Date_t::now() + Seconds(10));
            continue;
        }

        // Read the first (oldest) op and confirm that it's not newer than our last
        // fetched op. Otherwise, we have fallen off the back of that source's oplog.
        BSONObj remoteOldestOp(findOne(rsOplogName.c_str(), Query()));
        OpTime remoteOldOpTime = fassertStatusOK(28776, OpTime::parseFromBSON(remoteOldestOp));

        // remoteOldOpTime may come from a very old config, so we cannot compare their terms.
        if (!lastOpTimeFetched.isNull() &&
            lastOpTimeFetched.getTimestamp() < remoteOldOpTime.getTimestamp()) {
            // We're too stale to use this sync source.
            resetConnection();
            // Longer blacklist: staleness relative to this member won't clear quickly.
            replCoord->blacklistSyncSource(candidate, Date_t::now() + Minutes(1));
            // Track the oldest remote optime seen, for the "too stale" report above.
            if (oldestOpTimeSeen.getTimestamp() > remoteOldOpTime.getTimestamp()) {
                warning() << "we are too stale to use " << candidate.toString()
                          << " as a sync source";
                oldestOpTimeSeen = remoteOldOpTime;
            }
            continue;
        }

        // Got a valid sync source.
        return;
    } // while (true)
}
// Removing from an empty namespace must surface an error via getLastError().
virtual void run() {
    client().remove( "", Query() );
    const bool sawError = !client().getLastError().empty();
    ASSERT( sawError );
}
// Convenience overload: look up the SQL text registered under |sqlQueryId|
// and delegate to the string-based Query() overload.
QSqlQuery MysqlConnection::Query(quint16 sqlQueryId)
{
    return Query( GetSqlQuery(sqlQueryId) );
}
/* Dispatches a parsed command against the game state and returns the result
 * code of the executed action (0 when the command produced none).
 * Side effects: decrements move/undo counters depending on game type, and
 * may set game->gamestate to GAMESTATE_LOSE when a mine is swept and no
 * undo is available. */
int ExecCommand(tGame *game, tCommand * command)
{
    int i = command->command_ref;
    /* FIX: res was uninitialized; COMMAND_UNDO and any unmatched command_ref
     * left it unset, after which it was read in the mine check below and
     * returned to the caller. */
    int res = 0;

    switch (i) {
    case COMMAND_SWEEP:
        res = Sweep(game, command);
        if (game->gametype != GAMETYPE_INDIVIDUAL_NOLIMIT)
            game->moves--;
        break;
    case COMMAND_FLAG:
        if (!(command->flag.is_range)) {
            res = DoFlagUnflag(game, command, DO_FLAG);
            if (game->gametype != GAMETYPE_INDIVIDUAL_NOLIMIT)
                game->moves--;
        } else {
            /* a range flags several cells; each one costs a move */
            res = FlagRange(game, command, DO_FLAG);
            if (game->gametype != GAMETYPE_INDIVIDUAL_NOLIMIT)
                game->moves -= res;
        }
        break;
    case COMMAND_UNFLAG:
        if (!(command->flag.is_range)) {
            res = DoFlagUnflag(game, command, DO_UNFLAG);
            if (game->gametype != GAMETYPE_INDIVIDUAL_NOLIMIT)
                game->moves--;
        } else {
            res = FlagRange(game, command, DO_UNFLAG);
            if (game->gametype != GAMETYPE_INDIVIDUAL_NOLIMIT)
                game->moves -= res;
        }
        break;
    case COMMAND_QUERY:
        res = Query(&game->hiddenboard, command);
        break;
    case COMMAND_SAVE:
        res = WriteSaveFile(game, command->save_filename);
        break;
    case COMMAND_QUIT:
        Quit(game, command);
        exit(0);
        break;
    case COMMAND_UNDO:
        if (command->undo.can_undo && game->undos) {
            Undo(game, &command->undo);
            game->undos--;
            if (game->gametype != GAMETYPE_INDIVIDUAL_NOLIMIT)
                game->moves--;
        } else
            command->undo.undo_error = TRUE;
        break;
    }

    /* Swept a mine: offer an undo when any remain (and, in limited games,
     * when moves remain); otherwise the game is lost. */
    if (res == SWEEP_MINE && i == COMMAND_SWEEP) {
        if (game->undos) {
            if (game->gametype == GAMETYPE_INDIVIDUAL_NOLIMIT)
                AskUndo(game, &command->undo);
            else if (game->moves)
                AskUndo(game, &command->undo);
            else
                game->gamestate = GAMESTATE_LOSE;
        } else
            game->gamestate = GAMESTATE_LOSE;
    }
    return res;
}
// Factory: builds a Query object bound to this connection for the given SQL.
Query Connection::query(const std::string & str)
{
    Query boundQuery(this, str);
    return boundQuery;
}
// Opens a reverse-natural-order cursor over the remote oplog, projecting
// only the "ts" and "h" fields, and wraps it in an OplogIteratorRemote.
std::unique_ptr<OplogInterface::Iterator> OplogInterfaceRemote::makeIterator() const {
    const Query newestFirst = Query().sort(BSON("$natural" << -1));
    const BSONObj projection = BSON("ts" << 1 << "h" << 1);
    return std::unique_ptr<OplogInterface::Iterator>(new OplogIteratorRemote(
        _getConnection()->query(_collectionName, newestFirst, 0, 0, &projection, 0, 0)));
}
// Reschedules every task whose execution time has already passed to run one
// second from now. Returns the result of executing the statement.
bool CDBWrapper::UpdateMissedTasks() const
{
    static const string sSQLFormat =
        "UPDATE schedules SET schedExecTime = DATE_ADD(now(), INTERVAL 1 SECOND) WHERE schedExecTime < now()";
    return Query( sSQLFormat );
}