// Handles the GetRecentContacts RPC: loads the caller's recent-contact list
// from MongoDB (newest message first, capped at the request's max_count) and
// replies with one Contact entry per stored contact, including the last
// chat message exchanged with that contact.
void MethodGetRecentContacts::onCall(const sg::rpc::Uri& target) {
    common::UserID uid(common::UserPointID(this->getFrom()));
    // Project only the embedded contact sub-document out of each record.
    mongo::BSONObj returnfield = BSON(COLL_RECENTCONTACTS_CONTACT << 1);
    mongo::ScopedDbConnection conn(
            MongoDBManagerSingleton::instance().host());
    // Sort descending on "<contact>.<time>" so the most recently active
    // contacts come first; the two macros are joined into one dotted field
    // path via string-literal concatenation.
    auto_ptr<mongo::DBClientCursor> cursor = conn->query(
            COLL_RECENTCONTACTS,
            QUERY(COLL_RECENTCONTACTS_UID << uid.toString())
                .sort(COLL_RECENTCONTACTS_CONTACT"."COLL_RECENTCONTACTS_CONTACT_TIME, -1),
            getRequestData().max_count(), 0, &returnfield);
    methods::rpc::GetRecentContactsResponse resp;
    while (cursor->more()) {
        mongo::BSONObj p = cursor->nextSafe();
        methods::rpc::GetRecentContactsResponse_Contact* contact = resp.add_contacts();
        common::UserID contact_uid;
        contact_uid.parseFromString(p[COLL_RECENTCONTACTS_CONTACT_UID].String());
        contact_uid.get(contact->mutable_user_id());
        // The stored flag appears to be relative to one side of the uid pair
        // and is flipped when the caller is the higher uid, so SEND/RECV ends
        // up expressed from the caller's point of view.
        // NOTE(review): inferred from the comparison below -- confirm against
        // the code that writes COLL_CHATMSGS_DATA_FLAG (these field macros
        // are shared with the chat-message collection; verify they address
        // the same field names inside the recent-contacts documents).
        bool flag = p[COLL_CHATMSGS_DATA_FLAG].Int();
        if (contact_uid.uid < uid.uid)
            flag = !flag;
        methods::rpc::ChatMsgData* pdata = contact->mutable_last_msg();
        pdata->set_flag(flag ? methods::rpc::ChatMsgData_Flag_FLAG_RECV
                             : methods::rpc::ChatMsgData_Flag_FLAG_SEND);
        pdata->set_msg_id(p[COLL_CHATMSGS_DATA_MSGID].Long());
        pdata->set_msg_time(p[COLL_CHATMSGS_DATA_TIME].Date().toTimeT());
        p[COLL_CHATMSGS_DATA_CONTENT_TYPE].Val(*pdata->mutable_content_type());
        p[COLL_CHATMSGS_DATA_CONTENT].Val(*pdata->mutable_content());
    }
    conn.done();  // return the pooled connection before sending the reply
    reply(target, resp);
}
static void cmd_window_server(const char *data) { SERVER_REC *server; QUERY_REC *query; g_return_if_fail(data != NULL); server = server_find_tag(data); query = QUERY(active_win->active); if (server == NULL || query == NULL) return; /* /WINDOW SERVER used in a query window */ query_change_server(query, server); printformat(NULL, NULL, MSGLEVEL_CLIENTNOTICE, TXT_QUERY_SERVER_CHANGED, query->name, server->tag); signal_stop(); }
// Applies `updateObj` to the admin.system.users document identified by
// `user` (user name + source db), then waits for the caller's write concern
// via getLastError. Returns UserNotFound when no document matched,
// UserModificationFailed on a write error, or the DBException translated to
// a Status.
Status AuthzManagerExternalStateMongod::updatePrivilegeDocument(
        const UserName& user, const BSONObj& updateObj, const BSONObj& writeConcern) {
    try {
        const std::string userNS = "admin.system.users";
        DBDirectClient client;
        {
            Client::GodScope gs;
            // TODO(spencer): Once we're no longer fully rebuilding the user cache on every
            // change to user data we should remove the global lock and uncomment the
            // WriteContext below
            Lock::GlobalWrite w;
            // Client::WriteContext ctx(userNS);
            client.update(userNS,
                          QUERY(AuthorizationManager::USER_NAME_FIELD_NAME << user.getUser() <<
                                AuthorizationManager::USER_SOURCE_FIELD_NAME << user.getDB()),
                          updateObj);
        }

        // Handle write concern: run getLastError with the caller's options
        // appended so the update is acknowledged at the requested level.
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string err = client.getLastErrorString(res);
        if (!err.empty()) {
            return Status(ErrorCodes::UserModificationFailed, err);
        }

        // "n" is how many documents the update touched; the dassert encodes
        // the expectation that this query matches at most one document.
        int numUpdated = res["n"].numberInt();
        dassert(numUpdated <= 1 && numUpdated >= 0);
        if (numUpdated == 0) {
            return Status(ErrorCodes::UserNotFound,
                          mongoutils::str::stream() << "User " << user.getFullName() <<
                                  " not found");
        }

        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
/* Dispatch one console command line to its handler and return its RC_*
 * status. RESULT is cleared first so every handler starts with an empty
 * reply buffer. matchstr() takes &line, presumably advancing it past the
 * matched keyword so handlers receive only the arguments -- confirm.
 * BYE/QUIT invoke QUIT() (presumably does not return; otherwise they fall
 * through to the final RC_OK), RESET performs the reset inline and reports
 * RC_OK, and unknown commands yield RC_ERROR. */
static int process_cmd (const char *line)
{
    RESULT[0] = '\0';
    if (matchstr (&line, "EJECT"))
	return EJECT (line);
    else if (matchstr (&line, "INSERT"))
	return INSERT (line);
    else if (matchstr (&line, "QUERY"))
	return QUERY (line);
    else if (matchstr (&line, "FEEDBACK"))
	return FEEDBACK (line);
    else if (matchstr (&line, "VERSION"))
	return GET_VERSION (line);
    else if (matchstr (&line, "BYE"))
	QUIT ();
    else if (matchstr (&line, "QUIT"))
	QUIT ();
    else if (matchstr (&line, "DEBUG"))
	return DEBUG ();
    else if (matchstr (&line, "RESET"))
	m68k_reset (0);
    else if (matchstr (&line, "DISPLAY"))
	return DISPLAY (line);
    else if (matchstr (&line, "FRAMERATE"))
	return FRAMERATE( line);
    else if (matchstr (&line, "FAKEJOYSTICK"))
	return FAKEJOYSTICK (line);
    else if (matchstr (&line, "SOUND"))
	return SOUND (line);
    else if (matchstr (&line, "UAEEXE"))
	return UAEEXE (line);
    else
	return RC_ERROR;
    return RC_OK;
}
// Fetches the user's Google profile over OAuth and posts `handler(ok)` back
// onto io_service. Runs on a background thread; the object deletes itself
// on exit.
//
// Fixes over the previous version:
//  - req_url / reply (heap strings returned by liboauth) are now freed
//    (they were leaked on every call).
//  - a NULL HTTP reply no longer feeds std::string/log with a null pointer.
//  - a JSON parse error no longer escapes as an exception (which would have
//    skipped both the completion callback and `delete this`).
//  - `ok` now reports success instead of being hard-wired to false.
void UpdateGoogleData::doInBackground(const mongo::OID & user_id, CompletionHandler handler){
    std::cout << "do" << boost::this_thread::get_id() << std::endl;
    Log * log = Log::Instance();
    Config * config = Config::Instance();
    log->write("Update user google info: " + user_id.toString());
    bool ok = false;
    char *req_url = NULL;
    char *reply = NULL;
    DB * db = DB::Instance();
    if(db){
        mongo::BSONObj user = db->findOne("yquest.users", QUERY("_id" << user_id));
        if(user.valid()){
            // Sign the profile-request URL with the consumer key/secret and
            // the user's stored OAuth token pair.
            req_url = oauth_sign_url2(config->OAUTH_PROFILE_REQUEST_URL.c_str(), NULL,
                                      OA_HMAC, NULL,
                                      config->OAUTH_KEY.c_str(), config->OAUTH_SECRET.c_str(),
                                      user["oauth_token"].String().c_str(),
                                      user["oauth_token_secret"].String().c_str());
            reply = oauth_http_get(req_url, NULL);
            if(reply){
                log->write(reply);
                try {
                    std::stringstream stream(reply);
                    boost::property_tree::ptree ptree;
                    boost::property_tree::json_parser::read_json(stream, ptree);
                    std::cout << ptree.get<std::string>("entry.displayName") << std::endl;
                    ok = true;
                } catch (const std::exception& e) {
                    log->write(std::string("Google profile parse failed: ") + e.what());
                }
            } else {
                log->write("Google profile request failed (no reply)");
            }
        }
    }
    // liboauth returns malloc'd C strings; the caller must free them.
    if(req_url) free(req_url);
    if(reply) free(reply);
    io_service.post(boost::bind(handler, ok));
    delete this;
}
/**
 * (re)connect to database
 *
 * Returns 0 on success (dbError cleared), 1 on connection or authentication
 * failure (dbError left set). Also seeds the "exporterCounter" document used
 * to hand out incrementing exporter IDs.
 */
int IpfixDbWriterMongo::connectToDB()
{
	dbError = true;
	// If a connection exists don't reconnect
	// NOTE(review): isFailed() is true after a FAILED connection attempt,
	// not when a healthy connection exists, so this guard looks inverted
	// relative to its comment -- and it returns 0 (success) while dbError
	// is still set. Confirm the intended semantics before changing.
	if (con.isFailed()) return 0;
	// Connect
	string err;
	mongo::HostAndPort dbLogon;
	dbLogon = mongo::HostAndPort(dbHost, dbPort);
	msg(MSG_INFO,"IpfixDbWriterMongo: Connection details: %s", dbLogon.toString().c_str());
	if(!con.connect(dbLogon, err))
	{
		msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo connect failed. Error: %s", err.c_str());
		return 1;
	}
	if(!dbUser.empty() && !dbPassword.empty())
	{
		// we need to authenticate
		if(!con.auth(dbName, dbUser, dbPassword, err))
		{
			msg(MSG_FATAL,"IpfixDbWriterMongo: Mongo authentication failed. Error: %s", err.c_str());
			return 1;
		}
	}
	// create counter to support incrementing Exporter IDs (only when absent)
	if(con.findOne(dbCollectionCounters, QUERY("_id" << "exporterCounter")).isEmpty())
	{
		mongo::BSONObjBuilder b;
		b << "_id" << "exporterCounter" << "c" << 0;
		mongo::BSONObj obj = b.obj();
		con.insert(dbCollectionCounters, obj);
	}
	msg(MSG_DEBUG,"IpfixDbWriterMongo: Mongo connection successful");
	dbError = false;
	return 0;
}
/* Fetch the `config` rows matching the given key/subsystem and the full set
 * of scoping ids (machine, user, group, vm-group, node-group, vm, node,
 * platform, role). DB_VARS, QUERY and QID_RETURN are project macros that
 * declare the esql locals, issue the asynchronous query and return its
 * status, so the statement order here is load-bearing. Result rows are
 * released later through the KEYVALUE_free callback installed on the
 * context. Fails fast with INVALID_PARAMETER on a NULL/empty key or
 * subsystem. */
Eina_Bool getConfig(DB *db, Zentific_Ctx *ctx, const char *key, const char *subsystem, int mid, int uid, int gid, int vgid, int ngid, int vm, int node, int platform, int role){
	if (!key || !subsystem || !key[0] || !subsystem[0]){
		ctx->error = AZY_ERR(INVALID_PARAMETER);
		return EINA_FALSE;
	}
	DB_VARS;
	QUERY(KEYVALUE_esql,
	      "SELECT * FROM `config` WHERE `key`='%s'"
	      " AND `subsystem`='%s' AND `mid`='%d'"
	      " AND `uid`='%d' AND `gid`='%d' AND `vgid`='%d'"
	      " AND `ngid`='%d' AND `vm`='%d' AND `node`='%d'"
	      " AND `platform`='%d' AND `role`='%d'",
	      key, subsystem, mid, uid, gid, vgid, ngid, vm, node, platform, role);
	ctx->free_func = (Ecore_Cb)KEYVALUE_free;
	QID_RETURN;
}
/* GObject property getter for EDataCalView: exposes the backend and the
 * search s-expression held in the private struct. */
static void
e_data_cal_view_get_property (GObject *object, guint property_id, GValue *value, GParamSpec *pspec)
{
	EDataCalViewPrivate *priv = QUERY (object)->priv;

	switch (property_id) {
	case PROP_BACKEND:
		g_value_set_object (value, priv->backend);
		break;
	case PROP_SEXP:
		g_value_set_object (value, priv->sexp);
		break;
	default:
		G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
		break;
	}
}
/* SYNTAX: UNQUERY [<nick>] */
static void cmd_unquery(const char *data, SERVER_REC *server, WI_ITEM_REC *item)
{
	QUERY_REC *target;

	g_return_if_fail(data != NULL);

	if (*data != '\0') {
		/* close the query window for the given nick */
		target = query_find(server, data);
		if (target == NULL) {
			printformat(server, NULL, MSGLEVEL_CLIENTERROR,
				    TXT_NO_QUERY, data);
			return;
		}
	} else {
		/* no nick given - remove current query */
		target = QUERY(item);
		if (target == NULL)
			return;
	}

	query_destroy(target);
}
// Asynchronously adds `kBytes` to the user's credit counter for `section`.
// MongoDB cannot atomically "upsert into an array element", so the work is
// done in three racing steps: (1) findandmodify to $inc an existing
// {section, value} array element; (2) if absent, $push a new element with a
// $not/$elemMatch guard in the query so a concurrent push cannot create a
// duplicate section entry; (3) if the push lost that race, retry the $inc.
// Only if all three fail is the failure logged. The whole sequence runs on
// a detached async task registered with asyncTasks.
void User::IncrCredits(const std::string& section, long long kBytes)
{
  auto doIncrement = [section, kBytes](acl::UserID uid)
    {
      NoErrorConnection conn;
      // Step (1)/(3): increment the matching array element in place.
      // findandmodify is used (rather than update) so the returned "value"
      // tells us whether a document actually matched.
      auto updateExisting = [&]() -> bool
        {
          auto query = BSON("uid" << uid <<
                            "credits" << BSON("$elemMatch" << BSON("section" << section)));
          auto update = BSON("$inc" << BSON("credits.$.value" << kBytes));
          auto cmd = BSON("findandmodify" << "users" <<
                          "query" << query <<
                          "update" << update);
          mongo::BSONObj result;
          return conn.RunCommand(cmd, result) &&
                 result["value"].type() != mongo::jstNULL;
        };
      // Step (2): push a fresh {section, value} element, guarded so it only
      // fires when no element for this section exists yet.
      auto doInsert = [&]() -> bool
        {
          auto query = QUERY("uid" << uid <<
                             "credits" << BSON("$not" << BSON("$elemMatch" << BSON("section" << section))));
          auto update = BSON("$push" << BSON("credits" << BSON("section" << section << "value" << kBytes)));
          return conn.Update("users", query, update, false) > 0;
        };

      if (updateExisting()) return;
      if (doInsert()) return;
      // A concurrent insert may have slipped in between the two calls above.
      if (updateExisting()) return;

      logs::Database("Unable to increment credits for UID %1%%2%",
                     uid, !section.empty() ? " in section " + section : std::string(""));
    };

  asyncTasks.Assign(std::async(std::launch::async, doIncrement, user.id));
}
// For every row record in m_ptrCursor: build the query selecting that row's
// racks, aggregate the racks' VLan info, and write it onto the CI document
// with the same _id as the row.
void CComputeRowDataProcessor::ComputeData()
{
	bool bResult = false;  // NOTE(review): never read -- dead variable
	BSONObj boRowRecord, boVLanInfo;
	Query queryRackInRow;
	// NOTE(review): pRackModel and pRowModel are dereferenced below without
	// ever being initialized -- undefined behavior unless these model
	// classes are stateless and this only works by accident. They should be
	// obtained from members/factories; confirm the intended source and
	// initialize them.
	CRackModel *pRackModel;
	CRowModel *pRowModel;
	auto_ptr<DBClientCursor> ptrRackCursor = auto_ptr<DBClientCursor>();
	while (m_ptrCursor->more())
	{
		boRowRecord = m_ptrCursor->nextSafe();
		// derive the rack-selection query from this row record
		if (pRowModel->GetRackQuery(queryRackInRow, boRowRecord))
		{
			if (m_pRackController->Find(ptrRackCursor, queryRackInRow))
			{
				// merge the racks' VLan data and update the CI record
				// keyed by the row's _id
				boVLanInfo = pRackModel->GetVLanInfo(ptrRackCursor);
				m_pCIController->Update(boVLanInfo, QUERY("_id" << boRowRecord["_id"]));
			}
		}
		ptrRackCursor.reset();  // release the cursor before the next row
	}
}
//----------------------------------------------------------------------------- bool solution_c::fetch( Reveal::Core::solution_ptr& solution, Reveal::DB::database_ptr db, std::string experiment_id, std::string scenario_id, double t, double epsilon ) { std::auto_ptr<mongo::DBClientCursor> cursor; Reveal::Core::scenario_ptr ptr; std::string table = "solution"; mongo_ptr mongo = mongo_c::service( db ); if( !mongo ) return false; double lbound = t - epsilon; double ubound = t + epsilon; mongo->fetch( cursor, table, QUERY( "experiment_id" << experiment_id << "scenario_id" << scenario_id << "t" << mongo::GTE << lbound << mongo::LTE << ubound ) ); if( !cursor->more() ) return false; // TODO: add error handling mongo::BSONObj record = cursor->next(); map( solution, record, Reveal::Core::solution_c::CLIENT ); return true; }
// Removes the listed users from the caller's recent-contact collection.
// When the request carries no user ids, nothing is deleted but the RPC is
// still acknowledged.
void MethodRemoveRecentContacts::onCall(const sg::rpc::Uri& target) {
    if (getRequestData().user_ids_size()) {
        common::UserID uid(common::UserPointID(this->getFrom()));
        mongo::ScopedDbConnection conn(
                MongoDBManagerSingleton::instance().host());
        // collect the stringified uids of the contacts to remove
        mongo::BSONArrayBuilder contactsBuilder;
        for (int i=0; i<getRequestData().user_ids_size(); ++i) {
            contactsBuilder << common::UserID(getRequestData().user_ids(i)).toString();
        }
        // NOTE(review): MongoDB's "$or" operator requires an ARRAY of
        // sub-condition objects; here it receives a plain object whose value
        // is an array of uid strings, which the server rejects as malformed.
        // The intent looks like an "$in" match on the contact field --
        // confirm against the documents written by the recent-contacts
        // writer and fix the query accordingly.
        conn->remove(COLL_RECENTCONTACTS,
                QUERY(COLL_RECENTCONTACTS_UID << uid.toString()
                    << "$or" << BSON(COLL_RECENTCONTACTS_CONTACT << contactsBuilder.arr())));
        conn.done();  // return the pooled connection
    }
    reply(target);
}
// Mongos variant: applies `updateObj` to the admin.system.users document for
// `user` (user name + source db) through a pooled connection to the authz
// collection, then waits for the caller's write concern via getLastError.
// Returns UserNotFound when no document matched, UserModificationFailed on a
// write error, or the DBException translated to a Status.
Status AuthzManagerExternalStateMongos::updatePrivilegeDocument(
        const UserName& user, const BSONObj& updateObj, const BSONObj& writeConcern) {
    try {
        const std::string userNS = "admin.system.users";
        scoped_ptr<ScopedDbConnection> conn(getConnectionForAuthzCollection(userNS));
        conn->get()->update(userNS,
                            QUERY(AuthorizationManager::USER_NAME_FIELD_NAME << user.getUser() <<
                                  AuthorizationManager::USER_SOURCE_FIELD_NAME << user.getDB()),
                            updateObj);

        // Handle write concern: run getLastError with the caller's options
        // appended so the update is acknowledged at the requested level.
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        conn->get()->runCommand("admin", gleBuilder.done(), res);
        string err = conn->get()->getLastErrorString(res);
        conn->done();  // return the connection to the pool before inspecting results
        if (!err.empty()) {
            return Status(ErrorCodes::UserModificationFailed, err);
        }

        // "n" is how many documents the update touched; the dassert encodes
        // the expectation that this query matches at most one document.
        int numUpdated = res["n"].numberInt();
        dassert(numUpdated <= 1 && numUpdated >= 0);
        if (numUpdated == 0) {
            return Status(ErrorCodes::UserNotFound,
                          mongoutils::str::stream() << "User " << user.getFullName() <<
                                  " not found");
        }

        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
// Re-encodes (mode, uid, gid, time) into the GridFS file's contentType field
// using the "m:<mode>|u:<uid>|g:<gid>|t:<time>" format and pushes the change
// to MongoDB.
// TODO DK this is not multi process safe because it doesn't store a new file
// entry, but don't see a better solution yet.
void FilesystemEntry::updateContentType(mongo::GridFile& gridfile, mode_t mode, uid_t uid, gid_t gid, time_t time)
{
  std::stringstream encoded;
  encoded << "m:" << mode << "|u:" << uid << "|g:" << gid << "|t:" << time;

  // address the GridFS files entry by its _id
  mongo::OID fileId(gridfile.getFileField("_id").OID().str());

  theConnection->update(
      filesCollection(),
      QUERY("_id" << fileId),
      BSON("$set" << BSON("contentType" << encoded.str())));
  synchonizeUpdate();
}
// Legacy (per-database v1 schema) variant: applies `updateObj` to
// <userDB>.system.users, matching by user name with a null/absent
// userSource (BSONNULL matches both). Waits on getLastError with a 30s
// timeout. Returns UserNotFound when nothing matched,
// UserModificationFailed on a write error, or the DBException translated
// to a Status.
Status AuthzManagerExternalStateMongod::updatePrivilegeDocument(
        const UserName& user, const BSONObj& updateObj) {
    try {
        string userNS = mongoutils::str::stream() << user.getDB() << ".system.users";
        DBDirectClient client;
        {
            Client::GodScope gs;
            // TODO(spencer): Once we're no longer fully rebuilding the user cache on every
            // change to user data we should remove the global lock and uncomment the
            // WriteContext below
            Lock::GlobalWrite w;
            // Client::WriteContext ctx(userNS);
            client.update(userNS,
                          QUERY("user" << user.getUser() << "userSource" << BSONNULL),
                          updateObj);
        }

        // 30 second timeout for w:majority
        BSONObj res = client.getLastErrorDetailed(false, false, -1, 30*1000);
        string err = client.getLastErrorString(res);
        if (!err.empty()) {
            return Status(ErrorCodes::UserModificationFailed, err);
        }

        // "n" is how many documents the update touched; the dassert encodes
        // the expectation that this query matches at most one document.
        int numUpdated = res["n"].numberInt();
        dassert(numUpdated <= 1 && numUpdated >= 0);
        if (numUpdated == 0) {
            return Status(ErrorCodes::UserNotFound,
                          mongoutils::str::stream() << "User " << user.getFullName() <<
                                  " not found");
        }

        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
//----------------------------------------------------------------------------- bool trial_c::fetch( Reveal::Core::trial_ptr& trial, Reveal::DB::database_ptr db, std::string scenario_id, double t, double epsilon ) { std::auto_ptr<mongo::DBClientCursor> cursor; // Reveal::DB::query_c query; Reveal::Core::scenario_ptr ptr; // get mongo service and verify mongo_ptr mongo = mongo_c::service( db ); if( !mongo ) return false; double lbound = t - epsilon; double ubound = t + epsilon; //mongo->fetch( cursor, "trial", QUERY( "scenario_id" << scenario_id << "t" << t ) ); mongo->fetch( cursor, "trial", QUERY( "scenario_id" << scenario_id << "t" << mongo::GTE << lbound << mongo::LTE << ubound) ); if( !cursor->more() ) return false; // add error handling mongo::BSONObj record = cursor->next(); map( trial, record ); return true; }
// Rebuilds the in-memory order books from MongoDB at startup: restores the
// next order / transaction ids from the highest stored _id values, then
// loads every order that still has unfilled quantity into its market.
void DB::loadOrders()
{
    // Highest _id seen in each collection (sort by _id descending).
    mongo::BSONObj o_max = getInstance().findOne("btct.orders", mongo::Query().sort("_id", -1));
    mongo::BSONObj t_max = getInstance().findOne("btct.transactions", mongo::Query().sort("_id", -1));
    // Order ids are floored at 10000 and transaction ids at 1 when the
    // collections are empty (getField().ok() is false on a missing field).
    uint64_t o_next_id = o_max.getField("_id").ok()?o_max.getField("_id").Long():10000;
    if(o_next_id < 10000)
    {
        o_next_id = 10000;
    }
    uint64_t t_next_id = t_max.getField("_id").ok()?t_max.getField("_id").Long():1;
    if(t_next_id <= 1){
        t_next_id = 1;
    }
    Order::next_id = o_next_id + 1;
    Transaction::next_id = t_next_id + 1;
    // Only open orders (remaining qty > 0) are reloaded.
    std::auto_ptr<mongo::DBClientCursor> cursor = getInstance().query("btct.orders", QUERY("qty" << mongo::GT << 0));
    int order_count = 0;
    printf("Loading orders...\n");
    while (cursor->more())
    {
        mongo::BSONObj p = cursor->next();
        // NOTE(review): assumes every stored market_id has a live entry in
        // Market::markets; a stale id would yield an invalid `market`
        // pointer -- confirm how markets are populated before this runs.
        Market *market = Market::markets[p.getIntField("market_id")];
        Order *o = new Order(
            market,
            (const uint32_t) p.getIntField("account_id"),
            (const order_type_t) p.getBoolField("direction"),
            (const uint64_t) p.getField("qty").Long(),
            (const uint64_t) p.getField("orig_qty").Long(),
            (const uint64_t) p.getField("price").Long(),
            (const uint64_t) p.getField("_id").Long(),
            (const uint32_t) p.getIntField("timestamp"),
            (const order_status_t) p.getIntField("status"));
        market->addOrder(o);  // market takes ownership of the order
        order_count++;
    }
    printf("Loaded %u orders\n", order_count);
    printf("Biggest order number: %llu\n", o_next_id);
}
/*
 * Free all the data allocated for queries and results.
 * This is called by rlib_free() when destroying everything.
 *
 * For each slot the teardown order is: release the input driver's result,
 * free the result wrapper, let the input driver free the query, then free
 * the query's owned strings and the query itself. Finally the two arrays
 * are released and the count reset.
 */
static void rlib_free_results_and_queries(rlib *r)
{
	int idx;

	for (idx = 0; idx < r->queries_count; idx++) {
		if (r->results[idx]->result && INPUT(r, idx)->free_result) {
			INPUT(r, idx)->free_result(INPUT(r, idx), r->results[idx]->result);
			r->results[idx]->result = NULL;
		}
		g_free(r->results[idx]);
		r->results[idx] = NULL;

		if (QUERY(r, idx) && QUERY(r, idx)->input && QUERY(r, idx)->input->free_query)
			QUERY(r, idx)->input->free_query(QUERY(r, idx)->input, QUERY(r, idx));
		if (r->queries[idx]->sql_allocated)
			g_free(r->queries[idx]->sql);
		g_free(r->queries[idx]->name);
		g_free(r->queries[idx]);
		r->queries[idx] = NULL;
	}

	g_free(r->results);
	r->results = NULL;
	g_free(r->queries);
	r->queries = NULL;
	r->queries_count = 0;
}
/**
 * Enumerate/describe the svga driver-specific performance queries.
 * With info == NULL the call returns the number of available queries;
 * otherwise it copies the query description at `index` into *info and
 * returns 1, or 0 when the index is out of range.
 */
static int
svga_get_driver_query_info(struct pipe_screen *screen,
                           unsigned index,
                           struct pipe_driver_query_info *info)
{
/* Expands to a pipe_driver_query_info initializer; every entry uses the
 * AVERAGE result type with the given unit. Undefined right after the table. */
#define QUERY(NAME, ENUM, UNITS) \
   {NAME, ENUM, {0}, UNITS, PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE, 0, 0x0}
   static const struct pipe_driver_query_info queries[] = {
      /* per-frame counters */
      QUERY("num-draw-calls", SVGA_QUERY_NUM_DRAW_CALLS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-fallbacks", SVGA_QUERY_NUM_FALLBACKS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-flushes", SVGA_QUERY_NUM_FLUSHES,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-validations", SVGA_QUERY_NUM_VALIDATIONS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("map-buffer-time", SVGA_QUERY_MAP_BUFFER_TIME,
            PIPE_DRIVER_QUERY_TYPE_MICROSECONDS),
      QUERY("num-resources-mapped", SVGA_QUERY_NUM_RESOURCES_MAPPED,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-bytes-uploaded", SVGA_QUERY_NUM_BYTES_UPLOADED,
            PIPE_DRIVER_QUERY_TYPE_BYTES),
      QUERY("command-buffer-size", SVGA_QUERY_COMMAND_BUFFER_SIZE,
            PIPE_DRIVER_QUERY_TYPE_BYTES),
      QUERY("flush-time", SVGA_QUERY_FLUSH_TIME,
            PIPE_DRIVER_QUERY_TYPE_MICROSECONDS),
      QUERY("surface-write-flushes", SVGA_QUERY_SURFACE_WRITE_FLUSHES,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-readbacks", SVGA_QUERY_NUM_READBACKS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-resource-updates", SVGA_QUERY_NUM_RESOURCE_UPDATES,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-buffer-uploads", SVGA_QUERY_NUM_BUFFER_UPLOADS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-const-buf-updates", SVGA_QUERY_NUM_CONST_BUF_UPDATES,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-const-updates", SVGA_QUERY_NUM_CONST_UPDATES,
            PIPE_DRIVER_QUERY_TYPE_UINT64),

      /* running total counters */
      QUERY("memory-used", SVGA_QUERY_MEMORY_USED,
            PIPE_DRIVER_QUERY_TYPE_BYTES),
      QUERY("num-shaders", SVGA_QUERY_NUM_SHADERS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-resources", SVGA_QUERY_NUM_RESOURCES,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-state-objects", SVGA_QUERY_NUM_STATE_OBJECTS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-surface-views", SVGA_QUERY_NUM_SURFACE_VIEWS,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
      QUERY("num-generate-mipmap", SVGA_QUERY_NUM_GENERATE_MIPMAP,
            PIPE_DRIVER_QUERY_TYPE_UINT64),
   };
#undef QUERY

   if (!info)
      return Elements(queries);

   if (index >= Elements(queries))
      return 0;

   *info = queries[index];
   return 1;
}
// renameCollection command implementation: renames `source` (from the
// command's first element) to `target` (from "to"). A same-database rename
// is a cheap namespace rename; a cross-database rename copies every
// document and index spec and then drops the source collection.
virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    string source = cmdObj.getStringField( name.c_str() );
    string target = cmdObj.getStringField( "to" );
    uassert(15967,
            "invalid collection name: " + target,
            NamespaceString::validCollectionComponent(target.c_str()));
    if ( source.empty() || target.empty() ) {
        errmsg = "invalid command syntax";
        return false;
    }

    string sourceDB = nsToDatabase(source);
    string targetDB = nsToDatabase(target);
    string databaseName = sourceDB;
    databaseName += ".system.indexes";

    // The target name plus the source's longest index name must still fit
    // inside a namespace string, so scan the existing index specs first.
    int longestIndexNameLength = 0;
    vector<BSONObj> oldIndSpec = Helpers::findAll(databaseName, BSON("ns" << source));
    for (size_t i = 0; i < oldIndSpec.size(); ++i) {
        int thisLength = oldIndSpec[i].getField("name").valuesize();
        if (thisLength > longestIndexNameLength) {
            longestIndexNameLength = thisLength;
        }
    }
    unsigned int longestAllowed = maxNamespaceLen - longestIndexNameLength - 1;
    if (target.size() > longestAllowed) {
        StringBuilder sb;
        sb << "collection name length of " << target.size()
           << " exceeds maximum length of " << longestAllowed
           << ", allowing for index names";
        uasserted(16451, sb.str());
    }

    bool capped = false;
    long long size = 0;
    std::vector<BSONObj> indexesInProg;
    {
        // Scoped context on the source: verify it exists, stop in-progress
        // index builds (restored at the end), and capture capped info.
        Client::Context ctx( source );
        NamespaceDetails *nsd = nsdetails( source );
        uassert( 10026 ,  "source namespace does not exist", nsd );
        indexesInProg = stopIndexBuilds(dbname, cmdObj);
        capped = nsd->isCapped();
        if ( capped )
            // total capped size = sum of all extent lengths
            for( DiskLoc i = nsd->firstExtent(); !i.isNull(); i = i.ext()->xnext )
                size += i.ext()->length;
    }

    Client::Context ctx( target );

    if ( nsdetails( target ) ) {
        // an existing target is only dropped when dropTarget was requested
        uassert( 10027 ,  "target namespace exists", cmdObj["dropTarget"].trueValue() );
        Status s = cc().database()->dropCollection( target );
        if ( !s.isOK() ) {
            errmsg = s.toString();
            return false;
        }
    }

    // if we are renaming in the same database, just
    // rename the namespace and we're done.
    {
        if ( sourceDB == targetDB ) {
            Status s = ctx.db()->renameCollection( source, target,
                                                   cmdObj["stayTemp"].trueValue() );
            if ( !s.isOK() ) {
                errmsg = s.toString();
                return false;
            }
            return true;
        }
    }

    // renaming across databases, so we must copy all
    // the data and then remove the source collection.
    BSONObjBuilder spec;
    if ( capped ) {
        spec.appendBool( "capped", true );
        spec.append( "size", double( size ) );
    }
    if ( !userCreateNS( target.c_str(), spec.done(), errmsg, false ) )
        return false;

    auto_ptr< DBClientCursor > c;
    DBDirectClient bridge;

    {
        c = bridge.query( source, BSONObj(), 0, 0, 0,
                          fromRepl ? QueryOption_SlaveOk : 0 );
    }
    // copy every document into the freshly created target collection
    while( 1 ) {
        {
            if ( !c->more() )
                break;
        }
        BSONObj o = c->next();
        theDataFileMgr.insertWithObjMod( target.c_str(), o );
    }

    string sourceIndexes = nsToDatabase( source ) + ".system.indexes";
    string targetIndexes = nsToDatabase( target ) + ".system.indexes";
    {
        c = bridge.query( sourceIndexes, QUERY( "ns" << source ), 0, 0, 0,
                          fromRepl ? QueryOption_SlaveOk : 0 );
    }
    // copy every index spec, rewriting its "ns" field to point at the target
    while( 1 ) {
        {
            if ( !c->more() )
                break;
        }
        BSONObj o = c->next();
        BSONObjBuilder b;
        BSONObjIterator i( o );
        while( i.moreWithEOO() ) {
            BSONElement e = i.next();
            if ( e.eoo() )
                break;
            if ( strcmp( e.fieldName(), "ns" ) == 0 ) {
                b.append( "ns", target );
            }
            else {
                b.append( e );
            }
        }
        BSONObj n = b.done();
        theDataFileMgr.insertWithObjMod( targetIndexes.c_str(), n );
    }

    {
        // drop the now-copied source and restore the index builds stopped
        // earlier, retargeted at the new index namespace
        Client::Context ctx( source );
        Status s = ctx.db()->dropCollection( source );
        if ( !s.isOK() ) {
            errmsg = s.toString();
            return false;
        }
        IndexBuilder::restoreIndexes(targetIndexes, indexesInProg);
    }

    return true;
}
// Streams all non-soft-deleted aggregation switches through ptrCursor.
// Returns the underlying Find()'s success flag.
bool CCMDBController::FindOperatingAggregationSwitch(auto_ptr<DBClientCursor>& ptrCursor)
{
    // "operating" == deleted flag cleared
    return Find(TBL_AGGREGATION_SWITCH, ptrCursor, QUERY("deleted" << 0));
}
// Streams all non-soft-deleted access switches through ptrCursor.
// Returns the underlying Find()'s success flag.
bool CCMDBController::FindOperatingAccessSwitch(auto_ptr<DBClientCursor>& ptrCursor)
{
    // "operating" == deleted flag cleared
    return Find(TBL_ACCESS_SWITCH, ptrCursor, QUERY("deleted" << 0));
}
// Streams all non-soft-deleted chassis records through ptrCursor.
// Returns the underlying Find()'s success flag.
bool CCMDBController::FindOperatingChassis(auto_ptr<DBClientCursor>& ptrCursor)
{
    // "operating" == deleted flag cleared
    return Find(TBL_CHASSIS, ptrCursor, QUERY("deleted" << 0));
}
// Fetches the seedbank record with the given seedbank_id from the collection
// named by _param_map["seedbankdb_ns"]. Returns an empty BSONObj when the
// connection is down, the query fails, or no record matches.
// Fix: query() can hand back a null cursor on a transport error; the old
// code dereferenced it unconditionally.
BSONObj _look_up_seedbank(scoped_ptr<ScopedDbConnection> const& scoped_conn, int seedbank_id)
{
    BSONObj sb_record;
    DBClientBase* conn = scoped_conn->get();
    if (conn->isFailed()) {
        log_util::error() << "torrentdb::_look_up_seedbank: mongodb connection failed" << endl;
        return sb_record;
    }
#ifdef _DEBUG
    log_util::debug() << "torrentdb::_look_up_seedbank: running mongodb query ("
        << seedbank_id << ")" << endl;
#endif
    std::auto_ptr<DBClientCursor> cursor =
        conn->query(_param_map["seedbankdb_ns"], QUERY("seedbank_id" << seedbank_id));
    if (!cursor.get()) {
        // null cursor means the query itself failed (e.g. socket error)
        log_util::error() << "torrentdb::_look_up_seedbank: mongodb query failed" << endl;
        return sb_record;
    }
    bool found_results = false;
    while (cursor->more()) {
        // TODO: verify no more than one record returned?
        sb_record = cursor->next();
        found_results = true;
    }
    if (!found_results) {
        log_util::error() << "torrentdb::_look_up_seedbank: mongodb result not found" << endl;
    }
    return sb_record;
}
// Fetches the torrent record whose info_hash matches the given SHA-1 hash
// (stored as 40-character hex) from the collection named by
// _param_map["torrentdb_ns"]. Returns an empty BSONObj when the connection
// is down, the query fails, or no record matches.
// Fix: query() can hand back a null cursor on a transport error; the old
// code dereferenced it unconditionally.
BSONObj _look_up_info_hash(scoped_ptr<ScopedDbConnection> const& scoped_conn, sha1_hash const& info_hash)
{
    BSONObj torrent_record;
    char ih_hex[41];
    to_hex((char const*)&info_hash[0], sha1_hash::size, ih_hex);
    DBClientBase* conn = scoped_conn->get();
    if (conn->isFailed()) {
        log_util::error() << "torrentdb::_look_up_info_hash: mongodb connection failed" << endl;
        return torrent_record;
    }
#ifdef _DEBUG
    log_util::debug() << "torrentdb::_look_up_info_hash: running mongodb query ("
        << ih_hex << ")" << endl;
#endif
    std::auto_ptr<DBClientCursor> cursor =
        conn->query(_param_map["torrentdb_ns"], QUERY("info_hash" << ih_hex));
    if (!cursor.get()) {
        // null cursor means the query itself failed (e.g. socket error)
        log_util::error() << "torrentdb::_look_up_info_hash: mongodb query failed" << endl;
        return torrent_record;
    }
    bool found_results = false;
    while (cursor->more()) {
        // TODO: verify no more than one record returned?
        torrent_record = cursor->next();
        found_results = true;
    }
#ifdef _DEBUG
    if (!found_results) {
        log_util::debug() << "torrentdb::_look_up_info_hash: torrent not found" << endl;
    }
#endif
    return torrent_record;
}
// Legacy renameCollection implementation: renames `source` (from the
// command's first element) to `target` (from "to"). A same-database rename
// is a direct namespace rename; a cross-database rename copies every
// document and index spec and then drops the source collection.
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    string source = cmdObj.getStringField( name.c_str() );
    string target = cmdObj.getStringField( "to" );
    if ( source.empty() || target.empty() ) {
        errmsg = "invalid command syntax";
        return false;
    }

    setClient( source.c_str() );
    NamespaceDetails *nsd = nsdetails( source.c_str() );
    uassert( "source namespace does not exist", nsd );
    // capture capped-collection properties so they survive a copy
    bool capped = nsd->capped;
    long long size = 0;
    if ( capped )
        // total capped size = sum of all extent lengths
        for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
            size += i.ext()->length;

    setClient( target.c_str() );
    uassert( "target namespace exists", !nsdetails( target.c_str() ) );

    {
        // same database? then a plain namespace rename suffices
        char from[256];
        nsToClient( source.c_str(), from );
        char to[256];
        nsToClient( target.c_str(), to );
        if ( strcmp( from, to ) == 0 ) {
            renameNamespace( source.c_str(), target.c_str() );
            return true;
        }
    }

    // cross-database: recreate the collection (preserving capped settings)...
    BSONObjBuilder spec;
    if ( capped ) {
        spec.appendBool( "capped", true );
        spec.append( "size", double( size ) );
    }
    if ( !userCreateNS( target.c_str(), spec.done(), errmsg, false ) )
        return false;

    auto_ptr< DBClientCursor > c;
    DBDirectClient bridge;

    {
        c = bridge.query( source, BSONObj() );
    }
    // ...copy every document...
    while( 1 ) {
        {
            if ( !c->more() )
                break;
        }
        BSONObj o = c->next();
        theDataFileMgr.insert( target.c_str(), o );
    }

    char cl[256];
    nsToClient( source.c_str(), cl );
    string sourceIndexes = string( cl ) + ".system.indexes";
    nsToClient( target.c_str(), cl );
    string targetIndexes = string( cl ) + ".system.indexes";
    {
        c = bridge.query( sourceIndexes, QUERY( "ns" << source ) );
    }
    // ...copy every index spec, rewriting its "ns" field to the target...
    while( 1 ) {
        {
            if ( !c->more() )
                break;
        }
        BSONObj o = c->next();
        BSONObjBuilder b;
        BSONObjIterator i( o );
        while( i.moreWithEOO() ) {
            BSONElement e = i.next();
            if ( e.eoo() )
                break;
            if ( strcmp( e.fieldName(), "ns" ) == 0 ) {
                b.append( "ns", target );
            }
            else {
                b.append( e );
            }
        }
        BSONObj n = b.done();
        theDataFileMgr.insert( targetIndexes.c_str(), n );
    }

    // ...then drop the original collection.
    setClient( source.c_str() );
    dropCollection( source, errmsg, result );
    return true;
}
void run() { const char *ns = "querytests.EmptyTail"; ASSERT_EQUALS( 0, client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, Option_CursorTailable )->getCursorId() ); insert( ns, BSON( "a" << 0 ) ); ASSERT( 0 != client().query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, Option_CursorTailable )->getCursorId() ); }
// findAndModify with a "sort" option: emulated via a DBDirectClient by
// running findOne(sort) followed by a separate remove/update/upsert, with
// getLastError consulted after each write. The sortless case is delegated to
// runNoDirectClient. Returns the pre- or post-image in result["value"]
// depending on the "new" flag.
//
// NOTE(review): findOne + subsequent write is not atomic here — presumably
// acceptable because the command runs under a server-level lock; confirm.
virtual bool run(const string& dbname, BSONObj& cmdObj, int x, string& errmsg, BSONObjBuilder& result, bool y) {
    static DBDirectClient db;
    // No sort requested: the simpler (non-direct-client) path handles it.
    if ( cmdObj["sort"].eoo() )
        return runNoDirectClient( dbname , cmdObj , x, errmsg , result, y );
    string ns = dbname + '.' + cmdObj.firstElement().valuestr();
    BSONObj origQuery = cmdObj.getObjectField("query"); // defaults to {}
    Query q (origQuery);
    BSONElement sort = cmdObj["sort"];
    if (!sort.eoo())
        q.sort(sort.embeddedObjectUserCheck());
    bool upsert = cmdObj["upsert"].trueValue();
    BSONObj fieldsHolder (cmdObj.getObjectField("fields"));
    const BSONObj* fields = (fieldsHolder.isEmpty() ? NULL : &fieldsHolder);
    // If the projection excludes _id we cannot push it into the query (we
    // need _id to re-target the write), so apply it in post-processing.
    Projection projection;
    if (fields) {
        projection.init(fieldsHolder);
        if (!projection.includeID())
            fields = NULL; // do projection in post-processing
    }
    BSONObj out = db.findOne(ns, q, fields);
    if (out.isEmpty()) {
        // Nothing matched: either report null or perform the upsert.
        if (!upsert) {
            result.appendNull("value");
            return true;
        }
        BSONElement update = cmdObj["update"];
        uassert(13329, "upsert mode requires update field", !update.eoo());
        uassert(13330, "upsert mode requires query field", !origQuery.isEmpty());
        db.update(ns, origQuery, update.embeddedObjectUserCheck(), true);
        // Surface the write outcome; a string "err" means the write failed.
        BSONObj gle = db.getLastErrorDetailed(dbname);
        result.append("lastErrorObject", gle);
        if (gle["err"].type() == String) {
            errmsg = gle["err"].String();
            return false;
        }
        if (cmdObj["new"].trueValue()) {
            // Re-fetch the inserted document: prefer the server-assigned
            // upserted _id, else the _id from the original query.
            BSONElement _id = gle["upserted"];
            if (_id.eoo())
                _id = origQuery["_id"];
            out = db.findOne(ns, QUERY("_id" << _id), fields);
        }
    }
    else {
        if (cmdObj["remove"].trueValue()) {
            uassert(12515, "can't remove and update", cmdObj["update"].eoo());
            // Remove exactly the document we just found, by its _id.
            db.remove(ns, QUERY("_id" << out["_id"]), 1);
            BSONObj gle = db.getLastErrorDetailed(dbname);
            result.append("lastErrorObject", gle);
            if (gle["err"].type() == String) {
                errmsg = gle["err"].String();
                return false;
            }
        }
        else { // update
            BSONElement queryId = origQuery["_id"];
            if (queryId.eoo() || getGtLtOp(queryId) != BSONObj::Equality) {
                // need to include original query for $ positional operator:
                // rebuild the query as {_id of matched doc} + all non-_id
                // clauses from the original query.
                BSONObjBuilder b;
                b.append(out["_id"]);
                BSONObjIterator it(origQuery);
                while (it.more()) {
                    BSONElement e = it.next();
                    if (strcmp(e.fieldName(), "_id"))
                        b.append(e);
                }
                q = Query(b.obj());
            }
            if (q.isComplex()) // update doesn't work with complex queries
                q = Query(q.getFilter().getOwned());
            BSONElement update = cmdObj["update"];
            uassert(12516, "must specify remove or update", !update.eoo());
            db.update(ns, q, update.embeddedObjectUserCheck());
            BSONObj gle = db.getLastErrorDetailed(dbname);
            result.append("lastErrorObject", gle);
            if (gle["err"].type() == String) {
                errmsg = gle["err"].String();
                return false;
            }
            if (cmdObj["new"].trueValue())
                out = db.findOne(ns, QUERY("_id" << out["_id"]), fields);
        }
    }
    if (!fieldsHolder.isEmpty() && !fields){
        // we need to run projection but haven't yet
        out = projection.transform(out);
    }
    result.append("value", out);
    return true;
}
/*
 * Parse (and compile) the next client input chunk.
 *
 * Handles the 'X' control-command protocol (export/close/release/
 * auto_commit/reply_size/sizeheader/quit) inline, then for 'S' (SQL)
 * input runs the parser, consults/populates the query cache, and
 * generates + type-checks + optimizes the MAL plan.
 *
 * Returns MAL_SUCCEED or an exception string; on error the parse state
 * is cleaned up via sqlcleanup().
 */
str
SQLparser(Client c)
{
	bstream *in = c->fdin;
	stream *out = c->fdout;
	str msg = NULL;
	backend *be;
	mvc *m;
	int oldvtop, oldstop;
	int pstatus = 0;
	int err = 0, opt = 0;

	be = (backend *) c->sqlcontext;
	if (be == 0) {
		/* tell the client */
		mnstr_printf(out, "!SQL state descriptor missing, aborting\n");
		mnstr_flush(out);
		/* leave a message in the log */
		fprintf(stderr, "SQL state descriptor missing, cannot handle client!\n");
		/* stop here, instead of printing the exception below to the
		 * client in an endless loop */
		c->mode = FINISHCLIENT;
		throw(SQL, "SQLparser", "State descriptor missing");
	}
	/* remember the MAL program state so it can be restored on error */
	oldvtop = c->curprg->def->vtop;
	oldstop = c->curprg->def->stop;
	be->vtop = oldvtop;
#ifdef _SQL_PARSER_DEBUG
	mnstr_printf(GDKout, "#SQL compilation \n");
	printf("debugger? %d(%d)\n", (int) be->mvc->emode, (int) be->mvc->emod);
#endif
	m = be->mvc;
	m->type = Q_PARSE;
	SQLtrans(m);
	pstatus = m->session->status;

	/* sqlparse needs sql allocator to be available. It can be NULL at
	 * this point if this is a recursive call. */
	if (!m->sa)
		m->sa = sa_create();

	m->emode = m_normal;
	m->emod = mod_none;
	if (be->language == 'X') {
		/* 'X' protocol: out-of-band control commands, one per chunk */
		int n = 0, v, off, len;

		if (strncmp(in->buf + in->pos, "export ", 7) == 0)
			n = sscanf(in->buf + in->pos + 7, "%d %d %d", &v, &off, &len);

		if (n == 2 || n == 3) {
			/* export <id> <offset> [<count>]: ship a result chunk */
			mvc_export_chunk(be, out, v, off, n == 3 ? len : m->reply_size);

			in->pos = in->len;	/* HACK: should use parsed length */
			return MAL_SUCCEED;
		}
		if (strncmp(in->buf + in->pos, "close ", 6) == 0) {
			/* close <id>: drop a server-side result set */
			res_table *t;

			v = (int) strtol(in->buf + in->pos + 6, NULL, 0);
			t = res_tables_find(m->results, v);
			if (t)
				m->results = res_tables_remove(m->results, t);
			in->pos = in->len;	/* HACK: should use parsed length */
			return MAL_SUCCEED;
		}
		if (strncmp(in->buf + in->pos, "release ", 8) == 0) {
			/* release <id>: evict a cached (prepared) query */
			cq *q = NULL;

			v = (int) strtol(in->buf + in->pos + 8, NULL, 0);
			if ((q = qc_find(m->qc, v)) != NULL)
				qc_delete(m->qc, q);
			in->pos = in->len;	/* HACK: should use parsed length */
			return MAL_SUCCEED;
		}
		if (strncmp(in->buf + in->pos, "auto_commit ", 12) == 0) {
			/* auto_commit <0|1>: toggling mid-transaction commits or
			 * rolls back the open transaction accordingly */
			int commit;
			v = (int) strtol(in->buf + in->pos + 12, NULL, 10);
			commit = (!m->session->auto_commit && v);
			m->session->auto_commit = (v) != 0;
			m->session->ac_on_commit = m->session->auto_commit;
			if (m->session->active) {
				if (commit && mvc_commit(m, 0, NULL) < 0) {
					mnstr_printf(out, "!COMMIT: commit failed while "
						     "enabling auto_commit\n");
					msg = createException(SQL, "SQLparser", "Xauto_commit (commit) failed");
				} else if (!commit && mvc_rollback(m, 0, NULL) < 0) {
					RECYCLEdrop(0);
					mnstr_printf(out, "!COMMIT: rollback failed while "
						     "disabling auto_commit\n");
					msg = createException(SQL, "SQLparser", "Xauto_commit (rollback) failed");
				}
			}
			in->pos = in->len;	/* HACK: should use parsed length */
			if (msg != NULL)
				goto finalize;
			return MAL_SUCCEED;
		}
		if (strncmp(in->buf + in->pos, "reply_size ", 11) == 0) {
			v = (int) strtol(in->buf + in->pos + 11, NULL, 10);
			if (v < -1) {
				msg = createException(SQL, "SQLparser", "reply_size cannot be negative");
				goto finalize;
			}
			m->reply_size = v;
			in->pos = in->len;	/* HACK: should use parsed length */
			return MAL_SUCCEED;
		}
		if (strncmp(in->buf + in->pos, "sizeheader", 10) == 0) {
			v = (int) strtol(in->buf + in->pos + 10, NULL, 10);
			m->sizeheader = v != 0;
			in->pos = in->len;	/* HACK: should use parsed length */
			return MAL_SUCCEED;
		}
		if (strncmp(in->buf + in->pos, "quit", 4) == 0) {
			c->mode = FINISHCLIENT;
			return MAL_SUCCEED;
		}
		mnstr_printf(out, "!unrecognized X command: %s\n", in->buf + in->pos);
		msg = createException(SQL, "SQLparser", "unrecognized X command");
		goto finalize;
	}
	if (be->language != 'S') {
		/* BUGFIX: was "%ci" which printed a stray literal 'i' after the
		 * prefix character; the matching exception below uses "%c" */
		mnstr_printf(out, "!unrecognized language prefix: %c\n", be->language);
		msg = createException(SQL, "SQLparser", "unrecognized language prefix: %c", be->language);
		goto finalize;
	}

	if ((err = sqlparse(m)) ||
	    /* Only forget old errors on transaction boundaries */
	    (mvc_status(m) && m->type != Q_TRANS) || !m->sym) {
		if (!err && m->scanner.started)	/* repeat old errors, with a parsed query */
			err = mvc_status(m);
		if (err) {
			msg = createException(PARSE, "SQLparser", "%s", m->errstr);
			handle_error(m, c->fdout, pstatus);
		}
		sqlcleanup(m, err);
		goto finalize;
	}
	assert(m->session->schema != NULL);
	/*
	 * We have dealt with the first parsing step and advanced the input reader
	 * to the next statement (if any).
	 * Now is the time to also perform the semantic analysis, optimize and
	 * produce code.
	 */
	be->q = NULL;
	if (m->emode == m_execute) {
		/* EXECUTE <id>: the plan must already sit in the query cache */
		assert(m->sym->data.lval->h->type == type_int);
		be->q = qc_find(m->qc, m->sym->data.lval->h->data.i_val);
		if (!be->q) {
			err = -1;
			mnstr_printf(out, "!07003!EXEC: no prepared statement with id: %d\n", m->sym->data.lval->h->data.i_val);
			msg = createException(SQL, "PREPARE", "no prepared statement with id: %d", m->sym->data.lval->h->data.i_val);
			handle_error(m, c->fdout, pstatus);
			sqlcleanup(m, err);
			goto finalize;
		} else if (be->q->type != Q_PREPARE) {
			err = -1;
			mnstr_printf(out, "!07005!EXEC: given handle id is not for a " "prepared statement: %d\n", m->sym->data.lval->h->data.i_val);
			msg = createException(SQL, "PREPARE", "is not a prepared statement: %d", m->sym->data.lval->h->data.i_val);
			handle_error(m, c->fdout, pstatus);
			sqlcleanup(m, err);
			goto finalize;
		}
		m->emode = m_inplace;
		scanner_query_processed(&(m->scanner));
	} else if (caching(m) && cachable(m, NULL) && m->emode != m_prepare &&
		   (be->q = qc_match(m->qc, m->sym, m->args, m->argc, m->scanner.key ^ m->session->schema->base.id)) != NULL) {
		/* cache hit — but look for outdated plans first */
		if (OPTmitosisPlanOverdue(c, be->q->name)) {
			msg = SQLCacheRemove(c, be->q->name);
			qc_delete(be->mvc->qc, be->q);
			goto recompilequery;
		}
		if (m->emod & mod_debug)
			SQLsetDebugger(c, m, TRUE);
		if (m->emod & mod_trace)
			SQLsetTrace(be, c, TRUE);
		if (!(m->emod & (mod_explain | mod_debug | mod_trace | mod_dot)))
			m->emode = m_inplace;
		scanner_query_processed(&(m->scanner));
	} else {
		/* cache miss (or uncachable): compile from scratch */
		sql_rel *r;
		stmt *s;

	      recompilequery:
		r = sql_symbol2relation(m, m->sym);
		s = sql_relation2stmt(m, r);

		/* NOTE(review): precedence makes this err = (status && type!=Q_TRANS),
		 * i.e. err becomes 0/1 rather than the status code — possibly
		 * intended ((err = mvc_status(m)) && ...); left unchanged as
		 * sqlcleanup(m, err) depends on the current value. */
		if (s == 0 || (err = mvc_status(m) && m->type != Q_TRANS)) {
			msg = createException(PARSE, "SQLparser", "%s", m->errstr);
			handle_error(m, c->fdout, pstatus);
			sqlcleanup(m, err);
			goto finalize;
		}
		assert(s);

		/* generate the MAL code */
		if (m->emod & mod_trace)
			SQLsetTrace(be, c, TRUE);
		if (m->emod & mod_debug)
			SQLsetDebugger(c, m, TRUE);
		if (!caching(m) || !cachable(m, s)) {
			scanner_query_processed(&(m->scanner));
			if (backend_callinline(be, c, s, 0) == 0) {
				opt = 1;
			} else {
				err = 1;
			}
		} else {
			/* generate a factory instantiation */
			be->q = qc_insert(m->qc, m->sa,	/* the allocator */
					  r,	/* keep relational query */
					  m->sym,	/* the sql symbol tree */
					  m->args,	/* the argument list */
					  m->argc, m->scanner.key ^ m->session->schema->base.id,	/* the statement hash key */
					  m->emode == m_prepare ? Q_PREPARE : m->type,	/* the type of the statement */
					  sql_escape_str(QUERY(m->scanner)));
			scanner_query_processed(&(m->scanner));
			be->q->code = (backend_code) backend_dumpproc(be, c, be->q, s);
			if (!be->q->code)
				err = 1;
			be->q->stk = 0;	/* passed over to query cache, used during dumpproc */
			m->sa = NULL;
			m->sym = NULL;
			/* register name in the namespace */
			be->q->name = putName(be->q->name, strlen(be->q->name));
			if (m->emode == m_normal && m->emod == mod_none)
				m->emode = m_inplace;
		}
	}
	if (err)
		m->session->status = -10;
	if (err == 0) {
		if (be->q) {
			if (m->emode == m_prepare)
				err = mvc_export_prepare(m, c->fdout, be->q, "");
			else if (m->emode == m_inplace) {
				/* everything ready for a fast call */
			} else {	/* call procedure generation (only in cache mode) */
				backend_call(be, c, be->q);
			}
		}

		/* In the final phase we add any debugging control */
		if (m->emod & mod_trace)
			SQLsetTrace(be, c, FALSE);
		if (m->emod & mod_debug)
			SQLsetDebugger(c, m, FALSE);

		/*
		 * During the execution of the query exceptions can be raised.
		 * The default action is to print them out at the end of the
		 * query block.
		 */
		pushEndInstruction(c->curprg->def);

		chkTypes(c->fdout, c->nspace, c->curprg->def, TRUE);	/* resolve types */
		if (opt) {
			MalBlkPtr mb = c->curprg->def;

			trimMalBlk(mb);
			chkProgram(c->fdout, c->nspace, mb);
			addOptimizers(c, mb, "default_pipe");
			msg = optimizeMALBlock(c, mb);
			if (msg != MAL_SUCCEED) {
				sqlcleanup(m, err);
				goto finalize;
			}
			c->curprg->def = mb;
		}
		//printFunction(c->fdout, c->curprg->def, 0, LIST_MAL_ALL);
		/* we know more in this case than
		   chkProgram(c->fdout, c->nspace, c->curprg->def); */
		if (c->curprg->def->errors) {
			showErrors(c);
			/* restore the state */
			MSresetInstructions(c->curprg->def, oldstop);
			freeVariables(c, c->curprg->def, c->glb, oldvtop);
			c->curprg->def->errors = 0;
			msg = createException(PARSE, "SQLparser", "Semantic errors");
		}
	}
      finalize:
	if (msg)
		sqlcleanup(m, 0);
	return msg;
}