const Tree* ParserImpl::parse(const char *str, size_t len) {
  if (!str) {
    WHAT << "NULL pointer is given";
    return 0;
  }
  // Set charset/posset, because Tree::read() may depend on
  // these parameters.
  if (!tree_.get()) {
    tree_.reset(new Tree);
  }
  tree_->set_charset(charset_);
  tree_->set_posset(posset_);
  if (!tree_->read(str, len, input_layer_)) {
    WHAT << "format error: [" << str << "] ";
    return 0;
  }
  if (!parse(tree_.get())) {
    WHAT << tree_->what();
    return 0;
  }
  return tree_.get();
}
void cmd_connect_to(string connect_to) {
    auto devlist = LibusbInterface::listDevices(0xF539, 0xF539);

    if(devlist.empty()) {
        cout << "    No devices found!" << endl;
        return;
    }

    if(devlist.size() > 1) {
        if(connect_to.empty()) {
            cout << "    Select a device" << endl;
            for(auto dev : devlist) {
                cout << "        " << dev.first << "\t" << dev.second << endl;
            }
            return;
        } else {
            chosen_serial = connect_to;
        }
    } else {
        chosen_serial = devlist[0].first;
    }

    if(connected) {
        // Shut down the existing worker threads before reconnecting
        liObj->endSignal();
        dpObj->endSignal();
        connected = false;
        thread1.join();
        thread2.join();
        delete liObj;
        delete dpObj;
        liObj = NULL;
        dpObj = NULL;
    }

    dQueue.reset(new queue<DataSet>());
    liObj = new LibusbInterface(&bmutex, dQueue.get(), 0xF539, 0xF539, chosen_serial);
    dpObj = new DataProcessor(&bmutex, dQueue.get());

    // bind prevents copying the objects (we need to keep the pointers)
    thread1 = boost::thread(bind(&LibusbInterface::operator(), liObj));
    thread2 = boost::thread(bind(&DataProcessor::operator(), dpObj));
    connected = true;

    cout << "    Connected to device, serial: " << chosen_serial << endl;
}
void Z3BaseSolverImpl::createBuilder() {
    assert(builder_cache_ && "The cache needs to be created first");

    switch (ArrayConsMode) {
    case Z3_ARRAY_ITE:
        builder_.reset(new Z3IteBuilder(context_,
                (Z3IteBuilderCache*)builder_cache_.get()));
        break;
    case Z3_ARRAY_STORES:
        builder_.reset(new Z3StoreArrayBuilder(context_,
                (Z3ArrayBuilderCache*)builder_cache_.get()));
        break;
    case Z3_ARRAY_ASSERTS:
        builder_.reset(new Z3AssertArrayBuilder(solver_,
                (Z3ArrayBuilderCache*)builder_cache_.get()));
        break;
    }
}
BSONObj _look_up_seedbank(scoped_ptr<ScopedDbConnection> const& scoped_conn, int seedbank_id) {
    BSONObj sb_record;
    DBClientBase* conn = scoped_conn->get();
    if (conn->isFailed()) {
        log_util::error() << "torrentdb::_look_up_seedbank: mongodb connection failed" << endl;
    } else {
#ifdef _DEBUG
        log_util::debug() << "torrentdb::_look_up_seedbank: running mongodb query (" << seedbank_id << ")" << endl;
#endif
        std::auto_ptr<DBClientCursor> cursor = conn->query(_param_map["seedbankdb_ns"],
                                                           QUERY("seedbank_id" << seedbank_id));
        bool found_results = false;
        //if (conn->getLastError().empty()) {
        while (cursor->more()) {
            // TODO: verify no more than one record returned?
            sb_record = cursor->next();
            found_results = true;
        }
        //}
        if (!found_results) {
            log_util::error() << "torrentdb::_look_up_seedbank: mongodb result not found" << endl;
        }
    }
    return sb_record;
}
BSONObj _look_up_info_hash(scoped_ptr<ScopedDbConnection> const& scoped_conn, sha1_hash const& info_hash) {
    BSONObj torrent_record;
    char ih_hex[41];
    to_hex((char const*)&info_hash[0], sha1_hash::size, ih_hex);
    DBClientBase* conn = scoped_conn->get();
    if (conn->isFailed()) {
        log_util::error() << "torrentdb::_look_up_info_hash: mongodb connection failed" << endl;
    } else {
#ifdef _DEBUG
        log_util::debug() << "torrentdb::_look_up_info_hash: running mongodb query (" << ih_hex << ")" << endl;
#endif
        std::auto_ptr<DBClientCursor> cursor = conn->query(_param_map["torrentdb_ns"],
                                                           QUERY("info_hash" << ih_hex));
        bool found_results = false;
        //if (conn->getLastError().empty()) {
        while (cursor->more()) {
            // TODO: verify no more than one record returned?
            torrent_record = cursor->next();
            found_results = true;
        }
        //}
#ifdef _DEBUG
        if (!found_results) {
            log_util::debug() << "torrentdb::_look_up_info_hash: torrent not found" << endl;
        }
#endif
    }
    return torrent_record;
}
StatusWith<RecordId> insertBSON(scoped_ptr<OperationContext>& opCtx,
                                scoped_ptr<RecordStore>& rs,
                                const Timestamp& opTime) {
    BSONObj obj = BSON( "ts" << opTime );
    WriteUnitOfWork wuow(opCtx.get());
    RocksRecordStore* rrs = dynamic_cast<RocksRecordStore*>(rs.get());
    invariant( rrs );
    Status status = rrs->oplogDiskLocRegister( opCtx.get(), opTime );
    if (!status.isOK())
        return StatusWith<RecordId>( status );
    StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), false);
    if (res.isOK())
        wuow.commit();
    return res;
}
void _execute_update_stats(scoped_ptr<ScopedDbConnection> const& scoped_conn, stats_update_t const& params) {
    char ih_hex[41];
    to_hex((char const*)&params.info_hash[0], sha1_hash::size, ih_hex);
    DBClientBase* conn = scoped_conn->get();
    if (conn->isFailed()) {
        log_util::error() << "mongodb connection failed trying to update stats (torrent: "
                          << ih_hex << ", inc_completed: " << params.increment_completed << ")" << endl;
    } else {
#ifdef _DEBUG
        log_util::debug() << "torrentdb::_execute_update_stats: Running mongodb update query (" << ih_hex << ")" << endl;
#endif
        mongo::Date_t now = terasaur::date::get_now_mongo();
        mongo::Query query = QUERY("info_hash" << ih_hex);
        BSONObj update_bson;
        if (params.increment_completed) {
#ifdef _DEBUG
            log_util::debug() << "torrentdb::_execute_update_stats: increment completed true" << endl;
#endif
            update_bson = BSON("$set" << BSON("seeds" << params.seeds
                                              << "peers" << params.peers
                                              << "updated" << now)
                               << "$inc" << BSON("completed" << 1));
        } else {
#ifdef _DEBUG
            log_util::debug() << "torrentdb::_execute_update_stats: increment completed false" << endl;
#endif
            update_bson = BSON("$set" << BSON("seeds" << params.seeds
                                              << "peers" << params.peers
                                              << "updated" << now));
        }
#ifdef _DEBUG
        log_util::debug() << "torrentdb::_execute_update_stats: query: " << query << endl;
        log_util::debug() << "torrentdb::_execute_update_stats: update bson: " << update_bson << endl;
        log_util::debug() << "torrentdb::_execute_update_stats: calling mongodb update()" << endl;
#endif
        conn->update(_param_map["torrentdb_ns"], query, update_bson);
#ifdef _DEBUG
        log_util::debug() << "torrentdb::_execute_update_stats: calling mongodb getLastError()" << endl;
#endif
        string err = conn->getLastError();
        bool success = err.empty();
        if (!success) {
            log_util::error() << "torrentdb::_execute_update_stats: mongodb update returned error (" << err << ")" << endl;
        }
#ifdef _DEBUG
        else {
            log_util::debug() << "torrentdb::_execute_update_stats: mongodb update successful" << endl;
        }
#endif
    }
#ifdef _DEBUG
    log_util::debug() << "torrentdb::_execute_update_stats: returning" << endl;
#endif
}
RecordId _oplogOrderInsertOplog( OperationContext* txn,
                                 scoped_ptr<RecordStore>& rs,
                                 int inc ) {
    Timestamp opTime = Timestamp(5, inc);
    RocksRecordStore* rrs = dynamic_cast<RocksRecordStore*>(rs.get());
    Status status = rrs->oplogDiskLocRegister( txn, opTime );
    ASSERT_OK( status );
    BSONObj obj = BSON( "ts" << opTime );
    StatusWith<RecordId> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
    ASSERT_OK( res.getStatus() );
    return res.getValue();
}
DiskLoc _oplogOrderInsertOplog( OperationContext* txn,
                                scoped_ptr<RecordStore>& rs,
                                int inc ) {
    OpTime opTime = OpTime(5, inc);
    WiredTigerRecordStore* wrs = dynamic_cast<WiredTigerRecordStore*>(rs.get());
    Status status = wrs->oplogDiskLocRegister( txn, opTime );
    ASSERT_OK( status );
    BSONObj obj = BSON( "ts" << opTime );
    StatusWith<DiskLoc> res = rs->insertRecord( txn, obj.objdata(), obj.objsize(), false );
    ASSERT_OK( res.getStatus() );
    return res.getValue();
}
virtual void gotObject( const BSONObj& obj ) {
    if (_curns == OPLOG_SENTINEL) { // intentional ptr compare
        if (obj["op"].valuestr()[0] == 'n') // skip no-ops
            return;

        // exclude operations that don't meet the (timestamp) criteria
        if ( _opmatcher.get() && ! _opmatcher->matches ( obj ) ) {
            _oplogEntrySkips++;
            return;
        }

        string db = obj["ns"].valuestr();
        db = db.substr(0, db.find('.'));

        BSONObj cmd = BSON( "applyOps" << BSON_ARRAY( obj ) );
        BSONObj out;
        conn().runCommand(db, cmd, out);
        _oplogEntryApplies++;

        // wait for ops to propagate to "w" nodes (doesn't warn if w used without replset)
        if (mongoRestoreGlobalParams.w > 0) {
            string err = conn().getLastError(db, false, false, mongoRestoreGlobalParams.w);
            if (!err.empty()) {
                error() << "Error while replaying oplog: " << err;
            }
        }
    }
    else if (nsToCollectionSubstring(_curns) == "system.indexes") {
        createIndex(obj, true);
    }
    else if (mongoRestoreGlobalParams.drop &&
             nsToCollectionSubstring(_curns) == "system.users" &&
             _users.count(obj["user"].String())) {
        // Since system collections can't be dropped, we have to manually
        // replace the contents of the system.users collection
        BSONObj userMatch = BSON("user" << obj["user"].String());
        conn().update(_curns, Query(userMatch), obj);
        _users.erase(obj["user"].String());
    }
    else {
        conn().insert( _curns , obj );

        // wait for insert to propagate to "w" nodes (doesn't warn if w used without replset)
        if (mongoRestoreGlobalParams.w > 0) {
            string err = conn().getLastError(_curdb, false, false, mongoRestoreGlobalParams.w);
            if (!err.empty()) {
                error() << err;
            }
        }
    }
}
int addMaterial(IStorm3D_Material *stormMaterial) {
    assert(stormMaterial);

    DecalMaterial material = createMaterial(static_cast<Storm3D_Material *> (stormMaterial), tree.get());
    for(unsigned int i = 0; i < materials.size(); ++i) {
        if(equals(materials[i], material))
            return i;
    }

    int index = materials.size();
    material.materialIndex = index;
    materials.push_back(material);

    return index;
}
virtual int doRun() {
    // authenticate
    enum Auth::Level authLevel = Auth::NONE;
    auth("", &authLevel);
    uassert(15935, "user does not have write access", authLevel == Auth::WRITE);

    boost::filesystem::path root = getParam("dir");

    // check if we're actually talking to a machine that can write
    if (!isMaster()) {
        return -1;
    }

    if (isMongos() && _db == "" && exists(root / "config")) {
        log() << "Cannot do a full restore on a sharded system" << endl;
        return -1;
    }

    _drop = hasParam( "drop" );
    _keepIndexVersion = hasParam("keepIndexVersion");
    _restoreOptions = !hasParam("noOptionsRestore");
    _restoreIndexes = !hasParam("noIndexRestore");
    _w = getParam( "w" , 1 );

    bool doOplog = hasParam( "oplogReplay" );

    if (doOplog) {
        // fail early if errors

        if (_db != "") {
            log() << "Can only replay oplog on full restore" << endl;
            return -1;
        }

        if ( ! exists(root / "oplog.bson") ) {
            log() << "No oplog file to replay. Make sure you run mongodump with --oplog." << endl;
            return -1;
        }

        BSONObj out;
        if (! conn().simpleCommand("admin", &out, "buildinfo")) {
            log() << "buildinfo command failed: " << out["errmsg"].String() << endl;
            return -1;
        }

        StringData version = out["version"].valuestr();
        if (versionCmp(version, "1.7.4-pre-") < 0) {
            log() << "Can only replay oplog to server version >= 1.7.4" << endl;
            return -1;
        }

        string oplogLimit = getParam( "oplogLimit", "" );
        string oplogInc = "0";

        if(!oplogLimit.empty()) {
            size_t i = oplogLimit.find_first_of(':');
            if ( i != string::npos ) {
                if ( i + 1 < oplogLimit.length() ) {
                    oplogInc = oplogLimit.substr(i + 1);
                }
                oplogLimit = oplogLimit.substr(0, i);
            }

            try {
                _oplogLimitTS.reset(new OpTime(
                        boost::lexical_cast<unsigned long>(oplogLimit.c_str()),
                        boost::lexical_cast<unsigned long>(oplogInc.c_str())));
            } catch( const boost::bad_lexical_cast& error) {
                log() << "Could not parse oplogLimit into Timestamp from values ( "
                      << oplogLimit << " , " << oplogInc << " )" << endl;
                return -1;
            }

            if (!oplogLimit.empty()) {
                // Only for a replica set as master will have no-op entries so we would need to
                // skip them all to find the real op
                scoped_ptr<DBClientCursor> cursor(
                        conn().query("local.oplog.rs", Query().sort(BSON("$natural" << -1)),
                                     1 /*return first*/));
                OpTime tsOptime;
                // get newest oplog entry and make sure it is older than the limit to apply.
                if (cursor->more()) {
                    tsOptime = cursor->next().getField("ts")._opTime();
                    if (tsOptime > *_oplogLimitTS.get()) {
                        log() << "The oplogLimit is not newer than"
                              << " the last oplog entry on the server."
                              << endl;
                        return -1;
                    }
                }

                BSONObjBuilder tsRestrictBldr;
                if (!tsOptime.isNull())
                    tsRestrictBldr.appendTimestamp("$gt", tsOptime.asDate());
                tsRestrictBldr.appendTimestamp("$lt", _oplogLimitTS->asDate());
                BSONObj query = BSON("ts" << tsRestrictBldr.obj());

                if (!tsOptime.isNull()) {
                    log() << "Latest oplog entry on the server is " << tsOptime.getSecs()
                          << ":" << tsOptime.getInc() << endl;
                    log() << "Only applying oplog entries matching this criteria: "
                          << query.jsonString() << endl;
                }
                _opmatcher.reset(new Matcher(query));
            }
        }
    }

    /* If _db is not "" then the user specified a db name to restore as.
     *
     * In that case we better be given either a root directory that
     * contains only .bson files or a single .bson file (a db).
     *
     * In the case where a collection name is specified we better be
     * given either a root directory that contains only a single
     * .bson file, or a single .bson file itself (a collection).
     */
    drillDown(root, _db != "", _coll != "", !(_oplogLimitTS.get() == NULL), true);

    // should this happen for oplog replay as well?
    conn().getLastError(_db == "" ? "admin" : _db);

    if (doOplog) {
        log() << "\t Replaying oplog" << endl;
        _curns = OPLOG_SENTINEL;
        processFile( root / "oplog.bson" );
        log() << "Applied " << _oplogEntryApplies << " oplog entries out of "
              << _oplogEntryApplies + _oplogEntrySkips << " (" << _oplogEntrySkips
              << " skipped)." << endl;
    }
    return EXIT_CLEAN;
}
//!Returns a copy of the stored pointer
//!Never throws
template<class T, class D>
inline typename scoped_ptr<T, D>::pointer to_raw_pointer(scoped_ptr<T, D> const & p)
{  return p.get();  }
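// A minimal usage sketch for to_raw_pointer, assuming a boost-style
// scoped_ptr and a hypothetical Widget type (neither is defined in this
// source; shown only to illustrate the non-owning conversion):
//
//   scoped_ptr<Widget> w(new Widget);
//   Widget* raw = to_raw_pointer(w); // observe the managed object
//   raw->draw();                     // w still owns and will delete the Widget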
//!Constructs a non-owning raw_ptr observing the object managed by a scoped_ptr
raw_ptr(const scoped_ptr<U, Deleter>& x) : p_(x.get()) {}