void OIDInfo::construct(JSContext* cx, JS::CallArgs args) {
    // Build an OID: freshly generated when called with no argument,
    // otherwise parsed from the (validated) hex-string argument.
    OID oid;
    if (args.length() > 0) {
        auto hexStr = ValueWriter(cx, args.get(0)).toString();
        Scope::validateObjectIdString(hexStr);
        oid.init(hexStr);
    } else {
        oid.init();
    }
    // Wrap the OID in a JS object and hand it back via the call's rval.
    make(cx, oid, args.rval());
}
void testoid() { OID id; id.init(); // sleepsecs(3); OID b; // goes with sleep above... // b.init(); // assert( memcmp(id.getData(), b.getData(), 12) < 0 ); b.init( id.str() ); assert( b == id ); }
// Parse a hex ObjectId string and wrap it as the document { _id: <oid> }.
BSONObj StorageEngine::getoid(string oid) {
    OID parsed;
    parsed.init(oid);
    BSONObjBuilder doc;
    doc.appendOID("_id", &parsed);
    return doc.obj();
}
ObjectId() : ns_( testNs( this ) ) { OID id; for( int i = 0; i < 100000; ++i ) { id.init(); client_->insert( ns_.c_str(), BSON( "a" << id ) ); } }
bool HealthLog::log(const HealthLogEntry& entry) { BSONObjBuilder builder; OID oid; oid.init(); builder.append("_id", oid); entry.serialize(&builder); return _writer.insertDocument(builder.obj()); }
// SpiderMonkey ObjectId constructor: with no args generates a new OID,
// with one string arg parses it; the hex form is stored on the JS object
// as property "str".
JSBool object_id_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
    Convertor convertor( cx );

    OID oid;
    if ( argc != 0 ){
        uassert( "object_id_constructor can't take more than 1 param" , argc == 1 );
        oid.init( convertor.toString( argv[0] ) );
    }
    else {
        oid.init();
    }

    jsval strVal = convertor.toval( oid.str().c_str() );
    assert( JS_SetProperty( cx , obj , "str" , &strVal ) );
    return JS_TRUE;
}
// Recover the OID from a JS ObjectId value by re-parsing its "str" property.
OID toOID( jsval v ) {
    JSContext * cx = _context;
    assert( JSVAL_IS_OID( v ) );

    JSObject * holder = JSVAL_TO_OBJECT( v );
    OID result;
    result.init( getString( holder , "str" ) );
    return result;
}
// Expects a single-document insert into config.<collName> on configHost and
// validates that the inserted document is a well-formed changelog entry whose
// fields match the given timestamp/what/ns/detail. The changeId is checked
// structurally (server-time-oid pieces) since the OID itself is random.
void ShardingTestFixture::expectConfigCollectionInsert(const HostAndPort& configHost,
                                                       StringData collName,
                                                       Date_t timestamp,
                                                       const std::string& what,
                                                       const std::string& ns,
                                                       const BSONObj& detail) {
    onCommand([&](const RemoteCommandRequest& request) {
        // The insert must target the config database on the expected host.
        ASSERT_EQUALS(configHost, request.target);
        ASSERT_EQUALS("config", request.dbname);

        // Parse the raw command back into a batched insert and sanity-check
        // its target namespace.
        BatchedInsertRequest actualBatchedInsert;
        std::string errmsg;
        ASSERT_TRUE(actualBatchedInsert.parseBSON(request.dbname, request.cmdObj, &errmsg));
        ASSERT_EQ("config", actualBatchedInsert.getNS().db());
        ASSERT_EQ(collName, actualBatchedInsert.getNS().coll());

        // Exactly one changelog document per logged action.
        auto inserts = actualBatchedInsert.getDocuments();
        ASSERT_EQUALS(1U, inserts.size());

        const ChangeLogType& actualChangeLog = assertGet(ChangeLogType::fromBSON(inserts.front()));

        ASSERT_EQUALS(operationContext()->getClient()->clientAddress(true),
                      actualChangeLog.getClientAddr());
        ASSERT_EQUALS(detail, actualChangeLog.getDetails());
        ASSERT_EQUALS(ns, actualChangeLog.getNS());
        ASSERT_EQUALS(network()->getHostName(), actualChangeLog.getServer());
        ASSERT_EQUALS(timestamp, actualChangeLog.getTime());
        ASSERT_EQUALS(what, actualChangeLog.getWhat());

        // Handle changeId specially because there's no way to know what OID was
        // generated; split "<server>-<time>-<oid>" on the first and last dash.
        std::string changeId = actualChangeLog.getChangeId();
        size_t firstDash = changeId.find("-");
        size_t lastDash = changeId.rfind("-");

        const std::string serverPiece = changeId.substr(0, firstDash);
        const std::string timePiece = changeId.substr(firstDash + 1, lastDash - firstDash - 1);
        const std::string oidPiece = changeId.substr(lastDash + 1);

        ASSERT_EQUALS(grid.getNetwork()->getHostName(), serverPiece);
        ASSERT_EQUALS(timestamp.toString(), timePiece);

        OID generatedOID;
        // Just make sure this doesn't throw, and assume the OID is valid.
        generatedOID.init(oidPiece);

        BatchedCommandResponse response;
        response.setOk(true);

        return response.toBSON();
    });
}
void insert( const BSONObj &o ) { if ( o["_id"].eoo() ) { BSONObjBuilder b; OID oid; oid.init(); b.appendOID( "_id", &oid ); b.appendElements( o ); _collection->insertDocument( &_txn, b.obj(), false ); } else { _collection->insertDocument( &_txn, o, false ); } }
// V8 ObjectId constructor/initializer. If invoked as a plain function (this is
// undefined or the global object), allocate a real ObjectId instance first.
// The hex form of the OID is stored on the object as property "str".
v8::Handle<v8::Value> objectIdInit( const v8::Arguments& args ){
    v8::Handle<v8::Object> holder = args.This();

    if ( holder->IsUndefined() || holder == v8::Context::GetCurrent()->Global() ){
        v8::Function * cons = getObjectIdCons();
        holder = cons->NewInstance();
    }

    OID oid;
    if ( args.Length() != 0 ){
        string hex = toSTLString( args[0] );
        oid.init( hex );
    }
    else {
        oid.init();
    }

    holder->Set( String::New( "str" ) , String::New( oid.str().c_str() ) );
    return holder;
}
// Finalize the file being built: flush the last partial chunk, verify the
// chunks landed (getLastError + server-side filemd5), insert the files-
// collection document, then reset this builder for the next file.
// Returns the inserted files document.
BSONObj GridFileBuilder::buildFile(const string &name, const string& content_type) {
    // Flush any buffered bytes as a final (possibly short) chunk.
    privateAppendPendingData();

    /* from gridfs.cpp at
       https://github.com/mongodb/mongo-cxx-driver/blob/legacy/src/mongo/client/gridfs.cpp */

    // Wait for any pending writebacks to finish
    BSONObj errObj = _client->getLastErrorDetailed();
    uassert( 16428,
             str::stream() << "Error storing GridFS chunk for file: " << name
                           << ", error: " << errObj,
             DBClientWithCommands::getLastErrorString(errObj) == "" );

    // Ask the server to md5 the stored chunks; this both yields the checksum
    // for the files document and confirms all chunks are present.
    BSONObj res;
    if ( ! _client->runCommand( _dbName.c_str() ,
                                BSON( "filemd5" << _file_id << "root" << _prefix ) , res ) )
        throw UserException( 9008 , "filemd5 failed" );

    BSONObjBuilder file;
    file << "_id" << _file_id["_id"]
         << "filename" << name
         << "chunkSize" << (unsigned int)_chunkSize
         << "uploadDate" << DATENOW
         << "md5" << res["md5"] ;

    // Store small lengths as int32, larger ones as int64.
    if (_file_length < 1024*1024*1024) { // 2^30
        file << "length" << (int) _file_length;
    }
    else {
        file << "length" << (long long) _file_length;
    }

    if (!content_type.empty())
        file << "contentType" << content_type;

    BSONObj ret = file.obj();
    _client->insert(_filesNS.c_str(), ret);

    // resets the object so the builder can be reused for another file
    // NOTE(review): _pending_data is nulled without delete here — presumably
    // privateAppendPendingData() released it; confirm to rule out a leak.
    _current_chunk = 0;
    _pending_data = NULL;
    _pending_data_size = 0;
    _file_length = 0;
    OID id;
    id.init();
    _file_id = BSON("_id" << id);

    return ret;
}
// Decide whether this mongos should act as the balancer. Returns true when we
// already hold the balancer lock, or when the current holder looks dead and
// our compare-and-set takeover succeeds.
bool Balancer::_shouldIBalance( DBClientBase& conn ){
    // Current balancer state document: { _id: "balancer", who: <mongos id>, x: <OID> }.
    BSONObj x = conn.findOne( ShardNS::settings , BSON( "_id" << "balancer" ) );
    log(2) << "balancer: " << x << endl;

    if ( ! x.isEmpty() ){
        if ( x["who"].String() == _myid ){
            log(2) << "balancer: i'm the current balancer" << endl;
            return true;
        }

        // Someone else holds it — check how recently that mongos pinged.
        BSONObj other = conn.findOne( ShardNS::mongos , x["who"].wrap( "_id" ) );
        massert( 13125 , (string)"can't find mongos: " + x["who"].String() , ! other.isEmpty() );

        int secsSincePing = (int)(( jsTime() - other["ping"].Date() ) / 1000 );
        log(2) << "current balancer is: " << other << " ping delay(secs): " << secsSincePing << endl;

        // Holder pinged within the last 10 minutes: treat it as alive.
        if ( secsSincePing < ( 60 * 10 ) ){
            return false;
        }

        log() << "balancer: going to take over" << endl;
        // we want to take over, so fall through to below
    }

    // Taking over means replacing 'who' with this balancer's address. Note that
    // to avoid any races, we use a compare-and-set strategy relying on the
    // incarnation of the previous balancer (the key 'x').

    OID incarnation;
    incarnation.init();

    // Match the exact previous incarnation (or its absence) so two takeover
    // attempts cannot both succeed.
    BSONObjBuilder updateQuery;
    updateQuery.append( "_id" , "balancer" );
    if ( x["x"].type() )
        updateQuery.append( x["x"] );
    else
        updateQuery.append( "x" , BSON( "$exists" << false ) );

    conn.update( ShardNS::settings ,
                 updateQuery.obj() ,
                 BSON( "$set" << BSON( "who" << _myid << "x" << incarnation ) ) ,
                 true );

    // If another balancer beats this one to the punch, the following query will see
    // the incarnation for that other guy.

    x = conn.findOne( ShardNS::settings , BSON( "_id" << "balancer" ) );
    log() << "balancer: after update: " << x << endl;
    // We won only if both our id and our fresh incarnation survived the update.
    return _myid == x["who"].String() && incarnation == x["x"].OID();
}
// Prepare a builder targeting <dbName>.<prefix>.{chunks,files}; a fresh OID
// is allocated up front so chunks can reference their file immediately.
GridFileBuilder::GridFileBuilder(DBClientBase *client, const string &dbName,
                                 unsigned int chunkSize, const string& prefix)
    : _client(client), _dbName(dbName), _prefix(prefix), _chunkSize(chunkSize),
      _current_chunk(0), _pending_data(NULL), _pending_data_size(0), _file_length(0) {
    const string base = _dbName + "." + _prefix;
    _chunkNS = base + ".chunks";
    _filesNS = base + ".files";

    OID fileId;
    fileId.init();
    _file_id = BSON("_id" << fileId);

    // forces to build the gridFS collections
    GridFS aux(*client, dbName, prefix);
}
void insert(const char* s) { WriteUnitOfWork wunit(&_txn); const BSONObj o = fromjson(s); if (o["_id"].eoo()) { BSONObjBuilder b; OID oid; oid.init(); b.appendOID("_id", &oid); b.appendElements(o); _collection->insertDocument(&_txn, b.obj(), false); } else { _collection->insertDocument(&_txn, o, false); } wunit.commit(); }
void insert(const char* s) { WriteUnitOfWork wunit(&_opCtx); const BSONObj o = fromjson(s); OpDebug* const nullOpDebug = nullptr; if (o["_id"].eoo()) { BSONObjBuilder b; OID oid; oid.init(); b.appendOID("_id", &oid); b.appendElements(o); _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false); } else { _collection->insertDocument(&_opCtx, o, nullOpDebug, false); } wunit.commit(); }
// Store an in-memory buffer as a GridFS file: slice it into _chunkSize pieces,
// insert each chunk, then insert the files document. Returns the files doc.
BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType) {
    OID fileId;
    fileId.init();
    BSONObj idObj = BSON("_id" << fileId);

    const char* cursor = data;
    char const * const end = data + length;
    int chunkNumber = 0;
    while (cursor < end) {
        int chunkLen = MIN(_chunkSize, (unsigned)(end-cursor));
        GridFSChunk c(idObj, chunkNumber, cursor, chunkLen);
        _client.insert( _chunksNS.c_str() , c._data );
        chunkNumber++;
        cursor += chunkLen;
    }

    return insertFile(remoteName, fileId, length, contentType);
}
// Store a file (or stdin when fileName is "-") as a GridFS file: read it in
// _chunkSize pieces, insert each chunk, then insert the files document under
// remoteName (or fileName when remoteName is empty). Returns the files doc.
// Throws via uassert on a missing/unopenable file.
BSONObj GridFS::storeFile( const string& fileName , const string& remoteName , const string& contentType) {
    uassert( 10012 , "file doesn't exist" , fileName == "-" || boost::filesystem::exists( fileName ) );

    FILE* fd;
    if (fileName == "-")
        fd = stdin;
    else
        fd = fopen( fileName.c_str() , "rb" );
    uassert( 10013 , "error opening file", fd);

    OID id;
    id.init();
    BSONObj idObj = BSON("_id" << id);

    int chunkNumber = 0;
    gridfs_offset length = 0;
    while (!feof(fd)) {
        // RAII buffer: the raw new[]/delete[] pair leaked the chunk buffer
        // whenever _client.insert() threw (e.g. on a socket error).
        boost::scoped_array<char> buf (new char[_chunkSize+1]);
        char* bufPos = buf.get();
        unsigned int chunkLen = 0; // how much in the chunk now
        while(chunkLen != _chunkSize && !feof(fd)) {
            int readLen = fread(bufPos, 1, _chunkSize - chunkLen, fd);
            chunkLen += readLen;
            bufPos += readLen;
            assert(chunkLen <= _chunkSize);
        }

        GridFSChunk c(idObj, chunkNumber, buf.get(), chunkLen);
        _client.insert( _chunksNS.c_str() , c._data );

        length += chunkLen;
        chunkNumber++;
    }

    if (fd != stdin)
        fclose( fd );
    // NOTE(review): fd is still leaked if an insert above throws — pre-existing
    // behavior, left unchanged here; a file-handle RAII wrapper would fix it.

    return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
}
// Store a file (or stdin when fileName is "-") as a GridFS file: read it in
// DEFAULT_CHUNK_SIZE pieces, insert each chunk, then insert the files
// document under remoteName (or fileName when remoteName is empty).
BSONObj GridFS::storeFile( const string& fileName , const string& remoteName , const string& contentType){
    uassert( "file doesn't exist" , fileName == "-" || boost::filesystem::exists( fileName ) );

    FILE* fd;
    if (fileName == "-")
        fd = stdin;
    else
        fd = fopen( fileName.c_str() , "rb" );
    uassert("error opening file", fd);

    OID id;
    id.init();
    BSONObj idObj = BSON("_id" << id);

    int chunkNumber = 0;
    gridfs_offset length = 0;
    while (!feof(fd)){
        // scoped_array frees the buffer each iteration, even on exceptions.
        boost::scoped_array<char>buf (new char[DEFAULT_CHUNK_SIZE]);
        char* bufPos = buf.get();
        unsigned int chunkLen = 0; // how much in the chunk now
        while(chunkLen != DEFAULT_CHUNK_SIZE && !feof(fd)){
            int readLen = fread(bufPos, 1, DEFAULT_CHUNK_SIZE - chunkLen, fd);
            chunkLen += readLen;
            bufPos += readLen;
            assert(chunkLen <= DEFAULT_CHUNK_SIZE);
        }

        Chunk c(idObj, chunkNumber, buf.get(), chunkLen);
        _client.insert( _chunksNS.c_str() , c._data );

        length += chunkLen;
        chunkNumber++;
    }

    if (fd != stdin)
        fclose( fd );

    // Only 32-bit lengths are representable at this point in the format.
    massert("large files not yet implemented", length <= 0xffffffff);

    return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
}
// Persist this model: insert with a freshly generated _id when the model has
// never been saved, otherwise update the existing document by _id. When
// 'check' is true, getLastError is consulted and a failure is rethrown.
void Model::save( bool check ){
    ScopedDbConnection conn( modelServer() );

    BSONObjBuilder b;
    serialize( b );

    if ( _id.isEmpty() ){
        // First save: generate an _id, insert, and remember the _id locally.
        OID oid;
        oid.init();
        b.appendOID( "_id" , &oid );

        BSONObj o = b.obj();
        conn->insert( getNS() , o );
        _id = o["_id"].wrap().getOwned();

        log(4) << "inserted new model " << getNS() << " " << o << endl;
    }
    else {
        // Subsequent save: carry the existing _id and update by it.
        BSONElement id = _id["_id"];
        b.append( id );

        BSONObjBuilder qb;
        qb.append( id );

        BSONObj q = qb.obj();
        BSONObj o = b.obj();

        log(4) << "updated old model" << getNS() << " " << q << " " << o << endl;

        conn->update( getNS() , q , o );
    }

    // Capture the error (if requested) BEFORE releasing the connection.
    string errmsg = "";
    if ( check )
        errmsg = conn->getLastError();

    conn.done();

    if ( check && errmsg.size() )
        throw UserException( (string)"error on Model::save: " + errmsg );
}
// Store an in-memory buffer as a GridFS file, slicing it into
// DEFAULT_CHUNK_SIZE pieces. Only 32-bit lengths are supported.
BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType){
    massert("large files not yet implemented", length <= 0xffffffff);

    OID fileId;
    fileId.init();
    BSONObj idObj = BSON("_id" << fileId);

    const char* cursor = data;
    char const * const end = data + length;
    int chunkNumber = 0;
    while (cursor < end){
        int chunkLen = MIN(DEFAULT_CHUNK_SIZE, end-cursor);
        Chunk c(idObj, chunkNumber, cursor, chunkLen);
        _client.insert( _chunksNS.c_str() , c._data );
        chunkNumber++;
        cursor += chunkLen;
    }

    return insertFile(remoteName, fileId, length, contentType);
}
static void insert( const BSONObj &o, bool god = false ) { OperationContextImpl txn; Lock::DBWrite lk(txn.lockState(), ns()); Client::Context ctx(ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(&txn, ns()); if (!coll) { coll = db->createCollection(&txn, ns()); } if (o.hasField("_id")) { coll->insertDocument(&txn, o, true); return; } class BSONObjBuilder b; OID id; id.init(); b.appendOID("_id", &id); b.appendElements(o); coll->insertDocument(&txn, b.obj(), true); }
bool appendSpecialDBObject( Convertor * c , BSONObjBuilder& b , const string& name , JSObject * o ){ if ( JS_InstanceOf( c->_context , o , &object_id_class , 0 ) ){ OID oid; oid.init( c->getString( o , "str" ) ); b.append( name.c_str() , oid ); return true; } if ( JS_InstanceOf( c->_context , o , &minkey_class , 0 ) ){ b.appendMinKey( name.c_str() ); return true; } if ( JS_InstanceOf( c->_context , o , &maxkey_class , 0 ) ){ b.appendMaxKey( name.c_str() ); return true; } if ( JS_InstanceOf( c->_context , o , ×tamp_class , 0 ) ){ b.appendTimestamp( name.c_str() , (unsigned long long)c->getNumber( o , "t" ) , (unsigned int )c->getNumber( o , "i" ) ); return true; } { jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o ); if ( d ){ b.appendDate( name.c_str() , (unsigned long long)d ); return true; } } return false; }
// Lay down the canonical { _id: ObjectId } BSON element in place:
// one type byte, the NUL-terminated field name "_id", then a 12-byte OID.
void init() {
    type = static_cast<char>(jstOID);
    strcpy( id, "_id" );
    oid.init();
    verify( size() == 17 ); // 1 (type) + 4 ("_id\0") + 12 (OID) bytes
}
// Process startup: give this server a unique identity and install handlers
// for debugger support and clean termination.
void init(){
    serverID.init();           // fresh OID identifying this server instance
    setupSIGTRAPforGDB();
    signal(SIGTERM, sighandler);
    signal(SIGINT, sighandler);
}
// Recursively append the Lua value at 'stackpos' to the BSON builder under
// 'key'. Tables are serialized either as a special BSON type (when their
// metatable carries __bsontype) or as a nested document/array; 'ref' is a
// registry index to a visited-table set used to break reference cycles.
// Lua stack discipline: every branch restores the stack to its entry state.
static void lua_append_bson(lua_State *L, const char *key, int stackpos, BSONObjBuilder *builder, int ref) {
    int type = lua_type(L, stackpos);

    if (type == LUA_TTABLE) {
        // Normalize to an absolute index so pushes below don't shift it.
        if (stackpos < 0) stackpos = lua_gettop(L) + stackpos + 1;
        lua_checkstack(L, 3);

        int bsontype_found = luaL_getmetafield(L, stackpos, "__bsontype");
        if (!bsontype_found) {
            // not a special bsontype
            // handle as a regular table, iterating keys
            lua_rawgeti(L, LUA_REGISTRYINDEX, ref);
            lua_pushvalue(L, stackpos);
            lua_rawget(L, -2);
            if (lua_toboolean(L, -1)) { // do nothing if the same table encountered
                lua_pop(L, 2);
            } else {
                // Mark this table as visited in the registry set.
                lua_pop(L, 1);
                lua_pushvalue(L, stackpos);
                lua_pushboolean(L, 1);
                lua_rawset(L, -3);
                lua_pop(L, 1);

                BSONObjBuilder b;

                // Detect a dense array: consecutive integer keys 1..len.
                bool dense = true;
                int len = 0;
                for (lua_pushnil(L); lua_next(L, stackpos); lua_pop(L, 1)) {
                    ++len;
                    if ((lua_type(L, -2) != LUA_TNUMBER) || (lua_tointeger(L, -2) != len)) {
                        lua_pop(L, 2);
                        dense = false;
                        break;
                    }
                }

                if (dense) {
                    // BSON arrays use zero-based stringified indices as keys.
                    for (int i = 0; i < len; i++) {
                        lua_rawgeti(L, stackpos, i+1);
                        std::stringstream ss;
                        ss << i;
                        lua_append_bson(L, ss.str().c_str(), -1, &b, ref);
                        lua_pop(L, 1);
                    }
                    builder->appendArray(key, b.obj());
                } else {
                    // Sparse/mixed table: serialize as a document, stringifying
                    // numeric keys.
                    for (lua_pushnil(L); lua_next(L, stackpos); lua_pop(L, 1)) {
                        switch (lua_type(L, -2)) { // key type
                            case LUA_TNUMBER: {
                                std::stringstream ss;
                                ss << lua_tonumber(L, -2);
                                lua_append_bson(L, ss.str().c_str(), -1, &b, ref);
                                break;
                            }
                            case LUA_TSTRING: {
                                lua_append_bson(L, lua_tostring(L, -2), -1, &b, ref);
                                break;
                            }
                        }
                    }
                    builder->append(key, b.obj());
                }
            }
        } else {
            // __bsontype present: the table wraps a specific BSON value,
            // whose payload sits at rawgeti index 1.
            int bson_type = lua_tointeger(L, -1);
            lua_pop(L, 1);
            lua_rawgeti(L, -1, 1);
            switch (bson_type) {
                case mongo::Date:
                    builder->appendDate(key, lua_tonumber(L, -1));
                    break;
                case mongo::Timestamp:
                    builder->appendTimestamp(key);
                    break;
                case mongo::RegEx: {
                    const char* regex = lua_tostring(L, -1);
                    lua_rawgeti(L, -2, 2); // options
                    const char* options = lua_tostring(L, -1);
                    lua_pop(L, 1);
                    if (regex && options) builder->appendRegex(key, regex, options);
                    break;
                }
                case mongo::NumberInt:
                    builder->append(key, static_cast<int32_t>(lua_tointeger(L, -1)));
                    break;
                case mongo::NumberLong:
                    builder->append(key, static_cast<long long int>(lua_tonumber(L, -1)));
                    break;
                case mongo::Symbol: {
                    const char* c = lua_tostring(L, -1);
                    if (c) builder->appendSymbol(key, c);
                    break;
                }
                case mongo::BinData: {
                    size_t l;
                    const char* c = lua_tolstring(L, -1, &l);
                    if (c) builder->appendBinData(key, l, mongo::BinDataGeneral, c);
                    break;
                }
                case mongo::jstOID: {
                    OID oid;
                    const char* c = lua_tostring(L, -1);
                    if (c) {
                        oid.init(c);
                        builder->appendOID(key, &oid);
                    }
                    break;
                }
                case mongo::jstNULL:
                    builder->appendNull(key);
                    break;
                /*default:
                    luaL_error(L, LUAMONGO_UNSUPPORTED_BSON_TYPE, luaL_typename(L, stackpos));*/
            }
            lua_pop(L, 1);
        }
    } else if (type == LUA_TNIL) {
        builder->appendNull(key);
    } else if (type == LUA_TNUMBER) {
        double numval = lua_tonumber(L, stackpos);
        if ((numval == floor(numval)) && fabs(numval)< INT_MAX ) {
            // The numeric value looks like an integer, treat it as such.
            // This is closer to how JSON datatypes behave.
            int intval = lua_tointeger(L, stackpos);
            builder->append(key, static_cast<int32_t>(intval));
        } else {
            builder->append(key, numval);
        }
    } else if (type == LUA_TBOOLEAN) {
        builder->appendBool(key, lua_toboolean(L, stackpos));
    } else if (type == LUA_TSTRING) {
        builder->append(key, lua_tostring(L, stackpos));
    }/* else {
        luaL_error(L, LUAMONGO_UNSUPPORTED_LUA_TYPE, luaL_typename(L, stackpos));
    }*/
}
void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value ){ if ( value->IsString() ){ if ( sname == "$where" ) b.appendCode( sname.c_str() , toSTLString( value ).c_str() ); else b.append( sname.c_str() , toSTLString( value ).c_str() ); return; } if ( value->IsFunction() ){ b.appendCode( sname.c_str() , toSTLString( value ).c_str() ); return; } if ( value->IsNumber() ){ b.append( sname.c_str() , value->ToNumber()->Value() ); return; } if ( value->IsArray() ){ BSONObj sub = v8ToMongo( value->ToObject() ); b.appendArray( sname.c_str() , sub ); return; } if ( value->IsDate() ){ b.appendDate( sname.c_str() , (unsigned long long )(v8::Date::Cast( *value )->NumberValue()) ); return; } if ( value->IsObject() ){ string s = toSTLString( value ); if ( s.size() && s[0] == '/' ){ s = s.substr( 1 ); string r = s.substr( 0 , s.find( "/" ) ); string o = s.substr( s.find( "/" ) + 1 ); b.appendRegex( sname.c_str() , r.c_str() , o.c_str() ); } else if ( value->ToObject()->GetPrototype()->IsObject() && value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( String::New( "isObjectId" ) ) ){ OID oid; oid.init( toSTLString( value ) ); b.appendOID( sname.c_str() , &oid ); } else { BSONObj sub = v8ToMongo( value->ToObject() ); b.append( sname.c_str() , sub ); } return; } if ( value->IsBoolean() ){ b.appendBool( sname.c_str() , value->ToBoolean()->Value() ); return; } else if ( value->IsUndefined() ){ return; } else if ( value->IsNull() ){ b.appendNull( sname.c_str() ); return; } cout << "don't know how to covert to mongo field [" << name << "]\t" << value << endl; }
// Recursively append the Lua value at 'stackpos' to the BSON builder under
// 'key'. Tables serialize either as a special BSON type (metatable field
// __bsontype) or as a nested array/document. Unlike the newer variant, this
// version has no cycle guard, so self-referencing tables recurse forever.
static void lua_append_bson(lua_State *L, const char *key, int stackpos, BSONObjBuilder *builder) {
    int type = lua_type(L, stackpos);

    if (type == LUA_TTABLE) {
        int bsontype_found = luaL_getmetafield(L, stackpos, "__bsontype");
        if (!bsontype_found) {
            // not a special bsontype
            // handle as a regular table, iterating keys
            BSONObjBuilder b;
            int arraylen = check_array(L, stackpos);
            if (arraylen) {
                // Dense array: BSON arrays use zero-based stringified indices.
                for (int i = 0; i < arraylen; i++) {
                    lua_rawgeti(L, stackpos, i+1);
                    stringstream ss;
                    ss << i;
                    lua_append_bson(L, ss.str().c_str(), -1, &b);
                    lua_pop(L, 1);
                }
                builder->appendArray(key, b.obj());
            } else {
                // General table: serialize as a document, stringifying
                // numeric keys.
                lua_pushnil(L);
                while (lua_next(L, stackpos-1) != 0) {
                    if (lua_isnumber(L, -2)) {
                        stringstream ss;
                        ss << lua_tonumber(L, -2);
                        lua_append_bson(L, ss.str().c_str(), -1, &b);
                    } else {
                        const char *k = lua_tostring(L, -2);
                        lua_append_bson(L, k, -1, &b);
                    }
                    lua_pop(L, 1);
                }
                builder->append(key, b.obj());
            }
        } else {
            // __bsontype present: the table wraps a specific BSON value,
            // whose payload sits at rawgeti index 1.
            int bson_type = lua_tointeger(L, -1);
            lua_pop(L, 1);
            lua_rawgeti(L, -1, 1);
            switch (bson_type) {
                case mongo::Date:
                    builder->appendDate(key, lua_tonumber(L, -1));
                    break;
                case mongo::Timestamp:
                    builder->appendTimestamp(key);
                    break;
                case mongo::RegEx: {
                    const char* regex = lua_tostring(L, -1);
                    lua_rawgeti(L, -2, 2); // options
                    const char* options = lua_tostring(L, -1);
                    lua_pop(L, 1);
                    builder->appendRegex(key, regex, options);
                    break;
                }
                case mongo::NumberInt:
                    builder->append(key, static_cast<int32_t>(lua_tointeger(L, -1)));
                    break;
                case mongo::NumberLong:
                    builder->append(key, static_cast<long long int>(lua_tonumber(L, -1)));
                    break;
                case mongo::Symbol:
                    builder->appendSymbol(key, lua_tostring(L, -1));
                    break;
                case mongo::jstOID: {
                    OID oid;
                    oid.init(lua_tostring(L, -1));
                    builder->appendOID(key, &oid);
                    break;
                }
                case mongo::jstNULL:
                    builder->appendNull(key);
                    break;
                default:
                    luaL_error(L, LUAMONGO_UNSUPPORTED_BSON_TYPE, luaL_typename(L, stackpos));
            }
            lua_pop(L, 1);
        }
    } else if (type == LUA_TNIL) {
        builder->appendNull(key);
    } else if (type == LUA_TNUMBER) {
        double numval = lua_tonumber(L, stackpos);
        if (numval == floor(numval)) {
            // The numeric value looks like an integer, treat it as such.
            // This is closer to how JSON datatypes behave.
            int intval = lua_tointeger(L, stackpos);
            builder->append(key, static_cast<int32_t>(intval));
        } else {
            builder->append(key, numval);
        }
    } else if (type == LUA_TBOOLEAN) {
        builder->appendBool(key, lua_toboolean(L, stackpos));
    } else if (type == LUA_TSTRING) {
        builder->append(key, lua_tostring(L, stackpos));
    } else {
        luaL_error(L, LUAMONGO_UNSUPPORTED_LUA_TYPE, luaL_typename(L, stackpos));
    }
}
/* ****************************************************************************
*
* processRegisterContext -
*
* This function has a slightly different behaviour depending on whether the id
* parameter is null (new registration case) or not null (update case), in
* particular:
*
* - In the new registration case, the _id is generated and insert() is used to
*   put the document in the DB.
* - In the update case, the _id is set according to the argument 'id' and update() is
*   used to put the document in the DB.
*
*/
HttpStatusCode processRegisterContext
(
  RegisterContextRequest*   requestP,
  RegisterContextResponse*  responseP,
  OID*                      id,
  const std::string&        tenant,
  const std::string&        servicePath,
  const std::string&        format,
  const std::string&        fiwareCorrelator
)
{
  std::string err;

  /* If expiration is not present, then use a default one */
  if (requestP->duration.isEmpty())
  {
    requestP->duration.set(DEFAULT_DURATION);
  }

  /* Calculate expiration (using the current time and the duration field in the request) */
  long long expiration = getCurrentTime() + requestP->duration.parse();

  LM_T(LmtMongo, ("Registration expiration: %lu", expiration));

  /* Create the mongoDB registration document */
  BSONObjBuilder reg;
  OID oid;

  // New registration: generate an _id; update: reuse the caller-supplied one.
  if (id == NULL)
  {
    oid.init();
  }
  else
  {
    oid = *id;
  }

  reg.append("_id", oid);
  reg.append(REG_EXPIRATION, expiration);
  reg.append(REG_SERVICE_PATH, servicePath == "" ? DEFAULT_SERVICE_PATH_UPDATES : servicePath);
  reg.append(REG_FORMAT, format);

  //
  // We accumulate the subscriptions in a map. The key of the map is the string
  // representing the subscription id
  //
  std::map<string, TriggeredSubscription*> subsToNotify;

  // This vector is used to define which entities to include in notifications
  EntityIdVector triggerEntitiesV;

  BSONArrayBuilder contextRegistration;
  for (unsigned int ix = 0; ix < requestP->contextRegistrationVector.size(); ++ix)
  {
    ContextRegistration* cr = requestP->contextRegistrationVector[ix];

    // Serialize the entity ids of this context registration (type is optional).
    BSONArrayBuilder entities;
    for (unsigned int jx = 0; jx < cr->entityIdVector.size(); ++jx)
    {
      EntityId* en = cr->entityIdVector[jx];

      triggerEntitiesV.push_back(en);

      if (en->type == "")
      {
        entities.append(BSON(REG_ENTITY_ID << en->id));
        LM_T(LmtMongo, ("Entity registration: {id: %s}", en->id.c_str()));
      }
      else
      {
        entities.append(BSON(REG_ENTITY_ID << en->id << REG_ENTITY_TYPE << en->type));
        LM_T(LmtMongo, ("Entity registration: {id: %s, type: %s}", en->id.c_str(), en->type.c_str()));
      }
    }

    // Serialize the attributes registered for those entities.
    BSONArrayBuilder attrs;
    for (unsigned int jx = 0; jx < cr->contextRegistrationAttributeVector.size(); ++jx)
    {
      ContextRegistrationAttribute* cra = cr->contextRegistrationAttributeVector[jx];

      attrs.append(BSON(REG_ATTRS_NAME << cra->name << REG_ATTRS_TYPE << cra->type << "isDomain" << cra->isDomain));
      LM_T(LmtMongo, ("Attribute registration: {name: %s, type: %s, isDomain: %s}",
                      cra->name.c_str(),
                      cra->type.c_str(),
                      cra->isDomain.c_str()));

      for (unsigned int kx = 0;
           kx < requestP->contextRegistrationVector[ix]->contextRegistrationAttributeVector[jx]->metadataVector.size();
           ++kx)
      {
        // FIXME: metadata not supported at the moment
      }
    }

    contextRegistration.append(
      BSON(
        REG_ENTITIES << entities.arr() <<
        REG_ATTRS << attrs.arr() <<
        REG_PROVIDING_APPLICATION << requestP->contextRegistrationVector[ix]->providingApplication.get()));

    LM_T(LmtMongo, ("providingApplication registration: %s",
                    requestP->contextRegistrationVector[ix]->providingApplication.c_str()));

    std::string err;

    // Collect the subscriptions this registration triggers (notified later,
    // after the DB write succeeds).
    if (!addTriggeredSubscriptions(*cr, subsToNotify, err, tenant))
    {
      responseP->errorCode.fill(SccReceiverInternalError, err);
      return SccOk;
    }
  }

  reg.append(REG_CONTEXT_REGISTRATION, contextRegistration.arr());

  /* Note we are using upsert = "true". This means that if the document doesn't previously
   * exist in the collection, it is created. Thus, this way both uses of registerContext are OK
   * (either new registration or updating an existing one) */
  if (!collectionUpdate(getRegistrationsCollectionName(tenant), BSON("_id" << oid), reg.obj(), true, &err))
  {
    responseP->errorCode.fill(SccReceiverInternalError, err);
    releaseTriggeredSubscriptions(subsToNotify);
    return SccOk;
  }

  //
  // Send notifications for each one of the subscriptions accumulated by
  // previous addTriggeredSubscriptions() invocations
  //
  processSubscriptions(triggerEntitiesV, subsToNotify, err, tenant, fiwareCorrelator);

  // Fill the response element
  responseP->duration = requestP->duration;
  responseP->registrationId.set(oid.toString());
  responseP->errorCode.fill(SccOk);

  return SccOk;
}
// Intercept an operation whose connection carries stale shard-version
// metadata. Returns false when the op should proceed normally; true when it
// was answered with a stale-config error (ops that expect a reply) or queued
// as a writeback (writes), in which case the caller must not execute it.
bool handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
    if ( ! shardingState.enabled() )
        return false;

    int op = m.operation();
    // Only client ops in the 2000-2999 range participate in shard versioning.
    if ( op < 2000
         || op >= 3000
         || op == dbGetMore // cursors are weird
         )
        return false;

    DbMessage d(m);
    const char *ns = d.getns();
    string errmsg;
    if ( shardVersionOk( ns , opIsWrite( op ) , errmsg ) ) {
        return false;
    }

    log(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;

    if ( doesOpGetAResponse( op ) ) {
        // Build an error reply in the wire format by hand: reserve header
        // space, append the $err document, then patch the QueryResult header.
        assert( dbresponse );
        BufBuilder b( 32768 );
        b.skip( sizeof( QueryResult ) );
        {
            BSONObj obj = BSON( "$err" << errmsg );
            b.appendBuf( obj.objdata() , obj.objsize() );
        }

        QueryResult *qr = (QueryResult*)b.buf();
        qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
        qr->len = b.len();
        qr->setOperation( opReply );
        qr->cursorId = 0;
        qr->startingFrom = 0;
        qr->nReturned = 1;
        // decouple() releases the buffer to the Message, which now owns it.
        b.decouple();

        Message * resp = new Message();
        resp->setData( qr , true );

        dbresponse->response = resp;
        dbresponse->responseTo = m.header()->id;
        return true;
    }

    // Write op with no reply: queue the whole message as a writeback for the
    // mongos to replay once it has refreshed its config.
    OID writebackID;
    writebackID.init();
    lastError.getSafe()->writeback( writebackID );

    const OID& clientID = ShardedConnectionInfo::get(false)->getID();
    massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );

    log(1) << "got write with an old config - writing back ns: " << ns << endl;
    if ( logLevel ) log(1) << debugString( m ) << endl;

    BSONObjBuilder b;
    b.appendBool( "writeBack" , true );
    b.append( "ns" , ns );
    b.append( "id" , writebackID );
    b.append( "connectionId" , cc().getConnectionId() );
    b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
    b.appendTimestamp( "yourVersion" , ShardedConnectionInfo::get( true )->getVersion( ns ) );
    // The entire original message is embedded so it can be replayed verbatim.
    b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
    log(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
    writeBackManager.queueWriteBack( clientID.str() , b.obj() );

    return true;
}
// Intercept an operation whose connection carries stale shard-version
// metadata (sharding is known to be enabled here). Returns false when the op
// should proceed normally; true when it was answered with a stale-config
// error (ops expecting a reply) or queued as a writeback (writes).
bool _handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
    DEV assert( shardingState.enabled() );

    int op = m.operation();
    // Only client ops in the 2000-2999 range participate in shard versioning.
    if ( op < 2000
         || op >= 3000
         || op == dbGetMore // cursors are weird
         )
        return false;

    DbMessage d(m);
    const char *ns = d.getns();
    string errmsg;
    // We don't care about the version here, since we're returning it later in the writeback
    ConfigVersion received, wanted;
    if ( shardVersionOk( ns , errmsg, received, wanted ) ) {
        return false;
    }

    LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;

    if ( doesOpGetAResponse( op ) ) {
        // Build an error reply in the wire format by hand: reserve header
        // space, append the $err document, then patch the QueryResult header.
        assert( dbresponse );
        BufBuilder b( 32768 );
        b.skip( sizeof( QueryResult ) );
        {
            BSONObj obj = BSON( "$err" << errmsg << "ns" << ns );
            b.appendBuf( obj.objdata() , obj.objsize() );
        }

        QueryResult *qr = (QueryResult*)b.buf();
        qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
        qr->len = b.len();
        qr->setOperation( opReply );
        qr->cursorId = 0;
        qr->startingFrom = 0;
        qr->nReturned = 1;
        // decouple() releases the buffer to the Message, which now owns it.
        b.decouple();

        Message * resp = new Message();
        resp->setData( qr , true );

        dbresponse->response = resp;
        dbresponse->responseTo = m.header()->id;
        return true;
    }

    // Refuse to writeback a message that itself came from a writeback replay,
    // which would loop forever.
    uassert( 9517 , "writeback" , ( d.reservedField() & DbMessage::Reserved_FromWriteback ) == 0 );

    OID writebackID;
    writebackID.init();
    lastError.getSafe()->writeback( writebackID );

    const OID& clientID = ShardedConnectionInfo::get(false)->getID();
    massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );

    LOG(1) << "got write with an old config - writing back ns: " << ns << endl;
    LOG(1) << m.toString() << endl;

    BSONObjBuilder b;
    b.appendBool( "writeBack" , true );
    b.append( "ns" , ns );
    b.append( "id" , writebackID );
    b.append( "connectionId" , cc().getConnectionId() );
    b.append( "instanceIdent" , prettyHostName() );
    b.appendTimestamp( "version" , shardingState.getVersion( ns ) );

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    b.appendTimestamp( "yourVersion" , info ? info->getVersion(ns) : (ConfigVersion)0 );

    // The entire original message is embedded so it can be replayed verbatim.
    b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
    LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;

    writeBackManager.queueWriteBack( clientID.str() , b.obj() );

    return true;
}