// Persists one ROS Image message as a BSON document and bumps the in/out
// message counters under their mutexes.
void msg_callback(const sensor_msgs::Image::ConstPtr& msg) {
  BSONObjBuilder document;
  // stamp.sec is a 32-bit field; widen BEFORE scaling to milliseconds.
  // The previous `sec * 1000` wrapped 32-bit arithmetic for any realistic
  // epoch timestamp, storing a garbage date.
  Date_t stamp = static_cast<unsigned long long>(msg->header.stamp.sec) * 1000 +
                 msg->header.stamp.nsec / 1000000;
  document.append("header", BSON("seq" << msg->header.seq
                                 << "stamp" << stamp
                                 << "frame_id" << msg->header.frame_id));
  document.append("height", msg->height);
  document.append("width", msg->width);
  document.append("encoding", msg->encoding);
  document.append("is_bigendian", msg->is_bigendian);
  document.append("step", msg->step);
  // &data[0] on an empty vector is undefined behaviour; store a zero-length
  // binary field with a null pointer instead of dereferencing.
  document.appendBinData("data", msg->data.size(), BinDataGeneral,
                         msg->data.empty()
                             ? NULL
                             : const_cast<unsigned char*>(&msg->data[0]));
  mongodb_conn->insert(collection, document.obj());
  // If we'd get access to the message queue this could be more useful
  // https://code.ros.org/trac/ros/ticket/744
  pthread_mutex_lock(&in_counter_mutex);
  ++in_counter;
  pthread_mutex_unlock(&in_counter_mutex);
  pthread_mutex_lock(&out_counter_mutex);
  ++out_counter;
  pthread_mutex_unlock(&out_counter_mutex);
}
// Builds a single GridFS chunk document:
//   { files_id: <parent file _id>, n: <chunk index>, data: <binary payload> }
GridFSChunk::GridFSChunk( BSONObj fileObject , int chunkNumber , const char * data , int len ) {
    BSONObjBuilder chunk;
    // Re-use the parent file's _id value, renamed to "files_id".
    chunk.appendAs( fileObject["_id"] , "files_id" );
    chunk.append( "n" , chunkNumber );
    chunk.appendBinData( "data" , len, BinDataGeneral, data );
    _data = chunk.obj();
}
// Round-trips a ByteArray-subtype BinData value through the scripting
// engine and asserts the bytes come back unchanged.
void run(){
    Scope * s = globalScriptEngine->createScope();
    s->localConnect( "asd" );
    const char * foo = "asdasdasdasd";
    BSONObj in;
    {
        BSONObjBuilder b;
        b.append( "a" , 7 );
        // Store foo's characters (without the terminating NUL) as BinData.
        b.appendBinData( "b" , strlen( foo ) , ByteArray , foo );
        in = b.obj();
        s->setObject( "x" , in );
    }
    // Pull the BinData out of x, then wrap it in a fresh JS object.
    s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , BSONObj() );
    s->invokeSafe( "y = { c : myb };" , BSONObj() );
    BSONObj out = s->getObject( "y" );
    ASSERT_EQUALS( BinData , out["c"].type() );
    //blah( "in " , in["b"] );
    //blah( "out" , out["c"] );
    // woCompare(..., false): compare values only, ignoring field names.
    ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
    delete s;
}
// Test fixture: builds a document with a 200KB binary payload.
InsertBig() {
    // Zero-fill the payload. Previously the buffer was left indeterminate,
    // so uninitialized stack bytes were serialized into the document
    // (nondeterministic contents, and flagged by valgrind/MSan).
    char buf[200000] = {0};
    BSONObjBuilder b;
    b.append("x", 99);
    // 129 is deliberately a user-defined (non-standard) BinData subtype.
    b.appendBinData("bin", 200000, (BinDataType) 129, buf);
    x = b.obj();
}
// Intercepts an operation whose client has a stale shard configuration.
// Returns false when the message should be processed normally; true when
// it was answered (or queued for writeback) here instead.
bool handlePossibleShardedMessage( Message &m, DbResponse &dbresponse ){
    // Not running sharded: nothing to intercept.
    if ( shardConfigServer.empty() ){
        return false;
    }
    int op = m.data->operation();
    // Only database ops live in the [2000, 3000) opcode range.
    if ( op < 2000 || op >= 3000 )
        return false;
    // The namespace string starts 4 bytes into the message body.
    const char *ns = m.data->_data + 4;
    string errmsg;
    if ( shardVersionOk( ns , errmsg ) ){
        return false;
    }
    log() << "shardVersionOk failed ns:" << ns << " " << errmsg << endl;
    if ( doesOpGetAResponse( op ) ){
        // Reply directly with a "stale shard config" error document.
        BufBuilder b( 32768 );
        // Reserve space for the QueryResult header that precedes the body.
        b.skip( sizeof( QueryResult ) );
        {
            BSONObj obj = BSON( "$err" << errmsg );
            b.append( obj.objdata() , obj.objsize() );
        }
        QueryResult *qr = (QueryResult*)b.buf();
        qr->_resultFlags() = QueryResult::ResultFlag_ErrSet | QueryResult::ResultFlag_ShardConfigStale;
        qr->len = b.len();
        qr->setOperation( opReply );
        qr->cursorId = 0;
        qr->startingFrom = 0;
        qr->nReturned = 1;
        // decouple(): the Message takes ownership of the buffer (freed via
        // setData(..., true)).
        b.decouple();
        Message * resp = new Message();
        resp->setData( qr , true );
        dbresponse.response = resp;
        dbresponse.responseTo = m.data->id;
        return true;
    }
    // Writes get no direct response: queue the raw message for writeback
    // to this client's server-side queue.
    OID * clientID = clientServerIds.get();
    massert( 10422 , "write with bad shard config and no server id!" , clientID );
    log() << "got write with an old config - writing back" << endl;
    BSONObjBuilder b;
    b.appendBool( "writeBack" , true );
    b.append( "ns" , ns );
    // Embed the entire original wire message as custom-subtype BinData.
    b.appendBinData( "msg" , m.data->len , bdtCustom , (char*)(m.data) );
    log() << "writing back msg with len: " << m.data->len << " op: " << m.data->_operation << endl;
    clientQueues[clientID->str()]->push( b.obj() );
    return true;
}
/**
 * Wraps one FTDC metric sample block into a metric-chunk document:
 * { <id>: date, <type>: kMetricChunk, <data>: <binary blob> }.
 */
BSONObj createBSONMetricChunkDocument(ConstDataRange buf, Date_t date) {
    BSONObjBuilder chunk;
    chunk.appendDate(kFTDCIdField, date);
    chunk.appendNumber(kFTDCTypeField, static_cast<int>(FTDCType::kMetricChunk));
    chunk.appendBinData(kFTDCDataField, buf.length(), BinDataType::BinDataGeneral, buf.data());
    return chunk.obj();
}
void run() { { BSONObjBuilder b; b.genOID(); b.append("files_id", 0); b.append("n", 0); b.appendBinData("data", 6, BinDataGeneral, "hello "); db.insert(ns(), b.obj()); } { BSONObjBuilder b; b.genOID(); b.append("files_id", 0); b.append("n", 1); b.appendBinData("data", 5, BinDataGeneral, "world"); db.insert(ns(), b.obj()); } BSONObj result; ASSERT( db.runCommand("test", BSON("filemd5" << 0), result) ); ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() ); }
// Exercises the JS BinData class: round-trips BinData (including an
// embedded NUL byte), checks toString() base64 formatting, constructs
// BinData from JS, and verifies the zero-length case.
void run(){
    Scope * s = globalScriptEngine->newScope();
    s->localConnect( "asd" );
    // 12 bytes with an embedded NUL at index 5.
    const char * foo = "asdas\0asdasd";
    // base64 of those 12 bytes.
    const char * base64 = "YXNkYXMAYXNkYXNk";
    BSONObj in;
    {
        BSONObjBuilder b;
        b.append( "a" , 7 );
        b.appendBinData( "b" , 12 , BinDataGeneral , foo );
        in = b.obj();
        s->setObject( "x" , in );
    }
    s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , BSONObj() );
    s->invokeSafe( "y = { c : myb };" , BSONObj() );
    BSONObj out = s->getObject( "y" );
    ASSERT_EQUALS( BinData , out["c"].type() );
    // pp( "in " , in["b"] );
    // pp( "out" , out["c"] );
    // woCompare(..., false): value-only comparison (ignore field names).
    ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
    // check that BinData js class is utilized
    s->invokeSafe( "q = x.b.toString();", BSONObj() );
    stringstream expected;
    expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
    ASSERT_EQUALS( expected.str(), s->getString( "q" ) );
    // Construct an equivalent BinData from JS and compare it to the original.
    stringstream scriptBuilder;
    scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };";
    string script = scriptBuilder.str();
    s->invokeSafe( script.c_str(), BSONObj() );
    out = s->getObject( "z" );
    // pp( "out" , out["c"] );
    ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
    // Zero-length BinData with subtype 128 must survive with len == 0.
    s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", BSONObj() );
    out = s->getObject( "a" );
    int len = -1;
    out[ "f" ].binData( len );
    ASSERT_EQUALS( 0, len );
    ASSERT_EQUALS( 128, out[ "f" ].binDataType() );
    delete s;
}
/** * @brief insert_user * insert a new user in database * @param username * user name * @param key * XOR key to encrypt password * @param role * user role (0 = normal / 1 = admin) * @return * success status */ bool Database::insert_user(std::string username,std::string key_data,int role) { QDateTime currentDate = QDateTime::currentDateTime(); std::string hash = username + ":" + realm + ":" + key_data; QByteArray convert_hash; if (digest_algo==ALGO_MD5) convert_hash = QCryptographicHash::hash(hash.data(), QCryptographicHash::Md5).toHex(); else if (digest_algo==ALGO_SHA1) convert_hash = QCryptographicHash::hash(hash.data(), QCryptographicHash::Sha1).toHex(); char * crypted_data = new char[convert_hash.length()]; encrypt((char*)convert_hash.data(),convert_hash.length(),key,crypted_data); BSONObjBuilder generatedCert; generatedCert.append(USER_USERNAME ,username); generatedCert.append(USER_ROLE ,role); generatedCert.appendBinData(USER_KEY ,convert_hash.length(),BinDataGeneral,crypted_data); generatedCert.append(USER_CREATION ,Date_t(currentDate.toMSecsSinceEpoch())); generatedCert.append(USER_LAST_LOGIN ,Date_t(currentDate.toMSecsSinceEpoch())); delete[] crypted_data; BSONObj userObj = generatedCert.obj(); con.insert("ssldashboard.users", userObj); string e = con.getLastError(); if( !e.empty() ) { cerr << e << endl; return false; } return true; }
const char *GridFileBuilder::privateAppendChunk(const char *data, size_t length, bool pending_insert) { size_t chunk_len; char const * const end = data + length; while (data < end) { chunk_len = min(_chunkSize, (size_t)(end-data)); // the last chunk needs to be stored as pending_data if (chunk_len < _chunkSize && !pending_insert) break; /* from gridfs.cpp at https://github.com/mongodb/mongo-cxx-driver/blob/legacy/src/mongo/client/gridfs.cpp */ BSONObjBuilder b; b.appendAs( _file_id["_id"] , "files_id" ); b.append( "n" , _current_chunk ); b.appendBinData( "data" , chunk_len, BinDataGeneral, data ); BSONObj chunk_data = b.obj(); /************************************************************************/ ++_current_chunk; _client->insert( _chunkNS.c_str(), chunk_data ); data += chunk_len; _file_length += chunk_len; } return data; }
/** * @brief insertGeneratedCert * insert a new certificate in database * * @param isCaCert * cert is CA or not * @param certPublic * public key value * @param certPrivate * private key value * @param certP12 * p12 cert value * @param start_date * cert starting date * @param end_date * cert ending date * @param serial * cert serial number * @param commonName * cert common name * @param signBySerialNum * cert is sign with a given cert identified by serial number (-1 if self signed) * @return * success status */ bool Database::insertGeneratedCert(bool isCaCert, std::string certPublic, std::string certPrivate, std::vector<char> certP12, unsigned long long start_date, unsigned long long end_date, int serial, std::string commonName, int signBySerialNum ){ QDateTime currentDate = QDateTime::currentDateTime(); BSONObjBuilder generatedCert; generatedCert.append(CERT_SEQ_NUM ,serial); generatedCert.append(CERT_COMMON_NAME ,commonName); generatedCert.append(CERT_IS_CA ,isCaCert); generatedCert.append(CERT_SIGN_BY_SERIAL ,signBySerialNum); generatedCert.append(CERT_START_DATE ,Date_t(start_date)); generatedCert.append(CERT_END_DATE ,Date_t(end_date)); generatedCert.append(CERT_RECORD_DATE_FIELD ,Date_t(currentDate.toMSecsSinceEpoch())); generatedCert.append(CERT_PUBLIC_BODY ,certPublic); generatedCert.append(CERT_PRIVATE_BODY ,certPrivate); generatedCert.appendBinData(CERT_P12 ,certP12.size() ,BinDataGeneral,certP12.data()); BSONObj generatedCertObj = generatedCert.obj(); con.insert("ssldashboard.generatedcerts", generatedCertObj); string e = con.getLastError(); if( !e.empty() ) { cerr << e << endl; return false; } return true; }
// Intercepts an op arriving with stale sharding metadata. Returns false
// when the op should proceed normally; true when it was answered with a
// stale-config error (reads) or queued for writeback (writes).
bool handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
    if ( ! shardingState.enabled() )
        return false;
    int op = m.operation();
    // Only database ops ([2000, 3000)); getMore is excluded because cursor
    // continuation doesn't recheck shard versions.
    if ( op < 2000
            || op >= 3000
            || op == dbGetMore // cursors are weird
       )
        return false;
    DbMessage d(m);
    const char *ns = d.getns();
    string errmsg;
    if ( shardVersionOk( ns , opIsWrite( op ) , errmsg ) ) {
        return false;
    }
    log(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
    if ( doesOpGetAResponse( op ) ) {
        assert( dbresponse );
        // Build an opReply whose body is a {$err: ...} document, flagged so
        // mongos knows to refresh its config and retry.
        BufBuilder b( 32768 );
        b.skip( sizeof( QueryResult ) ); // reserve header space
        {
            BSONObj obj = BSON( "$err" << errmsg );
            b.appendBuf( obj.objdata() , obj.objsize() );
        }
        QueryResult *qr = (QueryResult*)b.buf();
        qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
        qr->len = b.len();
        qr->setOperation( opReply );
        qr->cursorId = 0;
        qr->startingFrom = 0;
        qr->nReturned = 1;
        // Ownership of the buffer transfers to the response Message.
        b.decouple();
        Message * resp = new Message();
        resp->setData( qr , true );
        dbresponse->response = resp;
        dbresponse->responseTo = m.header()->id;
        return true;
    }
    // Write path: record a writeback id in this connection's LastError so
    // the client can wait on it, then queue the raw message.
    OID writebackID;
    writebackID.init();
    lastError.getSafe()->writeback( writebackID );
    const OID& clientID = ShardedConnectionInfo::get(false)->getID();
    massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );
    log(1) << "got write with an old config - writing back ns: " << ns << endl;
    if ( logLevel ) log(1) << debugString( m ) << endl;
    BSONObjBuilder b;
    b.appendBool( "writeBack" , true );
    b.append( "ns" , ns );
    b.append( "id" , writebackID );
    b.append( "connectionId" , cc().getConnectionId() );
    // Both our version and the client's version travel with the writeback.
    b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
    b.appendTimestamp( "yourVersion" , ShardedConnectionInfo::get( true )->getVersion( ns ) );
    // Embed the whole original wire message as custom-subtype BinData.
    b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
    log(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
    writeBackManager.queueWriteBack( clientID.str() , b.obj() );
    return true;
}
// Converts a single V8 value into a BSON element appended to `b` under the
// field name `sname`. Dispatches on the JS type; special mongo shell types
// (Timestamp, MinKey/MaxKey, NumberLong, DBPointer, BinData, ObjectId,
// RegExp) are recognized via internal fields, hidden values, or prototype
// properties.
void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value ){
    if ( value->IsString() ){
        b.append( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }
    if ( value->IsFunction() ){
        // Functions are stored as BSON Code elements.
        b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }
    if ( value->IsNumber() ){
        // Preserve int32 where possible; everything else becomes a double.
        if ( value->IsInt32() )
            b.append( sname.c_str(), int( value->ToInt32()->Value() ) );
        else
            b.append( sname.c_str() , value->ToNumber()->Value() );
        return;
    }
    if ( value->IsArray() ){
        BSONObj sub = v8ToMongo( value->ToObject() );
        b.appendArray( sname.c_str() , sub );
        return;
    }
    if ( value->IsDate() ){
        b.appendDate( sname.c_str() , Date_t(v8::Date::Cast( *value )->NumberValue()) );
        return;
    }
    // External (wrapped C++ pointer) values have no BSON representation.
    if ( value->IsExternal() )
        return;
    if ( value->IsObject() ){
        // The user could potentially modify the fields of these special objects,
        // wreaking havoc when we attempt to reinterpret them.  Not doing any validation
        // for now...
        Local< v8::Object > obj = value->ToObject();
        // Internal field 0, when present and numeric, tags a special type.
        if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
            switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
            case Timestamp:
                b.appendTimestamp( sname.c_str(),
                                   Date_t( v8::Date::Cast( *obj->Get( v8::String::New( "time" ) ) )->NumberValue() ),
                                   obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
                return;
            case MinKey:
                b.appendMinKey( sname.c_str() );
                return;
            case MaxKey:
                b.appendMaxKey( sname.c_str() );
                return;
            default:
                assert( "invalid internal field" == 0 );
            }
        }
        string s = toSTLString( value );
        // A string form starting with '/' is a regex literal: /pattern/options.
        if ( s.size() && s[0] == '/' ){
            s = s.substr( 1 );
            string r = s.substr( 0 , s.rfind( "/" ) );
            string o = s.substr( s.rfind( "/" ) + 1 );
            b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
        }
        // ObjectId detected via an "isObjectId" marker on the prototype.
        else if ( value->ToObject()->GetPrototype()->IsObject() &&
                  value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( v8::String::New( "isObjectId" ) ) ){
            OID oid;
            oid.init( toSTLString( value ) );
            b.appendOID( sname.c_str() , &oid );
        }
        else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__NumberLong" ) ).IsEmpty() ) {
            // TODO might be nice to potentially speed this up with an indexed internal
            // field, but I don't yet know how to use an ObjectTemplate with a
            // constructor.
            // Reassemble the 64-bit value from its "top"/"bottom" 32-bit halves.
            unsigned long long val =
                ( (unsigned long long)( value->ToObject()->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
                (unsigned)( value->ToObject()->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
            b.append( sname.c_str(), (long long)val );
        }
        else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
            OID oid;
            oid.init( toSTLString( value->ToObject()->Get( v8::String::New( "id" ) ) ) );
            string ns = toSTLString( value->ToObject()->Get( v8::String::New( "ns" ) ) );
            b.appendDBRef( sname.c_str(), ns.c_str(), oid );
        }
        else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__BinData" ) ).IsEmpty() ) {
            int len = obj->Get( v8::String::New( "len" ) )->ToInt32()->Value();
            // NOTE(review): UTF-8 extraction assumes the payload round-trips
            // through a JS string; the assert below guards the length.
            v8::String::Utf8Value data( obj->Get( v8::String::New( "data" ) ) );
            const char *dataArray = *data;
            assert( data.length() == len );
            b.appendBinData( sname.c_str(), len,
                             mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
                             dataArray );
        }
        else {
            // Plain object: recurse.
            BSONObj sub = v8ToMongo( value->ToObject() );
            b.append( sname.c_str() , sub );
        }
        return;
    }
    if ( value->IsBoolean() ){
        b.appendBool( sname.c_str() , value->ToBoolean()->Value() );
        return;
    }
    else if ( value->IsUndefined() ){
        b.appendUndefined( sname.c_str() );
        return;
    }
    else if ( value->IsNull() ){
        b.appendNull( sname.c_str() );
        return;
    }
    cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
}
// Drives a complete SASL authentication conversation against the server.
// Alternates GSASL client steps with saslStart/saslContinue commands until
// the client-side mechanism reports completion, then verifies the server
// agreed that the conversation is done.
Status saslClientAuthenticate(Gsasl *gsasl,
                              DBClientWithCommands* client,
                              const BSONObj& saslParameters,
                              void* sessionHook) {
    GsaslSession session;
    int saslLogLevel = getSaslClientLogLevel(saslParameters);
    Status status = configureSession(gsasl, client, saslParameters, sessionHook, &session);
    if (!status.isOK())
        return status;
    // Database to authenticate against; falls back to the default DB name.
    std::string targetDatabase;
    status = bsonExtractStringFieldWithDefault(saslParameters,
                                               saslCommandPrincipalSourceFieldName,
                                               saslDefaultDBName,
                                               &targetDatabase);
    if (!status.isOK())
        return status;
    // First round uses saslStart (with the mechanism); later rounds use
    // saslContinue.
    BSONObj saslFirstCommandPrefix = BSON(
            saslStartCommandName << 1 <<
            saslCommandMechanismFieldName << session.getMechanism());
    BSONObj saslFollowupCommandPrefix = BSON(saslContinueCommandName << 1);
    BSONObj saslCommandPrefix = saslFirstCommandPrefix;
    // Seed the loop with an empty server payload.
    BSONObj inputObj = BSON(saslCommandPayloadFieldName << "");
    bool isServerDone = false;
    while (!session.isDone()) {
        std::string payload;
        BSONType type;
        status = saslExtractPayload(inputObj, &payload, &type);
        if (!status.isOK())
            return status;
        LOG(saslLogLevel) << "sasl client input: " << base64::encode(payload) << endl;
        // One mechanism step: server challenge in, client response out.
        std::string responsePayload;
        status = session.step(payload, &responsePayload);
        if (!status.isOK())
            return status;
        LOG(saslLogLevel) << "sasl client output: " << base64::encode(responsePayload) << endl;
        BSONObjBuilder commandBuilder;
        commandBuilder.appendElements(saslCommandPrefix);
        commandBuilder.appendBinData(saslCommandPayloadFieldName,
                                     int(responsePayload.size()),
                                     BinDataGeneral,
                                     responsePayload.c_str());
        // Echo the server's conversation id once it has assigned one.
        BSONElement conversationId = inputObj[saslCommandConversationIdFieldName];
        if (!conversationId.eoo())
            commandBuilder.append(conversationId);
        // Server versions 2.3.2 and earlier may return "ok: 1" with a non-zero "code" field,
        // indicating a failure.  Subsequent versions should return "ok: 0" on failure with a
        // non-zero "code" field to indicate specific failure.  In all versions, ok: 1, code: >0
        // and ok: 0, code optional, indicate failure.
        bool ok = client->runCommand(targetDatabase,
                                     commandBuilder.obj(),
                                     inputObj);
        ErrorCodes::Error code = ErrorCodes::fromInt(
                inputObj[saslCommandCodeFieldName].numberInt());
        if (!ok || code != ErrorCodes::OK) {
            // Failure with no specific code maps to UnknownError.
            if (code == ErrorCodes::OK)
                code = ErrorCodes::UnknownError;
            return Status(code, inputObj[saslCommandErrmsgFieldName].str());
        }
        isServerDone = inputObj[saslCommandDoneFieldName].trueValue();
        saslCommandPrefix = saslFollowupCommandPrefix;
    }
    // The client may not finish while the server still expects more steps.
    if (!isServerDone)
        return Status(ErrorCodes::ProtocolError, "Client finished before server.");
    return Status::OK();
}
/**
 * Appends this SHA-1 digest to 'builder' under 'fieldName' as a
 * general-purpose (BinDataGeneral) BSON binary value.
 */
void SHA1Block::appendAsBinData(BSONObjBuilder& builder, StringData fieldName) const {
    builder.appendBinData(fieldName, _hash.size(), BinDataGeneral, _hash.data());
}
// Handles an op whose client presented stale sharding metadata. Returns
// false when the op should be processed normally; true when a stale-config
// error reply was produced (reads) or the op was queued for writeback
// (writes). Caller has already established that sharding is enabled.
bool _handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
    DEV assert( shardingState.enabled() );
    int op = m.operation();
    // Only database ops ([2000, 3000)); getMore is excluded because cursor
    // continuation doesn't recheck shard versions.
    if ( op < 2000
            || op >= 3000
            || op == dbGetMore // cursors are weird
       )
        return false;
    DbMessage d(m);
    const char *ns = d.getns();
    string errmsg;
    // We don't care about the version here, since we're returning it later in the writeback
    ConfigVersion received, wanted;
    if ( shardVersionOk( ns , errmsg, received, wanted ) ) {
        return false;
    }
    LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
    if ( doesOpGetAResponse( op ) ) {
        assert( dbresponse );
        // Build an opReply carrying {$err, ns}, flagged stale so mongos
        // refreshes its config and retries.
        BufBuilder b( 32768 );
        b.skip( sizeof( QueryResult ) ); // reserve header space
        {
            BSONObj obj = BSON( "$err" << errmsg << "ns" << ns );
            b.appendBuf( obj.objdata() , obj.objsize() );
        }
        QueryResult *qr = (QueryResult*)b.buf();
        qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
        qr->len = b.len();
        qr->setOperation( opReply );
        qr->cursorId = 0;
        qr->startingFrom = 0;
        qr->nReturned = 1;
        // Buffer ownership moves to the response Message.
        b.decouple();
        Message * resp = new Message();
        resp->setData( qr , true );
        dbresponse->response = resp;
        dbresponse->responseTo = m.header()->id;
        return true;
    }
    // A message that itself came from a writeback must never be re-queued.
    uassert( 9517 , "writeback" , ( d.reservedField() & DbMessage::Reserved_FromWriteback ) == 0 );
    OID writebackID;
    writebackID.init();
    // Record the writeback id in this connection's LastError so the client
    // can wait for it to be applied.
    lastError.getSafe()->writeback( writebackID );
    const OID& clientID = ShardedConnectionInfo::get(false)->getID();
    massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );
    LOG(1) << "got write with an old config - writing back ns: " << ns << endl;
    LOG(1) << m.toString() << endl;
    BSONObjBuilder b;
    b.appendBool( "writeBack" , true );
    b.append( "ns" , ns );
    b.append( "id" , writebackID );
    b.append( "connectionId" , cc().getConnectionId() );
    b.append( "instanceIdent" , prettyHostName() );
    // Ship both our version and the client's version with the writeback.
    b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    b.appendTimestamp( "yourVersion" , info ? info->getVersion(ns) : (ConfigVersion)0 );
    // Embed the entire original wire message as custom-subtype BinData.
    b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
    LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
    writeBackManager.queueWriteBack( clientID.str() , b.obj() );
    return true;
}
// Decodes one value of the given bamboo/dclass type from the datagram and
// returns it wrapped in a BSONObj under key "_".
static BSONObj bamboo2bson(const dclass::DistributedType *type, DatagramIterator &dgi)
{
    // The BSON library's weird data model doesn't allow elements to exist on
    // their own; they must be part of an object. Therefore, we always return
    // results in a single BSONObj with key "_"
    BSONObjBuilder b;
    switch(type->get_type()) {
        case dclass::Type::T_INT8: {
            b << "_" << dgi.read_int8();
        }
        break;
        case dclass::Type::T_INT16: {
            b << "_" << dgi.read_int16();
        }
        break;
        case dclass::Type::T_INT32: {
            b << "_" << dgi.read_int32();
        }
        break;
        case dclass::Type::T_INT64: {
            b.appendIntOrLL("_", dgi.read_int64());
        }
        break;
        case dclass::Type::T_UINT8: {
            b << "_" << dgi.read_uint8();
        }
        break;
        case dclass::Type::T_UINT16: {
            b << "_" << dgi.read_uint16();
        }
        break;
        case dclass::Type::T_UINT32: {
            b << "_" << dgi.read_uint32();
        }
        break;
        case dclass::Type::T_UINT64: {
            b.appendIntOrLL("_", dgi.read_uint64());
        }
        break;
        case dclass::Type::T_CHAR: {
            unsigned char c = dgi.read_uint8();
            // BUGFIX: std::string(count, ch) — build ONE copy of c. The old
            // string(c, 1) produced c copies of the byte 0x01 instead.
            string str(1, c);
            b << "_" << str;
        }
        break;
        case dclass::Type::T_FLOAT32: {
            b << "_" << dgi.read_float32();
        }
        break;
        case dclass::Type::T_FLOAT64: {
            b << "_" << dgi.read_float64();
        }
        break;
        case dclass::Type::T_STRING: {
            // Fixed-size string: read exactly type->get_size() bytes.
            vector<uint8_t> vec = dgi.read_data(type->get_size());
            string str((const char *)vec.data(), vec.size());
            b << "_" << str;
        }
        // BUGFIX: missing break — previously fell through into T_VARSTRING,
        // appending a duplicate "_" field and consuming extra datagram bytes.
        break;
        case dclass::Type::T_VARSTRING: {
            b << "_" << dgi.read_string();
        }
        break;
        case dclass::Type::T_BLOB: {
            // Fixed-size blob stored as general-purpose BinData.
            vector<uint8_t> blob = dgi.read_data(type->get_size());
            b.appendBinData("_", blob.size(), BinDataGeneral, blob.data());
        }
        break;
        case dclass::Type::T_VARBLOB: {
            vector<uint8_t> blob = dgi.read_blob();
            b.appendBinData("_", blob.size(), BinDataGeneral, blob.data());
        }
        break;
        case dclass::Type::T_ARRAY: {
            // Fixed-length array: element count comes from the type.
            const dclass::ArrayType *array = type->as_array();
            BSONArrayBuilder ab;
            for(size_t i = 0; i < array->get_array_size(); i++) {
                ab << bamboo2bson(array->get_element_type(), dgi)["_"];
            }
            b << "_" << ab.arr();
        }
        break;
        case dclass::Type::T_VARARRAY: {
            // Variable-length array: a byte-length prefix bounds the elements.
            const dclass::ArrayType *array = type->as_array();
            dgsize_t array_length = dgi.read_size();
            dgsize_t starting_size = dgi.tell();
            BSONArrayBuilder ab;
            while(dgi.tell() != starting_size + array_length) {
                ab << bamboo2bson(array->get_element_type(), dgi)["_"];
                // Elements must land exactly on the declared boundary.
                if(dgi.tell() > starting_size + array_length) {
                    throw mongo::DBException("Discovered corrupt array-length tag!", 0);
                }
            }
            b << "_" << ab.arr();
        }
        break;
        case dclass::Type::T_STRUCT: {
            // Struct becomes a sub-document keyed by field name.
            const dclass::Struct *s = type->as_struct();
            size_t fields = s->get_num_fields();
            BSONObjBuilder ob;
            for(unsigned int i = 0; i < fields; ++i) {
                const dclass::Field *field = s->get_field(i);
                ob << field->get_name() << bamboo2bson(field->get_type(), dgi)["_"];
            }
            b << "_" << ob.obj();
        }
        break;
        case dclass::Type::T_METHOD: {
            // Method arguments become a sub-document; unnamed parameters get
            // positional keys "_0", "_1", ...
            const dclass::Method *m = type->as_method();
            size_t parameters = m->get_num_parameters();
            BSONObjBuilder ob;
            for(unsigned int i = 0; i < parameters; ++i) {
                const dclass::Parameter *parameter = m->get_parameter(i);
                string name = parameter->get_name();
                if(name.empty()) {
                    stringstream n;
                    n << "_" << i;
                    name = n.str();
                }
                ob << name << bamboo2bson(parameter->get_type(), dgi)["_"];
            }
            b << "_" << ob.obj();
        }
        break;
        case dclass::Type::T_INVALID:
        default:
            assert(false);
            break;
    }
    return b.obj();
}
// filemd5 command implementation: streams a GridFS file's chunks in
// (files_id, n) order and incrementally md5s their payloads, dropping and
// re-acquiring the collection lock between chunks. With partialOk the
// caller may resume from a serialized md5 state and a starting chunk.
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& jsobj,
         int,
         string& errmsg,
         BSONObjBuilder& result) {
    const std::string ns = parseNs(dbname, jsobj);
    md5digest d;
    md5_state_t st;
    md5_init(&st);
    int n = 0;
    bool partialOk = jsobj["partialOk"].trueValue();
    if (partialOk) {
        // WARNING: This code depends on the binary layout of md5_state. It will not be
        // compatible with different md5 libraries or work correctly in an environment with
        // mongod's of different endians. It is ok for mongos to be a different endian since
        // it just passes the buffer through to another mongod.
        BSONElement stateElem = jsobj["md5state"];
        if (!stateElem.eoo()) {
            // Resume: copy the caller-provided raw md5 state back in.
            int len;
            const char* data = stateElem.binDataClean(len);
            massert(16247, "md5 state not correct size", len == sizeof(st));
            memcpy(&st, data, sizeof(st));
        }
        n = jsobj["startAt"].numberInt();
    }
    // Chunks for this file, starting at chunk n, in chunk order.
    BSONObj query = BSON("files_id" << jsobj["filemd5"] << "n" << GTE << n);
    BSONObj sort = BSON("files_id" << 1 << "n" << 1);
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        CanonicalQuery* cq;
        if (!CanonicalQuery::canonicalize(ns, query, sort, BSONObj(), &cq).isOK()) {
            uasserted(17240, "Can't canonicalize query " + query.toString());
            return 0;
        }
        // Check shard version at startup.
        // This will throw before we've done any work if shard version is outdated
        // We drop and re-acquire these locks every document because md5'ing is expensive
        unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(txn, ns));
        Collection* coll = ctx->getCollection();
        PlanExecutor* rawExec;
        // NO_TABLE_SCAN forces use of the (files_id, n) index.
        if (!getExecutor(txn,
                         coll,
                         cq,
                         PlanExecutor::YIELD_MANUAL,
                         &rawExec,
                         QueryPlannerParams::NO_TABLE_SCAN).isOK()) {
            uasserted(17241, "Can't get executor for query " + query.toString());
            return 0;
        }
        unique_ptr<PlanExecutor> exec(rawExec);
        // Process notifications when the lock is released/reacquired in the loop below
        exec->registerExec();
        BSONObj obj;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
            BSONElement ne = obj["n"];
            verify(ne.isNumber());
            int myn = ne.numberInt();
            if (n != myn) {
                if (partialOk) {
                    break;  // skipped chunk is probably on another shard
                }
                log() << "should have chunk: " << n << " have:" << myn << endl;
                dumpChunks(txn, ns, query, sort);
                uassert(10040, "chunks out of order", n == myn);
            }
            // make a copy of obj since we access data in it while yielding locks
            BSONObj owned = obj.getOwned();
            exec->saveState();
            // UNLOCKED
            ctx.reset();
            int len;
            const char* data = owned["data"].binDataClean(len);
            // This is potentially an expensive operation, so do it out of the lock
            md5_append(&st, (const md5_byte_t*)(data), len);
            n++;
            try {
                // RELOCKED
                ctx.reset(new AutoGetCollectionForRead(txn, ns));
            } catch (const SendStaleConfigException& ex) {
                LOG(1) << "chunk metadata changed during filemd5, will retarget and continue";
                break;
            }
            // Have the lock again. See if we were killed.
            if (!exec->restoreState(txn)) {
                if (!partialOk) {
                    uasserted(13281, "File deleted during filemd5 command");
                }
            }
        }
        if (partialOk)
            // Raw md5 state so a later invocation can resume where we stopped.
            result.appendBinData("md5state", sizeof(st), BinDataGeneral, &st);
        // This must be *after* the capture of md5state since it mutates st
        md5_finish(&st, d);
        result.append("numChunks", n);
        result.append("md5", digestToString(d));
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "filemd5", dbname);
    return true;
}
// Handles an op whose client presented mismatched sharding metadata.
// Returns false when the op should proceed normally; true when a
// stale-config error reply was produced (reads) or the op was queued for
// writeback (writes). Caller has already checked shardingState.enabled().
bool _handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
    DEV verify( shardingState.enabled() );
    int op = m.operation();
    // Only database ops ([2000, 3000)); getMore is excluded because cursor
    // continuation doesn't recheck shard versions.
    if ( op < 2000
            || op >= 3000
            || op == dbGetMore // cursors are weird
       )
        return false;
    DbMessage d(m);
    const char *ns = d.getns();
    string errmsg;
    // We don't care about the version here, since we're returning it later in the writeback
    ChunkVersion received, wanted;
    if ( shardVersionOk( ns , errmsg, received, wanted ) ) {
        return false;
    }
    bool getsAResponse = doesOpGetAResponse( op );
    LOG(1) << "connection sharding metadata does not match for collection " << ns
           << ", will retry (wanted : " << wanted << ", received : " << received << ")"
           << ( getsAResponse ? "" : " (queuing writeback)" ) << endl;
    if( getsAResponse ){
        verify( dbresponse );
        // Build an opReply whose body carries the error plus both versions,
        // flagged stale so mongos refreshes and retries.
        BufBuilder b( 32768 );
        b.skip( sizeof( QueryResult ) ); // reserve header space
        {
            BSONObjBuilder bob;
            bob.append( "$err", errmsg );
            bob.append( "ns", ns );
            wanted.addToBSON( bob, "vWanted" );
            received.addToBSON( bob, "vReceived" );
            BSONObj obj = bob.obj();
            b.appendBuf( obj.objdata() , obj.objsize() );
        }
        QueryResult *qr = (QueryResult*)b.buf();
        qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
        qr->len = b.len();
        qr->setOperation( opReply );
        qr->cursorId = 0;
        qr->startingFrom = 0;
        qr->nReturned = 1;
        // Buffer ownership moves to the response Message.
        b.decouple();
        Message * resp = new Message();
        resp->setData( qr , true );
        dbresponse->response = resp;
        dbresponse->responseTo = m.header()->id;
        return true;
    }
    // A message originating from a writeback must never be re-queued.
    uassert(9517,
            "cannot queue a writeback operation to the writeback queue",
            (d.reservedField() & Reserved_FromWriteback) == 0);
    const OID& clientID = ShardedConnectionInfo::get(false)->getID();
    massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );
    // We need to check this here, since otherwise we'll get errors wrapping the writeback -
    // not just here, but also when returning as a command result.
    // We choose 1/2 the overhead of the internal maximum so that we can still handle ops of
    // 16MB exactly.
    massert( 16437,
             "data size of operation is too large to queue for writeback",
             m.dataSize() < BSONObjMaxInternalSize - (8 * 1024));
    LOG(1) << "writeback queued for " << m.toString() << endl;
    BSONObjBuilder b;
    b.appendBool( "writeBack" , true );
    b.append( "ns" , ns );
    b.append( "connectionId" , cc().getConnectionId() );
    b.append( "instanceIdent" , prettyHostName() );
    // Ship both our version and the client's version with the writeback.
    wanted.addToBSON( b );
    received.addToBSON( b, "yourVersion" );
    // Embed the entire original wire message as custom-subtype BinData.
    b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
    LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
    // we pass the builder to queueWriteBack so that it can select the writebackId
    // this is important so that the id is guaranteed to be ascending
    // that is important since mongos assumes if its seen a greater writeback
    // that all former have been processed
    OID writebackID = writeBackManager.queueWriteBack( clientID.str() , b );
    lastError.getSafe()->writeback( writebackID );
    return true;
}