// Computes the document that an upsert would insert for this update: seeds a
// mutable document from the query's equality fields, then applies the update
// expression in insert mode. On success returns a newly-allocated BSON buffer;
// ownership transfers across the C ABI to the caller. Errors are reported
// through 'status' via enterCXX.
uint8_t* MONGO_API_CALL stitch_support_v1_update_upsert(stitch_support_v1_update* const update,
                                                        stitch_support_v1_status* status) {
    return enterCXX(mongo::getStatusImpl(status), [&] {
        mongo::FieldRefSet immutablePaths;  // Empty set
        bool docWasModified = false;

        // Start from an empty document; in-place updates are disabled because
        // the document will grow as fields are populated.
        mongo::mutablebson::Document mutableDoc(mongo::BSONObj(),
                                                mongo::mutablebson::Document::kInPlaceDisabled);

        // Copy the equality predicates from the query into the document, as an
        // upsert would before applying the update.
        uassertStatusOK(update->updateDriver.populateDocumentWithQueryFields(
            update->opCtx.get(),
            *update->matcher->matcher.getQuery(),
            immutablePaths,
            mutableDoc));

        uassertStatusOK(update->updateDriver.update(mongo::StringData() /* matchedField */,
                                                    &mutableDoc,
                                                    false /* validateForStorage */,
                                                    immutablePaths,
                                                    true /* isInsert */,
                                                    nullptr /* logOpRec */,
                                                    &docWasModified,
                                                    nullptr /*modifiedPaths*/));

        auto outputObj = mutableDoc.getObject();
        size_t outputSize = static_cast<size_t>(outputObj.objsize());
        // Raw heap buffer: the C caller takes ownership of the returned bytes.
        auto output = new (std::nothrow) char[outputSize];

        uassert(mongo::ErrorCodes::ExceededMemoryLimit,
                "Failed to allocate memory for upsert",
                output);

        static_cast<void>(std::copy_n(outputObj.objdata(), outputSize, output));
        return mongo::toInterfaceType(output);
    });
}
// Applies a previously-parsed update expression to 'documentBSON' and returns
// the updated document as a newly-allocated BSON buffer; ownership transfers
// across the C ABI to the caller. If 'update_details' is non-null, it receives
// the serialized set of modified paths. Errors are reported through 'status'.
uint8_t* MONGO_API_CALL
stitch_support_v1_update_apply(stitch_support_v1_update* const update,
                               const uint8_t* documentBSON,
                               stitch_support_v1_update_details* update_details,
                               stitch_support_v1_status* status) {
    return enterCXX(mongo::getStatusImpl(status), [&]() {
        mongo::BSONObj document(mongo::fromInterfaceType(documentBSON));
        std::string matchedField;

        // Positional updates need to know which array element the query
        // matched, so re-run the matcher with element-match details enabled.
        if (update->updateDriver.needMatchDetails()) {
            invariant(update->matcher);

            mongo::MatchDetails matchDetails;
            matchDetails.requestElemMatchKey();
            bool isMatch = update->matcher->matcher.matches(document, &matchDetails);
            invariant(isMatch);
            if (matchDetails.hasElemMatchKey()) {
                matchedField = matchDetails.elemMatchKey();
            } else {
                // Empty 'matchedField' indicates that the matcher did not traverse an array.
            }
        }

        mongo::mutablebson::Document mutableDoc(document,
                                                mongo::mutablebson::Document::kInPlaceDisabled);

        mongo::FieldRefSet immutablePaths;  // Empty set
        bool docWasModified = false;

        // Collects the paths touched by the update, for 'update_details' below.
        mongo::FieldRefSetWithStorage modifiedPaths;

        uassertStatusOK(update->updateDriver.update(matchedField,
                                                    &mutableDoc,
                                                    false /* validateForStorage */,
                                                    immutablePaths,
                                                    false /* isInsert */,
                                                    nullptr /* logOpRec*/,
                                                    &docWasModified,
                                                    &modifiedPaths));

        auto outputObj = mutableDoc.getObject();
        size_t outputSize = static_cast<size_t>(outputObj.objsize());
        // Raw heap buffer: the C caller takes ownership of the returned bytes.
        auto output = new (std::nothrow) char[outputSize];

        uassert(mongo::ErrorCodes::ExceededMemoryLimit,
                "Failed to allocate memory for update",
                output);

        static_cast<void>(std::copy_n(outputObj.objdata(), outputSize, output));

        if (update_details) {
            update_details->modifiedPaths = modifiedPaths.serialize();
        }

        return mongo::toInterfaceType(output);
    });
}
// Debug helper: writes one line per raw byte of the object to out() --
// the byte offset, its hex value, and (for bytes falling in the ASCII
// 'A'..'z' range, a deliberately loose "looks printable" filter) the
// character itself.
void BSONObj::dump() const {
    out() << hex;
    const char* bytes = objdata();
    const int total = objsize();
    for (int offset = 0; offset < total; ++offset) {
        const char byte = bytes[offset];
        out() << offset << '\t' << (0xff & ((unsigned)byte));
        if (byte >= 'A' && byte <= 'z')
            out() << '\t' << byte;
        out() << endl;
    }
}
// Renders the raw BSON bytes as zero-padded, space-separated hex pairs.
// Bytes in the '0'..'9' or 'A'..'z' ranges (a loose "looks printable"
// filter) are additionally echoed as a quoted character.
string BSONObj::hexDump() const {
    stringstream out;
    const char* bytes = objdata();
    const int total = objsize();
    for (int idx = 0; idx < total; ++idx) {
        const char c = bytes[idx];
        out.width(2);
        out.fill('0');
        out << hex << (unsigned)(unsigned char)c << dec;
        const bool echoChar = (c >= '0' && c <= '9') || (c >= 'A' && c <= 'z');
        if (echoChar)
            out << '\'' << c << '\'';
        if (idx + 1 != total)
            out << ' ';
    }
    return out.str();
}
// Applies a previously-parsed projection to 'documentBSON' and returns the
// projected document as a newly-allocated BSON buffer; ownership transfers
// across the C ABI to the caller. Errors are reported through 'status'.
uint8_t* MONGO_API_CALL
stitch_support_v1_projection_apply(stitch_support_v1_projection* const projection,
                                   const uint8_t* documentBSON,
                                   stitch_support_v1_status* status) {
    return enterCXX(mongo::getStatusImpl(status), [&]() {
        // Decode the caller-supplied bytes and run the projection over them.
        mongo::BSONObj inputDoc(mongo::fromInterfaceType(documentBSON));
        auto projected = uassertStatusOK(projection->projectionExec.project(inputDoc));

        // Copy the result into a raw heap buffer handed to the C caller.
        auto numBytes = static_cast<size_t>(projected.objsize());
        auto buffer = new (std::nothrow) char[numBytes];
        uassert(mongo::ErrorCodes::ExceededMemoryLimit,
                "Failed to allocate memory for projection",
                buffer);
        static_cast<void>(std::copy_n(projected.objdata(), numBytes, buffer));

        return mongo::toInterfaceType(buffer);
    });
}
// Exercises KVEngine::getAllCommittedTimestamp(): the all-committed timestamp
// must never move backward, and while any transaction with an earlier commit
// timestamp remains open it must stay strictly below that timestamp, even if
// later-timestamped transactions have already committed.
TEST(KVEngineTestHarness, AllCommittedTimestamp) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();
    // The all-committed timestamp only applies to document-locking engines.
    if (!engine->supportsDocLocking())
        return;

    unique_ptr<RecordStore> rs;
    {
        // Create a capped record store shaped like the oplog.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        CollectionOptions options;
        options.capped = true;
        options.cappedSize = 10240;
        options.cappedMaxDocs = -1;

        NamespaceString oplogNss("local.oplog.rs");
        ASSERT_OK(engine->createRecordStore(&opCtx, oplogNss.ns(), "ident", options));
        rs = engine->getRecordStore(&opCtx, oplogNss.ns(), "ident", options);
        ASSERT(rs);
    }
    {
        Timestamp t11(1, 1);
        Timestamp t12(1, 2);
        Timestamp t21(2, 1);

        auto t11Doc = BSON("ts" << t11);
        auto t12Doc = BSON("ts" << t12);
        auto t21Doc = BSON("ts" << t21);

        Timestamp allCommitted = engine->getAllCommittedTimestamp();
        MyOperationContext opCtx1(engine);
        WriteUnitOfWork uow1(&opCtx1);
        // Insert at t11 but leave uow1 open: all-committed must be held below
        // t11 until this transaction commits.
        ASSERT_EQ(invariant(rs->insertRecord(
                      &opCtx1, t11Doc.objdata(), t11Doc.objsize(), Timestamp::min())),
                  RecordId(1, 1));

        Timestamp lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);  // never moves backward
        ASSERT_LT(allCommitted, t11);                // capped by open uow1

        MyOperationContext opCtx2(engine);
        WriteUnitOfWork uow2(&opCtx2);
        ASSERT_EQ(invariant(rs->insertRecord(
                      &opCtx2, t21Doc.objdata(), t21Doc.objsize(), Timestamp::min())),
                  RecordId(2, 1));
        uow2.commit();

        // t21 committed, but the earlier uncommitted t11 still caps the
        // all-committed timestamp below t11.
        lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);
        ASSERT_LT(allCommitted, t11);

        // A second write in the still-open uow1 must not advance all-committed.
        ASSERT_EQ(invariant(rs->insertRecord(
                      &opCtx1, t12Doc.objdata(), t12Doc.objsize(), Timestamp::min())),
                  RecordId(1, 2));

        lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);
        ASSERT_LT(allCommitted, t11);

        uow1.commit();

        // With every transaction committed, all-committed may now advance,
        // but no further than the newest commit timestamp t21.
        lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);
        ASSERT_LTE(allCommitted, t21);
    }
}
// Deep-copies this object into a single heap chunk: an ownership Holder
// header followed by the raw BSON bytes, then hands the chunk to
// takeOwnership so the returned BSONObj owns its storage.
BSONObj BSONObj::copy() const {
    const int dataSize = objsize();
    char* chunk = static_cast<char*>(mongoMalloc(sizeof(Holder) + dataSize));
    memcpy(chunk + sizeof(Holder), objdata(), dataSize);
    return BSONObj::takeOwnership(chunk);
}
bool BSONObj::valid() const { return validateBSON( objdata(), objsize() ).isOK(); }
// Deep-copies this object's raw bytes into a freshly-allocated SharedBuffer
// and returns a BSONObj that owns that storage.
BSONObj BSONObj::copy() const {
    const int numBytes = objsize();
    auto owned = SharedBuffer::allocate(numBytes);
    memcpy(owned.get(), objdata(), numBytes);
    return BSONObj(std::move(owned));
}