Example #1
File: gen.c Project: minux/subc
int genadd(int p1, int p2, int swapped) {
	int	rp = PINT, t;

	gentext();
	if (cgload2() || !swapped) {
		t = p1;
		p1 = p2;
		p2 = t;
	}
	if (ptr(p1)) {
		if (needscale(p1)) {
			if (	(p1 & STCMASK) == STCPTR ||
				(p1 & STCMASK) == UNIPTR
			)
				cgscale2by(objsize(deref(p1), TVARIABLE, 1));
			else
				cgscale2();
		}
		rp = p1;
	}
	else if (ptr(p2)) {
		if (needscale(p2)) {
			if (	(p2 & STCMASK) == STCPTR ||
				(p2 & STCMASK) == UNIPTR
			)
				cgscaleby(objsize(deref(p2), TVARIABLE, 1));
			else
				cgscale();
		}
		rp = p2;
	}
	cgadd();
	return rp;
}
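
The ptr()/needscale() logic above implements ordinary C pointer arithmetic: before an integer is added to a pointer, it is scaled by the size of the pointed-to type (objsize() of the dereferenced type). A standalone C++ sketch of the semantics being generated, not subc code:

#include <cassert>
#include <cstdint>

int main() {
    int a[4] = {0, 1, 2, 3};
    int* p = a;
    int* q = p + 2;  // the integer operand is scaled by sizeof(int)
    assert(reinterpret_cast<std::uintptr_t>(q) -
               reinterpret_cast<std::uintptr_t>(p) ==
           2 * sizeof(int));
    return 0;
}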
Example #2
File: gen.c Project: minux/subc
int gensub(int p1, int p2, int swapped) {
	int	rp = PINT;

	gentext();
	if (cgload2() || !swapped) cgswap();
	if (!inttype(p1) && !inttype(p2) && p1 != p2)
		error("incompatible pointer types in binary '-'", NULL);
	if (ptr(p1) && !ptr(p2)) {
		if (needscale(p1)) {
			if (	(p1 & STCMASK) == STCPTR ||
				(p1 & STCMASK) == UNIPTR
			)
				cgscale2by(objsize(deref(p1), TVARIABLE, 1));
			else
				cgscale2();
		}
		rp = p1;
	}
	cgsub();
	if (needscale(p1) && needscale(p2)) {
		if (	(p1 & STCMASK) == STCPTR ||
			(p1 & STCMASK) == UNIPTR
		)
			cgunscaleby(objsize(deref(p1), TVARIABLE, 1));
		else
			cgunscale();
	}
	return rp;
}
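
gensub() additionally unscales after the subtraction: when both operands are pointers, the raw byte difference must be divided by the element size so the result is an element count, which is what cgunscale()/cgunscaleby() emit. A quick standalone illustration:

#include <cassert>

int main() {
    int a[4] = {0, 1, 2, 3};
    // Subtracting pointers yields an element count: the raw byte difference
    // (3 * sizeof(int)) is divided back down by the element size.
    assert(&a[3] - &a[0] == 3);
    return 0;
}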
Example #3
uint8_t* MONGO_API_CALL stitch_support_v1_update_upsert(stitch_support_v1_update* const update,
                                                        stitch_support_v1_status* status) {
    return enterCXX(mongo::getStatusImpl(status), [&] {
        mongo::FieldRefSet immutablePaths;  //  Empty set
        bool docWasModified = false;

        mongo::mutablebson::Document mutableDoc(mongo::BSONObj(),
                                                mongo::mutablebson::Document::kInPlaceDisabled);

        uassertStatusOK(update->updateDriver.populateDocumentWithQueryFields(
            update->opCtx.get(), *update->matcher->matcher.getQuery(), immutablePaths, mutableDoc));

        uassertStatusOK(update->updateDriver.update(mongo::StringData() /* matchedField */,
                                                    &mutableDoc,
                                                    false /* validateForStorage */,
                                                    immutablePaths,
                                                    true /* isInsert */,
                                                    nullptr /* logOpRec */,
                                                    &docWasModified,
                                                    nullptr /*modifiedPaths*/));

        auto outputObj = mutableDoc.getObject();
        size_t outputSize = static_cast<size_t>(outputObj.objsize());
        auto output = new (std::nothrow) char[outputSize];

        uassert(
            mongo::ErrorCodes::ExceededMemoryLimit, "Failed to allocate memory for upsert", output);

        static_cast<void>(std::copy_n(outputObj.objdata(), outputSize, output));
        return mongo::toInterfaceType(output);
    });
}
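
Examples #3, #6, and #12 all end with the same copy-out pattern: the finished document is copied into a caller-owned buffer sized by the BSON object's own length prefix, with allocation failure surfaced as ExceededMemoryLimit. A minimal sketch of that pattern under a hypothetical helper name (not part of the stitch_support API):

#include <cstring>
#include <new>

// Hypothetical helper: copy a finished BSON object into a caller-owned
// buffer. BSON is size-prefixed, so objsize() is simply read out of the
// first four bytes of the object itself.
char* copyBsonBuffer(const char* objdata, std::size_t objsize) {
    char* out = new (std::nothrow) char[objsize];
    if (!out) return nullptr;  // the snippet maps this to ExceededMemoryLimit
    std::memcpy(out, objdata, objsize);
    return out;
}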
Example #4
StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
    const Fetcher::Documents& documents, bool first, Timestamp lastTS) {
    if (first && documents.empty()) {
        return Status(ErrorCodes::OplogStartMissing,
                      str::stream() << "The first batch of oplog entries is empty, but expected at "
                                       "least 1 document matching ts: "
                                    << lastTS.toString());
    }

    DocumentsInfo info;
    // The count of the bytes of the documents read off the network.
    info.networkDocumentBytes = 0;
    info.networkDocumentCount = 0;
    for (auto&& doc : documents) {
        info.networkDocumentBytes += doc.objsize();
        ++info.networkDocumentCount;

        // If this is the first response (to the $gte query) then we already applied the first doc.
        if (first && info.networkDocumentCount == 1U) {
            continue;
        }

        // Check to see if the oplog entry goes back in time for this document.
        const auto docOpTime = OpTime::parseFromOplogEntry(doc);
        // entries must have a "ts" field.
        if (!docOpTime.isOK()) {
            return docOpTime.getStatus();
        }

        info.lastDocument = {doc["h"].numberLong(), docOpTime.getValue()};

        const auto docTS = info.lastDocument.opTime.getTimestamp();
        if (lastTS >= docTS) {
            return Status(ErrorCodes::OplogOutOfOrder,
                          str::stream() << "Out of order entries in oplog. lastTS: "
                                        << lastTS.toString()
                                        << " outOfOrderTS:"
                                        << docTS.toString()
                                        << " in batch with "
                                        << info.networkDocumentCount
                                        << "docs; first-batch:"
                                        << first
                                        << ", doc:"
                                        << doc);
        }
        lastTS = docTS;
    }

    // These numbers are for the documents we will apply.
    info.toApplyDocumentCount = documents.size();
    info.toApplyDocumentBytes = info.networkDocumentBytes;
    if (first) {
        // The count is one less since the first document found was already applied ($gte $ts query)
        // and we will not apply it again.
        --info.toApplyDocumentCount;
        auto alreadyAppliedDocument = documents.cbegin();
        info.toApplyDocumentBytes -= alreadyAppliedDocument->objsize();
    }
    return info;
}
Example #5
 string BSONObj::md5() const {
     md5digest d;
     md5_state_t st;
     md5_init(&st);
     md5_append( &st , (const md5_byte_t*)_objdata , objsize() );
     md5_finish(&st, d);
     return digestToString( d );
 }
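
A usage sketch, assuming the legacy MongoDB C++ driver (the include path below is an assumption):

#include "mongo/db/jsobj.h"  // assumed header for the legacy C++ driver
#include <iostream>

int main() {
    mongo::BSONObjBuilder b;
    b.append("user", "alice");
    mongo::BSONObj obj = b.obj();
    // md5() hashes the raw bytes (objdata(), objsize()), so equality of the
    // serialized form implies equality of digests.
    std::cout << obj.md5() << std::endl;
    return 0;
}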
Example #6
uint8_t* MONGO_API_CALL
stitch_support_v1_update_apply(stitch_support_v1_update* const update,
                               const uint8_t* documentBSON,
                               stitch_support_v1_update_details* update_details,
                               stitch_support_v1_status* status) {
    return enterCXX(mongo::getStatusImpl(status), [&]() {
        mongo::BSONObj document(mongo::fromInterfaceType(documentBSON));
        std::string matchedField;

        if (update->updateDriver.needMatchDetails()) {
            invariant(update->matcher);

            mongo::MatchDetails matchDetails;
            matchDetails.requestElemMatchKey();
            bool isMatch = update->matcher->matcher.matches(document, &matchDetails);
            invariant(isMatch);
            if (matchDetails.hasElemMatchKey()) {
                matchedField = matchDetails.elemMatchKey();
            } else {
                // Empty 'matchedField' indicates that the matcher did not traverse an array.
            }
        }

        mongo::mutablebson::Document mutableDoc(document,
                                                mongo::mutablebson::Document::kInPlaceDisabled);

        mongo::FieldRefSet immutablePaths;  // Empty set
        bool docWasModified = false;

        mongo::FieldRefSetWithStorage modifiedPaths;

        uassertStatusOK(update->updateDriver.update(matchedField,
                                                    &mutableDoc,
                                                    false /* validateForStorage */,
                                                    immutablePaths,
                                                    false /* isInsert */,
                                                    nullptr /* logOpRec*/,
                                                    &docWasModified,
                                                    &modifiedPaths));

        auto outputObj = mutableDoc.getObject();
        size_t outputSize = static_cast<size_t>(outputObj.objsize());
        auto output = new (std::nothrow) char[outputSize];

        uassert(
            mongo::ErrorCodes::ExceededMemoryLimit, "Failed to allocate memory for update", output);

        static_cast<void>(std::copy_n(outputObj.objdata(), outputSize, output));

        if (update_details) {
            update_details->modifiedPaths = modifiedPaths.serialize();
        }

        return mongo::toInterfaceType(output);
    });
}
Example #7
File: sink.c Project: Phuehvk/upb
void *upb_pipeline_allocobj(upb_pipeline *p, const upb_frametype *ft) {
  struct obj *obj = upb_pipeline_alloc(p, objsize(ft->size));
  if (!obj) return NULL;

  obj->prev = p->obj_head;
  obj->ft = ft;
  p->obj_head = obj;
  if (ft->init) ft->init(&obj->data, p);
  return &obj->data;
}
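
The allocator above reserves one block holding a bookkeeping header plus ft->size payload bytes, threads the header onto an intrusive list (obj->prev), and hands the caller a pointer to the payload. A generic C++ sketch of the same pattern with illustrative names, not the upb API:

#include <cstddef>
#include <cstdlib>

// Header and payload share one allocation; headers form an intrusive list.
struct Obj {
    Obj* prev;
    // payload bytes follow the header in the same allocation
};

Obj* g_head = nullptr;

void* allocObj(std::size_t payloadSize) {
    Obj* o = static_cast<Obj*>(std::malloc(sizeof(Obj) + payloadSize));
    if (!o) return nullptr;
    o->prev = g_head;  // link onto the list so everything can be freed later
    g_head = o;
    return o + 1;      // first byte after the header
}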
Example #8
 void BSONObj::dump() const {
     out() << hex;
     const char *p = objdata();
     for ( int i = 0; i < objsize(); i++ ) {
         out() << i << '\t' << ( 0xff & ( (unsigned) *p ) );
         if ( *p >= 'A' && *p <= 'z' )
             out() << '\t' << *p;
         out() << endl;
         p++;
     }
 }
Example #9
void BSONObj::_assertInvalid() const {
    StringBuilder ss;
    int os = objsize();
    ss << "BSONObj size: " << os << " (0x" << integerToHex( os ) << ") is invalid. "
       << "Size must be between 0 and " << BSONObjMaxInternalSize
       << "(" << ( BSONObjMaxInternalSize/(1024*1024) ) << "MB)";
    try {
        BSONElement e = firstElement();
        ss << " First element: " << e.toString();
    }
    catch ( ... ) { }
    massert( 10334 , ss.str() , 0 );
}
Example #10
 string BSONObj::hexDump() const {
     stringstream ss;
     const char *d = objdata();
     int size = objsize();
     for( int i = 0; i < size; ++i ) {
         ss.width( 2 );
         ss.fill( '0' );
         ss << hex << (unsigned)(unsigned char)( d[ i ] ) << dec;
         if ( ( d[ i ] >= '0' && d[ i ] <= '9' ) || ( d[ i ] >= 'A' && d[ i ] <= 'z' ) )
             ss << '\'' << d[ i ] << '\'';
         if ( i != size - 1 )
             ss << ' ';
     }
     return ss.str();
 }
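
As a worked example, the document {"a": 1} serializes to twelve bytes, which hexDump() would render as 0c 00 00 00 10 61'a' 00 01 00 00 00 00 (bytes in the alphanumeric ranges get their character appended in quotes). A sketch verifying the layout:

#include <cassert>

int main() {
    // {"a": 1} as raw BSON: a 4-byte little-endian length prefix, the 0x10
    // (int32) type tag, the field name "a\0", the 4-byte value, and a final
    // EOO (0x00) byte -- 12 bytes in all, which is what objsize() reports.
    const unsigned char bytes[] = {0x0c, 0x00, 0x00, 0x00, 0x10, 'a', 0x00,
                                   0x01, 0x00, 0x00, 0x00, 0x00};
    assert(sizeof(bytes) == 12);
    assert(bytes[0] == sizeof(bytes));  // the length prefix describes itself
    return 0;
}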
Example #11
File: gen.c Project: minux/subc
static void genincptr(int *lv, int inc, int pre) {
	int	y, size;

	size = objsize(deref(lv[LVPRIM]), TVARIABLE, 1);
	gentext();
	y = lv[LVSYM];
	commit();
	if (!y && !pre) cgldinc();
	if (!pre) {
		genrval(lv);
		commit();
	}
	if (!y) {
		if (pre)
			if (inc)
				cginc1pi(size);
			else
				cgdec1pi(size);
		else
			if (inc)
				cginc2pi(size);
			else
				cgdec2pi(size);
	}
	else if (CAUTO == Stcls[y]) {
		if (inc)
			cgincpl(Vals[y], size);
		else
			cgdecpl(Vals[y], size);
	}
	else if (CLSTATC == Stcls[y]) {
		if (inc)
			cgincps(Vals[y], size);
		else
			cgdecps(Vals[y], size);
	}
	else {
		if (inc)
			cgincpg(gsym(Names[y]), size);
		else
			cgdecpg(gsym(Names[y]), size);
	}
	if (pre) genrval(lv);
}
Example #12
uint8_t* MONGO_API_CALL
stitch_support_v1_projection_apply(stitch_support_v1_projection* const projection,
                                   const uint8_t* documentBSON,
                                   stitch_support_v1_status* status) {
    return enterCXX(mongo::getStatusImpl(status), [&]() {
        mongo::BSONObj document(mongo::fromInterfaceType(documentBSON));

        auto outputResult = projection->projectionExec.project(document);
        auto outputObj = uassertStatusOK(outputResult);
        auto outputSize = static_cast<size_t>(outputObj.objsize());
        auto output = new (std::nothrow) char[outputSize];

        uassert(mongo::ErrorCodes::ExceededMemoryLimit,
                "Failed to allocate memory for projection",
                output);

        static_cast<void>(std::copy_n(outputObj.objdata(), outputSize, output));
        return mongo::toInterfaceType(output);
    });
}
Example #13
HeapPtr par_evacuate(HeapPtr heapcell, deque *deq, int me) {
 switch(heapcell[0])
 {
  case SIMPLE_OBJECT: {
   SimpleObject *sobj = (SimpleObject *)par_allocToSpace(vmsizeof(SimpleObject), heapcell, me);
   /* Another thread won the copy race: follow its forwarding pointer. */
   if(!sobj) return (HeapPtr)get_forward(heapcell);
   *sobj = *((SimpleObject *)heapcell);
   return (HeapPtr)sobj;
  }
  case POINTER_OBJECT: {
   PointerObject *pobj = (PointerObject *)par_allocToSpace(objsize(heapcell), heapcell, me);
   if(!pobj) return (HeapPtr)get_forward(heapcell);
   *pobj = *((PointerObject *)heapcell);

   /* The pointer slots live immediately after the header, so compute the
      offset in bytes rather than in PointerObject-sized strides. */
   pobj->pointers = (void *)((char *)pobj + vmsizeof(PointerObject));
   for(int i = 0; i < pobj->sizePoint; i++)
   {
    pobj->pointers[i] = ((PointerObject *)heapcell)->pointers[i];
    deque_push_bottom(deq, &pobj->pointers[i]);
   }
   return (HeapPtr)pobj;
  }
 }
 return NULL; /* unreachable for well-formed heap cells */
}
Example #14
TEST(KVEngineTestHarness, AllCommittedTimestamp) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();
    if (!engine->supportsDocLocking())
        return;

    unique_ptr<RecordStore> rs;
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        CollectionOptions options;
        options.capped = true;
        options.cappedSize = 10240;
        options.cappedMaxDocs = -1;

        NamespaceString oplogNss("local.oplog.rs");
        ASSERT_OK(engine->createRecordStore(&opCtx, oplogNss.ns(), "ident", options));
        rs = engine->getRecordStore(&opCtx, oplogNss.ns(), "ident", options);
        ASSERT(rs);
    }
    {
        Timestamp t11(1, 1);
        Timestamp t12(1, 2);
        Timestamp t21(2, 1);

        auto t11Doc = BSON("ts" << t11);
        auto t12Doc = BSON("ts" << t12);
        auto t21Doc = BSON("ts" << t21);

        Timestamp allCommitted = engine->getAllCommittedTimestamp();
        MyOperationContext opCtx1(engine);
        WriteUnitOfWork uow1(&opCtx1);
        ASSERT_EQ(invariant(rs->insertRecord(
                      &opCtx1, t11Doc.objdata(), t11Doc.objsize(), Timestamp::min())),
                  RecordId(1, 1));

        Timestamp lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);
        ASSERT_LT(allCommitted, t11);

        MyOperationContext opCtx2(engine);
        WriteUnitOfWork uow2(&opCtx2);
        ASSERT_EQ(invariant(rs->insertRecord(
                      &opCtx2, t21Doc.objdata(), t21Doc.objsize(), Timestamp::min())),
                  RecordId(2, 1));
        uow2.commit();

        lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);
        ASSERT_LT(allCommitted, t11);

        ASSERT_EQ(invariant(rs->insertRecord(
                      &opCtx1, t12Doc.objdata(), t12Doc.objsize(), Timestamp::min())),
                  RecordId(1, 2));

        lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);
        ASSERT_LT(allCommitted, t11);

        uow1.commit();

        lastAllCommitted = allCommitted;
        allCommitted = engine->getAllCommittedTimestamp();
        ASSERT_GTE(allCommitted, lastAllCommitted);
        ASSERT_LTE(allCommitted, t21);
    }
}
Example #15
static void genincptr(int *lv, int inc, int pre)
{
    int y;
    int size = objsize(deref(lv[LVPRIM]), TVARIABLE, 1);

    gentext();
    y = lv[LVSYM];
    commit();

    if (!y && !pre)
        cgldinc();

    if (!pre)
    {
        rvalue(lv);
        commit();
    }

    if (!y)
    {
        if (pre)
        {
            if (inc)
                cginc1pi(size);
            else
                cgdec1pi(size);
        }
        else
        {
            if (inc)
                cginc2pi(size);
            else
                cgdec2pi(size);
        }
    }
    else if (CAUTO == symbols[y].stcl)
    {
        if (inc)
            cgincpl(symbols[y].value, size);
        else
            cgdecpl(symbols[y].value, size);
    }
    else if (CLSTATC == symbols[y].stcl)
    {
        if (inc)
            cgincps(symbols[y].value, size);
        else
            cgdecps(symbols[y].value, size);
    }
    else
    {
        if (inc)
            cgincpg(gsym(symbols[y].name), size);
        else
            cgdecpg(gsym(symbols[y].name), size);
    }

    if (pre)
        rvalue(lv);
}
Example #16
    int BSONElement::size( int maxLen ) const {
        if ( totalSize >= 0 )
            return totalSize;

        int remain = maxLen - fieldNameSize() - 1;
        
        int x = 0;
        switch ( type() ) {
        case EOO:
        case Undefined:
        case jstNULL:
        case MaxKey:
        case MinKey:
            break;
        case Bool:
            x = 1;
            break;
        case NumberInt:
            x = 4;
            break;
        case Timestamp:
        case Date:
        case NumberDouble:
            x = 8;
            break;
        case jstOID:
            x = 12;
            break;
        case Symbol:
        case Code:
        case String:
            massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
            x = valuestrsize() + 4;
            break;
        case CodeWScope:
            massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
            x = objsize();
            break;

        case DBRef:
            massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
            x = valuestrsize() + 4 + 12;
            break;
        case Object:
        case Array:
            massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
            x = objsize();
            break;
        case BinData:
            massert( "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
            x = valuestrsize() + 4 + 1/*subtype*/;
            break;
        case RegEx:
        {
            const char *p = value();
            int len1 = ( maxLen == -1 ) ? strlen( p ) : strnlen( p, remain );
            massert( "Invalid regex string", len1 != -1 );
            p = p + len1 + 1;
            int len2 = ( maxLen == -1 ) ? strlen( p ) : strnlen( p, remain - len1 - 1 );
            massert( "Invalid regex options string", len2 != -1 );
            x = len1 + 1 + len2 + 1;
        }
        break;
        default: {
            stringstream ss;
            ss << "BSONElement: bad type " << (int) type();
            massert(ss.str().c_str(),false);
        }
        }
        totalSize =  x + fieldNameSize() + 1; // BSONType

        return totalSize;
    }
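
To make the bookkeeping concrete: a NumberInt element "a": 1 takes the 4-byte value (x), a 2-byte field name ("a" plus its NUL terminator, which is what fieldNameSize() returns), and 1 leading type byte, so totalSize is 7. A trivial check:

#include <cassert>

int main() {
    // NumberInt element "a": 1 inside a document:
    //   x = 4 (int32 payload), fieldNameSize() = 2 ("a" plus its NUL),
    //   plus 1 for the leading type byte => totalSize = 7.
    const int x = 4, fieldNameSize = 2, typeByte = 1;
    assert(x + fieldNameSize + typeByte == 7);
    return 0;
}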
Example #17
void BackgroundSync::_fetcherCallback(const StatusWith<Fetcher::QueryResponse>& result,
                                      BSONObjBuilder* bob,
                                      const HostAndPort& source,
                                      OpTime lastOpTimeFetched,
                                      long long lastFetchedHash,
                                      Milliseconds fetcherMaxTimeMS,
                                      Status* returnStatus) {
    // if target cut connections between connecting and querying (for
    // example, because it stepped down) we might not have a cursor
    if (!result.isOK()) {
        LOG(2) << "Error returned from oplog query: " << result.getStatus();
        *returnStatus = result.getStatus();
        return;
    }

    if (inShutdown()) {
        LOG(2) << "Interrupted by shutdown while querying oplog. 1";  // 1st instance.
        return;
    }

    // Check if we have been stopped.
    if (isStopped()) {
        LOG(2) << "Interrupted by stop request while querying the oplog. 1";  // 1st instance.
        return;
    }

    const auto& queryResponse = result.getValue();
    bool syncSourceHasSyncSource = false;
    OpTime sourcesLastOpTime;

    // Forward metadata (containing liveness information) to replication coordinator.
    bool receivedMetadata =
        queryResponse.otherFields.metadata.hasElement(rpc::kReplSetMetadataFieldName);
    if (receivedMetadata) {
        auto metadataResult =
            rpc::ReplSetMetadata::readFromMetadata(queryResponse.otherFields.metadata);
        if (!metadataResult.isOK()) {
            error() << "invalid replication metadata from sync source " << source << ": "
                    << metadataResult.getStatus() << ": " << queryResponse.otherFields.metadata;
            return;
        }
        const auto& metadata = metadataResult.getValue();
        _replCoord->processReplSetMetadata(metadata);
        if (metadata.getPrimaryIndex() != rpc::ReplSetMetadata::kNoPrimary) {
            _replCoord->cancelAndRescheduleElectionTimeout();
        }
        syncSourceHasSyncSource = metadata.getSyncSourceIndex() != -1;
        sourcesLastOpTime = metadata.getLastOpVisible();
    }

    const auto& documents = queryResponse.documents;
    auto firstDocToApply = documents.cbegin();
    auto lastDocToApply = documents.cend();

    if (!documents.empty()) {
        LOG(2) << "fetcher read " << documents.size()
               << " operations from remote oplog starting at " << documents.front()["ts"]
               << " and ending at " << documents.back()["ts"];
    } else {
        LOG(2) << "fetcher read 0 operations from remote oplog";
    }

    // Check start of remote oplog and, if necessary, stop fetcher to execute rollback.
    if (queryResponse.first) {
        auto getNextOperation = [&firstDocToApply, lastDocToApply]() -> StatusWith<BSONObj> {
            if (firstDocToApply == lastDocToApply) {
                return Status(ErrorCodes::OplogStartMissing, "remote oplog start missing");
            }
            return *(firstDocToApply++);
        };

        *returnStatus = checkRemoteOplogStart(getNextOperation, lastOpTimeFetched, lastFetchedHash);
        if (!returnStatus->isOK()) {
            // Stop fetcher and execute rollback.
            return;
        }

        // If this is the first batch and no rollback is needed, we should have advanced
        // the document iterator.
        invariant(firstDocToApply != documents.cbegin());
    }

    // No work to do if we are draining/primary.
    if (_replCoord->isWaitingForApplierToDrain() || _replCoord->getMemberState().primary()) {
        LOG(2) << "Interrupted by waiting for applier to drain "
               << "or becoming primary while querying the oplog. 1";  // 1st instance.
        return;
    }

    // The count of the bytes of the documents read off the network.
    int networkDocumentBytes = 0;
    Timestamp lastTS;
    {
        stdx::unique_lock<stdx::mutex> lock(_mutex);
        // If we are stopped then return without queueing this batch to apply.
        if (_stopped) {
            LOG(2) << "Interrupted by stop request while querying the oplog. 2";  // 2nd instance.
            return;
        }
        lastTS = _lastOpTimeFetched.getTimestamp();
    }
    int count = 0;
    for (auto&& doc : documents) {
        networkDocumentBytes += doc.objsize();
        ++count;

        // If this is the first response (to the $gte query) then we already applied the first doc.
        if (queryResponse.first && count == 1) {
            continue;
        }

        // Check to see if the oplog entry goes back in time for this document.
        const auto docOpTime = OpTime::parseFromOplogEntry(doc);
        fassertStatusOK(34362, docOpTime.getStatus());  // entries must have a "ts" field.
        const auto docTS = docOpTime.getValue().getTimestamp();

        if (lastTS >= docTS) {
            *returnStatus = Status(
                ErrorCodes::OplogOutOfOrder,
                str::stream() << "Reading the oplog from" << source.toString()
                              << " returned out of order entries. lastTS: " << lastTS.toString()
                              << " outOfOrderTS:" << docTS.toString() << " at count:" << count);
            return;
        }
        lastTS = docTS;
    }

    // These numbers are for the documents we will apply.
    auto toApplyDocumentCount = documents.size();
    auto toApplyDocumentBytes = networkDocumentBytes;
    if (queryResponse.first) {
        // The count is one less since the first document found was already applied ($gte $ts query)
        // and we will not apply it again. We just needed to check it so we didn't rollback, or
        // error above.
        --toApplyDocumentCount;
        const auto alreadyAppliedDocument = documents.cbegin();
        toApplyDocumentBytes -= alreadyAppliedDocument->objsize();
    }

    if (toApplyDocumentBytes > 0) {
        // Wait for enough space.
        _buffer.waitForSpace(toApplyDocumentBytes);

        OCCASIONALLY {
            LOG(2) << "bgsync buffer has " << _buffer.size() << " bytes";
        }

        // Buffer docs for later application.
        std::vector<BSONObj> objs{firstDocToApply, lastDocToApply};
        _buffer.pushAllNonBlocking(objs);

        // Inc stats.
        opsReadStats.increment(documents.size());  // we read all of the docs in the query.
        networkByteStats.increment(networkDocumentBytes);
        bufferCountGauge.increment(toApplyDocumentCount);
        bufferSizeGauge.increment(toApplyDocumentBytes);

        // Update last fetched info.
        auto lastDoc = objs.back();
        {
            stdx::unique_lock<stdx::mutex> lock(_mutex);
            _lastFetchedHash = lastDoc["h"].numberLong();
            _lastOpTimeFetched = fassertStatusOK(28770, OpTime::parseFromOplogEntry(lastDoc));
            LOG(3) << "batch resetting _lastOpTimeFetched: " << _lastOpTimeFetched;
        }
    }
}
Example #18
BSONObj BSONObj::copy() const {
    char* storage = static_cast<char*>(mongoMalloc(sizeof(Holder) + objsize()));
    memcpy(storage + sizeof(Holder), objdata(), objsize());
    return BSONObj::takeOwnership(storage);
}
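
copy() matters when a BSONObj was constructed over bytes it does not own; the copy detaches the object into its own Holder-prefixed allocation. A sketch, assuming the legacy driver's non-owning BSONObj(const char*) constructor (include path is an assumption):

#include "mongo/db/jsobj.h"  // assumed header for the legacy C++ driver

// A BSONObj built over externally owned bytes dangles once that buffer is
// gone; copy() returns an object that owns its own storage.
mongo::BSONObj snapshot(const char* externallyOwnedBson) {
    mongo::BSONObj view(externallyOwnedBson);  // non-owning view
    return view.copy();                        // owns Holder + objsize() bytes
}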
Example #19
bool BSONObj::valid() const {
    return validateBSON( objdata(), objsize() ).isOK();
}
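
A possible use, assuming the legacy driver API, where construction performs only a cheap size check while valid() runs the full validateBSON() walk over objdata()/objsize():

#include "mongo/db/jsobj.h"  // assumed header for the legacy C++ driver

// Sketch: check bytes received from an untrusted peer before trusting them.
bool safeToUse(const char* untrustedBytes) {
    mongo::BSONObj candidate(untrustedBytes);
    return candidate.valid();
}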
Example #20
BSONObj BSONObj::copy() const {
    auto storage = SharedBuffer::allocate(objsize());
    memcpy(storage.get(), objdata(), objsize());
    return BSONObj(std::move(storage));
}