UT_sint32 ABI_Collab_Import::_getIncomingAdjustmentForState(const UT_GenericVector<ChangeAdjust *>* pExpAdjusts, UT_sint32 iStart, UT_sint32 iEnd, UT_sint32 iIncomingPos, UT_sint32 iIncomingLength, const UT_UTF8String& sIncomingUUID, std::deque<int>& incAdjs)
{
	UT_DEBUGMSG(("ABI_Collab_Import::_getIncomingAdjustmentForState()\n"));
	UT_return_val_if_fail(pExpAdjusts, 0);

	UT_sint32 iAdjust = 0;
	for (UT_sint32 j = iEnd-1; j>=iStart; j--)
	{
		ChangeAdjust* pPrev = pExpAdjusts->getNthItem(j);
		if (sIncomingUUID == pPrev->getRemoteDocUUID())
		{
			UT_DEBUGMSG(("Looking at possible adjustment with queue pos: %d, -adjust: %d\n", j, -pPrev->getLocalAdjust()));

			if (static_cast<UT_sint32>(pPrev->getRemoteDocPos()) < iIncomingPos+iAdjust)
			{
				if (pPrev->getLocalAdjust() > 0)
				{
					if (_isOverlapping(pPrev->getRemoteDocPos(), pPrev->getLocalLength(), iIncomingPos+iAdjust, iIncomingLength))
					{
						// NOTE: if the position was in the middle of an insert done previously, 
						// then we only need to take the insertion adjust partially into account
						UT_DEBUGMSG(("ADJUST OVERLAP DETECTED with queue pos: %d, pPrev->getRemoteDocPos(): %d, pPrev->m_iLength: %d, iIncomingPos: %d, iAdjust: %d\n", 
									j, pPrev->getRemoteDocPos(), pPrev->getLocalLength(), iIncomingPos, iAdjust));
						iAdjust -= (iIncomingPos+iAdjust - pPrev->getRemoteDocPos());
						incAdjs.push_front(iIncomingPos+iAdjust - pPrev->getRemoteDocPos());
					}
					else
					{
						UT_DEBUGMSG(("ADJUSTMENT influenced normally by queue pos: %d\n", j));
						iAdjust -= pPrev->getLocalAdjust();
						incAdjs.push_front(pPrev->getLocalAdjust());
					}
				}
				else if (pPrev->getLocalAdjust() < 0)
				{
					// TODO: is the < 0 case correctly handled like this?
					UT_DEBUGMSG(("ADJUSTMENT influence by delete by queue pos: %d, pPrev->m_iProgDocPos: %d, pPrev->getRemoteDocPos(): %d\n", j, pPrev->getRemoteDocPos(), pPrev->getRemoteDocPos()));
					iAdjust -= pPrev->getLocalAdjust();
					incAdjs.push_front(pPrev->getLocalAdjust());		
				}
				else
				{
					UT_DEBUGMSG(("ADJUSTMENT influence of 0 by queue pos: %d, pPrev->m_iProgDocPos: %d, pPrev->getRemoteDocPos(): %d\n", j, pPrev->getRemoteDocPos(), pPrev->getRemoteDocPos()));
					incAdjs.push_front(0);
				}
			}
			else if (static_cast<UT_sint32>(pPrev->getRemoteDocPos()) > iIncomingPos+iAdjust)
			{
				UT_DEBUGMSG(("no ADJUSTMENT influence (insertion point smaller than checkpoint) by queue pos: %d, pPrev->m_iProgDocPos: %d, pPrev->getRemoteDocPos(): %d\n", j, pPrev->getRemoteDocPos(), pPrev->getRemoteDocPos()));
				incAdjs.push_front(0);
			}
			else
			{
				UT_DEBUGMSG(("no ADJUSTMENT influence (insertion point equals checkpoint) by queue pos: %d, pPrev->m_iProgDocPos: %d, pPrev->getRemoteDocPos(): %d\n", j, pPrev->getRemoteDocPos(), pPrev->getRemoteDocPos()));
				incAdjs.push_front(0);
			}
		}
	}
	return iAdjust;
}
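
The helper _isOverlapping() used above is not part of this listing. As a point of reference only, here is a minimal sketch of the kind of interval test the call shape suggests, assuming half-open ranges [pos, pos+len); this is an assumption, not AbiWord's actual implementation:

// Sketch only: a generic half-open interval overlap test matching the call shape
// _isOverlapping(pos1, len1, pos2, len2) used above. Two ranges collide when each
// one starts before the other ends.
static bool sketch_isOverlapping(UT_sint32 iPos1, UT_sint32 iLen1, UT_sint32 iPos2, UT_sint32 iLen2)
{
	return (iPos1 < iPos2 + iLen2) && (iPos2 < iPos1 + iLen1);
}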
Example 2
int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
                                                    const std::vector<ChunkType>& chunks) {
    _assertAttached();

    // Apply the chunk changes to the ranges and versions
    //
    // The overall idea here is to work in two steps:
    // 1. For all the new chunks we find, increment the maximum version per-shard and
    //      per-collection, and remove any conflicting chunks from the ranges.
    // 2. For all the new chunks we're interested in (all of them for mongos, just chunks on
    //      the shard for mongod) add them to the ranges.

    std::vector<ChunkType> newTracked;

    // Store epoch now so it doesn't change when we change max
    OID currEpoch = _maxVersion->epoch();

    _validDiffs = 0;

    for (const ChunkType& chunk : chunks) {
        const ChunkVersion& chunkVersion = chunk.getVersion();

        if (!chunkVersion.hasEqualEpoch(currEpoch)) {
            warning() << "got invalid chunk version " << chunkVersion << " in document "
                      << redact(chunk.toString())
                      << " when trying to load differing chunks at version "
                      << ChunkVersion(
                             _maxVersion->majorVersion(), _maxVersion->minorVersion(), currEpoch);

            // Don't keep loading, since we know we'll be broken here
            return -1;
        }

        _validDiffs++;

        // Get max changed version and chunk version
        if (chunkVersion > *_maxVersion) {
            *_maxVersion = chunkVersion;
        }

        // Chunk version changes
        ShardId shard = shardFor(txn, chunk.getShard());

        typename MaxChunkVersionMap::const_iterator shardVersionIt = _maxShardVersions->find(shard);
        if (shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion) {
            (*_maxShardVersions)[shard] = chunkVersion;
        }

        // See if we need to remove any chunks we are currently tracking because of this chunk's
        // changes
        {
            RangeOverlap overlap = _overlappingRange(chunk.getMin(), chunk.getMax());
            _currMap->erase(overlap.first, overlap.second);
        }

        // Figure out which of the new chunks we need to track
        // Important - we need to actually own this doc, in case the cursor decides to getMore
        // or unbuffer.
        if (isTracked(chunk)) {
            newTracked.push_back(chunk);
        }
    }

    LOG(3) << "found " << _validDiffs << " new chunks for collection " << _ns << " (tracking "
           << newTracked.size() << "), new version is " << *_maxVersion;

    for (const ChunkType& chunk : newTracked) {
        // Invariant enforced by sharding - it's possible to read inconsistent state due to
        // getMore and yielding, so we want to detect it as early as possible.
        //
        // TODO: This checks for overlap, we also should check for holes here iff we're
        // tracking all chunks.
        if (_isOverlapping(chunk.getMin(), chunk.getMax())) {
            return -1;
        }

        _currMap->insert(rangeFor(txn, chunk));
    }

    return _validDiffs;
}
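
The _overlappingRange() helper and the RangeMap behind _currMap are not shown here. A minimal sketch of how such an overlap lookup can work on an ordered map of disjoint ranges keyed by their start (an assumption about the data layout, not MongoDB's actual RangeMap) might be:

#include <map>
#include <utility>

// Sketch only: disjoint half-open ranges stored as start -> end.
typedef std::map<int, int> SketchRangeMap;
typedef std::pair<SketchRangeMap::iterator, SketchRangeMap::iterator> SketchRangeOverlap;

// Returns the iterator range of entries that intersect [minKey, maxKey). Because the
// stored ranges are disjoint, only the entry immediately before lower_bound(minKey)
// can reach back into the query range.
static SketchRangeOverlap sketchOverlappingRange(SketchRangeMap& ranges, int minKey, int maxKey) {
    SketchRangeMap::iterator low = ranges.lower_bound(minKey);
    if (low != ranges.begin()) {
        SketchRangeMap::iterator prev = low;
        --prev;
        if (prev->second > minKey) {
            low = prev;
        }
    }
    SketchRangeMap::iterator high = ranges.lower_bound(maxKey);
    return SketchRangeOverlap(low, high);
}

The returned pair can then be erased wholesale, mirroring the _currMap->erase(overlap.first, overlap.second) call above.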
/*!
 * Scan back through the changerecords (CRs) we've emitted since this remote CR was sent
 * and see if any of them overlap this one.
 * Returns true if there is a collision.
 */
bool ABI_Collab_Import::_checkForCollision(const AbstractChangeRecordSessionPacket& acrsp, UT_sint32& iRev, UT_sint32& iImportAdjustment)
{
	UT_DEBUGMSG(("ABI_Collab_Import::_checkForCollision() - pos: %d, length: %d, UUID: %s, remoterev: %d\n", 
				 acrsp.getPos(), acrsp.getLength(), acrsp.getDocUUID().utf8_str(), acrsp.getRemoteRev()));

	ABI_Collab_Export* pExport = m_pAbiCollab->getExport();
	UT_return_val_if_fail(pExport, false);

	const UT_GenericVector<ChangeAdjust *>* pExpAdjusts = pExport->getAdjusts();
	UT_return_val_if_fail(pExpAdjusts, false);

	iImportAdjustment = 0;

	// get the collision sequence (if any)
	UT_sint32 iStart = 0;
	UT_sint32 iEnd = 0;
	_calculateCollisionSeqence(acrsp.getRemoteRev(), acrsp.getDocUUID(), iStart, iEnd);
	UT_return_val_if_fail(iStart >= 0 && iEnd >= 0, false);
	if (iStart == iEnd)
	{
		UT_DEBUGMSG(("Empty collision sequence, no possible collision\n"));
		return false;
	}

	std::deque<int> incAdjs;
	UT_sint32 iIncomingStateAdjust = _getIncomingAdjustmentForState(pExpAdjusts, iStart, iEnd, acrsp.getPos(), acrsp.getLength(), acrsp.getDocUUID(), incAdjs);
	UT_DEBUGMSG(("IINCOMMINGSTATEADJUST: %d\n", iIncomingStateAdjust));

	// Now scan forward and look for an overlap of the new changerecord with the collision sequence
	UT_DEBUGMSG(("Checking collision sequence [%d..%d) for overlapping changerecords\n", iStart, iEnd));
	bool bDenied = false;
	for (UT_sint32 i = iStart; i < iEnd; i++)
	{
		ChangeAdjust* pChange = pExpAdjusts->getNthItem(i);
		if (pChange)
		{
			UT_DEBUGMSG(("Looking at pChange->getRemoteDocUUID(): %s\n", pChange->getRemoteDocUUID().utf8_str()));

			if (pChange->getRemoteDocUUID() != acrsp.getDocUUID())
			{
				if (_isOverlapping(acrsp.getPos()+iIncomingStateAdjust, acrsp.getLength(), pChange->getLocalPos(), pChange->getLocalLength()) &&
					!AbiCollab_ImportRuleSet::isOverlapAllowed(*pChange, acrsp, iIncomingStateAdjust))
				{
					UT_DEBUGMSG(("Fatal overlap detected for incoming pos: %d, incoming length: %d, pChange->getLocalPos(): %d, pChange->getLocalLength(): %d\n", 
							acrsp.getPos(), acrsp.getLength(), pChange->getLocalPos(), pChange->getLocalLength()));
					iRev = pChange->getLocalRev();
					bDenied = true;
					break;
				}
				else
				{
					UT_DEBUGMSG(("No (fatal) overlap detected for incoming pos: %d, incoming length: %d, pChange->getLocalPos(): %d, pChange->getLocalLength(): %d\n", 
							acrsp.getPos(), acrsp.getLength(), pChange->getLocalPos(), pChange->getLocalLength()));
				}
				
				if (pChange->getLocalPos() < acrsp.getPos()+iIncomingStateAdjust)
				{
					UT_DEBUGMSG(("Normal Upward influence detected\n"));
					iIncomingStateAdjust += pChange->getLocalAdjust();
				}
			}
			else
			{
				UT_DEBUGMSG(("Skipping overlap detection: changerecords came from the same document; incoming pos: %d, incoming length: %d, pChange->getLocalPos(): %d, pChange->getLocalLength(): %d\n", 
							acrsp.getPos(), acrsp.getLength(), pChange->getLocalPos(), pChange->getLocalLength()));
				if (!incAdjs.empty())
				{
					iIncomingStateAdjust += incAdjs.front();
					incAdjs.pop_front();
				}
				else
				{
					UT_ASSERT_HARMLESS(UT_SHOULD_NOT_HAPPEN);
				}
			}

			UT_DEBUGMSG(("Now: iIncomingStateAdjust: %d\n", iIncomingStateAdjust));
		}
		else
			UT_return_val_if_fail(false, false);
	}

	if (!bDenied && !incAdjs.empty())
	{
		UT_ASSERT_HARMLESS(UT_SHOULD_NOT_HAPPEN);
	}

	while (!incAdjs.empty())
	{
		UT_DEBUGMSG(("Adding left-over incoming adjustment: %d\n", incAdjs.front()));
		iIncomingStateAdjust += incAdjs.front();
		incAdjs.pop_front();
	}

	iImportAdjustment = iIncomingStateAdjust;
	UT_DEBUGMSG(("Full import adjustment: %d\n", iImportAdjustment));

	return bDenied;
}
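
The forward scan above effectively transforms the incoming remote position against the local changes applied since the remote state was captured. A stripped-down sketch of that transform, with hypothetical names and under the assumption that each local change is summarized by its position and signed length delta (not AbiWord's actual types), could look like this:

#include <vector>

// Sketch only: a local change summarized by where it happened and by how much it
// grew or shrank the document (positive for inserts, negative for deletes).
struct SketchLocalChange
{
	UT_sint32 iLocalPos;
	UT_sint32 iLocalAdjust;
};

// Shift an incoming remote position past every local change that starts before it,
// mirroring the "Normal Upward influence" accumulation in _checkForCollision().
static UT_sint32 sketch_transformIncomingPos(UT_sint32 iIncomingPos,
                                             const std::vector<SketchLocalChange>& localChanges)
{
	UT_sint32 iAdjustedPos = iIncomingPos;
	for (std::vector<SketchLocalChange>::const_iterator it = localChanges.begin();
	     it != localChanges.end(); ++it)
	{
		if (it->iLocalPos < iAdjustedPos)
			iAdjustedPos += it->iLocalAdjust;
	}
	return iAdjustedPos;
}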