Exemplo n.º 1
0
    void DurRecoveryUnit::rollbackInnermostChanges() {
        // TODO SERVER-15043 reduce logging at default verbosity after a burn-in period
        invariant(_changes.size() <= size_t(std::numeric_limits<int>::max()));
        const int rollbackTo = _startOfUncommittedChangesForLevel.back();
        log() << "   ***** ROLLING BACK " << (_changes.size() - rollbackTo) << " changes";
        for (int i = _changes.size() - 1; i >= rollbackTo; i--) {
            const type_info& type = typeid(*_changes[i]);
            if (type != typeid(MemoryWrite)) {
                log() << "CUSTOM ROLLBACK " << demangleName(type);
            }

            _changes[i]->rollback();
        }
        _changes.erase(_changes.begin() + rollbackTo, _changes.end());

        if (inOutermostUnitOfWork()) {
            // We just rolled back so we are now "clean" and don't need to roll back anymore.
            invariant(_changes.empty());
            _mustRollback = false;
        }
        else {
            // Inner UOW rolled back, so outer must not commit. We can loosen this in the future,
            // but that would require all StorageEngines to support rollback of nested transactions.
            _mustRollback = true;
        }
    }
Exemplo n.º 2
0
    void DurRecoveryUnit::publishChanges() {
        if (!inAUnitOfWork())
            return;

        invariant(!_mustRollback);
        invariant(inOutermostUnitOfWork());

        for (Changes::iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
            (*it)->commit();
        }

        // We now reset to a "clean" state without any uncommited changes.
        _changes.clear();
        invariant(_startOfUncommittedChangesForLevel.front() == 0);
    }
Exemplo n.º 3
0
    void DurRecoveryUnit::commitUnitOfWork() {
        // Commits the current UnitOfWork. Only the outermost UOW actually
        // publishes changes and offers a journal flush; a nested commit merely
        // folds its changes into the enclosing level.
        invariant(inAUnitOfWork());
        invariant(!_mustRollback);

        if (!inOutermostUnitOfWork()) {
            // If we are nested, make all changes for this level part of the containing UnitOfWork.
            // They will be added to the global damages list once the outermost UnitOfWork commits,
            // which it must now do.
            //
            // NOTE(review): advancing this level's start marker to _changes.size()
            // presumably means that when this level is popped, the entries below the
            // marker belong to the enclosing level — confirm against endUnitOfWork,
            // which is not visible in this chunk.
            if (haveUncommitedChangesAtCurrentLevel()) {
                _startOfUncommittedChangesForLevel.back() = _changes.size();
            }
            return;
        }

        // Outermost commit: run all registered Change::commit hooks and clear state.
        publishChanges();

        // global journal flush opportunity
        getDur().commitIfNeeded(_txn);
    }
Exemplo n.º 4
0
    void DurRecoveryUnit::commitChanges() {
        if (!inAUnitOfWork())
            return;

        invariant(!_mustRollback);
        invariant(inOutermostUnitOfWork());
        invariant(_startOfUncommittedChangesForLevel.front().changeIndex == 0);
        invariant(_startOfUncommittedChangesForLevel.front().writeIndex == 0);

        if (getDur().isDurable())
            pushChangesToDurSubSystem();

        for (Changes::const_iterator it = _changes.begin(), end = _changes.end(); it != end; ++it) {
            (*it)->commit();
        }

        // We now reset to a "clean" state without any uncommited changes.
        _changes.clear();
        _writes.clear();
        _preimageBuffer.clear();
    }
Exemplo n.º 5
0
    void DurRecoveryUnit::rollbackInnermostChanges() {
        // Rolls back everything registered since the innermost UnitOfWork began:
        // first the raw disk writes (by restoring their saved pre-images), then
        // the custom Change objects, newest-first; finally truncates both lists.
        //
        // Using signed ints to avoid issues in loops below around index 0.
        invariant(_changes.size() <= size_t(std::numeric_limits<int>::max()));
        invariant(_writes.size() <= size_t(std::numeric_limits<int>::max()));
        const int changesRollbackTo = _startOfUncommittedChangesForLevel.back().changeIndex;
        const int writesRollbackTo = _startOfUncommittedChangesForLevel.back().writeIndex;

        LOG(2) << "   ***** ROLLING BACK " << (_writes.size() - writesRollbackTo) << " disk writes"
               << " and " << (_changes.size() - changesRollbackTo) << " custom changes";

        // First rollback disk writes, then Changes. This matches behavior in other storage engines
        // that either rollback a transaction or don't write a writebatch.

        for (int i = _writes.size() - 1; i >= writesRollbackTo; i--) {
            // Restore the saved bytes over the written region. Looks like
            // _preimageBuffer is the copy source, with _writes[i].offset locating
            // this write's pre-image within it (std::string::copy-style argument
            // order: dest, count, srcPos) — confirm against where _writes is filled.
            // TODO need to add these pages to our "dirty count" somehow.
            _preimageBuffer.copy(_writes[i].addr, _writes[i].len, _writes[i].offset);
        }

        for (int i = _changes.size() - 1; i >= changesRollbackTo; i--) {
            LOG(2) << "CUSTOM ROLLBACK " << demangleName(typeid(*_changes[i]));
            _changes[i]->rollback();
        }

        // Drop the rolled-back entries; everything below the markers is untouched.
        _writes.erase(_writes.begin() + writesRollbackTo, _writes.end());
        _changes.erase(_changes.begin() + changesRollbackTo, _changes.end());

        if (inOutermostUnitOfWork()) {
            // We just rolled back so we are now "clean" and don't need to roll back anymore.
            invariant(_changes.empty());
            invariant(_writes.empty());
            _preimageBuffer.clear();
            _mustRollback = false;
        }
        else {
            // Inner UOW rolled back, so outer must not commit. We can loosen this in the future,
            // but that would require all StorageEngines to support rollback of nested transactions.
            _mustRollback = true;
        }
    }