std::vector<DomainAndId2D> getNonOverlapingBlocks(std::vector<DomainAndId2D> const& domainsWithId) {
    std::vector<DomainAndId2D> nonOverlapingBlocks;
    // Start with the first domain, which is taken without modification.
    if (!domainsWithId.empty()) {
        nonOverlapingBlocks.push_back(domainsWithId[0]);
    }
    // All subsequent domains get special treatment: the parts in which they
    //   overlap with previously adopted domains are cut out.
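    // Example (hypothetical coordinates): if Box2D(0,10, 0,10) has already been
    //   adopted and the next domain is Box2D(5,15, 0,10), except() cuts out the
    //   covered part, and only the remainder Box2D(11,15, 0,10) is kept.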
    for (pluint iDomain=1; iDomain<domainsWithId.size(); ++iDomain) {
        std::vector<Box2D> newDomains;
        newDomains.push_back(domainsWithId[iDomain].domain);
        for (pluint iPrevious=0; iPrevious<iDomain; ++iPrevious) {
            std::vector<Box2D> exceptedDomains;
            for (pluint iNewPart=0; iNewPart<newDomains.size(); ++iNewPart) {
                except(newDomains[iNewPart], domainsWithId[iPrevious].domain, exceptedDomains);
            }
            newDomains.swap(exceptedDomains);
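            // At this point, newDomains holds only those parts of the current
            //   domain which lie outside all previously inspected domains.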
        }
        for (pluint iNew=0; iNew<newDomains.size(); ++iNew) {
            nonOverlapingBlocks.push_back(DomainAndId2D( newDomains[iNew],
                                                         domainsWithId[iDomain].id) );
        }
    }
    return nonOverlapingBlocks;
}
template<class OriginalGenerator, class MutableGenerator>
void MultiProcessing2D<OriginalGenerator,MutableGenerator>::subdivideGenerator()
{
    // To start with, determine which multi-blocks are read and which are written
    std::vector<bool> isWritten(multiBlocks.size());
    generator.getModificationPattern(isWritten);
    PLB_ASSERT( isWritten.size() == multiBlocks.size() );

    // The reference block (the one for which the envelope is included if
    //   generator.appliesTo() includes the envelope) is either the multi-block
    //   which is written, or the first multi-block if all are read-only.
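    // For instance, for a data processor which reads a lattice and writes a
    //   scalar field, the scalar field plays the role of the reference block.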
    pluint referenceBlock = 0;
    for (pluint iBlock=0; iBlock<isWritten.size(); ++iBlock) {
        if (isWritten[iBlock]) {
            referenceBlock = iBlock;
            break;
        }
    }

    // In debug mode, make sure that at most one multi-block is written when the envelope is included.
#ifdef PLB_DEBUG
    if ( BlockDomain::usesEnvelope(generator.appliesTo()) ) {
        plint numWritten = 0;
        for (pluint iBlock=0; iBlock<isWritten.size(); ++iBlock) {
            if (isWritten[iBlock]) {
                ++numWritten;
            }
        }
        PLB_ASSERT( numWritten <= 1 );
    }
#endif

    // The first step is to access the domains of the atomic blocks, as well
    //   as their IDs in each of the coupled multi-blocks. The domain corresponds
    //   to the bulk and/or to the envelope, depending on the value of generator.appliesTo().
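    // For illustration (assuming an envelope width of 1): an atomic block with
    //   bulk Box2D(10,19, 0,9) yields the envelope-extended domain Box2D(9,20, -1,10),
    //   i.e. the bulk enlarged by one cell in every direction.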
    std::vector<std::vector<DomainAndId2D> > domainsWithId(multiBlocks.size());
    for (pluint iMulti=0; iMulti<multiBlocks.size(); ++iMulti) {
        std::vector<plint> const& blocks
            = multiBlocks[iMulti]->getMultiBlockManagement().getLocalInfo().getBlocks();
        for (pluint iBlock=0; iBlock<blocks.size(); ++iBlock) {
            plint blockId = blocks[iBlock];
            SmartBulk2D bulk(multiBlocks[iMulti]->getMultiBlockManagement(), blockId);
            switch (generator.appliesTo()) {
            case BlockDomain::bulk:
                domainsWithId[iMulti].push_back(DomainAndId2D(bulk.getBulk(),blockId));
                break;
            case BlockDomain::bulkAndEnvelope:
                // It's only the reference block that should have the envelope. However, we start
                //   by assigning bulk and envelope to all of them, and eliminate overlapping
                //   envelope components further down.
                domainsWithId[iMulti].push_back(DomainAndId2D(bulk.computeEnvelope(),blockId));
                break;
            case BlockDomain::envelope:
                // For the reference block, we restrict ourselves to the envelope, because
                //   that's the desired domain of application.
                if (iMulti==referenceBlock) {
                    std::vector<Box2D> envelopeOnly;
                    except(bulk.computeEnvelope(), bulk.getBulk(), envelopeOnly);
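                    // Assuming, for the sake of illustration, an envelope width
                    //   of 1: envelopeOnly now holds the one-cell-thick ring
                    //   around the bulk, decomposed by except() into a few
                    //   non-overlapping rectangles.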
                    for (pluint iEnvelope=0; iEnvelope<envelopeOnly.size(); ++iEnvelope) {
                        domainsWithId[iMulti].push_back(DomainAndId2D(envelopeOnly[iEnvelope], blockId));
                    }
                }
                // For the other blocks, we need to take bulk and envelope, because all these domains
                //   potentially intersect with the envelope of the reference block.
                else {
                    domainsWithId[iMulti].push_back(DomainAndId2D(bulk.computeEnvelope(),blockId));
                }
                break;
            }
        }
    }

    // If the multi-blocks are not at the same level of grid refinement, the level
    //   of the first block is taken as reference, and the coordinates of the other
    //   blocks are rescaled accordingly.
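    //   Assuming the default dyadic multi-scale convention, each level of
    //   difference rescales the box coordinates by a factor of two; scaleBox
    //   enlarges or shrinks the box accordingly.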
    plint firstLevel = multiBlocks[0]->getMultiBlockManagement().getRefinementLevel();
    for (pluint iMulti=1; iMulti<multiBlocks.size(); ++iMulti) {
        plint relativeLevel = firstLevel -
                              multiBlocks[iMulti]->getMultiBlockManagement().getRefinementLevel();
        if (relativeLevel != 0) {
            for (pluint iBlock=0; iBlock<domainsWithId[iMulti].size(); ++iBlock) {
                domainsWithId[iMulti][iBlock].domain =
                    global::getDefaultMultiScaleManager().scaleBox (
                        domainsWithId[iMulti][iBlock].domain, relativeLevel );
            }
        }
    }

    // If the envelopes are included as well, it is assumed that at most one of
    //   the multi-blocks has write-access. All others (those that have read-only
    //   access) need to be non-overlapping, to avoid multiple writes on the cells
    //   of the write-access multi-block. Thus, overlaps are now eliminated in
    //   the read-access multi-blocks.
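    // Concretely: if the envelopes of two read-only atomic blocks cover a common
    //   cell of the written multi-block, that cell is kept in only one of them,
    //   so that the processor writes to it exactly once.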
    if ( BlockDomain::usesEnvelope(generator.appliesTo()) ) {
        for (pluint iMulti=0; iMulti<multiBlocks.size(); ++iMulti) {
            if (!isWritten[iMulti]) {
                std::vector<DomainAndId2D> nonOverlapBlocks(getNonOverlapingBlocks(domainsWithId[iMulti]));
                domainsWithId[iMulti].swap(nonOverlapBlocks);
            }
        }
    }

    // This is the heart of the whole procedure: the intersections between the
    //   atomic blocks of all coupled multi-blocks are identified.
    std::vector<Box2D> finalDomains;
    std::vector<std::vector<plint> > finalIds;
    intersectDomainsAndIds(domainsWithId, finalDomains, finalIds);
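    // Each entry finalDomains[i] now designates a region which is covered by
    //   exactly one atomic block in every coupled multi-block; finalIds[i]
    //   lists the IDs of these blocks, one per multi-block.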

    // And, to end with, re-create processor generators adapted to the
    //   computed domains of intersection.
    if ( BlockDomain::usesEnvelope(generator.appliesTo()) ) {
        // In case the envelope is included, periodicity must be treated explicitly.
        //   Indeed, the user indicates the domain of applicability with respect to
        //   bulk nodes only. The generator is therefore shifted in all space
        //   directions so that it covers periodic boundary nodes as well.
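        //   For example, on a lattice which is periodic in x, a generator whose
        //   domain touches the boundary x=0 must also be applied with a shift of
        //   +Nx, so that it covers the periodic images of those nodes. The nine
        //   (orientX, orientY) combinations below cover all such images; the
        //   combination (0,0) corresponds to the unshifted generator.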
        plint shiftX = firstMultiBlock->getNx();
        plint shiftY = firstMultiBlock->getNy();
        PeriodicitySwitch2D const& periodicity = firstMultiBlock->periodicity();
        for (plint orientX=-1; orientX<=+1; ++orientX) {
            for (plint orientY=-1; orientY<=+1; ++orientY) {
                if (periodicity.get(orientX,orientY)) {
                    extractGeneratorOnBlocks( finalDomains, finalIds,
                                              orientX*shiftX, orientY*shiftY );
                }
            }
        }
    }
    else {
        extractGeneratorOnBlocks(finalDomains, finalIds);
    }
}