Example 1
/// Execute a data processor over several MultiGrid3D
void executeDataProcessor( DataProcessorGenerator3D const& generator,
                           std::vector<MultiGrid3D*> multiGrids,
                           plint referenceLevel )
{
    if(multiGrids.empty()) return;
    
    for (plint iLevel=0; iLevel<(plint)multiGrids[0]->getNumLevels(); ++iLevel) {
        // Work on a clone of the generator, rescaled below to the resolution of the current level.
        std::unique_ptr<DataProcessorGenerator3D> localGenerator(generator.clone());
        int dxScale = (int)referenceLevel - (int)iLevel;
        int dtScale = dxScale;  // TODO: here we assume convective scaling; the general case should be considered.
        
        plint boxRescaleFactor = util::roundToInt(util::twoToThePowerPlint(std::abs(referenceLevel-iLevel)));
        if (dxScale < 0) // if we go to a coarser grid
            localGenerator->divide(boxRescaleFactor);  
        else  // otherwise we go to a finer grid
            localGenerator->multiply(boxRescaleFactor);
        
        localGenerator->setscale(dxScale,dtScale);
        
        // Collect, for each multi-grid, the multi-block component at the current level.
        std::vector<MultiBlock3D*> localBlocks(multiGrids.size());
        for (plint iBlock=0; iBlock<(plint)localBlocks.size(); ++iBlock) {
            localBlocks[iBlock] = &multiGrids[iBlock]->getComponent(iLevel);
        }
        // Delegate to the MultiBlock3D version of executeDataProcessor.
        executeDataProcessor(*localGenerator, localBlocks);
    }
}
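For orientation, here is a minimal calling sketch. It is only a sketch: MyFunctional3D is a hypothetical user-defined processing functional, and the use of a BoxProcessorGenerator3D wrapper built from it is an assumption about the surrounding application code.

// Hypothetical caller: run one data processor across all levels of two multi-grids,
// taking the finest level as the reference scale.
void runOnMultiGrids(MultiGrid3D& gridA, MultiGrid3D& gridB, Box3D domain)
{
    std::vector<MultiGrid3D*> multiGrids;
    multiGrids.push_back(&gridA);
    multiGrids.push_back(&gridB);

    // MyFunctional3D stands for a user-defined box processing functional (assumption).
    BoxProcessorGenerator3D generator(new MyFunctional3D, domain);

    plint referenceLevel = (plint)gridA.getNumLevels()-1;
    executeDataProcessor(generator, multiGrids, referenceLevel);
}
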
void addInternalProcessor( DataProcessorGenerator3D const& generator, MultiBlock3D& actor,
                           std::vector<MultiBlock3D*> multiBlockArgs, plint level )
{
    // Subdivide the generator according to the distribution of the multi-block arguments
    // into pieces that each act on a single atomic block.
    MultiProcessing3D<DataProcessorGenerator3D const, DataProcessorGenerator3D >
        multiProcessing(generator, multiBlockArgs);
    std::vector<DataProcessorGenerator3D*> const& retainedGenerators = multiProcessing.getRetainedGenerators();
    std::vector<std::vector<plint> > const& atomicBlockNumbers = multiProcessing.getAtomicBlockNumbers();

    for (pluint iGenerator=0; iGenerator<retainedGenerators.size(); ++iGenerator) {
        std::vector<AtomicBlock3D*> extractedAtomicBlocks(multiBlockArgs.size());
        for (pluint iBlock=0; iBlock<extractedAtomicBlocks.size(); ++iBlock) {
            extractedAtomicBlocks[iBlock] =
                &multiBlockArgs[iBlock]->getComponent(atomicBlockNumbers[iGenerator][iBlock]);
        }
        // It is assumed that the actor has the same distribution as block 0.
        PLB_ASSERT(!atomicBlockNumbers[iGenerator].empty());
        AtomicBlock3D& atomicActor = actor.getComponent(atomicBlockNumbers[iGenerator][0]);
        // Delegate to the "AtomicBlock version" of addInternal.
        plb::addInternalProcessor(*retainedGenerators[iGenerator], atomicActor, extractedAtomicBlocks, level);
    }
    // Subscribe the processor in the multi-block. This guarantees that the multi-block is aware
    //   of the maximal current processor level, and that it instantiates the communication pattern
    //   for updating the envelopes after processor execution.
    std::vector<MultiBlock3D*> updatedMultiBlocks;
    std::vector<modif::ModifT> typeOfModification;
    multiProcessing.multiBlocksWhichRequireUpdate(updatedMultiBlocks, typeOfModification);
    actor.subscribeProcessor (
            level,
            updatedMultiBlocks, typeOfModification,
            BlockDomain::usesEnvelope(generator.appliesTo()) );
    actor.storeProcessor(generator, multiBlockArgs, level);
}
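A corresponding calling sketch for the multi-block version, again hedged: MyCouplingFunctional3D and the generator type are assumptions, and processor level 0 is used on the assumption that level-0 processors are the ones executed automatically by the actor block.

// Hypothetical caller: attach a coupling processor to a lattice ("actor") that takes one
// additional multi-block argument.
void attachCoupling(MultiBlock3D& lattice, MultiBlock3D& scalarField, Box3D domain)
{
    std::vector<MultiBlock3D*> args;
    args.push_back(&lattice);       // Block 0: assumed to share the actor's distribution.
    args.push_back(&scalarField);

    // MyCouplingFunctional3D stands for a user-defined box processing functional (assumption).
    BoxProcessorGenerator3D generator(new MyCouplingFunctional3D, domain);

    addInternalProcessor(generator, lattice, args, 0);
}
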
Example 3
// Keep a clone of the generator, the ids of the argument multi-blocks, and the processor level.
MultiBlock3D::ProcessorStorage3D::ProcessorStorage3D (
        DataProcessorGenerator3D const& generator_,
        std::vector<MultiBlock3D*> const& multiBlocks_,
        plint level_ )
    : generator(generator_.clone()),
      multiBlockIds(multiBlocks_.size()),
      level(level_)
{
    for(pluint iBlock=0; iBlock<multiBlockIds.size(); ++iBlock) {
        multiBlockIds[iBlock] = multiBlocks_[iBlock]->getId();
    }
}
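As a hedged sketch of how this storage could be filled, the following mirrors the storeProcessor() call issued at the end of addInternalProcessor above; the member name storedProcessors and the copyability of ProcessorStorage3D are assumptions, not a verbatim excerpt of the library.

// Plausible storeProcessor() implementation (sketch, not the library's actual code).
void MultiBlock3D::storeProcessor (
        DataProcessorGenerator3D const& generator,
        std::vector<MultiBlock3D*> multiBlocks, plint level )
{
    // Keep a clone of the generator together with the ids (rather than pointers) of the
    // argument blocks, so the processor can be re-attached after the blocks are recreated.
    storedProcessors.push_back(ProcessorStorage3D(generator, multiBlocks, level));
}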