void executeDataProcessor( ReductiveDataProcessorGenerator3D& generator, std::vector<MultiGrid3D*> multiGrids, plint referenceLevel ) { if(multiGrids.empty()) return; plint numLevels = (plint)multiGrids[0]->getNumLevels(); std::vector<int> dimensionsX, dimensionsT; generator.getDimensionsX(dimensionsX); generator.getDimensionsT(dimensionsT); std::vector<ReductiveDataProcessorGenerator3D*> localGenerators(numLevels); std::vector<BlockStatistics*> localStatistics(numLevels); for (plint iLevel=0; iLevel<numLevels; ++iLevel) { int dxScale = (int)referenceLevel - (int)iLevel; int dtScale = dxScale; // TODO: here, we assume convective scaling; general case could be considered. localGenerators[iLevel] = generator.clone(); plint boxRescaleFactor = util::roundToInt(util::twoToThePowerPlint(std::abs(referenceLevel-iLevel))); if (dxScale < 0) generator.divide(boxRescaleFactor); else generator.multiply(boxRescaleFactor); std::vector<MultiBlock3D*> localBlocks(multiGrids.size()); for (plint iBlock=0; iBlock<(plint)localBlocks.size(); ++iBlock) { localBlocks[iBlock] = &multiGrids[iBlock]->getComponent(iLevel); } executeDataProcessor(*localGenerators[iLevel], localBlocks); std::vector<double> scales(dimensionsX.size()); for (pluint iScale=0; iScale<scales.size(); ++iScale) { scales[iScale] = scaleToReference(dxScale, dimensionsX[iScale], dtScale, dimensionsT[iScale]); } localGenerators[iLevel]->getStatistics().rescale(scales); localStatistics[iLevel] = &(localGenerators[iLevel]->getStatistics()); } combine(localStatistics, generator.getStatistics()); }
/// Execute a reductive data processor once over a set of multi-blocks.
/// The multi-block problem is subdivided into per-atomic-block sub-problems via
/// MultiProcessing3D; each retained generator is executed on its atomic blocks,
/// the individual statistics are combined into the statistics of the original
/// generator, and envelopes are updated where required.
/// \param generator   Reductive data processor to execute; receives the combined statistics.
/// \param multiBlocks Multi-blocks on which the processor acts; all are assumed to
///                    share the structure of multiBlocks[0].
void executeDataProcessor( ReductiveDataProcessorGenerator3D& generator,
                           std::vector<MultiBlock3D*> multiBlocks )
{
    // Guard against an empty block list: there is nothing to process, and
    // multiBlocks[0] is dereferenced below to access the combined statistics.
    if (multiBlocks.empty()) return;
    MultiProcessing3D<ReductiveDataProcessorGenerator3D, ReductiveDataProcessorGenerator3D >
        multiProcessing(generator, multiBlocks);
    std::vector<ReductiveDataProcessorGenerator3D*> const& retainedGenerators =
        multiProcessing.getRetainedGenerators();
    std::vector<std::vector<plint> > const& atomicBlockNumbers =
        multiProcessing.getAtomicBlockNumbers();
    std::vector<BlockStatistics const*> individualStatistics(retainedGenerators.size());
    for (pluint iGenerator=0; iGenerator<retainedGenerators.size(); ++iGenerator) {
        std::vector<AtomicBlock3D*> extractedAtomicBlocks(multiBlocks.size());
        for (pluint iBlock=0; iBlock<extractedAtomicBlocks.size(); ++iBlock) {
            extractedAtomicBlocks[iBlock] =
                &multiBlocks[iBlock]->getComponent(atomicBlockNumbers[iGenerator][iBlock]);
        }
        // Delegate to the "AtomicBlock Reductive version" of executeDataProcessor.
        plb::executeDataProcessor(*retainedGenerators[iGenerator], extractedAtomicBlocks);
        individualStatistics[iGenerator] = &(retainedGenerators[iGenerator]->getStatistics());
    }
    multiBlocks[0]->getCombinedStatistics().combine(individualStatistics, generator.getStatistics());
    // In the "executeProcessor" version, envelopes are updated right here, because the processor
    // has already been executed. This behavior is unlike the behavior of the "addInternalProcessor" version,
    // where envelopes are updated from within the multi-block, after the execution of internal processors.
    multiProcessing.updateEnvelopesWhereRequired();
}