/// Return a <tt>SubVectorT1B</tt> view of the jth sub-column (Preconditions: <tt>values()!=NULL (1<=j<=numSubCols()</tt>) SubVectorT1B<Scalar> col( const Teuchos_Index j ) const { #ifdef TEUCHOS_DEBUG TEST_FOR_EXCEPTION( !( 1 <= j && j <= numSubCols_ ), std::logic_error ,"Error, index j="<<j<<" does not fall in the range [1,"<<(numSubCols_-1)<<"]!" ); #endif return SubVectorT1B<Scalar>(globalOffset(),subDim(),values()+(j-1)*leadingDim(),1); }
void operator()(ThreadParams& params, const std::string& name, T_Scalar value, const std::string& attrName = "", T_Attribute attribute = T_Attribute()) { log<picLog::INPUT_OUTPUT>("HDF5: write %1%D scalars: %2%") % simDim % name; // Size over all processes Dimensions globalSize(1, 1, 1); // Offset for this process Dimensions localOffset(0, 0, 0); // Offset for all processes Dimensions globalOffset(0, 0, 0); for (uint32_t d = 0; d < simDim; ++d) { globalSize[d] = Environment<simDim>::get().GridController().getGpuNodes()[d]; localOffset[d] = Environment<simDim>::get().GridController().getPosition()[d]; } Dimensions localSize(1, 1, 1); // avoid deadlock between not finished pmacc tasks and mpi calls in adios __getTransactionEvent().waitForFinished(); typename traits::PICToSplash<T_Scalar>::type splashType; params.dataCollector->writeDomain(params.currentStep, /* id == time step */ globalSize, /* total size of dataset over all processes */ localOffset, /* write offset for this process */ splashType, /* data type */ simDim, /* NDims spatial dimensionality of the field */ splash::Selection(localSize), /* data size of this process */ name.c_str(), /* data set name */ splash::Domain( globalOffset, /* offset of the global domain */ globalSize /* size of the global domain */ ), DomainCollector::GridType, &value); if(!attrName.empty()) { /*simulation attribute for data*/ typename traits::PICToSplash<T_Attribute>::type attType; log<picLog::INPUT_OUTPUT>("HDF5: write attribute %1% for scalars: %2%") % attrName % name; params.dataCollector->writeAttribute(params.currentStep, attType, name.c_str(), attrName.c_str(), &attribute); } }
/** * Initialises a slide of the simulation area. * * Starts a slide of the simulation area. In the process, GPU nodes are * reassigned to new grid positions to enable large simulation areas * to be computed. * All nodes in the simulation must call this function at the same iteration. * * @return true if the position of the calling GPU is switched to the end, false otherwise */ bool slide() { Manager::getInstance().waitForAllTasks(); //wait that all TAsk are finisehd bool result=comm.slide(); /* if we slide we must change our globalOffset of the simulation * (only change slide direction Y) */ int gpuOffset_y = this->getPosition().y(); PMACC_AUTO(simBox, SubGrid<DIM>::getInstance().getSimulationBox()); DataSpace<DIM> globalOffset(simBox.getGlobalOffset()); /* this is allowed in the case that we use sliding window * because size in Y direction is the same for all gpus domains */ globalOffset.y() = gpuOffset_y * simBox.getLocalSize().y(); SubGrid<DIM>::getInstance().setGlobalOffset(globalOffset); return result; }
/** \brief Implicit conversion to a non-mutable view of the same data.
 *
 * Builds a <tt>ConstSubVectorView</tt> that aliases this view's storage
 * (same global offset, sub-dimension, reference-counted values array,
 * and stride); no data is copied.
 */
operator ConstSubVectorView<Scalar>()
{
  return ConstSubVectorView<Scalar>(
    globalOffset(), subDim(), arcp_values(), stride()
    );
}