void DustSystem::writedensity() const
{
    // construct a private class instance to do the work (parallelized)
    WriteDensity wd(this);
    Parallel* parallel = find<ParallelFactory>()->parallel();

    // get the dimension of the dust system
    int dimDust = dimension();

    // For the xy plane (always)
    {
        wd.setup(1,1,0);
        parallel->call(&wd, Np);
        wd.write();
    }

    // For the xz plane (only if dimension is at least 2)
    if (dimDust >= 2)
    {
        wd.setup(1,0,1);
        parallel->call(&wd, Np);
        wd.write();
    }

    // For the yz plane (only if dimension is 3)
    if (dimDust == 3)
    {
        wd.setup(0,1,1);
        parallel->call(&wd, Np);
        wd.write();
    }
}
bool Parallel::initWithActions(ActionInterval * action, ...)
{
    va_list actions;
    va_start(actions, action);

    bool initialized = false;
    ActionInterval * action1 = action;
    while (action1 != NULL)
    {
        ActionInterval * action2 = va_arg(actions, ActionInterval *);
        if (action2 != NULL)
        {
            // pair up the two actions and keep folding the result with the next argument
            Parallel * tmp = new Parallel();
            tmp->autorelease();
            tmp->_initWithTwoActions(action1, action2);
            action1 = tmp;
        }
        else
        {
            // end of the NULL-terminated list: pad with an empty action so that
            // _initWithTwoActions always receives a valid pair
            ActionInterval * tmp = new ActionInterval();
            tmp->autorelease();
            _initWithTwoActions(action1, tmp);
            initialized = true;
            break;
        }
    }
    va_end(actions);
    return initialized;
}
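// --- Illustrative sketch (not the engine's API above): the same NULL-terminated va_list
// --- "fold into pairs" pattern, reduced to plain C strings so it compiles and runs
// --- standalone. The names combineAll and the string folding are made up for the example.
#include <cstdarg>
#include <cstdio>
#include <string>

// Folds a NULL-terminated argument list pairwise, mirroring initWithActions() above.
std::string combineAll(const char* first, ...)
{
    va_list args;
    va_start(args, first);
    std::string acc = first ? first : "";
    while (true)
    {
        const char* next = va_arg(args, const char*);
        if (next == nullptr) break;           // terminator reached
        acc = "(" + acc + "+" + next + ")";   // fold the accumulator with the next item
    }
    va_end(args);
    return acc;
}

int main()
{
    // prints ((a+b)+c), the same left fold the engine builds out of composite nodes
    std::printf("%s\n", combineAll("a", "b", "c", (const char*)nullptr).c_str());
    return 0;
}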
// Parses a network that begins with 'R'.
Network* NetworkBuilder::ParseR(const StaticShape& input_shape, char** str) {
  char dir = (*str)[1];
  if (dir == 'x' || dir == 'y') {
    STRING name = "Reverse";
    name += dir;
    *str += 2;
    Network* network = BuildFromString(input_shape, str);
    if (network == nullptr) return nullptr;
    Reversed* rev = new Reversed(name, dir == 'y' ? NT_YREVERSED : NT_XREVERSED);
    rev->SetNetwork(network);
    return rev;
  }
  int replicas = strtol(*str + 1, str, 10);
  if (replicas <= 0) {
    tprintf("Invalid R spec!:%s\n", *str);
    return nullptr;
  }
  Parallel* parallel = new Parallel("Replicated", NT_REPLICATED);
  char* str_copy = *str;
  for (int i = 0; i < replicas; ++i) {
    str_copy = *str;
    Network* network = BuildFromString(input_shape, &str_copy);
    if (network == nullptr) {
      tprintf("Invalid replicated network!\n");
      delete parallel;
      return nullptr;
    }
    parallel->AddToStack(network);
  }
  *str = str_copy;
  return parallel;
}
void test_parallel()
{
    printf("[test parallel]\n");
    Parallel p;
    int cpus = p.get_num_cpus();
    printf("CPUS: %d\n", cpus);
}
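// --- Illustrative sketch: the implementation of Parallel::get_num_cpus() is not shown here.
// --- The standard-library equivalent is std::thread::hardware_concurrency(), used below;
// --- that this is what such a helper wraps is an assumption for the example only.
#include <cstdio>
#include <thread>

int main()
{
    // hardware_concurrency() may return 0 when the value cannot be determined
    unsigned cpus = std::thread::hardware_concurrency();
    std::printf("CPUS: %u\n", cpus ? cpus : 1u);
    return 0;
}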
void DustSystem::writedepthmap() const
{
    // construct a private class instance to do the work (parallelized)
    WriteDepthMap wdm(this);
    Parallel* parallel = find<ParallelFactory>()->parallel();
    parallel->call(&wdm, Npy);
    wdm.write();
}
EBTStatus PlannerTaskParallel::update(Agent* pAgent, EBTStatus childStatus)
{
    BEHAVIAC_UNUSED_VAR(childStatus);
    BEHAVIAC_ASSERT(Parallel::DynamicCast(this->m_node) != 0);

    Parallel* node = (Parallel*)this->m_node;
    EBTStatus s = node->ParallelUpdate(pAgent, this->m_children);
    return s;
}
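// --- Illustrative sketch (toy types, not behaviac's API or its exact policy) of what a
// --- parallel composite's update typically does: tick every child and combine the statuses,
// --- e.g. fail if any child failed, succeed only when all have succeeded, else keep running.
#include <cstdio>
#include <functional>
#include <vector>

enum class Status { Running, Success, Failure };

Status parallelUpdate(const std::vector<std::function<Status()>>& children)
{
    bool allDone = true;
    for (const auto& tick : children)
    {
        Status s = tick();
        if (s == Status::Failure) return Status::Failure;  // any failure fails the composite
        if (s != Status::Success) allDone = false;         // at least one child still running
    }
    return allDone ? Status::Success : Status::Running;
}

int main()
{
    std::vector<std::function<Status()>> children = {
        [] { return Status::Success; },
        [] { return Status::Running; }
    };
    std::printf("%s\n", parallelUpdate(children) == Status::Running ? "running" : "done");
    return 0;
}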
TEST(Parallel_Test, Batch_Test)
{
    reset_counters();
    std::atomic<unsigned> atom{0};
    Future future;

    for (unsigned x = 0; x < BATCH_TEST_COUNT; ++x)
    {
        parallel.batch(testFunction, nullptr, &atom);
    }
    parallel.batch(testFunction, &future, &atom);

    future.wait();
    while (!parallel.isQueueEmpty()) {}

    ASSERT_EQ(atom.load(), BATCH_TEST_COUNT + 1);
}
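// --- Illustrative sketch (assumption: the Parallel/Future classes under test are not shown,
// --- so std::async and std::atomic stand in) of the same "enqueue many small jobs, then wait
// --- and check a shared counter" pattern that the test above exercises.
#include <atomic>
#include <cstdio>
#include <future>
#include <vector>

int main()
{
    constexpr unsigned kBatchCount = 100;
    std::atomic<unsigned> counter{0};

    // enqueue the batch; each job bumps the shared atomic counter once
    std::vector<std::future<void>> jobs;
    jobs.reserve(kBatchCount);
    for (unsigned x = 0; x < kBatchCount; ++x)
        jobs.push_back(std::async(std::launch::async, [&counter] { ++counter; }));

    // wait for every job, the role played by Future::wait()/isQueueEmpty() above
    for (auto& j : jobs) j.wait();

    std::printf("expected %u, got %u\n", kBatchCount, counter.load());
    return 0;
}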
// Parses a parallel set of networks, defined by (<net><net>...).
Network* NetworkBuilder::ParseParallel(const StaticShape& input_shape, char** str) {
  Parallel* parallel = new Parallel("Parallel", NT_PARALLEL);
  ++*str;
  Network* network = nullptr;
  while (**str != '\0' && **str != ')' &&
         (network = BuildFromString(input_shape, str)) != nullptr) {
    parallel->AddToStack(network);
  }
  if (**str != ')') {
    tprintf("Missing ) at end of (Parallel)!\n");
    delete parallel;
    return nullptr;
  }
  ++*str;
  return parallel;
}
void DustSystem::writequality() const
{
    Log* log = find<Log>();
    Units* units = find<Units>();
    Parallel* parallel = find<ParallelFactory>()->parallel();

    // Density metric
    log->info("Calculating quality metric for the grid density...");
    DustSystemDensityCalculator calc1(this, _Nrandom, _Ncells/5);
    parallel->call(&calc1, _Nrandom);
    log->info("  Mean value of density delta: "
              + QString::number(units->omassvolumedensity(calc1.meanDelta()*1e9))
              + " nano" + units->umassvolumedensity());
    log->info("  Standard deviation of density delta: "
              + QString::number(units->omassvolumedensity(calc1.stddevDelta()*1e9))
              + " nano" + units->umassvolumedensity());

    // Optical depth metric
    log->info("Calculating quality metric for the optical depth in the grid...");
    DustSystemDepthCalculator calc2(this, _Nrandom, _Ncells/50, _Nrandom*10);
    parallel->call(&calc2, _Nrandom);
    log->info("  Mean value of optical depth delta: " + QString::number(calc2.meanDelta()));
    log->info("  Standard deviation of optical depth delta: " + QString::number(calc2.stddevDelta()));

    // Output to file
    QString filename = find<FilePaths>()->output("ds_quality.dat");
    log->info("Writing quality metrics for the grid to " + filename + "...");
    ofstream file(filename.toLocal8Bit().constData());
    file << "Mean value of density delta: "
         << units->omassvolumedensity(calc1.meanDelta()) << ' '
         << units->umassvolumedensity().toStdString() << '\n'
         << "Standard deviation of density delta: "
         << units->omassvolumedensity(calc1.stddevDelta()) << ' '
         << units->umassvolumedensity().toStdString() << '\n';
    file << "Mean value of optical depth delta: " << calc2.meanDelta() << '\n'
         << "Standard deviation of optical depth delta: " << calc2.stddevDelta() << '\n';
    file.close();
    log->info("File " + filename + " created.");
}
// Builds a set of 4 lstms with x and y reversal, running in true parallel.
Network* NetworkBuilder::BuildLSTMXYQuad(int num_inputs, int num_states) {
  Parallel* parallel = new Parallel("2DLSTMQuad", NT_PAR_2D_LSTM);
  parallel->AddToStack(new LSTM("L2DLTRDown", num_inputs, num_states,
                                num_states, true, NT_LSTM));
  Reversed* rev = new Reversed("L2DLTRXRev", NT_XREVERSED);
  rev->SetNetwork(new LSTM("L2DRTLDown", num_inputs, num_states, num_states,
                           true, NT_LSTM));
  parallel->AddToStack(rev);
  rev = new Reversed("L2DRTLYRev", NT_YREVERSED);
  rev->SetNetwork(new LSTM("L2DRTLUp", num_inputs, num_states, num_states,
                           true, NT_LSTM));
  Reversed* rev2 = new Reversed("L2DXRevU", NT_XREVERSED);
  rev2->SetNetwork(rev);
  parallel->AddToStack(rev2);
  rev = new Reversed("L2DXRevY", NT_YREVERSED);
  rev->SetNetwork(new LSTM("L2DLTRDown", num_inputs, num_states, num_states,
                           true, NT_LSTM));
  parallel->AddToStack(rev);
  return parallel;
}
TEST(Parallel_Test, Process_Object_Test_Large)
{
    reset_counters();
    TestCls* values = new TestCls[DEFAULT_PARALLEL_THREAD_COUNT + 1];
    for (unsigned x = 0; x < DEFAULT_PARALLEL_THREAD_COUNT + 1; ++x)
        values[x] = x;

    Future future;
    parallel.process(values, &TestCls::operator*=, DEFAULT_PARALLEL_THREAD_COUNT + 1, 0, &future, 2);
    future.wait();

    for (unsigned x = 0; x < DEFAULT_PARALLEL_THREAD_COUNT + 1; ++x)
        ASSERT_EQ(x * 2, values[x].value());
    delete[] values;
}
TEST(Parallel_Test, Future_Test)
{
    reset_counters();
    TestCls* values = new TestCls[DEFAULT_PARALLEL_THREAD_COUNT];
    for (unsigned x = 0; x < DEFAULT_PARALLEL_THREAD_COUNT; ++x)
        values[x] = x;

    Future future;
    parallel.process(values, &TestCls::operator*=, DEFAULT_PARALLEL_THREAD_COUNT, 0, &future, 2);
    ASSERT_EQ(DEFAULT_PARALLEL_THREAD_COUNT, future.worker_count);

    future.wait();
    ASSERT_EQ(future.finished_count, future.worker_count);
    delete[] values;
}
TEST(Parallel_Test, Process_Object_Test_Equal)
{
    reset_counters();
    TestCls* values = new TestCls[DEFAULT_PARALLEL_THREAD_COUNT];
    for (unsigned x = 0; x < DEFAULT_PARALLEL_THREAD_COUNT; ++x)
        values[x] = x;

    Future future;
    parallel.process(values, &TestCls::operator*=, DEFAULT_PARALLEL_THREAD_COUNT, 0, &future, 2);
    future.wait();

    ASSERT_EQ(DEFAULT_PARALLEL_THREAD_COUNT, multiplication_assignment);
    for (unsigned x = 0; x < DEFAULT_PARALLEL_THREAD_COUNT; ++x)
        ASSERT_EQ(x * 2, values[x].value());
    delete[] values;
}
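// --- Illustrative sketch (assumption: the Parallel::process() API above is not shown, so
// --- std::thread is used directly) of fanning a member-function call, here operator*=, out
// --- over the elements of an array with one worker per element. TestValue is a toy stand-in.
#include <algorithm>
#include <cassert>
#include <thread>
#include <vector>

struct TestValue
{
    unsigned v = 0;
    TestValue& operator*=(unsigned factor) { v *= factor; return *this; }
};

int main()
{
    const unsigned n = std::max(1u, std::thread::hardware_concurrency());
    std::vector<TestValue> values(n);
    for (unsigned x = 0; x < n; ++x) values[x].v = x;

    // one worker per element, each applying the member function with the argument 2
    std::vector<std::thread> workers;
    for (unsigned x = 0; x < n; ++x)
        workers.emplace_back([&values, x] { values[x] *= 2; });
    for (auto& w : workers) w.join();

    for (unsigned x = 0; x < n; ++x) assert(values[x].v == x * 2);
    return 0;
}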
// Parses an LSTM network, either individual, bi- or quad-directional.
Network* NetworkBuilder::ParseLSTM(const StaticShape& input_shape, char** str) {
  bool two_d = false;
  NetworkType type = NT_LSTM;
  char* spec_start = *str;
  int chars_consumed = 1;
  int num_outputs = 0;
  char key = (*str)[chars_consumed], dir = 'f', dim = 'x';
  if (key == 'S') {
    type = NT_LSTM_SOFTMAX;
    num_outputs = num_softmax_outputs_;
    ++chars_consumed;
  } else if (key == 'E') {
    type = NT_LSTM_SOFTMAX_ENCODED;
    num_outputs = num_softmax_outputs_;
    ++chars_consumed;
  } else if (key == '2' && (((*str)[2] == 'x' && (*str)[3] == 'y') ||
                            ((*str)[2] == 'y' && (*str)[3] == 'x'))) {
    chars_consumed = 4;
    dim = (*str)[3];
    two_d = true;
  } else if (key == 'f' || key == 'r' || key == 'b') {
    dir = key;
    dim = (*str)[2];
    if (dim != 'x' && dim != 'y') {
      tprintf("Invalid dimension (x|y) in L Spec!:%s\n", *str);
      return nullptr;
    }
    chars_consumed = 3;
    if ((*str)[chars_consumed] == 's') {
      ++chars_consumed;
      type = NT_LSTM_SUMMARY;
    }
  } else {
    tprintf("Invalid direction (f|r|b) in L Spec!:%s\n", *str);
    return nullptr;
  }
  int num_states = strtol(*str + chars_consumed, str, 10);
  if (num_states <= 0) {
    tprintf("Invalid number of states in L Spec!:%s\n", *str);
    return nullptr;
  }
  Network* lstm = nullptr;
  if (two_d) {
    lstm = BuildLSTMXYQuad(input_shape.depth(), num_states);
  } else {
    if (num_outputs == 0) num_outputs = num_states;
    STRING name(spec_start, *str - spec_start);
    lstm = new LSTM(name, input_shape.depth(), num_states, num_outputs, false, type);
    if (dir != 'f') {
      Reversed* rev = new Reversed("RevLSTM", NT_XREVERSED);
      rev->SetNetwork(lstm);
      lstm = rev;
    }
    if (dir == 'b') {
      name += "LTR";
      Parallel* parallel = new Parallel("BidiLSTM", NT_PAR_RL_LSTM);
      parallel->AddToStack(new LSTM(name, input_shape.depth(), num_states,
                                    num_outputs, false, type));
      parallel->AddToStack(lstm);
      lstm = parallel;
    }
  }
  if (dim == 'y') {
    Reversed* rev = new Reversed("XYTransLSTM", NT_XYTRANSPOSE);
    rev->SetNetwork(lstm);
    lstm = rev;
  }
  return lstm;
}
Parallel::Parallel(const Parallel& orig) : refcount(0)
{
    num_threads = orig.get_num_threads();
}
void PanDustSystem::write() const
{
    DustSystem::write();

    // If requested, output the interstellar radiation field in every dust cell to a data file
    if (_writeISRF)
    {
        WavelengthGrid* lambdagrid = find<WavelengthGrid>();
        Units* units = find<Units>();

        // Create a text file
        TextOutFile file(this, "ds_isrf", "ISRF");

        // Write the header
        file.writeLine("# Mean field intensities for all dust cells with nonzero absorption");
        file.addColumn("dust cell index", 'd');
        file.addColumn("x coordinate of cell center (" + units->ulength() + ")", 'g');
        file.addColumn("y coordinate of cell center (" + units->ulength() + ")", 'g');
        file.addColumn("z coordinate of cell center (" + units->ulength() + ")", 'g');
        for (int ell=0; ell<_Nlambda; ell++)
            file.addColumn("J_lambda (W/m3/sr) for lambda = "
                           + QString::number(units->owavelength(lambdagrid->lambda(ell)))
                           + " " + units->uwavelength(), 'g');

        // Write one line for each dust cell with nonzero absorption
        for (int m=0; m<_Ncells; m++)
        {
            double Ltotm = Labs(m);
            if (Ltotm>0.0)
            {
                QList<double> values;
                Position bfr = _grid->centralPositionInCell(m);
                values << m << units->olength(bfr.x()) << units->olength(bfr.y()) << units->olength(bfr.z());
                for (auto J : meanintensityv(m)) values << J;
                file.writeRow(values);
            }
        }
    }

    // If requested, output temperature map(s) along cuts through the coordinate planes
    if (_writeTemp)
    {
        // construct a private class instance to do the work (parallelized)
        WriteTemp wt(this);
        Parallel* parallel = find<ParallelFactory>()->parallel();

        // get the dimension of the dust grid
        int dimDust = _grid->dimension();

        // Create an assigner that assigns all the work to the root process
        RootAssigner* assigner = new RootAssigner(0);
        assigner->assign(Np);

        // For the xy plane (always)
        {
            wt.setup(1,1,0);
            parallel->call(&wt, assigner);
            wt.write();
        }

        // For the xz plane (only if dimension is at least 2)
        if (dimDust >= 2)
        {
            wt.setup(1,0,1);
            parallel->call(&wt, assigner);
            wt.write();
        }

        // For the yz plane (only if dimension is 3)
        if (dimDust == 3)
        {
            wt.setup(0,1,1);
            parallel->call(&wt, assigner);
            wt.write();
        }
    }
}
cchem::cc::Energy cchem::cc::energy(Wavefunction wf, Runtime &rt, const std::string &method)
{
    using utility::make_array;

    wf.sort();
    wf.reverse();

    double cutoff = rt.get<double>("/cc/integrals/cutoff", 1e-10);
    ::integrals::Screening screening(wf.basis(), cutoff);

    size_t N = wf.basis().size();
    size_t no = wf.active().size();
    size_t nv = wf.virtuals().size();
    // std::cout << "atomic orbitals: " << N << std::endl;
    // std::cout << "occupied orbitals: " << no << std::endl;
    // std::cout << "virtual orbitals: " << nv << std::endl;

    Parallel pe;

#define RT_ARRAYS_ALLOCATE(name, dims)                                  \
    if (!rt.arrays().contains(name)) {                                  \
        rt.arrays().allocate<double>(name, make_array dims, pe);        \
        pe.cout() << *rt.arrays().find<Array>(name) << std::endl;       \
    }

    // first ones may be allocated in faster storage
    RT_ARRAYS_ALLOCATE("cc.t(ijab)", (no,no,nv,nv));
    RT_ARRAYS_ALLOCATE("cc.u(ijab)", (no,no,N,N));
    RT_ARRAYS_ALLOCATE("integrals.v(ijab)", (no,no,nv+no,N));

    size_t B = wf.basis().max().size();
    RT_ARRAYS_ALLOCATE("integrals.v(iqrs)", (no,B,N,no+nv));
    RT_ARRAYS_ALLOCATE("integrals.v(ijka)", (no,no,no,nv));
    RT_ARRAYS_ALLOCATE("integrals.v(ijkl)", (no,no,no,no));
    RT_ARRAYS_ALLOCATE("integrals.v(iajb)", (no,nv,no,N));

    {
        Map<Array*> V;
        V["ijkl"] = rt.arrays().find<Array>("integrals.v(ijkl)");
        V["ijka"] = rt.arrays().find<Array>("integrals.v(ijka)");
        V["ijab"] = rt.arrays().find<Array>("integrals.v(ijab)");
        V["iajb"] = rt.arrays().find<Array>("integrals.v(iajb)");
        // V["iabc"] = rt.arrays().find<Array>("integrals.v(iabc)");
        V["iqrs"] = rt.arrays().find<Array>("integrals.v(iqrs)");

        utility::timer timer;
        double cutoff = rt.get<double>("/cc/integrals/cutoff", 1e-10);
        integrals::Screening screening(wf.basis(), cutoff);
        cc::integrals(pe, wf, V, screening);
        pe.cout() << "integrals time: " << timer << std::endl;
    }

    if (method == "ccsd") rt.arrays().erase<Array>("integrals.v(iqrs)");

    Map<const Array*> V;
    V["ijkl"] = rt.arrays().find<Array>("integrals.v(ijkl)");
    V["ijka"] = rt.arrays().find<Array>("integrals.v(ijka)");
    V["ijab"] = rt.arrays().find<Array>("integrals.v(ijab)");
    V["iajb"] = rt.arrays().find<Array>("integrals.v(iajb)");
    // V["iabc"] = rt.arrays().find<Array>("integrals.v(iabc)");

    Map<Array*> A;
    A["t(ijab)"] = rt.arrays().find<Array>("cc.t(ijab)");
    A["u(ijab)"] = rt.arrays().find<Array>("cc.u(ijab)");

    cc::Energy E;
    E["mp2"] = mp2(pe, wf, *V["ijab"], *A["t(ijab)"]);
    pe.cout() << "mbpt(2) energy: " << std::setprecision(10) << E["mp2"] << std::endl;

    RT_ARRAYS_ALLOCATE("cc.vt2(ijab)", (no,no,N,N));
    RT_ARRAYS_ALLOCATE("cc.vt1(ijab)", (no,no,N,N));
    RT_ARRAYS_ALLOCATE("cc.vt1\'", (no,no,N,N));
    RT_ARRAYS_ALLOCATE("cc.vt1\"", (no,no,N,N));
    RT_ARRAYS_ALLOCATE("cc.t(ia)", (no, nv));

    A["t(ia)"] = rt.arrays().find<Array>("cc.t(ia)");
    A["vt1"] = rt.arrays().find<Array>("cc.vt1(ijab)");
    A["vt2"] = rt.arrays().find<Array>("cc.vt2(ijab)");
    A["vt1\'"] = rt.arrays().find<Array>("cc.vt1\'");
    A["vt1\""] = rt.arrays().find<Array>("cc.vt1\"");

    std::auto_ptr<DIIS> diis;
    if (pe.rank() == 0) {
        File::Group fg = rt.file("cc").create_group("diis");
        diis.reset(new DIIS(no, nv, fg, rt.get<int>("/cc/diis/max", 5)));
    }

    E["ccsd"] = sd(rt).energy(pe, wf, V, A, diis.get());

    rt.arrays().erase<Array>("cc.vt2(ijab)");
    rt.arrays().erase<Array>("cc.vt1(ijab)");
    rt.arrays().erase<Array>("cc.vt1\"");
    rt.arrays().erase<Array>("cc.vt1\'");

    V.clear();
    A.clear();

    if (method == "ccsd(t)") {
        Map<Array*> V;
        RT_ARRAYS_ALLOCATE("integrals.v(iabc)", (no,nv,N,nv));
        V["iqrs"] = rt.arrays().find<Array>("integrals.v(iqrs)");
        V["iabc"] = rt.arrays().find<Array>("integrals.v(iabc)");
        {
            utility::timer timer;
            double cutoff = rt.get<double>("/cc/integrals/cutoff", 1e-10);
            integrals::Screening screening(wf.basis(), cutoff);
            cc::integrals(pe, wf, V, screening);
            pe.cout() << "integrals time: " << timer << std::endl;
        }
        V["ijka"] = rt.arrays().find<Array>("integrals.v(ijka)");
        V["ijab"] = rt.arrays().find<Array>("integrals.v(ijab)");

        Map<Array*> t;
        t["ia"] = rt.arrays().find<Array>("cc.t(ia)");
        t["ijab"] = rt.arrays().find<Array>("cc.t(ijab)");

        cc::Energy e = cc::triples::energy(pe, wf, V, t);
        E["ccsd[t]"] = E["ccsd"] + e["[t]"];
        E["ccsd(t)"] = E["ccsd[t]"] + e["(t)"];
        pe.cout() << "ccsd[t]: " << E["ccsd[t]"] << std::endl;
        pe.cout() << "ccsd(t): " << E["ccsd(t)"] << std::endl;
    }

    return E;
}
int main()
{
    Mecanismo P = Mecanismo(2);

    cube I1__; I1__.zeros(3,3,2);
    I1__.slice(0) << 0 << 0 << 0 << endr
                  << 0 << 107.307e-6 + 146.869e-6 << 0 << endr
                  << 0 << 0 << 107.307e-6 + 146.869e-6 << endr;
    I1__.slice(1) << 0 << 0 << 0 << endr
                  << 0 << 438.0e-6 << 0 << endr
                  << 0 << 0 << 438.0e-6 << endr;

    cube I2__; I2__.zeros(3,3,2);
    I2__.slice(0) << 0 << 0 << 0 << endr
                  << 0 << 107.307e-6 + 188.738e-6 << 0 << endr
                  << 0 << 0 << 107.307e-6 + 188.738e-6 << endr;
    I2__.slice(1) << 0 << 0 << 0 << endr
                  << 0 << 301.679e-6 << 0 << endr
                  << 0 << 0 << 301.679e-6 << endr;

    Serial RR1 = Serial(2, {0.12, 0.16}, {0.06, 0.078}, {0.062, 0.124}, I1__, {0, 0, 9.8}, &fDH_RR);
    Serial RR2 = Serial(2, {0.12, 0.16}, {0.06, 0.058}, {0.062, 0.097}, I2__, {0, 0, 9.8}, &fDH_RR);
    Serial **RR_ = new Serial* [2];
    RR_[0] = &RR1;
    RR_[1] = &RR2;

    // Matrices describing the architecture of the mechanism
    double l0 = 0.05;
    mat D_ = join_vert((mat)eye(2,2), 2);
    mat E_ = join_diag( Roty(0)(span(0,1),span(0,2)), Roty(PI)(span(0,1),span(0,2)) );
    mat F_ = zeros(4,4);
    vec f_ = {l0, 0, -l0, 0};

    Parallel Robot = Parallel(2, &P, RR_, 2, {2,4}, D_, E_, F_, f_);
    Reference RefObj = Reference(0.12, {0.08, 0.16}, {-0.08, 0.4});

    // Plot the workspace
    uint nx = 96.0;
    uint ny = 56.0;
    double lx = 0.24;
    double ly = 0.28;
    double xi = -lx;
    double xf = lx;
    double yi = 0.0;
    double yf = ly;
    double dx = (xf-xi)/(nx-1);
    double dy = (yf-yi)/(ny-1);
    double dl = 0.5*(dx+dy);

    Mat<int> M;
    M.zeros(nx,ny);
    field<mat> fZ_(nx,ny);
    field<mat> fMh_(nx,ny);
    field<vec> fgh_(nx,ny);
    field<vec> fa1_(nx,ny);
    field<vec> fa2_(nx,ny);
    field<vec> fa12_(nx,ny);
    for(uint i=0; i<nx; i++){
        for(uint j=0; j<ny; j++){
            fZ_(i,j).zeros(2,2);
            fMh_(i,j).zeros(2,2);
            fgh_(i,j).zeros(2);
            fa1_(i,j).zeros(2);
            fa2_(i,j).zeros(2);
            fa12_(i,j).zeros(2);
        }
    }

    vec v1_ = {1,0};
    vec v2_ = {0,1};
    vec v12_ = {1,1};

    double r = 0.07;
    double x0 = 0.0;
    double y0 = 0.17;
    uint rows = nx;
    uint cols = ny;

    vec q0_ = {0.823167, 1.81774, 0.823167, 1.81774};
    GNR2 gnr2 = GNR2("RK6", &Robot, 1e-6, 30);
    //gnr2.Doit(q0_, {0.05,0.08});
    //cout << gnr2.convergiu << endl;
    //cout << gnr2.x_ << endl;
    //cout << gnr2.res_ << endl;
    //cout << gnr2.n << endl;

    double x;
    double y;
    mat A2_;
    for(uint i=0; i<rows; i++){
        for(uint j=0; j<cols; j++){
            x = xi + i*dx;
            y = yi + j*dy;
            gnr2.Doit(q0_, {x, y});
            if(gnr2.convergiu){
                q0_ = gnr2.x_;
                A2_ = join_horiz(Robot.Ah_, join_horiz(Robot.Ao_.col(1), Robot.Ao_.col(3)) );
                if(abs(det(Robot.Ao_)) < 1.6*1e-6 || abs(det(A2_)) < 1e-11 )
                    M(i,j) = 2;
                else{
                    M(i,j) = 1;
                    Robot.Doit(Robot.q0_, join_vert(v1_, -solve(Robot.Ao_, Robot.Ah_*v1_)) );
                    fZ_(i,j) = Robot.Z_;
                    fMh_(i,j) = Robot.dy->Mh_;
                    fgh_(i,j) = Robot.dy->gh_;
                    fa1_(i,j) = Robot.dy->vh_;
                    Robot.Doit(Robot.q0_, Robot.C_*v2_);
                    fa2_(i,j) = Robot.dy->vh_;
                    Robot.Doit(Robot.q0_, Robot.C_*v12_);
                    fa12_(i,j) = Robot.dy->vh_ - fa1_(i,j) - fa2_(i,j);
                }
            }
            gnr2.convergiu = false;
            if( ((x - x0)*(x - x0) + (y - y0)*(y - y0) <= (r+dl)*(r+dl) ) &&
                ((x - x0)*(x - x0) + (y - y0)*(y - y0) >= (r-dl)*(r-dl) ) &&
                (M(i,j) != 2) )
                M(i,j) = 3;
        }
    }

    for(uint i=0; i<rows; i++){
        for(uint j=0; j<cols; j++){
            cout << M(i,j) << ";";
            if(j==cols-1) cout << endl;
        }
    }

    fZ_.save("fZ_field");
    fMh_.save("fMh_field");
    fgh_.save("fgh_field");
    fa1_.save("fa1_field");
    fa2_.save("fa2_field");
    fa12_.save("fa12_field");

    return 0;
}
void PanDustSystem::write() const
{
    DustSystem::write();

    PeerToPeerCommunicator* comm = find<PeerToPeerCommunicator>();
    bool dataParallel = comm->dataParallel();

    // If requested, output the interstellar radiation field in every dust cell to a data file
    if (_writeISRF)
    {
        WavelengthGrid* lambdagrid = find<WavelengthGrid>();
        Units* units = find<Units>();

        // Create a text file
        TextOutFile file(this, "ds_isrf", "ISRF");

        // Write the header
        file.writeLine("# Mean field intensities for all dust cells with nonzero absorption");
        file.addColumn("dust cell index", 'd');
        file.addColumn("x coordinate of cell center (" + units->ulength() + ")", 'g');
        file.addColumn("y coordinate of cell center (" + units->ulength() + ")", 'g');
        file.addColumn("z coordinate of cell center (" + units->ulength() + ")", 'g');
        for (int ell=0; ell<_Nlambda; ell++)
            file.addColumn("J_lambda (W/m3/sr) for lambda = "
                           + QString::number(units->owavelength(lambdagrid->lambda(ell)))
                           + " " + units->uwavelength(), 'g');

        // Write one line for each dust cell with nonzero absorption
        for (int m=0; m<_Ncells; m++)
        {
            if (!dataParallel)
            {
                double Ltotm = Labs(m);
                if (Ltotm>0.0)
                {
                    QList<double> values;
                    Position bfr = _grid->centralPositionInCell(m);
                    values << m << units->olength(bfr.x()) << units->olength(bfr.y()) << units->olength(bfr.z());
                    for (auto J : meanintensityv(m)) values << J;
                    file.writeRow(values);
                }
            }
            else // for distributed mode
            {
                QList<double> values;
                Position bfr = _grid->centralPositionInCell(m);
                values << m << units->olength(bfr.x()) << units->olength(bfr.y()) << units->olength(bfr.z());

                // the correct process gets Jv
                Array Jv(_Nlambda);
                if (_assigner->validIndex(m)) Jv = meanintensityv(m);

                // and broadcasts it
                int sender = _assigner->rankForIndex(m);
                comm->broadcast(Jv,sender);

                if (Jv.sum()>0)
                {
                    for (auto J : Jv) values << J;
                    file.writeRow(values);
                }
            }
        }
    }

    // If requested, output temperature map(s) along coordinate axes and temperature data for each dust cell
    if (_writeTemp)
    {
        // Parallelize the calculation over the threads
        Parallel* parallel = find<ParallelFactory>()->parallel();

        // If the necessary data is distributed over the processes, do the calculation on all processes.
        // Else, let the root do everything.
        bool isRoot = comm->isRoot();

        // Output temperature map(s) along coordinate axes
        {
            // Construct a private class instance to do the work (parallelized)
            WriteTempCut wt(this);

            // Get the dimension of the dust grid
            int dimDust = _grid->dimension();

            // For the xy plane (always)
            {
                wt.setup(1,1,0);
                if (dataParallel) parallel->call(&wt, Np);
                else if (isRoot) parallel->call(&wt, Np);
                wt.write();
            }

            // For the xz plane (only if dimension is at least 2)
            if (dimDust >= 2)
            {
                wt.setup(1,0,1);
                if (dataParallel) parallel->call(&wt, Np);
                else if (isRoot) parallel->call(&wt, Np);
                wt.write();
            }

            // For the yz plane (only if dimension is 3)
            if (dimDust == 3)
            {
                wt.setup(0,1,1);
                if (dataParallel) parallel->call(&wt, Np);
                else if (isRoot) parallel->call(&wt, Np);
                wt.write();
            }
        }

        // Output a text file with temperature data for each dust cell
        {
            find<Log>()->info("Calculating indicative dust temperatures for each cell...");

            // Construct a private class instance to do the work (parallelized)
            WriteTempData wt(this);

            // Call the body on the right cells. If everything is available, no unnecessary communication will be done.
            if (dataParallel)
            {
                // Calculate the temperature for the cells owned by this process
                parallel->call(&wt, _assigner);
            }
            else if (isRoot)
            {
                // Let root calculate it for everything
                parallel->call(&wt, _Ncells);
            }
            wt.write();
        }
    }
}
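// --- Illustrative sketch of the "owner computes, then broadcasts" pattern used in the
// --- distributed branch above. SKIRT's PeerToPeerCommunicator and assigner are not shown
// --- here, so this uses plain MPI; the round-robin cell ownership and the fabricated Jv
// --- values are assumptions made only for this standalone example.
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    const int Ncells = 8, Nlambda = 4;
    for (int m = 0; m < Ncells; ++m)
    {
        std::vector<double> Jv(Nlambda, 0.0);
        int owner = m % size;          // hypothetical assignment of cell m to a process
        if (rank == owner)             // only the owner holds the real mean intensities
            for (int ell = 0; ell < Nlambda; ++ell) Jv[ell] = 1.0 + m + 0.1*ell;

        // every process receives the owner's values, so the root can write the table row
        MPI_Bcast(Jv.data(), Nlambda, MPI_DOUBLE, owner, MPI_COMM_WORLD);
        if (rank == 0) std::printf("cell %d: J_0 = %g\n", m, Jv[0]);
    }

    MPI_Finalize();
    return 0;
}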