// Greedily packs the given variables into a grid of maxVectors rows by four
// components, following GLSL ES Appendix A, Section 7.  Returns true when
// every variable fits within maxVectors rows, false otherwise.
// Side effects: resets and fills the member packing state
// (maxRows_, topNonFullRow_, bottomNonFullRow_, rows_).
bool VariablePacker::CheckVariablesWithinPackingLimits(int maxVectors, const TVariableInfoList& in_variables)
{
    ASSERT(maxVectors > 0);
    maxRows_ = maxVectors;
    topNonFullRow_ = 0;
    bottomNonFullRow_ = maxRows_ - 1;
    // Work on a copy so the caller's list keeps its original order.
    TVariableInfoList variables(in_variables);

    // As per GLSL 1.017 Appendix A, Section 7 variables are packed in specific
    // order by type, then by size of array, largest first.
    std::sort(variables.begin(), variables.end(), TVariableInfoComparer());

    rows_.clear();
    rows_.resize(maxVectors, 0);

    // Packs the 4 column variables.
    // Full-width (4-component) variables consume whole rows from the top.
    size_t ii = 0;
    for (; ii < variables.size(); ++ii) {
        const TVariableInfo& variable = variables[ii];
        if (GetNumComponentsPerRow(variable.type) != 4) {
            break;
        }
        topNonFullRow_ += GetNumRows(variable.type) * variable.size;
    }

    if (topNonFullRow_ > maxRows_) {
        return false;
    }

    // Packs the 3 column variables.
    // These sit directly below the 4-component block, occupying columns 0..2
    // and leaving column 3 free for 1-component variables later.
    int num3ColumnRows = 0;
    for (; ii < variables.size(); ++ii) {
        const TVariableInfo& variable = variables[ii];
        if (GetNumComponentsPerRow(variable.type) != 3) {
            break;
        }
        num3ColumnRows += GetNumRows(variable.type) * variable.size;
    }

    if (topNonFullRow_ + num3ColumnRows > maxRows_) {
        return false;
    }

    fillColumns(topNonFullRow_, num3ColumnRows, 0, 3);

    // Packs the 2 column variables.
    // They are placed in columns 0-1 first, then columns 2-3; a variable is
    // never split across the two column pairs.
    int top2ColumnRow = topNonFullRow_ + num3ColumnRows;
    int twoColumnRowsAvailable = maxRows_ - top2ColumnRow;
    int rowsAvailableInColumns01 = twoColumnRowsAvailable;
    int rowsAvailableInColumns23 = twoColumnRowsAvailable;
    for (; ii < variables.size(); ++ii) {
        const TVariableInfo& variable = variables[ii];
        if (GetNumComponentsPerRow(variable.type) != 2) {
            break;
        }
        int numRows = GetNumRows(variable.type) * variable.size;
        if (numRows <= rowsAvailableInColumns01) {
            rowsAvailableInColumns01 -= numRows;
        } else if (numRows <= rowsAvailableInColumns23) {
            rowsAvailableInColumns23 -= numRows;
        } else {
            return false;
        }
    }

    int numRowsUsedInColumns01 = twoColumnRowsAvailable - rowsAvailableInColumns01;
    int numRowsUsedInColumns23 = twoColumnRowsAvailable - rowsAvailableInColumns23;
    fillColumns(top2ColumnRow, numRowsUsedInColumns01, 0, 2);
    // Columns 2-3 are filled from the bottom up so the unused gap stays
    // contiguous for the best-fit search below.
    fillColumns(maxRows_ - numRowsUsedInColumns23, numRowsUsedInColumns23, 2, 2);

    // Packs the 1 column variables.
    // Each one goes into the column with the smallest remaining gap that
    // still fits it (best-fit), minimising fragmentation.
    for (; ii < variables.size(); ++ii) {
        const TVariableInfo& variable = variables[ii];
        ASSERT(1 == GetNumComponentsPerRow(variable.type));
        int numRows = GetNumRows(variable.type) * variable.size;
        int smallestColumn = -1;
        int smallestSize = maxRows_ + 1;  // larger than any real gap
        int topRow = -1;
        for (int column = 0; column < kNumColumns; ++column) {
            int row = 0;
            int size = 0;
            if (searchColumn(column, numRows, &row, &size)) {
                if (size < smallestSize) {
                    smallestSize = size;
                    smallestColumn = column;
                    topRow = row;
                }
            }
        }

        // No column had a large enough gap.
        if (smallestColumn < 0) {
            return false;
        }

        fillColumns(topRow, numRows, smallestColumn, 1);
    }

    // Every variable must have been consumed by one of the loops above.
    ASSERT(variables.size() == ii);

    return true;
}
// Checks the exact gradient-stack layout produced by summing two independent
// variables through a lambda that captures them by reference.  The hard-coded
// indices (1750..1753) are offsets into GRAD_LIST and rely on the baseline of
// 1750 addresses asserted below -- presumably fixed by the fixture's setup;
// TODO confirm against gradient_structure defaults.
TEST_F(test_move, independents)
{
  independent_variables independents(1, 2);
  independents(1) = 1.5;
  independents(2) = 3.5;

  gradient_structure gs;
  // Baseline: empty stack, 1750 pre-existing gradient addresses.
  ASSERT_EQ(0, gradient_structure::GRAD_STACK1->total());
  ASSERT_EQ(1750, gradient_structure::GRAD_LIST->total_addresses());

  dvar_vector variables(independents);
  dvariable a(variables(1));
  dvariable b(variables(2));
  auto sum = [&a, &b]() { return a + b; };
  dvariable result = sum();

  // Four entries were recorded and three new addresses allocated.
  ASSERT_EQ(4, gradient_structure::GRAD_STACK1->total());
  ASSERT_EQ(1753, gradient_structure::GRAD_LIST->total_addresses());
  ASSERT_DOUBLE_EQ(value(result), 5.0);

  // Walk the stack from the top; ptr initially points one past the newest
  // entry, whose func slot is still unset.
  grad_stack_entry* ptr = gradient_structure::GRAD_STACK1->ptr;
  ASSERT_TRUE(ptr->func == NULL);

  // Newest entry: result takes its value from RETURN_PTR (unary record).
  --ptr;
  ASSERT_TRUE(ptr->func == &default_evaluation1);
  ASSERT_TRUE(ptr->dep_addr == gradient_structure::GRAD_LIST->get(1752));
  ASSERT_TRUE(ptr->ind_addr1 == &(gradient_structure::RETURN_PTR->v->x));
  ASSERT_TRUE(ptr->ind_addr2 == NULL);
  ASSERT_DOUBLE_EQ(ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(ptr->mult2, 0.0);

  // a + b: binary record writing into RETURN_PTR, reading slots 1750/1751.
  --ptr;
  ASSERT_TRUE(ptr->func == &default_evaluation4);
  ASSERT_TRUE(ptr->dep_addr == &(gradient_structure::RETURN_PTR->v->x));
  ASSERT_TRUE(ptr->ind_addr1 == gradient_structure::GRAD_LIST->get(1750));
  ASSERT_TRUE(ptr->ind_addr2 == gradient_structure::GRAD_LIST->get(1751));
  ASSERT_DOUBLE_EQ(ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(ptr->mult2, 0.0);

  // Copy-construction of b from variables(2).
  --ptr;
  ASSERT_TRUE(ptr->func == &default_evaluation1);
  ASSERT_TRUE(ptr->dep_addr == gradient_structure::GRAD_LIST->get(1751));
  ASSERT_TRUE(ptr->ind_addr1 == &(variables(2).v->x));
  ASSERT_TRUE(ptr->ind_addr2 == NULL);
  ASSERT_DOUBLE_EQ(ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(ptr->mult2, 0.0);

  // Copy-construction of a from variables(1) -- oldest entry.
  --ptr;
  ASSERT_TRUE(ptr->func == &default_evaluation1);
  ASSERT_TRUE(ptr->dep_addr == gradient_structure::GRAD_LIST->get(1750));
  ASSERT_TRUE(ptr->ind_addr1 == &(variables(1).v->x));
  ASSERT_TRUE(ptr->ind_addr2 == NULL);
  ASSERT_DOUBLE_EQ(ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(ptr->mult2, 0.0);
}
// Same setup as `independents`, but the dvariables are created inside the
// lambda and the gradient is evaluated manually, one stack entry at a time,
// checking that each entry's func() moves the derivative from the dependent
// slot to the independent slot(s).
TEST_F(test_move, independents_gradmanual)
{
  independent_variables independents(1, 2);
  independents(1) = 1.5;
  independents(2) = 3.5;

  gradient_structure gs;
  // Baseline: empty stack, 1750 pre-existing gradient addresses.
  ASSERT_EQ(0, gradient_structure::GRAD_STACK1->total());
  ASSERT_EQ(1750, gradient_structure::GRAD_LIST->total_addresses());

  dvar_vector variables(independents);
  auto sum = [&variables]()
  {
    dvariable a(variables(1));
    dvariable b(variables(2));
    return a + b;
  };
  dvariable result = sum();

  // Only 1752 addresses here (vs 1753 in `independents`): a and b are
  // temporaries local to the lambda.
  ASSERT_EQ(4, gradient_structure::GRAD_STACK1->total());
  ASSERT_EQ(1752, gradient_structure::GRAD_LIST->total_addresses());
  ASSERT_DOUBLE_EQ(value(result), 5.0);

  // Step back from one-past-the-end to the newest entry (the Sum record).
  --gradient_structure::GRAD_STACK1->ptr;

  // Zero every derivative slot in the array memblock before seeding.
  double_and_int* ptr = (double_and_int*)gradient_structure::get_ARRAY_MEMBLOCK_BASE();
  unsigned long int imax = gradient_structure::ARR_LIST1->get_max_last_offset() / sizeof(double_and_int);
  for (unsigned int i = 0; i < imax; ++i)
  {
    ptr->x = 0.0;
    ++ptr;
  }
  gradient_structure::GRAD_LIST->initialize();

  // Seed the dependent variable's derivative with 1.
  *gradient_structure::GRAD_STACK1->ptr->dep_addr = 1.0;

  //Sum
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 1.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 0.0);
  // Evaluating the entry transfers the derivative to the independent slot
  // and clears the dependent slot.
  (*(gradient_structure::GRAD_STACK1->ptr->func))();
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 0.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 1.0);
  ASSERT_TRUE(gradient_structure::GRAD_STACK1->ptr->ind_addr2 == NULL);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult2, 0.0);

  //Addition
  --gradient_structure::GRAD_STACK1->ptr;
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 1.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 0.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr2), 0.0);
  // The binary record distributes the derivative to both operands.
  (*(gradient_structure::GRAD_STACK1->ptr->func))();
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 0.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 1.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr2), 1.0);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult2, 0.0);

  //Constructor
  --gradient_structure::GRAD_STACK1->ptr;
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 1.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 0.0);
  (*(gradient_structure::GRAD_STACK1->ptr->func))();
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 0.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 1.0);
  ASSERT_TRUE(gradient_structure::GRAD_STACK1->ptr->ind_addr2 == NULL);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult2, 0.0);

  //Constructor
  --gradient_structure::GRAD_STACK1->ptr;
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 1.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 0.0);
  (*(gradient_structure::GRAD_STACK1->ptr->func))();
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->dep_addr), 0.0);
  ASSERT_DOUBLE_EQ(*(gradient_structure::GRAD_STACK1->ptr->ind_addr1), 1.0);
  ASSERT_TRUE(gradient_structure::GRAD_STACK1->ptr->ind_addr2 == NULL);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult1, 0.0);
  ASSERT_DOUBLE_EQ(gradient_structure::GRAD_STACK1->ptr->mult2, 0.0);

  //ASSERT_DOUBLE_EQ(*gradient_structure::INDVAR_LIST->get_address(1), 0.0);
  //ASSERT_DOUBLE_EQ(*gradient_structure::INDVAR_LIST->get_address(2), 0.0);

  // After the manual sweep, the independents' value slots hold the
  // propagated derivative (d(sum)/da = d(sum)/db = 1).
  ASSERT_DOUBLE_EQ(value(variables(1)), 1.0);
  ASSERT_DOUBLE_EQ(value(variables(2)), 1.0);
}
void write_realization( const circuit& circ, std::ostream& os, const write_realization_settings& settings ) { unsigned oldsize = 0; if ( !settings.header.empty() ) { std::string header = settings.header; boost::algorithm::replace_all( header, "\n", "\n# " ); os << "# " << header << std::endl; } if ( !settings.version.empty() ) { os << ".version " << settings.version << std::endl; } os << ".numvars " << circ.lines() << std::endl; std::vector<std::string> variables( circ.lines() ); for ( unsigned i = 0u; i < circ.lines(); ++i ) { variables[i] = boost::str( boost::format( "x%d" ) % i ); } std::vector<std::string> _inputs( circ.inputs().begin(), circ.inputs().end() ); oldsize = _inputs.size(); _inputs.resize( circ.lines() ); for ( unsigned i = oldsize; i < circ.lines(); ++i ) { _inputs[i] = boost::str( boost::format( "i%d" ) % i ); } std::vector<std::string> _outputs( circ.outputs().begin(), circ.outputs().end() ); oldsize = _outputs.size(); _outputs.resize( circ.lines() ); for ( unsigned i = oldsize; i < circ.lines(); ++i ) { _outputs[i] = boost::str( boost::format( "o%d" ) % i ); } os << ".variables " << boost::algorithm::join( variables, " " ) << std::endl; namespace karma = boost::spirit::karma; namespace ascii = boost::spirit::ascii; os << ".inputs"; //std::ostream_iterator<char> outit( os ); //karma::generate_delimited( outit, *( karma::no_delimit['"' << karma::string] << '"' ), ascii::space, _inputs ); for ( const auto& _input : _inputs ) { std::string quote = ( _input.find( " " ) != std::string::npos ) ? "\"" : ""; os << boost::format( " %s%s%s" ) % quote % _input % quote; } os << std::endl; os << ".outputs"; //karma::generate_delimited( outit, *( karma::no_delimit['"' << karma::string] << '"' ), ascii::space, _outputs ); for ( const auto& _output : _outputs ) { std::string quote = ( _output.find( " " ) != std::string::npos ) ? 
"\"" : ""; os << boost::format( " %s%s%s" ) % quote % _output % quote; } os << std::endl; std::string _constants( circ.lines(), '-' ); std::transform( circ.constants().begin(), circ.constants().end(), _constants.begin(), constant_to_char() ); std::string _garbage( circ.lines(), '-' ); std::transform( circ.garbage().begin(), circ.garbage().end(), _garbage.begin(), garbage_to_char() ); os << ".constants " << _constants << std::endl << ".garbage " << _garbage << std::endl; for ( const auto& bus : circ.inputbuses().buses() ) { std::vector<std::string> lines; std::transform( bus.second.begin(), bus.second.end(), std::back_inserter( lines ), line_to_variable() ); os << ".inputbus " << bus.first << " " << boost::algorithm::join( lines, " " ) << std::endl; } for ( const auto& bus : circ.outputbuses().buses() ) { std::vector<std::string> lines; std::transform( bus.second.begin(), bus.second.end(), std::back_inserter( lines ), line_to_variable() ); os << ".outputbus " << bus.first << " " << boost::algorithm::join( lines, " " ) << std::endl; } for ( const auto& bus : circ.statesignals().buses() ) { std::vector<std::string> lines; std::transform( bus.second.begin(), bus.second.end(), std::back_inserter( lines ), line_to_variable() ); os << ".state " << bus.first << " " << boost::algorithm::join( lines, " " ) << std::endl; } for ( const auto& module : circ.modules() ) { os << ".module " << module.first << std::endl; write_realization_settings module_settings; module_settings.version.clear(); module_settings.header.clear(); write_realization( *module.second, os, module_settings ); } os << ".begin" << std::endl; std::string cmd; for ( const auto& g : circ ) { cmd = settings.type_label( g ); std::vector<std::string> lines; // Peres is special boost::transform( g.controls(), std::back_inserter( lines ), line_to_variable() ); boost::transform( g.targets(), std::back_inserter( lines ), line_to_variable() ); os << cmd << " " << boost::algorithm::join( lines, " " ); 
boost::optional<const std::map<std::string, std::string>&> annotations = circ.annotations( g ); if ( annotations ) { std::string sannotations; for ( const auto& p : *annotations ) { sannotations += boost::str( boost::format( " %s=\"%s\"" ) % p.first % p.second ); } os << " #@" << sannotations; } os << std::endl; } os << ".end" << std::endl; }
void plotMakerConstBin(string htag,string newDatasetName, unsigned int nNewFiles) { if (newDatasetName=="") { cerr << "no new dataset name? what the hell do you want me to do? Returning..." << endl; return; } // variables to plot are in defined in plotVars.h std::vector<string> variables(plotVars, plotVars + sizeof plotVars / sizeof plotVars[0]); TChain * newFileChain=new TChain("CollectionTree"); if(nNewFiles == 0) { cerr << "No files in new dataset? This shouldn't happen. Something has gone wrong. Check inputs" << endl; return; } else if(nNewFiles == 1) { // if its only 1 file, then its not a folder newFileChain->Add(newDatasetName.c_str()); } else // it's folder! { for(int i = 1; i <= nNewFiles;i++) { size_t found = newDatasetName.find_last_of("/"); string baseFileName=newDatasetName.substr(found+1); size_t f = baseFileName.find(".root"); string fileNum=""; if(i < 10) fileNum="00"+std::to_string(i); else if (i < 100) fileNum="0"+std::to_string(i); else fileNum=std::to_string(i); baseFileName.replace(f, baseFileName.length(), "."+fileNum+".root"); string path=newDatasetName+"/"+baseFileName; newFileChain->Add(path.c_str()); } } float progress=0.0; size_t found = newDatasetName.find_last_of("/"); string baseFileName=newDatasetName.substr(found+1); string outfilename="samples/"+htag+"/"+baseFileName; TFile f(outfilename.c_str(),"recreate"); for(unsigned int i =0; i< variables.size();i++) { //cout << variables[i] << endl; //if (variables[i] != "HGamEventInfoAuxDyn.m_yy_truthVertex" ) continue; int barWidth = 70; std::cout << "["; int pos = barWidth * progress; for (int i = 0; i < barWidth; ++i) { if (i < pos) std::cout << "="; else if (i == pos) std::cout << ">"; else std::cout << " "; } std::cout << "] " << int(progress * 100.0) << " %\r"; std::cout.flush(); //size_t found = newDatasetName.find_last_of("/"); //string baseFileName=newDatasetName.substr(found+1); //cout << "1" << endl; TCanvas * c1 =new TCanvas("c1"); if(newFileChain->Draw(variables[i].c_str(),"") == 
-1) { cout << "Draw command failed!" << endl; progress += 1.0/variables.size(); continue; } //cout << "2" << endl; TH1F *htemp = (TH1F*)gPad->GetPrimitive("htemp"); if (htemp==0) { cout << "histo is empty for var " << variables[i] << ", must have no entries" << endl; continue; } //cout << "2" << endl; //cout << htemp << endl; htemp->SetLineColor( kRed); float MeanVal=htemp->GetMean(); float stdev =htemp->GetStdDev(); delete c1; float xmin = MeanVal-2*stdev; long xminRound=floor(xmin/1000)*1000; if(xmin<-10000) // case for variables like Energy where there is no reason to have a negative xmin { xminRound = 0; } float xmax = MeanVal+2*stdev; long xmaxRound=floor(xmax/1000)*1000; if(xmin==xmax) // case for where there is no std dev for variables like m_ee in ggH125_small samples, so xmin==xmax and root gets confused { xmin = xmin - 1000; xmax = xmax + 1000; } string lowCut=variables[i]+" > "+std::to_string(xmin); string highCut=variables[i]+" < "+std::to_string(xmax); string fullCut=lowCut+" && "+highCut+" && HGamEventInfoAuxDyn.isPassedPreselection"; long nBins=(xmaxRound-xminRound)/1000; TCanvas * c2 =new TCanvas("c2"); string plotVar=variables[i]+">>htemp("+to_string(nBins)+","+to_string(xminRound)+","+to_string(xmaxRound)+")"; newFileChain->Draw(plotVar.c_str(),fullCut.c_str(),""); htemp = (TH1F*)gPad->GetPrimitive("htemp"); //cout << "3" << endl; int maxITER = 5; int iter=0; //while( (htemp == 0 || htemp->Integral() < 5 ) && iter <= maxITER ) //{ // fullCut = increaseCut(variables[i],MeanVal,stdev,xmin,xmax); // newFileChain->Draw(variables[i].c_str(),fullCut.c_str()); // htemp = (TH1F*)gPad->GetPrimitive("htemp"); // iter++; // // if(iter==maxITER) cout << "max iterations reached, might be no plot for " << variables[i] << endl; //} htemp->SetNameTitle(variables[i].c_str(),variables[i].c_str()); htemp->Write(); delete c2; progress += 1.0/variables.size(); } int barWidth=70; std::cout << "["; int pos = barWidth * 1; for (int i = 0; i < barWidth; ++i) { if (i < 
pos) std::cout << "="; else if (i == pos) std::cout << ">"; else std::cout << " "; } std::cout << "] " << int(1 * 100.0) << " %\r"; std::cout.flush(); cout << endl; f.Close(); }
// Writes one octree node's data for the given variable into the SILO file:
// a collinear quadmesh (node coordinates) plus a zone-centered quadvar
// (the variable values), placed in the per-level directory.  Ghost zones of
// width bw are stripped on every side.  Signals `sync` when done.
void perform_write(
    hpx::lcos::local::channel<void>& sync
  , octree_server& e
  , DBfile* file
  , std::vector<std::string> const& directory_names
  , std::string const& variable_name
  , boost::uint64_t variable_index
    )
{ // {{{
    boost::uint64_t const bw = science().ghost_zone_length;
    boost::uint64_t const gnx = config().grid_node_length;

    boost::uint64_t level = e.get_level();

    // One more node than zones along each axis (fence-post).
    int nnodes[] = {
        int(gnx - 2 * bw + 1)
      , int(gnx - 2 * bw + 1)
      , int(gnx - 2 * bw + 1)
    };

    int nzones[] = {
        int(gnx - 2 * bw)
      , int(gnx - 2 * bw)
      , int(gnx - 2 * bw)
    };

    //char* coordinate_names[] = { (char*) "X", (char*) "Y", (char*) "Z" };

    // Per-axis node coordinate arrays.  NOTE(review): the three inner
    // new[]s are only released by the explicit delete[]s at the bottom of
    // this function -- an exception in between would leak them.
    boost::scoped_array<double*> coordinates(new double* [3]);
    coordinates[0] = new double [nnodes[0]];
    coordinates[1] = new double [nnodes[1]];
    coordinates[2] = new double [nnodes[2]];

    // Flat buffer for the zone-centered values of the selected variable.
    boost::scoped_array<double> variables
        (new double [nzones[0] * nzones[1] * nzones[2]]);

    // Face coordinates of the interior (non-ghost) nodes.
    for (boost::uint64_t i = bw; i < (gnx - bw + 1); ++i)
    {
        coordinates[0][i - bw] = e.x_face(i);
        coordinates[1][i - bw] = e.y_face(i);
        coordinates[2][i - bw] = e.z_face(i);
    }

    // Copy the selected component into the flat buffer; i varies fastest.
    for (boost::uint64_t i = bw; i < (gnx - bw); ++i)
        for (boost::uint64_t j = bw; j < (gnx - bw); ++j)
            for (boost::uint64_t k = bw; k < (gnx - bw); ++k)
            {
                boost::uint64_t index = (i - bw)
                                      + (j - bw) * nzones[0]
                                      + (k - bw) * nzones[0] * nzones[1];
                variables[index] = e(i, j, k)[variable_index];
            }

    array<boost::uint64_t, 3> location = e.get_location();

    // Object names are made unique per refinement level and grid location.
    std::string mesh_name = boost::str( boost::format("mesh_L%i_%i_%i_%i")
                          % level % location[0] % location[1] % location[2]);

    std::string value_name = boost::str( boost::format("%s_L%i_%i_%i_%i")
                           % variable_name
                           % level % location[0] % location[1] % location[2]);

    int error = DBSetDir(file, directory_names[level].c_str());
    OCTOPUS_ASSERT(error == 0);

    {
        // NOTE(review): neither optlist below is DBFreeOptlist'd --
        // presumably a small leak per write; confirm against the SILO docs.
        DBoptlist* optlist = DBMakeOptlist(1);

        // REVIEW: Verify this.
        int type = DB_ROWMAJOR;
        DBAddOption(optlist, DBOPT_MAJORORDER, &type);

        error = DBPutQuadmesh(file
                            , mesh_name.c_str()
                            //, coordinate_names
                            , NULL // SILO docs say this is ignored.
                            , coordinates.get()
                            , nnodes
                            , 3, DB_DOUBLE, DB_COLLINEAR, optlist);
        OCTOPUS_ASSERT(error == 0);
    }

    {
        DBoptlist* optlist = DBMakeOptlist(3);

        // REVIEW: Verify this.
        int type = DB_ROWMAJOR;
        DBAddOption(optlist, DBOPT_MAJORORDER, &type);

        error = DBPutQuadvar1(file
                            , value_name.c_str()
                            , mesh_name.c_str()
                            , variables.get()
                            , nzones
                            , 3, NULL, 0, DB_DOUBLE, DB_ZONECENT, optlist);
        OCTOPUS_ASSERT(error == 0);
    }

    // Release the per-axis coordinate arrays (the scoped_array only owns the
    // outer pointer array).
    delete[] coordinates[0];
    delete[] coordinates[1];
    delete[] coordinates[2];

    // Tell the caller this variable has been written.
    sync.post();
} // }}}
/*!
  Returns all the variables declared in the global context.

  \sa functions(), classes()
*/
QStringList QSInterpreter::variables(bool includeStatic, bool includeCustom,
                                     bool includeMemberVariables) const
{
    // A null context string selects the global context in the overload.
    const QString globalContext = QString::null;
    return variables(globalContext, includeStatic, includeCustom,
                     includeMemberVariables);
}
// Computes the fine-level residual (with homogeneous boundary conditions)
// for the given phi/rhs pair and averages it down into a_resCoar on the
// next-coarser multigrid level.  All arguments must be single-component.
void EBPoissonOp::
restrictResidual(LevelData<EBCellFAB>&       a_resCoar,
                 LevelData<EBCellFAB>&       a_phiThisLevel,
                 const LevelData<EBCellFAB>& a_rhsThisLevel)
{
  CH_TIME("EBPoissonOp::restrictResidual");

  CH_assert(a_resCoar.nComp() == 1);
  CH_assert(a_phiThisLevel.nComp() == 1);
  CH_assert(a_rhsThisLevel.nComp() == 1);

  // Scratch residual on this level, ghosted like the incoming RHS.
  LevelData<EBCellFAB> resThisLevel;
  bool homogeneous = true;

  EBCellFactory ebcellfactTL(m_eblg.getEBISL());
  IntVect ghostVec = a_rhsThisLevel.ghostVect();

  resThisLevel.define(m_eblg.getDBL(), 1, ghostVec, ebcellfactTL);

  // Get the residual on the fine grid
  residual(resThisLevel, a_phiThisLevel, a_rhsThisLevel, homogeneous);

  // now use our nifty averaging operator
  Interval variables(0, 0);
  CH_assert(m_hasMGObjects);
  m_ebAverageMG.average(a_resCoar, resThisLevel, variables);

#ifdef DO_EB_RHS_CORRECTION
  // Apply error-correction modification to restricted RHS
  // Right now this only works with Dirichlet BC's, and only makes sense for the EB
  int correctionType = 0;   // Change this to activate RHS correction
  if (correctionType != 0)
    {
      for (DataIterator dit = a_resCoar.disjointBoxLayout().dataIterator(); dit.ok(); ++dit)
        {
          EBCellFAB& resFAB = a_resCoar[dit()];       // Extract the FAB for this box
          const EBISBox& ebis = resFAB.getEBISBox();  // Get the set of all IntVect indices

          // Iterate over the parts of the RHS corresponding to the irregular cells,
          // correcting the RHS in each cell
          VoFIterator ebvofit(ebis.getIrregIVS(ebis.getRegion()), ebis.getEBGraph());
          for (ebvofit.reset(); ebvofit.ok(); ++ebvofit)
            {
              for (int icomp = 0; icomp < resFAB.nComp(); ++icomp)  // For each component of the residual on this VoF
                {
                  if (correctionType == 1)
                    {
                      // Setting the residual to zero on the irregular cells gives convergence,
                      // though the rate isn't as good as the full correction
                      resFAB(ebvofit(), icomp) = 0.0;
                    }
                  else if (correctionType == 2)
                    {
                      // Kludge valid only for pseudo-1D test case.  May not work for you.
                      Real kappa = ebis.volFrac(ebvofit());
                      kappa = (kappa > 0.5 ? kappa : 0.5);  // Floor on kappa to prevent dividing by a tiny number
                      Real rhoCoeff = (kappa + 0.5)/(2.0*kappa);
                      resFAB(ebvofit(), icomp) *= (1.0 - rhoCoeff);
                    }
                  // else silently do nothing
                }
            }
        }
    }
#endif
}
// Expands an @each directive: evaluates the list (or map) expression, binds
// the loop variable(s) for every element (or key/value pair) in a fresh
// scope, and appends the expanded body once per iteration.  Always returns 0
// (statements yield no value).  All AST nodes are arena-allocated via
// placement new into ctx.mem.
Statement* Expand::operator()(Each* e)
{
  vector<string> variables(e->variables());
  // Evaluate the iterated expression in the current environment.
  Expression* expr = e->list()->perform(eval->with(env, backtrace));
  List* list = 0;
  Map* map = 0;
  if (expr->concrete_type() == Expression::MAP) {
    map = static_cast<Map*>(expr);
  }
  else if (expr->concrete_type() != Expression::LIST) {
    // A scalar iterates as a one-element comma list.
    list = new (ctx.mem) List(expr->path(), expr->position(), 1, List::COMMA);
    *list << expr;
  }
  else {
    list = static_cast<List*>(expr);
  }
  // Fresh scope holding the loop variables; slots are re-assigned on every
  // iteration and the scope is unlinked again at the end.
  Env new_env;
  for (size_t i = 0, L = variables.size(); i < L; ++i) new_env[variables[i]] = 0;
  new_env.link(env);
  env = &new_env;
  Block* body = e->block();

  if (map) {
    for (auto key : map->keys()) {
      Expression* k = key->perform(eval->with(env, backtrace));
      Expression* v = map->at(key)->perform(eval->with(env, backtrace));

      if (variables.size() == 1) {
        // Single loop variable: bind it to a (key value) space-separated pair.
        List* variable = new (ctx.mem) List(map->path(), map->position(), 2, List::SPACE);
        *variable << k;
        *variable << v;
        (*env)[variables[0]] = variable;
      } else {
        // Two variables: key and value are bound separately.
        (*env)[variables[0]] = k;
        (*env)[variables[1]] = v;
      }
      append_block(body);
    }
  }
  else {
    for (size_t i = 0, L = list->length(); i < L; ++i) {
      List* variable = 0;
      if ((*list)[i]->concrete_type() != Expression::LIST || variables.size() == 1) {
        // Wrap non-list elements (or any element when there is a single
        // variable) so the destructuring loop below is uniform.
        variable = new (ctx.mem) List((*list)[i]->path(), (*list)[i]->position(), 1, List::COMMA);
        *variable << (*list)[i];
      } else {
        variable = static_cast<List*>((*list)[i]);
      }
      for (size_t j = 0, K = variables.size(); j < K; ++j) {
        if (j < variable->length()) {
          (*env)[variables[j]] = (*variable)[j]->perform(eval->with(env, backtrace));
        } else {
          // More variables than values: the extras are bound to null.
          (*env)[variables[j]] = new (ctx.mem) Null(expr->path(), expr->position());
        }
      }
      append_block(body);
    }
  }
  // Restore the enclosing scope.
  env = new_env.parent();
  return 0;
}
// Runs the booked TMVA method `method_name` over the standard input files
// (training/test/check_correlation/check_agreement) and writes the requested
// CSV/ROOT prediction files.  In INTERMEDIATE mode only the training ROOT
// file is produced and its name is returned; otherwise all outputs are
// written and, in FINAL mode, the python test script is invoked before the
// method name is returned.
TString TMVAPredict(TString method_name, EnumPredictMode predictMode = EnumPredictMode::FINAL)
{
    std::cout << "------------ predict with : " << method_name << " ------ " << std::endl;
    std::vector<std::string> inputNames = {"training", "test", "check_correlation", "check_agreement"};

    // Which columns each output needs, and the canonical CSV column order.
    std::map<std::string, std::vector<std::string>> varsForInput;
    std::vector<std::string> variableOrder = {"id", "signal", "mass", "min_ANNmuon", "prediction"};

    varsForInput["training"].emplace_back ("prediction");
    if (predictMode != EnumPredictMode::INTERMEDIATE)
    {
        varsForInput["training"].emplace_back ("id");
        varsForInput["training"].emplace_back ("signal");
        varsForInput["training"].emplace_back ("mass");
        varsForInput["training"].emplace_back ("min_ANNmuon");

        varsForInput["test"].emplace_back ("prediction");
        varsForInput["test"].emplace_back ("id");

        varsForInput["check_agreement"].emplace_back ("signal");
        varsForInput["check_agreement"].emplace_back ("weight");
        varsForInput["check_agreement"].emplace_back ("prediction");

        varsForInput["check_correlation"].emplace_back ("mass");
        varsForInput["check_correlation"].emplace_back ("prediction");
    }

    // Which file formats to produce per input.
    std::map<std::string, std::vector<std::string>> createForInput;
    createForInput["training"].emplace_back ("root");
    if (predictMode != EnumPredictMode::INTERMEDIATE)
    {
        createForInput["training"].emplace_back ("csv");
        createForInput["test"].emplace_back ("csv");
        createForInput["check_agreement"].emplace_back ("csv");
        createForInput["check_correlation"].emplace_back ("csv");
    }

    // -------- prepare the Reader ------
    TMVA::Tools::Instance();
    std::cout << "==> Start TMVAPredict" << std::endl;
    TMVA::Reader* reader = new TMVA::Reader( "!Color:!Silent" );

    // One float slot per input variable; the reader stores pointers into this
    // vector, so it must outlive every EvaluateMVA call below.
    std::vector<Float_t> variables (variableNames.size ());
    auto itVar = begin (variables);
    for (auto varName : variableNames)
    {
        Float_t* pVar = &(*itVar);
        // NOTE(review): the original computed varName.substr(0, find(":="))
        // and discarded the result (a no-op).  TMVA accepts the full
        // "name := expression" form, so the variable is registered as-is.
        reader->AddVariable(varName.c_str(), pVar);
        (*itVar) = 0.0;
        ++itVar;
    }

    // Spectators are not known for the reader (in test.csv).
    // BUG FIX: the original registered the address of a loop-local Float_t
    // (dangling after each iteration) and also incremented itVar past
    // end(variables) -- undefined behaviour.  Keep the spectator storage
    // alive for the reader's lifetime instead.
    std::vector<Float_t> spectators (spectatorNames.size (), 0.0);
    auto itSpec = begin (spectators);
    for (auto varName : spectatorNames)
    {
        reader->AddSpectator (varName.c_str(), &(*itSpec));
        ++itSpec;
    }

    TString dir    = "weights/";
    TString prefix = "TMVAClassification";
    TString weightfile = dir + prefix + TString("_") + method_name + TString(".weights.xml");
    std::cout << "weightfile name : " << weightfile.Data () << std::endl;
    reader->BookMVA( method_name, weightfile );

    // --------- for each of the input files
    for (auto inputName : inputNames)
    {
        // --- buffers the branches are read into
        Int_t id;
        Float_t prediction;
        Float_t weight;
        Float_t min_ANNmuon;
        Float_t mass;
        Float_t signal;

        // --- open input file
        TFile* input(0);
        std::stringstream infilename;
        infilename << pathToData.Data () << inputName << ".root";
        std::cout << "infilename = " << infilename.str ().c_str () << std::endl;
        input = TFile::Open (infilename.str ().c_str ());
        TTree* tree = (TTree*)input->Get("data");

        // --- connect only the branches this output needs
        if (contains (varsForInput, inputName, "id"))
            tree->SetBranchAddress("id", &id);
        if (contains (varsForInput, inputName, "signal"))
            tree->SetBranchAddress("signal", &signal);
        if (contains (varsForInput, inputName, "min_ANNmuon"))
            tree->SetBranchAddress("min_ANNmuon", &min_ANNmuon);
        if (contains (varsForInput, inputName, "mass"))
            tree->SetBranchAddress("mass", &mass);
        if (contains (varsForInput, inputName, "weight"))
            tree->SetBranchAddress("weight", &weight);

        // variables for prediction (branchName: renamed from the original
        // loop variable, which shadowed the outer `inputName`)
        itVar = begin (variables);
        for (auto branchName : variableNames)
        {
            Float_t* pVar = &(*itVar);
            tree->SetBranchAddress(branchName.c_str(), pVar);
            ++itVar;
        }

        bool doCSV  = contains (createForInput, inputName, "csv");
        bool doROOT = contains (createForInput, inputName, "root");

        // ---- make ROOT file
        TString rootFileName;
        TFile* outRootFile (NULL);
        TTree* outTree (NULL);
        if (doROOT)
        {
            rootFileName = TString (inputName.c_str ()) + TString ("_prediction__") + method_name + TString (".root");
            outRootFile = new TFile (rootFileName.Data (), "RECREATE");
            outTree = new TTree("data", "data");
            if (contains (varsForInput, inputName, "id"))          outTree->Branch ("id", &id, "F");
            if (contains (varsForInput, inputName, "signal"))      outTree->Branch ("signal", &signal, "F");
            if (contains (varsForInput, inputName, "min_ANNmuon")) outTree->Branch ("min_ANNmuon", &min_ANNmuon, "F");
            if (contains (varsForInput, inputName, "mass"))        outTree->Branch ("mass", &mass, "F");
            if (contains (varsForInput, inputName, "weight"))      outTree->Branch ("weight", &weight, "F");
            if (contains (varsForInput, inputName, "prediction"))  outTree->Branch ("prediction", &prediction, "F");
        }

        // ---- prepare csv file
        std::ofstream outfile;
        if (doCSV)
        {
            std::stringstream outfilename;
            outfilename << inputName << "_prediction__" << method_name.Data () << ".csv";
            std::cout << outfilename.str () << std::endl;
            outfile.open (outfilename.str ());
            // header row in the canonical column order
            bool isFirst = true;
            for (auto varName : variableOrder)
            {
                if (contains (varsForInput, inputName, varName))
                {
                    if (!isFirst)
                        outfile << ",";
                    isFirst = false;
                    outfile << varName;
                }
            }
            outfile << "\n";
        }

        for (Long64_t ievt = 0; ievt < tree->GetEntries(); ievt++)
        {
            tree->GetEntry(ievt);

            // predict, clamped into [0, 1]
            prediction = reader->EvaluateMVA (method_name);
            prediction = std::max<double> (0.0, std::min<double> (1.0, prediction));

            if (doCSV)
            {
                for (auto varName : variableOrder)
                {
                    if (varName == "id" && contains (varsForInput, inputName, "id")) outfile << id << ",";
                    if (varName == "signal" && contains (varsForInput, inputName, "signal")) outfile << signal << ",";
                    if (varName == "min_ANNmuon" && contains (varsForInput, inputName, "min_ANNmuon")) outfile << min_ANNmuon << ",";
                    if (varName == "mass" && contains (varsForInput, inputName, "mass")) outfile << mass << ",";
                    if (varName == "weight" && contains (varsForInput, inputName, "weight")) outfile << weight << ",";
                    if (varName == "prediction" && contains (varsForInput, inputName, "prediction")) outfile << prediction;
                }
                outfile << "\n";
            }
            if (doROOT)
            {
                outTree->Fill ();
            }
        }

        if (doCSV)
        {
            outfile.close();  // BUG FIX: was closed even when never opened
        }
        input->Close();
        if (doROOT)
        {
            outRootFile->Write ();
        }

        // INTERMEDIATE mode: only the first input ("training") is processed.
        if (predictMode == EnumPredictMode::INTERMEDIATE)
        {
            delete reader;
            std::cout << "DONE predict INTERMEDIATE" << std::endl;
            return rootFileName;
        }
    }

    delete reader;
    if (predictMode == EnumPredictMode::FINAL)
    {
        std::cout << "DONE predict FINAL" << std::endl;
        // run the external scoring script on the produced files
        TString cmd (".! python tests.py ");
        cmd += method_name;
        gROOT->ProcessLine (cmd);
    }
    return method_name;
}
void createCDF () { std::cout << "==> create CDF" << std::endl; std::vector<std::string> inputNames = {"training"}; for (auto inputName : inputNames) { std::stringstream outfilename; outfilename << inputName << "_cdf__" << inputName << ".root"; std::cout << outfilename.str () << std::endl; /* return; */ std::stringstream infilename; infilename << pathToData.Data () << inputName << ".root"; TFile *input(0); std::cout << "infilename = " << infilename.str ().c_str () << std::endl; input = TFile::Open (infilename.str ().c_str ()); TTree* tree = (TTree*)input->Get("data"); // variables for prediction std::cout << "prepare variables" << std::endl; auto localVariableNames = variableNames+additionalVariableNames; std::vector<Float_t> variables (localVariableNames.size ()); auto itVar = begin (variables); for (auto inputName : localVariableNames) { Float_t* pVar = &(*itVar); tree->SetBranchAddress(inputName.c_str(), pVar); ++itVar; } Int_t id; // id field tree->SetBranchAddress("id", &id); Long64_t ievtEnd = tree->GetEntries (); ievtEnd = 100; std::cout << "process entries #" << ievtEnd << std::endl; std::vector<double> sumSmaller (ievtEnd, 0.0); struct Vars { typedef std::vector<Float_t>::const_iterator iterator; Vars (iterator itBegin, iterator itEnd, Float_t _weight, Int_t _id, Long64_t _order) : variables (itBegin, itEnd) , weight (_weight) , id (_id) , order (_order) { } std::vector<Float_t> variables; Int_t id; Float_t weight; Float_t cdf; Long64_t order; bool operator< (const Vars& other) const { for (auto itOther = begin (other.variables), it = begin (variables), itOtherEnd = end (other.variables), itEnd = end (variables); it != itEnd && itOther != itOtherEnd; ++itOther, ++it) { //std::cout << "(" << *it << "," << *itOther << ")" << std::flush; if (*it >= *itOther) { // std::cout << "X" << std::flush; return false; } else std::cout << "D" << std::flush; } std::cout << "U" << std::flush; return true; } }; Float_t weightSum (0.0); std::vector<Vars> vars; for (Long64_t 
ievt=0; ievt < ievtEnd; ievt++) { tree->GetEntry (ievt); std::cout << "." << std::flush; Float_t weight = 1.0; vars.emplace_back (begin (variables), end (variables), weight, id, ievt); weightSum += weight; } std::cout << "provide values" << std::endl; for (auto it = begin (vars), itEnd = end (vars); it != itEnd; ++it) { std::cout << "-" << std::flush; for (auto itCmp = begin (vars), itCmpEnd = end (vars); itCmp != itCmpEnd; ++itCmp) { if (*it < *itCmp) { std::cout << "!" << std::flush; break; } else { std::cout << "+" << std::flush; (*it).cdf += (*itCmp).weight; } } } std::cout << "normalize" << std::endl; for_each (begin (vars), end (vars), [weightSum](Vars& v) { v.cdf /= weightSum; }); // sort by order std::sort (begin (vars), end (vars), [](const Vars& lhs, const Vars& rhs){ return lhs.order < rhs.order; }); input->Close(); std::cout << "store data" << std::endl; TFile* outFile = new TFile (outfilename.str ().c_str (), "RECREATE"); TTree* outTree = new TTree("cdf_raw","cdf_raw"); Float_t cdf (0.0); outTree->Branch ("id", &id, "F"); outTree->Branch ("cdf", &cdf, "F"); for (auto v : vars) { id = v.id; cdf = v.cdf; outTree->Fill (); } outFile->Write (); outFile->Close (); } }
// Runs a trained TMVA autoencoder ("regression" target) over the training
// sample and writes a tree "transformed" holding, per input event and per
// forced signal value (0 and 1): the reconstructed variables, the original
// input variables ("<name>_in" branches) and the original signal value
// ("signal_original").
//
// @param method_name  TMVA method whose weight file
//                     weights/TMVAAutoencoder_<method_name>.weights.xml is booked.
// @return the output ROOT file name (pathToData + "transformed_<method>.root").
//
// FIX: removed unused locals (signal/outSignal/inSignal), resolved two
// shadowing issues (inner 'outfilename' and inner 'inputName'), and the
// output file is now properly closed after the loop.
TString useAutoencoder (TString method_name)
{
    TMVA::Tools::Instance();
    std::cout << "==> Start useAutoencoder" << std::endl;

    TMVA::Reader *reader = new TMVA::Reader( "!Color:!Silent" );

    // One Float_t slot per variable; the reader reads predictions from these.
    std::vector<std::string> localVariableNames (variableNames+additionalVariableNames);
    std::vector<Float_t> variables (localVariableNames.size ());
    auto itVar = begin (variables);
    for (auto varName : localVariableNames)
    {
        Float_t* pVar = &(*itVar);
        reader->AddVariable(varName.c_str(), pVar);
        (*itVar) = 0.0;
        ++itVar;
    }
    // position of the "signal" variable inside the vector (== size() if absent)
    int idxSignal = std::distance (localVariableNames.begin (),
                                   std::find (localVariableNames.begin (),
                                              localVariableNames.end (),
                                              std::string ("signal")));

    TString dir    = "weights/";
    TString prefix = "TMVAAutoencoder";
    TString weightfile = dir + prefix + TString("_") + method_name + TString(".weights.xml");

    TString outPrefix = "transformed";
    TString outfilename = pathToData + outPrefix + TString("_") + method_name + TString(".root");

    reader->BookMVA( method_name, weightfile );

    TFile* outFile = new TFile (outfilename.Data (), "RECREATE");

    std::vector<std::string> inputNames = {"training"};
    std::map<std::string,std::vector<std::string>> varsForInput;
    varsForInput["training"].emplace_back ("id");
    varsForInput["training"].emplace_back ("signal");

    for (auto inputName : inputNames)
    {
        // NOTE(review): this per-input name is only printed; the actual output
        // file is the single 'outfilename' TString opened above — confirm the
        // per-input name was not meant to be used for the file itself.
        std::stringstream perInputName; // renamed: previously shadowed 'outfilename'
        perInputName << inputName << "_transformed__" << method_name.Data () << ".root";
        std::cout << perInputName.str () << std::endl;

        std::stringstream infilename;
        infilename << pathToData.Data () << inputName << ".root";

        // output tree: reconstructed variables plus "<name>_in" input copies
        TTree* outTree = new TTree("transformed","transformed");
        std::vector<Float_t> outVariables (localVariableNames.size ());
        itVar = begin (variables);
        auto itOutVar = begin (outVariables);
        for (auto varName : localVariableNames)
        {
            Float_t* pOutVar = &(*itOutVar);
            outTree->Branch (varName.c_str (), pOutVar, "F");
            (*itOutVar) = 0.0;
            ++itOutVar;

            Float_t* pVar = &(*itVar);
            std::stringstream svar;
            svar << varName << "_in";
            outTree->Branch (svar.str ().c_str (), pVar, "F");
            (*itVar) = 0.0;
            ++itVar;
        }
        Float_t signal_original = 0.0;
        outTree->Branch ("signal_original", &signal_original, "F");

        // open the input tree
        TFile *input(0);
        std::cout << "infilename = " << infilename.str ().c_str () << std::endl;
        input = TFile::Open (infilename.str ().c_str ());
        TTree* tree = (TTree*)input->Get("data");

        Int_t ids; // id field, hooked up only if requested for this input
        if (std::find (varsForInput[inputName].begin (),
                       varsForInput[inputName].end (), "id")
            != varsForInput[inputName].end ())
            tree->SetBranchAddress("id", &ids);

        // variables for prediction
        itVar = begin (variables);
        for (auto varName : localVariableNames) // renamed: previously shadowed 'inputName'
        {
            Float_t* pVar = &(*itVar);
            tree->SetBranchAddress (varName.c_str(), pVar);
            ++itVar;
        }

        for (Long64_t ievt=0; ievt < tree->GetEntries(); ievt++)
        {
            tree->GetEntry(ievt);
            signal_original = variables.at (idxSignal);
            // evaluate once with signal forced to 0 and once forced to 1
            for (int forcedSignal = 0; forcedSignal <= 1; ++forcedSignal)
            {
                variables.at (idxSignal) = forcedSignal;
                // bind by const ref: EvaluateRegression's result need not be copied
                const std::vector<Float_t>& regressionValues = reader->EvaluateRegression (method_name);
                size_t idx = 0;
                for (auto it = std::begin (regressionValues), itEnd = std::end (regressionValues);
                     it != itEnd; ++it)
                {
                    outVariables.at (idx) = *it;
                    ++idx;
                }
                outTree->Fill ();
            }
        }
        outFile->Write ();
        input->Close();
    }
    outFile->Close (); // FIX: flush and close the output file (was never closed)

    delete reader;
    return outfilename;
}
QStringList pEnvironmentVariablesManager::variables( bool keepDisabled ) const
{
    // Fetch the full variable set and let the model render it as a string
    // list, optionally keeping entries that are disabled.
    const auto& allVariables = variables();
    return pEnvironmentVariablesModel::variablesToStringList( allVariables, keepDisabled );
}
// Builds an in-memory snapshot of a problem instance read through the reader
// plugin function table: entity counts, per-entity type codes, presence
// lists (which variables appear in which constraints/objectives and vice
// versa), per-function variable-type tallies, and bounds.
//
// Memory layout (single allocations carved up by pointer offsets; assumed to
// be released by the destructor — TODO confirm):
//  - m_variableTypes: [variable types | function types (constraints then
//    objectives) | constraint types | objective types], aliased by
//    m_functionTypes / m_constraintTypes / m_objectiveTypes.
//  - m_varBounds: [lo,hi] pairs, variables first, then constraints;
//    m_constrBounds aliases the constraint half.
ProblemInstance::ProblemInstance (RPHandle hndl, SharedPtr<ReaderPluginFunctionTable> functions)
    : handle(hndl), ftable(functions)
{
    int i;

    // Cache the basic counts once; the variables()/constraints()/objectives()
    // accessors used below presumably read these members — verify.
    m_variables = ftable->variables(handle);
    m_constraints = ftable->constraints(handle);
    m_objectives = ftable->objectives(handle);

    // One char block hosting all four type-code arrays (see layout above).
    m_variableTypes = new char[variables()+2*constraints()+2*objectives()];
    m_functionTypes = &m_variableTypes[variables()];
    m_constraintTypes = &m_functionTypes[constraints()+objectives()];
    m_objectiveTypes = &m_constraintTypes[constraints()];

    for (i = 0; i < variables(); ++i) {
        m_variableTypes[i] = ftable->variableType(handle, i);
    }
    // Function types: constraint functions ('c') first, then objectives ('o').
    for (i = 0; i < constraints(); ++i) {
        m_functionTypes[i] = ftable->functionType(handle, 'c', i);
    }
    for (i = 0; i < objectives(); ++i) {
        m_functionTypes[constraints()+i] = ftable->functionType(handle, 'o', i);
    }
    for (i = 0; i < constraints(); ++i) {
        m_constraintTypes[i] = ftable->constraintType(handle, i);
    }
    for (i = 0; i < objectives(); ++i) {
        m_objectiveTypes[i] = ftable->objectiveType(handle, i);
    }

    // Scratch buffer reused by every presence query below; sized for the
    // larger of its two uses (two counted lists vs. one plain index list).
    int* tmp = new int[std::max(objectives()+constraints()+2, variables())];
    m_presenceIndexes = new unsigned[2*variables()+constraints()+objectives()];

    // Per-variable presence. variablePresence() fills two counted lists
    // ([count, idx...]): constraints containing the variable at tmp[0..],
    // objectives containing it at tmp[constraints()..]. Both are appended to
    // m_presences, and the start offsets are recorded BEFORE each insert.
    for (i = 0; i < variables(); ++i) {
        ftable->variablePresence(handle, i, tmp, &tmp[constraints()]);
        m_presences.reserve(m_presences.size()+2+tmp[0]+tmp[constraints()]);
        varConstrPresenceIndex(i) = m_presences.size();
        m_presences.insert(m_presences.end(), tmp, &tmp[tmp[0]+1]);
        varObjPresenceIndex(i) = m_presences.size();
        m_presences.insert(m_presences.end(), &tmp[constraints()], &tmp[constraints()+tmp[constraints()]+1]);
    }

    // Per-function tallies: 4 ints per function —
    // [total vars, count of 'r', count of 'b', count of 'i'] type codes.
    m_variableCounts = new int[4*(constraints()+objectives())];
    std::fill(m_variableCounts, &m_variableCounts[4*(constraints()+objectives())], 0);

    // Constraint -> variables presence lists ([count, idx...]) plus tallies.
    for (i = 0; i < constraints(); ++i) {
        int vars = ftable->constraintVariables(handle, i, tmp);
        m_presences.reserve(m_presences.size()+1+vars);
        constraintPresenceIndex(i) = m_presences.size();
        m_presences.push_back(vars);
        m_presences.insert(m_presences.end(), tmp, &tmp[vars]);
        unsigned offset = 4*i;
        m_variableCounts[offset] = vars;
        for (int j = 0; j < vars; ++j) {
            switch (variableType(tmp[j])) {
            case 'r': ++m_variableCounts[offset+1]; break;
            case 'b': ++m_variableCounts[offset+2]; break;
            case 'i': ++m_variableCounts[offset+3]; break;
            }
        }
    }
    // Objective -> variables presence lists; tally slots follow the
    // constraint slots in m_variableCounts.
    for (i = 0; i < objectives(); ++i) {
        int vars = ftable->objectiveVariables(handle, i, tmp);
        m_presences.reserve(m_presences.size()+1+vars);
        objectivePresenceIndex(i) = m_presences.size();
        m_presences.push_back(vars);
        m_presences.insert(m_presences.end(), tmp, &tmp[vars]);
        unsigned offset = 4*(constraints()+i);
        m_variableCounts[offset] = vars;
        for (int j = 0; j < vars; ++j) {
            switch (variableType(tmp[j])) {
            case 'r': ++m_variableCounts[offset+1]; break;
            case 'b': ++m_variableCounts[offset+2]; break;
            case 'i': ++m_variableCounts[offset+3]; break;
            }
        }
    }
    delete [] tmp;

    // Bounds: one Real block of [lo,hi] pairs, variables then constraints.
    m_varBounds = new Real[2*(variables()+constraints())];
    m_constrBounds = &m_varBounds[2*variables()];
    for (i = 0; i < variables(); ++i) {
        ftable->variableBounds(handle, i, &m_varBounds[2*i], &m_varBounds[2*i+1]);
    }
    // Normalize constraint bounds by type code. From the effects below the
    // codes presumably mean: 'l' one-sided upper (drop lower bound),
    // 'g' one-sided lower (drop upper bound), 'e' equality (lo := hi),
    // 'u' unbounded (both infinite) — verify against the reader plugin docs.
    for (i = 0; i < constraints(); ++i) {
        ftable->constraintBounds(handle, i, &m_constrBounds[2*i], &m_constrBounds[2*i+1]);
        switch (constraintType(i)) {
        case 'l':
            m_constrBounds[2*i] = -std::numeric_limits<Real>::infinity();
            break;
        case 'g':
            m_constrBounds[2*i+1] = std::numeric_limits<Real>::infinity();
            break;
        case 'e':
            m_constrBounds[2*i] = m_constrBounds[2*i+1];
            break;
        case 'u':
            m_constrBounds[2*i] = -std::numeric_limits<Real>::infinity();
            m_constrBounds[2*i+1] = std::numeric_limits<Real>::infinity();
            break;
        default:
            break;
        }
    }
    latestErrorType = 0;
}