void CCMSDIntegrator::ParseCMSD(std::string filename)
{
	CMSD::CCMSD doc = CMSD::CCMSD::LoadFromFile(filename);
	doc.SaveToFile((::ExeDirectory() + "Test1.xml").c_str(), true);

	//CMSD::CCMSD::DataSection d = doc.DataSection().first();
	CMSD::CwhiteSpaceType root = doc.whiteSpace.first();
	CMSD::CCMSDDocument cmsddocument = doc.CMSDDocument[0];
	CMSD::CDataSectionType2 data = cmsddocument.DataSection[0];

	for(int i = 0; i < data.PartType.count(); i++)
	{
		Part * apart((Part*) Part().CreateSave<Part>());
		apart->Load(data.PartType[i].GetNode());
		//std::vector<IObjectPtr> &someparts(apart->objects());
		//Part * part2 = (Part *) someparts[0].get();
	}
	for(int i = 0; i < data.ProcessPlan.count(); i++)
	{
		ProcessPlan * aplan((ProcessPlan *) IObject::CreateSave<ProcessPlan>());
		aplan->Load(data.ProcessPlan[i].GetNode());
	}
	for(int i = 0; i < data.Resource.count(); i++)
	{
		if(Cell::IsResourceCell(data.Resource[i].GetNode()))
		{
			Cell * acell((Cell *) IObject::CreateSave<Cell>());
			acell->Load(data.Resource[i].GetNode());
		}
		else
		{
			Resource * aresource((Resource *) IObject::CreateSave<Resource>());
			aresource->Load(data.Resource[i].GetNode());
		}
	}
	for(int i = 0; i < data.Job.count(); i++)
	{
		Job * ajob(IObject::CreateSave<Job>());
		ajob->Load(data.Job[i].GetNode());
	}
	for(int i = 0; i < data.DistributionDefinition.count(); i++)
	{
		Distribution * astat((Distribution *) IObject::CreateSave<Distribution>());
		astat->LoadDefinition(data.DistributionDefinition[i].GetNode());
	}
	for(int i = 0; i < data.Calendar.count(); i++)
	{
		Calendar * calendar((Calendar *) IObject::CreateSave<Calendar>());
		calendar->Load(data.Calendar[i].GetNode());
	}
	for(int i = 0; i < data.Layout.count(); i++)
	{
		Layout * layout((Layout *) IObject::CreateSave<Layout>());
		layout->Load(data.Layout[i].GetNode());
	}
	//CMSD::CInventoryItem inv = data.InventoryItem[0];
	//inv.Location
}
Distribution combine(Distribution const& c, InfoMap const& info)
{
    std::map<Cell, Value> tmp;
    for (size_t i = 0; i < c.size(); ++i) {
        Distribution const& w = info(c.at(i).first)->weights;
        Value const f = c.at(i).second;
        for (size_t j = 0; j < w.size(); ++j) {
            Cell const v = w.at(j).first;
            if (tmp.count(v) == 0)
                tmp[v] = 0;
            tmp[v] += f * w.at(j).second;
        }
    }
    Distribution result(tmp.size());
    std::map<Cell, Value>::const_iterator iter = tmp.begin();
    for (size_t i = 0; i < tmp.size(); ++i, ++iter)
        result[i] = std::make_pair(iter->first, iter->second);
    return result;
}
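// A minimal usage sketch for combine() above. Cell, Value, Distribution and
// InfoMap are not defined in this snippet, so plausible stand-ins are assumed
// here (Distribution as a vector of (cell, weight) pairs, InfoMap as a lookup
// returning a node carrying weights); declare them before combine().
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

using Cell = int;
using Value = double;
using Distribution = std::vector<std::pair<Cell, Value>>;

struct Node { Distribution weights; };

// hypothetical lookup: maps a cell to the node holding its outgoing weights
struct InfoMap {
    std::map<Cell, Node> nodes;
    const Node* operator()(Cell c) const { return &nodes.at(c); }
};

int main() {
    InfoMap info;
    info.nodes[0] = { { {1, 0.5}, {2, 0.5} } }; // cell 0 spreads half to 1, half to 2
    info.nodes[1] = { { {2, 1.0} } };           // cell 1 flows entirely to 2
    Distribution c = { {0, 1.0}, {1, 2.0} };
    Distribution r = combine(c, info);
    for (const auto& p : r)
        std::printf("cell %d -> %f\n", p.first, p.second);
    // expected: cell 1 -> 0.5, cell 2 -> 2.5
}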
void Resource::Save(CMSD::CResource& resource)
{
	resource.Identifier.append() = std::string((LPCSTR) identifier);
	CREATEIF(resource.Name.append(), name);
	CREATEIF(resource.ResourceType.append(), type);
	CREATEIF(resource.Description.append(), description);
	//PropertyElement(L"Capacity", capacity).Save(resource.Property.append());
	//PropertyElement(L"Manufacturer", manufacturer).Save(resource.Property.append());
	//PropertyElement(L"Serial_number", serial_number).Save(resource.Property.append());
	//for(int i=0; i< simpleproperties.size(); i++)
	//{
	//	simpleproperties[i].Save(resource.Property.append());
	//}
	PropertyElement().SaveProperties<CMSD::CResource>(resource, properties);

	Distribution* mtbfdist = CCMSDIntegrator::FindDistributionById(mtbfid);
	Distribution* mttrdist = CCMSDIntegrator::FindDistributionById(mttrid);
	if(mtbfdist != NULL) // !mtbf.IsEmpty())
		mtbfdist->Save(resource.Property.append()); // was mtbf->Save: save the distribution that was actually looked up
	if(mttrdist != NULL)
		mttrdist->Save(resource.Property.append());
}
void ColorizeByQuality(uintptr_t meshptr, bool vertexQuality, float qMin, float qMax,
                       float perc, bool zerosym, int colorMap)
{
    MyMesh &m = *((MyMesh*) meshptr);
    bool usePerc = (perc > 0);

    Distribution<float> H;
    if(vertexQuality)
        tri::Stat<MyMesh>::ComputePerVertexQualityDistribution(m, H);
    else
        tri::Stat<MyMesh>::ComputePerFaceQualityDistribution(m, H);

    float percLo = H.Percentile(perc / 100.0f);
    float percHi = H.Percentile(1.0f - (perc / 100.0f));

    if (qMin == qMax) {
        std::pair<float, float> minmax;
        if(vertexQuality)
            minmax = tri::Stat<MyMesh>::ComputePerVertexQualityMinMax(m);
        else
            minmax = tri::Stat<MyMesh>::ComputePerFaceQualityMinMax(m);
        qMin = minmax.first;
        qMax = minmax.second;
    }

    if (zerosym) {
        qMin = std::min(qMin, -math::Abs(qMax));
        qMax = -qMin;
        percLo = std::min(percLo, -math::Abs(percHi));
        percHi = -percLo;
    }

    if (usePerc) {
        qMin = percLo;
        qMax = percHi;
        printf("Used (%f %f) percentile (%f %f)\n", percLo, percHi, perc, (100.0f - perc));
    }
    printf("Quality Range: %f %f; Used (%f %f)\n", H.Min(), H.Max(), qMin, qMax);

    switch (colorMap) {
    case 0: // note: cases 0 and 2 currently apply the same ramp
        if(vertexQuality) tri::UpdateColor<MyMesh>::PerVertexQualityRamp(m, qMin, qMax);
        else              tri::UpdateColor<MyMesh>::PerFaceQualityRamp(m, qMin, qMax);
        break;
    case 1:
        if(vertexQuality) tri::UpdateColor<MyMesh>::PerVertexQualityGray(m, qMin, qMax);
        else              tri::UpdateColor<MyMesh>::PerFaceQualityGray(m, qMin, qMax);
        break;
    case 2:
        if(vertexQuality) tri::UpdateColor<MyMesh>::PerVertexQualityRamp(m, qMin, qMax);
        else              tri::UpdateColor<MyMesh>::PerFaceQualityRamp(m, qMin, qMax);
        break;
    default:
        assert(0);
    }
}
// Set the app by the current distro
void GeneralModule::setByDistribution()
{
    Distribution dist;
    distroLogoButton->setIcon(QIcon(":/resources/distributions/"
                                    + dist.name().toLower() + "-icon.png").pixmap(128, 128));
    distroNameLabel->setText("<h1>" + dist.name() + "</h1>" + " "
                             + dist.version() + " " + dist.codename());
    kernelLabel->setText("<b>" + trUtf8("Linux Kernel") + "</b> " + dist.kernel());
}
void Calculator::generate_sample(params & par, std::vector<double> &rez)
{
    switch(par.distr)
    {
    case NORMAL:
    {   // braces keep the local object's initialization scoped to this case
        Distribution<boost::normal_distribution<>, double> dis;
        dis.generate_sample(par.n, rez);
        break;
    }
    }
}
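// generate_sample() above goes through a project-specific Distribution wrapper
// over Boost. For comparison, a standalone sketch of the same normal sampling
// with the standard library only (parameter values are illustrative):
#include <random>
#include <vector>

int main() {
    std::mt19937 rng(std::random_device{}());
    std::normal_distribution<double> normal(0.0, 1.0); // mean, stddev
    std::vector<double> rez;
    for (int i = 0; i < 1000; ++i)
        rez.push_back(normal(rng));
}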
void print(Distribution values)
{
    for (Distribution::iterator it = values.begin(); it != values.end(); ++it) {
        cout << "(" << it->first.first << "," << it->first.second << "):" << endl;
        cout << "vector1:" << endl;
        int size1 = it->second.first.size();
        for (int i = 0; i < size1; i++) { cout << it->second.first[i] << endl; }
        cout << "vector2:" << endl;
        int size2 = it->second.second.size();
        for (int i = 0; i < size2; i++) { cout << it->second.second[i] << endl; }
    }
}
Distribution* CCMSDIntegrator::FindDistributionById(bstr_t id)
{
	Distribution * dist = IObject::Create<Distribution>();
	for(int i = 0; i < dist->objects().size(); i++)
	{
		Distribution* distribution((Distribution *) dist->objects()[i].get());
		if(distribution->identifier == id)
			return distribution;
	}
	return NULL;
}
void StatTest()
{
	Distribution stat, stat2, stat3, stat4;
	stat.SetParameters(_T("normal"), 10, 5);
	stat2.SetParameters(_T("uniform"), 5, 10);
	stat3.SetParameters(_T("exponential"), 5, 10, 18);
	stat4.SetParameters(_T("weibull"), 1, 5); // gamma, k

	std::vector<double> normdata, unidata, expdata, weibdata;
	for(int i = 0; i < 10000; i++)
	{
		normdata.push_back(stat.RandomVariable());
		unidata.push_back(stat2.RandomVariable());
		expdata.push_back(stat3.RandomVariable());
		weibdata.push_back(stat4.RandomVariable());
	}

	std::string results;
	StatFitting statfit;
	statfit.EstimateAll(normdata);
	results = statfit.ToString();
	OutputDebugString(results.c_str());

	StatFitting statfit1;
	statfit1.EstimateAll(unidata);
	results = statfit1.ToString();
	OutputDebugString(results.c_str());

	StatFitting statfit2;
	statfit2.EstimateAll(expdata);
	OutputDebugString(statfit2.ToString().c_str());

	StatFitting statfitW;
	double a, b;
	std::vector<double> T = TokenList<double>("16,34,53,75,93,120", ",");
	// vlist_of<double>( 16 )( 34 )( 53 )( 75 )( 93 )( 120 );
	statfitW.ComputeWeibull(T, a, b);
	//http://home.comcast.net/~pstlarry/BaikoMan.htm

	StatFitting statfit3;
	//std::vector<double> weibulldata = data.GetData("D:\\Program Files\\NIST\\proj\\DES\\SimulationModel\\CapacityCalculator\\Data\\WeibullTestDat1.txt");
	//statfit3.EstimateAll(weibdata);
	Distribution dist = statfit3.BestFit(weibdata);
	OutputDebugString(dist.ToString().c_str());
}
double InfoGainSplitCrit::splitCritValue(Distribution &bags, double totalNoInst,
                                         double oldEnt) const
{
    double numerator, noUnknown, unknownRate;
    noUnknown = totalNoInst - bags.total();
    unknownRate = noUnknown / totalNoInst;
    numerator = (oldEnt - newEnt(bags));
    numerator = (1 - unknownRate) * numerator;

    // Splits with no gain are useless.
    if (Utils::eq(numerator, 0)) {
        return 0;
    }
    return numerator / bags.total();
}
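// A standalone numeric check of the information gain that splitCritValue()
// builds on, using Quinlan's classic 14-instance example. oldEnt/newEnt and
// the Distribution type are not available here, so a plain entropy helper
// over class counts stands in for them:
#include <cmath>
#include <cstdio>
#include <vector>

static double entropy(const std::vector<double>& counts) {
    double total = 0, e = 0;
    for (double c : counts) total += c;
    for (double c : counts)
        if (c > 0) e -= (c / total) * std::log2(c / total);
    return e;
}

int main() {
    // before the split: 9 positive, 5 negative instances
    double before = entropy({9, 5});                 // ~0.940 bits
    // after: one branch holds {6,2}, the other {3,3}
    double after = (8.0 / 14) * entropy({6, 2})
                 + (6.0 / 14) * entropy({3, 3});     // ~0.892 bits
    std::printf("info gain = %f\n", before - after); // ~0.048
}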
SUMOReal NIVissimEdge::getRealSpeed(/* NBDistribution &dc */ int distNo)
{
    std::string id = toString<int>(distNo);
    Distribution* dist = NBDistribution::dictionary("speed", id);
    if (dist == 0) {
        WRITE_WARNING("The referenced speed distribution '" + id + "' is not known.");
        return -1;
    }
    assert(dist != 0);
    SUMOReal speed = dist->getMax();
    if (speed < 0 || speed > 1000) {
        WRITE_WARNING("What about distribution '" + toString<int>(distNo) + "' ");
    }
    return speed;
}
double NV_EM_log_likelihood::operator()(const Matrix& data,
                                        const ProbTable& theta,
                                        const Distribution& pY,
                                        const CondProbTable& pXY) const
{
    double result = 0.0;
    unsigned N = utility::nrows(data);
    for ( unsigned i = 0; i < N; ++i ) {
        const std::vector<int>& X = data[i];
        unsigned K = pY.size(), P = X.size();
        for ( unsigned y = 0; y < K; ++y ) {
            double llh_y = m_log(pY[y]);
            for ( unsigned p = 0; p < P; ++p ) { // unsigned: matches P
                int x = X[p];
                if (pXY[y][p][x]) {
                    llh_y += m_log( pXY[y][p][x] );
                }
            }
            llh_y *= theta[i][y];
            result += llh_y;
        }
    }
    // printf("result: %f\n", result);
    return result;
}
HMM<Distribution>::HMM(const size_t states,
                       const Distribution emissions,
                       const double tolerance) :
    transition(arma::ones<arma::mat>(states, states) / (double) states),
    emission(states, /* default distribution */ emissions),
    dimensionality(emissions.Dimensionality()),
    tolerance(tolerance)
{ /* nothing to do */ }
void ClampVertexQuality(uintptr_t meshptr, float qMin, float qMax,
                        float perc, bool zerosym)
{
    MyMesh &m = *((MyMesh*) meshptr);
    bool usePerc = (perc > 0);

    Distribution<float> H;
    tri::Stat<MyMesh>::ComputePerVertexQualityDistribution(m, H);
    float percLo = H.Percentile(perc / 100.0f);
    float percHi = H.Percentile(1.0f - (perc / 100.0f));

    if (qMin == qMax) {
        std::pair<float, float> minmax = tri::Stat<MyMesh>::ComputePerVertexQualityMinMax(m);
        qMin = minmax.first;
        qMax = minmax.second;
    }

    if (zerosym) {
        qMin = std::min(qMin, -math::Abs(qMax));
        qMax = -qMin;
        percLo = std::min(percLo, -math::Abs(percHi));
        percHi = -percLo;
    }

    if (usePerc) {
        tri::UpdateQuality<MyMesh>::VertexClamp(m, percLo, percHi);
        printf("Quality Range: %f %f; Used (%f %f) percentile (%f %f)\n",
               H.Min(), H.Max(), percLo, percHi, perc, (100.0f - perc));
    } else {
        tri::UpdateQuality<MyMesh>::VertexClamp(m, qMin, qMax);
        printf("Quality Range: %f %f; Used (%f %f)\n", H.Min(), H.Max(), qMin, qMax);
    }
}
SUMOReal NIVissimDistrictConnection::getRealSpeed(/* NBDistribution &dc, */ int distNo) const
{
    std::string id = toString<int>(distNo);
    Distribution* dist = NBDistribution::dictionary("speed", id);
    if (dist == 0) {
        WRITE_WARNING("The referenced speed distribution '" + id + "' is not known.");
        WRITE_WARNING(". Using default.");
        return OptionsCont::getOptions().getFloat("vissim.default-speed");
    }
    assert(dist != 0);
    SUMOReal speed = dist->getMax();
    if (speed < 0 || speed > 1000) {
        WRITE_WARNING(" False speed at district '" + id);
        WRITE_WARNING(". Using default.");
        speed = OptionsCont::getOptions().getFloat("vissim.default-speed");
    }
    return speed;
}
void fill_matrix(int rank, Distribution &ds, Distribution &block_ds)
{
    // set B columns
    int n_cols_B = block_ds.size();
    std::vector<PetscInt> b_cols(n_cols_B);
    for (unsigned int p = 0; p < block_ds.np(); p++)
        for (unsigned int j = block_ds.begin(p); j < block_ds.end(p); j++) {
            //int proc = block_ds.get_proc(j);
            b_cols[j] = ds.end(p) + j;
        }

    // create block A of matrix
    int local_idx = 0;
    for (unsigned int i = block_ds.begin(); i < block_ds.end(); i++) {
        // make random block values
        std::vector<PetscScalar> a_vals(block_size * block_size, 0);
        for (unsigned int j = 0; j < block_size; j++)
            a_vals[j + j * block_size] = (rank + 2);

        // set rows and columns indices
        std::vector<PetscInt> a_rows(block_size);
        for (unsigned int j = 0; j < block_size; j++) {
            a_rows[j] = ds.begin() + block_ds.begin() + local_idx;
            local_idx++;
        }
        mat_set_values(block_size, &a_rows[0], block_size, &a_rows[0], &a_vals[0]);

        // set B values
        std::vector<PetscScalar> b_vals(block_size * n_cols_B);
        for (int j = 0; j < block_size * n_cols_B; j++)
            b_vals[j] = 1;

        // set C values
        std::vector<PetscScalar> c_vals(n_cols_B);
        for (int j = 0; j < n_cols_B; j++)
            c_vals[j] = 0;

        // must iterate per rows to get correct transpose
        for (unsigned int row = 0; row < block_size; row++) {
            mat_set_values(1, &a_rows[row], 1, &b_cols[rank], &b_vals[row * n_cols_B]);
            mat_set_values(1, &b_cols[rank], 1, &a_rows[row], &b_vals[row * n_cols_B]);
        }
        mat_set_values(1, &b_cols[rank], 1, &b_cols[rank], &c_vals[rank]);
    }
}
//--------------------------------------------------------------------------
Distribution LossDistMonteCarlo::operator()(const vector<Real>& nominals,
                                            const vector<Real>& probabilities) const
{
//--------------------------------------------------------------------------
    Distribution dist(nBuckets_, 0.0, maximum_);
    // KnuthUniformRng rng(seed_);
    // LecuyerUniformRng rng;
    MersenneTwisterUniformRng rng;
    for (Size i = 0; i < simulations_; i++) {
        double e = 0;
        for (Size j = 0; j < nominals.size(); j++) {
            Real r = rng.next().value;
            if (r <= probabilities[j])
                e += nominals[j];
        }
        dist.add(e + epsilon_);
    }
    dist.normalize();
    return dist;
}
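// The Monte Carlo loop above draws one uniform per name per scenario and adds
// the name's nominal on default. The same idea with the standard library
// (nominals, probabilities and the seed are illustrative):
#include <cstdio>
#include <random>
#include <vector>

int main() {
    std::vector<double> nominals = {100.0, 100.0, 100.0};
    std::vector<double> probabilities = {0.02, 0.05, 0.10};
    std::mt19937 rng(42);
    std::uniform_real_distribution<double> u(0.0, 1.0);
    const int simulations = 100000;
    double totalLoss = 0.0;
    for (int i = 0; i < simulations; ++i) {
        double e = 0.0;
        for (std::size_t j = 0; j < nominals.size(); ++j)
            if (u(rng) <= probabilities[j])
                e += nominals[j];
        totalLoss += e;
    }
    std::printf("mean loss = %f (expected %f)\n",
                totalLoss / simulations, 100.0 * (0.02 + 0.05 + 0.10));
}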
//--------------------------------------------------------------------------
Distribution LossDistBinomial::operator()(Size n, Real volume,
                                          Real probability) const
{
//--------------------------------------------------------------------------
    n_ = n;
    volume_ = volume; // cache the volume: the member is read in the test below
    probability_.clear();
    probability_.resize(n_ + 1, 0.0);
    Distribution dist(nBuckets_, 0.0, maximum_);
    BinomialDistribution binomial(probability, n);
    for (Size i = 0; i <= n; i++) {
        if (volume_ * i <= maximum_) {
            probability_[i] = binomial(i);
            Size bucket = dist.locate(volume * i);
            dist.addDensity(bucket, probability_[i] / dist.dx(bucket));
            dist.addAverage(bucket, volume * i);
        }
    }
    excessProbability_.clear();
    excessProbability_.resize(n_ + 1, 0.0);
    excessProbability_[n_] = probability_[n_];
    for (int k = n_ - 1; k >= 0; k--)
        excessProbability_[k] = excessProbability_[k + 1] + probability_[k];
    dist.normalize();
    return dist;
}
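// The excessProbability_ loop above is a reverse cumulative sum: entry k is
// P(at least k defaults). A standalone sketch of the same recursion (the
// probability values are illustrative):
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> probability = {0.4, 0.3, 0.2, 0.1}; // P(exactly k defaults)
    const int n = static_cast<int>(probability.size()) - 1;
    std::vector<double> excess(n + 1, 0.0);
    excess[n] = probability[n];
    for (int k = n - 1; k >= 0; --k)
        excess[k] = excess[k + 1] + probability[k];
    for (int k = 0; k <= n; ++k)
        std::printf("P(defaults >= %d) = %.2f\n", k, excess[k]);
    // prints 1.00, 0.60, 0.30, 0.10
}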
void Predictor::sampleFromGaussian(Distribution& d, int num_particles,
                                   Particle mean, Particle variance, float sigma)
{
    Particle p;
    Particle epsilon;

    d.push_back(mean);
    for(int i = 1; i < num_particles; i++){
        // particle to sample from
        p = mean;
        // move function (gaussian noise)
        epsilon = genNoise(sigma, variance);
        // apply movement
        p.Translate(epsilon.t);
        p.RotateAxis(epsilon.r);
        p.Translate(m_cam_view.x * epsilon.z,
                    m_cam_view.y * epsilon.z,
                    m_cam_view.z * epsilon.z);
        // add particle to distribution
        d.push_back(p);
    }
}
void Resource::Load(MSXML2::IXMLDOMNodePtr ini)
{
	CMSD::CResource resource = ini;
	ASSIGN(name, ((std::string) resource.Name[0]).c_str(), L"None");
	ASSIGN(identifier, ((std::string) resource.Identifier[0]).c_str(), L"None");
	ASSIGN(type, ((std::string) resource.ResourceType[0]).c_str(), L"None");
	ASSIGN(description, ((std::string) resource.Description[0]).c_str(), L"None");
	ASSIGN(hourlyRate, resource.HourlyRate[0].Value2[0].GetNode()->text, L"None");
	ASSIGN(hourlyRateUnit, ((std::string) resource.HourlyRate[0].Unit[0]).c_str(), L"None");

	// These are properties
	// capacity = CCMSDIntegrator::GetProperty(ini, bstr_t(L"Capacity"), bstr_t(L"1"));
	// manufacturer = CCMSDIntegrator::GetProperty(ini, bstr_t(L"Manufacturer"), bstr_t(L"Acme"));
	// serial_number = CCMSDIntegrator::GetProperty(ini, bstr_t(L"SerialNumber"), bstr_t(L"Acme"));
	PropertyElement().LoadProperties<CMSD::CResource>(resource, properties);

	for(int i = 0; i < resource.Property.count(); i++)
	{
		if(resource.Property[i].Name[0].GetNode()->text == bstr_t("MTBF:Measured"))
		{
			Distribution * astat((Distribution *) IObject::CreateSave<Distribution>());
			astat->LoadProperty(resource.Property[i].GetNode());
			mtbfid = astat->identifier = this->identifier + "MTBF:Measured";
			this->mtbf = astat;
		}
		else if(resource.Property[i].Name[0].GetNode()->text == bstr_t("MTTR:Measured"))
		{
			Distribution * astat((Distribution *) IObject::CreateSave<Distribution>());
			astat->LoadProperty(resource.Property[i].GetNode());
			mttrid = astat->identifier = this->identifier + "MTTR:Measured";
			this->mttr = astat;
		}
	}
}
Distribution HomogeneousPoolLossModel<CP>::lossDistrib(const Date& d) const
{
    LossDistHomogeneous bucktLDistBuff(nBuckets_, detachAmount_);

    std::vector<Real> lgd; // switch to a mutable cache member
    std::vector<Real> recoveries = copula_->recoveries();
    std::transform(recoveries.begin(), recoveries.end(),
                   std::back_inserter(lgd),
                   std::bind1st(std::minus<Real>(), 1.));
    std::transform(lgd.begin(), lgd.end(), notionals_.begin(),
                   lgd.begin(), std::multiplies<Real>());
    std::vector<Real> prob = basket_->remainingProbabilities(d);
    for(Size iName = 0; iName < prob.size(); iName++)
        prob[iName] = copula_->inverseCumulativeY(prob[iName], iName);

    // integrate locally (1 factor).
    // use explicitly a 1D latent model object?
    Distribution dist(nBuckets_, 0.0, detachAmount_); // notional_);
    std::vector<Real> mkft(1, min_ + delta_ / 2.);
    for (Size i = 0; i < nSteps_; i++) {
        std::vector<Real> conditionalProbs;
        for(Size iName = 0; iName < notionals_.size(); iName++)
            conditionalProbs.push_back(
                copula_->conditionalDefaultProbabilityInvP(prob[iName], iName, mkft));
        // renamed from 'd', which shadowed the Date parameter
        Distribution bucketDist = bucktLDistBuff(lgd, conditionalProbs);
        Real densitydm = delta_ * copula_->density(mkft);
        // also, instead of calling the static method it could be wrapped
        // through an inlined call in the latent model
        for (Size j = 0; j < nBuckets_; j++)
            dist.addDensity(j, bucketDist.density(j) * densitydm);
        mkft[0] += delta_;
    }
    return dist;
}
double InfoGainSplitCrit::splitCritValue(Distribution &bags) const
{
    double numerator = oldEnt(bags) - newEnt(bags);

    // Splits with no gain are useless.
    if (Utils::eq(numerator, 0)) {
        return std::numeric_limits<double>::max();
    }
    // We take the reciprocal value because we want to minimize the
    // splitting criterion's value.
    return bags.total() / numerator;
}
void from_distribution(const Distribution& distribution, const int& new_size)
{
    // we first create a local array to sample to. this way, if this
    // is passed as an argument the locations and pmf are not overwritten
    // while sampling
    LocationArray new_locations(new_size);
    for(int i = 0; i < new_size; i++) {
        new_locations[i] = distribution.sample();
    }
    set_uniform(new_size);
    locations_ = new_locations;
}
double NV_EM_log_likelihood::likelihood(const Matrix& data,
                                        const unsigned i,
                                        const ProbTable& theta,
                                        const Distribution& pY,
                                        const CondProbTable& pXY) const
{
    double llh = 0.0;
    const std::vector<int>& X = data[i];
    unsigned K = pY.size(), P = X.size();
    for ( unsigned y = 0; y < K; ++y ) {
        double llh_y = m_log(pY[y]);
        for ( unsigned p = 0; p < P; ++p ) {
            int x = X[p];
            llh_y += m_log( pXY[y][p][x] );
        }
        llh_y *= theta[i][y];
        llh += llh_y; // was missing: llh_y was computed but never accumulated
    }
    return llh;
}
Asset::Asset(double coeffM, double weight, double recoveryRate,
             const std::vector<double>& defaultProba, const Distribution& distrib) :
    _coeffM(coeffM),
    _weight(weight),
    _recoveryRate(recoveryRate),
    _coeffX(sqrt(1 - pow(coeffM, 2)))
{
    assert(std::is_sorted(defaultProba.begin(), defaultProba.end()));
    assert(_recoveryRate >= 0 && _recoveryRate <= 1);
    assert(_coeffM >= 0 && _coeffM <= 1);
    for (auto proba : defaultProba) {
        assert(proba >= 0 && proba <= 1);
        // specific treatment of 0 and 1 to prevent error in inversion
        if (proba > 0 && proba < 1) {
            double quantile = distrib.inverse_cumulative(proba);
            _defaultQuantiles.push_back(quantile);
        } else if (proba == 0) {
            _defaultQuantiles.push_back(std::numeric_limits<double>::lowest());
        } else {
            _defaultQuantiles.push_back(std::numeric_limits<double>::max());
        }
    }
}
void StatFitting::ksone(std::vector<double> data, Distribution & dist,
                        double *d, double *prob)
{
	// Numerical Recipes' ksone assumes 1-based data[1..n]; with a 0-based
	// std::vector use n = size() and index data[j-1], otherwise the smallest
	// observation is skipped and n is one too small.
	unsigned long n = data.size();
	unsigned long j;
	double dt, en, ff, fn, fo = 0.0;

	std::sort(data.begin(), data.end());
	en = n;
	*d = 0.0;
	for (j = 1; j <= n; j++)
	{
		fn = j / en;
		ff = dist.cdf(data[j - 1]); // ff = (*func)(data[j]);
		dt = FMAX(fabs(fo - ff), fabs(fn - ff));
		if (dt > *d)
			*d = dt;
		fo = fn;
	}
	en = sqrt(en);
	*prob = probks((en + 0.12 + 0.11 / en) * (*d));
}
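// ksone() above follows Numerical Recipes: the KS statistic D is the largest
// gap between the empirical CDF and the fitted CDF, checked on both sides of
// each step. A standalone sketch of just D against a uniform(0,1) CDF, which
// stands in for dist.cdf() here:
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> data = {0.05, 0.2, 0.45, 0.6, 0.9};
    std::sort(data.begin(), data.end());
    const double n = static_cast<double>(data.size());
    double d = 0.0, fo = 0.0; // fo: empirical CDF before the current point
    for (std::size_t j = 0; j < data.size(); ++j) {
        double ff = data[j];     // uniform(0,1) CDF: F(x) = x
        double fn = (j + 1) / n; // empirical CDF after the current point
        d = std::max(d, std::max(std::fabs(fo - ff), std::fabs(fn - ff)));
        fo = fn;
    }
    std::printf("KS statistic D = %f\n", d); // 0.2 for this data
}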
//-------------------------------------------------------------------------
Distribution ManipulateDistribution::convolve(const Distribution& d1,
                                              const Distribution& d2)
{
//-------------------------------------------------------------------------
    // force equal constant bucket sizes
    QL_REQUIRE(d1.dx_[0] == d2.dx_[0], "bucket sizes differ in d1 and d2");
    for (Size i = 1; i < d1.size(); i++)
        QL_REQUIRE(d1.dx_[i] == d1.dx_[i-1], "bucket size varies in d1");
    for (Size i = 1; i < d2.size(); i++)
        QL_REQUIRE(d2.dx_[i] == d2.dx_[i-1], "bucket size varies in d2");

    // force offset 0
    QL_REQUIRE(d1.xmin_ == 0.0 && d2.xmin_ == 0.0,
               "distributions offset larger than 0");

    Distribution dist(d1.size() + d2.size() - 1,
                      0.0, // assuming both distributions have xmin = 0
                      d1.xmax_ + d2.xmax_);
    for (Size i1 = 0; i1 < d1.size(); i1++) {
        Real dx = d1.dx_[i1];
        for (Size i2 = 0; i2 < d2.size(); i2++)
            // accumulate: several (i1,i2) pairs land in the same bucket i1+i2,
            // so plain '=' would drop all but the last contribution
            dist.density_[i1+i2] += d1.density_[i1] * d2.density_[i2] * dx;
    }

    // update cumulated and excess
    dist.excessProbability_[0] = 1.0;
    for (Size i = 0; i < dist.size(); i++) {
        dist.cumulativeDensity_[i] = dist.density_[i] * dist.dx_[i];
        if (i > 0) {
            dist.cumulativeDensity_[i] += dist.cumulativeDensity_[i-1];
            dist.excessProbability_[i] = dist.excessProbability_[i-1]
                                       - dist.density_[i-1] * dist.dx_[i-1];
        }
    }
    return dist;
}
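// The double loop in convolve() above is a plain discrete convolution of two
// bucketed densities with a common bucket width dx. A minimal standalone
// check (densities and dx are illustrative):
#include <cstdio>
#include <vector>

int main() {
    const double dx = 1.0;
    std::vector<double> d1 = {0.5, 0.5};        // densities of distribution 1
    std::vector<double> d2 = {0.25, 0.5, 0.25}; // densities of distribution 2
    std::vector<double> out(d1.size() + d2.size() - 1, 0.0);
    for (std::size_t i1 = 0; i1 < d1.size(); ++i1)
        for (std::size_t i2 = 0; i2 < d2.size(); ++i2)
            out[i1 + i2] += d1[i1] * d2[i2] * dx; // accumulate per bucket
    for (std::size_t i = 0; i < out.size(); ++i)
        std::printf("density[%zu] = %.3f\n", i, out[i]);
    // 0.125 0.375 0.375 0.125: total mass stays 1 when both inputs do
}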
//--------------------------------------------------------------------------
Distribution LossDistHomogeneous::operator()(Real volume,
                                             const vector<Real>& p) const
{
//--------------------------------------------------------------------------
    volume_ = volume;
    n_ = p.size();
    probability_.clear();
    probability_.resize(n_ + 1, 0.0);

    vector<Real> prev;
    probability_[0] = 1.0;
    for (Size k = 0; k < n_; k++) {
        prev = probability_;
        probability_[0] = prev[0] * (1.0 - p[k]);
        for (Size i = 1; i <= k; i++)
            probability_[i] = prev[i-1] * p[k] + prev[i] * (1.0 - p[k]);
        probability_[k+1] = prev[k] * p[k];
    }

    excessProbability_.clear();
    excessProbability_.resize(n_ + 1, 0.0);
    excessProbability_[n_] = probability_[n_];
    for (int k = n_ - 1; k >= 0; k--)
        excessProbability_[k] = excessProbability_[k+1] + probability_[k];

    Distribution dist(nBuckets_, 0.0, maximum_);
    for (Size i = 0; i <= n_; i++) {
        if (volume * i <= maximum_) {
            Size bucket = dist.locate(volume * i);
            dist.addDensity(bucket, probability_[i] / dist.dx(bucket));
            dist.addAverage(bucket, volume * i);
        }
    }
    dist.normalize();
    return dist;
}
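// The prev/probability_ recursion above builds P(exactly k defaults) one name
// at a time (a Poisson-binomial distribution). Standalone check with two
// names at 50% each (values are illustrative):
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> p = {0.5, 0.5}; // default probability per name
    std::vector<double> prob(p.size() + 1, 0.0);
    prob[0] = 1.0;
    for (std::size_t k = 0; k < p.size(); ++k) {
        std::vector<double> prev = prob;
        prob[0] = prev[0] * (1.0 - p[k]);
        for (std::size_t i = 1; i <= k; ++i)
            prob[i] = prev[i - 1] * p[k] + prev[i] * (1.0 - p[k]);
        prob[k + 1] = prev[k] * p[k];
    }
    for (std::size_t i = 0; i < prob.size(); ++i)
        std::printf("P(%zu defaults) = %.2f\n", i, prob[i]); // 0.25 0.50 0.25
}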
// Core Function doing the actual mesh processing.
bool FilterMeasurePlugin::applyFilter(const QString& filterName, MeshDocument& md,
                                      EnvWrap& env, vcg::CallBackPos * /*cb*/)
{
	if (filterName == "Compute Topological Measures")
	{
		CMeshO &m = md.mm()->cm;
		tri::Allocator<CMeshO>::CompactFaceVector(m);
		tri::Allocator<CMeshO>::CompactVertexVector(m);
		md.mm()->updateDataMask(MeshModel::MM_FACEFACETOPO);
		md.mm()->updateDataMask(MeshModel::MM_VERTFACETOPO);

		int edgeManifNum = tri::Clean<CMeshO>::CountNonManifoldEdgeFF(m, true);
		int faceEdgeManif = tri::UpdateSelection<CMeshO>::FaceCount(m);
		tri::UpdateSelection<CMeshO>::VertexClear(m);
		tri::UpdateSelection<CMeshO>::FaceClear(m);

		int vertManifNum = tri::Clean<CMeshO>::CountNonManifoldVertexFF(m, true);
		tri::UpdateSelection<CMeshO>::FaceFromVertexLoose(m);
		int faceVertManif = tri::UpdateSelection<CMeshO>::FaceCount(m);

		int edgeNum = 0, borderNum = 0;
		tri::Clean<CMeshO>::CountEdges(m, edgeNum, borderNum);
		int holeNum;
		Log("V: %6i E: %6i F:%6i", m.vn, edgeNum, m.fn);
		int unrefVertNum = tri::Clean<CMeshO>::CountUnreferencedVertex(m);
		Log("Unreferenced Vertices %i", unrefVertNum);
		Log("Boundary Edges %i", borderNum);

		int connectedComponentsNum = tri::Clean<CMeshO>::CountConnectedComponents(m);
		Log("Mesh is composed by %i connected component(s)\n", connectedComponentsNum);

		if (edgeManifNum == 0 && vertManifNum == 0)
			Log("Mesh is two-manifold ");
		if (edgeManifNum != 0)
			Log("Mesh has %i non two manifold edges and %i faces are incident on these edges\n", edgeManifNum, faceEdgeManif);
		if (vertManifNum != 0)
			Log("Mesh has %i non two manifold vertexes and %i faces are incident on these vertices\n", vertManifNum, faceVertManif);

		// For manifold meshes compute some other stuff
		if (vertManifNum == 0 && edgeManifNum == 0)
		{
			holeNum = tri::Clean<CMeshO>::CountHoles(m);
			Log("Mesh has %i holes", holeNum);
			int genus = tri::Clean<CMeshO>::MeshGenus(m.vn - unrefVertNum, edgeNum, m.fn, holeNum, connectedComponentsNum);
			Log("Genus is %i", genus);
		}
		else
		{
			Log("Mesh has a undefined number of holes (non 2-manifold mesh)");
			Log("Genus is undefined (non 2-manifold mesh)");
		}
		return true;
	}
	/************************************************************/
	if (filterName == "Compute Topological Measures for Quad Meshes")
	{
		CMeshO &m = md.mm()->cm;
		md.mm()->updateDataMask(MeshModel::MM_FACEFACETOPO);
		md.mm()->updateDataMask(MeshModel::MM_FACEQUALITY);

		if (!tri::Clean<CMeshO>::IsFFAdjacencyConsistent(m))
		{
			this->errorMessage = "Error: mesh has a not consistent FF adjacency";
			return false;
		}
		if (!tri::Clean<CMeshO>::HasConsistentPerFaceFauxFlag(m))
		{
			this->errorMessage = "QuadMesh problem: mesh has a not consistent FauxEdge tagging";
			return false;
		}

		int nQuads = tri::Clean<CMeshO>::CountBitQuads(m);
		int nTris = tri::Clean<CMeshO>::CountBitTris(m);
		int nPolys = tri::Clean<CMeshO>::CountBitPolygons(m);
		int nLargePolys = tri::Clean<CMeshO>::CountBitLargePolygons(m);
		if (nLargePolys > 0) nQuads = 0;

		Log("Mesh has %8i triangles \n", nTris);
		Log("         %8i quads \n", nQuads);
		Log("         %8i polygons \n", nPolys);
		Log("         %8i large polygons (with internal faux vertexes)", nLargePolys);

		if (!tri::Clean<CMeshO>::IsBitTriQuadOnly(m))
		{
			this->errorMessage = "QuadMesh problem: the mesh is not TriQuadOnly";
			return false;
		}

		//     i
		//
		//
		//   i+1     i+2
		tri::UpdateFlags<CMeshO>::FaceClearV(m);
		Distribution<float> AngleD; // angle distribution
		Distribution<float> RatioD; // ratio distribution
		tri::UpdateFlags<CMeshO>::FaceClearV(m);
		for (CMeshO::FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
			if (!fi->IsV())
			{
				fi->SetV();
				// Collect the vertices
				Point3f qv[4];
				bool quadFound = false;
				for (int i = 0; i < 3; ++i)
				{
					if ((*fi).IsF(i) && !(*fi).IsF((i+1)%3) && !(*fi).IsF((i+2)%3))
					{
						qv[0] = fi->V0(i)->P(),
						qv[1] = fi->FFp(i)->V2(fi->FFi(i))->P(),
						qv[2] = fi->V1(i)->P(),
						qv[3] = fi->V2(i)->P();
						quadFound = true;
					}
				}
				assert(quadFound);
				for (int i = 0; i < 4; ++i)
					AngleD.Add(fabs(90 - math::ToDeg(Angle(qv[(i+0)%4] - qv[(i+1)%4], qv[(i+2)%4] - qv[(i+1)%4]))));
				float edgeLen[4];
				for (int i = 0; i < 4; ++i)
					edgeLen[i] = Distance(qv[(i+0)%4], qv[(i+1)%4]);
				std::sort(edgeLen, edgeLen + 4);
				RatioD.Add(edgeLen[0] / edgeLen[3]);
			}

		Log("Right Angle Discrepancy Avg %4.3f Min %4.3f Max %4.3f StdDev %4.3f Percentile 0.05 %4.3f percentile 95 %4.3f",
		    AngleD.Avg(), AngleD.Min(), AngleD.Max(), AngleD.StandardDeviation(), AngleD.Percentile(0.05), AngleD.Percentile(0.95));
		Log("Quad Ratio Avg %4.3f Min %4.3f Max %4.3f", RatioD.Avg(), RatioD.Min(), RatioD.Max());
		return true;
	}
	/************************************************************/
	if (filterName == "Compute Geometric Measures")
	{
		CMeshO &m = md.mm()->cm;
		tri::Inertia<CMeshO> I(m);
		float Area = tri::Stat<CMeshO>::ComputeMeshArea(m);
		float Volume = I.Mass();
		Log("Mesh Bounding Box Size %f %f %f", m.bbox.DimX(), m.bbox.DimY(), m.bbox.DimZ());
		Log("Mesh Bounding Box Diag %f ", m.bbox.Diag());
		Log("Mesh Volume is %f", Volume);
		Log("Mesh Surface is %f", Area);

		Point3f bc = tri::Stat<CMeshO>::ComputeShellBarycenter(m);
		Log("Thin shell barycenter %9.6f %9.6f %9.6f", bc[0], bc[1], bc[2]);

		if (Volume <= 0)
			Log("Mesh is not 'solid', no information on barycenter and inertia tensor.");
		else
		{
			Log("Center of Mass is %f %f %f", I.CenterOfMass()[0], I.CenterOfMass()[1], I.CenterOfMass()[2]);

			Matrix33f IT;
			I.InertiaTensor(IT);
			Log("Inertia Tensor is :");
			Log("    | %9.6f %9.6f %9.6f |", IT[0][0], IT[0][1], IT[0][2]);
			Log("    | %9.6f %9.6f %9.6f |", IT[1][0], IT[1][1], IT[1][2]);
			Log("    | %9.6f %9.6f %9.6f |", IT[2][0], IT[2][1], IT[2][2]);

			Matrix33f PCA;
			Point3f pcav;
			I.InertiaTensorEigen(PCA, pcav);
			Log("Principal axes are :");
			Log("    | %9.6f %9.6f %9.6f |", PCA[0][0], PCA[0][1], PCA[0][2]);
			Log("    | %9.6f %9.6f %9.6f |", PCA[1][0], PCA[1][1], PCA[1][2]);
			Log("    | %9.6f %9.6f %9.6f |", PCA[2][0], PCA[2][1], PCA[2][2]);
			Log("axis momenta are :");
			Log("    | %9.6f %9.6f %9.6f |", pcav[0], pcav[1], pcav[2]);
		}
		return true;
	}
	/************************************************************/
	if ((filterName == "Per Vertex Quality Stat") || (filterName == "Per Face Quality Stat"))
	{
		CMeshO &m = md.mm()->cm;
		Distribution<float> DD;
		if (filterName == "Per Vertex Quality Stat")
			tri::Stat<CMeshO>::ComputePerVertexQualityDistribution(m, DD, false);
		else
			tri::Stat<CMeshO>::ComputePerFaceQualityDistribution(m, DD, false);

		Log("   Min %f Max %f", DD.Min(), DD.Max());
		Log("   Avg %f Med %f", DD.Avg(), DD.Percentile(0.5f));
		Log("   StdDev %f", DD.StandardDeviation());
		Log("   Variance %f", DD.Variance());
		return true;
	}
	if ((filterName == "Per Vertex Quality Histogram") || (filterName == "Per Face Quality Histogram"))
	{
		CMeshO &m = md.mm()->cm;
		float RangeMin = env.evalFloat("HistMin");
		float RangeMax = env.evalFloat("HistMax");
		int binNum = env.evalInt("binNum");

		Histogramf H;
		H.SetRange(RangeMin, RangeMax, binNum);
		if (filterName == "Per Vertex Quality Histogram")
		{
			for (CMeshO::VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
				if (!(*vi).IsD())
				{
					assert(!math::IsNAN((*vi).Q()) && "You should never try to compute Histogram with Invalid Floating points numbers (NaN)");
					H.Add((*vi).Q());
				}
		}
		else
		{
			for (CMeshO::FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
				if (!(*fi).IsD())
				{
					assert(!math::IsNAN((*fi).Q()) && "You should never try to compute Histogram with Invalid Floating points numbers (NaN)");
					H.Add((*fi).Q());
				}
		}
		Log("( -inf..%15.7f) : %4.0f", RangeMin, H.BinCountInd(0));
		for (int i = 1; i <= binNum; ++i)
			Log("[%15.7f..%15.7f) : %4.0f", H.BinLowerBound(i), H.BinUpperBound(i), H.BinCountInd(i));
		Log("[%15.7f.. +inf) : %4.0f", RangeMax, H.BinCountInd(binNum + 1));
		return true;
	}
	return false;
}
Foam::Distribution<Type>::Distribution(const Distribution<Type>& d)
:
    List<List<scalar>>(static_cast<const List<List<scalar>>&>(d)),
    binWidth_(d.binWidth()),
    listStarts_(d.listStarts())
{}