/// Apply noise on the decision vector based on rho void robust::inject_noise_x(decision_vector &x) const { // We follow the algorithm at // http://math.stackexchange.com/questions/87230/picking-random-points-in-the-volume-of-sphere-with-uniform-probability // 0. Define the radius double radius = m_rho * pow(m_uniform_dist(m_drng),1.0/x.size()); // 1. Sampling N(0,1) on each dimension std::vector<double> perturbation(x.size(), 0.0); double c2=0; for(size_type i = 0; i < perturbation.size(); i++){ perturbation[i] = m_normal_dist(m_drng); c2 += perturbation[i]*perturbation[i]; } // 2. Normalize the vector for(size_type i = 0; i < perturbation.size(); i++){ perturbation[i] *= (radius / sqrt(c2) ); x[i] += perturbation[i]; } // 3. Clip the variables to the valid bounds for(base::size_type i = 0; i < x.size(); i++){ x[i] = std::max(x[i], get_lb()[i]); x[i] = std::min(x[i], get_ub()[i]); } }
void perturbationStage(Volume* baseVol,Volume* regVol,Transform7* globalMinima,Transform7* higherResStart) { Transform7 grid4mm[FLIRT_MINIMA_COUNT * 27]; float result4mm[FLIRT_MINIMA_COUNT * 27]; for(int i=0;i<FLIRT_MINIMA_COUNT;i++) { perturbation(baseVol,regVol,globalMinima[i],grid4mm+(i*27),result4mm+(i*27)); } for(int i=0;i<FLIRT_HIGHER_RESOLUTION_START; i++) { for(int j=i;j<FLIRT_MINIMA_COUNT*27;j++) { if(result4mm[i] > result4mm[j]) { Transform7 tmpTrans=grid4mm[i]; grid4mm[i]=grid4mm[j]; grid4mm[j]=tmpTrans; float tmpValue=result4mm[i]; result4mm[i]=result4mm[j]; result4mm[j]=tmpValue; } } higherResStart[i]=grid4mm[i]; //printf("%d:rotation(%f,%f,%f),trans(%f,%f,%f),scale(%f),value is:%f\n",i,grid4mm[i].rotation.x,grid4mm[i].rotation.y,grid4mm[i].rotation.z,grid4mm[i].translateAndScale.x,grid4mm[i].translateAndScale.y,grid4mm[i].translateAndScale.z,grid4mm[i].translateAndScale.w,result4mm[i]); } }
// Iterated Local Search driver: builds an initial solution, then repeatedly
// perturbs + locally optimises it, keeping the best solution found.  Stops
// after itNoImprovement stale iterations or maxIterations total.  Relies on
// file/global state: loadData/localSearch/perturbation, `cost`, `n`,
// `epslon`, `avgTime`, `matrix`, `diff`.
void run(std::string inputFile) {
    srand(1607); // fixed seed for reproducible runs
    loadData(inputFile);
    clock_t begin = clock();
    int bestCost = 0;
    int* bestSolution;
    int it = 1;
    int lastImprovement = 0;
    const int numExchanges = 10;      // perturbation strength
    const int itNoImprovement = 100;  // stop after this many stale iterations
    const int maxIterations = 500;
    int* s = initialSolution();
    localSearch(s, n); // localSearch updates the global `cost` for s
    //printf("f(0) = %d\n", cost);
    bestCost = cost;
    bestSolution = s;
    while((it-lastImprovement < itNoImprovement) && (it < maxIterations)) {
        // NOTE(review): if perturbation() allocates a fresh array, the
        // previous `s` (when it is not bestSolution) is leaked here, and the
        // final `delete [] s` may not free `bestSolution` — confirm
        // perturbation()'s ownership contract.
        s = perturbation(s, numExchanges, n);
        localSearch(s, n);
        if(cost > bestCost) { // maximisation: higher cost is better
            bestCost = cost;
            bestSolution = s;
            lastImprovement = it;
        }
        // restart from the incumbent when the current solution degrades by
        // more than a factor epslon below the best
        if((1.0*cost) < ((1-epslon)*bestCost)) {
            s = bestSolution;
        }
        //printf("f(%d) = %d lastImprovement = %d\n", it, bestCost, lastImprovement);
        it++;
    }
    double elapsedSecs = double(clock() - begin) / CLOCKS_PER_SEC;
    avgTime = avgTime + elapsedSecs; // accumulated across runs by the caller
    verifySolution(inputFile, bestSolution, bestCost, elapsedSecs);
    deleteMatrix(matrix, n);
    deleteMatrix(diff, n);
    //deleteMatrix(simDegrees, n);
    //delete [] pos;
    delete [] s;
}
// One flight phase of the clustered multi-objective PSO: every particle
// follows a randomly chosen leader from its cluster's archive for flyTime
// generations, updating velocity/position per dimension, clamping to
// [lb, ub], evaluating, and archiving non-dominated improvements.
void EMOPSO::flight(){
    for(int _i(0);_i<nparticles;_i++){
        //cluster index, from zero to nclusters - 1
        int _whichcluster=(int)_i/(nparticles/nclusters);
        int _gbestselected;
        //pick a random leader... but from the archive of the best solutions found so far!
        _gbestselected=archive->selectClusteredRandomSolution(_whichcluster);
        Particle _gbestarchparticle(ndimensions,nobjectives, nconstr);
        _gbestarchparticle = archive->solutions[_gbestselected];
        for(int _k(0);_k<flyTime;_k++){ //there was 5 in here instead of 1. No idea why...
            //number of generations of a single PSO run - it used to be set to 5
            for(int _j(0);_j<ndimensions;_j++){
                // standard PSO velocity update: inertia + cognitive + social terms
                particles[_i].vel[_j]=W*particles[_i].vel[_j]+C1*rnd(0,1)*(_gbestarchparticle.x[_j]-particles[_i].x[_j])+C2*rnd(0,1)*(particles[_i].xpbest[_j]-particles[_i].x[_j]);
                //warning - if anything goes wrong, do not apply the perturbation!!!
                // NOTE(review): perturbation(_i) is invoked once per DIMENSION,
                // not once per particle - confirm this is intentional.
                perturbation(_i);
                particles[_i].x[_j]+=particles[_i].vel[_j];
                // trailing dimensions are binary/integer-coded: truncate them
                if (_j>=ndimensions-getBinarySize()){
                    particles[_i].x[_j] = (int)particles[_i].x[_j];
                }
                // clamp to the box constraints
                if(particles[_i].x[_j]<lb[_j])
                    particles[_i].x[_j]=lb[_j];
                //particles[_i].vel[_j]=0; //maybe add velocity zeroing at the boundary?
                if(particles[_i].x[_j]>ub[_j])
                    particles[_i].x[_j]=ub[_j];
                //particles[_i].vel[_j]=0;
            }
            //std::cout << particles.size() << std::endl;
            function(_i); // evaluate objectives/constraints for particle _i
            //check whether it is better than the current pbest
            int _tmp=archive->dominePBest(particles[_i]);
            if(_tmp==11||_tmp==1){
                //remember this solution as pbest
                copy(particles[_i].fxpbest,particles[_i].fx);
                copy(particles[_i].xpbest,particles[_i].x);
                //add this solution to the archive / remove dominated ones, and many many other things...
                archive->add(particles[_i],(int)_i/(nparticles/nclusters));
            }
        }
    }
}
// Processes one laser point scan: segments it, aligns it against the current
// map with ICL scan matching (with random perturbation retries on poor
// matches), updates the pose estimate, merges segments/frontiers into the
// map, periodically thins the map, and emits the new robot pose.
void ICLSLAM::handleScan(double timestamp, const PointScan &pscan)
{
    doBackup(timestamp);
    if(doICL(timestamp)) {
        SegmentScan sm(pscan);
        bool skip = false;
        if(outdoor) {
            // Outdoor mode: no scan matching - just rototranslate the scan by
            // the current pose and rebuild the map content from it.
            Rototranslation rt(currentPose);
            freeContent();
            fforeach(const LineSegment &s, sm.getSegments()) {
                segments.append(new LineSegment(rt * s));
            }
            fforeach(const Frontier &f, sm.getFrontiers()) {
                // drop very short frontiers (noise)
                if(f.length() > 0.2)
                    frontiers.append(new Frontier(rt * f));
            }
            if(takeAMeasure(timestamp)) {
                poses.append(TimedPose(timestamp, currentPose));
                lastMeasurePose = lastPose;
                lastMeasureTimestamp = timestamp;
            }
            return;
        }
        // Indoor mode: run ICL scan matching from the current pose guess.
        ICLImpl icl(sm, segments, currentPose);
        icl.run();
        Eigen::Vector3d z = icl.measure();
#ifndef SLAM_SKIP_DEBUG
        ldbg << "ins: " << ins.getPose() << endl;
        ldbg << "currentPose: " << currentPose << endl;
        ldbg << "z: " << z << endl;
#endif
        // If the match moved too far from the prediction (>= 15 deg heading or
        // squared translation >= 3), retry ICL from 10 randomly perturbed
        // starting poses and keep the closest result.
        Eigen::Vector3d diff = currentPose.vectorForm() - z;
        if(std::abs(wrap(diff[2])) >= M_PI / 12 || SQUARE(diff[0]) + SQUARE(diff[1]) >= 3) {
            for(int i = 0; i < 10; i++) {
                Rototranslation perturbation(
                        Random::normal(0, 1 / 3.),
                        Random::normal(0, 1 / 3.),
                        Random::normal(0, SQUARE(M_PI / 6) / 3.));
                ICLImpl icl(sm, segments, perturbation * currentPose);
                icl.run();
                const Eigen::Vector3d &z1 = icl.measure();
                if(poseDistance(z1, currentPose) < poseDistance(z, currentPose)) {
#ifndef SLAM_SKIP_DEBUG
                    ldbg << "Cambiato" << endl;
#endif
                    z = z1;
                }
            }
        }
        // Still too far off: reject the measurement and fall back to the
        // predicted pose, counting consecutive rejections.
        diff = currentPose.vectorForm() - z;
        if(std::abs(wrap(diff[2])) >= M_PI / 12 || SQUARE(diff[0]) + SQUARE(diff[1]) >= 3) {
            z = currentPose;
            skipCount++;
            skip = true;
        }
        if(skip && skipCount <= MAX_SKIP_COUNT) {
            return;
        } else {
            skipCount = 0;
        }
#ifndef SLAM_SKIP_DEBUG
        ldbg << "ins: " << ins.getPose() << endl;
        ldbg << "currentPose: " << currentPose << endl;
        ldbg << "z: " << z << endl;
#endif
        Pose guess = currentPose;
        currentPose = z;
        // Skip map updates while the platform is tilted (non-planar attitude).
        if(!almostEqual(ins.getPose().theta(), 0, 0.02) ||
                !almostEqual(ins.getPose().phi(), 0, 0.02)) {
            return;
        }
        SegmentScan rotoscan = Rototranslation(z) * sm;
#ifndef SLAM_SKIP_DEBUG
        ldbg << endl << "counter=" << counter++;
        ldbg << endl << "guess=" << guess << endl;
#endif
        //printMap(rotoscan.getSegments(), "scan");
        //printMap(segments, "walls");
        //printMap(frontiers, "frontiers");
        //ldbg << "plot(xwalls, ywalls, 'b', xfrontiers, yfrontiers, 'g', xscan, yscan, 'r');" << endl;
        //printMapCPP(sm.getSegments(), "scan");
        //printMapCPP(segments, "segments");
        //ldbg << "Graphics[" << rotoscan.getSegments() << ",Dashed," << rotoscan.getFrontiers() << "]" << endl;
        //ldbg << "Graphics[{" << segments << ",Dashed," << frontiers << ",Opacity[0.1],"
        //     << rotoscan.toPolygon() << "}]" << endl;
        mergeSegments(rotoscan);
        if(takeAMeasure(timestamp)) {
            mergeFrontiers(rotoscan);
            poses.append(TimedPose(timestamp, currentPose));
            lastMeasurePose = lastPose;
            lastMeasureTimestamp = timestamp;
        }
        // Periodic map simplification.
        if(timestamp - lastThinningTimestamp >= SLAM_MAP_THINNING_INTERVAL) {
#ifndef SLAM_SKIP_DEBUG
            ldbg << "Map Thinning" << endl;
#endif
            mapThinning();
            lastThinningTimestamp = timestamp;
        }
#ifndef SLAM_SKIP_DEBUG
        //ldbg << endl << "ListPlot[" << pscan << "]" << endl;
        ldbg << endl << "Graphics[" << sm.getSegments() << "]";
        ldbg << endl << "Graphics[{" << segments << ",Dashed," << frontiers << ",Opacity[0.1],"
             << rotoscan.toPolygon() << "}]" << endl;
#endif
        /*printMap(rotoscan.getSegments(), "scan");
        printMap(segments, "walls");
        printMap(frontiers, "frontiers");
        ldbg << "plot(xwalls, ywalls, 'b', xfrontiers, yfrontiers, 'g', xscan, yscan, 'r');" << endl; */
        //if(counter == 1508)
        //    exit(0);
        // Publish the new pose in the global frame.
        Pose p = Rototranslation(initialPose) * currentPose;
        emit newRobotPose(TimedPose(lastPose.timestamp(), p));
    }
}
// perturbU utility: initialises a laminar parabolic velocity field with
// superimposed streamwise/spanwise streak perturbations to trigger
// transition in well-resolved LES duct-flow cases.
// NOTE(review): this chunk appears truncated - no U.write()/return is
// visible after the cell loop; confirm against the full source.
int main(int argc, char *argv[])
{
# include "setRootCase.H"
# include "createTime.H"
# include "createMesh.H"

    Info << "perturbU: generalised velocity perturbation implementation for "
         << "the initialisation of ducted well-resolved LES flows." << endl;

    // Xdir -> Ubar - streamwise
    // Ydir -> wallReflection vectors
    // Zdir -> cross product Xdir^Ydir
    // Ubar and Retau should be set in transportProperties
    // The choice of Retau is not critical as long as it is
    // approximately the right order of magnitude
    // A laminar background velocity profile is assumed
    // with maximum U at h = max(wall distance)
    // A laminar initial profile is essential since wall normal motion
    // of a mean turbulent profile without resolved turbulence will
    // diffuse the perturbations, preventing transition in some cases

    // channel half-height taken as the maximum wall distance
    wallDist yw(mesh);
    const scalar h = max(yw.internalField());

    // local yDir
    wallDistReflection reflexVec(mesh);
    const volVectorField yDir = reflexVec.n();

    IOobject Uheader
    (
        "U",
        runTime.timeName(),
        mesh,
        IOobject::MUST_READ
    );
    Info << "Reading U" << endl;
    volVectorField U(Uheader, mesh);

    IOdictionary transportProperties
    (
        IOobject
        (
            "transportProperties",
            runTime.constant(),
            mesh,
            IOobject::MUST_READ,
            IOobject::NO_WRITE
        )
    );
    dimensionedScalar nu
    (
        transportProperties.lookup("nu")
    );
    dimensionedVector Ubar
    (
        transportProperties.lookup("Ubar")
    );
    dimensionedScalar Retau
    (
        transportProperties.lookup("Retau")
    );
    // tunable perturbation coefficients, with defaults
    scalar sigma
    (
        transportProperties.lookupOrDefault<scalar>("sigma", 0.00055)
    );
    Info << " sigma = " << sigma << endl;
    scalar duplusC
    (
        transportProperties.lookupOrDefault<scalar>("duplusC", 0.25)
    );
    Info << " duplusC = " << duplusC << endl;
    scalar epsilonC
    (
        transportProperties.lookupOrDefault<scalar>("epsilonC", 0.05)
    );
    Info << " epsilonC = " << epsilonC << endl;
    scalar deviationC
    (
        transportProperties.lookupOrDefault<scalar>("deviationC", 0.2)
    );
    Info << " deviationC = " << deviationC << endl;

    // streamwise unit vector from the bulk velocity
    vector xDir = Ubar.value() / mag(Ubar.value());

    Info << "Re(tau) = " << Retau << endl;
    const scalar utau = Retau.value() * nu.value() / h;
    Info << " u(tau) = " << utau << endl;

    // wall normal circulation
    const scalar duplus = Ubar.value().x() * duplusC / utau;
    // spanwise wavenumber: spacing z+ = 200
    // NOTE(review): 2.0 * 3.14 is a crude approximation of 2*pi; OpenFOAM
    // provides an exact pi constant - consider using it.
    const scalar betaPlus = 2.0 * 3.14 *(1.0 / 200.0);
    // streamwise wave number: spacing x+ = 500
    const scalar alphaPlus = 2.0 * 3.14 * (1.0 / 500.0);
    const scalar epsilon = Ubar.value().x() * epsilonC;

    const vectorField& centres(mesh.C());

    // fixed-seed RNG so the perturbed field is reproducible
    Random perturbation(1234567);

    forAll(centres, celli)
    {
        // add a small random component to enhance symmetry breaking
        scalar deviation = 1.0 + deviationC * perturbation.GaussNormal();
        const vector& cCentre = centres[celli];
        // local spanwise direction, orthogonal to streamwise and wall-normal
        vector zDir = xDir^yDir[celli];
        zDir /= mag(zDir);
        // wall coordinates of the cell centre
        scalar zplus = (cCentre & zDir) * Retau.value() / h;
        scalar yplus = yw[celli] * Retau.value() / h;
        scalar xplus = (cCentre & xDir) * Retau.value() / h;

        // ML: it seems that this profile (or coefficient before Ubar)
        // is correct for rectangular shape, for body of
        // revolution it is (should be?) different

        // laminar parabolic profile
        U[celli] = 3.0 * Ubar.value() * (yw[celli] / h - 0.5 * sqr(yw[celli] / h));
        // streak streamwise velocity
        U[celli] += xDir * (utau * duplus / 2.0) * (yplus / 40.0)
                    * Foam::exp(-sigma * Foam::sqr(yplus) + 0.5)
                    * Foam::cos(betaPlus * zplus) * deviation;
        // streak spanwise perturbation
        U[celli] += zDir * epsilon * Foam::sin(alphaPlus * xplus) * yplus
                    * Foam::exp(-sigma * Foam::sqr(yplus)) * deviation;
    }
// Gibbs sampler for a Dirichlet Process Gaussian mixture model with a
// Normal-Inverse-Wishart base distribution: loads a data matrix, iterates
// cluster-assignment and cluster-parameter sampling for NUM_SWEEPS sweeps,
// and writes the final means, covariances, assignments and K to files.
int main()
{
    // LOAD DATA
    arma::mat X;
    // A) Toy Data
    // char inputFile[] = "../data_files/toyclusters/toyclusters.dat";
    // B) X4.dat
    //char inputFile[] = "./X4.dat";
    // C) fisher data
    //char inputFile[] = "./fisher.dat";
    // D) MNIST data
    //char inputFile[] = "../data_files/MNIST/MNIST.dat";
    // E) Reduced MNIST (5000x400)
    //char inputFile[] = "../data_files/MNIST/MNISTreduced.dat";
    // F) Reduced MNIST (0 and 1) (1000x400)
    //char inputFile[] = "../data_files/MNIST/MNISTlittle.dat";
    // G) Girl.png (512x768, RGB, already unrolled)
    //char inputFile[] = "girl.dat";
    // H) Pool.png (383x512, RGB, already unrolled)
    //char inputFile[] = "pool.dat";
    // I) Cat.png (733x490, RGB, unrolled)
    //char inputFile[] = "cat.dat";
    // J) Airplane.png (512x512, RGB, unrolled)
    //char inputFile[] = "airplane.dat";
    // K) Monarch.png (512x768, RGB, unrolled)
    //char inputFile[] = "monarch.dat";
    // L) tulips.png (512x768 ,RGB, unrolled)
    //char inputFile[] = "tulips.dat";
    // M) demo.dat (2d data)
    char inputFile[] = "demo.dat";

    // INITIALIZE PARAMETERS
    X.load(inputFile);
    const arma::uword N = X.n_rows; // number of data points
    const arma::uword D = X.n_cols; // data dimensionality
    arma::umat ids(N,1); // needed to shuffle indices later
    arma::umat shuffled_ids(N,1);
    for (arma::uword i = 0; i < N; ++i) {
        ids(i,0) = i;
    }
    arma::arma_rng::set_seed_random(); // set arma rng
    // int seed = time(NULL); // set RNG seed to current time
    // srand(seed);
    arma::uword initial_K = 32; // initial number of clusters
    arma::uword K = initial_K;
    arma::umat clusters(N,1); // contains cluster assignments for each data point
    for (arma::uword i = 0; i < N; ++i) {
        clusters(i, 0) = i%K; // initialize as [0,1,...,K-1,0,1,...,K-1,0,...]
    }
    arma::umat cluster_sizes(N,1,arma::fill::zeros); // contains num data points in cluster k
    for (arma::uword i = 0; i < N; ++i) {
        cluster_sizes(clusters(i,0), 0) += 1;
    }
    // Parameter storage is sized for the worst case of N singleton clusters.
    arma::mat mu(N, D, arma::fill::zeros); // contains cluster mean parameters
    arma::mat filler(D,D,arma::fill::eye);
    std::vector<arma::mat> sigma(N,filler); // contains cluster covariance parameters
    if (K < N) {
        // set parameters not belonging to any cluster to -999
        mu.rows(K,N-1).fill(-999);
        for (arma::uword k = K; k < N; ++k) {
            sigma[k].fill(-999);
        }
    }
    arma::umat uword_dummy(1,1); // dummy 1x1 matrix;
    // for (arma::uword i = 0; i <N; ++i) {
    // std::cout << sigma[i] << std::endl;
    // }
    // std::cout << X << std::endl
    // << N << std::endl
    // << D << std::endl
    // << K << std::endl
    // << clusters << std::endl
    // << cluster_sizes << std::endl
    // << ids << std::endl;

    // INITIALIZE HYPER PARAMETERS
    // Dirichlet Process concentration parameter is alpha:
    double alpha = 1;
    // Dirichlet Process base distribution (i.e. prior) is
    // H(mu,sigma) = NIW(mu,Sigma|m_0,k_0,S_0,nu_0) = N(mu|m_0,Sigma/k_0)IW(Sigma|S_0,nu_0)
    arma::mat perturbation(D,D,arma::fill::eye);
    perturbation *= 0.000001; // tiny ridge to keep an empirical S_0 invertible
    //const arma::mat S_0 = arma::cov(X,X,1) + perturbation; // S_xbar / N
    const arma::mat S_0(D,D,arma::fill::eye);
    const double nu_0 = D + 2; // smallest df giving a finite IW mean
    const arma::mat m_0 = mean(X).t();
    const double k_0 = 0.01;
    // std::cout << "S_0" << S_0 << std::endl;
    // std::cout << S_0 << std::endl
    // << nu_0 << std::endl
    // << m_0 << std::endl
    // << k_0 << std::endl;

    // INITIALIZE SAMPLING PARAMETERS
    arma::uword NUM_SWEEPS = 250; // number of Gibbs iterations
    bool SAVE_CHAIN = false; // save output of each Gibbs iteration?
    // arma::uword BURN_IN = NUM_SWEEPS - 10;
    // arma::uword CHAINSIZE = NUM_SWEEPS - BURN_IN;
    // std::vector<arma::uword> chain_K(CHAINSIZE, K); // Initialize chain variable to initial parameters for convinience
    // std::vector<arma::umat> chain_clusters(CHAINSIZE, clusters);
    // std::vector<arma::umat> chain_clusterSizes(CHAINSIZE, cluster_sizes);
    // std::vector<arma::mat> chain_mu(CHAINSIZE, mu);
    // std::vector<std::vector<arma::mat> > chain_sigma(CHAINSIZE, sigma);
    // for (arma::uword sweep = 0; sweep < CHAINSIZE; ++sweep) {
    // std::cout << sweep << " K\n" << chain_K[sweep] << std::endl
    // << sweep << " clusters\n" << chain_clusters[sweep] << std::endl
    // << sweep << " sizes\n" << chain_clusterSizes[sweep] << std::endl
    // << sweep << " mu\n" << chain_mu[sweep] << std::endl;
    // for (arma::uword i = 0; i < N; ++i) {
    // std::cout << sweep << " " << i << " sigma\n" << chain_sigma[sweep][i] << std::endl;
    // }
    // }

    // START CHAIN
    std::cout << "Starting Algorithm with K = " << K << std::endl;
    for (arma::uword sweep = 0; sweep < NUM_SWEEPS; ++sweep) {
        // shuffle indices
        shuffled_ids = shuffle(ids);
        // std::cout << shuffled_ids << std::endl;

        // SAMPLE CLUSTERS
        for (arma::uword j = 0; j < N; ++j){
            // std::cout << "j = " << j << std::endl;
            arma::uword i = shuffled_ids(j);
            arma::mat x = X.row(i).t(); // current data point

            // Remove i's statistics and any empty clusters
            arma::uword c = clusters(i,0); // current cluster
            cluster_sizes(c,0) -= 1;
            //std::cout << "old c = " << c << std::endl;
            if (cluster_sizes(c,0) == 0) {
                // remove empty cluster: compact storage by moving the last
                // cluster's entries into slot c and shrinking K
                cluster_sizes(c,0) = cluster_sizes(K-1,0); // move entries for K onto position c
                mu.row(c) = mu.row(K-1);
                sigma[c] = sigma[K-1];
                uword_dummy(0,0) = c;
                arma::uvec idx = find(clusters == K - 1);
                clusters.each_row(idx) = uword_dummy;
                cluster_sizes(K-1,0) = 0;
                mu.row(K-1).fill(-999);
                sigma[K-1].fill(-999);
                --K;
            }

            // quick test of logMvnPdf:
            // arma::mat m_(2,1);
            // arma::mat s_(2,2);
            // arma::mat t_(2,1);
            // m_ << 1 << arma::endr << 2;
            // s_ << 3 << -0.2 << arma::endr << -0.2 << 1;
            // t_ << -3 << arma::endr << -3;
            // double lpdf = logMvnPdf(t_, m_, s_);
            // std::cout << lpdf << std::endl; // should be -19.1034 (works)

            // Find categorical distribution over clusters (tested)
            arma::mat logP(K+1, 1, arma::fill::zeros);

            // quick test of logInvWishPdf
            // arma::mat si_(2,2), s_(2,2);
            // double nu_ = 4;
            // si_ << 1 << 0.5 << arma::endr << 0.5 << 4;
            // s_ << 3 << -0.2 << arma::endr << -0.2 << 1;
            // double lpdf = logInvWishPdf(si_, s_, nu_);
            // std::cout << lpdf << std::endl; // should be -7.4399 (it is)

            // quick test for logNormInvWishPdf
            // arma::mat si_(2,2), s_(2,2);
            // arma :: mat mu_(2,1), m_(2,1);
            // double k_ = 0.5, nu_ = 4;
            // si_ << 1 << 0.5 << arma::endr << 0.5 << 4;
            // s_ << 3 << -0.2 << arma::endr << -0.2 << 1;
            // mu_ << -3 << arma::endr << -3;
            // m_ << 1 << arma::endr << 2;
            // double lp = logNormInvWishPdf(mu_,si_,m_,k_,s_,nu_);
            // std::cout << lp << std::endl; // should equal -15.2318 (it is)

            // p(existing clusters) (tested)
            for (arma::uword k = 0; k < K; ++k) {
                arma::mat m_ = mu.row(k).t();
                arma::mat s_ = sigma[k];
                logP(k,0) = log(cluster_sizes(k,0)) - log(N-1+alpha) + logMvnPdf(x,m_,s_);
            }

            // p(new cluster): find partition function (tested)
            // arma::mat dummy_mu(D, 1, arma::fill::zeros);
            // arma::mat dummy_sigma(D, D, arma::fill::eye);
            // double logPrior, logLklihd, logPstr, logPartition;

            // posterior hyperparameters (tested)
            arma::mat m_1(D,1), S_1(D,D);
            double k_1, nu_1;
            k_1 = k_0 + 1;
            nu_1 = nu_0 + 1;
            m_1 = (k_0*m_0 + x) / k_1;
            S_1 = S_0 + x * x.t() + k_0 * (m_0 * m_0.t()) - k_1 * (m_1 * m_1.t());
            // std::cout << k_1 << std::endl
            // << nu_1 << std::endl
            // << m_1 << std::endl
            // << S_1 << std::endl;

            // // partition = likelihood*prior/posterior (tested)
            // // (perhaps a direct computation of the partition function would be better)
            // logPrior = logNormInvWishPdf(dummy_mu, dummy_sigma, m_0, k_0, S_0, nu_0);
            // logLklihd = logMvnPdf(x, dummy_mu, dummy_sigma);
            // logPstr = logNormInvWishPdf(dummy_mu, dummy_sigma, m_1, k_1, S_1, nu_1);
            // logPartition = logPrior + logLklihd - logPstr;
            // std::cout << "log Prior = " << logPrior << std::endl
            // << "log Likelihood = " << logLklihd << std::endl
            // << "log Posterior = " << logPstr << std::endl
            // << "log Partition = " << logPartition << std::endl;

            // Computing partition directly
            double logS0,signS0,logS1,signS1;
            arma::log_det(logS0,signS0,S_0);
            arma::log_det(logS1,signS1,S_1);
            /*std::cout << "log(det(S_0)) = " << logS0 << std::endl
            << "log(det(S_1)) = " << logS1 << std::endl;*/
            double term1 = 0.5*D*(log(k_0)-log(k_1));
            double term2 = -0.5*D*log(arma::datum::pi);
            double term3 = 0.5*(nu_0*logS0 - nu_1*logS1);
            double term4 = lgamma(0.5*nu_1);
            double term5 = -lgamma(0.5*(nu_1-D));
            double logPartition = term1+term2+term3+term4+term5;
            /*double logPartition = 0.5*D*(log(k_0)-log(k_1)-log(arma::datum::pi)) \
            /+0.5*(nu_0*logS0 - nu_1*logS1) + lgamma(0.5*nu_1) - lgamma(0.5*(nu_1-D));*/
            /* std::cout << "term1 = " << term1 << std::endl
            << "term2 = " << term2 << std::endl
            << "term3 = " << term3 << std::endl
            << "term4 = " << term4 << std::endl
            << "term5 = " << term5 << std::endl;*/
            //std::cout << "logP = " << logPartition << std::endl;

            // p(new cluster): (tested)
            logP(K,0) = log(alpha) - log(N - 1 + alpha) + logPartition;
            // std::cout << "logP(new cluster) = " << logP(K,0) << std::endl;
            //if(i == 49)
            //assert(false);

            // sample cluster for i
            arma::uword c_ = logCatRnd(logP);
            clusters(i,0) = c_;
            //if (j % 10 == 0){
            //std::cout << "New c = " << c_ << std::endl;
            //std::cout << "logP = \n" << logP << std::endl;
            //}

            // quick test for mvnRnd
            // arma::mat mu, si;
            // mu << 1 << arma::endr << 2;
            // si << 1 << 0.9 << arma::endr << 0.9 << 1;
            // arma::mat m = mvnRnd(mu, si);
            // std::cout << m << std::endl;

            // quick test for invWishRnd
            // double df = 4;
            // arma::mat si(2,2);
            // si << 1 << 1 << arma::endr << 1 << 1;
            // arma::mat S = invWishRnd(si,df);
            // std::cout << S << std::endl;

            if (c_ == K) {
                // Sample parameters for any new-born clusters from posterior
                cluster_sizes(K, 0) = 1;
                arma::mat si_ = invWishRnd(S_1, nu_1);
                //arma::mat si_ = S_1;
                arma::mat mu_ = mvnRnd(m_1, si_/k_1);
                //arma::mat mu_ = m_1;
                mu.row(K) = mu_.t();
                sigma[K] = si_;
                K += 1;
            } else {
                cluster_sizes(c_,0) += 1;
            }
            // if (sweep == 0)
            // std::cout << " K = " << K << std::endl;
            // // if (j == N-1) {
            // // std::cout << logP << std::endl;
            // // std::cout << K << std::endl;
            // // assert(false);
            // // }
            // std::cout << "K = " << K << "\n" << std::endl;
        }

        // sample CLUSTER PARAMETERS FROM POSTERIOR
        for (arma::uword k = 0; k < K; ++k) {
            // std::cout << "k = " << k << std::endl;
            // cluster data
            arma::mat Xk = X.rows(find(clusters == k));
            arma::uword Nk = cluster_sizes(k,0);

            // posterior hyperparameters
            arma::mat m_Nk(D,1), S_Nk(D,D);
            double k_Nk, nu_Nk;
            arma::mat sum_k = sum(Xk,0).t();
            arma::mat cov_k(D, D, arma::fill::zeros);
            for (arma::uword l = 0; l < Nk; ++l) {
                cov_k += Xk.row(l).t() * Xk.row(l);
            }
            k_Nk = k_0 + Nk;
            nu_Nk = nu_0 + Nk;
            m_Nk = (k_0 * m_0 + sum_k) / k_Nk;
            S_Nk = S_0 + cov_k + k_0 * (m_0 * m_0.t()) - k_Nk * (m_Nk * m_Nk.t());

            // sample fresh parameters
            arma::mat si_ = invWishRnd(S_Nk, nu_Nk);
            //arma::mat si_ = S_Nk;
            arma::mat mu_ = mvnRnd(m_Nk, si_/k_Nk);
            //arma::mat mu_ = m_Nk;
            mu.row(k) = mu_.t();
            sigma[k] = si_;
        }
        std::cout << "Iteration " << sweep + 1 << "/" << NUM_SWEEPS<< " done. K = " << K << std::endl;
        // // STORE CHAIN
        // if (SAVE_CHAIN) {
        // if (sweep >= BURN_IN) {
        // chain_K[sweep - BURN_IN] = K;
        // chain_clusters[sweep - BURN_IN] = clusters;
        // chain_clusterSizes[sweep - BURN_IN] = cluster_sizes;
        // chain_mu[sweep - BURN_IN] = mu;
        // chain_sigma[sweep - BURN_IN] = sigma;
        // }
        // }
    }
    std::cout << "Final cluster sizes: " << std::endl << cluster_sizes.rows(0, K-1) << std::endl;

    // WRITE OUPUT DATA TO FILE
    arma::mat MU = mu.rows(0,K-1);
    arma::mat SIGMA(D*K,D); // covariances stacked vertically, D rows per cluster
    for (arma::uword k = 0; k < K; ++k) {
        SIGMA.rows(k*D,(k+1)*D-1) = sigma[k];
    }
    arma::umat IDX = clusters;
    // A) toycluster data
    // char MuFile[] = "../data_files/toyclusters/dpmMU.out";
    // char SigmaFile[] = "../data_files/toyclusters/dpmSIGMA.out";
    // char IdxFile[] = "../data_files/toyclusters/dpmIDX.out";
    // B) X4.dat
    char MuFile[] = "dpmMU.out";
    char SigmaFile[] = "dpmSIGMA.out";
    char IdxFile[] = "dpmIDX.out";
    std::ofstream KFile("K.out");
    MU.save(MuFile, arma::raw_ascii);
    SIGMA.save(SigmaFile, arma::raw_ascii);
    IDX.save(IdxFile, arma::raw_ascii);
    KFile << "K = " << K << std::endl;
    if (SAVE_CHAIN) {}
    // std::ofstream chainKFile("chainK.out");
    // std::ofstream chainClustersFile("chainClusters.out");
    // std::ofstream chainClusterSizesFile("chainClusterSizes.out");
    // std::ofstream chainMuFile("chainMu.out");
    // std::ofstream chainSigmaFile("chainSigma.out");
    // chainKFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
    // << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
    // << "Burn-In: " << BURN_IN << std::endl
    // << "Initial number of clusters: " << initial_K << std::endl
    // << "Output: Number of cluster (K)\n" << std::endl;
    // chainClustersFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
    // << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
    // << "Burn-In: " << BURN_IN << std::endl
    // << "Initial number of clusters: " << initial_K << std::endl
    // << "Output: Cluster identities (clusters)\n" << std::endl;
    // chainClusterSizesFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
    // << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
    // << "Burn-In: " << BURN_IN << std::endl
    // << "Initial number of clusters: " << initial_K << std::endl
    // << "Output: Size of clusters (cluster_sizes)\n" << std::endl;
    // chainMuFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
    // << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
    // << "Burn-In: " << BURN_IN << std::endl
    // << "Initial number of clusters: " << initial_K << std::endl
    // << "Output: Samples for cluster mean parameters (mu. Note: means stored in rows)\n" << std::endl;
    // chainSigmaFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
    // << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
    // << "Burn-In " << BURN_IN << std::endl
    // << "Initial number of clusters: " << initial_K << std::endl
    // << "Output: Samples for cluster covariances (sigma)\n" << std::endl;
    // for (arma::uword sweep = 0; sweep < CHAINSIZE; ++sweep) {
    // arma::uword K = chain_K[sweep];
    // chainKFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_K[sweep] << std::endl;
    // chainClustersFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_clusters[sweep] << std::endl;
    // chainClusterSizesFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_clusterSizes[sweep].rows(0, K - 1) << std::endl;
    // chainMuFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_mu[sweep].rows(0, K - 1) << std::endl;
    // chainSigmaFile << "Sweep #" << BURN_IN + sweep + 1<< "\n";
    // for (arma::uword i = 0; i < K; ++i) {
    // chainSigmaFile << chain_sigma[sweep][i] << std::endl;
    // }
    // chainSigmaFile << std::endl;
    // }
    // }
    return 0;
}
/// Builds a 2D mesh from a point set via a VTK Delaunay triangulation.
/// Optional fixed vertices (M_vertices) are inserted first; the point-set
/// x coordinates are jittered slightly before triangulation (to avoid
/// degenerate configurations) and restored afterwards.
void PointSetToMesh<Convex, T>::visit( pointset_type* pset, mpl::int_<2> )
{
#if defined(FEELPP_HAS_VTK)
    // reinitialize mesh
    M_mesh = mesh_ptrtype( new mesh_type( Environment::worldComm().subWorldCommSeq() ) );

    vtkPoints *newPoints = vtkPoints::New();

    if ( M_vertices )
    {
        DVLOG(2) << "adding vertices\n" << M_vertices.get() << "\n";

        for ( size_type i = 0; i < M_vertices->size2(); ++i )
        {
            newPoints->InsertNextPoint( M_vertices.get()( 0, i ), M_vertices.get()( 1, i ), 0 );
        }
    }

    // Number of fixed vertex points inserted before the point set; used to
    // map triangulated point ids back onto perturbation[] entries below.
    vtkIdType const nFixed = newPoints->GetNumberOfPoints();

    std::vector<double> perturbation( pset->nPoints() );

    for ( size_type i = 0 ; i < pset->nPoints() ; ++i )
    {
        perturbation[i] = std::rand()*1e-6/RAND_MAX;
        // InsertNextPoint returns a vtkIdType; the previous uint16_type local
        // would silently wrap for more than 65535 points.
        vtkIdType index = newPoints->InsertNextPoint( pset->points()( 0,i )+perturbation[i],
                                                      pset->points()( 1,i ), 0 );
        DVLOG(2) << "Inserting point with id " << index << "\n";
        DVLOG(2) << "pset.point( " << i << " )= " << pset->point( i ) << "\n";
    }

    vtkPolyData *polyData = vtkPolyData::New();
    polyData->SetPoints( newPoints );

    vtkDelaunay2D *delaunay2D = vtkDelaunay2D::New();
#if VTK_MAJOR_VERSION <= 5
    delaunay2D->SetInput( polyData );
#else
    delaunay2D->SetInputData( polyData );
#endif
    /**
     * The Offset parameter helps to get a convex hull of the points.
     * If the result mesh is not convex, increase the Offset !
     **/
    //delaunay2D->SetOffset( 8 ) ;
    delaunay2D->Update();
    vtkPolyData* outMesh = delaunay2D->GetOutput( );

    /** General informations about vtkPolyData **/
    outMesh->BuildLinks();
    int Nelem = outMesh->GetNumberOfPolys();
    int Npts = outMesh->GetNumberOfPoints();
    DVLOG(2) << "Number of cells = " << Nelem << "\n";
    DVLOG(2) << "Number of points = " << Npts << "\n";

    for ( int i=0; i< Npts; ++i )
    {
        // revert back the perturbation. The first nFixed output points are the
        // fixed vertices, which were never perturbed; the original code indexed
        // perturbation[i] for every output point, reading past the end of the
        // vector whenever M_vertices was set.
        if ( i < nFixed || size_type( i - nFixed ) >= perturbation.size() )
            continue;

        outMesh->GetPoints()->SetPoint( i,
                                        outMesh->GetPoints()->GetPoint( i )[0]- perturbation[i-nFixed],
                                        outMesh->GetPoints()->GetPoint( i )[1],
                                        outMesh->GetPoints()->GetPoint( i )[2] );
    }

    for ( int i=0; i< Nelem; ++i )
    {
        // debug dump of each triangle: its three point ids/coordinates and edges
        DVLOG(2) << "Element Id = " << i << "\n";
        DVLOG(2) << "Point 0 (" << ( int )outMesh->GetCell( i )->GetPointId( 0 ) <<") =" ;
        DVLOG(2) << "(" << outMesh->GetCell( i )->GetPoints()->GetPoint( 0 )[0] << " , " << outMesh->GetCell( i )->GetPoints()->GetPoint( 0 )[1]<< ")" << "\n";
        DVLOG(2) << "Point 1 (" << ( int )outMesh->GetCell( i )->GetPointId( 1 ) <<") =" ;
        DVLOG(2) << "(" << outMesh->GetCell( i )->GetPoints()->GetPoint( 1 )[0] << " , " << outMesh->GetCell( i )->GetPoints()->GetPoint( 1 )[1]<< ")" << "\n";
        DVLOG(2) << "Point 2 (" << ( int )outMesh->GetCell( i )->GetPointId( 2 ) <<") =" ;
        DVLOG(2) << "(" << outMesh->GetCell( i )->GetPoints()->GetPoint( 2 )[0] << " , " << outMesh->GetCell( i )->GetPoints()->GetPoint( 2 )[1]<< ")" << "\n";
        DVLOG(2) << outMesh->GetCell( i )->GetNumberOfEdges() << "\n";
        DVLOG(2) << ( int )outMesh->GetCell( i )->GetEdge( 0 )->GetPointId( 0 ) << "\n";
        DVLOG(2) << ( int )outMesh->GetCell( i )->GetEdge( 0 )->GetPointId( 1 ) << "\n";
    }

    DVLOG(2) << "[PointSetToMesh::visit(<2>)] delaunay done, now vtk to Mesh<>\n";
    FilterFromVtk<mesh_type> meshfromvtk( outMesh );
    meshfromvtk.visit( M_mesh.get() );

    // Release the VTK objects created with New(): they are reference counted
    // and previously leaked on every call. delaunay2D owns outMesh, so it is
    // released only after its output has been consumed above.
    delaunay2D->Delete();
    polyData->Delete();
    newPoints->Delete();

    DVLOG(2) << "[PointSetToMesh::visit(<2>)] done\n";
#else
    std::cerr << "The library was not compiled with vtk support\n";
#endif /* FEELPP_HAS_VTK */
}