void Bloom::FillBlurParams(float dx, float dy) { const int sampleCount = 15; float sampleWeights[sampleCount]; D3DXVECTOR2 sampleOffsets[sampleCount]; sampleWeights[0] = Gaussian(0); sampleOffsets[0].x = sampleOffsets[0].y = 0; float totalWeights = sampleWeights[0]; for(int i = 0; i < sampleCount / 2; i++) { float weight = Gaussian(i + 1); sampleWeights[i * 2 + 1] = sampleWeights[i * 2 + 2] = weight; totalWeights += weight * 2; float sampleOffset = i * 2 + 1.5f; D3DXVECTOR2 delta(dx * sampleOffset, dy * sampleOffset); sampleOffsets[i * 2 + 1] = delta; sampleOffsets[i * 2 + 2] = -delta; } for(int i = 0; i < sampleCount; i++) sampleWeights[i] /= totalWeights; m_fx->setVal("sampleOffsets", (f32*)sampleOffsets, 2*sampleCount*sizeof(f32)); m_fx->setVal("sampleWeights", (f32*)sampleWeights, sampleCount*sizeof(f32)); }
// Fit a `centers`-component 1-D Gaussian mixture to `data` with EM, appending
// the fitted components to `re`; returns the BIC score of the fit.
// DELTA > 0.01 pins every component's standard deviation to DELTA; otherwise
// deviations are re-estimated on each EM iteration.
double GMM::fixCenterGmm(const std::vector<double> & data, int centers, std::vector< Gaussian > &re, double DELTA )
{
    // Degenerate case: one component is just the sample-fitted Gaussian.
    if( centers <= 1 ) {
        re.push_back( getGaussian(data) );
        return caculateBIC(data, re);
    }
    // DELTA at or below the 0.01 sentinel means "estimate delta from data".
    bool fixDelta = true;
    if(DELTA <= 0.01) {
        fixDelta = false;
    }
    double mx = *max_element(data.begin(), data.end());
    double mn = *min_element(data.begin(), data.end());
    double diff = mx - mn;
    double delta = getDelta(data);
    // Initialize component means evenly across [mn, mx], uniform weights.
    for(int i = 0; i < centers; ++i) {
        if(fixDelta) {
            re.push_back( Gaussian(mn + i*diff/(centers-1), DELTA, 1.0 / centers) );
        } else {
            re.push_back( Gaussian(mn + i*diff/(centers-1), delta, 1.0 / centers) );
        }
    }
    // beta[i][j]: responsibility of component j for sample i.
    std::vector< std::vector<double> > beta( data.size(), std::vector<double>(centers, 0.0) );
    std::vector< Gaussian > tmp(centers, Gaussian() );
    int itera = 0;
    // EM loop: stop after MAX_ITERATOR iterations or when ok() reports the
    // previous parameters (tmp) and current parameters (re) have converged.
    while( itera++ < MAX_ITERATOR && !ok(re, tmp) ) {
        tmp = re;
        // E-step: compute normalized responsibilities per sample.
        for(int i = 0; i < data.size(); ++i) {
            for(int j = 0; j < centers; ++j) {
                beta[i][j] = re[j].getProbability(data[i]);
            }
            double sum = accumulate(beta[i].begin(), beta[i].end(), 0.0);
            // NOTE(review): if every component's probability underflows to 0,
            // `sum` is 0 and this division yields NaNs -- confirm inputs keep
            // component densities in floating-point range.
            for(int j = 0; j < centers; ++j) {
                beta[i][j] /= sum;
            }
        }
        // M-step: re-estimate each component's weight, mean and (optionally)
        // standard deviation from the responsibilities.
        for(int j = 0; j < centers; ++j) {
            double sumBeta = 0.0, sumweightBeta = 0.0, sumVar = 0.0;
            for(int i = 0; i < data.size(); ++i) {
                sumBeta += beta[i][j];
                sumweightBeta += data[i] * beta[i][j];
            }
            re[j].weight = sumBeta / data.size();
            re[j].mean = sumweightBeta / sumBeta;
            for(int i = 0; i < data.size(); ++i) {
                sumVar += beta[i][j] * (data[i] - re[j].mean) * (data[i] - re[j].mean);
            }
            if(!fixDelta) {
                re[j].delta = std::pow( sumVar / sumBeta, 0.5);
            }
        }
    }
    return caculateBIC(data, re);
}
// Histogram-based kernel fit: bin the samples into `binCount` equal-width
// bins over [min, max], then place one Gaussian per bin (centered on the bin,
// stdev = bin width) weighted by the bin's share of the samples.
MultiGaussianWeighted MultiGaussianWeighted::fitGaussianKernel(fracfloat_t* errors, unsigned count, unsigned binCount){
  // std::sort(errors, errors + count);
  fracfloat_t minVal = min<fracfloat_t>(errors, count);
  fracfloat_t maxVal = max<fracfloat_t>(errors, count);

  // NOTE(review): if minVal == maxVal (constant input), binWidth is 0 and the
  // bin-index division below is undefined -- confirm callers never pass
  // constant data.
  fracfloat_t binWidth = (fracfloat_t)((maxVal - minVal) / binCount);

  //Calculate the weight of each bin
  fracfloat_t* weights = new fracfloat_t[binCount];
  arrayZero<fracfloat_t>(weights, binCount);
  for(unsigned i = 0; i < count; i++){
    //Find the bin
    unsigned bin = (unsigned) ((errors[i] - minVal) / binWidth);
    if(bin == binCount) bin = binCount - 1; //To handle the top bin (the max sample lands exactly on the upper edge)
    assert(bin < binCount);
    weights[bin] += (fracfloat_t) (1.0 / count);
  }

  assert(weights[0] > 0); //First bin always has at least the min sample.
  //assert(weights[binCount - 1] > 0); //Last bin always has at least the max sample. (This is only true when minVal != maxVal)

  //Make gaussians (stdev = bin width), each centered at its bin's midpoint.
  Gaussian* gaussians = new Gaussian[binCount];
  for(unsigned i = 0; i < binCount; i++){
    gaussians[i] = Gaussian(minVal + binWidth * i + binWidth / 2, binWidth);
  }

  // NOTE(review): `gaussians` and `weights` are raw owning pointers handed to
  // the MultiGaussianWeighted ctor -- presumably it takes ownership; verify
  // it frees them to avoid a leak.
  MultiGaussianWeighted mg = MultiGaussianWeighted(gaussians, weights, binCount);

  return mg;
}
// Draw one sample from a Gaussian distribution centered at x = 380 (sigma
// ~17.48) via the Marsaglia polar method, resampling until the value lies in
// [300, 600].
//
// Bug fixed: the original expressions
//     x=-1+2*rand()%30000/30000.;   y=-1*2*rand()%30000/30000.;
// parse as ((2*rand())%30000)/30000. and ((-2*rand())%30000)/30000. because
// %, * and / share precedence. That confined x to [-1,0) and y to (-1,0], so
// y was never positive and every returned value fell on or below 380 --
// the distribution was one-sided. Parenthesizing rand()%30000 restores
// uniform points in [-1, 1) as the polar method requires.
double Gaussian(){
  // This is a Gaussian Distribution with center at x=380
  double sigma=17.4847;
  // double sigma=8.;
  double x,y,r2,result;
  do{
    x = -1 + 2 * (rand() % 30000) / 30000.;  //random number in [-1,1)
    y = -1 + 2 * (rand() % 30000) / 30000.;
    r2 = x*x + y*y;
  }while(r2>1.0 || r2==0.);  // keep only points strictly inside the unit disk
  result = 380. + sigma * y * sqrt (-2.0 * log (r2) / r2);
  // Reject-and-resample outside the accepted window.
  if (result < 300.) return Gaussian();
  if (result > 600.) return Gaussian();
  return result;
}
// Fill `row` with `elements` samples of a zero-mean Gaussian curve evaluated
// over the interval [-1, 1].
void GaussianRow(int elements, vector<float>& row, float variance = .2) {
    row.resize(elements);
    for(int k = 0; k < elements; k++) {
        const float t = ofMap(k, 0, elements - 1, -1, 1);
        row[k] = Gaussian(t, 0, variance);
    }
}
void Img::GaussianSmoothOriginal(double sigma) { std::vector< std::vector<double> > tmp; // make array for gaussian values int size = ceil(6 * sigma); if (size % 2 == 0) { size += 1; } double gauss[size]; double weight = 0; for (int i = 0; i < size; i++){ gauss[i] = Gaussian(i - (size - 1)/2, sigma); weight += gauss[i]; } // normalize for (auto &i: gauss){ i = i/weight; } // make sure the gauss filter is not bigger the the image itself std::pair<int, int> s = SizeOriginal(); if (s.first < size){ std::cout << "x-direction to low" << std::endl; std::exit(1); } if (s.second < size){ std::cout << "y-direction to low" << std::endl; std::exit(1); } tmp.resize(s.first); for (auto &i: tmp){ i.resize(s.second); } // smoothing in x-Direction for (int i=0; i<s.first; i++){ for (int j=0; j<s.second; j++){ double sum = 0; for (int k=0; k<size; k++){ sum += gauss[k] * AtOriginal(i+k-(size-1)/2, j, true); } tmp[i][j] = sum; } } original = tmp; // smoothing in y-direction for (int i=0; i<s.first; i++){ for (int j=0; j<s.second; j++){ double sum = 0; for (int k=0; k<size; k++){ sum += gauss[k] * AtOriginal(i, j+k-(size-1)/2, true); } tmp[i][j] = sum; } } original = tmp; }
// copy constructor NonLinearAnalyticConditionalGaussian_Ginac::NonLinearAnalyticConditionalGaussian_Ginac (const NonLinearAnalyticConditionalGaussian_Ginac& g) : AnalyticConditionalGaussianAdditiveNoise( Gaussian(g.AdditiveNoiseMuGet(),g.AdditiveNoiseSigmaGet()) ,2), func_sym (g.func_sym), cond_sym (g.cond_sym), u_sym (g.u_sym), x_sym (g.x_sym), cond_size (cond_sym.size()), u_size (u_sym.size()), x_size (x_sym.size()), func_size (func_sym.rows()), dfunc_dcond (cond_size), dfunc_dx (x_size) { // test for consistent input assert (func_sym.cols() == 1); if (cond_size!=0) assert (g.AdditiveNoiseSigmaGet().rows() == cond_size); // derive func to cond for (unsigned int i=0; i<cond_size; i++) dfunc_dcond[i] = func_sym.diff(cond_sym[i]); // derive func to x for (unsigned int i=0; i < x_size; i++) dfunc_dx[i] = func_sym.diff(x_sym[i]); }
// Simulate an N-step Black-Scholes price path with compound-Poisson jumps.
// The diffusion part uses the exact-solution form
//   S_i = S_0 * exp((b - sigma^2/2) * i * d_t + sigma * g_i * sqrt(d_t)),
// with g_i a cumulative sum of N(0,1) increments; each step then applies
// PoissonLaw(lambda) independent multiplicative jumps of size (1 + randf(1)).
Vect BlackScholesJump(int N)
{// Black-Scholes: dX = x(r*dt + sigma dW)
 // Euler Method: S_{n+1}=S_n(1 + r*d_t + sigma*g_n*d_t^{1/2})
 // d_t = 1, r, g_n
    // Dropped the original (erroneous) `const` qualifier: both vectors are
    // filled in element by element below.
    Vect JBS(N), g(N);
    JBS(0)=1.;
    double d_t = 1.;
    double b = 1;        // drift
    //JUMPS/////
    int Nbj;             // number of jumps in the current step
    double sigm = 1.;    // volatility
    double lambda = 0.1; // jump intensity
    g(0) = 0.;
    ////////////
    for ( int i = 1; i < N; ++i ){
        // `=` instead of the original `+=`: `+=` folded in whatever value
        // g(i) held before being written, which is only correct if Vect
        // zero-initializes -- plain assignment is correct either way.
        g(i) = g(i-1) + Gaussian( 0., 1. );
        JBS(i) = JBS(0)*exp( ( b - pow(sigm,2.)/2. ) * i * d_t + sigm * g(i) * pow(d_t, 0.5) );
        Nbj = PoissonLaw(lambda);
        for (int nj = 0; nj < Nbj; ++nj) {
            JBS(i) *= (1. + randf(1.0));
        }
    }
    return JBS;
}
// Preprocess a CT volume and its liver mask for volume visualization:
// determine the mask's bounding box, crop both images to it, Gaussian-smooth
// the CT, and composite the CT with dilated/eroded variants of the mask.
// Returns the composited image, or a null pointer if the mask is empty.
Image::Pointer
VolumeVisualizationImagePreprocessor::Process(
  Image::Pointer m_originalCT, Image::Pointer m_originalLiverMask)
{
  VVP_INFO << "Processing...";

  // converting mitk image -> itk image
  CTImage::Pointer CTImageWork = CTImage::New();
  CastToItkImage( m_originalCT, CTImageWork );

  // converting mitk image -> itk image
  BinImage::Pointer BinImageMask = BinImage::New();
  CastToItkImage( m_originalLiverMask, BinImageMask );

  // Sets m_MinX..m_MaxZ from the mask extents.
  DetermineBoundingBox( BinImageMask );

  // Degenerate bounding box means an empty mask: nothing to process.
  if( m_MaxX < m_MinX || m_MaxY < m_MinY || m_MaxZ < m_MinZ )
    return 0;

  // Crop both to the bounding box; only the CT is smoothed.
  CTImageWork = Gaussian(Crop( CTImageWork ));
  BinImageMask = Crop( BinImageMask );

  CTImage::Pointer itkResult =Composite(CTImageWork,BinImageMask,Dilate(BinImageMask),Erode(BinImageMask));

  // Convert the ITK result back to an MITK image for the caller.
  mitk::Image::Pointer mitkResult= mitk::Image::New();
  mitk::CastToMitkImage( itkResult, mitkResult ); //TODO here we can perhaps save memory

  VVP_INFO << "Finished...";

  return mitkResult;
}
// Produce a complex sample as real/imaginary parts: a Gaussian-distributed
// magnitude rotated by a phase drawn uniformly from [0, 2*pi).
void RNG::ComplexGaussianAmplitude(num_t &r, num_t &i)
{
    // Keep the draw order (Gaussian first, then Uniform) so the underlying
    // RNG stream is consumed identically.
    const num_t magnitude = Gaussian();
    const num_t angle = Uniform() * 2.0 * M_PIn;
    r = magnitude * std::cos(angle);
    i = magnitude * std::sin(angle);
}
// Construct the main window: wire all UI signals to their slots, seed the
// default parameter values, and parent the image display widgets to the tabs.
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // Button / widget -> slot connections.
    connect(ui->openButton, SIGNAL(clicked()), this, SLOT(Open()));
    connect(ui->saveButton, SIGNAL(clicked()), this, SLOT(SaveImage()));
    connect(ui->SSDButton, SIGNAL(clicked()), this, SLOT(SSD()));
    connect(ui->SADButton, SIGNAL(clicked()), this, SLOT(SAD()));
    connect(ui->NCCButton, SIGNAL(clicked()), this, SLOT(NCC()));
    connect(ui->GTCheckBox, SIGNAL(clicked()), this, SLOT(GTOnOff()));
    connect(ui->gaussianButton, SIGNAL(clicked()), this, SLOT(Gaussian()));
    connect(ui->maxButton, SIGNAL(clicked()), this, SLOT(FindBestDisparity()));
    connect(ui->bilateralButton, SIGNAL(clicked()), this, SLOT(Bilateral()));
    connect(ui->segmentButton, SIGNAL(clicked()), this, SLOT(Segment()));
    connect(ui->renderButton, SIGNAL(clicked()), this, SLOT(Render()));
    connect(ui->renderSlider, SIGNAL(valueChanged(int)), this, SLOT(RenderSlider(int)));
    // NOTE(review): clicked() carries no int argument, so Qt cannot match it
    // to SLOT(MagicStereo(int)) -- this connect fails at runtime and the
    // button does nothing. Confirm MagicStereo's intended signature.
    connect(ui->magicButton, SIGNAL(clicked()), this, SLOT(MagicStereo(int)));

    // Default UI parameter values.
    ui->GTCheckBox->setChecked(true);
    ui->pixelErrorLabel->setText("");
    ui->gaussianSigmaSpinBox->setValue(1.0);
    ui->biSigmaSSpinBox->setValue(1.0);
    ui->biSigmaISpinBox->setValue(20.0);
    ui->renderSlider->setValue(100);
    ui->SADOffsetBox->setValue(2);
    ui->SSDOffsetBox->setValue(2);
    ui->NCCOffsetBox->setValue(2);
    ui->segmentGridBox->setValue(20);
    ui->segmentColorSpinBox->setValue(20.0);
    ui->segmentSpatialSpinBox->setValue(6.0);
    ui->segmentIterBox->setValue(4);

    // Parent each display widget into its tab page.
    m_Image1Display.setParent(ui->tab);
    m_Image2Display.setParent(ui->tab_2);
    m_GTDisplay.setParent(ui->tab_4);
    m_DisparityDisplay.setParent(ui->tab_3);
    m_ErrorDisplay.setParent(ui->tab_5);
    m_RenderDisplay.setParent(ui->tab_6);
    m_SegmentDisplay.setParent(ui->tab_7);

    // Give each display a back-pointer to this window.
    m_Image1Display.window = this;
    m_Image2Display.window = this;
    m_GTDisplay.window = this;
    m_DisparityDisplay.window = this;
    m_ErrorDisplay.window = this;
    m_RenderDisplay.window = this;
    m_SegmentDisplay.window = this;

    ui->tabWidget->setCurrentIndex(0);

    // Initial processing state.
    m_LastRow = 0;
    m_SegmentIteration = 0;
    m_MatchCost = NULL;
}
// Overwrite A with an n x n Gaussian random matrix and QR-factorize it in
// place; the implicitly stored Q (Householder reflectors in A together with
// the scalars t and sign vector d) then represents a Haar-distributed
// (uniformly random) unitary matrix.
void ImplicitHaar( Matrix<F>& A, Matrix<F>& t, Matrix<Base<F>>& d, Int n )
{
    DEBUG_ONLY(CallStackEntry cse("ImplicitHaar"))
    // TODO: Replace this with a quadratic scheme similar to Stewart's, which
    //       essentially generates random Householder reflectors
    Gaussian( A, n, n );
    QR( A, t, d );
}
// Overwrite A with an explicit n x n Haar-distributed (uniformly random)
// unitary matrix: fill A with Gaussian entries, then form the unitary factor
// of its QR decomposition explicitly.
void Haar( AbstractDistMatrix<F>& A, Int n )
{
    DEBUG_ONLY(CallStackEntry cse("Haar"))
    // TODO: Replace this with a quadratic scheme similar to Stewart's, which
    //       essentially generates random Householder reflectors
    Gaussian( A, n, n );
    qr::ExplicitUnitary( A );
}
// Simulate a standard Brownian-motion path of length N starting at zero:
// each successive value adds an independent N(0,1) increment.
Vect BMotion(int N)
{
    Vect path(N);
    path(0) = 0.;
    for (int step = 1; step < N; ++step)
        path(step) = path(step - 1) + Gaussian(0., 1.);
    return path;
}
// Distributed variant: overwrite A with an n x n Gaussian random matrix and
// QR-factorize it in place; the implicit Q (reflectors in A plus scalars t)
// then represents a Haar-distributed unitary matrix.
inline void
ImplicitHaar( DistMatrix<F>& A, DistMatrix<F,MD,STAR>& t, Int n )
{
    DEBUG_ONLY(CallStackEntry cse("Haar"))
    // TODO: Replace this with a quadratic scheme similar to Stewart's, which
    //       essentially generates random Householder reflectors
    Gaussian( A, n, n );
    QR( A, t );
}
///30. Several Gaussian double Mult_Gaussian(const double& X, const int& Nb_peak, const vec& Mean, const vec& SD, const vec& Ampl){ double result = 0.; for (int i = 0; i < Nb_peak; i++) { assert(SD(i)>0); result += Gaussian(X, Mean(i), SD(i), Ampl(i)); } return result; }
void testGaussianNumerics(){ std::cout.precision(16); std::cout << std::scientific; Gaussian g = Gaussian(5, 1); for(unsigned i = 0; i < 100; i++){ std::cout << i << ": L: " << g.likelihood((fracfloat_t)i) << " -LL: " << (-INV_LN_2 * log(g.likelihood(i))) << ", S: " << g.surprisal((fracfloat_t)i) << std::endl; } }
//------------------------------------------------------------- double peak::get_density_ODF(const double &theta) //------------------------------------------------------------- { switch (method) { case 1: { return ODF_sd(theta, mean, params); break; } case 2: { return ODF_hard(theta, mean, s_dev, ampl) + ODF_hard(theta - pi, mean, s_dev, ampl) + ODF_hard(theta + pi, mean, s_dev, ampl); break; } case 3: { return Gaussian(theta, mean, s_dev, ampl) + Gaussian(theta - pi, mean, s_dev, ampl) + Gaussian(theta + pi, mean, s_dev, ampl); break; } case 4: { return Lorentzian(theta, mean, width, ampl) + Lorentzian(theta - pi, mean, width, ampl) + Lorentzian(theta + pi, mean, width, ampl); break; } case 5: { return PseudoVoigt(theta, mean, s_dev, width, ampl, params) + PseudoVoigt(theta - pi, mean, s_dev, width, ampl, params) + PseudoVoigt(theta + pi, mean, s_dev, width, ampl, params); break; } case 6: { assert(width > 0.); double inv_width = 1./width; return Pearson7(theta, mean, inv_width, params) + Pearson7(theta - pi, mean, inv_width, params) + Pearson7(theta + pi, mean, inv_width, params); break; } case 7: { return 1.; break; } default : { cout << "Error: The peak type specified is not recognized" << endl; return 0; break; } } }
// Kernel-density-style fit with one Gaussian per sample: each kernel is
// centered on its sample, and all share the bandwidth 1/sqrt(count).
MultiGaussian MultiGaussian::fitGaussianKernelNBin(fracfloat_t* errors, unsigned count){
    const fracfloat_t bandwidth = 1.0 / sqrt(count);

    Array<Gaussian> kernels = Array<Gaussian>(count);
    for(unsigned k = 0; k < count; k++){
        kernels[k] = Gaussian(errors[k], bandwidth);
    }

    return MultiGaussian(kernels);
}
// Distributed variant: overwrite A with an explicit n x n Haar-distributed
// (uniformly random) unitary matrix by filling A with Gaussian entries and
// explicitly forming the unitary factor of its QR decomposition.
inline void
Haar( DistMatrix<F>& A, Int n )
{
#ifndef RELEASE
    CallStackEntry entry("Haar");
#endif
    // TODO: Replace this with a quadratic scheme similar to Stewart's, which
    //       essentially generates random Householder reflectors
    Gaussian( A, n, n );
    qr::Explicit( A );
}
// Simulate N steps of arithmetic Brownian motion with drift:
//   S_{n+1} = S_n + b*d_t + sigma*g_n*sqrt(d_t), with d_t = 1 here.
Vect SimuStocha(int N)
{// Simple stochastic process
 // S_{n+1}=S_n + d_t*b + sigma*g_n*d_t^{1/2}
 // d_t = 1, b, g_n
    // Dropped the original (erroneous) `const` qualifier: the path is
    // assigned into element by element below.
    Vect SimuS(N);
    SimuS(0)=0.;
    double b = 0.01;   // drift per step
    double sigm = 1.;  // volatility
    for ( int i = 1; i < N; ++i ){
        SimuS(i) = SimuS(i-1) + b + sigm * Gaussian( 0., 1.);
    }
    return SimuS;
}
// Simulate an N-step Black-Scholes price path with the Euler scheme:
//   S_{n+1} = S_n * (1 + b*d_t + sigma*g_n*sqrt(d_t)), S_0 = 1.
Vect BlackScholes(int N)
{// Black-Scholes: dX = x(r*dt + sigma dW)
 // Euler Method: S_{n+1}=S_n(1 + r*d_t + sigma*g_n*d_t^{1/2})
 // d_t = 1, r, g_n
    // Dropped the original (erroneous) `const` qualifier: the path is
    // assigned into element by element below.
    Vect BS(N);
    BS(0)=1.;
    double d_t = 0.01; // time step
    double b = 1;      // drift
    double sigm = 1.;  // volatility
    for ( int i = 1; i < N; ++i ){
        BS(i) = BS(i-1)*(1. + b * d_t + sigm * Gaussian( 0., 1. )*pow(d_t,0.5));
    }
    return BS;
}
// Simulate an N-step Black-Scholes price path using the exact-solution form:
//   S_n = S_0 * exp((b - sigma^2/2) * n * d_t + sigma * g_n * sqrt(d_t)),
// where g_n is a cumulative sum of independent N(0,1) increments.
Vect BlackScholes2(int N)
{// Black-Scholes: dX = x(r*dt + sigma dW)
 // Method: S_{n}=S_0 exp( (r- sigma^2/2.) * t + sigma*\sum_i g_i )
 // d_t = 1, r, g_n
    // Dropped the original (erroneous) `const` qualifier: both vectors are
    // assigned into element by element below.
    Vect BS2(N), g(N);
    BS2(0) = 1.;
    double d_t = 0.01; // time step
    double b = 0.1;    // drift
    double sigm = 1.;  // volatility
    g(0) = 0.;
    for ( int i = 1; i < N; ++i ){
        // `=` instead of the original `+=`: `+=` folded in whatever value
        // g(i) held before being written, which is only correct if Vect
        // zero-initializes -- plain assignment is correct either way.
        g(i) = g(i-1) + Gaussian( 0., 1. );
        BS2(i) = BS2(0)*exp( ( b - pow(sigm,2.)/2. ) * i * d_t + sigm * g(i) * pow(d_t, 0.5) );
    }
    return BS2;
}
double ScatteredPointInterpolator::Height(const Vector2d& vPos) { // m: dimension of input vector // 2: the x and y values // size_t m = 2; size_t k = m_vecPoints.size(); double dHeight = 0.0; for (size_t i=0; i<k; i++) { Vector2d xi(m_vecPoints[i].X(), m_vecPoints[i].Y()); double dDist = Distance(vPos, xi); dHeight += m_vecWeights[i] * Gaussian(dDist); } return dHeight; }
// initialize prior density of filter void TrackerKalman::initialize(const StatePosVel& mu, const StatePosVel& sigma, const double time) { ColumnVector mu_vec(6); SymmetricMatrix sigma_vec(6); sigma_vec = 0; for (unsigned int i=0; i<3; i++){ mu_vec(i+1) = mu.pos_[i]; mu_vec(i+4) = mu.vel_[i]; sigma_vec(i+1,i+1) = pow(sigma.pos_[i],2); sigma_vec(i+4,i+4) = pow(sigma.vel_[i],2); } prior_ = Gaussian(mu_vec, sigma_vec); filter_ = new ExtendedKalmanFilter(&prior_); // tracker initialized tracker_initialized_ = true; quality_ = 1; filter_time_ = time; init_time_ = time; }
std::vector<std::string> TermFactory::available() const { std::vector<std::string> result; result.push_back(Discrete().className()); result.push_back(Bell().className()); result.push_back(Gaussian().className()); result.push_back(GaussianProduct().className()); result.push_back(PiShape().className()); result.push_back(Ramp().className()); result.push_back(Rectangle().className()); result.push_back(SShape().className()); result.push_back(Sigmoid().className()); result.push_back(SigmoidDifference().className()); result.push_back(SigmoidProduct().className()); result.push_back(Trapezoid().className()); result.push_back(Triangle().className()); result.push_back(ZShape().className()); return result; }
// Quantile-binned kernel fit: sort the samples (NOTE: this mutates the
// caller's `errors` array in place), split them into `binCount` bins of equal
// population, and fit one Gaussian per bin spanning that bin's value range.
MultiGaussian MultiGaussian::fitGaussianKernel(fracfloat_t* errors, unsigned count, unsigned binCount){
  //Optimization: could use kth order statistics.
  //Note: Parallel was causing problems.
  std::sort(errors, errors + count);

  Array<Gaussian> gaussians = Array<Gaussian>(binCount);

  for(unsigned i = 0; i < binCount; i++){
    // Index of the first and last sorted sample belonging to bin i.
    unsigned bin0 = count * i / binCount;
    unsigned bin1 = count * (i + 1) / binCount - 1;

    //Another way to do it:
    //Gaussian centered at the center of the bin, stdev the width of the bin.
    //Detail should stdev be half the width?
    // Mean: midpoint of the bin's extreme samples; stdev: the bin's spread,
    // plus an epsilon so a degenerate (constant-valued) bin keeps a nonzero
    // deviation.
    gaussians[i] = Gaussian(0.5 * (errors[bin0] + errors[bin1]), 1.0 * (errors[bin1] - errors[bin0]) + FRACFLOAT_EPSILON);
    //gaussians[i] = Gaussian(mean<fracfloat_t>(errors + bin0, bin1 - bin0), errors[bin1] - errors[bin0] + FRACFLOAT_EPSILON);
  }

  return MultiGaussian(gaussians);
}
void GaussianBlur(const RowMatrixXf& image, const double sigma, RowMatrixXf* out) { int kernel_size = std::ceil(((sigma - 0.8) / 0.3 + 1.0) * 2.0); if (kernel_size % 2 == 0) { kernel_size += 1; } RowVectorXf gauss_kernel(kernel_size); double norm_factor = 0; for (int i = 0; i < gauss_kernel.size(); i++) { gauss_kernel(i) = Gaussian(i, (kernel_size - 1.0) / 2.0, sigma); norm_factor += gauss_kernel(i); } gauss_kernel /= norm_factor; SeparableConvolution2d(image, gauss_kernel, gauss_kernel, REPLICATE, out); }
// Exercise the differential-entropy estimators on samples drawn from a
// four-component mixture of normal distributions: fit a single Gaussian and
// a kernel MultiGaussian, then print entropy estimates from numeric
// integration (several resolutions) and from sample-based estimation at
// several subsample sizes, for manual comparison.
void testDifferentialEntropy(){
  std::cout << "Testing Differential Entropy." << std::endl;

  fracfloat_t samples[DE_TEST_SIZE];

  std::default_random_engine generator;
  std::normal_distribution<fracfloat_t> distributions[DISTSIZE];
  distributions[0] = std::normal_distribution<fracfloat_t>(0.0,1.0);
  distributions[1] = std::normal_distribution<fracfloat_t>(0.1,4.0);
  distributions[2] = std::normal_distribution<fracfloat_t>(2.0,1.0);
  distributions[3] = std::normal_distribution<fracfloat_t>(5.0,8.0);

  // Interleave draws from the four component distributions.
  for(unsigned i = 0; i < DE_TEST_SIZE; i++){
    samples[i] = distributions[i % DISTSIZE](generator);
  }

  Gaussian g = Gaussian(samples, DE_TEST_SIZE);
  MultiGaussian mg = MultiGaussian::fitGaussianKernel(samples, DE_TEST_SIZE, (unsigned)sqrt(DE_TEST_SIZE));

  // Integration support: double the sample extremes.
  // NOTE(review): doubling only widens the interval if the sample min is
  // negative and the max positive -- confirm that holds for this mixture.
  fracfloat_t supportStart = min<fracfloat_t>(samples, DE_TEST_SIZE) * 2;
  fracfloat_t supportEnd = max<fracfloat_t>(samples, DE_TEST_SIZE) * 2;

  std::cout << "Gaussian: " << g << std::endl;
  std::cout << "Multi Gaussian: " << mg << std::endl;

  // Sanity check: the fitted densities should integrate to ~1 over the support.
  std::cout << "Integral gaussian approximation: " << g.integrate(supportStart, supportEnd, 1000) << std::endl;
  std::cout << "Integral multigaussian approximation: " << mg.integrate(supportStart, supportEnd, 1000) << std::endl;

  // Entropy via numeric integration at increasing resolution.
  std::cout << "Integral 10: " << mg.approximateDifferentialEntropyFromIntegral(supportStart, supportEnd, 10) << std::endl;
  std::cout << "Integral 100: " << mg.approximateDifferentialEntropyFromIntegral(supportStart, supportEnd, 100) << std::endl;
  std::cout << "Integral 1000: " << mg.approximateDifferentialEntropyFromIntegral(supportStart, supportEnd, 1000) << std::endl;

  // Entropy via the sample-based estimator at shrinking subsample sizes.
  std::cout << "Distribution Trick: " << mg.approximateDifferentialEntropyFromSamples(Array<fracfloat_t>(samples, DE_TEST_SIZE / 1)) << std::endl;
  std::cout << "Distribution Trick 1/2: " << mg.approximateDifferentialEntropyFromSamples(Array<fracfloat_t>(samples, DE_TEST_SIZE / 2)) << std::endl;
  std::cout << "Distribution Trick 1/4: " << mg.approximateDifferentialEntropyFromSamples(Array<fracfloat_t>(samples, DE_TEST_SIZE / 4)) << std::endl;
  std::cout << "Distribution Trick 1/8: " << mg.approximateDifferentialEntropyFromSamples(Array<fracfloat_t>(samples, DE_TEST_SIZE / 8)) << std::endl;
  std::cout << "Distribution Trick 1/16: " << mg.approximateDifferentialEntropyFromSamples(Array<fracfloat_t>(samples, DE_TEST_SIZE / 16)) << std::endl;
}
// Bilateral-filter the bitmap in place: each interior pixel becomes a
// weighted average over a window, the weight being the product of a spatial
// Gaussian (squared distance to the center pixel) and an intensity Gaussian
// (squared per-channel difference to the center pixel), normalized per
// channel by the accumulated weight Wp.
void Bitmap::bilteral_filter(){
    double sSigma,iSigma;//two sigma: space sigma and idensity sigma
    int windowWidth,windowHeight;//window be proportional to the image size
    double rWp,gWp,bWp;//the normalization factor,need to calculated dynamically(not predefined)
    windowHeight = ih.biHeight*0.12; //0.12 is a hight ratio
    windowWidth = ih.biWidth*0.12;
    sSigma = 0.02*sqrt((ih.biWidth*ih.biWidth+ih.biHeight*ih.biHeight)); //space sigma: 2% of diagnal
    BYTE* image = new BYTE[widthBytes*ih.biHeight]; //store the new image
    iSigma = 5; //fixed intensity sigma (original comment said 1; the code uses 5)
    // NOTE(review): pixels within half a window of the image border are never
    // written into `image`, so the result's borders are uninitialized memory.
    //iterate the whole image (interior only; see note above)
    for (int i=windowHeight/2; i<ih.biHeight-windowHeight/2; i++) {
        for (int j=windowWidth/2; j<ih.biWidth-windowWidth/2; j++) {
            //calculate Wp
            rWp=0;gWp=0;bWp=0;
            double RR=0,GG=0,BB=0;
            // The three loops below are identical except for the channel byte
            // offset (+0, +1, +2); each accumulates its channel's weighted sum
            // and normalization factor over the window.
            for(int x = i-windowHeight/2;x<i+windowHeight/2;x++)
                for (int y = j-windowWidth/2; y< j+windowWidth/2; y++) {
                    // Squared intensity difference for this channel.
                    int R_R = (imageData[x*widthBytes+y*3+0]-imageData[i*widthBytes+j*3+0])*(imageData[x*widthBytes+y*3+0]-imageData[i*widthBytes+j*3+0]);
                    double gaussian=Gaussian(sSigma, dist2(i, j, x, y))*Gaussian(iSigma, R_R);
                    rWp+=gaussian;
                    RR +=gaussian*imageData[x*widthBytes+y*3+0];
                }
            for(int x = i-windowHeight/2;x<i+windowHeight/2;x++)
                for (int y = j-windowWidth/2; y< j+windowWidth/2; y++) {
                    int G_G = (imageData[x*widthBytes+y*3+1]-imageData[i*widthBytes+j*3+1])*(imageData[x*widthBytes+y*3+1]-imageData[i*widthBytes+j*3+1]);
                    double gaussian=Gaussian(sSigma, dist2(i, j, x, y))*Gaussian(iSigma, G_G);
                    gWp+=gaussian;
                    GG +=gaussian*imageData[x*widthBytes+y*3+1];
                }
            for(int x = i-windowHeight/2;x<i+windowHeight/2;x++)
                for (int y = j-windowWidth/2; y< j+windowWidth/2; y++) {
                    int B_B = (imageData[x*widthBytes+y*3+2]-imageData[i*widthBytes+j*3+2])*(imageData[x*widthBytes+y*3+2]-imageData[i*widthBytes+j*3+2]);
                    double gaussian=Gaussian(sSigma, dist2(i, j, x, y))*Gaussian(iSigma, B_B);
                    bWp+=gaussian;
                    BB +=gaussian*imageData[x*widthBytes+y*3+2];
                }
            // Normalize each channel's accumulated sum by its total weight.
            image[i*widthBytes+j*3+0]=RR/rWp;
            image[i*widthBytes+j*3+1]=GG/gWp;
            image[i*widthBytes+j*3+2]=BB/bWp;
        }
    }
    // Swap the filtered buffer in; the old pixel data is released.
    delete[] imageData;
    imageData = image;
}