int main(int argc, char** argv)
{
  gROOT->Reset();
  gROOT->SetStyle("Plain");
  gStyle->SetPalette(1);
  gStyle->SetOptStat(1111);
  gStyle->SetOptFit(111);

  TF1 gaussian("gaussian", "-exp(-0.1*x) + exp(-0.2 * x)", 0, 50);
  TH1F histo("histo", "histo", 1000, 0, 50);
  histo.FillRandom("gaussian", 100000);

  TCanvas cc("cc", "cc", 400, 400);
  histo.SetLineColor(kRed);
  histo.Draw();

  std::cerr << "===== Get Neyman intervals ====" << std::endl;
  std::vector<double> band = getSigmaBands_FeldmanCousins(histo);
  std::cerr << "=======================" << std::endl;
  std::cerr << " " << band.at(0)
            << " << " << band.at(1)
            << " << " << band.at(2)
            << " << " << band.at(3)
            << " << " << band.at(4) << std::endl;
  std::cerr << "=======================" << std::endl;

  TLine* lVertLeft95 = new TLine(band.at(0), 0, band.at(0), 1000);
  lVertLeft95->SetLineColor(kBlue);
  lVertLeft95->SetLineWidth(2);
  lVertLeft95->SetLineStyle(5);

  TLine* lVertLeft68 = new TLine(band.at(1), 0, band.at(1), 1000);
  lVertLeft68->SetLineColor(kMagenta);
  lVertLeft68->SetLineWidth(2);
  lVertLeft68->SetLineStyle(5);

  TLine* lVertMiddle = new TLine(band.at(2), 0, band.at(2), 1000);
  lVertMiddle->SetLineColor(kGreen);
  lVertMiddle->SetLineWidth(2);
  lVertMiddle->SetLineStyle(5);

  TLine* lVertRight68 = new TLine(band.at(3), 0, band.at(3), 1000);
  lVertRight68->SetLineColor(kMagenta);
  lVertRight68->SetLineWidth(2);
  lVertRight68->SetLineStyle(5);

  TLine* lVertRight95 = new TLine(band.at(4), 0, band.at(4), 1000);
  lVertRight95->SetLineColor(kBlue);
  lVertRight95->SetLineWidth(2);
  lVertRight95->SetLineStyle(5);

  lVertLeft95->Draw();
  lVertLeft68->Draw();
  lVertMiddle->Draw();
  lVertRight68->Draw();
  lVertRight95->Draw();

  cc.SaveAs("exampleBand.png");
  return 0;
}
/**
 * Generate a gaussian random variable, by default with zero mean and
 * unit variance.
 */
inline double normal(const double mean = double(0), const double stdev = double(1))
{
  return gaussian(mean, stdev);
} // end of normal
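// The gaussian(mean, stdev) sampler wrapped by normal() is not included in
// this snippet. Below is a minimal sketch of what such a helper might look
// like, using the Box-Muller transform (hypothetical implementation, not the
// original code):
#include <cmath>
#include <cstdlib>

inline double gaussian(const double mean, const double stdev)
{
  const double two_pi = 6.283185307179586;
  // Box-Muller: two uniform variates in (0,1] -> one standard normal variate
  const double u1 = (std::rand() + 1.0) / (RAND_MAX + 2.0); // avoid log(0)
  const double u2 = (std::rand() + 1.0) / (RAND_MAX + 2.0);
  const double z  = std::sqrt(-2.0 * std::log(u1)) * std::cos(two_pi * u2);
  return mean + stdev * z;
}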
//! Get the modified descriptor. See Agrawal ECCV 08
//! Modified descriptor contributed by Pablo Fernandez
void Surf::getDescriptor(bool bUpright)
{
  int y, x, sample_x, sample_y, count = 0;
  int i = 0, ix = 0, j = 0, jx = 0, xs = 0, ys = 0;
  float scale, *desc, dx, dy, mdx, mdy, co, si;
  float gauss_s1 = 0.f, gauss_s2 = 0.f;
  float rx = 0.f, ry = 0.f, rrx = 0.f, rry = 0.f, len = 0.f;
  float cx = -0.5f, cy = 0.f; //Subregion centers for the 4x4 gaussian weighting

  Ipoint *ipt = &ipts[index];
  scale = ipt->scale;
  x = fRound(ipt->x);
  y = fRound(ipt->y);
  desc = ipt->descriptor;

  if (bUpright)
  {
    co = 1;
    si = 0;
  }
  else
  {
    co = cos(ipt->orientation);
    si = sin(ipt->orientation);
  }

  i = -8;

  //Calculate descriptor for this interest point
  while (i < 12)
  {
    j = -8;
    i = i - 4;
    cx += 1.f;
    cy = -0.5f;

    while (j < 12)
    {
      dx = dy = mdx = mdy = 0.f;
      cy += 1.f;
      j = j - 4;

      ix = i + 5;
      jx = j + 5;

      xs = fRound(x + (-jx * scale * si + ix * scale * co));
      ys = fRound(y + ( jx * scale * co + ix * scale * si));

      for (int k = i; k < i + 9; ++k)
      {
        for (int l = j; l < j + 9; ++l)
        {
          //Get coords of sample point on the rotated axis
          sample_x = fRound(x + (-l * scale * si + k * scale * co));
          sample_y = fRound(y + ( l * scale * co + k * scale * si));

          //Get the gaussian weighted x and y responses
          gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f * scale);
          rx = haarX(sample_y, sample_x, 2 * fRound(scale));
          ry = haarY(sample_y, sample_x, 2 * fRound(scale));

          //Get the gaussian weighted x and y responses on rotated axis
          rrx = gauss_s1 * (-rx * si + ry * co);
          rry = gauss_s1 * ( rx * co + ry * si);

          dx += rrx;
          dy += rry;
          mdx += fabs(rrx);
          mdy += fabs(rry);
        }
      }

      //Add the values to the descriptor vector
      gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);

      desc[count++] = dx * gauss_s2;
      desc[count++] = dy * gauss_s2;
      desc[count++] = mdx * gauss_s2;
      desc[count++] = mdy * gauss_s2;

      len += (dx * dx + dy * dy + mdx * mdx + mdy * mdy) * gauss_s2 * gauss_s2;

      j += 9;
    }
    i += 9;
  }

  //Convert to Unit Vector
  len = sqrt(len);
  for (int i = 0; i < 64; ++i)
    desc[i] /= len;
}
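// The gaussian(x, y, sig) weighting helper called above is not part of this
// snippet. A minimal sketch of the standard 2D Gaussian weight it presumably
// evaluates (hypothetical helper named gaussian2d, shown only for context):
#include <cmath>

inline float gaussian2d(float x, float y, float sig)
{
  // (1 / (2 pi sigma^2)) * exp(-(x^2 + y^2) / (2 sigma^2))
  const float pi = 3.14159265358979f;
  return (1.0f / (2.0f * pi * sig * sig)) * std::exp(-(x * x + y * y) / (2.0f * sig * sig));
}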
/**
 * Function to compute the optical flow using multiple scales
 **/
void Dual_TVL1_optic_flow_multiscale(
    float *I0,           // source image
    float *I1,           // target image
    float *u1,           // x component of the optical flow
    float *u2,           // y component of the optical flow
    const int   nxx,     // image width
    const int   nyy,     // image height
    const float tau,     // time step
    const float lambda,  // weight parameter for the data term
    const float theta,   // weight parameter for (u - v)²
    const int   nscales, // number of scales
    const float zfactor, // factor for building the image pyramid
    const int   warps,   // number of warpings per scale
    const float epsilon, // tolerance for numerical convergence
    const bool  verbose  // enable/disable the verbose mode
)
{
  int size = nxx * nyy;

  // allocate memory for the pyramid structure
  float **I0s = xmalloc(nscales * sizeof(float*));
  float **I1s = xmalloc(nscales * sizeof(float*));
  float **u1s = xmalloc(nscales * sizeof(float*));
  float **u2s = xmalloc(nscales * sizeof(float*));
  int    *nx  = xmalloc(nscales * sizeof(int));
  int    *ny  = xmalloc(nscales * sizeof(int));

  I0s[0] = xmalloc(size * sizeof(float));
  I1s[0] = xmalloc(size * sizeof(float));

  u1s[0] = u1;
  u2s[0] = u2;
  nx [0] = nxx;
  ny [0] = nyy;

  // normalize the images between 0 and 255
  image_normalization(I0, I1, I0s[0], I1s[0], size);

  // pre-smooth the original images
  gaussian(I0s[0], nx[0], ny[0], PRESMOOTHING_SIGMA);
  gaussian(I1s[0], nx[0], ny[0], PRESMOOTHING_SIGMA);

  // create the scales
  for (int s = 1; s < nscales; s++)
  {
    zoom_size(nx[s-1], ny[s-1], &nx[s], &ny[s], zfactor);
    const int sizes = nx[s] * ny[s];

    // allocate memory
    I0s[s] = xmalloc(sizes * sizeof(float));
    I1s[s] = xmalloc(sizes * sizeof(float));
    u1s[s] = xmalloc(sizes * sizeof(float));
    u2s[s] = xmalloc(sizes * sizeof(float));

    // zoom out the images to create the pyramidal structure
    zoom_out(I0s[s-1], I0s[s], nx[s-1], ny[s-1], zfactor);
    zoom_out(I1s[s-1], I1s[s], nx[s-1], ny[s-1], zfactor);
  }

  // initialize the flow at the coarsest scale
  for (int i = 0; i < nx[nscales-1] * ny[nscales-1]; i++)
    u1s[nscales-1][i] = u2s[nscales-1][i] = 0.0;

  // pyramidal structure for computing the optical flow
  for (int s = nscales-1; s >= 0; s--)
  {
    if (verbose)
      fprintf(stderr, "Scale %d: %dx%d\n", s, nx[s], ny[s]);

    // compute the optical flow at the current scale
    Dual_TVL1_optic_flow(
        I0s[s], I1s[s], u1s[s], u2s[s], nx[s], ny[s],
        tau, lambda, theta, warps, epsilon, verbose
    );

    // if this was the last scale, finish now
    if (!s) break;

    // otherwise, upsample the optical flow

    // zoom the optical flow for the next finer scale
    zoom_in(u1s[s], u1s[s-1], nx[s], ny[s], nx[s-1], ny[s-1]);
    zoom_in(u2s[s], u2s[s-1], nx[s], ny[s], nx[s-1], ny[s-1]);

    // scale the optical flow with the appropriate zoom factor
    for (int i = 0; i < nx[s-1] * ny[s-1]; i++)
    {
      u1s[s-1][i] *= (float) 1.0 / zfactor;
      u2s[s-1][i] *= (float) 1.0 / zfactor;
    }
  }

  // delete allocated memory
  for (int i = 1; i < nscales; i++)
  {
    free(I0s[i]);
    free(I1s[i]);
    free(u1s[i]);
    free(u2s[i]);
  }
  free(I0s[0]);
  free(I1s[0]);

  free(I0s);
  free(I1s);
  free(u1s);
  free(u2s);
  free(nx);
  free(ny);
}
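// A possible call site for the multiscale function above. The parameter values
// are illustrative, commonly used defaults, not taken from this snippet; I0,
// I1, u1, u2 are assumed to be pre-allocated nx*ny float buffers:
void example_tvl1_call(float *I0, float *I1, float *u1, float *u2, int nx, int ny)
{
  const float tau     = 0.25f;  // time step
  const float lambda  = 0.15f;  // data term weight
  const float theta   = 0.3f;   // coupling weight for (u - v)^2
  const int   nscales = 5;      // pyramid levels
  const float zfactor = 0.5f;   // downscaling factor between levels
  const int   warps   = 5;      // warpings per scale
  const float epsilon = 0.01f;  // convergence tolerance

  Dual_TVL1_optic_flow_multiscale(I0, I1, u1, u2, nx, ny,
                                  tau, lambda, theta, nscales,
                                  zfactor, warps, epsilon, true);
}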
edge_image_type IonDetector::canny_edge( const image_type& img )
{
  image_type::extents_type three( 3, 3 );

  Kernel gaussian( image_type( three, boost::assign::list_of
                               ( 1./16 )( 2./16 )( 1./16 )
                               ( 2./16 )( 4./16 )( 2./16 )
                               ( 1./16 )( 2./16 )( 1./16 ) ) );
  Kernel sobelx( image_type( three, boost::assign::list_of
                             (1)(0)(-1)(2)(0)(-2)(1)(0)(-1) ) );
  Kernel sobely( image_type( three, boost::assign::list_of
                             (1)(2)(1)(0)(0)(0)(-1)(-2)(-1) ) );

  image_type img_gauss( img.extents ),
             img_sobelx( img.extents ),
             img_sobely( img.extents );

  //gaussian.apply_kernel( img, img_gauss );
  sobelx.apply_kernel( img, img_sobelx );
  sobely.apply_kernel( img, img_sobely );

  save_file( "orig.fits", img );
  //save_file( "gauss.fits", img_gauss );
  //save_file( "sobelx.fits", img_sobelx );
  //save_file( "sobely.fits", img_sobely );

  image_type img_sobel( img_sobelx.extents );
  edge_image_type edges( img_sobelx.extents );

  // Gradient magnitude (squared) and its mean over the interior pixels
  image_type::element mean = 0, std_dev = 0;
  for( image_type::index i = 2; i < img_sobelx.extents.first-2; ++i )
    for( image_type::index j = 2; j < img_sobelx.extents.second-2; ++j )
    {
      image_type::element val = img_sobelx( i, j )*img_sobelx( i, j )
                              + img_sobely( i, j )*img_sobely( i, j );
      img_sobel( i, j ) = val;
      mean += val;
    }
  save_file( "sobel.fits", img_sobel );

  mean /= img_sobel.num_elements();
  for( image_type::index i = 2; i < img_sobelx.extents.first-2; ++i )
    for( image_type::index j = 2; j < img_sobelx.extents.second-2; ++j )
    {
      std_dev += ( img_sobel( i, j ) - mean )*( img_sobel( i, j ) - mean );
    }
  std_dev = sqrt( std_dev / img_sobel.num_elements() );

  image_type::element thresh = mean + canny_threshold * std_dev;
  image_type::element continue_thresh = mean + canny_continue_threshold * std_dev;

  // First mark off edges with a high threshold
  for( image_type::index i = 2; i < img_sobelx.extents.first-2; ++i )
    for( image_type::index j = 2; j < img_sobelx.extents.second-2; ++j )
    {
      edge_image_type::position pos( i, j );
      if( edges( pos ) )
        continue;
      if( canny_check( img_sobel, img_sobelx, img_sobely, pos, thresh ) )
      {
        // Grow the edge from this seed pixel using the lower threshold
        std::stack< image_type::position > checks;
        checks.push( pos );
        while( !checks.empty() )
        {
          pos = checks.top();
          checks.pop();
          if( edges( pos ) )
            continue;
          if( !canny_check( img_sobel, img_sobelx, img_sobely, pos, continue_thresh ) )
            continue;
          edges( pos ) = 1.0f;

          // Push the eight neighbours of the current pixel
          pos.first++;    checks.push( pos );
          pos.second++;   checks.push( pos );
          pos.second-=2;  checks.push( pos );
          pos.first--;    checks.push( pos );
          pos.second+=2;  checks.push( pos );
          pos.first--;    checks.push( pos );
          pos.second--;   checks.push( pos );
          pos.second--;   checks.push( pos );
        }
      }
    }

  save_file( "edges.fits", edges );
  return edges;
}
void ComputeMSURFDescriptor( const ImageT & Lx , const ImageT & Ly , const int id_octave ,
                             const SIOPointFeature & ipt , Descriptor< Real , 64 > & desc )
{
  Real dx = 0, dy = 0, mdx = 0, mdy = 0, gauss_s1 = 0, gauss_s2 = 0;
  Real rx = 0, ry = 0, rrx = 0, rry = 0, xf = 0, yf = 0, ys = 0, xs = 0;
  Real sample_x = 0, sample_y = 0, co = 0, si = 0, angle = 0;
  Real ratio = 0;
  int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
  int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
  int scale = 0, dsize = 0, level = 0;

  // Subregion centers for the 4x4 gaussian weighting
  Real cx = - static_cast<Real>( 0.5 ), cy = static_cast<Real>( 0.5 );

  // Set the descriptor size and the sample and pattern sizes
  dsize = 64;
  sample_step = 5;
  pattern_size = 12;

  // Get the information from the keypoint
  ratio = static_cast<Real>( 1 << id_octave );
  scale = MathTrait<float>::round( ipt.scale() / ratio );
  angle = ipt.orientation();
  yf = ipt.y() / ratio;
  xf = ipt.x() / ratio;
  co = MathTrait<Real>::cos( angle );
  si = MathTrait<Real>::sin( angle );

  i = -8;

  // Calculate descriptor for this interest point
  // Area of size 24 s x 24 s
  while ( i < pattern_size )
  {
    j = -8;
    i = i - 4;
    cx += 1.0;
    cy = -0.5;

    while ( j < pattern_size )
    {
      dx = dy = mdx = mdy = 0.0;
      cy += 1.0;
      j = j - 4;

      ky = i + sample_step;
      kx = j + sample_step;

      xs = xf + ( -kx * scale * si + ky * scale * co );
      ys = yf + (  kx * scale * co + ky * scale * si );

      for ( int k = i; k < i + 9; ++k )
      {
        for ( int l = j; l < j + 9; ++l )
        {
          // Get coords of sample point on the rotated axis
          sample_y = yf + (  l * scale * co + k * scale * si );
          sample_x = xf + ( -l * scale * si + k * scale * co );

          // Get the gaussian weighted x and y responses
          gauss_s1 = gaussian( xs - sample_x, ys - sample_y,
                               static_cast<Real>( 2.5 ) * static_cast<Real>( scale ) );
          rx = SampleLinear( Lx, sample_y, sample_x );
          ry = SampleLinear( Ly, sample_y, sample_x );

          // Get the x and y derivatives on the rotated axis
          rry = gauss_s1 * (  rx * co + ry * si );
          rrx = gauss_s1 * ( -rx * si + ry * co );

          // Sum the derivatives to the cumulative descriptor
          dx += rrx;
          dy += rry;
          mdx += MathTrait<Real>::abs( rrx );
          mdy += MathTrait<Real>::abs( rry );
        }
      }

      // Add the values to the descriptor vector
      gauss_s2 = gaussian( cx - static_cast<Real>( 2.0 ),
                           cy - static_cast<Real>( 2.0 ),
                           static_cast<Real>( 1.5 ) );

      desc[dcount++] = dx * gauss_s2;
      desc[dcount++] = dy * gauss_s2;
      desc[dcount++] = mdx * gauss_s2;
      desc[dcount++] = mdy * gauss_s2;

      j += 9;
    }
    i += 9;
  }

  // convert to unit vector (L2 norm)
  typedef Eigen::Matrix<Real, Eigen::Dynamic, 1> VecReal;
  Eigen::Map< VecReal > dataMap( &desc[0], 64 );
  dataMap.normalize();
  //std::cout << dataMap.transpose() << std::endl << std::endl;
}
double* Network::activate(double* inputs)
{
    int totalInputs = inputCount + biasCount;

    //set bias
    for(int i = 0; i < biasCount; i++)
        registers[i] = 1.0;

    //set inputs
    for(int i = 0; i < inputCount; i++)
        registers[i + biasCount] = inputs[i];

    for(int i = 0; i < nodeCount; i++)
    {
        //activate in order
        int tgtNeuronIx = nodeOrder[i];

        //skip inputs and bias
        if(tgtNeuronIx < totalInputs)
            continue;

        //fetch the index arrays for this node
        int* regIxArray = registerArrays[tgtNeuronIx];
        int* weightIxArray = weightArrays[tgtNeuronIx];
        int nCount = nodeIncoming[tgtNeuronIx];
        int aType = activationTypes[tgtNeuronIx];

        //weighted sum of the incoming connections
        double nodeSum = 0;
        for(int r = 0; r < nCount; r++)
        {
            nodeSum += registers[regIxArray[r]] * weights[weightIxArray[r]];
        }

        //apply the node's activation function
        switch(aType)
        {
            case ActivationInt::BipolarSigmoid:
                registers[tgtNeuronIx] = bipolarSigmoid(nodeSum);
                break;
            case ActivationInt::Gaussian:
                registers[tgtNeuronIx] = gaussian(nodeSum);
                break;
            case ActivationInt::Linear:
                registers[tgtNeuronIx] = linear(nodeSum);
                break;
            case ActivationInt::Sine:
                registers[tgtNeuronIx] = sine(nodeSum);
                break;
            case ActivationInt::StepFunction:
                registers[tgtNeuronIx] = stepFunction(nodeSum);
                break;
        }

        printf("tgtIx %d - calc: %f \n", tgtNeuronIx, registers[tgtNeuronIx]);
        //register done, move on!
    }

    double* fullOutputs = new double[outputCount];

    //copy the output registers (starting right after bias and inputs)
    //into a freshly allocated array; memcpy takes a size in bytes
    memcpy(fullOutputs, &registers[totalInputs], outputCount * sizeof(double));

    //send back registers starting at outputs!
    return fullOutputs;
}
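// A minimal usage sketch for activate() above (hypothetical caller; it only
// assumes what the snippet itself shows: the method returns a new[]-allocated
// array of outputCount doubles, so the caller must delete[] it):
void runNetworkOnce(Network& net, double* inputs, int outputCount)
{
    double* outputs = net.activate(inputs);        // returns a new[] array
    for(int o = 0; o < outputCount; o++)
        printf("output %d = %f\n", o, outputs[o]);
    delete[] outputs;                               // caller owns the buffer
}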
void BfmWrapper::SpectralRange(bfm_qdp<Float> &bfm)
{
  int Ls = bfm.Ls;
  multi1d<T4> gaus(Ls);
  for(int s=0;s<Ls;s++) gaussian(gaus[s]);

  Fermion_t tmp1,tmp2;
  Fermion_t b;
  Fermion_t Ab;
  double mu;

  tmp1 = bfm.allocFermion();
  tmp2 = bfm.allocFermion();
  b    = bfm.allocFermion();
  Ab   = bfm.allocFermion();

  bfm.importFermion(gaus,b,0);

  int dagyes = 1;
  int dagno  = 0;
  int donrm  = 1;

  double abnorm;
  double absq;
  double n;

  ///////////////////////////////////////
  // Get the highest evalue of MdagM
  ///////////////////////////////////////
#pragma omp parallel for
  for(int i=0;i<bfm.nthread;i++) {
    int me = bfm.thread_barrier();
    for(int k=0;k<100;k++){
      n    = bfm.norm(b);
      absq = bfm.Mprec(b,tmp2,tmp1,dagno,donrm);
      bfm.Mprec(tmp2,Ab,tmp1,dagyes);   //MdagM
      mu = absq/n;
      if(bfm.isBoss() && (!me) ) {
        printf("PowerMethod %d mu=%le absq=%le n=%le \n",k,mu,absq,n);
      }
      abnorm = sqrt(absq);
      // b = Ab / |Ab|
      bfm.axpby(b,Ab,Ab,0.0,1.0/abnorm);
    }
  }

  /////////////////////////////////////////
  // Get the lowest eigenvalue of MdagM
  // Apply power method to [lambda_max - MdagM]
  /////////////////////////////////////////
  double lambda_max = mu;
  bfm.importFermion(gaus,b,0);

#pragma omp parallel for
  for(int i=0;i<bfm.nthread;i++) {
    int me = bfm.thread_barrier();
    for(int k=0;k<100;k++){
      n    = bfm.norm(b);
      absq = bfm.Mprec(b,tmp2,tmp1,dagno,donrm);
      bfm.Mprec(tmp2,Ab,tmp1,dagyes);
      bfm.axpby(Ab,Ab,b,-1,lambda_max); // Ab <- lambda_max*b - MdagM*b
      absq = lambda_max * n - absq;
      mu = absq/n;
      if(bfm.isBoss() && (!me) ) {
        printf("PowerMethod for low EV %d mu=%le absq=%le n=%le \n",k,mu,absq,n);
        printf("PowerMethod %d lambda_min =%le\n",k,lambda_max - mu);
      }
      abnorm = sqrt(absq);
      // b = Ab / |Ab|
      bfm.axpby(b,Ab,Ab,0.0,1.0/abnorm);
    }
  }

  bfm.freeFermion(tmp1);
  bfm.freeFermion(tmp2);
  bfm.freeFermion(b);
  bfm.freeFermion(Ab);
}
float gabor(V2d v, V2d d, float size, float freq)
{
  // Gabor filter: radial gaussian envelope times a sinusoidal carrier along d
  return gaussian(v.mag()/size) * sin(v.dot(d)*freq);
}
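// The one-argument gaussian(x) used by gabor() is not included in this
// snippet; a plausible definition is a simple unnormalised Gaussian envelope
// (hypothetical helper, shown only to make the example self-contained):
#include <cmath>

inline float gaussian1d(float x)
{
  // exp(-x^2 / 2): value 1 at the centre, falling off smoothly with |x|
  return std::exp(-0.5f * x * x);
}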