std::vector< float > Saliency::distribution( const std::vector< SuperpixelStatistic >& stat ) const {
	const int N = stat.size();
	std::vector< float > r( N );
	const float sc = 0.5 / (settings_.sigma_c_ * settings_.sigma_c_);
	for( int i=0; i<N; i++ ) {
		float u = 0, norm = 1e-10;
		Vec3f c = stat[i].mean_color_;
		Vec2f p(0.f, 0.f);
		
		// Find the mean position
		for( int j=0; j<N; j++ ) {
			Vec3f dc = stat[j].mean_color_ - c;
			float w = fast_exp( - sc * dc.dot(dc) );
			p += w*stat[j].mean_position_;
			norm += w;
		}
		p *= 1.0 / norm;
		
		// Compute the variance
		for( int j=0; j<N; j++ ) {
			Vec3f dc = stat[j].mean_color_ - c;
			Vec2f dp = stat[j].mean_position_ - p;
			float w = fast_exp( - sc * dc.dot(dc) );
			u += w*dp.dot(dp);
		}
		r[i] = u / norm;
	}
	normVec( r );
	return r;
}
void DenseCRF::expAndNormalize ( float* out, const float* in, float scale, float relax ) {
	float *V = new float[ N_+10 ];
	for( int i=0; i<N_; i++ ){
		const float * b = in + i*M_;    // b = score + pos * numberClasses
		// Find the max and subtract it so that the exp doesn't explode
		float mx = scale*b[0];
		for( int j=1; j<M_; j++ )
			if( mx < scale*b[j] )
				mx = scale*b[j];
		float tt = 0;
		for( int j=0; j<M_; j++ ){
			V[j] = fast_exp( scale*b[j]-mx );
			tt += V[j];
		}
		// Make it a probability
		for( int j=0; j<M_; j++ )
			V[j] /= tt;
		
		float * a = out + i*M_;
		for( int j=0; j<M_; j++ )
			if (relax == 1)
				a[j] = V[j];
			else
				a[j] = (1-relax)*a[j] + relax*V[j];
		
	}
	delete[] V;
}
std::vector< float > Saliency::uniqueness( const std::vector< SuperpixelStatistic >& stat ) const {
	const int N = stat.size();
	std::vector< float > r( N );
	const float sp = 0.5 / (settings_.sigma_p_ * settings_.sigma_p_);
	for( int i=0; i<N; i++ ) {
		float u = 0, norm = 1e-10;
		Vec3f c = stat[i].mean_color_;
		Vec2f p = stat[i].mean_position_;
		
		// Evaluate the score, for now without filtering
		for( int j=0; j<N; j++ ) {
			Vec3f dc = stat[j].mean_color_ - c;
			Vec2f dp = stat[j].mean_position_ - p;
			
			float w = fast_exp( - sp * dp.dot(dp) );
			u += w*dc.dot(dc);
			norm += w;
		}
		// Let's not normalize here, must have been a typo in the paper
// 		r[i] = u / norm;
		r[i] = u;
	}
	normVec( r );
	return r;
}
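The three routines above call a scalar fast_exp as a cheap stand-in for std::exp when evaluating Gaussian weights; its definition does not appear in these excerpts. Purely as an illustration of the idea (a hypothetical sketch, not the implementation these projects actually ship), one portable approximation squares its way to exp(x) ≈ (1 + x/1024)^1024:

// Illustrative only: exp(x) ~= (1 + x/1024)^1024, evaluated with 10 squarings.
// Accurate to a few percent for |x| up to roughly 10, degrading beyond that.
// Large negative inputs are clamped: the true exponential is negligible there,
// and for x < -1024 the base would go negative and the scheme breaks down.
static inline float fast_exp_sketch(float x) {
	if (x < -80.0f) return 0.0f;          // weights this small are effectively zero
	float y = 1.0f + x * (1.0f / 1024.0f);
	for (int k = 0; k < 10; ++k)          // square ten times: y -> y^1024
		y *= y;
	return y;
}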
Example #4
int main()
{
	long long int a,b,res;
	a=2;
	b=4;
	res=fast_exp(a,b);
	printf("%lld\n",res);
	return 0;
}
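Example #4 passes two arguments to fast_exp and prints the result as a long long, so the function presumably returns a^b. A minimal sketch with that shape (hypothetical, not this project's actual definition) is plain exponentiation by squaring:

#include <stdio.h>

/* Hypothetical fast_exp(base, exp) returning base^exp by binary exponentiation:
 * O(log exp) multiplications; overflow of long long is the caller's problem. */
long long fast_exp(long long base, long long exp)
{
	long long result = 1;
	while (exp > 0) {
		if (exp & 1)        /* low bit set: fold this power of base in */
			result *= base;
		base *= base;       /* square for the next bit */
		exp >>= 1;
	}
	return result;
}

int main(void)
{
	printf("%lld\n", fast_exp(2, 4));   /* prints 16, matching Example #4 */
	return 0;
}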
Example #5
File: expmod.c  Project: BelfordZ/cAlgs
int main(void)
{
  int b, n, m;

  while (scanf("%d %d %d", &b, &n, &m) == 3) {
    printf("%d^%d mod %d = %d\n", b, n, m, fast_exp(b, n, m));
  }
  return 0;

}
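Example #5 passes three arguments instead, so its fast_exp(b, n, m) is evidently modular exponentiation, b^n mod m. A hedged sketch with that signature (again, not the code from BelfordZ/cAlgs), reducing after every multiplication so intermediates stay below m^2:

/* Hypothetical fast_exp(b, n, m) = b^n mod m by square-and-multiply.
 * long long intermediates keep (result * b) from overflowing for m < 2^31. */
long long fast_exp(long long b, long long n, long long m)
{
	long long result = 1 % m;   /* handles the m == 1 edge case */
	b %= m;
	while (n > 0) {
		if (n & 1)
			result = (result * b) % m;
		b = (b * b) % m;
		n >>= 1;
	}
	return result;
}

For instance, fast_exp(2, 10, 1000) returns 24, since 2^10 = 1024.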
Example #6
	void connect(CpuSNN* net, int srcGrp, int src_i, int destGrp, int dest_i, float& weight, float& maxWt, float& delay, bool& connected)
	{
		// extract x and y positions...
		int dest_i_x  = dest_i%dest_x;
		int dest_i_y  = dest_i/dest_x;
		int src_i_y = src_i/src_x;
		int src_i_x = src_i%(src_x);

		float distance2 = ((dest_i_y-src_i_y)*(dest_i_y-src_i_y))+((dest_i_x-src_i_x)*(dest_i_x-src_i_x));
		float gaus = fast_exp(-distance2/radius/radius*3);
		
		connected = gaus>0.1;
		delay     = 1;
		weight    = gaus*weightScale;
	}
Example #7
	void connect(CpuSNN* net, int srcGrp, int i, int destGrp, int j, float& weight, float& maxWt, float& delay, bool& connected)
	{
		int X = j%nrX;
		int Y = (j/nrX)%nrY;
		int o = j/(nrX*nrY);

		int iX = (i%(nrX/spatialScale))*spatialScale;
		int iY = ((i/(nrX/spatialScale))%(nrY/spatialScale))*spatialScale;
		int iOr = i/(nrX*nrY/spatialScale/spatialScale);

		float gaus = fast_exp(-((X-iX)*(X-iX)+(Y-iY)*(Y-iY))/MTsize); //for Inhibition use twice the radius...

		connected = getRand()<gaus*(o!=iOr);//cos((o-iOr+2)/4.0*2*3.1459);
		weight = weightScale;
		delay = 1;
	}
Example #8
	void connect(CpuSNN* net, int srcGrp, int src_i, int destGrp, int dest_i, float& weight, float& maxWt, float& delay, bool& connected)
	{
		// extract x and y position from the destination
		int dest_i_x = dest_i%V4_LAYER_DIM;
		int dest_i_y = dest_i/V4_LAYER_DIM;

		// extract x and y position from the source
		int src_i_x = src_i%V4_LAYER_DIM;
		int src_i_y = src_i/V4_LAYER_DIM;

		float distance2 = ((dest_i_y-src_i_y)*(dest_i_y-src_i_y))+((dest_i_x-src_i_x)*(dest_i_x-src_i_x));
		float gaus = fast_exp(-distance2/localRadius2*3);

		connected   = gaus>0.1;
		delay       = 1.0;
		weight  = gaus*weightScale;
	}
Example #9
	void connect(CpuSNN* net, int srcGrp, int i, int destGrp, int j, float& weight, float& maxWt, float& delay, bool& connected)
	{
		int v1X = i%nrX;
		int v1Y = (i/nrX)%nrY;
		int spaceTimeInd = (i/(nrX*nrY))%28;
		int scale = i/(nrX*nrY)/28;

		int edgedist = fmin(fmin(v1X,nrX-1-v1X),fmin(v1Y,nrY-1-v1Y)); // deal with the edges, which tend to have very high responses...

		int v4X = (j%(nrX/spatialScale))*spatialScale;
		int v4Y = ((j/(nrX/spatialScale))%(nrY/spatialScale))*spatialScale;
		int o = j/(nrX*nrY/spatialScale/spatialScale);

		float gaus = fast_exp(-((v4X-v1X)*(v4X-v1X)+(v4Y-v1Y)*(v4Y-v1Y))/MTsize/2);//sqrt(2*3.1459*MTsize);

		connected = getRand()<gaus*proj[spaceTimeInd][o];
		weight = proj[spaceTimeInd][o] * bias[o]*fmin(9,edgedist)/9.0*weightScale;
		delay = 1;
	}
Example #10
long long chineseRemTheorem(){
    long long int m = 1;
    long long int i = 0;
    long long val = 0;
    
    long long int mi,yi;
    
    while(Prm[i]){
        m *= Prm[i];
        i++;
    }
    
    i = 0;
    
    while(Prm[i]){
        mi = m/Prm[i];
        yi = fast_exp(mi,Prm[i]-2,Prm[i]);
        
        val = (val + (R[i]*yi*mi)%m)%m;
        i++;
    }
    return val;
}
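The routine above is the standard Chinese-remainder reconstruction: m is the product of the zero-terminated prime moduli in Prm, mi = m/Prm[i], and yi = fast_exp(mi, Prm[i]-2, Prm[i]) is the inverse of mi modulo Prm[i] by Fermat's little theorem (hence the moduli must be prime). A hand-checked run with hypothetical inputs Prm = {3, 5, 0} and R = {2, 3}:

/* m = 3*5 = 15
 * i = 0: mi = 5, yi = 5^(3-2) mod 3 = 2          (check: 5*2 = 10 ≡ 1 mod 3)
 *        term = R[0]*yi*mi = 2*2*5 = 20
 * i = 1: mi = 3, yi = 3^(5-2) mod 5 = 27 mod 5 = 2 (check: 3*2 = 6 ≡ 1 mod 5)
 *        term = R[1]*yi*mi = 3*2*3 = 18
 * val = (20 + 18) mod 15 = 8, and indeed 8 mod 3 = 2, 8 mod 5 = 3.
 * (The code reduces each term modulo m as it accumulates, to the same effect.)
 */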
Example #11
int main(void)
{
  double x,x_diff,y,y_est;
  int xi,i;

  y = 1.0;
  for(i=0;i<10;++i)
  {
    table[i] = y;

    y *= WN_E;
  }

  /*
  for(x=0.0;x<=5.0;x+=5.0/20.0)
  for(x=0.0;x<=5.0;x+=5.0/100000000.0)
  */
  x = 0.01;
  for(i=0;i<10000000;++i)
  {
    /*
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    y_est = 1.1-0.5*exp(x);
    x = y_est;
    */
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    y_est = 1.1-0.5*fast_exp(x);
    x = y_est;
    /*
    y = exp(x);
    y_est = wn_eval_polynomial2(x,DA0,DA1,DA2);
    y_est = wn_eval_polynomial3(x,A0,A1,A2,A3);
    */

    /*
    xi = (int)x;
    x_diff = x-xi;

    y_est = table[xi]*wn_eval_polynomial3(x_diff,A0,A1,A2,A3);
    */

    /*
    y_est = fast_exp(x);
    */

    /*
    y_est = fast_exp_macro(x);
    */

    /*
    printf("x=%lg,y=%lg,y_est=%lg,quot=%lg\n",x,y,y_est,y_est/y-1.0);
    printf("y_est=%lg\n",y_est);
    x = y_est;
    */
  }

  printf("y_est=%lg\n",y_est);

  return(0);
}
Example #12
/**
 * Applies the RSA cipher to the block. The same method is used to encrypt
 * and to decrypt; it is only a matter of choosing the right key:
 *   - encryption: public key
 *   - decryption: private key
 *   - signing: private key
 *   - signature verification: public key
 * @param key Specifies the name of the key to use.
**/
void rsaBloc::applyRSA(rsaKeyName key)
{
	value=fast_exp(value,keys.getKey(key),keys.getCommonKey());
}
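A toy round trip showing what the comment describes, the same call with the two halves of the key pair. The numbers are the usual textbook example (p = 61, q = 53, so n = 3233, e = 17, d = 2753), not keys from this project, and the three-argument fast_exp is assumed to be the modular version sketched under Example #5:

#include <stdio.h>

long long fast_exp(long long b, long long n, long long m);  /* modular, as sketched earlier */

int main(void)
{
	/* Hypothetical textbook key pair: n = 61*53 = 3233, e = 17, d = 2753. */
	long long n = 3233, e = 17, d = 2753, msg = 65;
	long long cipher = fast_exp(msg, e, n);     /* public key:  65 -> 2790 */
	long long plain  = fast_exp(cipher, d, n);  /* private key: 2790 -> 65 */
	printf("%lld -> %lld -> %lld\n", msg, cipher, plain);
	return 0;
}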
Example #13
/* the probability of seeing a particular symbol next */
double SkipCTS::prob(bit_t b) {

    // We proceed as with update(), except that we keep track of the symbol probability at
    // each node instead of actually updating parameters
    getContext();

    zobhash_t hash = 0;
    int skips_left, last_idx;
    double symbolLogProb = LogOneHalf; 

    // propagate the symbol probability from leaves to root 
    for (int i=m_depth; i >= 0; i--) {
        
        // update the KT statistics, then the weighted 
        // probability for every node on this level
        const indices_list_t &il = m_indices[i];
        
        for (int j=0; j < il.size(); j++) {

            getContextInfo(hash, il[j]);
            skips_left = m_auxinfo[i][j].skips_left;
            last_idx   = m_auxinfo[i][j].last_idx;

            // update the node
            int n_submodels = numSubmodels(last_idx, skips_left);

            SkipNode &n = getNode(hash, i, n_submodels);

            // handle the stop case
            double log_est_mul = n.logKTMul(b);
            if (n_submodels == 1) {
                n.m_buf = symbolLogProb = log_est_mul;
                continue;
            }
                 
            // Here we rely on the property that log_prob_est and the like are unnormalized
            // posteriors. We accumulate the symbol log probability under 'symbolLogProb'.
            symbolLogProb = n.m_log_prob_est + log_est_mul;
 
            // handle the split case
            zobhash_t delta = s_zobtbl[last_idx+1][m_context[last_idx+1]];
            const SkipNode &nn = getNode(hash ^ delta, i+1, numSubmodels(last_idx+1, skips_left));
            // recall that m_buf contains the symbol probability at the child node
            double log_split_pred = nn.m_buf;
            symbolLogProb = fast_logadd(symbolLogProb, n.m_log_prob_split + log_split_pred);

            // handle the skipping case
            if (n_submodels > 2) {
                
                // if we did not yet allocate these models, this node must never have been
                // updated. We assume (perhaps incorrectly) that none of this node's children
                // exist and pretend they return a symbol probability of 0.5 
                if (n.m_log_skip_lik == NULL) {

                    const prior_t &p = SplitSkipPrior[skips_left];
                    symbolLogProb = fast_logadd(symbolLogProb, p.skip + LogOneHalf); 
                }
                
                // mix in the symbol probability from the skipping models
                else for (int k=last_idx+2; k < m_depth; k++) { 
                    
                    zobhash_t h = hash ^ s_zobtbl[k][m_context[k]];
                    SkipNode &sn = getNode(h, i+1, numSubmodels(k, skips_left - 1));
                    
                    double log_skip_pred = sn.m_buf;
                    int z = k - last_idx - 2;
                    symbolLogProb = fast_logadd(symbolLogProb, n.m_log_skip_lik[z] + log_skip_pred);
                }
            }

            // Finally we normalize by the mixture probability at this node
            symbolLogProb -= n.m_log_prob_weighted;
            n.m_buf = symbolLogProb;

            assert(n.m_buf < 0.0);
        }
    }
   
    // our scheme assumes that the last node processed is the root; the variable 'symbolLogProb'
    // contains its symbol probability 
    return fast_exp(symbolLogProb);
}
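Besides the fast_exp on the last line, prob() leans on fast_logadd, which from its use here combines two log-domain probabilities as log(e^a + e^b); its definition is not shown in this excerpt. A standard numerically stable formulation (an assumption about the interface, not SkipCTS's actual code) factors out the larger argument:

#include <cmath>
#include <utility>

// log(exp(a) + exp(b)) without overflow: after the swap, b - a <= 0,
// so exp(b - a) lies in (0, 1] and log1p keeps full precision.
static inline double logadd_sketch(double a, double b)
{
	if (a < b) std::swap(a, b);
	return a + std::log1p(std::exp(b - a));
}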