Code example #1
File: fileManager.cpp Project: dmordom/hClustering
WHcoord fileManager::meanCoordFromMask() const
{
    if (m_maskMatrix.empty()) {
        std::cerr << "ERROR @ fileManager::meanCoordFromMask(): Mask has not been loaded, returning 0 coordinate" << std::endl;
        return WHcoord();
    }
    size_t sumX( 0 ), sumY( 0 ), sumZ( 0 ), sumElements( 0 );
    for( size_t i=0 ; i<m_maskMatrix.size() ; ++i )
    {
        for( size_t j=0 ; j<m_maskMatrix[i].size() ; ++j )
        {
            for( size_t k=0 ; k<m_maskMatrix[i][j].size() ; ++k )
            {
                if (m_maskMatrix[i][j][k])
                {
                    sumX += i;
                    sumY += j;
                    sumZ += k;
                    ++sumElements;
                }
            }
        }
    }
    if( sumElements == 0 )
    {
        // no voxel set in the mask: avoid division by zero
        return WHcoord();
    }
    size_t meanX( sumX/sumElements );
    size_t meanY( sumY/sumElements );
    size_t meanZ( sumZ/sumElements );
    WHcoord meanCoord(meanX,meanY,meanZ);

    return meanCoord;
} // end "meanCoordFromMask()" -----------------------------------------------------------------
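For reference, a minimal self-contained sketch of the same idea (the integer centroid of a 3D boolean mask); the function name and the Mask3D alias are hypothetical, not part of hClustering:

#include <array>
#include <cstddef>
#include <vector>

using Mask3D = std::vector< std::vector< std::vector<bool> > >;

// Returns the (x, y, z) centroid of all set voxels, or {0, 0, 0}
// when the mask is empty or contains no set voxel.
std::array<std::size_t, 3> maskCentroid( const Mask3D& mask )
{
    std::size_t sumX = 0, sumY = 0, sumZ = 0, count = 0;
    for( std::size_t i = 0; i < mask.size(); ++i )
        for( std::size_t j = 0; j < mask[i].size(); ++j )
            for( std::size_t k = 0; k < mask[i][j].size(); ++k )
                if( mask[i][j][k] )
                {
                    sumX += i; sumY += j; sumZ += k;
                    ++count;
                }
    if( count == 0 )
        return { 0, 0, 0 };   // guard against division by zero
    return { sumX / count, sumY / count, sumZ / count };
}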
Code example #2
File: NelderMead.cpp Project: tjisana/MATH4074
// Function to calculate the mean value of a set of n vectors, each of
// dimension n, stored as an (n x n) matrix with X[i][j] holding
// component i of vector j
vector<double> CNelderMead::VMean(vector<vector<double> > X, int n) {
	vector<double> meanX(n);
	for (int i = 0; i < n; i++) {
		meanX[i] = 0.0;
		for (int j = 0; j < n; j++)
			meanX[i] += X[i][j];   // accumulate component i over all n vectors
		meanX[i] = meanX[i] / n;
	}
	}
	return meanX;
}
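A hedged usage sketch of the same computation as a free function (the member version above needs the full CNelderMead class); it assumes the convention the loop implies, namely X[i][j] = component i of vector j, and all names here are illustrative:

#include <iostream>
#include <vector>
using namespace std;

// Standalone re-implementation for illustration; not the CNelderMead member.
vector<double> vmean(const vector<vector<double> >& X, int n) {
	vector<double> meanX(n, 0.0);
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++)
			meanX[i] += X[i][j];
		meanX[i] /= n;
	}
	return meanX;
}

int main() {
	// Two 2-D vectors stored column-wise: v0 = (1, 3), v1 = (5, 7).
	vector<vector<double> > X = { {1.0, 5.0}, {3.0, 7.0} };
	vector<double> m = vmean(X, 2);
	cout << m[0] << " " << m[1] << endl;   // prints: 3 5
	return 0;
}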
Code example #3
File: AIDA_Histogram2D.cpp Project: apfeiffer1/iAIDA
bool
iAIDA::AIDA_Histogram_native::AIDA_Histogram2D::setRms( double rmsX, double rmsY )
{
  const double sw = sumBinHeights();
  const double mnX = meanX();
  const double mnY = meanY();
  // From rms^2 = <x^2> - <x>^2, the weighted sum of squares on each axis
  // equals ( mean^2 + rms^2 ) times the sum of bin heights.
  m_sumWeightTimesSquaredX = ( mnX*mnX + rmsX*rmsX ) * sw;
  m_sumWeightTimesSquaredY = ( mnY*mnY + rmsY*rmsY ) * sw;
  m_validStatistics = false;   // force cached statistics to be recomputed
  return true;
}
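The update relies on the identity rms^2 = <x^2> - <x>^2, so the weighted sum of squares is (mean^2 + rms^2) * sumOfWeights. A standalone sketch (not iAIDA code) that checks the algebra numerically:

#include <cmath>
#include <cstdio>

int main()
{
    const double w[] = { 1.0, 2.0, 1.0 };   // bin heights (weights)
    const double x[] = { 0.5, 1.5, 2.5 };   // bin centres
    double sw = 0.0, swx = 0.0, swx2 = 0.0;
    for ( int i = 0; i < 3; ++i ) {
        sw += w[i]; swx += w[i]*x[i]; swx2 += w[i]*x[i]*x[i];
    }
    const double mean = swx / sw;
    const double rms  = std::sqrt( swx2/sw - mean*mean );
    // Reconstruct the weighted sum of squares the way setRms() does:
    std::printf( "%g == %g\n", ( mean*mean + rms*rms ) * sw, swx2 );   // 11 == 11
    return 0;
}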
Code example #4
File: AIDA_Histogram2D.cpp Project: apfeiffer1/iAIDA
void
iAIDA::AIDA_Histogram_native::AIDA_Histogram2D::updateAnnotation() const
{
  iAIDA::AIDA_Histogram_native::AIDA_BaseHistogram::updateAnnotation();
  const AIDA::IAnnotation& anno = annotationNoUpdate();
  // updateAnnotation() is const, so constness must be cast away to write the new values
  AIDA::IAnnotation& annotation = const_cast< AIDA::IAnnotation& >( anno );

  annotation.setValue( meanXKey, iAIDA_annotationNumberFormater.formatDouble( meanX() ) );
  annotation.setValue( rmsXKey, iAIDA_annotationNumberFormater.formatDouble( rmsX() ) );
  annotation.setValue( meanYKey, iAIDA_annotationNumberFormater.formatDouble( meanY() ) );
  annotation.setValue( rmsYKey, iAIDA_annotationNumberFormater.formatDouble( rmsY() ) );
  annotation.setValue( extra_entriesKey, iAIDA_annotationNumberFormater.formatInteger( extraEntries() ) );
}
Code example #5
static LogisticRegression _Table_to_LogisticRegression (Table me, long *factors, long numberOfFactors, long dependent1, long dependent2) {
	long numberOfParameters = numberOfFactors + 1;
	long numberOfCells = my rows -> size, numberOfY0 = 0, numberOfY1 = 0, numberOfData = 0;
	double logLikelihood = 1e300, previousLogLikelihood = 2e300;
	if (numberOfParameters < 1)   // includes intercept
		Melder_throw ("Not enough columns (has to be more than 1).");
	/*
	 * Divide up the contents of the table into a number of independent variables (x) and two dependent variables (y0 and y1).
	 */
	autoNUMmatrix <double> x (1, numberOfCells, 0, numberOfFactors);   // column 0 is the intercept
	autoNUMvector <double> y0 (1, numberOfCells);
	autoNUMvector <double> y1 (1, numberOfCells);
	autoNUMvector <double> meanX (1, numberOfFactors);
	autoNUMvector <double> stdevX (1, numberOfFactors);
	autoNUMmatrix <double> smallMatrix (0, numberOfFactors, 0, numberOfParameters);
	autoLogisticRegression thee = LogisticRegression_create (my columnHeaders [dependent1]. label, my columnHeaders [dependent2]. label);
	for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
		double minimum = Table_getMinimum (me, factors [ivar]);
		double maximum = Table_getMaximum (me, factors [ivar]);
		Regression_addParameter (thee.peek(), my columnHeaders [factors [ivar]]. label, minimum, maximum, 0.0);
	}
	for (long icell = 1; icell <= numberOfCells; icell ++) {
		y0 [icell] = Table_getNumericValue_Assert (me, icell, dependent1);
		y1 [icell] = Table_getNumericValue_Assert (me, icell, dependent2);
		numberOfY0 += y0 [icell];
		numberOfY1 += y1 [icell];
		numberOfData += y0 [icell] + y1 [icell];
		x [icell] [0] = 1.0;   /* Intercept. */
		for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
			x [icell] [ivar] = Table_getNumericValue_Assert (me, icell, factors [ivar]);
			meanX [ivar] += x [icell] [ivar] * (y0 [icell] + y1 [icell]);
		}
	}
	if (numberOfY0 == 0 && numberOfY1 == 0)
		Melder_throw ("No data in either class. Cannot determine result.");
	if (numberOfY0 == 0)
		Melder_throw ("No data in class ", my columnHeaders [dependent1]. label, ". Cannot determine result.");
	if (numberOfY1 == 0)
		Melder_throw ("No data in class ", my columnHeaders [dependent2]. label, ". Cannot determine result.");
	/*
	 * Normalize the data.
	 */
	for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
		meanX [ivar] /= numberOfData;
		for (long icell = 1; icell <= numberOfCells; icell ++) {
			x [icell] [ivar] -= meanX [ivar];
		}
	}
	for (long icell = 1; icell <= numberOfCells; icell ++) {
		for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
			stdevX [ivar] += x [icell] [ivar] * x [icell] [ivar] * (y0 [icell] + y1 [icell]);
		}
	}
	for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
		stdevX [ivar] = sqrt (stdevX [ivar] / numberOfData);
		for (long icell = 1; icell <= numberOfCells; icell ++) {
			x [icell] [ivar] /= stdevX [ivar];
		}
	}
	/*
	 * Initial state of iteration: the null model.
	 */
	thy intercept = log ((double) numberOfY1 / (double) numberOfY0);   // initial state of intercept: best guess for average log odds
	for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
		RegressionParameter parm = static_cast<RegressionParameter> (thy parameters -> item [ivar]);
		parm -> value = 0.0;   // initial state of dependence: none
	}
	long iteration = 1;
	for (; iteration <= 100; iteration ++) {
		previousLogLikelihood = logLikelihood;
		for (long ivar = 0; ivar <= numberOfFactors; ivar ++) {
			for (long jvar = ivar; jvar <= numberOfParameters; jvar ++) {
				smallMatrix [ivar] [jvar] = 0.0;
			}
		}
		/*
		 * Compute the current log likelihood.
		 */
		logLikelihood = 0.0;
		for (long icell = 1; icell <= numberOfCells; icell ++) {
			double fittedLogit = thy intercept, fittedP, fittedQ, fittedLogP, fittedLogQ, fittedPQ, fittedVariance;
			for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
				RegressionParameter parm = static_cast<RegressionParameter> (thy parameters -> item [ivar]);
				fittedLogit += parm -> value * x [icell] [ivar];
			}
			/*
			 * Basically we have fittedP = 1.0 / (1.0 + exp (- fittedLogit)),
			 * but that works neither for fittedP values near 0 nor for values near 1.
			 */
			if (fittedLogit > 15.0) {
				/*
				 * For large fittedLogit, fittedLogP = ln (1/(1+exp(-fittedLogit))) = -ln (1+exp(-fittedLogit)) =~ - exp(-fittedLogit)
				 */
				fittedLogP = - exp (- fittedLogit);
				fittedLogQ = - fittedLogit;
				fittedPQ = exp (- fittedLogit);
				fittedP = exp (fittedLogP);
				fittedQ = 1.0 - fittedP;
			} else if (fittedLogit < -15.0) {
				fittedLogP = fittedLogit;
				fittedLogQ = - exp (fittedLogit);
				fittedPQ = exp (fittedLogit);
				fittedP = exp (fittedLogP);
				fittedQ = 1.0 - fittedP;
			} else {
				fittedP = 1.0 / (1.0 + exp (- fittedLogit));
				fittedLogP = log (fittedP);
				fittedQ = 1.0 - fittedP;
				fittedLogQ = log (fittedQ);
				fittedPQ = fittedP * fittedQ;
			}
			logLikelihood += -2 * (y1 [icell] * fittedLogP + y0 [icell] * fittedLogQ);
			/*
			 * Matrix shifting stuff.
			 * Suppose a + b Sk + c Tk = ln (pk / qk),
			 * where {a, b, c} are the coefficients to be optimized,
			 * Sk and Tk are properties of stimulus k,
			 * and pk and qk are the fitted probabilities for y1 and y0, respectively, given stimulus k.
			 * Then ln pk = - ln (1 + qk / pk) = - ln (1 + exp (- (a + b Sk + c Tk)))
			 * d ln pk / da = 1 / (1 + exp (a + b Sk + c Tk)) = qk
			 * d ln pk / db = qk Sk
			 * d ln pk / dc = qk Tk
			 * d ln qk / da = - pk
			 * Now LL = Sum(k) (y1k ln pk + y0k ln qk)
			 * so that dLL/da = Sum(k) (y1k d ln pk / da + y0k d ln qk / da) = Sum(k) (y1k qk - y0k pk)
			 */
			fittedVariance = fittedPQ * (y0 [icell] + y1 [icell]);
			for (long ivar = 0; ivar <= numberOfFactors; ivar ++) {
				/*
				 * The last column gets the gradient of LL: dLL/da, dLL/db, dLL/dc.
				 */
				smallMatrix [ivar] [numberOfParameters] += x [icell] [ivar] * (y1 [icell] * fittedQ - y0 [icell] * fittedP);
				for (long jvar = ivar; jvar <= numberOfFactors; jvar ++) {
					smallMatrix [ivar] [jvar] += x [icell] [ivar] * x [icell] [jvar] * fittedVariance;
				}
			}
		}
		if (fabs (logLikelihood - previousLogLikelihood) < 1e-11) {
			break;
		}
		/*
		 * Make matrix symmetric.
		 */
		for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
			for (long jvar = 0; jvar < ivar; jvar ++) {
				smallMatrix [ivar] [jvar] = smallMatrix [jvar] [ivar];
			}
		}
		/*
		 * Invert matrix in the simplest way, and shift and wipe the last column with it.
		 */
		for (long ivar = 0; ivar <= numberOfFactors; ivar ++) {
			double pivot = smallMatrix [ivar] [ivar];   /* Save diagonal. */
			smallMatrix [ivar] [ivar] = 1.0;
			for (long jvar = 0; jvar <= numberOfParameters; jvar ++) {
				smallMatrix [ivar] [jvar] /= pivot;
			}
			for (long jvar = 0; jvar <= numberOfFactors; jvar ++) {
				if (jvar != ivar) {
					double temp = smallMatrix [jvar] [ivar];
					smallMatrix [jvar] [ivar] = 0.0;
					for (long kvar = 0; kvar <= numberOfParameters; kvar ++) {
						smallMatrix [jvar] [kvar] -= temp * smallMatrix [ivar] [kvar];
					}
				}
			}
		}
		/*
		 * Update the parameters from the last column of smallMatrix.
		 */
		thy intercept += smallMatrix [0] [numberOfParameters];
		for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
			RegressionParameter parm = static_cast<RegressionParameter> (thy parameters -> item [ivar]);
			parm -> value += smallMatrix [ivar] [numberOfParameters];
		}
	}
	if (iteration > 100) {
		Melder_warning (L"Logistic regression has not converged in 100 iterations. The results are unreliable.");
	}
	for (long ivar = 1; ivar <= numberOfFactors; ivar ++) {
		RegressionParameter parm = static_cast<RegressionParameter> (thy parameters -> item [ivar]);
		parm -> value /= stdevX [ivar];
		thy intercept -= parm -> value * meanX [ivar];
	}
	return thee.transfer();
}
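The iteration above is Newton-Raphson (iteratively reweighted least squares): the last column of smallMatrix holds the gradient Sum(k) xk (y1k qk - y0k pk), the remaining columns hold the Hessian Sum(k) xk xk^T pk qk (y0k + y1k), and each pass adds the solved increment to the parameters. A minimal single-factor sketch of the same update on toy data (illustrative names, not Praat code):

#include <cmath>
#include <cstdio>

int main()
{
    // Toy data: factor values x with counts of y=1 (n1) and y=0 (n0) outcomes.
    const double x[]  = { -1.0, 0.0, 1.0 };
    const double n1[] = {  1.0, 5.0, 9.0 };
    const double n0[] = {  9.0, 5.0, 1.0 };
    double a = 0.0, b = 0.0;   // intercept and slope
    for ( int iter = 0; iter < 100; ++iter ) {
        double g0 = 0, g1 = 0, h00 = 0, h01 = 0, h11 = 0;
        for ( int k = 0; k < 3; ++k ) {
            const double p = 1.0 / ( 1.0 + std::exp( -( a + b * x[k] ) ) );
            const double q = 1.0 - p;
            const double grad = n1[k] * q - n0[k] * p;    // dLL/d(logit) at cell k
            const double v = p * q * ( n0[k] + n1[k] );   // information weight
            g0 += grad;   g1 += grad * x[k];
            h00 += v;     h01 += v * x[k];   h11 += v * x[k] * x[k];
        }
        // Solve the 2x2 system H * delta = g and take a full Newton step.
        const double det = h00 * h11 - h01 * h01;
        const double da = (  h11 * g0 - h01 * g1 ) / det;
        const double db = ( -h01 * g0 + h00 * g1 ) / det;
        a += da;   b += db;
        if ( std::fabs( da ) + std::fabs( db ) < 1e-11 ) break;
    }
    std::printf( "intercept = %g, slope = %g\n", a, b );
    return 0;
}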
Code example #6
void EdgeBoxGenerator::clusterEdges( arrayf &E, arrayf &O, arrayf &V )
{
  int c, r, cd, rd, i, j; h=E._h; w=E._w;

  // greedily merge connected edge pixels into clusters (create _segIds)
  _segIds.init(h,w); _segCnt=1;
  for( c=0; c<w; c++ ) for( r=0; r<h; r++ ) {
    if( c==0 || r==0 || c==w-1 || r==h-1 || E.val(c,r)<=_edgeMinMag )
      _segIds.val(c,r)=-1; else _segIds.val(c,r)=0;
  }
  for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ ) {
    if(_segIds.val(c,r)!=0) continue;
    float sumv=0; int c0=c, r0=r; vectorf vs; vectori cs, rs;
    while( sumv < _edgeMergeThr ) {
      _segIds.val(c0,r0)=_segCnt;
      float o0 = O.val(c0,r0), o1, v; bool found;
      for( cd=-1; cd<=1; cd++ ) for( rd=-1; rd<=1; rd++ ) {
        // skip pixels already assigned (or out of play) and those already queued
        if( _segIds.val(c0+cd,r0+rd)!=0 ) continue; found=false;
        for( i=0; i<cs.size(); i++ )
          if( cs[i]==c0+cd && rs[i]==r0+rd ) { found=true; break; }
        if( found ) continue; o1=O.val(c0+cd,r0+rd);
        v=fabs(o1-o0)/PI; if(v>.5) v=1-v;
        vs.push_back(v); cs.push_back(c0+cd); rs.push_back(r0+rd);
      }
      float minv=1000; j=0;
      for( i=0; i<vs.size(); i++ ) if( vs[i]<minv ) {
        minv=vs[i]; c0=cs[i]; r0=rs[i]; j=i;
      }
      sumv+=minv; if(minv<1000) vs[j]=1000;
    }
    _segCnt++;
  }

  // merge or remove small segments
  _segMag.resize(_segCnt,0);
  for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ )
    if( (j=_segIds.val(c,r))>0 ) _segMag[j]+=E.val(c,r);
  for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ )
    if( (j=_segIds.val(c,r))>0 && _segMag[j]<=_clusterMinMag)
      _segIds.val(c,r)=0;
  i=1; while(i>0) {
    i=0; for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ ) {
      if( _segIds.val(c,r)!=0 ) continue;
      float o0=O.val(c,r), o1, v, minv=1000; j=0;
      for( cd=-1; cd<=1; cd++ ) for( rd=-1; rd<=1; rd++ ) {
        if( _segIds.val(c+cd,r+rd)<=0 ) continue; o1=O.val(c+cd,r+rd);
        v=fabs(o1-o0)/PI; if(v>.5) v=1-v;
        if( v<minv ) { minv=v; j=_segIds.val(c+cd,r+rd); }
      }
      _segIds.val(c,r)=j; if(j>0) i++;
    }
  }

  // compactify representation
  _segMag.assign(_segCnt,0); vectori map(_segCnt,0); _segCnt=1;
  for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ )
    if( (j=_segIds.val(c,r))>0 ) _segMag[j]+=E.val(c,r);
  for( i=0; i<_segMag.size(); i++ ) if( _segMag[i]>0 ) map[i]=_segCnt++;
  for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ )
    if( (j=_segIds.val(c,r))>0 ) _segIds.val(c,r)=map[j];

  // compute positional means and recompute _segMag
  _segMag.assign(_segCnt,0); vectorf meanX(_segCnt,0), meanY(_segCnt,0);
  vectorf meanOx(_segCnt,0), meanOy(_segCnt,0), meanO(_segCnt,0);
  for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ ) {
    j=_segIds.val(c,r); if(j<=0) continue;
    float m=E.val(c,r), o=O.val(c,r); _segMag[j]+=m;
    meanOx[j]+=m*cos(2*o); meanOy[j]+=m*sin(2*o);
    meanX[j]+=m*c; meanY[j]+=m*r;
  }
  for( i=0; i<_segCnt; i++ ) if( _segMag[i]>0 ) {
    float m=_segMag[i]; meanX[i]/=m; meanY[i]/=m;
    meanO[i]=atan2(meanOy[i]/m,meanOx[i]/m)/2;
  }

  // compute segment affinities
  _segAff.resize(_segCnt); _segAffIdx.resize(_segCnt);
  for(i=0; i<_segCnt; i++) _segAff[i].resize(0);
  for(i=0; i<_segCnt; i++) _segAffIdx[i].resize(0);
  const int rad = 2;
  for( c=rad; c<w-rad; c++ ) for( r=rad; r<h-rad; r++ ) {
    int s0=_segIds.val(c,r); if( s0<=0 ) continue;
    for( cd=-rad; cd<=rad; cd++ ) for( rd=-rad; rd<=rad; rd++ ) {
      int s1=_segIds.val(c+cd,r+rd); if(s1<=s0) continue;
      bool found = false; for(i=0;i<_segAffIdx[s0].size();i++)
        if(_segAffIdx[s0][i] == s1) { found=true; break; }
      if( found ) continue;
      float o=atan2(meanY[s0]-meanY[s1],meanX[s0]-meanX[s1])+PI/2;
      float a=fabs(cos(meanO[s0]-o)*cos(meanO[s1]-o)); a=pow(a,_gamma);
      _segAff[s0].push_back(a); _segAffIdx[s0].push_back(s1);
      _segAff[s1].push_back(a); _segAffIdx[s1].push_back(s0);
    }
  }

  // compute _segC and _segR
  _segC.resize(_segCnt); _segR.resize(_segCnt);
  for( c=1; c<w-1; c++ ) for( r=1; r<h-1; r++ )
    if( (j=_segIds.val(c,r))>0 ) { _segC[j]=c; _segR[j]=r; }

  // optionally create visualization (assumes V holds 3*w*h initialized floats)
  if( V._x ) for( c=0; c<w; c++ ) for( r=0; r<h; r++ ) {
    i=_segIds.val(c,r);
    V.val(c+w*0,r) = i<=0 ? 1 : ((123*i + 128)%255)/255.0f;
    V.val(c+w*1,r) = i<=0 ? 1 : ((7*i + 3)%255)/255.0f;
    V.val(c+w*2,r) = i<=0 ? 1 : ((174*i + 80)%255)/255.0f;
  }
}
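One detail worth noting from the positional-means block above: orientations are averaged on the doubled angle (cos 2o, sin 2o), so that o and o+PI, which describe the same undirected edge, reinforce instead of cancelling. A standalone sketch of that trick (hypothetical helper, not part of the EdgeBoxes API):

#include <cmath>
#include <cstdio>

static const double PI = 3.14159265358979323846;

// Weighted circular mean of undirected orientations, using the
// angle-doubling trick from clusterEdges() above.
double meanOrientation( const double* o, const double* w, int n )
{
    double sx = 0.0, sy = 0.0;
    for( int i = 0; i < n; i++ ) {
        sx += w[i] * std::cos( 2.0 * o[i] );
        sy += w[i] * std::sin( 2.0 * o[i] );
    }
    return std::atan2( sy, sx ) / 2.0;   // halve to undo the doubling
}

int main()
{
    // 0.1 and 0.1+PI are the same undirected orientation: a naive average
    // would give ~1.67, while the doubled-angle mean recovers 0.1.
    const double o[] = { 0.1, 0.1 + PI };
    const double w[] = { 1.0, 1.0 };
    std::printf( "%g\n", meanOrientation( o, w, 2 ) );
    return 0;
}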