Example no. 1
// Multiply by vector
NEWMAT::ReturnMatrix FullBFMatrix::MulByVec(const NEWMAT::ColumnVector& invec) const
{
  if (invec.Nrows() != int(Ncols())) {throw BFMatrixException("FullBFMatrix::MulByVec: Matrix-vector size mismatch");}
  NEWMAT::ColumnVector  ret;
  ret = (*mp)*invec;
  ret.Release();
  return(ret);
}
Example no. 2
// Given A*x=b, solve for x
NEWMAT::ReturnMatrix FullBFMatrix::SolveForx(const NEWMAT::ColumnVector& b,       // Ignoring all parameters except b
					     MISCMATHS::MatrixType       type,
					     double                      tol,
                                             int                         miter) const
{
  if (int(Nrows()) != b.Nrows()) {throw BFMatrixException("FullBFMatrix::SolveForx: Matrix-vector size mismatch");}
  NEWMAT::ColumnVector  ret;
  ret = mp->i()*b;
  ret.Release();
  return(ret);
}
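Both snippets above rely on the NEWMAT ReturnMatrix idiom: the local result calls Release() just before being returned, which lets the caller take over its storage instead of paying for a deep copy. A minimal stand-alone sketch of the same pattern (the solveByInverse helper below is hypothetical, not part of FullBFMatrix):

// Hypothetical helper showing the Release()/ReturnMatrix idiom used above.
// Assumes newmat built with use_namespace so the classes live in NEWMAT::.
#include "newmat.h"

NEWMAT::ReturnMatrix solveByInverse(const NEWMAT::Matrix& A, const NEWMAT::ColumnVector& b)
{
  NEWMAT::ColumnVector x = A.i() * b;  // same direct solve as SolveForx above
  x.Release();                         // hand the storage to the caller without copying
  return x;
}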
Example no. 3
NEWMAT::ColumnVector
ossimRpcProjection::getResidue(const ossimTieGptSet& tieSet)
{
   //init
   NEWMAT::ColumnVector residue;
   int dimObs;

   bool useImageObs = useForward(); //caching
   if (useImageObs)
   {
      dimObs = 2; //image observation
   } else {
      dimObs = 3; //ground observations
   }
   int no = dimObs * tieSet.size(); //number of observations

   residue.ReSize(no);

   const vector<ossimRefPtr<ossimTieGpt> >& theTPV = tieSet.getTiePoints();
   vector<ossimRefPtr<ossimTieGpt> >::const_iterator tit;
   unsigned long c=1;

   if (useImageObs)
   {
      //image observations
      ossimDpt resIm;
      // loop on tie points
      for (tit = theTPV.begin() ; tit != theTPV.end() ; ++tit)
      {
         //compute residue
         resIm = (*tit)->tie - forward(**tit);
         residue(c++) = resIm.x;
         residue(c++) = resIm.y;
      }
   } else {
      // ground observations
      ossimGpt gd;
      // loop on tie points
      for (tit = theTPV.begin() ; tit != theTPV.end() ; ++tit)
      {
         //compute residue
         gd = inverse((*tit)->tie);
         residue(c++) = ((*tit)->lon - gd.lon) * 100000.0; //approx meters //TBC TBD
         residue(c++) = ((*tit)->lat - gd.lat) * 100000.0 * cos(gd.lat / 180.0 * M_PI);
         residue(c++) = (*tit)->hgt - gd.hgt; //meters
      }
   } //end of if (useImageObs)

   return residue;
}
Example no. 4
void energyEvalFunction_init(int ndim, NEWMAT::ColumnVector& x)
{
	for (int i = 0; i < ndim; i++)
	{
		x.element(i) = opt_params.orig_params_each_piece->element(i);
	}
}
void threadParamEvalFunction_init(int ndim, NEWMAT::ColumnVector& x)
{
	for (int i = 0; i < ndim; i++)
	{
		x.element(i) = opt_params_thread_params.orig_params->element(i);
	}
}
Example no. 6
phy::vector_t const toNumber(NEWMAT::ColumnVector const & v)
{
  unsigned n = v.Nrows();
  phy::vector_t u(n);
  for (unsigned i = 0; i < n; i++)
    u[i] = v[i];
  return u;
}
Example no. 7
void
nlf1(int mode, int ndim, NEWMAT::ColumnVector const & x, OPTPP::real & fx, NEWMAT::ColumnVector & gx, int & result, void * data)
{
  result = OPTPP::NLPNoOp;

  if (mode & OPTPP::NLPFunction)
    nlf0(ndim, x, fx, result, data);

  if (mode & OPTPP::NLPGradient)
  {
    ScalarFunction const * func = static_cast<ScalarFunction *>(data);
    AnalyticD1ScalarFunction const * func1 = dynamic_cast<AnalyticD1ScalarFunction const *>(func);
    alwaysAssertM(func1, "OPTPPNumericalOptimizer: Function does not have analytical first derivative");

    gx.ReSize(ndim);
    func1->gradientAt(x.data(), gx.data());
    result |= OPTPP::NLPGradient;
  }
}
	void CUniversalKriging::Init_r(const CGridPoint& pt, CGridPointVector& va, NEWMAT::ColumnVector& r)const
	{

		double unbias = m_pVariogram->GetUnbias();
		size_t neq = va.size() + GetMDT();
		r.resize((int)neq);
		r = 0;

		CDiscretisation m_discr;
		m_discr.Initialization(pt, *m_pVariogram, m_p, m_param.m_bFillNugget);
		// Set up the right hand side: 
		for (int i = 0; i < (int)va.size(); i++)
		{
			//compute mean over discretisation
			double cb = 0.0;
			for (int j = 0; j < (int)m_discr.size(); j++)
			{
				cb += m_pVariogram->cova3(va[i], m_discr[j]);

				double d = va[i].GetDistance(va[j]);
				if (d < EPSLON && (m_discr.size() > 1 || m_param.m_bFillNugget))
					cb -= m_pVariogram->GetNugget();
			}
			cb = cb / m_discr.size();
			r[i] = cb;
		}


		// Fill in the OK unbiasedness portion of the matrix (if not doing SK): 
		if (!m_p.m_bSK)
			r[(int)va.size()] = unbias;

		// Add the additional Unbiasedness constraints: 
		int im = int(va.size()) + 1;

		double resc = max(m_pVariogram->GetMaxCov(), LIMIT) / (2.0 * m_p.m_radius);
		const CDetrending& detrending = m_pVariogram->GetDetrending();


		for (int i = 0; i < m_externalDrift.size(); i++)
		{
			r[im] = m_pVariogram->GetMaxCov();
			im++;
		}

	}
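In effect, each right-hand-side entry is the mean covariance between sample i and the N_d discretisation points of the estimation location (with the nugget subtracted for near-collocated pairs), while the trailing unbiasedness rows are filled with the unbias / max-covariance constants:

$$r_i \approx \frac{1}{N_d}\sum_{j=1}^{N_d} C(x_i, d_j), \qquad i = 1,\dots,|va|.$$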
Example no. 9
void
nlf2(int mode, int ndim, NEWMAT::ColumnVector const & x, OPTPP::real & fx, NEWMAT::ColumnVector & gx,
     NEWMAT::SymmetricMatrix & Hx, int & result, void * data)
{
  result = OPTPP::NLPNoOp;

  if (mode & OPTPP::NLPFunction || mode & OPTPP::NLPGradient)
    nlf1(mode, ndim, x, fx, gx, result, data);

  if (mode & OPTPP::NLPHessian)
  {
    ScalarFunction const * func = static_cast<ScalarFunction *>(data);
    AnalyticD2ScalarFunction const * func2 = dynamic_cast<AnalyticD2ScalarFunction const *>(func);
    alwaysAssertM(func2, "OPTPPNumericalOptimizer: Function does not have analytical second derivative");

    Hx.ReSize(ndim);
    OPTPPSymMatWrapper Hwrapper(&Hx);
    func2->hessianAt(x.data(), Hwrapper);
    result |= OPTPP::NLPHessian;
  }
}
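nlf0/nlf1/nlf2 follow the OPT++ user-function convention: mode is a bitmask of the requested quantities (NLPFunction, NLPGradient, NLPHessian) and result reports which of them were actually computed. A rough registration sketch is shown below; the exact NLF1/OptQNewton constructor overloads and accessor names differ between OPT++ versions, so treat the signatures as assumptions rather than the wrapper's actual wiring (scalarFn is a hypothetical ScalarFunction instance that nlf1 casts the data pointer back to):

// Hypothetical OPT++ setup (header names and overloads assumed; check your OPT++ version).
#include "NLF.h"
#include "OptQNewton.h"

OPTPP::NLF1 nlp(ndim, nlf1, initWrapper, (void*)&scalarFn); // objective with analytic gradient
OPTPP::OptQNewton solver(&nlp);                             // quasi-Newton driver
solver.optimize();                                          // calls nlf1 with the mode bitmask above
NEWMAT::ColumnVector xmin = nlp.getXc();                    // final iterate (accessor name assumed)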
Example no. 10
void
ossimRpcProjection::buildNormalEquation(const ossimTieGptSet& tieSet,
                                      NEWMAT::SymmetricMatrix& A,
                                      NEWMAT::ColumnVector& residue,
                                      NEWMAT::ColumnVector& projResidue,
                                      double pstep_scale)
{
   //goal:       build Least Squares system
   //constraint: never store full Jacobian matrix in memory (can be huge)
   //            so we build the matrices incrementally
   // the system can be built using forward() or inverse(), depending on the projection capabilities: useForward()
   //
   //TBD : add covariance matrix for each tie point

   //init
   int np = getNumberOfAdjustableParameters();
   int dimObs;
   bool useImageObs = useForward(); //caching
   if (useImageObs)
   {
      dimObs = 2; //image observation
   } else {
      dimObs = 3; //ground observations
   }
   int no = dimObs * tieSet.size(); //number of observations

   A.ReSize(np);
   residue.ReSize(no);
   projResidue.ReSize(np);
   //Zeroify matrices that will be accumulated
   A           = 0.0;
   projResidue = 0.0;

   const vector<ossimRefPtr<ossimTieGpt> >& theTPV = tieSet.getTiePoints();
   vector<ossimRefPtr<ossimTieGpt> >::const_iterator tit;
   unsigned long c=1;

   if (useImageObs)
   {
      //image observations
      ossimDpt* imDerp = new ossimDpt[np];
      ossimDpt resIm;
      // loop on tie points
      for (tit = theTPV.begin() ; tit != theTPV.end() ; ++tit)
      {
         //compute residue
         resIm = (*tit)->tie - forward(*(*tit));
         residue(c++) = resIm.x;
         residue(c++) = resIm.y;

         //compute all image derivatives with respect to the parameters at the tie point position
         for(int p=0;p<np;++p)
         {
            imDerp[p] = getForwardDeriv( p , *(*tit) , pstep_scale);
         }

         //compute influence of tie point on all system elements
         for(int p1=0;p1<np;++p1)
         {        
            //proj residue: J * residue
            projResidue.element(p1) += imDerp[p1].x * resIm.x + imDerp[p1].y * resIm.y;

            //normal matrix A = transpose(J)*J
            for(int p2=p1;p2<np;++p2)
            {
               A.element(p1,p2) += imDerp[p1].x * imDerp[p2].x + imDerp[p1].y * imDerp[p2].y;
            }
         }
      }
      delete []imDerp;
   }
   else
   {
      // ground observations
      std::vector<ossimGpt> gdDerp(np);
      ossimGpt gd, resGd;
      // loop on tie points
      for (tit = theTPV.begin() ; tit != theTPV.end() ; ++tit)
      {
         //compute residue
         gd = inverse((*tit)->tie);
         residue(c++) = resGd.lon = ((*tit)->lon - gd.lon) * 100000.0;
         residue(c++) = resGd.lat = ((*tit)->lat - gd.lat) * 100000.0 * cos(gd.lat / 180.0 * M_PI);
         residue(c++) = resGd.hgt = (*tit)->hgt - gd.hgt; //TBD : normalize to meters?

         //compute all image derivatives with respect to the parameters at the tie point position
         for(int p=0;p<np;++p)
         {
            gdDerp[p] = getInverseDeriv( p , (*tit)->tie, pstep_scale);
         }

         //compute influence of tie point on all system elements
         for(int p1=0;p1<np;++p1)
         {        
            //proj residue: J * residue
            projResidue.element(p1) += gdDerp[p1].lon * resGd.lon + gdDerp[p1].lat * resGd.lat + gdDerp[p1].hgt * resGd.hgt; //TBC

            //normal matrix A = transpose(J)*J
            for(int p2=p1;p2<np;++p2)
            {
               A.element(p1,p2) += gdDerp[p1].lon * gdDerp[p2].lon + gdDerp[p1].lat * gdDerp[p2].lat + gdDerp[p1].hgt * gdDerp[p2].hgt;
            }
         }
      }
   } //end of if (useImageObs)
}
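As the header comment states, the full Jacobian J is never stored: each tie point contributes a small dimObs-by-np block J_i, and the loops accumulate the Gauss-Newton normal equations directly (only the upper triangle of the symmetric A is filled):

$$A = \sum_i J_i^{\top} J_i, \qquad \text{projResidue} = \sum_i J_i^{\top} r_i,$$

which optimizeFit later solves for the parameter update Δp.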
Example no. 11
double
ossimRpcProjection::optimizeFit(const ossimTieGptSet& tieSet, double* /* targetVariance */)
{
#if 1
   //NOTE : ignore targetVariance
   ossimRefPtr<ossimRpcSolver> solver = new ossimRpcSolver(false, false); //TBD : choices should be part of setupFromString

   std::vector<ossimDpt> imagePoints;
   std::vector<ossimGpt> groundPoints;
   tieSet.getSlaveMasterPoints(imagePoints, groundPoints);
   solver->solveCoefficients(imagePoints, groundPoints);

   ossimRefPtr< ossimImageGeometry > optProj = solver->createRpcProjection();
   if (!optProj)
   {
      ossimNotify(ossimNotifyLevel_FATAL) << "FATAL ossimRpcProjection::optimizeFit(): error when optimizing the RPC with given tie points"
                                             << std::endl;
      return -1.0;
   }

   if(optProj->hasProjection())
   {
      ossimKeywordlist kwl;
      optProj->getProjection()->saveState(kwl);
      this->loadState(kwl);
   }

   return std::pow(solver->getRmsError(), 2); //variance in pixel^2
#else
   // COPIED from ossimRpcProjection
   //
   //
   //use a simple Levenberg-Marquardt non-linear optimization
   //note : please limit the number of tie points
   //
   //INPUTS: requires Jacobian matrix (partial derivatives with regards to parameters)
   //OUTPUTS: will also compute parameter covariance matrix
   //
   //TBD: use targetVariance!
 
   int np = getNumberOfAdjustableParameters();
   int nobs = tieSet.size();

   //setup initial values
   int iter=0;
   int iter_max = 200;
   double minResidue = 1e-10; //TBC
   double minDelta = 1e-10; //TBC

   //build Least Squares initial normal equation
   // don't waste memory, add samples one at a time
   NEWMAT::SymmetricMatrix A;
   NEWMAT::ColumnVector residue;
   NEWMAT::ColumnVector projResidue;
   double deltap_scale = 1e-4; //small derivative step, since parameters are expected to be between -1 and 1
   buildNormalEquation(tieSet, A, residue, projResidue, deltap_scale);
   double ki2=residue.SumSquare();

   //get current adjustment (between -1 and 1 normally) and convert to ColumnVector
   ossimAdjustmentInfo cadj;
   getAdjustment(cadj);
   std::vector< ossimAdjustableParameterInfo >& parmlist = cadj.getParameterList();
   NEWMAT::ColumnVector cparm(np), nparm(np);
   for(int n=0;n<np;++n)
   {
      cparm(n+1) = parmlist[n].getParameter();
   }

   double damping_speed = 2.0;
   //find max diag element for A
   double maxdiag=0.0;
   for(int d=1;d<=np;++d) {
      if (maxdiag < A(d,d)) maxdiag=A(d,d);
   }
   double damping = 1e-3 * maxdiag;
   double olddamping = 0.0;
   bool found = false;

//DEBUG TBR
cout<<"rms="<<sqrt(ki2/nobs)<<" ";
cout.flush();

   while ( (!found) && (iter < iter_max) ) //non linear optimization loop
   {
      bool decrease = false;

      do
      {
         //add damping update to normal matrix
         for(int d=1;d<=np;++d) A(d,d) += damping - olddamping;
         olddamping = damping;

         NEWMAT::ColumnVector deltap = solveLeastSquares(A, projResidue);

         if (deltap.NormFrobenius() <= minDelta) 
         {
            found = true;
         } else {
            //update adjustment
            nparm = cparm + deltap;
            for(int n=0;n<np;++n)
            {
               setAdjustableParameter(n, nparm(n+1), false); //do not update now, wait
            }
            adjustableParametersChanged();

            //check residue is reduced
            NEWMAT::ColumnVector newresidue = getResidue(tieSet);
            double newki2=newresidue.SumSquare();
            double res_reduction = (ki2 - newki2) / (deltap.t()*(deltap*damping + projResidue)).AsScalar();
 //DEBUG TBR
       cout<<sqrt(newki2/nobs)<<" ";
       cout.flush();

            if (res_reduction > 0)
            {
               //accept new parms
               cparm = nparm;
               ki2=newki2;

               deltap_scale = max(1e-15, deltap.NormInfinity()*1e-4);

               buildNormalEquation(tieSet, A, residue, projResidue, deltap_scale);
               olddamping = 0.0;

               found = ( projResidue.NormInfinity() <= minResidue );
               //update damping factor
               damping *= std::max( 1.0/3.0, 1.0-std::pow((2.0*res_reduction-1.0),3));
               damping_speed = 2.0;
               decrease = true;
            } else {
               //cancel parameter update
               for(int n=0;n<np;++n)
               {
                  setAdjustableParameter(n, nparm(n+1), false); //do not update right now
               }
               adjustableParametersChanged();

               damping *= damping_speed;
               damping_speed *= 2.0;
            }
         }
      } while (!decrease && !found);
      ++iter;
   }

//DEBUG TBR
cout<<endl;

   //compute parameter correlation
   // use normal matrix inverse
   //TBD

   return ki2/nobs;
#endif
}
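The inner loop of the #else branch is a standard Levenberg-Marquardt damping schedule: res_reduction is the gain ratio ρ comparing the actual drop in χ² to the drop predicted by the linearised model, and the damping μ (damping) and growth factor ν (damping_speed) are updated accordingly:

$$\rho = \frac{\chi^2_{\text{old}} - \chi^2_{\text{new}}}{\Delta p^{\top}(\mu\,\Delta p + g)}, \qquad
\rho > 0:\ \mu \leftarrow \mu \max\!\Big(\tfrac{1}{3},\, 1 - (2\rho - 1)^3\Big),\ \nu \leftarrow 2; \qquad
\rho \le 0:\ \mu \leftarrow \mu\nu,\ \nu \leftarrow 2\nu,$$

with g = projResidue; each step comes from solving the damped normal equations (A + μI)·Δp = g via solveLeastSquares.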
Example no. 12
void initWrapper(int ndim, NEWMAT::ColumnVector & x)
{
  assert( ndim == x.Nrows() );
  assert( ndim == static_cast<signed>( glbInitParams->size() ) );
  x = toColumnVector( *glbInitParams );
}
	double CUniversalKriging::Evaluate(const CGridPoint& pt, int iXval)const
	{
		//if the variogram is not initialised correctly, we return no data
		if (!m_pVariogram->IsInit())
			return m_param.m_noData;

		// Find the nearest samples: 
		CGridPointVector va;

		Init_va(pt, iXval, va);

		int mdt = GetMDT();
		// Test number of samples found: 
		// Test if there are enough samples to estimate all drift terms: 
		if ((int)va.size() < m_p.m_nbPoint || (int)va.size() <= mdt)
			return m_param.m_noData;

		if ( /*iXval<0 &&*/ va[0].GetDistance(pt) > m_param.m_maxDistance)
			return m_param.m_noData;

		// There are enough samples - proceed with estimation. 
		// Go ahead and set up the OK portion of the kriging matrix: 

		NEWMAT::ColumnVector r;
		NEWMAT::Matrix a;

		Init_a(pt, va, a);
		Init_r(pt, va, r);

		// If estimating the trend then reset all the right hand side terms=0.0:
		if (m_p.m_bTrend)
			r = 0;

		//copy r to compute variance
		NEWMAT::ColumnVector rr(r);


		// Solve the kriging system: 
		int neq = mdt + int(va.size());

		//reduce the matrix until 2/3 of the number of neighbours remains
		bool bOK = false;
		while (!bOK && neq >= mdt + m_p.m_nbPoint * 2 / 3)//by RSA 25/10/2010
		{
			Try
			{
				a = a.i();
				bOK = true;
			}
			Catch(NEWMAT::Exception)
			{
				OutputDebugStringW(L"a=a.i() failed; reducing number of points");

				Init_a(pt, va, a);
				Init_r(pt, va, r);

				neq--;
				int firstRow = 1 + mdt + int(va.size()) - neq;
				a = a.SubMatrix(firstRow, a.Nrows(), firstRow, a.Ncols());
				r = r.SubMatrix(firstRow, r.Nrows(), 1, 1);
			}

			if (m_pAgent && m_pAgent->TestConnection(m_hxGridSessionID) == S_FALSE)
				throw(CHxGridException());

		}

		//if we could not solve the system, return missing value
		if (!bOK)
			return m_param.m_noData;


		NEWMAT::ColumnVector s = a*r;


		CDiscretisation m_discr;
		m_discr.Initialization(pt, *m_pVariogram, m_p, m_param.m_bFillNugget);

		// Compute the solution: 
		double uuk = 0.0;
		double ukv = m_discr.cbb;
		for (int j = 0; j < neq; j++)
		{
			ukv -= s[j] * rr[j];
			if (j < (int)va.size())
				uuk += s[j] * (m_prePostTransfo.Transform(va[j].m_event) - m_p.m_SKmean);
		}

		uuk += m_p.m_SKmean;
		uuk = m_prePostTransfo.InvertTransform(uuk, m_param.m_noData);
		//
		if ( /*iXval<0 &&*/ m_param.m_bRegionalLimit  && uuk > m_param.m_noData)
		{
			CStatistic stat;
			for (size_t i = 0; i < va.size(); i++)
				stat += va[i].m_event;

			bool bOutside = uuk<stat[LOWEST] - m_param.m_regionalLimitSD*stat[STD_DEV] || uuk>stat[HIGHEST] + m_param.m_regionalLimitSD*stat[STD_DEV];
			if (bOutside)
			{
				if (m_param.m_bRegionalLimitToBound)
					uuk = min(stat[HIGHEST] + m_param.m_regionalLimitSD*stat[STD_DEV], max(stat[LOWEST] - m_param.m_regionalLimitSD*stat[STD_DEV], uuk));
				else
					uuk = m_param.m_noData;
			}
		}

		if ( /*iXval<0 &&*/ m_param.m_bGlobalLimit && uuk > m_param.m_noData)
		{
			bool bOutside = uuk<m_stat[LOWEST] - m_param.m_globalLimitSD*m_stat[STD_DEV] || uuk>m_stat[HIGHEST] + m_param.m_globalLimitSD*m_stat[STD_DEV];
			if (bOutside)
			{
				if (m_param.m_bGlobalLimitToBound)
					uuk = min(m_stat[HIGHEST] + m_param.m_globalLimitSD*m_stat[STD_DEV], max(m_stat[LOWEST] - m_param.m_globalLimitSD*m_stat[STD_DEV], uuk));
				else
					uuk = m_param.m_noData;
			}
		}

		if ( /*iXval<0 &&*/ m_param.m_bGlobalMinMaxLimit && uuk > m_param.m_noData)
		{
			bool bOutside = uuk<m_param.m_globalMinLimit || uuk>m_param.m_globalMaxLimit;
			if (bOutside)
			{
				if (m_param.m_bGlobalMinMaxLimitToBound)
					uuk = min(m_param.m_globalMaxLimit, max(m_param.m_globalMinLimit, uuk));
				else
					uuk = m_param.m_noData;
			}
		}


		return uuk;
	}
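The solution loop near the end is the usual kriging estimate and variance written with the weights s = a⁻¹ r:

$$\hat{z} = m_{SK} + \sum_{j=1}^{|va|} s_j\,\big(z_j - m_{SK}\big), \qquad \sigma^2 = c_{bb} - \sum_{j=1}^{n_{eq}} s_j\, r_j,$$

where z_j are the transformed neighbour values, m_SK is m_p.m_SKmean, and c_bb is the discretised block covariance m_discr.cbb; the estimate is back-transformed afterwards, the variance ukv is computed but not returned, and the remaining blocks only clamp uuk to the configured regional/global limits.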