Example #1
/* ************************************************************************** */
double DotProduct(double *x, double *y, long n )
/*----------------------------------------------------------------------------->
/ purpose:  Compute the normalized dot product (x . y)/(|x||y|) and return the result
/
/ coded by: Gerhard L.H. Kruizinga                07/13/99
/
/ input:    x,y input vectors, n length of vector
/
/ output:   normalized dot product (cosine of the angle between x and y) is returned by routine
/
<-----------------------------------------------------------------------------*/
{
 double normx,normy;
 double dotproduct;

 long i;

 normx = VectorNorm(x,n);
 normy = VectorNorm(y,n);

 dotproduct = 0;

 loop(i,n) dotproduct = dotproduct + x[i]*y[i];

 return dotproduct/normx/normy;
}
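A minimal usage sketch (hypothetical values, assuming the VectorNorm helper used above): because the sum is divided by both norms, the returned value is the cosine of the angle between the two vectors rather than the raw dot product.

double a[3] = {1.0, 0.0, 0.0};
double b[3] = {1.0, 1.0, 0.0};
double cosine = DotProduct(a, b, 3);  /* ~0.7071: the vectors are 45 degrees apart */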
Example #2
int ComputeMError (edge *TheEdge, double center_error, double image_error)
{
  double m[3], e[3];

  /* N.B. image_error and center_error are given in pixels */

  if (first) {
    /* calculate the pixel size */
    pixel_size = (2 * FOCAL_LENGTH * tan(FIELD_OF_VIEW * M_PI / 360.0));
    pixel_size /= N_PIXELS;
    first = 0;
  }

  m[0] = TheEdge->y2 - TheEdge->y1;
  m[1] = TheEdge->x1 - TheEdge->x2;
  m[2] = (TheEdge->x1)*(TheEdge->y2) - (TheEdge->x2)*(TheEdge->y1);

  e[0] = e[1] = 2 * image_error;
  e[2] = fabs (TheEdge->x1 - TheEdge->x2) * center_error +
         fabs (TheEdge->y1 - TheEdge->y2) * center_error + 
         (fabs (TheEdge->x1) + fabs (TheEdge->y1)) * image_error +
         (fabs (TheEdge->x2) + fabs (TheEdge->y2)) * image_error;

  TheEdge->m_error = sqr((VectorNorm (e) * pixel_size) / VectorNorm (m));
  TheEdge->l_error = sqr((image_error + center_error) * pixel_size) *
                     TheEdge->length;

  return 0;
}
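For reference (a derivation not spelled out in the source, but easy to verify by substitution): m holds the coefficients of the implicit line through the edge's two endpoints, and e collects the corresponding first-order error bounds in pixels, which m_error then converts to physical units via pixel_size.

m_0 x + m_1 y = m_2, \qquad m_0 = y_2 - y_1, \quad m_1 = x_1 - x_2, \quad m_2 = x_1 y_2 - x_2 y_1

Both endpoints (x_1, y_1) and (x_2, y_2) satisfy this equation.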
Example #3
// Look-at code
Matrix lookatMatrix(Vector eye, Vector center, Vector up) {
	Vector f = VectorSub(center, eye);
	Vector fn = VectorNorm(f);
	Vector upn = VectorNorm(up);
	Vector s = VectorNorm(VectorCross(fn, upn));	// side axis, normalized in case fn and upn are not exactly perpendicular
	Vector u = VectorCross(s, fn);			// corrected up axis (unit length, since s and fn are orthonormal)
	Matrix lookat = MakeMatrix(
		 s.x, s.y, s.z, 0.0f,
		 u.x, u.y, u.z, 0.0f,
		-fn.x, -fn.y, -fn.z, 0.0f,
		0.0f, 0.0f, 0.0f, 1.0f
	);
	return lookat;
}
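For comparison, the classic gluLookAt construction (standard OpenGL convention, stated here for reference and not defined by the snippet) uses the same three axes built from normalized vectors, and follows the rotation with a translation by -eye, which the function above does not apply:

f = \frac{center - eye}{\|center - eye\|}, \quad s = \frac{f \times up}{\|f \times up\|}, \quad u = s \times f, \qquad
M = \begin{pmatrix} s_x & s_y & s_z & 0 \\ u_x & u_y & u_z & 0 \\ -f_x & -f_y & -f_z & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}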
Example #4
double DexAnalogMixin::FilterManipulandumRotations( Vector3 rotations ) {
	// Combine the new rotations sample with previous filtered value (recursive filtering).
	ScaleVector( filteredManipulandumRotations, filteredManipulandumRotations, filterConstant );
	AddVectors( filteredManipulandumRotations, filteredManipulandumRotations, rotations );
	ScaleVector( filteredManipulandumRotations, filteredManipulandumRotations, 1.0 / (1.0 + filterConstant ));
	// Return the filtered value in place.
	CopyVector( rotations, filteredManipulandumRotations );
	return( VectorNorm( rotations ) );
}
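In scalar form, the three vector operations above implement this first-order recursive low-pass filter, with \lambda = filterConstant (a restatement of the code, not an addition to it):

filtered_n = \frac{\lambda \, filtered_{n-1} + x_n}{1 + \lambda}

Each new sample therefore enters with weight 1/(1+\lambda), so a larger filterConstant means heavier smoothing. The same pattern is repeated in FilterCoP, FilterLoadForce and FilterAcceleration below.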
Example #5
double DexAnalogMixin::FilterCoP( int which_ati, Vector3 center_of_pressure ) {
	// Combine the new force sample with previous filtered value (recursive filtering).
	ScaleVector( filteredCoP[which_ati], filteredCoP[which_ati], filterConstant );
	AddVectors( filteredCoP[which_ati], filteredCoP[which_ati], center_of_pressure );
	ScaleVector( filteredCoP[which_ati], filteredCoP[which_ati], 1.0 / (1.0 + filterConstant ));
	// Return the filtered value in place.
	CopyVector( center_of_pressure, filteredCoP[which_ati] );
	return( VectorNorm( filteredCoP[which_ati] ) );
}
Example #6
double DexAnalogMixin::FilterLoadForce( Vector3 load_force ) {
	// Combine the new force sample with previous filtered value (recursive filtering).
	ScaleVector( filteredLoadForce, filteredLoadForce, filterConstant );
	AddVectors( filteredLoadForce, filteredLoadForce, load_force );
	ScaleVector( filteredLoadForce, filteredLoadForce, 1.0 / (1.0 + filterConstant ));
	// Return the filtered value in place.
	CopyVector( load_force, filteredLoadForce );
	return( VectorNorm( filteredLoadForce ) );
}
Example #7
// Same as the above, but the load force normal to the sensors is ignored.
double DexAnalogMixin::ComputePlanarLoadForce( Vector3 &load, Vector3 &force1, Vector3 &force2 ) {
	// Compute the net force perpendicular to the pinch axis.
	ComputeLoadForce( load, force1, force2 );
	// Ignore the normal component.
	// NB: Using same axis definitions as Dex code provided by Bert.
	load[X] = 0.0;
	// Return the magnitude of the net force on the object.
	return( VectorNorm( load ) );
}
Example #8
double DexAnalogMixin::FilterAcceleration( Vector3 acceleration ) {
	// Combine the new position sample with previous filtered value (recursive filtering).
	ScaleVector( filteredAcceleration, filteredAcceleration, filterConstant );
	AddVectors( filteredAcceleration, filteredAcceleration, acceleration );
	ScaleVector( filteredAcceleration, filteredAcceleration, 1.0 / (1.0 + filterConstant ));
	// Return the filtered value in place.
	CopyVector( acceleration, filteredAcceleration );
	return( VectorNorm( acceleration ) );
}
Example #9
// Computes the net force (load force) acting on the manipulandum, based on the 
// measured 3D force vectors from the two ATI sensors. Result is returned in the
// 3D vector 'load', while the magnitude of the load is returned as a double.
double DexAnalogMixin::ComputeLoadForce( Vector3 &load, Vector3 &force1, Vector3 &force2 ) {
	// Compute the net force on the object.
	// If the reference frames have been properly aligned,
	// the net force on the object is simply the sum of the forces 
	// applied to either side.
	load[X] = force1[X] + force2[X];
	load[Y] = force1[Y] + force2[Y];
	load[Z] = force1[Z] + force2[Z];
	// Return the magnitude of the net force on the object.
	return( VectorNorm( load ) );
}
/*!\rst
  Test vector norm.

  For several problem sizes, check:

  1. zeros have ``norm = 0``
  2. the norm of a single-element vector is the absolute value of that element: ``\|\alpha\| = |\alpha|``
  3. Scaling a vector by its own norm results in a vector with ``norm = 1.0``.
  4. Columns of a matrix whose columns are *known* to have unit-norm indeed yield ``norm = 1.0``.

  \return
    number of cases where the norm is wrong
\endrst*/
OL_WARN_UNUSED_RESULT int TestNorm() noexcept {
  int total_errors = 0;
  const int num_sizes = 3;
  const int sizes[num_sizes] = {11, 100, 1007};
  UniformRandomGenerator uniform_generator(34187);

  for (int i = 0; i < num_sizes; ++i) {
    std::vector<double> vector(sizes[i], 0.0);
    // zero vector has zero norm
    double norm = VectorNorm(vector.data(), vector.size());
    if (!CheckDoubleWithin(norm, 0.0, 0.0)) {
      ++total_errors;
    }

    BuildRandomVector(sizes[i], -1.0, 1.0, &uniform_generator, vector.data());

    // norm over zero elements is 0
    norm = VectorNorm(vector.data(), 0);
    if (!CheckDoubleWithin(norm, 0.0, 0.0)) {
      ++total_errors;
    }

    // norm of 1 element is |that element|
    norm = VectorNorm(vector.data(), 1);
    if (!CheckDoubleWithinRelative(norm, std::fabs(vector[0]), std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }

    // unit vectors have unit norm
    norm = VectorNorm(vector.data(), vector.size());
    std::vector<double> unit_vector(vector);
    VectorScale(unit_vector.size(), 1.0/norm, unit_vector.data());
    double unit_norm = VectorNorm(unit_vector.data(), unit_vector.size());
    if (!CheckDoubleWithinRelative(unit_norm, 1.0, std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }

    if (i < num_sizes - 1) {  // don't do the largest case
      std::vector<double> orthogonal_matrix(Square(sizes[i]));
      BuildOrthogonalSymmetricMatrix(sizes[i], orthogonal_matrix.data());
      for (int j = 0; j < sizes[i]; ++j) {
        double column_norm = VectorNorm(orthogonal_matrix.data() + j*sizes[i], sizes[i]);
        if (!CheckDoubleWithinRelative(column_norm, 1.0, std::sqrt(static_cast<double>(sizes[i]))*std::numeric_limits<double>::epsilon())) {
          ++total_errors;
        }
      }
    }
  }

  return total_errors;
}
Example #11
void GramSchmidtTranspose ( double * A, int n){
	int i,j,k;
	double a,max=0.;

	
	NormalizeRows (A, n);
	for ( i=0 ; i<n ; i++){
		a = VectorNorm(&A[i*n],n);
		for ( j=0 ; j<n ; j++)
			A[i*n+j] /= a;
		for ( j=i+1 ; j<n ; j++){
			a = VectorDotProduct(&A[j*n],&A[i*n],n);
			max = (max>fabs(a))?max:fabs(a);
			for ( k=0 ; k<n ; k++)
				A[j*n+k] -= a * A[i*n+k];
		}
	}
	if(max>0.1)
		printf ("Large change in GS\n");
}
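A hypothetical usage sketch (matrix values invented; assumes the NormalizeRows, VectorNorm and VectorDotProduct helpers used above are available): the routine orthonormalizes the n rows of A in place.

double A[9] = { 1.0, 0.1, 0.0,
                0.0, 1.0, 0.2,
                0.1, 0.0, 1.0 };
GramSchmidtTranspose(A, 3);
/* each row now has unit norm and the rows are mutually orthogonal, e.g.
   VectorDotProduct(A, A + 3, 3) is ~0 and VectorNorm(A, 3) is ~1 */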
int DexApparatus::CheckEarlyStarts(  int max_early_starts, float hold_time, float threshold, float filter_constant, 
									 const char *msg, const char *picture ) {
	
	const char *fmt;
	bool  error = false;

	int		early_starts = 0;
	int		first, last;
	int		i, j, index, frm;
	int		hold_frames = (int) floor( hold_time / tracker->samplePeriod );

	int N = 0;
	double tangential_velocity[DEX_MAX_MARKER_FRAMES];
	Vector3 delta;

	// First we should look for the start and end of the actual movement based on 
	// events such as when the subject reaches the first target. 
	FindAnalysisFrameRange( first, last );

	// Compute the instantaneous tangential velocity. 
	for ( i = first + 1; i < last; i++ ) {
		if ( acquiredManipulandumState[i].visibility && acquiredManipulandumState[i-1].visibility) {
			SubtractVectors( delta, acquiredManipulandumState[i].position, acquiredManipulandumState[i-1].position );
			ScaleVector( delta, delta, 1.0 / tracker->GetSamplePeriod() );
			tangential_velocity[i] = VectorNorm( delta );
			N++;
		}
		else {
			tangential_velocity[i] = tangential_velocity[i-1];
		}
	}
	// If there is no valid position data, signal an error.
	if ( N <= 0.0 ) {
		monitor->SendEvent( "No valid data." );
		error = true;
	}
	else {

		// Smooth the tangential velocity using a recursive filter.
		for ( frm = 1; frm < nAcqFrames; frm++ ) {
			tangential_velocity[frm] = ( filter_constant * tangential_velocity[frm-1] + tangential_velocity[frm] ) / ( 1.0 + filter_constant );
		}
		// Run the filter backwards to eliminate the phase lag.
		for ( frm = nAcqFrames - 2; frm >= 0; frm-- ) {
			tangential_velocity[frm] = ( filter_constant * tangential_velocity[frm+1] + tangential_velocity[frm] ) / ( 1.0 + filter_constant );
		}

		// Step through each marked movement trigger and verify that the velocity is 
		// close to zero when the trigger was sent.
		FindAnalysisEventRange( first, last );
		for ( i = first; i < last; i++ ) {
			if ( eventList[i].event == TRIGGER_MOVEMENT ) {
				index = TimeToFrame( eventList[i].time );
				for ( j = index; j > index - hold_frames && j > first; j-- ) {
					if ( tangential_velocity[j] > threshold ) {
						early_starts++;
						break;
					}
				}
			}
		}
		// Check whether the number of early starts exceeds the allowed maximum.
		error = ( early_starts > max_early_starts );

	}

	// If not, signal the error to the subject.
	// Here I take the approach of calling a method to signal the error.
	// We agree instead that the routine should either return a null pointer
	// if there is no error, or return the error message as a static string.
	if ( error ) {
		
		// If the user provided a message to signal the error, use it.
		// If not, generate a generic message.
		if ( !msg ) msg = "Too many false starts.";
		// The message could be different depending on whether there were too many
		// false starts, or if the manipulandum just could not be seen.
		if ( N <= 0.0 ) fmt = "%s\n Manipulandum not visible.";
		else fmt = "%s\n False Starts Detected: %d\nMaximum Allowed: %d";
		int response = fSignalError( MB_ABORTRETRYIGNORE, picture, fmt, msg, early_starts, max_early_starts );
		
		if ( response == IDABORT ) return( ABORT_EXIT );
		if ( response == IDRETRY ) return( RETRY_EXIT );
		if ( response == IDIGNORE ) return( IGNORE_EXIT );
		
	}
	
	// This is my means of signalling the event.
	monitor->SendEvent( "Early Starts OK.\n Measured: %d\n Maximum Allowed: %d", early_starts, max_early_starts );
	return( NORMAL_EXIT );
	
}
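The two smoothing loops above apply the same first-order recursive filter forward and then backward in time (a restatement of the code; the zero-phase property is a standard fact about forward-backward filtering, not something stated in the source):

v_k \leftarrow \frac{\lambda v_{k-1} + v_k}{1 + \lambda} \;(\text{forward pass}), \qquad
v_k \leftarrow \frac{\lambda v_{k+1} + v_k}{1 + \lambda} \;(\text{backward pass})

The forward pass delays the signal slightly; running the identical filter backward applies the same delay in the opposite direction, so the phase lag cancels while the data is smoothed twice.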
Example #13
long eci2quaternion(long T2000_int, double T2000_frac ,quaternion *Q,
                    long norder_int, FILE *eci)
/*----------------------------------------------------------------------------->
/ Purpose: return the spacecraft pointing quaternion at time tag "time"
/
/ Coded by: Gerhard L.H. Kruizinga                    07/28/98
/ Modified: Gerhard L.H. Kruizinga                    12/11/01
/ Modified: Gerhard L.H. Kruizinga                    08/23/04
/
/ input:   T2000_int  GPS time seconds past 01/01/2000 12:00:00
/          T2000_frac fractional seconds (sec)
/          eci        pointer to open eci file
/          norder_int order to be used for lagrangian orbit interpolation
/ output:  Q          quaternion giving pointing information of the spacecraft 
/                    (inertial to orbit_local frame, defined by position,
/                     velocity and cross product of these two vectors) 
/ 
/
/ eci2quaternion returns  0: for successful interpolation/quaternion conversion
/                         1: requested time past orbit file interval
/                        -1: requested time before orbit file interval
<-----------------------------------------------------------------------------*/
{
  long out,i;

  double xyz[N],llh[N],xyzdot[N],ae,flat;
  double Norm,Xbod[N],Ybod[N],Zbod[N];
  double Rot[N][N];

  char *eci_filename;

  static long first = 1;

  ae    = AE;
  flat  = FLAT;
  Q->q0 = -2.0;
  Q->q1 = -2.0;
  Q->q2 = -2.0;
  Q->q3 = -2.0;

  /*>>>> check if eci pointer is set; if not, query environment variable ECIFILE <<<<<*/

  if (eci == NULL && first)
  {
    first = 0;

    if ( (eci_filename = getenv("ECIFILE") ) == NULL  )
    {
      fprintf( stderr,"\neci2quaternion: No environment variable defined : ECIFILE\n");
      exit(1);
    }

    if ( ( eci  = fopen(eci_filename, "r" ) ) == NULL )
    {
      fprintf( stdout,"\neci2quaternion: ECIFILE  %s can not be opened. \n", eci_filename );
      exit(1);
    }
  }

  /*>>>> interpolate orbit to current gps time <<<<*/

  if ((out = OrbitEciInt(T2000_int,T2000_frac,xyz,llh, xyzdot,ae,flat,norder_int,eci)) != 0)
     return out;

  /* compute unit vectors for the body fixed xbod,ybod,zbod in the inertial
   coordinate system */

  /* z-axis */

  Zbod[0] = -xyz[0];
  Zbod[1] = -xyz[1];
  Zbod[2] = -xyz[2];

  Norm = VectorNorm(Zbod, N);

  loop(i,N) Zbod[i] = Zbod[i]/Norm;

  /* y-axis */

  Xbod[0] = xyzdot[0];
  Xbod[1] = xyzdot[1];
  Xbod[2] = xyzdot[2];

  CrossProduct(Zbod, Xbod, N , Ybod);

  Norm = VectorNorm(Ybod, N);

  loop(i,N) Ybod[i] = Ybod[i]/Norm;

  /* x-axis */

  CrossProduct(Ybod, Zbod, N , Xbod);

  Norm = VectorNorm(Xbod, N);


  loop(i,N) Xbod[i] = Xbod[i]/Norm;

/*
  fprintf(stderr,"\nXbod : %lf %lf %lf \n",Xbod[0],Xbod[1],Xbod[2]);
  fprintf(stderr,"Ybod : %lf %lf %lf \n",Ybod[0],Ybod[1],Ybod[2]);
  fprintf(stderr,"Zbod : %lf %lf %lf \n",Zbod[0],Zbod[1],Zbod[2]);
*/

  /* compute rotation matrix */

  Rot[0][0] = Xbod[0]; Rot[0][1] = Ybod[0]; Rot[0][2] = Zbod[0];
  Rot[1][0] = Xbod[1]; Rot[1][1] = Ybod[1]; Rot[1][2] = Zbod[1];
  Rot[2][0] = Xbod[2]; Rot[2][1] = Ybod[2]; Rot[2][2] = Zbod[2];

  /* convert rotation matrix into quaternions */

  Q->q0 = sqrt(1.0 + Rot[0][0] + Rot[1][1] + Rot[2][2])/2.0;

  if (fabs(Q->q0) < 1e-6)
  {
    fprintf(stderr,"eci2quaternion: Q0 = %.16g < 1e-6, quaternion may be corrupted at t = %.16g\n",
                   Q->q0,T2000_frac+(double)T2000_int);
  }

  Q->q1 = -(Rot[1][2] - Rot[2][1])/4.0/Q->q0;
  Q->q2 = -(Rot[2][0] - Rot[0][2])/4.0/Q->q0;
  Q->q3 = -(Rot[0][1] - Rot[1][0])/4.0/Q->q0;

  return 0L;
}
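For reference, the conversion above is the standard rotation-matrix-to-quaternion formula, written here with 1-indexed matrix entries (the minus signs in the C code are simply R_{32} - R_{23}, etc., written the other way around):

q_0 = \tfrac{1}{2}\sqrt{1 + R_{11} + R_{22} + R_{33}}, \qquad
q_1 = \frac{R_{32} - R_{23}}{4 q_0}, \quad
q_2 = \frac{R_{13} - R_{31}}{4 q_0}, \quad
q_3 = \frac{R_{21} - R_{12}}{4 q_0}

The guard on |q_0| is there because the formula divides by q_0, which approaches zero for rotations near 180 degrees.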
/**
 * Solves a linear least squares problem to obtain a N degree polynomial that
 * fits the specified input data as nearly as possible.
 *
 * Returns true if a solution is found, false otherwise.
 *
 * The input consists of two vectors of data points X and Y with indices 0..m-1
 * along with a weight vector W of the same size.
 *
 * The output is a vector B with indices 0..n that describes a polynomial
 * that fits the data, such that the sum of W[i] * W[i] * abs(Y[i] - (B[0] + B[1]
 * X[i] + B[2] X[i]^2 + ... + B[n] X[i]^n)) for all i between 0 and m-1 is
 * minimized.
 *
 * Accordingly, the weight vector W should be initialized by the caller with the
 * reciprocal square root of the variance of the error in each input data point.
 * In other words, an ideal choice for W would be W[i] = 1 / sqrt(var(Y[i])) =
 * 1 / stddev(Y[i]).
 * The weights express the relative importance of each data point.  If the
 * weights are all 1, then the data points are considered to be of equal
 * importance when fitting the polynomial.  It is a good idea to choose weights
 * that diminish the importance of data points that may have higher than usual
 * error margins.
 *
 * Errors among data points are assumed to be independent.  W is represented
 * here as a vector although in the literature it is typically taken to be a
 * diagonal matrix.
 *
 * That is to say, the function that generated the input data can be
 * approximated by y(x) ~= B[0] + B[1] x + B[2] x^2 + ... + B[n] x^n.
 *
 * The coefficient of determination (R^2) is also returned to describe the
 * goodness of fit of the model for the given data.  It is a value between 0
 * and 1, where 1 indicates perfect correspondence.
 *
 * This function first expands the X vector to a m by n matrix A such that
 * A[i][0] = 1, A[i][1] = X[i], A[i][2] = X[i]^2, ..., A[i][n] = X[i]^n, then
 * multiplies it by w[i].
 *
 * Then it calculates the QR decomposition of A yielding an m by m orthonormal
 * matrix Q and an m by n upper triangular matrix R.  Because R is upper
 * triangular (lower part is all zeroes), we can simplify the decomposition into
 * an m by n matrix Q1 and a n by n matrix R1 such that A = Q1 R1.
 *
 * Finally we solve the system of linear equations given by
 * R1 B = (Qtranspose W Y) to find B.
 *
 * For efficiency, we lay out A and Q column-wise in memory because we
 * frequently operate on the column vectors.  Conversely, we lay out R row-wise.
 *
 * http://en.wikipedia.org/wiki/Numerical_methods_for_linear_least_squares
 * http://en.wikipedia.org/wiki/Gram-Schmidt
 */
static bool SolveLeastSquares(const float* x,
                              const float* y,
                              const float* w,
                              uint32_t m,
                              uint32_t n,
                              float* out_b,
                              float* out_det) {
  // MSVC does not support variable-length arrays (used by the original Android
  // implementation of this function).
#if defined(COMPILER_MSVC)
  const uint32_t M_ARRAY_LENGTH =
      LeastSquaresVelocityTrackerStrategy::kHistorySize;
  const uint32_t N_ARRAY_LENGTH = Estimator::kMaxDegree;
  DCHECK_LE(m, M_ARRAY_LENGTH);
  DCHECK_LE(n, N_ARRAY_LENGTH);
#else
  const uint32_t M_ARRAY_LENGTH = m;
  const uint32_t N_ARRAY_LENGTH = n;
#endif

  // Expand the X vector to a matrix A, pre-multiplied by the weights.
  float a[N_ARRAY_LENGTH][M_ARRAY_LENGTH];  // column-major order
  for (uint32_t h = 0; h < m; h++) {
    a[0][h] = w[h];
    for (uint32_t i = 1; i < n; i++) {
      a[i][h] = a[i - 1][h] * x[h];
    }
  }

  // Apply the Gram-Schmidt process to A to obtain its QR decomposition.

  // Orthonormal basis, column-major order.
  float q[N_ARRAY_LENGTH][M_ARRAY_LENGTH];
  // Upper triangular matrix, row-major order.
  float r[N_ARRAY_LENGTH][N_ARRAY_LENGTH];
  for (uint32_t j = 0; j < n; j++) {
    for (uint32_t h = 0; h < m; h++) {
      q[j][h] = a[j][h];
    }
    for (uint32_t i = 0; i < j; i++) {
      float dot = VectorDot(&q[j][0], &q[i][0], m);
      for (uint32_t h = 0; h < m; h++) {
        q[j][h] -= dot * q[i][h];
      }
    }

    float norm = VectorNorm(&q[j][0], m);
    if (norm < 0.000001f) {
      // vectors are linearly dependent or zero so no solution
      return false;
    }

    float invNorm = 1.0f / norm;
    for (uint32_t h = 0; h < m; h++) {
      q[j][h] *= invNorm;
    }
    for (uint32_t i = 0; i < n; i++) {
      r[j][i] = i < j ? 0 : VectorDot(&q[j][0], &a[i][0], m);
    }
  }

  // Solve R B = Qt W Y to find B.  This is easy because R is upper triangular.
  // We just work from bottom-right to top-left calculating B's coefficients.
  float wy[M_ARRAY_LENGTH];
  for (uint32_t h = 0; h < m; h++) {
    wy[h] = y[h] * w[h];
  }
  for (uint32_t i = n; i-- != 0;) {
    out_b[i] = VectorDot(&q[i][0], wy, m);
    for (uint32_t j = n - 1; j > i; j--) {
      out_b[i] -= r[i][j] * out_b[j];
    }
    out_b[i] /= r[i][i];
  }

  // Calculate the coefficient of determination as 1 - (SSerr / SStot) where
  // SSerr is the residual sum of squares (variance of the error),
  // and SStot is the total sum of squares (variance of the data) where each
  // has been weighted.
  float ymean = 0;
  for (uint32_t h = 0; h < m; h++) {
    ymean += y[h];
  }
  ymean /= m;

  float sserr = 0;
  float sstot = 0;
  for (uint32_t h = 0; h < m; h++) {
    float err = y[h] - out_b[0];
    float term = 1;
    for (uint32_t i = 1; i < n; i++) {
      term *= x[h];
      err -= term * out_b[i];
    }
    sserr += w[h] * w[h] * err * err;
    float var = y[h] - ymean;
    sstot += w[h] * w[h] * var * var;
  }
  *out_det = sstot > 0.000001f ? 1.0f - (sserr / sstot) : 1;
  return true;
}
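A hypothetical usage sketch (values invented; in its original velocity-tracker context the function is file-static, so the call is purely illustrative): fit a degree-1 polynomial through points lying exactly on y = 2 + 3x with uniform weights, where n = 2 counts the polynomial coefficients, as the loops above use it.

const float x[3] = {0.0f, 1.0f, 2.0f};
const float y[3] = {2.0f, 5.0f, 8.0f};
const float w[3] = {1.0f, 1.0f, 1.0f};
float b[2] = {0.0f, 0.0f};
float r_squared = 0.0f;
bool ok = SolveLeastSquares(x, y, w, 3, 2, b, &r_squared);
// expect ok == true, b ~ {2.0f, 3.0f}, r_squared ~ 1.0f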
/*!\rst
  Test that ``A * x`` and ``A^T * x`` work, where ``A`` is a matrix and ``x`` is a vector.
  Outline:

  1. Check different input size combinations and no-transpose/transpose setups on small hand-checked problems.
  2. Exploit special property of Householder matrices: perform ``Ax`` and verify that the output has
     the property (see implementation).

  \return
    number of cases where matrix-vector multiply failed
\endrst*/
OL_WARN_UNUSED_RESULT int TestGeneralMatrixVectorMultiply() noexcept {
  int total_errors = 0;

  // simple, hand-checked problems that are minimally affected by floating point errors
  {  // hide scope
    static const int kSize_m = 3;  // rows
    static const int kSize_n = 5;  // cols

    double matrix_A[kSize_m*kSize_n] =
        {-7.4, 0.1, 9.1,  // first COLUMN of A (col-major storage)
         1.5, -8.8, -0.3,
         -2.9, 6.4, -9.7,
         -9.1, -6.6, 3.1,
         4.6, 3.0, -1.0
        };
    double matrix_A_T[kSize_n*kSize_m];
    const double test_vector1[kSize_n] = {1.3, -3.8, 4.2, 2.1, 0.2};
    const double test_vector2[kSize_n] = {0.5, -2.0, -0.2, -3.1, 1.9};
    const double result_vector1[kSize_m] = {-45.689999999999998, 47.190000000000005, -21.460000000000001};
    const double result_vector2[kSize_m] = {30.829999999999998, 42.530000000000001, -4.420000000000002};
    double product_vector1[kSize_m];

    const double test_vector3[kSize_m] = {-3.2, 0.4, 1.3};
    const double test_vector4[kSize_m] = {2.8, -4.2, 3.5};
    const double result_vector3[kSize_n] = {35.550000000000004, -8.710000000000001, -0.770000000000000, 30.510000000000002, -14.820000000000000};
    const double result_vector4[kSize_n] = {10.709999999999999, 40.110000000000014, -68.949999999999989, 13.090000000000002, -3.220000000000002};
    double product_vector2[kSize_n];

    MatrixTranspose(matrix_A, kSize_m, kSize_n, matrix_A_T);

    // using A as base
    GeneralMatrixVectorMultiply(matrix_A, 'N', test_vector1, 1.0, 0.0, kSize_m, kSize_n, kSize_m, product_vector1);
    for (int i = 0; i < kSize_m; ++i) {
      if (!CheckDoubleWithinRelative(product_vector1[i], result_vector1[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    GeneralMatrixVectorMultiply(matrix_A, 'N', test_vector2, 1.0, 0.0, kSize_m, kSize_n, kSize_m, product_vector1);
    for (int i = 0; i < kSize_m; ++i) {
      if (!CheckDoubleWithinRelative(product_vector1[i], result_vector2[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    GeneralMatrixVectorMultiply(matrix_A_T, 'N', test_vector3, 1.0, 0.0, kSize_n, kSize_m, kSize_n, product_vector2);
    for (int i = 0; i < kSize_n; ++i) {
      if (!CheckDoubleWithinRelative(product_vector2[i], result_vector3[i], 5*std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    GeneralMatrixVectorMultiply(matrix_A_T, 'N', test_vector4, 1.0, 0.0, kSize_n, kSize_m, kSize_n, product_vector2);
    for (int i = 0; i < kSize_n; ++i) {
      if (!CheckDoubleWithinRelative(product_vector2[i], result_vector4[i], 5*std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    // now with A^T as base
    GeneralMatrixVectorMultiply(matrix_A_T, 'T', test_vector1, 1.0, 0.0, kSize_n, kSize_m, kSize_n, product_vector1);
    for (int i = 0; i < kSize_m; ++i) {
      if (!CheckDoubleWithinRelative(product_vector1[i], result_vector1[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    GeneralMatrixVectorMultiply(matrix_A_T, 'T', test_vector2, 1.0, 0.0, kSize_n, kSize_m, kSize_n, product_vector1);
    for (int i = 0; i < kSize_m; ++i) {
      if (!CheckDoubleWithinRelative(product_vector1[i], result_vector2[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    GeneralMatrixVectorMultiply(matrix_A, 'T', test_vector3, 1.0, 0.0, kSize_m, kSize_n, kSize_m, product_vector2);
    for (int i = 0; i < kSize_n; ++i) {
      if (!CheckDoubleWithinRelative(product_vector2[i], result_vector3[i], 5*std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    GeneralMatrixVectorMultiply(matrix_A, 'T', test_vector4, 1.0, 0.0, kSize_m, kSize_n, kSize_m, product_vector2);
    for (int i = 0; i < kSize_n; ++i) {
      if (!CheckDoubleWithinRelative(product_vector2[i], result_vector4[i], 5*std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }
  }

  const int num_tests = 3;
  const int sizes[num_tests] = {5, 11, 20};

  UniformRandomGenerator uniform_generator(34187);
  // Here we check the performance of matrix-vector multiply using a very well-conditioned matrix
  // with a special property.  F(x), the Householder reflector for a vector x, has the following property:
  // F * x = [||x||_2, zeros(n-1,1)]  (for an arbitrary vector, ||F*y|| = ||y|| always)
  // Additionally, F is orthogonal so cond(F) = 1 and our results should be computable very accurately.
  for (int i = 0; i < num_tests; ++i) {
    std::vector<double> house_matrix(sizes[i]*sizes[i]);
    std::vector<double> vector(sizes[i]);
    std::vector<double> reflected_vector(sizes[i]);

    BuildRandomVector(sizes[i], -1.0, 1.0, &uniform_generator, vector.data());
    BuildHouseholderReflectorMatrix(vector.data(), sizes[i], house_matrix.data());
    GeneralMatrixVectorMultiply(house_matrix.data(), 'N', vector.data(), 1.0, 0.0, sizes[i], sizes[i], sizes[i], reflected_vector.data());

    double vector_norm = VectorNorm(vector.data(), sizes[i]);

    if (!CheckDoubleWithinRelative(std::fabs(reflected_vector[0]), vector_norm, 5*std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }
    for (int j = 1; j < sizes[i]; ++j) {
      if (!CheckDoubleWithin(reflected_vector[j], 0.0, 5*std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    // reflector is symmetric, so test T version too
    GeneralMatrixVectorMultiply(house_matrix.data(), 'T', vector.data(), 1.0, 0.0, sizes[i], sizes[i], sizes[i], reflected_vector.data());

    vector_norm = VectorNorm(vector.data(), sizes[i]);

    if (!CheckDoubleWithinRelative(std::fabs(reflected_vector[0]), vector_norm, 5*std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }
    for (int j = 1; j < sizes[i]; ++j) {
      if (!CheckDoubleWithin(reflected_vector[j], 0.0, 5*std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }
  }

  return total_errors;
}
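For reference, the Householder property exploited above, in its textbook form (BuildHouseholderReflectorMatrix is assumed to construct something equivalent; this statement is not taken from that library's documentation):

F = I - \frac{2 v v^T}{v^T v}, \qquad v = x + \operatorname{sign}(x_1)\,\|x\|_2\, e_1
\quad \Longrightarrow \quad F x = -\operatorname{sign}(x_1)\,\|x\|_2\, e_1

This is why the test only compares |reflected_vector[0]| against \|x\|_2 and expects the remaining entries to vanish; F is also symmetric and orthogonal, so the 'T' case must give the same result.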
/*!\rst
  Test that SPDMatrixInverse and CholeskyFactorLMatrixVectorSolve are working correctly
  against some especially ill-conditioned named matrices and some random inputs.
  Outline:

  1. Construct matrices, ``A``
  2. Select a solution, ``x``, randomly.
  3. Construct RHS by doing ``A*x``.
  4. Solve ``Ax = b`` using backsolve and direct-inverse; check the size of ``\|b - Ax\|``.

  \return
    number of test cases where the solver error is too large
\endrst*/
OL_WARN_UNUSED_RESULT int TestSPDLinearSolvers() {
  int total_errors = 0;

  // simple/small case where numerical factors are not present.
  // taken from: http://en.wikipedia.org/wiki/Cholesky_decomposition#Example
  {
    constexpr int size = 3;
    std::vector<double> matrix =
        {  4.0,   12.0, -16.0,
          12.0,   37.0, -43.0,
         -16.0,  -43.0,  98.0};

    const std::vector<double> cholesky_factor_L_truth =
        { 2.0, 6.0, -8.0,
          0.0, 1.0, 5.0,
          0.0, 0.0, 3.0};
    std::vector<double> rhs = {-20.0, -43.0, 192.0};
    std::vector<double> solution_truth = {1.0, 2.0, 3.0};

    int local_errors = 0;
    // check factorization is correct; only check lower-triangle
    if (ComputeCholeskyFactorL(size, matrix.data()) != 0) {
      ++total_errors;
    }
    for (int i = 0; i < size; ++i) {
      for (int j = 0; j < size; ++j) {
        if (j >= i) {
          if (!CheckDoubleWithinRelative(matrix[i*size + j], cholesky_factor_L_truth[i*size + j], 0.0)) {
            ++local_errors;
          }
        }
      }
    }

    // check the solve is correct
    CholeskyFactorLMatrixVectorSolve(matrix.data(), size, rhs.data());
    for (int i = 0; i < size; ++i) {
      if (!CheckDoubleWithinRelative(rhs[i], solution_truth[i], 0.0)) {
        ++local_errors;
      }
    }

    total_errors += local_errors;
  }

  const int num_tests = 10;
  const int num_test_sizes = 3;
  const int sizes[num_test_sizes] = {5, 11, 20};

  // following tolerances are based on numerical experiments and/or computations
  // of the matrix condition numbers (in MATLAB)
  const double tolerance_backsolve_max_list[3][num_test_sizes] =
      { {10*std::numeric_limits<double>::epsilon(), 100*std::numeric_limits<double>::epsilon(), 100*std::numeric_limits<double>::epsilon()},       // prolate
        {100*std::numeric_limits<double>::epsilon(), 1.0e4*std::numeric_limits<double>::epsilon(), 1.0e6*std::numeric_limits<double>::epsilon()},  // moler
        {50*std::numeric_limits<double>::epsilon(), 1.0e3*std::numeric_limits<double>::epsilon(), 5.0e4*std::numeric_limits<double>::epsilon()}    // random
      };
  const double tolerance_inverse_max_list[3][num_test_sizes] =
      { {1.0e-13, 1.0e-9, 1.0e-2},  // prolate
        {5.0e-13, 2.0e-8, 1.0e1},   // moler
        {7.0e-10, 7.0e-6, 1.0e2}    // random
      };

  UniformRandomGenerator uniform_generator(34187);
  // In each iteration, we form an ill-conditioned SPD matrix.  We are using
  // prolate, moler, and random matrices.
  // Then we compute L * L^T = A.
  // We want to test solutions of A * x = b using two methods:
  //   1) Inverse: using L, we form A^-1 explicitly (ILL-CONDITIONED!)
  //   2) Backsolve: we never form A^-1, but instead backsolve L and L^T against b.
  // The resulting residual norms, ||b - A*x||_2 are computed; we check that
  // backsolve is accurate and inverse is affected strongly by conditioning.
  for (int j = 0; j < num_tests; ++j) {
    for (int i = 0; i < num_test_sizes; ++i) {
      std::vector<double> matrix(sizes[i]*sizes[i]);
      std::vector<double> inverse_matrix(sizes[i]*sizes[i]);
      std::vector<double> cholesky_factor(sizes[i]*sizes[i]);
      std::vector<double> rhs(sizes[i]);
      std::vector<double> solution(2*sizes[i]);

      double tolerance_backsolve_max;
      double tolerance_inverse_max;
      switch (j) {
        case 0: {
          BuildProlateMatrix(kProlateDefaultParameter, sizes[i], matrix.data());
          tolerance_backsolve_max = tolerance_backsolve_max_list[0][i];
          tolerance_inverse_max = tolerance_inverse_max_list[0][i];
          break;
        }
        case 1: {
          BuildMolerMatrix(-1.66666666666, sizes[i], matrix.data());
          tolerance_backsolve_max = tolerance_backsolve_max_list[1][i];
          tolerance_inverse_max = tolerance_inverse_max_list[1][i];
          break;
        }
        default: {
          if (j <= 1 || j > num_tests) {
            OL_THROW_EXCEPTION(BoundsException<int>, "Invalid switch option.", j, 2, num_tests);
          } else {
            BuildRandomSPDMatrix(sizes[i], &uniform_generator, matrix.data());
            tolerance_backsolve_max = tolerance_backsolve_max_list[2][i];
            tolerance_inverse_max = tolerance_inverse_max_list[2][i];
            break;
          }
        }
      }
      // cholesky-factor A, form A^-1
      std::copy(matrix.begin(), matrix.end(), cholesky_factor.begin());
      if (ComputeCholeskyFactorL(sizes[i], cholesky_factor.data()) != 0) {
        ++total_errors;
      }
      SPDMatrixInverse(cholesky_factor.data(), sizes[i], inverse_matrix.data());

      // set b = A*random_vector.  This way we know the solution explicitly.
      // this also allows us to ignore ||A|| in our computations when we
      // normalize the RHS.
      BuildRandomVector(sizes[i], 0.0, 1.0, &uniform_generator, solution.data());
      SymmetricMatrixVectorMultiply(matrix.data(), solution.data(), sizes[i], rhs.data());

      // re-scale the RHS so that its norm can be ignored in later computations
      double rhs_norm = VectorNorm(rhs.data(), sizes[i]);
      VectorScale(sizes[i], 1.0/rhs_norm, rhs.data());
      std::copy(rhs.begin(), rhs.end(), solution.begin());

      // Solve L * L^T * x1 = b (backsolve) and compute x2 = A^-1 * b
      CholeskyFactorLMatrixVectorSolve(cholesky_factor.data(), sizes[i], solution.data());
      SymmetricMatrixVectorMultiply(inverse_matrix.data(), rhs.data(), sizes[i], solution.data() + sizes[i]);

      double norm_residual_via_backsolve = ResidualNorm(matrix.data(), solution.data(), rhs.data(), sizes[i]);
      double norm_residual_via_inverse = ResidualNorm(matrix.data(), solution.data() + sizes[i], rhs.data(), sizes[i]);

      if (norm_residual_via_backsolve > tolerance_backsolve_max) {
        ++total_errors;
        OL_ERROR_PRINTF("experiment %d, size[%d] = %d, norm_backsolve = %.18E > %.18E = tol\n", j, i, sizes[i],
               norm_residual_via_backsolve, tolerance_backsolve_max);
      }

      if (norm_residual_via_inverse > tolerance_inverse_max) {
        ++total_errors;
        OL_ERROR_PRINTF("experiment %d, size[%d] = %d, norm_inverse = %.18E > %.18E = tol\n", j, i, sizes[i],
               norm_residual_via_inverse, tolerance_inverse_max);
      }
    }
  }

  return total_errors;
}
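As context for the two very different tolerance tables above (a standard backward-error argument, e.g. Higham, not something stated in the source): a backward-stable triangular backsolve keeps the normalized residual near machine precision regardless of conditioning, whereas solving through an explicitly formed inverse can inflate it by roughly a factor of \kappa(A):

\frac{\|b - A x_{backsolve}\|}{\|A\|\,\|x\|} = O(\varepsilon_{mach}), \qquad
\frac{\|b - A x_{inverse}\|}{\|A\|\,\|x\|} = O\big(\kappa(A)\,\varepsilon_{mach}\big)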
/*!\rst
  Test several vector (BLAS-1) functions:

  1. scale: ``y = \alpha * y``
  2. AXPY: ``y = \alpha * x + y``
  3. dot product: ``c = x^T * y``, ``c`` is scalar
  4. norm: ``\|x\|``
\endrst*/
OL_WARN_UNUSED_RESULT int TestVectorFunctions() noexcept {
  int total_errors = 0;
  const int size = 4;
  const double orig_input[size] = {1.5, 2.0, 0.0, -3.2};
  double input[size] = {0};
  const double positive_scale = 2.0;
  const double negative_scale = -3.0;
  const double zero_scale = 0.0;

  // test VectorScale
  {
    const double result_positive_scale[size] = {3.0, 4.0, 0.0, -6.4};
    const double result_negative_scale[size] = {-4.5, -6.0, 0.0, 9.6};
    const double result_zero_scale[size] = {0.0, 0.0, 0.0, 0.0};

    std::copy(orig_input, orig_input + size, input);
    VectorScale(size, positive_scale, input);
    for (int i = 0; i < size; ++i) {
      if (!CheckDoubleWithinRelative(input[i], result_positive_scale[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    std::copy(orig_input, orig_input + size, input);
    VectorScale(size, negative_scale, input);
    for (int i = 0; i < size; ++i) {
      if (!CheckDoubleWithinRelative(input[i], result_negative_scale[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    std::copy(orig_input, orig_input + size, input);
    VectorScale(size, zero_scale, input);
    for (int i = 0; i < size; ++i) {
      if (!CheckDoubleWithinRelative(input[i], result_zero_scale[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }
  }

  // test VectorAXPY
  {
    const double vector_x[size] = {0.0, 0.5, -1.3, 3.0};
    const double result_positive_scale[size] = {1.5, 3.0, -2.6, 2.8};
    const double result_negative_scale[size] = {1.5, 0.5, 3.9, -12.2};
    const double result_zero_scale[size] = {1.5, 2.0, 0.0, -3.2};

    std::copy(orig_input, orig_input + size, input);
    VectorAXPY(size, positive_scale, vector_x, input);
    for (int i = 0; i < size; ++i) {
      if (!CheckDoubleWithinRelative(input[i], result_positive_scale[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    std::copy(orig_input, orig_input + size, input);
    VectorAXPY(size, negative_scale, vector_x, input);
    for (int i = 0; i < size; ++i) {
      if (!CheckDoubleWithinRelative(input[i], result_negative_scale[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }

    std::copy(orig_input, orig_input + size, input);
    VectorAXPY(size, zero_scale, vector_x, input);
    for (int i = 0; i < size; ++i) {
      if (!CheckDoubleWithinRelative(input[i], result_zero_scale[i], std::numeric_limits<double>::epsilon())) {
        ++total_errors;
      }
    }
  }

  // test dot product
  {
    const double input2[size] = {0.0};
    const double input3[size] = {0.0, 0.5, -1.3, 3.0};
    double output;
    const double result2 = 0.0;
    const double result3 = 2.0*0.5 - 3.2*3.0;
    output = DotProduct(orig_input, input2, size);
    if (!CheckDoubleWithinRelative(output, result2, std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }
    output = DotProduct(orig_input, input3, size);
    if (!CheckDoubleWithinRelative(output, result3, std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }
    // check symmetry
    output = DotProduct(input3, orig_input, size);
    if (!CheckDoubleWithinRelative(output, result3, std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }
  }

  // test norm
  {
    const double truth_norm_input = 4.060788100849391;
    double norm = VectorNorm(orig_input, size);
    if (!CheckDoubleWithinRelative(norm, truth_norm_input, std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }

    std::copy(orig_input, orig_input + size, input);
    VectorScale(size, positive_scale, input);
    norm = VectorNorm(input, size);
    if (!CheckDoubleWithinRelative(norm, positive_scale*truth_norm_input, std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }

    std::copy(orig_input, orig_input + size, input);
    VectorScale(size, negative_scale, input);
    norm = VectorNorm(input, size);
    if (!CheckDoubleWithinRelative(norm, -negative_scale*truth_norm_input, std::numeric_limits<double>::epsilon())) {
      ++total_errors;
    }

    std::copy(orig_input, orig_input + size, input);
    VectorScale(size, zero_scale, input);
    norm = VectorNorm(input, size);
    if (!CheckDoubleWithin(norm, 0.0, 0.0)) {
      ++total_errors;
    }

    total_errors += TestNorm();
  }
  return total_errors;
}
Example #18
double _RKOCPfhg_implicitZ(Vector nlp_x, Vector nlp_h, Vector nlp_g) {
    RKOCP r = thisRKOCP;
    double phi = 0;
    int i_node;
    int i, j, k;
    int m = r->nlp_m;
    int p = r->nlp_p;
    int n_nodes = r->n_nodes;
    int n_parameters = r->n_parameters;
    int n_states = r->n_states;
    int n_controls = r->n_controls;
    int n_inequality = r->ocp->n_inequality;
    int n_initial = r->ocp->n_initial;
    int n_terminal = r->ocp->n_terminal;
    int only_compute_lte = r->only_compute_lte;

    int rk_stages = r->rk_stages;
    double **W = r->rk_w->e;
    //double **rk_a = r->rk_a->e;
    //double *rk_b = r->rk_b->e;
    double *rk_c = r->rk_c->e;
    //double *rk_e = r->rk_e->e;

    Vector T = r->T;
    Matrix Yw = r->Yw;
    Vector yc = r->yc;
    Vector uc = r->uc;
    Vector pc = r->pc;
    //Vector local_error = r->local_error;
    //Vector lte = r->lte;

    Vector Gamma = r->Gamma;
    Vector G = r->G;
    Vector Psi = r->Psi;

    Matrix *Ydata = r->Ydata;  // Ydata[i] => Ystage
    Matrix *Udata = r->Udata;
    Vector *Zstage = r->Kstage;
    Matrix Ystage;
    double ti;

    // FIX: preallocate the variables
    Vector y_old = VectorNew(n_states+1);
    Vector *Z_old = NULL;
    Z_old = VectorArrayNew(r->rk_stages, n_states+1);
    Vector f = VectorNew(n_states+1);
    Vector res = VectorNew(rk_stages*(n_states+1));
    Vector dZ = VectorNew(rk_stages*(n_states+1));
    Vector y = VectorNew(n_states+1);
    Matrix L;// = MatrixNew(rk_stages*(n_states+1), rk_stages*(n_states+1));
    Matrix U;// = MatrixNew(rk_stages*(n_states+1), rk_stages*(n_states+1));
    int *P;// = (int *)malloc(rk_stages*(n_states+1)*sizeof(int));
    
    if (only_compute_lte == NO) {
        if (m > 0) {
            VectorSetAllTo(nlp_h, 0.0);
        }
        if (p > 0) {
            VectorSetAllTo(nlp_g, 0.0);
        }
    }
    for (i = 0; i < n_parameters; i++) {
        pc->e[i] = nlp_x->e[i];
    }
    for (j = 0; j < n_states; j++) {
        Yw->e[0][j] = nlp_x->e[n_parameters+j];
        y->e[j] = Yw->e[0][j];
    }
    Yw->e[0][n_states] = 0;
    y->e[n_states] = 0;

    if (n_controls > 0) {
        _setUdata(r, nlp_x);
        for (i = 0; i < n_controls; i++) {
            uc->e[i] = nlp_x->e[n_parameters+n_states+i];
        }
    }

    if (only_compute_lte == NO) {
        // form Gamma(y(t_initial), p)
        if (n_initial > 0) {
            r->ocp->initial_constraints(y, pc, Gamma);
            for (i = 0; i < n_initial; i++) {
                nlp_h->e[i] = Gamma->e[i];
            }
        }
        // form g(y,u,p,t_initial)
        if (n_inequality > 0) {
            r->ocp->inequality_constraints(y, uc, pc, T->e[0], G);
            for (i = 0; i < n_inequality; i++) {
                nlp_g->e[i] = G->e[i];
            }
        }
    }
    
    // integrate the ODEs using an implicit Runge-Kutta method

    int MAX_NEWTON_ITER = 15;
    int iter;
    int err;
    
    for (i_node = 0; i_node < (n_nodes-1); i_node++) {
        L = r->Lnode[i_node];
        U = r->Unode[i_node];
        P = r->pLUnode[i_node];
        // form H = (1/h) W (x) I - I (x) J
        // get the L, U, p factors of H
        double h = T->e[i_node+1] - T->e[i_node];
        err = _formIRKmatrixZ(r, y, uc, pc, T->e[i_node], h, L, U, P);
        if (err != 0) {
printf("IRK factor error: t = %e\n", T->e[i_node]);
//pause();
            r->integration_fatal_error = YES;
            if (m > 0) VectorSetAllTo(nlp_h, 0);
            if (p > 0) VectorSetAllTo(nlp_g, 0);
            return 0;
        }
        // initialize Kstage
        // FIX: extrapolate previous result
        if (i_node == 0 ) {
            for (i = 0; i < rk_stages; i++) {
                for (j = 0; j < n_states+1; j++) {
                    Zstage[i]->e[j] = 0;
                }
            }
        } else {
            // given y0, Z0, y1, tau = h1/h0, estimate Z1
            // using a Lagrange interpolation formula
            double tau;
            tau = h / (T->e[i_node] - T->e[i_node-1]);
            _estimateZ(r, y_old, Z_old, y, tau, Zstage);
        }
        Ystage = Ydata[i_node];
        
        // Y_i = y + Z_i
        // Ydot_i = (1/h) sum_{j = 1, s} w_{i,j} * Z_j
        // set res_i = -Ydot_i
        for (i = 0; i < rk_stages; i++) {
            for (j = 0; j < n_states+1; j++) {
                Ystage->e[i][j] = y->e[j] + Zstage[i]->e[j];
            }
            for (k = 0; k < n_states+1; k++) {
                //Ydot[i]->e[k] = 0.0;
                res->e[i*(n_states+1) + k] = 0;
            }
            for (j = 0; j < rk_stages; j++) {
                for (k = 0; k < n_states+1; k++) {
                    //Ydot[i]->e[k] += W[i][j] * Zstage[j]->e[k] / h;
                    res->e[i*(n_states+1) + k] -= W[i][j] * Zstage[j]->e[k] / h;
                }
            }
        }

        for (iter = 0; iter < MAX_NEWTON_ITER; iter++) {
            for (i = 0; i < rk_stages; i++) {
                ti = T->e[i_node] + h*rk_c[i];
                // get uc = u(t_i)
                for (j = 0; j < n_controls; j++) {
                    uc->e[j] = Udata[i_node]->e[i][j];
                }
                // get yc = Y_i
                for (j = 0; j < n_states+1; j++) {
                    yc->e[j] = Ystage->e[i][j];
                }
                // form f(Y_i, u_i, p, t_i)
                f->e[n_states] = r->ocp->differential_equations(yc, uc, pc, ti, f);
                // form res_i = Ydot_i - f(Y_i, u_i, p, t_i)
                for (j = 0; j < n_states+1; j++) {
                    res->e[i*(n_states+1) + j] += f->e[j];
                }
            }
            // solve H*dZ = -res = -(ydot - f(y,u,p,t))
            err = LUSolve(L, U, P, res, dZ);
            // Z = Z + dZ
            for (i = 0; i < rk_stages; i++) {
                for (j = 0; j < n_states+1; j++) {
                    Zstage[i]->e[j] += dZ->e[i*(n_states+1) + j];
                }
            }
            // Y_i = y + Z_i
            // Ydot_i = (1/h) sum_{j = 1, s} w_{i,j} * Z_j
            // set res_i = -Ydot_i
            for (i = 0; i < rk_stages; i++) {
                for (j = 0; j < n_states+1; j++) {
                    Ystage->e[i][j] = y->e[j] + Zstage[i]->e[j];
                }
                for (k = 0; k < n_states+1; k++) {
                    //Ydot[i]->e[k] = 0.0;
                     res->e[i*(n_states+1) + k] = 0;
                }
                for (j = 0; j < rk_stages; j++) {
                    for (k = 0; k < n_states+1; k++) {
                        //Ydot[i]->e[k] += W[i][j] * Zstage[j]->e[k] / h;
                        res->e[i*(n_states+1) + k] -= W[i][j] * Zstage[j]->e[k] / h;
                    }
                }
            }
            // test for convergence
            double norm_dZ = VectorNorm(dZ);
            if (norm_dZ <= 0.01*r->tolerance) {
                break;
            }
        }
//printf("i_node: %d, iter: %d\n", i_node, iter);
        // y_old = y
        // Z_old = Z
        for (j = 0; j < n_states+1; j++) {
            for (i = 0; i < rk_stages; i++) {
                Z_old[i]->e[j] = Zstage[i]->e[j];
            }
            y_old->e[j] = y->e[j];
        }
        
        // Assumes R-K methods with c[s] = 1
        // form Yw[i_node+1] = Ystage_s
        // update y = Yw[i_node+1]
        for (j = 0; j < n_states+1; j++) {
            Yw->e[i_node+1][j] = Ystage->e[rk_stages-1][j];
            y->e[j] = Yw->e[i_node+1][j];
        }
        
        // update u = u[i_node+1];
        int i_node1 = i_node+1;
        if ((r->c_type == CONSTANT) && (i_node1 == (r->n_nodes-1))) {
            i_node1 -= 1;
        }
        
        for (i = 0; i < n_controls; i++) {
            uc->e[i] = nlp_x->e[n_parameters + n_states + i_node1*n_controls + i];
        }
        
        // compute g(y(t+h),u(t+h),p,t+h)
        if (only_compute_lte == NO) {
            if (n_inequality > 0) {
                r->ocp->inequality_constraints(y, uc, pc, T->e[i_node+1], G);
                for (i = 0; i < n_inequality; i++) {
                    nlp_g->e[i + (i_node+1) * n_inequality] = G->e[i];
                }
            }
        }
        // FIX:  compute LTE 
    }

    if (only_compute_lte == YES) {
        return 0;
    }

    // form phi, Psi
    phi = r->ocp->terminal_constraints(yc, pc, Psi);
    for (i = 0; i < n_terminal; i++) {
        nlp_h->e[i + n_initial] = Psi->e[i];
    }

    VectorDelete(y_old);
    VectorArrayDelete(Z_old, r->rk_stages);
    VectorDelete(f);
    VectorDelete(res);
    VectorDelete(y);
    VectorDelete(dZ);
    //MatrixDelete(L);
    //MatrixDelete(U);
    //free(P);
//pause();
    return phi + Yw->e[n_nodes-1][n_states];
}
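A restatement of the stage equations the Newton iteration above solves, taken from the in-code comments and written out as formulas (Z_i are the stage increments, h the step, w_{ij} the entries of W, c_i the abscissae):

Y_i = y_n + Z_i, \qquad \frac{1}{h}\sum_{j=1}^{s} w_{ij} Z_j = f\big(Y_i, u_i, p, \; t_n + c_i h\big), \quad i = 1,\dots,s

Each Newton step solves H \,\Delta Z = -res with H \approx \tfrac{1}{h} W \otimes I - I \otimes J (factored once per node into L, U, P by _formIRKmatrixZ) and iterates until \|\Delta Z\| falls below 0.01 * tolerance or MAX_NEWTON_ITER is reached.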
int DexApparatus::CheckMovementAmplitude(  double min, double max, 
										   double dirX, double dirY, double dirZ,
										   const char *msg, const char *picture ) {
	
	const char *fmt;
	bool  error = false;
	
	int i, k, m;

	int first, last;
	
	double N = 0.0, sd;
	Matrix3x3 Sxy;
	Vector3  delta, mean;
	Vector3   direction, vect;

	// TODO: Should normalize the direction vector here.
	direction[X] = dirX;
	direction[Y] = dirY;
	direction[Z] = dirZ;

	// First we should look for the start and end of the actual movement based on 
	// events such as when the subject reaches the first target. 
	FindAnalysisFrameRange( first, last );

	// Compute the mean position. 
	N = 0.0;
	CopyVector( mean, zeroVector );
	for ( i = first; i < last; i++ ) {
		if ( acquiredManipulandumState[i].visibility ) {
			// No-op conditional; apparently left in place as a debugger breakpoint hook for outlier positions.
			if ( abs( acquiredManipulandumState[i].position[X] ) > 1000 ) {
				i = i;
			}
			N++;
			AddVectors( mean, mean, acquiredManipulandumState[i].position );
		}
	}
	// If there is no valid position data, signal an error.
	if ( N <= 0.0 ) {
		monitor->SendEvent( "Movement extent - No valid data." );
		sd = 0.0;
		error = true;
	}
	else {
	
		// This is the mean.
		ScaleVector( mean, mean, 1.0 / N );

		// Compute the sums required for the variance calculation.
		CopyMatrix( Sxy, zeroMatrix );
		N = 0.0;

		for ( i = first; i < last; i ++ ) {
			if ( acquiredManipulandumState[i].visibility ) {
				N++;
				SubtractVectors( delta, acquiredManipulandumState[i].position, mean );
				for ( k = 0; k < 3; k++ ) {
					for ( m = 0; m < 3; m++ ) {
						Sxy[k][m] += delta[k] * delta[m];
					}
				}
			}
		}
		
		// If we have some data, compute the directional variance and then
		// the standard deviation along that direction;
		// This is just a scalar times a matrix.

		// Sxy has to be a double or we risk underflow when computing the sums.
		for ( k = 0; k < 3; k++ ) {
			vect[k] = 0;
			for ( m = 0; m < 3; m++ ) {
				Sxy[k][m] /= N;
			}
		}
		// This is a matrix multiply.
		for ( k = 0; k < 3; k++ ) {
			for ( m = 0; m < 3; m++ ) {
				vect[k] += Sxy[m][k] * direction[m];
			}
		}
		// Compute the length of the vector, which is the variance along 
		// the specified direction. Then take the square root of that 
		// magnitude to get the standard deviation along that direction.
		sd = sqrt( VectorNorm( vect ) );

		// Check if the computed value is in the desired range.
		error = ( sd < min || sd > max );

	}

	// If not, signal the error to the subject.
	// Here I take the approach of calling a method to signal the error.
	// We agree instead that the routine should either return a null pointer
	// if there is no error, or return the error message as a static string.
	if ( error ) {
		
		// If the user provided a message to signal the error, use it.
		// If not, generate a generic message.
		if ( !msg ) msg = "Movement extent outside range.";
		// The message could be different depending on whether the manipulandum
		// was not moved enough, or if it just could not be seen.
		if ( N <= 0.0 ) fmt = "%s\n Manipulandum not visible.";
		else fmt = "%s\n Measured: %f\n Desired range: %f - %f\n Direction: < %.2f %.2f %.2f>";
		int response = fSignalError( MB_ABORTRETRYIGNORE, picture, fmt, msg, sd, min, max, dirX, dirY, dirZ );
		
		if ( response == IDABORT ) return( ABORT_EXIT );
		if ( response == IDRETRY ) return( RETRY_EXIT );
		if ( response == IDIGNORE ) return( IGNORE_EXIT );
		
	}
	
	// This is my means of signalling the event.
	monitor->SendEvent( "Movement extent OK.\n Measured: %f\n Desired range: %f - %f\n Direction: < %.2f %.2f %.2f>", sd, min, max, dirX, dirY, dirZ );
	return( NORMAL_EXIT );
	
}
long eci2grace_quaternion(long T2000_int, double T2000_frac , double clkcorrA,
                          double clkcorrB,
                          quaternion *Q_A, quaternion *Q_B, long norder_int, 
                          FILE *eciA, FILE *eciB)
/*----------------------------------------------------------------------------->
/ Purpose: return quaternion for GRACE A and GRACE B based on both orbits in
/          eci format at time tag "time"
/
/ Coded by: Gerhard L.H. Kruizinga                    07/28/98
/ Modified: Gerhard L.H. Kruizinga                    12/11/01
/ Modified: Gerhard L.H. Kruizinga                    01/16/02
/
/ input:   T2000_int  GPS time seconds past 01/01/2000 12:00:00
/          T2000_frac fractional seconds (sec)
/          clkcorrA   clock correction for Satellite A (added to time tag)
/          clkcorrB   clock correction for Satellite B (added to time tag)
/          eciA       pointer to open eciA file
/          eciB       pointer to open eciB file
/          norder_int order to be used for lagrangian orbit interpolation
/ output:  Q_A        quaternion giving pointing information of GRACE A S/C
/                    (inertial to orbit_local frame, defined by position and the
/                     Line Of Sight (LOS) vector between GRACE A and GRACE B) 
/          Q_B        quaternion giving pointing information of GRACE B S/C
/                    (inertial to orbit_local frame, defined by position and the
/                     Line Of Sight (LOS) vector between GRACE A and GRACE B) 
/ 
/
/ eci2grace_quaternion returns  0: for successful interpolation/quaternion conversion
/                         1: requested time past orbit file interval
/                        -1: requested time before orbit file interval
<-----------------------------------------------------------------------------*/
{
  long out,i;

  double xyzA[N],llhA[N],xyzdotA[N],ae,flat;
  double xyzB[N],llhB[N],xyzdotB[N];
  double LOSepochA[N];
  double LOSepochB[N];
  double Norm,Xbod[N],Ybod[N],Zbod[N];
  double Rot[N][N];
  double time,time_frac;

  long   time_int;

  ae    = AE;
  flat  = FLAT;

  Q_A->q0 = -2.0;
  Q_A->q1 = -2.0;
  Q_A->q2 = -2.0;
  Q_A->q3 = -2.0;

  Q_B->q0 = -2.0;
  Q_B->q1 = -2.0;
  Q_B->q2 = -2.0;
  Q_B->q3 = -2.0;

  /*>>>> interpolate orbit to current gps time for GRACEA <<<<*/

  time = T2000_int + T2000_frac;
  time += clkcorrA;
  time_int = (long) time;  
  time_frac = time - (double) time_int;

  if ((out = OrbitEciInt(time_int,time_frac,xyzA,llhA, xyzdotA,ae,flat,
       norder_int,eciA)) != 0) return out;

  if ((out = OrbitEciInt_second(time_int,time_frac,LOSepochA,llhB, xyzdotB,ae,flat,
       norder_int,eciB)) != 0) return out;

  /*>>>> interpolate orbit to current gps time for GRACEB <<<<*/

  time = T2000_int + T2000_frac;
  time += clkcorrB;
  time_int = (long) time;  
  time_frac = time - (double) time_int;

  if ((out = OrbitEciInt(time_int,time_frac,LOSepochB,llhA, xyzdotA,ae,flat,
       norder_int,eciA)) != 0) return out;

  if ((out = OrbitEciInt_second(time_int,time_frac,xyzB,llhB, xyzdotB,ae,flat,
       norder_int,eciB)) != 0) return out;

  /*>>>> compute LOS vector GRACE A -> GRACE B at GRACE A epoch<<<<*/

  LOSepochA[0] -= xyzA[0];
  LOSepochA[1] -= xyzA[1];
  LOSepochA[2] -= xyzA[2];

  /*>>>> compute LOS vector GRACE B -> GRACE A at GRACE B epoch<<<<*/

  LOSepochB[0] -= xyzB[0];
  LOSepochB[1] -= xyzB[1];
  LOSepochB[2] -= xyzB[2];

  /* compute unit vectors for the body fixed xbod,ybod,zbod in the inertial
   coordinate system for GRACE A */

  /* x-axis */

  Xbod[0] = LOSepochA[0];
  Xbod[1] = LOSepochA[1];
  Xbod[2] = LOSepochA[2];

  Norm = VectorNorm(Xbod, N);

  loop(i,N) Xbod[i] = Xbod[i]/Norm;

  /* y-axis */

  Zbod[0] = -xyzA[0];
  Zbod[1] = -xyzA[1];
  Zbod[2] = -xyzA[2];

  Norm = VectorNorm(Zbod, N);

  loop(i,N) Zbod[i] = Zbod[i]/Norm;

  CrossProduct(Zbod, Xbod, N , Ybod);

  Norm = VectorNorm(Ybod, N);

  loop(i,N) Ybod[i] = Ybod[i]/Norm;

  /* z-axis */

  CrossProduct(Xbod, Ybod, N , Zbod);

  Norm = VectorNorm(Zbod, N);

  loop(i,N) Zbod[i] = Zbod[i]/Norm;

/*
  fprintf(stderr,"\nLOS A->B: %lf %lf %lf\n\n",LOSepochA[0],LOSepochA[1],LOSepochA[2]);
  fprintf(stderr,"\nLOS B->A: %lf %lf %lf\n\n",LOSepochB[0],LOSepochB[1],LOSepochB[2]);
  fprintf(stderr,"\nXbod A: %lf %lf %lf \n",Xbod[0],Xbod[1],Xbod[2]);
  fprintf(stderr,"Ybod A: %lf %lf %lf \n",Ybod[0],Ybod[1],Ybod[2]);
  fprintf(stderr,"Zbod A: %lf %lf %lf \n",Zbod[0],Zbod[1],Zbod[2]);
*/

  /* compute rotation matrix */

  Rot[0][0] = Xbod[0]; Rot[0][1] = Ybod[0]; Rot[0][2] = Zbod[0];
  Rot[1][0] = Xbod[1]; Rot[1][1] = Ybod[1]; Rot[1][2] = Zbod[1];
  Rot[2][0] = Xbod[2]; Rot[2][1] = Ybod[2]; Rot[2][2] = Zbod[2];

  /* convert rotation matrix into quaternions */

  Q_A->q0 = sqrt(1.0 + Rot[0][0] + Rot[1][1] + Rot[2][2])/2.0;

  if (fabs(Q_A->q0) < 1e-6)
  {
    fprintf(stderr,"eci2grace_quaternion: Q0_A = %.16g < 1e-6, quaternion may be corrupted at t = %.16g\n",
                   Q_A->q0,T2000_frac+(double)T2000_int);
  }

  Q_A->q1 = -(Rot[1][2] - Rot[2][1])/4.0/Q_A->q0;
  Q_A->q2 = -(Rot[2][0] - Rot[0][2])/4.0/Q_A->q0;
  Q_A->q3 = -(Rot[0][1] - Rot[1][0])/4.0/Q_A->q0;

  /* compute unit vectors for the body fixed xbod,ybod,zbod in the inertial
   coordinate system for GRACE B */

  /* x-axis */

  Xbod[0] = LOSepochB[0];
  Xbod[1] = LOSepochB[1];
  Xbod[2] = LOSepochB[2];

  Norm = VectorNorm(Xbod, N);

  loop(i,N) Xbod[i] = Xbod[i]/Norm;

  /* y-axis */

  Zbod[0] = -xyzB[0];
  Zbod[1] = -xyzB[1];
  Zbod[2] = -xyzB[2];

  Norm = VectorNorm(Zbod, N);

  loop(i,N) Zbod[i] = Zbod[i]/Norm;

  CrossProduct(Zbod, Xbod, N , Ybod);

  Norm = VectorNorm(Ybod, N);

  loop(i,N) Ybod[i] = Ybod[i]/Norm;

  /* z-axis */

  CrossProduct(Xbod, Ybod, N , Zbod);

  Norm = VectorNorm(Zbod, N);

  loop(i,N) Zbod[i] = Zbod[i]/Norm;

/*
  fprintf(stderr,"\nXbod B: %lf %lf %lf \n",Xbod[0],Xbod[1],Xbod[2]);
  fprintf(stderr,"Ybod B: %lf %lf %lf \n",Ybod[0],Ybod[1],Ybod[2]);
  fprintf(stderr,"Zbod B: %lf %lf %lf \n",Zbod[0],Zbod[1],Zbod[2]);
*/

  /* compute rotation matrix */

  Rot[0][0] = Xbod[0]; Rot[0][1] = Ybod[0]; Rot[0][2] = Zbod[0];
  Rot[1][0] = Xbod[1]; Rot[1][1] = Ybod[1]; Rot[1][2] = Zbod[1];
  Rot[2][0] = Xbod[2]; Rot[2][1] = Ybod[2]; Rot[2][2] = Zbod[2];

  /* convert rotation matrix into quaternions */

  Q_B->q0 = sqrt(1.0 + Rot[0][0] + Rot[1][1] + Rot[2][2])/2.0;

  if (fabs(Q_B->q0) < 1e-6)
  {
    fprintf(stderr,"eci2grace_quaternion: Q0_B = %.16g < 1e-6, quaternion may be corrupted at t = %.16g\n",
                   Q_B->q0,T2000_frac+(double)T2000_int);
  }

  Q_B->q1 = -(Rot[1][2] - Rot[2][1])/4.0/Q_B->q0;
  Q_B->q2 = -(Rot[2][0] - Rot[0][2])/4.0/Q_B->q0;
  Q_B->q3 = -(Rot[0][1] - Rot[1][0])/4.0/Q_B->q0;

  return 0L;
}
Example #21
  However, a numerically (backward) stable algorithm will compute solutions
  with small relative residual norms *regardless* of conditioning.  Hence
  coupled with knowledge of the particular algorithm for solving ``A*x = b``,
  residual norm is a valuable measure of correctness.

  Suppose ``x`` (computed solution) satisfies: ``(A + \delta A)*x = b``.  Then:
  ``\|r\| / (\|A\| * \|x\|) \le \|\delta A\| / \|A\|``
  So large ``\|r\|`` indicates a large backward error, implying the linear solver
  is not backward stable (and hence should not be used).
\endrst*/
double ResidualNorm(double const * restrict A, double const * restrict x, double const * restrict b, int size) noexcept {
  std::vector<double> y(b, b + size);  // y = b
  GeneralMatrixVectorMultiply(A, 'N', x, -1.0, 1.0, size, size, size, y.data());  // y -= A * x

  double norm = VectorNorm(y.data(), size);
  return norm;
}

bool CheckDoubleWithin(double value, double truth, double tolerance) noexcept {
  double diff = std::fabs(value - truth);
  bool passed = diff <= tolerance;

  if (passed != true) {
    OL_ERROR_PRINTF("value = %.18E, truth = %.18E, diff = %.18E, tol = %.18E\n", value, truth, diff, tolerance);
  }
  return passed;
}

bool CheckDoubleWithinRelativeWithThreshold(double value, double truth, double tolerance, double threshold) noexcept {
  double denom = std::fabs(truth);