Example #1
0
void LatticeImageAndKernel
( const Matrix<Field>& B,
        Matrix<Field>& M,
        Matrix<Field>& K,
  const LLLCtrl<Base<Field>>& ctrl )
{
    EL_DEBUG_CSE

    Matrix<Field> BCopy( B );
    Matrix<Field> U, R;
    auto info = LLL( BCopy, U, R, ctrl );
    const Int rank = info.rank;
    const Int n = B.Width();
    M = BCopy(ALL,IR(0,rank));
    K = U(ALL,IR(rank,n));

    // Reduce the columns of U that corresponded to the kernel
    LLL( K, ctrl );

    // Rather than explicitly inverting the Gram matrix of the kernel basis
    // as suggested by Cohen, we can simply solve a least squares problem
    //
    // NOTE: 'R' is reused for the least squares solution
    // TODO(poulson): Support other options than just "Babai rounding", e.g.,
    // Nulling and Cancelling (with optimal ordering)
    LeastSquares( NORMAL, K, M, R );
    Round( R );
    Gemm( NORMAL, NORMAL, Field(-1), K, R, Field(1), M );
}
int main(int argc, char *argv[]){
	char filename[256];
	int verbose = 0;
	// process command line args

	/* Show help when no arguments were given or the first argument is
	 * "-help".  BUG FIX: the original tested argv[2] here, which is NULL
	 * when argc == 2 and past the array when argc == 1 -- undefined
	 * behaviour either way; the flag belongs in argv[1]. */
	if (argc == 1 || strcmp(argv[1],"-help") == 0){
		printf("\n\nThis program calculates the least squares fit of a set of data. \nSupported file format is .csv. \nCurrently only supports comma as delimiter.\n\nDefault usage is: leastsquares.exe \"filename\" [-v]\n\n\t-help \tDisplays this help message\n\t-v\tVerbose mode: displays errors/status, prints the\n\t\tdata that it reads for verification purposes.\n\n");
		return 0;
	}

	/* argv[1] is the data file.  Copy with an explicit bound so an
	 * over-long path cannot overflow the 256-byte buffer (the original
	 * used an unbounded strcpy). */
	strncpy(filename, argv[1], sizeof filename - 1);
	filename[sizeof filename - 1] = '\0';

	/* Optional "-v" flag: only inspect argv[2] when it exists.
	 * BUG FIX: the original dereferenced argv[2] unconditionally,
	 * crashing when the program was run with just a filename. */
	if (argc > 2 && strcmp(argv[2],"-v") == 0){
		verbose = 1;
	}
	// command line args processed; read the data and fit it

	dataset input;
	graph output;

	input = GetInput(filename,verbose); // go open the file
	output = LeastSquares(input.length,input.x,input.y,input.yerr);
	/* Only report a result when every fitted quantity is a real number. */
	if(isnan(output.m) == 0 && isnan(output.merr) == 0 && isnan(output.c) == 0 && isnan(output.cerr) == 0){
		printf("The least squares fit of this data is: y = (%.2f \361 %.2f)x + (%.2f \361 %.2f) \n",output.m,output.merr,output.c,output.cerr);
	}
	return 0;
}
Example #3
0
bool LeastSquares(const vector<Vector>& data,int dependentVariable,
		  Vector& coeffs)
{
  assert(!data.empty());
  int n=data[0].n;
  assert((int)data.size() >= n);
  assert(dependentVariable >= 0 && dependentVariable < n);
  Matrix mdata((int)data.size(),n-1);
  Vector vdep((int)data.size());
  for(size_t i=0;i<data.size();i++) {
    assert(data[i].n == n);
    for(int j=0;j<n;j++) {
      if(j < dependentVariable)
	mdata(i,j) = data[i](j);
      else if(j > dependentVariable)
	mdata(i,j-1) = data[i](j);
      else
	vdep(i) = data[i](j);
    }
  }
  Vector tempcoeffs; Real offset;
  if(!LeastSquares(mdata,vdep,tempcoeffs,offset)) return false;
  coeffs.resize(n);
  for(int j=0;j<n;j++) {
    if(j < dependentVariable)
      coeffs(j) = tempcoeffs(j);
    else if(j > dependentVariable)
      coeffs(j) = tempcoeffs(j-1);
    else
      coeffs(j) = offset;
  }
  return true;
}
Example #4
0
/**
 * Automatically selects the dependent variable as the dimension with the
 * smallest robust standard deviation, then delegates to LeastSquares.
 * 'dependentVariable' is an output: it receives the chosen index.
 */
bool LeastSquaresPickDependent(const vector<Vector>& data,
			       int& dependentVariable,Vector& coeffs)
{
  //pick the dimension with the smallest standard deviation
  Vector stddev;
  StdDev_Robust(data,stddev);
  // minElement reports the minimizing index through its pointer argument;
  // the minimum value itself is not needed (the original stored it in an
  // unused local 'minstddev').
  stddev.minElement(&dependentVariable);
  return LeastSquares(data,dependentVariable,coeffs);
}
Example #5
0
void Tikhonov
( Orientation orientation,
  const SparseMatrix<F>& A,
  const Matrix<F>& B,
  const SparseMatrix<F>& G,
        Matrix<F>& X, 
  const LeastSquaresCtrl<Base<F>>& ctrl )
{
    DEBUG_CSE

    // Materialize op(A) explicitly, where op is chosen by 'orientation'
    // =================================================================
    SparseMatrix<F> opA;
    switch( orientation )
    {
    case NORMAL:    opA = A;             break;
    case TRANSPOSE: Transpose( A, opA ); break;
    default:        Adjoint( A, opA );   break;
    }

    const Int height = opA.Height();
    const Int width = opA.Width();
    const Int nrhs = B.Width();
    const bool tall = ( height >= width );

    // Embed into a larger problem by appending the regularizer G:
    // below op(A) in the tall case, beside it in the wide case
    // ===========================================================
    SparseMatrix<F> embedOp;
    if( tall )
        VCat( opA, G, embedOp );
    else
        HCat( opA, G, embedOp );

    Matrix<F> embedRHS;
    Zeros( embedRHS, embedOp.Height(), nrhs );
    if( tall )
    {
        // embedRHS := [B; 0]
        auto topRows = embedRHS( IR(0,height), IR(0,nrhs) );
        topRows = B;
    }
    else
        embedRHS = B;

    // Solve the embedded least squares problem
    // ========================================
    Matrix<F> embedSol;
    LeastSquares( NORMAL, embedOp, embedRHS, embedSol, ctrl );

    // Extract the solution X from the embedded solution
    // =================================================
    if( tall )
        X = embedSol;
    else
        X = embedSol( IR(0,width), IR(0,nrhs) );
}
Example #6
0
// Fits a calibration line y = slope*x + intercept to the collected sample
// vectors x and y (presumably class members populated elsewhere -- TODO
// confirm) and publishes the result to the current Radio instance.
//
// NOTE(review): 'ret' is initialized to -1 and never updated -- the
// AddCalibrationToRegistry call that would set it is commented out, so the
// method always returns -1 even after applying the calibration.  Confirm
// with callers whether this failure return is intentional.
HRESULT FrequencyOffset::Calibration()
{
	HRESULT ret = -1;

	// Least squares fit over all x.size() samples; results land in the
	// 'slope' and 'intercept' output arguments.
	LeastSquares(&slope, &intercept, &x[0], &y[0], x.size());

	logger->Log(LOG_INFO, L"Calibration result:\r\ny = %fx + %f\r\n", slope, intercept);

	// Push the fitted line into the active radio's calibration state.
	Radio::Current()->SetCalibrationLineSlope(slope);
	Radio::Current()->SetCalibrationLineIntercept(intercept);

	//ret = AddCalibrationToRegistry(slope, intercept);
	return ret; 
}
Example #7
0
// Solves a Riccati equation via the matrix sign function (Roberts' method):
// W (2n x 2n) is overwritten with sgn(W); then, with M := sgn(W) - I
// partitioned as M = [ML, MR] (each 2n x n), X is obtained from the least
// squares solve ML X = -MR.
//
// Cleanup: the original also partitioned W into quadrants WTL/WTR/WBL/WBR
// via PartitionDownDiagonal, but those views were never used, so the dead
// partition has been removed.
void Ricatti( Matrix<F>& W, Matrix<F>& X, SignCtrl<Base<F>> ctrl )
{
    DEBUG_ONLY(CallStackEntry cse("Ricatti"))
    Sign( W, ctrl );
    const Int n = W.Height()/2;

    // (ML, MR) = sgn(W) - I
    UpdateDiagonal( W, F(-1) );

    // Solve for X in ML X = -MR
    Matrix<F> ML, MR;
    PartitionRight( W, ML, MR, n );
    Scale( F(-1), MR );
    LeastSquares( NORMAL, ML, MR, X );
}
Example #8
0
// Distributed-memory Tikhonov-regularized least squares.  Forms op(A)
// explicitly (A, A^T, or A^H per 'orientation'), appends the regularizer G
// to build an embedded operator -- stacked below op(A) when m >= n (solving
// min_X ||op(A)X - B||^2 + ||G X||^2 against the padded right-hand side
// [B; 0]), or concatenated beside op(A) when m < n -- solves the embedded
// problem with LeastSquares, and extracts X.  All distributed objects share
// A's MPI communicator.
void Tikhonov
( Orientation orientation,
  const DistSparseMatrix<F>& A,
  const DistMultiVec<F>& B,
  const DistSparseMatrix<F>& G,
        DistMultiVec<F>& X, 
  const LeastSquaresCtrl<Base<F>>& ctrl )
{
    DEBUG_CSE
    mpi::Comm comm = A.Comm();
    
    // Explicitly form W := op(A)
    // ==========================
    DistSparseMatrix<F> W(comm);
    if( orientation == NORMAL )
        W = A;
    else if( orientation == TRANSPOSE )
        Transpose( A, W );
    else
        Adjoint( A, W );

    const Int m = W.Height();
    const Int n = W.Width();
    const Int numRHS = B.Width();

    // Embed into a higher-dimensional problem via appending regularization
    // ====================================================================
    DistSparseMatrix<F> WEmb(comm);
    if( m >= n )
        VCat( W, G, WEmb ); 
    else
        HCat( W, G, WEmb );

    DistMultiVec<F> BEmb(comm);
    Zeros( BEmb, WEmb.Height(), numRHS );
    if( m >= n )
    {
        // BEmb := [B; 0]
        // --------------
        // Copy each locally owned row of B into the top of BEmb using the
        // queue/process update mechanism (BEmb is zero-initialized, so the
        // trailing rows stay zero).
        const Int mLocB = B.LocalHeight();
        BEmb.Reserve( mLocB*numRHS );
        for( Int iLoc=0; iLoc<mLocB; ++iLoc )
        {
            const Int i = B.GlobalRow(iLoc);
            for( Int j=0; j<numRHS; ++j )
                BEmb.QueueUpdate( i, j, B.GetLocal(iLoc,j) );
        }
        BEmb.ProcessQueues();
    }
    else
        BEmb = B;

    // Solve the higher-dimensional problem
    // ====================================
    DistMultiVec<F> XEmb(comm);
    LeastSquares( NORMAL, WEmb, BEmb, XEmb, ctrl );

    // Extract the solution
    // ====================
    if( m >= n )
        X = XEmb;
    else
        // In the wide case only the leading n rows of the embedded solution
        // correspond to X.
        GetSubmatrix( XEmb, IR(0,n), IR(0,numRHS), X );
}