int LINEAR::recognize( const ColumnVector &v ) const
{
  int label;
  if( v.length() < liblinear->prob.n ){
    ERR_PRINT("Dimension of feature vector is too small. %d < %d\n", v.length(), liblinear->prob.n );
    exit(1);
  }
  else if( v.length() > liblinear->prob.n )
    ERR_PRINT("Warning: Dimension of feature vector is too large. %d > %d\n", v.length(), liblinear->prob.n );

  // set up the feature vector
  struct feature_node *x = new struct feature_node[ liblinear->prob.n + 2 ];
  int idx = 0;
  for( int i = 0; i < liblinear->prob.n; i++ ){
    x[ idx ].index = i;
    x[ idx ].value = v( i );
    if( is_scaling ){
      // skip this feature (do not advance idx) unless the conditions below hold
      if( ( feature_max[ x[ idx ].index ] != feature_min[ x[ idx ].index ] ) && ( x[ idx ].value != 0 ) ){
        x[ idx ].value = scaling( x[ idx ].index, x[ idx ].value );
        if( liblinear->model->bias < 0 )
          x[ idx ].index++; // indices are 1-based
        ++idx;
      }
    }
    else{
      // skip this feature (do not advance idx) unless the condition below holds
      if( x[ idx ].value != 0 ){
        if( liblinear->model->bias < 0 )
          x[ idx ].index++; // indices are 1-based
        ++idx;
      }
    }
  }
  if( liblinear->model->bias >= 0 ){
    x[ idx ].index = liblinear->prob.n;
    x[ idx ].value = liblinear->model->bias;
    idx++;
  }
  x[ idx ].index = -1; // sentinel terminating the sparse vector

  // predict
  if( probability ){
    if( liblinear->model->param.solver_type != L2R_LR ){
      ERR_PRINT( "probability output is only supported for logistic regression\n" );
      exit(1);
    }
    label = predict_probability( liblinear->model, x, prob_estimates );
    // for(j=0;j<nclass;j++)
    //   printf(" %g",prob_estimates[j]);
    // printf("\n");
  }
  else
    label = predict( liblinear->model, x );
  if( is_y_scaling )
    label = y_scaling( label );
  delete[] x; // allocated with new[], so delete[] (not delete) is required
  return label;
}
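// A minimal sketch of the sparse-vector convention recognize() builds above, using
// only documented liblinear calls (predict, struct feature_node); the helper name
// and dense-input signature are ours, not part of the original code. liblinear
// indices are 1-based, only non-zero entries are stored, and the array ends with
// index = -1. (Models trained with a bias term also need the extra bias node, as
// recognize() shows.)
int classify_dense(const struct model *m, const double *v, int dim)
{
  struct feature_node *x = new struct feature_node[dim + 2];
  int idx = 0;
  for (int i = 0; i < dim; ++i)
  {
    if (v[i] != 0)
    {
      x[idx].index = i + 1;   // 1-based feature index
      x[idx].value = v[i];
      ++idx;
    }
  }
  x[idx].index = -1;          // sentinel terminating the vector
  int label = (int)predict(m, x);
  delete[] x;
  return label;
}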
void SpinAdapted::xsolve_AxeqB(const Matrix& a, const ColumnVector& b, ColumnVector& x)
{
  FORTINT ar = a.Nrows();
  int bc = 1;
  int info = 0;
  FORTINT* ipiv = new FORTINT[ar];

  double* bwork = new double[ar];
  for(int i = 0; i < ar; ++i)
    bwork[i] = b.element(i);

  // LAPACK expects column-major storage, so transpose while copying
  double* workmat = new double[ar*ar];
  for(int i = 0; i < ar; ++i)
    for(int j = 0; j < ar; ++j)
      workmat[i*ar+j] = a.element(j,i);

  GESV(ar, bc, workmat, ar, ipiv, bwork, ar, info);

  delete[] ipiv;
  delete[] workmat;

  for(int i = 0; i < ar; ++i)
    x.element(i) = bwork[i];
  delete[] bwork;

  if(info != 0)
  {
    pout << "Xsolve failed with info error " << info << endl;
    abort();
  }
}
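// A quick usage sketch for xsolve_AxeqB (values arbitrary): solve a small dense
// 2x2 system. Assumes newmat-style construction with the << loader.
void example_xsolve()
{
  Matrix a(2, 2);
  a << 4.0 << 1.0
    << 1.0 << 3.0;
  ColumnVector b(2);
  b << 1.0 << 2.0;
  ColumnVector x(2);
  SpinAdapted::xsolve_AxeqB(a, b, x); // on return, x ~= A^{-1} b
}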
void CActorFromContinuousActionGradientPolicy::receiveError(double critic, CStateCollection *oldState, CAction *Action, CActionData *data)
{
  gradientETraces->updateETraces(Action->getDuration());

  CContinuousActionData *contData = NULL;
  if (data)
    contData = dynamic_cast<CContinuousActionData *>(data);
  else
    contData = dynamic_cast<CContinuousActionData *>(Action->getActionData());

  assert(gradientPolicy->getRandomController());
  ColumnVector *noise = gradientPolicy->getRandomController()->getLastNoise();

  if (DebugIsEnabled('a'))
  {
    DebugPrint('a', "ActorCritic Noise: ");
    policyDifference->saveASCII(DebugGetFileHandle('a'));
  }

  for (int i = 0; i < gradientPolicy->getNumOutputs(); i++)
  {
    gradientFeatureList->clear();
    gradientPolicy->getGradient(oldState, i, gradientFeatureList);
    gradientETraces->addGradientETrace(gradientFeatureList, noise->element(i));
  }
  gradientPolicy->updateGradient(gradientETraces->getGradientETraces(), critic * getParameter("ActorLearningRate"));
}
/* Solve Ax = b using the Jacobi Method. */
Matrix jacobi(Matrix& A, Matrix& b)
{
  ColumnVector x0(A.rows()); // our initial guess
  ColumnVector x1(A.rows()); // our next guess

  // STEP 1: Choose an initial guess (all ones).
  fill(x0.begin(), x0.end(), 1);

  // STEP 2: While convergence is not reached, iterate.
  ColumnVector r = static_cast<ColumnVector>(A*x0 - b);
  while (r.length() > 1)
  {
    for (int i = 0; i < A.cols(); i++)
    {
      double sum = 0;
      for (int j = 0; j < A.cols(); j++)
      {
        if (j == i) continue;
        sum = sum + A(i,j) * x0(j,0);
      }
      x1(i,0) = (b(i,0) - sum) / A(i,i);
    }
    x0 = x1;
    r = static_cast<ColumnVector>(A*x0 - b);
  }
  shared_ptr<Matrix> final_x(new Matrix(static_cast<Matrix>(x0)));
  return *final_x;
}
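// The Jacobi iteration above is only guaranteed to converge when A is strictly
// diagonally dominant (|A(i,i)| > sum over j != i of |A(i,j)| for every row).
// A minimal usage sketch under that assumption, using the same 0-based
// Matrix/ColumnVector types as the function itself:
void example_jacobi()
{
  Matrix A(2, 2);
  A(0,0) = 4; A(0,1) = 1;
  A(1,0) = 2; A(1,1) = 5;   // strictly diagonally dominant
  Matrix b(2, 1);
  b(0,0) = 1; b(1,0) = 2;
  Matrix x = jacobi(A, b);  // x approximately satisfies Ax = b
}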
CvPoint3D32f Model::getRealCoordinatesFromVoxelMap(int u, int v, int w)
{
  float modelContents[] = { u, v, w, 1 };
  ColumnVector modelMat(4);
  modelMat << modelContents;   // modelMat holds [u,v,w,1]

  // [x,y,z,1] = convMat^{-1} * [u,v,w,1]
  // (equivalent to cvSolve(&A, &b, &x), which solves Ax = b for x)
  ColumnVector worldMat = convMat.i() * modelMat;

  float x = worldMat.element(0);
  float y = worldMat.element(1);
  float z = worldMat.element(2);

  //LOG4CPLUS_DEBUG(myLogger, "(u,v,w)=("<< u <<","<<v<<","<<w<<") -> (x,y,z)=(" << x <<","<<y<<","<<z<<")");
  CvPoint3D32f p = cvPoint3D32f(x, y, z);
  return p;
}
/* Solve Ax = b using the Method of Steepest Descent. */
Matrix steepestDescent(Matrix& A, Matrix& b)
{
  // the Method of Steepest Descent *requires* a symmetric matrix.
  if (isSymmetric(A) == false)
  {
    shared_ptr<Matrix> nullMat(new Matrix(0,0));
    return *nullMat;
  }

  /* STEP 1: Start with a guess. Our guess is all ones. */
  ColumnVector x(A.cols());
  fill(x.begin(), x.end(), 1);

  /* This is NOT an infinite loop. There's a break statement inside. */
  while (true)
  {
    /* STEP 2: Calculate the residual r_0 = b - Ax_0 */
    ColumnVector r = static_cast<ColumnVector>(b - A*x);
    if (r.length() < .01) break;

    /* STEP 3: Calculate alpha = (r^T r) / (r^T A r) */
    double alpha = (r.transpose() * r)(0,0) / (r.transpose() * A * r)(0,0);

    /* STEP 4: Calculate new X_1 where X_1 = X_0 + alpha*r_0 */
    x = x + alpha * r;
  }
  shared_ptr<Matrix> final_x(new Matrix(static_cast<Matrix>(x)));
  return *final_x;
}
/* Solve Ax = b using the conjugate gradient method. */
Matrix conjugateGradient(Matrix& A, Matrix& b)
{
  double error_tol = .5;  // error tolerance
  int max_iter = 200;     // max # of iterations

  ColumnVector x(A.rows());      // the solution we will iteratively arrive at
  fill(x.begin(), x.end(), 0);   // start from the zero vector rather than uninitialised memory

  int i = 0;
  ColumnVector r = static_cast<ColumnVector>(b - A*x);
  ColumnVector d = r;
  double sigma_old = 0; // will be used later on, in the loop
  double sigma_new = (r.transpose() * r)(0,0);
  double sigma_0 = sigma_new;

  while (i < max_iter && sigma_new > error_tol * error_tol * sigma_0)
  {
    ColumnVector q = A * d;
    double alpha = sigma_new / (d.transpose() * q)(0,0);
    x = x + alpha * d;
    // recompute the residual from scratch periodically to limit round-off drift
    if (i % 50 == 0)
      r = static_cast<ColumnVector>(b - A*x);
    else
      r = r - alpha * q;
    sigma_old = sigma_new;
    sigma_new = (r.transpose() * r)(0,0);
    double beta = sigma_new / sigma_old;
    d = r + beta * d;
    i++;
  }
  shared_ptr<Matrix> final_x(new Matrix(static_cast<Matrix>(x)));
  return *final_x;
}
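// Conjugate gradient additionally assumes A is symmetric positive definite; with
// the loose defaults above (error_tol = .5 stops once the residual norm has merely
// halved), expect only a rough answer. A usage sketch in the same style:
void example_conjugate_gradient()
{
  Matrix A(2, 2);
  A(0,0) = 4; A(0,1) = 1;
  A(1,0) = 1; A(1,1) = 3;   // symmetric positive definite
  Matrix b(2, 1);
  b(0,0) = 1; b(1,0) = 2;
  Matrix x = conjugateGradient(A, b);
}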
// Matrix A's first n columns are orthonormal
// so A.Columns(1,n).t() * A.Columns(1,n) is the identity matrix.
// Fill out the remaining columns of A to make them orthonormal
// so A.t() * A is the identity matrix
void extend_orthonormal(Matrix& A, int n)
{
  REPORT
  Tracer et("extend_orthonormal");
  int nr = A.nrows();
  int nc = A.ncols();
  if (nc > nr) Throw(IncompatibleDimensionsException(A));
  if (n > nc) Throw(IncompatibleDimensionsException(A));

  ColumnVector SSR;
  { Matrix A1 = A.Columns(1,n); SSR = A1.sum_square_rows(); }
  for (int i = n; i < nc; ++i)
  {
    // pick the row with the smallest SSQ
    int k; SSR.minimum1(k);
    // orthogonalise column with 1 at element k, 0 elsewhere
    // next line is rather inefficient
    ColumnVector X = - A.Columns(1, i) * A.SubMatrix(k, k, 1, i).t();
    X(k) += 1.0;
    // normalise
    X /= sqrt(X.SumSquare());
    // update row sums of squares
    for (k = 1; k <= nr; ++k) SSR(k) += square(X(k));
    // load new column into matrix
    A.Column(i+1) = X;
  }
}
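// A small usage sketch (newmat, 1-based indexing): start from a single normalised
// column and let extend_orthonormal fill in the remaining two, after which
// A.t() * A should be the 3x3 identity.
void example_extend_orthonormal()
{
  Matrix A(3, 3);
  A = 0.0;
  A(1,1) = 1.0;             // first column is e_1, already orthonormal
  extend_orthonormal(A, 1);
}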
double CLocalRBFRegression::doRegression(ColumnVector *vector, DataSubset *subset)
{
  // cout << "NNs for Input " << vector->t() << endl;
  DataSubset::iterator it = subset->begin();

  ColumnVector *rbfFactors = getRBFFactors(vector, subset);

  // weighted sum of the outputs of the neighbours in the subset
  it = subset->begin();
  double value = 0;
  for (int i = 0; it != subset->end(); it++, i++)
  {
    value += rbfFactors->element(i) * (*output)[*it];
  }

  // normalise by the sum of the RBF weights
  double sum = rbfFactors->sum();
  if (sum > 0)
  {
    value = value / sum;
  }
  //printf("Value: %f %f ", value, sum);
  //cout << rbfFactors->t();
  return value;
}
ColumnVector find_top_n(ColumnVector W, int n, double cindex)
{
  ColumnVector out(n);
  int count = 0;
  double max = 0.0;
  out(0) = cindex;

  // count the non-zero weights and replace each weight by its absolute value
  for (int i = 0; i < W.rows(); i++)
  {
    if (W(i) != 0.0) count += 1;
    if (W(i) < 0.0) W(i) = (-1) * W(i);
  }
  out(1) = count;

  // out(2..n-1): indices of the largest absolute weights, in decreasing order
  int k = 2;
  while (k < n)
  {
    out(k) = 0;
    max = W(0);
    for (int i = 1; i < W.rows(); i++)
    {
      if (W(i) > max)
      {
        max = W(i);
        out(k) = i;
      }
    }
    W(out(k)) = 0.0; // zero it out so the next pass finds the runner-up
    k += 1;
  }
  return out;
}
template <class Type, unsigned int N>
ColumnVector<Type,N> RowVector<Type,N>::T() const
{
  Trace("RowVector<Type,N>", "T()");

  ColumnVector<Type,N> Result;
  for (unsigned int i = 1; i <= N; ++i)
    Result.Value(i) = this->Value(i);
  return Result;
}
void test1(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 1 - traditional, bad\n";

   // traditional sum of squares and products method of calculation
   // but not adjusting means; maybe subject to round-off error

   // make matrix of predictor values with 1s into col 1 of matrix
   int npred1 = npred+1;        // number of cols including col of ones.
   Matrix X(nobs,npred1);
   X.Column(1) = 1.0;

   // load x1 and x2 into X
   // [use << rather than = when loading arrays]
   X.Column(2) << x1;  X.Column(3) << x2;

   // vector of Y values
   ColumnVector Y(nobs); Y << y;

   // form sum of squares and product matrix
   // [use << rather than = for copying Matrix into SymmetricMatrix]
   SymmetricMatrix SSQ; SSQ << X.t() * X;

   // calculate estimate
   // [bracket last two terms to force this multiplication first]
   // [ .i() means inverse, but inverse is not explicitly calculated]
   ColumnVector A = SSQ.i() * (X.t() * Y);

   // Get variances of estimates from diagonal elements of inverse of SSQ
   // get inverse of SSQ - we need it for finding D
   DiagonalMatrix D; D << SSQ.i();
   ColumnVector V = D.AsColumn();

   // Calculate fitted values and residuals
   ColumnVector Fitted = X * A;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   DiagonalMatrix Hat;  Hat << X * (X.t() * X).i() * X.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";

   // make vector of standard errors
   ColumnVector SE(npred1);
   for (int i = 1; i <= npred1; i++) SE(i) = sqrt(V(i)*ResVar);
   // use concatenation function to form matrix and use matrix print
   // to get two columns
   cout << setw(11) << setprecision(5) << (A | SE) << endl;

   cout << "\nObservations, fitted value, residual value, hat value\n";

   // use concatenation again; select only columns 2 to 3 of X
   cout << setw(9) << setprecision(3) <<
      (X.Columns(2,3) | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}
void NonLinearLeastSquares::Fit(const ColumnVector& Data, ColumnVector& Parameters)
{
   Tracer tr("NonLinearLeastSquares::Fit");
   n_param = Parameters.Nrows();
   n_obs = Data.Nrows();
   DataPointer = &Data;
   FindMaximum2::Fit(Parameters, Lim);
   cout << "\nConverged\n";
}
/**
 * Compute a distance metric between two columns of a
 * matrix. <b>Note that the indexes are *1* based (not 0) as that is
 * Newmat's convention</b>. Note that dist(M,i,j) must equal dist(M,j,i).
 *
 * @param M - Matrix whose columns represent individual items to be clustered.
 * @param col1Ix - Column index to be compared (1 based).
 * @param col2Ix - Column index to be compared (1 based).
 *
 * @return - "Distance" or "dissimilarity" metric between two columns of matrix.
 */
double GuassianRadial::dist(const Matrix &M, int col1Ix, int col2Ix) const
{
  double dist = 0;
  if (col1Ix == col2Ix)
    return 0;
  ColumnVector V = M.Column(col1Ix) - M.Column(col2Ix);
  dist = V.SumSquare() / (2 * m_Sigma * m_Sigma);
  dist = exp(-1 * dist);
  return dist;
}
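// The value returned above is the Gaussian radial kernel
// exp(-||v_i - v_j||^2 / (2 sigma^2)), so it is really a similarity score:
// nearby columns give values near 1 and distant columns approach 0. A
// hypothetical sketch (the constructor taking sigma is assumed, not shown here):
void example_radial()
{
  Matrix M(2, 2);
  M << 0.0 << 3.0
    << 0.0 << 4.0;                   // columns (0,0) and (3,4), ||diff||^2 = 25
  GuassianRadial radial(1.0);        // hypothetical sigma = 1 constructor
  double k = radial.dist(M, 1, 2);   // exp(-25/2)
}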
const ColumnVector* RowVector::transpose(const RowVector *matA)
{
  // the caller owns (and must delete) the returned vector
  ColumnVector* t = new ColumnVector(matA->cols());
  for (int i = 0; i < matA->cols(); i++)
  {
    t->set(i, matA->get(i));
  }
  return t;
}
void vector_constant(
    const ColumnVector<T1> & x, T2 y,
    const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y,
    ColumnInt64::Container & result)
{
    const auto & x_data = x.getData();
    for (size_t i = 0, size = x.size(); i < size; ++i)
        result[i] = calculate<Transform>(x_data[i], y, timezone_x, timezone_y);
}
/** Utility printing function. */
void printColumnVector(ColumnVector &v, std::ostream *out, const std::string& delim)
{
  int nRow = v.Nrows();
  int i = 0;
  if (out == NULL)
    out = &cout;
  if (nRow == 0)
    return; // nothing to print; avoids reading element(0) of an empty vector
  // print the delimiter after every element except the last
  for (i = 0; i < nRow - 1; i++)
    (*out) << v.element(i) << delim;
  (*out) << v.element(i);
}
void constant_vector(
    T1 x, const ColumnVector<T2> & y,
    const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y,
    ColumnInt64::Container & result)
{
    const auto & y_data = y.getData();
    for (size_t i = 0, size = y.size(); i < size; ++i)
        result[i] = calculate<Transform>(x, y_data[i], timezone_x, timezone_y);
}
double getvlog(const Matrix &W, const Matrix &T, const ColumnVector &bf,
               double cterm, int df, double tol)
{
  Matrix Vk, Vtr;
  ColumnVector gbeta;
  double lgbeta, ldetVtr, xbfVtri;

  Vk = T.i() * W;
  gbeta = Vk.i() * bf;
  QRD Vtrqr(Vk.t(), tol);
  Vtr = Vtrqr.R();
  lgbeta = log(fabs(gbeta(1)));
  //Rprintf("log(abs(gbeta[1])): %f\n", lgbeta);
  ldetVtr = log(fabs(Vtr.Determinant()));
  //Rprintf("ldetVtr: %f\n", ldetVtr);
  xbfVtri = (bf.t() * Vtr.i() * (bf.t() * Vtr.i()).t()).AsScalar();
  //Rprintf("xbfVtri: %f\n", xbfVtri);

  return cterm - ldetVtr + df*lgbeta - 0.5*df*xbfVtri;
}
void test3(Real* y, Real* x1, Real* x2, int nobs, int npred)
{
   cout << "\n\nTest 3 - Cholesky\n";

   // traditional sum of squares and products method of calculation
   // with subtraction of means - using Cholesky decomposition

   Matrix X(nobs,npred);
   X.Column(1) << x1;  X.Column(2) << x2;
   ColumnVector Y(nobs); Y << y;
   ColumnVector Ones(nobs); Ones = 1.0;
   RowVector M = Ones.t() * X / nobs;
   Matrix XC(nobs,npred);
   XC = X - Ones * M;
   ColumnVector YC(nobs);
   Real m = Sum(Y) / nobs;  YC = Y - Ones * m;
   SymmetricMatrix SSQ; SSQ << XC.t() * XC;

   // Cholesky decomposition of SSQ
   LowerTriangularMatrix L = Cholesky(SSQ);

   // calculate estimate
   ColumnVector A = L.t().i() * (L.i() * (XC.t() * YC));

   // calculate estimate of constant term
   Real a = m - (M * A).AsScalar();

   // Get variances of estimates from diagonal elements of inverse of SSQ
   DiagonalMatrix D; D << L.t().i() * L.i();
   ColumnVector V = D.AsColumn();
   Real v = 1.0/nobs + (L.i() * M.t()).SumSquare();

   // Calculate fitted values and residuals
   int npred1 = npred+1;
   ColumnVector Fitted = X * A + a;
   ColumnVector Residual = Y - Fitted;
   Real ResVar = Residual.SumSquare() / (nobs-npred1);

   // Get diagonals of Hat matrix (an expensive way of doing this)
   Matrix X1(nobs,npred1); X1.Column(1) << Ones; X1.Columns(2,npred1) << X;
   DiagonalMatrix Hat;  Hat << X1 * (X1.t() * X1).i() * X1.t();

   // print out answers
   cout << "\nEstimates and their standard errors\n\n";
   cout.setf(ios::fixed, ios::floatfield);
   cout << setw(11) << setprecision(5) << a << " ";
   cout << setw(11) << setprecision(5) << sqrt(v*ResVar) << endl;
   ColumnVector SE(npred);
   for (int i = 1; i <= npred; i++) SE(i) = sqrt(V(i)*ResVar);
   cout << setw(11) << setprecision(5) << (A | SE) << endl;
   cout << "\nObservations, fitted value, residual value, hat value\n";
   cout << setw(9) << setprecision(3) <<
      (X | Y | Fitted | Residual | Hat.AsColumn());
   cout << "\n\n";
}
std::vector<float> BinghamThread::fit_bingham(const ColumnVector& sh_data,
                                              const Matrix& tess,
                                              const std::vector<QSet<int> >& adj,
                                              const Matrix& base,
                                              const int neighborhood,
                                              const int num_max)
{
    unsigned int mod = 9;
    // reserve memory:
    std::vector<float> result(27, 0);

    // if no CSD, no fit is necessary.
    if (sh_data(1) == 0)
    {
        return result;
    }

    // get maxima:
    ColumnVector radius = base * sh_data;
    std::vector<float> qfRadius(radius.Nrows());
    for (unsigned int i = 0; i < qfRadius.size(); ++i)
    {
        qfRadius[i] = radius(i + 1);
    }

    std::vector<int> qiRadius(radius.Nrows());
    for (unsigned int i = 0; i < qiRadius.size(); ++i)
    {
        qiRadius[i] = i;
    }

    // a local maximum is a positive radius larger than all of its
    // neighbours on the tessellation
    std::vector<int> maxima;
    for (unsigned int i = 0; i < qfRadius.size(); ++i)
    {
        QSet<int> n = adj[i];
        float r = qfRadius[i];
        if (r > 0)
        {
            bool isMax = true;
            foreach (const int &value, n)
            {
                if (r < qfRadius[value])
                {
                    isMax = false;
                }
            }
            if (isMax)
            {
                maxima.push_back(i);
            }
        }
    }
void SpectClust::multByMatrix(ColumnVector &N, ColumnVector &O, const Matrix &M)
{
  int nRow = M.Nrows(), nCol = M.Ncols();
  if (!(N.Nrows() == O.Nrows() && M.Nrows() == M.Ncols() && M.Nrows() == N.Nrows()))
  {
    Err::errAbort("wrong dimensions: " + ToStr(O.Nrows()) + " " +
                  ToStr(N.Nrows()) + " " + ToStr(M.Nrows()));
  }
  for (int rowIx = 0; rowIx < nRow; rowIx++)
  {
    N[rowIx] = 0;
    for (int colIx = 0; colIx < nCol; colIx++)
    {
      N[rowIx] += O[colIx] * M[rowIx][colIx];
    }
  }
}
// same as above for X a ColumnVector, length n, element j = 1; otherwise 0
ReturnMatrix Helmert(int n, int j, bool full)
{
   REPORT
   Tracer et("Helmert:single element ");
   if (n <= 0) Throw(ProgramException("X Vector of length <= 0"));
   if (j > n || j <= 0)
      Throw(ProgramException("Out of range element number "));
   ColumnVector Y;
   if (full) Y.resize(n); else Y.resize(n-1);
   Y = 0.0;
   if (j > 1) Y(j-1) = sqrt((Real)(j-1) / (Real)j);
   for (int i = j; i < n; ++i) Y(i) = - 1.0 / sqrt((Real)i * (i+1));
   if (full) Y(n) = 1.0 / sqrt((Real)n);
   Y.release();
   return Y.for_return();
}
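// A brief usage sketch: per the comment above, the call below yields the Helmert
// transform of the unit vector e_2 of length 5, i.e. what multiplying the (full)
// 5x5 Helmert matrix by a vector with 1 in element 2 and 0 elsewhere would give.
ColumnVector H = Helmert(5, 2, true);   // n = 5, j = 2, full = true keeps all 5 rows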
void Uniform::UniformSet(const ColumnVector& center, const ColumnVector& width)
{
  assert(center.rows() == width.rows());

  _Lower  = center - width/2.0;
  _Higher = center + width/2.0;
  _Height = 1;
  for (int i = 1; i < width.rows() + 1; i++)
  {
    _Height = _Height / width(i);
  }

  if (this->DimensionGet() == 0)
    this->DimensionSet(center.rows());
  assert(this->DimensionGet() == center.rows());
}
Uniform::Uniform(const ColumnVector& center, const ColumnVector& width)
  : Pdf<ColumnVector>(center.rows())
  , _samples(DimensionGet())
{
  // check if inputs are consistent
  assert(center.rows() == width.rows());

  _Lower  = center - width/2.0;
  _Higher = center + width/2.0;

  // height = 1 / volume of the support box, so the density integrates to 1
  _Height = 1;
  for (int i = 1; i < width.rows() + 1; i++)
  {
    _Height = _Height / width(i);
  }
}
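// A usage sketch in the same style: a uniform density over the box
// [0,2] x [1,3], i.e. center (1,2) and widths (2,2); _Height becomes
// 1/(2*2) = 0.25.
ColumnVector center(2);
center(1) = 1.0; center(2) = 2.0;   // 1-based indexing, as in the loop above
ColumnVector width(2);
width(1) = 2.0;  width(2) = 2.0;
Uniform prior(center, width);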
int main()
{
   {
      // Get the data
      ColumnVector X(6);
      ColumnVector Y(6);
      X << 1   << 2   << 3    << 4    << 6    << 8;
      Y << 3.2 << 7.9 << 11.1 << 14.5 << 16.7 << 18.3;

      // Do the fit
      Model_3pe model(X);                // the model object
      NonLinearLeastSquares NLLS(model); // the non-linear least squares object
      ColumnVector Para(3);              // for the parameters
      Para << 9 << -6 << .5;             // trial values of parameters
      cout << "Fitting parameters\n";
      NLLS.Fit(Y,Para);                  // do the fit

      // Inspect the results
      ColumnVector SE;                   // for the standard errors
      NLLS.GetStandardErrors(SE);
      cout << "\n\nEstimates and standard errors\n" <<
         setw(10) << setprecision(2) << (Para | SE) << endl;
      Real ResidualSD = sqrt(NLLS.ResidualVariance());
      cout << "\nResidual s.d. = " <<
         setw(10) << setprecision(2) << ResidualSD << endl;
      SymmetricMatrix Correlations;
      NLLS.GetCorrelations(Correlations);
      cout << "\nCorrelationMatrix\n" <<
         setw(10) << setprecision(2) << Correlations << endl;
      ColumnVector Residuals;
      NLLS.GetResiduals(Residuals);
      DiagonalMatrix Hat;
      NLLS.GetHatDiagonal(Hat);
      cout << "\nX, Y, Residual, Hat\n" << setw(10) << setprecision(2) <<
         (X | Y | Residuals | Hat.AsColumn()) << endl;
      // recover var/cov matrix
      SymmetricMatrix D;
      D << SE.AsDiagonal() * Correlations * SE.AsDiagonal();
      cout << "\nVar/cov\n" << setw(14) << setprecision(4) << D << endl;
   }

#ifdef DO_FREE_CHECK
   FreeCheck::Status();
#endif
   return 0;
}
void FFT(const ColumnVector& U, const ColumnVector& V,
         ColumnVector& X, ColumnVector& Y)
{
   // from Carl de Boor (1980), Siam J Sci Stat Comput, 1 173-8
   // but first try Sande and Gentleman
   Tracer trace("FFT");
   REPORT
   const int n = U.Nrows();                     // length of arrays
   if (n != V.Nrows() || n == 0)
      Throw(ProgramException("Vector lengths unequal or zero", U, V));
   if (n == 1) { REPORT X = U; Y = V; return; }

   // see if we can use the newfft routine
   if (!FFT_Controller::OnlyOldFFT && FFT_Controller::CanFactor(n))
   {
      REPORT
      X = U; Y = V;
      if ( FFT_Controller::ar_1d_ft(n,X.Store(),Y.Store()) ) return;
   }

   ColumnVector B = V;
   ColumnVector A = U;
   X.ReSize(n); Y.ReSize(n);
   const int nextmx = 8;
#ifndef ATandT
   int prime[8] = { 2,3,5,7,11,13,17,19 };
#else
   int prime[8];
   prime[0]=2;  prime[1]=3;  prime[2]=5;  prime[3]=7;
   prime[4]=11; prime[5]=13; prime[6]=17; prime[7]=19;
#endif
   int after = 1; int before = n; int next = 0; bool inzee = true;
   int now = 0; int b1;                         // initialised to keep gnu happy

   do
   {
      for (;;)
      {
         if (next < nextmx) { REPORT now = prime[next]; }
         b1 = before / now;  if (b1 * now == before) { REPORT break; }
         next++; now += 2;
      }
      before = b1;

      if (inzee) { REPORT fftstep(A, B, X, Y, after, now, before); }
      else { REPORT fftstep(X, Y, A, B, after, now, before); }

      inzee = !inzee; after *= now;
   }
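// A short usage sketch: the FFT of a length-8 unit impulse (real part in U,
// imaginary part in V) is the all-ones spectrum, returned in X (real) and Y
// (imaginary).
void example_fft()
{
   ColumnVector U(8), V(8);
   U = 0.0; V = 0.0;
   U(1) = 1.0;           // impulse at t = 0
   ColumnVector X, Y;
   FFT(U, V, X, Y);      // expect X(i) == 1.0 and Y(i) == 0.0 for all i
}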
void LoadDNSSCD::loadHuber(ITableWorkspace_sptr tws)
{
  ColumnVector<double> huber = tws->getVector("Huber(degrees)");
  // set huber[0] for each run in m_data
  for (auto &ds : m_data)
  {
    ds.huber = huber[0];
  }
  // duplicate the runs for each remaining huber value in the table
  std::vector<ExpData> old(m_data);
  for (size_t i = 1; i < huber.size(); ++i)
  {
    for (auto &ds : old)
    {
      ds.huber = huber[i];
      m_data.push_back(ds);
    }
  }
}
ColumnVector Cluster::distance(ColumnVector point)
{
  if (point.dim1() != this->mu->dim1())
  {
    TRACE << "size mismatch: point:" << point.dim1()
          << " muDim:" << mu->dim1() << endl;
    return point;
  }
  // element-wise absolute difference from the cluster mean
  ColumnVector retColumn = point - *(this->mu);
  for (int i = 0; i < point.dim1(); i++)
  {
    retColumn(i) = fabs(retColumn(i));
  }
  return retColumn;
}
// Log probability of this data
double SOGP::log_prob(const ColumnVector& in, const ColumnVector& out)
{
  static const double ls2pi = log(sqrt(2*M_PI)); // only compute once
  double sigma;
  double out2;
  if (current_size == 0)
  {
    // mu = zero, sigma = kappa.
    sigma = sqrt(m_params.m_kernel->kstar(in) + m_params.s20); // Is this right? v_0 = kstar, v_1 = s20
    out2 = out.SumSquare();
  }
  else
  {
    ColumnVector mu = predict(in, sigma);
    mu -= out;
    out2 = mu.SumSquare();
  }
  return (-ls2pi - log(sigma) - .5*out2/(sigma*sigma));
}
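// For reference, the value returned above is the log density of a univariate
// Gaussian evaluated at the residual r:
//   log N(r; 0, sigma^2) = -log sqrt(2*pi) - log sigma - r^2 / (2 sigma^2)
// with r^2 = out2. For a multi-dimensional `out` the first two terms would
// need to be multiplied by the output dimension, so the expression as written
// appears to assume a 1-D output.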