// Copies the diagonal of the covariance matrix V into sigma2 (one variance per SNP).
void load_sigma2(const fmat &V, fvec &sigma2)
{
    int nsnp = V.size();
    sigma2 = fvec(nsnp, .0f);
    for (int i = 0; i < nsnp; ++i) {
        sigma2[i] = V[i][i];
    }
}
// X is the data matrix ([m samples] x [n features + BIAS]).
// X is assumed to be already normalized, with the BIAS column appended.
// Y is the response matrix ([m samples] x [c possible categories]).
fmat SGD(const fmat& X, const fmat& Y, double alpha)
{
    int m = X.n_rows;   // Rows = samples
    int n = X.n_cols;   // Columns = features + BIAS
    int c = Y.n_cols;   // Possible categories
    double lambda = 4.0;
    fmat Theta(n, c);
    Theta.fill(0.0);
    int its = m / SGD_N;    // Number of full mini-batches per pass
    fmat subX, subY;
    double loss;
    for (int i = 0; i < GD_IT; i++) {
        // SGD. This could be modularized a bit, perhaps with a define.
        // cout << "iteration " << i << endl;
        for (int j = 0; j < its; j++) {
            subX = X.rows(SGD_N*j, SGD_N*(j+1) - 1);
            subY = Y.rows(SGD_N*j, SGD_N*(j+1) - 1);
            Theta = gdStep(Theta, subX, subY, alpha, lambda);
        }
        // Take the leftover rows (when m is not a multiple of SGD_N).
        if (its * SGD_N < m) {
            subX = X.rows(its*SGD_N, m - 1);
            subY = Y.rows(its*SGD_N, m - 1);
            Theta = gdStep(Theta, subX, subY, alpha, lambda);
        }
        cout << "Finished iteration: " << i;
        #ifndef NDEBUG
        if (i % 10 == 0) {
            loss = logloss(predict(X, Theta), Y);
            cout << " logloss: " << loss;
        }
        #endif
        cout << endl;
    }
    loss = logloss(predict(X, Theta), Y);
    cout << "Final logloss: " << loss << endl;
    return Theta;
}
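// Usage sketch (not part of the original sources): how SGD above might be called,
// assuming Armadillo (fmat = arma::fmat) and labels stored as a class index per sample.
// The function name, variable names, and the learning rate are arbitrary; the features
// are expected to be already normalized, and the BIAS is prepended as column 0 (the row
// gdStep leaves unregularized).
#include <armadillo>
using namespace arma;

fmat trainClassifier(const fmat& features, const fvec& labels, double alpha = 0.1)
{
    // One-hot encode Y ([m samples] x [c categories]).
    int c = (int)labels.max() + 1;
    fmat Y = zeros<fmat>(features.n_rows, c);
    for (uword i = 0; i < labels.n_rows; ++i)
        Y(i, (uword)labels(i)) = 1.0f;

    // Prepend the BIAS column of ones to the (already normalized) features.
    fmat X = join_rows(ones<fmat>(features.n_rows, 1), features);

    return SGD(X, Y, alpha);    // Theta is [n features + BIAS] x [c categories]
}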
//function [y,Y,P,Y1]=ut(f,X,Wm,Wc,n,R)
void BFilterUKF::utMeasurement(fmat X, fvec Wm, fvec Wc, unsigned int n, fmat R)
{
    //Unscented Transformation
    //Input:
    //        f: nonlinear map
    //        X: sigma points
    //       Wm: weights for mean
    //       Wc: weights for covariance
    //        n: number of outputs of f
    //        R: additive covariance
    //Output:
    //        y: transformed mean
    //        Y: transformed sampling points
    //        P: transformed covariance
    //       Y1: transformed deviations
    unsigned int L = X.n_cols;
    z1 = zeros<fvec>(n);
    Z1 = zeros<fmat>(n, L);
    //for k=1:L
    for (unsigned int k = 0; k < L; ++k)
    {
        fmat XColK = X.col(k);
        Z1.col(k) = process->ffun(&XColK);
        z1 = z1 + Wm(k) * Z1.col(k);
    }
    //Z2=Z1-z1(:,ones(1,L)); deviations are taken around the transformed mean z1
    Z2 = Z1;
    for (unsigned int j = 0; j < L; ++j)
    {
        for (unsigned int i = 0; i < z1.n_rows; ++i)
        {
            Z2(i, j) -= z1(i);
        }
    }
    P2 = Z2 * diagmat(Wc) * Z2.t() + R;    // MATLAB: P=Z2*diag(Wc)*Z2'+R
}
//function [y,Y,P,Y1]=ut(f,X,Wm,Wc,n,R)
void BFilterUKF::utProcess(fmat X, fvec Wm, fvec Wc, unsigned int n, fmat R)
{
    //Unscented Transformation
    //Input:
    //        f: nonlinear map
    //        X: sigma points
    //       Wm: weights for mean
    //       Wc: weights for covariance
    //        n: number of outputs of f
    //        R: additive covariance
    //Output:
    //        y: transformed mean
    //        Y: transformed sampling points
    //        P: transformed covariance
    //       Y1: transformed deviations
    unsigned int L = X.n_cols;
    x1 = zeros<fvec>(n);
    X1 = zeros<fmat>(n, L);
    //for k=1:L
    for (unsigned int k = 0; k < L; ++k)
    {
        fmat XColK = X.col(k);
        X1.col(k) = process->ffun(&XColK);
        x1 = x1 + Wm(k) * X1.col(k);
    }
    //X2=X1-x1(:,ones(1,L)); subtract the transformed mean from every sigma point
    X2 = X1;
    for (unsigned int j = 0; j < L; ++j)
    {
        for (unsigned int i = 0; i < x1.n_rows; ++i)
        {
            X2(i, j) -= x1(i);
        }
    }
    P1 = X2 * diagmat(Wc) * X2.t() + R;    // MATLAB: P=X2*diag(Wc)*X2'+R
}
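// Sketch (not from the original sources): one common way to build the sigma points X and
// the weight vectors Wm/Wc that utProcess/utMeasurement consume, using the standard scaled
// unscented transform. The function name and the alpha/beta/kappa parameters are
// assumptions; this class may compute them elsewhere.
#include <armadillo>
using namespace arma;

void generateSigmaPoints(const fvec& x, const fmat& P,
                         fmat& X, fvec& Wm, fvec& Wc,
                         float alpha = 1e-3f, float beta = 2.0f, float kappa = 0.0f)
{
    unsigned int n = x.n_rows;
    unsigned int L = 2 * n + 1;                  // number of sigma points
    float lambda = alpha * alpha * (n + kappa) - n;
    float c = n + lambda;

    fmat A = chol(c * P, "lower");               // scaled matrix square root of P

    X = zeros<fmat>(n, L);
    X.col(0) = x;
    for (unsigned int i = 0; i < n; ++i)
    {
        X.col(i + 1)     = x + A.col(i);
        X.col(i + 1 + n) = x - A.col(i);
    }

    Wm = fvec(L);
    Wc = fvec(L);
    Wm.fill(0.5f / c);
    Wc.fill(0.5f / c);
    Wm(0) = lambda / c;
    Wc(0) = lambda / c + (1.0f - alpha * alpha + beta);
}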
// One (mini-batch) gradient descent step for regularized logistic regression.
// The bias row (row 0 of Theta) is excluded from the regularization term.
fmat gdStep(const fmat& Theta, const fmat& X, const fmat& Y, double alpha, double lambda)
{
    fmat gradient = (alpha / X.n_rows) * X.t() * (sigmoide(X * Theta) - Y);
    fmat reg = (lambda / X.n_rows) * Theta;
    reg.row(0) = zeros<frowvec>(Y.n_cols);  // do not regularize the bias weights
    return Theta - gradient - reg;
}
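// Sketch (assumption): gdStep above relies on a sigmoide() helper that is not shown in
// this file. A minimal element-wise logistic function over an Armadillo fmat could look
// like this; the repository's actual implementation may differ.
#include <armadillo>
#include <cmath>
using namespace arma;

fmat sigmoide(const fmat& Z)
{
    fmat S = Z;
    // Apply the logistic function 1 / (1 + exp(-z)) to every element.
    S.transform([](float z) { return 1.0f / (1.0f + std::exp(-z)); });
    return S;
}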
// Standardizes the first `columns` columns of X: subtracts the per-column mean mu(i)
// and divides by the per-column standard deviation sigma(i).
fmat scaleFeatures(fmat X, fmat mu, fmat sigma, int columns)
{
    for (int i = 0; i < columns; ++i)
    {
        X.col(i) = (X.col(i) - mu(i)) / sigma(i);
    }
    return X;
}
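// Sketch (assumption): mu and sigma are expected to come from the training data, and the
// same values should be reused when scaling validation/test data. With Armadillo this
// could look as follows; the helper name prepareSets is hypothetical.
#include <armadillo>
using namespace arma;

void prepareSets(fmat& Xtrain, fmat& Xtest)
{
    fmat mu = mean(Xtrain);         // row vector of per-column means
    fmat sigma = stddev(Xtrain);    // row vector of per-column standard deviations
    sigma.replace(0.0f, 1.0f);      // avoid division by zero for constant columns

    Xtrain = scaleFeatures(Xtrain, mu, sigma, (int)Xtrain.n_cols);
    Xtest  = scaleFeatures(Xtest,  mu, sigma, (int)Xtest.n_cols);
}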