GURLS_EXPORT void inv(const gMat2D<float>& A, gMat2D<float>& Ainv, InversionAlgorithm alg)
{
    // NOTE: alg is currently ignored; LU-based inversion is always performed.
    Ainv = A;

    int m = Ainv.rows();
    int n = Ainv.cols();
    int lda = Ainv.rows();
    int k = std::min(Ainv.cols(), Ainv.rows());
    int info;

    int* ipiv = new int[k];

    // LU factorization (LAPACK), followed by inversion of the factored matrix
    sgetrf_(&m, &n, Ainv.getData(), &lda, ipiv, &info);

    float* work = new float[n];
    sgetri_(&m, Ainv.getData(), &lda, ipiv, work, &n, &info);

    delete[] ipiv;
    delete[] work;
}
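// Hypothetical usage sketch (not part of the original source): inverting a small
// square matrix with the wrapper above. Assumes the gurls++ headers and <iostream>
// are included and that LU is an enumerator of InversionAlgorithm; identifiers are
// illustrative only.
void example_inv_usage()
{
    gurls::gMat2D<float> A(2, 2);
    A(0,0) = 4.f;  A(0,1) = 7.f;
    A(1,0) = 2.f;  A(1,1) = 6.f;

    gurls::gMat2D<float> Ainv(2, 2);
    gurls::inv(A, Ainv, gurls::LU);     // alg is currently ignored by the wrapper

    std::cout << Ainv << std::endl;     // expected: [0.6 -0.7; -0.2 0.4]
}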
GURLS_EXPORT void cholesky(const gMat2D<float>& A, gMat2D<float>& L, bool upper)
{
    typedef float T;

    L = A;

    int LDA = A.rows();
    int n = A.cols();
    char UPLO = upper ? 'U' : 'L';
    int info;

    spotrf_(&UPLO, &n, L.getData(), &LDA, &info);

    // The extraction and transposition below are required because gMat2D stores
    // its data in row-major order, while LAPACK works with column-major ordering
    gMat2D<T> tmp(L.rows(), L.cols());

    if (!upper)
        L.uppertriangular(tmp);
    else
        L.lowertriangular(tmp);

    tmp.transpose(L);
}
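// Hypothetical usage sketch (not part of the original source): factorizing a small
// symmetric positive-definite matrix with the wrapper above. Identifiers and the
// explicit gurls:: qualification are assumptions; only the cholesky() signature is
// taken from this file.
void example_cholesky_usage()
{
    gurls::gMat2D<float> A(2, 2);
    A(0,0) = 4.f;  A(0,1) = 2.f;
    A(1,0) = 2.f;  A(1,1) = 3.f;

    gurls::gMat2D<float> U(2, 2);
    gurls::cholesky(A, U, true);    // upper-triangular factor, A = U' * U

    gurls::gMat2D<float> L(2, 2);
    gurls::cholesky(A, L, false);   // lower-triangular factor, A = L * L'
}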
bool updateModule()
{
    ++updateCount;

    if (numPred >= 0 && updateCount > numPred)
    {
        cout << "Specified number of predictions reached. Shutting down the module." << endl;
        return false;
    }

    // DEBUG
    if (verbose) cout << "updateModule #" << updateCount << endl;

    // Recursive update support and storage variables declaration and initialization
    gMat2D<T> Xnew(1, d);
    gMat2D<T> ynew(1, t);
    gVec<T> Xnew_v(d);
    gVec<T> ynew_v(t);
    gMat2D<T>* resptr = 0;

    // Wait for input feature vector
    if (verbose) cout << "Expecting input vector" << endl;

    Bottle* bin = inVec.read();     // blocking call

    if (bin != 0)
    {
        if (verbose) cout << "Got it!" << endl << bin->toString() << endl;

        // Store the received sample in gMat2D format so that it is compatible with gurls++
        for (int i = 0; i < bin->size(); ++i)
        {
            if (i < d)
                Xnew(0, i) = bin->get(i).asDouble();
            else if ((i >= d) && (i < d + t))
                ynew(0, i - d) = bin->get(i).asDouble();
        }

        if (verbose) cout << "Xnew: " << endl << Xnew << endl;
        if (verbose) cout << "ynew: " << endl << ynew.rows() << " x " << ynew.cols() << endl;
        if (verbose) cout << ynew << endl;

        //-----------------------------------
        //           Prediction
        //-----------------------------------

        // Test on the incoming sample
        resptr = estimator.eval(Xnew);

        Bottle& bpred = pred.prepare();     // Get a place to store things.
        bpred.clear();                      // clear is important - b might be a reused object

        for (int i = 0; i < t; ++i)
            bpred.addDouble((*resptr)(0, i));

        if (verbose) printf("Sending prediction: %s\n", bpred.toString().c_str());
        pred.write();
        if (verbose) printf("Prediction written to port\n");

        //-----------------------------------
        //           Performance
        //-----------------------------------

        Bottle& bperf = perf.prepare();     // Get a place to store things.
        bperf.clear();                      // clear is important - b might be a reused object

        if (perfType == "nMSE")             // WARNING: The estimated variance could be unreliable...
        {
            // Compute nMSE and store it
            // NOTE: In GURLS, the "/" operator works like MATLAB's "\".
            error += varCols / (ynew - *resptr) * (ynew - *resptr);
            gMat2D<T> tmp = error / updateCount;    // WARNING: Check

            for (int i = 0; i < t; ++i)
                bperf.addDouble(tmp(0, i));
        }
        else if (perfType == "RMSE")
        {
            gMat2D<T> tmp(1, t);
            tmp = (ynew - *resptr) * (ynew - *resptr);

            //error = ( error * (updateCount-1) + sqrt(( ynew - *resptr )*( ynew - *resptr )) ) / updateCount;
            error = error * (updateCount - 1);
            for (int i = 0; i < ynew.cols(); ++i)
                error(0, i) += sqrt(tmp(0, i));
            error = error / updateCount;

            /*
            for (int i = 0; i < t; ++i)
                bperf.addDouble(sqrt(MSE(0, i)));
            */

            // WARNING: Temporary average RMSE computation (assumes t == 6: 3 forces + 3 torques)
            bperf.addDouble((error(0, 0) + error(0, 1) + error(0, 2)) / 3.0);   // Average RMSE on forces
            bperf.addDouble((error(0, 3) + error(0, 4) + error(0, 5)) / 3.0);   // Average RMSE on torques
        }
        else if (perfType == "MSE")
        {
            // Compute MSE and store it
            error = (error * (updateCount - 1) + (ynew - *resptr) * (ynew - *resptr)) / updateCount;

            for (int i = 0; i < t; ++i)
                bperf.addDouble(error(0, i));
        }

        // Update error storage matrix
        if (updateCount <= savedPerfNum)
        {
            gVec<T> errRow = error[0];
            storedError.setRow(errRow, updateCount - 1);
        }

        // Save to CSV file
        if (updateCount == savedPerfNum)
        {
            std::ostringstream ss;
            ss << experimentCount;
            //string tmp(std::to_string(experimentCount));
            storedError.saveCSV("storedError" + ss.rdbuf()->str() + ".csv");
            cout << "Error measurement matrix saved." << endl;
        }

        // Write the computed error to the output port
        if (verbose) printf("Sending %s measurement: %s\n", perfType.c_str(), bperf.toString().c_str());
        perf.write();

        //-----------------------------------
        //             Update
        //-----------------------------------

        // Update the estimator with the new input pair
        if (verbose) cout << "Now performing RRLS update" << endl;
        if (verbose) cout << "Xnew" << Xnew << endl;
        if (verbose) cout << "ynew" << ynew << endl;

        estimator.update(Xnew, ynew);

        if (verbose) cout << "Update completed" << endl;
    }

    if (numPred >= 0 && (updateCount == numPred))
    {
        cout << "Specified number of predictions reached. Shutting down the module." << endl;
        return false;
    }

    return true;
}
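// Illustrative sketch (not part of the original source) of the recursive averaging
// rule used in the MSE/RMSE branches of updateModule() above: the running mean
// after n samples is obtained from the previous mean as
//     mean_n = (mean_{n-1} * (n - 1) + e_n) / n.
// The function below is a hypothetical scalar version for a single output.
double updateRunningMean(double prevMean, double newValue, int n)
{
    // n is the number of samples seen so far, including newValue (n >= 1)
    return (prevMean * (n - 1) + newValue) / n;
}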
bool configure(ResourceFinder &rf)
{
    string name = rf.find("name").asString().c_str();
    setName(name.c_str());

    // Set verbosity
    verbose = rf.check("verbose", Value(0)).asInt();

    // Set dimensionalities
    d = rf.check("d", Value(0)).asInt();
    t = rf.check("t", Value(0)).asInt();
    if (d <= 0 || t <= 0)
    {
        printf("Error: Inconsistent feature or output dimensionalities!\n");
        return false;
    }

    // Set performance measure type
    // WARNING: perf types should be defined as separate sister classes
    perfType = rf.check("perf", Value("RMSE")).asString();
    if (perfType != "MSE" && perfType != "RMSE" && perfType != "nMSE")
    {
        printf("Error: Inconsistent performance measure! Set to RMSE.\n");
        perfType = "RMSE";
    }

    // Set number of predictions (-1 means unlimited)
    numPred = rf.check("numPred", Value("-1")).asInt();

    // Set number of saved performance measurements
    savedPerfNum = rf.check("savedPerfNum", Value("0")).asInt();
    if (numPred >= 0 && savedPerfNum > numPred)
    {
        savedPerfNum = numPred;
        cout << "Warning: savedPerfNum > numPred, setting savedPerfNum = numPred" << endl;
    }

    //experimentCount = rf.check("experimentCount",Value("0")).asInt();
    experimentCount = rf.find("experimentCount").asInt();

    // Print configuration
    cout << endl << "-------------------------" << endl;
    cout << "Configuration parameters:" << endl << endl;
    cout << "experimentCount = " << experimentCount << endl;
    cout << "d = " << d << endl;
    cout << "t = " << t << endl;
    cout << "perf = " << perfType << endl;
    cout << "-------------------------" << endl << endl;

    // Open ports
    string fwslash = "/";
    inVec.open((fwslash + name + "/vec:i").c_str());
    printf("inVec opened\n");
    pred.open((fwslash + name + "/pred:o").c_str());
    printf("pred opened\n");
    perf.open((fwslash + name + "/perf:o").c_str());
    printf("perf opened\n");
    rpcPort.open((fwslash + name + "/rpc:i").c_str());
    printf("rpcPort opened\n");

    // Attach rpcPort to the respond() method
    attach(rpcPort);

    // Initialize random number generator
    srand(static_cast<unsigned int>(time(NULL)));

    // Initialize error structures
    error.resize(1, t);
    error = gMat2D<T>::zeros(1, t);

    if (savedPerfNum > 0)
    {
        storedError.resize(savedPerfNum, t);
        storedError = gMat2D<T>::zeros(savedPerfNum, t);
    }

    updateCount = 0;

    //------------------------------------------
    //            Pre-training
    //------------------------------------------
    if (pretrain == 1)
    {
        if (pretr_type == "fromFile")
        {
            //------------------------------------------
            //       Pre-training from file
            //------------------------------------------
            string trainFilePath = rf.getContextPath() + "/data/" + pretrainFile;

            try
            {
                // Load data file
                cout << "Loading data file..." << endl;
                trainSet.readCSV(trainFilePath);
                cout << "File " + trainFilePath + " successfully read!" << endl;
                cout << "trainSet: " << trainSet << endl;
                cout << "n_pretr = " << n_pretr << endl;
                cout << "d = " << d << endl;

                // WARNING: Add matrix dimensionality check!

                // Resize and initialize Xtr
                Xtr.resize(n_pretr, d);
                //Xtr.submatrix(trainSet , n_pretr , d);
                Xtr.submatrix(trainSet, 0, 0);
                cout << "Xtr initialized!" << endl << Xtr << endl;

                // Resize ytr
                ytr.resize(n_pretr, t);
                cout << "ytr resized!" << endl;

                // Initialize ytr
                gVec<T> tmpCol(trainSet.rows());
                cout << "tmpCol" << tmpCol << endl;
                for (int i = 0; i < t; ++i)
                {
                    cout << "trainSet(d + i): " << trainSet(d + i) << endl;
                    tmpCol = trainSet(d + i);

                    gVec<T> tmpCol1(n_pretr);
                    //cout << tmpCol.subvec( (unsigned int) n_pretr , (unsigned int) 0);   // WARNING: Fixed in latest GURLS version

                    gVec<T> locs(n_pretr);
                    for (int j = 0; j < n_pretr; ++j)
                        locs[j] = j;
                    cout << "locs" << locs << endl;

                    gVec<T>& tmpCol2 = tmpCol.copyLocations(locs);
                    cout << "tmpCol2" << tmpCol2 << endl;

                    //tmpCol1 = tmpCol.subvec( (unsigned int) n_pretr );
                    //cout << "tmpCol1: " << tmpCol1 << endl;

                    ytr.setColumn(tmpCol2, (long unsigned int) i);
                }
                cout << "ytr initialized!" << endl;

                // Compute the variance of each output on the training set
                gMat2D<T> varCols = gMat2D<T>::zeros(1, t);
                gVec<T>* sumCols_v = ytr.sum(COLUMNWISE);           // Vector containing the column-wise sum
                gMat2D<T> meanCols(sumCols_v->getData(), 1, t, 1);  // Matrix containing the column-wise sum
                meanCols /= n_pretr;                                // Matrix containing the column-wise mean
                if (verbose) cout << "Mean of the output columns: " << endl << meanCols << endl;

                for (int i = 0; i < n_pretr; i++)
                {
                    gMat2D<T> ytri(ytr[i].getData(), 1, t, 1);
                    varCols += (ytri - meanCols) * (ytri - meanCols);   // NOTE: Temporary assignment
                }
                varCols /= n_pretr;                                 // Compute variance
                if (verbose) cout << "Variance of the output columns: " << endl << varCols << endl;

                // Initialize the model
                cout << "Batch pretraining the RLS model with " << n_pretr << " samples." << endl;
                estimator.train(Xtr, ytr);
            }
            catch (gException& e)
            {
                cout << e.getMessage() << endl;
                return false;   // Terminate program. NOTE: may be worth setting up specific error return values
            }
        }
        else if (pretr_type == "fromStream")
        {
            //------------------------------------------
            //       Pre-training from stream
            //------------------------------------------
            try
            {
                cout << "Pretraining from stream started. Listening on port vec:i. "
                     << n_pretr << " samples expected." << endl;

                // Resize Xtr and ytr
                Xtr.resize(n_pretr, d);
                ytr.resize(n_pretr, t);

                // Initialize Xtr and ytr from the streamed samples
                for (int j = 0; j < n_pretr; ++j)
                {
                    // Wait for input feature vector
                    if (verbose) cout << "Expecting input vector # " << j + 1 << endl;

                    Bottle *bin = inVec.read();     // blocking call

                    if (bin != 0)
                    {
                        if (verbose) cout << "Got it!" << endl << bin->toString() << endl;

                        // Store the received sample in gMat2D format so that it is compatible with gurls++
                        for (int i = 0; i < bin->size(); ++i)
                        {
                            if (i < d)
                                Xtr(j, i) = bin->get(i).asDouble();
                            else if ((i >= d) && (i < d + t))
                                ytr(j, i - d) = bin->get(i).asDouble();
                        }

                        if (verbose) cout << "Xtr[j]:" << endl << Xtr[j] << endl << "ytr[j]:" << endl << ytr[j] << endl;
                    }
                    else
                        --j;    // WARNING: bug while closing with ctrl-c
                }
                cout << "Xtr initialized!" << endl;
                cout << "ytr initialized!" << endl;

                // Compute the variance of each output on the training set
                gMat2D<T> varCols = gMat2D<T>::zeros(1, t);
                gVec<T>* sumCols_v = ytr.sum(COLUMNWISE);           // Vector containing the column-wise sum
                gMat2D<T> meanCols(sumCols_v->getData(), 1, t, 1);  // Matrix containing the column-wise sum
                meanCols /= n_pretr;                                // Matrix containing the column-wise mean
                if (verbose) cout << "Mean of the output columns: " << endl << meanCols << endl;

                for (int i = 0; i < n_pretr; i++)
                {
                    gMat2D<T> ytri(ytr[i].getData(), 1, t, 1);
                    varCols += (ytri - meanCols) * (ytri - meanCols);   // NOTE: Temporary assignment
                }
                varCols /= n_pretr;                                 // Compute variance
                if (verbose) cout << "Variance of the output columns: " << endl << varCols << endl;

                // Initialize the model
                cout << "Batch pretraining the RLS model with " << n_pretr << " samples." << endl;
                estimator.train(Xtr, ytr);
            }
            catch (gException& e)
            {
                cout << e.getMessage() << endl;
                return false;   // Terminate program. NOTE: may be worth setting up specific error return values
            }
        }

        // Print detailed pretraining information
        if (verbose)
            estimator.getOpt().printAll();
    }

    return true;
}
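// Illustrative sketch (not part of the original source) of the column-wise variance
// computed in both pre-training branches of configure() above: for each output
// column j, var_j = (1/n) * sum_i (y_ij - mean_j)^2; this is the quantity later
// used to normalize the nMSE. The helper below is a hypothetical standalone version.
gurls::gMat2D<float> columnVariance(const gurls::gMat2D<float>& Y)
{
    const unsigned long n = Y.rows();
    const unsigned long t = Y.cols();

    gurls::gMat2D<float> mean = gurls::gMat2D<float>::zeros(1, t);
    for (unsigned long i = 0; i < n; ++i)
        for (unsigned long j = 0; j < t; ++j)
            mean(0, j) += Y(i, j) / n;

    gurls::gMat2D<float> var = gurls::gMat2D<float>::zeros(1, t);
    for (unsigned long i = 0; i < n; ++i)
        for (unsigned long j = 0; j < t; ++j)
            var(0, j) += (Y(i, j) - mean(0, j)) * (Y(i, j) - mean(0, j)) / n;

    return var;
}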
template <typename T>
BigArray<T>::BigArray(std::string fileName, const gMat2D<T>& mat)
{
    init(fileName, mat.rows(), mat.cols());

    MPI_Barrier(MPI_COMM_WORLD);
}
template <typename T>
void BigArray<T>::setMatrix(unsigned long startingRow, unsigned long startingCol, const gMat2D<T>& value)
{
    setMatrix(startingRow, startingCol, value.getData(), value.rows(), value.cols());
}
GURLS_EXPORT void cholesky(const gMat2D<float>& A, gMat2D<float>& L, bool upper)
{
    cholesky<float>(A.getData(), A.rows(), A.cols(), L.getData(), upper);
}
GURLS_EXPORT void lu(gMat2D<float>& A)
{
    gVec<int> pv(std::min(A.cols(), A.rows()));
    lu(A, pv);
}
GURLS_EXPORT void svd(const gMat2D<float>& A, gMat2D<float>& U, gVec<float>& W, gMat2D<float>& Vt)
{
    char jobu = 'S', jobvt = 'S';

    int m = A.rows();
    int n = A.cols();
    int k = std::min<int>(m, n);

    if ((int)W.getSize() < k)
    {
        throw gException("The length of vector W must be at least equal to the minimum dimension of the input matrix A");
    }
    if ((int)U.rows() < m || (int)U.cols() < k)
    {
        throw gException("Please check the dimensions of the matrix U where to store the singular vectors");
    }
    if ((int)Vt.rows() < k || (int)Vt.cols() < n)
    {
        throw gException("Please check the dimensions of the matrix Vt where to store the right singular vectors");
    }

    int lda = A.cols();
    int ldu = U.cols();
    int ldvt = Vt.cols();
    int info, lwork = std::max<int>(3*k + std::max<int>(m, n), 5*k);

    float* work = new float[lwork];
    float* copy = new float[m*n];
    A.asarray(copy, m*n);

    // The row-major data of A is handed to LAPACK with swapped dimensions (n, m),
    // i.e. as the transpose of A, so the output buffers for U and Vt are exchanged
    sgesvd_(&jobu, &jobvt, &n, &m, copy, &lda, W.getData(), Vt.getData(), &ldvt, U.getData(), &ldu, work, &lwork, &info);

    delete[] work;
    delete[] copy;
}
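// Hypothetical usage sketch (not part of the original source): thin SVD of a 3x2
// matrix with the wrapper above. U is m x k, W holds the k singular values and
// Vt is k x n, with k = min(m, n); identifiers are illustrative only.
void example_svd_usage()
{
    const int m = 3, n = 2, k = 2;      // k = min(m, n)

    gurls::gMat2D<float> A(m, n);
    A(0,0) = 1.f;  A(0,1) = 0.f;
    A(1,0) = 0.f;  A(1,1) = 2.f;
    A(2,0) = 0.f;  A(2,1) = 0.f;

    gurls::gMat2D<float> U(m, k);
    gurls::gVec<float>   W(k);
    gurls::gMat2D<float> Vt(k, n);

    gurls::svd(A, U, W, Vt);            // W should contain {2, 1} for this A

    std::cout << "Singular values: " << W << std::endl;
}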
GURLS_EXPORT void pinv(const gMat2D<float>& A, gMat2D<float>& Ainv, float RCOND)
{
    /*
    subroutine SGELSS (INTEGER M, INTEGER N, INTEGER NRHS,
                       REAL, dimension(lda, *) A, INTEGER LDA,
                       REAL, dimension(ldb, *) B, INTEGER LDB,
                       REAL, dimension(*) S, REAL RCOND, INTEGER RANK,
                       REAL, dimension(*) WORK, INTEGER LWORK, INTEGER INFO)
    */

    int M = A.rows();
    int N = A.cols();

    // The following step is required because gMat2D stores data in row-major
    // order, while LAPACK routines expect column-major (Fortran) ordering
    float* a = new float[M*N];
    const float* ptr_A = A.getData();
    float* ptr_a = a;
    for (int j = 0; j < N; j++)
        for (int i = 0; i < M; i++)
            *ptr_a++ = *(ptr_A + i*N + j);

    int LDA = M;
    int LDB = std::max(M, N);
    int NRHS = LDB;

    // Right-hand side: an LDB x NRHS identity matrix, so that the least-squares
    // solution returned by SGELSS is the pseudo-inverse of A
    float *b = new float[LDB*NRHS], *b_ptr = b;
    for (int i = 0; i < LDB*NRHS; i++)
        *b_ptr++ = 0.f;
    b_ptr = b;
    for (int i = 0; i < std::min(LDB, NRHS); i++, b_ptr += (NRHS+1))
        *b_ptr = 1.f;

    float* S = new float[std::min(M, N)];
    float condnum = 0.f;    // The condition number of A in the 2-norm = S(1)/S(min(m,n))

    if (RCOND < 0)
        RCOND = 0.f;

    int RANK = -1;  // std::min(M,N);
    int LWORK = -1; // 2 * (3*LDB + std::max( 2*std::min(M,N), LDB));
    float* WORK = new float[1];

    /*
    INFO:
    = 0: successful exit
    < 0: if INFO = -i, the i-th argument had an illegal value.
    > 0: the algorithm for computing the SVD failed to converge;
         if INFO = i, i off-diagonal elements of an intermediate bidiagonal
         form did not converge to zero.
    */
    int INFO;

    /* Query and allocate the optimal workspace */
    sgelss_(&M, &N, &NRHS, a, &LDA, b, &LDB, S, &RCOND, &RANK, WORK, &LWORK, &INFO);
    LWORK = static_cast<int>(WORK[0]);
    delete [] WORK;
    WORK = new float[LWORK];

    sgelss_(&M, &N, &NRHS, a, &LDA, b, &LDB, S, &RCOND, &RANK, WORK, &LWORK, &INFO);

    // TODO: check INFO on exit
    condnum = S[0] / S[std::min(M, N) - 1];

    // Copy the solution back into row-major order
    float* ainv = new float[N*M];
    float* ptr_b = ainv;
    float* ptr_B = b;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < M; j++)
            *(ptr_b + i*M + j) = *(ptr_B + j*NRHS + i);

    // NOTE: the gMat2D temporary allocated here is never freed
    Ainv = * new gMat2D<float>(ainv, N, M, true);

    // gMat2D<float> *tmp = new gMat2D<float>(b, LDB, NRHS, false);
    // gMat2D<float> *tmp1 = new gMat2D<float>(NRHS, LDB);
    // tmp->transpose(*tmp1);
    // Ainv = * new gMat2D<float>(tmp1->getData(), N, M, true);

    delete [] S;
    delete [] WORK;
    delete [] a;
    delete [] b;
}
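// Hypothetical usage sketch (not part of the original source): computing the
// pseudo-inverse of a 2x3 matrix with the wrapper above. For this full-row-rank A,
// A * pinv(A) is the 2x2 identity; identifiers are illustrative, and RCOND = 0
// means that only (numerically) zero singular values are discarded.
void example_pinv_usage()
{
    gurls::gMat2D<float> A(2, 3);
    A(0,0) = 1.f;  A(0,1) = 0.f;  A(0,2) = 0.f;
    A(1,0) = 0.f;  A(1,1) = 1.f;  A(1,2) = 0.f;

    gurls::gMat2D<float> Ainv(3, 2);    // pinv(A) has size n x m
    gurls::pinv(A, Ainv, 0.f);

    std::cout << Ainv << std::endl;     // expected: [1 0; 0 1; 0 0]
}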