/// \brief Update Kalman filter
void UnscentedKalmanFilter::updateUKF(vnl_vector<double> u, vnl_vector<double> z)
{
  // create sigma points X for the current estimate distribution
  vnl_matrix<double> X(N, 2*N+1);
  sigmas(x_hat, P_hat+Q, sqrt(C), X);

  qDebug() << "X: ";
  for (int i = 0; i < 6; i++)
  {
    qDebug() << X(i,0) << " " << X(i,1) << " " << X(i,2) << " " << X(i,3) << " "
             << X(i,4) << " " << X(i,5) << " " << X(i,6) << " " << X(i,7) << " "
             << X(i,8) << " " << X(i,9) << " " << X(i,10) << " " << X(i,11) << " " << X(i,12);
  }

  // apply unscented transformation for process model
  vnl_vector<double> x1(6);
  vnl_matrix<double> X1(N, 2*N+1), P1(N,N), X2(N, 2*N+1);
  utf(X, u, x1, X1, P1, X2);

  // apply unscented transformation for observation model
  vnl_vector<double> z1(M);
  vnl_matrix<double> Z1(M, 2*M+1), P2(M,M), Z2(M, 2*M+1);
  utg(X1, z1, Z1, P2, Z2);

  // compute the transformed cross-covariance
  vnl_matrix<double> WC(2*N+1, 2*N+1, 0.0);
  WC.set_diagonal(Wc.get_row(0));
  vnl_matrix<double> P12 = X2*WC*Z2.transpose();

  // perform state update
  K = P12*vnl_matrix_inverse<double>(P2);
  x_hat = x1 + K*(z - z1);

  // perform covariance update
  P_hat = P1 - K*P12.transpose();
}
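// The sigmas(...) helper that seeds the update above is not shown in this snippet. Below is a
// hedged, self-contained sketch of the standard sigma-point construction it presumably performs,
// assuming x is the state mean, P a positive-definite covariance, c the scalar spread factor
// (sqrt(C) in the call above), and output columns [x, x + c*A_j, x - c*A_j] with A the lower
// Cholesky factor of P. The name sigmaPointsSketch and the hand-rolled Cholesky are illustrative
// only; the actual member function may differ.
#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>
#include <cmath>

void sigmaPointsSketch(const vnl_vector<double>& x, const vnl_matrix<double>& P,
                       double c, vnl_matrix<double>& X)
{
  const unsigned n = x.size();

  // Plain Cholesky factorization P = A*A^T (lower-triangular A); assumes P is SPD.
  vnl_matrix<double> A(n, n, 0.0);
  for (unsigned i = 0; i < n; ++i)
    for (unsigned j = 0; j <= i; ++j)
    {
      double s = P(i, j);
      for (unsigned k = 0; k < j; ++k) s -= A(i, k) * A(j, k);
      A(i, j) = (i == j) ? std::sqrt(s) : s / A(j, j);
    }

  // 2n+1 sigma points: the mean, then symmetric perturbations along each Cholesky column.
  X.set_size(n, 2*n + 1);
  for (unsigned i = 0; i < n; ++i) X(i, 0) = x(i);
  for (unsigned j = 0; j < n; ++j)
    for (unsigned i = 0; i < n; ++i)
    {
      X(i, j + 1)     = x(i) + c * A(i, j);
      X(i, j + 1 + n) = x(i) - c * A(i, j);
    }
}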
void FormEfficiencyCalibration::on_pushFit_2_clicked()
{
  QVector<SelectorItem> items = ui->spectrumSelector->items();
  std::vector<double> xx, yy;

  for (auto &fit : peak_sets_) {
    bool visible = false;
    for (auto &i : items)
      if (i.visible && (fit.second.metadata_.name == i.text.toStdString()))
        visible = true;
    if (visible) {
      for (auto &q : fit.second.peaks()) {
        //BROKEN
        //        if (!q.second.flagged)
        //          continue;
        double x = q.second.energy;
        double y = q.second.efficiency_relative_ * fit.second.activity_scale_factor_;
        xx.push_back(x);
        yy.push_back(y);
      }
    }
  }

  //  LogInverse p = LogInverse(xx, yy, ui->spinTerms->value());
  std::vector<double> sigmas(yy.size(), 1);

  LogInverse p;
  p.add_coeff(0, -50, 50, 1);
  for (int i=1; i <= ui->spinTerms->value(); ++i) {
    if (i==1)
      p.add_coeff(i, -50, 50, 1);
    else
      p.add_coeff(i, -50, 50, 0);
  }
  p.fit(xx, yy, sigmas);

  if (p.coeffs_.size()) {
    new_calibration_.type_ = "Efficiency";
    new_calibration_.bits_ = fit_data_.metadata_.bits;
    new_calibration_.coefficients_ = p.coeffs();
    new_calibration_.calib_date_ = boost::posix_time::microsec_clock::universal_time();  //spectrum timestamp instead?
    new_calibration_.units_ = "ratio";
    new_calibration_.model_ = Qpx::CalibrationModel::loginverse;
    PL_DBG << "<Efficiency calibration> new calibration fit " << new_calibration_.to_string();
  }
  else
    PL_INFO << "<Efficiency calibration> Qpx::Calibration failed";

  replot_calib();
  toggle_push();
}
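// The sigmas vector passed to p.fit() above is all ones, i.e. every point is weighted equally.
// As a hedged, library-free illustration of what per-point sigmas do in such a fit (this is not
// the Qpx LogInverse implementation, just a chi-square-weighted straight-line fit), the
// closed-form weighted least squares for y = a + b*x looks like:
#include <vector>

void weightedLineFit(const std::vector<double>& x, const std::vector<double>& y,
                     const std::vector<double>& sigma, double& a, double& b)
{
  // Weights w_i = 1/sigma_i^2; sigma_i = 1 for all points reduces to an unweighted fit.
  double S = 0, Sx = 0, Sy = 0, Sxx = 0, Sxy = 0;
  for (size_t i = 0; i < x.size(); ++i) {
    double w = 1.0 / (sigma[i] * sigma[i]);
    S   += w;
    Sx  += w * x[i];
    Sy  += w * y[i];
    Sxx += w * x[i] * x[i];
    Sxy += w * x[i] * y[i];
  }
  double denom = S * Sxx - Sx * Sx;
  b = (S * Sxy - Sx * Sy) / denom;   // slope
  a = (Sy - b * Sx) / S;             // intercept
}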
// -----------------------------------------------------------------------------
//
// -----------------------------------------------------------------------------
void StatsGenODFWidget::extractStatsData(int index, StatsData* statsData, unsigned int phaseType)
{
  VectorOfFloatArray arrays;
  if(phaseType == DREAM3D::PhaseType::PrimaryPhase)
  {
    PrimaryStatsData* pp = PrimaryStatsData::SafePointerDownCast(statsData);
    arrays = pp->getODF_Weights();
  }
  if(phaseType == DREAM3D::PhaseType::PrecipitatePhase)
  {
    PrecipitateStatsData* pp = PrecipitateStatsData::SafePointerDownCast(statsData);
    arrays = pp->getODF_Weights();
  }
  if(phaseType == DREAM3D::PhaseType::TransformationPhase)
  {
    TransformationStatsData* tp = TransformationStatsData::SafePointerDownCast(statsData);
    arrays = tp->getODF_Weights();
  }
  // All five ODF weight arrays (Euler 1-3, Weight, Sigma) are required below.
  if (arrays.size() >= 5)
  {
    QVector<float> e1(static_cast<int>(arrays[0]->getNumberOfTuples()));
    ::memcpy( e1.data(), arrays[0]->getVoidPointer(0), sizeof(float)*e1.size() );

    QVector<float> e2(static_cast<int>(arrays[1]->getNumberOfTuples()));
    ::memcpy( e2.data(), arrays[1]->getVoidPointer(0), sizeof(float)*e2.size() );

    QVector<float> e3(static_cast<int>(arrays[2]->getNumberOfTuples()));
    ::memcpy( e3.data(), arrays[2]->getVoidPointer(0), sizeof(float)*e3.size() );

    QVector<float> weights(static_cast<int>(arrays[3]->getNumberOfTuples()));
    ::memcpy( weights.data(), arrays[3]->getVoidPointer(0), sizeof(float)*weights.size() );

    QVector<float> sigmas(static_cast<int>(arrays[4]->getNumberOfTuples()));
    ::memcpy( sigmas.data(), arrays[4]->getVoidPointer(0), sizeof(float)*sigmas.size() );

    // Convert the Euler angles from radians to degrees
    for(int i = 0; i < e1.size(); ++i)
    {
      e1[i] = e1[i] * 180.0f / M_PI;
      e2[i] = e2[i] * 180.0f / M_PI;
      e3[i] = e3[i] * 180.0f / M_PI;
    }

    if(e1.size() > 0)
    {
      // Load the data into the table model
      m_ODFTableModel->setTableData(e1, e2, e3, weights, sigmas);
    }
  }
  // Write the MDF Data if we have that functionality enabled
  if (m_MDFWidget != NULL)
  {
    m_MDFWidget->extractStatsData(index, statsData, phaseType);
  }
  updatePlots();
}
void BFilterUKF::predict()
{
    //L=numel(x);  // number of states
    //m=numel(z);  // number of measurements
    unsigned int numStates = particles.samples.n_cols;

    float alpha = 1.0f;   // default 1e-3, tunable
    float kappa = 0.0f;   // default, tunable
    float beta  = 2.0f;   // default, tunable
    //lambda=alpha^2*(L+kappa)-L;  // scaling factor
    float lambda = alpha*alpha*(numStates+kappa) - numStates;  // scaling factor
    float c = numStates + lambda;                              // scaling factor gamma

    //Wm=[lambda/c 0.5/c+zeros(1,2*L)];  // weights for means
    Wm = zeros<fmat>(1, 2*numStates+1) + (0.5f/c);  // weights for means
    Wm(0) = lambda/c;
    Wc = Wm;
    Wc(0) = Wc(0) + (1 - alpha*alpha + beta);       // weights for covariance
    c = sqrt(c);

    fmat X = sigmas(fvec(particles.samples), P, c);  // sigma points around x
    utProcess(X, Wm, Wc, numStates, process->Q);
}
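// utProcess(...) is not shown in this snippet. Below is a hedged Armadillo sketch of the usual
// unscented transform it presumably performs: propagate each sigma point through the process
// model, take the Wm-weighted mean, and form the Wc-weighted covariance plus process noise Q.
// The process-model function pointer f and the name unscentedTransformSketch are hypothetical
// placeholders, not the actual implementation.
#include <armadillo>
using namespace arma;

void unscentedTransformSketch(fvec (*f)(const fvec&), const fmat& X,
                              const frowvec& Wm, const frowvec& Wc, const fmat& Q,
                              fvec& xMean, fmat& P, fmat& Xdev)
{
    // Propagate each sigma point (one per column) through the process model.
    fmat Y(X.n_rows, X.n_cols);
    for (uword k = 0; k < X.n_cols; ++k)
        Y.col(k) = f(X.col(k));

    xMean = Y * Wm.t();                          // weighted mean
    Xdev  = Y - repmat(xMean, 1, Y.n_cols);      // deviations from the mean
    P     = Xdev * diagmat(Wc) * Xdev.t() + Q;   // weighted covariance + process noise
}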
TMatrixD Chol (TVectorD& covV)
{
  int nCov = covV.GetNrows();
  int n = int((sqrt(8*nCov+1.)-1.)/2.+0.5);
  if ( nCov != n*(n+1)/2. ) {
    std::cout << "Chol: length of vector " << nCov << " is not n*(n+1)/2" << std::endl;
    return TMatrixD();
  }
  // get diagonal elements
  int ind(0);
  TVectorD sigmas(n);
  for ( int i=0; i<n; ++i ) {
    for ( int j=0; j<=i; ++j ) {
      if ( j == i )  sigmas[i] = covV(ind);
      ++ind;
    }
  }
  // fill cov matrix (could be more elegant ...)
  ind = 0;
  TMatrixDSym covMatrix(n);
  for ( int i=0; i<n; ++i ) {
    for ( int j=0; j<=i; ++j ) {
      if ( j == i )  covMatrix(i,i) = sigmas(i)*sigmas(i);
      else           covMatrix(i,j) = covMatrix(j,i) = covV(ind)*sigmas(i)*sigmas(j);
      ++ind;
    }
  }
  covMatrix.Print();

  TDecompChol tdc(covMatrix);
  bool worked = tdc.Decompose();
  if ( !worked ) {
    std::cout << "Decomposition failed" << std::endl;
    return TMatrixD();
  }
  TMatrixD matU = tdc.GetU();
  return matU;

  // //
  // // cross check with random generation
  // //
  // double sum0(0.);
  // TVectorD sum1(n);
  // TMatrixDSym sum2(n);
  // TRandom2 rgen;
  // TVectorD xrnd(n);
  // TVectorD xrndRot(n);
  // for ( unsigned int i=0; i<1000000; ++i ) {
  //   for ( unsigned int j=0; j<n; ++j )  xrnd(j) = 0.;
  //   for ( unsigned int j=0; j<n; ++j ) {
  //     TVectorD aux(n);
  //     for ( int k=0; k<n; ++k )  aux(k) = matU(j,k);
  //     xrnd += rgen.Gaus(0.,1.)*aux;
  //   }
  //   // xrnd *= matUT;
  //   sum0 += 1.;
  //   for ( unsigned int j0=0; j0<n; ++j0 ) {
  //     sum1(j0) += xrnd(j0);
  //     for ( unsigned int j1=0; j1<n; ++j1 ) {
  //       sum2(j0,j1) += xrnd(j0)*xrnd(j1);
  //     }
  //   }
  // }
  // for ( unsigned int j0=0; j0<n; ++j0 ) {
  //   printf("%10.3g",sum1(j0)/sum0);
  // }
  // printf(" sum1 \n");
  // printf("\n");
  // for ( unsigned int j0=0; j0<n; ++j0 ) {
  //   for ( unsigned int j1=0; j1<n; ++j1 ) {
  //     printf("%10.3g",sum2(j0,j1)/sum0);
  //   }
  //   printf(" sum2 \n");
  // }
  // return matU;
}
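// A hedged usage sketch of Chol for n = 2. Following how the covariance is rebuilt above, the
// packed vector is assumed to hold standard deviations on the diagonal positions and correlation
// coefficients off-diagonal; a correlated Gaussian draw is then U^T * g with g ~ N(0, I), as the
// commented-out cross check suggests. The example values are arbitrary.
#include <iostream>
#include "TVectorD.h"
#include "TMatrixD.h"
#include "TRandom3.h"

void cholUsageSketch()
{
  // Packed lower triangle for n = 2: (sigma_0, rho_10, sigma_1)
  TVectorD covV(3);
  covV(0) = 1.5;   // sigma_0
  covV(1) = 0.3;   // correlation between variables 0 and 1
  covV(2) = 2.0;   // sigma_1

  TMatrixD U = Chol(covV);          // upper-triangular factor, cov = U^T * U
  if (U.GetNrows() == 0) return;    // decomposition failed

  // Draw one correlated sample: x = U^T * g with independent standard normals g
  TRandom3 rgen;
  TVectorD g(2), x(2);
  for (int j = 0; j < 2; ++j) g(j) = rgen.Gaus(0., 1.);
  for (int j = 0; j < 2; ++j) {
    x(j) = 0.;
    for (int k = 0; k < 2; ++k) x(j) += U(k, j) * g(k);
  }
  std::cout << "sample: " << x(0) << " " << x(1) << std::endl;
}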
// -----------------------------------------------------------------------------
//
// -----------------------------------------------------------------------------
void StatsGenODFWidget::on_loadODFTextureBtn_clicked()
{
  QString file = angleFilePath->text();
  if(true == file.isEmpty())
  {
    return;
  }
  QFileInfo fi(angleFilePath->text());
  if(fi.exists() == false)
  {
    return;
  }

  size_t count = 0;
  QVector<float> e1s(count);
  QVector<float> e2s(count);
  QVector<float> e3s(count);
  QVector<float> weights(count);
  QVector<float> sigmas(count);

  QProgressDialog progress("Loading Data ....", "Cancel", 0, 3, this);
  progress.setWindowModality(Qt::WindowModal);
  progress.setMinimumDuration(2000);

  if (fi.suffix().compare(Ebsd::Ang::FileExt) == 0)
  {
    progress.setValue(1);
    progress.setLabelText("[1/3] Reading File ...");
    AngReader loader;
    loader.setFileName(angleFilePath->text());
    int err = loader.readFile();
    if(err < 0)
    {
      QMessageBox::critical(this, "Error loading the ANG file", loader.getErrorMessage(), QMessageBox::Ok);
      return;
    }
    float* phi1 = loader.getPhi1Pointer();
    float* phi = loader.getPhiPointer();
    float* phi2 = loader.getPhi2Pointer();
    int xDim = loader.getXDimension();
    int yDim = loader.getYDimension();
    count = xDim * yDim;
    e1s.resize(count);
    e2s.resize(count);
    e3s.resize(count);
    weights.resize(count);
    sigmas.resize(count);
    for(size_t i = 0; i < count; ++i)
    {
      e1s[i] = phi1[i];
      e2s[i] = phi[i];
      e3s[i] = phi2[i];
      weights[i] = 1.0;
      sigmas[i] = 0.0;
    }
  }
  else if (fi.suffix().compare(Ebsd::Ctf::FileExt) == 0)
  {
    progress.setValue(1);
    progress.setLabelText("[1/3] Reading File ...");
    CtfReader loader;
    loader.setFileName(angleFilePath->text());
    int err = loader.readFile();
    if(err < 0)
    {
      QMessageBox::critical(this, "Error loading the CTF file", loader.getErrorMessage(), QMessageBox::Ok);
      return;
    }
    float* phi1 = loader.getEuler1Pointer();
    float* phi = loader.getEuler2Pointer();
    float* phi2 = loader.getEuler3Pointer();
    int xDim = loader.getXDimension();
    int yDim = loader.getYDimension();
    count = xDim * yDim;
    e1s.resize(count);
    e2s.resize(count);
    e3s.resize(count);
    weights.resize(count);
    sigmas.resize(count);
    for(size_t i = 0; i < count; ++i)
    {
      e1s[i] = phi1[i];
      e2s[i] = phi[i];
      e3s[i] = phi2[i];
      weights[i] = 1.0;
      sigmas[i] = 0.0;
    }
  }
  else
  {
    progress.setValue(1);
    progress.setLabelText("[1/3] Reading File ...");
    AngleFileLoader::Pointer loader = AngleFileLoader::New();
    loader->setInputFile(angleFilePath->text());
    loader->setAngleRepresentation(angleRepresentation->currentIndex());
    loader->setFileAnglesInDegrees(anglesInDegrees->isChecked());
    loader->setOutputAnglesInDegrees(true);
    QString delim;
    int index = delimiter->currentIndex();
    switch(index)
    {
      case 0:
        delim = " ";
        break;
      case 1:
        delim = "\t";
        break;
      case 2:
        delim = ",";
        break;
      case 3:
        delim = ";";
        break;
      default:
        delim = " ";
    }
    loader->setDelimiter(delim);
    FloatArrayType::Pointer data = loader->loadData();
    if (loader->getErrorCode() < 0)
    {
      QMessageBox::critical(this, "Error Loading Angle data", loader->getErrorMessage(), QMessageBox::Ok);
      return;
    }
    count = data->getNumberOfTuples();
    e1s.resize(count);
    e2s.resize(count);
    e3s.resize(count);
    weights.resize(count);
    sigmas.resize(count);
    for(size_t i = 0; i < count; ++i)
    {
      e1s[i] = data->getComponent(i, 0);
      e2s[i] = data->getComponent(i, 1);
      e3s[i] = data->getComponent(i, 2);
      weights[i] = data->getComponent(i, 3);
      sigmas[i] = data->getComponent(i, 4);
    }
  }

  progress.setValue(2);
  progress.setLabelText("[2/3] Rendering Pole Figure ...");
  m_OdfBulkTableModel->removeRows(0, m_OdfBulkTableModel->rowCount());

#if 1
  m_OdfBulkTableModel->blockSignals(true);
  m_OdfBulkTableModel->setColumnData(SGODFTableModel::Euler1, e1s);
  m_OdfBulkTableModel->setColumnData(SGODFTableModel::Euler2, e2s);
  m_OdfBulkTableModel->setColumnData(SGODFTableModel::Euler3, e3s);
  m_OdfBulkTableModel->setColumnData(SGODFTableModel::Weight, weights);
  m_OdfBulkTableModel->setColumnData(SGODFTableModel::Sigma, sigmas);
  m_OdfBulkTableModel->blockSignals(false);
#endif

  on_m_CalculateODFBtn_clicked();
  progress.setValue(3);
}
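// Side note on the blockSignals(true)/blockSignals(false) pair guarding the bulk column updates
// above: if an early return were ever added between the two calls, the model's signals would stay
// blocked. A hedged alternative (not the code as written) is Qt's RAII helper QSignalBlocker,
// available since Qt 5.3, which restores the previous state automatically at end of scope:
//
//   {
//     QSignalBlocker blocker(m_OdfBulkTableModel);
//     m_OdfBulkTableModel->setColumnData(SGODFTableModel::Euler1, e1s);
//     // ... remaining setColumnData calls ...
//   }  // signals restored here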
/*************************************************************************
Linear regression

Variant of LRBuild which uses a vector of standard deviations (errors in
function values).

INPUT PARAMETERS:
    XY          -   training set, array [0..NPoints-1,0..NVars]:
                    * NVars columns - independent variables
                    * last column - dependent variable
    S           -   standard deviations (errors in function values)
                    array[0..NPoints-1], S[i]>0.
    NPoints     -   training set size, NPoints>NVars+1
    NVars       -   number of independent variables

OUTPUT PARAMETERS:
    Info        -   return code:
                    * -255, in case of unknown internal error
                    * -4, if internal SVD subroutine hasn't converged
                    * -1, if incorrect parameters were passed (NPoints<NVars+2, NVars<1).
                    * -2, if S[I]<=0
                    *  1, if subroutine successfully finished
    LM          -   linear model in the ALGLIB format. Use subroutines of
                    this unit to work with the model.
    AR          -   additional results

  -- ALGLIB --
     Copyright 02.08.2008 by Bochkanov Sergey
*************************************************************************/
void lrbuilds(const ap::real_2d_array& xy,
     const ap::real_1d_array& s,
     int npoints,
     int nvars,
     int& info,
     linearmodel& lm,
     lrreport& ar)
{
    ap::real_2d_array xyi;
    ap::real_1d_array x;
    ap::real_1d_array means;
    ap::real_1d_array sigmas;
    int i;
    int j;
    double v;
    int offs;
    double mean;
    double variance;
    double skewness;
    double kurtosis;

    //
    // Test parameters
    //
    if( npoints<=nvars+1||nvars<1 )
    {
        info = -1;
        return;
    }

    //
    // Copy data, add one more column (constant term)
    //
    xyi.setbounds(0, npoints-1, 0, nvars+1);
    for(i = 0; i <= npoints-1; i++)
    {
        ap::vmove(&xyi(i, 0), &xy(i, 0), ap::vlen(0,nvars-1));
        xyi(i,nvars) = 1;
        xyi(i,nvars+1) = xy(i,nvars);
    }

    //
    // Standardization
    //
    x.setbounds(0, npoints-1);
    means.setbounds(0, nvars-1);
    sigmas.setbounds(0, nvars-1);
    for(j = 0; j <= nvars-1; j++)
    {
        ap::vmove(x.getvector(0, npoints-1), xy.getcolumn(j, 0, npoints-1));
        calculatemoments(x, npoints, mean, variance, skewness, kurtosis);
        means(j) = mean;
        sigmas(j) = sqrt(variance);
        if( ap::fp_eq(sigmas(j),0) )
        {
            sigmas(j) = 1;
        }
        for(i = 0; i <= npoints-1; i++)
        {
            xyi(i,j) = (xyi(i,j)-means(j))/sigmas(j);
        }
    }

    //
    // Internal processing
    //
    lrinternal(xyi, s, npoints, nvars+1, info, lm, ar);
    if( info<0 )
    {
        return;
    }

    //
    // Un-standardization
    //
    offs = ap::round(lm.w(3));
    for(j = 0; j <= nvars-1; j++)
    {
        //
        // Constant term is updated (and its covariance too,
        // since it gets some variance from the J-th component)
        //
        lm.w(offs+nvars) = lm.w(offs+nvars)-lm.w(offs+j)*means(j)/sigmas(j);
        v = means(j)/sigmas(j);
        ap::vsub(&ar.c(nvars, 0), &ar.c(j, 0), ap::vlen(0,nvars), v);
        ap::vsub(ar.c.getcolumn(nvars, 0, nvars), ar.c.getcolumn(j, 0, nvars), v);

        //
        // J-th term is updated
        //
        lm.w(offs+j) = lm.w(offs+j)/sigmas(j);
        v = 1/sigmas(j);
        ap::vmul(&ar.c(j, 0), ap::vlen(0,nvars), v);
        ap::vmul(ar.c.getcolumn(j, 0, nvars), v);
    }
}
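// The un-standardization step above can be checked with a small, self-contained sketch: if a
// model was fitted on standardized inputs z_j = (x_j - mean_j)/sigma_j as y = sum_j w_j*z_j + b,
// then in original units the coefficients become w_j/sigma_j and the intercept becomes
// b - sum_j w_j*mean_j/sigma_j, which is the rescaling applied to lm.w above (the covariance
// updates are omitted here). The numbers below are arbitrary illustration values.
#include <cstdio>
#include <vector>

int main()
{
    // Model fitted on z_j = (x_j - mean_j)/sigma_j:  y = w0*z0 + w1*z1 + b
    std::vector<double> w     = { 2.0, -0.5 };
    std::vector<double> mean  = { 10.0, 3.0 };
    std::vector<double> sigma = { 4.0, 0.5 };
    double b = 1.0;

    // Back-transform: y = sum_j (w_j/sigma_j)*x_j + (b - sum_j w_j*mean_j/sigma_j)
    double b_orig = b;
    std::vector<double> w_orig(w.size());
    for (size_t j = 0; j < w.size(); ++j)
    {
        w_orig[j] = w[j] / sigma[j];
        b_orig   -= w[j] * mean[j] / sigma[j];
    }

    // Both parameterizations give the same prediction for any x:
    std::vector<double> xpt = { 12.0, 2.5 };
    double y_std = b, y_orig = b_orig;
    for (size_t j = 0; j < w.size(); ++j)
    {
        y_std  += w[j] * (xpt[j] - mean[j]) / sigma[j];
        y_orig += w_orig[j] * xpt[j];
    }
    std::printf("standardized: %.6f  original-scale: %.6f\n", y_std, y_orig);
    return 0;
}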
void pelt(Dataloader &dload, const CageParams& params)
{
    // Copy parameters for convenience
    int start = params.dataloader_params.start;
    int end = params.dataloader_params.end;
    int step = params.dataloader_params.step;
    double beta = params.beta;
    double K = 0;
    bool verbose = params.dataloader_params.verbose;
    string output_file = params.output_file;

    // Allocate vector for storing subproblem results
    vector<double> f((end - start) / step + 1, 0);
    f[0] = -1.0 * beta;

    // Store last prior changepoint positions for subproblems
    vector<int> cps((end - start) / step + 1, 0);
    cps[0] = start;

    vector<int> R(1);
    R[0] = start;

    int i;
    if (verbose)
        cout << "Progress:\n";

    /* Note on indexing:
       i is in genome coordinates
       R stores numbers as genome coordinates */

    int maxcosts = 0;
    int maxlength = 0;
    long long int total_length = 0;

    // Spawn the data loading thread
    thread t(&Dataloader::loadfunc, &dload);

    // Loop through the data
    for (i = start; i < end + 1; i += step) {
        if (beta == 0) {
            R[0] = i;
            continue;
        }

        maxcosts = (int)R.size() > maxcosts ? R.size() : maxcosts;
        int length_now = (i - *min_element(R.begin(), R.end()));
        maxlength = length_now > maxlength ? length_now : maxlength;

        if (verbose) {
            if ((i - start) % 100000 == 0) {
                printf("Position: %i\tMax Costs: %i\tMax Length: %i\tTotal length: %lld\n",
                       i, maxcosts, maxlength, total_length);
                maxcosts = 0;
                maxlength = 0;
                total_length = 0;
            }
        }

        // Deal with the centromere - area of no coverage
        if ((i > start) && (i < end - 1)) {
            if (dload.get_region(i, min(i + step, end)).empty_region()) {
                f[(i - start) / step] = f[(i - step - start) / step];
                cps[(i - start) / step] = cps[(i - step - start) / step];
                continue;
            }
        }

        vector<float> costs(R.size());
        vector<float> Fs(R.size());
        for (int j = 0; j < (int)R.size(); j++) {
            costs[j] = cost(dload.get_region(R[j], i));
            total_length += i - R[j];
            Fs[j] = f[(R[j] - start) / step];
        }

        vector<float> vals(costs.size());
        for (int j = 0; j < (int)vals.size(); j++)
            vals[j] = costs[j] + Fs[j];

        auto min_ptr = min_element(vals.begin(), vals.end());
        int idx = distance(vals.begin(), min_ptr);
        f[(i - start) / step] = *min_ptr + beta;
        cps[(i - start) / step] = R[idx];

        // Prune candidate changepoints that can never be optimal again
        vector<int> R_new(0);
        for (int j = 0; j < (int)vals.size(); j++) {
            if (vals[j] + K <= f[(i - start) / step]) {
                R_new.push_back(R[j]);
            }
        }
        R_new.push_back(i);
        R = move(R_new);
    }
    i -= step;

    vector<int> cps_n;
    if (beta != 0) {
        int pos;
        cps_n.push_back(end);
        cps_n.push_back(i);
        pos = cps[(i - start) / step];
        while (pos != start) {
            cps_n.push_back(pos);
            pos = cps[(pos - start) / step];
        }
        cps_n.push_back(start);
        reverse(cps_n.begin(), cps_n.end());

        // Take the unique elements
        auto it = unique(cps_n.begin(), cps_n.end());
        cps_n.resize(distance(cps_n.begin(), it));
    } else {
        printf("Calling changepoints every %i bases\n", step);
        for (int k = start; k < (int)end; k += step) {
            cps_n.push_back(k);
        }
        cps_n.push_back(end);
    }

    cout << "Num of changepoints: " << cps_n.size() << endl;

    // Get the parameter values for each segment
    vector<double> lambdas(cps_n.size()-1);
    vector<double> eps(cps_n.size()-1);
    vector<double> gammas(cps_n.size()-1);
    vector<double> iotas(cps_n.size()-1);
    vector<double> zetas(cps_n.size()-1);
    vector<double> mus(cps_n.size()-1);
    vector<double> sigmas(cps_n.size()-1);
    vector<int> lengths(cps_n.size()-1);

    int row = 0;
    for (auto i = cps_n.begin(); i != cps_n.end() - 1; ++i) {
        int left = *i;
        int right = *(i+1);
        SuffStats tot_stats = dload.get_region(left, right);
        lambdas[row] = (double)tot_stats.N / tot_stats.L;
        eps[row] = (tot_stats.tot_bases == 0) ? 0 :
            (double)tot_stats.tot_errors / tot_stats.tot_bases;
        gammas[row] = (double)tot_stats.N_SNPs / tot_stats.L;
        iotas[row] = (tot_stats.tot_bases == 0) ? 0 :
            (double)tot_stats.indels / tot_stats.tot_bases;
        zetas[row] = (tot_stats.N == 0) ? 0 :
            (double)tot_stats.zero_mapq / tot_stats.N;
        mus[row] = (tot_stats.n_inserts == 0) ? 0 :
            (double)tot_stats.inserts_sum / tot_stats.n_inserts;
        // Sample standard deviation of insert sizes; requires at least two inserts
        sigmas[row] = (tot_stats.n_inserts > 1) ?
            sqrt((tot_stats.inserts_sumsq -
                  pow((double)tot_stats.inserts_sum, 2) / tot_stats.n_inserts) /
                 (tot_stats.n_inserts - 1)) : 0;
        lengths[row] = right - left;
        row++;
    }

    if (t.joinable()) {
        t.join();
    }

    FILE *f_out;
    if (strcmp(output_file.c_str(), "") != 0) {
        f_out = fopen(output_file.c_str(), "w");
        if (f_out == nullptr) {
            throw runtime_error("Could not open file " + output_file);
        }
        for (row = 0; row < (int)cps_n.size() - 2; row++) {
            if ((lengths[row] != 0)) {
                fprintf(f_out, "%i\t%i\t%0.8f\t%0.8f\t%0.8f\t%0.8f\t%0.8f\n",
                        cps_n[row], lengths[row], lambdas[row], eps[row],
                        gammas[row], iotas[row], zetas[row]);
            }
        }
        fclose(f_out);
    }
}
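// For reference, the recursion used above, F(i) = min over candidates s of [F(s) + C(s,i)] + beta,
// with a candidate s kept only while F(s) + C(s,i) + K <= F(i), is the standard PELT algorithm.
// A minimal, self-contained sketch on a plain vector of values, using a least-squares segment
// cost (not the coverage/genotyping cost used by this program), might look like:
#include <vector>
#include <limits>
#include <algorithm>
#include <utility>

// Segment cost for the half-open segment [a, b): sum of squared deviations from the
// segment mean, computed in O(1) from prefix sums.
struct PrefixSums {
    std::vector<double> s, s2;
    explicit PrefixSums(const std::vector<double>& y)
        : s(y.size() + 1, 0.0), s2(y.size() + 1, 0.0) {
        for (size_t k = 0; k < y.size(); ++k) {
            s[k + 1]  = s[k]  + y[k];
            s2[k + 1] = s2[k] + y[k] * y[k];
        }
    }
    double cost(int a, int b) const {
        double n = b - a;
        double sum = s[b] - s[a];
        return (s2[b] - s2[a]) - sum * sum / n;
    }
};

// Minimal PELT with pruning constant K = 0; returns the changepoint positions (1..n).
std::vector<int> peltSketch(const std::vector<double>& y, double beta) {
    const int n = static_cast<int>(y.size());
    PrefixSums ps(y);
    std::vector<double> F(n + 1, 0.0);
    F[0] = -beta;
    std::vector<int> prev(n + 1, 0);
    std::vector<int> R = {0};                       // candidate changepoints
    const double K = 0.0;
    for (int t = 1; t <= n; ++t) {
        double best = std::numeric_limits<double>::infinity();
        int arg = 0;
        for (int s : R) {
            double v = F[s] + ps.cost(s, t);
            if (v < best) { best = v; arg = s; }
        }
        F[t] = best + beta;
        prev[t] = arg;
        // Prune: keep s only if it could still start the optimal last segment later on
        std::vector<int> Rnew;
        for (int s : R)
            if (F[s] + ps.cost(s, t) + K <= F[t]) Rnew.push_back(s);
        Rnew.push_back(t);
        R = std::move(Rnew);
    }
    // Backtrack the optimal changepoints
    std::vector<int> cps;
    for (int t = n; t > 0; t = prev[t]) cps.push_back(t);
    std::reverse(cps.begin(), cps.end());
    return cps;
}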