cv::PCA *PCA_LoadData(int blocks) { char path[40]; int rows, cols; sprintf(path, "./PCAeigenvectors%d.mat", blocks); FILE *fd = fopen(path, "r"); if (!fd) { perror("Error opening PCA eigenvectors file for loading"); return NULL; } fscanf(fd, "%d", &rows); fscanf(fd, "%d", &cols); cv::Mat eigenvectors(rows, cols, CV_64FC1); for (int j=0; j < eigenvectors.rows; j++) { for (int i=0; i < eigenvectors.cols; i++) { fscanf(fd, "%lf", &(eigenvectors.at<double>(j,i))); } } fclose(fd); sprintf(path, "./PCAmean%d.mat", blocks); fd = fopen(path, "r"); if (!fd) { perror("Error opening PCA mean file for loading"); return NULL; } fscanf(fd, "%d", &rows); fscanf(fd, "%d", &cols); cv::Mat mean(rows, cols, CV_64FC1); for (int j=0; j < mean.rows; j++) { for (int i=0; i < mean.cols; i++) { fscanf(fd, "%lf", &(mean.at<double>(j,i))); } } fclose(fd); cv::PCA *pca_obj = new cv::PCA(); pca_obj->eigenvectors = eigenvectors; pca_obj->mean = mean; return pca_obj; }
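A minimal usage sketch for the loader above (not part of the original code): it assumes a companion writer produced the two .mat files, that the stored mean/eigenvectors describe row-sample layout, and that the block count of 4 is purely illustrative.

// Hypothetical caller: project a sample through the loaded PCA basis.
cv::PCA *pca = PCA_LoadData(4);                      // 4 = illustrative block count
if (pca) {
    cv::Mat sample(1, pca->eigenvectors.cols, CV_64FC1, cv::Scalar(0.0)); // placeholder sample row
    cv::Mat projected = pca->project(sample);        // coordinates in the reduced PCA space
    cv::Mat restored  = pca->backProject(projected); // approximate reconstruction
    delete pca;
}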
bool eigen(const Mat2& M, Vec2& evals, Vec2 evecs[2]) { bool result = eigenvalues(M, evals); if( result ) eigenvectors(M, evals, evecs); return result; }
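For reference, the 2x2 problem handled by eigenvalues()/eigenvectors() above reduces to the characteristic polynomial; presumably the boolean result reports whether the discriminant is non-negative (i.e. the eigenvalues are real):

$$\lambda_{1,2} = \tfrac{1}{2}\Big(\operatorname{tr}M \pm \sqrt{(\operatorname{tr}M)^2 - 4\det M}\Big), \qquad (M - \lambda_i I)\,v_i = 0.$$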
/** Compute the natural frequencies and eigenvectors of the structure. Arguments: n - number of natural frequencies to return (unless n exceeds the total DOF of the structure, in which case all computed natural frequencies are returned). Return: a tuple (freqs, eigenvectors), where freqs is an array of frequencies (not necessarily of length n, as described above) and eigenvectors is the matrix of corresponding eigenvectors. **/ py::tuple computeNaturalFrequenciesAndEigenvectors(int n){ Vector freqs(n); Matrix eigenvectors(0, 0); beam->computeNaturalFrequencies(n, freqs, eigenvectors); return py::make_tuple(freqs, eigenvectors); }
Vector rmvn_robust_mt(RNG &rng, const Vector &mu, const SpdMatrix &V) { uint n = V.nrow(); Matrix eigenvectors(n, n); Vector eigenvalues = eigen(V, eigenvectors); for (uint i = 0; i < n; ++i) { // We're guaranteed that eigenvalues[i] is real and non-negative. We // can take the absolute value of eigenvalues[i] to guard against // spurious negative numbers close to zero. eigenvalues[i] = sqrt(fabs(eigenvalues[i])) * rnorm_mt(rng, 0, 1); } Vector ans(eigenvectors * eigenvalues); ans += mu; return ans; }
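A note on why this sampler has the right covariance, assuming eigen() returns the spectral decomposition V = Q diag(λ) Qᵀ with unit eigenvectors stored as the columns of `eigenvectors`:

$$x = \mu + Q\,\Lambda^{1/2} z,\quad z \sim N(0, I) \;\Rightarrow\; \operatorname{Cov}(x) = Q\,\Lambda\,Q^{\mathsf T} = V,$$

and taking fabs(eigenvalues[i]) before the square root only guards against tiny negative round-off values, as the comment says.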
int Zoltan_Get_Coordinates( ZZ *zz, int num_obj, /* Input: number of objects */ ZOLTAN_ID_PTR global_ids, /* Input: global IDs of objects */ ZOLTAN_ID_PTR local_ids, /* Input: local IDs of objects; may be NULL. */ int *num_dim, /* Output: dimension of coordinates */ double **coords /* Output: array of coordinates; malloc'ed by fn if NULL upon input. */ ) { char *yo = "Zoltan_Get_Coordinates"; int i,j,rc; int num_gid_entries = zz->Num_GID; int num_lid_entries = zz->Num_LID; int alloced_coords = 0; ZOLTAN_ID_PTR lid; /* Temporary pointers to local IDs; used to pass NULL to query functions when NUM_LID_ENTRIES == 0. */ double dist[3]; double im[3][3]; double deg_ratio; double x; int order[3]; int reduce_dimensions, d, axis_aligned; int target_dim; int ierr = ZOLTAN_OK; char msg[256]; ZZ_Transform *tran; ZOLTAN_TRACE_ENTER(zz, yo); /* Error check -- make sure needed callback functions are registered. */ if (zz->Get_Num_Geom == NULL || (zz->Get_Geom == NULL && zz->Get_Geom_Multi == NULL)) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Must register ZOLTAN_NUM_GEOM_FN and " "either ZOLTAN_GEOM_MULTI_FN or ZOLTAN_GEOM_FN"); goto End; } /* Get problem dimension. */ *num_dim = zz->Get_Num_Geom(zz->Get_Num_Geom_Data, &ierr); if (ierr != ZOLTAN_OK && ierr != ZOLTAN_WARN) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Error returned from ZOLTAN_GET_NUM_GEOM_FN"); goto End; } if (*num_dim < 0 || *num_dim > 3) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Invalid dimension returned from ZOLTAN_NUM_GEOM_FN"); goto End; } /* Get coordinates for object; allocate memory if not already provided. */ if (*num_dim > 0 && num_obj > 0) { if (*coords == NULL) { alloced_coords = 1; *coords = (double *) ZOLTAN_MALLOC(num_obj * (*num_dim) * sizeof(double)); if (*coords == NULL) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Memory error"); goto End; } } if (zz->Get_Geom_Multi != NULL) { zz->Get_Geom_Multi(zz->Get_Geom_Multi_Data, zz->Num_GID, zz->Num_LID, num_obj, global_ids, local_ids, *num_dim, *coords, &ierr); if (ierr != ZOLTAN_OK && ierr != ZOLTAN_WARN) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Error returned from ZOLTAN_GET_GEOM_MULTI_FN"); goto End; } } else { for (i = 0; i < num_obj; i++) { lid = (num_lid_entries ? &(local_ids[i*num_lid_entries]) : NULL); zz->Get_Geom(zz->Get_Geom_Data, num_gid_entries, num_lid_entries, global_ids + i*num_gid_entries, lid, (*coords) + i*(*num_dim), &ierr); if (ierr != ZOLTAN_OK && ierr != ZOLTAN_WARN) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Error returned from ZOLTAN_GET_GEOM_FN"); goto End; } } } } /* * For RCB, RIB, and HSFC: if REDUCE_DIMENSIONS was selected, compute the * center of mass and inertial matrix of the coordinates. * * For 3D problems: If the geometry is "flat", transform the points so the * two primary directions lie along the X and Y coordinate axes and project * to the Z=0 plane. If in addition the geometry is "skinny", project to * the X axis. (This creates a 2D or 1D problem respectively.) * * For 2D problems: If the geometry is essentially a line, transform it's * primary direction to the X axis and project to the X axis, yielding a * 1D problem. * * Return these points to the partitioning algorithm, in effect partitioning * in only the 2 (or 1) significant dimensions. 
*/ if (((*num_dim == 3) || (*num_dim == 2)) && ((zz->LB.Method==RCB) || (zz->LB.Method==RIB) || (zz->LB.Method==HSFC))){ Zoltan_Bind_Param(Reduce_Dim_Params, "KEEP_CUTS", (void *)&i); Zoltan_Bind_Param(Reduce_Dim_Params, "REDUCE_DIMENSIONS", (void *)&reduce_dimensions); Zoltan_Bind_Param(Reduce_Dim_Params, "DEGENERATE_RATIO", (void *)&deg_ratio); i = 0; reduce_dimensions = 0; deg_ratio = 10.0; Zoltan_Assign_Param_Vals(zz->Params, Reduce_Dim_Params, zz->Debug_Level, zz->Proc, zz->Debug_Proc); if (reduce_dimensions == 0){ goto End; } if (deg_ratio <= 1){ if (zz->Proc == 0){ ZOLTAN_PRINT_WARN(0, yo, "DEGENERATE_RATIO <= 1, setting it to 10.0"); } deg_ratio = 10.0; } if (zz->LB.Method == RCB){ tran = &(((RCB_STRUCT *)(zz->LB.Data_Structure))->Tran); } else if (zz->LB.Method == RIB){ tran = &(((RIB_STRUCT *)(zz->LB.Data_Structure))->Tran); } else{ tran = &(((HSFC_Data*)(zz->LB.Data_Structure))->tran); } d = *num_dim; if (tran->Target_Dim >= 0){ /* * On a previous load balancing call, we determined whether * or not the geometry was degenerate. If the geometry was * determined to be not degenerate, then we assume it is still * not degenerate, and we skip the degeneracy calculation. */ if (tran->Target_Dim > 0){ /* * The geometry *was* degenerate. We test the extent * of the geometry along the principal directions determined * last time to determine if it is still degenerate with that * orientation. If so, we transform the coordinates using the * same transformation we used last time. If not, we do the * entire degeneracy calculation again. */ if ((tran->Axis_Order[0] >= 0) && (tran->Axis_Order[1] >= 0) && (tran->Axis_Order[2] >= 0)){ axis_aligned = 1; } else{ axis_aligned = 0; } projected_distances(zz, *coords, num_obj, tran->CM, tran->Evecs, dist, d, axis_aligned, tran->Axis_Order); target_dim = get_target_dimension(dist, order, deg_ratio, d); if (target_dim > 0){ transform_coordinates(*coords, num_obj, d, tran); } else{ /* Sets Target_Dim to -1, flag to recompute degeneracy */ Zoltan_Initialize_Transformation(tran); } } } if (tran->Target_Dim < 0){ tran->Target_Dim = 0; /* * Get the center of mass and inertial matrix of coordinates. Ignore * vertex weights, we are only interested in geometry. Global operation. */ if (d == 2){ inertial_matrix2D(zz, *coords, num_obj, tran->CM, im); } else{ inertial_matrix3D(zz, *coords, num_obj, tran->CM, im); } /* * The inertial matrix is a 3x3 or 2x2 real symmetric matrix. Get its * three or two orthonormal eigenvectors. These usually indicate the * orientation of the geometry. */ rc = eigenvectors(im, tran->Evecs, d); if (rc){ if (zz->Proc == 0){ ZOLTAN_PRINT_WARN(0, yo, "REDUCE_DIMENSIONS calculation failed"); } goto End; } /* * Here we check to see if the eigenvectors are very close * to the coordinate axes. If so, we can more quickly * determine whether the geometry is degenerate, and also more * quickly transform the geometry to the lower dimensional * space. */ axis_aligned = 0; for (i=0; i<d; i++){ tran->Axis_Order[i] = -1; } for (j=0; j<d; j++){ for (i=0; i<d; i++){ x = fabs(tran->Evecs[i][j]); if (NEAR_ONE(x)){ tran->Axis_Order[j] = i; /* e'vector j is very close to i axis */ break; } } if (tran->Axis_Order[j] < 0){ break; } } if ((tran->Axis_Order[0] >= 0) && (tran->Axis_Order[1] >= 0) && (tran->Axis_Order[2] >= 0)){ axis_aligned = 1; } /* * Calculate the extent of the geometry along the three lines defined * by the direction of the eigenvectors through the center of mass.
*/ projected_distances(zz, *coords, num_obj, tran->CM, tran->Evecs, dist, d, axis_aligned, tran->Axis_Order); /* * Decide whether these distances indicate the geometry is * very flat in one or two directions. */ target_dim = get_target_dimension(dist, order, deg_ratio, d); if (target_dim > 0){ /* * Yes, geometry is degenerate */ if ((zz->Debug_Level > 0) && (zz->Proc == 0)){ if (d == 2){ sprintf(msg, "Geometry (~%lf x %lf), exceeds %lf to 1.0 ratio", dist[order[0]], dist[order[1]], deg_ratio); } else{ sprintf(msg, "Geometry (~%lf x %lf x %lf), exceeds %lf to 1.0 ratio", dist[order[0]], dist[order[1]], dist[order[2]], deg_ratio); } ZOLTAN_PRINT_INFO(zz->Proc, yo, msg); sprintf(msg, "We'll treat it as %d dimensional",target_dim); ZOLTAN_PRINT_INFO(zz->Proc, yo, msg); } if (axis_aligned){ /* ** Create new geometry, transforming the primary direction ** to the X-axis, and the secondary to the Y-axis. */ tran->Permutation[0] = tran->Axis_Order[order[0]]; if (target_dim == 2){ tran->Permutation[1] = tran->Axis_Order[order[1]]; } } else{ /* * Reorder the eigenvectors (they're the columns of evecs) from * longest projected distance to shorted projected distance. Compute * the transpose (the inverse) of the matrix. This will transform * the geometry to align along the X-Y plane, or along the X axis. */ for (i=0; i< target_dim; i++){ tran->Transformation[i][2] = 0.0; for (j=0; j<d; j++){ tran->Transformation[i][j] = tran->Evecs[j][order[i]]; } } for (i=target_dim; i< 3; i++){ for (j=0; j<3; j++){ tran->Transformation[i][j] = 0.0; } } } tran->Target_Dim = target_dim; transform_coordinates(*coords, num_obj, d, tran); } /* If geometry is very flat */ } /* If REDUCE_DIMENSIONS is true */ } /* If 2-D or 3-D rcb, rib or hsfc */ End: if (ierr != ZOLTAN_OK && ierr != ZOLTAN_WARN) { ZOLTAN_PRINT_ERROR(zz->Proc, yo, "Error found; no coordinates returned."); if (alloced_coords) ZOLTAN_FREE(coords); } ZOLTAN_TRACE_EXIT(zz, yo); return ierr; }
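A rough sketch of the degeneracy test driven by the code above, under the assumption that projected_distances() returns the spread of the point projections along each eigenvector of the inertial matrix: with the extents sorted as

$$d_{(1)} \ge d_{(2)} \ge d_{(3)},$$

the geometry is treated as k-dimensional (k < *num_dim) roughly when $d_{(1)}/d_{(j)}$ exceeds DEGENERATE_RATIO for the trailing directions j > k, which is what the "exceeds ... to 1.0 ratio" message reports.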
static void ambHessian( /* anisotropic radii & pos. gradient */ AMBHEMI *hp, FVECT uv[2], /* returned */ float ra[2], /* returned (optional) */ float pg[2] /* returned (optional) */ ) { static char memerrmsg[] = "out of memory in ambHessian()"; FVECT (*hessrow)[3] = NULL; FVECT *gradrow = NULL; FVECT hessian[3]; FVECT gradient; FFTRI fftr; int i, j; /* be sure to assign unit vectors */ VCOPY(uv[0], hp->ux); VCOPY(uv[1], hp->uy); /* clock-wise vertex traversal from sample POV */ if (ra != NULL) { /* initialize Hessian row buffer */ hessrow = (FVECT (*)[3])malloc(sizeof(FVECT)*3*(hp->ns-1)); if (hessrow == NULL) error(SYSTEM, memerrmsg); memset(hessian, 0, sizeof(hessian)); } else if (pg == NULL) /* bogus call? */ return; if (pg != NULL) { /* initialize form factor row buffer */ gradrow = (FVECT *)malloc(sizeof(FVECT)*(hp->ns-1)); if (gradrow == NULL) error(SYSTEM, memerrmsg); memset(gradient, 0, sizeof(gradient)); } /* compute first row of edges */ for (j = 0; j < hp->ns-1; j++) { comp_fftri(&fftr, hp, AI(hp,0,j), AI(hp,0,j+1)); if (hessrow != NULL) comp_hessian(hessrow[j], &fftr, hp->rp->ron); if (gradrow != NULL) comp_gradient(gradrow[j], &fftr, hp->rp->ron); } /* sum each row of triangles */ for (i = 0; i < hp->ns-1; i++) { FVECT hesscol[3]; /* compute first vertical edge */ FVECT gradcol; comp_fftri(&fftr, hp, AI(hp,i,0), AI(hp,i+1,0)); if (hessrow != NULL) comp_hessian(hesscol, &fftr, hp->rp->ron); if (gradrow != NULL) comp_gradient(gradcol, &fftr, hp->rp->ron); for (j = 0; j < hp->ns-1; j++) { FVECT hessdia[3]; /* compute triangle contributions */ FVECT graddia; double backg; backg = back_ambval(hp, AI(hp,i,j), AI(hp,i,j+1), AI(hp,i+1,j)); /* diagonal (inner) edge */ comp_fftri(&fftr, hp, AI(hp,i,j+1), AI(hp,i+1,j)); if (hessrow != NULL) { comp_hessian(hessdia, &fftr, hp->rp->ron); rev_hessian(hesscol); add2hessian(hessian, hessrow[j], hessdia, hesscol, backg); } if (gradrow != NULL) { comp_gradient(graddia, &fftr, hp->rp->ron); rev_gradient(gradcol); add2gradient(gradient, gradrow[j], graddia, gradcol, backg); } /* initialize edge in next row */ comp_fftri(&fftr, hp, AI(hp,i+1,j+1), AI(hp,i+1,j)); if (hessrow != NULL) comp_hessian(hessrow[j], &fftr, hp->rp->ron); if (gradrow != NULL) comp_gradient(gradrow[j], &fftr, hp->rp->ron); /* new column edge & paired triangle */ backg = back_ambval(hp, AI(hp,i+1,j+1), AI(hp,i+1,j), AI(hp,i,j+1)); comp_fftri(&fftr, hp, AI(hp,i,j+1), AI(hp,i+1,j+1)); if (hessrow != NULL) { comp_hessian(hesscol, &fftr, hp->rp->ron); rev_hessian(hessdia); add2hessian(hessian, hessrow[j], hessdia, hesscol, backg); if (i < hp->ns-2) rev_hessian(hessrow[j]); } if (gradrow != NULL) { comp_gradient(gradcol, &fftr, hp->rp->ron); rev_gradient(graddia); add2gradient(gradient, gradrow[j], graddia, gradcol, backg); if (i < hp->ns-2) rev_gradient(gradrow[j]); } } } /* release row buffers */ if (hessrow != NULL) free(hessrow); if (gradrow != NULL) free(gradrow); if (ra != NULL) /* extract eigenvectors & radii */ eigenvectors(uv, ra, hessian); if (pg != NULL) { /* tangential position gradient */ pg[0] = DOT(gradient, uv[0]); pg[1] = DOT(gradient, uv[1]); } }
void pclbo::LBOEstimation<PointT, NormalT>::compute() { typename pcl::KdTreeFLANN<PointT>::Ptr kdt(new pcl::KdTreeFLANN<PointT>()); kdt->setInputCloud(_cloud); const double avg_dist = pclbo::avg_distance<PointT>(10, _cloud, kdt); const double h = 5 * avg_dist; std::cout << "Average distance between points: " << avg_dist << std::endl; int points_with_mass = 0; double avg_mass = 0.0; B.resize(_cloud->size()); std::cout << "Computing the Mass matrix..." << std::flush; // Compute the mass matrix diagonal B for (int i = 0; i < _cloud->size(); i++) { const auto& point = _cloud->at(i); const auto& normal = _normals->at(i); const auto& normal_vector = normal.getNormalVector3fMap().template cast<double>(); if (!pcl::isFinite(point)) continue; std::vector<int> indices; std::vector<float> distances; kdt->radiusSearch(point, h, indices, distances); if (indices.size() < 4) { B[i] = 0.0; continue; } // Project the neighbor points in the tangent plane at p_i with normal n_i std::vector<Eigen::Vector3d> projected_points; for (const auto& neighbor_index : indices) { if (neighbor_index != i) { const auto& neighbor_point = _cloud->at(neighbor_index); projected_points.push_back(project(point, normal, neighbor_point)); } } assert(projected_points.size() >= 3); // Use the first vector to create a 2D basis Eigen::Vector3d u = projected_points[0]; u.normalize(); Eigen::Vector3d v = (u.cross(normal_vector)); v.normalize(); // Add the points to a 2D plane std::vector<Eigen::Vector2d> plane; // Add the point at the center plane.push_back(Eigen::Vector2d::Zero()); // Add the rest of the points for (const auto& projected : projected_points) { double x = projected.dot(u); double y = projected.dot(v); // Add the 2D point to the vector plane.push_back(Eigen::Vector2d(x, y)); } assert(plane.size() >= 4); // Compute the voronoi cell area of the point double area = VoronoiDiagram::area(plane); B[i] = area; avg_mass += area; points_with_mass++; } // Average mass if (points_with_mass > 0) { avg_mass /= static_cast<double>(points_with_mass); } // Set border points to have average mass for (auto& b : B) { if (b == 0.0) { b = avg_mass; } } std::cout << "done" << std::endl; std::cout << "Computing the stiffness matrix..." 
<< std::flush; std::vector<double> diag(_cloud->size(), 0.0); // Compute the stiffness matrix Q for (int i = 0; i < _cloud->size(); i++) { const auto& point = _cloud->at(i); if (!pcl::isFinite(point)) continue; std::vector<int> indices; std::vector<float> distances; kdt->radiusSearch(point, h, indices, distances); for (const auto& j : indices) { if (j != i) { const auto& neighbor = _cloud->at(j); double d = (neighbor.getVector3fMap() - point.getVector3fMap()).norm(); double w = B[i] * B[j] * (1.0 / (4.0 * M_PI * h * h)) * exp(-(d * d) / (4.0 * h)); I.push_back(i); J.push_back(j); S.push_back(w); diag[i] += w; } } } // Fill the diagonal as the negative sum of the rows for (int i = 0; i < diag.size(); i++) { I.push_back(i); J.push_back(i); S.push_back(-diag[i]); } // Compute the B^{-1}Q matrix Eigen::MatrixXd Q = Eigen::MatrixXd::Zero(_cloud->size(), _cloud->size()); for (int i = 0; i < I.size(); i++) { const int row = I[i]; const int col = J[i]; Q(row, col) = S[i]; } std::cout << "done" << std::endl; std::cout << "Computing eigenvectors" << std::endl; Eigen::Map<Eigen::VectorXd> B_vec(B.data(), B.size()); Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> ges; ges.compute(Q, B_vec.asDiagonal()); eigenvalues = ges.eigenvalues(); eigenfunctions = ges.eigenvectors(); // Sort the eigenvalues by magnitude std::vector<std::pair<double, int> > map_vector(eigenvalues.size()); for (auto i = 0; i < eigenvalues.size(); i++) { map_vector[i].first = std::abs(eigenvalues(i)); map_vector[i].second = i; } std::sort(map_vector.begin(), map_vector.end()); // truncate the first 100 eigenfunctions Eigen::MatrixXd eigenvectors(eigenfunctions.rows(), eigenfunctions.cols()); Eigen::VectorXd eigenvals(eigenfunctions.cols()); eigenvalues.resize(map_vector.size()); for (auto i = 0; i < map_vector.size(); i++) { const auto& pair = map_vector[i]; eigenvectors.col(i) = eigenfunctions.col(pair.second); eigenvals(i) = pair.first; } eigenfunctions = eigenvectors; eigenvalues = eigenvals; }
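The eigendecomposition step above solves a generalized symmetric eigenproblem: with Q the stiffness matrix and B the diagonal mass matrix assembled earlier, Eigen's GeneralizedSelfAdjointEigenSolver computes

$$Q\,\phi_k = \lambda_k\,B\,\phi_k,$$

and the code then reorders the pairs by |λ_k| before storing them back into eigenfunctions/eigenvalues.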
/** * Description not yet available. * \param */ void laplace_approximation_calculator:: do_newton_raphson_banded(function_minimizer * pfmin,double f_from_1, int& no_converge_flag) { //quadratic_prior * tmpptr=quadratic_prior::ptr[0]; //cout << tmpptr << endl; laplace_approximation_calculator::where_are_we_flag=2; double maxg=fabs(evaluate_function(uhat,pfmin)); laplace_approximation_calculator::where_are_we_flag=0; dvector uhat_old(1,usize); for(int ii=1;ii<=num_nr_iters;ii++) { // test newton raphson switch(hesstype) { case 3: bHess->initialize(); break; case 4: Hess.initialize(); break; default: cerr << "Illegal value for hesstype here" << endl; ad_exit(1); } grad.initialize(); //int check=initial_params::stddev_scale(scale,uhat); //check=initial_params::stddev_curvscale(curv,uhat); //max_separable_g=0.0; sparse_count = 0; step=get_newton_raphson_info_banded(pfmin); //if (bHess) // cout << "norm(*bHess) = " << norm(*bHess) << endl; //cout << "norm(Hess) = " << norm(Hess) << endl; //cout << grad << endl; //check_pool_depths(); if (!initial_params::mc_phase) cout << "Newton raphson " << ii << " "; if (quadratic_prior::get_num_quadratic_prior()>0) { quadratic_prior::get_cHessian_contribution(Hess,xsize); quadratic_prior::get_cgradient_contribution(grad,xsize); } int ierr=0; if (hesstype==3) { if (use_dd_nr==0) { banded_lower_triangular_dmatrix bltd=choleski_decomp(*bHess,ierr); if (ierr && no_converge_flag ==0) { no_converge_flag=1; //break; } if (ierr) { double oldval; evaluate_function(oldval,uhat,pfmin); uhat=banded_calculations_trust_region_approach(uhat,pfmin); } else { if (dd_nr_flag==0) { dvector v=solve(bltd,grad); step=-solve_trans(bltd,v); //uhat_old=uhat; uhat+=step; } else { #if defined(USE_DD_STUFF) int n=grad.indexmax(); maxg=fabs(evaluate_function(uhat,pfmin)); uhat=dd_newton_raphson2(grad,*bHess,uhat); #else cerr << "high precision Newton Raphson not implemented" << endl; ad_exit(1); #endif } maxg=fabs(evaluate_function(uhat,pfmin)); if (f_from_1< pfmin->lapprox->fmc1.fbest) { uhat=banded_calculations_trust_region_approach(uhat,pfmin); maxg=fabs(evaluate_function(uhat,pfmin)); } } } else { cout << "error not used" << endl; ad_exit(1); /* banded_symmetric_ddmatrix bHessdd=banded_symmetric_ddmatrix(*bHess); ddvector gradd=ddvector(grad); //banded_lower_triangular_ddmatrix bltdd=choleski_decomp(bHessdd,ierr); if (ierr && no_converge_flag ==0) { no_converge_flag=1; break; } if (ierr) { double oldval; evaluate_function(oldval,uhat,pfmin); uhat=banded_calculations_trust_region_approach(uhat,pfmin); maxg=fabs(evaluate_function(uhat,pfmin)); } else { ddvector v=solve(bHessdd,gradd); step=-make_dvector(v); //uhat_old=uhat; uhat=make_dvector(ddvector(uhat)+step); maxg=fabs(evaluate_function(uhat,pfmin)); if (f_from_1< pfmin->lapprox->fmc1.fbest) { uhat=banded_calculations_trust_region_approach(uhat,pfmin); maxg=fabs(evaluate_function(uhat,pfmin)); } } */ } if (maxg < 1.e-13) { break; } } else if (hesstype==4) { dvector step; # if defined(USE_ATLAS) if (!ad_comm::no_atlas_flag) { step=-atlas_solve_spd(Hess,grad,ierr); } else { dmatrix A=choleski_decomp_positive(Hess,ierr); if (!ierr) { step=-solve(Hess,grad); //step=-solve(A*trans(A),grad); } } if (!ierr) break; # else if (sparse_hessian_flag) { //step=-solve(*sparse_triplet,Hess,grad,*sparse_symbolic); dvector temp=solve(*sparse_triplet2,grad,*sparse_symbolic2,ierr); if (ierr) { step=-temp; } else { cerr << "matrix not pos definite in sparse choleski" << endl; pfmin->bad_step_flag=1; int on; int nopt; if 
((on=option_match(ad_comm::argc,ad_comm::argv,"-ieigvec",nopt)) >-1) { dmatrix M=make_dmatrix(*sparse_triplet2); ofstream ofs3("inner-eigvectors"); ofs3 << "eigenvalues and eigenvectors " << endl; dvector v=eigenvalues(M); dmatrix ev=trans(eigenvectors(M)); ofs3 << "eigenvectors" << endl; int i; for (i=1;i<=ev.indexmax();i++) { ofs3 << setw(4) << i << " " << setshowpoint() << setw(14) << setprecision(10) << v(i) << " " << setshowpoint() << setw(14) << setprecision(10) << ev(i) << endl; } } } //cout << norm2(step-tmpstep) << endl; //dvector step1=-solve(Hess,grad); //cout << norm2(step-step1) << endl; } else { step=-solve(Hess,grad); } # endif if (pfmin->bad_step_flag) break; uhat_old=uhat; uhat+=step; double maxg_old=maxg; maxg=fabs(evaluate_function(uhat,pfmin)); if (maxg>maxg_old) { uhat=uhat_old; evaluate_function(uhat,pfmin); break; } if (maxg < 1.e-13) { break; } } if (sparse_hessian_flag==0) { for (int i=1;i<=usize;i++) { y(i+xsize)=uhat(i); } } else { for (int i=1;i<=usize;i++) { value(y(i+xsize))=uhat(i); } } } }
/** Symmetrize and invert the hessian */ void function_minimizer::hess_inv(void) { initial_params::set_inactive_only_random_effects(); int nvar=initial_params::nvarcalc(); // get the number of active parameters independent_variables x(1,nvar); initial_params::xinit(x); // get the initial values into the x vector //double f; dmatrix hess(1,nvar,1,nvar); uistream ifs("admodel.hes"); int file_nvar = 0; ifs >> file_nvar; if (nvar != file_nvar) { cerr << "Number of active variables in file admodel.hes is wrong" << endl; } for (int i = 1;i <= nvar; i++) { ifs >> hess(i); if (!ifs) { cerr << "Error reading line " << i << " of the hessian" << " in routine hess_inv()" << endl; exit(1); } } int hybflag = 0; ifs >> hybflag; dvector sscale(1,nvar); ifs >> sscale; if (!ifs) { cerr << "Error reading sscale" << " in routine hess_inv()" << endl; } double maxerr=0.0; for (int i = 1;i <= nvar; i++) { for (int j=1;j<i;j++) { double tmp=(hess(i,j)+hess(j,i))/2.; double tmp1=fabs(hess(i,j)-hess(j,i)); tmp1/=(1.e-4+fabs(hess(i,j))+fabs(hess(j,i))); if (tmp1>maxerr) maxerr=tmp1; hess(i,j)=tmp; hess(j,i)=tmp; } } /* if (maxerr>1.e-2) { cerr << "warning -- hessian approximation is poor" << endl; } */ for (int i = 1;i <= nvar; i++) { int zero_switch=0; for (int j=1;j<=nvar;j++) { if (hess(i,j)!=0.0) { zero_switch=1; } } if (!zero_switch) { cerr << " Hessian is 0 in row " << i << endl; cerr << " This means that the derivative is probably identically 0 " " for this parameter" << endl; } } int ssggnn; ln_det(hess,ssggnn); int on1=0; { ofstream ofs3((char*)(ad_comm::adprogram_name + adstring(".eva"))); { dvector se=eigenvalues(hess); ofs3 << setshowpoint() << setw(14) << setprecision(10) << "unsorted:\t" << se << endl; se=sort(se); ofs3 << setshowpoint() << setw(14) << setprecision(10) << "sorted:\t" << se << endl; if (se(se.indexmin())<=0.0) { negative_eigenvalue_flag=1; cout << "Warning -- Hessian does not appear to be" " positive definite" << endl; } } ivector negflags(0,hess.indexmax()); int num_negflags=0; { int on = option_match(ad_comm::argc,ad_comm::argv,"-eigvec"); on1=option_match(ad_comm::argc,ad_comm::argv,"-spmin"); if (on > -1 || on1 >-1 ) { ofs3 << setshowpoint() << setw(14) << setprecision(10) << eigenvalues(hess) << endl; dmatrix ev=trans(eigenvectors(hess)); ofs3 << setshowpoint() << setw(14) << setprecision(10) << ev << endl; for (int i=1;i<=ev.indexmax();i++) { double lam=ev(i)*hess*ev(i); ofs3 << setshowpoint() << setw(14) << setprecision(10) << lam << " " << ev(i)*ev(i) << endl; if (lam<0.0) { num_negflags++; negflags(num_negflags)=i; } } if ( (on1>-1) && (num_negflags>0)) // we will try to get away from { // saddle point negative_eigenvalue_flag=0; spminflag=1; if(negdirections) { delete negdirections; } negdirections = new dmatrix(1,num_negflags); for (int i=1;i<=num_negflags;i++) { (*negdirections)(i)=ev(negflags(i)); } } int on2 = option_match(ad_comm::argc,ad_comm::argv,"-cross"); if (on2>-1) { // saddle point dmatrix cross(1,ev.indexmax(),1,ev.indexmax()); for (int i = 1;i <= ev.indexmax(); i++) { for (int j=1;j<=ev.indexmax();j++) { cross(i,j)=ev(i)*ev(j); } } ofs3 << endl << " e(i)*e(j) "; ofs3 << endl << cross << endl; } } } if (spminflag==0) { if (num_negflags==0) { hess=inv(hess); int on=0; if ( (on=option_match(ad_comm::argc,ad_comm::argv,"-eigvec"))>-1) { int i; ofs3 << "choleski decomp of correlation" << endl; dmatrix ch=choleski_decomp(hess); for (i=1;i<=ch.indexmax();i++) ofs3 << ch(i)/norm(ch(i)) << endl; ofs3 << "parameterization of choleski decomp of correlation" << endl; for
(i=1;i<=ch.indexmax();i++) { dvector tmp=ch(i)/norm(ch(i)); ofs3 << tmp(1,i)/tmp(i) << endl; } } } } } if (spminflag==0) { if (on1<0) { for (int i = 1;i <= nvar; i++) { if (hess(i,i) <= 0.0) { hess_errorreport(); ad_exit(1); } } } { adstring tmpstring="admodel.cov"; if (ad_comm::wd_flag) tmpstring = ad_comm::adprogram_name + ".cov"; uostream ofs((char*)tmpstring); ofs << nvar << hess; ofs << gradient_structure::Hybrid_bounded_flag; ofs << sscale; } } }
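For context on the admodel.cov file written above: once hess has been replaced by inv(hess), the stored matrix is the usual asymptotic covariance estimate for the active parameters,

$$\widehat{\operatorname{Cov}}(\hat\theta) \approx H^{-1}, \qquad \operatorname{SE}(\hat\theta_i) \approx \sqrt{(H^{-1})_{ii}},$$

with H the symmetrized Hessian of the objective at the optimum.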
/// Execute algorithm. void Schrodinger1D::exec() { double startX = get("StartX"); double endX = get("EndX"); if (endX <= startX) { throw std::invalid_argument("StartX must be <= EndX"); } IFunction_sptr VPot = getClass("VPot"); chebfun vpot( 0, startX, endX ); vpot.bestFit( *VPot ); size_t nBasis = vpot.n() + 1; std::cerr << "n=" << nBasis << std::endl; //if (n < 3) { nBasis = 200; vpot.resize( nBasis ); } const double beta = get("Beta"); auto kinet = new ChebCompositeOperator; kinet->addRight( new ChebTimes(-beta) ); kinet->addRight( new ChebDiff2 ); auto hamiltonian = new ChebPlus; hamiltonian->add('+', kinet ); hamiltonian->add('+', new ChebTimes(VPot) ); GSLMatrix L; hamiltonian->createMatrix( vpot.getBase(), L ); GSLVector d; GSLMatrix v; L.diag( d, v ); std::vector<double> norms = vpot.baseNorm(); assert(norms.size() == L.size1()); assert(norms.size() == L.size2()); for(size_t i = 0; i < norms.size(); ++i) { double factor = 1.0 / norms[i]; for(size_t j = i; j < norms.size(); ++j) { v.multiplyBy(i,j,factor); } } // eigenvectors orthogonality check // GSLMatrix v1 = v; // GSLMatrix tst; // tst = Tr(v1) * v; // std::cerr << tst << std::endl; std::vector<size_t> indx(L.size1()); getSortedIndex( d, indx ); auto eigenvalues = API::TableWorkspace_ptr(dynamic_cast<API::TableWorkspace*>( API::WorkspaceFactory::instance().create("TableWorkspace")) ); eigenvalues->setRowCount(nBasis); setProperty("Eigenvalues", eigenvalues); eigenvalues->addColumn("double","N"); auto nColumn = static_cast<API::TableColumn<double>*>(eigenvalues->getColumn("N").get()); nColumn->asNumeric()->setPlotRole(API::NumericColumn::X); auto& nc = nColumn->data(); eigenvalues->addDoubleColumn("Energy"); auto eColumn = static_cast<API::TableColumn<double>*>(eigenvalues->getColumn("Energy").get()); eColumn->asNumeric()->setPlotRole(API::NumericColumn::Y); auto& ec = eigenvalues->getDoubleData("Energy"); boost::scoped_ptr<ChebfunVector> eigenvectors(new ChebfunVector); chebfun fun0(nBasis,startX,endX); ChebFunction_sptr theSum(new ChebFunction(fun0)); // collect indices of spurious eigenvalues to move them to the back std::vector<size_t> spurious; // index for good eigenvalues size_t n = 0; for(size_t j = 0; j < nBasis; ++j) { size_t i = indx[j]; chebfun fun(fun0); fun.setP(v,i); // check eigenvalues for spurious ones chebfun dfun(fun); dfun.square(); double norm = dfun.integr(); // I am not sure that it's a solid condition if ( norm < 0.999999 ) { // bad eigenvalue spurious.push_back(j); } else { nc[n] = double(n); ec[n] = d[i]; eigenvectors->add(ChebFunction_sptr(new ChebFunction(fun))); // test sum of functions squares *theSum += dfun; // chebfun dfun(fun); // hamiltonian->apply(fun,dfun); // dfun *= fun; // std::cerr << "ener["<<n<<"]=" << ec[n] << ' ' << norm << ' ' << dfun.integr() << std::endl; ++n; } } GSLVector eigv; ChebfunVector *eigf = NULL; improve(hamiltonian, eigenvectors.get(), eigv, &eigf); eigenvalues->setRowCount( eigv.size() ); for(size_t i = 0; i < eigv.size(); ++i) { nc[i] = double(i); ec[i] = eigv[i]; } eigf->add(theSum); setProperty("Eigenvectors",ChebfunVector_sptr(eigf)); //makeQuadrature(eigf); }
// This is the model equation for the timeframe RANSAC // B.K.P. Horn's closed form Absolute Orientation method (1987 paper) // The convention used here is: right = (1/scale) * RMat * left + TMat int Photogrammetry::absoluteOrientation(vector<cv::Point3d> & left, vector<cv::Point3d> & right, cv::Mat & RMat, cv::Mat & TMat, double & scale) { //check if both vectors have the same number of size if (left.size() != right.size()) { cerr << "Sizes don't match" << endl; return -1; } //compute the mean of the left and right set of points cv::Point3d leftmean, rightmean; leftmean.x = 0; leftmean.y = 0; leftmean.z = 0; rightmean.x = 0; rightmean.y = 0; rightmean.z = 0; for (int i = 0; i < left.size(); i++) { leftmean.x += left[i].x; leftmean.y += left[i].y; leftmean.z += left[i].z; rightmean.x += right[i].x; rightmean.y += right[i].y; rightmean.z += right[i].z; } leftmean.x /= left.size(); leftmean.y /= left.size(); leftmean.z /= left.size(); rightmean.x /= right.size(); rightmean.y /= right.size(); rightmean.z /= right.size(); cv::Mat leftmeanMat(3,1,CV_64F); cv::Mat rightmeanMat(3,1,CV_64F); leftmeanMat.at<double>(0,0) = leftmean.x; leftmeanMat.at<double>(0,1) = leftmean.y; leftmeanMat.at<double>(0,2) = leftmean.z; rightmeanMat.at<double>(0,0) = rightmean.x; rightmeanMat.at<double>(0,1) = rightmean.y; rightmeanMat.at<double>(0,2) = rightmean.z; //normalize all points for (int i = 0; i < left.size(); i++) { left[i].x -= leftmean.x; left[i].y -= leftmean.y; left[i].z -= leftmean.z; right[i].x -= rightmean.x; right[i].y -= rightmean.y; right[i].z -= rightmean.z; } //compute scale (use the symmetrical solution) double Sl = 0; double Sr = 0; // this is the symmetrical version of the scale ! for (int i = 0; i < left.size(); i++) { Sl += left[i].x*left[i].x + left[i].y*left[i].y + left[i].z*left[i].z; Sr += right[i].x*right[i].x + right[i].y*right[i].y + right[i].z*right[i].z; } scale = sqrt(Sr/Sl); // cout << "Scale: " << scale << endl; //create M matrix double M[3][3];// = {0.0}; /* // I believe this is wrong, since not summing over all left right elements, just for the last element ! 
KM Nov 21 for (int i = 0; i < left.size(); i++) { M[0][0] = left[i].x*right[i].x; M[0][1] = left[i].x*right[i].y; M[0][2] = left[i].x*right[i].z; M[1][0] = left[i].y*right[i].x; M[1][1] = left[i].y*right[i].y; M[1][2] = left[i].y*right[i].z; M[2][0] = left[i].z*right[i].x; M[2][1] = left[i].z*right[i].y; M[2][2] = left[i].z*right[i].z; } */ M[0][0] = 0; M[0][1] = 0; M[0][2] = 0; M[1][0] = 0; M[1][1] = 0; M[1][2] = 0; M[2][0] = 0; M[2][1] = 0; M[2][2] = 0; for (int i = 0; i < left.size(); i++) { M[0][0] += left[i].x*right[i].x; M[0][1] += left[i].x*right[i].y; M[0][2] += left[i].x*right[i].z; M[1][0] += left[i].y*right[i].x; M[1][1] += left[i].y*right[i].y; M[1][2] += left[i].y*right[i].z; M[2][0] += left[i].z*right[i].x; M[2][1] += left[i].z*right[i].y; M[2][2] += left[i].z*right[i].z; } //create N matrix cv::Mat N = cv::Mat::zeros(4,4,CV_64F); N.at<double>(0,0) = M[0][0] + M[1][1] + M[2][2]; N.at<double>(0,1) = M[1][2] - M[2][1]; N.at<double>(0,2) = M[2][0] - M[0][2]; N.at<double>(0,3) = M[0][1] - M[1][0]; N.at<double>(1,0) = M[1][2] - M[2][1]; N.at<double>(1,1) = M[0][0] - M[1][1] - M[2][2]; N.at<double>(1,2) = M[0][1] + M[1][0]; N.at<double>(1,3) = M[2][0] + M[0][2]; N.at<double>(2,0) = M[2][0] - M[0][2]; N.at<double>(2,1) = M[0][1] + M[1][0]; N.at<double>(2,2) = -M[0][0] + M[1][1] - M[2][2]; N.at<double>(2,3) = M[1][2] + M[2][1]; N.at<double>(3,0) = M[0][1] - M[1][0]; N.at<double>(3,1) = M[2][0] + M[0][2]; N.at<double>(3,2) = M[1][2] + M[2][1]; N.at<double>(3,3) = -M[0][0] - M[1][1] + M[2][2]; // cout << "N: " << N << endl; //compute eigenvalues cv::Mat eigenvalues(1,4,CV_64FC1); cv::Mat eigenvectors(4,4,CV_64FC1); // cout << "eigenvalues: \n" << eigenvalues << endl; if (!cv::eigen(N, eigenvalues, eigenvectors)) { cerr << "eigen failed" << endl; return -1; } // cout << "Eigenvalues:\n" << eigenvalues << endl; // cout << "Eigenvectors:\n" << eigenvectors << endl; //compute quaterion as maximum eigenvector double q[4]; q[0] = eigenvectors.at<double>(0,0); q[1] = eigenvectors.at<double>(0,1); q[2] = eigenvectors.at<double>(0,2); q[3] = eigenvectors.at<double>(0,3); /* // I believe this changed with the openCV implementation, eigenvectors are stored in row-order ! 
q[0] = eigenvectors.at<double>(0,0); q[1] = eigenvectors.at<double>(1,0); q[2] = eigenvectors.at<double>(2,0); q[3] = eigenvectors.at<double>(3,0); */ double absOfEigVec = sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3]); q[0] /= absOfEigVec; q[1] /= absOfEigVec; q[2] /= absOfEigVec; q[3] /= absOfEigVec; cv::Mat qMat(4,1,CV_64F,q); // cout << "q: " << qMat << endl; //compute Rotation matrix RMat.at<double>(0,0) = q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3]; RMat.at<double>(0,1) = 2*(q[1]*q[2] - q[0]*q[3]); RMat.at<double>(0,2) = 2*(q[1]*q[3] + q[0]*q[2]); RMat.at<double>(1,0) = 2*(q[2]*q[1] + q[0]*q[3]); RMat.at<double>(1,1) = q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3]; RMat.at<double>(1,2) = 2*(q[2]*q[3] - q[0]*q[1]); RMat.at<double>(2,0) = 2*(q[3]*q[1] - q[0]*q[2]); RMat.at<double>(2,1) = 2*(q[2]*q[3] + q[0]*q[1]); RMat.at<double>(2,2) = q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]; // cout <<"R:\n" << RMat << endl; // cout << "Det: " << determinant(RMat) << endl; //find translation cv::Mat tempMat(3,1,CV_64F); //gemm(RMat, leftmeanMat, -1.0, rightmeanMat, 1.0, TMat); // enforcing scale of 1, since same scales in both frames // The convention used here is: right = (1/scale) * RMat * left + TMat TMat = -(1/scale) * RMat*leftmeanMat + rightmeanMat; // gemm(RMat, leftmeanMat, -1.0 * scale, rightmeanMat, 1.0, TMat); // cout << "Translation: " << TMat << endl; return 0; }
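A minimal calling sketch for absoluteOrientation() (not from the original source): the point values are made up, RMat/TMat are pre-allocated because the routine writes into them with at<double>(), and the Photogrammetry object is assumed to be default-constructible.

std::vector<cv::Point3d> left  = { cv::Point3d(0,0,0), cv::Point3d(1,0,0), cv::Point3d(0,1,0) };
std::vector<cv::Point3d> right = { cv::Point3d(1,1,1), cv::Point3d(2,1,1), cv::Point3d(1,2,1) }; // left shifted by (1,1,1)
cv::Mat RMat(3, 3, CV_64F), TMat(3, 1, CV_64F);
double scale = 1.0;
Photogrammetry pg; // hypothetical construction
if (pg.absoluteOrientation(left, right, RMat, TMat, scale) == 0) { // note: demeans left/right in place
    // Convention from above: right ~= (1/scale) * RMat * left + TMat
    std::cout << "R:\n" << RMat << "\nT:\n" << TMat << "\nscale: " << scale << std::endl;
}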
bool PivotCalibration2::ComputeSpinCalibration(bool snapRotation) { if (this->ToolToReferenceMatrices.size() < 10) { this->ErrorText = "Not enough input transforms are available"; return false; } if (this->GetMaximumToolOrientationDifferenceDeg() < this->MinimumOrientationDifferenceDeg) { this->ErrorText = "Not enough variation in the input transforms"; return false; } // Setup our system to find the axis of rotation unsigned int rows = 3, columns = 3; vnl_matrix<double> A(rows, columns, 0); vnl_matrix<double> I(3, 3, 0); I.set_identity(); vnl_matrix<double> RI(rows, columns); // this will store the maximum difference in orientation between the first transform and all the other transforms double maximumOrientationDifferenceDeg = 0; std::vector< vtkSmartPointer<vtkMatrix4x4> >::const_iterator previt = this->ToolToReferenceMatrices.end(); for (std::vector< vtkSmartPointer<vtkMatrix4x4> >::const_iterator it = this->ToolToReferenceMatrices.begin(); it != this->ToolToReferenceMatrices.end(); it++) { if (previt == this->ToolToReferenceMatrices.end()) { previt = it; continue; // No comparison to make for the first matrix } vtkSmartPointer< vtkMatrix4x4 > itinverse = vtkSmartPointer< vtkMatrix4x4 >::New(); vtkMatrix4x4::Invert((*it), itinverse); vtkSmartPointer< vtkMatrix4x4 > instRotation = vtkSmartPointer< vtkMatrix4x4 >::New(); vtkMatrix4x4::Multiply4x4(itinverse, (*previt), instRotation); for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { RI(i, j) = instRotation->GetElement(i, j); } } RI = RI - I; A = A + RI.transpose() * RI; previt = it; } // Note: If the needle orientation protocol changes, only the definitions of shaftAxis and secondaryAxes need to be changed // Define the shaft axis and the secondary shaft axis // Current needle orientation protocol dictates: shaft axis -z, orthogonal axis +x // If StylusX is parallel to ShaftAxis then: shaft axis -z, orthogonal axis +y vnl_vector<double> shaftAxis_Shaft(columns, 0); shaftAxis_Shaft(0) = 0; shaftAxis_Shaft(1) = 0; shaftAxis_Shaft(2) = -1; vnl_vector<double> orthogonalAxis_Shaft(columns, 0); orthogonalAxis_Shaft(0) = 1; orthogonalAxis_Shaft(1) = 0; orthogonalAxis_Shaft(2) = 0; vnl_vector<double> backupAxis_Shaft(columns, 0); backupAxis_Shaft(0) = 0; backupAxis_Shaft(1) = 1; backupAxis_Shaft(2) = 0; // Find the eigenvector associated with the smallest eigenvalue // This is the best axis of rotation over all instantaneous rotations vnl_matrix<double> eigenvectors(columns, columns, 0); vnl_vector<double> eigenvalues(columns, 0); vnl_symmetric_eigensystem_compute(A, eigenvectors, eigenvalues); // Note: eigenvectors are ordered in increasing eigenvalue ( 0 = smallest, end = biggest ) vnl_vector<double> shaftAxis_ToolTip(columns, 0); shaftAxis_ToolTip(0) = eigenvectors(0, 0); shaftAxis_ToolTip(1) = eigenvectors(1, 0); shaftAxis_ToolTip(2) = eigenvectors(2, 0); shaftAxis_ToolTip.normalize(); // Snap the direction vector to be exactly aligned with one of the coordinate axes // This is if the sensor is known to be parallel to one of the axis, just not which one if (snapRotation) { int closestCoordinateAxis = element_product(shaftAxis_ToolTip, shaftAxis_ToolTip).arg_max(); shaftAxis_ToolTip.fill(0); shaftAxis_ToolTip.put(closestCoordinateAxis, 1); // Doesn't matter the direction, will be sorted out in the next step } // Make sure it is in the correct direction (opposite the StylusTipToStylus translation) vnl_vector<double> toolTipToToolTranslation(3); toolTipToToolTranslation(0) = this->ToolTipToToolMatrix->GetElement(0, 3); 
toolTipToToolTranslation(1) = this->ToolTipToToolMatrix->GetElement(1, 3); toolTipToToolTranslation(2) = this->ToolTipToToolMatrix->GetElement(2, 3); if (dot_product(shaftAxis_ToolTip, toolTipToToolTranslation) > 0) { shaftAxis_ToolTip = shaftAxis_ToolTip * (-1); } //set the RMSE this->SpinRMSE = (A * shaftAxis_ToolTip).rms(); // If the secondary axis 1 is parallel to the shaft axis in the tooltip frame, then use secondary axis 2 vnl_vector<double> orthogonalAxis_ToolTip; double angle = acos(dot_product(shaftAxis_ToolTip, orthogonalAxis_Shaft)); // Force angle to be between -pi/2 and +pi/2 if (angle > vtkMath::Pi() / 2) { angle -= vtkMath::Pi(); } if (angle < -vtkMath::Pi() / 2) { angle += vtkMath::Pi(); } if (fabs(angle) > vtkMath::RadiansFromDegrees(PARALLEL_ANGLE_THRESHOLD_DEGREES)) // If shaft axis and orthogonal axis are not parallel { orthogonalAxis_ToolTip = orthogonalAxis_Shaft; } else { orthogonalAxis_ToolTip = backupAxis_Shaft; } // Do the registration to find the appropriate rotation orthogonalAxis_ToolTip = orthogonalAxis_ToolTip - dot_product(orthogonalAxis_ToolTip, shaftAxis_ToolTip) * shaftAxis_ToolTip; orthogonalAxis_ToolTip.normalize(); // Register X,Y,O points in the two coordinate frames (only spherical registration - since pure rotation) vnl_matrix<double> ToolTipPoints(3, 3, 0.0); vnl_matrix<double> ShaftPoints(3, 3, 0.0); ToolTipPoints.put(0, 0, shaftAxis_ToolTip(0)); ToolTipPoints.put(0, 1, shaftAxis_ToolTip(1)); ToolTipPoints.put(0, 2, shaftAxis_ToolTip(2)); ToolTipPoints.put(1, 0, orthogonalAxis_ToolTip(0)); ToolTipPoints.put(1, 1, orthogonalAxis_ToolTip(1)); ToolTipPoints.put(1, 2, orthogonalAxis_ToolTip(2)); ToolTipPoints.put(2, 0, 0); ToolTipPoints.put(2, 1, 0); ToolTipPoints.put(2, 2, 0); ShaftPoints.put(0, 0, shaftAxis_Shaft(0)); ShaftPoints.put(0, 1, shaftAxis_Shaft(1)); ShaftPoints.put(0, 2, shaftAxis_Shaft(2)); ShaftPoints.put(1, 0, orthogonalAxis_Shaft(0)); ShaftPoints.put(1, 1, orthogonalAxis_Shaft(1)); ShaftPoints.put(1, 2, orthogonalAxis_Shaft(2)); ShaftPoints.put(2, 0, 0); ShaftPoints.put(2, 1, 0); ShaftPoints.put(2, 2, 0); vnl_svd<double> ShaftToToolTipRegistrator(ShaftPoints.transpose() * ToolTipPoints); vnl_matrix<double> V = ShaftToToolTipRegistrator.V(); vnl_matrix<double> U = ShaftToToolTipRegistrator.U(); vnl_matrix<double> Rotation = V * U.transpose(); // Make sure the determinant is positive (i.e. +1) double determinant = vnl_determinant(Rotation); if (determinant < 0) { // Switch the sign of the third column of V if the determinant is not +1 // This is the recommended approach from Huang et al. 1987 V.put(0, 2, -V.get(0, 2)); V.put(1, 2, -V.get(1, 2)); V.put(2, 2, -V.get(2, 2)); Rotation = V * U.transpose(); } // Set the elements of the output matrix for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { this->ToolTipToToolMatrix->SetElement(i, j, Rotation[i][j]); } } this->ErrorText.clear(); return true; }
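The shaft-axis estimate above has a simple least-squares reading: with each instRotation R_i the relative rotation between consecutive poses, the accumulated matrix and the chosen axis are

$$A = \sum_i (R_i - I)^{\mathsf T}(R_i - I), \qquad \hat v = \arg\min_{\|v\|=1} v^{\mathsf T} A v = \arg\min_{\|v\|=1} \sum_i \|(R_i - I)\,v\|^2,$$

so the eigenvector of A with the smallest eigenvalue is the direction least disturbed by the observed rotations, i.e. the common spin axis, and SpinRMSE is the corresponding residual measure.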
void CCA_logit(bool perm, vector<vector<int> > & blperm, Set & S, Plink & P) { /////////////// // Output results ofstream EPI; if (!perm) { string f = par::output_file_name+".genepi"; P.printLOG("\nWriting gene-based epistasis tests to [ " + f + " ]\n"); EPI.open(f.c_str(), ios::out); EPI.precision(4); EPI << setw(12) << "NIND" << " " << setw(12) << "GENE1" << " " << setw(12) << "GENE2" << " " << setw(12) << "NSNP1" << " " << setw(12) << "NSNP2" << " " << setw(12) << "P" << " " << "\n"; } ////////////////////////////////// // Canonical correlation analysis int ns = P.snpset.size(); // Consider each pair of genes for (int s1=0; s1 < ns-1; s1++) { for (int s2 = s1+1; s2 < ns; s2++) { //////////////////////////////////////////////////////// // Step 1. Construct covariance matrix (cases and controls together) // And partition covariance matrix: // S_11 S_21 // S_12 S_22 int n1=0, n2=0; vector<vector<double> > sigma(0); vector<double> mean(0); vector<CSNP*> pSNP(0); ///////////////////////////// // List of SNPs for both loci for (int l=0; l<P.snpset[s1].size(); l++) { if ( S.cur[s1][l] ) { pSNP.push_back( P.SNP[ P.snpset[s1][l] ] ); n1++; } } for (int l=0; l<P.snpset[s2].size(); l++) { if ( S.cur[s2][l] ) { pSNP.push_back( P.SNP[ P.snpset[s2][l] ] ); n2++; } } int n12 = n1 + n2; int ne = n1 < n2 ? n1 : n2; /////////////////////////////////// // Construct covariance matrix (cases and controls together) P.setFlags(false); vector<Individual*>::iterator person = P.sample.begin(); while ( person != P.sample.end() ) { (*person)->flag = true; person++; } int nind = calcGENEPIMeanVariance(pSNP, n1,n2, false, &P, mean, sigma, P.sample , blperm[s1], blperm[s2] ); /////////////////////////// // Partition covariance matrix vector<vector<double> > I11; vector<vector<double> > I11b; vector<vector<double> > I12; vector<vector<double> > I21; vector<vector<double> > I22; vector<vector<double> > I22b; sizeMatrix( I11, n1, n1); sizeMatrix( I11b, n1, n1); sizeMatrix( I12, n1, n2); sizeMatrix( I21, n2, n1); sizeMatrix( I22, n2, n2); sizeMatrix( I22b, n2, n2); // For step 4b (eigenvectors for gene2) for (int i=0; i<n1; i++) for (int j=0; j<n1; j++) { I11[i][j] = sigma[i][j]; I11b[i][j] = sigma[i][j]; } for (int i=0; i<n1; i++) for (int j=0; j<n2; j++) I12[i][j] = sigma[i][n1+j]; for (int i=0; i<n2; i++) for (int j=0; j<n1; j++) I21[i][j] = sigma[n1+i][j]; for (int i=0; i<n2; i++) for (int j=0; j<n2; j++) { I22[i][j] = sigma[n1+i][n1+j]; I22b[i][j] = sigma[n1+i][n1+j]; } //////////////////////////////////////////////////////// // Step 2. Calculate the p x p matrix M1 = inv(sqrt(sig11)) %*% sig12 %*% inv(sig22) %*% sig21 %*% inv(sqrt(sig11)) bool flag = true; I11 = msqrt(I11); I11 = svd_inverse(I11,flag); I22 = svd_inverse(I22,flag); I22b = msqrt(I22b);// For Step 4b I22b = svd_inverse(I22b,flag); I11b = svd_inverse(I11b,flag); matrix_t tmp; matrix_t M1; multMatrix(I11, I12, tmp); multMatrix(tmp, I22, M1); multMatrix(M1, I21, tmp); multMatrix(tmp, I11, M1); //////////////////////////////////////////////////////// // Step 4a. Calculate the p eigenvalues and p x p eigenvectors of // M (e). These are required to compute the coefficients used to // build the p canonical variates a[k] for gene1 (see below) double max_cancor = 0.90; // Compute evalues and evectors Eigen gene1_eigen = eigenvectors(M1); // Sort evalues for gene 1. 
(the first p of these equal the first p of gene 2 (ie M2), if they are sorted) vector<double> sorted_eigenvalues_gene1 = gene1_eigen.d; sort(sorted_eigenvalues_gene1.begin(),sorted_eigenvalues_gene1.end(),greater<double>()); // Position of the largest canonical correlation that is < // max_cancor in the sorted vector of eigenvalues. This will be // needed to use the right gene1 and gene2 coefficients to build // the appropriate canonical variates. double cancor1=0; int cancor1_pos; for (int i=0; i<n1; i++) { if ( sqrt(sorted_eigenvalues_gene1[i]) > cancor1 && sqrt(sorted_eigenvalues_gene1[i]) < max_cancor ) { cancor1 = sqrt(sorted_eigenvalues_gene1[i]); cancor1_pos = i; break; } } // Display largest canonical correlation and its position // cout << "Largest canonical correlation [position]\n" // << cancor1 << " [" << cancor1_pos << "]" << "\n\n" ; // Sort evectors. Rows must be ordered according to cancor value (highest first) matrix_t sorted_eigenvectors_gene1 = gene1_eigen.z; vector<int> order_eigenvalues_gene1(n1); for (int i=0; i<n1; i++) { // Determine position of the vector associated with the ith cancor for (int j=0; j<n1; j++) { if (gene1_eigen.d[j]==sorted_eigenvalues_gene1[i]) { if (i==0) { order_eigenvalues_gene1[i]=j; break; } else { if (j!=order_eigenvalues_gene1[i-1]) { order_eigenvalues_gene1[i]=j; break; } } } } } for (int i=0; i<n1; i++) { sorted_eigenvectors_gene1[i] = gene1_eigen.z[order_eigenvalues_gene1[i]]; } // cout << "Eigenvector matrix - unsorted:\n"; // display(gene1_eigen.z); //cout << "Eigenvector matrix - sorted:\n"; //display(sorted_eigenvectors_gene1); //////////////////////////////////////////////////////// // Step 4b. Calculate the q x q eigenvectors of M2 (f). These are // required to compute the coefficients used to build the p // canonical variates b[k] for gene2 (see below). 
The first p are // given by: f[k] = (1/sqrt(eigen[k])) * inv_sqrt_I22 %*% I21 %*% // inv_sqrt_sig11 %*% e[k] for (k in 1:p) { e.vectors.gene2[,k] = // (1/sqrt(e.values[k])) * inv.sqrt.sig22 %*% sig21 %*% // inv.sqrt.sig11 %*% e.vectors.gene1[,k] } matrix_t M2; multMatrix(I22b, I21, tmp); multMatrix(tmp, I11b, M2); multMatrix(M2, I12, tmp); multMatrix(tmp, I22b, M2); Eigen gene2_eigen = eigenvectors(M2); //cout << "Eigenvalues Gene 2 - unsorted:\n"; //display(gene2_eigen.d); // Sort evalues for gene2 vector<double> sorted_eigenvalues_gene2 = gene2_eigen.d; sort(sorted_eigenvalues_gene2.begin(),sorted_eigenvalues_gene2.end(),greater<double>()); // Sort eigenvectors for gene2 matrix_t sorted_eigenvectors_gene2 = gene2_eigen.z; vector<int> order_eigenvalues_gene2(n2); for (int i=0; i<n2; i++) { // Determine position of the vector associated with the ith cancor for (int j=0; j<n2; j++) { if (gene2_eigen.d[j]==sorted_eigenvalues_gene2[i]) { if (i==0) { order_eigenvalues_gene2[i]=j; break; } else { if (j!=order_eigenvalues_gene2[i-1]) { order_eigenvalues_gene2[i]=j; break; } } } } } for (int i=0; i<n2; i++) { sorted_eigenvectors_gene2[i] = gene2_eigen.z[order_eigenvalues_gene2[i]]; } //cout << "Eigenvector matrix Gene 2 - unsorted:\n"; //display(gene2_eigen.z); //cout << "Eigenvector matrix Gene 2 - sorted:\n"; //display(sorted_eigenvectors_gene2); //exit(0); ////////////////////////////////////////////////////////////////////////////////// // Step 5 - Calculate the gene1 (pxp) and gene2 (pxq) coefficients // used to create the canonical variates associated with the p // canonical correlations transposeMatrix(gene1_eigen.z); transposeMatrix(gene2_eigen.z); matrix_t coeff_gene1; matrix_t coeff_gene2; multMatrix(gene1_eigen.z, I11, coeff_gene1); multMatrix(gene2_eigen.z, I22b, coeff_gene2); //cout << "Coefficients for Gene 1:\n"; //display(coeff_gene1); //cout << "Coefficients for Gene 2:\n"; //display(coeff_gene2); //exit(0); /////////////////////////////////////////////////////////////////////// // Step 6 - Compute the gene1 and gene2 canonical variates // associated with the highest canonical correlation NOTE: the // original variables of data need to have the mean subtracted first! // Otherwise, the resulting correlation between variate.gene1 and // variate.gene1 != estimated cancor. // For each individual, eg compos.gene1 = // evector.gene1[1]*SNP1.gene1 + evector.gene1[2]*SNP2.gene1 + ... 
///////////////////////////////// // Consider each SNP in gene1 vector<double> gene1(nind); for (int j=0; j<n1; j++) { CSNP * ps = pSNP[j]; /////////////////////////// // Iterate over individuals for (int i=0; i< P.n ; i++) { // Only need to look at one perm set bool a1 = ps->one[i]; bool a2 = ps->two[i]; if ( a1 ) { if ( a2 ) // 11 homozygote { gene1[i] += (1 - mean[j]) * coeff_gene1[order_eigenvalues_gene1[cancor1_pos]][j]; } else // 12 { gene1[i] += (0 - mean[j]) * coeff_gene1[order_eigenvalues_gene1[cancor1_pos]][j]; } } else { if ( a2 ) // 21 { gene1[i] += (0 - mean[j]) * coeff_gene1[order_eigenvalues_gene1[cancor1_pos]][j]; } else // 22 homozygote { gene1[i] += (-1 - mean[j]) * coeff_gene1[order_eigenvalues_gene1[cancor1_pos]][j]; } } } // Next individual } // Next SNP in gene1 ///////////////////////////////// // Consider each SNP in gene2 vector<double> gene2(P.n); int cur_snp = -1; for (int j=n1; j<n1+n2; j++) { cur_snp++; CSNP * ps = pSNP[j]; // Iterate over individuals for (int i=0; i<P.n; i++) { // Only need to look at one perm set bool a1 = ps->one[i]; bool a2 = ps->two[i]; if ( a1 ) { if ( a2 ) // 11 homozygote { gene2[i] += (1 - mean[j]) * coeff_gene2[order_eigenvalues_gene2[cancor1_pos]][cur_snp]; } else // 12 { gene2[i] += (0 - mean[j]) * coeff_gene2[order_eigenvalues_gene2[cancor1_pos]][cur_snp]; } } else { if ( a2 ) // 21 { gene2[i] += (0 - mean[j]) * coeff_gene2[order_eigenvalues_gene2[cancor1_pos]][cur_snp]; } else // 22 homozygote { gene2[i] += (-1 - mean[j]) * coeff_gene2[order_eigenvalues_gene2[cancor1_pos]][cur_snp]; } } } // Next individual } // Next SNP in gene2 // Store gene1.variate and gene2.variate in the multiple_covariates field of P.sample // TO DO: NEED TO CHECK IF FIELDS ARE EMPTY FIRST! for (int i=0; i<P.n; i++) { P.sample[i]->clist.resize(2); P.sample[i]->clist[0] = gene1[i]; P.sample[i]->clist[1] = gene2[i]; } /////////////////////////////////////////////// // STEP 7 - Logistic or linear regression epistasis test // Model * lm; if (par::bt) { LogisticModel * m = new LogisticModel(& P); lm = m; } else { LinearModel * m = new LinearModel(& P); lm = m; } // No SNPs used lm->hasSNPs(false); // Set missing data lm->setMissing(); // Main effect of GENE1 1. Assumes that the variable is in position 0 of the clist vector lm->addCovariate(0); lm->label.push_back("GENE1"); // Main effect of GENE 2. Assumes that the variable is in position 1 of the clist vector lm->addCovariate(1); lm->label.push_back("GENE2"); // Epistasis lm->addInteraction(1,2); lm->label.push_back("EPI"); // Build design matrix lm->buildDesignMatrix(); // Prune out any remaining missing individuals // No longer needed (check) // lm->pruneY(); // Fit linear model lm->fitLM(); // Did model fit okay? lm->validParameters(); // Obtain estimates and statistic lm->testParameter = 3; // interaction vector_t b = lm->getCoefs(); double chisq = lm->getStatistic(); double logit_pvalue = chiprobP(chisq,1); // Clean up delete lm; ///////////////////////////// // OUTPUT EPI << setw(12) << nind << " " << setw(12) << P.setname[s1] << " " << setw(12) << P.setname[s2] << " " << setw(12) << n1 << " " << setw(12) << n2 << " " << setw(12) << logit_pvalue << " " << "\n"; } // End of loop over genes2 } // End of loop over genes1 EPI.close(); } // End of CCA_logit()
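For reference, Steps 2-4 above implement the standard CCA eigenproblem: the canonical correlations between the two SNP sets are

$$\rho_k = \sqrt{\lambda_k\!\left(\Sigma_{11}^{-1/2}\,\Sigma_{12}\,\Sigma_{22}^{-1}\,\Sigma_{21}\,\Sigma_{11}^{-1/2}\right)},$$

which is why the code takes the square root of the sorted eigenvalues of M1 and then picks the largest value below max_cancor, presumably to skip near-collinear (spurious) directions.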
Eigen::Eigen() { vector<double> eigenvalues(3); vector<vector<double>> eigenvectors(3, vector<double>(3)); }
ArrayXd BaseSolver::calc_spatial_ldos(float target_energy, float broadening) { return num::match2sp<ArrayX, ArrayXX>( eigenvalues(), eigenvectors(), compute::CalcSpatialLDOS{target_energy, broadening} ); }
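A sketch of the quantity match2sp is asked to accumulate here, assuming the usual definition of a spatially resolved LDOS with a broadening kernel g of width `broadening` (e.g. a Gaussian):

$$\mathrm{LDOS}(\mathbf r, E) = \sum_n |\psi_n(\mathbf r)|^2\, g(E - E_n),$$

evaluated at E = target_energy, with E_n taken from eigenvalues() and the ψ_n from the columns of eigenvectors().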