/* Robust absolute orientation (rigid alignment of pts1 onto pts2) using
 * Tukey-style biweights: residuals above mean + 3*stddev of the first pass
 * are rejected, the remaining points are down-weighted smoothly. */
void absOrientRobustTukey(const int npts, const double pts1[], const double pts2[],
		double R[], double t[], int maxIter) {
	assert(npts >= 3);
	double* w = new double[npts];
	double* d = new double[npts];
	for (int i = 0; i < npts; i++)
		w[i] = 1.0;

	double ad = 0, sv = 0, th = 0;
	for (int k = 0; k < maxIter; k++) {
		absOrientWeighted(npts, w, pts1, pts2, R, t);
		if (k == 0) {
			//get the average residual
			ad = 0;
			for (int i = 0; i < npts; i++) {
				double M[3];
				rigidTrans(R, t, pts1 + 3 * i, M);
				d[i] = dist3(M, pts2 + 3 * i);
				ad += d[i];
			}
			ad /= npts;
			//get the standard deviation of the residuals
			sv = 0;
			for (int i = 0; i < npts; i++) {
				double v = d[i] - ad;
				sv += v * v;
			}
			sv /= (npts - 1);
			sv = sqrt(sv);
			//rejection threshold
			th = ad + 3 * sv;
		}
		//logInfo("-------(%g,%g) --- \n", ad, sv);
		for (int i = 0; i < npts; i++) {
			if (k > 0) {
				double M[3];
				rigidTrans(R, t, pts1 + 3 * i, M);
				d[i] = dist3(M, pts2 + 3 * i);
			}
			if (d[i] > th) {
				w[i] = 0;
			} else {
				//Tukey biweight: w = (1 - (d/th)^2)^2
				double dr = d[i] / th;
				w[i] = (1 - dr * dr);
				w[i] *= w[i];
			}
			//test
			//logInfo("[%d]:%g %g\n", i, d[i], w[i]);
		}
	}
	delete[] w;
	delete[] d;
}
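/* Usage sketch (not part of the original code): align a measured point set
 * "meas" to a reference set "ref" with the robust solver above.  Both arrays
 * are packed x,y,z triplets of length 3*npts; the recovered transform maps
 * meas into the frame of ref, i.e. R*meas[i] + t ~= ref[i].  The iteration
 * count is an arbitrary choice for this sketch. */
static void alignExample(int npts, const double meas[], const double ref[],
		double R[9], double t[3]) {
	const int maxIter = 10; //number of reweighting passes
	absOrientRobustTukey(npts, meas, ref, R, t, maxIter);
}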
double getCameraDistance(const double R1[9], const double t1[3],
		const double R2[9], const double t2[3]) {
	double org1[3], org2[3];
	getCameraCenter(R1, t1, org1);
	getCameraCenter(R2, t2, org2);
	return dist3(org1, org2);
}
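/* getCameraCenter is not listed here.  A minimal sketch (not part of the
 * original code), assuming the usual convention x_cam = R * x_world + t with
 * R stored row-major, so the camera center is the point that maps to the
 * origin, C = -R^T * t: */
static void getCameraCenterSketch(const double R[9], const double t[3], double org[3]) {
	//org = -R^T * t  (each column of R dotted with t, negated)
	org[0] = -(R[0] * t[0] + R[3] * t[1] + R[6] * t[2]);
	org[1] = -(R[1] * t[0] + R[4] * t[1] + R[7] * t[2]);
	org[2] = -(R[2] * t[0] + R[5] * t[1] + R[8] * t[2]);
}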
void FixedConstraint::Fix() {
	rA = rAorig;
	rA.rot(bodyA->form->alpha);
	rB = rBorig;
	rB.rot(bodyB->form->alpha);
	Vector2 dist = bodyB->form->point + rB - bodyA->form->point - rA;
	double delta = dist.norm2() - init_dist_;
	if (fabs(delta) > DBL_EPSILON) {
		dist.normalize2();
		//push the anchor points back toward their initial separation,
		//split between the bodies according to their inverse masses
		Vector2 fix_dist = dist * delta;
		bodyA->form->point = bodyA->form->point + fix_dist * bodyA->iMass * 0.7;
		bodyB->form->point = bodyB->form->point - fix_dist * bodyB->iMass * 0.7;
		Vector3 dist3(fix_dist.v1, fix_dist.v2, 0);
		Vector3 rA3(rA.v1, rA.v2, 0);
		Vector3 rB3(rB.v1, rB.v2, 0);
		//bodyA->form->alpha += rA3.cross(dist3).v3 * bodyA->iInert * 0.01;
		//bodyB->form->alpha += rB3.cross(dist3).v3 * bodyB->iInert * 0.01;
	}
}
void FixedConstraint::init() {
	rA = rAorig;
	rA.rot(bodyA->form->alpha);
	rB = rBorig;
	rB.rot(bodyB->form->alpha);
	//std::cout << "Ra: " << rA.v1 << " " << rA.v2 << " Rb: " << rB.v1 << " " << rB.v2 << std::endl;
	Vector2 dist = (bodyB->form->point + rB - bodyA->form->point - rA);
	dist.normalize2();
	Vector3 dist3(dist.v1, dist.v2, 0);
	Vector3 rA3(rA.v1, rA.v2, 0);
	Vector3 rB3(rB.v1, rB.v2, 0);
	//constraint-space inverse mass (K = J M^-1 J^T) along the anchor-to-anchor direction
	A[0][0] = (bodyA->iMass + bodyB->iMass)
			+ bodyA->iInert * (dist3.cross(rA3).v3) * (dist3.cross(rA3).v3)
			+ bodyB->iInert * (dist3.cross(rB3).v3) * (dist3.cross(rB3).v3);
	//relative velocity of the anchor points projected onto that direction
	rel_vel_ = dist * (bodyB->point_velocity(bodyB->form->point + rB)
			- bodyA->point_velocity(bodyA->form->point + rA));
	Eta[0] = -rel_vel_;
	/*if (rel_vel_ > 0.1)
		std::cout << "A: " << A[0][0] << " rel vel: " << Eta[0] << std::endl;*/
}
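/* Note (not from the original source): A[0][0] above is the standard one-row
 * term K = J M^-1 J^T of a distance constraint along the unit direction n
 * (= "dist"):
 *     K = 1/mA + 1/mB + (n x rA)^2 / IA + (n x rB)^2 / IB,
 * and Eta[0] = -v_rel is the desired velocity correction.  A velocity-level
 * solver would then apply the impulse lambda = Eta[0] / A[0][0] along n,
 * roughly as sketched below (apply_impulse is a hypothetical helper): */
/*
double lambda = Eta[0] / A[0][0];   //impulse magnitude along dist
Vector2 P = dist * lambda;          //dist is the unit direction computed in init()
bodyA->apply_impulse(P * -1.0, rA); //hypothetical: updates linear and angular velocity
bodyB->apply_impulse(P, rB);
*/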
// TEST 6 - WORLD-TO-EXTRINSICS AND EXTRINSICS-TO-WORLD TRANSFORMATIONS
bool test6(vcg::Shotd shot1, vcg::Shotd shot2, vcg::Point3d p1, vcg::Point3d p2) {
	vcg::Matrix44d WtoE1 = shot1.GetWorldToExtrinsicsMatrix();
	vcg::Matrix44d WtoE2 = shot2.GetWorldToExtrinsicsMatrix();
	vcg::Matrix44d EtoW1 = shot1.GetExtrinsicsToWorldMatrix();
	vcg::Matrix44d EtoW2 = shot2.GetExtrinsicsToWorldMatrix();

	vcg::Matrix44d I1 = WtoE1 * EtoW1;
	vcg::Matrix44d I2 = WtoE2 * EtoW2;
	vcg::Matrix44d I3 = EtoW1 * WtoE1;
	vcg::Matrix44d I4 = EtoW2 * WtoE2;

	if (checkIdentity(I1) > precision) return false;
	if (checkIdentity(I2) > precision) return false;
	if (checkIdentity(I3) > precision) return false;
	if (checkIdentity(I4) > precision) return false;

	vcg::Point3d axisX(1.0, 0.0, 0.0);
	vcg::Point3d axisY(0.0, 1.0, 0.0);
	vcg::Point3d axisZ(0.0, 0.0, 1.0);

	vcg::Point3d vx = EtoW1 * axisX;
	vcg::Point3d vy = EtoW1 * axisY;
	vcg::Point3d vz = EtoW1 * axisZ;

	if (dist3(vx, shot1.Extrinsics.Tra() + shot1.Extrinsics.Rot().GetRow3(0)) > precision) return false;
	if (dist3(vy, shot1.Extrinsics.Tra() + shot1.Extrinsics.Rot().GetRow3(1)) > precision) return false;
	if (dist3(vz, shot1.Extrinsics.Tra() + shot1.Extrinsics.Rot().GetRow3(2)) > precision) return false;

	return true;
}
// TEST 3 - DEPTH COMPUTATION
///////////////////////////////////////////////////////////////////////////////
bool test3(vcg::Shotd shot1, vcg::Shotd shot2, vcg::Point3d p1, vcg::Point3d p2) {
	vcg::Point2d p1proj, p2proj;
	p1proj = shot1.Project(p1);
	p2proj = shot2.Project(p2);

	vcg::Point3d p1unproj, p2unproj;
	double depth1 = shot1.Depth(p1);
	double depth2 = shot2.Depth(p2);
	p1unproj = shot1.UnProject(p1proj, depth1);
	p2unproj = shot2.UnProject(p2proj, depth2);

	if (dist3(p1, p1unproj) > precision) return false;
	if (dist3(p2, p2unproj) > precision) return false;

	return true;
}
// TEST 2 - PROJECTION AND UNPROJECTION
///////////////////////////////////////////////////////////////////////////////
bool test2(vcg::Shotd shot1, vcg::Shotd shot2, vcg::Point3d p1, vcg::Point3d p2) {
	vcg::Point2d p1proj, p2proj;
	p1proj = shot1.Project(p1);
	p2proj = shot2.Project(p2);

	vcg::Point3d p1unproj, p2unproj;
	vcg::Point3d pcam1, pcam2;
	pcam1 = shot1.ConvertWorldToCameraCoordinates(p1);
	p1unproj = shot1.UnProject(p1proj, pcam1[2]);
	pcam2 = shot2.ConvertWorldToCameraCoordinates(p2);
	p2unproj = shot2.UnProject(p2proj, pcam2[2]);

	if (dist3(p1, p1unproj) > precision) return false;
	if (dist3(p2, p2unproj) > precision) return false;

	return true;
}
/* Robust absolute orientation with an (approximate) L1 cost: iteratively
 * reweighted least squares with per-point weight w = 1/(residual + eps). */
void absOrientRobustL1(const int npts, const double pts1[], const double pts2[],
		double R[], double t[], int maxIter) {
	assert(npts >= 3);
	double* w = new double[npts];
	for (int i = 0; i < npts; i++)
		w[i] = 1.0;
	for (int k = 0; k < maxIter; k++) {
		absOrientWeighted(npts, w, pts1, pts2, R, t);
		//get the residual and recompute the weights
		logInfo("-------\n");
		for (int i = 0; i < npts; i++) {
			double M[3];
			rigidTrans(R, t, pts1 + 3 * i, M);
			double d = dist3(M, pts2 + 3 * i);
			w[i] = 1.0 / (d + 1e-6);
			//test
			//logInfo("[%d]:%g %g\n", i, d, w[i]);
		}
	}
	delete[] w;
}
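/* Note (not from the original source): the weight w_i = 1/(d_i + eps) is the
 * usual IRLS trick for an L1 objective.  Each pass of the weighted solver
 * minimizes  sum_i w_i * d_i^2  =  sum_i d_i^2 / (d_i + eps)  ~=  sum_i d_i,
 * so repeated passes approximately minimize the sum of absolute residuals,
 * which is much less sensitive to outliers than a plain least-squares fit.
 * eps = 1e-6 only guards against division by zero for exactly matched points. */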
// Circular arc entity (Type 100)
bool InputIges::read_100(IgesDirectoryEntry* de, bool check_only_status) {
	bool result = true;

	static ecif_EdgeGeometry_X edge;
	init_trx_data(edge);
	edge.type = ECIF_CIRCLE;

	int i, j;
	int int_fld;
	double dbl_fldb[1];
	char chr_fld;
	bool is_closed;
	static Point3 points[3];

	locateParamEntry(de);

	//-Read one data-line
	readDataLine(lineBuffer, dataBuffer);
	istrstream* data_line = new istrstream(dataBuffer);

	//-Read center, start point and end point (all in the arc's xy-plane)
	*data_line >> int_fld >> chr_fld; // Entity-id
	readDoubleFields(data_line, 1, dbl_fldb); // ZT: z displacement of the arc plane

	for (i = 0; i < 3; i++) {
		points[i][2] = 0.0;
		for (j = 0; j < 2; j++) {
			readDoubleFields(data_line, 1, dbl_fldb);
			points[i][j] = dbl_fldb[0];
		}
	}

	// A full circle: start and end points coincide
	if ( isZero(dist3(points[1], points[2])) ) {
		is_closed = true;
	} else {
		is_closed = false;
	}

	if ( modelDimension == ECIF_2D && is_closed ) {
		de->canBeBody = true;
	}

	if ( check_only_status ) {
		return result;
	}

	// Create body element
	// ===================
	edge.location = new Point3[1];
	edge.start = new Point3[1];
	edge.end = new Point3[1];

	for (i = 0; i < 3; i++) {
		edge.location[0][i] = points[0][i];
		edge.start[0][i] = points[1][i];
		edge.end[0][i] = points[2][i];
	}

	int body_layer = 0;
	createBodyElement2D(de->body, body_layer, edge);

	reset_trx_data(edge);

	return result;
}
struct pair_comp gen_pair_comp(double *alpha, double dist, gsl_rng *rg)
{
	struct pair_comp pc;
	double s1[3], min_v, max_dist = -1.0;
	unsigned i, min_i;
	double alpha_low[] = { 0.1, 0.1, 0.1, 0.1 };

	if (dist == 1) {
		unsigned long c1 = gsl_rng_uniform_int(rg, 4);
		unsigned long c2 = (c1 + 1 + gsl_rng_uniform_int(rg, 3)) % 4;
		memcpy(pc.c1, bary_corners[c1], sizeof(pc.c1));
		memcpy(pc.c2, bary_corners[c2], sizeof(pc.c2));
	} else if (dist == 0) {
		gsl_ran_dirichlet(rg, 4, (dist > 0.9 ? alpha_low : alpha), pc.c1);
		memcpy(pc.c2, pc.c1, sizeof(pc.c2));
	} else if (dist > 0.95) {
		fprintf(stderr, "Cannot handle distances not equal to 1 but > 0.95\n");
		exit(1);
	} else {
		/* generate a valid c1 */
		double s_corner[3];
		while (max_dist < dist * (1.0 / 0.95)) {
			gsl_ran_dirichlet(rg, 4, (dist > 0.9 ? alpha_low : alpha), pc.c1);
			min_v = pc.c1[0], min_i = 0;
			for (i = 1; i != 4; ++i)
				if (min_v > pc.c1[i]) min_v = pc.c1[i], min_i = i;
			barycentric_to_simplex(pc.c1, s1);
			barycentric_to_simplex(bary_corners[min_i], s_corner);
			max_dist = dist3(s_corner, s1);
		}
		/* sample from the unit sphere until we find a point in the simplex */
		double sph[3], s1p[3];
		while (1) {
			unit_sphere3(sph, rg);
			for (i = 0; i != 3; ++i)
				s1p[i] = s1[i] + dist * sph[i];
			if (inside_simplex(s1p)) break;
		}
		simplex_to_barycentric(s1p, pc.c2);
	}
	/* fprintf(stderr,
	          "%5.3f,%5.3f,%5.3f,%5.3f\t"
	          "%5.3f,%5.3f,%5.3f,%5.3f\t"
	          "%f\t%f\n",
	          pc.c1[0], pc.c1[1], pc.c1[2], pc.c1[3],
	          pc.c2[0], pc.c2[1], pc.c2[2], pc.c2[3],
	          dist,
	          dist4_scaled(pc.c1, pc.c2)); */
	return pc;
}
int SingleSLAM::newMapPoints(std::vector<MapPoint*>& mapPts, double maxEpiErr,
		double maxNcc) {
	std::vector<FeaturePoint*> vecFeatPts;
	getUnMappedAndTrackedFeatPts(vecFeatPts, 0, Param::nMinFeatTrkLen);

	mapPts.clear();
	mapPts.reserve(4096);

	double M[3], m1[2], m2[2];

	//reconstruct 3D map points
	int numRecons = 0;
	for (size_t k = 0; k < vecFeatPts.size(); k++) {
		FeaturePoint* cur_fp = vecFeatPts[k];
		FeaturePoint* pre_fp = cur_fp;
		//walk back along the track to the earliest frame that has a camera pose
		while (pre_fp->preFrame && pre_fp->preFrame->cam) {
			if (pre_fp->type == TYPE_FEATPOINT_DYNAMIC) {
				break;
			}
			pre_fp = pre_fp->preFrame;
		}
		if (pre_fp->type == TYPE_FEATPOINT_DYNAMIC || !pre_fp->cam)
			continue;

		normPoint(iK.data, pre_fp->m, m1);
		normPoint(iK.data, cur_fp->m, m2);

		//triangulate the two feature points to get the 3D point
		binTriangulate(pre_fp->cam->R, pre_fp->cam->t, cur_fp->cam->R,
				cur_fp->cam->t, m1, m2, M);

		if (isAtCameraBack(cur_fp->cam->R, cur_fp->cam->t, M))
			continue;

		double cov[9], org[3];
		getBinTriangulateCovMat(K.data, pre_fp->cam->R, pre_fp->cam->t, K.data,
				cur_fp->cam->R, cur_fp->cam->t, M, cov, Const::PIXEL_ERR_VAR);
		getCameraCenter(cur_fp->cam->R, cur_fp->cam->t, org);

		//reject points whose position uncertainty exceeds their distance to the camera
		double s = fabs(cov[0] + cov[4] + cov[8]);
		if (dist3(org, M) < sqrt(s))
			continue;

		//check the reprojection error
		double err1 = reprojErrorSingle(K.data, pre_fp->cam->R, pre_fp->cam->t,
				M, pre_fp->m);
		double err2 = reprojErrorSingle(K.data, cur_fp->cam->R, cur_fp->cam->t,
				M, cur_fp->m);

		if (err1 < maxEpiErr && err2 < maxEpiErr) {
			//a new map point is generated
			refineTriangulation(cur_fp, M, cov);
			err1 = reprojErrorSingle(K.data, pre_fp->cam->R, pre_fp->cam->t, M,
					pre_fp->m);
			err2 = reprojErrorSingle(K.data, cur_fp->cam->R, cur_fp->cam->t, M,
					cur_fp->m);
			if (isAtCameraBack(cur_fp->cam->R, cur_fp->cam->t, M)
					|| isAtCameraBack(pre_fp->cam->R, pre_fp->cam->t, M))
				continue;
			if (err1 < maxEpiErr && err2 < maxEpiErr) {
				MapPoint* pM = new MapPoint(M[0], M[1], M[2], pre_fp->f);
				doubleArrCopy(pM->cov, 0, cov, 9);
				mapPts.push_back(pM);
				pM->lastFrame = cur_fp->f;
				for (FeaturePoint* p = cur_fp; p; p = p->preFrame)
					p->mpt = pM;

				//add the feature point
				pM->addFeature(camId, cur_fp);
				pM->setLocalStatic();

				//compute the NCC block
				pM->nccBlks[camId].computeScaled(m_lastKeyPos->imgSmall,
						m_lastKeyPos->imgScale, cur_fp->x, cur_fp->y);

				int x = max(0, min((int) cur_fp->x, m_rgb.w - 1));
				int y = max(0, min((int) cur_fp->y, m_rgb.h - 1));
				pM->setColor(m_rgb(x, y));
				numRecons++;
			}
		}
	}
	return numRecons;
}
double ptoline(point3 p,point3 l1,point3 l2) { return vlen(xmult(subt(p,l1),subt(l2,l1)))/dist3(l1,l2); }
double ptoline(point3 p,line3 l) { return vlen(xmult(subt(p,l.a),subt(l.b,l.a)))/dist3(l.a,l.b); }
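/* Note (not from the original code): the two ptoline overloads above compute
 * the distance from p to the infinite line through l1 and l2 as
 *     |(p - l1) x (l2 - l1)| / |l2 - l1|,
 * i.e. the area of the parallelogram spanned by the two vectors divided by the
 * base length.  A quick sanity check, written as comments since the point3
 * constructor is not shown here:
 *     p  = (1, 1, 0), l1 = (0, 0, 0), l2 = (2, 0, 0)
 *     (p - l1) x (l2 - l1) = (0, 0, -2),  |.| = 2,  |l2 - l1| = 2
 *     ptoline(p, l1, l2) == 1.0   (p lies one unit above the x-axis)
 */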
static int maxmatch(v_data * graph,	/* array of vtx data for graph */
		    ex_vtx_data * geom_graph,	/* array of vtx data for graph */
		    int nvtxs,	/* number of vertices in graph */
		    int *mflag,	/* flag indicating vtx selected or not */
		    int dist2_limit)
/*
    Compute a matching of the nodes set.
    The matching is not based only on the edge list of 'graph', which might be
    too small, but on the wider edge list of 'geom_graph' (which includes
    'graph''s edges).
    We match nodes that are close both in the graph-theoretical sense and in
    the geometry sense (in the layout).
*/
{
    int *order;			/* random ordering of vertices */
    int *iptr, *jptr;		/* loops through integer arrays */
    int vtx;			/* vertex to process next */
    int neighbor;		/* neighbor of a vertex */
    int nmerged = 0;		/* number of edges in matching */
    int i, j;			/* loop counters */
    float max_norm_edge_weight;
    double inv_size;
    double *matchability = N_NEW(nvtxs, double);
    double min_edge_len;
    double closest_val = -1, val;
    int closest_neighbor;
    float *vtx_vec = N_NEW(nvtxs, float);
    float *weighted_vtx_vec = N_NEW(nvtxs, float);
    float sum_weights;

    // gather statistics, to enable normalizing the values
    double avg_edge_len = 0, avg_deg_2 = 0;
    int nedges = 0;
    for (i = 0; i < nvtxs; i++) {
	avg_deg_2 += graph[i].nedges;
	for (j = 1; j < graph[i].nedges; j++) {
	    avg_edge_len += ddist(geom_graph, i, graph[i].edges[j]);
	    nedges++;
	}
    }
    avg_edge_len /= nedges;
    avg_deg_2 /= nvtxs;
    avg_deg_2 *= avg_deg_2;

    // the normalized edge weight of edge <v,u> is defined as:
    // weight(<v,u>)/sqrt(size(v)*size(u))
    // Now we compute the maximal normalized weight
    if (graph[0].ewgts != NULL) {
	max_norm_edge_weight = -1;
	for (i = 0; i < nvtxs; i++) {
	    inv_size = sqrt(1.0 / geom_graph[i].size);
	    for (j = 1; j < graph[i].nedges; j++) {
		if (graph[i].ewgts[j] * inv_size /
		    sqrt((float) geom_graph[graph[i].edges[j]].size) >
		    max_norm_edge_weight) {
		    max_norm_edge_weight =
			(float) (graph[i].ewgts[j] * inv_size /
				 sqrt((double) geom_graph[graph[i].edges[j]].size));
		}
	    }
	}
    } else {
	max_norm_edge_weight = 1;
    }

    /* Now determine the order of the vertices. */
    iptr = order = N_NEW(nvtxs, int);
    jptr = mflag;
    for (i = 0; i < nvtxs; i++) {
	*(iptr++) = i;
	*(jptr++) = -1;
    }

    // Option 1: random permutation
#if 0
    int temp;
    for (i = 0; i < nvtxs - 1; i++) {
	// use long_rand() (not rand()), as n may be greater than RAND_MAX
	j = i + long_rand() % (nvtxs - i);
	temp = order[i];
	order[i] = order[j];
	order[j] = temp;
    }
#endif

    // Option 2: sort the nodes beginning with the ones highly appropriate for matching
#ifdef DEBUG
    srand(0);
#endif
    for (i = 0; i < nvtxs; i++) {
	vtx = order[i];
	matchability[vtx] = graph[vtx].nedges;	// we less want to match high degree nodes
	matchability[vtx] += geom_graph[vtx].size;	// we less want to match large sized nodes
	min_edge_len = 1e99;
	for (j = 1; j < graph[vtx].nedges; j++) {
	    min_edge_len =
		MIN(min_edge_len,
		    ddist(geom_graph, vtx, graph[vtx].edges[j]) / avg_edge_len);
	}
	matchability[vtx] += min_edge_len;	// we less want to match distant nodes
	matchability[vtx] += ((double) rand()) / RAND_MAX;	// add some randomness
    }
    quicksort_place(matchability, order, 0, nvtxs - 1);
    free(matchability);

    // Start determining the matched pairs
    for (i = 0; i < nvtxs; i++) {
	vtx_vec[i] = 0;
    }
    for (i = 0; i < nvtxs; i++) {
	weighted_vtx_vec[i] = 0;
    }

    // relative weights of the different criteria
    for (i = 0; i < nvtxs; i++) {
	vtx = order[i];
	if (mflag[vtx] >= 0) {	/* already matched. */
	    continue;
	}
	inv_size = sqrt(1.0 / geom_graph[vtx].size);
	sum_weights = fill_neighbors_vec(graph, vtx, weighted_vtx_vec);
	fill_neighbors_vec_unweighted(graph, vtx, vtx_vec);
	closest_neighbor = -1;
	/*
	   We match node i with the "closest" neighbor, based on 4 criteria:
	   (1) (Weighted) fraction of common neighbors (measured on orig. graph)
	   (2) AvgDeg*AvgDeg/(deg(vtx)*deg(neighbor)) (degrees measured on orig. graph)
	   (3) AvgEdgeLen/dist(vtx,neighbor)
	   (4) Weight of normalized direct connection between nodes (measured on orig. graph)
	 */
	for (j = 1; j < geom_graph[vtx].nedges; j++) {
	    neighbor = geom_graph[vtx].edges[j];
	    if (mflag[neighbor] >= 0) {	/* already matched. */
		continue;
	    }
	    // (1):
	    val = A * unweighted_common_fraction(graph, vtx, neighbor, vtx_vec);

	    if (val == 0 && (dist2_limit || !dist3(graph, vtx, neighbor))) {
		// graph theoretical distance is larger than 3
		// (or 2 if '!dist3(graph, vtx, neighbor)' is commented)
		// nodes cannot be matched
		continue;
	    }
	    // (2)
	    val += B * avg_deg_2 / (graph[vtx].nedges * graph[neighbor].nedges);
	    // (3)
	    val += C * avg_edge_len / ddist(geom_graph, vtx, neighbor);
	    // (4)
	    val += (weighted_vtx_vec[neighbor] * inv_size /
		    sqrt((float) geom_graph[neighbor].size)) / max_norm_edge_weight;

	    if (val > closest_val || closest_neighbor == -1) {
		closest_neighbor = neighbor;
		closest_val = val;
	    }
	}
	if (closest_neighbor != -1) {
	    mflag[vtx] = closest_neighbor;
	    mflag[closest_neighbor] = vtx;
	    nmerged++;
	}
	empty_neighbors_vec(graph, vtx, vtx_vec);
	empty_neighbors_vec(graph, vtx, weighted_vtx_vec);
    }

    free(order);
    free(vtx_vec);
    free(weighted_vtx_vec);
    return (nmerged);
}