void ConvexPolygon::calculateMatrixForm() {
  // every two adjacent endpoints define a line -> inequality constraint
  // first, resize A and b. x and b are column vectors
  this->A = Eigen::MatrixXd(this->endpoints.size(), 2);
  this->b = Eigen::VectorXd(this->endpoints.size());
  Eigen::Vector2d normal;
  for (size_t i = 1; i < endpoints.size(); i++) {
    // to define the correct line direction, we also need a point on the inside
    // of the constraint - the seed
    normal(0) = endpoints[i-1](1) - endpoints[i](1);
    normal(1) = endpoints[i](0) - endpoints[i-1](0);
    if (normal.dot(seed) > 0) { // we want the outward pointing normal, so n.dot(s) < 0
      normal = -normal;
    }
    normal.normalize();
    b(i-1) = -endpoints[i].dot(normal);
    A(i-1, 0) = normal(0);
    A(i-1, 1) = normal(1);
  }
  normal(0) = endpoints.back()(1) - endpoints.front()(1);
  normal(1) = endpoints.front()(0) - endpoints.back()(0);
  if (normal.dot(seed) > 0) { // we want the outward pointing normal, so n.dot(s) < 0
    normal = -normal;
  }
  normal.normalize();
  b(endpoints.size()-1) = -endpoints.front().dot(normal);
  A(endpoints.size()-1, 0) = normal(0);
  A(endpoints.size()-1, 1) = normal(1);
}
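// --- Illustrative sketch (not part of the class above) ---
// calculateMatrixForm() builds each half-plane row by taking the perpendicular of an edge and
// flipping it so it points away from an interior seed point. The helper below isolates that
// step under the assumption that the seed is tested relative to a point on the edge; the member
// code dots the normal with the seed directly, which presumes suitably centred coordinates.
// Function name and signature are hypothetical.
#include <Eigen/Dense>

Eigen::Vector2d outwardEdgeNormal(const Eigen::Vector2d &p0, const Eigen::Vector2d &p1,
                                  const Eigen::Vector2d &interiorSeed)
{
  Eigen::Vector2d normal(p0(1) - p1(1), p1(0) - p0(0)); // perpendicular of edge p0 -> p1
  if (normal.dot(interiorSeed - p1) > 0)                // flip so it points away from the seed
    normal = -normal;
  return normal.normalized();
}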
void LaserSensorModel2D::setInformationForVertexPoint(EdgeSE2PointXYCov*& edge,
                                                      VertexPointXYCov*& point,
                                                      LaserSensorParams& params)
{
  double beamAngle = atan2(edge->measurement()(1), edge->measurement()(0));
  /// calculate incidence angle in point frame
  Eigen::Vector2d beam = edge->measurement();
  Eigen::Vector2d normal = point->normal();
  beam = beam.normalized();
  normal = normal.normalized();
  double incidenceAngle = 0.0;
  if (point->covariance() != Eigen::Matrix2d::Identity())
    incidenceAngle = acos(normal.dot(beam));

  double d = tan(params.angularResolution * 0.5) * edge->measurement().norm();
  /// uncertainty of the surface measurement in direction of the beam
  double dk = fabs(params.scale * (d / cos(incidenceAngle)));
  edge->information().setIdentity();
  edge->information()(0, 0) = 1.0 / ((dk + params.sensorPrecision) * (dk + params.sensorPrecision));
  double cError = 0.001 * edge->measurement().norm();
  edge->information()(1, 1) = 1.0 / (cError * cError);

  Eigen::Rotation2Dd rot(beamAngle);
  Eigen::Matrix2d mrot = rot.toRotationMatrix();
  edge->information() = mrot * edge->information() * mrot.transpose();
}
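// --- Illustrative sketch ---
// The last three lines above express a diagonal information matrix (one precision along the
// beam, another across it) in the frame of the measurement via the similarity transform
// R * I * R^T. A minimal standalone version with made-up precisions, for reference only:
#include <Eigen/Dense>

Eigen::Matrix2d beamAlignedInformation(double beamAngle, double alongBeamStdDev, double acrossBeamStdDev)
{
  Eigen::Matrix2d info = Eigen::Matrix2d::Zero();
  info(0, 0) = 1.0 / (alongBeamStdDev * alongBeamStdDev);   // precision along the beam
  info(1, 1) = 1.0 / (acrossBeamStdDev * acrossBeamStdDev); // precision across the beam
  Eigen::Matrix2d R = Eigen::Rotation2Dd(beamAngle).toRotationMatrix();
  return R * info * R.transpose();                          // rotate into the measurement frame
}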
template<class CAMERA_GEOMETRY_T>
bool ProbabilisticStereoTriangulator<CAMERA_GEOMETRY_T>::computeReprojectionError4(
    const std::shared_ptr<okvis::MultiFrame>& frame, size_t camId, size_t keypointId,
    const Eigen::Vector4d& homogeneousPoint, double& outError) const
{
  OKVIS_ASSERT_LT_DBG(Exception, keypointId, frame->numKeypoints(camId),
                      "Index out of bounds");
  Eigen::Vector2d y;
  okvis::cameras::CameraBase::ProjectionStatus status =
      frame->geometryAs<CAMERA_GEOMETRY_T>(camId)->projectHomogeneous(homogeneousPoint, &y);
  if (status == okvis::cameras::CameraBase::ProjectionStatus::Successful) {
    Eigen::Vector2d k;
    Eigen::Matrix2d inverseCov = Eigen::Matrix2d::Identity();
    double keypointStdDev;
    frame->getKeypoint(camId, keypointId, k);
    frame->getKeypointSize(camId, keypointId, keypointStdDev);
    keypointStdDev = 0.8 * keypointStdDev / 12.0;
    inverseCov *= 1.0 / (keypointStdDev * keypointStdDev);
    y -= k;
    outError = y.dot(inverseCov * y);
    return true;
  } else {
    return false;
  }
}
std::vector<cv::DMatch> Registrator3P::getInliers(const ptam::KeyFrame& kfa,
                                                  const ptam::KeyFrame& kfb,
                                                  const std::vector<cv::DMatch>& matches,
                                                  const Sophus::SE3d& relPoseAB,
                                                  double threshold,
                                                  std::vector<Observation>& obs)
{
    Sophus::SE3d relPoseBA = relPoseAB.inverse();
    std::vector<cv::DMatch> inliers;
    double thresh2 = threshold*threshold;

    for (uint i = 0; i < matches.size(); i++) {
        const cv::DMatch& m = matches[i];
        const Eigen::Vector3d& mpa = cs_geom::toEigenVec(kfa.mapPoints[m.queryIdx]->v3RelativePos);

        cv::Point2f imbcv;
        int scale = 1; // << kfb.keypoints[m.trainIdx].octave;
        imbcv.x = kfb.keypoints[m.trainIdx].pt.x * scale;
        imbcv.y = kfb.keypoints[m.trainIdx].pt.y * scale;
        Eigen::Vector2d ipb(imbcv.x, imbcv.y);

        Eigen::Vector2d err = cam_[kfb.nSourceCamera].project3DtoPixel(relPoseBA*mpa) - ipb;
        if (err.dot(err) < thresh2) {
            inliers.push_back(m);
            obs.push_back(Observation(mpa, cam_[kfb.nSourceCamera].unprojectPixel(ipb)));
        }
    }
    return inliers;
}
double GeometryUtils::angleLike(const Eigen::Vector2d & u, const Eigen::Vector2d & v)
{
    // assume u and v are unitary
    /*
    double lu = length(u);
    if(lu>0) u = 1/lu * u;
    double lv = length(v);
    if(lv>0) v = 1/lv * v;
    */
    double dot_ = u.dot(v);
    double det_ = u[0]*v[1] - u[1]*v[0];
    double signDet = 1;
    if(det_ < 0) signDet = -1;
    return 2 - signDet*(dot_+1);
}
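// --- Illustrative check ---
// angleLike() returns 2 - sign(det)*(cos(theta) + 1): a trig-free surrogate that grows
// monotonically from 0 to 4 as the CCW angle from u to v sweeps [0, 2*pi), which is enough
// for sorting directions without atan2. A quick standalone check (hypothetical main):
#include <cstdio>
#include <Eigen/Dense>

static double angleLikeSketch(const Eigen::Vector2d &u, const Eigen::Vector2d &v)
{
    double dot_ = u.dot(v);
    double det_ = u[0]*v[1] - u[1]*v[0];
    return 2 - (det_ < 0 ? -1 : 1) * (dot_ + 1);
}

int main()
{
    Eigen::Vector2d u(1, 0);
    printf("%.1f %.1f %.1f %.1f\n",
           angleLikeSketch(u, Eigen::Vector2d(1, 0)),   // 0 deg   -> 0.0
           angleLikeSketch(u, Eigen::Vector2d(0, 1)),   // 90 deg  -> 1.0
           angleLikeSketch(u, Eigen::Vector2d(-1, 0)),  // 180 deg -> 2.0
           angleLikeSketch(u, Eigen::Vector2d(0, -1))); // 270 deg -> 3.0
    return 0;
}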
/**
Eigen::Vector2d internal_ray_lens_intersection(
    const Eigen::Vector2d &direction_of_internal,
    const Eigen::Vector2d &origin_of_internal,
    const Eigen::Vector2d &centerpoint_of_sphere,
    double radius_of_sphere)
{
    // https://people.cs.clemson.edu/~dhouse/courses/405/notes/raycast.pdf
    Eigen::Vector2d return_intersection;
    Eigen::Vector2d direction_of_internal_normalized = direction_of_internal.normalized();
    Eigen::Vector2d o_minus_c = origin_of_internal - centerpoint_of_sphere;
    double discriminant = (direction_of_internal_normalized.dot(o_minus_c)) * (direction_of_internal_normalized.dot(o_minus_c))
                          - o_minus_c.norm()*o_minus_c.norm() + radius_of_sphere*radius_of_sphere;
    if (discriminant < 0) {
        throw 30;
    } else {
        double d_1 = -(direction_of_internal_normalized.dot(o_minus_c)) + sqrt(discriminant);
        double d_2 = -(direction_of_internal_normalized.dot(o_minus_c)) - sqrt(discriminant);
        if (d_2 == d_1) {
            return_intersection = origin_of_internal + d_2*direction_of_internal_normalized;
        } else {
            Eigen::Vector2d return_intersection_1 = origin_of_internal + d_1*direction_of_internal_normalized;
            Eigen::Vector2d return_intersection_2 = origin_of_internal + d_2*direction_of_internal_normalized;
            if (return_intersection_1.y() < return_intersection_2.y()) {
                return_intersection = return_intersection_1;
            } else {
                return_intersection = return_intersection_2;
            }
        }
        cout << "lens choice 1: " << d_1 << endl;
        cout << "lens choice 2: " << d_2 << endl;
    }
    return return_intersection;
}
**/

Eigen::Vector2d internal_ray_lens_intersection(
    const Eigen::Vector2d &direction_of_internal,
    const Eigen::Vector2d &origin_of_internal,
    const Eigen::Vector2d &centerpoint_of_sphere,
    double radius_of_sphere)
{
    // https://people.cs.clemson.edu/~dhouse/courses/405/notes/raycast.pdf
    Eigen::Vector2d return_intersection;
    Eigen::Vector2d direction_of_internal_normalized = direction_of_internal.normalized();
    Eigen::Vector2d o_minus_c = origin_of_internal - centerpoint_of_sphere;

    // distance along the ray to the point closest to the sphere centre
    double t_close = direction_of_internal.dot(centerpoint_of_sphere - origin_of_internal);
    Eigen::Vector2d x_close = origin_of_internal + t_close*direction_of_internal;
    double discriminant = (x_close - centerpoint_of_sphere).norm();
    if (discriminant < radius_of_sphere) {
        double a = sqrt(radius_of_sphere*radius_of_sphere - discriminant*discriminant);
        return_intersection = origin_of_internal + (t_close - a)*direction_of_internal;
        //cout << "Centerpoint of sphere : " << centerpoint_of_sphere << endl;
        //cout << "radius of sphere : " << radius_of_sphere << endl;
    } else {
        throw 30;
    }
    return return_intersection;
}
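// --- Illustrative usage ---
// internal_ray_lens_intersection() throws the int 30 when the ray misses the circle, so a
// caller has to guard the call. The numbers below are made up; the direction is expected to
// be normalized, since the active implementation dots the direction argument directly.
#include <iostream>
#include <Eigen/Dense>

void traceOneRaySketch()
{
    Eigen::Vector2d dir = Eigen::Vector2d(0.2, -1.0).normalized(); // downward-ish ray
    Eigen::Vector2d origin(0.0, 5.0);
    Eigen::Vector2d lens_centre(0.0, 0.0);
    double lens_radius = 2.0;
    try {
        Eigen::Vector2d hit = internal_ray_lens_intersection(dir, origin, lens_centre, lens_radius);
        std::cout << "hit lens at " << hit.transpose() << std::endl;
    } catch (int) {
        std::cout << "ray missed the lens" << std::endl;
    }
}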
unsigned
NurbsTools::getClosestPoint (const Eigen::Vector2d &p, const Eigen::Vector2d &dir, const vector_vec2d &data,
                             unsigned &idxcp)
{
  if (data.empty ())
    throw std::runtime_error ("[NurbsTools::getClosestPoint(2d)] Data empty.\n");

  size_t idx = 0;
  idxcp = 0;
  double dist2 (0.0);
  double dist2cp (DBL_MAX);
  for (size_t i = 0; i < data.size (); i++)
  {
    Eigen::Vector2d v = (data[i] - p);
    double d2 = v.squaredNorm ();

    if (d2 < dist2cp)
    {
      idxcp = i;
      dist2cp = d2;
    }

    if (d2 == 0.0)
      return i;

    v.normalize ();

    double d1 = dir.dot (v);
    if (d1 / d2 > dist2)
    {
      idx = i;
      dist2 = d1 / d2;
    }
  }
  return idx;
}
/*------------------------------------------------------------------------------*/
FM_INLINE FastMarchingVertex * UnfoldTriangle(const Eigen::MatrixXd &V, const Eigen::MatrixXi &F,
                                              const Eigen::MatrixXi &TT, const Eigen::MatrixXi &TTi,
                                              std::vector<struct FastMarchingVertex> &vertices,
                                              int f, int v, int v1, int v2,
                                              FM_Float& dist, FM_Float& dot1, FM_Float& dot2)
{
    const Eigen::Vector3d& p  = V.row(v);  // vert.GetPosition();
    const Eigen::Vector3d& p1 = V.row(v1); // vert1.GetPosition();
    const Eigen::Vector3d& p2 = V.row(v2); // vert2.GetPosition();

    Eigen::Vector3d e1 = p1 - p;
    FM_Float rNorm1 = e1.norm(); // ~e1
    e1.normalize();              // e1 /= rNorm1;
    Eigen::Vector3d e2 = p2 - p;
    FM_Float rNorm2 = e2.norm(); // ~e2;
    e2.normalize();              // e2 /= rNorm2;

    FM_Float dot = e1.adjoint()*e2; // e1*e2;
    FM_ASSERT(dot < 0);

    /* the equation of the lines defining the unfolding region
       [e.g. line 1 : {x ; <x,eq1>=0} ] */
    Eigen::Vector2d eq1 = Eigen::Vector2d(dot, sqrt(1 - dot*dot));
    Eigen::Vector2d eq2 = Eigen::Vector2d(1, 0);

    /* position of the 2 points on the unfolding plane */
    Eigen::Vector2d x1(rNorm1, 0);
    Eigen::Vector2d x2 = eq1*rNorm2;

    /* keep track of the starting point */
    Eigen::Vector2d xstart1 = x1;
    Eigen::Vector2d xstart2 = x2;

    FastMarchingVertex* pV1 = &(vertices[v1]);
    FastMarchingVertex* pV2 = &(vertices[v2]);

    int cur_f, cur_v;
    get_oppisite_f_v(F, TT, TTi, f, v, cur_f, cur_v);
    // FM_GeodesicFace* pCurFace = (FM_GeodesicFace*)CurFace.GetFaceNeighbor(vert);

    int nNum = 0;
    while (nNum < 50 && cur_f != -1) // NULL)
    {
        // FastMarchingVertex* pV = (FastMarchingVertex*)pCurFace->GetVertex(*pV1, *pV2); // opposite vertex to face and edge(pV1,pV2)
        // FM_ASSERT(pV != NULL);
        FastMarchingVertex* pV = &(vertices[cur_v]); // opposite vertex to face and vert

        /*
        e1 = pV2->GetPosition() - pV1->GetPosition();
        FM_Float rNorm1 = ~e1;
        e1 /= rNorm1;
        e2 = pV->GetPosition() - pV1->GetPosition();
        FM_Float rNorm2 = ~e2;
        e2 /= rNorm2;
        */
        Eigen::Vector3d e1 = V.row(pV2->vid) - V.row(pV1->vid);
        FM_Float rNorm1 = e1.norm(); // ~e1
        e1.normalize();              // e1 /= rNorm1;
        Eigen::Vector3d e2 = V.row(pV->vid) - V.row(pV1->vid);
        FM_Float rNorm2 = e2.norm(); // ~e2;
        e2.normalize();              // e2 /= rNorm2;

        /* compute the position of the new point x on the unfolding plane (via a rotation of -alpha on (x2-x1)/rNorm1 )
               | cos(alpha) sin(alpha)|
           x = |-sin(alpha) cos(alpha)| * [x2-x1]*rNorm2/rNorm1 + x1   where cos(alpha)=dot
        */
        Eigen::Vector2d vv = (x2 - x1)*rNorm2 / rNorm1;
        dot = e1.adjoint()*e2; // e1*e2;
        // Eigen::Vector2d x = vv.Rotate2D(); //// vv.Rotate(-acos(dot)) + x1;
        Eigen::Rotation2D<double> rot2(-acos(dot));
        Eigen::Vector2d x = rot2*vv + x1; // dhw to check

        /* compute the intersection points.
           We look for x=x1+lambda*(x-x1) or x=x2+lambda*(x-x2) with <x,eqi>=0, so */
        FM_Float lambda11 = -(x1.dot(eq1)) / ((x - x1).dot(eq1)); // -(x1*eq1) / ((x - x1)*eq1); // left most
        FM_Float lambda12 = -(x1.dot(eq2)) / ((x - x1).dot(eq2)); // -(x1*eq2) / ((x - x1)*eq2); // right most
        FM_Float lambda21 = -(x2.dot(eq1)) / ((x - x2).dot(eq1)); // -(x2*eq1) / ((x - x2)*eq1); // left most
        FM_Float lambda22 = -(x2.dot(eq2)) / ((x - x2).dot(eq2)); // -(x2*eq2) / ((x - x2)*eq2); // right most
        bool bIntersect11 = (lambda11 >= 0) && (lambda11 <= 1);
        bool bIntersect12 = (lambda12 >= 0) && (lambda12 <= 1);
        bool bIntersect21 = (lambda21 >= 0) && (lambda21 <= 1);
        bool bIntersect22 = (lambda22 >= 0) && (lambda22 <= 1);

        if (bIntersect11 && bIntersect12)
        {
            // FM_ASSERT( !bIntersect21 && !bIntersect22 );
            /* we should unfold on edge [x x1] */
            // pCurFace = (FM_GeodesicFace*)pCurFace->GetFaceNeighbor(*pV2);
            f = cur_f;
            get_oppisite_f_v(F, TT, TTi, f, pV2->vid, cur_f, cur_v);
            pV2 = pV;
            x2 = x;
        }
        else if (bIntersect21 && bIntersect22)
        {
            // FM_ASSERT( !bIntersect11 && !bIntersect12 );
            /* we should unfold on edge [x x2] */
            // pCurFace = (FM_GeodesicFace*)pCurFace->GetFaceNeighbor(*pV1);
            f = cur_f;
            get_oppisite_f_v(F, TT, TTi, f, pV1->vid, cur_f, cur_v);
            pV1 = pV;
            x1 = x;
        }
        else
        {
            FM_ASSERT(bIntersect11 && !bIntersect12 && !bIntersect21 && bIntersect22);
            /* that's it, we have found the point */
            dist = x.norm();                                 // ~x;
            dot1 = x.dot(xstart1) / (dist * xstart1.norm()); // ~xstart1);
            dot2 = x.dot(xstart2) / (dist * xstart2.norm()); // ~xstart2);
            return pV;
        }
        nNum++;
    }
    return NULL;
}
void KeyEdge::prepareSculptPreserveTangents_()
{
    const bool preserveTangentEdges = false;
    if (!preserveTangentEdges)
        return;

    // find instant edges where tangency must be preserved
    sculpt_beginLeftDer_ = geometry()->der(0);
    sculpt_beginRightDer_ = geometry()->der(geometry()->length());
    sculpt_keepRightAsLeft_.clear();
    sculpt_keepLeftAsLeft_.clear();
    sculpt_keepLeftAsRight_.clear();
    sculpt_keepRightAsRight_.clear();
    sculpt_keepMyselfTangent_ = false;
    if (preserveTangentEdges)
    {
        double dotThreshold = 0.9;
        if(startVertex_)
        {
            KeyEdgeSet leftEdges = startVertex_->spatialStar();
            leftEdges.remove(this);
            foreach(KeyEdge * ie, leftEdges)
            {
                if(ie->endVertex_ == startVertex_)
                {
                    Eigen::Vector2d rightDer = ie->geometry()->der(ie->geometry()->length());
                    double dot = rightDer.dot(sculpt_beginLeftDer_);
                    if(dot > dotThreshold)
                        sculpt_keepRightAsLeft_ << ie;
                }
                if(ie->startVertex_ == startVertex_)
                {
                    Eigen::Vector2d rightDer = - ie->geometry()->der(0);
                    double dot = rightDer.dot(sculpt_beginLeftDer_);
                    if(dot > dotThreshold)
                        sculpt_keepLeftAsLeft_ << ie;
                }
            }
        }
        if(endVertex_)
        {
            KeyEdgeSet rightEdges = endVertex_->spatialStar();
            rightEdges.remove(this);
            foreach(KeyEdge * ie, rightEdges)
            {
                // For now, disabling tangent preservation for 0-edge loops
                if(ie == this)
                    continue;

                if(ie->startVertex_ == endVertex_)
                {
                    Eigen::Vector2d leftDer = ie->geometry()->der(0);
                    double dot = leftDer.dot(sculpt_beginRightDer_);
                    if(dot > dotThreshold)
                        sculpt_keepLeftAsRight_ << ie;
                }
                if(ie->endVertex_ == endVertex_)
                {
                    Eigen::Vector2d leftDer = - ie->geometry()->der(ie->geometry()->length());
                    double dot = leftDer.dot(sculpt_beginRightDer_);
                    if(dot > dotThreshold)
                        sculpt_keepRightAsRight_ << ie;
                }
            }
        }
        if(endVertex_ && (endVertex_ == startVertex_))
        {
            // For now, disabling tangent preservation for 0-edge loops.
            // What's below doesn't work, because continueSculpt assumes
            // no resampling has occurred since prepareSculpt
            //double dot = sculpt_beginRightDer_.dot(sculpt_beginLeftDer_);
            //if(dot > dotThreshold)
            //{
            //    sculpt_keepMyselfTangent_ = true;
            //}
        }
    }
}
Sophus::SE3d Registrator3P::solve(const ptam::KeyFrame& kfa, const ptam::KeyFrame& kfb,
                                  const std::vector<cv::DMatch>& matches)
{
    if (matches.size() < 5)
        return Sophus::SE3d();

    // Generate nHyp_ hypotheses:
    std::vector<Hypothesis3P> hyp;
    nHyp_ = matches.size() * N_SAMPLES;
    while (hyp.size() < nHyp_) {
        std::vector<int> sampleInd;
        // choose x random matches
        randSampleNoReplacement(matches.size(), N_SAMPLES, sampleInd);

        std::vector<cv::Point3f> mapPointsA(N_SAMPLES);
        std::vector<cv::Point2f> imagePointsB(N_SAMPLES);
        for (int i = 0; i < N_SAMPLES; i++) {
            int ind = sampleInd[i];
            const Eigen::Vector3d& mpa = cs_geom::toEigenVec(kfa.mapPoints[matches[ind].queryIdx]->v3RelativePos);
            mapPointsA[i] = cv::Point3f(mpa[0], mpa[1], mpa[2]);
            int scale = 1; // << kfb.keypoints[matches[ind].trainIdx].octave;
            imagePointsB[i].x = kfb.keypoints[matches[ind].trainIdx].pt.x; // * scale;
            imagePointsB[i].y = kfb.keypoints[matches[ind].trainIdx].pt.y; // * scale;
        }

        cv::Mat rvec(3, 1, cv::DataType<double>::type);
        cv::Mat tvec(3, 1, cv::DataType<double>::type);
        /// here we need the model of camera B!
        int camNum = kfb.nSourceCamera;
        // cout << "cv::solvePnP... cam " << camNum << endl;
        if (cv::solvePnP(mapPointsA, imagePointsB, cam_[camNum].K(), cam_[camNum].D(),
                         rvec, tvec, false, cv::EPNP)) {
            Sophus::SE3d se3;
            Eigen::Vector3d r;
            cv::cv2eigen(rvec, r);
            cv::cv2eigen(tvec, se3.translation());
            se3.so3() = Sophus::SO3d::exp(r);
            hyp.push_back(Hypothesis3P(se3));
        }
        // cout << "cv::solvePnP finish..." << endl;
    }
    if (!hyp.size())
        return Sophus::SE3d();

    // Preemptive scoring:
    // determine the order in which observations will be evaluated
    std::vector<int> oInd;
    randPerm(matches.size(), oInd);

    // start preemptive scoring
    int i = 0;
    int pr = preemption(i, nHyp_, blockSize_);
    while (i < nMaxObs_ && i < (int) matches.size() && pr > 1) {
        // observation oInd(i) consists of one pair of points:
        const cv::DMatch& match = matches[oInd[i]];
        const Eigen::Vector3d& mpa = cs_geom::toEigenVec(kfa.mapPoints[match.queryIdx]->v3RelativePos);
        const cv::Point2f& imbcv = kfb.keypoints[match.trainIdx].pt;
        int scale = 1; // << kfb.keypoints[match.trainIdx].octave;
        Eigen::Vector2d imb(imbcv.x * scale, imbcv.y * scale);

        // update score for all hypotheses w.r.t. observation oInd(i)
        for (int h = 0; h < (int) hyp.size(); h++) {
            Eigen::Vector2d err = cam_[kfb.nSourceCamera].project3DtoPixel(hyp[h].relPose*mpa) - imb;
            hyp[h].score -= log(1.0 + err.dot(err));
            // hyp[h].score += err.dot(err) < 3.0;
        }

        i++;
        int prnext = preemption(i, nHyp_, blockSize_);
        if (prnext != pr) {
            // select best hypotheses
            std::nth_element(hyp.begin(), hyp.begin() + prnext, hyp.end(), Hypothesis3P::compare);
            // now the first prnext elements of h contain the best hypotheses, erase the rest
            hyp.erase(hyp.begin() + prnext, hyp.end());
        }
        pr = prnext;
    }
    // preemptive scoring is done
    // select the single best hypothesis of possibly more than one remaining
    std::nth_element(hyp.begin(), hyp.begin() + 1, hyp.end(), Hypothesis3P::compare);
    // std::cout << "preemptive scoring using " << i << " observations done." << std::endl;
    // std::cout << hyp[0].relPose.inverse().matrix();

    return hyp[0].relPose.inverse();
}
void
FittingCurve2dSDM::assembleInterior (double wInt, double sigma2, unsigned &row)
{
  unsigned nInt = m_data->interior.size ();
  bool wFunction (true);
  double ds = 1.0 / (2.0 * sigma2);
  m_data->interior_line_start.clear ();
  m_data->interior_line_end.clear ();
  m_data->interior_error.clear ();
  m_data->interior_normals.clear ();

  unsigned updateTNR (false);
  if (m_data->interior_ncps_prev != m_nurbs.CVCount ())
  {
    if (!m_quiet)
      printf ("[FittingCurve2dSDM::assembleInterior] updating T, N, rho\n");
    m_data->interior_tangents.clear ();
    m_data->interior_normals.clear ();
    m_data->interior_rho.clear ();
    m_data->interior_ncps_prev = m_nurbs.CVCount ();
    updateTNR = true;
  }

  unsigned i1 (0);
  unsigned i2 (0);
  for (unsigned p = 0; p < nInt; p++)
  {
    Eigen::Vector2d &pcp = m_data->interior[p];

    // inverse mapping
    double param;
    Eigen::Vector2d pt, t, n;
    double error;
    if (p < m_data->interior_param.size ())
    {
      param = inverseMapping (m_nurbs, pcp, m_data->interior_param[p], error, pt, t, in_max_steps, in_accuracy);
      m_data->interior_param[p] = param;
    }
    else
    {
      param = findClosestElementMidPoint (m_nurbs, pcp);
      param = inverseMapping (m_nurbs, pcp, param, error, pt, t, in_max_steps, in_accuracy);
      m_data->interior_param.push_back (param);
    }

    m_data->interior_error.push_back (error);

    double dt, kappa, rho, rho_prev;
    Eigen::Vector2d n_prev, t_prev;

    double pointAndTangents[6];
    m_nurbs.Evaluate (param, 2, 2, pointAndTangents);
    pt (0) = pointAndTangents[0];
    pt (1) = pointAndTangents[1];
    t (0) = pointAndTangents[2];
    t (1) = pointAndTangents[3];
    n (0) = pointAndTangents[4];
    n (1) = pointAndTangents[5];

    dt = t.norm ();
    t /= dt;
    Eigen::Vector2d in (t (1), -t (0));
    n /= dt; // TODO something is wrong with the normal from nurbs.Evaluate(...)
    n = in * in.dot (n);

    kappa = n.norm ();
    rho = (1.0 / kappa);
    n *= rho;

    if (!updateTNR)
    {
      if (m_data->interior_rho.size () != nInt)
      {
        printf ("[FittingCurve2dSDM::assembleInterior] ERROR: size does not match\n");
      }
      else
      {
        n_prev = m_data->interior_normals[p];
        t_prev = m_data->interior_tangents[p];
        rho_prev = m_data->interior_rho[p];
        // m_data->interior_normals[p] = n;
        // m_data->interior_tangents[p] = t;
        // m_data->interior_rho[p] = rho;
      }
    }
    else
    {
      m_data->interior_tangents.push_back (t);
      m_data->interior_normals.push_back (n);
      m_data->interior_rho.push_back (rho);
      n_prev = n;
      t_prev = t;
      rho_prev = rho;
    }

    double d;
    if ((pcp - pt).dot (n) >= 0.0)
    {
      d = (pcp - pt).norm ();
      i1++;
    }
    else
    {
      d = -(pcp - pt).norm ();
      i2++;
    }

    // evaluate if point lies inside or outside the closed curve
    Eigen::Vector3d a (pcp (0) - pt (0), pcp (1) - pt (1), 0.0);
    Eigen::Vector3d b (t (0), t (1), 0.0);
    Eigen::Vector3d z = a.cross (b);

    if (p < m_data->interior_weight.size ())
      wInt = m_data->interior_weight[p];

    if (p < m_data->interior_weight_function.size ())
      wFunction = m_data->interior_weight_function[p];

    m_data->interior_line_start.push_back (pt);
    m_data->interior_line_end.push_back (pcp);

    double w (wInt);
    if (z (2) > 0.0 && wFunction)
      w = wInt * exp (-(error * error) * ds);

    if (w > 1e-6) // avoids ill-conditioned matrix
      addPointConstraint (m_data->interior_param[p], m_data->interior[p], n_prev, t_prev, rho_prev, d, w, row);
  }
  // printf("[FittingCurve2dSDM::assembleInterior] d>0: %d d<0: %d\n", i1, i2);
}
double
FittingCurve2d::inverseMapping (const ON_NurbsCurve &nurbs, const Eigen::Vector2d &pt, const double &hint,
                                double &error, Eigen::Vector2d &p, Eigen::Vector2d &t, double rScale, int maxSteps,
                                double accuracy, bool quiet)
{
  if (nurbs.Order () == 2)
    return inverseMappingO2 (nurbs, pt, error, p, t);

  int cp_red = (nurbs.m_order - 2);
  int ncpj = (nurbs.m_cv_count - 2 * cp_red);
  double pointAndTangents[4];

  double current, delta;
  Eigen::Vector2d r;
  std::vector<double> elements = getElementVector (nurbs);
  double minU = elements[0];
  double maxU = elements[elements.size () - 1];

  current = hint;

  for (int k = 0; k < maxSteps; k++)
  {
    nurbs.Evaluate (current, 1, 2, pointAndTangents);
    p (0) = pointAndTangents[0];
    p (1) = pointAndTangents[1];
    t (0) = pointAndTangents[2];
    t (1) = pointAndTangents[3];

    r = p - pt;

    // step width control
    int E = findElement (current, elements);
    double e = elements[E + 1] - elements[E];

    delta = -(0.5 * e * rScale) * r.dot (t) / t.norm (); //  A.ldlt().solve(b);

    //    e = 0.5 * std::abs<double> (e);
    //    if (delta > e)
    //      delta = e;
    //    if (delta < -e)
    //      delta = -e;

    if (std::abs<double> (delta) < accuracy)
    {
      error = r.norm ();
      return current;
    }
    else
    {
      current = current + delta;

      if (current < minU)
        current = maxU - (minU - current);
      else if (current > maxU)
        current = minU + (current - maxU);
    }
  }

  error = r.norm ();
  if (!quiet)
  {
    printf ("[FittingCurve2d::inverseMapping] Warning: Method did not converge (%e %d).\n", accuracy, maxSteps);
    printf ("[FittingCurve2d::inverseMapping] hint: %f current: %f delta: %f error: %f\n", hint, current, delta, error);
  }

  return current;
}
Eigen::Vector2d final_ray_scene_intersection(
    const Eigen::Vector2d &scene_point,
    const Eigen::Vector2d &final_ray_normalized,
    const Eigen::Vector2d &intersection_point_on_curved_surface)
{
    // distance along the ray to the horizontal scene line (normal (0,-1)) through scene_point
    double d = (scene_point - intersection_point_on_curved_surface).dot(Eigen::Vector2d(0, -1))
               / (final_ray_normalized.dot(Eigen::Vector2d(0, -1)));
    return final_ray_normalized*d + intersection_point_on_curved_surface;
}
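// --- Illustrative generalisation ---
// final_ray_scene_intersection() is the 2D ray/line intersection specialised to a horizontal
// scene line with normal (0, -1) passing through scene_point. The general form, for any line
// given by a point and a normal, is sketched below (hypothetical helper, not in the original):
#include <Eigen/Dense>

Eigen::Vector2d rayLineIntersectionSketch(const Eigen::Vector2d &ray_origin,
                                          const Eigen::Vector2d &ray_direction, // assumed normalized
                                          const Eigen::Vector2d &line_point,
                                          const Eigen::Vector2d &line_normal)
{
    // solve (ray_origin + d*ray_direction - line_point) . line_normal = 0 for d
    double d = (line_point - ray_origin).dot(line_normal) / ray_direction.dot(line_normal);
    return ray_origin + d*ray_direction;
}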