/*!
  Computes centered moments of all available orders.
  Depends on vpMomentGravityCenter.
*/
void vpMomentCentered::compute()
{
  bool found_moment_gravity;
  values.resize((getObject().getOrder()+1)*(getObject().getOrder()+1));

  const vpMomentGravityCenter& momentGravity =
      static_cast<const vpMomentGravityCenter&>(getMoments().get("vpMomentGravityCenter", found_moment_gravity));
  if (!found_moment_gravity)
    throw vpException(vpException::notInitialized, "vpMomentGravityCenter not found");

  unsigned int order = getObject().getOrder()+1;
  for (unsigned int j = 0; j < order; j++) {
    for (unsigned int i = 0; i < order-j; i++) {
      unsigned int c = order*j + i;
      values[c] = 0;
      for (unsigned int k = 0; k <= i; k++) {
        double Xg_i_k = pow(-momentGravity.get()[0], (int)(i-k));
        double comb_i_k = static_cast<double>(vpMath::comb(i, k));
        for (unsigned int l = 0; l <= j; l++) {
          values[c] += static_cast<double>(comb_i_k * vpMath::comb(j, l)
                                           * Xg_i_k
                                           * pow(-momentGravity.get()[1], (int)(j-l))
                                           * getObject().get(k, l));
        }
      }
    }
  }
}
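/*
  For reference, the nested loops above implement the standard binomial
  expansion relating centered moments \f$\mu_{ij}\f$ to raw moments
  \f$m_{kl}\f$ and the gravity center \f$(x_g, y_g)\f$:

  \f[
    \mu_{ij} = \sum_{k=0}^{i} \sum_{l=0}^{j} \binom{i}{k} \binom{j}{l}
               (-x_g)^{i-k} \, (-y_g)^{j-l} \, m_{kl}
  \f]
*/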
/*!
  Test if two segments are intersecting.

  \throw vpException::divideByZeroError if the two lines are aligned
  (denominator equal to zero).

  \param ip1 : The first image point of the first segment.
  \param ip2 : The second image point of the first segment.
  \param ip3 : The first image point of the second segment.
  \param ip4 : The second image point of the second segment.
*/
bool vpPolygon::testIntersectionSegments(const vpImagePoint& ip1, const vpImagePoint& ip2,
                                         const vpImagePoint& ip3, const vpImagePoint& ip4)
{
  double di1 = ip2.get_i() - ip1.get_i();
  double dj1 = ip2.get_j() - ip1.get_j();

  double di2 = ip4.get_i() - ip3.get_i();
  double dj2 = ip4.get_j() - ip3.get_j();

  double denominator = di1 * dj2 - dj1 * di2;

  if (fabs(denominator) < std::numeric_limits<double>::epsilon()) {
    throw vpException(vpException::divideByZeroError,
                      "Denominator is null, lines are parallel");
  }

  double alpha = -((ip1.get_i() - ip3.get_i()) * dj2
                   + di2 * (ip3.get_j() - ip1.get_j())) / denominator;
  if (alpha < 0 || alpha >= 1) {
    return false;
  }

  double beta = -(di1 * (ip3.get_j() - ip1.get_j())
                  + dj1 * (ip1.get_i() - ip3.get_i())) / denominator;
  if (beta < 0 || beta >= 1) {
    return false;
  }

  return true;
}
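/*
  As a reading aid: writing the segments in parametric form
  \f$ p(\alpha) = ip1 + \alpha\,(ip2 - ip1) \f$ and
  \f$ q(\beta) = ip3 + \beta\,(ip4 - ip3) \f$, equating
  \f$ p(\alpha) = q(\beta) \f$ gives a 2x2 linear system whose determinant
  is (up to sign) the denominator \f$ di1 \cdot dj2 - dj1 \cdot di2 \f$
  computed above. The segments intersect iff both solutions \f$\alpha\f$
  and \f$\beta\f$ lie in \f$[0, 1)\f$, which is exactly what the two range
  checks test.
*/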
/*!
  Gets the desired moment using indexes.
  \param i : first index of the centered moment.
  \param j : second index of the centered moment.
  \return \f$\mu_{ij}\f$ moment.
*/
double vpMomentCentered::get(unsigned int i, unsigned int j) const
{
  unsigned int order = getObject().getOrder();
  assert(i+j <= order);
  if (i+j > order)
    throw vpException(vpException::badValue,
                      "The requested value has not been computed, you should specify a higher order.");

  return values[j*(order+1) + i];
}
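/*
  A minimal usage sketch, assuming the standard vpMomentDatabase workflow
  (vec_p is a hypothetical std::vector<vpPoint> describing the object):

    vpMomentObject obj(2);                 // moments up to order 2
    obj.setType(vpMomentObject::DISCRETE);
    obj.fromVector(vec_p);

    vpMomentDatabase db;
    vpMomentGravityCenter g;               // dependency of vpMomentCentered
    vpMomentCentered mc;
    g.linkTo(db);
    mc.linkTo(db);
    db.updateAll(obj);
    g.compute();
    mc.compute();

    double mu11 = mc.get(1, 1);            // centered moment mu_11
*/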
/*!
  Get the last frame index (update the lastFrame attribute).
*/
void vpVideoReader::findLastFrameIndex()
{
  if (!isOpen) {
    vpERROR_TRACE("Use the open method before");
    throw (vpException(vpException::notInitialized, "file not yet opened"));
  }

  if (imSequence != NULL) {
    char name[FILENAME_MAX];
    int image_number = firstFrame;
    std::fstream file;
    bool failed;
    // Probe successive file names until one fails to open; the last valid
    // frame is the one before the first failure, hence image_number - 2
    // after the final increment.
    do {
      sprintf(name, fileName, image_number);
      file.open(name, std::fstream::in);
      failed = file.fail();
      if (!failed) file.close();
      image_number++;
    } while (!failed);

    lastFrame = image_number - 2;
  }
#ifdef VISP_HAVE_FFMPEG
  else if (ffmpeg != NULL)
    lastFrame = (long)(ffmpeg->getFrameNumber() - 1);
#endif
}
/*!
  Detect using the cascade classifier.

  \param I : Grayscale image.
  \param func : Function pointer to eliminate detections according to a specific criterion.
  \param scaleFactor : Parameter specifying how much the image size is reduced at each image scale.
  \param minNeighbors : Parameter specifying how many neighbors each candidate rectangle should have to retain it.
  \param minSize : Minimum possible object size. Objects smaller than that are ignored.
  \param maxSize : Maximum possible object size. Objects larger than that are ignored.
*/
void vpCascadeClassifier::detect(const vpImage<unsigned char> &I,
                                 bool (*func)(const vpImage<unsigned char> &I, const vpRect &boundingBox),
                                 const double scaleFactor, const int minNeighbors,
                                 const cv::Size &minSize, const cv::Size &maxSize)
{
  if (m_classifierDetector.empty()) {
    clear();
    throw vpException(vpException::fatalError, "Empty classifier !");
  }

  if (m_prevI.getWidth() == 0 || m_prevI.getHeight() == 0) {
    m_prevI = I;
  }

  computeDetection(I, scaleFactor, minNeighbors, minSize, maxSize);

  // Custom supplied filtering
  if (func != NULL) {
    for (std::vector<vpRect>::iterator it = m_objectBoundingBoxes.begin();
         it != m_objectBoundingBoxes.end();) {
      if (!func(I, *it)) {
        m_vectorOfDetectedObjects.push_back(vpObjectDetection(*it, vpObjectDetection::FILTERED_STATUS));
        it = m_objectBoundingBoxes.erase(it);
      } else {
        ++it;
      }
    }
  }

  computeMatching(I);

  m_prevI = I;
}
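/*
  A sketch of a custom filter callback. The function name and the 50x50
  threshold are hypothetical; any predicate with this signature works.
  Detections for which the callback returns false are stored with
  FILTERED_STATUS and removed from the bounding-box list:

    bool keepLargeOnly(const vpImage<unsigned char> &I, const vpRect &bb)
    {
      (void)I; // this criterion only uses the box geometry
      return (bb.getWidth() >= 50 && bb.getHeight() >= 50);
    }

    // ... detector assumed loaded with a cascade file; the numeric
    // arguments below are arbitrary illustration values.
    detector.detect(I, &keepLargeOnly, 1.1, 3, cv::Size(30, 30), cv::Size(200, 200));
*/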
// Color pictures SetBackGroundImage
void vpAR::setImage(vpImage<vpRGBa> &I)
{
  if ((internal_width != I.getWidth()) ||
      (internal_height != I.getHeight())) {
    vpERROR_TRACE("The image size is different from the view size ");
    throw(vpException(vpException::dimensionError,
                      "The image size is different from the view size"));
  }

  background = true;

  unsigned int k = 0;
  for (unsigned int i = 0; i < I.getHeight(); i++) {
    k = 0;
    // The OpenGL image frame origin is at the bottom left, so copy the rows
    // in reverse order to avoid an inverted image.
    for (unsigned int j = 0; j < I.getWidth(); j++) {
      image_background[i*I.getWidth()*3 + k + 0] = I[I.getHeight()-i-1][j].R;
      image_background[i*I.getWidth()*3 + k + 1] = I[I.getHeight()-i-1][j].G;
      image_background[i*I.getWidth()*3 + k + 2] = I[I.getHeight()-i-1][j].B;
      k += 3;
    }
  }
}
/*!
  Compute the rectangular ROI from at least 3 points and set the region of
  interest on the current image.

  \param ip : the list of image points.
  \param nbpt : the number of points.
*/
void vpPlanarObjectDetector::computeRoi(vpImagePoint* ip, const unsigned int nbpt)
{
  if (nbpt < 3) {
    throw vpException(vpException::badValue, "Not enough points to compute the region of interest.");
  }

  std::vector<vpImagePoint> ptsx(nbpt);
  std::vector<vpImagePoint> ptsy(nbpt);
  for (unsigned int i = 0; i < nbpt; i++) {
    ptsx[i] = ptsy[i] = ip[i];
  }

  // Sort ptsx by increasing j coordinate (bubble sort).
  for (unsigned int i = 0; i < nbpt; i++) {
    for (unsigned int j = 0; j < nbpt-1; j++) {
      if (ptsx[j].get_j() > ptsx[j+1].get_j()) {
        double tmp = ptsx[j+1].get_j();
        ptsx[j+1].set_j(ptsx[j].get_j());
        ptsx[j].set_j(tmp);
      }
    }
  }
  // Sort ptsy by increasing i coordinate (bubble sort).
  for (unsigned int i = 0; i < nbpt; i++) {
    for (unsigned int j = 0; j < nbpt-1; j++) {
      if (ptsy[j].get_i() > ptsy[j+1].get_i()) {
        double tmp = ptsy[j+1].get_i();
        ptsy[j+1].set_i(ptsy[j].get_i());
        ptsy[j].set_i(tmp);
      }
    }
  }
}
/*!
  Return a column vector with elements of \e v that are sorted.
  \sa invSort()
*/
vpColVector vpColVector::sort(const vpColVector &v)
{
  if (v.data == NULL) {
    throw(vpException(vpException::fatalError,
                      "Cannot sort content of column vector: vector empty"));
  }
  vpColVector tab;
  tab = v;
  unsigned int nb_permutation = 1;
  unsigned int i = 0;
  // Bubble sort: repeat passes until a pass makes no swap.
  while (nb_permutation != 0) {
    nb_permutation = 0;
    for (unsigned int j = v.getRows()-1; j >= i+1; j--) {
      if (tab[j] < tab[j-1]) {
        double tmp = tab[j];
        tab[j] = tab[j-1];
        tab[j-1] = tmp;
        nb_permutation++;
      }
    }
    i++;
  }

  return tab;
}
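/*
  An equivalent alternative, sketched under the assumption that the vector's
  data buffer is contiguous and publicly accessible (both hold for
  vpColVector, whose data member is used directly above):

    #include <algorithm>

    vpColVector sorted = v;
    std::sort(sorted.data, sorted.data + sorted.getRows());

  The bubble sort above is O(n^2); std::sort is O(n log n) and produces the
  same ascending order.
*/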
/*!
  Compute the image subtraction: \f$ Ires = I1 - I2 \f$.

  \param I1 : The first image.
  \param I2 : The second image.
  \param Ires : \f$ Ires = I1 - I2 \f$
  \param saturate : If true, saturate the result to [0 ; 255] using
  vpMath::saturate, otherwise overflow may occur.
*/
void vpImageTools::imageSubtract(const vpImage<unsigned char> &I1,
                                 const vpImage<unsigned char> &I2,
                                 vpImage<unsigned char> &Ires,
                                 const bool saturate)
{
  if ((I1.getHeight() != I2.getHeight()) || (I1.getWidth() != I2.getWidth())) {
    throw(vpException(vpException::dimensionError, "The two images do not have the same size"));
  }

  if ((I1.getHeight() != Ires.getHeight()) || (I1.getWidth() != Ires.getWidth())) {
    Ires.resize(I1.getHeight(), I1.getWidth());
  }

  unsigned char *ptr_I1   = I1.bitmap;
  unsigned char *ptr_I2   = I2.bitmap;
  unsigned char *ptr_Ires = Ires.bitmap;
  unsigned int cpt = 0;

#if VISP_HAVE_SSE2
  if (Ires.getSize() >= 16) {
    // Process 16 pixels at a time with SSE2.
    for (; cpt <= Ires.getSize() - 16; cpt += 16, ptr_I1 += 16, ptr_I2 += 16, ptr_Ires += 16) {
      const __m128i v1 = _mm_loadu_si128((const __m128i*) ptr_I1);
      const __m128i v2 = _mm_loadu_si128((const __m128i*) ptr_I2);
      const __m128i vres = saturate ? _mm_subs_epu8(v1, v2) : _mm_sub_epi8(v1, v2);

      _mm_storeu_si128((__m128i*) ptr_Ires, vres);
    }
  }
#endif

  // Scalar tail (and full loop when SSE2 is not available).
  for (; cpt < Ires.getSize(); cpt++, ++ptr_I1, ++ptr_I2, ++ptr_Ires) {
    *ptr_Ires = saturate ?
          vpMath::saturate<unsigned char>((short int) *ptr_I1 - (short int) *ptr_I2) :
          *ptr_I1 - *ptr_I2;
  }
}
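/*
  A small sketch of the effect of the saturate flag (image sizes and fill
  values are arbitrary). Subtracting pixels valued 20 from pixels valued 10
  clamps to 0 when saturating, and wraps to 246 (i.e. -10 mod 256) otherwise;
  this holds on both the SSE2 path (_mm_subs_epu8 vs _mm_sub_epi8) and the
  scalar path:

    vpImage<unsigned char> I1(480, 640, 10);
    vpImage<unsigned char> I2(480, 640, 20);
    vpImage<unsigned char> Ires;

    vpImageTools::imageSubtract(I1, I2, Ires, true);  // every pixel == 0
    vpImageTools::imageSubtract(I1, I2, Ires, false); // every pixel == 246
*/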
/*!
  Insert a column vector.
  \param i : Index of the first element to introduce. This index starts from 0.
  \param v : Column vector to insert.

  The following example shows how to use this function:
  \code
#include <visp3/core/vpColVector.h>

int main()
{
  vpColVector v(4);
  for (unsigned int i=0; i < v.size(); i++)
    v[i] = i;
  std::cout << "v: " << v.t() << std::endl;

  vpColVector w(2);
  for (unsigned int i=0; i < w.size(); i++)
    w[i] = i+10;
  std::cout << "w: " << w.t() << std::endl;

  v.insert(1, w);
  std::cout << "v: " << v.t() << std::endl;
}
  \endcode
  It produces the following output:
  \code
v: 0 1 2 3
w: 10 11
v: 0 10 11 3
  \endcode
*/
void vpColVector::insert(unsigned int i, const vpColVector &v)
{
  if (i+v.size() > this->size())
    throw(vpException(vpException::dimensionError, "Unable to insert a column vector"));

  for (unsigned int j=0; j < v.size(); j++)
    (*this)[i+j] = v[j];
}
void vpMbKltTracker::init(const vpImage<unsigned char>& I)
{
  if (!modelInitialised) {
    throw vpException(vpException::fatalError, "model not initialized");
  }

  bool reInitialisation = false;
  if (!useOgre)
    faces.setVisible(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
  else {
#ifdef VISP_HAVE_OGRE
    if (!faces.isOgreInitialised()) {
      faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());
      faces.setOgreShowConfigDialog(ogreShowConfigDialog);
      faces.initOgre(cam);
      // Turn off Ogre config dialog display for the next call to this
      // function since settings are saved in the ogre.cfg file and used
      // during the next call
      ogreShowConfigDialog = false;
    }

    faces.setVisibleOgre(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
#else
    faces.setVisible(I, cam, cMo, angleAppears, angleDisappears, reInitialisation);
#endif
  }

  reinit(I);
}
void vpBiclops::get_fJe(const vpColVector &q, vpMatrix &fJe)
{
  if (q.getRows() != 2) {
    vpERROR_TRACE("Bad dimension for biclops articular vector");
    throw(vpException(vpException::dimensionError, "Bad dimension for biclops articular vector"));
  }

  fJe.resize(6, 2);

  double s1 = sin(q[0]);
  double c1 = cos(q[0]);

  fJe = 0;

  if (dh_model_ == DH1) {
    fJe[3][1] = -s1;
    fJe[4][1] = c1;
    fJe[5][0] = 1;
  }
  else {
    fJe[3][1] = s1;
    fJe[4][1] = -c1;
    fJe[5][0] = 1;
  }
}
/*!
  Computes the SURF points in only a part of the current image I and tries
  to match them with the points in the reference list. The part of the image
  is a rectangle defined by its top left corner, its height and its width.
  The parameters of this rectangle must be given in pixel. Only the matched
  points are stored.

  \param I : The grayscale image where the points are computed.
  \param iP : The top left corner of the rectangle.
  \param height : height of the rectangle (in pixel).
  \param width : width of the rectangle (in pixel).

  \return the number of points which have been matched.
*/
unsigned int vpKeyPointSurf::matchPoint(const vpImage<unsigned char> &I,
                                        const vpImagePoint &iP,
                                        const unsigned int height, const unsigned int width)
{
  if ((iP.get_i()+height) >= I.getHeight() || (iP.get_j()+width) >= I.getWidth()) {
    vpTRACE("Bad size for the subimage");
    throw(vpException(vpImageException::notInTheImage, "Bad size for the subimage"));
  }

  vpImage<unsigned char> subImage;
  vpImageTools::createSubImage(I,
                               (unsigned int)iP.get_i(),
                               (unsigned int)iP.get_j(),
                               height, width, subImage);

  unsigned int nbMatchedPoint = this->matchPoint(subImage);

  // Shift the matched points back into the full-image coordinate frame.
  for (unsigned int k = 0; k < nbMatchedPoint; k++) {
    (currentImagePointsList[k]).set_i((currentImagePointsList[k]).get_i() + iP.get_i());
    (currentImagePointsList[k]).set_j((currentImagePointsList[k]).get_j() + iP.get_j());
  }

  return nbMatchedPoint;
}
/*!
  Get the robot jacobian expressed in the end-effector frame.

  \warning Re is not the embedded camera frame. It corresponds to the frame
  associated to the tilt axis (see also get_cMe).

  \param q : Articular position for pan and tilt axis.
  \param eJe : Robot jacobian expressed in the end-effector frame (located
  on the tilt axis).
*/
void vpBiclops::get_eJe(const vpColVector &q, vpMatrix &eJe)
{
  eJe.resize(6, 2);

  if (q.getRows() != 2) {
    vpERROR_TRACE("Bad dimension for biclops articular vector");
    throw(vpException(vpException::dimensionError, "Bad dimension for biclops articular vector"));
  }

  double s2 = sin(q[1]);
  double c2 = cos(q[1]);

  eJe = 0;

  if (dh_model_ == DH1) {
    eJe[3][0] = -c2;
    eJe[4][1] = 1;
    eJe[5][0] = -s2;
  }
  else {
    eJe[3][0] = -c2;
    eJe[4][1] = -1;
    eJe[5][0] = s2;
  }
}
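/*
  As a reading aid: a pan-tilt head can only rotate, so the translational
  part (rows 0-2) of the 6x2 jacobian is zero. For the DH1 model the code
  above fills

  \f[
    {^e}J_e = \begin{bmatrix}
      0    & 0 \\
      0    & 0 \\
      0    & 0 \\
      -c_2 & 0 \\
      0    & 1 \\
      -s_2 & 0
    \end{bmatrix}
  \f]

  where the first column is the pan axis expressed in the end-effector frame
  (hence the dependence on the tilt angle \f$q_2\f$) and the second column is
  the tilt axis itself. The DH2 case differs only in signs.
*/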
/*!
  Read data from disk; data are organized as follows: oX oY oZ u v

  \param filename : name of the file
*/
int vpCalibration::readData(const char* filename)
{
  vpImagePoint ip;
  std::ifstream f;
  f.open(filename);
  if (! f.fail()) {
    unsigned int n;
    f >> n;
    std::cout << "There are " << n << " points on the calibration grid " << std::endl;
    clearPoint();

    if (n > 100000)
      throw(vpException(vpException::badValue, "Bad number of points in the calibration grid"));

    for (unsigned int i = 0; i < n; i++) {
      double x, y, z, u, v;
      f >> x >> y >> z >> u >> v;
      std::cout << x << " " << y << " " << z << " " << u << " " << v << std::endl;
      ip.set_u(u);
      ip.set_v(v);
      addPoint(x, y, z, ip);
    }

    f.close();
    return 0;
  }
  // File could not be opened: report failure to the caller.
  return -1;
}
bool vpHomography::degenerateConfiguration(const std::vector<double> &xb, const std::vector<double> &yb,
                                           const std::vector<double> &xa, const std::vector<double> &ya)
{
  unsigned int n = (unsigned int)xb.size();
  if (n < 4)
    throw(vpException(vpException::fatalError, "There must be at least 4 matched points"));

  std::vector<vpColVector> pa(n), pb(n);
  for (unsigned int i = 0; i < n; i++) {
    pa[i].resize(3);
    pa[i][0] = xa[i];
    pa[i][1] = ya[i];
    pa[i][2] = 1;
    pb[i].resize(3);
    pb[i][0] = xb[i];
    pb[i][1] = yb[i];
    pb[i][2] = 1;
  }

  // The configuration is degenerate if any three points of either set are
  // collinear.
  for (unsigned int i = 0; i < n-2; i++) {
    for (unsigned int j = i+1; j < n-1; j++) {
      for (unsigned int k = j+1; k < n; k++) {
        if (isColinear(pa[i], pa[j], pa[k])) {
          return true;
        }
        if (isColinear(pb[i], pb[j], pb[k])) {
          return true;
        }
      }
    }
  }
  return false;
}
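/*
  For reference: with homogeneous coordinates \f$p_i, p_j, p_k\f$ as built
  above, three points are collinear iff
  \f$ \det\,[\,p_i \;\; p_j \;\; p_k\,] = 0 \f$, equivalently
  \f$ (p_i \times p_j) \cdot p_k = 0 \f$, which is presumably what
  isColinear() tests up to a numerical tolerance. The triple loop checks all
  \f$\binom{n}{3}\f$ triplets, so the test is \f$O(n^3)\f$ in the number of
  matched points.
*/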
/*!
  \brief copy from a matrix
  \warning Handle with care; m should be a single-column matrix
*/
vpColVector &vpColVector::operator=(const vpMatrix &m)
{
  if (m.getCols() != 1) {
    vpTRACE("m should be a 1-column matrix");
    throw(vpException(vpException::dimensionError, "m should be a 1-column matrix"));
  }
  try {
    resize(m.getRows());
  }
  catch (vpException &) {
    vpERROR_TRACE("Error caught");
    throw;
  }

  memcpy(data, m.data, rowNum*sizeof(double));

  return *this;
}
void vpPlanarObjectDetector::getReferencePoint(unsigned int _i, vpImagePoint& _imPoint)
{
  if (_i >= refImagePoints.size()) {
    throw vpException(vpException::fatalError, "index out of bound in getReferencePoint.");
  }
  _imPoint = refImagePoints[_i];
}
//! Division by scalar. Returns a quaternion defined by (x/l,y/l,z/l,w/l).
vpQuaternionVector vpQuaternionVector::operator/(const double l) const
{
  if (vpMath::nul(l, std::numeric_limits<double>::epsilon())) {
    throw vpException(vpException::fatalError, "Division by scalar l==0 !");
  }

  return vpQuaternionVector(x()/l, y()/l, z()/l, w()/l);
}
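/*
  A minimal usage sketch: dividing a quaternion by its norm yields a unit
  quaternion. Here the norm of (0, 0, 0, 2) is simply 2:

    vpQuaternionVector q(0.0, 0.0, 0.0, 2.0);
    vpQuaternionVector u = q / 2.0; // (0, 0, 0, 1): the identity rotation
*/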
/*!
  Get a reference to a corner.

  \throw vpException::dimensionError if the _index is out of range.

  \param _index : the index of the corner
*/
vpPoint & vpPolygon3D::getPoint(const unsigned int _index)
{
  if (_index >= nbpt) {
    throw vpException(vpException::dimensionError, "index out of range");
  }
  return p[_index];
}
/*!
  Computes normalized gravity center moment.
  Depends on vpMomentAreaNormalized and on vpMomentGravityCenter.
*/
void vpMomentGravityCenterNormalized::compute()
{
  bool found_moment_gravity;
  bool found_moment_surface_normalized;

  const vpMomentAreaNormalized& momentSurfaceNormalized =
      static_cast<const vpMomentAreaNormalized&>(getMoments().get("vpMomentAreaNormalized", found_moment_surface_normalized));
  const vpMomentGravityCenter& momentGravity =
      static_cast<const vpMomentGravityCenter&>(getMoments().get("vpMomentGravityCenter", found_moment_gravity));

  if (!found_moment_surface_normalized)
    throw vpException(vpException::notInitialized, "vpMomentAreaNormalized not found");
  if (!found_moment_gravity)
    throw vpException(vpException::notInitialized, "vpMomentGravityCenter not found");

  double Xn = momentGravity.get()[0] * momentSurfaceNormalized.get()[0];
  double Yn = momentGravity.get()[1] * momentSurfaceNormalized.get()[0];

  values[0] = Xn;
  values[1] = Yn;
}
/*!
 * Return device end effector velocity.
 */
vpColVector vpVirtuose::getVelocity() const
{
  if (!m_is_init) {
    throw(vpException(vpException::fatalError, "Device not initialized. Call init()."));
  }

  vpColVector vel(6, 0);
  float speed[6];
  if (virtGetSpeed(m_virtContext, speed)) {
    int err = virtGetErrorCode(m_virtContext);
    throw(vpException(vpException::fatalError,
                      "Cannot get haptic device velocity: %s", virtGetErrorMessage(err)));
  }
  for (unsigned int i = 0; i < 6; i++)
    vel[i] = speed[i];
  return vel;
}
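/*
  A minimal usage sketch, assuming the haptic device is reachable with the
  default connection settings:

    vpVirtuose virtuose;
    virtuose.init();                        // must be called first
    vpColVector v = virtuose.getVelocity(); // 6-dim twist (linear and angular parts)
    std::cout << "End effector velocity: " << v.t() << std::endl;
*/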
/*!
  Computes interaction matrix for the normalized surface moment. Called
  internally. The moment primitives must be computed before calling this.
  This feature depends on:
  - vpMomentGravityCenter
  - vpMomentArea
*/
void vpFeatureMomentArea::compute_interaction()
{
  interaction_matrices.resize(1);
  interaction_matrices[0].resize(1, 6);

  // Retrieve the moment object associated with this feature
  vpMomentObject mobj = moment->getObject();
  if (mobj.getType() == vpMomentObject::DISCRETE) {
    /*
     * The interaction matrix for the discrete case is zero
     * since the feature m00 is constant.
     * Refer to the thesis of Omar Tahri, 2005 [Section 3.4.22]
     */
    interaction_matrices[0][0][0] = 0.;
    interaction_matrices[0][0][1] = 0.;
    interaction_matrices[0][0][2] = 0.;
    interaction_matrices[0][0][3] = 0.;
    interaction_matrices[0][0][4] = 0.;
    interaction_matrices[0][0][5] = 0.;
  }
  else {
    // Get Xg and Yg
    bool found_xgyg;
    const vpMomentGravityCenter& momentGravity =
        static_cast<const vpMomentGravityCenter&>(moments.get("vpMomentGravityCenter", found_xgyg));
    if (!found_xgyg)
      throw vpException(vpException::notInitialized, "vpMomentGravityCenter not found");

    bool found_m00;
    const vpMomentArea& areamoment =
        static_cast<const vpMomentArea&>(moments.get("vpMomentArea", found_m00));
    if (!found_m00)
      throw vpException(vpException::notInitialized, "vpMomentArea not found");

    double Xg = momentGravity.getXg();
    double Yg = momentGravity.getYg();

    double a = areamoment.get()[0]; // Area scalar
    assert(std::fabs(a - mobj.get(0,0)) < a*std::numeric_limits<double>::epsilon());

    interaction_matrices[0][0][0] = -a*A;
    interaction_matrices[0][0][1] = -a*B;
    interaction_matrices[0][0][2] = (3*a)*(A*Xg + B*Yg) + (2*C*a);
    interaction_matrices[0][0][3] = 3*a*Yg;
    interaction_matrices[0][0][4] = -3*a*Xg;
    interaction_matrices[0][0][5] = 0.;
  }
}
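/*
  Written out, the dense-case assignments above form the 1x6 interaction
  matrix

  \f[
    L_a = \begin{bmatrix}
      -aA & -aB & 3a(A X_g + B Y_g) + 2aC & 3a Y_g & -3a X_g & 0
    \end{bmatrix}
  \f]

  where \f$a\f$ is the object area, \f$(X_g, Y_g)\f$ its gravity center, and
  A, B, C are the plane parameters used by the feature.
*/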
/*!
  Remove the feature with the given index as parameter.

  \param index : Index of the feature to remove.
*/
void vpKltOpencv::suppressFeature(const int &index)
{
  if ((size_t)index >= m_points[1].size()) {
    throw(vpException(vpException::badValue, "Feature [%d] doesn't exist", index));
  }

  m_points[1].erase(m_points[1].begin()+index);
  m_points_id.erase(m_points_id.begin()+index);
}
/*!
  Insert a row vector.
  \param i : Index of the first element to introduce. This index starts from 0.
  \param v : Row vector to insert.

  The following example shows how to use this function:
  \code
#include <visp/vpRowVector.h>

int main()
{
  vpRowVector v(4);
  for (unsigned int i=0; i < v.size(); i++)
    v[i] = i;
  std::cout << "v: " << v << std::endl;

  vpRowVector w(2);
  for (unsigned int i=0; i < w.size(); i++)
    w[i] = i+10;
  std::cout << "w: " << w << std::endl;

  v.insert(1, w);
  std::cout << "v: " << v << std::endl;
}
  \endcode
  It produces the following output:
  \code
v: 0 1 2 3
w: 10 11
v: 0 10 11 3
  \endcode
*/
void vpRowVector::insert(unsigned int i, const vpRowVector &v)
{
  if (i+v.size() > this->size())
    throw(vpException(vpException::dimensionError,
                      "Unable to insert (1x%d) row vector in (1x%d) row vector at position (%d)",
                      v.getCols(), colNum, i));

  for (unsigned int j=0; j < v.size(); j++)
    (*this)[i+j] = v[j];
}
/*!
 * Return force tensor to be applied to the attached object.
 */
vpColVector vpVirtuose::getForce() const
{
  if (!m_is_init) {
    throw(vpException(vpException::fatalError, "Device not initialized. Call init()."));
  }

  vpColVector force(6, 0);
  float force_[6];
  if (virtGetForce(m_virtContext, force_)) {
    int err = virtGetErrorCode(m_virtContext);
    throw(vpException(vpException::fatalError, "Error calling virtGetForce: error code %d", err));
  }
  for (unsigned int i = 0; i < 6; i++)
    force[i] = force_[i];
  return force;
}
//! Constructor from a 4-dimension vector of doubles.
vpQuaternionVector::vpQuaternionVector(const vpColVector &q)
  : vpRotationVector(4)
{
  if (q.size() != 4) {
    throw(vpException(vpException::dimensionError,
                      "Cannot construct a quaternion vector from a %d-dimension col vector", q.size()));
  }
  for (unsigned int i = 0; i < 4; i++)
    data[i] = q[i];
}
/*!
  Copy constructor from a 3-dimension vector.
*/
vpRzyzVector::vpRzyzVector(const vpColVector &rzyz)
  : vpRotationVector(3)
{
  if (rzyz.size() != 3) {
    throw(vpException(vpException::dimensionError,
                      "Cannot construct a R-zyz vector from a %d-dimension col vector", rzyz.size()));
  }
  for (unsigned int i = 0; i < 3; i++)
    data[i] = rzyz[i];
}
/*!
  \brief basic initialization with the default parameters
*/
void vpCameraParameters::init()
{
  if (fabs(this->px) < 1e-6) {
    vpERROR_TRACE("Camera parameter px = 0");
    throw(vpException(vpException::divideByZeroError, "Camera parameter px = 0"));
  }
  if (fabs(this->py) < 1e-6) {
    vpERROR_TRACE("Camera parameter py = 0");
    throw(vpException(vpException::divideByZeroError, "Camera parameter py = 0"));
  }
  this->inv_px = 1./this->px;
  this->inv_py = 1./this->py;
}
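/*
  A minimal sketch of setting valid intrinsics so that inv_px and inv_py are
  well defined (the numeric values below are arbitrary illustration values):

    vpCameraParameters cam;
    cam.initPersProjWithoutDistortion(600.0, 600.0, 320.0, 240.0); // px, py, u0, v0
*/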
/*!
  Computes translation-plane-rotation-scale invariants.
  Depends on vpMomentCentered.
  All possible invariants are computed here. The selection of the invariant
  is done afterwards.
*/
void vpMomentCInvariant::compute()
{
  if (getObject().getOrder() < 5)
    throw vpException(vpException::notInitialized,
                      "Order is not high enough for vpMomentCInvariant. Specify at least order 5.");

  bool found_moment_centered;
  const vpMomentCentered& momentCentered =
      (static_cast<const vpMomentCentered&>(getMoments().get("vpMomentCentered", found_moment_centered)));

  if (!found_moment_centered)
    throw vpException(vpException::notInitialized, "vpMomentCentered not found");

  computeI(momentCentered, I);
  double II3_2 = II[3]*II[3];
  double II3_3 = II3_2*II[3];

  double a;
  if (getObject().getType() == vpMomentObject::DISCRETE)
    a = momentCentered.get(2,0) + momentCentered.get(0,2);
  else
    a = getObject().get(0,0);

  values[0] = I[1]/I[2];
  values[1] = I[3]/I[4];
  values[2] = I[5]/I[6];
  values[3] = I[7]/I[6];
  values[4] = I[8]/I[6];
  values[5] = I[9]/I[6];
  values[6] = I[11]/I[10];
  values[7] = I[12]/I[10];
  values[8] = I[13]/I[15];
  values[9] = I[14]/I[15];

  if (flg_sxsynormalization_)
    calcSxSyNormalized(values[10], values[11]);
  else
    calcSxSy(values[10], values[11]);

  values[12] = II[1]/(II3_2);   // Px
  values[13] = a*II[2]/(II3_3); // Py
}