std::vector<glm::mat4> Animation::getKeyPose(const unsigned int& keyFrame, const std::vector<unsigned int>& roots, const std::vector<Joint>& hierarchy) const
{
  std::vector<glm::mat4> pose(timeLine[0].poses.size(), glm::mat4(1.f));
  for (unsigned int i = 0; i < roots.size() && keyFrame < timeLine.size(); i++)
    computePose(keyFrame, pose, glm::mat4(1.f), roots[i], hierarchy);
  return pose;
}
void Cylinder::computePose(Eigen::Vector3f origin, std::vector<std::vector<Eigen::Vector3f> >& contours_3d)
{
  Eigen::Affine3f t;
  pcl::getTransformationFromTwoUnitVectorsAndOrigin(sym_axis_, sym_axis_.unitOrthogonal(), origin, t);
  Eigen::Vector3f z_cyl = t * contours_3d[0][0];
  z_cyl(1) = 0;
  Eigen::Vector3f z_axis = t.inverse().rotation() * z_cyl;
  computePose(origin, z_axis.normalized());
}
// Protected functions
void Animation::computePose(const unsigned int& keyFrame, std::vector<glm::mat4>& pose, const glm::mat4& parentPose, unsigned int joint, const std::vector<Joint>& hierarchy) const
{
  glm::mat4 t = glm::translate(timeLine[keyFrame].poses[joint].position);
  glm::mat4 r = glm::toMat4(timeLine[keyFrame].poses[joint].rotation);
  glm::mat4 s = glm::scale(timeLine[keyFrame].poses[joint].scale);
  pose[joint] = parentPose * t * r * s;
  for (unsigned int i = 0; i < hierarchy[joint].sons.size(); i++)
    computePose(keyFrame, pose, pose[joint], hierarchy[joint].sons[i], hierarchy);
}
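// The two Animation functions above build the global (model-space) matrix of every joint by
// recursively accumulating parentPose * T * R * S down the hierarchy. A hedged usage sketch
// follows; the free function and the assumption that the caller already owns the root indices
// and joint array are illustrative, not part of the code above.
std::vector<glm::mat4> sampleKeyFrame(const Animation &animation,
                                      const std::vector<unsigned int> &roots,
                                      const std::vector<Joint> &hierarchy,
                                      unsigned int keyFrame)
{
  // One matrix per joint, e.g. to upload to a skinning shader.
  return animation.getKeyPose(keyFrame, roots, hierarchy);
}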
int main()
{
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && (defined(VISP_HAVE_DC1394_2) || defined(VISP_HAVE_CMU1394))
  vpImage<unsigned char> I;

#if defined(VISP_HAVE_DC1394_2)
  vp1394TwoGrabber g;
#elif defined(VISP_HAVE_CMU1394)
  vp1394CMUGrabber g;
#endif
  g.open(I);

  // Parameters of our camera
  vpCameraParameters cam(840, 840, I.getWidth()/2, I.getHeight()/2);
  // The pose container
  vpHomogeneousMatrix cMo;

  std::vector<vpDot2> dot(4);
  std::vector<vpPoint> point(4);
  double L = 0.06;
  point[0].setWorldCoordinates(-L, -L, 0);
  point[1].setWorldCoordinates( L, -L, 0);
  point[2].setWorldCoordinates( L,  L, 0);
  point[3].setWorldCoordinates(-L,  L, 0);

  bool init = true;
#if defined(VISP_HAVE_X11)
  vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d(I);
#endif

  while(1) {
    // Image Acquisition
    g.acquire(I);
    vpDisplay::display(I);

    track(I, dot, init);
    computePose(point, dot, cam, init, cMo);
    vpDisplay::displayFrame(I, cMo, cam, 0.05, vpColor::none, 3);
    vpDisplay::flush(I);

    if (init)
      init = false; // turn off the initialisation specific stuff

    if (vpDisplay::getClick(I, false))
      break;
  }
#endif
}
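// The track() and computePose() helpers called by the ViSP tutorial mains in this section are
// defined elsewhere in the tutorials. As a reference, this is a hedged sketch of what the pose
// helper typically looks like: convert each blob's centre of gravity to normalised coordinates,
// then estimate the pose with vpPose (linear initialisation on the first image, virtual visual
// servoing afterwards). The method selection below is an assumption, not the authors' exact code.
#include <visp/vpPose.h>
#include <visp/vpDot2.h>
#include <visp/vpPixelMeterConversion.h>

void computePose(std::vector<vpPoint> &point, const std::vector<vpDot2> &dot,
                 const vpCameraParameters &cam, bool init, vpHomogeneousMatrix &cMo)
{
  vpPose pose;
  double x = 0, y = 0;
  for (unsigned int i = 0; i < point.size(); i++) {
    // Pixel coordinates of the blob cog -> normalised image coordinates
    vpPixelMeterConversion::convertPoint(cam, dot[i].getCog(), x, y);
    point[i].set_x(x);
    point[i].set_y(y);
    pose.addPoint(point[i]);
  }
  if (init) {
    // Linear initialisation: keep the better of Dementhon and Lagrange
    vpHomogeneousMatrix cMo_dem, cMo_lag;
    pose.computePose(vpPose::DEMENTHON, cMo_dem);
    pose.computePose(vpPose::LAGRANGE, cMo_lag);
    cMo = (pose.computeResidual(cMo_dem) < pose.computeResidual(cMo_lag)) ? cMo_dem : cMo_lag;
  }
  // Non-linear refinement by virtual visual servoing
  pose.computePose(vpPose::VIRTUAL_VS, cMo);
}
// The QR-code example further below follows the same pattern, but passes the detected corners
// as vpImagePoint directly instead of tracked vpDot2 blobs.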
void Cylinder::updateAttributes(const Eigen::Vector3f& sym_axis, const Eigen::Vector3f& origin, const Eigen::Vector3f& z_axis)
{
  //origin_ = new_origin;
  sym_axis_ = sym_axis;
  if (sym_axis_[2] < 0)
  {
    sym_axis_ = sym_axis_ * -1;
  }
  computePose(origin, z_axis.normalized());

  //d_ = fabs(pose_.translation().dot(normal_));
  //normal_ = new_normal;
  /*Eigen::Affine3f transform_from_plane_to_world;
  getTransformationFromPlaneToWorld(sym_axis,normal,origin_,transform_from_plane_to_world);
  transform_from_world_to_plane = transform_from_plane_to_world.inverse();*/
}
Cylinder::Cylinder(unsigned int id, Eigen::Vector3f origin, Eigen::Vector3f sym_axis,
                   double radius, std::vector<std::vector<Eigen::Vector3f> >& contours_3d,
                   std::vector<bool> holes, std::vector<float> color)
  : Polygon()
{
  //Cylinder();
  id_ = id;
  sym_axis_ = sym_axis;
  r_ = radius;
  holes_ = holes;
  color_ = color;
  if (sym_axis_[2] < 0)
  {
    sym_axis_ = sym_axis_ * -1;
  }
  computePose(origin, contours_3d);
  setContours3D(contours_3d);
  computeHeight();
}
Cylinder::Cylinder(unsigned int id, Eigen::Vector3f origin, Eigen::Vector3f sym_axis,
                   double radius, std::vector<pcl::PointCloud<pcl::PointXYZ> >& contours_3d,
                   std::vector<bool> holes, std::vector<float> color)
  : Polygon()
{
  std::vector<std::vector<Eigen::Vector3f> > contours_eigen;
  for (unsigned int i = 0; i < contours_3d.size(); i++)
  {
    std::vector<Eigen::Vector3f> c_eigen;
    for (unsigned int j = 0; j < contours_3d[i].size(); j++)
    {
      Eigen::Vector3f pt = contours_3d[i].points[j].getVector3fMap();
      c_eigen.push_back(pt);
    }
    contours_eigen.push_back(c_eigen);
  }
  id_ = id;
  sym_axis_ = sym_axis;
  r_ = radius;
  //std::cout << "origin " << origin << std::endl;
  //std::cout << "r " << r_ << std::endl;
  holes_ = holes;
  color_ = color;
  if (sym_axis_[2] < 0)
  {
    sym_axis_ = sym_axis_ * -1;
  }
  computePose(origin, contours_eigen);
  setContours3D(contours_eigen);
  computeHeight();
  //Cylinder(id, origin, sym_axis, radius, contours_eigen, holes, color);
}
/*!
  Compute the calibration according to the desired method using one pose.

  \param method : Method that will be used to estimate the parameters.
  \param cMo_est : Estimated homogeneous matrix that defines the pose.
  \param cam_est : Estimated intrinsic camera parameters.
  \param verbose : Set to true to print information about the residual at each iteration of the algorithm.

  \return 0 if the calibration computation succeeds.
*/
int vpCalibration::computeCalibration(vpCalibrationMethodType method,
                                      vpHomogeneousMatrix &cMo_est,
                                      vpCameraParameters &cam_est,
                                      bool verbose)
{
  try {
    computePose(cam_est, cMo_est);
    switch (method) {
    case CALIB_LAGRANGE:
    case CALIB_LAGRANGE_VIRTUAL_VS:
      {
        calibLagrange(cam_est, cMo_est);
      }
      break;
    case CALIB_VIRTUAL_VS:
    case CALIB_VIRTUAL_VS_DIST:
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST:
    default:
      break;
    }

    switch (method) {
    case CALIB_VIRTUAL_VS:
    case CALIB_VIRTUAL_VS_DIST:
    case CALIB_LAGRANGE_VIRTUAL_VS:
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST:
      {
        if (verbose) { std::cout << "start calibration without distortion" << std::endl; }
        calibVVS(cam_est, cMo_est, verbose);
      }
      break;
    case CALIB_LAGRANGE:
    default:
      break;
    }
    this->cMo = cMo_est;
    this->cMo_dist = cMo_est;

    // Print camera parameters
    if (verbose) {
      // std::cout << "Camera parameters without distortion :" << std::endl;
      cam_est.printParameters();
    }

    this->cam = cam_est;

    switch (method) {
    case CALIB_VIRTUAL_VS_DIST:
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST:
      {
        if (verbose) { std::cout << "start calibration with distortion" << std::endl; }
        calibVVSWithDistortion(cam_est, cMo_est, verbose);
      }
      break;
    case CALIB_LAGRANGE:
    case CALIB_VIRTUAL_VS:
    case CALIB_LAGRANGE_VIRTUAL_VS:
    default:
      break;
    }
    // Print camera parameters
    if (verbose) {
      // std::cout << "Camera parameters without distortion :" << std::endl;
      this->cam.printParameters();
      // std::cout << "Camera parameters with distortion :" << std::endl;
      cam_est.printParameters();
    }
    this->cam_dist = cam_est;
    this->cMo_dist = cMo_est;
    return 0;
  }
  catch (...) {
    throw;
  }
}
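// A hedged usage sketch of the calibration entry point above: feed 2D-3D correspondences with
// addPoint(), then estimate intrinsics and the pose from a single view. The helper name and the
// choice of CALIB_VIRTUAL_VS_DIST are illustrative assumptions, not prescribed by the code above.
#include <visp/vpCalibration.h>
#include <visp/vpPoint.h>

int calibrateFromOneView(const std::vector<vpPoint> &model, // known 3D points (meters)
                         std::vector<vpImagePoint> &ip,     // matching detections (pixels)
                         vpCameraParameters &cam,           // in: initial guess, out: estimate
                         vpHomogeneousMatrix &cMo)          // out: estimated pose
{
  vpCalibration calib;
  calib.clearPoint();
  for (size_t i = 0; i < model.size(); i++)
    calib.addPoint(model[i].get_oX(), model[i].get_oY(), model[i].get_oZ(), ip[i]);

  // Virtual visual servoing with distortion: refines intrinsics, distortion and the pose together.
  return calib.computeCalibration(vpCalibration::CALIB_VIRTUAL_VS_DIST, cMo, cam, true);
}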
int main()
{
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) && (defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  try {
    vpImage<unsigned char> I;

#if defined(VISP_HAVE_DC1394)
    vp1394TwoGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_CMU1394)
    vp1394CMUGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_OPENCV)
    cv::VideoCapture g(0); // open the default camera
    if (!g.isOpened()) {   // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

    // Parameters of our camera
    vpCameraParameters cam(840, 840, I.getWidth()/2, I.getHeight()/2);
    // The pose container
    vpHomogeneousMatrix cMo;

    std::vector<vpDot2> dot(4);
    std::vector<vpPoint> point(4);
    double L = 0.06;
    point[0].setWorldCoordinates(-L, -L, 0);
    point[1].setWorldCoordinates( L, -L, 0);
    point[2].setWorldCoordinates( L,  L, 0);
    point[3].setWorldCoordinates(-L,  L, 0);

    bool init = true;
#if defined(VISP_HAVE_X11)
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I);
#endif

    while(1) {
      // Image Acquisition
#if defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394)
      g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif
      vpDisplay::display(I);

      track(I, dot, init);
      computePose(point, dot, cam, init, cMo);
      vpDisplay::displayFrame(I, cMo, cam, 0.05, vpColor::none, 3);
      vpDisplay::flush(I);

      if (init)
        init = false; // turn off the initialisation specific stuff

      if (vpDisplay::getClick(I, false))
        break;
    }
  }
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
int main()
{
#if defined(VISP_HAVE_ZBAR)
  try {
    vpImage<unsigned char> I;
    vpImageIo::read(I, "bar-code.pgm");

#if defined(VISP_HAVE_X11)
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I);
#endif

    // Camera parameters should be adapted to your camera
    vpCameraParameters cam(840, 840, I.getWidth()/2, I.getHeight()/2);

    // 3D model of the QRcode: here we consider a 12cm by 12cm QRcode
    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.06, -0.06, 0) ); // QRcode point 0 3D coordinates in plane Z=0
    point.push_back( vpPoint( 0.06, -0.06, 0) ); // QRcode point 1 3D coordinates in plane Z=0
    point.push_back( vpPoint( 0.06,  0.06, 0) ); // QRcode point 2 3D coordinates in plane Z=0
    point.push_back( vpPoint(-0.06,  0.06, 0) ); // QRcode point 3 3D coordinates in plane Z=0

    vpHomogeneousMatrix cMo;
    bool init = true;

    vpDetectorQRCode detector;

    while(1) {
      vpImageIo::read(I, "bar-code.pgm");
      vpDisplay::display(I);

      bool status = detector.detect(I);
      std::ostringstream legend;
      legend << detector.getNbObjects() << " bar code detected";
      vpDisplay::displayText(I, (int)I.getHeight()-30, 10, legend.str(), vpColor::red);

      if (status) { // true if at least one QRcode is detected
        for (size_t i = 0; i < detector.getNbObjects(); i++) {
          std::vector<vpImagePoint> p = detector.getPolygon(i); // get the four corners location in the image

          for (size_t j = 0; j < p.size(); j++) {
            vpDisplay::displayCross(I, p[j], 14, vpColor::red, 3);
            std::ostringstream number;
            number << j;
            vpDisplay::displayText(I, p[j]+vpImagePoint(15,5), number.str(), vpColor::blue);
          }

          computePose(point, p, cam, init, cMo); // resulting pose is available in cMo var
          std::cout << "Pose translation (meter): " << cMo.getTranslationVector().t() << std::endl
                    << "Pose rotation (quaternion): " << vpQuaternionVector(cMo.getRotationMatrix()).t() << std::endl;
          vpDisplay::displayFrame(I, cMo, cam, 0.05, vpColor::none, 3);
        }
      }
      vpDisplay::displayText(I, (int)I.getHeight()-15, 10, "A click to quit...", vpColor::red);
      vpDisplay::flush(I);

      if (vpDisplay::getClick(I, false))
        break;

      vpTime::wait(40);
    }
  }
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e.getMessage() << std::endl;
  }
#else
  std::cout << "ViSP is not built with zbar 3rd party." << std::endl;
#endif
}
int main(int argc, const char** argv)
{
#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100) && (defined(VISP_HAVE_ZBAR) || defined(VISP_HAVE_DMTX))
  int opt_device = 0;
  int opt_barcode = 0; // 0=QRCode, 1=DataMatrix
  std::string opt_ip = "198.18.0.1";

  for (int i = 0; i < argc; i++) {
    if (std::string(argv[i]) == "--ip")
      opt_ip = argv[i+1];
    else if (std::string(argv[i]) == "--code-type")
      opt_barcode = atoi(argv[i+1]);
    else if (std::string(argv[i]) == "--help")
      std::cout << "Usage: " << argv[0]
                << " [--ip <robot ip address>] [--code-type <0 for QRcode | 1 for DataMatrix>] [--help]"
                << std::endl;
  }

  try {
    vpNaoqiGrabber g;
    if (! opt_ip.empty())
      g.setRobotIp(opt_ip);
    g.setCamera(0);
    g.open();

    vpImage<unsigned char> I; // for gray images
    g.acquire(I);
    vpDisplayX d(I);

    vpDetectorBase *detector;
    if (opt_barcode == 0)
      detector = new vpDetectorQRCode;
#ifdef VISP_HAVE_DMTX
    else
      detector = new vpDetectorDataMatrixCode;
#endif

    vpTemplateTrackerWarpAffine warp;
    vpTemplateTrackerSSDInverseCompositional tracker(&warp);
    tracker.setSampling(2, 2);
    tracker.setLambda(0.001);
    tracker.setIterationMax(5);
    tracker.setPyramidal(2, 1);

    state_t state = detection;
    vpTemplateTrackerZone zone_ref, zone_cur;
    double area_zone_ref, area_zone_cur, area_zone_prev;
    vpColVector p; // Estimated parameters

    std::vector<vpImagePoint> corners_detected;
    std::vector<vpImagePoint> corners_tracked;
    std::vector<int> corners_tracked_index;

    bool target_found = false;
    vpRect target_bbox; // BBox of the tracked qrcode

    vpCameraParameters cam = g.getCameraParameters();
    vpHomogeneousMatrix cMo;
    std::vector<vpPoint> P(4);
    double qrcode_size = 0.035;
    P[0].setWorldCoordinates( qrcode_size/2,  qrcode_size/2, 0); //  / \ x
    P[1].setWorldCoordinates(-qrcode_size/2,  qrcode_size/2, 0); //   |
    P[2].setWorldCoordinates(-qrcode_size/2, -qrcode_size/2, 0); // small dot on the qrcode  y <--|
    P[3].setWorldCoordinates( qrcode_size/2, -qrcode_size/2, 0);

    for(;;) {
      double t = vpTime::measureTimeMs();
      g.acquire(I);
      vpDisplay::display(I);

      target_found = false;

      if (state == detection) {
        vpDisplay::displayCharString(I, 10, 10, "state: detection", vpColor::red);
        bool status = detector->detect(I);
        if (status) {
          vpDisplay::displayText(I, 20, 10, "Bar code detected", vpColor::green);

          for (size_t i = 0; i < detector->getNbObjects(); i++) {
            if (detector->getMessage(i) == "romeo_left_arm") {
              corners_detected = detector->getPolygon(i);
              state = init_tracking;
              target_found = true;

              vpDisplay::displayText(I, I.getHeight()-20, 10, "Bar code message: " + detector->getMessage(i), vpColor::green);
              for (size_t j = 0; j < corners_detected.size(); j++) {
                std::ostringstream s;
                s << j;
                vpDisplay::displayText(I, corners_detected[j]+vpImagePoint(-20,-20), s.str(), vpColor::green);
                vpDisplay::displayCross(I, corners_detected[j], 25, vpColor::green, 2);
              }
            }
          }
        }
      }

      if (target_found && state == init_tracking) {
        vpDisplay::displayCharString(I, 10, 10, "state: init tracking", vpColor::red);
        try {
          tracker.resetTracker();
          tracker.initFromPoints(I, corners_detected, true);
          tracker.track(I);
          //tracker.display(I, vpColor::green);
          zone_ref = tracker.getZoneRef();
          area_zone_ref = zone_ref.getArea();
          p = tracker.getp();
          warp.warpZone(zone_ref, p, zone_cur);
          area_zone_prev = area_zone_cur = zone_cur.getArea();
          corners_tracked = getTemplateTrackerCorners(zone_cur);
          corners_tracked_index = computedTemplateTrackerCornersIndexes(corners_detected, corners_tracked);
          corners_tracked = orderPointsFromIndexes(corners_tracked_index, corners_tracked);

          computePose(P, corners_tracked, cam, true, cMo);
          vpDisplay::displayFrame(I, cMo, cam, 0.04, vpColor::none, 3);

          state = tracking;
        }
        catch(...) {
          std::cout << "Exception init tracking" << std::endl;
          state = detection;
        }
      }
      else if (state == tracking) {
        try {
          vpDisplay::displayCharString(I, 10, 10, "state: tracking", vpColor::red);
          tracker.track(I);
          //tracker.display(I, vpColor::blue);
          {
            // Instantiate and get the reference zone
            p = tracker.getp();
            warp.warpZone(zone_ref, p, zone_cur);
            area_zone_cur = zone_cur.getArea();

            double size_percent = 0.9;
            double max_target_size = I.getSize()/4;
            if (area_zone_cur/area_zone_prev < size_percent || area_zone_cur/area_zone_prev > (1+size_percent)) {
              std::cout << "reinit caused by size" << std::endl;
              state = detection;
            }
            else if (zone_cur.getBoundingBox().getSize() > max_target_size) {
              std::cout << "reinit caused by size area" << std::endl;
              state = detection;
            }
            else {
              corners_tracked = getTemplateTrackerCorners(zone_cur);
              corners_tracked = orderPointsFromIndexes(corners_tracked_index, corners_tracked);

              computePose(P, corners_tracked, cam, false, cMo);
              vpDisplay::displayFrame(I, cMo, cam, 0.04, vpColor::none, 3);

              for (unsigned int j = 0; j < corners_tracked.size(); j++) {
                std::ostringstream s;
                s << corners_tracked_index[j];
                vpDisplay::displayText(I, corners_tracked[j]+vpImagePoint(-10,-10), s.str(), vpColor::blue);
                vpDisplay::displayCross(I, corners_tracked[j], 15, vpColor::green, 2);
              }
              target_found = true;
            }

            area_zone_prev = area_zone_cur;
          }
        }
        catch(...) {
          std::cout << "Exception tracking" << std::endl;
          state = detection;
        }
      }

      // if (target_found)
      //   vpDisplay::displayRectangle(I, target_bbox, vpColor::red, false, 2);

      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false)) // a click to exit
        break;

      std::cout << "Loop time: " << vpTime::measureTimeMs() - t << std::endl;
    }
  }
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
bool vpBlobsTargetTracker::track(const cv::Mat &cvI, const vpImage<unsigned char> &I)
{
  if (m_state == detection || m_force_detection) {
    //std::cout << "STATE: DETECTION " << std::endl;
    bool obj_found = false;
    if (!m_manual_blob_init && !m_full_manual)
      obj_found = m_colBlob.detect(cvI);

    // Delete previous list of blobs
    m_blob_list.clear();
    m_blob_list.resize(4);

    m_initPose = true;
    m_target_found = false;

    if (obj_found || m_manual_blob_init || m_full_manual) {
      try {
        if (m_full_manual) {
          std::cout << "Full manual" << std::endl;
          vpDisplay::displayText(I, vpImagePoint(I.getHeight() - 10, 10), "Click on the 4 blobs", vpColor::red);
          vpDisplay::flush(I);
          for (std::list<vpDot2>::iterator it = m_blob_list.begin(); it != m_blob_list.end(); ++it) {
            (*it).setGraphics(true);
            (*it).setGraphicsThickness(1);
            (*it).initTracking(I);
            (*it).track(I);
            vpDisplay::flush(I);
          }
          m_state = tracking;
          m_force_detection = false;
        }
        else {
          // std::cout << "TARGET FOUND" << std::endl;
          vpDot2 blob;
          blob.setGraphics(true);
          blob.setGraphicsThickness(1);
          blob.setEllipsoidShapePrecision(0.9);

          if (m_manual_blob_init) {
            vpDisplay::displayText(I, vpImagePoint(I.getHeight() - 10, 10), "Click on the colored blob", vpColor::red);
            vpDisplay::flush(I);
            blob.initTracking(I);
            m_manual_blob_init = false;
          }
          else {
            vpImagePoint cog = m_colBlob.getCog(0);
            vpDisplay::displayCross(I, cog, 10, vpColor::red, 2);
            blob.initTracking(I, cog);
          }
          blob.track(I);

          // printf("Dot characteristics: \n");
          // printf(" width : %lf\n", blob.getWidth());
          // printf(" height: %lf\n", blob.getHeight());
          // printf(" area: %lf\n", blob.getArea());
          // printf(" gray level min: %d\n", blob.getGrayLevelMin());
          // printf(" gray level max: %d\n", blob.getGrayLevelMax());
          // printf(" grayLevelPrecision: %lf\n", blob.getGrayLevelPrecision());
          // printf(" sizePrecision: %lf\n", blob.getSizePrecision());
          // printf(" ellipsoidShapePrecision: %lf\n", blob.getEllipsoidShapePrecision());

          vpDot2 black_blob = blob;
          black_blob.setGrayLevelMax(m_grayLevelMaxBlob);
          black_blob.setGrayLevelMin(m_grayLevelMinBlob);

          int i, j, aj, ai;
          if (m_left_hand_target) {
            i = blob.getCog().get_i() - blob.getHeight()*2.3;
            j = blob.getCog().get_j() - blob.getWidth()*3.3;
            ai = blob.getHeight()*5;
            aj = blob.getWidth()*5;
          }
          else {
            i = blob.getCog().get_i() - blob.getHeight();
            j = blob.getCog().get_j() - blob.getWidth()*2.0;
            ai = blob.getHeight()*4.5;
            aj = blob.getWidth()*4;
          }

          // search similar blobs in the image and store them in blob_list
          //black_blob.searchDotsInArea(I, 0, 0, I.getWidth(), I.getHeight(), m_blob_list);
          //vpDisplay::displayRectangle(I, i, j, ai, aj, vpColor::red, false, 1);
          black_blob.searchDotsInArea(I, j, i, ai, aj, m_blob_list);
          // vpDisplay::flush(I);
          // vpDisplay::getClick(I, true);

          m_blob_list.insert(m_blob_list.begin(), blob);
          std::cout << "SIZE: " << m_blob_list.size() << std::endl;

          if (m_blob_list.size() == m_numBlobs) {
            for (std::list<vpDot2>::iterator it = m_blob_list.begin(); it != m_blob_list.end(); ++it) {
              //it->setEllipsoidShapePrecision(0.8);
              it->setEllipsoidShapePrecision(0.75);
            }
            if (1) {
              for (std::list<vpDot2>::iterator it = m_blob_list.begin(); it != m_blob_list.end(); ++it) {
                it->initTracking(I, it->getCog());
              }
            }
            m_state = tracking;
            m_force_detection = false;
          }
          else
            std::cout << "Number blobs found is " << m_blob_list.size() << ". Expected number: " << m_numBlobs << std::endl;
        }
      }
      catch (vpException &e) {
        std::cout << "Exception tracking detection: " << e.getStringMessage() << std::endl;
        m_target_found = false;
      }
    }
  }
  else if (m_state == tracking) {
    // std::cout << "STATE: TRACKING " << std::endl;
    try {
      m_cog.set_uv(0.0, 0.0);

      for (std::list<vpDot2>::iterator it = m_blob_list.begin(); it != m_blob_list.end(); ++it) {
        it->track(I);
        m_cog += it->getCog();
      }
      m_cog /= m_blob_list.size();

      // Display the ACTUAL center of gravity of the object
      //vpDisplay::displayCross(I, cog_tot, 10, vpColor::blue, 2);

      // std::vector<vpImagePoint> corners(m_blob_list.begin(), m_blob_list.end());

      // Now we create a map of vpPoint in order to order the points
      std::map<double, vpImagePoint> poly_verteces;

      double theta;
      for (std::list<vpDot2>::iterator it = m_blob_list.begin(); it != m_blob_list.end(); ++it) {
        // Get the cog of the blob
        vpImagePoint cog = it->getCog();
        theta = atan2(cog.get_v() - m_cog.get_v(), cog.get_u() - m_cog.get_u());
        // Insert the vertexes in the map (ordered)
        poly_verteces.insert(std::pair<double, vpImagePoint>(theta, cog));
      }

      // Now we create a vector containing the ordered vertexes
      std::vector<vpImagePoint> poly_vert;
      int index_first = 0;
      unsigned int count = 0;

      for (std::map<double, vpImagePoint>::iterator it = poly_verteces.begin(); it != poly_verteces.end(); ++it) {
        poly_vert.push_back(it->second);
        if (m_blob_list.front().getCog() == it->second)
          index_first = count;
        count++;
      }

      std::rotate(poly_vert.begin(), poly_vert.begin() + index_first, poly_vert.end());

      //std::cout << "---------------------------------------" << std::endl;
      for (unsigned int j = 0; j < poly_vert.size(); j++) {
        std::ostringstream s;
        s << j;
        vpDisplay::displayText(I, poly_vert[j], s.str(), vpColor::green);
        //std::cout << "Cog blob " << j << " :" << poly_vert[j] << std::endl;
      }

      computePose(m_P, poly_vert, m_cam, m_initPose, m_cMo);

      bool duplicate = false;
      for (unsigned int i = 0; i < poly_vert.size()-1; i++) {
        for (unsigned int j = i+1; j < poly_vert.size(); j++) {
          //std::cout << "Distance " << i << "-" << j << " :" << vpImagePoint::sqrDistance(poly_vert[i], poly_vert[j]) << std::endl;
          if (vpImagePoint::sqrDistance(poly_vert[i], poly_vert[j]) < 5.0)
            duplicate = true;
        }
      }
      //std::cout << "---------------------------------------" << std::endl;

      // std::cout << "Number blobs found is " << poly_vert.size() << ". Expected number: " << m_numBlobs << std::endl;
      // int i = m_blob_list.front().getCog().get_i() - m_blob_list.front().getHeight()*2.3;
      // int j = m_blob_list.front().getCog().get_j() - m_blob_list.front().getWidth()*3.3;
      // unsigned int ai = m_blob_list.front().getHeight()*5;
      // unsigned int aj = m_blob_list.front().getWidth()*5;
      // vpDisplay::displayRectangle(I, i, j, ai, aj, vpColor::red, false, 1);

      if (duplicate) {
        m_target_found = false;
        m_state = detection;
        std::cout << "PROBLEM: tracking failed " << m_numBlobs << std::endl;
      }
      else if (poly_vert.size() != m_numBlobs) {
        m_target_found = false;
        m_state = detection;
        std::cout << "PROBLEM: Expected number: " << m_numBlobs << std::endl;
      }
      else
        m_target_found = true;
    }
    catch (vpException &e) {
      std::cout << "Exception tracking: " << e.getStringMessage() << std::endl;
      m_state = detection;
      m_target_found = false;
    }
  }

  return m_target_found;
}