/// Copy a primitive plan into the planning display as a single continuous
/// trajectory and kick off trail preview.
///
/// @param start_state  Start state message the accumulated trajectory is
///                     interpreted against.
/// @param plan         Primitive plan whose per-action joint trajectories are
///                     concatenated for display.
void MotionPlanningFrame::copyTrajectoryToDisplay(const moveit_msgs::RobotState& start_state,
                                                  const apc_msgs::PrimitivePlan& plan)
{
    // Get a robot model.
    const robot_model::RobotModelConstPtr& robot_model = planning_display_->getRobotModel();
    // Construct a new robot trajectory.
    robot_trajectory::RobotTrajectoryPtr display_trajectory(new robot_trajectory::RobotTrajectory(robot_model, ""));
    // Accumulate joint trajectory over entire plan.
    // Note: default construction replaces the redundant copy-init `T x = T()`.
    trajectory_msgs::JointTrajectory trajectory;
    for (std::size_t i = 0; i < plan.actions.size(); i++) // size_t avoids signed/unsigned mismatch
        appendToTrajectory(trajectory, plan.actions[i].joint_trajectory);
    // Copy current plan over to robot trajectory.
    // FIXME The reference state is the planning scene's *current* state, not
    // the provided start state — confirm this is the intended reference.
    display_trajectory->setRobotTrajectoryMsg(planning_display_->getPlanningSceneRO()->getCurrentState(),
                                              start_state,
                                              trajectory);
    // Swap the plan trajectory into our planning display.
    planning_display_->setTrajectoryToDisplay(display_trajectory);
    // Display trail. FIXME This doesn't accomplish anything actually.
    previewButtonClicked();
}
int main() { try { vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0); vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50)); std::vector<vpPoint> point(4) ; point[0].setWorldCoordinates(-0.1,-0.1, 0); point[1].setWorldCoordinates( 0.1,-0.1, 0); point[2].setWorldCoordinates( 0.1, 0.1, 0); point[3].setWorldCoordinates(-0.1, 0.1, 0); vpServo task ; task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::CURRENT); task.setLambda(0.5); vpFeaturePoint p[4], pd[4] ; for (unsigned int i = 0 ; i < 4 ; i++) { point[i].track(cdMo); vpFeatureBuilder::create(pd[i], point[i]); point[i].track(cMo); vpFeatureBuilder::create(p[i], point[i]); task.addFeature(p[i], pd[i]); } vpHomogeneousMatrix wMc, wMo; vpSimulatorCamera robot; robot.setSamplingTime(0.040); robot.getPosition(wMc); wMo = wMc * cMo; vpImage<unsigned char> Iint(480, 640, 0) ; vpImage<unsigned char> Iext(480, 640, 0) ; #if defined VISP_HAVE_X11 vpDisplayX displayInt(Iint, 0, 0, "Internal view"); vpDisplayX displayExt(Iext, 670, 0, "External view"); #elif defined VISP_HAVE_GDI vpDisplayGDI displayInt(Iint, 0, 0, "Internal view"); vpDisplayGDI displayExt(Iext, 670, 0, "External view"); #elif defined VISP_HAVE_OPENCV vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view"); vpDisplayOpenCV displayExt(Iext, 670, 0, "External view"); #else std::cout << "No image viewer is available..." 
<< std::endl; #endif vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2); vpHomogeneousMatrix cextMo(0,0,3, 0,0,0); vpWireFrameSimulator sim; sim.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD); sim.setCameraPositionRelObj(cMo); sim.setDesiredCameraPosition(cdMo); sim.setExternalCameraPosition(cextMo); sim.setInternalCameraParameters(cam); sim.setExternalCameraParameters(cam); while(1) { robot.getPosition(wMc); cMo = wMc.inverse() * wMo; for (unsigned int i = 0 ; i < 4 ; i++) { point[i].track(cMo); vpFeatureBuilder::create(p[i], point[i]); } vpColVector v = task.computeControlLaw(); robot.setVelocity(vpRobot::CAMERA_FRAME, v); sim.setCameraPositionRelObj(cMo); vpDisplay::display(Iint) ; vpDisplay::display(Iext) ; sim.getInternalImage(Iint); sim.getExternalImage(Iext); display_trajectory(Iint, point, cMo, cam); vpDisplay::flush(Iint); vpDisplay::flush(Iext); // A click in the internal view to exit if (vpDisplay::getClick(Iint, false)) break; vpTime::wait(1000*robot.getSamplingTime()); } task.kill(); } catch(vpException &e) { std::cout << "Catch an exception: " << e << std::endl; } }
/// IBVS simulation driven by blob (vpDot2) tracking on a synthetic image
/// produced by a virtual grabber. The user clicks the four dots to initialise
/// tracking; a later click stops the servo loop. Returns 0.
int main()
{
#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)
  try {
    // Desired and initial camera-to-object poses.
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    vpImage<unsigned char> I(480, 640, 255);
    vpCameraParameters cam(840, 840, I.getWidth()/2, I.getHeight()/2);

    // Four coplanar 3D points forming a 20 cm square target.
    std::vector<vpPoint> point(4) ;
    point[0].setWorldCoordinates(-0.1,-0.1, 0);
    point[1].setWorldCoordinates( 0.1,-0.1, 0);
    point[2].setWorldCoordinates( 0.1, 0.1, 0);
    point[3].setWorldCoordinates(-0.1, 0.1, 0);

    // Eye-in-hand servo task using the current interaction matrix.
    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    // Synthetic image source rendering the target for a given pose.
    vpVirtualGrabber g("./target_square.pgm", cam);
    g.acquire(I, cMo);

#if defined(VISP_HAVE_X11)
    vpDisplayX d(I, 0, 0, "Current camera view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I, 0, 0, "Current camera view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I, 0, 0, "Current camera view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpDisplay::display(I);
    vpDisplay::displayText(I, 10, 10, "Click in the 4 dots to initialise the tracking and start the servo", vpColor::red);
    vpDisplay::flush(I);

    // Desired features from the desired pose; current features from the
    // tracked blob centers-of-gravity (user-initialised by clicking).
    vpFeaturePoint p[4], pd[4];
    std::vector<vpDot2> dot(4);
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      dot[i].setGraphics(true);
      dot[i].initTracking(I);
      vpDisplay::flush(I);
      vpFeatureBuilder::create(p[i], cam, dot[i].getCog());
      task.addFeature(p[i], pd[i]);
    }

    // Free-flying camera robot; world-to-object pose deduced from initial cMo.
    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    // Servo loop: grab, track blobs, refresh feature depth from the known
    // pose, then compute and apply the camera velocity.
    for (; ; ) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      g.acquire(I, cMo);
      vpDisplay::display(I);
      for (unsigned int i = 0 ; i < 4 ; i++) {
        dot[i].track(I);
        vpFeatureBuilder::create(p[i], cam, dot[i].getCog());
        // Update the feature depth Z from the current pose.
        vpColVector cP;
        point[i].changeFrame(cMo, cP) ;
        p[i].set_Z(cP[2]);
      }
      vpColVector v = task.computeControlLaw();
      display_trajectory(I, dot);
      vpServoDisplay::display(task, cam, I, vpColor::green, vpColor::red) ;
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);
      vpDisplay::flush(I);
      // A click to exit.
      if (vpDisplay::getClick(I, false))
        break;
      // Pace the loop to the robot sampling period (seconds -> ms).
      vpTime::wait( robot.getSamplingTime() * 1000);
    }
    task.kill();
  }
  catch(const vpException &e) { // FIX: was caught by value (needless copy, slicing risk)
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
void MotionPlanningFrame::computePlanGoalsButtonClicked() { if (!move_group_) return; // Clear status ui_->result_label->setText("Planning..."); // Reset the current plan. current_plan_.reset(new moveit::planning_interface::MoveGroup::Plan()); // The primitive plan is used for actual execution on the robot. HACK // We reset it here. primitive_plan_.reset(new apc_msgs::PrimitivePlan); // Get the list of goals (waypoints) to follow. QListWidget* goals_list = ui_->active_goals_list; // Get the current start state. robot_state::RobotState start_state = *planning_display_->getQueryStartState(); // The target goal state will be initialized to the start state. robot_state::RobotState goal_state = start_state; // For each item in the active goals list, configure for planning and then // append to the plan. for (int i = 0; i < goals_list->count(); i++) { // First get the plan represented by the item. apc_msgs::PrimitivePlan plan = getMessageFromUserData<apc_msgs::PrimitivePlan>(goals_list->item(i)->data(Qt::UserRole)); // Loop through the actions in the plan. for (int j = 0; j < plan.actions.size(); j++) { // Get the last action from the user data because it is the goal state. apc_msgs::PrimitiveAction action = plan.actions.back(); // Get the goal robot state from user data. getStateFromAction(goal_state, action); // HACK Reset move group so that I can plan with a different group... SMH. changePlanningGroupHelper(action.group_name); planning_display_->waitForAllMainLoopJobs(); // I hope there are no cyclic main job loops. // Set move group variables, like start and goal states, etc. configureForPlanning(start_state, goal_state); // Compute plan specific to the current primitive action. moveit::planning_interface::MoveGroup::Plan move_group_plan; // Make a planning service call. This will append any plans to the input. if (!move_group_->plan(move_group_plan)) { ui_->result_label->setText("Failed"); current_plan_.reset(); return; } // Copy plan over to primitive action. 
action.joint_trajectory = move_group_plan.trajectory_.joint_trajectory; action.duration = move_group_plan.planning_time_; // TODO Eef trajectory support. // TODO Dense trajectory support. // HACK If we are planning for the hands, overwrite plan with linear interpolation. if (action.group_name == "crichton_left_hand" || action.group_name == "crichton_right_hand") { computeLinearInterpPlan(start_state, action); move_group_plan.trajectory_.joint_trajectory = action.joint_trajectory; } // Append plan onto the current plan. move_group_->appendPlan(*current_plan_, move_group_plan); // Append action onto primitive plan. primitive_plan_->actions.push_back(action); // Start the next plan from this goal. start_state = goal_state; } } // Success ui_->execute_button->setEnabled(true); ui_->result_label->setText(QString("Time: ").append( QString::number(current_plan_->planning_time_,'f',3))); // Copy trajectory over to display. { // Get a robot model. const robot_model::RobotModelConstPtr& robot_model = planning_display_->getRobotModel(); // Construct a new robot trajectory. robot_trajectory::RobotTrajectoryPtr display_trajectory(new robot_trajectory::RobotTrajectory(robot_model, "")); // Copy current plan over to robot trajectory. display_trajectory->setRobotTrajectoryMsg(planning_display_->getPlanningSceneRO()->getCurrentState(), current_plan_->start_state_, current_plan_->trajectory_); // Swap the plan trajectory into our planning display. planning_display_->setTrajectoryToDisplay(display_trajectory); // Display trail. FIXME This doesn't accomplish anything actually. previewButtonClicked(); } }
/// IBVS simulation on a simulated Viper850 arm with widened joint limits.
/// The first loop iteration freezes the robot (zero velocity) so the user can
/// see the start configuration; the loop runs at most 275 iterations or until
/// a click. Returns 0 (also when the initial pose cannot be reached).
int main()
{
#if defined(VISP_HAVE_PTHREAD)
  try {
    // Desired and initial camera-to-object poses.
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    /*
     * Top view of the world frame, the camera frame and the object frame
     *
     * world, also robot base frame :  --> w_y
     *                                  |
     *                                 \|/ w_x
     *
     * object :   o_y
     *           /|\
     *            |
     *      o_x <--
     *
     * camera :   c_y
     *           /|\
     *            |
     *      c_x <--
     */
    vpHomogeneousMatrix wMo(vpTranslationVector(0.40, 0, -0.15),
                            vpRotationMatrix(vpRxyzVector(-M_PI, 0, M_PI/2.)));

    // Four coplanar 3D points forming a 20 cm square target.
    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1, 0.1, 0) );
    point.push_back( vpPoint(-0.1, 0.1, 0) );

    // Eye-in-hand servo task using the current interaction matrix.
    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    // Build current (p) and desired (pd) point features for each target point.
    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpSimulatorViper850 robot(true);
    robot.setVerbose(true);

    // Enlarge the default joint limits
    vpColVector qmin = robot.getJointMin();
    vpColVector qmax = robot.getJointMax();
    qmin[0] = -vpMath::rad(180);
    qmax[0] = vpMath::rad(180);
    qmax[1] = vpMath::rad(0);
    qmax[2] = vpMath::rad(270);
    qmin[4] = -vpMath::rad(180);
    qmax[4] = vpMath::rad(180);
    robot.setJointLimit(qmin, qmax);

    std::cout << "Robot joint limits: " << std::endl;
    for (unsigned int i=0; i< qmin.size(); i ++)
      std::cout << "Joint " << i << ": min " << vpMath::deg(qmin[i])
                << " max " << vpMath::deg(qmax[i]) << " (deg)" << std::endl;

    robot.init(vpViper850::TOOL_PTGREY_FLEA2_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    robot.set_fMo(wMo);
    bool ret = true;
#if VISP_VERSION_INT > VP_VERSION_INT(2,7,0)
    // Since ViSP 2.7.1 this call reports whether the pose could be reached.
    ret =
#endif
    robot.initialiseCameraRelativeToObject(cMo);
    if (!ret)
      return 0; // Not able to set the position
    robot.setDesiredCameraPosition(cdMo);
    // We modify the default external camera position
    robot.setExternalCameraPosition(vpHomogeneousMatrix(vpTranslationVector(-0.4, 0.4, 2),
                                                        vpRotationMatrix(vpRxyzVector(M_PI/2,0,0))));

    vpImage<unsigned char> Iint(480, 640, 255);
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 700, 0, "Internal view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    // Modify the camera parameters to match those used in the other simulations
    robot.setCameraParameters(cam);

    bool start = true;
    //for ( ; ; )
    for (int iter =0; iter < 275; iter ++) {
      cMo = robot.get_cMo();
      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      if (!start) {
        display_trajectory(Iint, point, cMo, cam);
        vpDisplay::displayText(Iint, 40, 120, "Click to stop the servo...", vpColor::red);
      }
      vpDisplay::flush(Iint);

      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      // A click to exit
      if (vpDisplay::getClick(Iint, false))
        break;

      // First iteration: hold the robot still and invite the user to start.
      if (start) {
        start = false;
        v = 0;
        robot.setVelocity(vpRobot::CAMERA_FRAME, v);
        vpDisplay::displayText(Iint, 40, 120, "Click to start the servo...", vpColor::blue);
        vpDisplay::flush(Iint);
        //vpDisplay::getClick(Iint);
      }

      // Pace the loop to the robot sampling period (seconds -> ms).
      vpTime::wait(1000*robot.getSamplingTime());
    }
    task.kill();
  }
  catch(const vpException &e) { // FIX: was caught by value (needless copy, slicing risk)
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
int main() { try { vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0); vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50)); std::vector<vpPoint> point; point.push_back( vpPoint(-0.1,-0.1, 0) ); point.push_back( vpPoint( 0.1,-0.1, 0) ); point.push_back( vpPoint( 0.1, 0.1, 0) ); point.push_back( vpPoint(-0.1, 0.1, 0) ); vpServo task ; task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::CURRENT); task.setLambda(0.5); vpFeaturePoint p[4], pd[4] ; for (unsigned int i = 0 ; i < 4 ; i++) { point[i].track(cdMo); vpFeatureBuilder::create(pd[i], point[i]); point[i].track(cMo); vpFeatureBuilder::create(p[i], point[i]); task.addFeature(p[i], pd[i]); } vpHomogeneousMatrix wMc, wMo; vpSimulatorCamera robot; robot.setSamplingTime(0.040); robot.getPosition(wMc); wMo = wMc * cMo; vpImage<unsigned char> Iint(480, 640, 255) ; vpImage<unsigned char> Iext(480, 640, 255) ; #if defined(VISP_HAVE_X11) vpDisplayX displayInt(Iint, 0, 0, "Internal view"); vpDisplayX displayExt(Iext, 670, 0, "External view"); #elif defined(VISP_HAVE_GDI) vpDisplayGDI displayInt(Iint, 0, 0, "Internal view"); vpDisplayGDI displayExt(Iext, 670, 0, "External view"); #elif defined(VISP_HAVE_OPENCV) vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view"); vpDisplayOpenCV displayExt(Iext, 670, 0, "External view"); #else std::cout << "No image viewer is available..." 
<< std::endl; #endif #if defined(VISP_HAVE_DISPLAY) vpProjectionDisplay externalview; for (unsigned int i = 0 ; i < 4 ; i++) externalview.insert(point[i]) ; #endif vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2); vpHomogeneousMatrix cextMo(0,0,3, 0,0,0); while(1) { robot.getPosition(wMc); cMo = wMc.inverse() * wMo; for (unsigned int i = 0 ; i < 4 ; i++) { point[i].track(cMo); vpFeatureBuilder::create(p[i], point[i]); } vpColVector v = task.computeControlLaw(); robot.setVelocity(vpRobot::CAMERA_FRAME, v); vpDisplay::display(Iint) ; vpDisplay::display(Iext) ; display_trajectory(Iint, point, cMo, cam); vpServoDisplay::display(task, cam, Iint, vpColor::green, vpColor::red); #if defined(VISP_HAVE_DISPLAY) externalview.display(Iext, cextMo, cMo, cam, vpColor::red, true); #endif vpDisplay::flush(Iint); vpDisplay::flush(Iext); // A click to exit if (vpDisplay::getClick(Iint, false) || vpDisplay::getClick(Iext, false)) break; vpTime::wait( robot.getSamplingTime() * 1000); } task.kill(); } catch(vpException e) { std::cout << "Catch an exception: " << e << std::endl; } }