void ParseWordRecognizedArray(std::vector<WordConfidencePair>& results, const AL::ALValue& value, float recognitionThreshold)
{
  // unexpected data
  if (value.getType() != AL::ALValue::TypeArray)
    MODULE_ERROR("invalid array type");
  // unexpected data
  if (value.getSize() % 2 != 0)
    MODULE_ERROR("invalid array size");

  for (int i = 0; i < (int)value.getSize(); i += 2) {
    AL::ALValue wordValue = value[i];
    AL::ALValue confidenceValue = value[i + 1];
    float confidence = confidenceValue.operator float&();
    if (confidence >= recognitionThreshold) {
      WordConfidencePair pair = { wordValue.toString(), confidence };
      results.push_back(pair);
    }
  }

  std::sort(results.begin(), results.end(), WordConfidencePairComparison());
}
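The parser above relies on a WordConfidencePair aggregate and a WordConfidencePairComparison functor that are not shown here. A minimal sketch of what they might look like, assuming the results are meant to be sorted by descending confidence, is:

#include <string>

// Hypothetical definitions inferred from the call sites above; the real
// module may declare these differently.
struct WordConfidencePair {
  std::string word;
  float confidence;
};

// Order pairs so that the most confident recognition comes first.
struct WordConfidencePairComparison {
  bool operator()(const WordConfidencePair& a, const WordConfidencePair& b) const {
    return a.confidence > b.confidence;
  }
};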
void NaoMarkServiceDetection::callback(const std::string &key, const AL::ALValue &value, const AL::ALValue &msg)
{
  AL::ALValue marks = fMemoryProxy.getData("LandmarkDetected");
  if (marks.getSize() > 0 && !_isMarkFound) {
    int TimeStampField = marks[0][1];
    // marks[1] holds the MarkInfo array: [0] = ShapeInfo (alpha, beta, sizeX, sizeY), [1] = ExtraInfo (mark ID)
    if ((int)marks[1][0][1][0] == _markToFind) {
      if ((float)marks[1][0][0][3] < 0.2f) {
        // mark still small (far away): keep moving toward it, steering with the alpha angle
        if (_isAllowedToMove)
          motionProxy->moveToward(0.5f, 0, marks[1][0][0][1]);
        _naoMarkDetected = true;
      } else {
        // close enough: stop
        motionProxy->moveToward(0, 0, 0);
        _isMarkFound = true;
      }
    } else {
      motionProxy->moveToward(0, 0, 0);
    }
  }
}
/** This method tries to match a localized sound to an identified face, and
 * updates the corresponding 'human' accordingly.
 *
 * If none is found, a 'virtual' human called 'unknown_speaker' is created, with an
 * approximate position.
 */
void InteractionMonitor::identifySpeaker(const AL::ALValue &sounds, map<string, Human>& humans)
{
  // According to the doc
  // http://www.aldebaran-robotics.com/documentation/naoqi/audio/alaudiosourcelocalization.html
  // only one sound may be localized at a given step.
  if (sounds.getSize() == 0)
    return;

  map<string, Human>::iterator it;

  Human h = makeHuman("unknown_speaker", sounds[0][1][0], sounds[0][1][1]);

  for (it = humans.begin(); it != humans.end(); ++it) {
    if (distance(h, it->second) < MAX_DISTANCE_HUMAN_SOUND) {
      it->second.speaking = true;
      return;
    }
  }

  humans[h.id] = h;
}
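makeHuman and distance are defined elsewhere in InteractionMonitor. A rough sketch of what this code assumes about them, treating a Human's position as the sound's two localization angles, could be (the field names are guesses):

#include <cmath>
#include <string>

// Hypothetical shape of the Human record and helpers used above.
struct Human {
  std::string id;
  float yaw;      // azimuth angle (rad)
  float pitch;    // elevation angle (rad)
  bool speaking;
};

Human makeHuman(const std::string& id, float yaw, float pitch)
{
  Human h;
  h.id = id;
  h.yaw = yaw;
  h.pitch = pitch;
  h.speaking = false;
  return h;
}

// Angular distance between two humans, compared against MAX_DISTANCE_HUMAN_SOUND.
float distance(const Human& a, const Human& b)
{
  return std::sqrt((a.yaw - b.yaw) * (a.yaw - b.yaw) + (a.pitch - b.pitch) * (a.pitch - b.pitch));
}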
bool vpFaceTrackerOkao::detect()
{
  m_message.clear();
  m_polygon.clear();
  m_nb_objects = 0;
  m_faces.clear();
  m_scores.clear();

  bool target_found = false;

  AL::ALValue result = m_mem_proxy.getData("FaceDetected");

  //-- Detect faces
  if (result.getSize() >= 2) {
    //std::cout << "face" << std::endl;
    AL::ALValue info_face_array = result[1];
    target_found = true;
    double min_dist = m_image_width * m_image_height;
    unsigned int index_closest_cog = 0;
    vpImagePoint closest_cog;

    for (unsigned int i = 0; i < info_face_array.getSize() - 1; i++) {
      // Extract face info
      // Face Detected [1]/ First face [0]/ Shape Info [0]/ Alpha [1]
      float alpha = result[1][i][0][1];
      float beta = result[1][i][0][2];
      float sx = result[1][i][0][3];
      float sy = result[1][i][0][4];

      std::string name = result[1][i][1][2];
      float score = result[1][i][1][1];

      std::ostringstream message;
      if (score > 0.6)
        message << name;
      else
        message << "Unknown";
      m_message.push_back(message.str());
      m_scores.push_back(score);

      // sizeX / sizeY are the face size in relation to the image
      float h = m_image_height * sx;
      float w = m_image_width * sy;

      // Center of face into the image
      float x = m_image_width / 2 - m_image_width * alpha;
      float y = m_image_height / 2 + m_image_height * beta;

      vpImagePoint cog(x, y);
      double dist = vpImagePoint::distance(m_previuos_cog, cog);

      if (dist < min_dist) {
        closest_cog = cog;
        index_closest_cog = i;
        min_dist = dist;
      }

      std::vector<vpImagePoint> polygon;
      double x_corner = x - h/2;
      double y_corner = y - w/2;

      polygon.push_back(vpImagePoint(y_corner,     x_corner));
      polygon.push_back(vpImagePoint(y_corner + w, x_corner));
      polygon.push_back(vpImagePoint(y_corner + w, x_corner + h));
      polygon.push_back(vpImagePoint(y_corner,     x_corner + h));

      m_polygon.push_back(polygon);
      m_nb_objects++;
    }

    if (index_closest_cog != 0)
      std::swap(m_polygon[0], m_polygon[index_closest_cog]);
    m_previuos_cog = closest_cog;
  }

  return target_found;
}
void NaoqiJointStates::run()
{
  ros::Rate r(m_rate);
  ros::Time stamp1;
  ros::Time stamp2;
  ros::Time stamp;
  std::vector<float> odomData;
  float odomX, odomY, odomZ, odomWX, odomWY, odomWZ;
  std::vector<float> memData;
  std::vector<float> positionData;

  ROS_INFO("Starting main loop. ros::ok() is %d nh.ok() is %d", ros::ok(), m_nh.ok());
  while (ros::ok()) {
    r.sleep();
    ros::spinOnce();
    stamp1 = ros::Time::now();
    try {
      memData = m_memoryProxy->getListData(m_dataNamesList);
      // {SPACE_TORSO = 0, SPACE_WORLD = 1, SPACE_NAO = 2}. (second argument)
      odomData = m_motionProxy->getPosition("Torso", 1, true);
      positionData = m_motionProxy->getAngles("Body", true);
    } catch (const AL::ALError & e) {
      ROS_ERROR("Error accessing ALMemory, exiting...");
      ROS_ERROR("%s", e.what());
      //ros.signal_shutdown("No NaoQI available anymore");
    }
    stamp2 = ros::Time::now();
    //ROS_DEBUG("dt is %f",(stamp2-stamp1).toSec()); // dt is typically around 1/1000 sec

    // TODO: Something smarter than this..
    stamp = stamp1 + ros::Duration((stamp2 - stamp1).toSec() / 2.0);

    /******************************************************************
     * IMU
     *****************************************************************/
    if (memData.size() != m_dataNamesList.getSize()) {
      ROS_ERROR("memData length %zu does not match expected length %u", memData.size(), m_dataNamesList.getSize());
      continue;
    }
    // IMU data:
    m_torsoIMU.header.stamp = stamp;

    float angleX = memData[1];
    float angleY = memData[2];
    float angleZ = memData[3];
    float gyroX = memData[4];
    float gyroY = memData[5];
    float gyroZ = memData[6];
    float accX = memData[7];
    float accY = memData[8];
    float accZ = memData[9];

    m_torsoIMU.orientation = tf::createQuaternionMsgFromRollPitchYaw(angleX, angleY, angleZ); // yaw currently always 0

    m_torsoIMU.angular_velocity.x = gyroX;
    m_torsoIMU.angular_velocity.y = gyroY;
    m_torsoIMU.angular_velocity.z = gyroZ; // currently always 0

    m_torsoIMU.linear_acceleration.x = accX;
    m_torsoIMU.linear_acceleration.y = accY;
    m_torsoIMU.linear_acceleration.z = accZ;

    // covariances unknown
    // cf http://www.ros.org/doc/api/sensor_msgs/html/msg/Imu.html
    m_torsoIMU.orientation_covariance[0] = -1;
    m_torsoIMU.angular_velocity_covariance[0] = -1;
    m_torsoIMU.linear_acceleration_covariance[0] = -1;

    m_torsoIMUPub.publish(m_torsoIMU);

    /******************************************************************
     * Joint state
     *****************************************************************/
    m_jointState.header.stamp = stamp;
    m_jointState.header.frame_id = m_baseFrameId;
    m_jointState.position.resize(positionData.size());
    for (unsigned i = 0; i < positionData.size(); ++i) {
      m_jointState.position[i] = positionData[i];
    }

    // simulated model misses some joints, we need to fill:
    if (m_jointState.position.size() == 22) {
      m_jointState.position.insert(m_jointState.position.begin() + 6, 0.0);
      m_jointState.position.insert(m_jointState.position.begin() + 7, 0.0);
      m_jointState.position.push_back(0.0);
      m_jointState.position.push_back(0.0);
    }

    m_jointStatePub.publish(m_jointState);

    /******************************************************************
     * Odometry
     *****************************************************************/
    if (!m_paused) {
      // apply offset transformation:
      tf::Pose transformedPose;

      if (odomData.size() != 6) {
        ROS_ERROR("Error getting odom data. length is %zu", odomData.size());
        continue;
      }

      double dt = (stamp.toSec() - m_lastOdomTime);

      odomX = odomData[0];
      odomY = odomData[1];
      odomZ = odomData[2];
      odomWX = odomData[3];
      odomWY = odomData[4];
      odomWZ = odomData[5];

      tf::Quaternion q;
      // roll and pitch from IMU, yaw from odometry:
      if (m_useIMUAngles)
        q.setRPY(angleX, angleY, odomWZ);
      else
        q.setRPY(odomWX, odomWY, odomWZ);

      m_odomPose.setOrigin(tf::Vector3(odomX, odomY, odomZ));
      m_odomPose.setRotation(q);

      if (m_mustUpdateOffset) {
        if (!m_isInitialized) {
          if (m_initializeFromIMU) {
            // Initialization from IMU: Take x, y, z, yaw from odometry, roll and pitch from IMU
            m_targetPose.setOrigin(m_odomPose.getOrigin());
            m_targetPose.setRotation(tf::createQuaternionFromRPY(angleX, angleY, odomWZ));
          } else if (m_initializeFromOdometry) {
            m_targetPose.setOrigin(m_odomPose.getOrigin());
            m_targetPose.setRotation(tf::createQuaternionFromRPY(odomWX, odomWY, odomWZ));
          }
          m_isInitialized = true;
        } else {
          // Overwrite target pitch and roll angles with IMU data
          const double target_yaw = tf::getYaw(m_targetPose.getRotation());
          if (m_initializeFromIMU) {
            m_targetPose.setRotation(tf::createQuaternionFromRPY(angleX, angleY, target_yaw));
          } else if (m_initializeFromOdometry) {
            m_targetPose.setRotation(tf::createQuaternionFromRPY(odomWX, odomWY, target_yaw));
          }
        }
        m_odomOffset = m_targetPose * m_odomPose.inverse();
        transformedPose = m_targetPose;
        m_mustUpdateOffset = false;
      } else {
        transformedPose = m_odomOffset * m_odomPose;
      }

      //
      // publish the transform over tf first
      //
      m_odomTransformMsg.header.stamp = stamp;
      tf::transformTFToMsg(transformedPose, m_odomTransformMsg.transform);
      m_transformBroadcaster.sendTransform(m_odomTransformMsg);

      //
      // Fill the Odometry msg
      //
      m_odom.header.stamp = stamp;

      //set the velocity first (old values still valid)
      m_odom.twist.twist.linear.x = (odomX - m_odom.pose.pose.position.x) / dt;
      m_odom.twist.twist.linear.y = (odomY - m_odom.pose.pose.position.y) / dt;
      m_odom.twist.twist.linear.z = (odomZ - m_odom.pose.pose.position.z) / dt;
      // TODO: calc angular velocity!
      // m_odom.twist.twist.angular.z = vth; ??

      //set the position from the above calculated transform
      m_odom.pose.pose.orientation = m_odomTransformMsg.transform.rotation;
      m_odom.pose.pose.position.x = m_odomTransformMsg.transform.translation.x;
      m_odom.pose.pose.position.y = m_odomTransformMsg.transform.translation.y;
      m_odom.pose.pose.position.z = m_odomTransformMsg.transform.translation.z;

      m_odomPub.publish(m_odom);
      m_lastOdomTime = stamp.toSec();
    }
  }
  ROS_INFO("naoqi_sensors stopped.");
}
void WifiManager::UpdateList()
{
  // reload config
  ParamEntry::Reload();

  std::string serviceOff;
  std::string serviceOn;

#if !WIFI_LOCAL_TEST
  GetConnectionProxy().scan();
  AL::ALValue serviceList = GetConnectionProxy().services();
  // Log() << serviceList << std::endl;
#else // !LOCAL_TEST
  AL::ALValue serviceList;
#endif // !LOCAL_TEST

  _services.clear();
  for (int i = 0; i < serviceList.getSize(); i++) {
    // Log() << "item[" << i << "]: " << serviceList[i] << std::endl;
    std::map<std::string, std::string> details;
    for (int j = 0; j < serviceList[i].getSize(); j++) {
      //Log() << " -- " << serviceList[i][j] << std::endl;
      std::string name = serviceList[i][j][0];
      std::string val;
      if (serviceList[i][j][1].isString())
        val = (std::string) serviceList[i][j][1];
      else
        val = serviceList[i][j][1].toString();
      details[name] = val;
    }
    /*for (auto& item : details) {
      Log() << item.first << ": " << item.second << std::endl;
    }*/

    WifiService service;
    service._id = details["ServiceId"];
    service._name = details["Name"];
    service._state = WifiStateUtils::StringToWifiState(details["State"]);
    service.FindConfig();
    _services.push_back(service);

    if (service._knownConfig && service._knownConfig->_default && _selectedSSID.empty())
      _selectedSSID = service._name;
  }

#if WIFI_LOCAL_TEST
  WifiService service;
  service._id = "whatever";
  service._name = "YETTI";
  service._state = WifiStateUtils::StringToWifiState("Idle");
  service.FindConfig();
  _services.push_back(service);

  if (service._knownConfig && service._knownConfig->_default && _selectedSSID.empty())
    _selectedSSID = service._name;
#endif // WIFI_LOCAL_TEST
}
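WifiService and its FindConfig helper are not part of this snippet. A sketch of the fields the loop above relies on, with everything beyond the visible member accesses assumed, might be:

#include <string>

// Hypothetical declarations matching the member accesses above.
struct WifiConfig {
  std::string _ssid;
  bool _default;   // marked as the preferred network in the on-disk configuration
};

struct WifiService {
  std::string _id;                          // connman "ServiceId"
  std::string _name;                        // SSID ("Name")
  WifiState _state;                         // parsed from the "State" string
  const WifiConfig* _knownConfig = nullptr; // set by FindConfig() if this SSID is configured

  void FindConfig();                        // look up this SSID among the configured networks
};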
int main(int argc, const char* argv[])
{
  std::string opt_ip = "131.254.10.126";
  bool opt_language_english = true;
  bool opt_debug = false;

  for (int i = 0; i < argc; i++) {
    if (std::string(argv[i]) == "--ip" && i + 1 < argc)
      opt_ip = argv[i+1];
    else if (std::string(argv[i]) == "--fr")
      opt_language_english = false;
    else if (std::string(argv[i]) == "--debug")
      opt_debug = true;
    else if (std::string(argv[i]) == "--help") {
      std::cout << "Usage: " << argv[0] << " [--ip <robot address>] [--fr] [--debug]" << std::endl;
      return 0;
    }
  }

  std::string camera_name = "CameraTopPepper";

  // Open the grabber for the acquisition of the images from the robot
  vpNaoqiGrabber g;
  if (! opt_ip.empty())
    g.setRobotIp(opt_ip);
  g.setFramerate(30);
  g.setCamera(0);
  g.open();

  vpCameraParameters cam = vpNaoqiGrabber::getIntrinsicCameraParameters(AL::kQVGA, camera_name, vpCameraParameters::perspectiveProjWithDistortion);
  vpHomogeneousMatrix eMc = vpNaoqiGrabber::getExtrinsicCameraParameters(camera_name, vpCameraParameters::perspectiveProjWithDistortion);

  std::cout << "eMc:" << std::endl << eMc << std::endl;
  std::cout << "cam:" << std::endl << cam << std::endl;

  vpNaoqiRobot robot;
  // Connect to the robot
  if (! opt_ip.empty())
    robot.setRobotIp(opt_ip);
  robot.open();

  if (robot.getRobotType() != vpNaoqiRobot::Pepper) {
    std::cout << "ERROR: You are not connected to Pepper, but to a different Robot. Check the IP." << std::endl;
    return 0;
  }

  std::vector<std::string> jointNames_head = robot.getBodyNames("Head");

  // Open Proxy for the speech
  AL::ALTextToSpeechProxy tts(opt_ip, 9559);
  std::string phraseToSay;
  if (opt_language_english) {
    tts.setLanguage("English");
    phraseToSay = " \\emph=2\\ Hi,\\pau=200\\ How are you ?";
  } else {
    tts.setLanguage("French");
    phraseToSay = " \\emph=2\\ Bonjour,\\pau=200\\ comment vas tu ?";
  }

  // Initialize PeoplePerception
  AL::ALPeoplePerceptionProxy people_proxy(opt_ip, 9559);
  AL::ALMemoryProxy m_memProxy(opt_ip, 9559);
  people_proxy.subscribe("People", 30, 0.0);
  std::cout << "period: " << people_proxy.getCurrentPeriod() << std::endl;

  // Open Proxy for the speech recognition
  AL::ALSpeechRecognitionProxy asr(opt_ip, 9559);
  // asr.unsubscribe("Test_ASR");
  // return 0;
  asr.setVisualExpression(false);
  asr.setLanguage("English");
  std::vector<std::string> vocabulary;
  vocabulary.push_back("follow me");
  vocabulary.push_back("stop");

  // Set the vocabulary
  asr.setVocabulary(vocabulary, false);

  // Start the speech recognition engine with user Test_ASR
  asr.subscribe("Test_ASR");
  std::cout << "Speech recognition engine started" << std::endl;

  // Proxy to control the leds
  AL::ALLedsProxy led_proxy(opt_ip, 9559);

  // Declare plots
  vpPlot *plotter_diff_vel;
  vpPlot *plotter_vel;
  vpPlot *plotter_error;
  vpPlot *plotter_distance;

  if (opt_debug) {
    // Plotting
    plotter_diff_vel = new vpPlot(2);
    plotter_diff_vel->initGraph(0, 2);
    plotter_diff_vel->initGraph(1, 2);
    plotter_diff_vel->setTitle(0, "HeadYaw");
    plotter_diff_vel->setTitle(1, "HeadPitch");

    plotter_vel = new vpPlot(1);
    plotter_vel->initGraph(0, 5);
    plotter_vel->setLegend(0, 0, "vx");
    plotter_vel->setLegend(0, 1, "vy");
    plotter_vel->setLegend(0, 2, "wz");
    plotter_vel->setLegend(0, 3, "q_yaw");
    plotter_vel->setLegend(0, 4, "q_pitch");

    plotter_error = new vpPlot(1);
    plotter_error->initGraph(0, 3);
    plotter_error->setLegend(0, 0, "x");
    plotter_error->setLegend(0, 1, "y");
    plotter_error->setLegend(0, 2, "Z");

    plotter_distance = new vpPlot(1);
    plotter_distance->initGraph(0, 1);
    plotter_distance->setLegend(0, 0, "dist");
  }

  try {
    vpImage<unsigned char> I(g.getHeight(), g.getWidth());
    vpDisplayX d(I);
vpDisplay::setTitle(I, "ViSP viewer"); vpFaceTrackerOkao face_tracker(opt_ip,9559); double dist = 0.0; // Distance between person detected from peoplePerception and faceDetection // Set Visual Servoing: vpServo task; task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ; task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE); // vpAdaptiveGain lambda_adapt; // lambda_adapt.initStandard(1.6, 1.8, 15); vpAdaptiveGain lambda_base(1.2, 1.0, 10); // 2.3, 0.7, 15 vpAdaptiveGain lambda_nobase(5, 2.9, 15); // 4, 0.5, 15 task.setLambda(lambda_base) ; double Z = 0.9; double Zd = 0.9; bool stop_vxy = false; bool move_base = true; bool move_base_prev = true; // Create the desired visual feature vpFeaturePoint s; vpFeaturePoint sd; vpImagePoint ip(I.getHeight()/2, I.getWidth()/2); // Create the current x visual feature vpFeatureBuilder::create(s, cam, ip); vpFeatureBuilder::create(sd, cam, ip); // sd.buildFrom( I.getWidth()/2, I.getHeight()/2, Zd); AL::ALValue limit_yaw = robot.getProxy()->getLimits("HeadYaw"); std::cout << limit_yaw[0][0] << " " << limit_yaw[0][1] << std::endl; // Add the feature task.addFeature(s, sd) ; vpFeatureDepth s_Z, s_Zd; s_Z.buildFrom(s.get_x(), s.get_y(), Z , 0); // log(Z/Z*) = 0 that's why the last parameter is 0 s_Zd.buildFrom(sd.get_x(), sd.get_y(), Zd , 0); // log(Z/Z*) = 0 that's why the last parameter is 0 // Add the feature task.addFeature(s_Z, s_Zd); // Jacobian 6x5 (vx,vy,wz,q_yaq,q_pitch) vpMatrix tJe(6,5); tJe[0][0]= 1; tJe[1][1]= 1; tJe[5][2]= 1; vpMatrix eJe(6,5); double servo_time_init = 0; vpImagePoint head_cog_cur; vpImagePoint head_cog_des(I.getHeight()/2, I.getWidth()/2); vpColVector q_dot; bool reinit_servo = true; unsigned long loop_iter = 0; std::vector<std::string> recognized_names; std::map<std::string,unsigned int> detected_face_map; bool detection_phase = true; unsigned int f_count = 0; // AL::ALValue leg_names = AL::ALValue::array("HipRoll","HipPitch", "KneePitch" ); // AL::ALValue values = AL::ALValue::array(0.0, 0.0, 0.0 ); // robot.getProxy()->setMoveArmsEnabled(false,false); std::vector<std::string> arm_names = robot.getBodyNames("LArm"); std::vector<std::string> rarm_names = robot.getBodyNames("RArm"); std::vector<std::string> leg_names(3); leg_names[0]= "HipRoll"; leg_names[1]= "HipPitch"; leg_names[2]= "KneePitch"; arm_names.insert( arm_names.end(), rarm_names.begin(), rarm_names.end() ); arm_names.insert( arm_names.end(), leg_names.begin(), leg_names.end() ); std::vector<float> arm_values(arm_names.size(),0.0); for (unsigned int i = 0; i < arm_names.size(); i++ ) std::cout << arm_names[i]<< std::endl; robot.getPosition(arm_names, arm_values,false); vpImagePoint best_cog_face_peoplep; bool person_found = false; double t_prev = vpTime::measureTimeSecond(); while(1) { if (reinit_servo) { servo_time_init = vpTime::measureTimeSecond(); t_prev = vpTime::measureTimeSecond(); reinit_servo = false; led_proxy.fadeRGB("FaceLeds","white",0.1); } double t = vpTime::measureTimeMs(); if (0) // (opt_debug) { g.acquire(I); } vpDisplay::display(I); // Detect face bool face_found = face_tracker.detect(); stop_vxy = false; //std::cout << "Loop time face_tracker: " << vpTime::measureTimeMs() - t << " ms" << std::endl; // Check speech recognition result AL::ALValue result_speech = m_memProxy.getData("WordRecognized"); if ( ((result_speech[0]) == vocabulary[0]) && (double (result_speech[1]) > 0.4 )) //move { //std::cout << "Recognized: " << result_speech[0] << "with confidence of " << result_speech[1] << std::endl; task.setLambda(lambda_base) ; move_base 
= true; } else if ( (result_speech[0] == vocabulary[1]) && (double(result_speech[1]) > 0.4 )) //stop { //std::cout << "Recognized: " << result_speech[0] << "with confidence of " << result_speech[1] << std::endl; task.setLambda(lambda_nobase) ; move_base = false; } if (move_base != move_base_prev) { if (move_base) { phraseToSay = "Ok, I will follow you."; tts.post.say(phraseToSay); } else { phraseToSay = "Ok, I will stop."; tts.post.say(phraseToSay); } } //robot.setStiffness(arm_names,0.0); //robot.getProxy()->setAngles(arm_names,arm_values,1.0); //robot.getProxy()->setAngles("HipRoll",0.0,1.0); //std::cout << "Loop time check_speech: " << vpTime::measureTimeMs() - t << " ms" << std::endl; move_base_prev = move_base; if (face_found) { std::ostringstream text; text << "Found " << face_tracker.getNbObjects() << " face(s)"; vpDisplay::displayText(I, 10, 10, text.str(), vpColor::red); for(size_t i=0; i < face_tracker.getNbObjects(); i++) { vpRect bbox = face_tracker.getBBox(i); if (i == 0) vpDisplay::displayRectangle(I, bbox, vpColor::red, false, 2); else vpDisplay::displayRectangle(I, bbox, vpColor::green, false, 1); vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), face_tracker.getMessage(i) , vpColor::red); } led_proxy.post.fadeRGB("FaceLeds","blue",0.1); double u = face_tracker.getCog(0).get_u(); double v = face_tracker.getCog(0).get_v(); if (u<= g.getWidth() && v <= g.getHeight()) head_cog_cur.set_uv(u,v); vpRect bbox = face_tracker.getBBox(0); std::string name = face_tracker.getMessage(0); //std::cout << "Loop time face print " << vpTime::measureTimeMs() - t << " ms" << std::endl; } AL::ALValue result = m_memProxy.getData("PeoplePerception/VisiblePeopleList"); //std::cout << "Loop time get Data PeoplePerception " << vpTime::measureTimeMs() - t << " ms" << std::endl; person_found = false; if (result.getSize() > 0) { AL::ALValue info = m_memProxy.getData("PeoplePerception/PeopleDetected"); int num_people = info[1].getSize(); std::ostringstream text; text << "Found " << num_people << " person(s)"; vpDisplay::displayText(I, 10, 10, text.str(), vpColor::red); person_found = true; if (face_found) // Try to find the match between two detection { vpImagePoint cog_face; double dist_min = 1000; unsigned int index_person = 0; for (unsigned int i = 0; i < num_people; i++) { float alpha = info[1][i][2]; float beta = info[1][i][3]; //Z = Zd; // Centre of face into the image float x = g.getWidth()/2 - g.getWidth() * beta; float y = g.getHeight()/2 + g.getHeight() * alpha; cog_face.set_uv(x,y); dist = vpImagePoint::distance(cog_face, head_cog_cur); if (dist < dist_min) { dist_min = dist; best_cog_face_peoplep = cog_face; index_person = i; } } vpDisplay::displayCross(I, best_cog_face_peoplep, 10, vpColor::white); if (dist_min < 55.) 
          {
            Z = info[1][index_person][1]; // Current distance
          }
        }
        else // Take the first one on the list
        {
          float alpha = info[1][0][2];
          float beta = info[1][0][3];
          //Z = Zd;
          // Centre of face into the image
          float x = g.getWidth()/2 - g.getWidth() * beta;
          float y = g.getHeight()/2 + g.getHeight() * alpha;
          head_cog_cur.set_uv(x, y);
          Z = info[1][0][1]; // Current distance
        }
      }
      else {
        std::cout << "No distance computed " << std::endl;
        stop_vxy = true;
        robot.getProxy()->setAngles("HipRoll", 0.0, 1.0);
        //Z = Zd;
      }

      // float alpha = info[1][0][2];
      // float beta = info[1][0][3];
      // //Z = Zd;
      // // Centre of face into the image
      // float x = g.getWidth()/2 - g.getWidth() * beta;
      // float y = g.getHeight()/2 + g.getHeight() * alpha;
      // vpImagePoint cog_face(y,x);
      // dist = vpImagePoint::distance(cog_face,head_cog_cur);
      // if (dist < 55.)
      //   Z = info[1][0][1]; // Current distance
      // else
      //   stop_vxy = true;
      // }
      // else
      // {
      //   std::cout << "No distance computed " << std::endl;
      //   stop_vxy = true;
      //   //Z = Zd;
      // }

      //std::cout << "Loop time before VS: " << vpTime::measureTimeMs() - t << " ms" << std::endl;

      if (face_found || person_found) {
        // Get Head Jacobian (6x2)
        vpMatrix torso_eJe_head;
        robot.get_eJe("Head", torso_eJe_head);

        // Add column relative to the base rotation (Wz)
        // vpColVector col_wz(6);
        // col_wz[5] = 1;
        for (unsigned int i = 0; i < 6; i++)
          for (unsigned int j = 0; j < torso_eJe_head.getCols(); j++)
            tJe[i][j+3] = torso_eJe_head[i][j];

        // std::cout << "tJe" << std::endl << tJe << std::endl;

        // vpHomogeneousMatrix torsoMHeadPith( robot.getProxy()->getTransform(jointNames_head[jointNames_head.size()-1], 0, true)); // get transformation matrix between torso and HeadRoll
        vpHomogeneousMatrix torsoMHeadPith(robot.getProxy()->getTransform("HeadPitch", 0, true)); // get transformation matrix between torso and HeadPitch

        vpVelocityTwistMatrix HeadPitchVLtorso(torsoMHeadPith.inverse());

        for (unsigned int i = 0; i < 3; i++)
          for (unsigned int j = 0; j < 3; j++)
            HeadPitchVLtorso[i][j+3] = 0;

        //std::cout << "HeadPitchVLtorso: " << std::endl << HeadPitchVLtorso << std::endl;

        // Transform the matrix
        eJe = HeadPitchVLtorso * tJe;
        // std::cout << "eJe" << std::endl << eJe << std::endl;

        task.set_eJe(eJe);
        task.set_cVe(vpVelocityTwistMatrix(eMc.inverse()));

        vpDisplay::displayCross(I, head_cog_des, 10, vpColor::blue);
        vpDisplay::displayCross(I, head_cog_cur, 10, vpColor::green);
        // std::cout << "head_cog_des:" << std::endl << head_cog_des << std::endl;
        // std::cout << "head_cog_cur:" << std::endl << head_cog_cur << std::endl;

        // Update the current x feature
        double x, y;
        vpPixelMeterConversion::convertPoint(cam, head_cog_cur, x, y);
        s.buildFrom(x, y, Z);
        //s.set_xyZ(head_cog_cur.get_u(), head_cog_cur.get_v(), Z);

        // Update the log(Z/Z*) feature. Since the depth Z changes, we need to update the interaction matrix.
        s_Z.buildFrom(s.get_x(), s.get_y(), Z, log(Z/Zd));

        q_dot = task.computeControlLaw(vpTime::measureTimeSecond() - servo_time_init);
        //std::cout << "Loop time compute VS: " << vpTime::measureTimeMs() - t << " ms" << std::endl;

        vpMatrix P = task.getI_WpW();
        double alpha = -3.3;
        double min = limit_yaw[0][0];
        double max = limit_yaw[0][1];

        vpColVector z_q2(q_dot.size());
        vpColVector q_yaw = robot.getPosition(jointNames_head[0]);

        z_q2[3] = 2 * alpha * q_yaw[0] / pow((max - min), 2);

        vpColVector q3 = P * z_q2;
        //if (q3.euclideanNorm()<10.0)
        q_dot = q_dot + q3;

        std::vector<float> vel(jointNames_head.size());
        vel[0] = q_dot[3];
        vel[1] = q_dot[4];

        // Compute the distance in pixel between the target and the center of the image
        double distance = vpImagePoint::distance(head_cog_cur, head_cog_des);

        //if (distance > 0.03*I.getWidth())
        std::cout << "q:" << std::endl << q_dot << std::endl;
        // std::cout << "vel" << std::endl << q_dot << std::endl;
        //std::cout << "distance" << std::endl << distance << " -- " << 0.03*I.getWidth() << " ++ " << I.getWidth() << std::endl;

        // if (distance > 0.1*I.getWidth())
        robot.setVelocity(jointNames_head, vel);
        // else
        // {
        //   std::cout << "Setting hipRoll to zero" << std::endl;
        //   robot.getProxy()->setAngles("HipRoll",0.0,1.0);
        // }

        // std::cout << "errorZ: " << task.getError()[2] << std::endl;
        // std::cout << "stop_vxy: " << stop_vxy << std::endl;

        if (std::fabs(Z - Zd) < 0.05 || stop_vxy || !move_base)
          robot.setBaseVelocity(0.0, 0.0, q_dot[2]);
        else
          robot.setBaseVelocity(q_dot[0], q_dot[1], q_dot[2]);

        if (opt_debug) {
          vpColVector vel_head = robot.getJointVelocity(jointNames_head);
          for (unsigned int i = 0; i < jointNames_head.size(); i++) {
            plotter_diff_vel->plot(i, 1, loop_iter, q_dot[i+3]);
            plotter_diff_vel->plot(i, 0, loop_iter, vel_head[i]);
          }
          plotter_error->plot(0, loop_iter, task.getError());
          plotter_vel->plot(0, loop_iter, q3);
          plotter_distance->plot(0, 0, loop_iter, Z);
        }

        // if (detection_phase)
        // {
        //   //if (score >= 0.4 && distance < 0.06*I.getWidth() && bbox.getSize() > 3000)
        //   if (distance < 0.06*I.getWidth() && bbox.getSize() > 3000)
        //   {
        //     if (opt_debug)
        //     {
        //       vpDisplay::displayRectangle(I, bbox, vpColor::red, false, 1);
        //       vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), name, vpColor::red);
        //     }
        //     detected_face_map[name]++;
        //     f_count++;
        //   }
        //   else
        //   {
        //     if (opt_debug)
        //     {
        //       vpDisplay::displayRectangle(I, bbox, vpColor::green, false, 1);
        //       vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), name, vpColor::green);
        //     }
        //   }
        //   if (f_count > 10)
        //   {
        //     detection_phase = false;
        //     f_count = 0;
        //   }
        // }
        // else
        // {
        //   std::string recognized_person_name = std::max_element(detected_face_map.begin(), detected_face_map.end(), pred)->first;
        //   unsigned int times = std::max_element(detected_face_map.begin(), detected_face_map.end(), pred)->second;
        //   if (!in_array(recognized_person_name, recognized_names) && recognized_person_name != "Unknown") {
        //     if (opt_language_english)
        //     {
        //       phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\" + recognized_person_name + "\\pau=200\\ How are you ?";
        //     }
        //     else
        //     {
        //       phraseToSay = "\\emph=2\\ Salut \\wait=200\\ \\emph=2\\" + recognized_person_name + "\\pau=200\\ comment vas tu ?";
        //     }
        //     std::cout << phraseToSay << std::endl;
        //     tts.post.say(phraseToSay);
        //     recognized_names.push_back(recognized_person_name);
        //   }
        //   if (!in_array(recognized_person_name, recognized_names) && recognized_person_name == "Unknown"
        //       && times > 15)
        //   {
        //     if (opt_language_english)
        //     {
        //       phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\. I don't know you! \\emph=2\\ What's your name?";
        //     }
        //     else
        //     {
        //       phraseToSay = " \\emph=2\\ Salut \\wait=200\\ \\emph=2\\. Je ne te connais pas! \\emph=2\\ Comment t'appelles-tu ?";
        //     }
        //     std::cout << phraseToSay << std::endl;
        //     tts.post.say(phraseToSay);
        //     recognized_names.push_back(recognized_person_name);
        //   }
        //   detection_phase = true;
        //   detected_face_map.clear();
        // }
      }
      else {
        robot.stop(jointNames_head);
        robot.stopBase();
        std::cout << "Stop!" << std::endl;
        reinit_servo = true;
      }

      //if (opt_debug)
      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;
      loop_iter++;
      std::cout << "Loop time: " << vpTime::measureTimeMs() - t << " ms" << std::endl;
    }

    robot.stop(jointNames_head);
    robot.stopBase();

    tts.setLanguage("French");
    // tts.exit();

    people_proxy.unsubscribe("People");
    // people_proxy.exit();

    asr.unsubscribe("Test_ASR");
    asr.setVisualExpression(true);
    // asr.exit();

    tts.setLanguage("French");
    // tts.exit();

    led_proxy.fadeRGB("FaceLeds", "white", 0.1);
    // led_proxy.exit();

    vpDisplay::getClick(I, true);
  }
  catch (vpException &e) {
    std::cout << e.getMessage() << std::endl;
  }
  catch (const AL::ALError& e) {
    std::cerr << "Caught exception " << e.what() << std::endl;
  }

  std::cout << "The end: stop the robot..." << std::endl;
  //tts.setLanguage("French");
  // tts.exit();
  robot.stop(jointNames_head);
  robot.stopBase();

  return 0;
}
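The z_q2 / P computation in the servo loop above is a joint-limit avoidance secondary task: the gradient of a quadratic cost on the HeadYaw position is projected through P = I - W+W (task.getI_WpW()) so that it acts only in the null space of the visual task. A compact restatement of that computation, reusing the names and gain from the loop as an illustration only, could be:

// Sketch of the secondary-task term used in the loop above (illustrative only).
vpColVector jointLimitAvoidanceTerm(const vpMatrix& P, double q_yaw, double q_min, double q_max)
{
  double alpha = -3.3;                                  // gain (negative: descend the cost)
  vpColVector z_q2(5);                                  // same layout as q_dot: (vx, vy, wz, q_yaw, q_pitch)
  z_q2[3] = 2 * alpha * q_yaw / pow(q_max - q_min, 2);  // gradient of alpha * q_yaw^2 / (q_max - q_min)^2
  return P * z_q2;                                      // P = I - W+W projects it into the task null space
}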
/**
 * The method reads all sensors. It also detects if the chest button was pressed
 * for at least three seconds. In that case, it shuts down the robot.
 */
void readSensors()
{
  // get new sensor values and copy them to the shared memory block
  try {
    // copy sensor values into the shared memory block
    int writingSensors = 0;
    if (writingSensors == data->newestSensors)
      ++writingSensors;
    if (writingSensors == data->readingSensors)
      if (++writingSensors == data->newestSensors)
        ++writingSensors;
    assert(writingSensors != data->newestSensors);
    assert(writingSensors != data->readingSensors);

    float* sensors = data->sensors[writingSensors];
    for (int i = 0; i < lbhNumOfSensorIds; ++i)
      sensors[i] = *sensorPtrs[i];

    AL::ALValue value = memory->getData("GameCtrl/RoboCupGameControlData");
    if (value.isBinary() && value.getSize() == sizeof(RoboCup::RoboCupGameControlData))
      memcpy(&data->gameControlData[writingSensors], value, sizeof(RoboCup::RoboCupGameControlData));

    data->newestSensors = writingSensors;

    // detect shutdown request via chest-button
    if (*sensorPtrs[chestButtonSensor] == 0.f)
      startPressedTime = dcmTime;
    else if (state != shuttingDown && startPressedTime && dcmTime - startPressedTime > 3000) {
      if (*sensorPtrs[rBumperRightSensor] != 0.f || *sensorPtrs[rBumperLeftSensor] != 0.f ||
          *sensorPtrs[lBumperRightSensor] != 0.f || *sensorPtrs[lBumperLeftSensor] != 0.f)
        (void) !system("( /home/nao/bin/bhumand stop && sudo shutdown -r now ) &");
      else
        (void) !system("( /home/nao/bin/bhumand stop && sudo shutdown -h now ) &");
      state = preShuttingDown;
    }
  } catch (AL::ALError& e) {
    fprintf(stderr, "libbhuman: %s\n", e.toString().c_str());
  }

  // raise the semaphore
  if (sem != SEM_FAILED) {
    int sval;
    if (sem_getvalue(sem, &sval) == 0) {
      if (sval < 1) {
        sem_post(sem);
        frameDrops = 0;
      } else {
        if (frameDrops == 0)
          fprintf(stderr, "libbhuman: dropped sensor data.\n");
        ++frameDrops;
      }
    }
  }
}
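The index arithmetic at the top of readSensors implements a small triple buffer: it picks, out of three shared sensor buffers, one that is neither the most recently published buffer (newestSensors) nor the one the reader currently holds (readingSensors). A standalone sketch of the same selection logic, with a hypothetical helper name, is:

// Illustrative helper (not part of the original code): choose a writing index
// in {0, 1, 2} that differs from both 'newest' and 'reading'.
int pickWritingBuffer(int newest, int reading)
{
  int writing = 0;
  if (writing == newest)
    ++writing;               // skip the most recently published buffer
  if (writing == reading)
    if (++writing == newest) // skip the reader's buffer, and re-skip 'newest' if needed
      ++writing;
  return writing;
}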
/*!
  Connect to Romeo robot, grab, display images using ViSP and start face
  detection with Okao library running on the robot.
  By default, this example connects to a robot with ip address: 198.18.0.1.
*/
int main(int argc, const char* argv[])
{
  try {
    std::string opt_ip = "198.18.0.1";

    if (argc == 3) {
      if (std::string(argv[1]) == "--ip")
        opt_ip = argv[2];
    }

    vpNaoqiGrabber g;
    if (! opt_ip.empty())
      g.setRobotIp(opt_ip);
    g.setCamera(0);
    g.open();

    vpImage<unsigned char> I(g.getHeight(), g.getWidth());
    vpDisplayX d(I);
    vpDisplay::setTitle(I, "ViSP viewer");

    // Open Proxy for the speech
    AL::ALTextToSpeechProxy tts(opt_ip, 9559);
    tts.setLanguage("English");
    std::string phraseToSay = "Hi!";
    // int id = tts.post.say(phraseToSay);
    // tts.wait(id, 2000);

    // Open Face detection proxy
    AL::ALFaceDetectionProxy df(opt_ip, 9559);

    // Start the face recognition engine
    const int period = 1;
    df.subscribe("Face", period, 0.0);

    AL::ALMemoryProxy memProxy(opt_ip, 9559);

    float mImageHeight = g.getHeight();
    float mImageWidth = g.getWidth();
    std::vector<std::string> names;

    while (1) {
      g.acquire(I);
      vpDisplay::display(I);

      AL::ALValue result = memProxy.getData("FaceDetected");

      if (result.getSize() >= 2) {
        //std::cout << "face" << std::endl;
        AL::ALValue info_face_array = result[1];

        for (unsigned int i = 0; i < info_face_array.getSize() - 1; i++) {
          // AL::ALValue info_face = info_face_array[i];
          // Extract face info
          //AL::ALValue shape_face = info_face[0];
          //std::cout << "alpha " << shape_face[1] << ". Beta:" << shape_face[1] << std::endl;

          // Face Detected [1]/ First face [0]/ Shape Info [0]/ Alpha [1]
          float alpha = result[1][i][0][1];
          float beta = result[1][i][0][2];
          float sx = result[1][i][0][3];
          float sy = result[1][i][0][4];

          std::string name = result[1][i][1][2];
          float score = result[1][i][1][1];

          // sizeX / sizeY are the face size in relation to the image
          float sizeX = mImageHeight * sx;
          float sizeY = mImageWidth * sy;

          // Centre of face into the image
          float x = mImageWidth / 2 - mImageWidth * alpha;
          float y = mImageHeight / 2 + mImageHeight * beta;

          vpDisplay::displayCross(I, y, x, 10, vpColor::red);
          vpDisplay::displayRectangle(I, y, x, 0.0, sizeX, sizeY, vpColor::cyan, 1);
          std::cout << i << "- size: " << sizeX * sizeY << std::endl;

          if (score >= 0.4) {
            std::cout << i << "- Name: " << name << " score " << score << std::endl;
            // std::cout << "NAMES: " << names << std::endl;
            // std::cout << "Esite: " << in_array(name, names) << std::endl;
            // std::cout << "==================================== " << std::endl;
            if (!in_array(name, names) && sizeX * sizeY > 4000) {
              std::string phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\" + name;
              std::cout << phraseToSay << std::endl;
              tts.post.say(phraseToSay);
              names.push_back(name);
            }
          } else {
            std::cout << i << "- Face not recognized. " << std::endl;
          }

          // cv::Point p1(x - (sizeX / 2), y - (sizeY / 2));
          // cv::Point p2(x + (sizeX / 2), y + (sizeY / 2));
          // cv::rectangle(I, p1, p2, cv::Scalar(255,255,255));
        }
      }
      //vpTime::sleepMs(60);
      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;
    }
    df.unsubscribe("Face");
  } catch (const vpException &e) {
    std::cerr << "Caught exception: " << e.what() << std::endl;
  } catch (const AL::ALError &e) {
    std::cerr << "Caught exception: " << e.what() << std::endl;
  }

  return 0;
}
void InteractionMonitor::processFaces(const AL::ALValue &faces, map<string, Human>& humans)
{
  try {
    /** Check that there are faces effectively detected. */
    if (faces.getSize() < 2) {
      if (facesCount != 0) {
        ROS_INFO("No face detected");
        humanBuffer = NB_VIEWS_BEFORE_LEARNING;
        facesCount = 0;
      }
      return;
    }

    /** Check the number of faces from the FaceInfo field, and check that it has
     * changed from the last event.*/
    if (faces[1].getSize() - 1 != facesCount) {
      ROS_INFO("%d face(s) detected.", (int)faces[1].getSize() - 1);
      /** Update the current number of detected faces. */
      facesCount = faces[1].getSize() - 1;
    }
  } catch (const AL::ALError& e) {
    ROS_ERROR("%s", e.what());
  }

  for (int i = 0; i < facesCount; ++i) {
    int naoqiFaceId = faces[1][i][1][0];
    float confidence = faces[1][i][1][1];

    // Face > [FaceInfo] > FaceInfo > ShapeInfo > yaw
    float yaw = faces[1][i][0][1];
    // Face > [FaceInfo] > FaceInfo > ShapeInfo > pitch
    float pitch = faces[1][i][0][2];

    ROS_INFO("Face (id: %d) confidence:%.2f yaw: %.2f, pitch: %.2f", naoqiFaceId, confidence, yaw, pitch);
    //ROS_INFO("Face (id: %d) reco confidence: %.2f", naoqiFaceId, confidence);
    //ROS_INFO("Time_Filtered_Reco_Info: %s", faces[1][i].toString().c_str());

    int candidateId = facesCloseTo(yaw, pitch);

    if (naoqiFaceId == -1 && candidateId == -1) { // seen unknown new face
      if (humanBuffer > 0) {
        humanBuffer--;
      } else {
        humanBuffer = NB_VIEWS_BEFORE_LEARNING;
        char faceId[6];
        gen_random(faceId, 5);
        ROS_INFO("Unknown face (confidence: %.2f). Learning it as <%s>", confidence, faceId);
        if (!m_faceProxy->learnFace(faceId)) {
          ROS_ERROR("Could not learn the new face!");
        }
      }
    } else { // face recognized!
      humanBuffer = NB_VIEWS_BEFORE_LEARNING;

      // too close to an existing face? re-use the existing face!
      if (candidateId != -1) {
        naoqiFaceId = candidateId;
      } else {
        assert(naoqiFaceId != -1);
        string label = faces[1][i][1][2];
        id2label[naoqiFaceId] = label;
      }

      string label = id2label[naoqiFaceId];

      // save the yaw,pitch pose of the recognized face, to
      // prevent jitter at next round of detections
      m_lastSeenHumans[naoqiFaceId] = make_pair(yaw, pitch);

      humans[label] = makeHuman(label, yaw, pitch);
    }
  }
}
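facesCloseTo, gen_random, id2label and m_lastSeenHumans are defined elsewhere in InteractionMonitor. As an illustration only, facesCloseTo could be a nearest-neighbour lookup over the last seen (yaw, pitch) poses; the threshold name and its use below are assumptions, not the project's actual implementation:

#include <cmath>

// Hypothetical reconstruction: return the id of a previously seen face whose last
// (yaw, pitch) pose lies within MAX_ANGULAR_JITTER of the new detection, or -1 if none.
int InteractionMonitor::facesCloseTo(float yaw, float pitch)
{
  for (std::map<int, std::pair<float, float> >::const_iterator it = m_lastSeenHumans.begin();
       it != m_lastSeenHumans.end(); ++it) {
    float dyaw = yaw - it->second.first;
    float dpitch = pitch - it->second.second;
    if (std::sqrt(dyaw * dyaw + dpitch * dpitch) < MAX_ANGULAR_JITTER)
      return it->first;
  }
  return -1;
}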