Example #1
/*
    Find the position of the cell.
    Returns true when the current time exceeds the limit.
    Returns the position as an index into the track array and the bit number.
*/
bool mfm_harddisk_device::find_position(attotime &from_when, const attotime &limit, int &bytepos, int &bit)
{
	// Frequency
	uint32_t freq = 1000000000/m_cell_size;

	// Since we stop a few cells early on each track, adjust the position
	// to the track start
	if (from_when < m_revolution_start_time) from_when = m_revolution_start_time;

	// Calculate the position in the track, given the from_when time and the revolution_start_time.
	int cell = (from_when - m_revolution_start_time).as_ticks(freq);

	from_when += attotime::from_nsec((m_encoding==MFM_BITS)? m_cell_size : (16*m_cell_size));
	if (from_when > limit) return true;

	bytepos = cell / 16;

	// Reached the end
	if (bytepos >= m_trackimage_size)
	{
		if (TRACE_TIMING) logerror("Reached end: rev_start = %s, live = %s\n", tts(m_revolution_start_time).c_str(), tts(from_when).c_str());
		m_revolution_start_time += m_rev_time;
		cell = (from_when - m_revolution_start_time).as_ticks(freq);
		bytepos = cell / 16;
	}

	if (bytepos < 0)
	{
		if (TRACE_TIMING) logerror("Negative cell number: rev_start = %s, live = %s\n", tts(m_revolution_start_time).c_str(), tts(from_when).c_str());
		bytepos = 0;
	}
	bit = cell % 16;

	return false;
}
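A hypothetical caller sketch (not from the source; start, limit and trackimage stand in for the device's real state), showing how find_position drives a cell-by-cell read loop:

	attotime when = start;
	int bytepos = 0;
	int bit = 0;
	while (!find_position(when, limit, bytepos, bit))
	{
		// Each track array entry holds 16 cells; bit 15 comes first.
		int cell = (trackimage[bytepos] >> (15 - bit)) & 1;
		// ... decode the cell ...
	}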
Example #2
void randomtest()
{
    std::string ip = "192.168.11.9";
    std::string port = "9559";

    int portn = std::atoi(port.c_str());

    AL::ALProxy tts("ALTextToSpeech", ip, portn);
    tts.callVoid("say", std::string("hello"));

    AL::ALSpeechRecognitionProxy sr(ip, portn);

    sr.setAudioExpression(true);
    sr.setVisualExpression(true);
    sr.setLanguage("English");

    AL::ALMotionProxy motion(ip, portn);
    int a = motion.post.openHand("RHand");
    tts.callVoid("say", std::string("opening"));
    int b = motion.post.closeHand("RHand");
    tts.callVoid("say", std::string("closing"));

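    // Busy-wait so the asynchronous post.openHand/closeHand calls queued
    // above are not cut short by returning from the function.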
    while (true)
    {
    }
}
Example #3
int		Panel::ProcessEvent(int Event, void *Param) {
	if (Event == FE_COMMAND) {
		PCWSTR cmd((PCWSTR)Param);
		if (cmd) {
			int		argc = 0;
			PWSTR	*argv = ::CommandLineToArgvW(cmd, &argc);
			if (argc > 1 && Eqi(argv[0], L"cd")) {
				try {
					psi.Control(this, FCTL_REDRAWPANEL, 0, nullptr);
					ConnectingToServer(argv[1]);
					winstd::shared_ptr<RemoteConnection> tmp(new RemoteConnection(argv[1]));
					WinTS tts(tmp->host());
					m_ts = tts;
					m_conn.swap(tmp);
					psi.Control(this, FCTL_SETCMDLINE, 0, (LONG_PTR)EMPTY_STR);
					psi.Control(this, FCTL_UPDATEPANEL, 0, nullptr);
					psi.Control(this, FCTL_REDRAWPANEL, 0, nullptr);
				} catch (WinError &e) {
					if (e.code() == ERROR_ACCESS_DENIED) {
						AccesDeniedErrorMessage();
					} else {
						farebox_code(e.code(), e.where().c_str());
					}
				}
			}
			::LocalFree(argv);
			return true;
		}
	}
	return	false;
}
Example #4
int fdc_pll_t::get_next_bit(attotime &tm, floppy_image_device *floppy, const attotime &limit)
{
	attotime edge = floppy ? floppy->get_next_transition(ctime) : attotime::never;

	attotime next = ctime + period + phase_adjust;

#if 0
	if(!edge.is_never())
		fprintf(stderr, "ctime=%s, transition_time=%s, next=%s, pha=%s\n", tts(ctime).c_str(), tts(edge).c_str(), tts(next).c_str(), tts(phase_adjust).c_str());
#endif

	if(next > limit)
		return -1;

	ctime = next;
	tm = next;

	if(edge.is_never() || edge >= next) {
		// No transition in the window means 0 and pll in free run mode
		phase_adjust = attotime::zero;
		return 0;
	}

	// Transition in the window means 1, and the pll is adjusted

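	// delta is the edge offset from the centre of the bit window; 65% of it
	// is fed back as a phase correction. The attotime::zero arithmetic below
	// avoids scaling a negative attotime directly.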
	attotime delta = edge - (next - period/2);

	if(delta.seconds() < 0)
		phase_adjust = attotime::zero - ((attotime::zero - delta)*65)/100;
	else
		phase_adjust = (delta*65)/100;

	if(delta < attotime::zero) {
		if(freq_hist < 0)
			freq_hist--;
		else
			freq_hist = -1;
	} else if(delta > attotime::zero) {
		if(freq_hist > 0)
			freq_hist++;
		else
			freq_hist = 1;
	} else
		freq_hist = 0;

	if(freq_hist) {
		int afh = freq_hist < 0 ? -freq_hist : freq_hist;
		if(afh > 1) {
			attotime aper = attotime::from_double(period_adjust_base.as_double()*delta.as_double()/period.as_double());
			period += aper;

			if(period < min_period)
				period = min_period;
			else if(period > max_period)
				period = max_period;
		}
	}

	return 1;
}
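A hedged consumer sketch (pll, floppy, limit and shift_reg are illustrative names, not from the source): bits are pulled from the PLL until the next window would cross the time limit, at which point get_next_bit returns -1.

	attotime tm;
	uint16_t shift_reg = 0;
	for (;;)
	{
		int bit = pll.get_next_bit(tm, floppy, limit);
		if (bit < 0)
			break;                          // time budget exhausted
		shift_reg = (shift_reg << 1) | bit; // assemble the MFM cell stream
	}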
Example #5
bool TTSSapi::start(QString *errStr)
{

    m_TTSOpts = RbSettings::subValue("sapi", RbSettings::TtsOptions).toString();
    m_TTSLanguage = RbSettings::subValue("sapi", RbSettings::TtsLanguage).toString();
    m_TTSVoice = RbSettings::subValue("sapi", RbSettings::TtsVoice).toString();
    m_TTSSpeed = RbSettings::subValue("sapi", RbSettings::TtsSpeed).toString();
    m_sapi4 = RbSettings::subValue("sapi", RbSettings::TtsUseSapi4).toBool();

    QFile::remove(QDir::tempPath() +"/sapi_voice.vbs");
    QFile::copy(":/builtin/sapi_voice.vbs",QDir::tempPath() + "/sapi_voice.vbs");
    m_TTSexec = QDir::tempPath() +"/sapi_voice.vbs";

    QFileInfo tts(m_TTSexec);
    if(!tts.exists())
    {
        *errStr = tr("Could not copy the Sapi-script");
        return false;
    }
    // create the voice process
    QString execstring = m_TTSTemplate;
    execstring.replace("%exe",m_TTSexec);
    execstring.replace("%options",m_TTSOpts);
    execstring.replace("%lang",m_TTSLanguage);
    execstring.replace("%voice",m_TTSVoice);
    execstring.replace("%speed",m_TTSSpeed);

    if(m_sapi4)
        execstring.append(" /sapi4 ");

    qDebug() << "init" << execstring;
    voicescript = new QProcess(NULL);
    //connect(voicescript,SIGNAL(readyReadStandardError()),this,SLOT(error()));

    voicescript->start(execstring);
    if(!voicescript->waitForStarted())
    {
        *errStr = tr("Could not start the Sapi-script");
        return false;
    }

    if(!voicescript->waitForReadyRead(300))
    {
        *errStr = voicescript->readAllStandardError();
        if(*errStr != "")
            return false;
    }

    voicestream = new QTextStream(voicescript);
    voicestream->setCodec("UTF16-LE");

    return true;
}
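The %exe/%options/%lang/%voice/%speed placeholders imply that m_TTSTemplate holds a cscript command line much like the /listvoices one in Example #7; a plausible value, assumed here for illustration only, would be:

    cscript //nologo "%exe" /language:%lang /voice:%voice /speed:%speed %options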
Example #6
util_error_t tts_c_locale(time_t t, double tfrac, const char *format, char **sout) {
    char *saved_locale;
    util_error_t err;

    saved_locale = strdup(setlocale(LC_ALL, NULL));
    if (saved_locale == NULL) {
        return ALLOC_FAILED;
    }
    setlocale(LC_ALL, "C");
    err = tts(t, tfrac, format, sout);
    setlocale(LC_ALL, saved_locale);
    free(saved_locale);
    return err;
}
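A hedged usage sketch; treating any value other than ALLOC_FAILED as success and freeing *sout are assumptions, since only the allocation error is visible in this snippet:

    char *s = NULL;
    util_error_t err = tts_c_locale(time(NULL), 0.5, "%Y-%m-%d %H:%M:%S", &s);
    if (err == ALLOC_FAILED) {
        fprintf(stderr, "out of memory\n");
    } else {
        printf("%s\n", s);  /* assumed: tts() allocates *sout on the heap */
        free(s);
    }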
Example #7
QStringList TTSSapi::getVoiceList(QString language)
{
    QStringList result;

    QFile::copy(":/builtin/sapi_voice.vbs",QDir::tempPath() + "/sapi_voice.vbs");
    m_TTSexec = QDir::tempPath() +"/sapi_voice.vbs";

    QFileInfo tts(m_TTSexec);
    if(!tts.exists())
        return result;

    // create the voice process
    QString execstring = "cscript //nologo \"%exe\" /language:%lang /listvoices";
    execstring.replace("%exe",m_TTSexec);
    execstring.replace("%lang",language);

    if(RbSettings::value(RbSettings::TtsUseSapi4).toBool())
        execstring.append(" /sapi4 ");

    qDebug() << "init" << execstring;
    voicescript = new QProcess(NULL);
    voicescript->start(execstring);
    qDebug() << "wait for started";
    if(!voicescript->waitForStarted())
        return result;
    voicescript->closeWriteChannel();
    voicescript->waitForReadyRead();

    QString dataRaw = voicescript->readAllStandardError().data();
    result = dataRaw.split(",",QString::SkipEmptyParts);
    if(result.size() > 0)
    {
        result.sort();
        result.removeFirst();
        for(int i = 0; i< result.size();i++)
        {
            result[i] = result.at(i).simplified();
        }
    }

    delete voicescript;
    QFile::setPermissions(QDir::tempPath() +"/sapi_voice.vbs",
              QFile::ReadOwner | QFile::WriteOwner | QFile::ExeOwner
            | QFile::ReadUser  | QFile::WriteUser  | QFile::ExeUser
            | QFile::ReadGroup | QFile::WriteGroup | QFile::ExeGroup
            | QFile::ReadOther | QFile::WriteOther | QFile::ExeOther );
    QFile::remove(QDir::tempPath() +"/sapi_voice.vbs");
    return result;
}
Example #8
void mfm_harddisk_device::head_move()
{
	int steps = m_track_delta;
	if (steps < 0) steps = -steps;
	if (TRACE_STEPS) logerror("%s: Moving head by %d step(s) %s\n", tag(), steps, (m_track_delta<0)? "outward" : "inward");

	// We simulate the head movement by pausing for n*step_time with n being the cylinder delta
	m_step_phase = STEP_MOVING;
	m_seek_timer->adjust(m_step_time * steps);

	if (TRACE_TIMING) logerror("%s: Head movement takes %s time\n", tag(), tts(m_step_time * steps).c_str());
	// We pretend that we already arrived
	// TODO: Check auto truncation?
	m_current_cylinder += m_track_delta;
	if (m_current_cylinder < 0) m_current_cylinder = 0;
	if (m_current_cylinder >= m_actual_cylinders) m_current_cylinder = m_actual_cylinders-1;
	m_track_delta = 0;
}
Example #9
bool TTSExes::start(QString *errStr)
{
    m_TTSexec = RbSettings::subValue(m_name,RbSettings::TtsPath).toString();
    m_TTSOpts = RbSettings::subValue(m_name,RbSettings::TtsOptions).toString();

    m_TTSTemplate = m_TemplateMap.value(m_name);

    QFileInfo tts(m_TTSexec);
    if(tts.exists())
    {
        return true;
    }
    else
    {
        *errStr = tr("TTS executable not found");
        return false;
    }
}
Example #10
attotime mfm_harddisk_device::track_end_time()
{
	// We back up two microseconds before the track end to keep the index
	// pulse from appearing early (because of rounding effects)
	attotime nexttime = m_rev_time - attotime::from_nsec(2000);
	attotime endtime = attotime::never;

	if (!m_ready)
	{
		// estimate the next index time; during power-up we assume half the rotational speed
		// Should never be relevant, though, because READY is false.
		nexttime = nexttime * 2;
	}

	if (!m_revolution_start_time.is_never())
	{
		endtime = m_revolution_start_time + nexttime;
		if (TRACE_TIMING) logerror("%s: Track start time = %s, end time = %s\n", tag(), tts(m_revolution_start_time).c_str(), tts(endtime).c_str());
	}
	return endtime;
}
Example #11
int main(int argc, const char* argv[])
{
  std::string opt_ip = "131.254.10.126";
  bool opt_language_english = true;
  bool opt_debug = false;

  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--ip" && i+1 < argc)
      opt_ip = argv[++i];
    else if (std::string(argv[i]) == "--fr")
      opt_language_english = false;
    else if (std::string(argv[i]) == "--debug")
      opt_debug = true;
    else if (std::string(argv[i]) == "--help") {
      std::cout << "Usage: " << argv[0] << " [--ip <robot address>] [--fr] [--debug]" << std::endl;
      return 0;
    }
  }

  std::string camera_name = "CameraTopPepper";

  // Open the grabber for the acquisition of the images from the robot
  vpNaoqiGrabber g;
  if (! opt_ip.empty())
    g.setRobotIp(opt_ip);
  g.setFramerate(30);
  g.setCamera(0);
  g.open();


  vpCameraParameters cam = vpNaoqiGrabber::getIntrinsicCameraParameters(AL::kQVGA,camera_name, vpCameraParameters::perspectiveProjWithDistortion);
  vpHomogeneousMatrix eMc = vpNaoqiGrabber::getExtrinsicCameraParameters(camera_name,vpCameraParameters::perspectiveProjWithDistortion);

  std::cout << "eMc:" << std::endl << eMc << std::endl;
  std::cout << "cam:" << std::endl << cam << std::endl;
  vpNaoqiRobot robot;

  // Connect to the robot
  if (! opt_ip.empty())
    robot.setRobotIp(opt_ip);
  robot.open();

  if (robot.getRobotType() != vpNaoqiRobot::Pepper)
  {
    std::cout << "ERROR: You are not connected to Pepper, but to a different Robot. Check the IP. " << std::endl;
    return 0;
  }

  std::vector<std::string> jointNames_head = robot.getBodyNames("Head");

  // Open Proxy for the speech
  AL::ALTextToSpeechProxy tts(opt_ip, 9559);
  std::string phraseToSay;
  if (opt_language_english)
  {
    tts.setLanguage("English");
    phraseToSay = " \\emph=2\\ Hi,\\pau=200\\ How are you ?";
  }
  else
  {
    tts.setLanguage("French");
    phraseToSay = " \\emph=2\\ Bonjour,\\pau=200\\ comment vas  tu ?";
  }

  // Initialize PeoplePerception
  AL::ALPeoplePerceptionProxy people_proxy(opt_ip, 9559);
  AL::ALMemoryProxy m_memProxy(opt_ip, 9559);
  people_proxy.subscribe("People", 30, 0.0);
  std::cout << "period: " << people_proxy.getCurrentPeriod() << std::endl;

  // Open Proxy for the recognition speech
  AL::ALSpeechRecognitionProxy asr(opt_ip, 9559);
  //  asr.unsubscribe("Test_ASR");
  //   return 0 ;

  asr.setVisualExpression(false);
  asr.setLanguage("English");
  std::vector<std::string> vocabulary;
  vocabulary.push_back("follow me");
  vocabulary.push_back("stop");

  // Set the vocabulary
  asr.setVocabulary(vocabulary,false);

  // Start the speech recognition engine with user Test_ASR
  asr.subscribe("Test_ASR");
  std::cout << "Speech recognition engine started" << std::endl;

  // Proxy to control the leds
  AL::ALLedsProxy led_proxy(opt_ip, 9559);

  //Declare plots
  vpPlot * plotter_diff_vel; vpPlot * plotter_vel;
  vpPlot * plotter_error;  vpPlot * plotter_distance;

  if (opt_debug)
  {
    // Plotting
    plotter_diff_vel = new vpPlot (2);
    plotter_diff_vel->initGraph(0, 2);
    plotter_diff_vel->initGraph(1, 2);
    plotter_diff_vel->setTitle(0,  "HeadYaw");
    plotter_diff_vel->setTitle(1,  "HeadPitch");

    plotter_vel= new vpPlot (1);
    plotter_vel->initGraph(0, 5);
    plotter_vel->setLegend(0, 0, "vx");
    plotter_vel->setLegend(0, 1, "vy");
    plotter_vel->setLegend(0, 2, "wz");
    plotter_vel->setLegend(0, 3, "q_yaw");
    plotter_vel->setLegend(0, 4, "q_pitch");

    plotter_error = new vpPlot(1);
    plotter_error->initGraph(0, 3);
    plotter_error->setLegend(0, 0, "x");
    plotter_error->setLegend(0, 1, "y");
    plotter_error->setLegend(0, 2, "Z");

    plotter_distance = new vpPlot (1);
    plotter_distance->initGraph(0, 1);
    plotter_distance->setLegend(0, 0, "dist");
  }

  try {
    vpImage<unsigned char> I(g.getHeight(), g.getWidth());
    vpDisplayX d(I);
    vpDisplay::setTitle(I, "ViSP viewer");

    vpFaceTrackerOkao face_tracker(opt_ip,9559);

    double dist = 0.0; // Distance between person detected from peoplePerception and faceDetection

    // Set Visual Servoing:
    vpServo task;
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE);
    //    vpAdaptiveGain lambda_adapt;
    //    lambda_adapt.initStandard(1.6, 1.8, 15);
    vpAdaptiveGain lambda_base(1.2, 1.0, 10); // 2.3, 0.7, 15
    vpAdaptiveGain lambda_nobase(5, 2.9, 15); // 4, 0.5, 15
    task.setLambda(lambda_base) ;

    double Z = 0.9;
    double Zd = 0.9;
    bool stop_vxy = false;
    bool move_base = true;
    bool move_base_prev = true;

    // Create the desired  visual feature
    vpFeaturePoint s;
    vpFeaturePoint sd;
    vpImagePoint ip(I.getHeight()/2, I.getWidth()/2);
    // Create the current x visual feature
    vpFeatureBuilder::create(s, cam, ip);
    vpFeatureBuilder::create(sd, cam, ip);

    //   sd.buildFrom( I.getWidth()/2, I.getHeight()/2, Zd);

    AL::ALValue limit_yaw = robot.getProxy()->getLimits("HeadYaw");
    std::cout << limit_yaw[0][0] << " " << limit_yaw[0][1] << std::endl;

    // Add the feature
    task.addFeature(s, sd) ;

    vpFeatureDepth s_Z, s_Zd;
    s_Z.buildFrom(s.get_x(), s.get_y(), Z , 0); // log(Z/Z*) = 0 that's why the last parameter is 0
    s_Zd.buildFrom(sd.get_x(), sd.get_y(), Zd , 0); // log(Z/Z*) = 0 that's why the last parameter is 0

    // Add the feature
    task.addFeature(s_Z, s_Zd);

    // Jacobian 6x5 (vx,vy,wz,q_yaw,q_pitch)
    vpMatrix tJe(6,5);
    tJe[0][0]= 1;
    tJe[1][1]= 1;
    tJe[5][2]= 1;
    vpMatrix eJe(6,5);

    double servo_time_init = 0;

    vpImagePoint head_cog_cur;
    vpImagePoint head_cog_des(I.getHeight()/2, I.getWidth()/2);
    vpColVector q_dot;

    bool reinit_servo = true;
    unsigned long loop_iter = 0;

    std::vector<std::string> recognized_names;
    std::map<std::string,unsigned int> detected_face_map;
    bool detection_phase = true;
    unsigned int f_count = 0;
    //    AL::ALValue leg_names  = AL::ALValue::array("HipRoll","HipPitch", "KneePitch" );
    //    AL::ALValue values  = AL::ALValue::array(0.0, 0.0, 0.0 );

    // robot.getProxy()->setMoveArmsEnabled(false,false);
    std::vector<std::string> arm_names = robot.getBodyNames("LArm");
    std::vector<std::string> rarm_names = robot.getBodyNames("RArm");
    std::vector<std::string> leg_names(3);
    leg_names[0]= "HipRoll";
    leg_names[1]= "HipPitch";
    leg_names[2]= "KneePitch";

    arm_names.insert( arm_names.end(), rarm_names.begin(), rarm_names.end() );
    arm_names.insert( arm_names.end(), leg_names.begin(), leg_names.end() );

    std::vector<float> arm_values(arm_names.size(),0.0);

    for (unsigned int i = 0; i < arm_names.size(); i++ )
      std::cout << arm_names[i]<< std::endl;

    robot.getPosition(arm_names, arm_values,false);

    vpImagePoint best_cog_face_peoplep;
    bool person_found = false;

    double t_prev = vpTime::measureTimeSecond();

    while(1) {
      if (reinit_servo) {
        servo_time_init = vpTime::measureTimeSecond();
        t_prev = vpTime::measureTimeSecond();
        reinit_servo = false;
        led_proxy.fadeRGB("FaceLeds","white",0.1);
      }

      double t = vpTime::measureTimeMs();
      if (0) // (opt_debug)
      {
        g.acquire(I);
      }
      vpDisplay::display(I);
      // Detect face
      bool face_found = face_tracker.detect();
      stop_vxy = false;

      //std::cout << "Loop time face_tracker: " << vpTime::measureTimeMs() - t << " ms" << std::endl;

      // Check speech recognition result

      AL::ALValue result_speech = m_memProxy.getData("WordRecognized");

      if ( ((result_speech[0]) == vocabulary[0]) && (double (result_speech[1]) > 0.4 )) //move
      {
        //std::cout << "Recognized: " << result_speech[0] << "with confidence of " << result_speech[1] << std::endl;
        task.setLambda(lambda_base) ;

        move_base = true;
      }
      else if ( (result_speech[0] == vocabulary[1]) && (double(result_speech[1]) > 0.4 )) //stop
      {
        //std::cout << "Recognized: " << result_speech[0] << "with confidence of " << result_speech[1] << std::endl;
        task.setLambda(lambda_nobase) ;
        move_base = false;
      }

      if (move_base != move_base_prev)
      {
        if (move_base)
        {
          phraseToSay = "Ok, I will follow you.";
          tts.post.say(phraseToSay);
        }
        else
        {
          phraseToSay = "Ok, I will stop.";
          tts.post.say(phraseToSay);
        }

      }

      //robot.setStiffness(arm_names,0.0);
      //robot.getProxy()->setAngles(arm_names,arm_values,1.0);
      //robot.getProxy()->setAngles("HipRoll",0.0,1.0);

      //std::cout << "Loop time check_speech: " << vpTime::measureTimeMs() - t << " ms" << std::endl;

      move_base_prev = move_base;

      if (face_found) {
        std::ostringstream text;
        text << "Found " << face_tracker.getNbObjects() << " face(s)";
        vpDisplay::displayText(I, 10, 10, text.str(), vpColor::red);
        for(size_t i=0; i < face_tracker.getNbObjects(); i++) {
          vpRect bbox = face_tracker.getBBox(i);
          if (i == 0)
            vpDisplay::displayRectangle(I, bbox, vpColor::red, false, 2);
          else
            vpDisplay::displayRectangle(I, bbox, vpColor::green, false, 1);
          vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), face_tracker.getMessage(i) , vpColor::red);
        }

        led_proxy.post.fadeRGB("FaceLeds","blue",0.1);

        double u = face_tracker.getCog(0).get_u();
        double v = face_tracker.getCog(0).get_v();
        if (u<= g.getWidth() && v <= g.getHeight())
          head_cog_cur.set_uv(u,v);

        vpRect bbox = face_tracker.getBBox(0);
        std::string name = face_tracker.getMessage(0);

        //std::cout << "Loop time face print " << vpTime::measureTimeMs() - t << " ms" << std::endl;
      }

      AL::ALValue result = m_memProxy.getData("PeoplePerception/VisiblePeopleList");

      //std::cout << "Loop time get Data PeoplePerception " << vpTime::measureTimeMs() - t << " ms" << std::endl;

      person_found = false;
      if (result.getSize() > 0)
      {
        AL::ALValue info = m_memProxy.getData("PeoplePerception/PeopleDetected");
        int num_people = info[1].getSize();
        std::ostringstream text;
        text << "Found " << num_people << " person(s)";
        vpDisplay::displayText(I, 10, 10, text.str(), vpColor::red);

        person_found = true;

        if (face_found) // Try to find the match between the two detections
        {
          vpImagePoint cog_face;
          double dist_min = 1000;
          unsigned int index_person = 0;

          for (unsigned int i = 0; i < num_people; i++)
          {

            float alpha =  info[1][i][2];
            float beta =  info[1][i][3];
            //Z = Zd;
            // Centre of face into the image
            float x =  g.getWidth()/2 -  g.getWidth() * beta;
            float y =  g.getHeight()/2  + g.getHeight() * alpha;
            cog_face.set_uv(x,y);
            dist = vpImagePoint::distance(cog_face, head_cog_cur);

            if (dist < dist_min)
            {
              dist_min = dist;
              best_cog_face_peoplep = cog_face;
              index_person  = i;
            }
          }

          vpDisplay::displayCross(I, best_cog_face_peoplep, 10, vpColor::white);

          if (dist_min < 55.)
          {
            Z = info[1][index_person][1]; // Current distance
          }

        }
        else // Take the first one on the list
        {
          float alpha =  info[1][0][2];
          float beta =  info[1][0][3];
          //Z = Zd;
          // Centre of face into the image
          float x =  g.getWidth()/2 -  g.getWidth() * beta;
          float y =  g.getHeight()/2  + g.getHeight() * alpha;
          head_cog_cur.set_uv(x,y);
          Z = info[1][0][1]; // Current distance

        }

      }
      else
      {
        std::cout << "No distance computed " << std::endl;
        stop_vxy = true;
        robot.getProxy()->setAngles("HipRoll",0.0,1.0);
        //Z = Zd;
      }
      //          float alpha =  info[1][0][2];
      //          float beta =  info[1][0][3];
      //          //Z = Zd;
      //          // Centre of face into the image
      //          float x =  g.getWidth()/2 -  g.getWidth() * beta;
      //          float y =  g.getHeight()/2  + g.getHeight() * alpha;

      //          vpImagePoint cog_face(y,x);
      //          dist = vpImagePoint::distance(cog_face,head_cog_cur);
      //          if (dist < 55.)
      //            Z = info[1][0][1]; // Current distance
      //          else
      //            stop_vxy = true;
      //        }
      //        else
      //        {
      //          std::cout << "No distance computed " << std::endl;
      //          stop_vxy = true;
      //          //Z = Zd;
      //        }


      //std::cout << "Loop time before VS: " << vpTime::measureTimeMs() - t << " ms" << std::endl;
      if (face_found || person_found )
      {
        // Get Head Jacobian (6x2)
        vpMatrix torso_eJe_head;
        robot.get_eJe("Head",torso_eJe_head);

        // Add column relative to the base rotation (Wz)
        //        vpColVector col_wz(6);
        //        col_wz[5] = 1;
        for (unsigned int i = 0; i < 6; i++)
          for (unsigned int j = 0; j < torso_eJe_head.getCols(); j++)
            tJe[i][j+3] = torso_eJe_head[i][j];

        // std::cout << "tJe" << std::endl << tJe << std::endl;

        //        vpHomogeneousMatrix torsoMHeadPith( robot.getProxy()->getTransform(jointNames_head[jointNames_head.size()-1], 0, true));// get transformation  matrix between torso and HeadRoll
        vpHomogeneousMatrix torsoMHeadPith( robot.getProxy()->getTransform("HeadPitch", 0, true));// get transformation  matrix between torso and HeadRoll

        vpVelocityTwistMatrix HeadPitchVLtorso(torsoMHeadPith.inverse());

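        // Zero the upper-right ([t]x R) block of the twist so the head's
        // lever arm does not induce translational velocity (assumed intent).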
        for(unsigned int i=0; i< 3; i++)
          for(unsigned int j=0; j< 3; j++)
            HeadPitchVLtorso[i][j+3] = 0;

        //std::cout << "HeadPitchVLtorso: " << std::endl << HeadPitchVLtorso << std::endl;
        // Transform the matrix
        eJe = HeadPitchVLtorso *tJe;

        // std::cout << "eJe" << std::endl << eJe << std::endl;

        task.set_eJe( eJe );
        task.set_cVe( vpVelocityTwistMatrix(eMc.inverse()) );

        vpDisplay::displayCross(I, head_cog_des, 10, vpColor::blue);
        vpDisplay::displayCross(I, head_cog_cur, 10, vpColor::green);
        //  std::cout << "head_cog_des:" << std::endl << head_cog_des << std::endl;
        //  std::cout << "head_cog_cur:" << std::endl << head_cog_cur << std::endl;

        // Update the current x feature
        double x,y;
        vpPixelMeterConversion::convertPoint(cam, head_cog_cur, x, y);
        s.buildFrom(x, y, Z);
        //s.set_xyZ(head_cog_cur.get_u(), head_cog_cur.get_v(), Z);

        // Update the log(Z/Z*) feature. Since the depth Z changes, we need to update the interaction matrix
        s_Z.buildFrom(s.get_x(), s.get_y(), Z, log(Z/Zd)) ;

        q_dot = task.computeControlLaw(vpTime::measureTimeSecond() - servo_time_init);

        //std::cout << "Loop time compute VS: " << vpTime::measureTimeMs() - t << " ms" << std::endl;

        vpMatrix P = task.getI_WpW();
        double alpha = -3.3;
        double min = limit_yaw[0][0];
        double max = limit_yaw[0][1];

        vpColVector z_q2 (q_dot.size());
        vpColVector q_yaw = robot.getPosition(jointNames_head[0]);

        z_q2[3] = 2 * alpha * q_yaw[0]/ pow((max - min),2);

        vpColVector q3 = P * z_q2;
        //if (q3.euclideanNorm()<10.0)
        q_dot =  q_dot + q3;

        std::vector<float> vel(jointNames_head.size());
        vel[0] = q_dot[3];
        vel[1] = q_dot[4];

        // Compute the distance in pixel between the target and the center of the image
        double distance = vpImagePoint::distance(head_cog_cur, head_cog_des);
        //if (distance > 0.03*I.getWidth())
         std::cout << "q:" << std::endl << q_dot << std::endl;
        //  std::cout << "vel" << std::endl << q_dot << std::endl;
        //std::cout << "distance" << std::endl << distance <<" -- " << 0.03*I.getWidth() << " ++ " << I.getWidth() << std::endl;

        //        if (distance > 0.1*I.getWidth())
        robot.setVelocity(jointNames_head,vel);
        //        else
        //        {
        //          std::cout << "Setting hipRoll to zero" << std::endl;
        //          robot.getProxy()->setAngles("HipRoll",0.0,1.0);
        //        }
        // std::cout << "errorZ: " << task.getError()[2] << std::endl;
        // std::cout << "stop_vxy: " << stop_vxy << std::endl;

        if (std::fabs(Z -Zd) < 0.05 || stop_vxy || !move_base)
          robot.setBaseVelocity(0.0, 0.0, q_dot[2]);
        else
          robot.setBaseVelocity(q_dot[0], q_dot[1], q_dot[2]);

        if (opt_debug)
        {
          vpColVector vel_head = robot.getJointVelocity(jointNames_head);
          for (unsigned int i=0 ; i < jointNames_head.size() ; i++) {
            plotter_diff_vel->plot(i, 1, loop_iter, q_dot[i+3]);
            plotter_diff_vel->plot(i, 0, loop_iter, vel_head[i]);
          }
          plotter_error->plot(0,loop_iter,task.getError());
          plotter_vel->plot(0,loop_iter, q3);
          plotter_distance->plot(0,0,loop_iter,Z);
        }

        //        if (detection_phase)
        //        {

        //          //if (score >= 0.4 && distance < 0.06*I.getWidth() && bbox.getSize() > 3000)
        //          if (distance < 0.06*I.getWidth() && bbox.getSize() > 3000)
        //          {
        //            if (opt_debug)
        //            {
        //              vpDisplay::displayRectangle(I, bbox, vpColor::red, false, 1);
        //              vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), name, vpColor::red);
        //            }
        //            detected_face_map[name]++;
        //            f_count++;
        //          }
        //          else
        //          {
        //            if (opt_debug)
        //            {
        //              vpDisplay::displayRectangle(I, bbox, vpColor::green, false, 1);
        //              vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), name, vpColor::green);
        //            }
        //          }
        //          if (f_count>10)
        //          {
        //            detection_phase = false;
        //            f_count = 0;
        //          }
        //        }
        //        else
        //        {
        //          std::string recognized_person_name = std::max_element(detected_face_map.begin(), detected_face_map.end(), pred)->first;
        //          unsigned int times = std::max_element(detected_face_map.begin(), detected_face_map.end(), pred)->second;

        //          if (!in_array(recognized_person_name, recognized_names) && recognized_person_name != "Unknown") {

        //            if (opt_language_english)
        //            {
        //              phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\" + recognized_person_name + "\\pau=200\\ How are you ?";
        //            }
        //            else
        //            {
        //              phraseToSay = "\\emph=2\\ Salut \\wait=200\\ \\emph=2\\" + recognized_person_name + "\\pau=200\\ comment vas  tu ?";;
        //            }

        //            std::cout << phraseToSay << std::endl;
        //            tts.post.say(phraseToSay);
        //            recognized_names.push_back(recognized_person_name);
        //          }
        //          if (!in_array(recognized_person_name, recognized_names) && recognized_person_name == "Unknown"
        //              && times > 15)
        //          {

        //            if (opt_language_english)
        //            {
        //              phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\. I don't know you! \\emph=2\\ What's your name?";
        //            }
        //            else
        //            {
        //              phraseToSay = " \\emph=2\\ Salut \\wait=200\\ \\emph=2\\. Je ne te connais pas! \\emph=2\\  Comment t'appelles-tu ?";
        //            }

        //            std::cout << phraseToSay << std::endl;
        //            tts.post.say(phraseToSay);
        //            recognized_names.push_back(recognized_person_name);
        //          }

        //          detection_phase = true;
        //          detected_face_map.clear();
        //        }
      }
      else {
        robot.stop(jointNames_head);
        robot.stopBase();
        std::cout << "Stop!" << std::endl;
        reinit_servo = true;
      }

      //if (opt_debug)
      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;
      loop_iter ++;
      std::cout << "Loop time: " << vpTime::measureTimeMs() - t << " ms" << std::endl;
    }
    robot.stop(jointNames_head);
    robot.stopBase();

    tts.setLanguage("French");
  //  tts.exit();

    people_proxy.unsubscribe("People");
   // people_proxy.exit();

    asr.unsubscribe("Test_ASR");
    asr.setVisualExpression(true);

  //  asr.exit();

    tts.setLanguage("French");
 //   tts.exit();

    led_proxy.fadeRGB("FaceLeds","white",0.1);
  //  led_proxy.exit();

    vpDisplay::getClick(I, true);

  }
  catch(vpException &e) {
    std::cout << e.getMessage() << std::endl;
  }
  catch (const AL::ALError& e)
  {
    std::cerr << "Caught exception " << e.what() << std::endl;
  }

  std::cout << "The end: stop the robot..." << std::endl;

  //tts.setLanguage("French");
  //  tts.exit();


  robot.stop(jointNames_head);
  robot.stopBase();

  return 0;
}
Example #12
void* say_thread(void* say_port) {

    struct sockaddr_in saddr;
    int sock, status;
    int on = 1;

    memset(&saddr, 0, sizeof(saddr));
    saddr.sin_family      = AF_INET;
    saddr.sin_port        = htons(*(uint16_t*)say_port);
    saddr.sin_addr.s_addr = htonl(INADDR_ANY);

    sock = socket(AF_INET, SOCK_DGRAM, 0);
    if(sock < 0) {
        perror("Infocast sayThread: Error creating socket");
        exit(1);
    }

    status = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                         (char*)&on, sizeof(on));
    if(status) {
        perror("Infocast sayThread: setsockopt reuseaddr:");
        exit(1);
    }

    if(bind(sock, (struct sockaddr*) &saddr, sizeof(saddr)) == -1) {
        perror("Infocast sayThread: bind");
        exit(1);
    }

    char message[1024];

    while(1) {
        if(recv(sock, message, sizeof(message), 0) < 0) {
            perror("Say Thread Recv");
            continue;
        }

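        // Datagram layout, as parsed below:
        //   int32_t magic    (must equal SAY_MAGIC_BYTE)
        //   char    version  (must equal SAY_VERSION)
        //   uint8_t len      (payload length in bytes)
        //   char[]  text     (the phrase to speak, not NUL-terminated)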
        char* ptr = message;
        int32_t magicByte;

        memcpy(&magicByte, ptr, sizeof(magicByte)); ptr += sizeof(magicByte);
        if(magicByte != (int32_t)SAY_MAGIC_BYTE) {
            printf("Magic byte mismatch. Is %x should be %x\n", magicByte, SAY_MAGIC_BYTE);
            continue;
        }

        char version = *ptr; ptr++;
        if(version != SAY_VERSION) {
            printf("Version mismatch. Is %d should be %d\n", version, SAY_VERSION);
            continue;
        }

        uint8_t len;
        memcpy(&len, ptr, sizeof(len)); ptr += sizeof(len);

        char* say = (char*)malloc(len+1);
        memcpy(say, ptr, len); ptr += len;
        say[len] = '\0';

        try {
            AL::ALTextToSpeechProxy tts("localhost", NAOQI_PORT);
            tts.say(say);
            free(say);
        } catch(const AL::ALError& e) {
            free(say);
            std::cerr << "Caught exception: " << e.what() << std::endl;
        }
    }

    return NULL;
}
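For context, a hedged sender-side sketch producing the datagram this thread parses; SAY_MAGIC_BYTE and SAY_VERSION come from the source, everything else is illustrative:

void say_send(int sock, const struct sockaddr_in* dest, const char* text) {
    char pkt[1024];
    char* p = pkt;
    int32_t magic = (int32_t)SAY_MAGIC_BYTE;
    uint8_t len = (uint8_t)strlen(text);

    memcpy(p, &magic, sizeof(magic)); p += sizeof(magic);
    *p++ = SAY_VERSION;
    memcpy(p, &len, sizeof(len));     p += sizeof(len);
    memcpy(p, text, len);             p += len;

    sendto(sock, pkt, p - pkt, 0, (const struct sockaddr*)dest, sizeof(*dest));
}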
Example #13
/*!

  Connect to the Romeo robot, grab and display images using ViSP, and start face detection with the Okao library running on the robot.
  By default, this example connects to a robot with ip address: 198.18.0.1.

 */
int main(int argc, const char* argv[])
{
  try
  {
    std::string opt_ip = "198.18.0.1";

    if (argc == 3) {
      if (std::string(argv[1]) == "--ip")
        opt_ip = argv[2];
    }

    vpNaoqiGrabber g;
    if (! opt_ip.empty())
      g.setRobotIp(opt_ip);
    g.setCamera(0);
    g.open();

    vpImage<unsigned char> I(g.getHeight(), g.getWidth());
    vpDisplayX d(I);
    vpDisplay::setTitle(I, "ViSP viewer");


    // Open Proxy for the speech
    AL::ALTextToSpeechProxy tts(opt_ip, 9559);
    tts.setLanguage("English");

    std::string phraseToSay = "Hi!";
    //    int id = tts.post.say(phraseToSay);
    //    tts.wait(id,2000);
    // Open Face detection proxy

    AL::ALFaceDetectionProxy df(opt_ip, 9559);


    // Start the face recognition engine
    const int period = 1;

    df.subscribe("Face", period, 0.0);


    AL::ALMemoryProxy memProxy(opt_ip, 9559);

    float  mImageHeight = g.getHeight();
    float  mImageWidth = g.getWidth();
    std::vector<std::string> names;


    while (1)
    {

      g.acquire(I);
      vpDisplay::display(I);


      AL::ALValue result = memProxy.getData("FaceDetected");

      if (result.getSize() >=2)
      {
        //std::cout << "face" << std::endl;
        AL::ALValue info_face_array = result[1];

        for (unsigned int i = 0; i < info_face_array.getSize()-1; i++ )
        {
          // AL::ALValue info_face = info_face_array[i];

          //Extract face info

          //AL::ALValue shape_face =info_face[0];

          //std::cout << "alpha "<< shape_face[1] <<". Beta:" << shape_face[1] << std::endl;
          // Face Detected [1]/ First face [0]/ Shape Info [0]/ Alpha [1]
          float alpha = result[1][i][0][1];
          float beta = result[1][i][0][2];
          float sx = result[1][i][0][3];
          float sy = result[1][i][0][4];

          std::string name = result[1][i][1][2];
          float score = result[1][i][1][1];

          // sizeX / sizeY are the face size in relation to the image

          float sizeX = mImageHeight * sx;
          float sizeY = mImageWidth * sy;

          // Centre of face into the image
          float x = mImageWidth / 2 - mImageWidth * alpha;
          float y = mImageHeight / 2 + mImageHeight * beta;

          vpDisplay::displayCross(I, y, x, 10, vpColor::red);
          vpDisplay::displayRectangle(I,y,x,0.0,sizeX,sizeY,vpColor::cyan,1);

          std::cout << i << "- size: " << sizeX*sizeY << std::endl;

          if (score >= 0.4)
          {
            std::cout << i << "- Name: " << name <<" score "  << score << std::endl;

            //            std::cout << "NAMES: " << names << std::endl;
            //            std::cout << "Esite: " << in_array(name, names) << std::endl;
            //            std::cout << "==================================== " << std::endl;

            if (!in_array(name, names) && sizeX*sizeY > 4000) {
              std::string phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\" + name;
              std::cout << phraseToSay << std::endl;
              tts.post.say(phraseToSay);
              names.push_back(name);

            }
          }

          else
            std::cout << i << "- Face not recognized. " << std::endl;

          //          cv::Point p1(x - (sizeX / 2), y - (sizeY / 2));
          //          cv::Point p2(x + (sizeX / 2), y + (sizeY / 2));
          //          cv::rectangle(I, p1, p2, cv::Scalar(255,255,255));

        }

      }

      //vpTime::sleepMs(60);

      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;

    }


    df.unsubscribe("Face");

  }
  catch (const vpException &e)
  {
    std::cerr << "Caught exception: " << e.what() << std::endl;
  }
  catch (const AL::ALError &e)
  {
    std::cerr << "Caught exception: " << e.what() << std::endl;
  }

  return 0;
}
Example #14
/*
    Reading bytes from the hard disk.

    Returns true if the time limit will be exceeded before reading the bit or the complete byte.
    Otherwise delivers, via cdata, the bit at the given position, or the complete data byte together with the clock bits.
*/
bool mfm_harddisk_device::read(attotime &from_when, const attotime &limit, UINT16 &cdata)
{
	UINT16* track = m_cache->get_trackimage(m_current_cylinder, m_current_head);

	if (track==NULL)
	{
		// What shall we do in this case?
		throw emu_fatalerror("Cannot read CHD image");
	}

	// Get a copy for later debug output
	attotime fw = from_when;

	int bytepos = 0;
	int bitpos = 0;

	bool offlimit = find_position(from_when, limit, bytepos, bitpos);
	if (offlimit) return true;

	if (m_encoding == MFM_BITS)
	{
		// We will deliver a single bit
		cdata = ((track[bytepos] << bitpos) & 0x8000) >> 15;
		if (TRACE_BITS) logerror("%s: Reading (c=%d,h=%d,bit=%d) at cell %d [%s] = %d\n", tag(), m_current_cylinder, m_current_head, bitpos, ((bytepos<<4) + bitpos), tts(fw).c_str(), cdata);
	}
	else
	{
		// Assumed completion: the example is truncated at this point in the
		// source. For the word-oriented encodings the whole (clock, data)
		// word is delivered at once.
		cdata = track[bytepos];
	}
	return false;
}
Example #15
int main(int argc, const char* argv[])
{
  std::string opt_ip = "131.254.10.126";
  bool opt_language_english = true;

  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--ip" && i+1 < argc)
      opt_ip = argv[++i];
    else if (std::string(argv[i]) == "--fr")
      opt_language_english = false;
    else if (std::string(argv[i]) == "--help") {
      std::cout << "Usage: " << argv[0] << " [--ip <robot address>] [--fr]" << std::endl;
      return 0;
    }
  }

  // USE NEW MODULE

  qi::SessionPtr session = qi::makeSession();
  session->connect("tcp://" + opt_ip + ":9559");
  qi::AnyObject proxy = session->service("pepper_control");

  proxy.call<void >("start");

  // -------------------------------------


  std::string camera_name = "CameraTopPepper";

  // Open the grabber for the acquisition of the images from the robot
  vpNaoqiGrabber g;
  if (! opt_ip.empty())
    g.setRobotIp(opt_ip);
  g.setFramerate(30);
  g.setCamera(0);
  g.open();
  vpCameraParameters cam = vpNaoqiGrabber::getIntrinsicCameraParameters(AL::kQVGA,camera_name, vpCameraParameters::perspectiveProjWithDistortion);
  vpHomogeneousMatrix eMc = vpNaoqiGrabber::getExtrinsicCameraParameters(camera_name,vpCameraParameters::perspectiveProjWithDistortion);

  std::cout << "eMc:" << std::endl << eMc << std::endl;

  // Connect to the robot
  vpNaoqiRobot robot;
  if (! opt_ip.empty())
    robot.setRobotIp(opt_ip);
  robot.open();

  if (robot.getRobotType() != vpNaoqiRobot::Pepper)
  {
    std::cout << "ERROR: You are not connected to Pepper, but to a different Robot. Check the IP. " << std::endl;
    return 0;
  }

  std::vector<std::string> jointNames_head = robot.getBodyNames("Head");

  // Open Proxy for the speech
  AL::ALTextToSpeechProxy tts(opt_ip, 9559);
  std::string phraseToSay;
  if (opt_language_english)
  {
    tts.setLanguage("English");
    phraseToSay = " \\emph=2\\ Hi,\\pau=200\\ How are you ?";
  }
  else
  {
    tts.setLanguage("French");
    phraseToSay = " \\emph=2\\ Bonjour,\\pau=200\\ comment vas  tu ?";
  }

  // Plotting
  vpPlot plotter_diff_vel (2);
  plotter_diff_vel.initGraph(0, 2);
  plotter_diff_vel.initGraph(1, 2);
  plotter_diff_vel.setTitle(0,  "HeadYaw");
  plotter_diff_vel.setTitle(1,  "HeadPitch");


  vpPlot plotter_error (2);
  plotter_error.initGraph(0, 1);
  plotter_error.initGraph(1, 1);

  plotter_error.setTitle(0,  "HeadYaw");
  plotter_error.setTitle(1,  "HeadPitch");

  try {
    vpImage<unsigned char> I(g.getHeight(), g.getWidth());
    vpDisplayX d(I);
    vpDisplay::setTitle(I, "ViSP viewer");

    vpFaceTrackerOkao face_tracker(opt_ip,9559);

    // Initialize head servoing
    vpServoHead servo_head;
    servo_head.setCameraParameters(cam);
    vpAdaptiveGain lambda(3.5, 0.8, 15); // lambda(0)=3.5, lambda(oo)=0.8 and lambda_dot(0)=15
    servo_head.setLambda(lambda);

    double servo_time_init = 0;

    vpImagePoint head_cog_cur;
    vpImagePoint head_cog_des(I.getHeight()/2, I.getWidth()/2);
    vpColVector q_dot_head;

    bool reinit_servo = true;
    unsigned long loop_iter = 0;

    std::vector<std::string> recognized_names;
    std::map<std::string,unsigned int> detected_face_map;
    bool detection_phase = true;
    unsigned int f_count = 0;

    double t_prev = vpTime::measureTimeSecond();

    while(1) {
      if (reinit_servo) {
        servo_time_init = vpTime::measureTimeSecond();
        t_prev = vpTime::measureTimeSecond();
        reinit_servo = false;
        //proxy.call<void >("start");

      }

      double t = vpTime::measureTimeMs();
      g.acquire(I);
      vpDisplay::display(I);
      bool face_found = face_tracker.detect();


      if (face_found) {
        std::ostringstream text;
        text << "Found " << face_tracker.getNbObjects() << " face(s)";
        vpDisplay::displayText(I, 10, 10, text.str(), vpColor::red);
        for(size_t i=0; i < face_tracker.getNbObjects(); i++) {
          vpRect bbox = face_tracker.getBBox(i);
          if (i == 0)
            vpDisplay::displayRectangle(I, bbox, vpColor::red, false, 2);
          else
            vpDisplay::displayRectangle(I, bbox, vpColor::green, false, 1);
          vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), face_tracker.getMessage(i) , vpColor::red);
        }

        double u = face_tracker.getCog(0).get_u();
        double v = face_tracker.getCog(0).get_v();
        if (u<= g.getWidth() && v <= g.getHeight())
          head_cog_cur.set_uv(u,v);

        vpRect bbox = face_tracker.getBBox(0);
        std::string name = face_tracker.getMessage(0);

        // Get Head Jacobian (6x2)
        vpMatrix eJe = robot.get_eJe("Head");

        // Add column relative to the base rotation (Wz)
        vpColVector col_wz(6);
        col_wz[5] = 1;
        eJe.resize(6,3,false);
        for (unsigned int i =0; i <6; i++)
          eJe[i][2] = col_wz[i];

        //        std::cout << "JAC" << std::endl << eJe << std::endl;

        servo_head.set_eJe( eJe );
        servo_head.set_cVe( vpVelocityTwistMatrix(eMc.inverse()) );

        servo_head.setCurrentFeature(head_cog_cur);
        servo_head.setDesiredFeature(head_cog_des);
        //vpDisplay::setFont(I, "-*-*-bold-*-*-*-*-*-*-*-*-*-*-*");
        //vpDisplay::displayText(I, face_tracker.getFace().getTopLeft()+vpImagePoint(-20,0), "Coraline", vpColor::red);
        //vpServoDisplay::display(servo_head.m_task_head, cam, I, vpColor::green, vpColor::red, 3);

        q_dot_head = servo_head.computeControlLaw(vpTime::measureTimeSecond() - servo_time_init);

        std::vector<float> vel(jointNames_head.size());
        for (unsigned int i=0 ; i < jointNames_head.size() ; i++) {
          vel[i] = q_dot_head[i];
        }

        // Compute the distance in pixel between the target and the center of the image
        double distance = vpImagePoint::distance(head_cog_cur, head_cog_des);
        //if (distance > 0.03*I.getWidth())

        std::cout << "vel" << std::endl << q_dot_head << std::endl;
        proxy.async<void >("setDesJointVelocity", jointNames_head,vel );
        robot.getProxy()->move(0.0, 0.0, q_dot_head[2]);

        vpColVector vel_head = robot.getJointVelocity(jointNames_head);
        for (unsigned int i=0 ; i < jointNames_head.size() ; i++) {
          plotter_diff_vel.plot(i,1,loop_iter,q_dot_head[i]);
          plotter_diff_vel.plot(i,0,loop_iter,vel_head[i]);
          plotter_error.plot(i,0,loop_iter,servo_head.m_task_head.getError()[i]);
        }

        if (detection_phase)
        {

          //if (score >= 0.4 && distance < 0.06*I.getWidth() && bbox.getSize() > 3000)
          if (distance < 0.06*I.getWidth() && bbox.getSize() > 3000)
          {
            vpDisplay::displayRectangle(I, bbox, vpColor::red, false, 1);
            vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), name, vpColor::red);
            detected_face_map[name]++;
            f_count++;
          }
          else
          {
            vpDisplay::displayRectangle(I, bbox, vpColor::green, false, 1);
            vpDisplay::displayText(I, (int)bbox.getTop()-10, (int)bbox.getLeft(), name, vpColor::green);
          }
          if (f_count>10)
          {
            detection_phase = false;
            f_count = 0;
          }
        }
        else
        {
          std::string recognized_person_name = std::max_element(detected_face_map.begin(), detected_face_map.end(), pred)->first;
          unsigned int times = std::max_element(detected_face_map.begin(), detected_face_map.end(), pred)->second;

          if (!in_array(recognized_person_name, recognized_names) && recognized_person_name != "Unknown") {

            if (opt_language_english)
            {
              phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\" + recognized_person_name + "\\pau=200\\ How are you ?";
            }
            else
            {
              phraseToSay = "\\emph=2\\ Salut \\wait=200\\ \\emph=2\\" + recognized_person_name + "\\pau=200\\ comment vas  tu ?";
            }

            std::cout << phraseToSay << std::endl;
            tts.post.say(phraseToSay);
            recognized_names.push_back(recognized_person_name);
          }
          if (!in_array(recognized_person_name, recognized_names) && recognized_person_name == "Unknown"
              && times > 15)
          {

            if (opt_language_english)
            {
              phraseToSay = "\\emph=2\\ Hi \\wait=200\\ \\emph=2\\. I don't know you! \\emph=2\\ What's your name?";
            }
            else
            {
              phraseToSay = " \\emph=2\\ Salut \\wait=200\\ \\emph=2\\. Je ne te connais pas! \\emph=2\\  Comment t'appelles-tu ?";
            }

            std::cout << phraseToSay << std::endl;
            tts.post.say(phraseToSay);
            recognized_names.push_back(recognized_person_name);
          }

          detection_phase = true;
          detected_face_map.clear();
        }

      }
      else {
        proxy.call<void >("stopJoint");
        robot.getProxy()->move(0.0, 0.0, 0.0);
        std::cout << "Stop!" << std::endl;
        reinit_servo = true;
      }

      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;
      loop_iter ++;
      std::cout << "Loop time: " << vpTime::measureTimeMs() - t << " ms" << std::endl;
    }

    vpDisplay::getClick(I, true);

    proxy.call<void >("stop");
    robot.getProxy()->move(0.0, 0.0, 0.0);


  }
  catch(vpException &e) {
    std::cout << e.getMessage() << std::endl;
  }
  catch (const AL::ALError& e)
  {
    std::cerr << "Caught exception " << e.what() << std::endl;
  }

  std::cout << "The end: stop the robot..." << std::endl;
  proxy.call<void >("stop");
  robot.getProxy()->move(0.0, 0.0, 0.0);

  return 0;
}