Code Example #1
int main(){
  FaceTracker F;
  vector<string> class_name;
  vector<int> class_label;
  vector<Rect> face_pos;
  Mat frame, prev_img, buffer;
  vector<vector<Point2f> > features;
  string classifier="fisher";
  
  VideoCapture cap;
  cap.open("/home/rajeev/Dropbox/vios_team_use/face_recognition/videos/bbt_s04e18_hd.avi");
  namedWindow("Features",1);
  while(1){
    cap >> frame;
    if (frame.empty())
       break;
    F.klt_track_face(class_name, class_label, face_pos, frame, prev_img, features, classifier);   
    frame.copyTo(prev_img);
    frame.copyTo(buffer);
    for (size_t i = 0; i < features.size(); i++){
      for (size_t j = 0; j < features[i].size(); j++){
        circle(buffer, features[i][j], 2, CV_RGB(0,255,0), 1, 8, 0);
      }
    }
    imshow("Features", buffer);  // show the copy the feature points were drawn into
    if(waitKey(20) == 27)
     break;
  }
  return 0;
}
Code Example #2
File: main.cpp Project: schue/face-analysis-sdk
int get_facial_points(Mat& face, vector<Point_<double> >& points)
{
    FaceTracker * tracker = LoadFaceTracker(DefaultFaceTrackerModelPathname().c_str());
    FaceTrackerParams *tracker_params  = LoadFaceTrackerParams(DefaultFaceTrackerParamsPathname().c_str());

    Mat frame_gray;
    cvtColor(face, frame_gray, CV_RGB2GRAY );

    int result = tracker->NewFrame(frame_gray, tracker_params);

    Pose pose;

    if (result >= 1) {
        points = tracker->getShape();
        pose = tracker->getPose();
    } else {
        // release the tracker and its params on the failure path as well
        delete tracker;
        delete tracker_params;
        return 0;
    }

    delete tracker;
    delete tracker_params; 

    return 1;
}
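LoadFaceTracker and LoadFaceTrackerParams hand back heap-allocated objects, so every exit path has to free them. A std::unique_ptr variant avoids the bookkeeping entirely; this is a minimal sketch (the function name is illustrative), assuming the SDK objects can be destroyed with plain delete:

#include <memory>

int get_facial_points_raii(Mat& face, vector<Point_<double> >& points)
{
    // unique_ptr frees the tracker and params on every return path
    std::unique_ptr<FaceTracker> tracker(LoadFaceTracker(DefaultFaceTrackerModelPathname().c_str()));
    std::unique_ptr<FaceTrackerParams> tracker_params(LoadFaceTrackerParams(DefaultFaceTrackerParamsPathname().c_str()));

    Mat frame_gray;
    cvtColor(face, frame_gray, CV_RGB2GRAY);

    if (tracker->NewFrame(frame_gray, tracker_params.get()) < 1) {
        return 0;
    }

    points = tracker->getShape();
    return 1;
}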
Code Example #3
File: CameraToolBox.cpp Project: bakarih/hifi
void CameraToolBox::toggleMute() {
    delete _doubleClickTimer;
    _doubleClickTimer = NULL;

    FaceTracker* faceTracker = Application::getInstance()->getSelectedFaceTracker();
    if (faceTracker) {
        faceTracker->toggleMute();
    }
}
Code Example #4
DWORD WINAPI KinectWindow::FaceTrackingStaticThread(PVOID lpParam)
{
    KinectWindow* context = static_cast<KinectWindow*>(lpParam);
    if (context)
    {
        FaceTracker* pFaceTracker;
        context->GetFaceTraker(&pFaceTracker);

        return pFaceTracker->FaceTrackingThread();
    }
    return 0;
}
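CreateThread can only invoke a free or static function, so this static routine is the standard Win32 trampoline: it recovers the object from lpParam and forwards to the member function. A hypothetical launch site, mirroring the CreateThread calls in Code Example #10, might look like:

// Hypothetical launch of the trampoline above; pKinectWindow is assumed
// to outlive the thread it is handed to.
HANDLE hFaceThread = CreateThread(NULL, 0,
                                  KinectWindow::FaceTrackingStaticThread,
                                  (PVOID)pKinectWindow, 0, NULL);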
Code Example #5
int main(int, char**)
{
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened())  return -1; // check if we succeeded

    Mat frame;
    namedWindow("tracking",1);

    while(true)
    {
        cap >> frame;

        tracker.track(frame);  // `tracker` is a file-scope tracker object defined elsewhere in the source

        for(auto face_center : tracker.faces_centers){
             ellipse( frame, face_center, Size( 5, 5), 0, 0, 360, Scalar( 255, 255, 0 ), 4, 8, 0 ); // could be a box/rectangle ~ size of face
        }

        imshow("tracking", frame);
        if(waitKey(30) >= 0) break;	

    }

    return 0;
}
Code Example #6
File: main.cpp Project: 23119841/face-analysis-sdk
int
run_image_mode(const Configuration &cfg,
	       const CommandLineArgument<std::string> &image_argument,
	       const CommandLineArgument<std::string> &landmarks_argument)
{  
  FaceTracker * tracker = LoadFaceTracker(cfg.model_pathname.c_str());
  FaceTrackerParams *tracker_params  = LoadFaceTrackerParams(cfg.params_pathname.c_str());

  cv::Mat image;
  cv::Mat_<uint8_t> gray_image = load_grayscale_image(image_argument->c_str(), &image);

  int result = tracker->NewFrame(gray_image, tracker_params);

  std::vector<cv::Point_<double> > shape;
  std::vector<cv::Point3_<double> > shape3;
  Pose pose;
  
  if (result >= cfg.tracking_threshold) {
    shape = tracker->getShape();
    shape3 = tracker->get3DShape();
    pose = tracker->getPose();
  }

  if (!have_argument_p(landmarks_argument)) {
    display_data(cfg, image, shape, pose); 
  } else if (shape.size() > 0) {
    if (cfg.save_3d_points)
      save_points3(landmarks_argument->c_str(), shape3);
    else
      save_points(landmarks_argument->c_str(), shape);
  }
 
  delete tracker;
  delete tracker_params; 
  
  return 0;
}
Code Example #7
File: moustachizer.cpp Project: drjou/Unlogo
// ------------------------------
void Moustachizer::process(Mat frame) {
	
	//circle(frame, Point(300,300), 300, Scalar(255,0,0), 3);
	Mat grayFrame;  // cvtColor allocates the output, so no clone is needed
	cvtColor(frame, grayFrame, CV_RGB2GRAY);
	equalizeHist(grayFrame, grayFrame);
	imshow("grayFrame", grayFrame);
	faceTracker.search( grayFrame );

	for(size_t i = 0; i < faceTracker.faces.size(); i++)
	{
		Face face = faceTracker.faces[i];
		face.draw( frame );
		
		float scale =  (float)face.boundingBox.width / stache.size().width;
		
		Mat stache_resized;
		Mat mask_resized;
		resize(stache, stache_resized, Size(), scale, scale);
		resize(mask, mask_resized, Size(), scale, scale);
		
		float xpos = face.boundingBox.x;
		float ypos = face.boundingBox.y + (face.boundingBox.height * .60);
		Rect pos = Rect(xpos, ypos, stache_resized.size().width, stache_resized.size().height); // assumes pos stays inside the frame
		
		/*
		 Rect frame = Rect(0, 0, input.size().width, input.size().height);
		 Rect intersection = pos & frame;
		 Mat fg = stache_resized(Rect(0,0,intersection.width,intersection.height));
		 Mat bg = input(Rect(xpos,ypos,intersection.width,intersection.height));
		 */
		
		Mat bg = frame(pos);
		stache_resized.copyTo(bg, mask_resized);	
	}
	
	//cvtColor(input, input, CV_GRAY2RGB);
	imshow("preview", frame);
	
	waitKey(1);
}
Code Example #8
File: moustachizer.cpp Project: drjou/Unlogo
// ------------------------------
int Moustachizer::init(const char* argstr) {

	faceTracker.init();
	
	const char* fileName = "images/moustache4.jpg";
	stache = imread(fileName, 1);
	
	// imread with flag 1 forces a 3-channel BGR image and drops any alpha,
	// so I am pulling out the Value channel from the moustache image
	// to use as a mask for drawing the moustache into the main frame.
	Mat hsvimg;
	cvtColor(stache, hsvimg, CV_RGB2HSV);
	vector<Mat> hsvchannels;
	split(hsvimg, hsvchannels);	
	bitwise_not(hsvchannels[2], mask); 
	erode(mask, mask, Mat(), Point(-1,-1), 4);
	dilate(mask, mask, Mat(), Point(-1,-1), 2);
	
	return 0;
}
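When the source image actually carries an alpha channel (for example a PNG), imread can keep it by loading with the unchanged flag, and the mask can come straight from alpha instead of the HSV value channel. A minimal sketch, assuming a hypothetical 4-channel images/moustache4.png:

	// Sketch: build the compositing mask from a PNG alpha channel.
	Mat rgba = imread("images/moustache4.png", CV_LOAD_IMAGE_UNCHANGED);
	if (rgba.channels() == 4) {
		vector<Mat> channels;
		split(rgba, channels);               // channels[3] is alpha
		mask = channels[3];                  // opaque pixels form the stamp mask
		cvtColor(rgba, stache, CV_BGRA2BGR); // 3-channel copy for copyTo
	}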
Code Example #9
File: Head.cpp Project: hoster123/hifi
void Head::simulate(float deltaTime, bool isMine, bool billboard) {
    //  Update audio trailing average for rendering facial animations
    const float AUDIO_AVERAGING_SECS = 0.05f;
    const float AUDIO_LONG_TERM_AVERAGING_SECS = 30.0f;
    _averageLoudness = glm::mix(_averageLoudness, _audioLoudness, glm::min(deltaTime / AUDIO_AVERAGING_SECS, 1.0f));

    if (_longTermAverageLoudness == -1.0f) {
        _longTermAverageLoudness = _averageLoudness;
    } else {
        _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
    }

    if (isMine) {
        MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
        
        // Only use face trackers when not playing back a recording.
        if (!myAvatar->isPlaying()) {
            FaceTracker* faceTracker = Application::getInstance()->getActiveFaceTracker();
            _isFaceTrackerConnected = faceTracker != NULL && !faceTracker->isMuted();
            if (_isFaceTrackerConnected) {
                _blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();

                if (typeid(*faceTracker) == typeid(DdeFaceTracker)) {

                    if (Menu::getInstance()->isOptionChecked(MenuOption::UseAudioForMouth)) {
                        calculateMouthShapes();

                        const int JAW_OPEN_BLENDSHAPE = 21;
                        const int MMMM_BLENDSHAPE = 34;
                        const int FUNNEL_BLENDSHAPE = 40;
                        const int SMILE_LEFT_BLENDSHAPE = 28;
                        const int SMILE_RIGHT_BLENDSHAPE = 29;
                        _blendshapeCoefficients[JAW_OPEN_BLENDSHAPE] += _audioJawOpen;
                        _blendshapeCoefficients[SMILE_LEFT_BLENDSHAPE] += _mouth4;
                        _blendshapeCoefficients[SMILE_RIGHT_BLENDSHAPE] += _mouth4;
                        _blendshapeCoefficients[MMMM_BLENDSHAPE] += _mouth2;
                        _blendshapeCoefficients[FUNNEL_BLENDSHAPE] += _mouth3;
                    }

                    applyEyelidOffset(getFinalOrientationInWorldFrame());
                }
            }

            auto eyeTracker = DependencyManager::get<EyeTracker>();
            _isEyeTrackerConnected = eyeTracker->isTracking();
        }

        if (!myAvatar->getStandingHMDSensorMode()) {
            //  Twist the upper body to follow the rotation of the head, but only do this with my avatar,
            //  since everyone else will see the full joint rotations for other people.  
            const float BODY_FOLLOW_HEAD_YAW_RATE = 0.1f;
            const float BODY_FOLLOW_HEAD_FACTOR = 0.66f;
            float currentTwist = getTorsoTwist();
            setTorsoTwist(currentTwist + (getFinalYaw() * BODY_FOLLOW_HEAD_FACTOR - currentTwist) * BODY_FOLLOW_HEAD_YAW_RATE);
        }
    }
   
    if (!(_isFaceTrackerConnected || billboard)) {

        if (!_isEyeTrackerConnected) {
            // Update eye saccades
            const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
            const float AVERAGE_SACCADE_INTERVAL = 6.0f;
            const float MICROSACCADE_MAGNITUDE = 0.002f;
            const float SACCADE_MAGNITUDE = 0.04f;
            const float NOMINAL_FRAME_RATE = 60.0f;

            if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
                _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
            } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
                _saccadeTarget = SACCADE_MAGNITUDE * randVector();
            }
            _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
        } else {
            _saccade = glm::vec3();
        }

        //  Detect transition from talking to not; force blink after that and a delay
        bool forceBlink = false;
        const float TALKING_LOUDNESS = 100.0f;
        const float BLINK_AFTER_TALKING = 0.25f;
        if ((_averageLoudness - _longTermAverageLoudness) > TALKING_LOUDNESS) {
            _timeWithoutTalking = 0.0f;
        
        } else if (_timeWithoutTalking < BLINK_AFTER_TALKING && (_timeWithoutTalking += deltaTime) >= BLINK_AFTER_TALKING) {
            forceBlink = true;
        }
                                 
        //  Update audio attack data for facial animation (eyebrows and mouth)
        const float AUDIO_ATTACK_AVERAGING_RATE = 0.9f;
        _audioAttack = AUDIO_ATTACK_AVERAGING_RATE * _audioAttack + (1.0f - AUDIO_ATTACK_AVERAGING_RATE) * fabs((_audioLoudness - _longTermAverageLoudness) - _lastLoudness);
        _lastLoudness = (_audioLoudness - _longTermAverageLoudness);
        
        const float BROW_LIFT_THRESHOLD = 100.0f;
        if (_audioAttack > BROW_LIFT_THRESHOLD) {
            _browAudioLift += sqrtf(_audioAttack) * 0.01f;
        }
        _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
        
        const float BLINK_SPEED = 10.0f;
        const float BLINK_SPEED_VARIABILITY = 1.0f;
        const float BLINK_START_VARIABILITY = 0.25f;
        const float FULLY_OPEN = 0.0f;
        const float FULLY_CLOSED = 1.0f;
        if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
            // no blinking when brows are raised; blink less with increasing loudness
            const float BASE_BLINK_RATE = 15.0f / 60.0f;
            const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
            if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(fabs(_averageLoudness - _longTermAverageLoudness)) *
                    ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
                _leftEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
                _rightEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
                if (randFloat() < 0.5f) {
                    _leftEyeBlink = BLINK_START_VARIABILITY;
                } else {
                    _rightEyeBlink = BLINK_START_VARIABILITY;
                }
            }
        } else {
            _leftEyeBlink = glm::clamp(_leftEyeBlink + _leftEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
            _rightEyeBlink = glm::clamp(_rightEyeBlink + _rightEyeBlinkVelocity * deltaTime, FULLY_OPEN, FULLY_CLOSED);
            
            if (_leftEyeBlink == FULLY_CLOSED) {
                _leftEyeBlinkVelocity = -BLINK_SPEED;
            
            } else if (_leftEyeBlink == FULLY_OPEN) {
                _leftEyeBlinkVelocity = 0.0f;
            }
            if (_rightEyeBlink == FULLY_CLOSED) {
                _rightEyeBlinkVelocity = -BLINK_SPEED;
            
            } else if (_rightEyeBlink == FULLY_OPEN) {
                _rightEyeBlinkVelocity = 0.0f;
            }
        }
        
        // use data to update fake Faceshift blendshape coefficients
        calculateMouthShapes();
        DependencyManager::get<Faceshift>()->updateFakeCoefficients(_leftEyeBlink,
                                                                    _rightEyeBlink,
                                                                    _browAudioLift,
                                                                    _audioJawOpen,
                                                                    _mouth2,
                                                                    _mouth3,
                                                                    _mouth4,
                                                                    _blendshapeCoefficients);

        applyEyelidOffset(getOrientation());

    } else {
        _saccade = glm::vec3();
    }
    if (Menu::getInstance()->isOptionChecked(MenuOption::FixGaze)) { // if debug menu turns off, use no saccade
        _saccade = glm::vec3();
    }
    
    _leftEyePosition = _rightEyePosition = getPosition();
    if (!billboard) {
        _faceModel.simulate(deltaTime);
        if (!_faceModel.getEyePositions(_leftEyePosition, _rightEyePosition)) {
            static_cast<Avatar*>(_owningAvatar)->getSkeletonModel().getEyePositions(_leftEyePosition, _rightEyePosition);
        }
    }
    _eyePosition = calculateAverageEyePosition();
}
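The saccade update above eases _saccade toward _saccadeTarget by a fraction pow(0.5f, NOMINAL_FRAME_RATE * deltaTime) of the remaining distance each frame. A standalone sketch of that easing step (the function name is illustrative, not part of the hifi API):

#include <cmath>

// Illustrative easing step matching the _saccade update: at the nominal
// 60 fps (deltaTime = 1/60 s) half of the remaining distance is covered
// each frame; larger deltaTime values step by a smaller fraction.
float easeToward(float current, float target, float deltaTime) {
    const float NOMINAL_FRAME_RATE = 60.0f;
    return current + (target - current) * std::pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
}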
Code Example #10
/// <summary>
/// Process Kinect window menu commands
/// </summary>
/// <param name="commanId">ID of the menu item</param>
/// <param name="param">Parameter passed in along with the commmand ID</param>
/// <param name="previouslyChecked">Check status of menu item before command is issued</param>
void KinectSettings::ProcessMenuCommand(WORD commandId, WORD param, bool previouslyChecked)
{
	DWORD ExitCode;
	FaceTracker* pFaceTracker;
	InbedAPPs* pFallDetect;
	DepthInbedAPPs* pDepthInbedApps;
	LegRaisExcer* pLegRaisExer;
	HandRaisExcer* pHandRaisExer;
	
	m_pKinectWindow->GetFaceTraker(&pFaceTracker);
	m_pPrimaryView->GetFallDetect(&pFallDetect);
	m_pDepthStream->GetDepthInbedAPPs(&pDepthInbedApps);
	m_pPrimaryView->GetLegRaisExcer(&pLegRaisExer);
	m_pPrimaryView->GetHandRaisExcer(&pHandRaisExer);

    if (ID_COLORSTREAM_PAUSE == commandId)
    {
        // Pause color stream
        if (m_pColorStream)
        {
            m_pColorStream->PauseStream(!previouslyChecked);
        }
    }
    else if (ID_COLORSTREAM_RESOLUTION_START <= commandId && ID_COLORSTREAM_RESOLUTION_END >= commandId)
    {
        // Set color stream format and resolution
        if (!m_pColorStream)
        {
            return;
        }

        switch (commandId)
        {
        case ID_RESOLUTION_RGBRESOLUTION640X480FPS30:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_RGBRESOLUTION1280X960FPS12:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_1280x960);
            break;

        case ID_RESOLUTION_YUVRESOLUTION640X480FPS15:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_YUV);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_INFRAREDRESOLUTION640X480FPS30:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_INFRARED);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_RAWBAYERRESOLUTION640X480FPS30:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_RAW_BAYER);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_RAWBAYERRESOLUTION1280X960FPS12:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_RAW_BAYER);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_1280x960);
            break;

        default:
            return;
        }

        m_pColorStream->OpenStream();
    }
    else if (ID_DEPTHSTREAM_PAUSE == commandId)
    {
        // Pause depth stream
        if(m_pDepthStream)
        {
            m_pDepthStream->PauseStream(!previouslyChecked);
        }
    }
    else if (ID_DEPTHSTREAM_RANGEMODE_START <= commandId && ID_DEPTHSTREAM_RANGEMODE_END >= commandId)
    {
        // Set depth stream range mode
        bool nearMode = false;
        switch (commandId)
        {
        case ID_RANGEMODE_DEFAULT:
            nearMode = false;
            break;

        case ID_RANGEMODE_NEAR:
            nearMode = true;
            break;

        default:
            return;
        }

        if (m_pDepthStream)
        {
            m_pDepthStream->SetNearMode(nearMode);
        }

        if (m_pSkeletonStream)
        {
            m_pSkeletonStream->SetNearMode(nearMode);
        }
    }
    else if (ID_DEPTHSTREAM_RESOLUTION_START <= commandId && ID_DEPTHSTREAM_RESOLUTION_END >= commandId)
    {
        // Set depth stream resolution
        NUI_IMAGE_RESOLUTION resolution = (NUI_IMAGE_RESOLUTION)(commandId - ID_DEPTHSTREAM_RESOLUTION_START);
        if (m_pDepthStream)
        {
            m_pDepthStream->OpenStream(resolution);
        }
    }
    else if (ID_DEPTHSTREAM_DEPTHTREATMENT_START <= commandId && ID_DEPTHSTREAM_DEPTHTREATMENT_END >= commandId)
    {
        // Set depth stream treatment mode
        DEPTH_TREATMENT treatment = (DEPTH_TREATMENT)(commandId - ID_DEPTHSTREAM_DEPTHTREATMENT_START);
        if (m_pDepthStream)
        {
            m_pDepthStream->SetDepthTreatment(treatment);
        }
    }
    else if (ID_SKELETONSTREAM_PAUSE == commandId)
    {
        // Pause skeleton stream
        if (m_pSkeletonStream)
        {
            m_pSkeletonStream->PauseStream(!previouslyChecked);
        }
    }
    else if (ID_SKELETONSTREAM_TRACKINGMODE_START <= commandId && ID_SKELETONSTREAM_TRACKINGMODE_END >= commandId)
    {
        // Set skeleton track mode
        if (!m_pSkeletonStream)
        {
            return;
        }

        switch (commandId)
        {
        case ID_TRACKINGMODE_DEFAULT:
            m_pSkeletonStream->SetSeatedMode(false);
            break;

        case ID_TRACKINGMODE_SEATED:
            m_pSkeletonStream->SetSeatedMode(true);
            break;

        default:
            return;
        }
    }
    else if (ID_SKELETONSTREAM_CHOOSERMODE_START <= commandId && ID_SKELETONSTREAM_CHOOSERMODE_END >= commandId)
    {
        // Set skeleton chooser mode
        if(!m_pSkeletonStream)
        {
            return;
        }

        m_pSkeletonStream->SetChooserMode(ConvertCommandIdToChooserMode(commandId));
    }
    else
    {
        switch (commandId)
        {
            // Bring up camera color setting dialog
        case ID_CAMERA_COLORSETTINGS:
            m_pColorSettingsView->ShowView();
            break;

            // Bring up camera exposure setting dialog
        case ID_CAMERA_EXPOSURESETTINGS:
            m_pExposureSettingsView->ShowView();
            break;

            // Switch the stream display on primary and secondary stream viewers
        case ID_VIEWS_SWITCH:
            if (m_pColorStream && m_pDepthStream)
            {
                m_pColorStream->SetStreamViewer(m_pDepthStream->SetStreamViewer(m_pColorStream->SetStreamViewer(nullptr)));
            }
            break;

        case ID_FORCE_OFF_IR:
            m_pNuiSensor->NuiSetForceInfraredEmitterOff(param);
            break;

		//////Recording
		case ID_RECORDING_AUDIO:
			if (m_pAudioStream && !m_pAudioStream->GetRecordingStauts())
			{
				m_pAudioStream->SetRecordingStatus(true);
			}
			else if (m_pAudioStream && m_pAudioStream->GetRecordingStauts())
			{
				m_pAudioStream->m_pWaveWriter->Stop();
				delete(m_pAudioStream->m_pWaveWriter);
				m_pAudioStream->m_pWaveWriter=NULL;
				m_pAudioStream->SetRecordingStatus(false);
			}
			break;

		case ID_RECORDING_RGB:
			if (m_pColorStream && !m_pColorStream->GetRecordingStauts())
			{
				m_pColorStream->SetRecordingStatus(true);
			}
			else if (m_pColorStream && m_pColorStream->GetRecordingStauts())
			{
				cvReleaseVideoWriter(&m_pColorStream->m_pwriter);
				m_pColorStream->m_pwriter=nullptr;
				m_pColorStream->SetRecordingStatus(false);
			}
			break;

		case ID_RECORDING_DEPTH:
			if (m_pDepthStream && !m_pDepthStream->GetRecordingStauts())
			{
				m_pDepthStream->SetRecordingStatus(true);
			} 
			else if (m_pDepthStream && m_pDepthStream->GetRecordingStauts())
			{
				cvReleaseVideoWriter(&m_pDepthStream->m_pwriter);
				m_pDepthStream->m_pwriter=nullptr;
				m_pDepthStream->SetRecordingStatus(false);
			}
			break;

		case ID_RECORDING_ALL:
			////RGB
			if (m_pColorStream && !m_pColorStream->GetRecordingStauts())
			{
				m_pColorStream->SetRecordingStatus(true);
			}
			else if (m_pColorStream && m_pColorStream->GetRecordingStauts())
			{
				cvReleaseVideoWriter(&m_pColorStream->m_pwriter);
				m_pColorStream->m_pwriter=nullptr;
				m_pColorStream->SetRecordingStatus(false);
			}
			////Depth
			if (m_pDepthStream && !m_pDepthStream->GetRecordingStauts())
			{
				m_pDepthStream->SetRecordingStatus(true);
			} 
			else if (m_pDepthStream && m_pDepthStream->GetRecordingStauts())
			{
				cvReleaseVideoWriter(&m_pDepthStream->m_pwriter);
				m_pDepthStream->m_pwriter=nullptr;
				m_pDepthStream->SetRecordingStatus(false);
			}
			//////Audio
			if (m_pAudioStream && !m_pAudioStream->GetRecordingStauts())
			{
				m_pAudioStream->SetRecordingStatus(true);
			}
			else if (m_pAudioStream && m_pAudioStream->GetRecordingStauts())
			{
				m_pAudioStream->m_pWaveWriter->Stop();
				delete(m_pAudioStream->m_pWaveWriter);
				m_pAudioStream->m_pWaveWriter=NULL;
				m_pAudioStream->SetRecordingStatus(false);
			}
			break;
		
		////Recording 3D facial model
		case ID_RECORDING_3DFACIALMODEL:
			if (!(pFaceTracker->GetFTRecordingStatus())) 
				pFaceTracker->SetFTRecordingStatus(true);
			else
			{
				pFaceTracker->SetFTRecordingStatus(false);
				pFaceTracker->ResetAUSUcounts();
				pFaceTracker->CloseAUSUfile();
			}
			break;
		////Speech recognition dication pad
		case ID_SPEECHRECOGNITION:
			/*if (!m_threadRun)
			{*/
				//m_hSpeechRecogThread=CreateThread(NULL, 0, m_pKinectWindow->SpeechRecogStaticThread, (PVOID)m_pKinectWindow, 0, 0);
				//m_threadRun=true;
		/*	}
			else
			{*/
				//if (m_hSpeechRecogThread)
				//{
				//	CSimpleDict* pSimpleDict;
				//	m_pKinectWindow->GetSimpleDict(&pSimpleDict);
				//	pSimpleDict->~CSimpleDict();
				//    WaitForSingleObject(m_hSpeechRecogThread, 200);
			 //       CloseHandle(m_hSpeechRecogThread);
				//    m_threadRun=false;
				//}
			//}
			break;

		////In-bed detection
		case ID_FALLDETECTION:
			if (!(pFallDetect->getIsRunFallDetect()))
			{
				pFallDetect->setIsRunFallDetect(TRUE);
				if (!m_FallDetectThreadRun)
				{
				    m_hFallDetectTxt2SpeechThread = CreateThread(NULL, 0, pFallDetect->Txt2SpeechStaticThread, (PVOID)pFallDetect, 0, 0);
			        m_FallDetectThreadRun = TRUE;
				}
			}
			else
			{
				pFallDetect->setIsRunFallDetect(FALSE);
				if (m_FallDetectThreadRun)
				{
					DWORD lpExitCode;
					GetExitCodeThread(m_hFallDetectTxt2SpeechThread, &lpExitCode);
					TerminateThread(m_hFallDetectTxt2SpeechThread, lpExitCode);
					WaitForSingleObject(m_hFallDetectTxt2SpeechThread, 200);
				    CloseHandle(m_hFallDetectTxt2SpeechThread);
				    m_FallDetectThreadRun = FALSE;
				}
			}
			break;

		case ID_MOVEMENTDETECTION:	
			if (!(pFallDetect->getIsRunMovementDetect()))
			{
				pFallDetect->setIsRunMovementDetect(TRUE);
			}
			else
			{
				pFallDetect->setIsRunMovementDetect(FALSE);
			}
			break;

		case ID_OUTOFBEDDETECTION: 		  
			break;

		case ID_LYANGLEDETECTION:
			if (!(pDepthInbedApps->getIsRunLyAngleDetect()))
			{
				pDepthInbedApps->setIsRunLyAngleDetect(TRUE);
			}
			else
			{
				pDepthInbedApps->setIsRunLyAngleDetect(FALSE);
			}
			break;

		case ID_CALLNURSINGBYHANDRAISING:
			if (!(pFallDetect->getIsRunHandsMovementRIC()))
			{
				pFallDetect->setIsRunHandsMovementRIC(TRUE);
				if (!m_NurseCallThreadRun)
				{
				    m_hNurseCallTxt2SpeechThread = CreateThread(NULL, 0, m_pPrimaryView->Txt2SpeechStaticThread, (PVOID)m_pPrimaryView, 0, 0);
			        m_NurseCallThreadRun = TRUE;
				}
				
			}
			else
			{
				pFallDetect->setIsRunHandsMovementRIC(FALSE);
				if (m_NurseCallThreadRun)
				{
					DWORD lpExitCode;
					GetExitCodeThread(m_hNurseCallTxt2SpeechThread, &lpExitCode);
					TerminateThread(m_hNurseCallTxt2SpeechThread, lpExitCode);
					WaitForSingleObject(m_hNurseCallTxt2SpeechThread, 200);
				    CloseHandle(m_hNurseCallTxt2SpeechThread);
				    m_NurseCallThreadRun = FALSE;
				}
			}
			break;

		case ID_VIEWDETECTIONRECS:
			if (!m_processFlag)
			{
				ViewDetectionRes();
			    m_processFlag = TRUE;
			}
			else
			{
				GetViewDetecResProcessStatus(&ExitCode);
				TerminateProcess(m_pi.hProcess, ExitCode);
				m_processFlag = FALSE;
			}
			break;

		case ID_STANDMOVELEGOUTWARD:
			if (!(pLegRaisExer->isRunningExcer()))
			{
				Sleep(5000);
				pLegRaisExer->setRunningExer(TRUE);
				if (!m_LegRaisexcerThreadRun)
				{
				    m_hLegRaisExcerTxt2SpeechThread = CreateThread(NULL, 0, pLegRaisExer->Txt2SpeechStaticThread, (PVOID)pLegRaisExer, 0, 0);
			        m_LegRaisexcerThreadRun = TRUE;
				}
			}
			else
			{
				pLegRaisExer->setRunningExer(FALSE);
				pLegRaisExer->Reset();
				if (m_LegRaisexcerThreadRun)
				{
					DWORD lpExitCode;
					GetExitCodeThread(m_hLegRaisExcerTxt2SpeechThread, &lpExitCode);
					TerminateThread(m_hLegRaisExcerTxt2SpeechThread, lpExitCode);
					WaitForSingleObject(m_hLegRaisExcerTxt2SpeechThread, 200);
				    CloseHandle(m_hLegRaisExcerTxt2SpeechThread);
				    m_LegRaisexcerThreadRun = FALSE;
				}
			}
			break;

		case ID_STANDARMSLIFTWEIGHTS:
			if (!(pHandRaisExer->isRunningExcer()))
			{
				Sleep(5000);
				pHandRaisExer->setRunningExer(TRUE);
				if (!m_HandRaisexcerThreadRun)
				{
				    m_hHandRaisExcerTxt2SpeechThread = CreateThread(NULL, 0, pHandRaisExer->Txt2SpeechStaticThread, (PVOID)pHandRaisExer, 0, 0);
			        m_HandRaisexcerThreadRun = TRUE;
				}
			}
			else
			{
				pHandRaisExer->setRunningExer(FALSE);
				pHandRaisExer->Reset();
				if (m_HandRaisexcerThreadRun)
				{
					DWORD lpExitCode;
					GetExitCodeThread(m_hHandRaisExcerTxt2SpeechThread, &lpExitCode);
					TerminateThread(m_hHandRaisExcerTxt2SpeechThread, lpExitCode);
					WaitForSingleObject(m_hHandRaisExcerTxt2SpeechThread, 200);
				    CloseHandle(m_hHandRaisExcerTxt2SpeechThread);
				    m_HandRaisexcerThreadRun = FALSE;
				}
			}
			break;

        default:
            break;
        }
    }
}
Code Example #11
File: main.cpp Project: scanavan/PoseEstimation
int main()
{
	try
	{
		FaceTracker faceTracker;
		faceTracker.Initialize();
		faceTracker.Start(true);
		FatigueDetection fatigueDetection;
		
		int simpleCounter = 0;

		int* IDPtr = faceTracker.GetIDs();

		//std::cout << *IDPtr << std::endl;

		for (;;) {
			IDPtr = faceTracker.GetIDs();
			if (simpleCounter % 15 == 0) {
				/*fatigueDetection.DetectFatigue(faceTracker.GetPose());
				fatigueDetection.DetectYawn(faceTracker.GetFaceFeatures());*/

				std::cout << '#' << simpleCounter / 15;
				
				int personNum = 0;
				for (; personNum < 6; personNum++) {
					if (*(IDPtr + personNum) != -1) {
						std::cout << "\t" << *(IDPtr + personNum) << '\t';
					}
				}

				std::cout << std::endl;

				if (faceTracker.FaceFound()) {
					std::cout << faceTracker.GetFaceFeatures();
					std::cout << "Is Yawning? : " << fatigueDetection.DetectYawn(faceTracker.GetFaceFeatures()) << std::endl;

				}
				
				else {
					std::cout << "FACE NOT FOUND! >:(" << std::endl;
					for (personNum = 0; personNum < 6; personNum++) {
						IDPtr[personNum] = -1;
					}
				}
				std::cout << std::endl;
			}
			simpleCounter++;

			if (cv::waitKey(33) == 'a')
			{
				break;
			}
		}
	}
	catch (FaceTrackerException& error)
	{
		std::cout << error.what() << std::endl;
	}
	return EXIT_SUCCESS;
}
Code Example #12
File: main.cpp Project: voidnoise/kswipe
/** @function main */
int main( int argc, const char** argv )
{
	// check arguments
//	if(argc < 2)
//	{
//		printf("ERROR: Please enter window position x & y\n");
//		exit(0);
//	}
//	
//	int windowX = atoi(argv[1]); // usually 5
//	int windowY = atoi(argv[2]); // usually 14
	int windowX = 1274; // usually 5
	int windowY = 280; // usually 14
	
	CvCapture* capture;
	IplImage* frame;
	FaceTracker* faceTracker = new FaceTracker("/opt/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml");
	capture = cvCaptureFromCAM(2);
	
	if(capture)
	{
		cvNamedWindow ("img", CV_WINDOW_AUTOSIZE);
		cvMoveWindow("img", windowX, windowY);
		
		//Set the background to black
		image = cvLoadImage("../../data/grandTetons_very_small.jpg", CV_LOAD_IMAGE_UNCHANGED);
		
		//background = cvCreateImage(cvSize(1024, 768),image->depth,image->nChannels);
		background = cvCreateImage(cvSize(resX, resY),image->depth,image->nChannels);
		
		bgMax = Point(background->width-1, background->height-1);
		cvRectangle(background, Point(0,0), Point(background->width,background->height), cvScalar(0, 0, 0), CV_FILLED, 8, 0);
		
		setupParticles();
		
		Face face;
		face.x = background->width/2;
		face.y = background->height/2;
		face.radius = 1;
		
		while( true )
		{
			
			frame = cvQueryFrame( capture );
			faceTracker->findFace(frame);
			
			if (faceTracker->numFaces) {
						
				// scale face position in relation to background width				
				int xPerc = frame->width - faceTracker->face.x; // invert to flip
				
				xPerc = (float)xPerc * ((float)image->width/(float)frame->width);
				xPerc += xOffset;
				
				
				int yPerc = faceTracker->face.y;
				
				yPerc = (float)yPerc * ((float)image->height/(float)frame->height);
				yPerc += yOffset;
				
				
				int rPerc = faceTracker->face.radius;	// scale face radius in relation to background width
				rPerc = (float)rPerc * ((float)image->width/(float)frame->width);
				
				//printf("x %d y %d r %d\n", xPerc, yPerc, rPerc);

				face.x = xPerc;
				face.y = yPerc;
				face.radius = rPerc;
				
				face.radius *= 1.8;
				
				cvRectangle(background, Point(0,0), Point(background->width,background->height), cvScalar(0, 0, 0), CV_FILLED, 8, 0);
				update(&face);
				
				// put video behind image
				cvFlip(frame, NULL, 1);		// flip image so it mirrors the user
				cvSetImageROI(background, cvRect(xOffset, yOffset, image->width-1, image->height));
				cvResize(frame, background, CV_INTER_LINEAR);
				cvResetImageROI(background);
			}
			else {
				face.x = 0;
				face.y = 0;
				face.radius = 1;
				cvRectangle(background, Point(0,0), Point(background->width,background->height), cvScalar(0, 0, 0), CV_FILLED, 8, 0);
				update(&face);
			}

			draw(background);
				
			cvShowImage("img", background);
			
			int c = waitKey(1);
			switch (c) {
				case 'c':
				case 'C':
					cursorMode = ( cursorMode + 1 > 1 ) ? 0 : 1 ; 
					break ; 
				case 's':
				case 'S':
					springEnabled = !springEnabled ; 
					break ; 
				case 'r':
				case 'R':
					reset(); 
					break ; 	
			
			}
			
			//cvReleaseImage(&frame);
		}
	}
	else{ printf("ERROR: Camera not loaded\n"); }
	
	return 0;
}
Code Example #13
File: AvatarInputs.cpp Project: DaveDubUK/hifi
void AvatarInputs::toggleCameraMute() {
    FaceTracker* faceTracker = Application::getInstance()->getSelectedFaceTracker();
    if (faceTracker) {
        faceTracker->toggleMute();
    }
}
Code Example #14
File: main.cpp Project: 23119841/face-analysis-sdk
int
run_video_mode(const Configuration &cfg,
	       const CommandLineArgument<std::string> &image_argument,
	       const CommandLineArgument<std::string> &landmarks_argument)
{
  FaceTracker *tracker = LoadFaceTracker(cfg.model_pathname.c_str());
  FaceTrackerParams *tracker_params = LoadFaceTrackerParams(cfg.params_pathname.c_str());

  assert(tracker);
  assert(tracker_params);

  cv::VideoCapture input(image_argument->c_str());
  if (!input.isOpened())
    throw make_runtime_error("Unable to open video file '%s'", image_argument->c_str());

  cv::Mat image;

  std::vector<char> pathname_buffer;
  pathname_buffer.resize(1000);

  input >> image;
  int frame_number = 1;

  while ((image.rows > 0) && (image.cols > 0)) {
    if (cfg.verbose) {
      printf(" Frame number %d\r", frame_number);
      fflush(stdout);
    }

    cv::Mat_<uint8_t> gray_image;
    if (image.type() == cv::DataType<cv::Vec<uint8_t,3> >::type)
      cv::cvtColor(image, gray_image, CV_BGR2GRAY);
    else if (image.type() == cv::DataType<uint8_t>::type)
      gray_image = image;
    else
      throw make_runtime_error("Do not know how to convert video frame to a grayscale image.");

    int result = tracker->Track(gray_image, tracker_params);

    std::vector<cv::Point_<double> > shape;
    std::vector<cv::Point3_<double> > shape3D;
    Pose pose;

    if (result >= cfg.tracking_threshold) {
      shape = tracker->getShape();
      shape3D = tracker->get3DShape();
      pose = tracker->getPose();
    } else {
      tracker->Reset();
    }

    if (!have_argument_p(landmarks_argument)) {
      display_data(cfg, image, shape, pose);
    } else if (shape.size() > 0) {
      snprintf(pathname_buffer.data(), pathname_buffer.size(), landmarks_argument->c_str(), frame_number);

      if (cfg.save_3d_points)	
	save_points3(pathname_buffer.data(), shape3D);
      else
	save_points(pathname_buffer.data(), shape);

      if (cfg.verbose)
	display_data(cfg, image, shape, pose);
    } else if (cfg.verbose) {
      display_data(cfg, image, shape, pose);
    }

    input >> image;
    frame_number++;
  }

  delete tracker;
  delete tracker_params; 

  return 0;
}
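Note how the landmarks argument doubles as a printf-style template here: snprintf expands it with the current frame number to produce one landmark file per frame. A minimal standalone illustration of that expansion, using a hypothetical template string:

#include <cstdio>

int main() {
    char pathname[1000];
    // Hypothetical template "landmarks/frame-%05d.pts" expanded for frame 7,
    // just as the snprintf call in run_video_mode does once per frame.
    std::snprintf(pathname, sizeof(pathname), "landmarks/frame-%05d.pts", 7);
    std::printf("%s\n", pathname);  // prints landmarks/frame-00007.pts
    return 0;
}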
Code Example #15
File: main.cpp Project: 23119841/face-analysis-sdk
// Helpers
int
run_lists_mode(const Configuration &cfg,
	       const CommandLineArgument<std::string> &image_argument,
	       const CommandLineArgument<std::string> &landmarks_argument)
{
  FaceTracker * tracker = LoadFaceTracker(cfg.model_pathname.c_str());
  FaceTrackerParams *tracker_params  = LoadFaceTrackerParams(cfg.params_pathname.c_str());

  std::list<std::string> image_pathnames = read_list(image_argument->c_str());
  std::list<std::string> landmark_pathnames;
  if (have_argument_p(landmarks_argument)) {
    landmark_pathnames = read_list(landmarks_argument->c_str());
    if (landmark_pathnames.size() != image_pathnames.size())
      throw make_runtime_error("Number of pathnames in list '%s' does not match the number in '%s'",
			       image_argument->c_str(), landmarks_argument->c_str());
  }

  std::list<std::string>::const_iterator image_it     = image_pathnames.begin();
  std::list<std::string>::const_iterator landmarks_it = landmark_pathnames.begin();
  const int number_of_images = image_pathnames.size();
  int current_image_index = 1;

  for (; image_it != image_pathnames.end(); image_it++) {
    if (cfg.verbose) {
      printf(" Image %d/%d\r", current_image_index, number_of_images);    
      fflush(stdout);
    }
    current_image_index++;

    cv::Mat image;
    cv::Mat_<uint8_t> gray_image = load_grayscale_image(image_it->c_str(), &image);
    int result = tracker->NewFrame(gray_image, tracker_params);

    std::vector<cv::Point_<double> > shape;
    std::vector<cv::Point3_<double> > shape3D;
    Pose pose;
    if (result >= cfg.tracking_threshold) {
      shape = tracker->getShape();
      shape3D = tracker->get3DShape();
      pose = tracker->getPose();
    } else {
      tracker->Reset();
    }

    if (!have_argument_p(landmarks_argument)) {
      display_data(cfg, image, shape, pose);
    } else if (shape.size() > 0) {
      if (cfg.save_3d_points)	
	save_points3(landmarks_it->c_str(), shape3D);
      else
	save_points(landmarks_it->c_str(), shape);

      if (cfg.verbose)
	display_data(cfg, image, shape, pose);
    } else if (cfg.verbose) {
      display_data(cfg, image, shape, pose);
    }

    if (have_argument_p(landmarks_argument))
      landmarks_it++;
  }  

  delete tracker;
  delete tracker_params; 
  
  return 0;
}