Example 1
TEST (KalmanFilterTest, Works) {
    KalmanFilter *kf = new KalmanFilter(true);

    ufvector4 initX = boost::numeric::ublas::zero_vector<float> (4);
    initX(0) = 50.f;
    initX(1) = 0.f;
    initX(2) = 0.f;
    initX(3) = 0.f;
    ufmatrix4 initCov = boost::numeric::ublas::identity_matrix<float> (4);
    initCov(0,0) = 5.f;
    initCov(1,1) = 5.f;
    initCov(2,2) = 0.f;
    initCov(3,3) = 0.f;

    kf->initialize(initX, initCov);

    kf->predict(kf->genOdometry(-5.f, 0.f, 0.f),10.f);
    kf->updateWithObservation(kf->genVisBall(58.f, 0.f));

    kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
    kf->updateWithObservation(kf->genVisBall(55.f, .05f));
    kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
    kf->updateWithObservation(kf->genVisBall(55.f, .05f));
    kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
    kf->updateWithObservation(kf->genVisBall(55.f, .05f));
    kf->predict(kf->genOdometry(0.f, 0.f, 0.f),.1f);
    kf->updateWithObservation(kf->genVisBall(83.f, .05f));
    delete kf;
}
Example 2
int main(int /*argc*/, char * /*argv*/[]) {
	std::srand(std::time(NULL));

	/// We want a simple 2d state
	typedef SimpleState<2> state_t;

	/// Our process model is the simplest possible: doesn't change the mean
	typedef ConstantProcess<2, state_t> process_t;

	/// Create a kalman filter instance with our chosen state and process types
	KalmanFilter<state_t, process_t> kf;

	/// Set our process model's variance
	kf.processModel.sigma = state_t::VecState::Constant(6.5);

	double dt = 0.5;
	double sumSquaredError = 0;
	
	const double measurementVariance = 9; //NOISE_AMPLITUDE / 2.0;

	/// Generate the sine-curve test data
	std::vector<StatePair> data = generateSineData(dt);
	/// CSV header row
	std::cout << "actual x,actual y,measurement x,measurement y,filtered x,filtered y,squared error" << std::endl;
	double t = 0;
	for (unsigned int i = 0; i < data.size(); ++i) {
		/// Predict step: Update Kalman filter by predicting ahead by dt
		kf.predict(dt);

		/// "take a measurement" - in this case, noisify the actual measurement
		Eigen::Vector2d pos = data[i].first;
		Eigen::Vector2d m = data[i].second;
		AbsoluteMeasurement<state_t> meas;
		meas.measurement = m;
		meas.covariance = Eigen::Vector2d::Constant(measurementVariance).asDiagonal();

		/// Correct step: incorporate information from measurement into KF's state
		kf.correct(meas);

		/// Output info for csv
		double squaredError = (pos[0] - kf.state.x[0]) * (pos[0] - kf.state.x[0]);
		sumSquaredError += squaredError;
		std::cout << pos[0] << "," << pos[1] << ",";
		std::cout << meas.measurement[0] << "," << meas.measurement[1] << ",";
		std::cout << kf.state.x[0] << "," << kf.state.x[1] << ",";
		std::cout << squaredError;
		std::cout << std::endl;
		
		t += dt;

	}
	std::cerr << "Sum squared error: " << sumSquaredError << std::endl;
	return 0;

}
Example 3
int Kalman_Perdict(KalmanFilter &F)
{
	if(notstarted[0] and notstarted[1] and notstarted[2])
	{
		Mat prediction = F.predict();
		Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
		MyFilledCircle(skin2,predictPt,1);
	}
	return 0;
}
Example 4
KalmanFilter execute_time_step(KalmanFilter KF,
			      Mat TransitionModel,
			      Mat MeasurementModel,
			      Mat ControlModel,
			      Mat measurement,
			      Mat control)
{
  KF.transitionMatrix = TransitionModel;
  KF.measurementMatrix = MeasurementModel;
  KF.controlMatrix = ControlModel;
  KF.predict(control);
  KF.correct(measurement);
  return KF;  
}
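
A note on Example 4: cv::KalmanFilter stores its state in cv::Mat members, so the by-value copy above still shares those buffers with the caller's filter at the moment of the call, which makes the copy-and-return style misleading. A hedged pass-by-reference variant, using only the calls the example already uses:

#include <opencv2/video/tracking.hpp>

// Sketch only: update the filter in place instead of copying it.
void execute_time_step_inplace(cv::KalmanFilter &KF,
                               const cv::Mat &TransitionModel,
                               const cv::Mat &MeasurementModel,
                               const cv::Mat &ControlModel,
                               const cv::Mat &measurement,
                               const cv::Mat &control)
{
  KF.transitionMatrix  = TransitionModel;
  KF.measurementMatrix = MeasurementModel;
  KF.controlMatrix     = ControlModel;
  KF.predict(control);
  KF.correct(measurement);
}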
Example 5
int main(int argc, char **argv)
{
  ros::init(argc, argv, "pose_filter");
  ros::NodeHandle nh;

  tf::TransformListener lr;
  tf::StampedTransform laser_trans, kinect_trans;

  geometry_msgs::PoseStamped target_pose;
  ros::Publisher pub = nh.advertise<geometry_msgs::PoseStamped>("leader_pose", 1);
  target_pose.pose.position.z = 0;
  target_pose.pose.orientation = tf::createQuaternionMsgFromYaw(0);
  target_pose.header.frame_id = "map";

  Mat measurement = Mat(2, 1, CV_32FC1);
  KalmanFilter kalman = KalmanFilter(4,2,0);
  setup_kalman(kalman);

  ros::Rate kalman_rate(KALMAN_HZ);
  while(ros::ok())
  {
	try
	{
		lr.lookupTransform("map", "laser_target", ros::Time(0), laser_trans);
	}
	catch(tf::TransformException ex) {}
	/*try
	{
		lr.lookupTransform("odom", "kinect_target", ros::Time(0), kinect_trans);
	}
	catch(tf::TransformException ex) {}

	measurement.at<float>(0,0) = (laser_trans.getOrigin().x() + kinect_trans.getOrigin().x())/2;
	measurement.at<float>(1,0) = (laser_trans.getOrigin().y() + kinect_trans.getOrigin().y())/2;*/
	measurement.at<float>(0,0) = laser_trans.getOrigin().x();
	measurement.at<float>(1,0) = laser_trans.getOrigin().y();

	Mat predict = kalman.predict();
	kalman.correct(measurement);

	target_pose.pose.position.x = kalman.statePost.at<float>(0,0);
	target_pose.pose.position.y = kalman.statePost.at<float>(1,0);
	target_pose.header.stamp = laser_trans.stamp_;

	pub.publish(target_pose);
	kalman_rate.sleep();
  }

  return 0;
}
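
setup_kalman() is not shown in Example 5. As a point of reference only, a minimal constant-velocity setup for a 4-state / 2-measurement cv::KalmanFilter like the one constructed above might look as follows; the noise magnitudes are assumptions, not values from the original code.

#include <opencv2/video/tracking.hpp>

// Hypothetical sketch of setup_kalman(): state [x, y, vx, vy], measurement [x, y].
void setup_kalman(cv::KalmanFilter &kalman)
{
  kalman.transitionMatrix = (cv::Mat_<float>(4, 4) <<
      1, 0, 1, 0,
      0, 1, 0, 1,
      0, 0, 1, 0,
      0, 0, 0, 1);                                                    // constant-velocity model
  cv::setIdentity(kalman.measurementMatrix);                          // H: observe x, y
  cv::setIdentity(kalman.processNoiseCov,     cv::Scalar::all(1e-4)); // Q (assumed)
  cv::setIdentity(kalman.measurementNoiseCov, cv::Scalar::all(1e-1)); // R (assumed)
  cv::setIdentity(kalman.errorCovPost,        cv::Scalar::all(1));    // initial P (assumed)
}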
Example 6
cv::Point getKalmanCentroid(int x, int y)
{
  cv::Point kalman_centroid;
  mouse_info.x = x;
  mouse_info.y = y;
  
  Mat prediction = KF.predict();
  Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
  
  measurement(0) = mouse_info.x;
  measurement(1) = mouse_info.y;
  
  Point measPt(measurement(0),measurement(1));
  mousev.push_back(measPt);
  // generate measurement
  //measurement += KF.measurementMatrix*state;
  
  Mat estimated = KF.correct(measurement);
  Point statePt(estimated.at<float>(0),estimated.at<float>(1));
  kalmanv.push_back(statePt);
  
  //drawCross( statePt, Scalar(0,255,0), 5 ); // Kalman
  //drawCross( measPt, Scalar(0,0,255), 5 ); // Original
    
  // for (int i = 0; i < mousev.size()-1; i++) {
  //   line(kalman_img, mousev[i], mousev[i+1], Scalar(255,255,0), 1);
  // }
  // for (int i = 0; i < kalmanv.size()-1; i++) {
  //   line(kalman_img, kalmanv[i], kalmanv[i+1], Scalar(0,255,0), 1);
  // }
  
  // if (contador > 30)
  //   {
  //     initializeKalman();
  //   }
  // else
  //   {
  //     //ROS_INFO(">>> MEDIDA(%d,%d) KALMAN(%d,%d)", measPt.x, measPt.y, statePt.x, statePt.y);
  //   }
  //imshow("mi img", img);

  //imshow("Kalman", kalman_img);
  kalman_centroid.x = statePt.x;
  kalman_centroid.y = statePt.y; 
  return kalman_centroid;
}
Example 7
vector<Point> Widget::findKalmanCenters(vector<Point> dataInput){
    vector<Point> kalmanCenters;
    if(dataInput.empty()){
        errorMsg("Nao ha objetos a serem detectados!");
        return kalmanCenters;
    }
    kalmanCenters.resize(dataInput.size());
    KalmanFilter *KF;

    for(unsigned int i = 0; i < targets.size(); i++){
        KF = targets[i]->KF;

        // just converting between the data structures used
        Mat_<float> measurement(2,1);
        measurement(0) = dataInput[i].x;
        measurement(1) = dataInput[i].y;

        Mat estimated;
        Mat prediction = KF->predict();
        if(detectionSucess[i] == true)
            {
            // if the detection succeeded, feed the new measurement into the filter;
            // calling correct() already incorporates it into the filter
            estimated = KF->correct(measurement);
            }
        else{
            /* if the measurement failed, feed the filter back with its own
            previous prediction */
            Mat_<float> predictedMeasurement(2,1);
            predictedMeasurement(0) = prediction.at<float>(0);
            predictedMeasurement(1) = prediction.at<float>(1);
            estimated = KF->correct(predictedMeasurement);
            KF->errorCovPre.copyTo(KF->errorCovPost);
            // copy errorCovPre into errorCovPost [a tip from a user on some forum]
            }
        Point statePt(estimated.at<float>(0),estimated.at<float>(1));

        kalmanCenters[i] = statePt;
        /* there is the center given by the prediction and the center the Kalman filter
        believes to be the real one; the Kalman center is a weighting of the measurements
        and the model, given prior knowledge of a random error */
    }

    return kalmanCenters;
}
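
Regarding the missed-detection branch in Example 7: in the OpenCV releases I am familiar with, cv::KalmanFilter::predict() already copies statePre into statePost and errorCovPre into errorCovPost, so simply skipping correct() when the detection fails keeps the propagated prediction (and its grown covariance) as the current estimate, without re-injecting the prediction as a fake measurement. A minimal sketch of that alternative, under that assumption:

#include <opencv2/video/tracking.hpp>

// Sketch only: one tracking step where the measurement may be missing.
cv::Point stepWithOptionalMeasurement(cv::KalmanFilter &KF, const cv::Point *detection)
{
  KF.predict();                                  // also refreshes statePost / errorCovPost
  if (detection != NULL) {
    cv::Mat_<float> measurement(2, 1);
    measurement(0) = (float)detection->x;
    measurement(1) = (float)detection->y;
    KF.correct(measurement);                     // fuse the real observation
  }
  // With no detection, statePost already holds the propagated prediction.
  return cv::Point(cvRound(KF.statePost.at<float>(0)),
                   cvRound(KF.statePost.at<float>(1)));
}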
Example 8
TEST (KalmanFilterTest, FilterCanPredict) {
    KalmanFilter *kf = new KalmanFilter(false);
    kf->initialize();
    float initX = 0.f;
    float initY = 0.f;
    float initXVel = 1.f;
    float initYVel = 0.5f;
    float finalX = 4.f;
    float finalY = 2.f;
    ASSERT_EQ(initX, kf->getRelXPosEst());
    ASSERT_EQ(initY, kf->getRelYPosEst());

    messages::RobotLocation odometry;
    odometry.set_x(0.f);
    odometry.set_y(0.f);
    odometry.set_h(0.f);
    kf->predict(odometry, 4.f);

    ASSERT_EQ(finalX, kf->getRelXPosEst());
    ASSERT_EQ(finalY, kf->getRelYPosEst());
    delete kf;
}
Example 9
JNIEXPORT jlong JNICALL Java_org_opencv_video_KalmanFilter_predict_11
  (JNIEnv* env, jclass , jlong self)
{
    try {
        LOGD("video::predict_11()");
        KalmanFilter* me = (KalmanFilter*) self; //TODO: check for NULL
        Mat _retval_ = me->predict(  );
        return (jlong) new Mat(_retval_);
    } catch(cv::Exception e) {
        LOGD("video::predict_11() caught cv::Exception: %s", e.what());
        jclass je = env->FindClass("org/opencv/core/CvException");
        if(!je) je = env->FindClass("java/lang/Exception");
        env->ThrowNew(je, e.what());
        return 0;
    } catch (...) {
        LOGD("video::predict_11() caught unknown exception (...)");
        jclass je = env->FindClass("java/lang/Exception");
        env->ThrowNew(je, "Unknown exception in JNI code {video::predict_11()}");
        return 0;
    }
}
Example 10
vector<vector<Point> > Widget::predictFuture(int futureSize){
    vector<vector<Point> > future;
    future.resize(targets.size());

    for(unsigned int i = 0; i < targets.size(); i++)
        {
        KalmanFilter *KF;
        KF = targets[i]->KF;

        KalmanFilter DelfusOracle = myMath::copyKF(*KF);
            /* to look into the future we copy the current filter's state and
               feed it back with its own predictions a fixed number of times */
        for(int j = 0; j < futureSize; j++)
                // COMPUTE THE FUTURE POINTS
            {

            Mat prediction = DelfusOracle.predict();
            Point predictPt(prediction.at<float>(0),prediction.at<float>(1));

            future[i].push_back(predictPt);

            Mat_<float> predictedMeasurement(2,1);
            predictedMeasurement(0) = prediction.at<float>(0);
            predictedMeasurement(1) = prediction.at<float>(1);

            DelfusOracle.correct(predictedMeasurement);

            DelfusOracle.errorCovPre.copyTo(DelfusOracle.errorCovPost);
                // copy errorCovPre into errorCovPost, following a forum tip
                // Vidal does not like this tip; he says it fools the filter
            }


        }//end for each color

    return future;
}
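
myMath::copyKF is not shown in Example 10. Since cv::KalmanFilter's members are cv::Mat objects, a plain copy shares their buffers with the original, so a deep copy is needed before rolling the filter forward. A hedged sketch of such a helper plus the roll-forward loop; predict() alone already propagates both the state and the covariance, so the correct()-with-own-prediction feedback in the example is not strictly required for this purpose.

#include <opencv2/video/tracking.hpp>
#include <vector>

// Assumed stand-in for myMath::copyKF: clone every matrix so the copy is independent.
cv::KalmanFilter cloneKF(const cv::KalmanFilter &src)
{
  cv::KalmanFilter dst = src;                        // copies Mat headers only
  dst.statePre            = src.statePre.clone();
  dst.statePost           = src.statePost.clone();
  dst.transitionMatrix    = src.transitionMatrix.clone();
  dst.controlMatrix       = src.controlMatrix.clone();
  dst.measurementMatrix   = src.measurementMatrix.clone();
  dst.processNoiseCov     = src.processNoiseCov.clone();
  dst.measurementNoiseCov = src.measurementNoiseCov.clone();
  dst.errorCovPre         = src.errorCovPre.clone();
  dst.errorCovPost        = src.errorCovPost.clone();
  dst.gain                = src.gain.clone();
  return dst;
}

// Roll the copied filter forward futureSize steps without any measurements.
std::vector<cv::Point> predictAhead(const cv::KalmanFilter &KF, int futureSize)
{
  cv::KalmanFilter oracle = cloneKF(KF);
  std::vector<cv::Point> pts;
  for (int j = 0; j < futureSize; j++) {
    cv::Mat p = oracle.predict();
    pts.push_back(cv::Point(cvRound(p.at<float>(0)), cvRound(p.at<float>(1))));
  }
  return pts;
}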
Example 11
Point kalmanPredict() 
{
    Mat prediction = KF.predict();
    Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
    return predictPt;
}
Example 12
void CHumanTracker::detectAndTrackFace()
{
    static ros::Time probe;

	// Do ROI
	debugFrame = rawFrame.clone();
	Mat img =  this->rawFrame(searchROI);

	faces.clear();
	ostringstream txtstr;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
        CV_RGB(0,128,255),
        CV_RGB(0,255,255),
        CV_RGB(0,255,0),
        CV_RGB(255,128,0),
        CV_RGB(255,255,0),
        CV_RGB(255,0,0),
        CV_RGB(255,0,255)} ;
    Mat gray;
    Mat frame( cvRound(img.rows), cvRound(img.cols), CV_8UC1 );
    cvtColor( img, gray, CV_BGR2GRAY );
    resize( gray, frame, frame.size(), 0, 0, INTER_LINEAR );
    //equalizeHist( frame, frame );

	// This is for internal usage
    const ros::Time _n = ros::Time::now();
	double dt = (_n - probe).toSec();
	probe = _n;


	CvMat _image = frame;

    if (!storage.empty())
    {
        cvClearMemStorage(storage);
    }
    CvSeq* _objects = cvHaarDetectObjects(&_image, cascade, storage,
			1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);

	vector<CvAvgComp> vecAvgComp;
	Seq<CvAvgComp>(_objects).copyTo(vecAvgComp);

	// End of using C API

	isFaceInCurrentFrame = (vecAvgComp.size() > 0);

    // This is a hack
    bool isProfileFace = false;
    if ((profileHackEnabled) && (!isFaceInCurrentFrame) && (trackingState == STATE_REJECT))
    {
        ROS_DEBUG("Using Profile Face hack ...");

        if (!storageProfile.empty()) {
            cvClearMemStorage(storageProfile);
        }
        CvSeq* _objectsProfile = cvHaarDetectObjects(&_image, cascadeProfile, storageProfile,
                1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);
        vecAvgComp.clear();
        Seq<CvAvgComp>(_objectsProfile).copyTo(vecAvgComp);
        isFaceInCurrentFrame = (vecAvgComp.size() > 0);
        if (isFaceInCurrentFrame)
        {
            ROS_DEBUG("The hack seems to work!");
        }
        isProfileFace = true;
    }

	if (trackingState == STATE_LOST)
	{
		if (isFaceInCurrentFrame)
		{
			stateCounter++;
			trackingState = STATE_DETECT;
		}
	}

	if (trackingState == STATE_DETECT)
	{
		if (isFaceInCurrentFrame)
		{
			stateCounter++;
		}
		else
		{
			stateCounter = 0;
			trackingState = STATE_LOST;
		}

		if (stateCounter > minDetectFrames)
		{
			stateCounter = 0;
			trackingState = STATE_TRACK;
		}

	}

	if (trackingState == STATE_TRACK)
	{
		if (!isFaceInCurrentFrame)
		{
			trackingState = STATE_REJECT;
		}
	}

	if (trackingState == STATE_REJECT)
	{
		float covNorm = sqrt(
                        pow(KFTracker.errorCovPost.at<float>(0,0), 2) +
                        pow(KFTracker.errorCovPost.at<float>(1,1), 2)
						);

		if (!isFaceInCurrentFrame)
		{
			stateCounter++;
		}
		else
		{
			stateCounter = 0;
			trackingState = STATE_TRACK;
		}

		if ((stateCounter > minRejectFrames) && (covNorm > maxRejectCov))
		{
			trackingState = STATE_LOST;
			stateCounter = 0;
			resetKalmanFilter();
            reset();
		}
	}

	if ((trackingState == STATE_TRACK) || (trackingState == STATE_REJECT))
	{
		bool updateFaceHist = false;
		// This is important:

        KFTracker.transitionMatrix.at<float>(0,2) = dt;
        KFTracker.transitionMatrix.at<float>(1,3) = dt;

        Mat pred = KFTracker.predict();

		if (isFaceInCurrentFrame)
		{
            //std::cout << vecAvgComp.size() << " detections in image " << std::endl;
			float minCovNorm = 1e24;
			int i = 0;
			for( vector<CvAvgComp>::const_iterator rr = vecAvgComp.begin(); rr != vecAvgComp.end(); rr++, i++ )
			{
				copyKalman(KFTracker, MLSearch);
				CvRect r = rr->rect;
				r.x += searchROI.x;
				r.y += searchROI.y;
				double nr = rr->neighbors;
				Point center;
				Scalar color = colors[i%8];

				float normFaceScore = 1.0 - (nr / 40.0);
				if (normFaceScore > 1.0) normFaceScore = 1.0;
				if (normFaceScore < 0.0) normFaceScore = 0.0;
                setIdentity(MLSearch.measurementNoiseCov, Scalar_<float>::all(normFaceScore));

				center.x = cvRound(r.x + r.width*0.5);
				center.y = cvRound(r.y + r.height*0.5);

				measurement.at<float>(0) = r.x;
				measurement.at<float>(1) = r.y;
				measurement.at<float>(2) = r.width;
				measurement.at<float>(3) = r.height;

                MLSearch.correct(measurement);

				float covNorm = sqrt(
                    pow(MLSearch.errorCovPost.at<float>(0,0), 2) +
                    pow(MLSearch.errorCovPost.at<float>(1,1), 2)
				);

				if (covNorm < minCovNorm)
				{
					minCovNorm = covNorm;
					MLFace = *rr;
				}

//                if ((debugLevel & 0x02) == 0x02)
//                {
                    rectangle(debugFrame, center - Point(r.width*0.5, r.height*0.5), center + Point(r.width*0.5, r.height * 0.5), color);

                    txtstr.str("");
                    txtstr << "   Sc:" << rr->neighbors << " S:" << r.width << "x" << r.height;

                    putText(debugFrame, txtstr.str(), center, FONT_HERSHEY_PLAIN, 1, color);
//                }
			}

			// TODO: I'll fix this shit
            Rect r(MLFace.rect);
			r.x += searchROI.x;
			r.y += searchROI.y;
			faces.push_back(r);
			double nr = MLFace.neighbors;
			faceScore = nr;
            if (isProfileFace) faceScore = 0.0;
			float normFaceScore = 1.0 - (nr / 40.0);
			if (normFaceScore > 1.0) normFaceScore = 1.0;
			if (normFaceScore < 0.0) normFaceScore = 0.0;
            setIdentity(KFTracker.measurementNoiseCov, Scalar_<float>::all(normFaceScore));
			measurement.at<float>(0) = r.x;
			measurement.at<float>(1) = r.y;
			measurement.at<float>(2) = r.width;
			measurement.at<float>(3) = r.height;
            KFTracker.correct(measurement);

            // We see a face
			updateFaceHist = true;
		}
		else
		{
            KFTracker.statePost = KFTracker.statePre;
            KFTracker.errorCovPost = KFTracker.errorCovPre;
		}

        // TODO: MOVE THIS
        for (unsigned int k = 0; k < faces.size(); k++) {
            rectangle(debugFrame, faces.at(k), CV_RGB(128, 128, 128));
        }

        beleif.x = max<int>(KFTracker.statePost.at<float>(0), 0);
        beleif.y = max<int>(KFTracker.statePost.at<float>(1), 0);
        beleif.width = min<int>(KFTracker.statePost.at<float>(4), iWidth - beleif.x);
        beleif.height = min<int>(KFTracker.statePost.at<float>(5), iHeight - beleif.y);

		Point belCenter;
		belCenter.x = beleif.x + (beleif.width * 0.5);
		belCenter.y = beleif.y + (beleif.height * 0.5);

        if ((debugLevel & 0x02) == 0x02)
        {
            txtstr.str("");
//            txtstr << "P:" << std::setprecision(3) << faceUncPos << " S:" << beleif.width << "x" << beleif.height;
//            putText(debugFrame, txtstr.str(), belCenter + Point(0, 50), FONT_HERSHEY_PLAIN, 2, CV_RGB(255,0,0));

//            circle(debugFrame, belCenter, belRad, CV_RGB(255,0,0));
//            circle(debugFrame, belCenter, (belRad - faceUncPos < 0) ? 0 : (belRad - faceUncPos), CV_RGB(255,255,0));
//            circle(debugFrame, belCenter, belRad + faceUncPos, CV_RGB(255,0,255));
        }

        //searchROI.x = max<int>(belCenter.x - KFTracker.statePost.at<float>(4) * 2, 0);
        //searchROI.y = max<int>(belCenter.y - KFTracker.statePost.at<float>(5) * 2, 0);
        //int x2 = min<int>(belCenter.x + KFTracker.statePost.at<float>(4) * 2, iWidth);
        //int y2 = min<int>(belCenter.y + KFTracker.statePost.at<float>(4) * 2, iHeight);
		//searchROI.width = x2 - searchROI.x;
		//searchROI.height = y2 - searchROI.y;


		if ((updateFaceHist) && (skinEnabled))
		{
            //updateFaceHist is true when we see a real face (not all the times)
            Rect samplingWindow;
//            samplingWindow.x = beleif.x + (0.25 * beleif.width);
//            samplingWindow.y = beleif.y + (0.1 * beleif.height);
//            samplingWindow.width = beleif.width * 0.5;
//            samplingWindow.height = beleif.height * 0.9;
            samplingWindow.x = measurement.at<float>(0) + (0.25 * measurement.at<float>(2));
            samplingWindow.y = measurement.at<float>(1) + (0.10 * measurement.at<float>(3));
            samplingWindow.width = measurement.at<float>(2) * 0.5;
            samplingWindow.height = measurement.at<float>(3) * 0.9;
            if ((debugLevel & 0x04) == 0x04)
            {
                rectangle(debugFrame, samplingWindow, CV_RGB(255,0,0));
            }
			Mat _face = rawFrame(samplingWindow);
			generateRegionHistogram(_face, faceHist);
		}


	}


//    if ((debugLevel & 0x02) == 0x02)
//    {
//        rectangle(debugFrame, searchROI, CV_RGB(0,0,0));
//        txtstr.str("");
//        txtstr << strStates[trackingState] << "(" << std::setprecision(3) << (dt * 1e3) << "ms )";
//        putText(debugFrame, txtstr.str() , Point(30,300), FONT_HERSHEY_PLAIN, 2, CV_RGB(255,255,255));
//    }

//	dt =  ((double) getTickCount() - t) / ((double) getTickFrequency()); // In Seconds
}
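
Two details of Example 12 worth isolating: the transition-matrix entries that couple position and velocity are rewritten with the measured frame interval before every predict(), and the measurement noise is scaled with the detector's confidence. A minimal sketch of the variable-dt part, assuming a state layout of [x, y, vx, vy, w, h] as KFTracker appears to use:

#include <opencv2/video/tracking.hpp>

// Sketch only: refresh the dt-dependent terms of a constant-velocity model
// before each prediction, as the tracker above does.
void predictWithDt(cv::KalmanFilter &KF, float dt)
{
  KF.transitionMatrix.at<float>(0, 2) = dt;   // x += vx * dt
  KF.transitionMatrix.at<float>(1, 3) = dt;   // y += vy * dt
  KF.predict();
}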
Example 13
void *thread_sensor(void* arg)
{
	//setup();
	/*
	mpu6050.Init();

	measurement.setTo(Scalar(0));
	kalman.transitionMatrix = 
		*(Mat_<float>(4, 4) << 
		1, 0, 1, 0, 
		0, 1, 0, 1, 
		0, 0, 1, 0, 
		0, 0, 0, 1);
	kalman.statePre.at<float>(0) = mpu6050.accelX();
	kalman.statePre.at<float>(1) = mpu6050.accelY();
	kalman.statePre.at<float>(2) = mpu6050.accelZ();
	kalman.statePre.at<float>(3) = 0.0;
	setIdentity(kalman.measurementMatrix);
	setIdentity(kalman.processNoiseCov, Scalar::all(1e-4));
	setIdentity(kalman.measurementNoiseCov, Scalar::all(10));
	setIdentity(kalman.errorCovPost, Scalar::all(.1));
	*/

	//double x, y, z;
	//double xSpeed = 0, ySpeed = 0, zSpeed = 0;
	//double X = 0, Y = 0, Z = 0;

	Quaternion q;           // [w, x, y, z]         quaternion container

	while (1)
	{
		kalman.predict();

		/*
		x = mpu6050.accelX();
		y = mpu6050.accelY();
		z = mpu6050.accelZ();

		measurement(0) = x;
		measurement(1) = y;
		measurement(2) = z;
		*/

		readFIFO();

		// Calculate Gravity and Yaw, Pitch, Roll
		mpu.dmpGetQuaternion(&q, fifoBuffer);
		mpu.dmpGetAccel(&accelRaw, fifoBuffer);
		mpu.dmpGetGravity(&gravity, &q);
		mpu.dmpGetLinearAccel(&accelReal, &accelRaw, &gravity);
		mpu.dmpGetLinearAccelInWorld(&accelWorld, &accelReal, &q);

		measurement(0) = accelWorld.x;
		measurement(1) = accelWorld.y;
		measurement(2) = accelWorld.z;
		estimated = kalman.correct(measurement);
		
		/*
		xSpeed += estimated.at<float>(0) * INTERVAL / 1000; // speed m/s
		ySpeed += estimated.at<float>(1) * INTERVAL / 1000;
		zSpeed += estimated.at<float>(2) * INTERVAL / 1000;
		X += xSpeed * INTERVAL / 1000; // speed * INTERVAL / 1000 * 1000 displacement in mm
		Y += ySpeed * INTERVAL / 1000;
		Z += zSpeed * INTERVAL / 1000;
		Quaternion q(0.0, x, y, z);
		VectorFloat vector[3];
		GetGravity(vector, &q);
		float ypr[3];
		GetYawPitchRoll(ypr, &q, vector);
		//mpu6050.Next();
		*/

		printf("%8.5f, %8.5f, %8.5f\n", 
			estimated.at<float>(0), estimated.at<float>(1), estimated.at<float>(2));
		usleep(1000 * INTERVAL);
	}
	return NULL;
}
Example 14
Mat faceNormalize(Mat face, int desiredFaceWidth, bool &sucess) {
  int im_height = face.rows;
  Mat faceProcessed;

  cv::resize(face, face, Size(400, 400), 1.0, 1.0, INTER_CUBIC);

  const float EYE_SX = 0.16f;
  const float EYE_SY = 0.26f;
  const float EYE_SW = 0.30f;
  const float EYE_SH = 0.28f;

  int leftX = cvRound(face.cols * EYE_SX);
  int topY = cvRound(face.rows * EYE_SY);
  int widthX = cvRound(face.cols * EYE_SW);
  int heightY = cvRound(face.rows * EYE_SH);
  int rightX = cvRound(face.cols * (1.0-EYE_SX-EYE_SW) );  

  Mat topLeftOfFace = face(Rect(leftX, topY, widthX, heightY));
  Mat topRightOfFace = face(Rect(rightX, topY, widthX, heightY));

  CascadeClassifier haar_cascade;
  haar_cascade.load(PATH_CASCADE_LEFTEYE);

  vector< Rect_<int> > detectedRightEye;
  vector< Rect_<int> > detectedLeftEye;
  Point leftEye = Point(-1, -1), rightEye = Point(-1, -1);
  // Find the left eye:
  haar_cascade.detectMultiScale(topLeftOfFace, detectedLeftEye);
  for(int i = 0; i < detectedLeftEye.size(); i++) {
    Rect eye_i = detectedLeftEye[i];
    eye_i.x += leftX;
    eye_i.y += topY;
    leftEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
  }
  // If cascade fails, try another
  if(detectedLeftEye.empty()) {
    haar_cascade.load(PATH_CASCADE_BOTHEYES);
    haar_cascade.detectMultiScale(topLeftOfFace, detectedLeftEye);
    for(int i = 0; i < detectedLeftEye.size(); i++) {
      Rect eye_i = detectedLeftEye[i];
      eye_i.x += leftX;
      eye_i.y += topY;
      leftEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
    }
  }
  //Find the right eye
  haar_cascade.load(PATH_CASCADE_RIGHTEYE);
  haar_cascade.detectMultiScale(topRightOfFace, detectedRightEye);
  for(int i = 0; i < detectedRightEye.size(); i++) {
    Rect eye_i = detectedRightEye[i];
    eye_i.x += rightX;
    eye_i.y += topY;
    rightEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
  }
  // If cascade fails, try another
  if(detectedRightEye.empty()) {
    haar_cascade.load(PATH_CASCADE_BOTHEYES);
    haar_cascade.detectMultiScale(topRightOfFace, detectedRightEye);
    for(int i = 0; i < detectedRightEye.size(); i++) {
      Rect eye_i = detectedRightEye[i];
      eye_i.x += rightX;
      eye_i.y += topY;
      rightEye = Point(eye_i.x + eye_i.width/2, eye_i.y + eye_i.height/2);
    }
  }
  // Initialization of the Kalman filters
  if(initiKalmanFilter && leftEye.x >= 0 && rightEye.x >= 0) {
    KFrightEye.statePre.at<float>(0) = rightEye.x;
    KFrightEye.statePre.at<float>(1) = rightEye.y;
    KFrightEye.statePre.at<float>(2) = 0;
    KFrightEye.statePre.at<float>(3) = 0;
    KFrightEye.transitionMatrix = (Mat_<float>(4, 4) << 1,0,0,0,   0,1,0,0,  0,0,1,0,  0,0,0,1);
    setIdentity(KFrightEye.measurementMatrix);
    setIdentity(KFrightEye.processNoiseCov, Scalar::all(1e-4));
    setIdentity(KFrightEye.measurementNoiseCov, Scalar::all(1e-1));
    setIdentity(KFrightEye.errorCovPost, Scalar::all(.1));

    KFleftEye.statePre.at<float>(0) = leftEye.x;
    KFleftEye.statePre.at<float>(1) = leftEye.y;
    KFleftEye.statePre.at<float>(2) = 0;
    KFleftEye.statePre.at<float>(3) = 0;
    KFleftEye.transitionMatrix = (Mat_<float>(4, 4) << 1,0,0,0,   0,1,0,0,  0,0,1,0,  0,0,0,1);
    setIdentity(KFleftEye.measurementMatrix);
    setIdentity(KFleftEye.processNoiseCov, Scalar::all(1e-4));
    setIdentity(KFleftEye.measurementNoiseCov, Scalar::all(1e-1));
    setIdentity(KFleftEye.errorCovPost, Scalar::all(.1));
    initiKalmanFilter = false;
  }
  // Prediction and correction of the Kalman filters
  if(!initiKalmanFilter && leftEye.x >= 0) {
    Mat prediction = KFleftEye.predict();
    Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
    measurement(0) = leftEye.x;
    measurement(1) = leftEye.y;
      
    Point measPt(measurement(0),measurement(1));
    Mat estimated = KFleftEye.correct(measurement);
    Point statePt(estimated.at<float>(0),estimated.at<float>(1));
    leftEyePredict.x = statePt.x;
    leftEyePredict.y = statePt.y;
  }
  if(!initiKalmanFilter && rightEye.x >= 0) {
    Mat prediction = KFrightEye.predict();
    Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
    measurement(0) = rightEye.x;
    measurement(1) = rightEye.y;
      
    Point measPt(measurement(0),measurement(1));
    Mat estimated = KFrightEye.correct(measurement);
    Point statePt(estimated.at<float>(0),estimated.at<float>(1));
    rightEyePredict.x = statePt.x;
    rightEyePredict.y = statePt.y;
  }
  //if both eyes were detected
  Mat warped;
  if (leftEye.x >= 0 && rightEye.x >= 0 && ((rightEye.x - leftEye.x) > (face.cols/4))) {
    sucess = true;
    framesLost = 0;
    int desiredFaceHeight = desiredFaceWidth;
    // Get the center between the 2 eyes.
    Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
    // Get the angle between the 2 eyes.
    double dy = (rightEye.y - leftEye.y);
    double dx = (rightEye.x - leftEye.x);
    double len = sqrt(dx*dx + dy*dy);
    double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

    // Hand measurements show that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
    const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
    // Get the amount we need to scale the image to be the desired fixed size we want.
    double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
    double scale = desiredLen / len;
    // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
    Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
    // Shift the center of the eyes to be the desired center between the eyes.
    rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
    rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

    // Rotate and scale and translate the image to the desired angle & size & position!
    // Note that we use 'w' for the height instead of 'h', because the input face has 1:1 aspect ratio.
    warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128)); // Clear the output image to a default grey.
    warpAffine(face, warped, rot_mat, warped.size());
    equalizeHist(warped, warped);
  }
  else {
  	framesLost++;
  	if(framesLost < MAX_FRAME_LOST && (leftEyePredict.x >= 0 || rightEyePredict.x >= 0)) {
  		if(leftEye.x < 0) {
  			leftEye.x = leftEyePredict.x;
  			leftEye.y = leftEyePredict.y;
  		}
  		if(rightEye.x < 0) {
  			rightEye.x = rightEyePredict.x;
  			rightEye.y = rightEyePredict.y;
  		}
  		if((rightEye.x - leftEye.x) > (face.cols/4)) {
	  		sucess = true;
	  		int desiredFaceHeight = desiredFaceWidth;
		    // Get the center between the 2 eyes.
		    Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
		    // Get the angle between the 2 eyes.
		    double dy = (rightEye.y - leftEye.y);
		    double dx = (rightEye.x - leftEye.x);
		    double len = sqrt(dx*dx + dy*dy);
		    double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

		    // Hand measurements show that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
		    const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
		    // Get the amount we need to scale the image to be the desired fixed size we want.
		    double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
		    double scale = desiredLen / len;
		    // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
		    Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
		    // Shift the center of the eyes to be the desired center between the eyes.
		    rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
		    rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

		    // Rotate and scale and translate the image to the desired angle & size & position!
		    // Note that we use 'w' for the height instead of 'h', because the input face has 1:1 aspect ratio.
		    warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128)); // Clear the output image to a default grey.
		    warpAffine(face, warped, rot_mat, warped.size());
		    equalizeHist(warped, warped);
	    }
	    else {
	    	sucess = false;
			return face;
	    }
  	}
  	else {
		sucess = false;
		return face;
  	}
  }
  bilateralFilter(warped, faceProcessed, 0, 20.0, 2.0);
  im_height = faceProcessed.rows;
  Mat mask = Mat(faceProcessed.size(), CV_8U, Scalar(0)); // Start with an empty mask.
  Point faceRect = Point( im_height/2, cvRound(im_height * FACE_ELLIPSE_CY) );
  Size size = Size( cvRound(im_height * FACE_ELLIPSE_W), cvRound(im_height * FACE_ELLIPSE_H) );
  ellipse(mask, faceRect, size, 0, 0, 360, Scalar(255), CV_FILLED);
  // Use the mask, to remove outside pixels.
  Mat dstImg = Mat(faceProcessed.size(), CV_8U, Scalar(128)); // Clear the output image to a default gray.
  faceProcessed.copyTo(dstImg, mask);  // Copies non-masked pixels from filtered to dstImg.
  return dstImg;
}
Example 15
int main()
{
    float dt = 0.02f;

    KalmanFilter<float, 12, 3> kalmanFilter;
    KalmanFilter<float, 6, 3> kalmanFilter6;

    kalmanFilter.getFMatrix() <<
        1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,  -dt, 0.0f, 0.0f,
		0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,  -dt, 0.0f,
		0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,  -dt,
		0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,   dt, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,   dt, 0.0f, 0.0f, 0.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,   dt, 0.0f, 0.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
		0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f;

    kalmanFilter6.getFMatrix() <<
        1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f;

    kalmanFilter.getBMatrix() <<
	      dt, 0.0f, 0.0f,
	    0.0f,   dt, 0.0f,
	    0.0f, 0.0f,   dt,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f,
	    0.0f, 0.0f, 0.0f;

    kalmanFilter6.getBMatrix() <<
          dt, 0.0f, 0.0f,
        0.0f,   dt, 0.0f,
        0.0f, 0.0f,   dt,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f;

    kalmanFilter.getHMatrix() <<
	    1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f;

    kalmanFilter6.getHMatrix() <<
        1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f;

	float p = 1.0f;

	kalmanFilter.getPMatrix() <<
	       p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p;

	kalmanFilter6.getPMatrix() <<
           p, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f,    p, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f,    p, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,    p, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,    p, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    p;

	float q = 0.2f * dt;

	kalmanFilter.getQMatrix() <<
           q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q;

	kalmanFilter6.getQMatrix() <<
           q, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f,    q, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f,    q, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,    q, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,    q, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    q;

	float r = 0.6f;

	kalmanFilter.getRMatrix() <<
           r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f;

	kalmanFilter6.getRMatrix() <<
           r, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f,    r, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f,    r, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f,    r, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,    r, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f,    r;

	float accelConversionFactor = 0.0039f;
	float gyroConversionFactor  = 1.0f / 14.375f;

	int16_t accelRawData[1000][3];
	float accelData[1000][3];
	int16_t gyroRawData[1000][3];
	float gyroData[1000][3];
	int16_t magRawData[1000][3];
	float magData[1000][3];

	uint16_t dTime;

	ifstream inputFile;
//	inputFile.open("imu_output_pitch_roll_3.csv");
	inputFile.open("imu_output_3.csv");
	string line;

	int i = 0;

	while (getline(inputFile, line) && i < 1000)
	{
	    sscanf(line.data(),
	           "%hd,%hd,%hd,%hd,%hd,%hd,%hd,%hd,%hd,%hu\n",
	           &(accelRawData[i][0]),
	           &(accelRawData[i][1]),
	           &(accelRawData[i][2]),
	           &(gyroRawData[i][0]),
	           &(gyroRawData[i][1]),
	           &(gyroRawData[i][2]),
	           &(magRawData[i][0]),
	           &(magRawData[i][1]),
	           &(magRawData[i][2]),
	           &dTime);

	    accelData[i][0] = (float) accelRawData[i][0] * accelConversionFactor;
        accelData[i][1] = (float) accelRawData[i][1] * accelConversionFactor;
        accelData[i][2] = (float) accelRawData[i][2] * accelConversionFactor;

        gyroData[i][0] = (float) gyroRawData[i][0] * gyroConversionFactor;
        gyroData[i][1] = (float) gyroRawData[i][1] * gyroConversionFactor;
        gyroData[i][2] = (float) gyroRawData[i][2] * gyroConversionFactor;

        magData[i][0] = (float) magRawData[i][0];
        magData[i][1] = (float) magRawData[i][1];
        magData[i][2] = (float) magRawData[i][2];

	    i++;
	}

	inputFile.close();


	int nSamples = i;   // number of samples actually read from the file

	int nSamplesToAverage = 50;

	float accelSums[3] = {0.0f, 0.0f, 0.0f};
	float gyroSums[3] = {0.0f, 0.0f, 0.0f};
	float magSums[3] = {0.0f, 0.0f, 0.0f};

	float accelMeans[3] = {0.0f, 0.0f, 0.0f};
	float gyroMeans[3] = {0.0f, 0.0f, 0.0f};
	float magMeans[3] = {0.0f, 0.0f, 0.0f};

	// Find sum of first N accel, gyro, and mag samples
	for (i = 0; i < nSamplesToAverage; i++)
	{
	    accelSums[0] += accelData[i][0];
	    accelSums[1] += accelData[i][1];
	    accelSums[2] += accelData[i][2];

	    gyroSums[0] += gyroData[i][0];
	    gyroSums[1] += gyroData[i][1];
	    gyroSums[2] += gyroData[i][2];

	    magSums[0] += magData[i][0];
	    magSums[1] += magData[i][1];
	    magSums[2] += magData[i][2];
	}

	// Calculate mean of accel, gyro, and mag sums
	// (this will be used to remove the sensor offset)

	accelMeans[0] = accelSums[0] / (float) nSamplesToAverage;
	accelMeans[1] = accelSums[1] / (float) nSamplesToAverage;
	accelMeans[2] = accelSums[2] / (float) nSamplesToAverage;

    gyroMeans[0] = gyroSums[0] / (float) nSamplesToAverage;
    gyroMeans[1] = gyroSums[1] / (float) nSamplesToAverage;
    gyroMeans[2] = gyroSums[2] / (float) nSamplesToAverage;

    magMeans[0] = magSums[0] / (float) nSamplesToAverage;
    magMeans[1] = magSums[1] / (float) nSamplesToAverage;
    magMeans[2] = magSums[2] / (float) nSamplesToAverage;

    ofstream filteredStatesFile;
    filteredStatesFile.open("filtered_states.csv");

    ofstream filteredStatesFile6;
    filteredStatesFile6.open("filtered_states_6.csv");

    ofstream accelAnglesFile;
    accelAnglesFile.open("accel_angles.csv");

    float lastMeasurement[12] =
        {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
    float lastMeasurement6[6] =
        {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};

    // TEST
//    nSamples = 1;

    for (i = 0; i < nSamples; i++)
	{
        float accelX = accelData[i][0];
	    float accelY = accelData[i][1];
	    float accelZ = accelData[i][2];

	    float accelXMean = accelX - accelMeans[0];
	    float accelYMean = accelY - accelMeans[1];
	    float accelZMean = accelZ - accelMeans[2];

	    float gyroX = gyroData[i][0] - gyroMeans[0];
	    float gyroY = gyroData[i][1] - gyroMeans[1];
	    float gyroZ = gyroData[i][2] - gyroMeans[2];

	    float magX = magData[i][0];
	    float magY = magData[i][1];
	    float magZ = magData[i][2];

	    float pX = 0.0f;
	    float pY = 0.0f;
	    float pZ = 0.0f;

	    float vX = lastMeasurement[3] + (accelXMean * dt);
	    float vY = lastMeasurement[4] + (accelYMean * dt);
	    float vZ = lastMeasurement[5] + (accelZMean * dt);

	    float accelAngleZ = accelZ + (1.0f - accelMeans[2]);

	    // Calculate attitude_x from y and z component of gravity vector
	    float aX = atan2(accelY, accelAngleZ) * (180.0f / 3.14f);
	    // Calculate attitude_y from x and z component of gravity vector
	    float aY = atan2(accelX, accelAngleZ) * (180.0f / 3.14f);
	    // Calculate attitude_z from x and z component of mag reading
	    // (probably will not be accurate)
	    float aZ = atan2(magX, magZ) * (180.0f / 3.14f);

	    accelAnglesFile << aX << ", " << aY << ", " << aZ << endl;

	    float measurement[12] =
	    {
	        aX,
            aY,
            aZ,
	        pX,
	        pY,
	        pZ,
	        vX,
	        vY,
	        vZ,
	        0.0f,
	        0.0f,
	        0.0f
	    };

	    float measurement6[6] =
        {
            aX,
            aY,
            aZ,
            pX,
            pY,
            pZ
        };

	    kalmanFilter.getUMatrix() << gyroX, gyroY, gyroZ;
	    kalmanFilter6.getUMatrix() << gyroX, gyroY, gyroZ;

	    // Predict
	    kalmanFilter.predict();
	    kalmanFilter6.predict();

	    kalmanFilter.update(measurement);
	    kalmanFilter6.update(measurement6);

	    filteredStatesFile <<
	        kalmanFilter.getXMatrix()(0, 0) << ", " <<
	        kalmanFilter.getXMatrix()(1, 0) << ", " <<
	        kalmanFilter.getXMatrix()(2, 0) << ", " <<
	        kalmanFilter.getXMatrix()(3, 0) << ", " <<
	        kalmanFilter.getXMatrix()(4, 0) << ", " <<
	        kalmanFilter.getXMatrix()(5, 0) << ", " <<
	        kalmanFilter.getXMatrix()(6, 0) << ", " <<
	        kalmanFilter.getXMatrix()(7, 0) << ", " <<
	        kalmanFilter.getXMatrix()(8, 0) << ", " <<
	        kalmanFilter.getXMatrix()(9, 0) << ", " <<
	        kalmanFilter.getXMatrix()(10, 0) << ", " <<
	        kalmanFilter.getXMatrix()(11, 0);
	    filteredStatesFile << endl;

	    filteredStatesFile6 <<
            kalmanFilter6.getXMatrix()(0, 0) << ", " <<
            kalmanFilter6.getXMatrix()(1, 0) << ", " <<
            kalmanFilter6.getXMatrix()(2, 0) << ", " <<
            kalmanFilter6.getXMatrix()(3, 0) << ", " <<
            kalmanFilter6.getXMatrix()(4, 0) << ", " <<
            kalmanFilter6.getXMatrix()(5, 0);
	    filteredStatesFile6 << endl;

	    // save the current measurements for the next iteration's velocity integration
	    memcpy(lastMeasurement, measurement, sizeof(lastMeasurement));
	    memcpy(lastMeasurement6, measurement6, sizeof(lastMeasurement6));
	}

	filteredStatesFile.close();
	filteredStatesFile6.close();
	accelAnglesFile.close();

	return 0;
}
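
The templated KalmanFilter<float, N, M> class used in Example 15 is not shown. Assuming it implements the textbook equations behind getFMatrix(), getBMatrix(), getHMatrix(), getPMatrix(), getQMatrix(), getRMatrix(), getUMatrix() and getXMatrix(), those equations amount to the following Eigen sketch (the names SimpleKF, predict and update are illustrative, not the library's API):

#include <Eigen/Dense>

// Minimal textbook Kalman filter: N states, M measurements, C control inputs.
template <int N, int M, int C>
struct SimpleKF {
  Eigen::Matrix<float, N, 1> x = Eigen::Matrix<float, N, 1>::Zero(); // state
  Eigen::Matrix<float, N, N> P, F, Q;                                // covariance, transition, process noise
  Eigen::Matrix<float, N, C> B;                                      // control model
  Eigen::Matrix<float, M, N> H;                                      // measurement model
  Eigen::Matrix<float, M, M> R;                                      // measurement noise

  void predict(const Eigen::Matrix<float, C, 1> &u) {
    x = F * x + B * u;                      // x' = F x + B u
    P = F * P * F.transpose() + Q;          // P' = F P F^T + Q
  }

  void update(const Eigen::Matrix<float, M, 1> &z) {
    Eigen::Matrix<float, M, 1> y = z - H * x;                        // innovation
    Eigen::Matrix<float, M, M> S = H * P * H.transpose() + R;        // innovation covariance
    Eigen::Matrix<float, N, M> K = P * H.transpose() * S.inverse();  // Kalman gain
    x = x + K * y;
    P = (Eigen::Matrix<float, N, N>::Identity() - K * H) * P;
  }
};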