Example #1
void opticalFlow::occMatpEst( Mat_<Vec2f> &flow12, Mat_<Vec2f> &flow21, Mat_<uchar>&occMap)
{
	int iy, ix;

	const float FLOW_PIXEL_THRESH = 2;

	occMap.setTo(255);
	for (iy=0; iy<height1; ++iy)
	{
		for (ix=0; ix<width1; ++ix)
		{
			Vec2f fFlow = flow12[iy][ix];
			int ny, nx;
			ny = floor(iy+fFlow[1]+0.5);
			nx = floor(ix+fFlow[0]+0.5);

			if (ny>=0 && ny<height1 && nx>=0 && nx<width1) 
			{
				cv::Vec2f bFlow = flow21[ny][nx];
				if (fabs(bFlow[1]+ny-iy)<FLOW_PIXEL_THRESH && fabs(bFlow[0]+nx-ix)<FLOW_PIXEL_THRESH)
				{
					continue;
				}
			}
			occMap[iy][ix] = 0;
		}
	}

	// Suppress small connected components of the visible (255) mask: tiny consistent regions are treated as occluded
	Mat bw = occMap;
    Mat labelImage(occMap.size(), CV_32S);
    int nLabels = connectedComponents(bw, labelImage, 8);

	vector<int> hist(nLabels,0);
	for (iy=0; iy<height1; ++iy)
		for (ix=0; ix<width1; ++ix)
			hist[labelImage.at<int>(iy,ix)]++;
	vector<int> rmv_list;
	rmv_list.reserve(20);
	for (int i=0;i<nLabels;++i){
		if (hist[i]<50)
			rmv_list.push_back(i);
	}
	for (iy=0; iy<height1; ++iy)
	{
		for (ix=0; ix<width1; ++ix)
		{
			for (size_t r=0; r<rmv_list.size(); ++r)
			{
				if(labelImage.at<int>(iy,ix) == rmv_list[r])
					occMap[iy][ix] = 0;
			}
		}
	}
}
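The loop above is a standard forward-backward consistency check: a pixel is kept as visible only if following the forward flow and then the backward flow returns to within FLOW_PIXEL_THRESH pixels of where it started. A minimal standalone sketch of the same test, assuming the two flow fields come from any dense optical flow (e.g. calcOpticalFlowFarneback) and replacing the class members height1/width1 with the Mat sizes:

#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;

// Standalone sketch of the forward-backward consistency test used by occMatpEst above.
Mat_<uchar> fbConsistency(const Mat_<Vec2f>& flow12, const Mat_<Vec2f>& flow21, float thresh = 2.f)
{
    Mat_<uchar> occMap(flow12.size(), (uchar)0);           // 0 = occluded / inconsistent
    for (int y = 0; y < flow12.rows; ++y)
        for (int x = 0; x < flow12.cols; ++x)
        {
            Vec2f f = flow12(y, x);
            int ny = cvRound(y + f[1]), nx = cvRound(x + f[0]);
            if (ny < 0 || ny >= flow12.rows || nx < 0 || nx >= flow12.cols)
                continue;
            Vec2f b = flow21(ny, nx);
            if (std::abs(b[1] + ny - y) < thresh && std::abs(b[0] + nx - x) < thresh)
                occMap(y, x) = 255;                        // round trip lands close enough: visible
        }
    return occMap;
}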
Example #2
//===========================================================================
void Multi_SVR_patch_expert::Response(const Mat_<float> &area_of_interest, Mat_<double> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	// For the purposes of the experiment, only use the response of normal intensity, for a fair comparison

	if(svr_patch_experts.size() == 1)
	{
		svr_patch_experts[0].Response(area_of_interest, response);		
	}
	else
	{
		// Responses from multiple patch experts; these can be gradients, LBPs, etc.
		response.setTo(1.0);
		
		Mat_<double> modality_resp(response_height, response_width);

		for(size_t i = 0; i < svr_patch_experts.size(); i++)
		{			
			svr_patch_experts[i].Response(area_of_interest, modality_resp);			
			response = response.mul(modality_resp);	
		}	
		
	}

}
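When more than one patch expert is present, the combined response is the element-wise product of the individual response maps, starting from a map of ones. A toy illustration of that combination rule (the values are made up):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
    // Two hypothetical single-modality response maps (e.g. intensity and gradient experts)
    Mat_<double> r1 = (Mat_<double>(2, 2) << 0.9, 0.1, 0.5, 0.2);
    Mat_<double> r2 = (Mat_<double>(2, 2) << 0.8, 0.4, 0.5, 0.9);

    // Same combination rule as Response(): initialize to 1.0, then multiply element-wise
    Mat_<double> response(2, 2, 1.0);
    response = response.mul(r1);
    response = response.mul(r2);

    std::cout << response << std::endl;   // [0.72, 0.04; 0.25, 0.18]
    return 0;
}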
Example #3
void Kalman_adjust(Point A,int biggest,KalmanFilter &F)
{
	Mat_<float> measurement(2,1);
	if(!notstarted[biggest])
	{
		// first observation for this index: initialize the filter state
		notstarted[biggest]=true;
		 measurement.setTo(Scalar(0));
		F.statePre.at<float>(0) = A.x;
		F.statePre.at<float>(1) = A.y;
		F.statePre.at<float>(2) = 0;
		F.statePre.at<float>(3) = 0;
		F.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,0,0,   0,1,0,0,  0,0,1,0,  0,0,0,1);
		setIdentity(F.measurementMatrix);
		setIdentity(F.processNoiseCov, Scalar::all(1e-4));
		setIdentity(F.measurementNoiseCov, Scalar::all(1e-1));
		setIdentity(F.errorCovPost, Scalar::all(.1));
	}
	else
	{
		measurement(0) = A.x;
		measurement(1) = A.y;
		Mat estimated = F.correct(measurement);
		Point statePt(estimated.at<float>(0),estimated.at<float>(1));
	}
}
Example #4
/**
 * Similar to heightEstimation, except that when a white-black run within tolerance is encountered, the run is colored in colorResult.
 * The function also considers black-white runs.
 */
void RLEDetector::classifyStaffPix(const Mat& thresholdedImage, Mat_<uchar>& staffPixResult, int lineheight, int spaceheight, float lineTolerance, float spaceTolerance){
	int    state = WHITE;
	int    whiteRun = 0;
	int    blackRun = 0;
	uchar  data;

	staffPixResult.setTo(0);

	for(int x=0; x < thresholdedImage.cols; x++){
		whiteRun = 0;
		for(int y=0; y < thresholdedImage.rows; y++){
			data = thresholdedImage.at<uchar>(y,x);
			switch(state){
			case WHITE:
				if(data == BLACK){
					state = BLACK;
					blackRun = 1;
				}
				else
					whiteRun++;
				break;

			case BLACK:
				if(data == WHITE || y == thresholdedImage.rows-1){				// color fitting white-black runs (with tolerance)
					state = WHITE;
					if( abs(blackRun-lineheight) <= lineheight*lineTolerance &&
							abs(whiteRun-spaceheight) <= spaceheight*spaceTolerance){
						for(int b=y-blackRun; b<y; b++){
							staffPixResult.at<uchar>(b,x) = 255;
						}
					}
					else if( abs(blackRun-lineheight) <= lineheight*lineTolerance){		// color fitting black-white runs (with tolerance)
						//check subsequent white run
						int ly = y+1; // ly: local white run y
						while(ly < thresholdedImage.rows &&
								thresholdedImage.at<uchar>(ly,x)==WHITE)
							ly++;
						//if whiterun is o.k. accept black-white run.
						if(abs(ly-y - spaceheight) <= spaceheight*spaceTolerance){
							for(int b=y-blackRun; b<y; b++)
								staffPixResult.at<uchar>(b,x) = 255;
						}

					}
					whiteRun = 1;
					blackRun = 0;
				} // if data == white
				else
					blackRun++;
				break;
			} // state switch
		} // row iteration
	} // column iteration
}
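A run is accepted when |blackRun - lineheight| <= lineheight*lineTolerance and the adjacent white run passes the analogous test against spaceheight. A tiny self-contained check of that acceptance predicate (the run lengths and tolerances below are only illustrative):

#include <cstdio>
#include <cstdlib>

// Same acceptance predicate as in classifyStaffPix, pulled out for illustration.
static bool runFits(int run, int expected, float tolerance)
{
    return std::abs(run - expected) <= expected * tolerance;
}

int main()
{
    // A 4-pixel black run against an expected line height of 3 with 40% tolerance passes; an 8-pixel run does not.
    std::printf("%d %d\n", runFits(4, 3, 0.4f), runFits(8, 3, 0.4f));   // prints "1 0"
    return 0;
}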
Example #5
template <typename T>
void myaccumarray(Mat & subs, Mat & val, Mat_<T> & accumM, cv::Size size){
	
	accumM.create(size);
	accumM.setTo(Scalar::all(0));
	cout<<"channels: "<<val.channels()<<endl;
	for (int i=0;i<subs.rows;i++)
	{
		for (int c=0;c<val.channels();c++)
		{
			//cout<<(subs.at<int>(i,0))<<","<<(subs.at<int>(i,1))<<" "<<endl;
			//cout<<val.at<T>(i,0)[c]<<endl;
			accumM.at<T>((subs.at<int>(i,0)),(subs.at<int>(i,1)))[c] += val.at<T>(i,0)[c];
			//cout<<(subs.at<int>(i,0))<<","<<(subs.at<int>(i,1))<<" "<<accumM.at<T>((subs.at<int>(i,0)),(subs.at<int>(i,1)))[c]<<endl;
		}
	}
}
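myaccumarray mirrors MATLAB's accumarray for vector-valued entries: each row of subs gives the (row, col) target cell and the matching row of val is summed into it, channel by channel. A usage sketch, assuming the template above is in scope and T is a cv::Vec type:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // subs: N x 2 int indices (row, col); val: N x 1 Vec3f values to accumulate
    cv::Mat subs = (cv::Mat_<int>(3, 2) << 0, 0,
                                           0, 0,
                                           1, 2);
    cv::Mat val(3, 1, CV_32FC3);
    val.at<cv::Vec3f>(0, 0) = cv::Vec3f(1, 2, 3);
    val.at<cv::Vec3f>(1, 0) = cv::Vec3f(4, 5, 6);
    val.at<cv::Vec3f>(2, 0) = cv::Vec3f(7, 8, 9);

    cv::Mat_<cv::Vec3f> accum;
    myaccumarray(subs, val, accum, cv::Size(3, 2));   // 2 rows x 3 cols output

    std::cout << accum(0, 0) << " " << accum(1, 2) << std::endl;   // [5, 7, 9] [7, 8, 9]
    return 0;
}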
Example #6
static inline void linearizeHomographyAt( const Mat_<double>& H, const Point2f& pt, Mat_<double>& A )
{
    A.create(2,2);
    double p1 = H(0,0)*pt.x + H(0,1)*pt.y + H(0,2),
           p2 = H(1,0)*pt.x + H(1,1)*pt.y + H(1,2),
           p3 = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2),
           p3_2 = p3*p3;
    if( p3 )
    {
        A(0,0) = H(0,0)/p3 - p1*H(2,0)/p3_2; // fxdx
        A(0,1) = H(0,1)/p3 - p1*H(2,1)/p3_2; // fxdy

        A(1,0) = H(1,0)/p3 - p2*H(2,0)/p3_2; // fydx
        A(1,1) = H(1,1)/p3 - p2*H(2,1)/p3_2; // fydy
    }
    else
        A.setTo(Scalar::all(numeric_limits<double>::max()));
}
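A is the 2x2 Jacobian of the projective map (x, y) -> ((H(0,0)x + H(0,1)y + H(0,2))/w, (H(1,0)x + H(1,1)y + H(1,2))/w) with w = H(2,0)x + H(2,1)y + H(2,2), evaluated at pt. A self-contained sketch that cross-checks one entry against a central finite difference (the homography values are arbitrary, and the function above is assumed visible in the same translation unit since it is declared static):

#include <opencv2/opencv.hpp>
#include <cstdio>
using namespace cv;

// Apply the homography to a single point: (x, y) -> (X/W, Y/W)
static Point2d warpPoint(const Mat_<double>& H, double x, double y)
{
    double X = H(0,0)*x + H(0,1)*y + H(0,2);
    double Y = H(1,0)*x + H(1,1)*y + H(1,2);
    double W = H(2,0)*x + H(2,1)*y + H(2,2);
    return Point2d(X / W, Y / W);
}

int main()
{
    Mat_<double> H = (Mat_<double>(3,3) << 1.1,  0.02,  5,
                                          -0.03, 0.95, -2,
                                           1e-4, 2e-4,  1);
    Point2f pt(100.f, 50.f);

    Mat_<double> A;
    linearizeHomographyAt(H, pt, A);            // analytic Jacobian

    // Central difference for d(fx)/dx, which should agree with A(0,0)
    const double eps = 1e-3;
    double numeric = (warpPoint(H, pt.x + eps, pt.y).x - warpPoint(H, pt.x - eps, pt.y).x) / (2 * eps);
    std::printf("analytic %.6f  numeric %.6f\n", A(0,0), numeric);
    return 0;
}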
Example #7
  // find the widest paths in the graph.
  void segment_iteration_paths()
  {
    while(true)
    {
      cout << "segment_iteration_paths" << endl;
      auto msg = pipe_segment_circles_paths.pull(true);
      vector<Circle> circles = msg.circles;
      Mat ZDT = msg.ZDT;
      // start all pairs as having width = 0, try to increase this
  
      /// compute the edge weights 
      Mat_<float> pwWidths;
      // this is the slowest line...
      pwWidths = deformable_depth::pwWidths(ZDT,circles);
      Mat_<float> pwDists = deformable_depth::pwDists(ZDT,circles);
      pwDists.setTo(inf,pwWidths < 4);
      
      /// find the MST (maximum spanning tree)
      vector<Edge> mst = MST_Kruskal(-pwDists,-inf);
      
      // draw lines between the circles
      const bool DRAW = true;
      if(DRAW)
      {
	Mat rgbLines = msg.RGBcap.clone();
	for(Circle c : circles)
	  rgbLines = draw(rgbLines,c);
	for(Edge edge : mst)
	{
	  Point2i 
	    p1 = circles[edge.v1].center(),
	    p2 = circles[edge.v2].center();
	  cout << "weight = " << edge.weight << endl;
	  cv::line(rgbLines,p1,p2,Scalar(255,0,0));	
	}  
	imshow("graph",rgbLines);
	cvWaitKey(1);
      }
      
      // to the next stage of the pipeline!
      pipe_segment_paths_detect.push(
	Message_segment_paths_to_detect(msg.RGBcap,msg.ZDT,circles,mst));
    }
  }
Example #8
void BackgroundSubtractorGMGImpl::initialize(Size frameSize, double minVal, double maxVal)
{
    CV_Assert(minVal < maxVal);
    CV_Assert(maxFeatures > 0);
    CV_Assert(learningRate >= 0.0 && learningRate <= 1.0);
    CV_Assert(numInitializationFrames >= 1);
    CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255);
    CV_Assert(backgroundPrior >= 0.0 && backgroundPrior <= 1.0);

    minVal_ = minVal;
    maxVal_ = maxVal;

    frameSize_ = frameSize;
    frameNum_ = 0;

    nfeatures_.create(frameSize_);
    colors_.create(frameSize_.area(), maxFeatures);
    weights_.create(frameSize_.area(), maxFeatures);

    nfeatures_.setTo(Scalar::all(0));
}
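initialize() validates the algorithm parameters and allocates the per-pixel model; it is normally invoked internally on the first frame, so user code just constructs the subtractor and feeds frames to apply(). A usage sketch, assuming the opencv_contrib bgsegm module (which provides createBackgroundSubtractorGMG):

#include <opencv2/opencv.hpp>
#include <opencv2/bgsegm.hpp>

int main()
{
    cv::VideoCapture cap(0);
    cv::Ptr<cv::bgsegm::BackgroundSubtractorGMG> gmg =
        cv::bgsegm::createBackgroundSubtractorGMG(/*initializationFrames=*/120,
                                                  /*decisionThreshold=*/0.8);
    cv::Mat frame, fgMask;
    while (cap.read(frame))
    {
        gmg->apply(frame, fgMask);            // initialization happens lazily on the first frames
        cv::imshow("foreground", fgMask);
        if (cv::waitKey(30) == 27) break;     // Esc to quit
    }
    return 0;
}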
Example #9
void initializeKalman(int x, int y)
{
  contador = 0;
  measurement.setTo(Scalar(0));

  if (mouse_info.x < 0 || mouse_info.y < 0) {
    cv::waitKey(30);
    //    continue;
  }
  KF.statePre.at<float>(0) = x;
  KF.statePre.at<float>(1) = y;
  KF.statePre.at<float>(2) = 0;
  KF.statePre.at<float>(3) = 0;
  KF.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,1,0,   0,1,0,1,  0,0,1,0,  0,0,0,1);
	
  setIdentity(KF.measurementMatrix);
  setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
  setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
  setIdentity(KF.errorCovPost, Scalar::all(.1));
	
  mousev.clear();
  kalmanv.clear();
} 
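After this initialization, the usual cycle is predict() followed by correct() with each new measurement. A minimal self-contained sketch of that loop for a constant-velocity 2D tracker (the measurements below are made up):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
    // 4 state variables (x, y, vx, vy), 2 measured (x, y)
    KalmanFilter KF(4, 2, 0);
    KF.transitionMatrix = (Mat_<float>(4, 4) << 1,0,1,0,  0,1,0,1,  0,0,1,0,  0,0,0,1);
    setIdentity(KF.measurementMatrix);
    setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
    setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
    setIdentity(KF.errorCovPost, Scalar::all(0.1));

    Mat_<float> measurement(2, 1);
    for (int t = 0; t < 10; ++t)
    {
        Mat prediction = KF.predict();
        measurement(0) = 10.f * t;            // fake measurement moving along x
        measurement(1) = 5.f;
        Mat estimated = KF.correct(measurement);
        std::cout << "t=" << t << " estimate: " << estimated.at<float>(0)
                  << ", " << estimated.at<float>(1) << std::endl;
    }
    return 0;
}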
Example #10
void SegmenterHumanSimple::segment(const cv::Mat& img, Mat_<uchar>& mask)
{
	Mat imgBGR;
	Mat imgLAB;
	Mat imgBGRo;

	float rate = 500.0f/img.cols;

	GaussianBlur(img,imgBGRo,Size(),0.8,0.8);

	vector<Rect> faces;

	resize(imgBGRo,imgBGRo,Size(),rate,rate);
	cv::CascadeClassifier faceModel(this->_m_filenameFaceModel);
	faceModel.detectMultiScale(imgBGRo,faces);

	imgBGRo.convertTo( imgBGR, CV_32F, 1.0/255. );

	cvtColor( imgBGR, imgLAB, CV_BGR2Lab );

	Superpixel sp(1000,1,5);

	Mat_<int> segmentation = sp.segment(imgLAB);
	vector<SuperpixelStatistic> stat = sp.stat(imgLAB,imgBGR,segmentation);

	Mat_<float> prob;
	this->getPixelProbability(imgBGRo,prob,faces);
	Mat_<float> sprob;
	UtilsSuperpixel::Stat(segmentation,prob,stat,sprob);

	Mat_<int> initial(int(stat.size()),1);
	initial.setTo(1,sprob>0.5);
	initial.setTo(0,sprob<=0.5);
	Mat_<float> probaColor;
	int myx = cv::countNonZero(initial);
	this->_getColorProba(stat,initial,probaColor);

	Mat_<float> fgdInit,bgdInit,fgdColor,bgdColor;
	this->_prob2energy(sprob,fgdInit,bgdInit);
	this->_prob2energy(probaColor,fgdColor,bgdColor);
	Mat_<float> fgdEnergy, bgdEnergy;
	
	fgdEnergy = fgdInit + fgdColor;
	bgdEnergy = bgdInit + bgdColor;

	Mat_<int> label;
	mask.create(imgBGRo.rows,imgBGRo.cols);

	UtilsSegmentation::MaxFlowSuperpixel(stat,fgdEnergy,bgdEnergy,50.0,label);

	for( int i=0;i<mask.rows;i++)
	{
		for(int j=0;j<mask.cols;j++)
		{
			if ( label(segmentation(i,j)) > 0.5)
			{
				mask(i,j) = 255;
			}
			else
			{
				mask(i,j) = 0;
			}
		}
	}

	cv::resize(mask,mask,Size(img.cols,img.rows));
	mask.setTo(255,mask>128);
	mask.setTo(0,mask<=128);
}
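Because the final resize interpolates the 0/255 mask, border pixels pick up intermediate gray values; that is why segment() re-binarizes with the two setTo calls. A tiny self-contained illustration of the effect:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat_<uchar> mask = (Mat_<uchar>(2, 2) << 0, 255, 255, 0);
    Mat big;
    resize(mask, big, Size(4, 4));        // bilinear by default: border pixels get intermediate values
    std::cout << big << std::endl;

    big.setTo(255, big > 128);            // same re-binarization as at the end of segment()
    big.setTo(0, big <= 128);
    std::cout << big << std::endl;
    return 0;
}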
Example #11
int main(int argc, char **argv)
{
  ros::init(argc, argv, "rovioTest");
  ros::NodeHandle n;
  imageClient = n.serviceClient<rovio_base::image>("rovioImage");
  controlClient = n.serviceClient<rovio_base::manDrv>("rovioControl");
  reportClient = n.serviceClient<rovio_base::report>("rovioReport");

  Mat imgOrd = getImage();
  cvtColor(imgOrd, imgOrd, CV_RGB2GRAY);
  const int AREA_SIZE = 16;
  int ordRows = imgOrd.rows;
  int ordCols = imgOrd.cols;
  int areaNumY = ordRows / AREA_SIZE;
  int areaNumX = ordCols / AREA_SIZE;
  int marginY = ordRows % AREA_SIZE;
  int areaNum = areaNumX * areaNumY;
  Rect ior(0, marginY, areaNumX * AREA_SIZE, areaNumY * AREA_SIZE);
  Mat img = getImg(imgOrd, ior);
  VideoWriter videoWriter("/home/viki/Rovio.avi", CV_FOURCC('M', 'J', 'P', 'G'), 3.0, Size(img.cols, img.rows));
  int lastDirection = 1;
  for (int i = 0; i < 1000; i++)
  {
    Mat img = getImg(getImage(), ior);
    Mat_<int> regionMap(img.size());
    regionMap.setTo(0);
    regionMap(regionMap.rows - 18, regionMap.cols / 2) = 1;
    regionMap(regionMap.rows - 10, regionMap.cols / 2) = 1;
    RegionGrowthAlg alg;
    alg.calcRegionMap(img, regionMap);

    // Detect forward
    int fAreaWidth = img.cols - 200;
    int fAreaHeight = img.rows / 4;
    int fTopX = (img.cols - fAreaWidth) / 2;
    int fTopY = img.rows - fAreaHeight;
    int ignorePixels = 1000;
    Mat_<int> fArea = regionMap(Rect(fTopX, fTopY, fAreaWidth, fAreaHeight));
    int fAreaSum = 0;
    for (int i = 0; i < fArea.rows; i++)
    {
      int* pRow = fArea.ptr<int>(i);
      for (int j = 0; j < fArea.cols; j++)
      {
        if (pRow[j] == 0)
          fAreaSum++;
      }
    }
    bool flagForward = fAreaSum < ignorePixels;

    // Detect left and right
    int lrAreaWidth = 100;
    int marginX = 0;
    int lrAreaHeight = fAreaHeight;
    int lrTopY = img.rows - lrAreaHeight;
    int lTopX = marginX;
    int rTopX = img.cols - marginX - lrAreaWidth;
    int lrIgnorePixels = 1000;
    Mat_<int> lArea = regionMap(Rect(lTopX, lrTopY, lrAreaWidth, lrAreaHeight));
    Mat_<int> rArea = regionMap(Rect(rTopX, lrTopY, lrAreaWidth, lrAreaHeight));
    int lAreaSum = 0;
    int rAreaSum = 0;
    for (int i = 0; i < lArea.rows; i++)
    {
      int* plRow = lArea.ptr<int>(i);
      int* prRow = rArea.ptr<int>(i);
      for (int j = 0; j < lArea.cols; j++)
      {
        if (plRow[j] == 0)
          lAreaSum++;
        if (prRow[j] == 0)
          rAreaSum++;
      }
    }
    bool flagLeft = lAreaSum < lrIgnorePixels;
    bool flagRight = rAreaSum < lrIgnorePixels;

    //fArea.setTo(2);
    lArea.setTo(3);
    rArea.setTo(4);

    Utility util;
    util.drawSegmentBorder(img, regionMap);

    // Mark info: draw the left/right region pixel counts on the image
    int leftSum = 0;
    int rightSum = 0;
    int loopi = img.rows;
    int loopj = img.cols;
    for (int i = 0; i < loopi; i++)
    {
      int* pLeftRow = regionMap.ptr<int>(i);
      int* pRightRow = pLeftRow + loopj / 2;
      int loop = loopj / 2;
      for (int j = 0; j < loop; j++)
      {
        if (pLeftRow[j] > 0)
        {
          leftSum++;
        }
        if (pRightRow[j] > 0)
        {
          rightSum++;
        }
      }
    }
    Point pos(loopj / 2 - 150, loopi / 2);
    std::stringstream ss;
    string tmp;
    ss << leftSum;
    ss >> tmp;
    putText(img, tmp, pos, FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 255, 0));
    pos.x = loopj / 2 + 100;
    ss.str("");
    ss.clear();
    ss << rightSum;
    ss >> tmp;
    putText(img, tmp, pos, FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 255, 0));
    int textLen = 40;
    pos.x = fArea.cols / 2 - textLen + fTopX;
    pos.y = fArea.rows / 2 + fTopY;
    ss.str("");
    ss.clear();
    ss << fAreaSum;
    ss >> tmp;
    putText(img, tmp, pos, FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 50, 255));
    pos.x = lArea.cols / 2 - textLen + lTopX;
    pos.y = lArea.rows / 2 + lrTopY;
    ss.str("");
    ss.clear();
    ss << lAreaSum;
    ss >> tmp;
    putText(img, tmp, pos, FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 50, 255));
    pos.x = rArea.cols / 2 - textLen + rTopX;
    pos.y = rArea.rows / 2 + lrTopY;
    ss.str("");
    ss.clear();
    ss << rAreaSum;
    ss >> tmp;
    putText(img, tmp, pos, FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 50, 255));

    // Check the straight-line probe region
    int lineLen = 200;
    int lineStartX = img.cols / 2 - lineLen / 2;
    int lineEndX = img.cols / 2 + lineLen / 2;
    int lineY = img.rows - 140;
    Point lineStart(lineStartX, lineY);
    Point lineEnd(lineEndX, lineY);
    line(img, lineStart, lineEnd, Scalar(255, 20, 20));

    int blockNum = 0;
    int* pLineRow = regionMap.ptr<int>(lineY);
    for (int j = lineStartX; j < lineEndX; j++)
    {
      if (pLineRow[j] == 0)
      {
        blockNum++;
      }
    }
    bool isBlocked = blockNum > lineLen / 2;

    // Video output
    //cvtColor(img, img, CV_GRAY2RGB);
    imshow("", img);
    waitKey(10);
    videoWriter << img;

    // Control
    isBlocked = (!flagLeft && !flagRight) || isBlocked;
    int waitTime = 1;
    rvMCUReport rvMcu = getReport();
    if (rvMcu.isIrOn && rvMcu.isDetectedBarrier)
      isBlocked = true;
    if (true)
    {
      if (isBlocked)
      {
        int maxDif = 5000;
        if (leftSum - rightSum > maxDif)
        {
          lastDirection = -1;
        }
        else if (rightSum - leftSum > maxDif)
        {
          lastDirection = 1;
        }
        if (lastDirection == -1)
        {
          control(5, 8);
          waitKey(waitTime);
        }
        else
        {
          control(6, 8);
          waitKey(waitTime);
        }
      }
      else if (flagForward)
      {
        control(1, 6);
      }
      else if (flagLeft)
      {
        control(3, 8);
      }
      else if (flagRight)
      {
        control(4, 8);
      }
      else
      {
        printf("Error: no control action\n");
      }
    }

  }

  return 0;
}
Example #12
void spm_bp::init_label_super(Mat_<Vec2f>& label_super_k, Mat_<float>& dCost_super_k) //, vector<vector<Vec2f> > &label_saved, vector<vector<Mat_<float> > > &dcost_saved)
{
    printf("==================================================\n");
    printf("Initializing particles...\n");
    vector<Vec2f> label_vec;
    Mat_<float> localDataCost;
    for (int sp = 0; sp < numOfSP1; ++sp) {
        int id = repPixels1[sp];
        int y = subRange1[id][0];
        int x = subRange1[id][1];
        int h = subRange1[id][3] - subRange1[id][1] + 1;
        int w = subRange1[id][2] - subRange1[id][0] + 1;

        label_vec.clear();
        int k = 0;

        while (k < NUM_TOP_K) {
            float du = (float(rand()) / RAND_MAX - 0.5) * 2 * (float)disp_range_u;
            float dv = (float(rand()) / RAND_MAX - 0.5) * 2 * (float)disp_range_v;
            du = floor(du * upScale + 0.5) / upScale;
            dv = floor(dv * upScale + 0.5) / upScale;

            if (du >= -disp_range_u && du <= disp_range_u && dv >= -disp_range_v && dv <= disp_range_v) {
                int index_tp = 1;
                for (int k1 = 0; k1 < k; ++k1) {
                    if (checkEqual_PMF_PMBP(label_super_k[repPixels1[sp]][k1], Vec2f(du, dv)))
                        index_tp = 0;
                }

                if (index_tp == 1) {
                    for (int ii = 0; ii < superpixelsList1[sp].size(); ++ii)
                        label_super_k[superpixelsList1[sp][ii]][k] = Vec2f(du, dv);

                    label_vec.push_back(Vec2f(du, dv));
                    ++k;
                }
            }
        }
#if USE_CLMF0_TO_AGGREGATE_COST
        cv::Mat_<cv::Vec4b> leftCombinedCrossMap;
        leftCombinedCrossMap.create(h, w);
        subCrossMap1[sp].copyTo(leftCombinedCrossMap);
        CFFilter cff;
#endif
        int vec_size = label_vec.size();
        localDataCost.create(h, w * vec_size);
        localDataCost.setTo(0);
#pragma omp parallel for num_threads(NTHREADS)
        for (int i = 0; i < vec_size; ++i) {
            int kx = i * w;
            Mat_<float> rawCost;
            getLocalDataCostPerlabel(sp, label_vec[i], rawCost);
#if USE_CLMF0_TO_AGGREGATE_COST
            cff.FastCLMF0FloatFilterPointer(leftCombinedCrossMap, rawCost, rawCost);
#endif
            rawCost.copyTo(localDataCost(cv::Rect(kx, 0, w, h)));
        }

        //getLocalDataCost( sp, label_vec, localDataCost);

        int pt, px, py, kx;
        for (int ii = 0; ii < superpixelsList1[sp].size(); ++ii) {
            //cout<<ii<<endl;
            pt = superpixelsList1[sp][ii];
            px = pt / width1;
            py = pt % width1;
            for (int kk = 0; kk < NUM_TOP_K; kk++) {
                kx = kk * w;
                const Mat_<float>& local = localDataCost(cv::Rect(kx, 0, w, h));
                dCost_super_k[pt][kk] = local[px - x][py - y];
            }
        }
    }
    printf("==================================================\n");
}
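init_label_super packs the per-label cost planes side by side into one h x (w * K) matrix and later reads label k back through a cv::Rect(k*w, 0, w, h) ROI. A minimal sketch of that packing scheme (the sizes are made up):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
    const int h = 4, w = 5, K = 3;             // K candidate labels per superpixel
    Mat_<float> packed(h, w * K, 0.f);

    // Write each label's w-wide cost plane into its own horizontal slab
    for (int k = 0; k < K; ++k)
        packed(Rect(k * w, 0, w, h)).setTo(float(k + 1));

    // Read one plane back the same way it is read in init_label_super
    Mat_<float> plane1 = packed(Rect(1 * w, 0, w, h));
    std::cout << plane1(2, 3) << std::endl;    // prints 2
    return 0;
}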
Example #13
static void setup() {
	// initialize device
	printf("Initializing I2C devices...\n");
	mpu.initialize();

	// verify connection
	printf("Testing device connections...\n");
	printf(mpu.testConnection() ? "MPU6050 connection successful\n" : "MPU6050 connection failed\n");

	// load and configure the DMP
	printf("Initializing DMP...\n");
	devStatus = mpu.dmpInitialize();

	// make sure it worked (returns 0 if so)
	if (devStatus == 0) {
		// turn on the DMP, now that it's ready
		printf("Enabling DMP...\n");
		mpu.setDMPEnabled(true);

		// enable Arduino interrupt detection
		//Serial.println(F("Enabling interrupt detection (Arduino external interrupt 0)..."));
		//attachInterrupt(0, dmpDataReady, RISING);
		mpuIntStatus = mpu.getIntStatus();

		// set our DMP Ready flag so the main loop() function knows it's okay to use it
		printf("DMP ready! Waiting for first interrupt...\n");
		dmpReady = true;

		// get expected DMP packet size for later comparison
		packetSize = mpu.dmpGetFIFOPacketSize();
	}
	else {
		// ERROR!
		// 1 = initial memory load failed
		// 2 = DMP configuration updates failed
		// (if it's going to break, usually the code will be 1)
		printf("DMP Initialization failed (code %d)\n", devStatus);
	}

	/*
	adjAccel[0] = adjAccel[1] = adjAccel[2] = 0;
	adjGyro[0] = adjGyro[1] = adjGyro[2] = 0;
	for (int i = 0; i < 20; i++)
	{
		readFIFO();
		mpu.dmpGetAccel(accel, fifoBuffer);
		mpu.dmpGetGyro(gyro, fifoBuffer);
		adjAccel[0] += accel[0];
		adjAccel[1] += accel[1];
		adjAccel[2] += accel[2];
		adjGyro[0] += gyro[0];
		adjGyro[1] += gyro[1];
		adjGyro[2] += gyro[2];
	}
	adjAccel[0] /= 20;
	adjAccel[1] /= 20;
	adjAccel[2] /= 20;
	adjGyro[0] /= 20;
	adjGyro[1] /= 20;
	adjGyro[2] /= 20;
	printf("ADJUST: %d, %d, %d\n", adjAccel[0], adjAccel[1], adjAccel[2]);
	*/

	measurement.setTo(cv::Scalar(0));
	kalman.transitionMatrix =
		*(cv::Mat_<float>(4, 4) <<
		1, 0, 1, 0,
		0, 1, 0, 1,
		0, 0, 1, 0,
		0, 0, 0, 1);
	readFIFO();
	mpu.dmpGetAccel(accel, fifoBuffer);
	kalman.statePre.at<float>(0) = accel[0];
	kalman.statePre.at<float>(1) = accel[1];
	kalman.statePre.at<float>(2) = accel[2];
	kalman.statePre.at<float>(3) = 0.0;
	setIdentity(kalman.measurementMatrix);
	setIdentity(kalman.processNoiseCov, cv::Scalar::all(1e-4));
	setIdentity(kalman.measurementNoiseCov, cv::Scalar::all(10));
	setIdentity(kalman.errorCovPost, cv::Scalar::all(.1));
}
Example #14
template<typename _Tp, int chs>
bool invert(const Mat_<_Tp, chs>& src, Mat_<_Tp, chs>& dst, int method = DECOMP_LU)
{
	FBC_Assert(src.data != NULL && dst.data != NULL);
	FBC_Assert(src.cols > 0 && src.rows > 0 && dst.cols > 0 && dst.rows > 0);
	FBC_Assert(typeid(double).name() == typeid(_Tp).name() || typeid(float).name() == typeid(_Tp).name());
	FBC_Assert(src.cols == dst.rows && src.rows == dst.cols);

	bool result = false;
	size_t esz = sizeof(_Tp) * chs; // size_t esz = CV_ELEM_SIZE(type);
	int m = src.rows, n = src.cols;

	if (method == DECOMP_SVD) { // TODO
		FBC_Assert(0);
	}

	FBC_Assert(m == n);

	if (method == DECOMP_EIG) { // TODO
		FBC_Assert(0);
	}

	FBC_Assert(method == DECOMP_LU || method == DECOMP_CHOLESKY);

	if (n <= 3) {
		const uchar* srcdata = src.ptr();
		uchar* dstdata = const_cast<uchar*>(dst.ptr());
		size_t srcstep = src.step;
		size_t dststep = dst.step;

		if (n == 2) { // TODO
			FBC_Assert(0);
		} else if (n == 3) {
			if (typeid(float).name() == typeid(_Tp).name() && chs == 1) {
				// Sf/Df (and Sd/Dd below) are element-access macros over srcdata/dstdata defined elsewhere in the library
				double d = det3(Sf);

				if (d != 0.) {
					double t[12];

					result = true;
					d = 1./d;
					t[0] = (((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d);
					t[1] = (((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d);
					t[2] = (((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d);

					t[3] = (((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d);
					t[4] = (((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d);
					t[5] = (((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d);

					t[6] = (((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d);
					t[7] = (((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d);
					t[8] = (((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d);

					Df(0,0) = (float)t[0]; Df(0,1) = (float)t[1]; Df(0,2) = (float)t[2];
					Df(1,0) = (float)t[3]; Df(1,1) = (float)t[4]; Df(1,2) = (float)t[5];
					Df(2, 0) = (float)t[6]; Df(2, 1) = (float)t[7]; Df(2, 2) = (float)t[8];
				}
			} else {
				double d = det3(Sd);
				if (d != 0.) {
					result = true;
					d = 1. / d;
					double t[9];

					t[0] = (Sd(1, 1) * Sd(2, 2) - Sd(1, 2) * Sd(2, 1)) * d;
					t[1] = (Sd(0, 2) * Sd(2, 1) - Sd(0, 1) * Sd(2, 2)) * d;
					t[2] = (Sd(0, 1) * Sd(1, 2) - Sd(0, 2) * Sd(1, 1)) * d;

					t[3] = (Sd(1, 2) * Sd(2, 0) - Sd(1, 0) * Sd(2, 2)) * d;
					t[4] = (Sd(0, 0) * Sd(2, 2) - Sd(0, 2) * Sd(2, 0)) * d;
					t[5] = (Sd(0, 2) * Sd(1, 0) - Sd(0, 0) * Sd(1, 2)) * d;

					t[6] = (Sd(1, 0) * Sd(2, 1) - Sd(1, 1) * Sd(2, 0)) * d;
					t[7] = (Sd(0, 1) * Sd(2, 0) - Sd(0, 0) * Sd(2, 1)) * d;
					t[8] = (Sd(0, 0) * Sd(1, 1) - Sd(0, 1) * Sd(1, 0)) * d;

					Dd(0, 0) = t[0]; Dd(0, 1) = t[1]; Dd(0, 2) = t[2];
					Dd(1, 0) = t[3]; Dd(1, 1) = t[4]; Dd(1, 2) = t[5];
					Dd(2, 0) = t[6]; Dd(2, 1) = t[7]; Dd(2, 2) = t[8];
				}
			}
		} else {
			assert(n == 1);

			if (typeid(float).name() == typeid(_Tp).name() && chs == 1)
			{
				double d = Sf(0, 0);
				if (d != 0.) {
					result = true;
					Df(0, 0) = (float)(1. / d);
				}
			} else {
				double d = Sd(0, 0);
				if (d != 0.) {
					result = true;
					Dd(0, 0) = 1. / d;
				}
			}
		}

		if (!result)
			dst.setTo(Scalar(0));
		return result;
	}

	FBC_Assert(0); // TODO

	return result;
}
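The n == 3 branch is the closed-form adjugate-over-determinant inverse. A self-contained cross-check of that cofactor pattern against OpenCV's cv::invert (the matrix entries are arbitrary):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat_<double> S = (Mat_<double>(3,3) << 4, 7, 2,  3, 6, 1,  2, 5, 3);
    double d = determinant(S);

    // Adjugate over determinant, same cofactor pattern as the t[0..8] terms above
    Mat_<double> D(3,3);
    D(0,0) = (S(1,1)*S(2,2) - S(1,2)*S(2,1)) / d;
    D(0,1) = (S(0,2)*S(2,1) - S(0,1)*S(2,2)) / d;
    D(0,2) = (S(0,1)*S(1,2) - S(0,2)*S(1,1)) / d;
    D(1,0) = (S(1,2)*S(2,0) - S(1,0)*S(2,2)) / d;
    D(1,1) = (S(0,0)*S(2,2) - S(0,2)*S(2,0)) / d;
    D(1,2) = (S(0,2)*S(1,0) - S(0,0)*S(1,2)) / d;
    D(2,0) = (S(1,0)*S(2,1) - S(1,1)*S(2,0)) / d;
    D(2,1) = (S(0,1)*S(2,0) - S(0,0)*S(2,1)) / d;
    D(2,2) = (S(0,0)*S(1,1) - S(0,1)*S(1,0)) / d;

    Mat ref;
    invert(S, ref, DECOMP_LU);
    std::cout << norm(D - ref) << std::endl;   // ~0
    return 0;
}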
Example #15
int main(int argc, char *argv[])
{
  	//KALMAN
  	measurement = Mat_<float>(2,1);
  	measurement.setTo(Scalar(0));
  	KFrightEye = KalmanFilter(4, 2, 0);
  	KFleftEye = KalmanFilter(4, 2, 0);
  	state = Mat_<float>(4, 1); 
  	processNoise = Mat(4, 1, CV_32F);
  	// Continuous recognition
  	float P_Safe_Ultimo = 1.0, P_notSafe_Ultimo = 0.0;
  	float P_Atual_Safe = 0, P_Atual_notSafe = 0;
  	float P_Safe_Atual = 0, P_notSafe_Atual = 0;
  	float P_Safe;
  	float timeLastObs = 0, timeAtualObs = 0, tempo = 0;
    // These vectors hold the images and corresponding labels:
    vector<Mat> images;
    vector<Mat> video;
    vector<int> labels;
    // Create a FaceRecognizer and train it on the given images:
    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
    // frame reading
    STREAM *ir = createStream("../build/ir.log");
    bool flag_ir;
    flag_ir = streamNext(ir);
    struct timeval now;
    gettimeofday(&now, NULL);
    
    CascadeClassifier haar_cascade;
    haar_cascade.load(PATH_CASCADE_FACE);
    bool sucess = false;
    int login = 0;
    Point ultimaFace = Point(-1, -1);
    
    while(flag_ir) {
    	  Mat frame = ir->infrared;
        frame.convertTo(frame, CV_8UC3, 255, 0);
        vector< Rect_<int> > faces;
        // Find the faces in the frame:
        haar_cascade.detectMultiScale(frame, faces);
        tempo = getTickCount();
        for(int j = 0; j < faces.size(); j++) {
         	    Mat face = frame(faces[j]);
		    Rect faceRect = faces[j];
		    Point center = Point(faceRect.x + faceRect.width/2, faceRect.y + faceRect.width/2);
		    if(ultimaFace.x < 0) {
		    		ultimaFace.x = center.x;
		    		ultimaFace.y = center.y;
		    }
		    else {
		    		double res = cv::norm(ultimaFace-center);
		    		if(res < 50.0) {
		    			ultimaFace.x = center.x;
			    		ultimaFace.y = center.y;
			    		Mat faceNormalized = faceNormalize(face, FACE_SIZE, sucess);
			          if(sucess) {
			                if(login < FRAMES_LOGIN) {
			                  images.push_back(faceNormalized);
			                  labels.push_back(0);
			                  login++;
			                  if(login == FRAMES_LOGIN) 
			                    model->train(images, labels);
			                }
			                else {
			                    timeLastObs= timeAtualObs;
			                    timeAtualObs = tempo;
			                    double confidence;
			                    int prediction;
			                    model->predict(faceNormalized, prediction, confidence);
			                    // continuous recognition
			                    if(timeLastObs == 0) {
			                    	P_Atual_Safe = 1 - ((1 + erf((confidence-Usafe) / (Rsafe*sqrt(2))))/2);
			                    	P_Atual_notSafe = ((1 + erf((confidence-UnotSafe) / (RnotSafe*sqrt(2))))/2);
			                        float deltaT = (timeLastObs - timeAtualObs)/getTickFrequency();
			                        float elevado = (deltaT * LN2)/K_DROP;
			                    	P_Safe_Atual = P_Atual_Safe + pow(E,elevado) * P_Safe_Ultimo;
			              		    P_notSafe_Atual = P_Atual_notSafe + pow(E,elevado) * P_notSafe_Ultimo;
			                    }
			                    else {
			                    	P_Atual_Safe = 1 - ((1 + erf((confidence-Usafe) / (Rsafe*sqrt(2))))/2);
			                    	P_Atual_notSafe = ((1 + erf((confidence-UnotSafe) / (RnotSafe*sqrt(2))))/2);
			                    	P_Safe_Ultimo = P_Safe_Atual;
			                    	P_notSafe_Ultimo = P_notSafe_Atual;
			                    	float deltaT = (timeLastObs - timeAtualObs)/getTickFrequency();
			                      float elevado = (deltaT * LN2)/K_DROP;
			                      P_Safe_Atual = P_Atual_Safe + pow(E,elevado) * P_Safe_Ultimo;
			                      P_notSafe_Atual = P_Atual_notSafe + pow(E,elevado) * P_notSafe_Ultimo;
			                    }
			                }
			            }
				 }
		    }
        }
        if(P_Safe_Atual != 0) {
          float deltaT = -(tempo - timeAtualObs)/getTickFrequency();
          float elevado = (deltaT * LN2)/K_DROP;
          P_Safe = (pow(E,elevado) * P_Safe_Atual)/(P_Safe_Atual+P_notSafe_Atual);
          if(P_Safe > 0)
            cout << P_Safe << endl;
        }
        flag_ir = streamNext(ir);
    }
    return 0;
}