Example #1
void MainWindow::on_pushButtonWarpImage_clicked()
{
    latestMatImage = cv::imread("test4.jpg", 1);
    fd->detectFiducials(QStringList() << "test4.jpg");
    while (fd->isRunning()) ;  // busy-wait until the fiducial detector thread finishes
    double topLeftR, topLeftC, botLeftR, botLeftC, topRightR, topRightC, botRightR, botRightC;
    bool s1, s2, s3, s4;
    qDebug() << "prior to get";
    s1 = fd->getFiducialLocation(3, 0, topLeftR, topLeftC);
    s2 = fd->getFiducialLocation(5, 0, botLeftR, botLeftC);
    s3 = fd->getFiducialLocation(8, 0, topRightR, topRightC);
    s4 = fd->getFiducialLocation(10, 0, botRightR, botRightC);
    qDebug() << "after get";
    if (! (s1 && s2 && s3 && s4))
    {
        updateLog("Need all 4 fiducials on board!");
        return;
    }
    // try with perspective transform first
    /*
    cv::Point2f board_pts_ideal[4];    // top left, bottom left, top right, bottom right
    board_pts_ideal[0].x = -1; board_pts_ideal[0].y = 4;
    board_pts_ideal[1].x = -1; board_pts_ideal[1].y = 11;
    board_pts_ideal[2].x = 16; board_pts_ideal[2].y = 3;
    board_pts_ideal[3].x = 16; board_pts_ideal[3].y = 11;

    cv::Point2f board_pts_found[4];
    board_pts_found[0].x = topLeftC; board_pts_found[0].y = topLeftR;
    board_pts_found[1].x = botLeftC; board_pts_found[1].y = botLeftR;
    board_pts_found[2].x = topRightC; board_pts_found[2].y = topRightR;
    board_pts_found[3].x = botRightC; board_pts_found[3].y = botRightR;
    */

    cv::Point2f pts_ideal[] = {cv::Point2f(-50,200),
                          cv::Point2f(-50,550),
                          cv::Point2f(800,150),
                          cv::Point2f(800,550)};

    cv::Point2f pts_actual[] = {cv::Point2f(topLeftC, topLeftR),
                          cv::Point2f(botLeftC, botLeftR),
                          cv::Point2f(topRightC, topRightR),
                          cv::Point2f(botRightC, botRightR)};


    //cv::Mat transformMat = cv::getPerspectiveTransform(board_pts_found, board_pts_ideal);
    cv::Mat transformMat = cv::getPerspectiveTransform(pts_actual, pts_ideal);
    qDebug() << "transformation mat" << transformMat.size().width << transformMat.size().height;

    cv::Mat newImage = latestMatImage.clone();
    qDebug() << newImage.size().width << newImage.size().height;
    qDebug() << latestMatImage.size().width << latestMatImage.size().height;

    cv::Mat grayImage;
    cv::cvtColor(latestMatImage, grayImage, CV_RGB2GRAY);

    cv::warpPerspective(grayImage, newImage, transformMat, latestMatImage.size(), cv::INTER_LINEAR);
    qDebug() << "here 5";
    cv::Mat rgbNew;
    cv::cvtColor(newImage, rgbNew, CV_GRAY2RGB);

    IplImage img = IplImage(rgbNew);
    qDebug() << "here 6";
    picItem->setPixmap(QPixmap::fromImage(Utilities::IplImageToQImage(&img)));
}
void SkinDetector_OPENCV::detectSkin()
{
	CvAdaptiveSkinDetector SkinDetector(1, CvAdaptiveSkinDetector::MORPHING_METHOD_NONE);
	// bind the conversions to named headers: taking the address of the temporary
	// returned by operator IplImage() leaves a dangling pointer
	IplImage SrcImg = SrcImage;
	IplImage DstImg = DstImage;
	SkinDetector.process(&SrcImg, &DstImg);
}
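A recurring pitfall in these examples is taking the address of a temporary IplImage (e.g. &IplImage(mat) or &mat.operator IplImage()); that compiles only as an MSVC extension, and any pointer kept past the statement dangles. A minimal sketch of the safe pattern in isolation, assuming OpenCV 2.x where cv::Mat still provides operator IplImage(); someLegacyFunction is a hypothetical stand-in for any C-API call:

#include <opencv2/core/core.hpp>

void callLegacyApiSafely(cv::Mat& src, cv::Mat& dst)
{
    // bind the conversions to named headers: no pixel copy is made,
    // and the headers stay valid for the whole scope
    IplImage srcHeader = src;
    IplImage dstHeader = dst;
    // someLegacyFunction(&srcHeader, &dstHeader);  // hypothetical C-API call
}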
/**
 * @function detectAndDisplay
 */
vector<Rect> detectFace(IplImage *frame, bool detectall)
{
   std::vector<Rect> faces, rects;
   Mat frame_gray;
   Mat frame1(frame,0);   // wrap the IplImage buffer without copying pixel data
   cvtColor( frame1, frame_gray, COLOR_BGR2GRAY );

   equalizeHist( frame_gray, frame_gray );

   //-- Detect faces
   face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0, Size(80, 80) );
   if (faces.size()>=1)
    {
      Mat faceROI = frame_gray( faces[0] );
      //-- Draw the face
      Point center( faces[0].x + faces[0].width/2, faces[0].y + faces[0].height/2 );
//      ellipse( frame1, center, Size( faces[0].width/2, faces[0].height/2), 0, 0, 360, Scalar( 255, 0, 0 ), 2, 8, 0 );
      Point face_topleft(faces[0].x+5, faces[0].y);
      Point face_bottomright(faces[0].x+faces[0].width-5, faces[0].y+faces[0].height+25);
      rectangle(frame1, face_topleft,face_bottomright, Scalar(0,255,0),3, 8, 0);
      rects.push_back(faces[0]);
      if (detectall)
      {
          std::vector<Rect> eyes;
          //-- In each face, detect eyes
          eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
          if( eyes.size() == 2)
          {
              for( size_t j = 0; j < eyes.size(); j++ )
              { //-- Draw the eyes
                  Point eye_center( faces[0].x + eyes[j].x + eyes[j].width/2, faces[0].y + eyes[j].y + eyes[j].height/2 );
                  int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
                  circle( frame1, eye_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
                  //            rectangle(frame1,Rect(faces[0].x+eyes[j].x, faces[0].y+eyes[j].y,eyes[j].width,eyes[j].height),Scalar(255,0,255),3,8,0);
                  rects.push_back(eyes[j]);
              }

          }
          std::vector<Rect> mouths;
          mouth_cascade.detectMultiScale(faceROI, mouths, 1.1, 2, 0, Size(30, 30));
          if (mouths.size()>=1)
          {
              rectangle(frame1, Rect(faces[0].x+mouths[0].x, faces[0].y+mouths[0].y,mouths[0].width,mouths[0].height),Scalar(0,255,255),3,8,0);
              rects.push_back(mouths[0]);
          }

          std::vector<Rect> noses;
          nose_cascade.detectMultiScale(faceROI, noses, 1.1, 2, 0, Size(30, 30));
          if (noses.size()==1)
          {
              Point nose_center( faces[0].x + noses[0].x + noses[0].width/2, faces[0].y + noses[0].y + noses[0].height/2 );
              int radius = cvRound( (noses[0].width + noses[0].height)*0.15 );
              circle( frame1, nose_center, radius, Scalar( 255, 0, 255), 3, 8, 0 );
              //          rectangle(frame1, Rect(faces[0].x+noses[0].x, faces[0].y+noses[0].y,noses[0].width,noses[0].height),Scalar(0,255,255),3,8,0);
          }
      }
    }
   // frame1 wraps frame's pixel buffer without copying, so the drawing above
   // already modified the caller's image; assigning a temporary's address to
   // the by-value pointer parameter had no effect and is removed
   return rects;
}
Mat initModify::histogramEqualization(Mat & sourceImage) {
	// convert the Mat to an IplImage header (no pixel copy) so the C API can be used
	IplImage srcHeader = IplImage(sourceImage);
	IplImage * src = &srcHeader;
	IplImage * imgChannel[4] = { 0, 0, 0, 0 };
	IplImage * dist = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);

	if (src) {
		for (int i = 0; i < src->nChannels; i++) {
			imgChannel[i] = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
		}

		// split all the channels (R, G, B, A)
		cvSplit(src, imgChannel[0], imgChannel[1], imgChannel[2], imgChannel[3]);
		for (int i = 0; i < dist->nChannels; i++) {
			cvEqualizeHist(imgChannel[i], imgChannel[i]);
		}
		// merge all the channels
		cvMerge(imgChannel[0], imgChannel[1], imgChannel[2], imgChannel[3], dist);
		Mat resultImage = cvarrToMat(dist, true);
		cvReleaseImage(&dist);
		for (int i = 0; i < src->nChannels; i++) {
			cvReleaseImage(&imgChannel[i]);	// free the per-channel scratch images
		}
		return resultImage;
	}
	else {
		return Mat();
	}
}
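For comparison, a sketch of the same per-channel equalization written against the C++ API alone, which avoids the IplImage round-trip and the manual cvReleaseImage bookkeeping; behavior should match for 8-bit input (histogramEqualizationCpp is an illustrative helper):

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

cv::Mat histogramEqualizationCpp(const cv::Mat& sourceImage)
{
    if (sourceImage.empty())
        return cv::Mat();
    std::vector<cv::Mat> channels;
    cv::split(sourceImage, channels);            // split into individual 8-bit planes
    for (size_t i = 0; i < channels.size(); ++i)
        cv::equalizeHist(channels[i], channels[i]);
    cv::Mat result;
    cv::merge(channels, result);                 // recombine the equalized planes
    return result;
}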
Example #5
void MainWindow::on_pushButtonGetPicture_clicked()
{
    latestMatImage = cameraHandler->requestImage();
    IplImage img = IplImage(latestMatImage);
 //   QImage im("Picture 7.jpg");

    picItem->setPixmap(QPixmap::fromImage(Utilities::IplImageToQImage(&img)));
}
Example #6
bool SCSM::annotate() {
	cvNamedWindow("image", 1);
	cvShowImage("image", &IplImage(img));
	cvSetMouseCallback("image", on_mouse, 0);
	cvWaitKey(0);
    cvDestroyWindow("image");
	return 0;
}
Example #7
IplImage* CvCapture_Android::OutputMap::getIplImagePtr()
{
    if( mat.empty() )
        return 0;

    iplHeader = IplImage(mat);
    return &iplHeader;
}
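This accessor shows the robust version of the pattern: the IplImage header (iplHeader) is a member, so the returned pointer stays valid as long as the OutputMap does. For completeness, a sketch of both conversion directions under the same OpenCV 2.x assumption; cv::cvarrToMat converts back, with an optional deep copy:

#include <opencv2/core/core.hpp>

void roundTrip(cv::Mat& mat)
{
    IplImage header = mat;                            // header only, shares mat's pixels
    cv::Mat shallow = cv::cvarrToMat(&header);        // back to Mat, still no copy
    cv::Mat deep    = cv::cvarrToMat(&header, true);  // deep copy that owns its data
}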
float DLBPClassification(const Mat& imgPatch, LBPDetector& classifier)
{
	Mat grayPatch;

	grayPatch = imgPatch;

	if (imgPatch.channels() == 3)
		cv::cvtColor(imgPatch, grayPatch, CV_BGR2GRAY);

	Mat rsz;
	cv::resize(grayPatch, rsz, Size(sg_tw+2, sg_th+2));
	// use a named header for the resized patch; the original took the address
	// of a temporary and extracted features from the un-resized grayPatch
	IplImage patch = rsz;
	BlockDenseLBP dlbp(&patch, sg_tw, sg_th, sg_bw, sg_bh, sg_sw, sg_sh, 0, 0);
	dlbp.ResetBuffer(&patch);

	const CvRect rect = cvRect(1, 1, sg_tw, sg_th);
	dlbp.ExtractDenseLBP(&patch, rect);

	float score = classifier.CalDetectionScore(dlbp.GetDenseLBPFeature()) - classifier.GetDetectionThreshold();
	return score;
}
Mat CodeBookBackGround::process(Mat inputMat, Mat &foreMat)
{
	Mat temp = inputMat.clone();
	IplImage tempHeader = IplImage(temp);	// named header; &IplImage(temp) would dangle
	IplImage * tempRGB  = &tempHeader;
	IplImage * rawImage = cvCreateImage(cvGetSize(tempRGB), 8, 3);
	cvCvtColor(tempRGB, rawImage, CV_RGBA2BGR);
	if (nframes == 1 && !inputMat.empty())
	{

		yuvImage = cvCloneImage(rawImage);
		// to get an IplImage* from a Mat, do not take the Mat's address directly; convert to an IplImage header first
		ImaskCodeBook = cvCreateImage(cvGetSize(yuvImage), IPL_DEPTH_8U, 1);
		ImaskCodeBookCC = cvCreateImage(cvGetSize(yuvImage), IPL_DEPTH_8U, 1);
		cvSet(ImaskCodeBook, cvScalar(0));

		//cvNamedWindow("Raw", 1);
	//	cvNamedWindow("ForegroundCodeBook", 1);
	//	cvNamedWindow("CodeBook_ConnectComp", 1);
	}
	else{


		cvCvtColor(rawImage, yuvImage, CV_BGR2YCrCb);//YUV For codebook method
		//This is where we build our background model
		if ( nframes - 1 < nframesToLearnBG)	// the first nframesToLearnBG frames only update the model
			cvBGCodeBookUpdate(model, yuvImage);

		if (nframes - 1 == nframesToLearnBG)
			cvBGCodeBookClearStale(model, model->t / 2);	// clear stale codebook entries by age

		//Find the foreground if any
		if (nframes - 1 >= nframesToLearnBG)
		{
			// Find foreground by codebook method
			cvBGCodeBookDiff(model, yuvImage, ImaskCodeBook);	// test each pixel against the codebook model

			// This part just to visualize bounding boxes and centers if desired
			cvCopy(ImaskCodeBook, ImaskCodeBookCC);
			cvSegmentFGMask(ImaskCodeBookCC);	// segment the foreground mask into connected components
		}
		//Display
	//	cvShowImage("Raw", rawImage);
	//	cvShowImage("ForegroundCodeBook", ImaskCodeBook);
	//	cvShowImage("CodeBook_ConnectComp", ImaskCodeBookCC);

	}

	foreMat = ImaskCodeBook;
	++nframes;

//	cout << "codebook frame " << nframes << endl;

	return foreMat;
}
Example #10
	// image-enhancement function ESSR, based on SSR (single-scale Retinex)
	void EnhanceSsr::ESSR(Mat& image, Mat& result)
	{
		IplImage frogHeader = IplImage(image);	// named header; &IplImage(image) would dangle
		IplImage* frog = &frogHeader;
		IplImage* frog1 = cvCreateImage(cvGetSize(frog), IPL_DEPTH_32F, frog->nChannels);
		cvConvertScale(frog, frog1, 1.0 / 255, 0);

		SSR(frog, 30, 2);
		result = Mat(frog, true);	// deep copy; the original built the copy and discarded it
		cvReleaseImage(&frog1);
	}
Example #11
//get from opensift
void FlannMatcher::drawInliers(cv::Mat& image1,cv::Mat& image2,
                                    vector<cv::KeyPoint>& keypoints1,
                                    vector<cv::KeyPoint>& keypoints2,
                                    vector<cv::DMatch>& matches,
                                    vector<cv::DMatch>& bestMatches)
{
    IplImage* stacked_img=NULL;
    IplImage test1=IplImage(image1);
    IplImage test2=IplImage(image2);
    IplImage* tmp_img1=&test1;
    IplImage* tmp_img2=&test2;
    
    stacked_img=stack_imgs(tmp_img1,tmp_img2);
    //convert the C image to a Mat
    cv::Mat mat_img(stacked_img,true); //deep copy
    cvReleaseImage(&stacked_img);      //the C image is no longer needed after the copy
    
    for(unsigned int i=0;i<matches.size();++i)
    {
       cv::Point2f point1;
       cv::Point2f point2;
       
       point1=keypoints1[matches[i].queryIdx].pt;
       point2.x=keypoints2[matches[i].trainIdx].pt.x;
       point2.y=keypoints2[matches[i].trainIdx].pt.y+image1.rows;
       cv::line(mat_img,point1,point2,CV_RGB(255,0,0), 1, 8, 0);
    }
    
     for(unsigned int i=0;i<bestMatches.size();++i)
    {
       cv::Point2f point1;
       cv::Point2f point2;
       
       point1=keypoints1[bestMatches[i].queryIdx].pt;
       point2.x=keypoints2[bestMatches[i].trainIdx].pt.x;
       point2.y=keypoints2[bestMatches[i].trainIdx].pt.y+image1.rows;
       cv::line(mat_img,point1,point2,CV_RGB(0,255,0), 1, 8, 0);
    }
    
    cv::imshow("ransac inliers",mat_img);
}                                    
void BlobExtractor_Processor::onNewImage() {
	LOG(LTRACE) << "BlobExtractor_Processor::onNewImage() called!\n";

	Common::Timer timer;
	timer.restart();

	cv::Mat in = in_img.read();
	in.convertTo(img_uchar, CV_8UC1);
	IplImage ipl_img = IplImage(img_uchar);
//  cv::Mat mat_img = img_uchar;
//	cv::Mat out = cv::Mat::zeros(in.size(), CV_8UC3);

	Types::Blobs::Blob_vector res;
	bool success;

	try
	{
		success = ComponentLabeling( &ipl_img, NULL, props.bkg_color, res );
	}
	catch(...)
	{
		success = false;
		LOG(LWARNING) << "blob find error\n";
	}

	try {
		if( !success ) {
			LOG(LERROR) << "Blob find error\n";
		} else {
			LOG(LTRACE) << "blobs found";
			Types::Blobs::BlobResult result(res);

			result.Filter( result, B_EXCLUDE, Types::Blobs::BlobGetArea(), B_LESS, min_size );

			out_blobs.write(result);
			LOG(LTRACE) << "blobs written";
			newBlobs->raise();
			LOG(LTRACE) << "blobs sent";
		//	result.draw(out, CV_RGB(255, 0, 0), 0, 0);
		//	out_img.write(in);
		//	newImage->raise();
		}

		LOG(LINFO) << "Blobing took " << timer.elapsed() << " seconds\n";
	}
	catch(...)
	{
		LOG(LERROR) << "BlobExtractor onNewImage failure";
	}
}
void CHandGestureRecognitionSystemDlg::PlayImage(Mat& image, int ID, CRect& rect)
{
    // get HDC
    CDC* pDC = GetDlgItem(ID)->GetDC(); // get the display control's DC
    HDC hDC = pDC ->GetSafeHdc();       // get the HDC (device handle) for drawing
    // Mat to IplImage
    IplImage img = IplImage(image);
    // IplImage to CvvImage
    CvvImage cimg;
    cimg.CopyOf(&img);                  // copy the image
    cimg.DrawToHDC(hDC, &rect);         // draw into the control's target rectangle
    // release CDC
    ReleaseDC(pDC);
}
bool SparseRec2View::detect()
{
	double tt = (double)getTickCount();
	detector->detect(igrey1, key1);
	detector->detect(igrey2, key2);
	TagI("key number for image 1 = %d\n",(int)key1.size());
	TagI("key number for image 2 = %d\n",(int)key2.size());
	tt = (double)getTickCount() - tt;
	TagI("detect time = %lf ms\n", tt/getTickFrequency()*1000.0);

	//draw
	for(int i=0; i<(int)key1.size(); ++i) {
		int radius = cvRound(key1[i].size*1.2/9.*2);
		circle(img1, key1[i].pt, 2, CV_BLUE, 1);
		circle(img1, key1[i].pt, radius, CV_RED, 1);
	}
	for(int i=0; i<(int)key2.size(); ++i) {
		int radius = cvRound(key2[i].size*1.2/9.*2);
		circle(img2, key2[i].pt, 2, CV_BLUE, 1);
		circle(img2, key2[i].pt, radius, CV_RED, 1);
	}

	//combined
	int cw = img1.cols + img2.cols;
	int ch = std::max(img1.rows, img2.rows);
	combined.create(ch, cw, img1.type());

	IplImage imgl = IplImage(combined);
	cvSetImageROI(&imgl, cvRect(0,0,img1.cols,img1.rows));
	IplImage left = IplImage(img1);		// named headers; &IplImage(img1) is a temporary
	cvCopy(&left, &imgl);

	IplImage imgr = IplImage(combined);
	cvSetImageROI(&imgr, cvRect(img1.cols-1,0,img2.cols,img2.rows));
	IplImage right = IplImage(img2);
	cvCopy(&right, &imgr);

	return true;
}
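The ROI copies above can also be written without IplImage headers; a sketch using Mat ROIs and copyTo, assuming both inputs share one type (stackSideBySide is an illustrative helper, not part of SparseRec2View):

#include <algorithm>
#include <opencv2/core/core.hpp>

cv::Mat stackSideBySide(const cv::Mat& img1, const cv::Mat& img2)
{
    int cw = img1.cols + img2.cols;
    int ch = std::max(img1.rows, img2.rows);
    cv::Mat combined = cv::Mat::zeros(ch, cw, img1.type());
    // copy each image into its region of the combined canvas
    img1.copyTo(combined(cv::Rect(0, 0, img1.cols, img1.rows)));
    img2.copyTo(combined(cv::Rect(img1.cols, 0, img2.cols, img2.rows)));
    return combined;
}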
void operation(framedeal &Frame)
{
	Mat *frame = NULL;
	Frame.smoke.frame  = Frame.variable.frame.clone() ; 
	frame = &(Frame.smoke.frame);
	resize(*frame, *frame, Size(960, 540));
	try	// handle unexpected runtime errors
	{
		if(!frame->empty()) 
		{
		//	Frame.smoke.alarmCap = Mat(frame->rows, frame->cols, CV_8UC3);	// 3 channels  z++
			frame->copyTo(Frame.smoke.alarmCap);                                       //z++

			// smoke detection
			if(Frame.smoke.periodDetectable && Frame.smoke.detectable)
			{
				vector<Rect> smokeRegion = detectSmoke(frame,Frame);	// detect smoke regions
				
				if(smokeRegion.size() > 0) 
				{
					Frame.smoke.alarmIs = true;

					for(unsigned int i = 0;i < smokeRegion.size();i++) 
					{
					//	rectangle(smokeAlarmCap, smokeRegion[i], Scalar(0, 0, 255),2, 8, 0);//red
						rectangle(Frame.smoke.alarmCap, smokeRegion[i], Scalar(0, 0, 255), 2, 8, 0);
					}
				}
			}
			else
			{
				Frame.smoke.alarmIs = false;
			}
			// post-processing after all detection steps
			IplImage iplImage = Frame.smoke.alarmCap.operator IplImage();
			//imshow("SmokeDetector", Frame.smoke.alarmCap);
		//	Frame.smoke.alarmCap.release();
		}

	}
	catch(const Exception&)	// swallow OpenCV exceptions
	{

	}

}
Example #16
int main()
{
#if 1
	cv::Mat dst;
	
	cv::VideoCapture cap(0);
	cv::Mat frame;
	cv::waitKey(1000);
	
	//cv::namedWindow("removeLightEffect",CV_WINDOW_NORMAL | CV_WINDOW_KEEPRATIO );
	cv::Mat_<cv::Vec3d> src;
	
	if(cap.isOpened())
	{
		int push_key=0;
		cap>>frame;
		double* w=new double[3];
		w[0]=0.3333;w[1]=0.3333;w[2]=0.3333;
		double * sigma=new double[3];
		sigma[0]=15;sigma[1]=60;sigma[2]=120;
		/*cv::Mat_<cv::Vec3b> result;
		
		result.create(cv::Size(frame.cols,frame.rows));
		dst.create(cv::Size(frame.cols,frame.rows),CV_64FC3);
		src.create(cv::Size(frame.cols,frame.rows));*/
		cv::imshow("src",frame);
		
		while(cap.read(frame))
		{
			if(frame.empty())
				continue;
			if(push_key==27)
				break;
			IplImage imgHeader = IplImage(frame);	// named header; &IplImage(frame) would dangle
			IplImage* img_ = &imgHeader;
			//Retinex(img_,15);
			MultiScaleRetinexCR(img_,3,w,sigma);
			cv::Mat dst(img_);

			
			cv::imshow("Retinx",dst);
			push_key=cv::waitKey(30);
			
		}

		delete[] w;
		delete[] sigma;
	}
#endif
	return 0;
}
Example #17
void CImageProcess::SIFTInstance(IplImage *pImage, const char *pName)
{
	cv::Mat src(pImage, 0);
	Vector<Keypoint> features;
	
	CTeatures sift;
	sift.Sift(src, features, 1.6);
	sift.DrawKeyPoints(src, features);
	sift.DrawSiftFeatures(src, features);

	char des[256] = {0};
	sprintf_s(des, sizeof(des), ".\\%s_descriptor.txt", pName);
	sift.write_features(features, des);

	IplImage imgHeader = IplImage(src);
	IplImage *pImg = &imgHeader;	// named header; &IplImage(src) would dangle
	

	/*cvNamedWindow(pName, CV_WINDOW_AUTOSIZE);
	cvShowImage(pName,pImg);*/
	//imshow(pName, src);
}
bool MS_Barcode_Decide::onStep()
{
	blobs_ready = hue_ready = false;

	try {
		int i;
		IplImage h = IplImage(hue_img);
		Types::Blobs::Blob *currentBlob;
		Types::DrawableContainer signs;

		// iterate through all found blobs
		for (i = 0; i < blobs.GetNumBlobs(); i++ )
		{
			currentBlob = blobs.GetBlob(i);

			// get mean color from the area covered by the blob (from the hue component)
			double me = currentBlob->Mean(&h);
			double st = currentBlob->StdDev(&h);

			// get blob bounding rectangle and ellipse
			//CvBox2D be = currentBlob->GetEllipse();
			cv::Rect bb = currentBlob->GetBoundingBox();

			signs.add(new Types::Rectangle(bb.x, bb.y, bb.width, bb.height));

		}

		out_signs.write(signs);

		newImage->raise();

		return true;
	} catch (...) {
		LOG(LERROR) << "MS_Sign_Decide::onNewImage failed\n";
		return false;
	}
}
Example #19
bool ExtractBlocks_Processor::onStep()
{
	LOG(LTRACE) << "ExtractBlocks_Processor::step\n";
  
	blobs_ready = hue_ready = false;

	try {
		int id = 0;
		int i;
		IplImage h = IplImage(hue_img);
		Types::Blobs::Blob *currentBlob;
		Types::DrawableContainer blocks;

		// iterate through all found blobs
		for (i = 0; i < blobs.GetNumBlobs(); i++ )
		{
			currentBlob = blobs.GetBlob(i);

			// get blob bounding rectangle
			CvRect r2 = currentBlob->GetBoundingBox();

			++id;

			blocks.add(new Types::Rectangle(r2.x, r2.y, r2.width, r2.height));

			out_blocks.write(blocks);

			newImage->raise();
		}

		return true;
	} catch (...) {
		LOG(LERROR) << "ExtractBlocks_Processor::onStep failed\n";
		return false;
	}
}
void ComputeDLBP(const Mat& imagePatch, vector<float>& hist)
{
	float epsilon = 1e-2;
	// Whether has already been verified
	
	Mat rszI, grayI;
	if (!imagePatch.data) return;

	Mat grayPatch;
	if (imagePatch.channels() == 3) {
		cv::cvtColor(imagePatch, grayPatch, CV_BGR2GRAY);
	} else {
		grayPatch = imagePatch;
	}

	resize(grayPatch, rszI, Size(sg_tw+2, sg_th+2));
	IplImage patch = rszI.operator IplImage();	
	BlockDenseLBP dlbp((&patch), 32, 32, 32, 32, 16, 16, 0, 0);
	dlbp.ResetBuffer((&patch));
	const CvRect rect = cvRect(1, 1, sg_tw, sg_th);
	dlbp.ExtractDenseLBP(&patch, rect);

	hist = std::vector<float>(dlbp.GetDenseLBPFeature(), dlbp.GetDenseLBPFeature() + dlbp.GetDimension());	
}
Example #21
DWORD WINAPI video_test(void* data) {
	MyFrame* f = (MyFrame*)data;
	wxClientDC dc(f->left_bottom);
	cap.open(0);
	cv::Mat img;
	cap >> img;

	Sleep(2000);
	initFrame->Hide();
	// start displaying
	for (int i = 0; i < 50; i++) {
		f->SetTransparent(i + 206);
		Sleep(i / 10);
	}

	while (cap.read(img) && !img.empty()) {
		IplImage image = img.operator IplImage();
		wxImage wximg = wx_from_cv(&image);
		wximg.Rescale(320, 240);
		dc.DrawBitmap(wxBitmap(wximg), wxDefaultPosition);
	}
	
	return 0;
}
//color images
//remember to change the number of features to 12
void makeFrgb(const Mat& img, vector<Mat>& F, config_SystemParameter *param){
	CV_Assert(F.size() == param->numFeature);
	CV_Assert(img.channels() == 3);
	//for now; this assert should eventually be removed
	CV_Assert(param->numFeature == 12);
	
	Mat greyImg,greyAux;
	cvtColor(img, greyImg, CV_RGB2GRAY);
	equalizeHist(greyImg, greyAux );
	greyAux.convertTo(greyImg, CV_32F);
	//greyAux.release();
	
	
	vector<Mat> Mrgb;
	split(img, Mrgb);
	
	//initialize the feature vector with float matrices
	for (int i=0; i<param->numFeature; i++) {
		F[i]  = Mat(Mrgb[2].rows,Mrgb[2].cols,CV_32F);
	}
	
	//create the X and Y coordinate matrices
	for(int i = 0; i < F[0].rows; i++)
	{
		float* MatF0i = F[0].ptr<float>(i);
		float* MatF1i = F[1].ptr<float>(i);
		
		for(int j = 0; j < F[0].cols; j++){
			MatF0i[j] = (float)j;
			MatF1i[j] = (float)i;
		}
	}
	
	//RGB matrices, but in float format
	
	greyAux.release();
	Mrgb[0].convertTo(F[2], CV_32F);
	equalizeHist(Mrgb[1], greyAux );
	greyAux.convertTo(F[3], CV_32F);
	greyAux.release();
	equalizeHist(Mrgb[2], greyAux );
	greyAux.convertTo(F[4], CV_32F);
	
	
	//first derivative
	cv::Ptr<FilterEngine> Fc = createDerivFilter(F[2].type(), CV_32F, 1, 0, 3);
	Fc->apply(greyImg,F[5], cv::Rect(0,0,-1,-1),cv::Point(0,0));
	F[5] = abs(F[5]);
	
	Fc = createDerivFilter(F[2].type(), CV_32F, 0, 1, 3);
	Fc->apply(greyImg,F[6], cv::Rect(0,0,-1,-1),cv::Point(0,0));
	F[6] = abs(F[6]);
	
	//magnitude of the derivative
	magnitude(F[5], F[6], F[7]);
	
	//second derivative
	Fc = createDerivFilter(F[2].type(), CV_32F, 2, 0, 3);
	Fc->apply(greyImg,F[8], cv::Rect(0,0,-1,-1),cv::Point(0,0));
	F[8] = abs(F[8]);
	
	Fc = createDerivFilter(F[2].type(), CV_32F, 0, 2, 3);
	Fc->apply(greyImg,F[9], cv::Rect(0,0,-1,-1),cv::Point(0,0));
	F[9] = abs(F[9]);
	
	//phase of the first derivative in x and y
	phase(F[5], F[6], F[10]);
	
	//saliency
	
	IplImage srcImg, *dstImg;
	
	srcImg = IplImage(greyAux);
	dstImg = cvCreateImage(cvSize(srcImg.width, srcImg.height), 8, 1);
	Saliency *saliency = new Saliency;
	
	saliency->calcIntensityChannel(&srcImg, dstImg);
	
	F[11] = Mat(dstImg);
	Mat aux(Mrgb[2].rows,Mrgb[2].cols,CV_32F);
	F[11].convertTo(aux,F[9].type());
	F[11] = aux;
	cvReleaseImage(&dstImg);	// aux owns its own data, so the C image can be freed
	
	greyAux.release();
	greyImg.release();
	delete saliency;
	
}
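createDerivFilter and FilterEngine were semi-internal OpenCV 2.x machinery; a sketch of the same first-derivative features using the public cv::Sobel, which applies the same separable derivative kernels (border handling may differ slightly; firstDerivativeFeatures is an illustrative helper):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

void firstDerivativeFeatures(const cv::Mat& grey,
                             cv::Mat& dx, cv::Mat& dy, cv::Mat& mag)
{
    cv::Sobel(grey, dx, CV_32F, 1, 0, 3);  // d/dx with a 3x3 Sobel kernel, as F[5] above
    cv::Sobel(grey, dy, CV_32F, 0, 1, 3);  // d/dy, as F[6] above
    dx = cv::abs(dx);
    dy = cv::abs(dy);
    cv::magnitude(dx, dy, mag);            // gradient magnitude, as F[7] above
}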
int main(int argc, char **argv) {

    ros::init(argc, argv, APP_NAME);
    ros::NodeHandle nh;

    cv::namedWindow(APP_NAME);
    cv::startWindowThread();

    image_transport::ImageTransport it(nh);
    image_transport::Subscriber sub = it.subscribe("/video", 1, imageCallback);

    ros::ServiceServer addColor_service = nh.advertiseService("addColor", addColor);
    ros::ServiceServer addPercept_service = nh.advertiseService("addPercept", addPercept);
    ros::Publisher percepts_pub = nh.advertise<obj_rec::Percepts>("/percepts", 100);


    // Color table management
    colorTable.setColorClassSpecifics( 0, cv::Scalar(0,0,0), false);
    for( int i = 1; i < 7; i++) {
      colorTable.setColorClassSpecifics( i, getColorFromId(i), true);
    }
    colorTable.setColorClassSpecifics( 7, cv::Scalar(255,255,255), false);


    // The main recognition loop
    ros::Rate loop_rate(100);
    
    while (ros::ok()) {
        if (!frame.empty()) {
            boost::mutex::scoped_lock lock(data_locker);
            cv::Mat image, seg;
            frame.copyTo(image);
            frame.copyTo(seg);
            // cv::rectangle(seg,cv::Point(0,0), cv::Point(seg.cols, seg.rows), cv::Scalar(0,0,0), -1);

            IplImage _lastImage = IplImage(image);
            IplImage * lastImage = &_lastImage;

            IplImage _segImage = IplImage(seg);
            IplImage * segImage = &_segImage;

            colorTable.segment(lastImage, segImage, SEG_X, SEG_Y);	// segment using the current CT

            // Recognition
            std::list < Blob > blobs;
            blobs = Blob::extractBlobs( lastImage, colorTable, MERGING_DISTANCE_X, MERGING_DISTANCE_Y);

            Blob::drawBlobs( colorTable, blobs, segImage, MIN_AREA);

            objs = Object::extractObjects( blobs, MIN_AREA, MERGING_DISTANCE_X *2, MERGING_DISTANCE_Y *2);

            recognized.clear();
            recognized = Object::recognizeObjects( objs, objects_memory);

            obj_rec::Percepts percepts_msg;

            RecognizedObjects::iterator feat_it;
            for( feat_it = recognized.begin(); feat_it != recognized.end(); feat_it++) {
                CvScalar color = getColorFromId(feat_it->first);

                Object::drawObjects( feat_it->second, segImage, color);

                for( Objects::iterator obj_it = feat_it->second.begin();
                     obj_it != feat_it->second.end();
                     obj_it++) {
                    // compose msg
                    obj_rec::Percept percept_msg;

                    percept_msg.id = feat_it->first;
                    percept_msg.u = obj_it->getAvg().x;
                    percept_msg.v = obj_it->getAvg().y;
                    percept_msg.width = obj_it->getBottomRight().x - obj_it->getTopLeft().x;
                    percept_msg.height = obj_it->getBottomRight().y - obj_it->getTopLeft().y;
                    percept_msg.area = obj_it->getArea();

                    percepts_msg.percepts.push_back(percept_msg);
                }
            }

            percepts_msg.header = header;
            percepts_pub.publish(percepts_msg);

            cv::imshow(APP_NAME, seg);

        }

        ros::spinOnce();
        loop_rate.sleep();
    }
    
    cv::destroyWindow(APP_NAME);
}
void ColorGuiFrame::DrawImage(const sensor_msgs::ImageConstPtr& msg)
{
  IplImage cvImageRef, *cvImage;
	CvSize size;

	const sensor_msgs::Image img = *msg;

	// Get the image as an RGB image
        cv_bridge::CvImagePtr image_ptr = cv_bridge::toCvCopy(msg, "rgb8");
        cvImageRef = IplImage(image_ptr->image);
        cvImage = &cvImageRef;

	size = cvGetSize(cvImage);

  if (width_ != size.width || height_ != size.height)
  {
    if (rgb_image_)
      delete[] rgb_image_;
    rgb_image_ = new unsigned char[size.width * size.height * 3];

    if (uyvy_image_)
      delete[] uyvy_image_;
    uyvy_image_ = new unsigned char[size.width * size.height * 2];

    if (!(vision_->initialize(size.width, size.height)))
    {
      width_ = height_ = 0;
      ROS_ERROR("Vision init failed.");
      return;
    }
  }

  width_ = size.width;
  height_ = size.height;

  memcpy(rgb_image_, cvImage->imageData, width_ * height_ * 3);

  // Convert image to YUV color space
  rgb2uyvy(rgb_image_, uyvy_image_, width_ * height_);

	// Find the color blobs
	if (!vision_->processFrame(reinterpret_cast<image_pixel*> (uyvy_image_)))
	{
		ROS_ERROR("Frame error.");
		return;
	}

  int xsrc = (scale_pos_x_*scale_) - scale_pos_x_;
  int ysrc = (scale_pos_y_*scale_) - scale_pos_y_;

  wxImage image(width_, height_, rgb_image_, true);
  image.Rescale(width_*scale_,height_*scale_);

  wxBitmap bitmap(image);

  wxMemoryDC memDC;
  memDC.SelectObject(bitmap);

  wxClientDC dc(image_panel_);
  if (xsrc < 0 || ysrc < 0)
    dc.Clear();

  dc.Blit(0,0, 640, 480, &memDC, xsrc, ysrc);

	// Get all the blobs
	for (int ch = 0; ch < CMV_MAX_COLORS; ++ch)
	{
		// Get the descriptive color
		rgb c = vision_->getColorVisual(ch);

		// Grab the regions for this color
		CMVision::region* r = NULL;

		for (r = vision_->getRegions(ch); r != NULL; r = r->next)
		{
      dc.SetBrush(*wxTRANSPARENT_BRUSH);
      int x1 = (r->x1*scale_) - xsrc;
      int y1 = (r->y1*scale_) - ysrc;
      int x2 = (r->x2*scale_) - xsrc;
      int y2 = (r->y2*scale_) - ysrc;

      int w = x2 - x1;
      int h = y2 - y1;

      dc.DrawRectangle(x1, y1, w, h);
		}
	}

  int x, y;
  GetPosition(&x, &y);
// Setting size is commented out because it breaks the gui:
// Text boxes become invisible.
//  SetSize(x,y, width_, height_+80);
}
Example #25
int main (int argc, char **argv)
{

    
    char* ntry = (char*)"";
    if (argc > 1) {
        ntry = argv[1];
    }

    double fps = FPS;
    double target_dur = 1.0/fps;
    double tol = 1.0e-3;
    double total_dur = 0.0;

    dc1394_t * d = dc1394_new(); 
    if (!d) {
        return 1;
    }
    dc1394camera_list_t * list;
    dc1394error_t err = dc1394_camera_enumerate (d, &list);
    DC1394_ERR_RTN(err,"Failed to enumerate cameras");
    if (list->num == 0) {                                                  /* Verify that we have at least one camera */
        dc1394_log_error("No cameras found");
        return 1;
    }

    gCamera.init(d, list->ids[0].guid);
    if (!gCamera.cam()) {
        dc1394_log_error("Failed to initialize camera with guid %ld", list->ids[0].guid);
        dc1394_camera_free_list (list);

        return 1;
    }
    dc1394_camera_free_list (list);

    /*-----------------------------------------------------------------------
     *  have the camera start sending us data
     *-----------------------------------------------------------------------*/
    err = gCamera.start_transmission();
    DC1394_ERR_CLN_RTN(err,cleanup_and_exit(gCamera),"Could not start camera iso transmission");

    
    /*-----------------------------------------------------------------------
     *  capture one frame
     *-----------------------------------------------------------------------*/
    uint32_t width = 0;
    uint32_t height = 0;
    gCamera.get_image_size(&width, &height);
    cv::Mat mapping = cv::getRotationMatrix2D(cv::Point2f(width/2.0, height/2.0), 180.0, 1.0);

#ifdef USE_SDL
    static char *var = (char*)"SDL_VIDEO_WINDOW_POS=\"1280,480\"";
    int ret = putenv(var);
    
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        std::cerr << "DC1394: Unable to initialize SDL: " <<  SDL_GetError() << std::endl;
        return 1;
    }
    atexit(SDL_Quit);
    SDL_Surface *screen;
    screen = SDL_SetVideoMode(width, height, 24, SDL_HWSURFACE);
    if (screen == NULL) {
        std::cerr << "DC1394: Unable to set SDL video mode:" << SDL_GetError() << std::endl;
    }
    SDL_Event event;
#endif

    pthread_t save_thread, acq_thread;
#ifndef LICKOMETER    
    pthread_create( &save_thread, NULL, &thread_save_image, NULL);
#endif

    pthread_create( &acq_thread, NULL, &thread_acq_image, NULL);

    timespec t_sleep, t_rem;
    t_sleep.tv_sec = 0;
    t_sleep.tv_nsec = 1000;
    
#ifndef STANDALONE
    int s;
    if ((s = socket(SOCKTYPE, SOCK_STREAM, 0)) < 0) {
        perror("DC1394: client: socket");
        cleanup_and_exit(gCamera);
        return 1;
    }

    /*
     * Create the address we will be connecting to.
     */
#ifndef INET
    sockaddr_un sa;
    sa.sun_family = AF_UNIX;

    std::ostringstream tmpfn;
    tmpfn << "fwsocket" << ntry;
    std::cout << "DC1394: socket name " << tmpfn.str() << std::endl;
    
    int nameLen = strlen(tmpfn.str().c_str());
    if (nameLen >= (int) sizeof(sa.sun_path) -1) { /* too long? */
        cleanup_and_exit(gCamera);
        return 1;
    }
    
    sa.sun_path[0] = '\0';  /* abstract namespace */
    strcpy(sa.sun_path+1, tmpfn.str().c_str());
    int len = 1 + nameLen + offsetof(struct sockaddr_un, sun_path);
#else
    sockaddr_in sa;
    bzero((char *) &sa, sizeof(sa));
    sa.sin_family = AF_INET;
    hostent *server = gethostbyname("128.40.156.129");
    bcopy((char *)server->h_addr, 
          (char *)&sa.sin_addr.s_addr,
          server->h_length);
    sa.sin_port = htons(35000);
    int len = sizeof(sa);
#endif    
    /*
     * Try to connect to the address.  For this to
     * succeed, the server must already have bound
     * this address, and must have issued a listen()
     * request.
     *
     * The third argument indicates the "length" of
     * the structure, not just the length of the
     * socket name.
     */
    std::cout << "DC1394: Waiting for connection... " << std::flush;
    while (true) {
        // wait for connection:
        if (connect(s, (sockaddr*)&sa, len) < 0) {
            nanosleep(&t_sleep, &t_rem);
        } else {
            break;
        }
    }
    std::cout << "done" << std::endl;
    bool connected = false;
    std::vector<char> data(BUFSIZE);
    int nrec = recv(s, &data[0], data.size(), 0);
    std::string datastr(data.begin(), data.end());
    if (nrec<=0) {
        std::cerr << "DC1394: Didn't receive start message; exiting now" << std::endl;
        cleanup_and_exit(gCamera);
	close(s);
        return 1;
    }
    connected = true;
    
    std::string ready = "ready";
    while (send(s, ready.c_str(), ready.size(), 0) < 0) {
        perror("DC1394: client: send");
    }

    int flags = 0;
    if (-1 == (flags = fcntl(s, F_GETFL, 0)))
        flags = 0;

    if (fcntl(s, F_SETFL, flags | O_NONBLOCK)==-1) {
        perror("DC1394: client: unblock");
    }
#endif
    
    /* pthread_mutex_lock( &camera_mutex );
       gCamera.wait_for_trigger();
       pthread_mutex_unlock( &camera_mutex );

       Wait for acq_frame_buffer to fill instead
    */
    

    int ncount = 0;
    cv::Mat im(cv::Size(width, height), CV_8UC1);
    cv::Mat thresh = cv::Mat::ones(cv::Size(width, height), CV_8UC1);
    cv::Mat prevs(cv::Size(width, height), CV_8UC1);
    cv::Mat gray(cv::Size(width, height), CV_8UC1);
    
    // wait for image:
    int nframes = get_image(im, mapping, false, -1, "", ncount);
    std::cout << "DC1394: Waiting for first image to arrive... " << std::flush;
    int nwait = 0;
    while (!nframes) {
        nanosleep(&t_sleep, &t_rem);
        std::cout << "." << std::flush;
        nframes = get_image(im, mapping, false, -1, "", ncount);
        nwait++;
#ifdef STANDALONE
	if (nwait > 1000) {
#else
	if (nwait > 100000) {
#endif
            std::cout << "Time out, stopping now\n";
            cleanup_and_exit(gCamera);
	}
    }
    timespec time0;
    clock_gettime(CLOCK_REALTIME, &time0);
    std::cout << "DC1394: image arrived: "
              << IplImage(im).depth << " bits, "
              << IplImage(im).nChannels << " channels, "
              << IplImage(im).widthStep << " step width"  << std::endl;

#ifdef USE_SDL
    SDL_Surface *surface =
        SDL_CreateRGBSurfaceFrom((void*)im.data,
                                 im.cols,
                                 im.rows,
                                 IplImage(im).depth*IplImage(im).nChannels,
                                 IplImage(im).widthStep,
                                 0xffffff, 0xffffff, 0xffffff, 0);
    screen = SDL_GetVideoSurface();
    if(SDL_BlitSurface(surface, NULL, screen, NULL) == 0)
        SDL_UpdateRect(screen, 0, 0, 0, 0);
#else
    cv::namedWindow("DC1394", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("DC1394", 1280, 480);

    cv::imshow("DC1394", im);
#endif

    timespec time1 = time0;
    timespec time2 = time0;
    timespec time3 = time0;
    timespec time4 = time0;
    timespec t_disconnect = time0;
    timespec t_notrigger = time0;

#ifdef STANDALONE
    int s = -1;
#endif

    std::string fn = "";
#ifdef LICKOMETER
    std::string fn_lick = "";
    FILE* fp_lick = NULL;
#endif
    int key = 0;
    int nloop = 0;
    while (true) {
        clock_gettime( CLOCK_REALTIME, &time1);
#ifndef STANDALONE
        std::vector<char> data(BUFSIZE);
        int nrec = recv(s, &data[0], data.size(), 0);
        std::string datastr(data.begin(), data.end());
#endif

        int nnewframes = get_image(im, mapping, false, s, fn, ncount);
        nframes += nnewframes;

#ifndef STANDALONE

        // no update from blender in a long time, terminate process
        if (datastr.find("1")==std::string::npos) {
            if (connected) {
                t_disconnect = time1;
                connected = false;
            } else {
                if (tdiff(time1, t_disconnect) > TIMEOUT) {
                    std::cout << "DC1394: Received termination signal" << std::endl;
                    close(s);
                    pthread_cancel(acq_thread);
                    pthread_cancel(save_thread);
                    return 0;
                }
            }
        } else {
            connected = true;
        }

	/* Explicit termination */
        if (datastr.find("quit")!=std::string::npos) {
            std::cout << "DC1394: Game over signal." << std::endl;
            std::string sclose = "close";
            while (send(s, sclose.c_str(), sclose.size(), 0) < 0) {
                perror("DC1394: client: send");
            }
            close(s);
            pthread_cancel(acq_thread);
            pthread_cancel(save_thread);
            return 0;
        }

        // Stop recording
        if (datastr.find("stop") != std::string::npos && fn != "") {
            fn = "";
#ifdef LICKOMETER
	    fn_lick = "";
	    if (fp_lick) {
                fclose(fp_lick);
		fp_lick = NULL;
            }
#endif
            std::cout << "DC1394: Stopping video" << std::endl;
            connected = true;
            ncount = 0;
        }

        // Start recording
        if (datastr.find("avi") != std::string::npos && datastr.find("stop") == std::string::npos && fn == "") {
            std::size_t startpos = datastr.find("begin")+5; 
            std::size_t endpos = datastr.find("end") - datastr.find("begin") - 5; 
            fn = datastr.substr(startpos, endpos);
            fn = std::string(trunk) + "data/" + fn;
#ifdef LICKOMETER
	    fn_lick = fn + "_lick";
	    fp_lick = fopen(fn_lick.c_str(), "wb");
            std::cout << "DC1394: Recording lick detection, writing to " << fn_lick << std::endl;
#else
            boost::filesystem::path path(fn);
            boost::filesystem::path writepath(path);

            // Test whether dir exists:
            if (!boost::filesystem::exists(writepath)) {
                std::cout << "DC1394: Creating directory " << writepath << std::endl;
                boost::filesystem::create_directories(writepath);
            }
            fn += "/";

            /* check save frame buffer */
            std::size_t nfb = save_frame_buffer.size();
            if (nfb)
                std::cerr << "DC1394: Frame buffer isn't empty!" << std::endl;

            std::cout << "DC1394: Starting video, writing to " << fn << std::endl;
            connected = true;
            ncount = 0;
#endif
        }
#endif // #nstandalone

#ifdef USE_SDL
        if (SDL_PollEvent(&event)) {
#ifdef STANDALONE
            /* Any of these event types will end the program */
            if (event.type == SDL_QUIT
                || event.type == SDL_KEYDOWN
                || event.type == SDL_KEYUP) {
                std::cout << std::endl;
                std::cout << std::endl << "DC1394: Total number of frames was " << nframes << std::endl;
                std::cout << std::endl << "DC1394: Frame buffer: " << acq_frame_buffer.size() << " frames left" << std::endl;
                close(s);
                pthread_cancel(acq_thread);
                pthread_cancel(save_thread);
                return 0;
            }
#endif // STANDALONE
        }
        surface->pixels = (void*)im.data;
        // SDL_CreateRGBSurfaceFrom((void*)IplImage(im).imageData,
        //                          IplImage(im).width,
        //                          IplImage(im).height,
        //                          IplImage(im).depth*IplImage(im).nChannels,
        //                          IplImage(im).widthStep,
        //                          1, 1, 1, 0);
        screen = SDL_GetVideoSurface();
        if(SDL_BlitSurface(surface, NULL, screen, NULL) == 0)
            SDL_UpdateRect(screen, 0, 0, 0, 0);
#else // not SDL
        key = cv::waitKey(2);
        cv::imshow("DC1394", im);
        if (key == 1114155 || key == 65579 || key==43 /*+*/) {
            uint32_t gain = 0;
            err = dc1394_feature_get_value(gCamera.cam(), DC1394_FEATURE_GAIN, &gain);
            DC1394_ERR_CLN_RTN(err,cleanup_and_exit(gCamera),"Can't get gain");
            if (gain < gCamera.get_maxgain()-10) {
                gain += 10;
                pthread_mutex_lock( &camera_mutex );
                err = dc1394_feature_set_value(gCamera.cam(), DC1394_FEATURE_GAIN, gain);
                pthread_mutex_unlock( &camera_mutex );
                std::cout << "DC1394: New gain value: " << gain << std::endl;
                DC1394_ERR_CLN_RTN(err,cleanup_and_exit(gCamera),"Can't set gain");
            }
        }
        if (key == 1114207 || key == 45 /*-*/) {
            uint32_t gain = 0;
            err = dc1394_feature_get_value(gCamera.cam(), DC1394_FEATURE_GAIN, &gain);
            DC1394_ERR_CLN_RTN(err,cleanup_and_exit(gCamera),"Can't get gain");
            if (gain > gCamera.get_mingain()+10) {
                gain -= 10;
                pthread_mutex_lock( &camera_mutex );
                err = dc1394_feature_set_value(gCamera.cam(), DC1394_FEATURE_GAIN, gain);
                pthread_mutex_unlock( &camera_mutex );
                DC1394_ERR_CLN_RTN(err,cleanup_and_exit(gCamera),"Can't set gain");
            }
        }
#endif // not SDL

#ifdef LICKOMETER        
	/* IS THIS ALL YOU NEED THEN?
	   Lick detection */
	/* Not required because the captured image is already gray
	   cv::Mat gray = bgr2gray(im); */
	gray = thresholding(im, LICK_FRAME_THRESHOLD);

        if (nloop != 0) {
	    cv::absdiff(prevs, gray, thresh);
	    double pixel_sum_thresh = cv::sum(thresh)[0];
	    double pixel_sum_gray = cv::sum(gray)[0];
	    if (pixel_sum_thresh > LICK_SUM_THRESHOLD) {
	      std::cout << "DC1394: Lick" << std::endl;
	    }
	    if (fp_lick != NULL) {
                fwrite(&pixel_sum_thresh, sizeof(pixel_sum_thresh), 1, fp_lick);
	        fwrite(&pixel_sum_gray, sizeof(pixel_sum_gray), 1, fp_lick);
	    }
	}

	prevs = gray.clone();
	nloop++;
#endif
#ifdef STANDALONE
        if (key == 1048689 || key == 113 /*q*/) {
            std::cout << "DC1394: Mean frame rate was " << nframes/total_dur << " fps" << std::endl;
            pthread_cancel(acq_thread);
            pthread_cancel(save_thread);
            return 0;
        }
        if (key == 1048691 /*s*/) {
            fn = "";
            std::cout << "DC1394: Stopping video" << std::endl;
            ncount = 0;
        }
        if (key == 1048690 /*r*/) {
            fn = trunk + std::string("tmp/");
            std::cout << "DC1394: Starting video, writing to " << fn << std::endl;
            ncount = 0;
        }
#endif // #standalone
        clock_gettime( CLOCK_REALTIME, &time2);
        double loop_dur = tdiff(time2, time3);
        clock_gettime( CLOCK_REALTIME, &time3);
        double meanfps = 0;

        total_dur = tdiff(time3, time0);
        if (total_dur > 0)
            meanfps = nframes / total_dur;
        double currentfps = nnewframes / loop_dur;	// ret only exists under USE_SDL; use frames acquired this iteration
        std::cout << "DC1394: Current fps: " << std::setprecision(7) << currentfps
                  << " Average fps: " << std::setprecision(7) << meanfps << "\r" << std::flush;
#ifdef STANDALONE
        // std::cout << capture_dur << "\t" << target_dur << "\t" << rem << "\t" << loop_dur << std::endl;
#endif
    }

    if (d) {
        dc1394_free(d);
    }

#ifndef STANDALONE
    close(s);
#endif
    return 0;
}
Example #26
int main(int argc, char** argv) {
	//int _X[22] = {7, 198, 37, 155, 76, 138, 126, 123, 177, 112, 229, 114, 277, 110, 363, 136, 399, 151, 433, 196, 439, 222,};
	//Mat X(11,2,CV_32SC1,_X);
	
//	int _X_bar[22] = {6, 125, 67, 130, 97, 92, 150, 99, 188, 68, 208, 89, 285, 78, 305, 105, 369, 100, 390, 137, 432, 150,};
//	//int _X_bar[22] = {38, 232, 20, 184, 28, 127, 53, 69, 92, 33, 154, 12, 200, 12, 270, 13, 318, 26, 357, 46, 386, 76,};
////	int _X_bar[22] = {5, 217, 22, 168, 68, 141, 111, 125, 158, 115, 202, 111, 235, 106, 283, 111, 366, 136, 395, 151, 432, 188,};
//	//Mat X_bar(11,2,CV_32SC1,_X_bar); Mat X_bar_32f; X_bar.convertTo(X_bar_32f,CV_32F);
	double theta = -0.01*CV_PI;
	float _RotM[4] = { cos(theta), -sin(theta), sin(theta), cos(theta) };

	//RNG rng;
	//Mat X32f; X.convertTo(X32f,CV_32F);
	//Mat n(X32f.size(),X32f.type()); rng.fill(n,RNG::UNIFORM,Scalar(-35),Scalar(35));

	//Mat X_bar_32f = (X32f);
	////X_bar_32f(Range(0,X32f.rows),Range(1,2)) -= 45;
	//X_bar_32f = X_bar_32f * Mat(2,2,CV_32FC1,_RotM) + n;

	//Mat destinations; X_bar_32f.convertTo(destinations,CV_32S);

	RNG rng;
	Mat X(10,2,CV_32SC1);
	for(int i=0;i<10;i++) {
		X.at<Point>(i,0) = Point(100+sin(((double)i/10.0)*CV_PI)*50.0,100+cos(((double)i/10.0)*CV_PI)*50.0);
	}
	Mat destinations(50,2,X.type()); 
	rng.fill(destinations,RNG::NORMAL,Scalar(150.0),Scalar(125.0,50.0));

	//Mat X_1ch = X.reshape(2,X.rows);
	//Mat X_bar_1ch = destinations.reshape(2,X_bar.rows);

	//findBestTransform(X_1ch,X_bar_1ch);

	//ICP(X,destinations);

	MyFeatureDetector detector = MyFeatureDetector(42, 65, 30);
	
	Mat img1 = imread("./data/img/0095.png", 0);

	detector.setDetectedMat(img1);
	detector.usingSTAR();

	vector<Point2f> features1;
	detector.getFeaturePoints(features1, 5);

	IplImage object1Header = IplImage(img1);	// named header; &IplImage(img1) would dangle
	IplImage *object1 = &object1Header;
	IplImage *object_color1 = cvCreateImage(cvGetSize(object1), 8, 3);
	//the source matrix of this function must carry an IplImage header
	cvCvtColor(object1, object_color1, CV_GRAY2BGR);

	Mat showMat1 = Mat(object_color1);
	namedWindow("STARFeatures1", CV_WINDOW_AUTOSIZE);
	for(int i=0;i<features1.size();i++){
		circle(showMat1, features1[i], 2, Scalar(0,255,0,0), -1); 
	}
	imshow("STARFeatures1", showMat1);



	Mat img2 = imread("./data/img/0087.png", 0);

	detector.setDetectedMat(img2);
	detector.usingSTAR();

	vector<Point2f> features2;
	detector.getFeaturePoints(features2, 5);

	IplImage object2Header = IplImage(img2);	// named header; &IplImage(img2) would dangle
	IplImage *object2 = &object2Header;
	IplImage *object_color2 = cvCreateImage(cvGetSize(object2), 8, 3);
	//the source matrix of this function must carry an IplImage header
	cvCvtColor(object2, object_color2, CV_GRAY2BGR);

	Mat showMat2 = Mat(object_color2);
	namedWindow("STARFeatures2", CV_WINDOW_AUTOSIZE);
	for(int i=0;i<features2.size();i++){
		circle(showMat2, features2[i], 2, Scalar(0,255,0,0), -1); 
	}
	imshow("STARFeatures2", showMat2);

	Mat X1 = Mat(features1);
	Mat X2 = Mat(features2);

	X1 = X1.reshape(1);
	X2 = X2.reshape(1);

	ICP(X1,X2);

	cvWaitKey(0);

	char a;
	cin>>a;
	return 0;
}
void Objectness::illuTestReults(const vector<vector<Vec4i>> &boxesTests)
{
	/*CStr resDir = _voc.localDir + "ResIlu/";
	CmFile::MkDir(resDir);*/

	const int TEST_NUM = _voc.testSet.size();

	//#define SAVE_RESULT
	//libModel = load_model("modelFile/libModel.txt");
	mySVM.load("modelFile/G_sign_svm_model3.xml");

	/*============= PCA parameter prepare ================*/
	int filterNum = 8;
	vector<int> NumFilters;
	NumFilters.push_back(filterNum);
	NumFilters.push_back(filterNum);
	vector<int> blockSize;
	blockSize.push_back(7);
	blockSize.push_back(7);

	PCANet pcaNet = {
		2,
		7,
		NumFilters,
		blockSize,
		0.5
	};

	PCA_Train_Result* result = new PCA_Train_Result;
	FileStorage fs;
	fs.open("modelFile/G_sign_PCA_model3.xml", FileStorage::READ);

	if (!fs.isOpened())
	{
		std::cout << "cannot open model file\n";
	}
	Mat m, n;
	fs["filter1"] >> m;
	result->Filters.push_back(m);
	fs["filter2"] >> n;
	result->Filters.push_back(n);

	/*VideoWriter videoWriter;
	videoWriter.open("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), 30, Size(800, 600), true);
	if (!videoWriter.isOpened()) cout << "cannot create video" << endl;*/


	// get testing samples
	for (int i = 0; i < TEST_NUM; i++){
		const vector<Vec4i> &boxes = boxesTests[i];
		const vector<Vec4i> &boxesGT = boxesTests[i]; //_voc.gtTestBoxes[i];
		
		const int gtNumCrnt = 50; //boxesGT.size(); 
		CStr imgPath = format(_S(_voc.imgPathW), _S(_voc.testSet[i]));
		//CStr resNameNE = CmFile::GetNameNE(imgPath);
		//Mat img = imread("C:/Users/TerryChen/Desktop/M.jpg"); 
		Mat img = imread(imgPath);
//		Mat bboxMatchImg = Mat::zeros(img.size(), CV_32F);
//
//		vecD score(gtNumCrnt);
//		vector<Vec4i> bboxMatch(gtNumCrnt);
//
//# pragma omp parallel for 
//		for (int j = 0; j < boxes.size(); j++){
//			const Vec4i &bb = boxes[j];
//			for (int k = 0; k < gtNumCrnt; k++)	{
//				double mVal = DataSetVOC::interUnio(boxes[j], boxesGT[k]);
//				if (mVal < score[k])
//					continue;
//				score[k] = mVal;
//				bboxMatch[k] = boxes[j];
//			}
//		}

		//for (int k = 0; k < 40; k++){
		//	const Vec4i &bb = bboxMatch[k];
		//	//rectangle(img, Point(bb[0], bb[1]), Point(bb[2], bb[3]), Scalar(0), 3);
		//	rectangle(img, Point(bb[0], bb[1]), Point(bb[2], bb[3]), Scalar(255, 255, 255), 2);
		//	//rectangle(img, Point(bb[0], bb[1]), Point(bb[2], bb[3]), Scalar(0, 0, 255), 1);
		//}
		
		/*============ get patches ============*/
		/*vector< vector < float> > v_descriptorsValues;
		vector<Mat> Hogfeats;*/
		Rect box;
		vector<Rect> bboxes;
		vector<Mat> testImg;
		//vector<string>	imgnames;

		IplImage* img2;
		IplImage* change;
		cv::Mat* bmtx;
		Mat im_gray;

		for (int k = 0; k < gtNumCrnt; k++){
			const Vec4i &bb = boxes[k];
			box.x = bb[0];
			box.y = bb[1];
			box.width = bb[2] - bb[0];
			box.height = bb[3] - bb[1];
			cv::Mat croppedImage;
			img(box).copyTo(croppedImage);
			bboxes.push_back(box);

			// save patches
			/*CStr patchDir = "patch/";
			CmFile::MkDir(patchDir);
			char* patch_name = new char[30];
			sprintf(patch_name, "%s%d%s", "patch/", k+1,".jpg");
			imwrite(patch_name, croppedImage);
			imgnames.push_back(patch_name);*/

			/*============== prepare PCANet =============== */
			
			cvtColor(croppedImage, im_gray, CV_RGB2GRAY);
			IplImage grayHeader = IplImage(im_gray);	// named header; &IplImage(im_gray) would dangle
			img2 = &grayHeader;
			change = cvCreateImage(cvGetSize(img2), IPL_DEPTH_64F, img2->nChannels);
			cvConvertScale(img2, change, 1.0 / 255, 0);
			bmtx = new cv::Mat(change);
			resize(*bmtx, *bmtx, Size(28, 28));
			testImg.push_back(*bmtx);
			delete bmtx;
			cvReleaseImage(&change);
		}
		PCANet_pred(img, testImg, bboxes, result, pcaNet);
		imshow("result", img);

		#ifdef SAVE_RESULT
		CStr patchDir = "result/";
		CmFile::MkDir(patchDir);
		char* patch_name = new char[30];
		sprintf(patch_name, "%s%d%s", "result/", i + 1, ".jpg");	// the loop variable k is out of scope here
		imwrite(patch_name, img);
		delete[] patch_name;
		#endif


		if (waitKey(1) == 'q')
			break;

		//videoWriter << img;
		//waitKey(1);
		//imwrite("matched/_Match.jpg", img); //resDir + resNameNE + 
		//imshow("match",img);
		//waitKey();
	}
	//videoWriter.release();
}
void GestureDetector::updateTimer()
{
	timeCount = (timeCount + DELAY) % RECINT; //increment counter on a cycle
	warnCount = (warnCount + DELAY) % WARNINT; //do same with the warning count
	cv::Mat image, filtered;
    cap >> image;

	//set the image from the camera, process it and get filtered
	SkinDetectController::getInstance()->setInputImage(image);
	SkinDetectController::getInstance()->process();
	filtered = SkinDetectController::getInstance()->getLastResult(); //binary image of blobs

	//retrieve the hand gestures from the image from the image
	std::vector<Hand> hands = detect(image, filtered);

	if(timeCount < DELAY)
	{ // Only store the hands when the timeCount
		//rolls over after iterating to the record
		//interval (RECINT)

		pw.addHandSet(hands);

		imageCache.push_back(image.clone());

		//print out the captured hands
		if(hands.size() > 1)
			ui->textEdit->append(hands[0].toQString()
						 + "\n  and: " + hands[1].toQString());
		else if (!hands.empty())
			ui->textEdit->append(hands[0].toQString());

	
	    // Determine if it is time to check the password
	    if(pw.doCheck(hands))
	    {
			if(pw.checkPassword())
				ui->textEdit->append("----------------------\n"
	                                "PASSWORD ACCEPTED!!!!!!\n"
	                                "----------------------");
			else
	            ui->textEdit->append("----------------------\n"
	                                "INTRUDER... INTRUDER....\n"
	                                "----------------------");

	        char filename[200];
	        //record pictures for documentation
	        for(int i = 0; i < imageCache.size(); i++)
	        {
	        	sprintf(filename, "%s_%d_%d.jpg", DESKTOP, setCount, i);
	        	IplImage saveHeader = IplImage(imageCache[i]);	// named header for cvSaveImage
	        	cvSaveImage(filename, &saveHeader);
            }


			pw.reset();
			setCount++;
			imageCache.clear();
	     }
	}

	// warn the user of the time to capture
	// ticks down from WARNMAX to capture
	if(warnCount < DELAY)
	{
		int timeLeft = WARNMAX - timeCount/WARNINT;
		if(timeLeft == WARNMAX)
			ui->textEdit->append("\n\nPREPARE FOR CAPTURE IN....");
		ui->textEdit->append(QString("%1...........").arg(timeLeft));
	}
	
	// display the processed image
	displayMat(image);
}
Example #29
ReturnType HumanDetection::onExecute()
{
	// acquire the image from the in-port
	opros_any *pData = ImageIn.pop();
	RawImage result;
	std::vector<PositionDataType> data;

	if(pData != NULL){
		
		// get the image from the port
		RawImage Image = ImageIn.getContent(*pData);
		RawImageData *RawImage = Image.getImage();

		// get the size of the current image
		m_in_width = RawImage->getWidth();
		m_in_height = RawImage->getHeight();

		// allocate buffers for the original image
		if(m_orig_img == NULL){
			m_orig_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		}
		if(m_result_img == NULL){
			m_result_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		}

		// copy the raw image data (memcpy)
		memcpy(m_orig_img->imageData, RawImage->getData(), RawImage->getSize());

		m_img_mat = Mat(m_orig_img);

		vector<Rect> found, found_filtered;

		m_hog.detectMultiScale(m_img_mat, found, 0, Size(8,8), Size(32,32), 1.05, 2);

		int i, j;

		// drop detections that are nested inside another detection
		for(i = 0; i < found.size(); i++){
		
			Rect r = found[i];

			for(j = 0; j < found.size(); j++){
			
				if(j != i && (r & found[i]) == r){
				
					break;
				
				}
			
			}
			if(j == found.size()){
			
				found_filtered.push_back(r);
			
			}
		
		}
		outPoint.x = 0;
		outPoint.y = 0;
		rect_width = 0; 
		rect_height = 0;

		// set the positions of the detected people
		for(i = 0; i < found_filtered.size(); i++){
		
			Rect r = found_filtered[i];

			r.x += cvRound(r.width*0.1);
			r.width = cvRound(r.width*0.8);
			r.y += cvRound(r.height*0.07);
			r.height = cvRound(r.height*0.8);
			rectangle(m_img_mat, r.tl(), r.br(), cv::Scalar(0, 255, 0), 3);

			// keep the rectangle's top-left corner
			outPoint.x = r.tl().x;
			outPoint.y = r.tl().y;

			rect_width = r.br().x - r.tl().x;
			rect_height = r.br().y - r.tl().y;
		
		}

		// output the position information
		if(outPoint.x != 0 && outPoint.y !=0)
		{
			PositionDataType base;

			base.setName("");
			base.setX(outPoint.x);
			base.setY(outPoint.y);
			base.setHeight(rect_height);
			base.setWidth(rect_width);
			
			base.setRadian(NULL);
			
			data.push_back(base);

			PositionDataOut.push(data);
		}

		// m_img_mat wraps m_orig_img, so copy the annotated pixels into the
		// persistent result image; pointing m_result_img at a temporary
		// IplImage header would leave it dangling
		IplImage annotated = IplImage(m_img_mat);
		cvCopy(&annotated, m_result_img);


		// get the image pointer of the output RawImage
		RawImageData *pimage = result.getImage();
		
		// resize to the input image's size and channel count
		pimage->resize(m_result_img->width, m_result_img->height, m_result_img->nChannels);
		
		// total image size (pixels times channels)
		int size = m_result_img->width * m_result_img->height * m_result_img->nChannels;
		
		// pointer used to copy only the pixel values
		unsigned char *ptrdata = pimage->getData();
		
		// memcpy the current frame image
		memcpy(ptrdata, m_result_img->imageData, size);

		// push to the out-port
		opros_any mdata = result;
		ImageOut.push(result);	// deliver

		delete pData;
	}

	return OPROS_SUCCESS;
}
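The component assumes m_hog was configured elsewhere. A typical setup matching the detectMultiScale call above is OpenCV's built-in pedestrian detector; a sketch, not taken from this component's source:

#include <opencv2/objdetect/objdetect.hpp>

cv::HOGDescriptor makePeopleDetector()
{
    cv::HOGDescriptor hog;  // default 64x128 person detection window
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    return hog;
}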
int _tmain(int argc, char* argv[])
{
	CvCapture* pCapture = NULL;
	if(argc == 2)
	{
		char* _tempname = argv[1];	// was hard-coded to "e:\\201505280048_22.mp4"
		if( !(pCapture = cvCaptureFromFile(_tempname)))     
		{   
			fprintf(stderr, "Can not open video file %s\n", argv[1]);   
			return -2;     
		}
	}
	if (argc == 1)
	{
		if( !(pCapture = cvCaptureFromCAM(1)))     
		{   
			fprintf(stderr, "Can not open camera.\n");   
			return -2;     
		} 
	}

	IplImage* pFrame = NULL;
	int countx=0;
	while ((pFrame = cvQueryFrame(pCapture)))  
	{ 
		countx++;
		IplImage* img1 = cvCreateImage(cvGetSize(pFrame), IPL_DEPTH_8U, 1);	// create the destination image
		cvCvtColor(pFrame,img1,CV_BGR2GRAY);	// cvCvtColor(src,des,CV_BGR2GRAY)
		// edge detection
		cv::Mat result(img1); 
		cv::Mat contours;  
		cv::Canny (result,contours,50,150);
		cvReleaseImage(&img1);	// free the grey image before re-pointing img1
		IplImage contoursHeader = IplImage(contours);	// named header; &IplImage(contours) would dangle
		img1 = &contoursHeader;


		int nVer = 1;
		int nHor = 2;
		IplConvKernel* VerKer;
		IplConvKernel* HorKer;
		VerKer = cvCreateStructuringElementEx(1,nVer,0,nVer/2,CV_SHAPE_RECT);
		HorKer = cvCreateStructuringElementEx(nHor,1,nHor/2,0,CV_SHAPE_RECT);
		cvDilate(img1,img1,VerKer);
		cvDilate(img1,img1,HorKer);
		cvReleaseStructuringElement(&VerKer);
		cvReleaseStructuringElement(&HorKer);

		cvMorphologyEx(img1, img1, NULL, NULL, CV_MOP_CLOSE);

		cvSaveImage("a.jpg",img1);

		cv::Mat image(pFrame);
		LineFinder finder;  
		finder.setMinVote (600);  
		finder.setLineLengthAndGap (680,500);
		std::vector<cv::Vec4i> li;
		li = finder.findLines (contours); 
		finder.drawDetectedLines (image); 
		imwrite("123.jpg",image);
		// select the first line
		// black image  

// 		for(int i = 0; i < li.size();i++)
// 		{
// 			int n= i;
// 			cv::Mat oneLine(image.size(),CV_8U,cv::Scalar(0));  
// 			cv::Mat oneLineInv;  
// 			//white line  
// 			line(oneLine,cv::Point(li[n][0],li[n][1]),cv::Point(li[n][2],li[n][3]),cv::Scalar(255),5);  
// 			//bitwise-AND the contours with the white line  
// 			bitwise_and(contours,oneLine,oneLine); 		
// 			threshold(oneLine,oneLineInv,128,255,cv::THRESH_BINARY_INV);
// 			//insert the mask's points into a vector  
// 			std::vector<cv::Point> points;  
// 			//iterate over every pixel  
// 			for(int y = 0; y < oneLine.rows;y++)  
// 			{  
// 				uchar* rowPtr = oneLine.ptr<uchar>(y);  
// 				for(int x = 0;x < oneLine.cols;x++)  
// 				{  
// 					if(rowPtr[x])  
// 					{  
// 						points.push_back(cv::Point(x,y));  
// 					}  
// 				}  
// 			}  
// 			//container for the fitted line  
// 			cv::Vec4f line;  
// 			//line-fitting function  
// 			fitLine(cv::Mat(points),line,CV_DIST_L12,0,0.01,0.01);  
// 			//draw a line segment  
// 			int x0= line[2];  
// 			int y0= line[3];  
// 			int x1= x0-200*line[0];  
// 			int y1= y0-200*line[1]; 
// 			if(y0 == y1 /*|| x0 == x1*/)
// 			{
// 				cv::line(image,cv::Point(x0,y0),cv::Point(x1,y1),cv::Scalar(0,255,0),1); 	
// 				imwrite("123.jpg",image);
// 			}
// 		}
// 		
	}
	cvReleaseCapture(&pCapture);
	return 0;
}