Code Example #1
bool init_opencv()
{
	if (!capture.open(0)) {
		std::cerr << "error: capture.open() failed..." << std::endl;
		exit(-1);
	}
	capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

	result_img.create(cv::Size(640, 480), CV_8UC3);

	return true;
}
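For reference, the CV_CAP_PROP_* names used in most of these examples are the legacy C constants; OpenCV 3 and later expose the same properties as cv::CAP_PROP_*. A minimal sketch of the equivalent initialization under the newer names (assuming the same global capture and result_img as above, and returning false on failure instead of exiting):

#include <opencv2/opencv.hpp>
#include <iostream>

cv::VideoCapture capture;
cv::Mat result_img;

bool init_opencv()
{
	if (!capture.open(0)) {
		std::cerr << "error: capture.open() failed..." << std::endl;
		return false;
	}
	// cv::CAP_PROP_* replaces the legacy CV_CAP_PROP_* constants
	capture.set(cv::CAP_PROP_FRAME_WIDTH, 640);
	capture.set(cv::CAP_PROP_FRAME_HEIGHT, 480);

	result_img.create(cv::Size(640, 480), CV_8UC3);

	return true;
}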
Code Example #2
void cameraInit(){
	/*
	cv::Mat frame;
	cap >> frame; // get a new frame from camera
	//cvtColor(frame, edges, CV_BGR2GRAY);
	//GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
	//Canny(edges, edges, 0, 30, 3);
	imshow("edges", frame);
	cv::waitKey(30);
	*/
	cap.set(CV_CAP_PROP_FRAME_WIDTH, 1280);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 720);
	cap.set(CV_CAP_PROP_FPS, 30);
}
Code Example #3
File: main.cpp Project: peterborkuti/opencv
void openCamera(cv::VideoCapture& cap, int argc, const char** argv) {
	cv::CommandLineParser parser(argc, argv, keys);
	int camNum = parser.get<int>("1");

	cap.open(camNum);

	if (!cap.isOpened()) {
		//help();
		std::cout << "***Could not initialize capturing...***\n";
		std::cout << "Current parameter's value: \n";
		exit (-1);
	}

	cap.set(CV_CAP_PROP_FRAME_WIDTH, 160);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 120);
}
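The keys string this parser reads is not shown in the snippet. In the OpenCV 2.4 samples that use positional parameter "1" for the camera index, the definition looks roughly like the following (an assumed reconstruction, not part of the original file):

// hypothetical keys table: positional argument "1" is the camera number, defaulting to 0
const char* keys =
{
    "{1|  | 0 | camera number}"
};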
Code Example #4
File: mono-playback.cpp Project: fengmushu/flight
void mono_handler(const lcm_recv_buf_t *rbuf, const char* channel, const lcmt_stereo *msg, void *user) {
    // open the frame of the video specified by the message

    // check to see if the current video is the correct video file

    if (current_video_number != msg->video_number) {

        video_capture.release();

        std::string newfile = GetMonoFilename(msg->timestamp, msg->video_number);

        if (newfile.empty()) {
            return;
        }

        std::cout << "Opening file: " << newfile << std::endl;

        if (!video_capture.open(newfile)) {
            std::cerr << "Failed to open file: " << newfile << std::endl;
            return;
        }
        current_video_number = msg->video_number;
    }

    video_capture.set(CV_CAP_PROP_POS_FRAMES, msg->frame_number + frame_offset);

    cv::Mat frame;

    video_capture >> frame;

    // guard against a failed seek/read before publishing
    if (frame.empty()) {
        std::cerr << "Failed to read frame: " << msg->frame_number + frame_offset << std::endl;
        return;
    }

    SendImageOverLcm(lcm_, image_channel, frame);

}
Code Example #5
File: main.cpp Project: vadimgg/Quadcopter495
cv::Mat readFrameIndex(cv::VideoCapture& cap, int frameNum){
	int idx = frameNum - 1;
	cv::Mat frame;
	cap.set(CV_CAP_PROP_POS_FRAMES, idx);
	// retrieve() returns nothing unless a frame was grabbed first;
	// read() performs both grab() and retrieve()
	bool success = cap.read(frame);
	if (!success) {
		frame.release(); // hand back an empty Mat on failure
	}
	return frame;
}
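A hypothetical call site for the function above, with "video.avi" standing in for a real file (frameNum is treated as 1-based):

cv::VideoCapture cap("video.avi"); // placeholder path
cv::Mat f = readFrameIndex(cap, 100); // fetch the 100th frame
if (f.empty()) {
    std::cerr << "seek or decode failed" << std::endl;
}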
Code Example #6
File: camExample.cpp Project: Overdr0ne/motld
void Init(cv::VideoCapture& capture)
{
  if(!capture.isOpened()){
    std::cout << "error starting video capture" << std::endl;
    exit(1); // exit with a nonzero status on error
  }
  //propose a resolution
  capture.set(CV_CAP_PROP_FRAME_WIDTH, RESOLUTION_X);
  capture.set(CV_CAP_PROP_FRAME_HEIGHT, RESOLUTION_Y);
  //get the actual (supported) resolution
  ivWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);
  ivHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
  std::cout << "camera/video resolution: " << ivWidth << "x" << ivHeight << std::endl;

  cv::namedWindow("MOCTLD", 0); //CV_WINDOW_AUTOSIZE );
  // cv::resizeWindow("MOCTLD", ivWidth, ivHeight);
  cv::setMouseCallback("MOCTLD", MouseHandler);
}
Code Example #7
void shutterCB(int pos, void* param){
	cap.set(CV_CAP_PROP_EXPOSURE, pos);

	//fcount=0; // Reset frame counter, so we don't have to wait for the avg to "catch" up

	std::cout << "CALLBACK !!!: pos: " << pos << " Shutter read: " << cap.get(CV_CAP_PROP_EXPOSURE) << std::endl;
}
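A callback with this signature matches cv::TrackbarCallback, so it can be attached to a slider with cv::createTrackbar; Code Example #14 below registers this exact callback:

int shutterVal = 242; // initial slider position
cv::createTrackbar("Shutter", "Color", &shutterVal, 4095, shutterCB, NULL);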
Code Example #8
void init()
{
    // camera
    camera.open(0);

    if(!camera.isOpened())
    {
        std::cout << "Can not find camera!" << std::endl;
        exit(-1);
    }

    camera.set(cv::CAP_PROP_FRAME_WIDTH,160);
    camera.set(cv::CAP_PROP_FRAME_HEIGHT,120);

/*    double width = camera.get(cv::CAP_PROP_FRAME_WIDTH);
    double height = camera.get(cv::CAP_PROP_FRAME_HEIGHT);

    std::cout << "width:" << width << "\t";
    std::cout << "height:" << height << "\t" << std::endl;*/

    face_cascade_name = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";

    if(!face_cascade.load(face_cascade_name))
    {
        std::cout << "can not find face_cascade_file!" << std::endl;
        exit(-1);
    }

    running = true;
    pthread_mutex_init(&mutexLock, NULL);
    pthread_create(&grabThread, NULL, grabFunc, NULL);

    //Setup edi robot mraa control lib
    spider_init();

    signal(SIGINT, clear);
    signal(SIGTERM, clear);
}
Code Example #9
void OpenCVTemplateApp::update()
{
    if(currentState == PLAY) {
        frameIndex = video.get(cv::CAP_PROP_POS_FRAMES);
        frameIndex += frameSpeed;
        if(frameIndex >= video.get(cv::CAP_PROP_FRAME_COUNT)-1) {
            frameIndex = 0;
        }
        video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
        video.read(frame);
        if(isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
    }
}
Code Example #10
/**
 * @function main
 */
int main( int argc, char* argv[] ) {
  
  // Filter
  gFilterLimits.resize(6);
  //gFilterLimits << -0.35, 0.35, -0.70, 0.70, 1.5, 2.4; // Kinect
  gFilterLimits << -1.0, 1.0, -1.5, 1.5, 0.35, 2.0; // Asus on top of Crichton


  ObjectsDatabase mOd;
  mOd.init_classifier();
  mOd.load_dataset();

  gCapture.open( cv::CAP_OPENNI2 );
  
  if( !gCapture.isOpened() ) {
    printf("\t [ERROR] Could not open the capture object \n");
    return -1;
  }

  gCapture.set( cv::CAP_PROP_OPENNI2_MIRROR, 0.0 );
  gCapture.set( cv::CAP_PROP_OPENNI_REGISTRATION, -1.0 );
  gF = (float)gCapture.get( cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH );

  cv::namedWindow( gWindowName, cv::WINDOW_AUTOSIZE );
 
  ::google::InitGoogleLogging( argv[0] );
  
  for(;;) {

    if( !gCapture.grab() ) {
      printf("\t * ERROR Could not grab a frame \n");
      return -1;
    }

    gCapture.retrieve( gRgbImg, cv::CAP_OPENNI_BGR_IMAGE );
    gCapture.retrieve( gPclMap, cv::CAP_OPENNI_POINT_CLOUD_MAP );

    if( gIsSegmentedFlag ) { drawSegmented(); }
    cv::imshow( gWindowName, gRgbImg );

    char k = cv::waitKey(30);
    if( k == 'q' ) {
      printf("\t Finishing program \n");
      break;
    } 

    /** Recognize */
    else if( k == 'i' ) {

      // Process image
      process();
      gLabels.resize(gClusters.size() );
      gIndex.resize(gClusters.size() );      
      // Store images
      for( size_t i = 0; i < gClusters.size(); ++i ) {

        int xl = gBoundingBoxes[i](0);
        int yl = gBoundingBoxes[i](1);
        int xw = gBoundingBoxes[i](2) - gBoundingBoxes[i](0);
        int yw = gBoundingBoxes[i](3) - gBoundingBoxes[i](1);

        cv::Mat img( gRgbImg, cv::Rect( xl, yl, xw, yw ) );

        // Predict
        mOd.classify( img, gIndex[i], gLabels[i] );

        cv::putText( gRgbImg,
                     gLabels[i], cv::Point( gBoundingBoxes[i](0), gBoundingBoxes[i](1) ),
                     cv::FONT_HERSHEY_SIMPLEX, 1,
                     gColors[i],
                     2 );

        mOd.sayIt( gIndex[i] );
      }

    } // else if
    
    
  } // for

  return 0;
} // main
Code Example #11
void OutputDeviceWindow::output(cv::VideoCapture& in_video) {
  const std::string WIN_NAME = "mudou_win";
  const int KEY_CODE_ESC = 27;
  const int KEY_CODE_J   = 106;
  const int KEY_CODE_K   = 107;

  TiledImageCreator tic;

  cv::Mat image;

  cv::namedWindow(WIN_NAME);

  in_video >> image;
  if(!image.empty()) {
    if(image.rows < image.cols){
      image_size = image.cols;
    }else{
      image_size = image.rows;
    }
    small_image_size = image_size / (1 << division);
    if(small_image_size < 1){
      small_image_size = 1;
    }

    tic.set_cell_size(small_image_size, small_image_size);
  }

  while(!image.empty()) {
    cv::imshow(WIN_NAME, tic.create_tiled_img(image));
    
    int key = cv::waitKey(1);
    // std::cout << "key = " << key << std::endl;
    if( key == KEY_CODE_ESC ) {
      //break;
      return;
    }
    if( key == KEY_CODE_J ) {
      // make the tile images smaller
      division++;
      small_image_size = image_size / (1 << division);

      if(small_image_size < min_small_image_size){
        small_image_size = min_small_image_size;
        division--;
      }

      tic.set_cell_size(small_image_size, small_image_size);
    }
    if( key == KEY_CODE_K ) {
      // make the tile images larger
      division--;
      if(division < 0){
        division = 0;
      }

      small_image_size = image_size / (1 << division);
      tic.set_cell_size(small_image_size, small_image_size);
    }

    in_video >> image;
    if(image.empty()){
      // rewind to the start and loop the video
      in_video.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
      in_video >> image;
    }
  }
}
Code Example #12
int main(int argc, char**argv)
{
	capture.open(0);
	if (capture.isOpened() == false)
	{
		std::cerr << "no capture device found" << std::endl;
		return 1;
	}
	capture.set(cv::CAP_PROP_FRAME_WIDTH,  vgaSize.width);
	capture.set(cv::CAP_PROP_FRAME_HEIGHT, vgaSize.height);
	if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != (double)vgaSize.width || capture.get(cv::CAP_PROP_FRAME_HEIGHT) != (double)vgaSize.height)
	{
		std::cerr << "current device doesn't support " << vgaSize.width << "x" << vgaSize.height << " size" << std::endl;
		return 2;
	}
	cv::Mat image;
	capture >> image;

	cv::namedWindow(windowName);
	cv::imshow(windowName, image);

	initCuda();
	initArray(image);

	char key = -1;
	enum device statusDevice = useCpuSimd;
	enum precision statusPrecision = precisionFloat;
	int index = 1;
	cv::Mat stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
	cv::Mat gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
	double elapsedTime = 0.0; // stays 0 when no backend ran (e.g. GPU selected without CUDA support)
	while (isFinish(key) == false)
	{
		capture >> image;

		switch (key)
		{
		case 'h':
		case 'H':
			// switch to half precision
			statusPrecision = precisionHalf;
			std::cout << std::endl << header << "half  " << std::endl;
			stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
			gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
			break;
		case 'f':
		case 'F':
			// switch to single precision
			statusPrecision = precisionFloat;
			std::cout << std::endl << header << "single" << std::endl;
			stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
			gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
			break;
		case 'b':
		case 'B':
			// switch to gray gain
			statusPrecision = precisionByte;
			std::cout << std::endl << header << "char" << std::endl;
			gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
			break;
		case '0':
		case '1':
			index = key - '0';
			switch (statusPrecision)
			{
			case precisionHalf:
				// precision half
				stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
				gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
				break;
			case precisionFloat:
				// precision single
				stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
				gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
				break;
			case precisionByte:
				// precision byte (gray gain)
				gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
				break;
			default:
				break;
			}
			break;
		case 'c':
		case 'C':
			std::cout << std::endl << "Using CPU SIMD           " << std::endl;
			statusDevice = useCpuSimd;
			break;
		case 'g':
		case 'G':
			std::cout << std::endl << "Using GPU                " << std::endl;
			statusDevice = useGpu;
			break;
		default:
			break;
		}

		if (statusDevice == useCpuSimd)
		{
			elapsedTime = multiplyImage(image, gain);
		}
		else
		{
#ifdef HAVE_CUDA
			// CUDA
			elapsedTime = multiplyImageCuda(image, gain);
#endif // HAVE_CUDA
		}
		computeStatistics(elapsedTime, key);

		if (key == 's' || key == 'S')
		{
			cv::imwrite(dumpFilename, image);
		}

		cv::imshow(windowName, image);
		key = cv::waitKey(1);
	}
	std::cout << std::endl;
	cv::destroyAllWindows();
	releaseArray();

	return 0;
}
Code Example #13
void OpenCVTemplateApp::makeGUI() {
    interface->clear();
    interface->addButton("load image", [this] {
        auto path = ci::app::getOpenFilePath();
        image = cv::imread(path.string());
        std::cout <<"cols "<<image.cols << std::endl;
        std::cout <<"rows "<<image.rows << std::endl;
        std::cout <<"channels "<<image.channels() << std::endl;
        imageTexture = gl::Texture::create(fromOcv(image));
    });
    interface->addButton("load video", [this] {
        auto path = ci::app::getOpenFilePath();
        video.open(path.string());
        frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
        frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
        totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
        video.read(frame);
        if(isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
        makeGUI();
    });
    interface->addSeparator();
    if(frameTexture) {
        interface->addParam("gray scale", &isGrayScale).updateFn([this] {
            video.retrieve(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
            makeGUI();
        });
        interface->addParam("nb of feature",&nbOfFeaturePoints).min(1).max(1000);
        if(isGrayScale) {
            interface->addButton("get feature points", [this] {
                cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
            });
        }
        interface->addSeparator();
        interface->addParam("frame",&frameIndex).min(0).max(totalFrames-1).step(1).updateFn([this] {
            video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
            video.read(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
        });
        interface->addSeparator();
        interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
        interface->addButton("play",[this] {
            currentState = PLAY;
            makeGUI();
        });
        if(currentState == PLAY) {
            interface->addButton("pause",[this] {
                currentState = PAUSE;
                makeGUI();
            });
        }
    }
}
Code Example #14
void *cameraThread(void *arg){

  std::vector<cmdData> cmd;
  cv::Mat frame;

  cv::namedWindow("Color", CV_WINDOW_AUTOSIZE);       // color preview window
  cv::namedWindow("Thresholded", CV_WINDOW_AUTOSIZE); // thresholded preview window
//cv::namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
int initShutter = 242;
//int initShutter = 0;

int shutterVal = initShutter;
int cannyMin = 30;
int blockSize = 89;
int fps = 60;

// Shutter slider
cv::createTrackbar("Shutter", "Color", &shutterVal, 4095, shutterCB, NULL);

// Threshold and block-size sliders
cv::createTrackbar("Threshold", "Color", &cannyMin, 255, NULL, NULL);
cv::createTrackbar("BlockSize", "Color", &blockSize, 255, NULL, NULL);

cv::Mat colorFrame;
cv::Mat tresholdedFrame;
cv::Mat hsvFrame;
cv::Mat grey,tmp;
cv::Mat contourOutput;

  


cap.open(CV_CAP_DC1394); // Open the first FireWire camera (the cv::CAP_* constants replace CV_CAP_* in later OpenCV versions)


cap.set(CV_CAP_PROP_WHITE_BALANCE_BLUE_U,794); // 736
cap.set(CV_CAP_PROP_WHITE_BALANCE_RED_V,437);
cap.set(CV_CAP_PROP_EXPOSURE,initShutter); // "Shutter" in coriander
cap.set(CV_CAP_PROP_FPS,fps);
cap.set(CV_CAP_PROP_GAMMA,0);
cap.set(CV_CAP_PROP_GAIN,30);

while(runState){

 
cap >> frame;
if(!frame.data) break; // bail out immediately on an empty frame


std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;


// Get color image, decode bayer BGGR.  
cv::cvtColor(frame,colorFrame,CV_BayerBG2RGB,0);
cv::cvtColor(colorFrame, grey, CV_RGB2GRAY );


// Remove gripper from img
cv::Rect roi = cv::Rect(0,0,640,360);
cv::Mat submatrix = cv::Mat(grey,roi);
//submatrix.setTo(cv::Scalar(255));

cv::threshold(submatrix,tresholdedFrame,cannyMin,255,cv::THRESH_BINARY_INV);

if(blockSize % 2 == 0){
	// the adaptive threshold block size MUST be odd
	blockSize = blockSize - 1;
}

// cv::adaptiveThreshold(submatrix,tresholdedFrame,255,cv::ADAPTIVE_THRESH_GAUSSIAN_C,cv::THRESH_BINARY_INV,blockSize,0);



cv::Moments mu;

mu = cv::moments(tresholdedFrame,true);

// Find center
cv::Point2f mc = cv::Point2f( mu.m10/mu.m00 , mu.m01/mu.m00 );

// Count non zero pixels. Used for determining if we are screwed (getting large "white" areas.)
cameraError.areaOfObject =  cv::countNonZero(tresholdedFrame);


// Draw it - convert to RGB so we can draw on it with colors
cv::cvtColor(tresholdedFrame, tresholdedFrame, CV_GRAY2RGB);
//cv::Mat drawing = cv::Mat::zeros( tresholdedFrame.size(), CV_8UC3 );
cv::Scalar color = cv::Scalar( 0,255,0 );
cv::circle( tresholdedFrame, mc, 5, color, -1, 8, 0 );




 
// Calculate distance from center of image

cv::Size s = tresholdedFrame.size(); 
cv::Point centerOfFrame = cv::Point(s.width/2,s.height/2);  
float distX = centerOfFrame.x-mc.x;
float distY = centerOfFrame.y-mc.y;
 
cameraError.x = distX;
cameraError.y = distY; 
 



/* (old contour-based tracking code removed) */


if(debugMonitor){
	cv::imshow("Thresholded", tresholdedFrame); // showing the images gives an unsteady FPS
	cv::imshow("Color", grey);
	//cv::imshow( "Center", drawing );
}

if(cv::waitKey(1) == 27){ break; } // wait 1 ms so the frames can be drawn; break on ESC



}

cv::destroyWindow("Color");
cv::destroyWindow("Thresholded");

return NULL; // a pthread start routine must return a value
}
Code Example #15
File: detect-live.cpp Project: chili-epfl/chilitags
int main(int argc, char* argv[])
{
    signal(SIGINT, quitFunction);

    // Simple parsing of the parameters related to the image acquisition
    int xRes = 640;
    int yRes = 480;
    int cameraIndex = 0;
    if (argc > 2) {
        xRes = std::atoi(argv[1]);
        yRes = std::atoi(argv[2]);
    }
    if (argc > 3) {
        cameraIndex = std::atoi(argv[3]);
    }

    // The source of input images
    capture.open(cameraIndex);
    if (!capture.isOpened())
    {
        std::cerr << "Unable to initialise video capture." << std::endl;
        return 1;
    }
#ifdef OPENCV3
    capture.set(cv::CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(cv::CAP_PROP_FRAME_HEIGHT, yRes);
#else
    capture.set(CV_CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, yRes);
#endif
    cv::Mat inputImage;

    // The tag detection happens in the Chilitags class.
    chilitags::Chilitags chilitags;

    // The detection is not perfect, so if a tag is not detected during one frame,
    // the tag will shortly disappear, which results in flickering.
    // To address this, Chilitags "cheats" by keeping tags for n frames
    // at the same position. When tags disappear for more than 5 frames,
    // Chilitags actually removes them.
    // Here, we cancel this to show the raw detection results.
    chilitags.setFilter(0, 0.0f);

    cv::namedWindow("DisplayChilitags");
    // Main loop, exiting when 'q' is pressed
    for (; 'q' != (char) cv::waitKey(1) && sRunning; ) {

        // Capture a new image.
        capture.read(inputImage);

        // Start measuring the time needed for the detection
        int64 startTime = cv::getTickCount();

        // Detect tags on the current image (and time the detection);
        // The resulting map associates tag ids (between 0 and 1023)
        // to four 2D points corresponding to the corners positions
        // in the picture.
        chilitags::TagCornerMap tags = chilitags.find(inputImage);

        // Measure the processing time needed for the detection
        int64 endTime = cv::getTickCount();
        float processingTime = 1000.0f*((float) endTime - startTime)/cv::getTickFrequency();


        // Now we start using the result of the detection.

        // First, we set up some constants related to the information overlaid
        // on the captured image
        const static cv::Scalar COLOR(255, 0, 255);
        // OpenCv can draw with sub-pixel precision with fixed point coordinates
        static const int SHIFT = 16;
        static const float PRECISION = 1<<SHIFT;

        // We don't want to draw directly on the input image, so we clone it
        cv::Mat outputImage = inputImage.clone();

        for (const std::pair<int, chilitags::Quad> & tag : tags) {

            int id = tag.first;
            // We wrap the corner matrix into a datastructure that allows an
            // easy access to the coordinates
            const cv::Mat_<cv::Point2f> corners(tag.second);

            // We start by drawing the borders of the tag
            for (size_t i = 0; i < 4; ++i) {
                cv::line(
                    outputImage,
                    PRECISION*corners(i),
                    PRECISION*corners((i+1)%4),
#ifdef OPENCV3
                    COLOR, 1, cv::LINE_AA, SHIFT);
#else
                    COLOR, 1, CV_AA, SHIFT);
#endif
            }

            // Other points can be computed from the four corners of the Quad.
            // Chilitags are oriented: the points 0,1,2,3 of the Quad
            // coordinates are consistently the top-left, top-right,
            // bottom-right and bottom-left corners
            // (i.e. clockwise, starting from top-left).
            // Using this, we can compute (an approximation of) the center
            // of the tag.
            cv::Point2f center = 0.5f*(corners(0) + corners(2));
            cv::putText(outputImage, cv::format("%d", id), center,
                        cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);
        }

        // Some stats on the current frame (resolution and processing time)
        cv::putText(outputImage,
                    cv::format("%dx%d %4.0f ms (press q to quit)",
                               outputImage.cols, outputImage.rows,
                               processingTime),
                    cv::Point(32,32),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);

        // Finally...
        cv::imshow("DisplayChilitags", outputImage);
    }

    cv::destroyWindow("DisplayChilitags");
    capture.release();

    return 0;
}
Code Example #16
File: subsextractor.cpp Project: egrosclaude/subs
	void realTrack() {
		cap->set(CV_CAP_PROP_POS_FRAMES, StartFrame);
	}