Code Example #1
///////////////////////////////////////////////////////////////////////////////
// initialize the video for processing
///////////////////////////////////////////////////////////////////////////////
void initVideo( string videoFile ){

	vidCap = cv::VideoCapture( videoFile );

	if( !vidCap.isOpened() ){
		cout << "Video did not open" << endl;
		shutItDown(-1);
	}
	width = vidCap.get(CV_CAP_PROP_FRAME_WIDTH);
	height = vidCap.get(CV_CAP_PROP_FRAME_HEIGHT);
	cout << "width: " << vidCap.get(CV_CAP_PROP_FRAME_WIDTH) << endl;
	cout << "height: " << vidCap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;

	currentImage = cv::Mat((int)height, (int)width, CV_8UC3); // rows = height, cols = width
	frameCount = 0;
	vidCap.read(currentImage);
	cv::cvtColor(currentImage, currGray, CV_BGR2GRAY);
	// Create the flow buffer while currentImage still holds the first frame,
	// then rotate current -> previous.
	flow = cv::Mat(currentImage.size(), CV_32FC2);
	swap(prevGray, currGray); swap(prevImage, currentImage);

	termcrit = cv::TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
	needToInit = true;

	rel_vec_x = 1.0f;
	rel_vec_y = 0.0f;
}
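
Note: the CV_CAP_PROP_* names used above come from OpenCV's legacy C API; in OpenCV 3 and later the same properties are exposed through the cv::CAP_PROP_* enum. A minimal standalone sketch of the equivalent reads (the file name is hypothetical):

#include <opencv2/videoio.hpp>
#include <iostream>

int main() {
    cv::VideoCapture cap("input.mp4");  // hypothetical file name
    if (!cap.isOpened()) return -1;
    // Same two properties as above, via the modern enum
    double w = cap.get(cv::CAP_PROP_FRAME_WIDTH);
    double h = cap.get(cv::CAP_PROP_FRAME_HEIGHT);
    std::cout << "width: " << w << ", height: " << h << std::endl;
    return 0;
}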
Code Example #2
File: vcrop.cpp  Project: Kazz47/my_utils
VCrop::VCrop(cv::VideoCapture &capture, const float &x, const float &y, const float &size) : capture(&capture) {
    if (!capture.isOpened()) {
        std::string error_message = "Error when reading input stream";
        LOG(ERROR) << error_message;
        throw error_message;
    }

    int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
    int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    VLOG(2) << "Frame Width: " << frame_width;
    VLOG(2) << "Frame Height: " << frame_height;
    LOG_IF(FATAL, frame_width <= 0) << "Frame width is not positive.";
    LOG_IF(FATAL, frame_height <= 0) << "Frame height is not positive.";
    float diameter = sqrt(frame_width * frame_width + frame_height * frame_height);
    cv::Point2i top_left(frame_width * x, frame_height * y);
    cv::Size2i rect_size(diameter * size, diameter * size);
    if (top_left.x + rect_size.width > frame_width || top_left.y + rect_size.height > frame_height) {
        LOG(ERROR) << "Size(" << rect_size << ") to too large for given x(" << top_left.x << ") and y(" << top_left.y << ") coordinate.";
    }
    roi = new cv::Rect(top_left, rect_size);
    VLOG(1) << "RoI: \t" << *roi;

    frame_rate = capture.get(CV_CAP_PROP_FPS);
    if (std::isnan(frame_rate) || frame_rate <= 0) {
        LOG(WARNING) << "Failed to get frame rate, setting rate to 10fps.";
        frame_rate = 10;
    }
    VLOG(1) << "Frame Rate: \t" << frame_rate;
}
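
A hypothetical usage sketch for this constructor, following the math above (x and y are fractional offsets into the frame, size scales the frame diagonal; the input path is made up):

cv::VideoCapture cap("match.avi");  // hypothetical input
// Crop anchored 10% from the left and 20% from the top,
// with a square side equal to 5% of the frame diagonal.
VCrop crop(cap, 0.1f, 0.2f, 0.05f);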
Code Example #3
void OpenCVTemplateApp::update()
{
    if(currentState == PLAY) {
        frameIndex = video.get(cv::CAP_PROP_POS_FRAMES);
        frameIndex += frameSpeed;
        if(frameIndex >= video.get(cv::CAP_PROP_FRAME_COUNT)-1) {
            frameIndex = 0;
        }
        video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
        video.read(frame);
        if(isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
    }
}
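
Seeking with cv::CAP_PROP_POS_FRAMES on every update, as above, is only frame-accurate where the codec and container support random access. When frameSpeed is 1, a sequential variant that lets read() advance the position itself avoids the per-frame seek; a sketch under that assumption:

// Sequential playback: no explicit seek, rewind when read() hits end-of-file.
if(currentState == PLAY) {
    if(!video.read(frame)) {
        video.set(cv::CAP_PROP_POS_FRAMES, 0);  // rewind
        video.read(frame);
    }
}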
Code Example #4
File: subsextractor.cpp  Project: egrosclaude/subs
bool SubsExtractor::open(string file)
{
	videofile = file;
	bool o = cap->open(videofile);
	StartFrame = 0;
	EndFrame = cap->get(CV_CAP_PROP_FRAME_COUNT);
	return o;
}
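
Note that CV_CAP_PROP_FRAME_COUNT is only what the container reports: get() returns it as a double, it can be an estimate for some codecs, and it is typically 0 for live streams. A hedged sketch of a more defensive read:

int frames = static_cast<int>(cap->get(CV_CAP_PROP_FRAME_COUNT));
if (frames <= 0) {
	// No usable count (live stream or index-less container):
	// fall back to detecting end-of-stream via read() returning false.
	frames = -1;
}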
Code Example #5
File: camExample.cpp  Project: Overdr0ne/motld
void Init(cv::VideoCapture& capture)
{
  if(!capture.isOpened()){
    std::cout << "error starting video capture" << std::endl;
    exit(EXIT_FAILURE);
  }
  //propose a resolution
  capture.set(CV_CAP_PROP_FRAME_WIDTH, RESOLUTION_X);
  capture.set(CV_CAP_PROP_FRAME_HEIGHT, RESOLUTION_Y);
  //get the actual (supported) resolution
  ivWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);
  ivHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
  std::cout << "camera/video resolution: " << ivWidth << "x" << ivHeight << std::endl;

  cv::namedWindow("MOCTLD", 0); //CV_WINDOW_AUTOSIZE );
  // cv::resizeWindow("MOCTLD", ivWidth, ivHeight);
  cv::setMouseCallback("MOCTLD", MouseHandler);
}
Code Example #6
std::vector<Mat> GetFrames(cv::VideoCapture &cap)
{
	std::vector<Mat> ansvect;
	for(int i=0;;i++)
	{
		//std::cout << i <<"\n";
		cv::Mat frame;
		if (int(cap.get(CV_CAP_PROP_POS_FRAMES)) == int(cap.get(CV_CAP_PROP_FRAME_COUNT)))
			break;
		//std::cout << cap.get(CV_CAP_PROP_POS_FRAMES) <<"\t"<<cap.get(CV_CAP_PROP_FRAME_COUNT) <<"\n";
		if (!cap.read(frame))             
			break;
		ansvect.push_back(frame);
		//cv::imshow("window", frame);
		//char key = cvWaitKey(0);
	}
	return ansvect;
}
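
Because frame is constructed fresh on every iteration, each element of ansvect owns its own pixel buffer, so the entire clip ends up in memory at once. A rough upper-bound estimate before calling GetFrames(), assuming 8-bit BGR (CV_8UC3) frames:

double n = cap.get(CV_CAP_PROP_FRAME_COUNT);
double w = cap.get(CV_CAP_PROP_FRAME_WIDTH);
double h = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
double mib = n * w * h * 3 / (1024.0 * 1024.0);  // 3 bytes per pixel
std::cout << "approx. " << mib << " MiB of frames" << std::endl;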
Code Example #7
void shutterCB(int pos, void* param){
	struct timeval t;

	cap.set(CV_CAP_PROP_EXPOSURE, pos);

	//fcount=0; // Reset frame counter, so we don't have to wait for the avg to "catch" up

	std::cout << "CALLBACK !!!: pos: " << pos << " Shutter read: " << cap.get(CV_CAP_PROP_EXPOSURE) << std::endl;
}
Code Example #8
void VirtualKinect::showVideoInfo(cv::VideoCapture& video)
{
#if (defined WIN32 || defined _WIN32 || defined WINCE) // for Windows
    SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE),FOREGROUND_INTENSITY | FOREGROUND_GREEN);
#endif

    std::cout << cv::format("frame count:%.0f, size (%.0f,%.0f), fps:%.2f, fourcc:",
        video.get(CV_CAP_PROP_FRAME_COUNT),
        video.get(CV_CAP_PROP_FRAME_WIDTH),
        video.get(CV_CAP_PROP_FRAME_HEIGHT),
        video.get(CV_CAP_PROP_FPS));

    int ex = static_cast<int>(video.get(CV_CAP_PROP_FOURCC));     // Get Codec Type- Int form
    char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
    std::cout << EXT << std::endl << std::endl;

#if (defined WIN32 || defined _WIN32 || defined WINCE) // for Windows
    SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE),FOREGROUND_INTENSITY | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE);
#endif
}
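
The integer read back from CV_CAP_PROP_FOURCC can be passed straight to cv::VideoWriter to re-encode with the source codec (cv::VideoWriter::fourcc('M','J','P','G') builds one from characters instead). A minimal sketch with a hypothetical output path:

int ex = static_cast<int>(video.get(CV_CAP_PROP_FOURCC));
cv::VideoWriter writer("out.avi",  // hypothetical output file
                       ex,         // reuse the source codec unchanged
                       video.get(CV_CAP_PROP_FPS),
                       cv::Size((int)video.get(CV_CAP_PROP_FRAME_WIDTH),
                                (int)video.get(CV_CAP_PROP_FRAME_HEIGHT)));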
Code Example #9
/**
 * @function main
 */
int main( int argc, char* argv[] ) {
  
  // Filter
  gFilterLimits.resize(6);
  //gFilterLimits << -0.35, 0.35, -0.70, 0.70, 1.5, 2.4; // Kinect
  gFilterLimits << -1.0, 1.0, -1.5, 1.5, 0.35, 2.0; // Asus on top of Crichton


  ObjectsDatabase mOd;
  mOd.init_classifier();
  mOd.load_dataset();

  gCapture.open( cv::CAP_OPENNI2 );
  
  if( !gCapture.isOpened() ) {
    printf("\t [ERROR] Could not open the capture object \n");
    return -1;
  }

  gCapture.set( cv::CAP_PROP_OPENNI2_MIRROR, 0.0 );
  gCapture.set( cv::CAP_PROP_OPENNI_REGISTRATION, -1.0 );
  gF = (float)gCapture.get( cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH );

  cv::namedWindow( gWindowName, cv::WINDOW_AUTOSIZE );
 
  ::google::InitGoogleLogging( argv[0] );
  
  for(;;) {

    if( !gCapture.grab() ) {
      printf("\t * ERROR Could not grab a frame \n");
      return -1;
    }

    gCapture.retrieve( gRgbImg, cv::CAP_OPENNI_BGR_IMAGE );
    if( gIsSegmentedFlag ) { drawSegmented(); }
    cv::imshow( gWindowName, gRgbImg );
    
    gCapture.retrieve( gPclMap, cv::CAP_OPENNI_POINT_CLOUD_MAP );

    char k = cv::waitKey(30);
    if( k == 'q' ) {
      printf("\t Finishing program \n");
      break;
    } 

    /** Recognize */
    else if( k == 'i' ) {

      // Process image
      process();
      gLabels.resize(gClusters.size() );
      gIndex.resize(gClusters.size() );      
      // Store images
      for( size_t i = 0; i < gClusters.size(); ++i ) {

        int xl = gBoundingBoxes[i](0);
        int yl = gBoundingBoxes[i](1);
        int xw = gBoundingBoxes[i](2) - gBoundingBoxes[i](0);
        int yw = gBoundingBoxes[i](3) - gBoundingBoxes[i](1);

        cv::Mat img( gRgbImg, cv::Rect( xl, yl, xw, yw ) );

        // Predict
        mOd.classify( img, gIndex[i], gLabels[i] );

        cv::putText( gRgbImg,
                     gLabels[i], cv::Point( gBoundingBoxes[i](0), gBoundingBoxes[i](1) ),
                     cv::FONT_HERSHEY_SIMPLEX, 1,
                     gColors[i],
                     2 );

        mOd.sayIt( gIndex[i] );
      }

    } // else

  } // for

  return 0;
} // main
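
The grab()/retrieve() split used above is the standard OpenCV pattern for multi-channel sensors: grab() advances the device exactly once, and each retrieve() decodes one channel of that same frame, keeping the color image and point cloud in sync. The pattern in isolation, assuming an OpenNI2-enabled build:

cv::VideoCapture cap( cv::CAP_OPENNI2 );
cv::Mat bgr, depth;
if( cap.grab() ) {                                    // capture once
  cap.retrieve( bgr,   cv::CAP_OPENNI_BGR_IMAGE );    // decode the color channel
  cap.retrieve( depth, cv::CAP_OPENNI_DEPTH_MAP );    // decode the depth channel
}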
Code Example #10
int main(int argc, char**argv)
{
	capture.open(0);
	if (capture.isOpened() == false)
	{
		std::cerr << "no capture device found" << std::endl;
		return 1;
	}
	capture.set(cv::CAP_PROP_FRAME_WIDTH,  vgaSize.width);
	capture.set(cv::CAP_PROP_FRAME_HEIGHT, vgaSize.height);
	if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != (double)vgaSize.width || capture.get(cv::CAP_PROP_FRAME_HEIGHT) != (double)vgaSize.height)
	{
		std::cerr << "current device doesn't support " << vgaSize.width << "x" << vgaSize.height << " size" << std::endl;
		return 2;
	}
	cv::Mat image;
	capture >> image;

	cv::namedWindow(windowName);
	cv::imshow(windowName, image);

	initCuda();
	initArray(image);

	char key = -1;
	enum device statusDevice = useCpuSimd;
	enum precision statusPrecision = precisionFloat;
	int index = 1;
	cv::Mat stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
	cv::Mat gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
	double elapsedTime = 0.0;
	while (isFinish(key) == false)
	{
		capture >> image;

		switch (key)
		{
		case 'h':
		case 'H':
			// switch to half precision
			statusPrecision = precisionHalf;
			std::cout << std::endl << header << "half  " << std::endl;
			stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
			gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
			break;
		case 'f':
		case 'F':
			// switch to single precision
			statusPrecision = precisionFloat;
			std::cout << std::endl << header << "single" << std::endl;
			stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
			gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
			break;
		case 'b':
		case 'B':
			// switch to gray gain
			statusPrecision = precisionByte;
			std::cout << std::endl << header << "char" << std::endl;
			gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
			break;
		case '0':
		case '1':
			index = key - '0';
			switch (statusPrecision)
			{
			case precisionHalf:
				// precision half
				stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
				gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
				break;
			case precisionFloat:
				// precision single
				stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
				gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
				break;
			case precisionByte:
				// precision byte (gray gain)
				gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
				break;
			default:
				break;
			}
			break;
		case 'c':
		case 'C':
			std::cout << std::endl << "Using CPU SIMD           " << std::endl;
			statusDevice = useCpuSimd;
			break;
		case 'g':
		case 'G':
			std::cout << std::endl << "Using GPU                " << std::endl;
			statusDevice = useGpu;
			break;
		default:
			break;
		}

		if (statusDevice == useCpuSimd)
		{
			elapsedTime = multiplyImage(image, gain);
		}
		else
		{
#ifdef HAVE_CUDA
			// CUDA
			elapsedTime = multiplyImageCuda(image, gain);
#endif // HAVE_CUDA
		}
		computeStatistics(elapsedTime, key);

		if (key == 's' || key == 'S')
		{
			cv::imwrite(dumpFilename, image);
		}

		cv::imshow(windowName, image);
		key = cv::waitKey(1);
	}
	std::cout << std::endl;
	cv::destroyAllWindows();
	releaseArray();

	return 0;
}
Code Example #11
void OpenCVTemplateApp::makeGUI() {
    interface->clear();
    interface->addButton("load image", [this] {
        auto path = ci::app::getOpenFilePath();
        image = cv::imread(path.string());
        std::cout <<"cols "<<image.cols << std::endl;
        std::cout <<"rows "<<image.rows << std::endl;
        std::cout <<"channels "<<image.channels() << std::endl;
        imageTexture = gl::Texture::create(fromOcv(image));
    });
    interface->addButton("load video", [this] {
        auto path = ci::app::getOpenFilePath();
        video.open(path.string());
        frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
        frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
        totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
        video.read(frame);
        if(isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
        makeGUI();
    });
    interface->addSeparator();
    if(frameTexture) {
        interface->addParam("gray scale", &isGrayScale).updateFn([this] {
            video.retrieve(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
            makeGUI();
        });
        interface->addParam("nb of feature",&nbOfFeaturePoints).min(1).max(1000);
        if(isGrayScale) {
            interface->addButton("get feature points", [this] {
                cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
            });
        }
        interface->addSeparator();
        interface->addParam("frame",&frameIndex).min(0).max(totalFrames-1).step(1).updateFn([this] {
            video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
            video.read(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
        });
        interface->addSeparator();
        interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
        interface->addButton("play",[this] {
            currentState = PLAY;
            makeGUI();
        });
        if(currentState == PLAY) {
            interface->addButton("pause",[this] {
                currentState = PAUSE;
                makeGUI();
            });
        }
    }
}
Code Example #12
			cv::imshow(targetName, targetImage);
		}
		targetSelected = true;
	}
}
void processMouseActions() {
	cv::setMouseCallback(imageName,CallBackFunc,NULL);
}
void showHistograms() {
	cv::imshow("Hue Histogram", Hist.getHueHistogramImage(targetImage));
	cv::imshow("Sat Histogram", Hist.getSatHistogramImage(targetImage));
	cv::imshow("Val Histogram", Hist.getValHistogramImage(targetImage));
}

// Quadrant Drawing
int frameWidth = int(camera.get(CV_CAP_PROP_FRAME_WIDTH));
int frameHeight = int(camera.get(CV_CAP_PROP_FRAME_HEIGHT));
int centerRectSize = 50;
cv::Point centerPoint = cv::Point(frameWidth/2, frameHeight/2);
cv::Rect centerRectangle = cv::Rect(cv::Point(centerPoint.x-centerRectSize, centerPoint.y-centerRectSize),
	cv::Point(centerPoint.x+centerRectSize, centerPoint.y+centerRectSize));
bool targetInQ1 = false;
bool targetInQ2 = false;
bool targetInQ3 = false;
bool targetInQ4 = false;
bool targetCentered = false;

void drawCenterBox(int thickness=1) {
	cv::rectangle(image, centerRectangle, redColor, thickness);
}
void drawCenterDot() {
Code Example #13
File: subsextractor.cpp  Project: egrosclaude/subs
int SubsExtractor::run()
{

	namedWindow("Control", CV_WINDOW_AUTOSIZE);
    //createTrackbar("SF", "Control", &StartFrame, cap->get(CV_CAP_PROP_FRAME_COUNT), (void (*)(int,void *))&SubsExtractor::onSFtb, 0);
	createTrackbar("SF", "Control", &StartFrame, cap->get(CV_CAP_PROP_FRAME_COUNT), onSFtb, this);
	createTrackbar("EF", "Control", &EndFrame, 
                   cap->get(CV_CAP_PROP_FRAME_COUNT), 0, 0);
    createTrackbar("T1", "Control", &th1,255,NULL,0);
    createTrackbar("T2", "Control", &th2,255,NULL,0);

	int xmax = cap->get(CV_CAP_PROP_FRAME_WIDTH); 
    int ymax = cap->get(CV_CAP_PROP_FRAME_HEIGHT); 
	int x = xmax/2 - 50; int y = ymax - 110; 
	int xw = 100; int yh = 100;
	// 800x90+240+590 convert
	// (240,590) -> (240+800,590+90) = (1040,680)
	fprintf(stderr,"FRAME (%d %d) -> (%d %d)\n", x, y, x+xw, y+yh);
	fprintf(stderr,"STARTFRAME %d ENDFRAME %d\n", StartFrame, EndFrame);

	int subs;
	int frame = 0;
	char subtext[1024] = "";
	char same[] = " .   ";
	string f;
	char chronline[500];
	while(true) {
		if(!cap->read(img)) {
			cout << "Cannot read a frame from video stream" << endl;
			break;
		}
		if((frame = cap->get(CV_CAP_PROP_POS_FRAMES)) >= EndFrame) {
			cout << "Beyond EndFrame" << endl;
			break;
		}
		//fprintf(stderr,"%d\r",frame);
		subs = haysubs(x, x + xw, y, y + yh);
		fprintf(stderr,"subs %d\n", subs);
		switch(subs) {
			case SAME:
				//fprintf(stderr,"%s           \r", same + frame % 4);
				break;
			case START:
				if(ocr(subtext))
					setchron(cap->get(CV_CAP_PROP_POS_MSEC));
				//fprintf(stderr, "STR frame %ld\n",frame);
				break;
			case END:
				//fprintf(stderr,"END\n");
				getchron(cap->get(CV_CAP_PROP_POS_MSEC), chronline);
				printf("%s\n%s\n\n", chronline, subtext);
				//fprintf(stderr, "END frame %d %s\n",frame,subtext);
				break;
			case CHANGE:
				//fprintf(stderr,"CHANGE\n");
				//string s = getchron();
				//setchron(cap->get(CV_CAP_PROP_POS_MSEC));
				//imwrite(f, img);
				//chron = cap->get(CV_CAP_PROP_POS_MSEC);
				//intchron(CHRON_START,chron);
				//fprintf(stderr, "CHG frame %d\n",frame);
				break;
			default:
				fprintf(stderr,"ERROR SUBS\n");
		}
		if (waitKey(30) == 27) {
			cout << "esc key pressed by user" << endl;
			break; 
		}
	}
	return 0;
}
Code Example #14
void VideoFormat::setFormat(cv::VideoCapture & videoCapture){
    _frameWidth =  videoCapture.get(CV_CAP_PROP_FRAME_WIDTH);
    _frameHeight = videoCapture.get(CV_CAP_PROP_FRAME_HEIGHT);
    _framesPerSecond = videoCapture.get(CV_CAP_PROP_FPS);
    _frameCount = videoCapture.get(CV_CAP_PROP_FRAME_COUNT);
}
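
A hypothetical usage sketch for setFormat(), assuming an already opened capture (the path is made up):

cv::VideoCapture cap("clip.mov");  // hypothetical path
VideoFormat fmt;
if (cap.isOpened()) {
    fmt.setFormat(cap);  // caches width, height, fps and frame count
}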