Beispiel #1
0
// Reads a graph (vertex count, then "i j length" edge records) from a file
// whose path is typed by the user, runs the shortest-path routine
// (capture), and frees all resources.  Vertices are 1-based.
int main ()
{
	printf("Enter file adress\n");
	char *fileAdress = new char[maxFileAdressLength];
	// gets() (removed from the standard) cannot limit the read and can
	// overflow the buffer; fgets() is bounded, then the newline is stripped.
	if (fgets(fileAdress, maxFileAdressLength, stdin) == nullptr)
		fileAdress[0] = '\0';
	for (char *p = fileAdress; *p != '\0'; ++p)
	{
		if (*p == '\n')
		{
			*p = '\0';
			break;
		}
	}
	FILE *file = fopen(fileAdress, "r");
	// Dereferencing a failed fopen() would crash in fscanf below.
	if (file == nullptr)
	{
		printf("Can not open file\n");
		delete []fileAdress;
		return 1;
	}
	// Initialise the adjacency matrix to "no edge" (maxWay).
	int adjMatrix[maxVertexNumber][maxVertexNumber];
	for (int i = 0; i < maxVertexNumber; ++i)
		for (int j = 0; j < maxVertexNumber; ++j)
			adjMatrix[i][j] = maxWay;
	int numb = 0;
	fscanf (file, "%d", &numb);
	// Vertices are 1-based below, so numb may not exceed maxVertexNumber - 1.
	if (numb < 0)
		numb = 0;
	if (numb > maxVertexNumber - 1)
		numb = maxVertexNumber - 1;
	int i = 0;
	int j = 0;
	int length = 0;
	// Checking fscanf's return value (instead of feof) avoids the classic
	// bug of processing the last record twice after a short read.
	while (fscanf (file, "%d %d %d", &i, &j, &length) == 3)
	{
		// Ignore out-of-range vertex indices instead of corrupting memory.
		if (i < 0 || i >= maxVertexNumber || j < 0 || j >= maxVertexNumber)
			continue;
		adjMatrix[i][j] = length;
		adjMatrix[j][i] = length;
	}
	// Vertex 1 is the start: visited, distance 0; the rest are unreached.
	int isVisited[maxVertexNumber];
	isVisited[1] = 1;
	for (int i = 2; i <= numb; ++i)
		isVisited[i] = 0;
	int minToFirst[maxVertexNumber];
	minToFirst[1] = 0;
	for (int i = 2; i <= numb; ++i)
		minToFirst[i] = maxWay;
	// One path string per vertex; the start vertex's path is just "1".
	String *ways[maxVertexNumber];
	for (int i = 1; i <= numb; ++i)
		ways[i] = createString();
	addSymbol(ways[1], '1');
	capture(adjMatrix, isVisited, numb, minToFirst, ways);
	fclose(file);
	delete []fileAdress;
	for (int i = 1; i <= numb; ++i)
	{
		deleteString(ways[i]);
		ways[i] = nullptr;
	}
	// Pause until the user enters a number (keeps the console open).
	scanf("%d", &i);
}
Beispiel #2
0
// Grabs NUM_FRAMES chessboard-corner samples from camera (index + 1).
// Sampling starts once the user presses 's'; accepted samples are spaced
// at least DELAY_BETWEEN_FRAMES clock ticks apart and each acceptance is
// flashed (frame inverted) as visual feedback.  Exits the process if the
// camera cannot be opened.
std::vector<pf::Corners> getCornersSamples(size_t index) {

    cv::Size numSquares(NUM_HOR_SQUARES, NUM_VER_SQUARES);
    cv::VideoCapture capture(index + 1);

    if (!capture.isOpened()) {
        std::cerr << "Can't open the camera" << std::endl;
        std::exit(-1);
    }

    capture.set(CV_CAP_PROP_FPS, FRAMES_PER_SECOND);

    std::vector<pf::Corners> cornersSamples;
    bool started = false;
    clock_t time = 0;

    while (cornersSamples.size() < NUM_FRAMES) {

        // Capture frame
        cv::Mat frame;
        capture >> frame;

        // A failed grab yields an empty Mat, which would make the corner
        // detection and drawing calls below throw -- skip such frames.
        if (frame.empty()) {
            continue;
        }

        // Find chessboard corners
        auto found = findChessboardCorners(frame);

        if (found.second && started && clock() - time > DELAY_BETWEEN_FRAMES) {
            time = clock();
            cornersSamples.push_back(found.first);
            cv::bitwise_not(frame, frame); // flash to signal acceptance
        }

        // Show image
        cv::drawChessboardCorners(frame, numSquares, cv::Mat(found.first), found.second);
        cv::imshow("Calibrate", frame);

        // Wait for 's' to start
        if (cv::waitKey(100) == 's') {
            started = true;
        }
    }

    return cornersSamples;
}
Beispiel #3
0
//  Pumps one complete (possibly multipart) message from 'from_' to 'to_',
//  mirroring each frame to the capture socket via capture(), and accounts
//  the whole multipart message as a single packet in the per-socket
//  statistics.  Returns 0 on success, -1 on the first failing socket call
//  (errno is left as set by that call; frames already sent to 'to_' are
//  not rolled back).
int forward (class zmq::socket_base_t *from_,
             zmq_socket_stats_t *from_stats,
             class zmq::socket_base_t *to_,
             zmq_socket_stats_t *to_stats,
             class zmq::socket_base_t *capture_,
             zmq::msg_t &msg_)
{
    int more;
    size_t moresz;
    size_t complete_msg_size = 0;
    while (true) {
        int rc = from_->recv (&msg_, 0);
        if (unlikely (rc < 0))
            return -1;

        complete_msg_size += msg_.size ();

        //  ZMQ_RCVMORE reports whether another frame of this message follows.
        moresz = sizeof more;
        rc = from_->getsockopt (ZMQ_RCVMORE, &more, &moresz);
        if (unlikely (rc < 0))
            return -1;

        //  Copy message to capture socket if any
        rc = capture (capture_, msg_, more);
        if (unlikely (rc < 0))
            return -1;

        //  Propagate the "more frames follow" flag to the destination.
        rc = to_->send (&msg_, more ? ZMQ_SNDMORE : 0);
        if (unlikely (rc < 0))
            return -1;

        if (more == 0)
            break;
    }

    // A multipart message counts as 1 packet:
    from_stats->msg_in++;
    from_stats->bytes_in += complete_msg_size;
    to_stats->msg_out++;
    to_stats->bytes_out += complete_msg_size;

    return 0;
}
Beispiel #4
0
// Captures a fresh screenshot of the game window, re-detects the status
// bars whenever detection is off or the window size changed, then samples
// every bar.  Returns the current status bitmask.
int L2Window::check(){
    QImage image;
    image = capture();
    if(image.isNull()) {
        // Nothing could be captured: retry bar detection and return the
        // previous status unchanged.
        qDebug("image.isNull()");
        findBars();
        return status;
    }
    // Re-run detection when it is off or the window was resized.
    if(status == L2_OFF || image_width != image.width() || image_height != image.height()){
        findBars();
        status = L2_OFF;
        image_width = image.width();
        image_height = image.height();
        // Accumulate per-bar detection results into the status bitmask.
        for(int i = idCP; i < BARNUM; i++ ){
            status |= findXP(i, image);
        }


        // Paint a gray line over each detected bar (debug visualisation,
        // saved to image.png below).
        for(int j = idCP; j < BARNUM; j++ ){
            for(int i=bar[j].getBegin();i<bar[j].getEnd();i++){
                 image.setPixel(i, bar[j].getY(), qRgb(128, 128, 128));
            }
        }


        image.save("image.png");
        // Extract an 18x18 icon near the CP bar and turn it into the tray
        // icon.  NOTE(review): the +18/-29 offsets are empirical -- confirm
        // against the game client layout.
        QImage icotmp=image.copy(bar[idCP].getBegin()+18, bar[idCP].getY()-29, 18, 18);
    //    icotmp.save("icotmp.png");
        QPixmap pixmap(20,20);
        pixmap.convertFromImage(icotmp);
    //    pixmap.save("pixmap.png");
        if(L2icon) delete L2icon;
        L2icon = new QIcon(pixmap);

    }

    //QImage barimg=image.copy(bar[idCP].getBegin(), bar[idCP].getY(), bar[idCP].getEnd()-bar[idCP].getBegin(), 1);
    //barimg.save("CP.png");
    // Sample the current value of every bar from the captured image.
    for(int i = idCP; i < BARNUM; i++ ){
        bar[i].checkXPBar(image);
    }
    return status;
}
Beispiel #5
0
// Mouse-move handler: tries to capture an editable shape node under the
// cursor and marks the canvas dirty when the captured shape appears or
// disappears.  Returns true only when the base class consumed the event.
bool EditPointOP::OnMouseMove(int x, int y)
{
	if (ee::ZoomViewOP::OnMouseMove(x, y)) return true;

	// Capture radius comes from the UI control; 0 disables capturing.
	int tolerance = m_node_capture ? m_node_capture->GetValue() : 0;
	if (tolerance == 0) {
		return false;
	}

	sm::vec2 pos = m_stage->TransPosScrToProj(x, y);
	NodeCapture capture(m_shapes_impl, tolerance);
	auto old = m_captured.shape;
	capture.captureEditable(pos, m_captured);
	// Redraw only when the capture state toggled.  Parenthesized
	// explicitly: the original mixed && and || without parentheses,
	// relying on precedence (a -Wparentheses warning and easy to misread).
	if ((old && !m_captured.shape) || (!old && m_captured.shape)) {
		ee::SetCanvasDirtySJ::Instance()->SetDirty();
	}

	return false;
}
Beispiel #6
0
// Prints frame count and frame rate for every video file given on the
// command line.
int main(int argc, char* argv[]){
  if(argc < 2){
    std::cerr << "usage: detfps filename"<<std::endl;
    exit(EXIT_FAILURE);
  }
  for(int i = 1; i < argc; i++){
    std::string filename(argv[i]);
    std::string vInput(filename);

    cv::VideoCapture capture(vInput);
    // Without this check an unopenable file silently reports 0 frames and
    // 0 fps; report it and continue with the remaining arguments.
    if(!capture.isOpened()){
      std::cerr << "Could not open: " << vInput << std::endl;
      continue;
    }
    double fpsIn = capture.get(CV_CAP_PROP_FPS);

    int frameCount = capture.get(CV_CAP_PROP_FRAME_COUNT);
    std::cout << "INPUT: " << vInput << std::endl;
    std::cout << "* Number of frames: " << frameCount << std::endl;
    std::cout << "* Frames per second: " << fpsIn << std::endl;
  }
  return EXIT_SUCCESS;
}
Beispiel #7
0
// Shows a live preview of the default camera until ESC is pressed.
// Returns 0 on normal exit, -1 if the camera cannot be opened.
int Cam::showCam() {

    cv::VideoCapture capture(0); // open default camera
    if ( !capture.isOpened() )
        return -1;

    cv::namedWindow("Test OpenCV",1);
    cv::Mat frame;

    while ( true )
    {
        capture >> frame;
        // A failed grab yields an empty Mat; imshow would throw on it.
        if ( frame.empty() )
            break;
        cv::imshow("Test OpenCV", frame );
        int key = cv::waitKey(1);
        if ( key == 27 ) // ESC
            break;
    }
    return 0;
}
Beispiel #8
0
// Streams registered depth and color images from an OpenNI2 sensor
// (e.g. Kinect) into two windows until 'q' is pressed.
int main(int argc, char * argv[])
{
    cv::namedWindow("depth");
    cv::namedWindow("color");

    // Open Kinect sensor
    cv::VideoCapture capture(cv::CAP_OPENNI2);
    if (!capture.isOpened())
    {
        printf("Could not open OpenNI-capable sensor\n");
        return -1;
    }
    // Align the depth map to the color image.
    capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
    double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
    //printf("Focal length = %f\n", focal_length);

    cv::Mat color, depth;
    bool isLooping = true;
    while (isLooping)
    {
        capture.grab();
        capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
        capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);

        // imshow throws on an empty Mat; skip streams that failed to
        // retrieve but keep looping so waitKey can still process 'q'.
        if (!depth.empty())
            cv::imshow("depth", depth);
        if (!color.empty())
            cv::imshow("color", color);

        // 'q' quits.  The original also had "if (key == 'q') break;" right
        // before this switch, which made the case below unreachable dead
        // code; the loop flag is now the single exit path.
        char key = (char)cv::waitKey(10);
        switch (key)
        {
        case 'q':
            isLooping = false;
            break;
        default:
            ;
        }
    }
    return 0;
}
// ROS node: streams color + point-cloud frames from a Kinect (OpenNI),
// converts them to a PCL cloud, runs plane extraction, and displays both
// the color image and the cloud until ESC / node shutdown / viewer close.
int main(int argc, char** argv)
{
    std::cout << "INITIALIZING COLOR_RECOGNIZER..." << std::endl;
    ros::init(argc, argv, "color_recog");
    ros::NodeHandle n;
    ros::Rate loop(30);

    std::cout << "ColorRecognizer.->Triying to initialize kinect sensor... " << std::endl;
    cv::VideoCapture capture(CV_CAP_OPENNI);
    if(!capture.isOpened())
    {
        std::cout << "ColorRecognizer.->Cannot open kinect :'(" << std::endl;
        return 1;
    }
    // Register the depth map to the color image.
    capture.set(CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON);
    std::cout << "ColorRecognizer.->Kinect sensor started :D" << std::endl;

    cv::Mat matDepth;
    cv::Mat matColor;
    // Working clouds: full frame, plane inliers, and everything else.
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pclFrame(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pclNotPlane(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr pclPlane(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::visualization::CloudViewer viewer("Original");
    // Exit on ESC (27), ROS shutdown, or when the cloud viewer is closed.
    while(ros::ok() && cv::waitKey(10) != 27 && !viewer.wasStopped())
    {
        if(!capture.grab())
        {
            // No frame available this cycle; keep the node responsive.
            loop.sleep();
            ros::spinOnce();
            continue;
        }
        // matDepth actually holds the XYZ point-cloud map, not raw depth.
        capture.retrieve(matDepth, CV_CAP_OPENNI_POINT_CLOUD_MAP);
        capture.retrieve(matColor, CV_CAP_OPENNI_BGR_IMAGE);

        cvMat2Pcl(matColor, matDepth, *pclFrame);
        // NOTE(review): pclFrame is passed as both input and output here --
        // presumably extractPlane supports in-place use; confirm.
        extractPlane(pclFrame,  pclFrame, pclPlane, pclNotPlane);
        
        cv::imshow("Original", matColor);
        viewer.showCloud(pclFrame);
        ros::spinOnce();
    }
}
Beispiel #10
0
// Keyboard handler: SPACE takes a capture, 'j'/'k' step to the previous/
// next camera device and re-open the grabber on it.
void testApp::keyPressed(int key) {
	// Re-opens the video grabber on the currently selected device.
	auto reopenGrabber = [&]() {
		videoGrabber.setDeviceID(deviceId);
		videoGrabber.initGrabber(640, 480);
	};

	if (key == ' ') {
		capture();
	} else if (key == 'j') {
		deviceId--;
		reopenGrabber();
	} else if (key == 'k') {
		deviceId++;
		reopenGrabber();
	}
}
// Plays the stream/file given as argv[1] in a window until a read fails.
// Returns 1 on missing argument, open failure, or read failure.
int main(int argc , char ** argv)
{
    if (argc < 2)
        return 1;

    cv::VideoCapture capture(argv[1]);
    if (!capture.isOpened())
        return 1;

    cv::namedWindow("RTSP Stream", CV_WINDOW_AUTOSIZE);
    cv::Mat frame;

    while(true)
    {
        if (!capture.read(frame))
            return 1;
        // Bug fix: frames were shown in a second window named "TEST",
        // leaving the "RTSP Stream" window created above permanently empty.
        cv::imshow("RTSP Stream", frame);
        cv::waitKey(1);
    }
}
Beispiel #12
0
//--------------------------------------------------------------
// Per-frame update: refreshes every camera, recomputes the stereo
// transforms once both cameras have flagged a new calibration point, and
// fires a timed capture when the timer is armed.
void testApp::update(){
	
	// FOREACH_CAMERA presumably expands to a loop header that declares
	// iCam over 0..NUM_CAMERAS-1 -- confirm against the macro definition.
	FOREACH_CAMERA
		camera[iCam].update();
	
	// Both cameras must have registered a point ("bangAdded") before the
	// transforms are recalculated; the flags are then consumed.
	if (NUM_CAMERAS==2)
		if (camera[0].bangAdded && camera[1].bangAdded)
		{
			camera[0].bangAdded = false;
			camera[1].bangAdded = false;
			calcTransforms();
		}
	
	// Timed capture: wait out the period and make sure neither camera is
	// still busy adding a point before triggering the next capture.
	if (timerOn)
		if (ofGetElapsedTimef() > (lastCaptureTime+TIMER_PERIOD) && !camera[0].doAdd && !camera[1].doAdd)
		{
			capture();
			lastCaptureTime = ofGetElapsedTimef();
		}
}
Beispiel #13
0
// Python binding: grabs one frame through the unicap library and returns
// it as a Python list of per-pixel intensities.
// NOTE(review): relies on globals `buf` and `nrOfPixel` that capture()
// presumably fills -- confirm; also the results of getHandle() and
// PyList_New() are not checked for failure.
static PyObject *
unicap_capture(PyObject *self, PyObject *args)
{
  unicap_handle_t handle = getHandle();
  
  capture(handle);
  unicap_close(handle);

  PyObject* imgarray = PyList_New(nrOfPixel);
  int i;
  for(i = 0; i < nrOfPixel; i++)
    {
      long l = buf[i];
      PyObject* intensity = PyLong_FromLong(l);
      /* PyList_SetItem steals the reference to `intensity`. */
      PyList_SetItem(imgarray, i, intensity);
    }
  /* The frame buffer is owned here; release it after conversion. */
  free(buf);
  buf = NULL;
  return imgarray;
}
Beispiel #14
0
// Capture pacing loop: emits capture(frame) mFps times per second for
// mDuration seconds (framesCnt frames total), waiting for the consumer to
// drain the request queue between frames, then drains the queue and
// signals completion.
void LSCWorker::start()
{
    // Time budget per frame, in milliseconds.
    const double period = (1.0/mFps)*1000;

    int currentFrame = 0;
    const int framesCnt = mFps*mDuration;

    qDebug() << "period:"<<period << "mFps:" << mFps << "duration:" << mDuration << "framesCnt:"<<framesCnt;

    // The second clause is redundant with the first (0 < 1) but kept as-is.
    while ( currentFrame < framesCnt || (currentFrame == 0 && framesCnt == 1))
    {
        const qint64 started = QDateTime::currentMSecsSinceEpoch();

        // NOTE(review): busy-wait until the consumer dequeues the previous
        // request -- this spins a full core and reads mQueue without
        // holding mMutex (the clear() below does take it); confirm the
        // queue type tolerates this concurrent read.
        while( !mQueue.isEmpty() )
        {
            continue;
        }

        qDebug() << "request for capture" << currentFrame << "\t\t" << QDateTime::currentMSecsSinceEpoch();
        mQueue.enqueue( "capture" );
        emit capture( currentFrame );
//        qApp->processEvents();

        // Pace multi-frame captures: sleep whatever is left of the period.
        if( framesCnt > 1 )
        {
            const qint64 spent = QDateTime::currentMSecsSinceEpoch() - started;
            const qint64 wait = period - spent;

            if ( wait>0 ) customWait( wait );
        }

        ++currentFrame;

    }

    mMutex.lock(); // to avoid dequeueing from LSCCapturer::onCaptureRequested
    mQueue.clear();
    mMutex.unlock();

    emit finished();
}
// Builds the control row (preview / counter / capture / delete buttons)
// and the timeline slider, and wires their signals.
void MeasureMarkersWindow::createWidget() {
  // Preview button ("预览")
  mPlay = new QPushButton();
  mPlay->setText(QString::fromWCharArray(L"预览"));
  connect(mPlay, SIGNAL(pressed()), this, SLOT(preview()));

  // Counter label: "<current>/<total>" captures
  mCaps = new QLabel();
  mCaps->setText(" 0/0");
  // NOTE(review): Qt::AlignCenter also implies horizontal centering, which
  // conflicts with AlignRight -- AlignVCenter may have been intended.
  mCaps->setAlignment(Qt::AlignRight | Qt::AlignCenter);

  // Capture button ("捕获"); disabled until preview starts
  mCapture = new QPushButton();
  mCapture->setText(QString::fromWCharArray(L"捕获"));
  connect(mCapture, SIGNAL(pressed()), this, SLOT(capture()));
  mCapture->setDisabled(true);

  // Delete button ("删除"); disabled until an image exists
  mDelete = new QPushButton();
  mDelete->setText(QString::fromWCharArray(L"删除"));
  connect(mDelete, SIGNAL(pressed()), this, SLOT(deleteImg()));
  mDelete->setDisabled(true);

  // Timeline slider over the captured images (1-based, initially empty)
  mSlider = new QSlider(Qt::Horizontal);
  mSlider->setTickPosition(QSlider::TicksBelow);
  mSlider->setMinimum(1);
  mSlider->setMaximum(1);
  mSlider->setTickInterval(1);
  mSlider->setValue(1);
  mSlider->setTracking(true);
  mSlider->setDisabled(true);
  connect(mSlider, SIGNAL(valueChanged(int)), this, SLOT(updateCurrent(int)));

  mCtrlLayout->addWidget(mPlay);
  mCtrlLayout->addWidget(mCaps);
  mCtrlLayout->addWidget(mCapture);
  mCtrlLayout->addWidget(mDelete);

  mTimeLineLayout->addWidget(mSlider);
}
Beispiel #16
0
// Decodes every frame of the video at videoPath and returns one histogram
// per frame, in frame order.  Extraction is fanned out to worker threads;
// results are gathered as (frameNumber, histogram) pairs and sorted back
// into order at the end.  On a decode error the frames read so far are
// still processed.
vector<Mat> Utils::extractVideoHistograms(string videoPath) {
	vector< pair<int, Mat> > hTemp;
	vector<Mat> histograms;
	// Each threads will be real fast...
	// NOTE(review): 100 threads per hardware core is heavy oversubscription,
	// and hTemp is passed by reference to every worker -- presumably
	// extractHistogram synchronizes its writes; confirm.
	unsigned nThreads = thread::hardware_concurrency() * 100;
	vector<thread> pool;

	try {	
		Mat frame;
		VideoCapture capture(videoPath);
		
		for(int num = 0; capture.read(frame); num++) {
			// Cap the number of in-flight threads: drain the pool once full.
			if(pool.size() > nThreads) {
				for(auto &t : pool) {
					t.join();
				}
				pool.clear();
			}
			// Deep-copy the frame: `frame` is reused by the next read().
			Mat fTemp;
			frame.copyTo(fTemp);
			pool.push_back(thread(&Utils::extractHistogram, fTemp, num, std::ref(hTemp)));	
		}
		
		// Wait for the remaining workers before touching hTemp.
		for(auto &t : pool) {
			t.join();
		}
		pool.clear();
		frame.release();
		capture.release();		
	} catch(exception &e) {
		cout << "The video file is corrupt or of an unsupported format" << endl;
	}

	// Restore frame order (workers finish in arbitrary order).
	std::sort(hTemp.begin(), hTemp.end(), Utils::pairCompare);
		
	for(pair<int, Mat> t : hTemp) {
		histograms.push_back(t.second);
	}

	return histograms;	
}
Beispiel #17
0
// Mouse-move handler: tries to capture an editable shape node under the
// cursor and refreshes the panel when the captured shape appears or
// disappears.  Returns true only when the base class consumed the event.
bool EditBezierOP::onMouseMove(int x, int y)
{
	if (ZoomViewOP::onMouseMove(x, y)) return true;

	Vector pos = m_editPanel->transPosScreenToProject(x, y);
	// Capture distance comes from the component panel; 0 disables capture.
	int tolerance;
	if (m_cmpt)
		tolerance = m_cmpt->getNodeCaptureDistance();
	else
		tolerance = 0;
	if (tolerance != 0)
	{	
		NodeCapture capture(m_shapesImpl, tolerance);
		IShape* old = m_captured.shape;
		capture.captureEditable(pos, m_captured);
		// Refresh only when the capture state toggled.  Parenthesized
		// explicitly: the original mixed && and || without parentheses,
		// relying on precedence (a -Wparentheses warning, easy to misread).
		if ((old && !m_captured.shape) || (!old && m_captured.shape))
			m_editPanel->Refresh();
	}

	return false;
}
// Builds the capture tab: a translucent background, a toggle button for
// screen recording, a screenshot button, and the picture widget bound to
// ip/port.  NOTE(review): parameter `pp` is unused -- confirm whether it
// is intentional.
content2::content2(QString ip,QString port,int pp, QWidget *parent)
    : QWidget(parent),ip_(ip),port_(port)
{
    // Semi-transparent, light background.
    QPalette palette;
    palette.setBrush(QPalette::Window,QBrush(QColor(250,255,240,150)));
    setPalette(palette);
    setAutoFillBackground(true);
    // Recording toggle button (caption: "start recording").
    button = new QPushButton(this);
    button->setText("开始录制");
    button->setCheckable(true);
    connect(button,SIGNAL(toggled(bool)),this,SLOT(button_text(bool)));

    // Screenshot button (caption: "screen capture").
    button2 = new QPushButton(this);
    button2->setText("屏幕截图");


    // Picture widget: forwards its messages up and takes screenshots on
    // button2 clicks.
    picture = new pic_show(this,ip,port);
    button->move( width()-10-button->width(),10 );
    connect(picture,SIGNAL(showmessage(QString)),this,SIGNAL(showmess(QString)));
    connect(button2,SIGNAL(clicked()),picture,SLOT(capture()));
}
// Streams the default camera at quarter resolution through the GPGPU test
// pipeline, rendering into a GL context window.
int main(int argc, char **argv)
{
    cv::VideoCapture capture(0);
    // Without this check a missing camera yields a 0x0 size and garbage
    // frames in every call below.
    if (!capture.isOpened())
        return 1;

    cv::Size size(int(capture.get(cv::CAP_PROP_FRAME_WIDTH)), int(capture.get(cv::CAP_PROP_FRAME_HEIGHT)));

    // Process/display at quarter resolution.
    size = size / 4;

    // Create the context
    gatherer::graphics::GLContextWindow window(size, "display");
    gatherer::graphics::OEGLGPGPUTest test(&window, window.getResolution().x);

    while (true)
    {
        cv::Mat frame;
        capture >> frame;
        // resize/cvtColor throw on an empty frame (device unplugged or end
        // of stream) -- stop cleanly instead.
        if (frame.empty())
            break;
        cv::resize(frame, frame, size);
        cv::cvtColor(frame, frame, cv::COLOR_BGR2BGRA);
        test.captureOutput(frame);
        window.swapBuffers();
    }
    return 0;
}
Beispiel #20
0
// Walks and formats the call stack of `thread`, returning the accumulated
// text buffer.  Any failure (including during the walk itself) falls
// through to returning whatever has been collected so far -- the function
// is noexcept and must not propagate.
const util::string&
StackWalker::printStack(HANDLE thread) noexcept
{
	try
	{
		if (_internal->isModuleLoaded())
		{
			// Snapshot the target thread's context for the walk.
			CaptureContext capture(thread);

			// Reset the output buffer before formatting a fresh stack.
			this->clear();

			this->printStack(capture.thread(), capture.context());
		}

		return this->str();
	}
	catch (...)
	{
		// Swallow everything: return the (possibly partial) buffer.
		return this->str();
	}
}
Beispiel #21
0
 // Keyboard callback: ESC closes the window, 'C' delegates to capture(),
 // 'F'/'B' zoom in and out.  Only initial key presses are handled (no
 // repeats or releases).
 void  CPlyRender::key(GLFWwindow *window, int key, int scancode, int action, int mods)
 {
	 if (action != GLFW_PRESS)
		 return;

	 if (key == GLFW_KEY_ESCAPE) {
		 // ESC sets the window's close flag.
		 glfwSetWindowShouldClose(window, GL_TRUE);
	 } else if (key == GLFW_KEY_C) {
		 capture(window);
	 } else if (key == GLFW_KEY_F) {
		 m_zoom += 0.1;
	 } else if (key == GLFW_KEY_B) {
		 m_zoom -= 0.1;
	 }
 }
Beispiel #22
0
bool EditPointOP::OnMouseRightDown(int x, int y)
{
	if (ee::ZoomViewOP::OnMouseRightDown(x, y)) return true;

	int tolerance = m_node_capture ? m_node_capture->GetValue() : 0;
	if (tolerance == 0) {
		return false;
	}

	sm::vec2 pos = m_stage->TransPosScrToProj(x, y);
	NodeCapture capture(m_shapes_impl, tolerance);
	capture.captureEditable(pos, m_captured);
	if (m_captured.shape) {
		ee::RemoveShapeSJ::Instance()->Remove(m_captured.shape);
		m_shapes_impl->GetShapeSelection()->Clear();
		m_captured.Clear();

	}

	return false;
}
Beispiel #23
0
// Loads one AVI of an image sequence into the (height x time) image grid
// m_lImage, converting 8-bit/color frames to 16-bit grayscale and
// reporting progress.  Returns false when the file cannot be opened.
// (Definition appears truncated in this view.)
bool CaViewerCore::openAvi(ImageInfo info, int nSequence)
{
    static const QString sProgressFormat = QObject::tr("Loading... %p%");

    // Number of images contributed by the earlier sequences, used to place
    // this file's frames at the right global offset.
    int nPrevImgCount = 0;
    for(int count = 0; count < nSequence; ++count)
        nPrevImgCount += info.imageSize(count);

    cv::VideoCapture capture(info.fullName(nSequence).toStdString());
    if(capture.isOpened())
    {
        const int totalFrames = info.heightSize() * info.timeSize();

        for(int imgCount = 0; imgCount < totalFrames; ++imgCount)
        {
            // Map the flat frame index onto (height, time) grid coordinates.
            const int globalCount = nPrevImgCount + imgCount;
            const int height = globalCount % info.heightSize();
            const int time = globalCount / info.heightSize();

            cv::Mat matVideo;
            capture >> matVideo;

            // Normalise 8-bit data to the 16-bit range used internally.
            if(matVideo.depth() == CV_8U)
                matVideo.convertTo(matVideo, CV_16U, qreal(256));

            if(matVideo.channels() == 3) //  BGR
                cv::cvtColor(matVideo, m_lImage->at(height).at(time), CV_BGR2GRAY);
            else
                (*m_lImage)[height][time] = matVideo;

            // NOTE(review): progress is scaled by m_lImage->size(), not by
            // the total frame count -- confirm this is the intended
            // denominator.
            emit changedProgressValue(100.0 * globalCount / m_lImage->size(), sProgressFormat);
        }

        capture.release();

        return true;
    }
    else
        return false;
Beispiel #24
0
// Renders the move in long algebraic notation, e.g. "Ng1-f3", "Qd1xh5",
// "O-O"/"O-O-O" for castling, "e5xd6 ep" for en passant, and promotions
// with the promoted piece's letter.  Returns "null" for an empty move.
std::string Move::string() const {
    std::string temp;
    if (!data) {
        return "null"; }
    // Piece letter indexed by the low 3 bits of piece(); pawns print empty.
    // A named static array replaces the original compound literal
    // (const char*[]){...}, which is a GNU extension, not standard C++.
    // The table also gains an 8th entry: piece() & 7 can be 7, which would
    // have read past the end of the 7-element literal.
    static const char* const kPieceLetter[] = {"", "R", "B", "Q", "N", "", "K", ""};
    temp += kPieceLetter[piece() & 7];
    temp += (from() & 7) + 'a';
    temp += (from() >> 3) + '1';
    temp += capture() ? 'x' : '-';
    temp += (to() & 7) + 'a';
    temp += (to() >> 3) + '1';
    if (isSpecial()) {
        if ((piece() & 7) < Pawn) {
            // Promotion: replace the leading letter with the promoted piece.
            temp += " RBQN"[piece() & 7];
            temp = temp.substr(1); }
        else if ((piece() & 7) == King) {
            // Castling renders on its own, discarding the squares built above.
            if ((to() & 7) == 6)
                return "O-O";
            else
                return "O-O-O"; }
        else
            temp += " ep"; }   // en passant
    return temp; }
Beispiel #25
0
// Shows a live, 640x360 preview of the default webcam.
int main(int argc, const char * argv[]) {
    std::cout << "OpenCV webcam demo!\n";

    cv::VideoCapture capture(0);
    // Reading from an unopened device would loop on empty frames and the
    // resize below would throw.
    if (!capture.isOpened()) {
        std::cout << "Could not open webcam 0\n";
        return 1;
    }
    cv::Mat webcam, contours;

    while (true) {

        // read() returns false when no frame is available; resizing or
        // showing an empty Mat would throw.
        if (!capture.read(webcam) || webcam.empty())
            break;
        cv::resize(webcam, webcam, cv::Size(640, 360), 0, 0, cv::INTER_CUBIC);

        //cv::Canny(webcam,contours,10,350);

        cv::imshow("contours", webcam);
        // Bug fix: without a waitKey call in the loop, HighGUI never
        // processes events and the window never updates.
        cv::waitKey(1);

    }

    return 0;
}
// Pops this GL line-width element, re-sending the parent's line width to
// GL when it differs from the child's (the value being popped over).
void
SoGLLineWidthElement::pop(SoState *state, const SoElement *childElt)
//
////////////////////////////////////////////////////////////////////////
{
    // Since popping this element has GL side effects, make sure any
    // open caches capture it.  We may not send any GL commands, but
    // the cache dependency must exist even if we don't send any GL
    // commands, because if the element changes, the _lack_ of GL
    // commands here is a bug (remember, GL commands issued here are
    // put inside the cache).
    capture(state);
    // This instance now owns its value again (it is no longer a copy of
    // the popped child's state).
    copiedFromParent = FALSE;

    // If the previous element didn't have the same value...
    const SoGLLineWidthElement *child =
	(const SoGLLineWidthElement *)childElt;
	
    // Restore previous line width
    if (data != child->data)
	send();
}
// Pops this GL polygon-offset element, adopting the child's current GL
// state and re-sending this element's values when they differ.
void
SoGLPolygonOffsetElement::pop(SoState *state, const SoElement *childElt)
{
   // Since popping this element has GL side effects, make sure any
   // open caches capture it.  We may not send any GL commands, but
   // the cache dependency must exist even if we don't send any GL
   // commands, because if the element changes, the _lack_ of GL
   // commands here is a bug (remember, GL commands issued here are
   // put inside the cache).
   capture(state);
   // This instance now owns its value again (no longer a parent copy).
   _copiedFromParent = FALSE;
   
   // If the previous element didn't have the same value...
   const SoGLPolygonOffsetElement *child = (const SoGLPolygonOffsetElement*)childElt;

   // The child holds what GL currently has; take that over as our notion
   // of the current GL state before comparing.
   _currentstyles       = child->_currentstyles;
   _currentoffsetfactor = child->_currentoffsetfactor;
   _currentoffsetunits  = child->_currentoffsetunits;

   if(*this != *child)
      send();
}
Beispiel #28
0
int main( int argc, char** argv ) {
	setlocale(LC_ALL, "ru_RU.UTF-8");

	VideoLib::FrameCapturePtr capture(new VideoLib::FrameCapture(".\\vid2.avi"));
	VideoLib::StabilizerPtr stabilizer(new VideoLib::Stabilizer(BUFFER_SIZE,cv::Size(320,240),cv::Size(320,240)));

	stabilizer->setCapture(capture);

	GUI::VisualHolder vs_holder(stabilizer);

	if(!capture->grab()){
		std::cout << "Can't open video file";
		cv::waitKey();
		return 0;
	}

	for(;;){
		GUI::Builder::showWnds(vs_holder);
		if(cv::waitKey(30) >= 0) break;
	}

	return 0;
}
// Splits the video named by --video into per-frame image files written to
// --dir, using --ext (default "png") as the image format.
int main(int argc, char **argv)
{
    cv::CommandLineParser parser(argc, argv, argsDefs);
    std::string videoFileName = parser.get<std::string>("video");
    std::string imgsDirName = parser.get<std::string>("dir");
    if (videoFileName.empty() || imgsDirName.empty())
    {
        std::cerr << "Incorrect input parameters." << std::endl;
        printHelp(std::cerr);
        return ReturnCode::IncorrectInputParameters;
    }
    // Fall back to "png" when no extension was supplied.
    std::string imgsExt = parser.get<std::string>("ext");
    if (imgsExt.empty())
        imgsExt = "png";
    cv::VideoCapture capture(videoFileName);
    if (!capture.isOpened())
    {
        std::cerr << "Video file was not opened." << std::endl;
        printHelp(std::cerr);
        return ReturnCode::VideoFileNotOpened;
    }
    // Pull frames until the stream is exhausted, writing each one out as
    // <dir>/<generated name>.
    cv::Mat frame;
    for (int frameNumber = 0; ; ++frameNumber)
    {
        capture >> frame;
        if (frame.empty())
            break;
        std::string fileName;
        getImageNameByIdx(frameNumber, imgsExt, fileName);
        std::ostringstream pathStream;
        pathStream << imgsDirName << "/" << fileName;
        cv::imwrite(pathStream.str(), frame);
    }
    capture.release();
    return ReturnCode::Success;
}
Beispiel #30
0
// Plays back "video1.avi" frame by frame at the file's native frame rate,
// stopping at end of stream or on any key press.
int main()
{
	// Open the video file
	cv::VideoCapture capture("video1.avi");
	// check if video successfully opened
	if (!capture.isOpened())
		return 1;

	// Get the frame rate
	double rate= capture.get(CV_CAP_PROP_FPS);
	// Some files/codecs report 0 fps; fall back to a nominal 30 fps so the
	// delay computation below cannot divide by zero.
	if (rate <= 0.0)
		rate = 30.0;
	bool stop(false);
	cv::Mat frame; // current video frame
	cv::namedWindow("Extracted Frame");

	// Delay between each frame in ms
	// corresponds to video frame rate
	int delay= 1000/rate;

	// for all frames in video
	while (!stop) {
		// read next frame if any
		if (!capture.read(frame))
			break;
		cv::imshow("Extracted Frame",frame);

		// introduce a delay
		// or press key to stop
		if (cv::waitKey(delay)>=0)
			stop= true;
	}

	// Close the video file.
	// Not required since called by destructor
	capture.release();
}