Code Example #1
	void SelectCameraWindow::onTestEyeCamClicked()
	{
		#ifndef _WIN32
			ui.lEyeCam->setText("Loading...");

			VideoCapture videoCapture(ui.cbEyeCamIndex->currentIndex());
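			// Request a 320x240 capture size; the backend may adjust or ignore this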
			videoCapture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
			videoCapture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

			Mat frame;
			
			/* If the camera could not be opened */
			if(!videoCapture.isOpened())
			{
				ui.lEyeCam->setText("Cannot open camera");
				qDebug() << "Capture not opened";
				return;
			}

			for( ; ; )
			{
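				// Grab and display frames; waitKey() with no timeout blocks until a key is pressed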
				videoCapture.read(frame);
				ui.lEyeCam->setPixmap(QPixmap::fromImage(cvMatToQImage(frame)));
				waitKey();
			}
		#else

			// If the thread is already running
			if(eyeCamFrameReaderThread->isRunning())
			{
				qDebug() << "Eye Camera Thread is already running";

				// If we are already streaming from this camera, there is nothing more to do
				if(eyeCamFrameReaderThread->getCameraIndex() == ui.cbEyeCamIndex->currentIndex())
				{
					qDebug() << "Same camera selected!";
					return;
				}

				// A different camera was selected while streaming from another one
				else
				{
					qDebug() << "Starting stream with different camera";
					eyeCamFrameReaderThread->terminate();
					eyeCamFrameReaderThread->setCameraIndex(ui.cbEyeCamIndex->currentIndex());
					eyeCamFrameReaderThread->start();
				}
			}

			// If the thread has not been started yet
			else
			{
				qDebug() << "Eye Camera Thread starting for the first time";
				eyeCamFrameReaderThread->setCameraIndex(ui.cbEyeCamIndex->currentIndex());
				eyeCamFrameReaderThread->start();
			}

		#endif
		return;
	}
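A note on the Windows branch above: QThread::terminate() kills the thread abruptly and can leave the camera in an undefined state. A safer pattern is cooperative interruption. A minimal sketch, assuming eyeCamFrameReaderThread is a QThread subclass whose run() loop polls isInterruptionRequested() (requestInterruption()/wait() are standard Qt; restartEyeCamThread itself is a hypothetical helper):

	// Hypothetical helper: restart the reader thread cooperatively instead of calling terminate().
	void SelectCameraWindow::restartEyeCamThread(int newIndex)
	{
		eyeCamFrameReaderThread->requestInterruption(); // run() must poll isInterruptionRequested()
		eyeCamFrameReaderThread->wait();                // block until run() returns
		eyeCamFrameReaderThread->setCameraIndex(newIndex);
		eyeCamFrameReaderThread->start();
	}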
Code Example #2
File: main4.cpp  Project: JloveU/RayTracer
// Change the frame rate of a video
int main()
{
    std::string videoFileName("D:\\ProgramData\\VS2010Projects\\RayTracer\\RayTracer\\RenderResults\\animation1.7_2.avi");
    cv::VideoCapture videoCapture(videoFileName);

    if(!videoCapture.isOpened())
    {
        return -1;
    }

    cv::Mat frame;

    std::string videoFileNameNew("D:\\ProgramData\\VS2010Projects\\RayTracer\\RayTracer\\RenderResults\\animation1.7_2_fps15.avi");
    double fpsNew = 15;
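    // Re-encode as MJPG at the new frame rate, keeping the source frame size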
    cv::VideoWriter videoWriter(videoFileNameNew, CV_FOURCC('M', 'J', 'P', 'G'), fpsNew,
                                cv::Size(videoCapture.get(CV_CAP_PROP_FRAME_WIDTH),
                                         videoCapture.get(CV_CAP_PROP_FRAME_HEIGHT)), true);

    while (videoCapture.read(frame))
    {
        videoWriter << frame;
    }

    videoCapture.release();
    videoWriter.release();

    std::cout << "修改后的视频已保存到文件 " << videoFileNameNew << std::endl;
    std::cout << "按回车键退出..." << std::endl;
    std::cin.get();

    return 0;
}
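This snippet is written against the OpenCV 2.x constants (CV_FOURCC, CV_CAP_PROP_*). Under OpenCV 3/4 the same writer is constructed with the scoped names; a minimal sketch of just the changed line, assuming OpenCV >= 3:

    cv::VideoWriter videoWriter(videoFileNameNew,
                                cv::VideoWriter::fourcc('M', 'J', 'P', 'G'),
                                fpsNew,
                                cv::Size(static_cast<int>(videoCapture.get(cv::CAP_PROP_FRAME_WIDTH)),
                                         static_cast<int>(videoCapture.get(cv::CAP_PROP_FRAME_HEIGHT))),
                                true);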
Code Example #3
File: Commons.cpp  Project: nskarakoc/EyeTracker
void FrontCameraFrameReaderThread::run(void)
{
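    // Runs on the worker thread; the UI is updated only through queued signal/slot connections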
    emit onCameraTextChanged("Loading...");

    VideoCapture videoCapture(cameraIndex);
    Mat frame;

    /* If the camera could not be opened */
    if(!videoCapture.isOpened())
    {
        emit onCameraTextChanged("Cannot open camera");
        qDebug() << "Capture not opened";
        return;
    }

    for( ; ; )
    {
        videoCapture.read(frame);
        emit onCameraPixmapChanged(QPixmap::fromImage(cvMatToQImage(frame)));
        waitKey();
    }
}
Code Example #4
    void BroxOpticalFlow::runOnFolder(std::string currentFolder)
    {
        std::stringstream videoCapturePath;
        videoCapturePath << currentFolder << "/%3d.jpg";
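        // A printf-style filename pattern tells VideoCapture to read the frames as an image sequence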
        cv::VideoCapture videoCapture(videoCapturePath.str());

        std::string currentOpticalOutputFolder = replaceString(currentFolder, m_sourceFolder, m_opticalOutputFolder);
        
        // create necessary directories
        Poco::File opticalOutputPath(currentOpticalOutputFolder);
        opticalOutputPath.createDirectories();
        std::cout << "Directory Created: " << opticalOutputPath.path() << std::endl;

        cv::Mat CurrentFrame;

        cv::Mat PreviousFrameGray;
        cv::Mat CurrentFrameGray;

        cv::Mat PreviousFrameGrayFloat;
        cv::Mat CurrentFrameGrayFloat;

        // Prepare receiving variables
        cv::gpu::GpuMat FlowXGPU;
        cv::gpu::GpuMat FlowYGPU;

        cv::Mat FlowX;
        cv::Mat FlowY;

        cv::Mat NormImageOutputFlowX;
        cv::Mat NormImageOutputFlowY;

        // Create optical flow object
        cv::gpu::BroxOpticalFlow broxOpticalFlow = cv::gpu::BroxOpticalFlow(0.197f, 50.0f, 0.8f, 10, 77, 10);

        int i = 0;

        int outputId = 0;

        bool successX, successY;

        while (videoCapture.grab()){
            // std::cout << "extracting image " << i << std::endl;
            i++;

            CurrentFrameGray.copyTo(PreviousFrameGray);
            videoCapture.retrieve(CurrentFrame);

            cv::cvtColor(CurrentFrame, CurrentFrameGray, CV_BGR2GRAY); // OpenCV delivers frames in BGR order
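            // On the first iteration there is no previous frame yet; the empty-check below skips that pass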

            if (PreviousFrameGray.empty() || CurrentFrameGray.empty()){
                continue;
            }

            PreviousFrameGray.convertTo(PreviousFrameGrayFloat, CV_32FC1, 1.0/255.0);
            CurrentFrameGray.convertTo(CurrentFrameGrayFloat, CV_32FC1, 1.0/255.0);

            // Upload images to GPU
            cv::gpu::GpuMat PreviousFrameGPU(PreviousFrameGrayFloat);
            cv::gpu::GpuMat CurrentFrameGPU(CurrentFrameGrayFloat);

            // Perform optical flow
            broxOpticalFlow(PreviousFrameGPU, CurrentFrameGPU, FlowXGPU, FlowYGPU);

            // Download flow from GPU
            FlowXGPU.download(FlowX);
            FlowYGPU.download(FlowY);

            int flowFormat = CV_8UC1;
            std::string flowFileEnding = ".jpg";

            // mean subtraction
            cv::Scalar meanScalarX = mean(FlowX);
            cv::Scalar meanScalarY = mean(FlowY);
            FlowX = FlowX - meanScalarX;
            FlowY = FlowY - meanScalarY;

            // normalization
            FlowX.convertTo(NormImageOutputFlowX, flowFormat, m_alpha, m_beta);
            FlowY.convertTo(NormImageOutputFlowY, flowFormat, m_alpha, m_beta);

            // // check for equal frames
            // double minVal; double maxVal; 
            // minMaxLoc(NormImageOutputFlowX, &minVal, &maxVal);
            // std::cout << outputId << ": " << minVal << " | " << maxVal << std::endl;
            // int tolerance = (int) std::floor(m_alpha / 2.0);
            // if (minVal > 127 - tolerance && maxVal < 127 + tolerance) {
            //     continue;
            // }

            double reverseAlpha = -1.0;
            double reverseBeta = 255.0;

            NormImageOutputFlowX.convertTo(NormImageOutputFlowX, flowFormat, reverseAlpha, reverseBeta);
            NormImageOutputFlowY.convertTo(NormImageOutputFlowY, flowFormat, reverseAlpha, reverseBeta);

            // write positive output
            std::stringstream outputX;
            std::stringstream outputY;

            if (outputId < 10) {
                outputX << currentOpticalOutputFolder << "/X00" << outputId << flowFileEnding;
                outputY << currentOpticalOutputFolder << "/Y00" << outputId << flowFileEnding;
            } else if (outputId < 100) {
                outputX << currentOpticalOutputFolder << "/X0" << outputId << flowFileEnding;
                outputY << currentOpticalOutputFolder << "/Y0" << outputId << flowFileEnding;
            } else {
                outputX << currentOpticalOutputFolder << "/X" << outputId << flowFileEnding;
                outputY << currentOpticalOutputFolder << "/Y" << outputId << flowFileEnding;
            }

            successX = cv::imwrite(outputX.str(), NormImageOutputFlowX);
            successY = cv::imwrite(outputY.str(), NormImageOutputFlowY);

            if (!successX || !successY)
            {
                std::cerr << "failed saving image " << outputX.str() << " or " << outputY.str() << std::endl;
                break;
            }

            // // write negative output
            // std::stringstream negOutputX;
            // std::stringstream negOutputY;

            // negOutputX << currentOpticalOutputFolder << "/-X" << outputId << flowFileEnding;
            // negOutputY << currentOpticalOutputFolder << "/-Y" << outputId << flowFileEnding;

            // successX = cv::imwrite(negOutputX.str(), NormImageOutputFlowX);
            // successY = cv::imwrite(negOutputY.str(), NormImageOutputFlowY);

            // if (!successX || !successY)
            // {
            //     std::cerr << "failed saving image " << negOutputX.str() << " or " << negOutputY.str() << std::endl;
            //     break;
            // }

            // // write resized output
            // std::stringstream resizedOutput;
            // resizedOutput << currentResizedOutputFolder << "/" << outputId << ".jpg";
            // success = cv::imwrite(resizedOutput.str(), ResizedFrame);

            // if (!success)
            // {
            //     std::cerr << "failed saving image " << resizedOutput.str() << std::endl;
            //     break;
            // }

            // // debug output
            // cv::imshow("flowX", FlowX);
            // cv::imshow("flowY", FlowY);
            // cv::waitKey(0);

            outputId++;
        }
    }
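The branch above zero-pads outputId by hand; std::setw/std::setfill can build the same X000-style names in one expression. A minimal equivalent sketch (requires <sstream> and <iomanip>; produces identical names for all non-negative ids):

    std::ostringstream name;
    name << currentOpticalOutputFolder << "/X"
         << std::setfill('0') << std::setw(3) << outputId << flowFileEnding;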
Code Example #5
int main()
{
	int Wait_Time = 10;

	std::cout << "Hello OpenCV" << std::endl;
	cv::VideoCapture videoCapture(2); // live capture from a camera device (webcam) as the source

	//cv::VideoCapture videoCapture("V:/TestVideo/BigBuckBunny/big_buck_bunny_480p_h264.mp4"); //Datei als Quelle WICHTIG: left Slash (von allen System erkannt!)

	//cv::VideoCapture videoCapture("http://docs.gstreamer.com/media/sintel_cropped_multilingual.webm"); //Stream als Quelle


	bool ok = videoCapture.open(2);
	if (!ok)
	{
		std::cerr << "no camera found, exit" << std::endl;
		return -1;
	}


	//cv::VideoWriter videoWriter("Testvideo.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), 25.0, cv::Size(w, h));
	

	videoCapture.set(cv::CAP_PROP_FRAME_WIDTH, 1920.0);
	videoCapture.set(cv::CAP_PROP_FRAME_HEIGHT, 1080.0);
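	// The driver may snap the requested size to the nearest supported mode; read back the effective values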

	int w = videoCapture.get(cv::CAP_PROP_FRAME_WIDTH);
	int h = videoCapture.get(cv::CAP_PROP_FRAME_HEIGHT);

	std::cout << w << " " << h << std::endl;

	while (1)
	{
		cv::Mat image; // Mat = matrix (image container)
		cv::Mat filter_image;

		videoCapture >> image;
		if (image.empty())
		{
			std::cerr << "no camera image" << std::endl;
			break;
		}

		cv::namedWindow("Hello", 0);
		cv::imshow("Hello", image); //Fenster öffnen
		
		//cv::GaussianBlur(image, filter_image, cv::Size(5, 5), 1.0); // show the filtered image

		//cv::imshow("Filter", filter_image);
		
		int c = cv::waitKey(Wait_Time); // effectively "refreshes" the window: pumps events, letting the OS redraw in between
		
		//videoWriter.write(image);
		
		
		switch (c)
		{
			case 'q': /*videoWriter.release(); */ return 0;
			case 'f': Wait_Time = 1; break;
			case 'l': Wait_Time = 500; break;
			case 'e': Wait_Time = 0; break;
			case 's':
			{
				cv::imwrite("Testfile.png", image);
				//cv::imwrite("Testfile_filter.png", filter_image);
				break;
			}
		}
	}

	return 0;
}
Code Example #6
int main(int argc, char** argv)
{
//    signal(SIGSEGV, CMainFunctions::printTrace);
//    signal(SIGABRT, CMainFunctions::printTrace);
    CMainFunctions::parseParams(argc, argv, __INSTALL_BIN_DIR__);
    CConfig& config = CConfig::getInstance();

    NVideoInput::CVideoCapture videoCapture(*config.mInputConfig);
    LOG_DEBUG << "videoCapture created";

    std::vector<std::shared_ptr<NNet::IServer>> servers;

#ifdef __BUILD_MJPEG__
    auto ptr = NMJPEG::CMJPEGFactory::createMJPEGServer(*config.mMJPEGServerConfig);
    if (ptr)
    {
        servers.push_back(ptr);
    }
#endif

#ifdef __BUILD_SERVERS__

    NDB::CDBConnection::getInstance(*CConfig::getInstance().mDataBaseConfig);


    CImageServer::addServer(config.mConnectionSettings->mTCPPort,
                            NNet::TypeProtocol::TCP, servers);
    CImageServer::addServer(config.mConnectionSettings->mUDPPort,
                            NNet::TypeProtocol::UDP, servers);

//    CImageServer::addServer (10000, ImageType::IMAGE_RGB_RESIZED,
//                             TypeProtocol::TCP_SSL, servers);
#endif
//    size_t bufferSizeWithoutConnection = videoCapture.getBufferSizeWithoutConnection();

#ifdef __BUILD_CLIENTS__
//    size_t connectionSize = config.mInputConfig->mConnectionList.size();
//    CImageBuffer::getBufferInstance().resizeBuffersToSize(
//                connectionSize + bufferSizeWithoutConnection);

    std::vector<NNet::pWorker> workerList;
    std::vector<NNet::pClient> clientList;

    for (size_t connectionId = 0;
         connectionId < config.mInputConfig->mInputList.size();
         connectionId++)
    {
        if (config.mInputConfig->mInputList[connectionId].mType == NVideoInput::InputType::URL)
        {
            CClient::addClientToList(
                        clientList, workerList,
                        connectionId,
                        "");
        }
    }
#endif

    std::shared_ptr<NWebDownwloader::CWebDownloader> webDownloader;
    if (!CConfig::getInstance().mWebDownloaders->mWebDownloaders.empty())
    {
        webDownloader.reset(new NWebDownwloader::CWebDownloader(
                                *CConfig::getInstance().mWebDownloaders));
    }

#ifdef __WITH_SERIAL__
    for (const auto& serialConfig : config.mSerialPortsConfig)
    {
        NSerial::CSerialPorts::getInstance().addPort(*serialConfig);
    }
#endif

    std::vector<std::shared_ptr<NAlgorithms::IAlertAlgorithm>> algorithms;
#ifdef __BUILD_VIDEOCAPTURE__
    CMainFunctions::addAlgorithm(algorithms);
#ifdef __BUILD_SERVERS__
    std::vector<std::thread> buffers(videoCapture.getBufferSizeWithoutConnection());

    for (size_t bufferId = 0; bufferId < videoCapture.getBufferSizeWithoutConnection(); bufferId++)
    {
        buffers[bufferId] = std::thread (loopOne,
                                         std::ref(videoCapture),
                                         bufferId,
                                         std::ref(algorithms)
                                         );
    }
    NNet::CServerFactory::runMainLoop();
    buffers[0].join();
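    // runMainLoop() presumably blocks until shutdown; note that only the first buffer thread is joined here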
#else //__BUILD_SERVERS__
    loop(videoCapture, algorithms);
#endif //__BUILD_SERVERS__


#else //__BUILD_VIDEOCAPTURE__
    NNet::CServerFactory::runMainLoop();
#endif //__BUILD_VIDEOCAPTURE__

    return EXIT_SUCCESS;
}