int main(int argc, const char **argv)
{
    // create an image: 3 channels, 16-bit depth, 650 rows high,
    // 600 columns wide, every pixel set to (0, 50000, 50000)
    // for the Blue, Green and Red planes respectively.
    Mat img(650, 600, CV_16UC3, Scalar(0, 50000, 50000));

    if (img.empty())
    {
        cout << "ERROR : Image cannot be loaded..!!" << endl;
        return -1;
    }

    // vector that stores the compression parameters of the image
    vector<int> compression_params;
    
    // specify the compression technique
    compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
    // specify the compression quality
    compression_params.push_back(98);

    // write the image to file
    bool bSuccess = imwrite("./testImage.jpg", img, compression_params);

    if (!bSuccess)
    {
        cout << "ERROR : Failed to save the image" << endl;
    }

    // create a window with the name "MyWindow"
    namedWindow("MyWindow", CV_WINDOW_AUTOSIZE);
    // display the image which is stored in the 'img' in the "MyWindow" window
    imshow("MyWindow", img);

    waitKey(0);

    destroyWindow("MyWindow");
    

    // write video to file
    VideoCapture cap(0); // open the video camera no. 0

    if (!cap.isOpened())  // if not success, exit program
    {
        cout << "ERROR: Cannot open the video file" << endl;
        return -1;
    }

    namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
     
    double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
    double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video

    cout << "Frame Size = " << dWidth << "x" << dHeight << endl;

    Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));

    VideoWriter oVideoWriter ("./MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object 

    if ( !oVideoWriter.isOpened() ) //if the VideoWriter was not initialized successfully, exit the program
    {
        cout << "ERROR: Failed to write the video" << endl;
        return -1;
    }
    
    while (1)
    {
        Mat frame;

        bool bSuccess = cap.read(frame); // read a new frame from video

        if (!bSuccess) //if not success, break loop
        {
            cout << "ERROR: Cannot read a frame from video file" << endl;
            break;
        }
        
        oVideoWriter.write(frame); //write the frame to the output file
        
        imshow("MyVideo", frame); //show the frame in "MyVideo" window

        if (waitKey(10) == 27) //wait 10 ms for the 'esc' key. If 'esc' is pressed, break loop
        {
            cout << "esc key is pressed by user" << endl;
            break; 
        }
    }

    return 0;
}
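The constants used above (CV_IMWRITE_JPEG_QUALITY, CV_WINDOW_AUTOSIZE, CV_CAP_PROP_*, CV_FOURCC) come from the legacy C API and were removed from the default headers in OpenCV 4. As a minimal sketch only, assuming OpenCV 3.x/4.x, the same write/preview/record flow with the current names looks roughly like this:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    // same synthetic 16-bit BGR image as above
    Mat img(650, 600, CV_16UC3, Scalar(0, 50000, 50000));

    // cv::IMWRITE_JPEG_QUALITY replaces CV_IMWRITE_JPEG_QUALITY
    vector<int> compression_params;
    compression_params.push_back(IMWRITE_JPEG_QUALITY);
    compression_params.push_back(98);
    if (!imwrite("./testImage.jpg", img, compression_params))
        cout << "ERROR: Failed to save the image" << endl;

    // cv::WINDOW_AUTOSIZE replaces CV_WINDOW_AUTOSIZE
    namedWindow("MyWindow", WINDOW_AUTOSIZE);
    imshow("MyWindow", img);
    waitKey(0);
    destroyWindow("MyWindow");

    // cv::CAP_PROP_* and cv::VideoWriter::fourcc() replace CV_CAP_PROP_* / CV_FOURCC()
    VideoCapture cap(0);
    if (!cap.isOpened()) return -1;
    Size frameSize((int)cap.get(CAP_PROP_FRAME_WIDTH),
                   (int)cap.get(CAP_PROP_FRAME_HEIGHT));
    VideoWriter writer("./MyVideo.avi",
                       VideoWriter::fourcc('M', 'J', 'P', 'G'), 20, frameSize, true);
    return writer.isOpened() ? 0 : -1;
}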
Example #2
void grabarVideo(Mat frame, VideoCapture cap)
{
	static bool isRecording = false;
	static VideoWriter writer;
	static time_t vidDelta = 0;


	int vidFps = 10;
	int fourcc = CV_FOURCC(vidCodec[0],vidCodec[1],vidCodec[2], vidCodec[3]);
	int imgInterval = 60; // seconds
	int imgNum = 0;
	time_t sec;
	static long frameNum = 0;
	bool isDisplayEnabled = false;
//	int delay = 1;
	int vidNum = 1;
	bool isRecordingEnabled = vidNum > 0 ? true : false;

	bool isImageCaptureEnabled = imgNum > 0 ? true : false;

	time_t vidTime = 20;

	int vidTotal = 0;
	time_t imgTime = 0;
	time_t imgDelta = 0;
	int imgTotal = 0;

	int vidInterval = 60; // seconds
	double fps = 0.0;

	sec = time(NULL);
	frameNum++;

	if (isDisplayEnabled)
	{
		if (!frame.empty())
			imshow("Current Frame", frame);
	}

	// Decide whether to create a new video file
	if ((isRecordingEnabled) && (!isRecording))
	{
		int width = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);
		int height = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
		writer = createVideoFile(vidDir, width, height, vidFps, fourcc, sec);
		if (writer.isOpened())
		{
			vidTime = sec;
			isRecording = true;
			frameNum = 0;
		}
		else
		{
			cout << "Could not open the directory: " << vidDir << endl;
			isRecordingEnabled = false;
		}
	}

	// Write the frame to the video, calculate the time interval and whether or not to create a new video file
	if (isRecordingEnabled)
	{
		writer.write(frame);
		vidDelta = sec - vidTime;
//		cout << "vidDelta " << vidDelta << " >= " << vidInterval << endl;

		if (vidDelta >= vidInterval) {
//			isRecording = false;
			vidTotal = vidTotal + 1;
//			cout << "Videos recorded =" << vidTotal << "/" << vidNum << endl;
//			cout << "vidTotal=" << vidTotal << " vidNum=" << vidNum << endl;

			if (vidTotal >= vidNum) {
				isRecordingEnabled = false;

				if (vidDelta > 0) {
					fps = frameNum / vidDelta;
					frameNum = 0;
				}

//				cout << "Recording completed fps=" << fps << endl;

				if (isDisplayEnabled) {
					writer = VideoWriter();
				}

			}
		}

	}

	if (isImageCaptureEnabled) {
		imgDelta = (sec - imgTime);

		if (imgDelta >= imgInterval) {
			writeImageFile(imgDir, frame, imgFmt, sec);
			imgTime = sec;
			imgTotal = imgTotal + 1;

			if (imgTotal >= imgNum) {
				isImageCaptureEnabled = false;
			}

		}
	}




}
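grabarVideo ("record video") relies on an external createVideoFile(vidDir, width, height, vidFps, fourcc, sec) helper, plus vidDir/imgDir/vidCodec/imgFmt globals, none of which are shown here. A minimal sketch of what such a helper might look like, assuming vidDir is a writable directory and the file name is derived from the timestamp; this is illustrative, not the project's actual implementation:

#include <opencv2/opencv.hpp>
#include <ctime>
#include <string>

// Hypothetical helper: builds "<dir>/video_<epoch-seconds>.avi" and opens a writer for it.
cv::VideoWriter createVideoFile(const std::string &dir, int width, int height,
                                int fps, int fourcc, time_t sec)
{
    std::string path = dir + "/video_" + std::to_string((long long)sec) + ".avi";
    cv::VideoWriter writer(path, fourcc, fps, cv::Size(width, height), true);
    return writer; // the caller checks writer.isOpened()
}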
Example #3
File: autofocus.cpp  Project: 4ker/opencv
int main(int argc, char ** argv)
{
    if (!parseArguments(argc, argv))
    {
        showHelp(argv[0], false);
        return -1;
    }
    VideoCapture cap(GlobalArgs.deviceName);
    if (!cap.isOpened())
    {
        cout << "Cannot find device " << GlobalArgs.deviceName << endl;
        showHelp(argv[0], false);
        return -1;
    }

    VideoWriter videoWriter;
    Mat frame;
    FocusState state = createInitialState();
    bool focus = true;
    bool lastSucceeded = true;
    namedWindow(windowOriginal, 1);

    // Get settings:
    if (GlobalArgs.verbose)
    {
        if ((cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE) == 0)
                || (cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE) == -1))
        {
            // Some VideoCapture implementations can return -1, 0.
            cout << "This is not GPHOTO2 device." << endl;
            return -2;
        }
        cout << "List of camera settings: " << endl
                << (const char *) (intptr_t) cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE)
                << endl;
        cap.set(CAP_PROP_GPHOTO2_COLLECT_MSGS, true);
    }

    cap.set(CAP_PROP_GPHOTO2_PREVIEW, true);
    cap.set(CAP_PROP_VIEWFINDER, true);
    cap >> frame; // To check PREVIEW output Size.
    if (GlobalArgs.output != NULL)
    {
        Size S = Size((int) cap.get(CAP_PROP_FRAME_WIDTH), (int) cap.get(CAP_PROP_FRAME_HEIGHT));
        int fourCC = CV_FOURCC('M', 'J', 'P', 'G');
        videoWriter.open(GlobalArgs.output, fourCC, GlobalArgs.fps, S, true);
        if (!videoWriter.isOpened())
        {
            cerr << "Cannot open output file " << GlobalArgs.output << endl;
            showHelp(argv[0], false);
            return -1;
        }
    }
    showHelp(argv[0], true); // welcome msg

    if (GlobalArgs.minimumFocusStep == 0)
    {
        state.minFocusStep = findMinFocusStep(cap, FOCUS_STEP / 16, -FOCUS_DIRECTION_INFTY);
    }
    else
    {
        state.minFocusStep = GlobalArgs.minimumFocusStep;
    }
    focusDriveEnd(cap, -FOCUS_DIRECTION_INFTY); // Start with closest

    char key = 0;
    while (key != 'q' && key != 27 /*ESC*/)
    {
        cap >> frame;
        if (frame.empty())
        {
            break;
        }
        if (GlobalArgs.output != NULL)
        {
            videoWriter << frame;
        }

        if (focus && !GlobalArgs.measure)
        {
            int stepToCorrect = correctFocus(lastSucceeded, state, rateFrame(frame));
            lastSucceeded = cap.set(CAP_PROP_ZOOM,
                    max(stepToCorrect, state.minFocusStep) * state.direction);
            if ((!lastSucceeded) || (stepToCorrect < state.minFocusStep))
            {
                if (--GlobalArgs.breakLimit <= 0)
                {
                    focus = false;
                    state.step = state.minFocusStep * 4;
                    cout << "In focus, you can press 'f' to improve with small step, "
                            "or 'r' to reset." << endl;
                }
            }
            else
            {
                GlobalArgs.breakLimit = DEFAULT_BREAK_LIMIT;
            }
        }
        else if (GlobalArgs.measure)
        {
            double rate = rateFrame(frame);
            if (!cap.set(CAP_PROP_ZOOM, state.minFocusStep))
            {
                if (--GlobalArgs.breakLimit <= 0)
                {
                    break;
                }
            }
            else
            {
                cout << rate << endl;
            }
        }

        if ((focus || GlobalArgs.measure) && GlobalArgs.verbose)
        {
            cout << "STATE\t" << state << endl;
            cout << "Output from camera: " << endl
                    << (const char *) (intptr_t) cap.get(CAP_PROP_GPHOTO2_FLUSH_MSGS) << endl;
        }

        imshow(windowOriginal, frame);
        switch (key = static_cast<char>(waitKey(30)))
        {
            case 'k': // focus out
                cap.set(CAP_PROP_ZOOM, 100);
                break;
            case 'j': // focus in
                cap.set(CAP_PROP_ZOOM, -100);
                break;
            case ',': // Drive to closest
                focusDriveEnd(cap, -FOCUS_DIRECTION_INFTY);
                break;
            case '.': // Drive to infinity
                focusDriveEnd(cap, FOCUS_DIRECTION_INFTY);
                break;
            case 'r': // reset focus state
                focus = true;
                state = createInitialState();
                break;
            case 'f': // focus switch on/off
                focus ^= true;
                break;
        }
    }

    if (GlobalArgs.verbose)
    {
        cout << "Captured " << (int) cap.get(CAP_PROP_FRAME_COUNT) << " frames"
                << endl << "in " << (int) (cap.get(CAP_PROP_POS_MSEC) / 1e2)
                << " seconds," << endl << "at avg speed "
                << (cap.get(CAP_PROP_FPS)) << " fps." << endl;
    }

    return 0;
}
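The focusing loop above is driven by rateFrame(frame), a sharpness score whose definition is not part of this listing. One common way to rate focus is the variance of the Laplacian; the sketch below is an assumption about what such a rating function could look like, not the sample's actual implementation:

#include <opencv2/opencv.hpp>

// Hypothetical focus metric: a higher return value means a sharper frame.
static double rateFrameSketch(const cv::Mat &frame)
{
    cv::Mat gray, lap;
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
    cv::Laplacian(gray, lap, CV_64F);      // second derivative responds strongly to edges
    cv::Scalar mean, stddev;
    cv::meanStdDev(lap, mean, stddev);
    return stddev[0] * stddev[0];          // variance of the Laplacian
}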
Example #4
void constructGraph(const string outputLocation){

    string line;
    vector<string> stringVector;
    string edgeWeightsTxtName = outputLocation + "/AllFaces/edgeWeights.txt";

    int noOfVertices, vertexA, vertexB;
    float edgeWeight;
    vector< list< pair<int, float> > > adjacencyList;
    int noOfVerticesLeft = 0;

    ifstream edgeWeightPtr(edgeWeightsTxtName.c_str());


    // Open the file to read all the edge weights.
    if(edgeWeightPtr.is_open()) {

        // Line 1 contains the number of vertices.
        if(getline(edgeWeightPtr, line)) {
            try {
                noOfVertices      = lexical_cast<int>(line);
                noOfVerticesLeft  = noOfVertices;
            }
            catch(bad_lexical_cast const&) {
                cout << "Error: input string was not valid" << endl;
            }
        }

        list< pair<int, float> > val;
        for(int i=0; i<noOfVertices; i++) {
            adjacencyList.push_back( val );
        }

        // Read each edge weight.
        while(getline(edgeWeightPtr, line)) {
            //cout << line << endl;
            split(stringVector, line, boost::is_any_of("-="));
            // Adding edges and weights
            try {
                //cout << "Debug 00:: " << stringVector[0].c_str() << " " << stringVector[1].c_str() << " " << stringVector[2].c_str() << endl;

                vertexA = lexical_cast<int>(stringVector[0].c_str());
                vertexB = lexical_cast<int>(stringVector[1].c_str());
                edgeWeight = lexical_cast<float>(stringVector[2].c_str());
            }
            catch(bad_lexical_cast const&) {
                cout << "Error: input string was not valid" << endl;
            }

            // Fill in the adjacency list
            //cout << "Debug 11:: Maybe here ::" << vertexA << " " << vertexB << " " << edgeWeight << "adjacencyList size:" << adjacencyList.size() << endl;
            adjacencyList[vertexA].push_back(make_pair(vertexB, edgeWeight));
            //cout << "Debug 22:: Maybe here" << endl;
            adjacencyList[vertexB].push_back(make_pair(vertexA, edgeWeight));
        }

    }
    else {
        cout << "Couldn't open " << edgeWeightsTxtName << endl;
        return;
    }

    // Traverse the graph
    list<int> traverseList;
    int currentVertex = 1, endVertex = 80, nextVertex;

    traverseList.push_back(currentVertex);

    nextVertex = findNextVertex(currentVertex, adjacencyList, noOfVerticesLeft);
    cout << "Debug 44:: Next Vertex" << nextVertex << "adjacencyList size:" << adjacencyList.size() << endl;

    int debugWhile = 0;
    while((noOfVerticesLeft!=1) && (nextVertex != endVertex)){
        cout << "Debug 55:: Inside while. Next Vertex:: " << nextVertex << " VerticesLeft:: " << noOfVerticesLeft << " debugWhile::" << debugWhile << endl;
        traverseList.push_back(nextVertex);
        currentVertex = nextVertex;
        nextVertex = findNextVertex(currentVertex, adjacencyList, noOfVerticesLeft);
        debugWhile++;
    }

    // Print the traverse route
    cout << "Final traversal of Vertices and size" << traverseList.size() << endl;
    for(list<int>::iterator it=traverseList.begin(); it!=traverseList.end(); it++) {
        cout << *it << " - ";
    }
    cout << endl;


    // Display the video
    cout << "Expression animation" << endl;
    string listOfFacesFileName = outputLocation + "/AllFaces/ListOfFaces.txt";
    ifstream listOfFacesFileNameHandle(listOfFacesFileName.c_str());
    vector<string> faceMap;

    // Collect the mapping
    cout << "Collecting the mapping" << endl;
    if(listOfFacesFileNameHandle.is_open()) {
        while(getline(listOfFacesFileNameHandle, line)) {
            split(stringVector, line, boost::is_any_of(" "));
            //cout << "DEBUG 66:: stringVector[0]=" << stringVector[0] << endl;
            faceMap.push_back(stringVector[0]);
        }
    }

    Mat faceMat, prevMat, midMat;
    const char* EXPRESSION_DISPLAY = "Expressions";
    namedWindow(EXPRESSION_DISPLAY, CV_WINDOW_AUTOSIZE);

    // Display the traversed faces and make a video of the same

    Size sizeT(200, 200);
    const string NAME = "Animation.avi";
    cout << "DEBUG 11: " << NAME << endl;

    VideoWriter outputVideo;
    //outputVideo.open(  , -1, 20, sizeT, true);
    outputVideo.open("/home/mallikarjun/Desktop/test1.avi", CV_FOURCC('D','I','V','X'), 5, Size (200, 200), true );
    if (!outputVideo.isOpened())
    {
        perror("Could not open the output video for write");
    }

/*    Size sizeT(200, 200);
    CvVideoWriter *writer = cvCreateVideoWriter(
            "data4.avi",
            CV_FOURCC('M','J','P','G'),
            30,
            sizeT);

    cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("mainWin", 200, 200);
*/

    bool firstTime_bool = true;
    cout << "Displaying the traversed faces" << endl;
    for(list<int>::iterator it=traverseList.begin(); it!=traverseList.end(); it++) {
        int faceNumber = *it;
        //cout << "DEBUG 88:: faceMap[i]=" << faceMap[faceNumber] << endl;
        string strTemp = outputLocation + "/AllFaces/Sample Set/" +  faceMap[faceNumber];
        //cout << "DEBUG 77:: strTemp=" << strTemp << endl;
        //IplImage* img=cvLoadImage(strTemp.c_str());
        faceMat = imread(strTemp.c_str(), CV_LOAD_IMAGE_COLOR);
        if(!firstTime_bool){
            addWeighted(prevMat, 0.5, faceMat, 0.5, 0, midMat, -1);
            //putText(midMat, "Bridge Image", cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
            outputVideo << midMat;
            putText(faceMat, faceMap[faceNumber].c_str(), cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
            outputVideo << faceMat;
        }
        else{
            putText(faceMat, faceMap[faceNumber].c_str(), cvPoint(30,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,200,250), 1, CV_AA);
            outputVideo << faceMat;
            firstTime_bool = false;
        }
        prevMat = faceMat.clone();

        //cvShowImage("mainWin", img );
        //cvWriteFrame(writer,img);
        imshow(EXPRESSION_DISPLAY, faceMat);
        cvWaitKey(10);
    }
    //cvReleaseVideoWriter(&writer);
}
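constructGraph() depends on findNextVertex(currentVertex, adjacencyList, noOfVerticesLeft), which is not included here. Given how it is called, a plausible greedy reading is "pick the cheapest unvisited neighbour of the current vertex and decrement the remaining-vertex counter"; the sketch below is purely illustrative and assumes the counter is passed by reference and that visited vertices are tracked in a static set:

#include <list>
#include <set>
#include <utility>
#include <vector>
using namespace std;

// Hypothetical greedy step: return the unvisited neighbour of 'current'
// with the smallest edge weight, or -1 if none is left.
int findNextVertexSketch(int current,
                         const vector< list< pair<int, float> > > &adj,
                         int &verticesLeft)
{
    static set<int> visited;              // assumption: visited state kept here
    visited.insert(current);

    int best = -1;
    float bestWeight = 0.0f;
    for (const pair<int, float> &edge : adj[current]) {
        if (visited.count(edge.first))
            continue;
        if (best == -1 || edge.second < bestWeight) {
            best = edge.first;
            bestWeight = edge.second;
        }
    }
    if (best != -1)
        --verticesLeft;
    return best;
}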
Example #5
int main(int argc, char** argv ){


    //init capture devices
    cap0 = ConfigVideoCapture(cap0dev);
    cap1 = ConfigVideoCapture(cap1dev);
    namedWindow("cap0",WINDOW_NORMAL);
    namedWindow("cap1",WINDOW_NORMAL);

    outputVideocap0.open("RecoredVideo/Cam0.avi",CV_FOURCC('M', 'J', 'P', 'G'),11,Size(720,960),true);
    outputVideocap1.open("RecoredVideo/Cam1.avi",CV_FOURCC('M', 'J', 'P', 'G'),11,Size(720,960),true);
    if (!outputVideocap0.isOpened() || !outputVideocap1.isOpened())
    {
            printf("Output video could not be opened\n");
            return 0;
    }


    if (!cap0.isOpened() || !cap1.isOpened()){
            printf("Output video could not be opened\n");
            return 0;
    }

    //record video
    printf("Starting to record video... \n(Press 'c'-key to stop)\n");
    fflush(stdout);
    for(;;){
            clock_t begin = clock();
            thread Grab0(threadGrab0);
            thread Grab1(threadGrab1);
            Grab0.join();
            Grab1.join();
            
	    

            char c = (char)waitKey(1);
            if( c == 'c')
                break;

            clock_t end = clock();
            double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
            double fps = 1.0/elapsed_secs;
            printf("FPS: %f (Press 'c'-key to stop)\n",fps);
            fflush(stdout);
    }
    
    printf("Writeing video to harddrive...");
    fflush(stdout);
    for(Mat img : leftImgs)
    {
        outputVideocap0.write(img);
    }

    for(Mat img : rightImgs)
    {
        outputVideocap1.write(img);
    }
    outputVideocap0.release();
    outputVideocap1.release();

    printf(" done\n");
    fflush(stdout);
    return 0;
}
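threadGrab0/threadGrab1 and the leftImgs/rightImgs containers used above are defined elsewhere in the project. As a rough illustration only (the real project may buffer and synchronize differently), a grab thread could simply read one frame from its camera and append a deep copy to the corresponding vector; since main() joins both threads every iteration, no extra locking is needed in this sketch:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

// Hypothetical globals matching the names used in main()
VideoCapture cap0, cap1;
vector<Mat> leftImgs, rightImgs;

void threadGrab0()
{
    Mat frame;
    if (cap0.read(frame))
        leftImgs.push_back(frame.clone()); // clone: VideoCapture reuses its internal buffer
}

void threadGrab1()
{
    Mat frame;
    if (cap1.read(frame))
        rightImgs.push_back(frame.clone());
}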
int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, params);

    if (parser.get<bool>("help"))
    {
        cout << about << endl;
        parser.printMessage();
        return 0;
    }

    String modelConfiguration = parser.get<string>("proto");
    String modelBinary = parser.get<string>("model");

    //! [Initialize network]
    dnn::Net net = readNetFromCaffe(modelConfiguration, modelBinary);
    //! [Initialize network]

    if (parser.get<bool>("opencl"))
    {
        net.setPreferableTarget(DNN_TARGET_OPENCL);
    }

    if (net.empty())
    {
        cerr << "Can't load network by using the following files: " << endl;
        cerr << "prototxt:   " << modelConfiguration << endl;
        cerr << "caffemodel: " << modelBinary << endl;
        cerr << "Models can be downloaded here:" << endl;
        cerr << "https://github.com/chuanqi305/MobileNet-SSD" << endl;
        exit(-1);
    }

    VideoCapture cap;
    if (parser.get<String>("video").empty())
    {
        int cameraDevice = parser.get<int>("camera_device");
        cap = VideoCapture(cameraDevice);
        if(!cap.isOpened())
        {
            cout << "Couldn't find camera: " << cameraDevice << endl;
            return -1;
        }
    }
    else
    {
        cap.open(parser.get<String>("video"));
        if(!cap.isOpened())
        {
            cout << "Couldn't open image or video: " << parser.get<String>("video") << endl;
            return -1;
        }
    }

    Size inVideoSize;
    inVideoSize = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),    //Acquire input size
                       (int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));

    Size cropSize;
    if (inVideoSize.width / (float)inVideoSize.height > WHRatio)
    {
        cropSize = Size(static_cast<int>(inVideoSize.height * WHRatio),
                        inVideoSize.height);
    }
    else
    {
        cropSize = Size(inVideoSize.width,
                        static_cast<int>(inVideoSize.width / WHRatio));
    }

    Rect crop(Point((inVideoSize.width - cropSize.width) / 2,
                    (inVideoSize.height - cropSize.height) / 2),
              cropSize);

    VideoWriter outputVideo;
    outputVideo.open(parser.get<String>("out") ,
                     static_cast<int>(cap.get(CV_CAP_PROP_FOURCC)),
                     cap.get(CV_CAP_PROP_FPS), cropSize, true);

    for(;;)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera/video or read image

        if (frame.empty())
        {
            waitKey();
            break;
        }

        if (frame.channels() == 4)
            cvtColor(frame, frame, COLOR_BGRA2BGR);

        //! [Prepare blob]
        Mat inputBlob = blobFromImage(frame, inScaleFactor,
                                      Size(inWidth, inHeight), meanVal, false); //Convert Mat to batch of images
        //! [Prepare blob]

        //! [Set input blob]
        net.setInput(inputBlob, "data"); //set the network input
        //! [Set input blob]

        //! [Make forward pass]
        Mat detection = net.forward("detection_out"); //compute output
        //! [Make forward pass]

        vector<double> layersTimings;
        double freq = getTickFrequency() / 1000;
        double time = net.getPerfProfile(layersTimings) / freq;

        Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

        frame = frame(crop);

        ostringstream ss;
        if (!outputVideo.isOpened())
        {
            ss << "FPS: " << 1000/time << " ; time: " << time << " ms";
            putText(frame, ss.str(), Point(20,20), 0, 0.5, Scalar(0,0,255));
        }
        else
            cout << "Inference time, ms: " << time << endl;

        float confidenceThreshold = parser.get<float>("min_confidence");
        for(int i = 0; i < detectionMat.rows; i++)
        {
            float confidence = detectionMat.at<float>(i, 2);

            if(confidence > confidenceThreshold)
            {
                size_t objectClass = (size_t)(detectionMat.at<float>(i, 1));

                int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
                int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
                int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
                int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);

                ss.str("");
                ss << confidence;
                String conf(ss.str());

                Rect object((int)xLeftBottom, (int)yLeftBottom,
                            (int)(xRightTop - xLeftBottom),
                            (int)(yRightTop - yLeftBottom));

                rectangle(frame, object, Scalar(0, 255, 0));
                String label = String(classNames[objectClass]) + ": " + conf;
                int baseLine = 0;
                Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
                rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom - labelSize.height),
                                      Size(labelSize.width, labelSize.height + baseLine)),
                          Scalar(255, 255, 255), CV_FILLED);
                putText(frame, label, Point(xLeftBottom, yLeftBottom),
                        FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,0));
            }
        }

        if (outputVideo.isOpened())
            outputVideo << frame;

        imshow("detections", frame);
        if (waitKey(1) >= 0) break;
    }

    return 0;
} // main
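This detector relies on several globals that the listing omits: the `params` help string for CommandLineParser, `classNames`, and the preprocessing constants fed to blobFromImage. For MobileNet-SSD the preprocessing is commonly a 300x300 input scaled by 1/127.5 with a mean of 127.5; the values below are the usual ones for this model and are shown as an assumption, not as the exact definitions from this file:

// Typical MobileNet-SSD preprocessing constants (assumed, not taken from this file)
const size_t inWidth = 300;
const size_t inHeight = 300;
const float WHRatio = inWidth / (float)inHeight;
const float inScaleFactor = 0.007843f;   // 1 / 127.5
const float meanVal = 127.5f;

// The 20 PASCAL VOC classes plus background, as used by the public MobileNet-SSD caffemodel
const char* classNames[] = {"background", "aeroplane", "bicycle", "bird", "boat",
                            "bottle", "bus", "car", "cat", "chair", "cow",
                            "diningtable", "dog", "horse", "motorbike", "person",
                            "pottedplant", "sheep", "sofa", "train", "tvmonitor"};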
int main(int argc, char** argv) {
	/* the input and output dir */
	string input_dir = "/home/user/ccv/data/sunny_day-img-left";

	/* initialize the ccv states */
	ccv_enable_default_cache();
	ccv_dpm_mixture_model_t* model = ccv_dpm_read_mixture_model(argv[1]);

	/* set the pedestrian detection parameters */
	ccv_dpm_param_t myparameters;
	myparameters.threshold = 0.4;
	myparameters.interval = 8;
	myparameters.min_neighbors = 1;
	myparameters.flags = 0;

	/* debug */
	string source = "/home/user/ccv/demo1.avi";
	VideoCapture inputVideo(source);              // Open input
	if (!inputVideo.isOpened()) {
		cout  << "Could not open the input video: " << source << endl;
		return -1;
	}
	int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC));     // Get Codec Type- Int form
	cout<<"The coding is "<<ex<<endl;
	cout<<"The fps is "<<inputVideo.get(CV_CAP_PROP_FPS)<<endl;

	/* initialize the video writer */
	Mat getSize = imread(input_dir + "/image_00000100_0.png");
	Size videoSize = getSize.size();
	getSize.release();
	VideoWriter outputVideo;
	outputVideo.open("/home/user/ccv/data/output/eth2_reg_overlaps.avi", ex, fps, videoSize, true);
	if (!outputVideo.isOpened()) {
		cout<<"Could not open the output video"<<endl;
		return false;
	}


	/* process one by one */
	for (int iImage = imageStart; iImage <= imageEnd; iImage++) {

		/* read the image, ccv_image for detection, and opencv Mat for recording */
		string imageTail;
		if (iImage < 10) imageTail = "0000000" + patch::to_string(iImage);
		else if (iImage < 100) imageTail = "000000" + patch::to_string(iImage);
		else imageTail = "00000" + patch::to_string(iImage);
		string image_name = input_dir + "/image_" + imageTail + "_0.png";

		ccv_dense_matrix_t* image = 0;
		ccv_read(image_name.c_str(), &image, CCV_IO_ANY_FILE);
		Mat plot_result = imread(image_name);
		if (image == 0) cerr<<"Reading the dataset image failed!"<<endl;
		cout<<"Image successfully read"<<endl;

		/* processing the image one by one */
		unsigned int elapsed_time = get_current_time();
		ccv_array_t* seq = ccv_dpm_detect_objects(image, &model, 1, myparameters);
		elapsed_time = get_current_time() - elapsed_time;
		cout<<"Using "<<elapsed_time<<"ms on detecting the "<<iImage<<"th image"<<endl;

		if (seq != NULL) { 
			/* get the overlaps */
			bool* flag = new bool[seq->rnum];
			for (int i = 0; i < seq->rnum; i++) flag[i] = true;
			for (int i = 0; i < seq->rnum; i++) {
				for (int j = 0; j < seq->rnum; j++) {
					/* a bigger area */
					ccv_root_comp_t* comp1 = (ccv_root_comp_t*)ccv_array_get(seq, i); /* get the ith number */
					ccv_root_comp_t* comp2 = (ccv_root_comp_t*)ccv_array_get(seq, j); /* get the jth number */
					float dx1 = comp1->rect.x - comp2->rect.x;
					float dx2 = comp1->rect.x + comp1->rect.width - (comp2->rect.x + comp2->rect.width);
					if (abs(dx1) / comp1->rect.width < 0.2 && abs(dx2) / comp2->rect.width < 0.2 &&
							abs(dx1) / comp2->rect.width < 0.2 && abs(dx2) / comp1->rect.width < 0.2 &&
							get_overlaps(comp1, comp2) > 0.5) {
						rectangle(plot_result, 
								cv::Point(int(min(comp1->rect.x, comp2->rect.x)), int(min(comp1->rect.y, comp2->rect.y))),
								cv::Point(int(max(comp1->rect.x + comp1->rect.width, comp2->rect.x + comp2->rect.width)), 
										int(max(comp1->rect.y + comp1->rect.height, comp2->rect.y + comp2->rect.height))),
								cvScalar(255, 0, 0), 2, 8, 0);
					}

				}
			}
			/* the detection has something to say */
			for (int i = 0; i < seq->rnum; i++) {
				ccv_root_comp_t* comp = (ccv_root_comp_t*)ccv_array_get(seq, i); /* get the ith number */
				/* a simple regression trick */
				float predHeight = ((float)videoSize.height / 2 - comp->rect.y) * 2 + 10;
				if (predHeight - comp->rect.height > predHeight * 0.5) {
					rectangle(plot_result, 
							cv::Point(int(comp->rect.x), int(comp->rect.y)),
							cv::Point(int(comp->rect.x + comp->rect.width), int(comp->rect.y + comp->rect.height)),
							cvScalar(0, 0, 255), 2, 8, 0);
				} else{
					rectangle(plot_result, 
							cv::Point(int(comp->rect.x), int(comp->rect.y)),
							cv::Point(int(comp->rect.x + comp->rect.width), int(comp->rect.y + comp->rect.height)),
							cvScalar(0, 255, 0), 2, 8, 0);
				}
			}
			ccv_array_free(seq); /* release the sequence */
		}
		outputVideo << plot_result;

		/* free the images */
		ccv_matrix_free(image);
		plot_result.release();
	}


	outputVideo.release();
	ccv_drain_cache();
	ccv_dpm_mixture_model_free(model);
	return 0;
}
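get_overlaps(comp1, comp2) is another helper this listing does not show; from the way it is thresholded against 0.5 it behaves like an intersection-over-union score between two detections. A hypothetical version over the ccv rectangles could look like this (illustrative only; it assumes ccv.h is already included):

#include <algorithm>

// Hypothetical IoU between two detections: 0 = disjoint, 1 = identical boxes.
float get_overlaps_sketch(const ccv_root_comp_t* a, const ccv_root_comp_t* b)
{
    int x1 = std::max(a->rect.x, b->rect.x);
    int y1 = std::max(a->rect.y, b->rect.y);
    int x2 = std::min(a->rect.x + a->rect.width,  b->rect.x + b->rect.width);
    int y2 = std::min(a->rect.y + a->rect.height, b->rect.y + b->rect.height);
    int inter = std::max(0, x2 - x1) * std::max(0, y2 - y1);
    int uni = a->rect.width * a->rect.height + b->rect.width * b->rect.height - inter;
    return uni > 0 ? (float)inter / (float)uni : 0.0f;
}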
Example #8
int main(int argc, const char* argv[])
{
    useOclChanged = false;
    CommandLineParser cmd(argc, argv,
        "{ v video      |           | Input video }"
        "{ o output     |           | Output video }"
        "{ s scale      | 4         | Scale factor }"
        "{ i iterations | 180       | Iteration count }"
        "{ t temporal   | 4         | Radius of the temporal search area }"
        "{ f flow       | farneback | Optical flow algorithm (farneback, simple, tvl1, brox, pyrlk) }"
        "{ g            | false     | CPU as default device, cuda for CUDA and ocl for OpenCL }"
        "{ h help       | false     | Print help message }"
    );

    if (cmd.get<bool>("help"))
    {
        cout << "This sample demonstrates Super Resolution algorithms for video sequence" << endl;
        cmd.printMessage();
        return 0;
    }

    const string inputVideoName = cmd.get<string>("video");
    const string outputVideoName = cmd.get<string>("output");
    const int scale = cmd.get<int>("scale");
    const int iterations = cmd.get<int>("iterations");
    const int temporalAreaRadius = cmd.get<int>("temporal");
    const string optFlow = cmd.get<string>("flow");
    string gpuOption = cmd.get<string>("gpu");

    std::transform(gpuOption.begin(), gpuOption.end(), gpuOption.begin(), ::tolower);

    bool useCuda = false;
    bool useOcl = false;

    if(gpuOption.compare("ocl") == 0)
        useOcl = true;
    else if(gpuOption.compare("cuda") == 0)
        useCuda = true;

#ifndef HAVE_OPENCV_OCL
    if(useOcl)
    {
        cout<<"OPENCL is not compiled\n";
        return 0;
    }
#endif
#if defined(HAVE_OPENCV_OCL)
    if(useCuda)
    {
        CV_Assert(!useOcl);
    }
#endif
    Ptr<SuperResolution> superRes;


#if defined(HAVE_OPENCV_OCL)
    if(useOcl)
    {
        Ptr<DenseOpticalFlowExt> of = createOptFlow(optFlow);
        if (of.empty())
            exit(-1);
        if(useOclChanged)
        {
            superRes = createSuperResolution_BTVL1();
            useOcl = !useOcl;
        }else
            superRes = createSuperResolution_BTVL1_OCL();
        superRes->set("opticalFlow", of);
    }
    else
#endif
    {
        if (useCuda)
            superRes = createSuperResolution_BTVL1_CUDA();
        else
            superRes = createSuperResolution_BTVL1();

        Ptr<DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);

        if (of.empty())
            exit(-1);
        superRes->set("opticalFlow", of);
    }

    superRes->set("scale", scale);
    superRes->set("iterations", iterations);
    superRes->set("temporalAreaRadius", temporalAreaRadius);

    Ptr<FrameSource> frameSource;
    if (useCuda)
    {
        // Try to use gpu Video Decoding
        try
        {
            frameSource = createFrameSource_Video_CUDA(inputVideoName);
            Mat frame;
            frameSource->nextFrame(frame);
        }
        catch (const cv::Exception&)
        {
            frameSource.release();
        }
    }
    if (!frameSource)
        frameSource = createFrameSource_Video(inputVideoName);

    // skip first frame, it is usually corrupted
    {
        Mat frame;
        frameSource->nextFrame(frame);
        cout << "Input           : " << inputVideoName << " " << frame.size() << endl;
        cout << "Scale factor    : " << scale << endl;
        cout << "Iterations      : " << iterations << endl;
        cout << "Temporal radius : " << temporalAreaRadius << endl;
        cout << "Optical Flow    : " << optFlow << endl;
#if defined(HAVE_OPENCV_OCL)
        cout << "Mode            : " << (useCuda ? "CUDA" : useOcl? "OpenCL" : "CPU") << endl;
#else
        cout << "Mode            : " << (useCuda ? "CUDA" : "CPU") << endl;
#endif
    }

    superRes->setInput(frameSource);

    VideoWriter writer;

    for (int i = 0;; ++i)
    {
        cout << '[' << setw(3) << i << "] : ";
        Mat result;

#if defined(HAVE_OPENCV_OCL)
        cv::ocl::oclMat result_;

        if(useOcl)
        {
            MEASURE_TIME(superRes->nextFrame(result_));
        }
        else
#endif
        {
            MEASURE_TIME(superRes->nextFrame(result));
        }

#ifdef HAVE_OPENCV_OCL
        if(useOcl)
        {
            if(!result_.empty())
            {
                result_.download(result);
            }
        }
#endif
        if (result.empty())
            break;

        imshow("Super Resolution", result);

        if (waitKey(1000) > 0)
            break;

        if (!outputVideoName.empty())
        {
            if (!writer.isOpened())
                writer.open(outputVideoName, VideoWriter::fourcc('X', 'V', 'I', 'D'), 25.0, result.size());
            writer << result;
        }
    }

    return 0;
}
int main(int argc, char* argv[])
{
	
	koordinate.open("F:/TRAKASNIMCI/log.txt",fstream::app);

	//pause and resume the code
	bool pause = false;

	//color calibration
	bool calibrationMode = true;
	//INSTRUCTIONS
	cout<<"CONTROLS\n";
	cout<<"************************************\n";
	cout<<"Press C to reset UKUPNO and SVI \n";
	cout<<"Press P to pause program \n";
	cout<<"************************************\n";
	cout<<"Press M to enter manual record  mode\n";
	cout<<"Press A to return to automatic record mode \n";
	cout<<"Press N to start new record \n";
	cout<<"************************************\n";
	cout<<"Current record mode > AUTOMATIC\n";
	cout<<"************************************\n";

	


	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat HSV;
	
	capture.open(0);


	if(calibrationMode){
		//create the sliders on the threshold window
		createTrackbars();
	} else {
		//create the sliders on the main conveyor-belt window
		trackbarWaitkey();
	}
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	
	
	
	//Video writer

	VideoWriter oVideoWriter;//create videoWriter object, not initialized yet
	double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
	double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
	//set framesize for use with videoWriter
	Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));

	if(!capture.isOpened()){
		cout<<"GRESKA PRILIKOM PREUZIMANJA VIDEA\n";
		getchar();
		return -1;
	}

	
	Objekat crven("crven"), zelen("zelen"), zut("zut"), plav("plav");	
	
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		
		//store image to matrix
		capture.read(cameraFeed);

		
		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);


		if(calibrationMode==true){
			//if in calibration mode, we track objects based on the HSV slider values.
			inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
			morphOps(threshold);
			imshow(thresholdWindow,threshold);
			trackFilteredObject(threshold,HSV,cameraFeed);
		} else {

			//black rectangle
			rectangle(cameraFeed,Point(200,380),Point(650,460),crBoja,-1);
		
			//red
			inRange(HSV,crven.getHSVmin(),crven.getHSVmax(),threshold);
			//morphOps(threshold);
			trackFilteredObject(crven,threshold,HSV,cameraFeed);

			//yellow
			inRange(HSV,zut.getHSVmin(),zut.getHSVmax(),threshold);
			//morphOps(threshold);
			trackFilteredObject(zut,threshold,HSV,cameraFeed);
			
			//green
			inRange(HSV,zelen.getHSVmin(),zelen.getHSVmax(),threshold);
			//morphOps(threshold);
			trackFilteredObject(zelen,threshold,HSV,cameraFeed);
			
			//blue
			inRange(HSV,plav.getHSVmin(),plav.getHSVmax(),threshold);
			//morphOps(threshold);
			trackFilteredObject(plav,threshold,HSV,cameraFeed);
			
		
			line(cameraFeed,Point(xMIN,0),Point(xMIN,480),crBoja,2,8,0);

			line(cameraFeed,Point(xMAX,0),Point(xMAX,480),beBoja,2,8,0);

			//PRINT THE DATE AND TIME
			rectangle(cameraFeed,Point(0,460),Point(200,480),beBoja,-1);
			putText(cameraFeed,getDateTime(),Point(0,480),1,1,Scalar(0,0,0),2);

			//total red
			putText(cameraFeed,"UKUPNO",cv::Point(200,440),1,1,cBoja);
			putText(cameraFeed,intToString(cr.size()),cv::Point(200,460),1,1,cBoja);

			//total green
			putText(cameraFeed,"UKUPNO",cv::Point(300,440),1,1,zeBoja);
			putText(cameraFeed,intToString(ze.size()),cv::Point(300,460),1,1,zeBoja);

			//total yellow
			putText(cameraFeed,"UKUPNO",cv::Point(400,440),1,1,zuBoja);
			putText(cameraFeed,intToString(zu.size()),cv::Point(400,460),1,1,zuBoja);

			//total blue
			putText(cameraFeed,"UKUPNO",cv::Point(500,440),1,1,pBoja);
			putText(cameraFeed,intToString(pl.size()),cv::Point(500,460),1,1,pBoja);

			//total of all colors
			putText(cameraFeed,"SVI",cv::Point(600,440),1,1,beBoja);
			putText(cameraFeed,intToString(pl.size()+cr.size()+ze.size()+zu.size()),cv::Point(600,460),1,1,beBoja);
		}

		if(startRecording == true) {

				oVideoWriter  = VideoWriter("F:/TRAKASNIMCI/Video"+intToString(inc)+".avi", CV_FOURCC('D', 'I', 'V', '3'), 15, frameSize, true); //initialize the VideoWriter object 
				cout<<"New video file created F:/TRAKASNIMCI/Video"+intToString(inc)+".avi "<<endl;
				startRecording = false;
				if ( !oVideoWriter.isOpened() ) //if the VideoWriter was not initialized successfully, exit the program
				{
					cout << "ERROR: Failed to initialize video writing" << endl;
					getchar();
					return -1;
				}
			}

		//automatic recording
		if(manualRecordingMode == false) {
			if(recording) {
				oVideoWriter.write(cameraFeed);
				//show "REC" in top left corner in red
				//be sure to do this AFTER you write to the file so that "REC" doesn't show up
				//on the recorded video.
				putText(cameraFeed,"REC",Point(0,60),1,2,Scalar(0,0,255),2);
			}
		}

		//manual recording

		if(manualRecordingMode == true) {
			if(recordingM) {
				oVideoWriter.write(cameraFeed);
				//show "REC" in top left corner in red
				//be sure to do this AFTER you write to the file so that "REC" doesn't show up
				//on the recorded video.
				putText(cameraFeed,"mREC",Point(0,60),1,2,Scalar(0,0,255),2);
			}
		}

		//display the video
		imshow(originalWindow,cameraFeed);
		//imshow(hsvWindow,HSV);
		//imshow(thresholdWindow,threshold);
		
		//CONTROLS

		switch(waitKey(milisek)){

		case 27: //'esc' key has been pressed, exit program.
			return 0;
		case 112: //'p' has been pressed. this will pause/resume the code.
			pause = !pause;
			if(pause == true){ cout<<"Code paused, press 'p' again to resume\n";
			cout<<"****************************************\n";
			while (pause == true){
				//stay in this loop until 
				switch (waitKey()){
					//a switch statement inside a switch statement? Mind blown.
				case 112: 
					//change pause back to false
					pause = false;
					cout<<"Code Resumed\n"<<endl;
					cout<<"****************************************\n";
					break;
				}
			}
			}
			break;

		case 114:
			//'r' has been pressed.
			//toggle recording mode
			if(manualRecordingMode) {
			recordingM = true;
			cout << "Recording Started\n" << endl;
			cout<<"****************************************\n";
			}
			break;

		case 115:
			//'s' has been pressed.
			//toggle recording mode
			if(manualRecordingMode) {
			recordingM = false;
			cout << "Recording Stopped, press R to continue recording to"<< "Video"<<intToString(inc)
				<< "\n or press N to save current video and start recording new video\n";
			cout<<"****************************************\n";
			}

			break;

		case 99:
			//'c' has been pressed
			cr.clear();
			ze.clear();
			zu.clear();
			pl.clear();
			cout<<"Counters reseted\n";
			cout<<"****************************************\n";

			break;
		
		case 109:
			//'m' has been pressed
			//Manual recording
			manualRecordingMode = true;
			cout << "Manual recording mode \n New Video recording started, press R to record\n" << endl;
			cout<<"****************************************\n";
			//increment video file name
			inc+=1;

			break;

		case 97:

			manualRecordingMode = false;
			cout<<" Automatic recording mode \n";
			cout<<"****************************************\n";
			break;

		case 110:
			//'n' has been pressed
			//start new video file
			startRecording = true;
			cout<<"NOW RECORDING \n";
			cout<<"****************************************\n";
			//increment video file name
			inc+=1;
			break; 

		}
			
	}
	capture.release();
	koordinate.close();
	return 0;

}
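This example (and the security-camera one further below) calls intToString() and getDateTime() helpers that are declared elsewhere. Minimal sketches of the two, written as plain guesses at their behaviour:

#include <ctime>
#include <sstream>
#include <string>

// Hypothetical helper: integer -> decimal string.
std::string intToString(int number)
{
    std::ostringstream ss;
    ss << number;
    return ss.str();
}

// Hypothetical helper: current local date/time as "YYYY-MM-DD HH:MM:SS".
std::string getDateTime()
{
    char buf[32];
    time_t now = time(NULL);
    strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", localtime(&now));
    return std::string(buf);
}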
Example #10
int main( int argc, const char** argv )
{
    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;
    float hranges[] = {0,180};
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    int camNum = parser.get<int>(0);
    cap.open(camNum);

    if( !cap.isOpened() )
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printMessage();
        return -1;
    }


    Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),    // Acquire input size
                  (int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));

    VideoWriter videoStream;
    videoStream.open("./VirtualPiano.mp4", -1, cap.get(CV_CAP_PROP_FPS), S, true);
    if (!videoStream.isOpened())
    {
        cout  << "Could not open the output video." << endl;
        return -1;
    }
    cout << hot_keys;
    //namedWindow( "Histogram", 0 );
    namedWindow( "VirtualPiano", 0 );
    resizeWindow( "VirtualPiano", WINDOW_WIDTH, WINDOW_HEIGHT);
    setMouseCallback( "VirtualPiano", onMouse, 0 );
    //createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    //createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    //createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    RotatedRect trackBox;
    bool paused = false;

    for(;;)
    {
        if( !paused )
        {
            cap >> frame;
            if( frame.empty() )
                break;
        }

        frame.copyTo(image);
        Mat flippedImage;
        flip(image, flippedImage, 1);
        image = flippedImage;
        if( !paused )
        {
            cvtColor(image, hsv, COLOR_BGR2HSV);

            if( trackObject )
            {
                int _vmin = vmin, _vmax = vmax;

                inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = {0, 0};
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);

                if( trackObject < 0 )
                {
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, NORM_MINMAX);

                    trackWindow = selection;
                    trackObject = 1;

                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for( int i = 0; i < hsize; i++ )
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                    cvtColor(buf, buf, COLOR_HSV2BGR);

                    for( int i = 0; i < hsize; i++ )
                    {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                        rectangle( histimg, Point(i*binW,histimg.rows),
                                   Point((i+1)*binW,histimg.rows - val),
                                   Scalar(buf.at<Vec3b>(i)), -1, 8 );
                    }
                }

                calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                backproj &= mask;
                trackBox = CamShift(backproj, trackWindow,
                                    TermCriteria( TermCriteria::EPS | TermCriteria::COUNT, 10, 1 ));
                if( trackWindow.area() <= 1 )
                {
                    int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                                       trackWindow.x + r, trackWindow.y + r) &
                                  Rect(0, 0, cols, rows);
                }

                if( backprojMode )
                    cvtColor( backproj, image, COLOR_GRAY2BGR );
                ellipse( image, trackBox, Scalar(0,0,255), 3, LINE_AA );

            }
        }
        else if( trackObject < 0 )
            paused = false;

        if( selectObject && selection.width > 0 && selection.height > 0 )
        {
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }
        Size size = image.size();
        int thickness;
        for(int x = 0; x < NOTES_IN_ROW; ++x){
            for(int y = 0; y < NOTES_IN_COLUMN; ++y){
                Rect rect(Point(x*size.width/NOTES_IN_ROW, y*size.height/NOTES_IN_COLUMN), Point( (x+1)*size.width/NOTES_IN_ROW,(y+1)*size.height/NOTES_IN_COLUMN));
                if ( rect.contains(trackBox.center) && trackObject){
                    thickness = -1;
                }
                else{
                    thickness = 1;
                }
                rectangle(image, rect, NOTE_COLORS[x*NOTES_IN_ROW + y], thickness, 8);
            }
        }

        imshow( "VirtualPiano", image); 
        videoStream.write( image);
        //imshow( "Histogram", histimg );

        char c = (char)waitKey(10);
        if( c == 27 ){
            break;
        }
        switch(c)
        {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if( !showHist )
                destroyWindow( "Histogram" );
            else
                namedWindow( "Histogram", 1 );
            break;
        case 'p':
            paused = !paused;
            break;
        default:
            ;
        }
    }

    return 0;
}
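The mouse callback registered with setMouseCallback("VirtualPiano", onMouse, 0) is not part of this listing. In the stock OpenCV camshiftdemo that this code follows, the handler looks roughly like the following; treat it as a reference sketch (the globals image, selection, origin, selectObject and trackObject are assumed to exist as in that demo), not this project's exact code:

static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        // grow the selection rectangle while the left button is held down
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
        selection &= Rect(0, 0, image.cols, image.rows);
    }

    switch (event)
    {
    case EVENT_LBUTTONDOWN:
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        break;
    case EVENT_LBUTTONUP:
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            trackObject = -1;   // tell the main loop to (re)initialize the histogram
        break;
    }
}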
int main(){
	//set recording and startNewRecording initially to false.
	bool recording = false;
	bool startNewRecording = false;
	int inc=0;
	bool firstRun = true;
	//if motion is detected in the video feed, we will know to start recording.
	bool motionDetected = false;

	//pause and resume code (if needed)
	bool pause = false;
	//set debug mode and trackingenabled initially to false
	//these can be toggled using 'd' and 't'
	debugMode = false;
	trackingEnabled = true;
	//set up the matrices that we will need
	//the two frames we will be comparing
	Mat frame1,frame2;
	//their grayscale images (needed for absdiff() function)
	Mat grayImage1,grayImage2;
	//resulting difference image
	Mat differenceImage;
	//thresholded difference image (for use in findContours() function)
	Mat thresholdImage;
	//video capture object.
	VideoCapture capture;
	capture.open(0);
	VideoWriter oVideoWriter;//create videoWriter object, not initialized yet
	double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
	double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
	//set framesize for use with videoWriter
	Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));

	if(!capture.isOpened()){
		cout<<"ERROR ACQUIRING VIDEO FEED\n";
		getchar();
		return -1;
	}
	while(1){


		//read first frame
		capture.read(frame1);
		//convert frame1 to gray scale for frame differencing
		cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
		//copy second frame
		capture.read(frame2);
		//convert frame2 to gray scale for frame differencing
		cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
		//perform frame differencing with the sequential images. This will output an "intensity image"
		//do not confuse this with a threshold image, we will need to perform thresholding afterwards.
		cv::absdiff(grayImage1,grayImage2,differenceImage);
		//threshold intensity image at a given sensitivity value
		cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
		if(debugMode==true){
			//show the difference image and threshold image
			cv::imshow("Difference Image",differenceImage);
			cv::imshow("Threshold Image", thresholdImage);
		}else{
			//if not in debug mode, destroy the windows so we don't see them anymore
			cv::destroyWindow("Difference Image");
			cv::destroyWindow("Threshold Image");
		}
		//blur the image to get rid of the noise. This will output an intensity image
		cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
		//threshold again to obtain binary image from blur output
		cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
		if(debugMode==true){
			//show the threshold image after it's been "blurred"

			imshow("Final Threshold Image",thresholdImage);

		}
		else {
			//if not in debug mode, destroy the windows so we don't see them anymore
			cv::destroyWindow("Final Threshold Image");
		}

		//if tracking enabled, search for Motion
		if(trackingEnabled){

			//check for motion in the video feed
			//the detectMotion function will return true if motion is detected, else it will return false.
			//set motionDetected boolean to the returned value.
			motionDetected = detectMotion(thresholdImage,frame1);

		}else{ 
			//reset our variables if tracking is disabled
			motionDetected = false;

		}

////////////**STEP 1**//////////////////////////////////////////////////////////////////////////////////////////////////////////////
		//draw time stamp to video in bottom left corner. We draw it before we write so that it is written on the video file.


		//if we're in recording mode, write to file
		if(recording){

			//check if it's our first time running the program so that we don't create a new video file over and over again.
			//we use the same boolean check to create a new recording if we want.
			if(firstRun == true || startNewRecording == true){

//////////**STEP 3**///////////////////////////////////////////////////////////////////////////////////////////////////////////////
				//Create a unique filename for each video based on the date and time the recording has started
				string videoFileName = "D:/MyVideo"+intToString(inc)+".avi";

				cout << "File has been opened for writing: " << videoFileName<<endl;
				
				cout << "Frame Size = " << dWidth << "x" << dHeight << endl;

				oVideoWriter  = VideoWriter(videoFileName, CV_FOURCC('D', 'I', 'V', '3'), 20, frameSize, true);

				if ( !oVideoWriter.isOpened() ) 
				{
					cout << "ERROR: Failed to initialize video writing" << endl;
					getchar();
					return -1;
				}
				//reset our variables to false.
				firstRun = false;
				startNewRecording = false;


			}

			oVideoWriter.write(frame1);
			//show "REC" in top left corner in red
			//be sure to do this AFTER you write to the file so that "REC" doesn't show up on the recorded video file.
			//Cut and paste the following line above "oVideoWriter.write(frame1)" to see what I'm talking about.
			putText(frame1,"REC",Point(0,60),2,2,Scalar(0,0,255),2);


		}



		//check if motion is detected in the video feed.
		if(motionDetected){
			//show "MOTION DETECTED" in bottom left corner in green
			putText(frame1,"MOTION DETECTED",cv::Point(0,420),2,2,cv::Scalar(0,255,0));

//////////**STEP 2**///////////////////////////////////////////////////////////////////////////////////////////////////////////////
			//set recording to true since there is motion in the video feed.
			//else recording should be false.


		}
		//show our captured frame
		imshow("Frame1",frame1);

		//check to see if a button has been pressed.
		//the 30ms delay is necessary for proper operation of this program
		//if removed, frames will not have enough time to refresh and a blank image will appear.
		switch(waitKey(30)){

		case 27: //'esc' key has been pressed, exit program.
			return 0;
		case 116: //'t' has been pressed. this will toggle tracking (disabled for security cam)
			/*trackingEnabled = !trackingEnabled;
			if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
			else cout<<"Tracking enabled."<<endl;*/
			break;
		case 100: //'d' has been pressed. this will debug mode
			debugMode = !debugMode;
			if(debugMode == false) cout<<"Debug mode disabled."<<endl;
			else cout<<"Debug mode enabled."<<endl;
			break;
		case 112: //'p' has been pressed. this will pause/resume the code.
			pause = !pause;
			if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
			while (pause == true){
				//stay in this loop until 
				switch (waitKey()){
					//a switch statement inside a switch statement? Mind blown.
				case 112: 
					//change pause back to false
					pause = false;
					cout<<"Code Resumed"<<endl;
					break;
				}
			}
			}
			break;

		case 114:
			//'r' has been pressed.
			//toggle recording mode
			recording =!recording;

			if (!recording)cout << "Recording Stopped" << endl;

			else cout << "Recording Started" << endl;

			break;

		case 110:
			//'n' has been pressed
			//start new video file
			startNewRecording = true;
			recording = true;
			cout << "New Recording Started" << endl;
			//increment video file name
			inc+=1;
			break; 

		}

	}

	return 0;

}
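detectMotion(thresholdImage, frame1) comes from elsewhere in this project: it inspects the binary difference image and reports whether anything moved (the tutorial version also draws the object's position on the camera frame). A stripped-down sketch of the idea, offered as an assumption rather than the original function:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

// Hypothetical motion test: any contour in the thresholded difference image
// counts as motion, and the largest one gets a marker drawn on the camera frame.
bool detectMotionSketch(const Mat &thresholdImage, Mat &cameraFeed)
{
    vector< vector<Point> > contours;
    Mat temp = thresholdImage.clone();           // keep the caller's image untouched
    findContours(temp, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    if (contours.empty())
        return false;

    // mark the bounding box of the largest moving region
    size_t largest = 0;
    for (size_t i = 1; i < contours.size(); i++)
        if (contourArea(contours[i]) > contourArea(contours[largest]))
            largest = i;
    rectangle(cameraFeed, boundingRect(contours[largest]), Scalar(0, 255, 0), 2);
    return true;
}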
int main(int argc,char** argv){
    int houghVote = 100;
    Mat src1;
    //cout<<"linenumber="<<linenumber;
    float rho_values[linenumber];
    float theta_values[linenumber];
    Mat src, contours, contoursInv, ortho, H;
    float theta_max = 1000000;
    float theta_min = -1000000;
    float rho_min,rho_max;
    //VideoCapture capture(1);
    VideoCapture capture(1);
    namedWindow("ortho", CV_WINDOW_AUTOSIZE);
    double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
    double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video

    cout << "Frame Size = " << dWidth << "x" << dHeight << endl;

    Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));

    VideoWriter oVideoWriter("wierd.avi", CV_FOURCC('D', 'I', 'V', '3'), 20, frameSize, true); //initialize the VideoWriter object
    if ( !oVideoWriter.isOpened() ) //if the VideoWriter was not initialized successfully, exit the program
    {
        cout << "ERROR: Failed to write the video" << endl;
        return -1;
    }
    int count = 0;
    while(true){

        vector<vector<Point> > cnt;
        vector<Vec4i> hierarchy;  
        capture >> src;
        src.copyTo(src1);
        //  src = imread(argv[1]);
        imshow("Input image",src1);
        // imwrite("input.jpg",src1);
        vector<Point2f> source_points;
        vector<Point2f> dest_points;
        source_points.push_back(cv::Point2f(169,386));
        source_points.push_back(cv::Point2f(449,313));
        source_points.push_back(cv::Point2f(212,111));
        source_points.push_back(cv::Point2f(429,98));

        dest_points.push_back(cv::Point2f(120,347));
        dest_points.push_back(cv::Point2f(448,276));
        dest_points.push_back(cv::Point2f(217,177));
        dest_points.push_back(cv::Point2f(419,154));
        H = getPerspectiveTransform( source_points, dest_points);
        warpPerspective(src, src, H, src.size(), INTER_CUBIC | WARP_INVERSE_MAP);
        imshow("ortho",src);
       // imwrite("ortho.jpg",src);
      // imwrite("input1.jpg",src);
        cvtColor(src,src,CV_RGB2HSV);
        inRange(src,  Scalar(0,0,200), Scalar(140,255,255), src);
        //imshow("image",src);
        erode(src,src,cv::Mat());
        imshow("erode",src);
        Canny(src,contours,50,150,3);
        //equalizeHist(contours,contours);
        imshow("Canny",contours);
        //imwrite("canny.jpg",contours);
        //threshold(contours,contoursInv,128,255,THRESH_BINARY_INV);
        //imshow("threshold",contoursInv);
        std::vector<Vec2f> lines;
       if (houghVote < 1 or lines.size() > 2){ 
                houghVote = 100; 
        }
        else{ houghVote += 25;} 
        while(lines.size() < 5 && houghVote > 0){
            HoughLines(contours,lines,1,CV_PI/180, houghVote);
                houghVote -= 5;
        }
        //std::cout << houghVote << "\n";
        Mat hough(contours.rows,contours.cols,CV_8U,Scalar(0));
        Mat result1(contours.rows,contours.cols,CV_8U,Scalar(0));
        //src.copyTo(hough);
        std::vector<Vec2f>::const_iterator it= lines.begin();
        //Mat hough(src.size(),CV_8U,Scalar(0));
        //if(count==0)
    //   cout<<"no. of lines="<<lines.end()-lines.begin()<<endl;
      // int val=0;
        while (it!=lines.end()) {
            float rho= (*it)[0];   
            float theta= (*it)[1]; 
            /*if (theta < theta_min)
            {
                theta_min = theta;
                rho_min = rho;
            }
            else if (theta > theta_max)
            {
                theta_max = theta;
                rho_max = rho;
            }*/
            rho_values[it-lines.begin()] = rho;
            theta_values[it-lines.begin()] = theta;
            // cout<<"rho="<<rho_values[it-lines.begin()]<<" theta="<<theta_values[it-lines.begin()]<<endl;
            Point pt1(rho/cos(theta), 0);
            Point pt2((rho - hough.rows*sin(theta))/cos(theta), hough.rows);
            // if(count==0)
            //     cout<<"rho="<<rho<<", theta="<<theta<<endl;
            // line( result, pt1, pt2, Scalar(255), 2);
            line(hough, pt1, pt2, Scalar(255), 2);
       // cout<<"point 1="<<pt1.x<<","<<pt1.y<<endl;
     //   cout<<"point2="<<pt2.x<<", "<<pt2.y<<endl;
        //cout<<endl;//if(count==0)
       // {
        //for(int k=0; k<lines.end()-lines.begin();k++)
        //cout<<rho_values[it-lines.begin()]<<endl;//}
       // cout<<pt1.x<<" "<<pt1.y<<endl;
        //std::cout << "line: (" << rho << "," << theta << ")\n"; 
        ++it;
        }
        // removing extra (near-duplicate) lines from the Hough output
        float rho_final[lines.end()-lines.begin()];
        float theta_final[lines.end()-lines.begin()];
        std::vector<Vec2f>::const_iterator it1 = lines.begin();
        Mat result(src.size(), CV_8U, Scalar(0));
        line(result, Point(rho_values[0]/cos(theta_values[0]), 0),
             Point((rho_values[0] - result.rows*sin(theta_values[0]))/cos(theta_values[0]), result.rows),
             Scalar(255), 2);
        int b = 1;
        rho_final[0] = rho_values[0], theta_final[0] = theta_values[0];
        int line_result = 1;
        while (it1 != lines.end()) {
            std::vector<Vec2f>::const_iterator it2 = lines.begin();

            while (it2 < it1)
            {
                if ((abs(rho_values[it1-lines.begin()] - rho_values[it2-lines.begin()]) < 5) &&
                    (abs(theta_values[it1-lines.begin()] - theta_values[it2-lines.begin()]) < 0.5))
                    break;
                else
                {
                    Point pt3(rho_values[it2-lines.begin()]/(cos(theta_values[it2-lines.begin()])), 0);
                    Point pt4((rho_values[it2-lines.begin()] - result.rows*sin(theta_values[it2-lines.begin()]))/cos(theta_values[it2-lines.begin()]), result.rows);
                    //cout<<"rho val of line="<<rho_values[it2-lines.begin()]<<" theta val of line= "<<theta_values[it2-lines.begin()]<<endl;
                    //cout<<"point3="<<pt3.x<<","<<pt3.y<<endl;
                    //cout<<"point4="<<pt4.x<<","<<pt4.y<<endl;
                    //cout<<endl;
                    //cv::circle(result,pt3,13,cvScalar(255,0,0));
                    //cv::circle(result,pt4,16,cvScalar(125,0,0));
                    line(result, pt3, pt4, Scalar(255), 2);
                    rho_final[b] = rho_values[it2-lines.begin()];
                    theta_final[b] = theta_values[it2-lines.begin()];
                    // cout<<"rho_final ="<<rho_final[b]<<" theta_final= "<<theta_final[b]<<endl;
                    b++;
                    line_result++;
                }
                ++it2;
            }
            ++it1;
        }
        //cout<<"b ="<<b;
        
        // finding all possible distances between the kept lines
        float dist_max = 0;
        float dist_cmp = 0;
        for (int c = 0; c < b; c++)
        {
            // cout<<"rho= "<<rho_final[c]<<" theta= "<<theta_final[c]<<endl;
            for (int d = 0; d < c; d++)
            {
                dist_cmp = (rho_final[c] - rho_final[d]) * 0.28285;
                if (dist_cmp >= dist_max)
                    dist_max = dist_cmp;
            }
        }
        cout << "the maximum distance is " << dist_max << endl;
        // cout<<"lines drawn in result="<<line_result<<endl;
        //imshow("result1",result1);
        imshow("result_b", result);
        addWeighted(result1, 1, result, 1, 0., result);
        if (count % 1 == 0)
            imshow("result_a", result);
              /*  std::vector<Vec2f>::const_iterator it1= lines.begin();
        for(it1=lines.begin();it1<lines.end();it1++)
        {
        }
        //Mat output(contours.rows,contours.cols,CV_8U,Scalar(255));
        // removing two similar lines
       /* for(int k=0;k<lines.end()-lines.begin()-1;k++)
        {
        if((rho_values[k]-rho_values[k+1]<=1)&&(theta_values[k]-theta_values[k+1]<=0.1))
        cout<<"do nothing"<<endl;
    	else
    	// line(( output, (rho_values[k]/cos(theta_values[k]),0), (rho_values[k]-output.rows*sin(theta_values[k]))/cos(theta_values[k]),output.rows), Scalar(255), 2); 
    	}*/
        imshow("Hough",hough);
        //imwrite("hough.jpg",hough);
       // imshow("output",output);
      /*  for(int i=0; i<hough.rows;i++)
        {
        for(int j=0; j<hough.cols;j++)
        { if(hough.at<uchar>(i,j))
        {
       // cout<<"white"<<endl;
       //if(count==0)
       //cout<<"The white point is ("<<j<<","<<i<<")"<<endl;
        }}}*/
            oVideoWriter.write(ortho); // write the warped frame into the output file
       // imwrite("hough1.jpg",hough);
        //imshow("result",result);*/
      
      
        // press ESC to quit (uncomment to enable):
        // if ((waitKey(30) & 255) == 27)
        //     return 0;
        waitKey(30);
        src.release();
        ortho.release();
        hough.release();
        //houghP.release();
       // result.release();
        contours.release();
        contoursInv.release();
        lines.clear();
        count++;
        result.copyTo(result1);
        //lines1.clear();
        //li.clear();
  }
    return 0;
}	
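// The example above repeatedly converts a Hough line given as (rho, theta) into two image
// points and scales differences in rho by 0.28285 before reporting a "maximum distance".
// A minimal, hedged sketch of those two steps in isolation; the helper names and the reading
// of 0.28285 as a pixel-to-real-world scale factor are assumptions, not stated in the code,
// and, like the original, the conversion assumes non-horizontal lines (cos(theta) != 0).
#include <opencv2/opencv.hpp>
#include <cmath>

// endpoints of the line x*cos(theta) + y*sin(theta) = rho, clipped to rows [0, height]
static void rhoThetaToSegment(float rho, float theta, int height, cv::Point& pt1, cv::Point& pt2)
{
    pt1 = cv::Point(cvRound(rho / std::cos(theta)), 0);
    pt2 = cv::Point(cvRound((rho - height * std::sin(theta)) / std::cos(theta)), height);
}

// separation of two (near-)parallel Hough lines, scaled the same way the example does
static float scaledLineDistance(float rho1, float rho2, float scale = 0.28285f)
{
    return std::fabs(rho1 - rho2) * scale;
}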
int main(int argc, char** argv)
{
    if(argc >= 3)
    {
        VideoCapture inputVideo(argv[1]); // open the default camera
        if(!inputVideo.isOpened())  // check if we succeeded
            return -1; 
        
        // Initialize
        VideoWriter outputVideo;  // Open the output
        const string source      = argv[2];                                // the source file name
        const string NAME = source + ".mp4";   // Form the new name with container
        int ex = inputVideo.get(CV_CAP_PROP_FOURCC);                       // Get Codec Type- Int form
        std::cout << ex << "\n" << (int)inputVideo.get(CV_CAP_PROP_FOURCC) << "\n";
        Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH),       //Acquire input size
                      (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));    
        outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, false);
        char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
        cout << "Input codec type: " << EXT << endl;

       if (!outputVideo.isOpened())
        {
            cout  << "Could not open the output video for write \n";
            return -1;
        }
        
        // Basketball Color
        int iLowH = 180;
        int iHighH = 16;
        
        int iLowS =  95;
        int iHighS = 200;
        
        int iLowV = 75;
        int iHighV = 140;
        
        // court Color
        int courtLowH = 0;
        int courtHighH = 20;
        
        int courtLowS = 50;
        int courtHighS = 150;
        
        int courtLowV = 160;
        int courtHighV = 255;
        
        namedWindow("Result Window", 1);
        //namedWindow("Court Window", 1);
        
        // Mat declaration
        Mat prev_frame, prev_gray, cur_frame, cur_gray;
        Mat frame_blurred, frameHSV, frameGray;
        
        // take the first frame
        inputVideo >> prev_frame;
        
        /* manual ball selection */
        MouseParams mp;
        prev_frame.copyTo( mp.ori );
        prev_frame.copyTo( mp.img );
        setMouseCallback("Result Window", BallSelectFunc, &mp );
        
        int enterkey = 0;
        while(enterkey != 32 && enterkey != 113)
        {
            enterkey = waitKey(30) & 0xFF;
            imshow("Result Window", mp.img);
        }
        Rect  lastBallBox;
        Point lastBallCenter;
        Point lastMotion;
        
        /* Kalman Filter Initialization */
        KalmanFilter KF(4, 2, 0);
        float transMatrixData[16] = {1,0,1,0, 0,1,0,1, 0,0,1,0, 0,0,0,1};
        KF.transitionMatrix = Mat(4, 4, CV_32F, transMatrixData);
        Mat_<float> measurement(2,1);
        measurement.setTo(Scalar(0));
        
        KF.statePre.at<float>(0) = mp.pt.x;
        KF.statePre.at<float>(1) = mp.pt.y;
        KF.statePre.at<float>(2) = 0;
        KF.statePre.at<float>(3) = 0;
        setIdentity(KF.measurementMatrix);
        setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
        setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
        setIdentity(KF.errorCovPost, Scalar::all(.1));
        int pre_status_7=0;
        
        /* start tracking */
        setMouseCallback("Result Window", CallBackFunc, &frameHSV);
        
        for(int frame_num=1; frame_num < inputVideo.get(CAP_PROP_FRAME_COUNT); ++frame_num)
        {
            int cur_status_7=pre_status_7;
            
            inputVideo >> cur_frame; // get a new frame
            // Blur & convert frame to HSV color space
            cv::GaussianBlur(prev_frame, frame_blurred, cv::Size(5, 5), 3.0, 3.0);
            cvtColor(frame_blurred, frameHSV, COLOR_BGR2HSV);
            
            // gray scale current frame
            cvtColor(prev_frame, prev_gray, CV_BGR2GRAY);
            cvtColor(cur_frame, cur_gray, CV_BGR2GRAY);
            
            /*
             * STAGE 1: mask generation
             * creating masks for balls and courts.
             */
            Mat mask, mask1, mask2, court_mask;
            inRange(frameHSV, Scalar(2, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), mask1);
            inRange(frameHSV, Scalar(iLowH, iLowS, iLowV), Scalar(180, iHighS, iHighV), mask2);
            inRange(frameHSV, Scalar(courtLowH, courtLowS, courtLowV), Scalar(courtHighH, courtHighS, courtHighV), court_mask);
            
            mask = mask1 + mask2;
            
            // morphological opening (remove small objects from the foreground)
            erode(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            
            // morphological closing (fill small holes in the foreground)
            dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            erode(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            
            /*
             * Method:  HoughCircles
             * creating circles and radius.
             */
            // Basketball Color for Hough circle
            
            int iLowH = 180;
            int iHighH = 16;
            
            int iLowS =  95;
            int iHighS = 200;
            
            int iLowV = 75;
            int iHighV = 140;
            
            Mat mask1_circle, mask2_circle, mask_circle, frameHSV_circle, frameFiltered,frameGray2;
            cvtColor(frame_blurred, frameHSV_circle, COLOR_BGR2HSV);
            inRange(frameHSV_circle, Scalar(0, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), mask1_circle);
            inRange(frameHSV_circle, Scalar(iLowH, iLowS, iLowV),Scalar(180, iHighS, iHighV), mask2_circle);
            mask_circle = mask1_circle + mask2_circle;
            erode(mask_circle, mask_circle, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            dilate(mask_circle, mask_circle, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            
            prev_frame.copyTo( frameFiltered, mask_circle );
            cv::cvtColor( frameFiltered, frameGray2, CV_BGR2GRAY );
            vector<cv::Vec3f> circles;
            cv::GaussianBlur(frameGray2, frameGray2, cv::Size(5, 5), 3.0, 3.0);
            HoughCircles( frameGray2, circles, CV_HOUGH_GRADIENT, 1, frameGray2.rows/8, 120, 18, 5,300);
            
            /*
             * STAGE 2: contour generation
             * creating contours with masks.
             */
            vector< vector<cv::Point> > contours_ball;
            vector< vector<cv::Point> > contours_court;
            cv::findContours(mask, contours_ball, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
            
            Mat result;
            
            prev_frame.copyTo( result );
            
            /*
             // court mask refinement: eliminate small blocks
             Mat buffer;
             court_mask.copyTo( buffer );
             cv::findContours(buffer, contours_court, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
             
             for (size_t i = 0; i < contours_court.size(); i++)
             {
                double tmp_area = contourArea( contours_court[i] );
                if(tmp_area < 900.0)
             drawContours(court_mask, contours_court, i, 0, CV_FILLED);
             }
             bitwise_not(court_mask, court_mask);
             court_mask.copyTo( buffer );
             cv::findContours(buffer, contours_court, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
             for (size_t i = 0; i < contours_court.size(); i++)
             {
                double tmp_area = contourArea( contours_court[i] );
                if(tmp_area < 900.0)
             drawContours(court_mask, contours_court, i, 0, CV_FILLED);
             }
             bitwise_not(court_mask, court_mask);
             
             Mat canny_mask;
             Canny(court_mask, canny_mask, 50, 150, 3);
             vector<Vec4i> lines;
             HoughLinesP(canny_mask, lines, 1, CV_PI/180, 80, 30, 10);
             
             Point l_top( mask.cols/2, mask.rows );
             Point l_bot( mask.cols/2, mask.rows );
             
             for( size_t i = 0; i < lines.size(); i++ )
             {
             Point p1 = Point(lines[i][0], lines[i][1]);
             Point p2 = Point(lines[i][2], lines[i][3]);
             
             if(p1.y < l_top.y)
             {
             l_top = p1;
             l_bot = p2;
             }
             if(p2.y < l_top.y)
             {
             l_top = p2;
             l_bot = p1;
             }
             }
             // stretch the line
             Point v_diff = l_top - l_bot;
             Point p_left, p_right;
             
             
             int left_t  = l_top.x / v_diff.x;
             int right_t = (mask.cols - l_top.x) / v_diff.x;
             
             p_left = l_top - v_diff * left_t;
             p_right = l_top + v_diff * right_t;
             
             line( court_mask, p_left, p_right, Scalar(128), 2, 8 );
             imshow("Court Window", court_mask);
             */
            
            // sieves
            vector< vector<cv::Point> > balls;
            vector<cv::Point2f> prev_ball_centers;
            vector<cv::Rect> ballsBox;
            Point best_candidate;
            for (size_t i = 0; i < contours_ball.size(); i++)
            {
                drawContours(result, contours_ball, i, CV_RGB(255,0,0), 1);  // fill the area
                
                cv::Rect bBox;
                bBox = cv::boundingRect(contours_ball[i]);
                Point center;
                center.x = bBox.x + bBox.width / 2;
                center.y = bBox.y + bBox.height / 2;
                
                // meet prediction!
                if( mp.pt.x > bBox.x && mp.pt.x < bBox.x + bBox.width &&
                   mp.pt.y > bBox.y && mp.pt.y < bBox.y + bBox.height)
                {
                    // initialization of ball position at first frame
                    if( frame_num == 1 || ( bBox.area() <= lastBallBox.area() * 1.5 && bBox.area() >= lastBallBox.area() * 0.5) )
                    {
                        lastBallBox = bBox;
                        lastBallCenter = center;
                        
                        balls.push_back(contours_ball[i]);
                        prev_ball_centers.push_back(center);
                        ballsBox.push_back(bBox);
                        best_candidate = center;
                    }
                    else
                    {
                        cout << "area changed!" << endl;
                        // if the block containing ball becomes too large,
                        // we use last center + motion as predicted center
                        balls.push_back(contours_ball[i]);
                        prev_ball_centers.push_back( lastBallCenter+lastMotion );
                        ballsBox.push_back(bBox);
                        best_candidate = lastBallCenter + lastMotion;
                    }
                }
                else
                {
                    // ball size sieve
                    
                    if(  bBox.area() > 1600 )
                        continue;
                    
                    // ratio sieve
                    //                     float ratio = (float) bBox.width / (float) bBox.height;
                    //                     if( ratio < 1.0/2.0 || ratio > 2.0 )
                    //                     continue;
                    
                    // ball center sieve: since we've done dilate and erode, not necessary to do.
                    /*
                     uchar center_v = mask.at<uchar>( center );*
                     if(center_v != 1)
                     continue;
                     */
                    
                    // ball-on-court sieve: not useful in basketball =(
                    //if(court_mask.at<uchar>(center) != 255)
                    //  continue;
                    
                    balls.push_back(contours_ball[i]);
                    prev_ball_centers.push_back(center);
                    ballsBox.push_back(bBox);
                }
            }
            
            
            // store the center of the hough circle
            vector<cv::Point2f> prev_ball_centers_circle;
            for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
            {
                Point center_circle(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
                int radius_circle = cvRound(circles[circle_i][2]);
                prev_ball_centers_circle.push_back(center_circle);
            }
            // Kalman Filter Prediction
            //Mat prediction = KF.predict();
            //Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
            // Kalman Filter Update
            //Mat estimated = KF.correct( best_candidate );
            
            //OpticalFlow for HSV
            vector<Point2f> cur_ball_centers;
            vector<uchar> featuresFound;
            Mat err;
            TermCriteria termcrit(TermCriteria::COUNT|TermCriteria::EPS, 20, 0.03);
            Size winSize(31, 31);
            if( prev_ball_centers.size() > 0 )
                calcOpticalFlowPyrLK(prev_gray, cur_gray, prev_ball_centers, cur_ball_centers, featuresFound, err, winSize, 3, termcrit, 0, 0.001);
            
            //OpticalFlow for circle
            vector<Point2f> cur_ball_centers_circle;
            vector<uchar> featuresFound_circle;
            Mat err2;
            if( prev_ball_centers_circle.size() > 0 )
                calcOpticalFlowPyrLK(prev_gray, cur_gray, prev_ball_centers_circle, cur_ball_centers_circle, featuresFound_circle, err2, winSize, 3, termcrit, 0, 0.001);
            
            //plot MP
            circle(result, mp.pt, 2, CV_RGB(255,255,255), 5);
            cout<<"frame_num :"<<frame_num<<endl;
            cout<<"lastMotion"<<lastMotion<<endl;
            bool ball_found = false;
            
            for (size_t i = 0; i < balls.size(); i++)
            {
                cv::Point center;
                center.x = ballsBox[i].x + (ballsBox[i].width / 2);
                center.y = ballsBox[i].y + (ballsBox[i].height/2);
                // consider hough circle
                int circle_in_HSV=0;
                int in=0;
                for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                {
                    
                    
                    Point center2(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
                    int radius = cvRound(circles[circle_i][2]);
                    double dis_center =  sqrt(pow(center2.x-center.x,2)+pow(center2.y-center.y,2));
                    
                    if( frame_num >2 && radius<40 && dis_center<radius+3 && mp.pt.x > ballsBox[i].x && mp.pt.x < ballsBox[i].x + ballsBox[i].width && mp.pt.y > ballsBox[i].y && mp.pt.y < ballsBox[i].y + ballsBox[i].height){
                        circle_in_HSV=1;
                        Point motion = cur_ball_centers_circle[circle_i] - prev_ball_centers_circle[circle_i];
                        mp.pt = Point2f(cur_ball_centers_circle[circle_i].x, cur_ball_centers_circle[circle_i].y);
                        lastMotion = motion;
                        cout<<mp.pt<<endl;
                        cout<<"status 1"<<endl;
                        cout<<motion<<endl;
                        ball_found = true;
                        in=1;
                        cout<<in<<endl;
                        cv::circle( result, center2, radius, Scalar(0,255,0), 2 );
                    }

//                    if(radius<40){
//                        stringstream sstr;
//                        sstr << "(" << center2.x << "," << center2.y << ")";
////                        cv::putText(result, sstr.str(),
////                                    cv::Point(center2.x + 3, center2.y - 3),
////                                    cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(20,150,20), 2);
//                        cv::circle( result, center2, radius, Scalar(12,12,255), 2 );}
                }
                
                // see if any candidate box contains our ball
                if( circle_in_HSV==0 && mp.pt.x > ballsBox[i].x && mp.pt.x < ballsBox[i].x + ballsBox[i].width && mp.pt.y > ballsBox[i].y && mp.pt.y < ballsBox[i].y + ballsBox[i].height)
                {
                    cv::rectangle(result, ballsBox[i], CV_RGB(0,255,0), 2);
                    Point motion = cur_ball_centers[i] - prev_ball_centers[i];
                    // update points and lastMotion
                    
                    float ratio = (float) ballsBox[i].width / (float) ballsBox[i].height;
                    if( ballsBox[i].area() < 1000 && ratio>0.7 && ratio<1.35 && ballsBox[i].area() > 200){
                        mp.pt = Point2f(center.x, center.y);
                        cout<<"status 2"<<endl;
                        cout<<"AREA:"<<ballsBox[i].area()<<endl;
                    }else{
                        mp.pt = Point2f(mp.pt.x+motion.x, mp.pt.y+motion.y);
                        cout<<"status 3"<<endl;
                    }
                    // TODO replace with predicted points of kalman filter here.
                    lastMotion = motion;
                    ball_found = true;
                }
                
                // draw optical flow
                if(!featuresFound[i])
                    continue;
                
                cv::Point2f prev_center = prev_ball_centers[i];
                cv::Point2f curr_center = cur_ball_centers[i];
                cv::line( result, prev_center, curr_center, CV_RGB(255,255,0), 2);
                
            }
            
            // if ball is not found, search for the closest ball candidate within a distance.
            if(!ball_found)
            {
                int search_distance_threshold = 35*35;
                int closest_dist      = 2000;
                //                int closest_dist2      = 2000;
                int closest_area_diff = 10000;
                int best_i = 0;
                
                for (size_t i = 0; i < balls.size(); i++)
                {
                    int diff_x = prev_ball_centers[i].x - mp.pt.x;
                    int diff_y = prev_ball_centers[i].y - mp.pt.y;
                    int area_threshold_high = 100*100;
                    int area_threshold_low = 15*15;
                    int distance  = diff_x * diff_x + diff_y * diff_y;
                    int area_diff = abs(ballsBox[i].area()-lastBallBox.area());
                    float ratio = (float) ballsBox[i].width / (float) ballsBox[i].height;
                    //                    if(distance<closest_dist2){
                    //                        closest_dist2=distance;
                    //                        best_i = i;}
                    // if distance is small
                    if( distance < search_distance_threshold &&
                       distance < closest_dist && ratio>0.7 && ratio<1.45 && ballsBox[i].area()<area_threshold_high && ballsBox[i].area()>area_threshold_low)
                    {
                        closest_dist      = distance;
                        closest_area_diff =  area_diff;
                        best_i = i;
                        ball_found = true;
                    }
                }
                //                cout<<"ballsBox[i].area()"<<ballsBox[best_i].area()<<endl;
                //                cout<<"Ratio"<<(float) ballsBox[best_i].width / (float) ballsBox[best_i].height<<endl;
                int best_radius;
                if(ball_found)
                {
                    // reset mp.pt
                    cout<<"here! yello"<<endl;
                    
                    int search_distance_threshold = 80*80;
                    int closest_dist = 2000;
                    int best_circle_i = 0;
                    bool circle_found = false;
                    for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                    {
                        int radius = cvRound(circles[circle_i][2]);
                        int diff_x = prev_ball_centers_circle[circle_i].x - mp.pt.x;
                        int diff_y = prev_ball_centers_circle[circle_i].y - mp.pt.y;
                        int distance  = diff_x * diff_x + diff_y * diff_y;
                        if( distance < search_distance_threshold && radius>8 && radius<13)
                        {
                            closest_dist      = distance;
                            best_circle_i = circle_i;
                            circle_found = true;
                            cout<<"radius"<<radius<<endl;
                            best_radius = radius;
                        }
                    }
                    if(circle_found){
                        cv::circle( result, cur_ball_centers_circle[best_circle_i], best_radius, CV_RGB(255,255,0), 2 );
                        mp.pt = Point2f(cur_ball_centers_circle[best_circle_i].x, cur_ball_centers_circle[best_circle_i].y);
                        cout<<"status 4"<<endl;
                    } else{
                        cv::rectangle(result, ballsBox[best_i], CV_RGB(255,255,0), 2);
                        Point motion = cur_ball_centers[best_i] - prev_ball_centers[best_i];
                        mp.pt = Point2f(cur_ball_centers[best_i].x, cur_ball_centers[best_i].y);
                        lastMotion = motion;
                        cout<<"status 5"<<endl;
                    }
                    
                }
                else
                {
                    // if ball still not found... stay at the same direction
                    circle(result, mp.pt, 5, CV_RGB(255,255,255), 2);
                    int search_distance_threshold, closest_dist,best_i,radius_threshold_low, radius_threshold_high, ball_found;
                    if(cur_status_7>1){
                        search_distance_threshold = 200*200;
                        closest_dist      = 55000;
                        best_i = 0;
                        radius_threshold_low=4;
                        radius_threshold_high=16;
                        ball_found = false;}
                    else{
                        search_distance_threshold = 80*80;
                        closest_dist      = 6000;
                        best_i = 0;
                        radius_threshold_low=7;
                        radius_threshold_high=13;
                        ball_found = false;
                    }
                    int best_radius;
                    for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                    {
                        int radius = cvRound(circles[circle_i][2]);
                        int diff_x = prev_ball_centers_circle[circle_i].x - mp.pt.x;
                        int diff_y = prev_ball_centers_circle[circle_i].y - mp.pt.y;
                        int distance  = diff_x * diff_x + diff_y * diff_y;
                        if( distance < search_distance_threshold && radius>radius_threshold_low && radius<radius_threshold_high)
                        {
                            closest_dist      = distance;
                            best_i = circle_i;
                            ball_found = true;
                            best_radius =radius;
                            cout<<"radius"<<radius<<endl;
                            cout<<mp.pt<<endl;
                        }
                    }
                    if(ball_found){
                        cv::circle( result, cur_ball_centers_circle[best_i], best_radius, CV_RGB(255,255,0), 2 );
                        Point motion = cur_ball_centers_circle[best_i] - prev_ball_centers_circle[best_i];
                        mp.pt = Point2f(cur_ball_centers_circle[best_i].x, cur_ball_centers_circle[best_i].y);
                        lastMotion = motion;
                        cout<<mp.pt<<endl;
                        cout<<motion<<endl;
                        cout<<"status 6"<<endl;
                    }else{
                        //                        mp.pt = lastBallCenter + lastMotion;
                        cout<<"status 7"<<endl;
                        cout<<"lastBallCenter"<<lastBallCenter<<endl;
                    }
                    //                          mp.pt = Point2f(mp.pt.x+lastMotion.x, mp.pt.y+lastMotion.y);
                    pre_status_7+=1;
                }
            }
            
            if(lastMotion.x*lastMotion.x+lastMotion.y*lastMotion.y>1200){
                cout<<"HIGH SPEED"<<endl;
                cout<<"HIGH SPEED"<<endl;
                cout<<"HIGH SPEED"<<endl;
                cout<<"MP before"<<mp.pt<<endl;
                int search_distance_threshold = 200*200;
                int closest_dist = 55000;
                int best_circle_i = 0;
                int best_i=0;
                bool ball_found = false;
                for (size_t i = 0; i < balls.size(); i++)
                {
                    int diff_x = prev_ball_centers[i].x - mp.pt.x;
                    int diff_y = prev_ball_centers[i].y - mp.pt.y;
                    int area_threshold_high = 100*100;
                    int area_threshold_low = 10*10;
                    int distance  = diff_x * diff_x + diff_y * diff_y;
                    int area_diff = abs(ballsBox[i].area()-lastBallBox.area());
                    float ratio = (float) ballsBox[i].width / (float) ballsBox[i].height;
                    //                    if(distance<closest_dist2){
                    //                        closest_dist2=distance;
                    //                        best_i = i;}
                    // if distance is small
                    if( distance < search_distance_threshold &&
                       distance < closest_dist && ratio>0.7 && ratio<1.45 && ballsBox[i].area()<area_threshold_high && ballsBox[i].area()>area_threshold_low)
                    {
                        closest_dist    = distance;
                        best_i = i;
                        ball_found = true;
                    }
                }
                if(ball_found)
                {
                    cv::rectangle(result, ballsBox[best_i], CV_RGB(255,255,0), 2);
                    Point motion = cur_ball_centers[best_i] - prev_ball_centers[best_i];
                    mp.pt = Point2f(cur_ball_centers[best_i].x, cur_ball_centers[best_i].y);
                    lastMotion = motion;
                    cout<<"ball"<<endl;
                }
                
                circle(result, mp.pt, 5, CV_RGB(255,255,255), 2);
                int radius_threshold_low, radius_threshold_high;
                int best_radius;
                search_distance_threshold = 200*200;
                closest_dist      = 55000;
                best_i = 0;
                radius_threshold_low=5;
                radius_threshold_high=13;
                ball_found = false;
                
                for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                {
                    int radius = cvRound(circles[circle_i][2]);
                    int diff_x = prev_ball_centers_circle[circle_i].x - mp.pt.x;
                    int diff_y = prev_ball_centers_circle[circle_i].y - mp.pt.y;
                    int distance  = diff_x * diff_x + diff_y * diff_y;
                    if( distance < search_distance_threshold && radius>radius_threshold_low && radius<radius_threshold_high)
                    {
                        closest_dist      = distance;
                        best_i = circle_i;
                        ball_found = true;
                        best_radius = radius;
                    }
                }
                if(ball_found)
                {
//                    cv::circle( result, cur_ball_centers_circle[best_i], best_radius, Scalar(255,255,0), 2 );
                    Point motion = cur_ball_centers_circle[best_i] - prev_ball_centers_circle[best_i];
                    mp.pt = Point2f(cur_ball_centers_circle[best_i].x, cur_ball_centers_circle[best_i].y);
                    lastMotion = motion;
                    cout<<"circle"<<endl;
                }
                cout<<"MP after"<<mp.pt<<endl;
                
            }
            
            for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
            {
                
                
                Point center2(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
                double dis_center =  sqrt(pow(center2.x-mp.pt.x,2)+pow(center2.y-mp.pt.y,2));
                int radius = cvRound(circles[circle_i][2]);
                if(dis_center<200)
                    continue;
                cv::circle( result, center2, radius, Scalar(12,12,255), 2 );
                
            }

            if(mp.pt.x<1)
                mp.pt.x=1;
            if(mp.pt.x>1279)
                mp.pt.x=1279;
            if(mp.pt.y<1)
                mp.pt.y=1;
            if(mp.pt.y>719)
                mp.pt.y=719;
            if(pre_status_7==cur_status_7)
                pre_status_7=0;
            imshow("Result Window", result);
            
            /* UPDATE FRAME */
            cur_frame.copyTo( prev_frame );
            
            /* KEY INPUTS */
            int keynum = waitKey(30) & 0xFF;
            if(keynum == 113)      // press q
                break;
            else if(keynum == 32)  // press space
            {
                keynum = 0;
                while(keynum != 32 && keynum != 113)
                    keynum = waitKey(30) & 0xFF;
                if(keynum == 113)
                    break;
            }
        }
        inputVideo.release();
        outputVideo.release();
    }
    return 0;
}
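// The tracking example above sets up a constant-velocity KalmanFilter (KF) but leaves the
// predict/correct calls commented out (see the "Kalman Filter Prediction" block and the
// TODO near "status 2/3"). A minimal, hedged sketch of how the filter could be wired in,
// using the detected ball centre as the measurement; the function name and the choice of
// returning the corrected state are illustrative assumptions, not the original design.
#include <opencv2/opencv.hpp>
using namespace cv;

Point2f kalmanTrackStep(KalmanFilter& KF, const Point2f& measuredCenter)
{
    // 1) predict the new state from the constant-velocity model
    Mat prediction = KF.predict();
    Point2f predictPt(prediction.at<float>(0), prediction.at<float>(1));

    // 2) feed the detected centre back in as the measurement
    Mat_<float> measurement(2, 1);
    measurement(0) = measuredCenter.x;
    measurement(1) = measuredCenter.y;

    // 3) correct: the returned state blends the prediction with the measurement
    Mat estimated = KF.correct(measurement);

    // predictPt could stand in for mp.pt on frames where no candidate is found;
    // the corrected estimate is the natural replacement for the raw centre update.
    (void)predictPt;
    return Point2f(estimated.at<float>(0), estimated.at<float>(1));
}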
示例#14
0
int main(int argc, char** argv)
{
	// variable initialization
	int keyInput = 0;
	int nFrames = 0, nSmoothFrames = 0, nFailedFrames = 0, nBlindFrames = 0;
	int lastDx = 0, lastDy = 0;
	
	bool bOverlay = true;			// plot overlay?
	bool bTrace = true & bOverlay;	// plot 'bubble' trace? (only when overlay active)
	
	Ptr<BackgroundSubtractor> pMOG2;

	VideoCapture capture;		// input video capture
	VideoWriter outputVideo;	// output video writer

	Mat curFrame,		// current original frame
		fgMaskMOG2,		// foreground mask from MOG2 algorithm
		bgImg,			// container for background image from MOG2
		grayFrame,		// grayscale conversion of original frame
		frameDil,		// dilated grayscale frame
		canny_out;		// output of Canny algorithm for shape outline detection

	Mat *pOutMat = &curFrame;	// pointer to image that will be rendered once per input video frame
	Mat strucElem = getStructuringElement(MORPH_RECT, Size(3, 3)); // dilatation base element

	// containers for output of findContours()
	vector<Mat> contours;
	vector<Vec4i> hierarchy;
	
	// read video input filename from command line and construct output filename
	if (argc < 2) {
		cerr << "Please provide input video filename." << endl;
		return EXIT_FAILURE;
	}
	string filename(argv[1]);
	string outName = filename.substr(0, filename.length() - 4) + "_out.avi";

	Rect lastKnownRect, lastRect;
	Point lastKnownPos, lastPos, estimatePos, plotPos;
	list<Point> lastKnownPositions;

	// init 'live' video output window
	namedWindow("Motion tracking");

	// try to open input file
	capture.open(filename);
	if (!capture.isOpened()) {
		cerr << "Unable to open file '" << filename << "'." << endl;
		return EXIT_FAILURE;
	} else	{
		cout << "Successfully opened file '" << filename << "'." << endl;
	}

	// try to write to output file
	Size vidS = Size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
	outputVideo.open(outName, CV_FOURCC('P','I','M','1'), capture.get(CV_CAP_PROP_FPS), vidS, true);
	if (!outputVideo.isOpened()) {
		cerr << "Unable to write to output video." << endl;
		return EXIT_FAILURE;
	}

	// build frame buffer and background subtractor
	pMOG2 = createBackgroundSubtractorMOG2(500, 30., true);
	
	// main loop over frames
	while (capture.read(curFrame) && (char)keyInput != 'q')
	{
		++nFrames;
		
		cvtColor(curFrame, grayFrame, CV_BGR2GRAY);	// convert to grayscale
		threshold(grayFrame, grayFrame, 128., 0., CV_THRESH_TRUNC); // try to mitigate (white) reflections by truncating the current frame
		GaussianBlur(grayFrame, grayFrame, Size(7, 7), 0, 0);

		pMOG2->apply(grayFrame, fgMaskMOG2);
		
		// erode and dilate to remove some noise
		erode(fgMaskMOG2, frameDil, strucElem);
		dilate(frameDil, frameDil, strucElem);

		// dilate and erode to remove holes from foreground
		dilate(frameDil, frameDil, strucElem);
		erode(frameDil, frameDil, strucElem);

		// canny to find foreground outlines
		Canny(frameDil, canny_out, 100, 200, 3);

		// find contours, sort by contour size (descending)
		findContours(canny_out, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); // find contours
		sort(contours.begin(), contours.end(), rvs_cmp_contour_area); // sort by contour area, beginning with the largest

		// determine largest "moving" object
		int iMaxSize = 0;
		bool bFoundCloseContour = false;
		for (unsigned int i = 0; i < contours.size(); i++)
		{
			if (contourArea(contours[i]) < CONTOUR_AREA_THRESH) // ignore contours which are too small (noise)
				break;

			// ignore contours which are too far away from the last frame
			Rect boun = boundingRect(contours[i]); // bounding rect
			Point bounCenter = (boun.tl() + boun.br())/2;

			if (i == 0) // preemptively save largest contour to get back to if no "close" contour is found.
			{
				lastRect = boun;
				lastPos = bounCenter;
			}

			// distance validity check, but only if we recently had track of the object
			if (nFrames > 1 && nFailedFrames < 10)
			{
				int dx = bounCenter.x - lastPos.x;
				int dy = bounCenter.y - lastPos.y;
				int dist2 = dx*dx + dy*dy;
				//cout << bounCenter << " " << lastPos << endl;
				if (dist2 > DELTA_SQ_THRESH) // too far away... try next contour
					continue;
			}

			lastRect = boun;
			lastPos = bounCenter;
			bFoundCloseContour = true;
			++nSmoothFrames;
			break;
		}

		if (contours.size() == 0) {
			// we don't see anything.
			++nBlindFrames;
		} else { nBlindFrames = 0; }

		// update last known position if smooth transition occured
		if (bFoundCloseContour) {
			nFailedFrames = 0;
			lastDx = lastPos.x - lastKnownPos.x;
			lastDy = lastPos.y - lastKnownPos.y;

			lastKnownRect = lastRect;
			lastKnownPos = lastPos;

			plotPos = lastKnownPos;

			if (bTrace) { // draw trace
				if (lastKnownPositions.size() > LAST_POS_BUFFER_SIZE)
					lastKnownPositions.pop_front();
				lastKnownPositions.push_back(lastPos);
				
				list<Point>::iterator it;
				int i = 0;
				for (it = lastKnownPositions.begin(); it != lastKnownPositions.end(); it++)	{
					Scalar color(180, 90, 30);
					circle(*pOutMat, *it, 5, color, 2 * i);
					++i;
				}
			}
		} else {
			++nFailedFrames;
			// guess based on velocity extrapolation
			estimatePos.x = lastKnownPos.x + nFailedFrames*lastDx;
			estimatePos.y = lastKnownPos.y + nFailedFrames*lastDy;

			if (estimatePos.x < 0 || estimatePos.y < 0 || estimatePos.x >= capture.get(CV_CAP_PROP_FRAME_WIDTH) ||
				estimatePos.y >= capture.get(CV_CAP_PROP_FRAME_HEIGHT) || nFailedFrames >= 10) {
				// we've totally lost track, cancel velocity extrapolation guess
				plotPos = lastKnownPos;
				nFailedFrames = 0;
			} else {
				plotPos = estimatePos;
			}
		}

		// draw overlay (rect frame, mid point and text)
		if (bOverlay) {
			if (nBlindFrames < 6 && bFoundCloseContour) {
				circle(*pOutMat, plotPos, 5, Scalar(255, 120, 0), 10, 8);
				rectangle(*pOutMat, lastKnownRect, Scalar(0, 255, 0), 3);
			}

			vector<ostringstream> text(4);
			const int lineSkip = 16;
			text[0] << "Frame: " << nFrames; // frame counter
			text[1] << "Object X: " << lastKnownPos.x; // moving object coordinates
			text[2] << "Object Y: " << lastKnownPos.y;
			text[3] << "Smooth rate: " << setprecision(3) << 100.0*nSmoothFrames / nFrames << "%"; // tracking percentage

			for (unsigned int line = 0; line < text.size(); line++) {
				putText(*pOutMat, text[line].str(), Point(10, 22 + line*lineSkip), CV_FONT_HERSHEY_PLAIN, 1., Scalar(180., 0., 0.));
			}
		}
		
		// cleanup temporary vectors (VS2013 stability issues)
		contours.clear();
		hierarchy.clear();

		outputVideo << *pOutMat; // add output video frame
		imshow("Motion tracking", *pOutMat); // draw frame
		keyInput = waitKey(5); // allow time for event loop
	}

	// release files
	outputVideo.release(); 
	capture.release();

	return EXIT_SUCCESS;
}
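// The motion-tracking example above relies on rvs_cmp_contour_area() and on the constants
// CONTOUR_AREA_THRESH, DELTA_SQ_THRESH and LAST_POS_BUFFER_SIZE, none of which appear in the
// snippet. A minimal sketch of plausible definitions; the numeric values are assumptions
// chosen only to make the snippet self-contained, not the original author's tuning.
#include <opencv2/opencv.hpp>

const double CONTOUR_AREA_THRESH  = 400.0;      // ignore contours smaller than this (px^2)
const int    DELTA_SQ_THRESH      = 100 * 100;  // max squared jump between frames (px^2)
const size_t LAST_POS_BUFFER_SIZE = 20;         // length of the drawn 'bubble' trace

// comparator for std::sort: larger contour areas first (descending order)
bool rvs_cmp_contour_area(const cv::Mat& a, const cv::Mat& b)
{
    return cv::contourArea(a) > cv::contourArea(b);
}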
示例#15
0
int main(int argc, char *argv[])
{
  help();

  if (argc != 4)
  {
    cout << "Not enough parameters" << endl;
    return -1;
  }

  const string source    = argv[1];       // the source file name
  const bool askOutputType = argv[3][0] =='Y';  // If false it will use the inputs codec type

  VideoCapture inputVideo(source);        // Open input
  if (!inputVideo.isOpened())
  {
    cout  << "Could not open the input video: " << source << endl;
    return -1;
  }

  string::size_type pAt = source.find_last_of('.');          // Find extension point
  const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi";   // Form the new name with container
  int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC));   // Get Codec Type- Int form

  // Transform from int to char via Bitwise operators
  char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};

  Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH),  // Acquire input size
          (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));

  VideoWriter outputVideo;                    // Open the output
  if (askOutputType)
    outputVideo.open(NAME, ex=-1, inputVideo.get(CV_CAP_PROP_FPS), S, true);
  else
    outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, true);

  if (!outputVideo.isOpened())
  {
    cout  << "Could not open the output video for write: " << source << endl;
    return -1;
  }

  cout << "Input frame resolution: Width=" << S.width << "  Height=" << S.height
     << " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl;
  cout << "Input codec type: " << EXT << endl;

  int channel = 2; // Select the channel to save
  switch(argv[2][0])
  {
  case 'R' : channel = 2; break;
  case 'G' : channel = 1; break;
  case 'B' : channel = 0; break;
  }
  Mat src, res;
  vector<Mat> spl;

  for(;;) //Show the image captured in the window and repeat
  {
    inputVideo >> src;        // read
    if (src.empty()) break;     // check if at end

    split(src, spl);        // process - extract only the correct channel
    for (int i =0; i < 3; ++i)
      if (i != channel)
        spl[i] = Mat::zeros(S, spl[0].type());
     merge(spl, res);

     //outputVideo.write(res); //save or
     outputVideo << res;
  }

  cout << "Finished writing" << endl;
  return 0;
}
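// Usage note for the channel-splitting example above (inferred from the code; the binary
// name is illustrative):
//     ./video-write input.avi R Y
// argv[1] is the source video, argv[2][0] picks the channel to keep (R, G or B) and is also
// appended to the output name ("<source-without-extension><channel>.avi"), and argv[3][0]=='Y'
// requests an interactive codec choice (fourcc -1, which on Windows typically opens a
// codec-selection dialog) instead of reusing the input codec.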
示例#16
0
int main(int argc, char** argv)
{
	MEMORYSTATUSEX statex;

	statex.dwLength = sizeof(statex);

	GlobalMemoryStatusEx(&statex);

	unsigned long long BufferMax = statex.ullAvailPhys / 4;	// 64-bit so large RAM sizes do not overflow
	cout << "Buffer memory: " << BufferMax / 1024. / 1024. << " MB " << endl;


	string filename;
	cout << "Output file [.avi will be appended]: ";
	while (filename.empty())
		cin >> filename;
	filename.append(".avi");
	float timer = 0;
	cout << "Delay between frames [e.g. \"1\" / \"0.5\" / \"0.3\"]: ";
	while (timer <= 0)
		cin >> timer;
	double outfps = 0;
	cout << "Output FPS [higher values give a faster video, default: 30, STAY BELOW 30]: ";
	while (outfps <= 0 || outfps > 30)	// re-prompt until a value in (0, 30] is entered
		cin >> outfps;



	WIN win("Preview... Beenden mit ESCAPE");
	int framec = 0;
	clock_t lsttime = clock();
	Mat minput = hwnd2mat(GetDesktopWindow());
	queue<Mat> toWrite;
	cv::Size sizefull = minput.size();
	VideoWriter file;
	cout << "FEHLERMELDUNG IGNORIEREN:::" << endl;
	cv::Size sizeprev = sizefull;
	sizeprev.height = sizefull.height / 2;
	sizeprev.width = sizefull.width / 2;
	bool run = true;
	system("cls");
#pragma omp parallel num_threads(2) shared(run)
	{
		if (omp_get_thread_num() == 0)
		{
			while (waitKey(1) != 27)
			{
				lsttime = clock();

				//Get
				minput = hwnd2mat(GetDesktopWindow());
				//write
				//file.write(minput);
#pragma omp critical(list)
				toWrite.push(minput);
				if (omp_get_num_threads() == 1)
					if (((minput.dataend - minput.datastart) * toWrite.size()) > BufferMax)
					{
						cout << "ONLY ONE THREAD!!!" << endl;
						cout << endl;
						while (!toWrite.empty())
						{
							cout << "\rToWrite:" << toWrite.size() << "   ";
							file.write(toWrite.front());
							toWrite.pop();
						}
						cout << endl;
					}
				//Convert for preview
				Mat halfsized;
				cv::resize(minput, halfsized, sizeprev);
				//show
				cout << "\rFrame:[ " << setw(5) << ++framec << "]  " << setprecision(5) << ((minput.dataend - minput.datastart) * toWrite.size()) / 1024. / 1024. << "  MB   " << toWrite.size() << "     ";
				do{ win.show(halfsized); } while (double(clock() - lsttime) / CLOCKS_PER_SEC < timer);
			}
			run = false;
#pragma omp flush(run)
		}
		if (omp_get_thread_num() == 1 || omp_get_num_threads() == 1)
		{
			file.open(filename, CV_FOURCC('M', 'S', 'V', 'C'), outfps, sizefull, true);
			//file.write(minput);
			if (!file.isOpened())
			{
				system("pause");
				//return NULL;
			}
			system("cls");
			while (run)
			{
				if (((minput.dataend - minput.datastart) * toWrite.size()) > BufferMax)
					while (!toWrite.empty())
					{
#pragma omp critical(list)
						{
							file.write(toWrite.front());	// pop inside the same critical section to avoid racing the capture thread
							toWrite.pop();
						}
					}
				Sleep(1000);
#pragma omp flush(run)
			}
			while (!toWrite.empty())
			{
				cout << "\rToWrite:" << toWrite.size() << "   ";
#pragma omp critical(list)
				{
					file.write(toWrite.front());
					toWrite.pop();
				}
			}
		}
	}
}
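// The screen-recording example above calls hwnd2mat() and a small WIN preview-window wrapper
// that are not shown. A minimal, hedged sketch of a plausible hwnd2mat() using the Win32 GDI
// API; the conversion to 3-channel BGR at the end is an assumption made so the frames can be
// fed straight to VideoWriter.
#include <windows.h>
#include <opencv2/opencv.hpp>

cv::Mat hwnd2mat(HWND hwnd)
{
    HDC hwindowDC = GetDC(hwnd);                        // device context of the window (the desktop here)
    HDC hwindowCompatibleDC = CreateCompatibleDC(hwindowDC);

    RECT windowsize;
    GetClientRect(hwnd, &windowsize);
    int width  = windowsize.right;
    int height = windowsize.bottom;

    cv::Mat bgra(height, width, CV_8UC4);               // GDI hands back 32-bit BGRA pixels

    HBITMAP hbwindow = CreateCompatibleBitmap(hwindowDC, width, height);
    BITMAPINFOHEADER bi = {};
    bi.biSize        = sizeof(BITMAPINFOHEADER);
    bi.biWidth       = width;
    bi.biHeight      = -height;                         // negative height -> top-down rows
    bi.biPlanes      = 1;
    bi.biBitCount    = 32;
    bi.biCompression = BI_RGB;

    SelectObject(hwindowCompatibleDC, hbwindow);
    BitBlt(hwindowCompatibleDC, 0, 0, width, height, hwindowDC, 0, 0, SRCCOPY);
    GetDIBits(hwindowCompatibleDC, hbwindow, 0, height, bgra.data, (BITMAPINFO*)&bi, DIB_RGB_COLORS);

    DeleteObject(hbwindow);
    DeleteDC(hwindowCompatibleDC);
    ReleaseDC(hwnd, hwindowDC);

    cv::Mat bgr;
    cv::cvtColor(bgra, bgr, cv::COLOR_BGRA2BGR);        // VideoWriter expects 3-channel BGR frames
    return bgr;
}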