Example #1
0
int main(int argc, char * argv[])
{
    // Hand-tracking demo: detects a hand, tracks it with a Compressive
    // Tracker (CT), and streams throttle/direction commands over UART.
    // Relies on file-level globals: box, frame, first_frame, current_gray,
    // fore_frame, skinmask, first_flag, gotHand, ctInitFlag, c, fd,
    // unlock/lock command strings, open_error_flag, gasValue, dirValue,
    // and the rgb_* display colors.
    //init UART
    senderInit();
    VideoCapture capture;
    capture.open(0);
    // Init camera
    if (!capture.isOpened())
    {
        cout << "capture device failed to open!" << endl;
        return 1;
    }
    // Fullscreen output window for the tracker display.
    namedWindow("CT", CV_WINDOW_NORMAL);
    setWindowProperty("CT", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    // CT framework
    CompressiveTracker ct;
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    //box Init: initial tracker window, also the neutral control position
    box = Rect(130, 130, 60, 70);//Rect(x,y,width,height)
    int x0=box.x;
    int y0=box.y;
    int dx=0,dy=0;
    // Run-time
    // Send the "unlock" command once before the tracking loop starts
    // (only if the serial port opened successfully).
    if(!open_error_flag)
       write(fd,unlock, strlen(unlock));
    while(capture.read(frame))
    {
        // Capture the very first frame as the static background reference.
        // NOTE(review): VideoCapture frames are BGR, so CV_RGB2GRAY swaps the
        // R/B channel weights; CV_BGR2GRAY is the usual constant — confirm
        // whether the swap is intentional.
        if(first_flag){
            cvtColor(frame, first_frame, CV_RGB2GRAY);
            first_flag=0;
        }
        // NOTE(review): cvCreateMat allocates a new C-API matrix every frame
        // and nothing here releases it — this looks like a per-frame leak;
        // verify how the global skinmask is declared and managed.
        skinmask = cvCreateMat(frame.rows, frame.cols, CV_8UC1);
        // get frame
        cvtColor(frame, current_gray, CV_RGB2GRAY);
        // Foreground = |background - current| (simple background subtraction).
        absdiff(first_frame,current_gray,fore_frame);
       //imshow("foreFrame",fore_frame);
        // Until a hand is detected keep scanning; ESC (27) exits early.
        if(!gotHand){
            gotHand=getHand();
            if(gotHand){
                ctInitFlag=1;
            }
            if ((c = waitKey(15)) == 27){
                return 0;
            }
            continue;
        }
        // Process Frame: mask the foreground with the skin-color mask so the
        // tracker only sees skin-colored moving pixels.
        skintracker(frame,skinmask);
        fore_frame = fore_frame.mul(skinmask);
        // CT initialization (one-shot, right after the hand is first found)
        if(ctInitFlag){
            ct.init(fore_frame, box);
            ctInitFlag=0;
        }
        //imshow("fore&skinmasked", fore_frame);
        ct.processFrame(fore_frame, box);
        rectangle(frame, box, Scalar(rgb_b,rgb_g,rgb_r));
        // Display (mirrored so it behaves like a mirror for the user)
        flip(frame, frame, 1);
        imshow("CT", frame);
        // Displacement of the tracked box from its initial position drives
        // the controls: dy -> throttle (gas), dx -> direction.
        dx=x0-box.x;
        dy=y0-box.y;
        getGasValue(dy);
        getDirValue(dx);
        calControlStr(gasValue,dirValue);
        sendControlStr();
        // ESC: send "lock", give the UART a second to drain, then quit.
        if ((c = waitKey(15)) == 27){
            if(!open_error_flag)
                write(fd,lock, strlen(lock));
            sleep(1);
            break;
        }
    }
    
    return 0;
}
/// Calculates the corner pixel locations as detected by each camera
/// In: s: board settings, includes size, square size and the number of corners
///     inputCapture1: video capture for camera 1
///     inputCapture2: video capture for camera 2
///     iterations: number of chessboard images to take
/// Out: imagePoints1: pixel coordinates of chessboard corners for camera 1
///      imagePoints2: pixel coordinates of chessboard corners for camera 2
/// Returns true once `iterations` corner sets have been accepted by the user,
/// false if the user aborts with 's'.
bool RetrieveChessboardCorners(vector<vector<Point2f> >& imagePoints1, vector<vector<Point2f> >& imagePoints2, BoardSettings s, VideoCapture inputCapture1,VideoCapture inputCapture2, int iterations)
{
    destroyAllWindows();
    Mat image1,image2;
    vector<Point2f> pointBuffer1;
    vector<Point2f> pointBuffer2;
    clock_t prevTimeStamp = 0;
    // FIX: found1/found2 were uninitialized; initialize them explicitly.
    bool found1 = false, found2 = false;
    int count = 0;
    while (count != iterations){
        // FIX: grab the current frames BEFORE handling keys. The original
        // read them at the bottom of the loop, so pressing 'c' on the very
        // first iteration passed empty Mats to findChessboardCorners.
        inputCapture1.read(image1);
        inputCapture2.read(image2);
        imshow("Image View1", image1);
        imshow("Image View2", image2);
        char c = waitKey(15);
        if (c == 's'){
            cerr << "Calibration stopped" << endl;
            return false;
        }
        // try find chessboard corners
        else if(c == 'c'){
            // ADAPTIVE_THRESH -> use adaptive thresholding to convert image to B&W
            // FAST_CHECK -> Terminates call earlier if no chessboard in image
            // NORMALIZE_IMAGE -> normalize image gamma before thresholding
            // FILTER_QUADS -> uses additional criteria to filter out false quads
            found1 = findChessboardCorners(image1, s.boardSize, pointBuffer1,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
                                           CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
            found2 = findChessboardCorners(image2, s.boardSize, pointBuffer2,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
                                           CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
            
            if (found1 && found2 && (pointBuffer1.size() >= s.cornerNum) && (pointBuffer2.size() >= s.cornerNum)){
                // if time delay passed refine accuracy and store
                if ((clock() - prevTimeStamp) > CAPTURE_DELAY * 1e-3*CLOCKS_PER_SEC){
                    Mat imageGray1, imageGray2;
                    cvtColor(image1, imageGray1, COLOR_BGR2GRAY);
                    cvtColor(image2, imageGray2, COLOR_BGR2GRAY);
                    
                    // refines corner locations
                    // Size(5,5) -> size of the search window
                    // Size(-1,-1) -> indicates no dead zone in search size
                    // TermCriteria -> max 1000 iterations, to get accuracy of 0.01
                    cornerSubPix(imageGray1, pointBuffer1, Size(5,5), Size(-1, -1),
                                 TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
                    cornerSubPix(imageGray2, pointBuffer2, Size(5,5), Size(-1, -1),
                                 TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
                    
                    drawChessboardCorners(image1, s.boardSize, Mat(pointBuffer1), found1);
                    imshow("Image View1", image1);
                    drawChessboardCorners(image2, s.boardSize, Mat(pointBuffer2), found2);
                    imshow("Image View2", image2);
                    
                    // user verifies the correct corners have been found
                    c = waitKey(0);
                    if (c == 's'){
                        return false;
                    }
                    if (c == 'y'){
                        // store the points and store time stamp
                        imagePoints1.push_back(pointBuffer1);
                        imagePoints2.push_back(pointBuffer2);
                        prevTimeStamp = clock();
                        count++;
                        cerr << "Count: " << count << endl;
                    }
                }
            }
        }
    }
    // found all corners
    return true;
}
Example #3
0
int main(int argc, char* argv[])
{
    // HSV color tracker that streams the tracked y coordinate to an
    // oximeter-style device over a USB serial port.

    /*        SERIAL CONFIGURATION      */
    struct termios tio;
    struct termios stdio;
    struct termios old_stdio;
    int tty_usb;

    /*
     * CONFIGURE USB PORT
     */
    configureOximeter(&stdio, &tio, &old_stdio);

    /*
     * OPEN USB PORT FOR I/O COMMUNICATION
     */
    openUSB(&tty_usb, &tio);

    /*     END OF SERIAL CONFIGURATION */

    //some boolean variables for different functionality within this program
    bool trackObjects = true;
    bool useMorphOps = true;
    //Matrix to store each frame of the webcam feed
    Mat cameraFeed;
    //matrix storage for HSV image
    Mat HSV;
    //matrix storage for binary threshold image
    Mat threshold;
    //x and y values for the location of the object
    int x = 0, y = 0;
    //create slider bars for HSV filtering
    createTrackbars();
    //video capture object to acquire webcam feed
    VideoCapture capture;
    //open capture object at location zero (default location for webcam)
    capture.open(0);
    //set height and width of capture frame
    capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
    //start an infinite loop where webcam feed is copied to cameraFeed matrix
    //all of our operations will be performed within this loop
    while (1){
        //store image to matrix
        capture.read(cameraFeed);
        //convert frame from BGR to HSV colorspace
        cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
        //filter HSV image between values and store filtered image to
        //threshold matrix
        inRange(HSV, Scalar(H_MIN,S_MIN,V_MIN), Scalar(H_MAX,S_MAX,V_MAX), threshold);
        //perform morphological operations on thresholded image to eliminate noise
        //and emphasize the filtered object(s)
        if (useMorphOps)
            morphOps(threshold);
        //pass in thresholded frame to our object tracking function
        //this function will return the x and y coordinates of the
        //filtered object
        if (trackObjects)
        {
            trackFilteredObject(x, y, threshold, cameraFeed);
            // Send the tracked y coordinate over the serial port.
            // FIX: the original wrote 8 bytes from a 4-byte int, reading
            // past the end of `y`; write exactly sizeof(y) bytes instead.
            write(tty_usb, &y, sizeof(y));
            //read(tty_usb, &leitura, 4); // read back from the serial port
        }
        // display the frames
        imshow(windowName2, threshold);
        imshow(windowName, cameraFeed);
        imshow(windowName1, HSV);

        // delay so the screen can refresh;
        // without this command no image appears!
        waitKey(200);
    }

    return 0;
}
int main(int argc, char* argv[])
{
	//some boolean variables for different functionality within this
	//program
	bool trackObjects = true;
	bool useMorphOps = true;
	calibrationMode = true;
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	//matrix storage for HSV image
	Mat HSV;
	//matrix storage for binary threshold image
	Mat threshold;
	//x and y values for the location of the object
	int x = 0, y = 0;
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
	//must create a window before setting mouse callback
	cv::namedWindow(windowName);
	capture.read(cameraFeed);
	HSVMouseSelector hsvMouseSelector(&hsv, &cameraFeed);
	//set mouse callback function to be active on "Webcam Feed" window
	//we pass the handle to our "frame" matrix so that we can draw a rectangle to it
	//as the user clicks and drags the mouse
	setMouseCallback(windowName, ImageUtils::MouseCallback, &hsvMouseSelector);
	//initiate mouse move and drag to false 

	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while (1){
		//store image to matrix
		capture.read(cameraFeed);
		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
		//set HSV values from user selected region
		hsvMouseSelector.UpdateFrame(&HSV);
		//filter HSV image between values and store filtered image to
		//threshold matrix
		inRange(HSV, hsv.ToMin(), hsv.ToMax(), threshold);
		//perform morphological operations on thresholded image to eliminate noise
		//and emphasize the filtered object(s)
		if (useMorphOps)
			morphOps(threshold);
		//pass in thresholded frame to our object tracking function
		//this function will return the x and y coordinates of the
		//filtered object
		if (trackObjects)
			trackFilteredObject(x, y, threshold, cameraFeed);

		//show frames 
		if (calibrationMode == true){

			//create slider bars for HSV filtering
			createTrackbars();
			imshow(windowName1, HSV);
			imshow(windowName2, threshold);
		}
		else{

			destroyWindow(windowName1);
			destroyWindow(windowName2);
			destroyWindow(trackbarWindowName);
		}
		imshow(windowName, cameraFeed);
		


		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		//also use waitKey command to capture keyboard input
		switch (waitKey(30)) {
			case 99:
				calibrationMode = !calibrationMode;//if user presses 'c', toggle calibration mode
				break;
			case 27:
				return 0;
		}
	}






	return 0;
}
Example #5
0
// In general a suffix of 1 means previous frame, and 2 means current frame.
// However, we start processing the next frame while the GPU is working on current...
// So at a certain point frame 1 shifts down to 0, 2 shifts down to 1, and the new 2 is loaded.
int main( int argc, char** argv ) {
    // gpuFacade gpu;
    // gpu.set_values(3,4);
    // cerr << "!! " << gpu.area() << endl;

    // This must be an integer multiple of 512.
    // Specifically, half-multiples of the number of SM's for your GPU are sensible.
    // I have 10 streaming multiprocessors, so I chose 15*512 = 7680.
    const int maxKP = 512 * 15;
    const bool showMatches = true;
    // Shows every Nth processed frame's matches.
    const int showMatchesInterval = 10;
    const bool showVideo = true;
    // Shows every Nth processed frame.
    const int showVideoInterval = 1;
    // FIX: totalMatches (and WIDTH/HEIGHT when argc doesn't match any branch
    // below) were read uninitialized; zero-initialize all of them.
    int WIDTH = 0, HEIGHT = 0, totalMatches = 0, totalInliers = 0;
    const int matchThreshold = 12;
    // Discard this many frames for each one processed. Change with +/- keys while running.
    int skipFrames = 0;
    // Threshold for FAST detector
    int threshold = 20;
    int targetKP = 3000;
    int tolerance = 200;
    int maxLoops = 100;//4200;
    const bool gnuplot = true;
    double defect = 0.0;
    int extractions = 0;

    VideoCapture cap;
    if (argc == 1) {
        cap = VideoCapture(0);
        WIDTH  = cap.get(CAP_PROP_FRAME_WIDTH);
        HEIGHT = cap.get(CAP_PROP_FRAME_HEIGHT);
    }
    if (argc == 2 || argc == 3) {
        cap = VideoCapture(argv[1]);
        WIDTH  = cap.get(CAP_PROP_FRAME_WIDTH);
        HEIGHT = cap.get(CAP_PROP_FRAME_HEIGHT);
        if (argc == 3) {
            // Optional third argument: number of initial frames to skip.
            for (int i=0; i<atoi(argv[2]); i++) {
                cap.grab();
            }
        }
    }
    if (argc == 4) {
        cap = VideoCapture(0);
        WIDTH  = atoi(argv[2]);
        HEIGHT = atoi(argv[3]);
        cap.set(CAP_PROP_FRAME_WIDTH,  WIDTH);
        cap.set(CAP_PROP_FRAME_HEIGHT, HEIGHT);
    }

    // Approximate pinhole intrinsics: focal length f*WIDTH, principal point at image center.
    double f = 0.4;
    double data[]= {f*WIDTH,  0.0,  WIDTH*0.5,  0.0, f*HEIGHT, HEIGHT*0.5, 0.0, 0.0, 1.0};
    Mat K(3, 3, CV_64F, data);
    Mat F, R, T, rod, mask;
    Mat img0, img1, img2, img1g, img2g, imgMatches, E, rodOld;

    cap >> img1;
    cap >> img2;
    cv::cvtColor(img1, img1g, CV_BGR2GRAY);
    cv::cvtColor(img2, img2g, CV_BGR2GRAY);
    if (showMatches) {
        namedWindow("Matches", WINDOW_NORMAL);
    }
    waitKey(1);
    if (showVideo) {
        namedWindow("Video", WINDOW_NORMAL);
    }
    waitKey(1);
    resizeWindow("Matches", 1920/2, 540/2);
    resizeWindow("Video", 960, 540);
    moveWindow("Matches", 0, 540+55);
    moveWindow("Video", 0, 0);
    waitKey(1);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    vector<KeyPoint> keypoints0, keypoints1, keypoints2;
    vector<DMatch> goodMatches;
    vector<Point2f> p1, p2; // Point correspondences for recovering pose.
    int numKP0, numKP1, numKP2; // The actual number of keypoints we are dealing with: just keypoints#.size(), but capped at maxKP.
    int key = -1;
    clock_t timer; // (removed unused timer2)
    float time;

    // Sizes for device and host pointers
    size_t sizeK = maxKP * sizeof(float) * 5; // K for keypoints
    size_t sizeI = WIDTH * HEIGHT * sizeof(unsigned char); // I for Image
    size_t sizeD = maxKP * (2048 / 32) * sizeof(unsigned int); // D for Descriptor
    size_t sizeM = maxKP * sizeof(int); // M for Matches
    size_t sizeMask = 64 * sizeof(float);

    // Host pointers
    float *h_K1, *h_K2;
    cudaMallocHost((void **) &h_K1, sizeK);
    cudaMallocHost((void **) &h_K2, sizeK);
    // For reasons opaque to me, allocating both (but not either) h_M1 or h_M2
    // with cudaMallocHost segfaults, apparently after graceful exit? So neither of them are pinned.
    int h_M1[maxKP];
    int h_M2[maxKP];
    float h_mask[64];
    for (int i=0; i<64; i++) { h_mask[i] = 1.0f; }

    // Device pointers
    unsigned char *d_I;
    unsigned int *d_D1, *d_D2, *uIntSwapPointer;
    int *d_M1, *d_M2;
    float *d_K, *d_mask;
    cudaCalloc((void **) &d_K, sizeK);
    cudaCalloc((void **) &d_D1, sizeD);
    cudaCalloc((void **) &d_D2, sizeD);
    cudaCalloc((void **) &d_M1, sizeM);
    cudaCalloc((void **) &d_M2, sizeM);
    // FIX: d_mask holds the 64-float mask; the original allocated it with
    // sizeM (the match-buffer size) instead of the declared sizeMask.
    cudaCalloc((void **) &d_mask, sizeMask);

    // The patch triplet locations for LATCH fits in texture memory cache.
    cudaArray* patchTriplets;
    initPatchTriplets(patchTriplets);
    size_t pitch;
    initImage(&d_I, WIDTH, HEIGHT, &pitch);
    initMask(&d_mask, h_mask);

    // Events allow asynchronous, nonblocking launch of subsequent kernels after a given event has happened,
    // such as completion of a different kernel on a different stream.
    cudaEvent_t latchFinished;
    cudaEventCreate(&latchFinished);
    // You should create a new stream for each bitMatcher kernel you want to launch at once.
    cudaStream_t streanumKP1, streanumKP2;
    cudaStreamCreate(&streanumKP1);
    cudaStreamCreate(&streanumKP2);

    // Prime the pipeline: detect/describe/match on the first two frames.
    FAST(img1g, keypoints1, threshold);
    extractions += keypoints1.size();
    latch( img1g, d_I, pitch, h_K1, d_D1, &numKP1, maxKP, d_K, &keypoints1, d_mask, latchFinished );
    FAST(img2g, keypoints2, threshold); // This call to fast is concurrent with above execution.
    extractions += keypoints2.size();
    latch( img2g, d_I, pitch, h_K2, d_D2, &numKP2, maxKP, d_K, &keypoints2, d_mask, latchFinished );
    bitMatcher( d_D1, d_D2, numKP1, numKP2, maxKP, d_M1, matchThreshold, streanumKP1, latchFinished );
    bitMatcher( d_D2, d_D1, numKP2, numKP1, maxKP, d_M2, matchThreshold, streanumKP2, latchFinished );
    timer = clock();
    getMatches(maxKP, h_M1, d_M1);
    getMatches(maxKP, h_M2, d_M2);
    // Keep only mutual ("cross-check") matches.
    for (int i=0; i<numKP1; i++) {
        if (h_M1[i] >= 0 && h_M1[i] < numKP2 && h_M2[h_M1[i]] == i) {
            goodMatches.push_back( DMatch(i, h_M1[i], 0)); // For drawing.
            p1.push_back(keypoints1[i].pt); // For recovering pose.
            p2.push_back(keypoints2[h_M1[i]].pt);
        }
    }

    img1.copyTo(img0);
    img2.copyTo(img1);
    cap.read(img2);
    cvtColor(img2, img2g, CV_BGR2GRAY);

    keypoints0 = keypoints1;
    keypoints1 = keypoints2;

    uIntSwapPointer = d_D1;
    d_D1 = d_D2;
    d_D2 = uIntSwapPointer;

    numKP0 = numKP1;
    numKP1 = numKP2;

    FAST(img2g, keypoints2, threshold);
    int loopIteration = 0;
    for (; loopIteration < maxLoops || maxLoops == -1; loopIteration++) { // Main Loop.
        { // GPU code for descriptors and matching.
            cudaEventRecord(start, 0);
            extractions += keypoints2.size();
            latch( img2g, d_I, pitch, h_K2, d_D2, &numKP2, maxKP, d_K, &keypoints2, d_mask, latchFinished);
            bitMatcher( d_D1, d_D2, numKP1, numKP2, maxKP, d_M1, matchThreshold, streanumKP1, latchFinished );
            bitMatcher( d_D2, d_D1, numKP2, numKP1, maxKP, d_M2, matchThreshold, streanumKP2, latchFinished );
            cudaEventRecord(stop, 0);
        }
        timer = clock();
        { // Put as much CPU code here as possible.
            { // Display matches and/or video to user.
                bool needToDraw = false;
                if (showMatches && loopIteration % showMatchesInterval == 0) { // Draw matches.
                    drawMatches( img0, keypoints0, img1, keypoints1,
                        goodMatches, imgMatches, Scalar::all(-1), Scalar::all(-1),
                        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
                    imshow( "Matches", imgMatches );
                    needToDraw = true;
                }
                if (showVideo && loopIteration % showVideoInterval == 0) {
                    imshow("Video", img1);
                    needToDraw = true;
                }
                if (needToDraw) {
                    key = waitKey(1);
                }
            }
            { // Handle user input.
                switch (key) {
                    case (-1):
                    break;
                    case (1048689): // q
                    case (113): // also q
                        return 0;
                    break;
                    case (1048695): // w
                        waitKey(0);
                    break;
                    case (1114027): // +
                        skipFrames++;
                        cerr << "For each processed frame we are now skipping " << skipFrames << endl;
                    break;
                    case (1114029): // -
                        // NOTE(review): max(1, --skipFrames) means pressing '-'
                        // at skipFrames==0 *raises* it to 1 and it can never
                        // return to 0; presumably max(0, ...) was intended —
                        // behavior kept as-is pending confirmation.
                        skipFrames = max(1, --skipFrames);
                        cerr << "For each processed frame we are now skipping " << skipFrames << endl;
                    break;
                    default:
                        cerr << "Currently pressed key is:   " << key << endl;
                    break;
                }
                key = -1;
            }
            { // Iterate the "logical" loop (get ready to process next frame)
                img1.copyTo(img0);
                img2.copyTo(img1);
                for (int i=0; i<skipFrames; i++) {
                    cap.grab();
                }
                cap.read(img2);
                if (img2.cols == 0) break;
                cvtColor(img2, img2g, CV_BGR2GRAY);

                keypoints0 = keypoints1;
                keypoints1 = keypoints2;

                uIntSwapPointer = d_D1;
                d_D1 = d_D2;
                d_D2 = uIntSwapPointer;

                numKP0 = numKP1;
                numKP1 = numKP2;
            }
            { // Solve for and output rotation vector (this gets piped to feedgnuplot).
                if (10 < p1.size() && 10 < p2.size()) {
                    E = findEssentialMat(p1, p2, f*WIDTH, Point2d(WIDTH*0.5f, HEIGHT*0.5f), RANSAC, 0.999, 3.0, mask);
                    int inliers = 0;
                    for (int i=0; i<mask.rows; i++) {
                        inliers += mask.data[i];
                    }
                    totalInliers += inliers;
                    double size = p1.size();
                    double r = inliers/max((double)size, 150.0);
                    r = 1.0 - min(r + 0.05, 1.0);
                    defect += r*r;
                    cout << "11:" << r*r << endl;

                    recoverPose(E, p1, p2, R, T, f*WIDTH, Point2d(WIDTH*0.5f, HEIGHT*0.5f), mask);
                    Rodrigues(R, rod);
                    if (loopIteration==0) {
                        rod.copyTo(rodOld);
                    }
                    // Reject pose estimates that jump too far from the previous one.
                    if (dist2(rod, rodOld) < 1.0) {
                        rod.copyTo(rodOld);
                    } else {
                        cerr << "Rejecting the recovered pose: " << rod.t() * 57.2957795 << endl;
                        // This commented out chunk of code is good for webcams. If you initialize with a bad value it will recover.
                        // const double alpha = 0.1; // Move our region of acceptable responses (only a little) closer to the observed (but presumed erroneous) value.
                        // for (int i=0; i<3; i++) {
                        //     rodOld.at<double>(i) = rodOld.at<double>(i)*(1.0-alpha) + rod.at<double>(i)*alpha;
                        // }
                        rodOld.copyTo(rod);
                    }
                } else {
                    defect += 1.0;
                    cout << "11:" << 1.0 << endl;
                    cerr << "Too few matches! Not going to try to recover pose this frame." << endl;
                }
                // To prevent the graphs from desynchronizing from each other, we have to output this unconditionally.
                if (gnuplot) {
                    for (int i=0; i<3; i++) {
                        cout << i << ":" << rod.at<double>(i) * 57.2957795 << endl; // Output Rodrigues vector, rescaled to degrees
                    }
                    // T is unit norm (scale-less) and often erroneously sign-reversed.
                    // if (T.at<double>(2) < 0) T = -T; // Assume dominate motion is forward... (this is not an elegant assumption)
                    // double theta = atan2(T.at<double>(0), T.at<double>(2));
                    // double phi = atan2(T.at<double>(1), T.at<double>(2));
                    // cout << 3 << ":" << theta * 57.2957795 << endl; // Plot polar translation angle
                    // cout << 4 << ":" << phi * 57.2957795 << endl; // Plot azimuthal translation angle
                }
            }
            { // run FAST detector on the CPU for next frame (get ready for next loop iteration).
                FAST(img2g, keypoints2, threshold);
                // Apply proportional control to threshold to drive it towards targetKP.
                int control = (int)(((float)keypoints2.size() - (float)targetKP) / (float)tolerance);
                threshold += min(100, control);
                if (threshold < 1) threshold = 1;
            }
        }
        if (gnuplot) {
            time = (1000*(clock() - timer)/(double)CLOCKS_PER_SEC);
            cout << "9:" << time << endl; // Plot CPU time.
            timer = clock();
        }
        { // Get new GPU results
            p1.clear();
            p2.clear();
            goodMatches.clear();
            getMatches(maxKP, h_M1, d_M1);
            getMatches(maxKP, h_M2, d_M2);
            cudaEventElapsedTime(&time, start, stop);
            if (gnuplot) {
                cout << "10:" << (time+(1000*(clock() - timer)/(double)CLOCKS_PER_SEC)) << endl; // Plot total asynchronous GPU time.
            }
            // Cross-checked matches between the (shifted) previous and current frame.
            for (int i=0; i<numKP0; i++) {
                if (h_M1[i] >= 0 && h_M1[i] < numKP1 && h_M2[h_M1[i]] == i) {
                    goodMatches.push_back( DMatch(i, h_M1[i], 0)); // For drawing matches.
                    p1.push_back(keypoints0[i].pt); // For recovering pose.
                    p2.push_back(keypoints1[h_M1[i]].pt);
                }
            }
        }
        if (gnuplot) {
            cout << "6:" << numKP1 << endl; // Plot number of keypoints.
            cout << "7:" << p1.size() << endl; // Plot number of matches.
            cout << "8:" << 100*threshold << endl; // Plot current threshold for FAST.
        }
        totalMatches += p1.size();
    }
    // Release GPU resources and report summary statistics.
    cudaFreeArray(patchTriplets);
    cudaFree(d_K);
    cudaFree(d_D1);
    cudaFree(d_D2);
    cudaFree(d_M1);
    cudaFree(d_M2);
    cudaFreeHost(h_K1);
    cudaFreeHost(h_K2);
    cerr << "Total matches: " << totalMatches << endl;
    cerr << "Total inliers: " << totalInliers << endl;
    cerr << "Defect: " << defect << endl;
    cerr << "Loop iteration: " << loopIteration << endl;
    cerr << "Extractions: " << extractions << endl;

    return 0;
}
int main(){
	// Motion detector: loops a video file, frame-differences consecutive
	// frames, thresholds/blurs the difference, and (optionally) tracks the
	// moving region. Keys: ESC quit, 't' toggle tracking, 'd' toggle debug
	// views, 'p' pause/resume.

	//these two can be toggled by pressing 'd' or 't'
	bool debugMode = false;
	bool trackingEnabled = true;
	//pause and resume code
	bool pause = false;
	// (removed unused local `objectDetected`)
	//set up the matrices that we will need
	//the two frames we will be comparing
	Mat frame1,frame2;
	//their grayscale images (needed for absdiff() function)
	Mat grayImage1,grayImage2;
	//resulting difference image
	Mat differenceImage;
	//thresholded difference image (for use in findContours() function)
	Mat thresholdImage;
	//video capture object.
	VideoCapture capture;

	// Play the video through four times total, re-opening the capture each
	// pass so it restarts from the beginning.
	for(int control=0; control <= 3; control++){

		capture.open("test2.mp4");
		//capture.open(0);

		if(!capture.isOpened()){
			cout<<"ERROR ACQUIRING VIDEO FEED\n";
			getchar();
			return -1;
		}

		//check if the video has reached its last frame.
		//we add '-1' because we are reading two frames from the video at a time.
		//if this is not included, we get a memory error!
		while(capture.get(CV_CAP_PROP_POS_FRAMES)<capture.get(CV_CAP_PROP_FRAME_COUNT)-2){

			//read first frame
			capture.read(frame1);
			//convert frame1 to gray scale for frame differencing
			cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
			//copy second frame
			capture.read(frame2);
			//convert frame2 to gray scale for frame differencing
			cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
			//perform frame differencing with the sequential images. This will output an "intensity image"
			//do not confuse this with a threshold image, we will need to perform thresholding afterwards.
			cv::absdiff(grayImage1,grayImage2,differenceImage);
			//threshold intensity image at a given sensitivity value
			cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
			if(debugMode==true){
				//show the difference image and threshold image
				cv::imshow("Difference Image",differenceImage);
				cv::imshow("Threshold Image", thresholdImage);
			}else{
				//if not in debug mode, destroy the windows so we don't see them anymore
				cv::destroyWindow("Difference Image");
				cv::destroyWindow("Threshold Image");
			}
			//blur the image to get rid of the noise. This will output an intensity image
			cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
			//threshold again to obtain binary image from blur output
			cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
			if(debugMode==true){
				//show the threshold image after it's been "blurred"
				imshow("Final Threshold Image",thresholdImage);
			}
			else {
				//if not in debug mode, destroy the windows so we don't see them anymore
				cv::destroyWindow("Final Threshold Image");
			}

			//if tracking enabled, search for contours in our thresholded image
			if(trackingEnabled){
				searchForMovement(thresholdImage,frame1);
			}

			//show our captured frame
			imshow("Frame1",frame1);
			//check to see if a button has been pressed.
			//this 10ms delay is necessary for proper operation of this program
			//if removed, frames will not have enough time to refresh and a blank
			//image will appear.
			switch(waitKey(10)){

				case 27: //'esc' key has been pressed, exit program.
					return 0;
				case 116: //'t' has been pressed. this will toggle tracking
					trackingEnabled = !trackingEnabled;
					if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
					else cout<<"Tracking enabled."<<endl;
					break;
				case 100: //'d' has been pressed. this will toggle debug mode
					debugMode = !debugMode;
					if(debugMode == false) cout<<"Debug mode disabled."<<endl;
					else cout<<"Debug mode enabled."<<endl;
					break;
				case 112: //'p' has been pressed. this will pause/resume the code.
					pause = !pause;
					if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
						// Busy-wait on waitKey() until 'p' is pressed again.
						while (pause == true){
							switch (waitKey()){
								case 112:
									//change pause back to false
									pause = false;
									cout<<"Code Resumed"<<endl;
									break;
							}
						}
					}
			}
		}
		//release the capture before re-opening and looping again.
		capture.release();
	}

	return 0;
}
Example #7
0
// Pyramidal Lucas-Kanade sparse optical flow demo.
// Input is either a left/right still-image pair (benchmark mode), a video
// file, or a camera stream; detected feature tracks are drawn as arrows.
int main(int argc, const char* argv[])
{
    // Option grammar for cv::CommandLineParser: image pair, camera id,
    // or video file, plus feature-detection tuning knobs.
    const char* keys =
        "{ h help           |                 | print help message }"
        "{ l left           |                 | specify left image }"
        "{ r right          |                 | specify right image }"
        "{ c camera         | 0               | enable camera capturing }"
        "{ v video          |                 | use video as input }"
        "{ o output         | pyrlk_output.jpg| specify output save path when input is images }"
        "{ points           | 1000            | specify points count [GoodFeatureToTrack] }"
        "{ min_dist         | 0               | specify minimal distance between points [GoodFeatureToTrack] }"
        "{ m cpu_mode       | false           | run without OpenCL }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.has("help"))
    {
        cout << "Usage: pyrlk_optical_flow [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    // NOTE(review): hard-coded to true, which makes the `goto nocamera`
    // below unreachable — a capture failure always returns EXIT_FAILURE
    // first. Presumably a leftover from a build that shipped default images.
    bool defaultPicturesFail = true;
    string fname0 = samples::findFile(cmd.get<string>("left"));
    string fname1 = samples::findFile(cmd.get<string>("right"));
    string vdofile = cmd.get<string>("video");
    string outfile = cmd.get<string>("output");
    int points = cmd.get<int>("points");
    double minDist = cmd.get<double>("min_dist");
    int inputName = cmd.get<int>("c");

    // Try the still-image pair first; empty Mats select the capture path.
    UMat frame0;
    imread(fname0, IMREAD_GRAYSCALE).copyTo(frame0);
    UMat frame1;
    imread(fname1, IMREAD_GRAYSCALE).copyTo(frame1);

    vector<cv::Point2f> pts(points);
    vector<cv::Point2f> nextPts(points);
    vector<unsigned char> status(points);
    vector<float> err;

    cout << "Points count : " << points << endl << endl;

    if (frame0.empty() || frame1.empty())
    {
        // Live capture path: track between consecutive frames.
        VideoCapture capture;
        UMat frame, frameCopy;
        UMat frame0Gray, frame1Gray;
        UMat ptr0, ptr1;

        if(vdofile.empty())
            capture.open( inputName );
        else
            capture.open(vdofile.c_str());

        int c = inputName ;
        if(!capture.isOpened())
        {
            if(vdofile.empty())
                cout << "Capture from CAM " << c << " didn't work" << endl;
            else
                cout << "Capture from file " << vdofile << " failed" <<endl;
            if (defaultPicturesFail)
                return EXIT_FAILURE;
            goto nocamera;
        }

        cout << "In capture ..." << endl;
        for(int i = 0;; i++)
        {
            if( !capture.read(frame) )
                break;

            if (i == 0)
            {
                // First frame only primes the "previous" buffer.
                frame.copyTo( frame0 );
                cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
            }
            else
            {
                // Ping-pong buffering: odd frames land in frame1, even
                // frames in frame0; ptr0 always points at the previous
                // gray image and ptr1 at the current one.
                if (i%2 == 1)
                {
                    frame.copyTo(frame1);
                    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
                    ptr0 = frame0Gray;
                    ptr1 = frame1Gray;
                }
                else
                {
                    frame.copyTo(frame0);
                    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
                    ptr0 = frame1Gray;
                    ptr1 = frame0Gray;
                }


                // Fresh corner detection on every frame pair, then LK flow.
                pts.clear();
                goodFeaturesToTrack(ptr0, pts, points, 0.01, 0.0);
                if(pts.size() == 0)
                    continue;
                calcOpticalFlowPyrLK(ptr0, ptr1, pts, nextPts, status, err);

                // Draw over whichever buffer holds the newest frame.
                if (i%2 == 1)
                    frame1.copyTo(frameCopy);
                else
                    frame0.copyTo(frameCopy);
                drawArrows(frameCopy, pts, nextPts, status, Scalar(255, 0, 0));
                imshow("PyrLK [Sparse]", frameCopy);
            }
            char key = (char)waitKey(10);

            if (key == 27)
                break;
            else if (key == 'm' || key == 'M')
            {
                // Toggle the OpenCL transparent-API backend at runtime.
                ocl::setUseOpenCL(!cv::ocl::useOpenCL());
                cout << "Switched to " << (ocl::useOpenCL() ? "OpenCL" : "CPU") << " mode\n";
            }
        }
        capture.release();
    }
    else
    {
        // Benchmark path: run the detector+flow LOOP_NUM times on the image
        // pair and report the average time (iteration 0 is a warm-up).
nocamera:
        if (cmd.has("cpu_mode"))
        {
            ocl::setUseOpenCL(false);
            std::cout << "OpenCL was disabled" << std::endl;
        }
        for(int i = 0; i <= LOOP_NUM; i ++)
        {
            cout << "loop" << i << endl;
            if (i > 0) workBegin();

            goodFeaturesToTrack(frame0, pts, points, 0.01, minDist);
            calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts, status, err);

            if (i > 0 && i <= LOOP_NUM)
                workEnd();

            if (i == LOOP_NUM)
            {
                cout << "average time (noCamera) : ";

                cout << getTime() / LOOP_NUM << " ms" << endl;

                drawArrows(frame0, pts, nextPts, status, Scalar(255, 0, 0));
                imshow("PyrLK [Sparse]", frame0);
                imwrite(outfile, frame0);
            }
        }
    }

    waitKey();

    return EXIT_SUCCESS;
}
Example #8
0
/**
 * @main function
 *
 * LightMusic: reads frames from a camera, locates a light spot in each frame
 * (lightDetect) and maps its position to an audible sine tone (SineWave)
 * played through ALSA. Prints the running FPS; ESC stops the program.
 *
 * argv: <camera_number> <buffer_length> <low_freq> <hi_freq>
 * Returns 0 on normal exit, -1 if the capture cannot be opened; exits with
 * EXIT_FAILURE on ALSA setup errors.
 **/
int main(int argc,char const *argv[])
{
    if (argc != 5)
    {
        printf("Invalid argumen!\n");
        printf("-- LightMusic <camera_number> <buffer_length> <low_freq> <hi_freq>\n");
        printf("-- Press Esc to exit\n");
        printf("ex : LightMusic 1 5620 261 1760\n");
        printf("-- <camera_number>  : device number of camera (from 1 to 99)\n");
        printf("-- <buffer_lenght>  : buffer lenght used (from 1000 to 20000)\n");
        printf("-- <low_freq>       : freq of lowest tone, low 261, mid 523, hi 1046\n");
        printf("-- <hi_freq>        : freq of highest tone, low 493, mid 987, hi 1760\n");
        printf("CAUTION!!\n");
        printf("-- bigger number of buffer length, slower frame scan run\n");
        printf("-- smaller number of buffer length, bigger playback sound glitch occur\n");
        printf("-- find right number of buffer length depending on your hardware\n");
        printf("LightMusic -- developed by Lonehack\n");
        return 0;
    }
    // Command-line parameters; BUFFER_LEN/lotone/hitone are file-scope
    // globals consumed by the sound-generation code.
    int cam = atoi(argv[1]);
    BUFFER_LEN = atoi(argv[2]);
    lotone = atoi(argv[3]);
    hitone = atoi(argv[4]);

    //-- Video prepare
    VideoCapture capture;
    Mat frame;
    time_t start, finish;

    //-- Sound error handling
    if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, 0)) < 0)
    {
        printf(" --(!) Playback open error: %s  --\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }
    if ((err = snd_pcm_set_params(handle,
                                  SND_PCM_FORMAT_FLOAT,
                                  SND_PCM_ACCESS_RW_INTERLEAVED,
                                  1,
                                  44100,		//samplerate, standart 44100
                                  1,
                                  80200)) < 0)	//latency, standart 2x samplerate
    {
        printf(" --(!) Playback open error: %s --\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }

    //-- Opening video stream : ( -1 ) any camera or camera number (1,...,99)
    capture.open( cam );

    //-- Checking interface
    if ( ! capture.isOpened() )
    {
        printf("--(!)Error opening video capture --\n");
        return -1;
    }

    //-- BUGFIX: request the capture resolution *after* the device is opened;
    //   VideoCapture::set() on an unopened capture has no effect, so the
    //   original calls before open() were silently ignored.
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 360);

    //-- Start the clock
    time(&start);
    int counter=0;

    //-- Read captured
    while ( capture.read(frame) )
    {
        if( frame.empty() )
        {
            printf(" --(!) No captured frame -- Break!\n");
            break;
        }

        //-- fix image resolution (in case the camera ignored the request)
        resize(frame, frame, Size(640, 360), 0, 0, INTER_CUBIC);

        //-- flip frame around both axes
        flip(frame, frame, -1);

        //-- Apply the lightDetect (fills PosX/PosY globals)
        lightDetect(frame);

        //-- apply sound parameter
        SineWave(PosX, PosY);

        //-- Stop the clock and show FPS
        time(&finish);
        counter++;
        double sec=difftime(finish,start);
        //-- ROBUSTNESS: difftime() is 0 during the first second of runtime;
        //   avoid dividing by zero (previously printed "inf").
        double fps = (sec > 0) ? counter/sec : 0.0;
        printf("fps = %lf\n",fps);

        //-- bail out if escape was pressed
        int c = waitKey(10);
        if( (char)c == 27 )
        {
            printf("\nStoped by User\n");
            break;
        }
    }

    //-- Closing program
    snd_pcm_close(handle);
    capture.release();
    return 0;
}
Example #9
0
// Per-frame visual-odometry step: matches feature descriptors between the
// stored keyframe (kpts1/desc1) and the current frame (kpts2/desc2),
// estimates the essential matrix from surviving matches, accumulates the
// recovered relative pose into totalT, and draws match/pose overlays.
// Operates entirely on file-scope globals (stream1, frame, grey, detector,
// descriptor, matcher, init, points2, mask, R, T, totalT, poseplot, started,
// frames, ...). Pressing SPACE (or losing too many keypoints) re-seeds the
// keyframe; 'q' returns early.
static void loop() {
	stream1.read(frame);
	//resize(frame, frame, Size(frame.cols*0.8, frame.rows*0.8));
	cvtColor(frame, grey, COLOR_BGR2GRAY);

	if (started) {
		cv::Mat rvec(3, 1, cv::DataType<double>::type);
		cv::Mat tvec(3, 1, cv::DataType<double>::type);
		float scale = 0;

		// Detect and describe features in the current frame.
		detector->detect(grey, kpts2);
		cv::KeyPointsFilter::retainBest(kpts2, MAX_FEATURES);
		descriptor->compute(grey, kpts2, desc2);
		frames++;
		if (desc2.cols > 5 && frames > 0) {
			frames = 0;
			matcher.match(desc1, desc2, matches);
			if (matches.size() > 5) {
				double max_dist = 0; double min_dist = 1000;
				std::vector< DMatch > good_matches;
				std::vector<KeyPoint> matched1, matched2;
				// Keep only strong matches (descriptor distance < 20).
				for (int i = 0; i < matches.size(); i++) {
					if (matches[i].distance < 20) {
						good_matches.push_back(matches[i]);
						matched1.push_back(kpts1[matches[i].queryIdx]);
						matched2.push_back(kpts2[matches[i].trainIdx]);
					}
				}
				KeyPoint::convert(matched1, init);
				KeyPoint::convert(matched2, points2);
				// Compute average pixel displacement of the good matches ...
				float avg_dist = 0;
				for (size_t i = 0; i < good_matches.size(); i++) {
					double dist = norm(init[i] - points2[i]);
					avg_dist += dist;
					if (dist < min_dist) min_dist = dist;
					if (dist > max_dist) max_dist = dist;
				}
				avg_dist = avg_dist / good_matches.size();
				// ... then compact-in-place, discarding matches that moved
				// more than the average (outlier rejection).
				int k = 0;
				for (int i = 0; i < init.size(); i++) {
					double dist = norm(init[i] - points2[i]);
					//printf("%f\n", dist);
					if (dist > avg_dist){
						continue;
					}
					points2[k] = points2[i];
					init[k] = init[i];
					k++;
				}
				points2.resize(k);
				init.resize(k);

				if (good_matches.size() > 10 && init.size() > 6) {

					// Intrinsics: focal length and principal point from K.
					float f = K.at<double>(0, 0);
					Point2f pp(K.at<double>(0, 2), K.at<double>(1, 2));
					E = findEssentialMat(init, points2, f, pp, RANSAC, 0.999, 1.0, mask);
					int inliers = recoverPose(E, init, points2, R, T, f, pp, mask);
					if (inliers > 10){
						printf("%d\n", inliers);
						// Build the 4x4 homogeneous transform [R|T; 0 0 0 1]
						// and accumulate it into the total pose.
						hconcat(R, T, M1);
						cv::Mat row = cv::Mat::zeros(1, 4, CV_64F);
						row.at<double>(0, 3) = 1;
						M1.push_back(row);
						//print(M1);
						totalT = totalT*M1;

						// Extract accumulated rotation as a Rodrigues vector
						// for on-screen display.
						Mat rot;
						totalT(cv::Range(0, 3), cv::Range(0, 3)).copyTo(rot);
						Mat rotv;
						Rodrigues(rot, rotv);
						poseplot(Range(0, 100), Range(0, 300)) = 0;

						char buff1[50];
						int fontFace = QT_FONT_NORMAL;
						double fontScale = 0.5f;
						int thickness = 1;
						sprintf(buff1, "x:%+.1f y:%+.1f z:%+.1f", rotv.at<double>(0, 0) * (180 / CV_PI), 
							(rotv.at<double>(1, 0) / CV_PI) * 180, (rotv.at<double>(2, 0) / CV_PI) * 180);
						string text(buff1);
						putText(poseplot, text, Point(0, 20), fontFace, fontScale, Scalar::all(255), thickness, 8);

						// Plot the accumulated translation as a moving dot.
						circle(poseplot, Point(100 + totalT.at<double>(0, 3) * 3, 100 + totalT.at<double>(1, 3)) * 3, 2, Scalar(0, 255, 0));
					}
					// Current frame becomes the new reference keyframe.
					kpts1.clear();
					for (int i = 0; i < kpts2.size(); i++) {
						kpts1.push_back(kpts2[i]);
					}
					desc2.copyTo(desc1);
				}
			}
		}
	}
	// Overlay tracks: green for essential-matrix inliers, red for outliers.
	if (mask.rows > 0) {
		for (size_t i = 0; i < min(init.size(), points2.size()); i++) {
			circle(frame, init[i], 2, Scalar(0, 255, 0));
			if ((int)mask.at<uchar>(i, 0)) {
				line(frame, init[i], points2[i], Scalar(0, 255, 0));
			}
			else{
				line(frame, init[i], points2[i], Scalar(0, 0, 255));
			}
		}
	}
	imshow("cam", frame);

	int key = waitKey(15);
	// SPACE (or keypoint starvation) re-seeds tracking from this frame.
	if (key == ' '|| kpts1.size() < 30) {
		started = 1;
		kpts1.clear();
		detector->detect(grey, kpts1);
		cv::KeyPointsFilter::retainBest(kpts1, MAX_FEATURES);
		descriptor->compute(grey, kpts1, desc1);
		KeyPoint::convert(kpts1, points1);
		poseplot.setTo(cv::Scalar(0, 0, 0));
		totalT = Mat::eye(4, 4, CV_64F);
	}
	else if (key == 'q') {
		return;
	}

	grey.copyTo(prevGray);

	imshow("pose", poseplot);
}
/**
 * Frame-processing demo instrumented with OpenCV Trace macros.
 *
 * Opens the capture source named by the positional `@video` argument — a
 * single-digit string is treated as a camera id, anything else as a
 * filename — then reads and processes up to `n` frames (n <= 0 means "until
 * the stream ends or ESC is pressed").
 *
 * @return 0 on success / user exit, -1 if the capture cannot be opened.
 */
int main(int argc, char** argv)
{
    CV_TRACE_FUNCTION();

    cv::CommandLineParser parser(argc, argv,
        "{help h ? |     | help message}"
        "{n        | 100 | number of frames to process }"
        "{@video   | 0   | video filename or cameraID }"
    );
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    VideoCapture capture;
    std::string video = parser.get<string>("@video");
    // BUGFIX: isdigit() has undefined behavior when passed a plain char
    // with a negative value; the C standard requires the argument to be
    // representable as unsigned char (or EOF).
    if (video.size() == 1 && isdigit(static_cast<unsigned char>(video[0])))
        capture.open(parser.get<int>("@video"));
    else
        capture.open(video);
    int nframes = 0;
    if (capture.isOpened())
    {
        nframes = (int)capture.get(CAP_PROP_FRAME_COUNT);
        cout << "Video " << video <<
            ": width=" << capture.get(CAP_PROP_FRAME_WIDTH) <<
            ", height=" << capture.get(CAP_PROP_FRAME_HEIGHT) <<
            ", nframes=" << nframes << endl;
    }
    else
    {
        cout << "Could not initialize video capturing...\n";
        return -1;
    }

    // Clamp the requested frame budget to the stream length when known.
    int N = parser.get<int>("n");
    if (nframes > 0 && N > nframes)
        N = nframes;

    cout << "Start processing..." << endl
        << "Press ESC key to terminate" << endl;

    UMat frame;
    // N <= 0 means no frame limit: loop until EOF or ESC.
    for (int i = 0; N > 0 ? (i < N) : true; i++)
    {
        CV_TRACE_REGION("FRAME"); // OpenCV Trace macro for named "scope" region
        {
            CV_TRACE_REGION("read");
            capture.read(frame);

            if (frame.empty())
            {
                cerr << "Can't capture frame: " << i << std::endl;
                break;
            }

            // OpenCV Trace macro for NEXT named region in the same C++ scope
            // Previous "read" region will be marked complete on this line.
            // Use this to eliminate unnecessary curly braces.
            CV_TRACE_REGION_NEXT("process");
            process_frame(frame);

            CV_TRACE_REGION_NEXT("delay");
            if (waitKey(1) == 27/*ESC*/)
                break;
        }
    }

    return 0;
}
Example #11
0
// Per-frame tracking + 3-D visualization step. Tracks points with pyramidal
// LK flow, and once an initial structure exists (rdpoints), estimates the
// camera pose with solvePnPRansac against the triangulated 3-D points.
// SPACE first seeds 2-D tracks, a second SPACE triangulates them into
// init3dpoints; 'q' exits the program. Operates on file-scope globals
// (stream1, frame, grey, prevGray, points1/2, init, init3dpoints, c3dpoints,
// K, R, T, M0/M1, started, rdpoints, myWindow, poseplot, ...).
void loop() {



  /// Add coordinate axes
  myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem());;

  // Read camera frame
  stream1.read(frame);
  //resize(frame, frame, Size(frame.cols*0.8, frame.rows*0.8));
  cvtColor(frame, grey, COLOR_BGR2GRAY);

  // move to function?
  if (!points1.empty()) {
    calcOpticalFlowPyrLK(prevGray, grey, points1, points2, status, err, winSize, 3, termcrit, 0, 0.001);
    // remove bad tracks: compact all parallel arrays in place, keeping only
    // entries whose LK status flag is set.
    size_t k;
    for (size_t i = k = 0; i < points2.size(); i++) {
      if (!status[i])
        continue;
      points2[k] = points2[i];
      points1[k] = points1[i];
      init[k] = init[i];

      // init3dpoints is only populated after triangulation (rdpoints).
      if(rdpoints){
        init3dpoints[k] = init3dpoints[i];
      }
      k++;
      circle(frame, points2[i], 2, Scalar(0, 255, 0), -1, 8);
      if (!rdpoints){
        line(frame, init[i], points2[i], Scalar(0, 255, 0));
      }
    }
    points1.resize(k);
    points2.resize(k);
    init.resize(k);
    if (rdpoints) {
      init3dpoints.resize(k);
    }
  }

  // Pose estimation needs a minimum number of surviving tracks.
  if (points1.size() > 8) {
    totalT = totalT + T;

    cv::Mat rvec(3, 1, cv::DataType<double>::type);
    cv::Mat tvec(3, 1, cv::DataType<double>::type);
    float scale = 0;
    if (init3dpoints.size() > 0) {
      // PnP against the triangulated structure gives the current pose.
      solvePnPRansac(init3dpoints, points2, K, noArray(), rvec, tvec, false, 200,4);
      T = tvec;
      Rodrigues(rvec, R);
      /*frames++;
      T = T + tvec;
      if (frames == 3) {
        T = T / 3;
        circle(poseplot, Point(200 + T.at<double>(0, 0) * 100, 200 + T.at<double>(1, 0) * 100), 2, Scalar(0, 255, 0));
        T = Mat::zeros(3, 1, CV_64F);
        frames = 0;
      }*/
    }
  }

  imshow("cam", frame);

  int key = waitKey(15);

  // Second SPACE press: recover relative pose between the seed frame and
  // now, then triangulate the tracked correspondences into 3-D points.
  if (key == ' ') {
    if (started && !rdpoints) {
      rdpoints = 1;
      float f = K.at<double>(0, 0);
      Point2f pp(K.at<double>(0, 2), K.at<double>(1, 2));
      Mat E = findEssentialMat(init, points2, f, pp, RANSAC, 0.99, 1.0, mask);
      int inliers = recoverPose(E, init, points2, R, T, f, pp);
      hconcat(R, T, M1);
      triangulate_points(K*M0, K*M1, init, points2, &init3dpoints);
      // Scaled-down copy used for the point-cloud widget.
      c3dpoints.clear();
      for (int i = 0; i < init3dpoints.size(); i++) {
          c3dpoints.push_back(init3dpoints[i]/10);
      }
    }
  }

  std::swap(points2, points1);
  cv::swap(prevGray, grey);

  // First SPACE press: seed 2-D feature tracks from the current frame.
  if (key == ' ' && !rdpoints) {
    started = 1;
    // features and keypoints for object
    img1 = grey.clone();
    keyframes.push_back(img1);
    kpts1.clear();
    init.clear();
    goodFeaturesToTrack(img1, points1, MAX_FEATURES, 0.01, 15, Mat(), 3, 0, 0.04);
    //cornerSubPix(img1, points1, subPixWinSize, Size(-1, -1), termcrit);
    for (size_t i = 0; i < points1.size(); i++) {
      kpts1.push_back(cv::KeyPoint(points1[i], 1.f));
      init.push_back(Point2f(points1[i]));
    }
  }
  else if (key == 'q') {
    exit(0);
  }
  imshow("pose", poseplot);

  // Plot 3D points
  if (!init3dpoints.empty()) {

    viz::WCloud cw(c3dpoints);
    cw.setRenderingProperty(viz::POINT_SIZE, 5);
    myWindow.showWidget("CloudWidget", cw);
    /// Let's assume camera has the following properties
    //double sz[3] = {0,0,-1};
    //Mat fD(3,sz, CV_64F, Scalar::all(0));
    Mat fD = (Mat_<double>(3,1) << 0, 0, -1);
    Mat tmp = R*fD;
    Vec3d cam_pos(T), cam_focal_point(tmp), cam_y_dir(-1.0f,0.0f,0.0f);

    /// We can get the pose of the cam using makeCameraPose
    Affine3f cam_pose = viz::makeCameraPose(cam_pos, cam_focal_point, cam_y_dir);

    /// We can get the transformation matrix from camera coordinate system to global using
    /// - makeTransformToGlobal. We need the axes of the camera
    //Affine3f transform = viz::makeTransformToGlobal(Vec3f(0.0f,-1.0f,0.0f), Vec3f(-1.0f,0.0f,0.0f), Vec3f(0.0f,0.0f,-1.0f), cam_pos);

    /// Create a cloud widget.
    //Mat bunny_cloud = cvcloud_load();
    //viz::WCloud cloud_widget(bunny_cloud, viz::Color::green());

    /// Pose of the widget in camera frame
    //Affine3f cloud_pose = Affine3f().translate(Vec3f(0.0f,0.0f,3.0f));
    /// Pose of the widget in global frame
    //Affine3f cloud_pose_global = transform * cloud_pose;

    /// Visualize camera frame

    viz::WCameraPosition cpw(0.5); // Coordinate axes
    viz::WCameraPosition cpw_frustum(Vec2f(0.889484, 0.523599)); // Camera frustum
    myWindow.showWidget("CPW", cpw, cam_pose);
    myWindow.showWidget("CPW_FRUSTUM", cpw_frustum, cam_pose);

    /// Visualize widget
    //myWindow.showWidget("bunny", cloud_widget, cloud_pose_global);

    /// Set the viewer pose to that of camera
    //myWindow.setViewerPose(cam_pose);

    /// Start event loop.
    //myWindow.spin();
  }
   myWindow.spinOnce(1, true);
}
Example #12
0
// Alien tracker demo: selects (or reads) an initial bounding box, initializes
// the tracker from the first gray frame, then tracks frame-to-frame, logging
// every box to bounding_boxes.txt.
//
// Command line (grammar in the file-scope `keys`):
//   -i initial frame index, -p parameter YAML, -s source video ("null" =
//   camera 0), -b initial bounding-box file ("null" = draw with the mouse).
// Returns 0 on normal exit, 1 on any initialization failure.
int main(int argc, char * argv[]){
    VideoCapture capture;
    FileStorage fs;
    FileStorage detector_file;
    bool fromfile=false;
    //Read options
    CommandLineParser parser(argc, argv, keys);
    int init_frame = parser.get<int>("i");
    string param_file = parser.get<string>("p");
    string video = parser.get<string>("s");
    string init_bb  = parser.get<string>("b");
    
    fs.open(param_file, FileStorage::READ);
    // ROBUSTNESS: fail early if the parameter file cannot be read; the
    // original went on to call fs.getFirstTopLevelNode() on an empty storage.
    if (!fs.isOpened()){
        cout << "parameters file failed to open!" << endl;
        return 1;
    }
    if (video != "null"){
        fromfile=true;
        capture.open(video);
    }else{
        fromfile=false;
        capture.open(0);
    }
    if (init_bb !="null"){
        readBB(init_bb.c_str());
        gotBB =true;
    }
    
    //Init camera
    if (!capture.isOpened()){
        cout << "capture device failed to open!" << endl;
        return 1;
    }
    //Register mouse callback to draw the bounding box
    cvNamedWindow("Tracker",CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Features",CV_WINDOW_AUTOSIZE);
    cvSetMouseCallback( "Tracker", mouseHandler, NULL );
    
    //Output file: one line per frame with box geometry and confidence
    FILE  *bb_file = fopen("bounding_boxes.txt","w");
    // BUGFIX: fopen() can fail (permissions, read-only directory); the
    // original passed a NULL stream straight to fprintf.
    if (bb_file == NULL){
        cout << "could not open bounding_boxes.txt for writing!" << endl;
        return 1;
    }
    
    Mat frame;
    Mat last_gray;
    Mat first;
    if (fromfile){
        // Seek to the requested start frame and keep a copy so the
        // box-selection loop below can redraw over a stable image.
        capture.set(CV_CAP_PROP_POS_FRAMES,init_frame);
        capture.read(frame);
        last_gray.create(frame.rows,frame.cols,CV_8U);
        cvtColor(frame, last_gray, CV_BGR2GRAY);
        frame.copyTo(first);
    }

    ///Initialization: loop until a bounding box exists (drawn via the mouse
    ///callback, or preloaded with -b).
GETBOUNDINGBOX:
    while(!gotBB){
        if (!fromfile) capture.read(frame);
        else first.copyTo(frame);
        cvtColor(frame, last_gray, CV_BGR2GRAY);
        drawBox(frame,box);
        imshow("Tracker", frame);
        if (cvWaitKey(30) == 'q')
            return 0;
    }
    // Reject boxes smaller than the tracker's configured minimum window.
    if (min(box.width,box.height)<(int)fs.getFirstTopLevelNode()["min_win"]){
        cout << "Bounding box too small, try again." << endl;
        gotBB = false;
        goto GETBOUNDINGBOX;
    }
    drawBox(frame,box);
    imshow("Tracker", frame);
    //Remove callback
    cvSetMouseCallback( "Tracker", NULL, NULL );
    printf("Initial Bounding Box = x:%d y:%d h:%d w:%d\n",box.x,box.y,box.width,box.height);
    //Output file
    fprintf(bb_file,"%d,%d,%d,%d,%f\n",box.x,box.y,box.br().x,box.br().y,1.0);
    
INIT:
    // Framework
    Alien tracker(fs.getFirstTopLevelNode());
    tracker.init(last_gray,box);
    
    cvWaitKey();
    ///Run-time
    Mat current_gray;
    RotatedRect pbox;
    bool status=true;
    int frames = 1;
    int detections = 1;
    float conf;
    while(capture.read(frame)){
        cvtColor(frame, current_gray, CV_BGR2GRAY);
        cout << endl;
        //Process Frame (timed with the tick counter; t is in milliseconds)
        double t=(double)getTickCount();
        conf = tracker.processFrame(last_gray,current_gray,pbox,status);
        t = ((double)getTickCount() - t)*1000/getTickFrequency();
        //Draw Box
        if (status){
            drawBox(frame,pbox);
            fprintf(bb_file,"%f,%f,%f,%f,%f,%f,%f\n",pbox.center.x, pbox.center.y, pbox.size.height,pbox.size.width, pbox.angle,conf,t);
            detections++;
        }
        else{
            fprintf(bb_file,"NaN,NaN,NaN,NaN,%f,%f\n",conf,t);
        }
        //Display
        imshow("Tracker", frame);
        swap(last_gray,current_gray);
        frames++;
        printf("Detection rate: %d/%d, period: %fms\n",detections,frames,t);
        if (cvWaitKey(30) == 'q') break;
    }
    tracker.save("Detector.yml");
    fclose(bb_file);
    capture.release();
    return 0;
}
Example #13
0
	// Grab frames, mark red-hued pixels in HLS space, and compute the centroid
	// of the detected red region. NOTE: under normal operation this routine
	// loops forever (goto AGAIN, one iteration every 5 s, dumping numbered
	// .bmp debug images); the trailing return is only reached if the loop is
	// ever broken. Returns {-1,-1} immediately when the camera is not open.
	CPoint TFind()
	{	char op[255];
		// Sentinel "no result" point.
		// BUGFIX: was `{-1 -1}` (a single element, -2) due to a missing comma.
		CPoint cvoid = {-1, -1};
		if(!cap->isOpened()){cout<<"ERROR: no camera";return cvoid;}
		Mat mat,mat2;
		{
		AGAIN:
		
		cap->read(mat);
		// Work in HLS (hue / lightness / saturation) for hue thresholding.
		cvtColor(mat, mat2, CV_BGR2HLS);
		static int PK=0;
		sprintf(op,"_%d.bmp",PK++);	// dump raw HLS frame for debugging
		imwrite(op,mat2);
		
		Mat_ <Vec3b> Frame(mat);
		Mat_ <Vec3b> OFrame(mat2);
		cv::Size sks = Frame.size();
		int i,j;
		int SX,SY,ct;
		SX=0;SY=0;ct=0;
		int FW = 5;	// border margin skipped during the scan
		for(i=FW;i<sks.height-FW;i++)
		for(j=FW;j<sks.width-FW;j++)
		{
			// Red-ish hue band (14 < H < 21 in OpenCV's 0..180 hue range).
			if((OFrame(i,j)[0] < 21 && OFrame(i,j)[0] > 14))
			{
				// Require enough lightness and saturation so near-black and
				// near-white pixels are not mistaken for red.
				if(OFrame(i,j)[1] > 70 && OFrame(i,j)[2] > 165)
				{
					// Accumulate centroid sums and recolor the accepted
					// pixel for visual inspection in the dumped image.
					SX+=j;
					SY+=i;
					ct++;
					OFrame(i,j)[0]=90;
					OFrame(i,j)[1]=140;
					OFrame(i,j)[2]=255;
				}
			}
		}
		if(ct !=0){
			// Centroid of all accepted pixels.
			SX = SX / ct;
			SY = SY / ct;
		}else{SX=SY=-1;}
		// Paint a 5x5 marker in the top-left corner of the debug image.
		// BUGFIX: the original nested loop had no braces, so only the first
		// channel assignment was repeated; the other two executed once,
		// after the loop, at the out-of-range indices i=5, j=5.
		for(i=0;i<5;i++)
		for(j=0;j<5;j++)
		{
			OFrame(i,j)[0]=255;
			OFrame(i,j)[1]=157;
			OFrame(i,j)[2]=10;
		}
		cvtColor(mat2, mat, CV_HLS2BGR);
		
		sprintf(op,"_%d-f.bmp",PK++);
		
		imwrite(op,mat);
		sleep(5);
		goto AGAIN;
		}
		// Unreachable in practice (the loop above never exits).
		// BUGFIX: return a defined sentinel instead of an uninitialized struct.
		CPoint RT = cvoid;
		return RT;
	}
Example #14
0
// Ball-tracking main: reads frames from the capture source, locates the ball
// (findBall/getBestCircle), estimates its 3-D position and speed, and hands
// the state to a maths thread that solves the inverse problem. Benchmark
// counters accumulate in `bench` and are printed at the end.
int main( int argc, char** argv )
{
    // Benchmark variables
    //double filteringTime=0 , noiseTime=0, contourTime=0, readTime=0, waitFinal=0, totalTime, finalLoop; 
    Benchmark bench;

    // Video vars
    VideoCapture cap;
    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
    Size subPixWinSize(10,10), winSize(31,31);

    // Get the video (filename or device)
    cap.open("IMG_6214.JPG");
    if( !cap.isOpened() )
    {
        cout << "Could not initialize capturing...\n";
        return 0;
    }

    // Display vars
    int i = 0 ;
    int n = cap.get(CV_CAP_PROP_FRAME_COUNT);

    #ifdef DISPLAY
        // Create the windows
        namedWindow(WINDOW_ORIGIN) ;
        namedWindow(WINDOW_THRESHOLD);
        namedWindow(WINDOW_THRESHOLD_NOISE);
        namedWindow(WINDOW_THRESHOLD_NOISE_BLUR);  
        namedWindow(WINDOW_CONFIG);  

        // HSV threshold tuning sliders
        createTrackbar(TRACKBAR_HUE_MIN, WINDOW_CONFIG, &H_min, 255) ;  
        createTrackbar(TRACKBAR_HUE_MAX, WINDOW_CONFIG, &H_max, 255) ;  
        createTrackbar(TRACKBAR_SATURATION_MIN, WINDOW_CONFIG, &S_min, 255) ;  
        createTrackbar(TRACKBAR_SATURATION_MAX, WINDOW_CONFIG, &S_max, 255) ;  
        createTrackbar(TRACKBAR_VALUE_MIN, WINDOW_CONFIG, &V_min, 255) ;  
        createTrackbar(TRACKBAR_VALUE_MAX, WINDOW_CONFIG, &V_max, 255) ;  

        moveWindow(WINDOW_ORIGIN, 0, 0) ;
        moveWindow(WINDOW_THRESHOLD, 0, 0);
        moveWindow(WINDOW_THRESHOLD_NOISE, 0, 0);
        moveWindow(WINDOW_THRESHOLD_NOISE_BLUR, 0, 0);
        moveWindow(WINDOW_CONFIG, 0, 0);

        namedWindow(WINDOW_TEST);
        moveWindow(WINDOW_TEST, 0, 0);
    #endif

    #ifdef OCL
        ocl::PlatformsInfo platforms;
        ocl::getOpenCLPlatforms(platforms);
        ocl::DevicesInfo devices;
        ocl::getOpenCLDevices(devices);
        std::cout << "platforms " << platforms.size() << "  devices " << devices.size() << " " << devices[0]->deviceName << std::endl;
        ocl::setDevice(devices[0]);

    #endif

    // Thread declarations
    pthread_t threadMaths, threadWall;
    
    //pthread_create(&threadWall, NULL, mathsRoutine, arg)

    gettimeofday(&bench.beginTime, NULL);
    BallState ballState_t1, ballState_t2;
    int numberOfTreatedLoop = 0, numberOfNonTreatedLoop = 0, noTreatment = 0;
    // A for-loop because the input is a finite video; a live stream would
    // likely use while(1) instead.
    for(i=0 ; i < n ; i++)
    {
        if(noTreatment <= 0) // only process when the ball is worth a closer look
        {
            numberOfTreatedLoop++;
            numberOfNonTreatedLoop = 0;
            ballState_t1 = ballState_t2;
            std::vector<CircleFound> circles;
            CircleFound ballCircle;
            findBall(cap, bench, circles);
            ballCircle = getBestCircle(circles);
            // BUGFIX: the original `if(ballCircle.radius == 0)` had its
            // `break;` commented out, which silently made the following
            // getBallPosition() call the if's body — i.e. the ball position
            // was only updated when NO circle was found. Braces make the
            // (intentionally empty) no-circle branch explicit so the update
            // below runs unconditionally again.
            if(ballCircle.radius == 0) // means no circle
            {
                //break;
            }
            getBallPosition(ballCircle.x, ballCircle.y, ballCircle.radius*2, ballState_t2);
            calculateBallSpeed(ballState_t2);
            if(ballState_t2.vy < 0) // ball moving backwards: pause processing briefly
            {
                noTreatment = 3;
            }
            copyStateToMaths(ballState_t1, ballState_t2, CI);

            // Start maths part
            // NOTE(review): `.v` — other code reads `.vy`; presumably this
            // flags "speed computed". Verify the member name.
            if(ballState_t2.v)
                pthread_create(&threadMaths, NULL, Pb_Inv, NULL);

            #ifdef DEBUG
                cout << "x : "<< ballState.x << "   y : " << ballState.y << "   z : " << ballState.z <<endl;
            #endif
            #ifdef DISPLAY
                #ifdef PICTURE
                    char c = (char)waitKey(100000);
                #else
                    char c = (char)waitKey(1);
                #endif
                if( c == 27 )
                {
                    //break;
                }
            #endif
        }
        else
        {
            // Skipped frame: still consume it so the stream advances.
            Mat tmp;
            cap.read(tmp);
        }

        noTreatment--;      
    }
    
    // NOTE(review): tv_begin/tv_end are presumably filled inside findBall();
    // confirm before trusting finalLoop.
    bench.finalLoop = (double) (bench.tv_end.tv_sec - bench.tv_begin.tv_sec) + ((double) (bench.tv_end.tv_usec - bench.tv_begin.tv_usec)/1000000);
    gettimeofday(&bench.endTime, NULL);
    bench.totalTime = (double) (bench.endTime.tv_sec - bench.beginTime.tv_sec) + ((double) (bench.endTime.tv_usec - bench.beginTime.tv_usec)/1000000);
    

    #ifndef DISPLAY
        //cout << stringOutput.str() ;
    #endif
    cout << "FRAMES : " << n << "  THREADS : " << getNumThreads() << "  CPUs : " << getNumberOfCPUs() <<endl;
    cout << "Total Time : " << bench.totalTime << "s" << endl;
    cout << " Read time : " << bench.readTime << "s" << endl;
    cout << " Filtering time : " << bench.filteringTime << "s" << endl;
    cout << " Noise cancellation time : " << bench.noiseTime << "s" << endl;
    cout << " Contour determination time : " << bench.contourTime << "s" << endl;
    cout << " Final Loop : " << bench.finalLoop << "s" <<  "  Supposed total : " << bench.readTime+bench.filteringTime+bench.noiseTime+bench.contourTime+bench.finalLoop << endl;

    return 0;
}
Example #15
0
bool grabFrame()
{	
	Mat cap, frame, red, red_lower, red_upper, yellow;
	bool frame_available;

	frame_available = camera.read(cap);
	if(!frame_available)
	{
		cout << "Unable to grab frame from camera, might not be initialized or maybe unpluged?\n";
		return 1;
	}
	cvtColor(cap, frame, CV_BGR2HSV); //convert to HSV from RGB

	inRange(frame, lowLowerRed, highLowerRed, red_lower);	
	inRange(frame, lowUpperRed, highUpperRed, red_upper);
	inRange(frame, lowYellow, highYellow, yellow);

	cleanThresholdedImage(red_lower);
	cleanThresholdedImage(red_upper);
	cleanThresholdedImage(yellow);

	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;
	vector<Rect> bounding_rects;

	//same kit as above for the yellow
	findContours(yellow, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
	for( int i = 0; i < contours.size(); i++ )
	{
		bounding_rects.push_back( boundingRect( Mat(contours[i]) ) );
	}
	if(bounding_rects.size() > 0)
	{
		Rect largest = largestRectInFrame(bounding_rects);
		rectangle( yellow, largest, Scalar(150, 127, 200), 1, 8);
		if(largest.area() > AREA_THRESHOLD)
		{	
			cout << "Yellow object center at: (" << (largest.x + largest.width/2) 
			<< ", " << (largest.y + largest.height/2) << ")" << endl;
			yellow_object_seen = true;
		}
		else
		{
			cout << "There's somehting yellow there, but not big enough." << endl;
			yellow_object_seen = false;
		}
	}
	else
	{
		cout << "Nothing yellow, fam." << endl;
		yellow_object_seen = false;
	}

	contours.clear();
	hierarchy.clear();
	bounding_rects.clear();

	//same thing for the red

	red = red_lower + red_upper;
	findContours(red, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
	for( int i = 0; i < contours.size(); i++ )
	{
		bounding_rects.push_back( boundingRect( Mat(contours[i]) ) );
	}
	if(bounding_rects.size() > 0)
	{
		Rect largest = largestRectInFrame(bounding_rects);
		rectangle( red, largest, Scalar(150, 127, 200), 1, 8);
		if(largest.area() > AREA_THRESHOLD)
		{
			cout << "Red object center at: (" << (largest.x + largest.width/2)
			<< ", " << (largest.y + largest.height/2) << ")" << endl;
			red_object_seen = true;
		}
		else
		{
			cout << "There's somehting red there, but not big enough." << endl;
			red_object_seen = false;
		}
	}
	else
	{
		cout << "No red, fam." << endl;
		red_object_seen = false;
	}

	contours.clear();
	hierarchy.clear();
	bounding_rects.clear();

	return 0;
}
Example #16
0
int trainData() {

    std:: string videoName="";

    int n_frames[1000];
    //create dictionary
    int dict_size=100;//***

    Mat features;
    for(int i=1; i<no_videos; i++) {


        stringstream temp;
        temp<<i;
        std::string no=temp.str();
        videoName="C:/Rasika/trainvideos/video_"+no+".avi"; //*** path can be changed

        //initialize capture
        VideoCapture cap;
        cap.open(videoName);
        if(!cap.isOpened())  // check if we succeeded
            return -1;

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        //create window to show image
        //namedWindow("Video",1);
        //cout<<count<<endl;
        int jump=count/N;
        int j=1;

        int u=0;
        if(count<10) {
            jump=1;
        }
        int cnt=jump;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            ////////EXTRACT INTEREST POINTS USING SIFT////
            // vector of keypoints
            std::vector<cv::KeyPoint> keypoints;
            // Construct the SIFT feature detector object
            SiftFeatureDetector sif(0.03,10.); // threshold  //***
            //Detect interest points
            sif.detect(gray_image,keypoints);

            ////////IMSHOW THE FRAMES EXTRACTED///////////

            //copy video stream to image
            //cap>>image;
            //print image to screen
            //imshow("Video",image);


            ///////////Save the frames//////////////

            stringstream temp2;
            temp2<<j;
            std::string no2=temp2.str();
            std::string frame_name="frame"+no2+".jpg";
            imwrite(frame_name,image);


            //////////////Draw the keypoints////////////

            /*
            Mat featureImage;
            // Draw the keypoints with scale and orientation information
            drawKeypoints(image, // original image
            keypoints, // vector of keypoints
            featureImage, // the resulting image
            Scalar(255,0,255), // color of the points
            DrawMatchesFlags::DRAW_RICH_KEYPOINTS); //flag
            //std::string name="image"+i;
            imshow(frame_name, featureImage );
            */

            ////////////////////detect decriptors//////////////////

            SiftDescriptorExtractor siftExtractor;
            Mat siftDesc;
            siftExtractor.compute(gray_image,keypoints,siftDesc);
            features.push_back(siftDesc);//add the descriptors from each frame..to create one for a video

            ////////////////
            //delay 33ms //***
            //waitKey(33);

            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }

        //store number of frames per video
        n_frames[i-1]=j-1;



    }

    TermCriteria term(CV_TERMCRIT_ITER,100,0.001);//***

    //retries number ***
    int retries=1;

    int flags=KMEANS_PP_CENTERS;
    BOWKMeansTrainer bowTrainer(dict_size,term,retries,flags);
    //cluster the feature vectors
    Mat dictionary=bowTrainer.cluster(features);

    //for further process
    full_dictionary.push_back(dictionary);
    ///////////////////////////////////////////////////
    FileStorage fs("full_dictionary.yml", FileStorage::WRITE);
    fs << "vocabulary" << full_dictionary;
    fs.release();
    //Created Vocabulary

    //Calculate histograms for the train videos
    //idf_vector(full_dictionary);

    return 0;
}
int main(int argc, char* argv[])
{
	//some boolean variables for different functionality within this
	//program
    bool trackObjects = false;
    bool useMorphOps = false;
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	//matrix storage for HSV image
	Mat HSV;
	//matrix storage for binary threshold image
	Mat threshold;
	//x and y values for the location of the object
	int x=0, y=0;
	//create slider bars for HSV filtering
	createTrackbars();
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		//store image to matrix
		capture.read(cameraFeed);
		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		//filter HSV image between values and store filtered image to
		//threshold matrix
		inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
		//perform morphological operations on thresholded image to eliminate noise
		//and emphasize the filtered object(s)
		if(useMorphOps)
		morphOps(threshold);
		//pass in thresholded frame to our object tracking function
		//this function will return the x and y coordinates of the
		//filtered object
		if(trackObjects)
			trackFilteredObject(x,y,threshold,cameraFeed);

		//show frames 
		imshow(windowName2,threshold);
		imshow(windowName,cameraFeed);
		imshow(windowName1,HSV);
		

		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}






	return 0;
}
Example #18
0
// Computes bag-of-words descriptors for each video "C:/Rasika/video_<i>.avi"
// using the supplied vocabulary, logging progress to example.txt.
// Side effects: increments the global imagesData once per processed frame.
// Uses file-level globals: no_videos, N, imagesData.
void idf_vector(Mat full_dictionary) {
    // Progress log.
    ofstream myfile;
    myfile.open ("example.txt");
    myfile << "Calculating IDF_VECTORS.\n";

    std::string videoName = "";

    // Number of frames sampled per video (index i-1).
    int n_frames[100];
    // Vocabulary size. //***
    int dict_size = 100;

    // Nearest-neighbour matcher + SIFT detector/extractor feeding the
    // bag-of-words descriptor extractor.
    Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
    Ptr<FeatureDetector> detector(new SiftFeatureDetector());
    Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
    BOWImgDescriptorExtractor bowDE(extractor, matcher);
    // Use the vocabulary created during training.
    bowDE.setVocabulary(full_dictionary);

    for (int i = 1; i < no_videos; i++) {

        stringstream temp;
        temp << i;
        std::string no = temp.str();
        videoName = "C:/Rasika/video_" + no + ".avi"; //*** path can be changed

        // Initialize capture; FIX: the original never checked isOpened() and
        // would silently process nothing for a missing file.
        VideoCapture cap;
        cap.open(videoName);
        if (!cap.isOpened()) {
            myfile << "Cannot open video " << videoName << "\n";
            continue;
        }

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); // total frame count

        // Sample roughly N evenly spaced frames.  FIX: clamp the stride to at
        // least 1 — with 0 < count < N the old code computed jump == 0, so
        // `cnt += jump` never advanced and the while loop below ran forever.
        int jump = count / N;
        if (jump < 1)
            jump = 1;
        int j = 0;   // frames processed for this video
        int cnt = 0; // current frame index
        myfile << "Reading Video";
        Mat desc;    // BoW descriptors, one row per sampled frame
        while (cnt < count) {

            // Seek to the sampled frame and grab it.
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES, cnt);
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            // Convert to gray scale for SIFT.
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );
            imagesData++; // number of images in the database

            // Detect SIFT keypoints.
            vector<KeyPoint> keypoints;
            detector->detect(gray_image, keypoints);
            // Extract the BoW representation of the frame.
            Mat bowDescriptor;
            bowDE.compute(gray_image, keypoints, bowDescriptor);

            desc.push_back(bowDescriptor);

            cnt += jump;
            j++;
            // next frame for the same video
        }

        // Store number of frames processed for this video.
        n_frames[i - 1] = j;
    }
    myfile << "IDF done";
    myfile.close();
}
Example #19
0
int main(int argc, char** argv) {
	// Open capture
	VideoCapture cap;
	Mat img;
	cap.open(0);
	if (!cap.isOpened()) {
			std::cout << "Could not initialize capturing...\n" << std::endl;
			exit(EXIT_FAILURE);
		}
	cap.read(img);



	int width = img.cols, height = img.rows;
	int marker0 = 492, marker1 = 493;
	Mat pose0 = (Mat_<float>(8,1) << 0, 0, 0, 0, 0, 0, 1, 1);
	Mat pose1 = (Mat_<float>(8,1) << 0, 0, 0, 0, 0, 0, 1, 1); 

	
	IplImage* img_proc = cvCreateImage(cvSize(width, height),
		IPL_DEPTH_8U, img.channels());
// Initialise Artoolkitplus
	TrackerMultiMarker* tracker = new TrackerMultiMarker(width, height, 8, 6, 6, 6, 0);
	tracker->setPixelFormat(ARToolKitPlus::PIXEL_FORMAT_LUM);

	// load a camera file.
	if (!tracker->init("../src/Logitech_Notebook_Pro.cal",	"../src/markerboard_480-499.cfg", 1.0f, 1000.0f))
	{
		printf("ERROR: init() failed\n");
		exit(EXIT_FAILURE);
	}

	// tracker->getCamera()->printSettings();

	// the marker in the BCH test image has a thin border...
	tracker->setBorderWidth(0.125);

	// set a threshold. alternatively we could also activate automatic thresholding
	tracker->setThreshold(120);

	// let's use lookup-table undistortion for high-speed
	// note: LUT only works with images up to 1024x1024
	tracker->setUndistortionMode(ARToolKitPlus::UNDIST_LUT);

	// switch to simple ID based markers
	// use the tool in tools/IdPatGen to generate markers
	tracker->setMarkerMode(ARToolKitPlus::MARKER_ID_SIMPLE);




// Start fifo
	int fd;
	char * myfifo = "/tmp/autoarm";
/* create the FIFO (named pipe) */
	mkfifo(myfifo, 0666);
	fd = open(myfifo, O_WRONLY);

	while (1){
		cap.read(img);
		img_proc->imageData = (char *) img.data;
		int num = getNumDetected(img_proc, tracker, width,	height);
		cout << num << " marker(s) detected"<< endl;

		if (num == 2) {
			for (int i = 0; i < num; i ++) {
				int markerID = tracker->getDetectedMarker(i).id;

				if ( markerID == marker0 ) {
					getMarkerPosition(tracker, i, pose0); 
				} else if ( markerID == marker1 ) {
					getMarkerPosition(tracker, i, pose1); 
				}
			}
			float out[6] = {pose0.at<float>(0, 0),pose0.at<float>(1, 0),pose0.at<float>(2, 0),pose1.at<float>(0, 0),pose1.at<float>(1, 0),pose1.at<float>(2, 0)};
			write(fd, out, 6*sizeof(float));
			usleep(100000);
		}
	}
	cap.release();
	close(fd);
}
Example #20
0
int main(int argc, char** argv) {

    //Check arguments
    //***

    int set=trainData();

    //If set=0, proceed
    if(set==0) {


        //Take the two video inputs and measure the similarity
        float firstTF[1000];//***
        float secondTF[1000];
        int n_frames[1000];

        //////////////////////////////////////////////////////////////////////////////////////////////////
        Mat dicty;
        FileStorage fs("full_dictionary.yml", FileStorage::READ);
        fs["vocabulary"] >> dicty;
        fs.release();

        //set dictionary
        int dict_size=100;//***
        //create a nearest neighbor matcher
        Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
        //create Sift feature point extracter
        Ptr<FeatureDetector> detector(new SiftFeatureDetector());
        //create Sift descriptor extractor
        Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
        //create BoF (or BoW) descriptor extractor
        BOWImgDescriptorExtractor bowDE(extractor,matcher);
        //Set the dictionary with the vocabulary we created in the first step
        bowDE.setVocabulary(dicty);

//////////////////////////////First Video//////////////////////////////////////////////////////////

        ofstream myfile;
        myfile.open ("first_video.txt");
        myfile << "Calculating TF_VECTORS.\n";

        //initialize capture
        VideoCapture cap;
        cap.open(argv[1]); //***

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        int jump=count/N; //extract 10 frames from the video ***
        int j=0;
        if(count<10) {
            jump=1;
        }
        int cnt=jump;
        myfile<<"Reading Video";
        Mat features;
        Mat desc;

        int u=0;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            //To store the keypoints that will be extracted by SIFT
            vector<KeyPoint> keypoints;
            //Detect SIFT keypoints (or feature points)
            detector->detect(gray_image,keypoints);
            //To store the BoW (or BoF) representation of the image
            Mat bowDescriptor;
            //extract BoW (or BoF) descriptor from given image
            bowDE.compute(gray_image,keypoints,bowDescriptor);

            desc.push_back(bowDescriptor);

            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }
        //FileStorage fs("descriptor.yml", FileStorage::WRITE);
        //fs << "descriptor" << desc;
        //fs.release();



        for(int k=0; k<desc.cols; k++) {
            int tf=0;
            for(int l=0; l<desc.rows; l++) {
                if(desc.at<float>(l,k)>0) {

                    //cout<<bowDescriptor.at<float>(i,j)<<endl;
                    tf++;
                }


            }
            myfile<<"Term Frequency:"<<tf<<"\n";
            firstTF[k]=tf;

        }



        myfile<<"TF done";
        myfile.close();

//////////////////////////////Second Video//////////////////////////////////////////////////////////

        ofstream myfile3;
        myfile3.open ("second_video.txt");
        myfile3 << "Calculating IDF_VECTORS.\n";

        //initialize capture
        cap.open(argv[2]); //***

        count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        jump=count/N; //extract 10 frames from the video ***
        j=0;
        if(count<10) {
            jump=1;
        }
        cnt=jump;
        myfile3<<"Reading Video";
        Mat desc2;
        u=0;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            //To store the keypoints that will be extracted by SIFT
            vector<KeyPoint> keypoints;
            //Detect SIFT keypoints (or feature points)
            detector->detect(gray_image,keypoints);
            //To store the BoW (or BoF) representation of the image
            Mat bowDescriptor;
            //extract BoW (or BoF) descriptor from given image
            bowDE.compute(gray_image,keypoints,bowDescriptor);

            desc2.push_back(bowDescriptor);
            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }


        for(int k=0; k<desc2.cols; k++) {
            int tf=0;
            for(int l=0; l<desc2.rows; l++) {
                if(desc2.at<float>(l,k)>0) {

                    //cout<<bowDescriptor.at<float>(i,j)<<endl;
                    tf++;
                }


            }
            myfile3<<"Term Frequency:"<<tf<<"\n";
            secondTF[k]=tf;

        }

        myfile3<<"TF done";
        myfile3.close();

//////////////////////////////////////////////////////////////////////////////////////////////////////////

        //Display the similarity score

        //Dot product of TF vectors


        float similarity=0;
        ofstream my3;
        my3.open("Similarity.txt");

        for(int i=0; i<dict_size; i++) {
            similarity+=firstTF[i]*secondTF[i];

        }
        my3<<"\n";
        my3<<similarity<<" ";
        my3.close();

        cout<<"Similarity Score:"<<similarity<<endl;

    }
int main( int argc, char* argv[])
{
    cout << "Projekti C++, Image Processing Endi Zhupani IE206" <<endl;
    cout << "Shtypni 1 per te filluar ekzekutimin ose -1 per te dale nga programi. " ;
    int ekzekutimi;
    cin >> ekzekutimi;
    int f_v; // hap foto apo video
    int llojiIVideos;// nga sistemi apo webcam
    int zgjedhjaPerdoruesit = -1;
    Menu shfaqMenu; //objekt qe shfaq menute e ndryshme
    ToneNgjyrash shtoTon; //objekt qe shton tone ngjyrash
    Efekte shtoEfekt;
    Editim edit;
    while (ekzekutimi != -1) // user do te vazhdoje ekzekutimin
    {   //shfaqet menuja kryesore
        cout << "MENU" <<endl;
        cout << "Butoni"<<setw(5) << " "<< "Veprimi" << endl;
        cout << setw(6) << "1" << setw(5) << " " << "Load Foto" <<endl;
        cout << setw(6) << "2" << setw(5) <<  " " <<"Load Video" <<endl;
        namedWindow("Zgjidh"); //per te perdorur waitKey()
        f_v = waitKey(0); // pret shtypjen e nje butoni
        destroyWindow("Zgjidh");
        while (true){ // mbaron kur mbaron switch
        switch (f_v){
            case 50: // shtyp '2' -> zgjedh video
                llojiIVideos= shfaqMenu.zgjedhjaVideo();//zgjedh nga do ta hapi videon
                if (llojiIVideos==49){//shtyp '1' -> zgjedh webcam
                VideoCapture cap(0);
                if (!cap.isOpened()){ //behet kontrolli a eshte hapur
                    cout << "Ngarkimi nuk u krye me sukses. Programi po mbyllet" << endl;
                    return -1;
                }
                    else cout << "Ngarkimi u krye me sukses" <<endl;
                    shfaqMenu.menuVideo(); //shfaqet menuja qe jep veprimet qe kryhen + butonat
                    namedWindow("VideoDemo", CV_WINDOW_NORMAL);//dritarja grafike
                    int vleraSliderBrightenes = 25; //vlera fillestare ne slider
                    int vleraSliderContrast = 25;
                    //krijohen trackbaret me vlere nga 0 - 50
                    createTrackbar("Brightenes", "VideoDemo", &vleraSliderBrightenes, 50);
                    createTrackbar("Contrast", "VideoDemo", &vleraSliderContrast, 50);
                    while (1)//fillohet cikli qe do shfaqe 1 pas nje te gjitha frameqe lexohen nga video
                    {
                        Mat frame;
                        Mat *framePtr = &frame;
                        bool uLexua = cap.read(frame); // lexohet frame e rradhes ne video
                        if (!uLexua) // lexim i suksesshem apo jo
                        {
                            cout <<"Nuk u lexua dot frame. Programi po mbyllet!" << endl;
                            return -1;
                        }
                        resize(*framePtr,*framePtr,Size(512,412), CV_INTER_AREA);//INTER_AREA per te patur kualitet te mire
                        if (zgjedhjaPerdoruesit == 49) // shtyp '1'
                        {
                            shtoTon.sepiaPerVideo(frame);
                        }
                        else if (zgjedhjaPerdoruesit == 50) //shtyp '2'
                        {
                            cvtColor(frame, frame, CV_BGR2GRAY); //converton nga BGR (Blue Green Red) ne Bardhe e zi
                        }
                        else if (zgjedhjaPerdoruesit == 51) // kerkon invertim ngjyrash
                        {
                            bitwise_not(frame, frame);
                        }
                        //do te jape ndryshimin e brightenes merr vlera nga -250 - 250
                        int ndryshimiBrightenes = vleraSliderBrightenes*10-250;
                        //do jape ndryshimin e contrast. merr vlera 0 - 2
                        double ndryshimiContrast = vleraSliderContrast*10/250.0;
                        // behet convertimi i imazhit sipas brightenesit dhe contrastit qe kerkohet
                        //nqs sliderat ne trackbar skane ndryshuar imazhi mbetet sic eshte.
                        frame.convertTo(frame,-1,ndryshimiContrast,ndryshimiBrightenes);
                        int tastIShtypur = waitKey(10); //i jepet perdoruesit kohe per te shtypur buton
                        imshow("VideoDemo", frame);
                        if (tastIShtypur == 112) //shtyp 'p'
                            waitKey(0); // ngecet ekzekutimi deri sa te shtypi buton tjeter
                        else if (tastIShtypur == 49) zgjedhjaPerdoruesit = tastIShtypur; // shtyp 1
                        else if (tastIShtypur == 50) zgjedhjaPerdoruesit = tastIShtypur; // 2
                        else if (tastIShtypur == 51) zgjedhjaPerdoruesit = tastIShtypur;// 3
                        // shtypet b. zgjedhja perdoruesit merr vleren 98 dhe shfaqen frame origjinale
                        // keshtu krijohet veprimi back.
                        else if (tastIShtypur == 98)
                        {
                            zgjedhjaPerdoruesit = tastIShtypur;
                            setTrackbarPos("Brightenes", "VideoDemo", 25);
                            setTrackbarPos("Contrast", "VideoDemo", 25);
                            vleraSliderBrightenes = 25;
                            vleraSliderContrast = 25;
                        }
                        else if (tastIShtypur == 27) //shtyp ESC
                        {
                            cout << "Dritarja po mbyllet"<< endl;
                            break;
                        }
                    }
                    destroyAllWindows();
                }
                if (llojiIVideos == 50){ // zgjedh te hape video nga sistemi
                    /*Funksionon njelloj si video nga webcam me perjashtim te shtimit te nje 
                     treckbari qe mban pozicionin e videos me te cilin video mund te levizet para 
                     ose mbrapa.
                     */
                    destroyAllWindows();
                    cout << "Ju lutem jepni filepath te PLOTE te file qe doni te hapni." <<endl;
                    string filepath;
                    cin.ignore(); //behet flush console sepse mund te kete karaktere '/n' te cilat perfundojne funksionin getline pa e marre inputin.
                    getline(cin, filepath);
                    cap.open(filepath);
                    if (!cap.isOpened()){
                        cout << "Ngarkimi nuk u krye me sukses. Programi po mbyllet" << endl;
                        return -1;
                    }
                    else cout << "Ngarkimi u krye me sukses" <<endl;
                    shfaqMenu.menuVideo();
                    namedWindow("VideoDemo", CV_WINDOW_NORMAL);
                    int vleraSliderBrightenes = 25; //vlera fillestare ne slider
                    int vleraSliderContrast = 25;
                    int NumriFrames = cap.get(CV_CAP_PROP_FRAME_COUNT); //merret numri i frames
                    int framesTekaluara = 0;//sa frames jane shfaqur
                    //krijohet trackbari pozicioni i cili updateon poziv=cionin e videos sa her leviz slideri
                    createTrackbar("Pozicioni", "VideoDemo", &sliderPoz, NumriFrames/100,leviziSlideri);
                    createTrackbar("Brightenes", "VideoDemo", &vleraSliderBrightenes, 50);
                    createTrackbar("Contrast", "VideoDemo", &vleraSliderContrast, 50);
                    while (1)
                    {
                        Mat frame;
                        Mat *framePtr = &frame;
                        if (framesTekaluara>=NumriFrames) // arrihet ne fund te videos
                        {
                            cout << "Video mbaroi. Shtypni 'p' per ta filluar edhe nje here ose cdo buton tjeter per te vazhduar" << endl;
                            int vazhdo = waitKey(0);
                            if (vazhdo == 112)
                                cap.set(CV_CAP_PROP_POS_FRAMES,1);
                            else //nqs nuk zgjedh te vazhdoje dil nga cikli i shfaqjes se frameve
                            break;
                        }
                        bool uLexua = cap.read(frame);
                        if (!uLexua)
                        {
                            cout <<"Nuk u lexua dot frame. Programi po mbyllet!" << endl;
                            return -1;
                        }
                        framesTekaluara = cap.get(CV_CAP_PROP_POS_FRAMES); // updatohet numri i frames qe jane shfaqur
                        //levizet slideri me cdo 100 frames qe kalojne
                        setTrackbarPos("Pozicioni", "VideoDemo", cap.get(CV_CAP_PROP_POS_FRAMES)/100);
                        resize(*framePtr,*framePtr,Size(512,412), CV_INTER_AREA);
                        int tastIShtypur =-1;
                        if (zgjedhjaPerdoruesit == 49)
                        {
                            shtoTon.sepiaPerVideo(frame);
                        }
                        if (zgjedhjaPerdoruesit == 50)
                        {
                            cvtColor(frame, frame, CV_BGR2GRAY);
                        }
                        if (zgjedhjaPerdoruesit == 51)
                        {
                            bitwise_not(frame, frame);
                        }
                        double ndryshimiBrightenes = vleraSliderBrightenes*10-250;
                        double ndryshimiContrast = vleraSliderContrast*10/250.0;
                        frame.convertTo(frame,-1,ndryshimiContrast,ndryshimiBrightenes);
                        imshow("VideoDemo", frame);
                        tastIShtypur = waitKey(10);
                        if (tastIShtypur == 112)
                            waitKey(0);
                        else if (tastIShtypur == 49) zgjedhjaPerdoruesit = tastIShtypur;
                        else if (tastIShtypur == 50) zgjedhjaPerdoruesit = tastIShtypur;
                        else if (tastIShtypur == 51) zgjedhjaPerdoruesit = tastIShtypur;
                        else if (tastIShtypur == 98)
                        {
                            zgjedhjaPerdoruesit = tastIShtypur;
                            setTrackbarPos("Brightenes", "VideoDemo", 25);
                            setTrackbarPos("Contrast", "VideoDemo", 25);
                            vleraSliderBrightenes = 25;
                            vleraSliderContrast = 25;
                        }
                        else if (tastIShtypur == 27)
                        {
                            cout << "Dritarja po mbyllet"<< endl;
                            break;
                        }
                    }
                    destroyAllWindows();
                }
                break; //end case 2
                case 49: //zgjedh te hape foto
                {
                shfaqMenu.menuFoto();
                
                    cout << "Ju lutem jepni filepath te PLOTE te file qe doni te hapni." <<endl;
                    string filepath;
                    cin.ignore(); //behet flush console sepse mund te kete karaktere '/n' te cilat perfundojne funksionin getline pa e marre inputin.
                    getline(cin, filepath);
                    cout << filepath <<endl;
                //lexohet imazhi ne strukturen Mat te openCV. Mat - matrice qe mund te marre deri ne 4 dimensione. Varet nga numri i kanaleve te pixelave dhe nga faktore te tjere
                Mat img = imread(filepath);//vendoset filepath plote
                Mat *imgptr = &img;
                if (img.empty())
                {
                    cout << "Ngarkimi nuk u krye me sukses. Programi po mbyllet" << endl;
                    cout << "Hint: Kontrollo filepath"<< endl;
                    return -1;
                }
                else cout << "Ngarkimi u krye me sukses" <<endl;
                img.convertTo(img, CV_8U); // pixelat konvertohen ne pixela unsigned char me 3 kanale dhe 3 bite
                resize(*imgptr,*imgptr,Size(512,512),  CV_INTER_AREA);
                string dritarja = "Demo Foto"; // emri i dritares ku shfaqet fotoja
                namedWindow(dritarja,CV_WINDOW_NORMAL);
                imshow(dritarja,img);
                //kopjohet fotoja origjinale ne data members fotoOrigjinale te seciles prej klasave qe do te ndryshojne foton
                shtoTon.fotoOrigjinale = img.clone();
                shtoEfekt.fotoOrigjinale = img.clone();
                edit.fotoOrigjinale = img.clone();
                int efektiParaardhes = -1;
                    zgjedhjaPerdoruesit = -1;
                while (true){ // futet ne nje cikel qe e lejon user te ndryshoje foton duke shtypur butona
                    int tastIshtypur= waitKey(20);
                    if (zgjedhjaPerdoruesit == 49) //kerkon sepia
                    {
                        //nqs imazhi ka qene negativ ose bardhe e zi kthehet ne origjinal njehere
                        //sepia nuk mund te punoje me formatet e mesiperme
                        if (efektiParaardhes == 50){
                            shtoEfekt.goBack(img);
                        }
                        shtoTon.sepia(*imgptr,dritarja);
                        tastIshtypur = waitKey(0);//ndalet ekzekutimi derisa te shtypet buton tjeter
                        zgjedhjaPerdoruesit = -1;
                        efektiParaardhes = 49;
                    }
                    else if (zgjedhjaPerdoruesit == 50) // zgjedh bardhe e zi
                    {
                        if (efektiParaardhes == 51)
                            shtoEfekt.goBack(img);
                        if(efektiParaardhes == 50)
                        {}
                        else {
                        cvtColor(img, img, CV_BGR2GRAY);
                        imshow(dritarja,img);
                        tastIshtypur = waitKey(0);
                        zgjedhjaPerdoruesit = -1;
                            efektiParaardhes = 50;}
                    }
                    else if (zgjedhjaPerdoruesit == 51) // zgjedh negative (invertimin e ngjyrave)
                    {
                        if (efektiParaardhes == 50)
                            shtoEfekt.goBack(img);
                        bitwise_not(img, img);
                        imshow(dritarja,img);
                        tastIshtypur = waitKey(0);
                        zgjedhjaPerdoruesit = -1;
                        efektiParaardhes = 51;
                    }
                    else if (zgjedhjaPerdoruesit == 52) //zgjedh blur / smooth
                    {
                        shtoEfekt.Blur_Or_Smooth(*imgptr, dritarja);
                        zgjedhjaPerdoruesit = -1;
                        destroyWindow(dritarja);
                    }
                    else if (zgjedhjaPerdoruesit == 98) // zgjedh te kthehet ne foton origjinale
                    {
                        shtoEfekt.goBack(img);
                        imshow(dritarja,img);
                        zgjedhjaPerdoruesit =-1;
                    }
                    else if (zgjedhjaPerdoruesit == 99) // zgjedh te ndryshoje kontrastin dhe brightenesin
                    {
                        if (efektiParaardhes == 51)
                            shtoEfekt.goBack(img);
                        shtoEfekt.Contrast_Brightenes(*imgptr, dritarja);
                        zgjedhjaPerdoruesit =-1;
                        destroyWindow(dritarja);
                    }
                    else if (zgjedhjaPerdoruesit == 114) // zgjedh te beje rotate imazhin
                    {
                        edit.rotullo(*imgptr, dritarja);
                        zgjedhjaPerdoruesit = -1;
                        destroyWindow(dritarja);
                    }
                    else if (zgjedhjaPerdoruesit == 115) // zgjedh sharpen
                    {
                        if (efektiParaardhes == 51) //shihet nqs ka qene i invertuar
                            shtoEfekt.goBack(img);
                        shtoEfekt.Sharpen(*imgptr);
                        imshow(dritarja, img);
                        tastIshtypur = waitKey(0);
                        zgjedhjaPerdoruesit = -1;
                    }
                    else if (zgjedhjaPerdoruesit == 27) // zgjedh te mbylle foton
                    {
                        cout << "Deshironi te ruani foton? ('y' = po; 'n' = jo)" <<endl;
                        int ruaj = waitKey(0);
                        destroyWindow(dritarja);
                        if (ruaj == 121){ //zgjedh y
                            cout << "Ju lutem jepni filepath ku doni te ruani imazhin." <<endl;
                            string filepathRuajtje;
                            cin.ignore();
                            getline(cin, filepathRuajtje);
                            string s1 = "/";
                            filepathRuajtje = s1+filepathRuajtje;
                            cout << filepathRuajtje<<endl;
                            //specifikohet se si do te ruhet imazhi
                            vector<int> parametrat_e_ruajtjes;
                            parametrat_e_ruajtjes.push_back(CV_IMWRITE_JPEG_QUALITY);
                            parametrat_e_ruajtjes.push_back(97); // 0 - 100 sa me i larte aq me mire
                            // thirret funksioni imwrite te cilit i jepen filepath ku do te ruhet,
                            // imazhi qe do te ruhet
                            // menyra se si do te ruhet
                            bool uRuajt = imwrite (filepathRuajtje,img,parametrat_e_ruajtjes);
                            if (uRuajt) // nese imwrite kthen true
                                cout << "Imazhi u ruajt me sukses." <<endl;
                            else cout << "Imazhi nuk u ruajt." <<endl;
                        }
                        cout << "Dritarja po mbyllet" <<endl;
                        zgjedhjaPerdoruesit = -1;
                        break; //mbasi ruhet ose jo dilet nga cikli
                    }
                    if (tastIshtypur == 49) zgjedhjaPerdoruesit = tastIshtypur;// 1
                    else if (tastIshtypur == 27) zgjedhjaPerdoruesit = tastIshtypur;//ESC
                    else if (tastIshtypur == 50) zgjedhjaPerdoruesit = tastIshtypur;// 2
                    else if (tastIshtypur == 51) zgjedhjaPerdoruesit = tastIshtypur;// 3
                    else if (tastIshtypur == 52) zgjedhjaPerdoruesit = tastIshtypur;// 4
                    else if (tastIshtypur == 98) zgjedhjaPerdoruesit = tastIshtypur;// b
                    else if (tastIshtypur == 99) zgjedhjaPerdoruesit = tastIshtypur;// c
                    else if (tastIshtypur == 114) zgjedhjaPerdoruesit = tastIshtypur;// r
                    else if (tastIshtypur == 115) zgjedhjaPerdoruesit = tastIshtypur;// s
                    namedWindow(dritarja); // krijohet e njejta dritare sepse ne disa prej funksioneve ajo shkaterrohet
                    imshow(dritarja,img);
                } //end while
            } //end case 1
                break;
            default: // nga menuja kryesore nuk zgjedh 1 ose 2
                ekzekutimi = -1;
                break;
            }//end switch
            cout << "Shtypni 'M' per tu kthyer ne menune kryesore, 'ESC' per te dale nga programi ose cdo buton tjeter per te vazhduar me veprimin aktual" << endl;
            namedWindow("Zgjidh");
            int butShtypur = waitKey(0);
            if (butShtypur == 109){ //shtypet 'm'
                destroyWindow("Zgjidh");
                break; // dilet nga while dhe vazhdohet ekzekutimi tek while i pare (shfaqja e menuse kryesore
            }
            if (butShtypur == 27) // shtypet ESC
            {
                ekzekutimi = -1; //qe te mos ekzekutohet as while i pare
                cout << "Programi po mbyllet!" <<endl;
                destroyWindow("Zgjidh");
                break; // dilet nga while
            }
        }//end while 2
    }//end while 1
    return 0;
}//end main
int main(int argc, char* argv[])
{
	//if we would like to calibrate our filter values, set to true.
	bool calibrationMode = true;
	
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat filteredImage;

	if(calibrationMode){
		//create slider bars for HSV filtering
		createTrackbars();
	}
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(1);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		//store image to matrix
		capture.read(cameraFeed);
		// flip(cameraFeed,cameraFeed,1); //flip camera
		filteredImage = cameraFeed.clone();
		filteredImage = filterRed(filteredImage);
		
		//convert frame from BGR to HSV colorspace
		// cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);

		if(calibrationMode==true){
		//if in calibration mode, we track objects based on the HSV slider values.
		// cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		inRange(filteredImage,Scalar(254,254,254),Scalar(255,255,255),threshold);
		morphOps(threshold);
		imshow(windowName2,threshold);
		trackFilteredObject(threshold,filteredImage,cameraFeed);
		}

		//show frames 
		imshow(windowName2,threshold);

		imshow(windowName,cameraFeed);
		imshow(windowName1,filteredImage);


		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}






	return 0;
}
int main(int argc, char* argv[])
{
	//if we would like to calibrate our filter values, set to true.
	bool calibrationMode = true;
	
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat HSV;

	if(calibrationMode){
		//create slider bars for HSV filtering
		createTrackbars();
	}
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location zero (default location for webcam)
	capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		//store image to matrix
		capture.read(cameraFeed);
		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);

		if(calibrationMode==true){
		//if in calibration mode, we track objects based on the HSV slider values.
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
		morphOps(threshold);
		imshow(windowName2,threshold);
		trackFilteredObject(threshold,HSV,cameraFeed);
		}else{
		//create some temp fruit objects so that
		//we can use their member functions/information
		Fruit apple("apple"), banana("banana"), cherry("cherry");

		
		//first find apples
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		inRange(HSV,apple.getHSVmin(),apple.getHSVmax(),threshold);
		morphOps(threshold);
		trackFilteredObject(apple,threshold,HSV,cameraFeed);
		//then bananas
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		inRange(HSV,banana.getHSVmin(),banana.getHSVmax(),threshold);
		morphOps(threshold);
		trackFilteredObject(banana,threshold,HSV,cameraFeed);
		//then cherries
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		inRange(HSV,cherry.getHSVmin(),cherry.getHSVmax(),threshold);
		morphOps(threshold);
		trackFilteredObject(cherry,threshold,HSV,cameraFeed);



		}

		//show frames 
		//imshow(windowName2,threshold);

		imshow(windowName,cameraFeed);
		//imshow(windowName1,HSV);


		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}






	return 0;
}
Example #24
0
int main(int argc , char *argv[]) {

	VideoCapture video;
	video.open(argv[1]);

	vector<vector<Point2f> > img_points;
	vector<vector<Point3f> > obj_points;

	Mat camMatrix;
	Mat distCoeff;
	vector<Mat> rvec;
	vector<Mat> tvec;


	Mat frame;

	char start;
	bool found = false;
	bool search = false;

	Size imgsize;

	while(video.read(frame)){


		cvtColor(frame,frame , CV_BGR2GRAY);

		imgsize = frame.size();



		if(search){
			vector<Point2f> corners;
			found = findChessboard(frame , corners , Size(6,3));

			if(found){
				drawChessboardCorners(frame, Size(6,3), Mat(corners), found);

				vector<Point3f>obj;
				for(int i = 0 ; i<3 ; i++){
					for(int j = 0 ; j<6 ; j++){
					obj.push_back(Point3f(i*2.36 , j*2.36 , 0.0f));
					}
				}

				obj_points.push_back(obj);
				img_points.push_back(corners);

			}

		}

		resize(frame,frame, Size(1280/2 , 1024/2));
		//resize(right,right, Size(1280/2 , 1024/2));

		imshow("video" , frame);

		start=waitKey(50);
		if(start=='q' && search == false){
			search = true;
		}else if(start=='q' && search == true){
			search = false;
		}else if(start=='x'){
			video.release();
			break;
		}
	}

	calibrate(imgsize , obj_points , img_points , camMatrix , distCoeff , rvec , tvec);


	double fovx , fovy , focallength , aspect;
	Point2d principal;

	calibrationMatrixValues(camMatrix , Size(1280,1024) , 6.9 , 5.5 , fovx , fovy , focallength , principal , aspect );

	cout<<"fovx: "<<fovx<<" fovy: "<<fovy<<" focal: "<<focallength<<" aspect: "<<aspect<<" principal.x: "<<principal.x<<
	" principal.y: "<<principal.y<< endl;

	video = VideoCapture();
	video.open(argv[1]);

	Mat undistorted;
	char save;
	while(video.read(frame)){

		undistort(frame , undistorted , camMatrix , distCoeff);
		imshow("original" , frame);
		imshow("undistorted" , undistorted);
		save =waitKey(5);
		if(save =='s'){
			FileStorage file("right_camMatrix.yml" , FileStorage::WRITE);
			file<<"camMatrix"<<camMatrix<<"distCoeffs"<<distCoeff;
			file.release();
		}
	}

	return 0;
}
Example #25
0
/// Calibrates the extrinsic parameters (rotation/translation between two
/// cameras) of the setup and saves the result to an XML file.  Intrinsics
/// are loaded from "CameraIntrinsics1.xml" / "CameraIntrinsics2.xml".
/// Press 'r' to retrieve chessboard corners and run the stereo calibration
///       's' to save and exit (only effective after a successful calibration)
///       'c' to exit without saving
/// In: inputCapture1: video feed of camera 1
///     inputCapture2: video feed of camera 2
void CalibrateEnvironment(VideoCapture& inputCapture1, VideoCapture& inputCapture2)
{
    Size boardSize;
    boardSize.width = BOARD_WIDTH;
    boardSize.height = BOARD_HEIGHT;
    
    const string fileName1 = "CameraIntrinsics1.xml";
    const string fileName2 = "CameraIntrinsics2.xml";
    
    cerr << "Attempting to open configuration files" << endl;
    FileStorage fs1(fileName1, FileStorage::READ);
    FileStorage fs2(fileName2, FileStorage::READ);
    
    // Per-camera intrinsics loaded from disk.
    Mat cameraMatrix1, cameraMatrix2;
    Mat distCoeffs1, distCoeffs2;
    
    fs1["Camera_Matrix"] >> cameraMatrix1;
    fs1["Distortion_Coefficients"] >> distCoeffs1;
    fs2["Camera_Matrix"] >> cameraMatrix2;
    fs2["Distortion_Coefficients"] >> distCoeffs2;
    
    // NOTE(review): on a failed load this only warns and continues; the
    // calibration below would then run with empty matrices.
    if (cameraMatrix1.data == NULL || distCoeffs1.data == NULL ||
        cameraMatrix2.data == NULL || distCoeffs2.data == NULL)
    {
        cerr << "Could not load camera intrinsics\n" << endl;
    }
    else{
        cerr << "Loaded intrinsics\n" << endl;
        cerr << "Camera Matrix1: " << cameraMatrix1 << endl;
        cerr << "Camera Matrix2: " << cameraMatrix2 << endl;
        
    }
    
    // Outputs of a successful calibration: camera-1→camera-2 translation
    // and per-camera rectification maps used to preview the result live.
    Mat translation;
    Mat image1, image2;
    Mat mapX1, mapX2, mapY1, mapY2;
    // One initial read only to learn the frame size.
    inputCapture1.read(image1);
    Size imageSize = image1.size();
    bool rotationCalibrated = false;
    
    while(inputCapture1.isOpened() && inputCapture2.isOpened())
    {
        inputCapture1.read(image1);
        inputCapture2.read(image2);
        
        // Once calibrated, preview the rectified views instead of the raw
        // frames.
        if (rotationCalibrated)
        {
            Mat t1 = image1.clone();
            Mat t2 = image2.clone();
            remap(t1, image1, mapX1, mapY1, INTER_LINEAR);
            remap(t2, image2, mapX2, mapY2, INTER_LINEAR);
            t1.release();
            t2.release();
        }
        
        char c = waitKey(15);
        if (c == 'c')
        {
            cerr << "Cancelling..." << endl;
            return;
        }
        else if(c == 's' && rotationCalibrated)
        {
            // Save the optimal camera matrices, rectification maps and the
            // camera-to-camera translation, then exit.
            cerr << "Saving..." << endl;
            const string fileName = "EnvironmentCalibration.xml";
            FileStorage fs(fileName, FileStorage::WRITE);
            fs << "Camera_Matrix_1" <<  getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0);
            fs << "Camera_Matrix_2" <<  getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0);
            fs << "Mapping_X_1" << mapX1;
            fs << "Mapping_Y_1" << mapY1;
            fs << "Mapping_X_2" << mapX2;
            fs << "Mapping_Y_2" << mapY2;
            fs << "Translation" << translation;
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if(c == 's' && !rotationCalibrated)
        {
            // 's' before calibration: nothing to save, just exit.
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if (c == 'r')
        {
            // Collect chessboard corners from both feeds and calibrate.
            BoardSettings s;
            s.boardSize.width = BOARD_WIDTH;
            s.boardSize.height = BOARD_HEIGHT;
            s.cornerNum = s.boardSize.width * s.boardSize.height;
            s.squareSize = (float)SQUARE_SIZE;
            
            // NOTE(review): this outer objectPoints is shadowed by the
            // vector<vector<Point3f>> of the same name below and is unused.
            vector<Point3f> objectPoints;
            vector<vector<Point2f> > imagePoints1, imagePoints2;
            
            if (RetrieveChessboardCorners(imagePoints1, imagePoints2, s, inputCapture1, inputCapture2, ITERATIONS))
            {
                // One board-corner template, replicated for every captured
                // view pair.
                vector<vector<Point3f> > objectPoints(1);
                CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                objectPoints.resize(imagePoints1.size(),objectPoints[0]);
                
                Mat R, T, E, F;
                Mat rmat1, rmat2, rvec;
                
                // First pass: recover the raw inter-camera rotation R and
                // translation T with intrinsics held fixed.
                double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F,
                                             TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01),
                                             CV_CALIB_FIX_INTRINSIC);
                
                cerr << "Original translation: " << T << endl;
                cerr << "Reprojection error reported by camera: " << rms << endl;
                
                // convert to rotation vector and then remove 90 degree offset
                Rodrigues(R, rvec);
                rvec.at<double>(1,0) -= 1.570796327;
                
                // equal rotation applied to each image...not necessarily needed
                rvec = rvec/2;
                Rodrigues(rvec, rmat1);
                invert(rmat1,rmat2);
                
                // Build the rectification maps from the split rotations.
                initUndistortRectifyMap(cameraMatrix1, distCoeffs1, rmat1,
                                        getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0), imageSize, CV_32FC1, mapX1, mapY1);
                initUndistortRectifyMap(cameraMatrix2, distCoeffs2, rmat2,
                                        getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0), imageSize, CV_32FC1, mapX2, mapY2);
                
                
                // reproject points in camera 1 since its rotation has been changed
                // need to find the translation between cameras based on the new camera 1 orientation
                for  (int i = 0; i < imagePoints1.size(); i++)
                {
                    Mat pointsMat1 = Mat(imagePoints1[i]);
                    Mat pointsMat2 = Mat(imagePoints2[i]);
                    
                    
                    undistortPoints(pointsMat1, imagePoints1[i], cameraMatrix1, distCoeffs1, rmat1,getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1, imageSize, 0));
                    undistortPoints(pointsMat2, imagePoints2[i], cameraMatrix2, distCoeffs2, rmat2,getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0));
                    
                    pointsMat1.release();
                    pointsMat2.release();
                }
                
                Mat temp1, temp2;
                R.release();
                T.release();
                E.release();
                F.release();
                
                // TODO: remove this
                // CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                // objectPoints.resize(imagePoints1.size(),objectPoints[0]);
                
                // Second pass: re-calibrate with the rectified image points
                // to recover the translation in the new camera-1 frame.
                stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F,
                                TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01),
                                CV_CALIB_FIX_INTRINSIC);
                
                // need to alter translation matrix so
                // [0] = distance in X direction (right from perspective of camera 1 is positive)
                // [1] = distance in Y direction (away from camera 1 is positive)
                // [2] = distance in Z direction (up is positive)
                translation = T;
                double temp = -translation.at<double>(0,0);
                translation.at<double>(0,0) = translation.at<double>(2,0);
                translation.at<double>(2,0) = temp;
                
                cerr << "Translation reproj: " << translation << endl;
                Rodrigues(R, rvec);
                cerr << "Reprojected rvec: " << rvec << endl;
                
                imagePoints1.clear();
                imagePoints2.clear();
                
                rvec.release();
                rmat1.release();
                rmat2.release();
                R.release();
                T.release();
                E.release();
                F.release();
                
                rotationCalibrated = true;
            }
        }
        imshow("Image View1", image1);
        imshow("Image View2", image2);
    }
}
Example #26
0
int main(int argc, char** argv) {

  // Background-subtraction tracker.  Options:
  //   -d <deviceid>   open a capture device
  //   -f <filename>   open a video file
  //   -g              enable the GUI windows
  //   -l <rate>       MOG2 learning rate (-1 = automatic)
  bool use_gui = false;
  double learningRate = -1;

  // Time measurement for the FPS/status line.
  timespec time_init;
  timespec time_now;
  timespec time_past;
  // Status-line buffer.  The formatted string below is far longer than 10
  // bytes, so the original char fps[10] + sprintf was a guaranteed buffer
  // overflow (UB); size it generously and use snprintf.
  char fps[128] = "";

  clock_gettime(CLOCK_MONOTONIC, &time_init);
  clock_gettime(CLOCK_MONOTONIC, &time_now);

  // video source
  VideoCapture cap;

  if (argc > 1) {
    for (int i = 1; i < argc; i++) {

      // -d <deviceid>
      if (string(argv[i]) == "-d") {
        int device_id = -1;
        sscanf(argv[i+1], "%i", &device_id);
        cap.open(device_id);
        i++;

        if (cap.isOpened() != true) {
          cerr << "Error: Device " << device_id << " could not be opened.\n exiting..." << endl;
          return -1;
        }
      }

      // -f <filename>
      else if (string(argv[i]) == "-f") {
        string filename = string(argv[i+1]);
        cap.open(filename);
        i++;

        if (cap.isOpened() != true) {
          cerr << "Error: \"" << filename << "\" could not be opened.\n exiting..." << endl;
          return -1;
        }
      }
      // -g (gui)
      else if (string(argv[i]) == "-g") {
        use_gui = true;
      }
      // -l <learning rate>
      else if (string(argv[i]) == "-l") {
        sscanf(argv[i+1], "%lf", &learningRate);
        i++;
      }
      else {
        cerr << "Error: unknown parameter \"" << string(argv[i]) << "\"\n";
        usage();
        return -1;
      }
    }
  }

  // Fall back to the default device when no source was opened.
  if (cap.isOpened() != true) {
    cap.open(0);
  }

  if (cap.isOpened() != true) {
    cerr << "Error: Cannot read device 0.\n exiting..." << endl;
    return -1;
  }

  Mat frame;  // the current frame
  Mat foreground, background;

  // MOG2 background subtractor: 300-frame history, variance threshold 16,
  // shadow detection off.
  BackgroundSubtractorMOG2 bg(300, 16, false);

  std::vector<std::vector<cv::Point> > contours;

  if (use_gui == true) {
    namedWindow("frame", CV_WINDOW_AUTOSIZE); // current frame
    namedWindow("background", CV_WINDOW_NORMAL);
  }

  cout << cap.get(CV_CAP_PROP_FRAME_WIDTH) << " x " << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;

  cap.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
  cap.set(CV_CAP_PROP_FRAME_HEIGHT, 768);

  // main loop
  for (int f = 0;; f++) {
    // Timestamp the start of this frame for the FPS computation.
    clock_gettime(CLOCK_MONOTONIC, &time_past);

    if (!cap.read(frame)) {
      continue;
    }

    // Update the background model and extract the foreground mask.
    bg.operator() (frame, foreground, learningRate);
    if (use_gui == true) bg.getBackgroundImage(background);

    // Remove speckle noise, then restore the blob size.
    erode(foreground, foreground, Mat(), Point(-1, -1), 3);
    dilate(foreground, foreground, Mat(), Point(-1, -1), 3);

    if (use_gui == true) {
      findContours(foreground, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
      drawContours(frame, contours, -1, Scalar(0,0,255), 1);
    }

    double area;
    int size = contours.size();
    vector<vector<Point> > contours_poly(contours.size());
    vector<RotatedRect> boundRect(contours.size());
    // Indices of contours large enough to be treated as objects; only these
    // have a valid boundRect.  (The original drew a circle for *every*
    // contour, so small ones produced spurious markers at the
    // default-constructed center (0,0).)
    vector<int> bigContours;

    for (int i = 0; i < size; i++) {
      area = contourArea(contours[i]);
      if (area > 2000) {
        if (use_gui == true) {
          drawContours(frame, contours, i, Scalar(0,255,255), 2);
          approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
          boundRect[i] = minAreaRect(contours_poly[i]);
          bigContours.push_back(i);
        }
      }
    }

    // show images
    if (use_gui == true) {
      // Mark only the contours whose bounding rect was actually computed.
      for (size_t k = 0; k < bigContours.size(); k++) {
        circle(frame, boundRect[bigContours[k]].center, 6, Scalar(0, 255, 0), 3);
      }

      imshow("frame", frame);
      imshow("background", background);
    }

    // calculate fps and display
    clock_gettime(CLOCK_MONOTONIC, &time_now);

    snprintf(fps, sizeof(fps), "%.2f fps, frame: %i, time: %.3f s, l: %.2e",
             getFps(calcTimeDiff(time_past, time_now)), f,
             calcTimeDiff(time_init, time_now), learningRate);
    if (use_gui == true) {
      displayOverlay("frame", fps, 0);
    }
    cout << fps << endl;

    int c = waitKey(1);
    if (c == 'q' || c == 'Q' || (c & 255) == 27) {
      break;
    }
  }

  return 0;

}
Example #27
0
int main(int argc, char* argv[])
{
	VideoCapture vcap;

	Mat image;
	// Camera/stream source; pass videoStreamAddress to vcap.open() below to
	// read from an IP camera instead of the local webcam.
	const string videoStreamAddress = "http://localhost:8082";//put here the camera IP

	// Load the body classifier before touching the video stream.
	if (!body_cascade.load(body_cascade_name))
	{
		printf("--(!)Error loading\n"); return -1;
	}

	if (!vcap.open(0))//use videoStreamAddress in brackets if you want to use IP camera
	{
		cout << "Error opening video stream or file" <<endl;
		return -1;
	}

	bool result;
	bool detect = true;   // false while inside the post-detection cool-down

	while (1)
	{
		if (!vcap.read(image))
		{
			// No frame available: wait for a key and retry.  The original
			// fell through here and ran detection() on a stale or empty
			// image, which is a crash on the very first frame.
			cout << "No frame" << endl;
			waitKey(0);
			continue;
		}

		result = detection(image);

		if ((result == true) && detect == true)
		{
			cout << "detected" << endl;
			// Write the person count (file is created or overwritten).
			ofstream file("/home/nico/node/Npersone.txt");
			if (!file) {
				cout<<"Errore nella creazione del file!";
				return -1;
			}

			file << persone;
			file.close(); //close the file

			// Notify the geofence endpoint (the sprintf/assign round-trip
			// of the original added nothing).
			system("curl http://localhost:3000/geofence");
			detect = false;
		}

		if (detect == false)
		{
			// Cool-down: display ~9000 frames (several minutes) before
			// another notification can be sent.
			for (int i = 0; i < 9000; i++)
			{
				vcap.read(image);
				imshow(window_name, image);
				waitKey(10);
			}
			detect = true;
		}
		else
		{
			cout << "nobody" << endl;
		}
	}

	return 0;
}
Example #28
0
// Segments a hand by color keying (hue band + saturation threshold) and
// overlays the centroid and its motion vector on the video.
void colorKeying() {
    // Load the video.
    VideoCapture video;
    video.open(videoPath);
    int width = video.get(CV_CAP_PROP_FRAME_WIDTH);
    int height = video.get(CV_CAP_PROP_FRAME_HEIGHT);

    // UI: trackbars for the hue band and the saturation threshold.
    namedWindow("Video");
    namedWindow("Hue");
    createTrackbar("Lower", "Hue", 0, 180);
    setTrackbarPos("Lower", "Hue", lowerHue);
    createTrackbar("Upper", "Hue", 0, 180);
    setTrackbarPos("Upper", "Hue", upperHue);

    namedWindow("Saturation");
    createTrackbar("Select", "Saturation", 0, 255);
    setTrackbarPos("Select", "Saturation", threshSaturation);
    namedWindow("Maske");

    Mat hsvImage(height, width, CV_8UC3);
    // Velocity estimator for the tracked centroid.  (Removed the unused
    // locals HandMotion handMotion, Point lastCentroid and int frameNumber
    // from the original — none were ever read.)
    MotionVelocity handVelocity;
    while(true) {
        Mat videoFrame;
        if (video.read(videoFrame) == false) {
            break;
        }

        // Convert to HSV and split into planes.
        cvtColor(videoFrame, hsvImage, CV_BGR2HSV);
        vector<Mat> hsvPlanes;
        split(hsvImage, hsvPlanes);

        // Threshold the saturation plane.
        threshold(hsvPlanes[1], hsvPlanes[1], getTrackbarPos("Select", "Saturation"), 255, THRESH_BINARY);
        imshow("Saturation", hsvPlanes[1]);

        // Threshold the hue plane to the selected band.
        inRange(hsvPlanes[0], getTrackbarPos("Lower", "Hue"),
                getTrackbarPos("Upper", "Hue"), hsvPlanes[0]);
        imshow("Hue", hsvPlanes[0]);

        // Combine both masks.
        multiply(hsvPlanes[0], hsvPlanes[1], hsvPlanes[0]);

        // Median filter against speckle noise.
        medianBlur(hsvPlanes[0], hsvPlanes[0], 5);

        // Show the final mask.
        imshow("Maske", hsvPlanes[0]);

        // Compute the centroid of the mask and mark it.
        Point center = centroidOfWhitePixels(hsvPlanes[0]);
        cross(videoFrame, center, crossLength, colorGreen);

        // Estimate and draw the motion vector of the centroid.
        handVelocity.setPosition(center);
        Point velocity = handVelocity.getVelocity();
        line(videoFrame, center, center+velocity*2, Scalar(255,0,0), 3);

        imshow("Video", videoFrame);
        waitKey(100);
    }
}
Example #29
0
int main(int argc, char** argv) {
    VideoCapture capture;
    Mat currentFrame;
    Mat lastGrayFrame;
    Mat currentGrayFrame;
    BoundingBox pbox;
    vector<Point2f> points1;
    vector<Point2f> points2;
    bool isDetected = true;
    
    capture.open(0);
    
    if(!capture.isOpened()){
        // вебка недоступна, завершаем программу
        return 1;
    }

    cvNamedWindow("VKR Kostenko 472SE", CV_WINDOW_AUTOSIZE);
    cvSetMouseCallback("VKR Kostenko 472SE", mouseClickHandler, NULL);

    capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

    
    while(true){
        while(noRect)
        {
            capture >> currentFrame;
            cvtColor(currentFrame, lastGrayFrame, CV_RGB2GRAY);

            drawBox(currentFrame, rect);

            // показывваем новый кадр
            imshow("VKR Kostenko 472SE", currentFrame);

            if(cvWaitKey(33)=='q'){
                return 0;
            }
        }

        if(rect.width > Constants::min_win && rect.height > Constants::min_win){
            // обвел квадрат подходящего размера
            // удаляем обработчик мышки
            cvSetMouseCallback("VKR Kostenko 472SE", NULL, NULL );
            break;
        }else{
            // необходимо выделить новый квадрат
            noRect = true;
        }
    }
    

    Watcher watcher(lastGrayFrame, rect);

    while(capture.read(currentFrame)){
        cvtColor(currentFrame, currentGrayFrame, CV_RGB2GRAY);
        
        // ищем совпадение
        watcher.processFrame(lastGrayFrame, currentGrayFrame, points1,points2, pbox, isDetected, flag);
        
        // рисуем квадрат, если нашли совпадение
        if(isDetected){
            drawBox(currentFrame, pbox);
        }
        
        // показывваем новый кадр
        imshow("VKR Kostenko 472SE", currentFrame);
        
        swap(lastGrayFrame, currentGrayFrame);
        points1.clear();
        points2.clear();
        
        if(cvWaitKey(33)=='q'){
            break;
        }
    }
    
    return 0;
}
int main()
{
	// Chessboard camera-calibration tool: collects `numBoards` detected
	// board views from an MJPEG network stream, runs calibrateCamera()
	// to estimate the intrinsic matrix and distortion coefficients, then
	// shows a live undistorted preview until ESC / Ctrl-C.
	int numBoards = 0;
	int numCornersHor = 0;
	int numCornersVer = 0;

  #ifdef __unix__
     signal(SIGINT, quit_signal_handler); // listen for ctrl-C
  #endif

	// Validate every prompt: unchecked scanf left the counts as garbage on
	// bad input, and numBoards <= 0 would reach calibrateCamera() with
	// empty point vectors (OpenCV assertion failure).
	printf("Enter number of corners along width: ");
	if (scanf("%d", &numCornersHor) != 1 || numCornersHor < 2) {
		printf("Invalid corner count\n");
		return 1;
	}

	printf("Enter number of corners along height: ");
	if (scanf("%d", &numCornersVer) != 1 || numCornersVer < 2) {
		printf("Invalid corner count\n");
		return 1;
	}

	printf("Enter number of boards: ");
	if (scanf("%d", &numBoards) != 1 || numBoards < 1) {
		printf("Invalid board count\n");
		return 1;
	}

	const int numSquares = numCornersHor * numCornersVer;
	Size board_sz = Size(numCornersHor, numCornersVer);

	vector < vector < Point3f > >object_points;	// board-frame 3D corners, one set per snap
	vector < vector < Point2f > >image_points;	// matching detected pixel corners

	vector < Point2f > corners;
	int successes = 0;

	Mat image;
	Mat gray_image;

	// video capture object to acquire the network camera feed
	const string videoStreamAddress = "http://192.168.1.90/mjpg/video.mjpg";
	VideoCapture capture;

	capture.open(videoStreamAddress);	// set to 0 to use the webcam
	if (!capture.isOpened()) {
		// bail out early instead of crashing on the first empty frame
		printf("Failed to open video stream\n");
		return 1;
	}

	// set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720);

	// store first image to matrix; cvtColor below aborts on an empty Mat,
	// so a failed read must be caught here
	if (!capture.read(image)) {
		printf("Failed to read from video stream\n");
		return 1;
	}

	// Ideal board-frame coordinates of every inner corner: an integer grid
	// in the Z=0 plane (the integer division is intentional: row, column).
	vector < Point3f > obj;
	for (int j = 0; j < numSquares; j++)
		obj.push_back(Point3f
			      (j / numCornersHor, j % numCornersHor, 0.0f));

	while (successes < numBoards) {

		cvtColor(image, gray_image, CV_BGR2GRAY);
		bool found = findChessboardCorners(image, board_sz, corners,
						   CALIB_CB_ADAPTIVE_THRESH |
						   CALIB_CB_FILTER_QUADS);

		if (found) {
			// refine detected corners to sub-pixel accuracy
			// before they are stored for calibration
			cornerSubPix(gray_image, corners, Size(11, 11),
				     Size(-1, -1),
				     TermCriteria(CV_TERMCRIT_EPS |
						  CV_TERMCRIT_ITER, 30, 0.1));
			drawChessboardCorners(gray_image, board_sz, corners,
					      found);
		}

		imshow("win1", image);
		imshow("win2", gray_image);

		if (!capture.read(image)) {
			printf("Video stream ended\n");
			return 1;
		}
		if (quit_signal) exit(0); // exit cleanly on interrupt

		int key = waitKey(50);

		if (key == 27) {	// ESC aborts without calibrating
			return 0;
		}

		if (key == ' ' && found) {	// SPACE stores the current detection
			image_points.push_back(corners);
			object_points.push_back(obj);

			printf("Snap stored!");

			successes++;
		}
	}

	// TODO: Output these into XML using OpenCV
	Mat intrinsic = Mat(3, 3, CV_32FC1);
	Mat distCoeffs;
	vector < Mat > rvecs;	// per-view rotation vectors
	vector < Mat > tvecs;	// per-view translation vectors

	// initial guess: unit focal lengths (aspect ratio 1)
	intrinsic.ptr < float >(0)[0] = 1;
	intrinsic.ptr < float >(1)[1] = 1;

	calibrateCamera(object_points, image_points, image.size(), intrinsic,
			distCoeffs, rvecs, tvecs);

	// Live undistorted preview. Breaking (instead of exit(0)/endless loop)
	// on ESC, Ctrl-C or stream end makes capture.release() reachable.
	Mat imageUndistorted;
	while (capture.read(image)) {
		if (quit_signal) break; // exit cleanly on interrupt
		undistort(image, imageUndistorted, intrinsic, distCoeffs);

		imshow("win1", image);
		imshow("win2", imageUndistorted);
		if (waitKey(1) == 27)
			break;
	}
	capture.release();

	return 0;
}