int main(int argc, char** argv)
{
    int flag_use_image = 0;
    if (argc != 2)
    {
        std::cout << "Usage: ./init num" << std::endl;
        std::cout << "num: 0 - image" << std::endl
                  << "     1 - video" << std::endl
                  << "     2 - dataset" << std::endl;
        return -1;
    }
    else
    {
        std::string val = argv[1];
        if (val == "0")
        {
            // flag_use_image stays 0: image input
        }
        else if (val == "1")
        {
            flag_use_image = 1;
        }
        else if (val == "2")
        {
            flag_use_image = 2;
        }
        else
        {
            std::cout << "num error" << std::endl;
            return -1; // was missing: previously fell through to image mode after the error
        }
    }

    std::string winName = "Image";
    namedWindow(winName, WINDOW_NORMAL);

    mat_canvas = imread("data/book.jpg");
    if (mat_canvas.empty())
    {
        std::cout << "Image is not opened." << std::endl;
        return -1;
    }

    if (flag_use_image == 0)
    {
        setMouseCallback(winName, mouseEvent);

        // // write mat to file
        // std::string fileName = "mat_descriptors.yml";
        // FileStorage fs(fileName, FileStorage::WRITE);
        // fs << "descriptors" << mat_descriptors;
        // fs.release();
        // std::cout << fileName << " is generated." << std::endl;

        // Mat copy;
        // FileStorage fs2("mat_descriptors.yml", FileStorage::READ);
        // fs2["descriptors"] >> copy;
        // fs2.release();
        // FileStorage fs3("test.yml", FileStorage::WRITE);
        // fs3 << "descriptors" << copy;
        // fs3.release();

        // std::vector<cv::Point3f> vec_pois;
        // vec_pois.push_back(Point3f(0, 0, 0));
        // vec_pois.push_back(Point3f(1.1, 0.1, 0));
        // vec_pois.push_back(Point3f(0.3, 2.1, 0));
        // vec_pois.push_back(Point3f(7.3, 2, 0));
        // vec_pois.push_back(Point3f(1.3, 4.1, 0));
        // FileStorage fs3("POIs.yml", FileStorage::WRITE);
        // fs3 << "POIs" << vec_pois;
        // fs3.release();

        while (1)
        {
            imshow(winName, mat_canvas);
            waitKey(30);
        }
    }
    //-- use dataset
    else if (flag_use_image == 2)
    {
        useDataset();
        while (1)
        {
            imshow(winName, mat_canvas);
            waitKey(30);
        }
    }
    else // video input: tracking features
    {
        VideoCapture cap;
        cap.open(1);
        if (!cap.isOpened()) // check if we succeeded
            return -1;
        cap.set(CV_CAP_PROP_FRAME_WIDTH, 800);
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 600);

        namedWindow("Keypoints", WINDOW_NORMAL);

        Mat mat_image;
        int num_vecKeypoints;
        int num_trackingPoints = 50;
        Mat mat_descriptors;
        char keyInput;

        //-- Step 1: Detect the keypoints using the detector
        // int minHessian = 400;
        OrbFeatureDetector detector;
        FREAK extractor;

        while (1)
        {
            cap >> mat_image;

            std::vector<KeyPoint> vec_keypoints, vec_goodKeypoints;
            detector.detect(mat_image, vec_keypoints);
            num_vecKeypoints = vec_keypoints.size();

            // Keep only the strongest responses.
            std::sort(vec_keypoints.begin(), vec_keypoints.end(),
                      jlUtilities::sort_feature_response);
            if (num_vecKeypoints > num_trackingPoints)
            {
                num_vecKeypoints = num_trackingPoints;
                vec_keypoints.erase(vec_keypoints.begin() + num_vecKeypoints,
                                    vec_keypoints.end());
            }

            extractor.compute(mat_image, vec_keypoints, mat_descriptors);

            // write mat to file
            std::string fileName = "mat_descriptors.yml";
            FileStorage fs(fileName, FileStorage::WRITE);
            fs << "descriptors" << mat_descriptors;
            fs.release();
            std::cout << fileName << " is generated." << std::endl;

            //-- Draw keypoints
            Mat mat_kpImage;
            drawKeypoints(mat_image, vec_keypoints, mat_kpImage,
                          Scalar::all(-1), DrawMatchesFlags::DEFAULT);

            // Loop over num_vecKeypoints (was num_trackingPoints, which could
            // index past the end when fewer keypoints were detected).
            for (int i = 0; i < num_vecKeypoints; i++)
            {
                cv::circle(mat_kpImage,
                           vec_keypoints[i].pt,    // center
                           3,                      // radius
                           cv::Scalar(0, 0, 255),  // color
                           -1);                    // negative thickness = filled

                char szLabel[50];
                sprintf(szLabel, "%d", i);
                putText(mat_kpImage, szLabel, vec_keypoints[i].pt,
                        cv::FONT_HERSHEY_PLAIN, // font face
                        1.0,                    // font scale
                        cv::Scalar(255, 0, 0),  // font color
                        1);                     // thickness
            }

            //-- Show detected (drawn) keypoints
            imshow("Keypoints", mat_kpImage);
            waitKey(30);
        }
    }

    return 0;
}
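The comparator passed to std::sort above comes from a project-local helper that is not part of this excerpt. A minimal sketch, assuming it simply orders keypoints by descending detector response:

// Hypothetical comparator matching the call above: sorts keypoints so the
// strongest responses come first. jlUtilities is assumed to be a small
// project-local helper namespace; this is a sketch, not the original code.
namespace jlUtilities
{
    inline bool sort_feature_response(const cv::KeyPoint& a, const cv::KeyPoint& b)
    {
        return a.response > b.response; // descending: strongest first
    }
}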
static AssessmentRes assessment(char* video, char* gt_str, char* algorithms[], char* initBoxes_str[], int algnum)
{
    char buf[200];
    int start_frame = 0;
    int linecount = 0;
    Rect2d boundingBox;
    vector<double> averageMillisPerFrame(algnum, 0.0);
    static int videoNum = 0;
    videoNum++;

    FILE* gt = fopen(gt_str, "r");
    if (gt == NULL)
    {
        printf("cannot open the ground truth file %s\n", gt_str);
        exit(EXIT_FAILURE);
    }
    for (linecount = 0; fgets(buf, sizeof(buf), gt) != NULL; linecount++)
        ;
    if (linecount == 0)
    {
        printf("ground truth file %s has no lines\n", gt_str);
        exit(EXIT_FAILURE);
    }
    fseek(gt, 0, SEEK_SET);
    if (fgets(buf, sizeof(buf), gt) == NULL)
    {
        printf("ground truth file %s has no lines\n", gt_str);
        exit(EXIT_FAILURE);
    }

    std::vector<Rect2d> initBoxes(algnum);
    for (int i = 0; i < algnum; i++)
    {
        printf("%s %s\n", algorithms[i], initBoxes_str[CMDLINEMAX * i]);
        if (lineToRect(initBoxes_str[CMDLINEMAX * i], boundingBox) < 0)
        {
            printf("please, specify bounding box for video %s, algorithm %s\n", video, algorithms[i]);
            printf("FYI, initial bounding box in ground truth is %s\n", buf);
            if (gt != NULL)
            {
                fclose(gt);
            }
            exit(EXIT_FAILURE);
        }
        else
        {
            initBoxes[i].x = boundingBox.x;
            initBoxes[i].y = boundingBox.y;
            initBoxes[i].width = boundingBox.width;
            initBoxes[i].height = boundingBox.height;
        }
    }

    VideoCapture cap;
    cap.open( String(video) );
    cap.set( CAP_PROP_POS_FRAMES, start_frame );

    if( !cap.isOpened() )
    {
        printf("cannot open video %s\n", video);
        help();
    }

    Mat frame;
    namedWindow( "Tracking API", 1 );

    std::vector<Ptr<Tracker> > trackers(algnum);
    for (int i = 0; i < algnum; i++)
    {
        trackers[i] = Tracker::create( algorithms[i] );
        if (trackers[i] == NULL)
        {
            printf("error in the instantiation of the tracker %s\n", algorithms[i]);
            if (gt != NULL)
            {
                fclose(gt);
            }
            exit(EXIT_FAILURE);
        }
    }

    cap >> frame;
    frame.copyTo( image );
    if (lineToRect(buf, boundingBox) < 0)
    {
        if (gt != NULL)
        {
            fclose(gt);
        }
        exit(EXIT_FAILURE);
    }
    rectangle( image, boundingBox, palette[0], 2, 1 );
    for (int i = 0; i < (int)trackers.size(); i++)
    {
        rectangle( image, initBoxes[i], palette[i + 1], 2, 1 );
        if( !trackers[i]->init( frame, initBoxes[i] ) )
        {
            printf("could not initialize tracker %s with box %s at video %s\n", algorithms[i], initBoxes_str[i], video);
            if (gt != NULL)
            {
                fclose(gt);
            }
            exit(EXIT_FAILURE);
        }
    }
    imshow( "Tracking API", image );

    int frameCounter = 0;
    AssessmentRes res((int)trackers.size());

    for ( ;; )
    {
        if( !paused )
        {
            cap >> frame;
            if (frame.empty())
            {
                break;
            }
            frame.copyTo( image );

            if (fgets(buf, sizeof(buf), gt) == NULL)
            {
                printf("ground truth is over\n");
                break;
            }
            if (lineToRect(buf, boundingBox) < 0)
            {
                if (gt != NULL)
                {
                    fclose(gt);
                }
                exit(EXIT_FAILURE);
            }
            rectangle( image, boundingBox, palette[0], 2, 1 );
            frameCounter++;

            for (int i = 0; i < (int)trackers.size(); i++)
            {
                bool trackerRes = true;
                clock_t start;
                start = clock();
                trackerRes = trackers[i]->update( frame, initBoxes[i] );
                start = clock() - start;
                averageMillisPerFrame[i] += 1000.0 * start / CLOCKS_PER_SEC;
                if (trackerRes == false)
                {
                    initBoxes[i].height = initBoxes[i].width = -1.0;
                }
                else
                {
                    rectangle( image, initBoxes[i], palette[i + 1], 2, 1 );
                }
                for (int j = 0; j < (int)res.results[i].size(); j++)
                    res.results[i][j]->assess(boundingBox, initBoxes[i]);
            }

            imshow( "Tracking API", image );
            if (saveImageKey)
            {
                char inbuf[LINEMAX];
                sprintf(inbuf, "image%d_%d.jpg", videoNum, frameCounter);
                imwrite(inbuf, image);
            }
            if ((frameCounter + 1) >= ASSESS_TILL)
            {
                break;
            }
        }

        // Key handling sits outside the paused check so 'p' can unpause
        // (originally it was inside, making pause irreversible).
        char c = (char) waitKey( 2 );
        if( c == 'q' )
            break;
        if( c == 'p' )
            paused = !paused;
    }

    if (gt != NULL)
        fclose(gt);
    return res;
}
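lineToRect is used throughout assessment() but is not included in this excerpt. A plausible minimal sketch, assuming each ground-truth line is a comma-separated x,y,width,height box and that a negative return value signals a parse failure:

// Hypothetical helper matching the calls above: parses "x,y,w,h" into a
// Rect2d and returns a negative value when the line cannot be parsed.
static int lineToRect(const char* line, cv::Rect2d& box)
{
    double x, y, w, h;
    if (line == NULL || sscanf(line, "%lf,%lf,%lf,%lf", &x, &y, &w, &h) != 4)
        return -1;
    box = cv::Rect2d(x, y, w, h);
    return 0;
}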
/*
 * To work with Kinect or XtionPRO the user must install the OpenNI library and
 * the PrimeSense module for OpenNI, and configure OpenCV with the WITH_OPENNI
 * flag ON (using CMake).
 */
int main( int argc, char* argv[] )
{
    time_t start = time(0);
    bool isColorizeDisp, isFixedMaxDisp;
    int imageMode;
    bool retrievedImageFlags[5];
    string filename;
    bool isVideoReading;
    // NOTE: with parseCommandLine() commented out, imageMode, retrievedImageFlags
    // and isVideoReading are used uninitialized below.
    //parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode,
    //                  retrievedImageFlags, filename, isVideoReading );

    if (pcl::io::loadPCDFile<pcl::PointXYZ>("test_pcd.pcd", *cloud_golden) == -1) // load the file
    {
        PCL_ERROR("Couldn't read file test_pcd.pcd \n");
        return (-1);
    }
    std::cout << "Loaded " << cloud_golden->width * cloud_golden->height
              << " data points from test_pcd.pcd with the following fields: " << std::endl;
    // pcl::copyPointCloud (*cloud_golden, *cloud_transformed);

    cout << "Device opening ..." << endl;
    cout << CV_CAP_OPENNI << endl;
    VideoCapture capture;
    if( isVideoReading )
        capture.open( filename );
    else
        capture.open( CV_CAP_OPENNI );
    cout << "done." << endl;

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    if( !isVideoReading )
    {
        bool modeRes = false;
        switch ( imageMode )
        {
            case 0:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
                break;
            case 1:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ );
                break;
            case 2:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ );
                break;
            // The following modes are only supported by the Xtion Pro Live
            case 3:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ );
                break;
            case 4:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ );
                break;
            default:
                CV_Error( CV_StsBadArg, "Unsupported image mode property.\n" );
        }
        if (!modeRes)
            cout << "\nThis image mode is not supported by the device, the default value "
                    "(CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
    }

    if( capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0 )
        capture.set( CV_CAP_PROP_OPENNI_REGISTRATION, 1 );

    // Print some available device settings.
    cout << "\nDepth generator output mode:" << endl
         << "FRAME_WIDTH      " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl
         << "FRAME_HEIGHT     " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl
         << "FRAME_MAX_DEPTH  " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl
         << "FPS              " << capture.get( CV_CAP_PROP_FPS ) << endl
         << "REGISTRATION     " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl;
    if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
    {
        cout << "\nImage generator output mode:" << endl
             << "FRAME_WIDTH   " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_WIDTH ) << endl
             << "FRAME_HEIGHT  " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_HEIGHT ) << endl
             << "FPS           " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FPS ) << endl;
    }
    else
    {
        cout << "\nDevice doesn't contain image generator." << endl;
        if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
            return 0;
    }

    if( !face_cascade.load( cascade_name[0] ) ) { printf("--(!)Error loading\n"); return -1; }
    if( !eyes_cascade.load( cascade_name[1] ) ) { printf("--(!)Error loading\n"); return -1; }
    //printf("Entering for\n");

    int last_printed = 0;
    int WAIT_SEC = 10;
    viewer = simpleVis(cloud_transformed);

    for(;;)
    {
        Mat depthMap;
        Point image_center;
        Mat Display_image;
        Mat validDepthMap;
        Mat disparityMap;
        Mat bgrImage;
        Mat grayImage;
        Mat show;
        double seconds_since_start = difftime( time(0), start );

        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) )
            {
                const float scaleFactor = 0.05f;
                depthMap.convertTo( show, CV_8UC1, scaleFactor );
                //imshow( "depth map", show );
            }

            if( capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) )
            {
                // Align nose with the circle
                int rad = 40;
                int row_rgb = bgrImage.rows;
                int col_rgb = bgrImage.cols;
                image_center.y = row_rgb/2 - 100;
                image_center.x = col_rgb/2;
                Display_image = bgrImage.clone(); // copy bgrImage so the circle is shown only temporarily
                circle( Display_image, image_center, rad, Scalar( 255, 0, 0 ), 3, 8, 0 );
                imshow( "rgb image", Display_image );
                // Wait for a key press
                //std::cin.ignore(); // Now it will capture golden data
            }

            /* if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) )
                   imshow( "gray image", grayImage ); */

            int seconds = int(seconds_since_start);
            if( last_printed < seconds && seconds <= WAIT_SEC )
            {
                printf(" Capturing Golden Face template after %d Seconds ...\n\n", WAIT_SEC - seconds);
                last_printed = seconds;
            }
            if( !depthMap.empty() && !bgrImage.empty() && (seconds_since_start > WAIT_SEC) )
                detectAndDisplay(bgrImage, depthMap, argc, argv);
            //writeMatToFile("depth.txt", depthMap);
        }

        viewer->spinOnce(10);
        boost::this_thread::sleep(boost::posix_time::microseconds(10));
        viewer->removePointCloud("sample cloud");
        viewer->addPointCloud<pcl::PointXYZ>(cloud_transformed, "sample cloud");
        viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud");

        if( waitKey( 30 ) >= 0 )
            break;
    }

    Trans_dump.close();
    return 0;
}
int main(int argc, const char **argv)
{
    VideoCapture cap;
    Tracker objTracker;

    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help();
        return 0;
    }

    cap.open(argv[1]);
    if (!cap.isOpened())
    {
        help();
        cout << "***Could not access file...***\n";
        return -1;
    }

    Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),   // acquire input size
                  (int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));
    cout << hot_keys;

    bool paused = false;
    Mat frame;
    cap >> frame;
    objTracker.Init(S, Tracker::InitParams());

    int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    VideoWriter outputVideo;
    // outputVideo.open("output.mp4", ex, cap.get(CV_CAP_PROP_FPS), S, true);

    Mat out;
    try
    {
        while (1)
        {
            if (!paused && Tracker::g_initTracking)
            {
                cap >> frame;
                if (frame.empty())
                    break;
            }
            if (!paused)
            {
                objTracker.ProcessFrame(frame, out);
            }
            imshow("CamShift", out);
            // outputVideo << out;

            char c = (char)waitKey(10);
            if (c == 27)
                break;
            switch (c)
            {
            case 'b':
                objTracker.ToggleShowBackproject();
                break;
            case 'c':
                // trackObject = 0;
                // histimg = Scalar::all(0);
                break;
            case 'h':
                objTracker.HideControlsGUI();
                // showHist = !showHist;
                // if (!showHist)
                //     destroyWindow("Histogram");
                // else
                //     namedWindow("Histogram", 1);
                break; // was commented out, letting 'h' fall through to 'p'
            case 'p':
                paused = !paused;
                break;
            case 'r':
                cap.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
                // outputVideo.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
                cap >> frame;
                objTracker.Init(S, Tracker::InitParams());
                break;
            default:
                ;
            }
        }
    }
    catch (const cv::Exception &e)
    {
        std::cerr << e.what();
        cap.release();
        outputVideo.release();
        return 1;
    }

    cap.release();
    outputVideo.release();
    return 0;
}
int main(int argc, char* argv[])
{
    int numBoards = atoi(argv[1]);
    int board_w = atoi(argv[2]);
    int board_h = atoi(argv[3]);

    Size board_sz = Size(board_w, board_h);
    int board_n = board_w * board_h;

    vector<vector<Point3f> > object_points;
    vector<vector<Point2f> > image_points;
    vector<Point2f> corners;

    vector<Point3f> obj;
    for (int j = 0; j < board_n; j++)
    {
        // Grid coordinates in units of board squares, z = 0.
        obj.push_back(Point3f(j / board_h, j % board_h, 0.0f));
    }

    Mat img, gray;

    cout << "Device opening ..." << endl;
    VideoCapture capture;
    capture.open( CV_CAP_OPENNI );

    // registration
    if( capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0 )
    {
        capture.set( CV_CAP_PROP_OPENNI_REGISTRATION, 1 );
        cout << "\nImages have been registered ..." << endl;
    }
    //cout << cv::getBuildInformation() << endl;

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
    {
        cout << "\nImage generator output mode:" << endl
             << "FRAME_WIDTH " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_WIDTH )
             << " | FRAME_HEIGHT " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_HEIGHT )
             << " | FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FPS ) << endl;
    }
    else
    {
        cout << "\nDevice doesn't contain image generator." << endl;
    }

    int success = 0;
    int k = 0;
    bool found = false;

    while (success < numBoards)
    {
        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        //capture.retrieve( depth, CV_CAP_OPENNI_DEPTH_MAP );
        capture.retrieve( img, CV_CAP_OPENNI_BGR_IMAGE );
        cvtColor(img, gray, CV_BGR2GRAY);

        found = findChessboardCorners(gray, board_sz, corners,
                                      CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
        if (found)
        {
            cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
                         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray, board_sz, corners, found);
        }

        // imshow("image", img);
        imshow("corners", gray);

        k = waitKey(10);
        if (found)
        {
            k = waitKey(0);
        }
        if (k == 27)
        {
            break;
        }
        if (k == ' ' && found)
        {
            image_points.push_back(corners);
            object_points.push_back(obj);
            printf("Corners stored\n");
            success++;

            if (success >= numBoards)
            {
                break;
            }
        }
    }

    destroyAllWindows();
    printf("Starting calibration\n");

    Mat intrinsic = Mat(3, 3, CV_32FC1);
    Mat distcoeffs;
    vector<Mat> rvecs, tvecs;

    intrinsic.at<float>(0, 0) = 1;
    intrinsic.at<float>(1, 1) = 1;

    calibrateCamera(object_points, image_points, img.size(), intrinsic, distcoeffs, rvecs, tvecs);

    FileStorage fs1("mycalib.yml", FileStorage::WRITE);
    fs1 << "CM1" << intrinsic;
    fs1 << "D1" << distcoeffs;
    fs1 << "ImagePoints" << image_points; // FileStorage keys must not contain spaces
    fs1.release();

    printf("calibration done\n");

    // Mat imgU;
    // while (1)
    // {
    //     cap >> img;
    //     undistort(img, imgU, intrinsic, distcoeffs);
    //     imshow("image", img);
    //     imshow("undistort", imgU);
    //     k = waitKey(5);
    //     if (k == 27)
    //     {
    //         break;
    //     }
    // }
    // cap.release();

    return 0;
}
int main(int argc, char* argv[])
{
    /* SERIAL CONFIGURATION */
    struct termios tio;
    struct termios stdio;
    struct termios old_stdio;
    int tty_usb;
    char buffer[33];

    /*
     * CONFIGURE USB PORT
     */
    configureOximeter(&stdio, &tio, &old_stdio);

    /*
     * OPEN USB PORT FOR I/O COMMUNICATION
     */
    openUSB(&tty_usb, &tio);

    /*
     * READING FROM AND WRITING TO THE USB PORT
     */
    /* END OF SERIAL CONFIGURATION */

    // some boolean variables for different functionality within this program
    bool trackObjects = true;
    bool useMorphOps = true;
    // matrix to store each frame of the webcam feed
    Mat cameraFeed;
    // matrix storage for HSV image
    Mat HSV;
    // matrix storage for binary threshold image
    Mat threshold;
    // x and y values for the location of the object
    int x = 0, y = 0;
    // create slider bars for HSV filtering
    createTrackbars();
    // video capture object to acquire webcam feed
    VideoCapture capture;
    // open capture object at location zero (default location for webcam)
    capture.open(0);
    // set height and width of capture frame
    capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);

    // start an infinite loop where webcam feed is copied to cameraFeed matrix;
    // all of our operations will be performed within this loop
    while (1)
    {
        // store image to matrix
        capture.read(cameraFeed);
        // convert frame from BGR to HSV colorspace
        cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
        // filter HSV image between values and store filtered image to threshold matrix
        inRange(HSV, Scalar(H_MIN, S_MIN, V_MIN), Scalar(H_MAX, S_MAX, V_MAX), threshold);
        // perform morphological operations on thresholded image to eliminate noise
        // and emphasize the filtered object(s)
        if (useMorphOps)
            morphOps(threshold);
        // pass in thresholded frame to our object tracking function;
        // this function will return the x and y coordinates of the filtered object
        if (trackObjects)
        {
            trackFilteredObject(x, y, threshold, cameraFeed);
            write(tty_usb, &y, sizeof(y)); // write y to the serial port (was 8 bytes: more than an int holds)
            //read(tty_usb, &leitura, 4);  // read the serial port
        }

        // show the frames
        imshow(windowName2, threshold);
        imshow(windowName, cameraFeed);
        imshow(windowName1, HSV);

        // delay so the screen can refresh;
        // without this waitKey() call no image appears!
        waitKey(200);
    }
    return 0;
}
int main( int argc, char** argv )
{
    VideoCapture cap;
    help();

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])) )
        cap.open(argc == 2 ? argv[1][0] - '0' : 0);
    else if( argc >= 2 )
    {
        cap.open(argv[1]);
        if( cap.isOpened() )
            cout << "Video " << argv[1] <<
                ": width=" << cap.get(CV_CAP_PROP_FRAME_WIDTH) <<
                ", height=" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) <<
                ", nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
        if( argc > 2 && isdigit(argv[2][0]) )
        {
            int pos;
            sscanf(argv[2], "%d", &pos);
            cout << "seeking to frame #" << pos << endl;
            cap.set(CV_CAP_PROP_POS_FRAMES, pos);
        }
    }

    if( !cap.isOpened() )
    {
        cout << "Could not initialize capturing...\n";
        return -1;
    }

    namedWindow( "Laplacian", 0 );
    createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );

    Mat smoothed, laplace, result;

    for(;;)
    {
        Mat frame;
        cap >> frame;
        if( frame.empty() )
            break;

        int ksize = (sigma*5)|1; // |1 forces an odd kernel size
        if(smoothType == CV_GAUSSIAN)
            GaussianBlur(frame, smoothed, Size(ksize, ksize), sigma, sigma);
        else if(smoothType == CV_BLUR)
            blur(frame, smoothed, Size(ksize, ksize));
        else
            medianBlur(frame, smoothed, ksize);

        Laplacian(smoothed, laplace, CV_16S, 5);
        convertScaleAbs(laplace, result, (sigma+1)*0.25);
        imshow("Laplacian", result);

        int c = waitKey(30);
        if( c == ' ' )
            smoothType = smoothType == CV_GAUSSIAN ? CV_BLUR :
                         smoothType == CV_BLUR ? CV_MEDIAN : CV_GAUSSIAN;
        if( c == 'q' || c == 'Q' || (c & 255) == 27 )
            break;
    }

    return 0;
}
int main( int argc, char** argv )
{
    help();

    VideoCapture capture;
    SerializeHelper sHelp = SerializeHelper();
    Forest forest = sHelp.loadForest("adult.txt");

    // Open webcam
    capture.open(CV_CAP_INTELPERC);
    //cap.open("d2.avi");

    if( !capture.isOpened() )
    {
        cout << "Could not initialize capturing...\n";
        return -1;
    }

    capture.set(CV_CAP_INTELPERC_IMAGE_GENERATOR | CV_CAP_PROP_INTELPERC_PROFILE_IDX, 0);
    capture.set(CV_CAP_INTELPERC_DEPTH_GENERATOR | CV_CAP_PROP_INTELPERC_PROFILE_IDX, 0);

    namedWindow( "Depth", 1 );
    namedWindow( "Color", 1 );

    Mat gray, prevGray, image;
    vector<Point2f> points[2];
    //rect =
    ImagePacket images = getFrames(capture);

    Mat threshDepth;
    int threshDist = 750;
    threshold(images.getDepth(), threshDepth, threshDist, 100000, THRESH_TOZERO_INV);
    threshDepth.convertTo(threshDepth, CV_8U);

    Mat segmentedImage = forest.classifyImage(threshDepth);
    imshow("Segmentation", convertToColorForBaby(segmentedImage));

    Rect rect = isolateBodyPart(segmentedImage, HEAD);

    TemplateTracker hTrack = TemplateTracker();
    waitKey(10);
    hTrack.initialize(rect, images.getColor(), threshDepth, 0);

    Mat color, uvMap;
    for(;;)
    {
        ImagePacket images = getFrames(capture);
        threshold(images.getDepth(), threshDepth, threshDist, 100000, THRESH_TOZERO_INV);
        threshDepth.convertTo(threshDepth, CV_8U);

        hTrack.track(images.getColor(), threshDepth);
        forehead = getForeheadFromHead(hTrack.getTrackedRegion());

        color = images.getColor();
        uvMap = images.getUVMap();

        Mat foreheadDepth = threshDepth(forehead);
        imshow("forehead", foreheadDepth);

        transpose(threshDepth, threshDepth);
        transpose(color, color);
        transpose(foreheadDepth, foreheadDepth);

        for(int i = 0; i < foreheadDepth.rows; i++)
        {
            for(int j = 0; j < foreheadDepth.cols; j++)
            {
                if(foreheadDepth.at<uchar>(i,j) != 0)
                {
                    Point cPoint = translateDepthToColor(Point(j + forehead.y, i + forehead.x), color, uvMap);
                    if(cPoint.x < color.cols && cPoint.y < color.rows)
                        circle( color, cPoint, 3, Scalar(0,255,0), -1, 8);
                }
            }
        }

        transpose(threshDepth, threshDepth);
        transpose(color, color);

        rectangle(threshDepth, hTrack.getTrackedRegion(), Scalar(255, 0, 0), 2, 8, 0);
        rectangle(threshDepth, forehead, Scalar(255, 0, 0), 2, 8, 0);
        imshow("Depth", threshDepth);
        imshow("Color", color);

        char c = (char)waitKey(10);
        if( c == 27 )
            break;
    }
    return 0;
}
int main(int argc, char* argv[])
{
    // some boolean variables for different functionality within this program
    bool trackObjects = false;
    bool useMorphOps = false;
    // matrix to store each frame of the webcam feed
    Mat cameraFeed;
    // matrix storage for HSV image
    Mat HSV;
    // matrix storage for binary threshold image
    Mat threshold;
    // x and y values for the location of the object
    int x = 0, y = 0;
    // create slider bars for HSV filtering
    createTrackbars();
    // video capture object to acquire webcam feed
    VideoCapture capture;
    // open capture object at location zero (default location for webcam)
    capture.open(0);
    // set height and width of capture frame
    capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);

    // start an infinite loop where webcam feed is copied to cameraFeed matrix;
    // all of our operations will be performed within this loop
    while (1)
    {
        // store image to matrix
        capture.read(cameraFeed);
        // convert frame from BGR to HSV colorspace
        cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
        // filter HSV image between values and store filtered image to threshold matrix
        inRange(HSV, Scalar(H_MIN, S_MIN, V_MIN), Scalar(H_MAX, S_MAX, V_MAX), threshold);
        // perform morphological operations on thresholded image to eliminate noise
        // and emphasize the filtered object(s)
        if (useMorphOps)
            morphOps(threshold);
        // pass in thresholded frame to our object tracking function;
        // this function will return the x and y coordinates of the filtered object
        if (trackObjects)
            trackFilteredObject(x, y, threshold, cameraFeed);

        // show frames
        imshow(windowName2, threshold);
        imshow(windowName, cameraFeed);
        imshow(windowName1, HSV);

        // delay 30ms so that screen can refresh;
        // image will not appear without this waitKey() command
        waitKey(30);
    }
    return 0;
}
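morphOps is called in the loops above but not defined in this excerpt. A minimal sketch of the usual cleanup it performs on the binary threshold image; the element shapes and sizes here (3x3 erode, 8x8 dilate) are illustrative assumptions, not the original values:

// Hypothetical implementation of the morphOps() used above: erode to remove
// speckle noise, then dilate to make the surviving blobs easier to track.
void morphOps(Mat &thresh)
{
    Mat erodeElement  = getStructuringElement(MORPH_RECT, Size(3, 3));
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));

    erode(thresh, thresh, erodeElement);
    erode(thresh, thresh, erodeElement);
    dilate(thresh, thresh, dilateElement);
    dilate(thresh, thresh, dilateElement);
}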
int main( int argc, char** argv )
{
    CommandLineParser parser( argc, argv, keys );

    String tracker_algorithm = parser.get<String>( 0 );
    String video_name = parser.get<String>( 1 );
    int start_frame = parser.get<int>( 2 );

    if( tracker_algorithm.empty() || video_name.empty() )
    {
        help();
        return -1;
    }

    int coords[4] = {0, 0, 0, 0};
    bool initBoxWasGivenInCommandLine = false;
    {
        String initBoundingBox = parser.get<String>(3);
        for( size_t npos = 0, pos = 0, ctr = 0; ctr < 4; ctr++ )
        {
            npos = initBoundingBox.find_first_of(',', pos);
            if( npos == string::npos && ctr < 3 )
            {
                printf("bounding box should be given in format \"x1,y1,x2,y2\", where x's and y's are integer coordinates of opposed corners of the bounding box\n");
                printf("got: %s\n", initBoundingBox.substr(pos, string::npos).c_str());
                printf("manual selection of bounding box will be employed\n");
                break;
            }
            int num = atoi(initBoundingBox.substr(pos, (ctr == 3) ? string::npos : (npos - pos)).c_str());
            if( num <= 0 )
            {
                printf("bounding box should be given in format \"x1,y1,x2,y2\", where x's and y's are integer coordinates of opposed corners of the bounding box\n");
                printf("got: %s\n", initBoundingBox.substr(pos, npos - pos).c_str());
                printf("manual selection of bounding box will be employed\n");
                break;
            }
            coords[ctr] = num;
            pos = npos + 1;
        }
        if( coords[0] > 0 && coords[1] > 0 && coords[2] > 0 && coords[3] > 0 )
        {
            initBoxWasGivenInCommandLine = true;
        }
    }

    //open the capture
    VideoCapture cap;
    cap.open( video_name );
    cap.set( CAP_PROP_POS_FRAMES, start_frame );

    if( !cap.isOpened() )
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printMessage();
        return -1;
    }

    Mat frame;
    paused = true;
    namedWindow( "Tracking API", 1 );
    setMouseCallback( "Tracking API", onMouse, 0 );

    //instantiates the specific Tracker
    Ptr<Tracker> tracker = Tracker::create( tracker_algorithm );
    if( tracker == NULL )
    {
        cout << "***Error in the instantiation of the tracker...***\n";
        return -1;
    }

    //get the first frame
    cap >> frame;

    /*Size imageSize, imageSize2;
    imageSize = frame.size();
    imageSize2 = frame.size();
    Rect ROI = Rect(int(imageSize.width/5), int(imageSize.height*0.22), int(imageSize.width*0.6), int(imageSize.height*0.55));
    Mat temp = frame.clone();
    Mat map1, map2;
    initUndistortRectifyMap(Camera_Matrix, Distortion_Coeff, Mat(),
                            getOptimalNewCameraMatrix(Camera_Matrix, Distortion_Coeff, imageSize, 0.8, imageSize2, 0),
                            imageSize2, CV_16SC2, map1, map2);
    remap(temp, frame, map1, map2, INTER_LINEAR);
    Mat image = frame(ROI);
    frame = image.clone();*/

    // Prabhudev Prakash
    pMOG2 = createBackgroundSubtractorMOG2(5000);
    // Prabhudev Prakash

    frame.copyTo( image );
    if( initBoxWasGivenInCommandLine )
    {
        selectObject = true;
        paused = false;
        boundingBox.x = coords[0];
        boundingBox.y = coords[1];
        boundingBox.width = std::abs( coords[2] - coords[0] );
        boundingBox.height = std::abs( coords[3] - coords[1] );
        printf("bounding box with vertices (%d,%d) and (%d,%d) was given in command line\n", coords[0], coords[1], coords[2], coords[3]);
        rectangle( image, boundingBox, Scalar( 255, 0, 0 ), 2, 1 );
    }
    imshow( "Tracking API", image );

    bool initialized = false;
    int frameCounter = 0;

    for ( ;; )
    {
        if( !paused )
        {
            if( initialized )
            {
                cap >> frame;
                if( frame.empty() )
                {
                    break;
                }

                // Image rectify section
                //pMOG2->apply(frame, frame);
                //blur( frame, frame, Size(8,8) );
                //Canny( frame, frame, 100, 100*2, 3 );
                /*Size imageSize, imageSize2;
                imageSize = frame.size();
                imageSize2 = frame.size();
                Rect ROI = Rect(int(imageSize.width/5), int(imageSize.height*0.22), int(imageSize.width*0.6), int(imageSize.height*0.55));
                Mat temp = frame.clone();
                Mat map1, map2;
                initUndistortRectifyMap(Camera_Matrix, Distortion_Coeff, Mat(),
                                        getOptimalNewCameraMatrix(Camera_Matrix, Distortion_Coeff, imageSize, 0.8, imageSize2, 0),
                                        imageSize2, CV_16SC2, map1, map2);
                remap(temp, frame, map1, map2, INTER_LINEAR);
                Mat image = frame(ROI);
                frame = image.clone();*/
                // Image rectify section

                // Compute the foreground mask first, then smooth it
                // (originally the blur ran before apply(), smoothing an empty mask on the first frame).
                pMOG2->apply(frame, fgMaskMOG2);
                GaussianBlur(fgMaskMOG2, fgMaskMOG2, cv::Size(5, 5), 2, 2);

                // Find contours section
                /*Canny( fgMaskMOG2, fgMaskMOG2, 100, 100*2, 3 );
                vector<vector<Point> > contours;
                vector<Vec4i> hierarchy;
                findContours( fgMaskMOG2, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
                Mat drawing = Mat::zeros( fgMaskMOG2.size(), CV_8UC3 );
                for( int i = 0; i < contours.size(); i++ )
                {
                    drawContours( drawing, contours, i, cv::Scalar(255,255,255), 2, 8, hierarchy, 0, Point() );
                }
                cout << "number of contours" << contours.size() << endl; */
                // Find contours section

                // Blob detection section
                SimpleBlobDetector::Params params;
                // Change thresholds
                params.minThreshold = 1;
                params.maxThreshold = 255;
                // Filter by area
                params.filterByArea = true;
                params.minArea = 20;
                // Filter by circularity
                params.filterByCircularity = false;
                params.minCircularity = 0.1;
                // Filter by convexity
                params.filterByConvexity = false;
                params.minConvexity = 0.87;
                // Filter by inertia
                params.filterByInertia = false;
                params.minInertiaRatio = 0.2;

                // Storage for blobs
                vector<KeyPoint> keypoints;
                // Set up detector with params
                Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
                detector->detect( fgMaskMOG2, keypoints );
                cout << "Keypoints" << keypoints.size() << endl;

                Mat frame_with_keypoints = frame.clone();
                //drawKeypoints( frame, keypoints, frame_with_keypoints, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
                for (int i = 0; i < (int)keypoints.size(); i++)
                {
                    Point top;
                    Point bottom;
                    top.x = keypoints[i].pt.x - keypoints[i].size/4;
                    top.y = keypoints[i].pt.y - keypoints[i].size;
                    bottom.x = keypoints[i].pt.x + keypoints[i].size/4;
                    bottom.y = keypoints[i].pt.y + keypoints[i].size;
                    //top.push_back(keypoints[i].pt - keypoints[i].size/2);
                    rectangle(frame_with_keypoints, top, bottom, Scalar(0,0,255), 2);
                }
                // Blob detection section

                //Prabhudev Prakash
                //Mat temp_Frame;
                //frame.copyTo(temp_Frame, drawing);
                //image = temp_Frame.clone();
                image = frame_with_keypoints.clone();
            }

            if( !initialized && selectObject )
            {
                //initializes the tracker
                if( !tracker->init( image, boundingBox ) )
                {
                    cout << "***Could not initialize tracker...***\n";
                    return -1;
                }
                initialized = true;
                cout << boundingBox;
            }
            else if( initialized )
            {
                //updates the tracker
                if( tracker->update( image, boundingBox ) )
                {
                    rectangle( frame, boundingBox, Scalar( 255, 255, 255 ), 2, 1 );
                }
            }
            imshow( "Tracking API", image );
            frameCounter++;
        }

        char c = (char) waitKey( 2 );
        if( c == 'q' )
            break;
        if( c == 'p' )
            paused = !paused;
    }

    return 0;
}
int main(int argc, char *argv[])
{
    int listenfd, connfd;
    pid_t pid;
    socklen_t chilen;
    struct sockaddr_in cliaddr, servaddr;
    void sig_chld(int signo);

    if(!cap.isOpened())
    {
        printf("no camera!");
        return -1;
    }
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    if(!cap1.isOpened())
    {
        printf("no camera1!");
        return -1;
    }
    cap1.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap1.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

    if(!cap2.isOpened())
    {
        printf("no camera2!");
        return -1;
    }
    cap2.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap2.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

    listenfd = socket(AF_INET, SOCK_STREAM, 0);

    bzero(&servaddr, sizeof(servaddr));
    servaddr.sin_family = AF_INET;                 // IPv4
    servaddr.sin_port = htons(portnumber);         // port number (host short converted to network byte order)
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);  // IP address

    bind(listenfd, (sockaddr *)&servaddr, sizeof(sockaddr));
    listen(listenfd, 10);
    signal(SIGCHLD, sig_chld);

    for(;;)
    {
        chilen = sizeof(cliaddr);
        if( (connfd = accept(listenfd, (sockaddr *)&cliaddr, &chilen)) < 0 )
        {
            if(errno == EINTR)
                continue;
            else
                system("echo accept ");
        }
        if((pid = fork()) == 0)
        {
            // child: serve this connection, then exit
            close(listenfd);
            str_echo(connfd);
            close(connfd);
            exit(0);
        }
        close(connfd); // parent: the child owns the connected socket now
    }
}
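sig_chld is declared and installed above but not defined in this excerpt. A minimal sketch of the conventional SIGCHLD reaper such a fork-per-connection server needs; the waitpid loop is an assumption, not the original code:

// Hypothetical handler matching the declaration above: reap all terminated
// children without blocking, so finished per-connection processes do not
// linger as zombies.
#include <sys/wait.h>

void sig_chld(int signo)
{
    pid_t pid;
    int stat;
    while ((pid = waitpid(-1, &stat, WNOHANG)) > 0)
        ; // discard exit status; we only want the children reaped
}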
void onTrackbarSlide2(int pos, void* userData = NULL)
{
    cap.set(CV_CAP_PROP_POS_FRAMES, pos); // seek the (global) capture to the selected frame
}
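For context, a minimal sketch of how such a seek callback is typically wired to a window; the window and trackbar names are illustrative, and cap and the frame count are assumed to come from the surrounding program:

// Hypothetical setup: a "Position" trackbar on an "Example" window that seeks
// the capture via the callback above. Names are illustrative only.
int sliderPos = 0;
int frameCount = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
namedWindow("Example", WINDOW_AUTOSIZE);
createTrackbar("Position", "Example", &sliderPos, frameCount, onTrackbarSlide2);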
int main(int argc, char** argv)
{
    pthread_t thread_c;
    int key = 0; // was uninitialized, making the first while-test undefined

    if (argc < 3)
    {
        quit("Usage: netcv_client <server_ip> <server_port> <input_file>(optional)", 0);
    }
    if (argc == 4)
    {
        capture.open(argv[3]);
    }
    else
    {
        capture.open(0);
    }
    if (!capture.isOpened())
    {
        quit("\n--> cvCapture failed", 1);
    }

    server_ip = argv[1];
    server_port = atoi(argv[2]);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

    capture >> img0;
    img1 = Mat::zeros(img0.rows, img0.cols, CV_8UC1);

    // run the streaming client as a separate thread
    if (pthread_create(&thread_c, NULL, streamClient, NULL))
    {
        quit("\n--> pthread_create failed.", 1);
    }

    cout << "\n--> Press 'q' to quit. \n\n" << endl;

    /* print the width and height of the frame, needed by the client */
    cout << "\n--> Transferring (" << img0.cols << "x" << img0.rows << ") images to the: "
         << server_ip << ":" << server_port << endl;

    //namedWindow("stream_client", CV_WINDOW_AUTOSIZE);
    //flip(img0, img0, 1);
    //cvtColor(img0, img1, CV_BGR2GRAY);

    while (key != 'q')
    {
        /* get a frame from camera, under the lock shared with the sender thread */
        pthread_mutex_lock(&mutex);
        capture >> img0;
        if (img0.empty())
        {
            pthread_mutex_unlock(&mutex); // don't leave the mutex held on early exit
            break;
        }
        //flip(img0, img0, 1);
        //cvtColor(img0, img1, CV_BGR2GRAY);
        is_data_ready = 1;
        pthread_mutex_unlock(&mutex);

        /* also display the video here on the client */
        imshow("stream_client", img2);
        key = waitKey(30);
    }

    /* user has pressed 'q', terminate the streaming client */
    if (pthread_cancel(thread_c))
    {
        quit("\n--> pthread_cancel failed.", 1);
    }

    /* free memory */
    //destroyWindow("stream_client");
    quit("\n--> NULL", 0);

    return 0;
}
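streamClient, which the thread above runs, is not part of this excerpt. A rough sketch of the sender side it implies, assuming a plain TCP socket and the shared img0 / is_data_ready / mutex globals; everything here, including the raw-byte wire format, is an assumption:

// Hypothetical sender thread: connects to server_ip:server_port and ships each
// ready frame's raw pixels over TCP. Sketch only; the real protocol may differ.
#include <arpa/inet.h>
#include <unistd.h>
#include <cstring>

void* streamClient(void*)
{
    int sock = socket(AF_INET, SOCK_STREAM, 0);
    sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(server_port);
    inet_pton(AF_INET, server_ip, &addr.sin_addr);
    if (connect(sock, (sockaddr*)&addr, sizeof(addr)) < 0)
        pthread_exit(NULL);

    for (;;)
    {
        pthread_mutex_lock(&mutex);
        if (is_data_ready)
        {
            // send the frame as raw bytes; a real protocol would frame the data
            send(sock, img0.data, img0.total() * img0.elemSize(), 0);
            is_data_ready = 0;
        }
        pthread_mutex_unlock(&mutex);
        usleep(1000); // yield briefly between frames
    }
    return NULL;
}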
static void focusDriveEnd(VideoCapture & cap, int direction)
{
    // Step the focus motor in one direction until set() reports failure,
    // i.e. the drive has reached its end stop.
    while (cap.set(CAP_PROP_ZOOM, (double) MAX_FOCUS_STEP * direction))
        ;
}
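A small usage sketch under the same assumption, namely that set() returns false once the drive can move no further; the direction values are illustrative:

// Hypothetical usage: rewind the focus drive to a known end position before
// starting a sweep.
int main()
{
    VideoCapture cap(0);
    focusDriveEnd(cap, -1); // drive the focus all the way to one end
    // ... run an autofocus sweep from this known end position ...
    return 0;
}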
int main( int argc, char** argv )
{
    VideoCapture cap;
    cv::CommandLineParser parser(argc, argv, "{help h | | }{ c | 0 | }{ p | | }");
    if ( parser.has("help") )
    {
        help();
        return 0;
    }
    if( parser.get<string>("c").size() == 1 && isdigit(parser.get<string>("c")[0]) )
        cap.open(parser.get<int>("c"));
    else
        cap.open(parser.get<string>("c"));
    if( cap.isOpened() )
        cout << "Video " << parser.get<string>("c") <<
            ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
            ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
            ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
    if( parser.has("p") )
    {
        int pos = parser.get<int>("p");
        if (!parser.check())
        {
            parser.printErrors();
            return -1;
        }
        cout << "seeking to frame #" << pos << endl;
        cap.set(CAP_PROP_POS_FRAMES, pos);
    }

    if( !cap.isOpened() )
    {
        cout << "Could not initialize capturing...\n";
        return -1;
    }

    namedWindow( "Laplacian", 0 );
    createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );

    Mat smoothed, laplace, result;

    for(;;)
    {
        Mat frame;
        cap >> frame;
        if( frame.empty() )
            break;

        int ksize = (sigma*5)|1;
        if(smoothType == GAUSSIAN)
            GaussianBlur(frame, smoothed, Size(ksize, ksize), sigma, sigma);
        else if(smoothType == BLUR)
            blur(frame, smoothed, Size(ksize, ksize));
        else
            medianBlur(frame, smoothed, ksize);

        Laplacian(smoothed, laplace, CV_16S, 5);
        convertScaleAbs(laplace, result, (sigma+1)*0.25);
        imshow("Laplacian", result);

        char c = (char)waitKey(30);
        if( c == ' ' )
            smoothType = smoothType == GAUSSIAN ? BLUR : smoothType == BLUR ? MEDIAN : GAUSSIAN;
        if( c == 'q' || c == 'Q' || c == 27 )
            break;
    }

    return 0;
}
bool ProcessKeys()
{
    char c = (char)waitKey(30);
    if( c == 27 )
    {
        return false;
    }

    switch( c )
    {
    case 'c':
    case 'd': // capture from cam
        {
            int camNum = (c == 'c') ? 0 : 1;
            bool res = cap.open(camNum);
            if(res)
            {
                printf("Init capture from cam %d - ok\n", camNum);
            }
            else
            {
                printf("Init capture from cam %d - failed %s\n", camNum, cvErrorStr(cvGetErrStatus()));
            }
            cap.set(CV_CAP_PROP_CONVERT_RGB, 1.);
            IsCam = true;
        }
        break;
    case 'f':
    case 'g':
        {
            const char *fname = IoFiles[c - 'f'];
            bool res = cap.open(fname);
            if(res)
            {
                printf("Init read from file %s - ok\n", fname);
            }
            else
            {
                printf("Init read from file %s - failed %s\n", fname, cvErrorStr(cvGetErrStatus()));
            }
            IsCam = false;
        }
        break;
    case 's':
    case 't':
        {
            if(IoFiles[c - 's'] == NULL)
            {
                Error("No file defined\n");
                break;
            }
            if(!Vwrite.open(IoFiles[c - 's'], CV_FOURCC('I', 'Y', 'U', 'V'), FRAMES_SECOND, LastImageSize, true))
            {
                Error("Could not capture to file\n");
                break;
            }
        }
        break;
    default:
        break;
    }
    return true;
}
// In general a suffix of 1 means previous frame, and 2 means current frame.
// However, we start processing the next frame while the GPU is working on the current one...
// So at a certain point frame 1 shifts down to 0, 2 shifts down to 1, and the new 2 is loaded.
int main( int argc, char** argv )
{
    // gpuFacade gpu;
    // gpu.set_values(3,4);
    // cerr << "!! " << gpu.area() << endl;

    // This must be an integer multiple of 512.
    // Specifically, half-multiples of the number of SM's for your GPU are sensible.
    // I have 10 streaming multiprocessors, so I chose 15*512 = 7680.
    const int maxKP = 512 * 15;
    const bool showMatches = true;
    // Shows every Nth processed frame's matches.
    const int showMatchesInterval = 10;
    const bool showVideo = true;
    // Shows every Nth processed frame.
    const int showVideoInterval = 1;
    int WIDTH, HEIGHT;
    int totalMatches = 0, totalInliers = 0; // both are accumulated with += below, so both must start at 0
    const int matchThreshold = 12;
    // Discard this many frames for each one processed. Change with +/- keys while running.
    int skipFrames = 0;
    // Threshold for FAST detector
    int threshold = 20;
    int targetKP = 3000;
    int tolerance = 200;
    int maxLoops = 100; //4200;
    const bool gnuplot = true;
    double defect = 0.0;
    int extractions = 0;

    VideoCapture cap;
    if (argc == 1)
    {
        cap = VideoCapture(0);
        WIDTH  = cap.get(CAP_PROP_FRAME_WIDTH);
        HEIGHT = cap.get(CAP_PROP_FRAME_HEIGHT);
    }
    if (argc == 2 || argc == 3)
    {
        cap = VideoCapture(argv[1]);
        WIDTH  = cap.get(CAP_PROP_FRAME_WIDTH);
        HEIGHT = cap.get(CAP_PROP_FRAME_HEIGHT);
        if (argc == 3)
        {
            for (int i = 0; i < atoi(argv[2]); i++)
            {
                cap.grab();
            }
        }
    }
    if (argc == 4)
    {
        cap = VideoCapture(0);
        WIDTH  = atoi(argv[2]);
        HEIGHT = atoi(argv[3]);
        cap.set(CAP_PROP_FRAME_WIDTH, WIDTH);
        cap.set(CAP_PROP_FRAME_HEIGHT, HEIGHT);
    }

    double f = 0.4;
    double data[] = { f*WIDTH,      0.0,  WIDTH*0.5,
                          0.0, f*HEIGHT, HEIGHT*0.5,
                          0.0,      0.0,        1.0 };
    Mat K(3, 3, CV_64F, data);
    Mat F, R, T, rod, mask;
    Mat img0, img1, img2, img1g, img2g, imgMatches, E, rodOld;

    cap >> img1;
    cap >> img2;
    cv::cvtColor(img1, img1g, CV_BGR2GRAY);
    cv::cvtColor(img2, img2g, CV_BGR2GRAY);

    if (showMatches)
    {
        namedWindow("Matches", WINDOW_NORMAL);
    }
    waitKey(1);
    if (showVideo)
    {
        namedWindow("Video", WINDOW_NORMAL);
    }
    waitKey(1);
    resizeWindow("Matches", 1920/2, 540/2);
    resizeWindow("Video", 960, 540);
    moveWindow("Matches", 0, 540+55);
    moveWindow("Video", 0, 0);
    waitKey(1);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    vector<KeyPoint> keypoints0, keypoints1, keypoints2;
    vector<DMatch> goodMatches;
    vector<Point2f> p1, p2; // Point correspondences for recovering pose.
    // The actual number of keypoints we are dealing with: just keypoints#.size(), but capped at maxKP.
    int numKP0, numKP1, numKP2;
    int key = -1;
    clock_t timer, timer2;
    float time;

    // Sizes for device and host pointers
    size_t sizeK = maxKP * sizeof(float) * 5;                  // K for keypoints
    size_t sizeI = WIDTH * HEIGHT * sizeof(unsigned char);     // I for Image
    size_t sizeD = maxKP * (2048 / 32) * sizeof(unsigned int); // D for Descriptor
    size_t sizeM = maxKP * sizeof(int);                        // M for Matches
    size_t sizeMask = 64 * sizeof(float);

    // Host pointers
    float *h_K1, *h_K2;
    cudaMallocHost((void **) &h_K1, sizeK);
    cudaMallocHost((void **) &h_K2, sizeK);
    // For reasons opaque to me, allocating both (but not either) h_M1 or h_M2
    // with cudaMallocHost segfaults, apparently after graceful exit? So neither of them is pinned.
    int h_M1[maxKP];
    int h_M2[maxKP];
    float h_mask[64];
    for (int i = 0; i < 64; i++)
    {
        h_mask[i] = 1.0f;
    }

    // Device pointers
    unsigned char *d_I;
    unsigned int *d_D1, *d_D2, *uIntSwapPointer;
    int *d_M1, *d_M2;
    float *d_K, *d_mask;
    cudaCalloc((void **) &d_K, sizeK);
    cudaCalloc((void **) &d_D1, sizeD);
    cudaCalloc((void **) &d_D2, sizeD);
    cudaCalloc((void **) &d_M1, sizeM);
    cudaCalloc((void **) &d_M2, sizeM);
    cudaCalloc((void **) &d_mask, sizeM); // note: allocated with sizeM in the original, not sizeMask

    // The patch triplet locations for LATCH fit in texture memory cache.
    cudaArray* patchTriplets;
    initPatchTriplets(patchTriplets);
    size_t pitch;
    initImage(&d_I, WIDTH, HEIGHT, &pitch);
    initMask(&d_mask, h_mask);

    // Events allow asynchronous, nonblocking launch of subsequent kernels after a given event has happened,
    // such as completion of a different kernel on a different stream.
    cudaEvent_t latchFinished;
    cudaEventCreate(&latchFinished);
    // You should create a new stream for each bitMatcher kernel you want to launch at once.
    cudaStream_t streanumKP1, streanumKP2;
    cudaStreamCreate(&streanumKP1);
    cudaStreamCreate(&streanumKP2);

    FAST(img1g, keypoints1, threshold);
    extractions += keypoints1.size();
    latch( img1g, d_I, pitch, h_K1, d_D1, &numKP1, maxKP, d_K, &keypoints1, d_mask, latchFinished );
    FAST(img2g, keypoints2, threshold); // This call to FAST is concurrent with the above execution.
    extractions += keypoints2.size();
    latch( img2g, d_I, pitch, h_K2, d_D2, &numKP2, maxKP, d_K, &keypoints2, d_mask, latchFinished );
    bitMatcher( d_D1, d_D2, numKP1, numKP2, maxKP, d_M1, matchThreshold, streanumKP1, latchFinished );
    bitMatcher( d_D2, d_D1, numKP2, numKP1, maxKP, d_M2, matchThreshold, streanumKP2, latchFinished );

    timer = clock();
    getMatches(maxKP, h_M1, d_M1);
    getMatches(maxKP, h_M2, d_M2);
    for (int i = 0; i < numKP1; i++)
    {
        if (h_M1[i] >= 0 && h_M1[i] < numKP2 && h_M2[h_M1[i]] == i) // mutual best match
        {
            goodMatches.push_back( DMatch(i, h_M1[i], 0) ); // For drawing.
            p1.push_back(keypoints1[i].pt);                 // For recovering pose.
            p2.push_back(keypoints2[h_M1[i]].pt);
        }
    }

    img1.copyTo(img0);
    img2.copyTo(img1);
    cap.read(img2);
    cvtColor(img2, img2g, CV_BGR2GRAY);
    keypoints0 = keypoints1;
    keypoints1 = keypoints2;
    uIntSwapPointer = d_D1;
    d_D1 = d_D2;
    d_D2 = uIntSwapPointer;
    numKP0 = numKP1;
    numKP1 = numKP2;
    FAST(img2g, keypoints2, threshold);

    int loopIteration = 0;
    for (; loopIteration < maxLoops || maxLoops == -1; loopIteration++) // Main loop.
    {
        { // GPU code for descriptors and matching.
            cudaEventRecord(start, 0);
            extractions += keypoints2.size();
            latch( img2g, d_I, pitch, h_K2, d_D2, &numKP2, maxKP, d_K, &keypoints2, d_mask, latchFinished );
            bitMatcher( d_D1, d_D2, numKP1, numKP2, maxKP, d_M1, matchThreshold, streanumKP1, latchFinished );
            bitMatcher( d_D2, d_D1, numKP2, numKP1, maxKP, d_M2, matchThreshold, streanumKP2, latchFinished );
            cudaEventRecord(stop, 0);
        }
        timer = clock();
        { // Put as much CPU code here as possible.
            { // Display matches and/or video to user.
                bool needToDraw = false;
                if (showMatches && loopIteration % showMatchesInterval == 0)
                { // Draw matches.
                    drawMatches( img0, keypoints0, img1, keypoints1,
                                 goodMatches, imgMatches, Scalar::all(-1), Scalar::all(-1),
                                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
                    imshow( "Matches", imgMatches );
                    needToDraw = true;
                }
                if (showVideo && loopIteration % showVideoInterval == 0)
                {
                    imshow("Video", img1);
                    needToDraw = true;
                }
                if (needToDraw)
                {
                    key = waitKey(1);
                }
            }
            { // Handle user input.
                switch (key)
                {
                    case (-1):
                        break;
                    case (1048689): // q
                    case (113):     // also q
                        return 0;
                        break;
                    case (1048695): // w
                        waitKey(0);
                        break;
                    case (1114027): // +
                        skipFrames++;
                        cerr << "For each processed frame we are now skipping " << skipFrames << endl;
                        break;
                    case (1114029): // -
                        skipFrames = max(1, --skipFrames);
                        cerr << "For each processed frame we are now skipping " << skipFrames << endl;
                        break;
                    default:
                        cerr << "Currently pressed key is: " << key << endl;
                        break;
                }
                key = -1;
            }
            { // Iterate the "logical" loop (get ready to process next frame).
                img1.copyTo(img0);
                img2.copyTo(img1);
                for (int i = 0; i < skipFrames; i++)
                {
                    cap.grab();
                }
                cap.read(img2);
                if (img2.cols == 0) break;
                cvtColor(img2, img2g, CV_BGR2GRAY);
                keypoints0 = keypoints1;
                keypoints1 = keypoints2;
                uIntSwapPointer = d_D1;
                d_D1 = d_D2;
                d_D2 = uIntSwapPointer;
                numKP0 = numKP1;
                numKP1 = numKP2;
            }
            { // Solve for and output rotation vector (this gets piped to feedgnuplot).
                if (10 < p1.size() && 10 < p2.size())
                {
                    E = findEssentialMat(p1, p2, f*WIDTH, Point2d(WIDTH*0.5f, HEIGHT*0.5f), RANSAC, 0.999, 3.0, mask);
                    int inliers = 0;
                    for (int i = 0; i < mask.rows; i++)
                    {
                        inliers += mask.data[i];
                    }
                    totalInliers += inliers;
                    double size = p1.size();
                    double r = inliers / max((double)size, 150.0);
                    r = 1.0 - min(r + 0.05, 1.0);
                    defect += r*r;
                    cout << "11:" << r*r << endl;
                    recoverPose(E, p1, p2, R, T, f*WIDTH, Point2d(WIDTH*0.5f, HEIGHT*0.5f), mask);
                    Rodrigues(R, rod);
                    if (loopIteration == 0)
                    {
                        rod.copyTo(rodOld);
                    }
                    if (dist2(rod, rodOld) < 1.0)
                    {
                        rod.copyTo(rodOld);
                    }
                    else
                    {
                        cerr << "Rejecting the recovered pose: " << rod.t() * 57.2957795 << endl;
                        // This commented-out chunk of code is good for webcams. If you initialize with a bad value it will recover.
                        // const double alpha = 0.1; // Move our region of acceptable responses (only a little) closer to the observed (but presumed erroneous) value.
                        // for (int i=0; i<3; i++) {
                        //     rodOld.at<double>(i) = rodOld.at<double>(i)*(1.0-alpha) + rod.at<double>(i)*alpha;
                        // }
                        rodOld.copyTo(rod);
                    }
                }
                else
                {
                    defect += 1.0;
                    cout << "11:" << 1.0 << endl;
                    cerr << "Too few matches! Not going to try to recover pose this frame." << endl;
                }
                // To prevent the graphs from desynchronizing from each other, we have to output this unconditionally.
                if (gnuplot)
                {
                    for (int i = 0; i < 3; i++)
                    {
                        cout << i << ":" << rod.at<double>(i) * 57.2957795 << endl; // Output Rodrigues vector, rescaled to degrees.
                    }
                    // T is unit norm (scale-less) and often erroneously sign-reversed.
                    // if (T.at<double>(2) < 0) T = -T; // Assume dominant motion is forward... (this is not an elegant assumption)
                    // double theta = atan2(T.at<double>(0), T.at<double>(2));
                    // double phi   = atan2(T.at<double>(1), T.at<double>(2));
                    // cout << 3 << ":" << theta * 57.2957795 << endl; // Plot polar translation angle.
                    // cout << 4 << ":" << phi * 57.2957795 << endl;   // Plot azimuthal translation angle.
                }
            }
            { // Run FAST detector on the CPU for next frame (get ready for next loop iteration).
                FAST(img2g, keypoints2, threshold);
                // Apply proportional control to the threshold to drive it towards targetKP.
                int control = (int)(((float)keypoints2.size() - (float)targetKP) / (float)tolerance);
                threshold += min(100, control);
                if (threshold < 1) threshold = 1;
            }
        }
        if (gnuplot)
        {
            time = (1000*(clock() - timer)/(double)CLOCKS_PER_SEC);
            cout << "9:" << time << endl; // Plot CPU time.
            timer = clock();
        }
        { // Get new GPU results.
            p1.clear();
            p2.clear();
            goodMatches.clear();
            getMatches(maxKP, h_M1, d_M1);
            getMatches(maxKP, h_M2, d_M2);
            cudaEventElapsedTime(&time, start, stop);
            if (gnuplot)
            {
                cout << "10:" << (time + (1000*(clock() - timer)/(double)CLOCKS_PER_SEC)) << endl; // Plot total asynchronous GPU time.
            }
            for (int i = 0; i < numKP0; i++)
            {
                if (h_M1[i] >= 0 && h_M1[i] < numKP1 && h_M2[h_M1[i]] == i) // mutual best match
                {
                    goodMatches.push_back( DMatch(i, h_M1[i], 0) ); // For drawing matches.
                    p1.push_back(keypoints0[i].pt);                 // For recovering pose.
                    p2.push_back(keypoints1[h_M1[i]].pt);
                }
            }
        }
        if (gnuplot)
        {
            cout << "6:" << numKP1 << endl;        // Plot number of keypoints.
            cout << "7:" << p1.size() << endl;     // Plot number of matches.
            cout << "8:" << 100*threshold << endl; // Plot current threshold for FAST.
        }
        totalMatches += p1.size();
    }

    cudaFreeArray(patchTriplets);
    cudaFree(d_K);
    cudaFree(d_D1);
    cudaFree(d_D2);
    cudaFree(d_M1);
    cudaFree(d_M2);
    cudaFreeHost(h_K1);
    cudaFreeHost(h_K2);

    cerr << "Total matches: " << totalMatches << endl;
    cerr << "Total inliers: " << totalInliers << endl;
    cerr << "Defect: " << defect << endl;
    cerr << "Loop iteration: " << loopIteration << endl;
    cerr << "Extractions: " << extractions << endl;
    return 0;
}
/*
 * To work with Kinect or XtionPRO the user must install the OpenNI library and
 * the PrimeSense module for OpenNI, and configure OpenCV with the WITH_OPENNI
 * flag ON (using CMake).
 */
int main( int argc, char* argv[] )
{
    bool isColorizeDisp, isFixedMaxDisp;
    int imageMode;
    bool retrievedImageFlags[5];
    string filename;
    bool isVideoReading;
    parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode, retrievedImageFlags, filename, isVideoReading );

    cout << "Device opening ..." << endl;
    VideoCapture capture;
    if( isVideoReading )
        capture.open( filename );
    else
    {
        capture.open( CAP_OPENNI2 );
        if( !capture.isOpened() )
            capture.open( CAP_OPENNI );
    }
    cout << "done." << endl;

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    if( !isVideoReading )
    {
        bool modeRes = false;
        switch ( imageMode )
        {
            case 0:
                modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_VGA_30HZ );
                break;
            case 1:
                modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_15HZ );
                break;
            case 2:
                modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_30HZ );
                break;
            // The following modes are only supported by the Xtion Pro Live
            case 3:
                modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_30HZ );
                break;
            case 4:
                modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_60HZ );
                break;
            default:
                CV_Error( Error::StsBadArg, "Unsupported image mode property.\n" );
        }
        if (!modeRes)
            cout << "\nThis image mode is not supported by the device, the default value "
                    "(CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
    }

    // Print some available device settings.
    cout << "\nDepth generator output mode:" << endl
         << "FRAME_WIDTH      " << capture.get( CAP_PROP_FRAME_WIDTH ) << endl
         << "FRAME_HEIGHT     " << capture.get( CAP_PROP_FRAME_HEIGHT ) << endl
         << "FRAME_MAX_DEPTH  " << capture.get( CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl
         << "FPS              " << capture.get( CAP_PROP_FPS ) << endl
         << "REGISTRATION     " << capture.get( CAP_PROP_OPENNI_REGISTRATION ) << endl;
    if( capture.get( CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
    {
        cout << "\nImage generator output mode:" << endl
             << "FRAME_WIDTH   " << capture.get( CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_FRAME_WIDTH ) << endl
             << "FRAME_HEIGHT  " << capture.get( CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_FRAME_HEIGHT ) << endl
             << "FPS           " << capture.get( CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_FPS ) << endl;
    }
    else
    {
        cout << "\nDevice doesn't contain image generator." << endl;
        if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
            return 0;
    }

    for(;;)
    {
        Mat depthMap;
        Mat validDepthMap;
        Mat disparityMap;
        Mat bgrImage;
        Mat grayImage;

        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        else
        {
            if( retrievedImageFlags[0] && capture.retrieve( depthMap, CAP_OPENNI_DEPTH_MAP ) )
            {
                const float scaleFactor = 0.05f;
                Mat show;
                depthMap.convertTo( show, CV_8UC1, scaleFactor );
                imshow( "depth map", show );
            }

            if( retrievedImageFlags[1] && capture.retrieve( disparityMap, CAP_OPENNI_DISPARITY_MAP ) )
            {
                if( isColorizeDisp )
                {
                    Mat colorDisparityMap;
                    colorizeDisparity( disparityMap, colorDisparityMap, isFixedMaxDisp ? getMaxDisparity(capture) : -1 );
                    Mat validColorDisparityMap;
                    colorDisparityMap.copyTo( validColorDisparityMap, disparityMap != 0 );
                    imshow( "colorized disparity map", validColorDisparityMap );
                }
                else
                {
                    imshow( "original disparity map", disparityMap );
                }
            }

            if( retrievedImageFlags[2] && capture.retrieve( validDepthMap, CAP_OPENNI_VALID_DEPTH_MASK ) )
                imshow( "valid depth mask", validDepthMap );

            if( retrievedImageFlags[3] && capture.retrieve( bgrImage, CAP_OPENNI_BGR_IMAGE ) )
                imshow( "rgb image", bgrImage );

            if( retrievedImageFlags[4] && capture.retrieve( grayImage, CAP_OPENNI_GRAY_IMAGE ) )
                imshow( "gray image", grayImage );
        }

        if( waitKey( 30 ) >= 0 )
            break;
    }

    return 0;
}
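colorizeDisparity and getMaxDisparity are referenced above but not included in this excerpt. A simplified sketch of equivalents, assuming an 8-bit disparity image: applyColorMap stands in for the sample's hand-rolled hue mapping, and the 400 mm minimum sensing distance is an assumption:

// Simplified stand-ins for the helpers used above; sketches, not the originals.
static void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp = -1 )
{
    CV_Assert( gray.type() == CV_8UC1 );
    if( maxDisp <= 0 )
        minMaxLoc( gray, 0, &maxDisp ); // fall back to the observed maximum
    Mat scaled;
    gray.convertTo( scaled, CV_8UC1, 255.0 / maxDisp );
    applyColorMap( scaled, rgb, COLORMAP_JET );
}

static float getMaxDisparity( VideoCapture& capture )
{
    const int minDistance = 400; // mm, assumed minimum sensing distance
    float b = (float)capture.get( CAP_PROP_OPENNI_BASELINE );     // mm
    float F = (float)capture.get( CAP_PROP_OPENNI_FOCAL_LENGTH ); // pixels
    return b * F / minDistance;
}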
int main(int argc, char** argv)
{
    VideoCapture capture;
    Mat currentFrame;
    Mat lastGrayFrame;
    Mat currentGrayFrame;
    BoundingBox pbox;
    vector<Point2f> points1;
    vector<Point2f> points2;
    bool isDetected = true;

    capture.open(0);
    if(!capture.isOpened())
    {
        // webcam is unavailable, terminate the program
        return 1;
    }

    cvNamedWindow("VKR Kostenko 472SE", CV_WINDOW_AUTOSIZE);
    cvSetMouseCallback("VKR Kostenko 472SE", mouseClickHandler, NULL);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

    while(true)
    {
        while(noRect)
        {
            capture >> currentFrame;
            cvtColor(currentFrame, lastGrayFrame, CV_RGB2GRAY);
            drawBox(currentFrame, rect);
            // show the new frame
            imshow("VKR Kostenko 472SE", currentFrame);
            if(cvWaitKey(33) == 'q')
            {
                return 0;
            }
        }
        if(rect.width > Constants::min_win && rect.height > Constants::min_win)
        {
            // a box of suitable size has been drawn;
            // remove the mouse handler
            cvSetMouseCallback("VKR Kostenko 472SE", NULL, NULL);
            break;
        }
        else
        {
            // a new box has to be selected
            noRect = true;
        }
    }

    Watcher watcher(lastGrayFrame, rect);

    while(capture.read(currentFrame))
    {
        cvtColor(currentFrame, currentGrayFrame, CV_RGB2GRAY);
        // look for a match
        watcher.processFrame(lastGrayFrame, currentGrayFrame, points1, points2, pbox, isDetected, flag);
        // draw the box if a match was found
        if(isDetected)
        {
            drawBox(currentFrame, pbox);
        }
        // show the new frame
        imshow("VKR Kostenko 472SE", currentFrame);
        swap(lastGrayFrame, currentGrayFrame);
        points1.clear();
        points2.clear();
        if(cvWaitKey(33) == 'q')
        {
            break;
        }
    }
    return 0;
}
int main(int argc, char** argv)
{
    if (argc < 2)
    {
        fprintf(stderr, "uh = live usb hdr | un = live usb raw | img1-3 | movie file \n"
                        " usage: %s uh \n %s un \n %s img1 img2 img3 \n %s video-filename \n\n",
                argv[0], argv[0], argv[0], argv[0]);
        return 1;
    }

    MSG("loading...");

    if (argc == 4)
    {
        IplImage* img1 = cvLoadImage(argv[1], 1);
        Mat im1(img1);
        IplImage* img2 = cvLoadImage(argv[2], 1);
        Mat im2(img2);
        IplImage* img3 = cvLoadImage(argv[3], 1);
        Mat im3(img3);

        namedWindow("im1", CV_WINDOW_KEEPRATIO);
        imshow("im1", im1);
        namedWindow("im2", CV_WINDOW_KEEPRATIO);
        imshow("im2", im2);
        namedWindow("im3", CV_WINDOW_KEEPRATIO);
        imshow("im3", im3);
        if (0 == m_init)
        {
            namedWindow("tmo", CV_WINDOW_KEEPRATIO);
        }

        runHDR(im1, im2, im3);

        while (1)
        {
            char key = (char) cvWaitKey(10);
            if (key == 27 || key == 'q' || key == 'Q')
            {
                break;
            }
        }
        return 0;
    }
    else
    {
        string s = argv[1];
        if (s.compare(string("uh")) == 0)
        {
            m_live_usb = 1;
            m_cap_hdr = 1;
        }
        else if (s.compare(string("un")) == 0)
        {
            m_live_usb = 1;
            m_cap_hdr = 0;
        }
        else
        {
            m_live_usb = 0;
            m_cap_hdr = 0;
        }

        namedWindow("trackbar", CV_WINDOW_KEEPRATIO);
        cvMoveWindow("trackbar", 10, 10);
        imshow("trackbar", Mat(70, 300, CV_8UC1));
        createTrackbar("mode", "trackbar", &m_mode, SLIDER_MAX, 0);
        createTrackbar("test", "trackbar", &m_test, 20, 0);
        createTrackbar("test2", "trackbar", &m_test2, 20, 0);

        // camera setup
        Mat cimage;
        VideoCapture capture;
        if (!m_live_usb)
        {
            capture.open(argv[1]);
        }
        else
        {
            int w = 320;
            int h = 240;
            capture.set(CV_CAP_PROP_FRAME_WIDTH, w);
            capture.set(CV_CAP_PROP_FRAME_HEIGHT, h);
            capture.open(0); // try to open
            capture.set(CV_CAP_PROP_FRAME_WIDTH, w);
            capture.set(CV_CAP_PROP_FRAME_HEIGHT, h);
        }
        if (!capture.isOpened()) // if this fails...
        {
            cerr << "Failed to open a video device or video file!\n" << endl;
            return 1;
        }

        if (0 == m_init) { namedWindow("cam", CV_WINDOW_KEEPRATIO); cvMoveWindow("cam", 10, 200); }
        if (0 == m_init) { namedWindow("tmo", CV_WINDOW_KEEPRATIO); cvMoveWindow("tmo", 10, 500); }
        if (0 == m_init) { namedWindow("tmp", CV_WINDOW_KEEPRATIO); cvMoveWindow("tmp", 300, 500); }
        if (0 == m_init) { namedWindow("tmp2", CV_WINDOW_KEEPRATIO); cvMoveWindow("tmp2", 300, 500); }

        // camera process loop
        while (1)
        {
            // get camera image
            capture >> cimage;
            if (cimage.empty())
            {
                cout << "Couldn't load " << endl;
                break;
            }
            imshow("cam", cimage);

            if (m_live_usb && m_cap_hdr)
            {
                // hdr capture: bracket exposures by changing brightness, discarding
                // a few frames after each change so the new setting takes effect
                capture.set(CV_CAP_PROP_BRIGHTNESS, 0.05);
                capture >> cimage; capture >> cimage; capture >> cimage; capture >> cimage;
                Mat img1;
                cimage.copyTo(img1);

                capture.set(CV_CAP_PROP_BRIGHTNESS, 0.45);
                capture >> cimage; capture >> cimage; capture >> cimage; capture >> cimage;
                Mat img2;
                cimage.copyTo(img2);

                capture.set(CV_CAP_PROP_BRIGHTNESS, 0.99);
                capture >> cimage; capture >> cimage; capture >> cimage; capture >> cimage;
                Mat img3;
                cimage.copyTo(img3);

                capture.set(CV_CAP_PROP_BRIGHTNESS, 0.45);
                capture >> cimage; capture >> cimage; capture >> cimage; capture >> cimage;

                runHDR(img1, img2, img3);
            }
            else if (m_live_usb && !m_cap_hdr)
            {
                // raw usb
                Mat img1, img2, img3;
                cimage.copyTo(img1);
                runHDR(img1, img2, img3);
            }
            else
            {
                // loaded video
                Mat img1, img2, img3;
                cimage.copyTo(img1);
                Mat hdr = runHDR(img1, img2, img3);
                Mat mtmp = Mat::zeros(hdr.rows, hdr.cols, CV_8UC3);
                // clamp
                hdr = min(hdr, 255);
                hdr = max(hdr, 0);
                hdr.convertTo(mtmp, CV_8UC3);
                imshow("tmo", mtmp);
            }

            // quit?
            char key;
            // key = (char) cvWaitKey(10); // for real-time
            key = (char) cvWaitKey(1000);  // for testing
            if (key == 27 || key == 'q' || key == 'Q')
            {
                break;
            }
            m_init = 1;
        }
        return 0;
    }
}
int main(int argc, char **argv) { Size boardSize, imageSize; float squareSize, aspectRatio; Mat cameraMatrix, distCoeffs; string outputFilename; string inputFilename = ""; int i, nframes; bool writeExtrinsics, writePoints; bool undistortImage = false; int flags = 0; VideoCapture capture; bool flipVertical; bool showUndistorted; bool videofile; int delay; clock_t prevTimestamp = 0; int mode = DETECTION; int cameraId = 0; vector<vector<Point2f> > imagePoints; vector<string> imageList; Pattern pattern = CHESSBOARD; cv::CommandLineParser parser(argc, argv, "{help ||}{w||}{h||}{pt|chessboard|}{n|10|}{d|1000|}{s|1|}{o|out_camera_data.yml|}" "{op||}{oe||}{zt||}{a|1|}{p||}{v||}{V||}{su||}" "{@input_data|0|}"); if (parser.has("help")) { help(); return 0; } boardSize.width = parser.get<int>("w"); boardSize.height = parser.get<int>("h"); if (parser.has("pt")) { string val = parser.get<string>("pt"); if (val == "circles") pattern = CIRCLES_GRID; else if (val == "acircles") pattern = ASYMMETRIC_CIRCLES_GRID; else if (val == "chessboard") pattern = CHESSBOARD; else return fprintf(stderr, "Invalid pattern type: must be chessboard or circles\n"), -1; } squareSize = parser.get<float>("s"); nframes = parser.get<int>("n"); aspectRatio = parser.get<float>("a"); delay = parser.get<int>("d"); writePoints = parser.has("op"); writeExtrinsics = parser.has("oe"); if (parser.has("a")) flags |= CALIB_FIX_ASPECT_RATIO; if (parser.has("zt")) flags |= CALIB_ZERO_TANGENT_DIST; if (parser.has("p")) flags |= CALIB_FIX_PRINCIPAL_POINT; flipVertical = parser.has("v"); videofile = parser.has("V"); if (parser.has("o")) outputFilename = parser.get<string>("o"); showUndistorted = parser.has("su"); if (isdigit(parser.get<string>("@input_data")[0])) cameraId = parser.get<int>("@input_data"); else inputFilename = parser.get<string>("@input_data"); if (!parser.check()) { help(); parser.printErrors(); return -1; } if (squareSize <= 0) return fprintf(stderr, "Invalid board square width\n"), -1; if (nframes <= 3) return printf("Invalid number of images\n"), -1; if (aspectRatio <= 0) return printf("Invalid aspect ratio\n"), -1; if (delay <= 0) return printf("Invalid delay\n"), -1; if (boardSize.width <= 0) return fprintf(stderr, "Invalid board width\n"), -1; if (boardSize.height <= 0) return fprintf(stderr, "Invalid board height\n"), -1; if (!inputFilename.empty()) { if (!videofile && readStringList(inputFilename, imageList)) mode = CAPTURING; else capture.open(inputFilename); } else capture.open(cameraId); if (!capture.isOpened() && imageList.empty()) return fprintf(stderr, "Could not initialize video (%d) capture\n", cameraId), -2; if (!imageList.empty()) nframes = (int)imageList.size(); if (capture.isOpened()) printf("%s", liveCaptureHelp); capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280); capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720); namedWindow("Image View", 1); for (i = 0;; i++) { Mat view, viewGray; bool blink = false; if (capture.isOpened()) { Mat view0; capture >> view0; view0.copyTo(view); } else if (i < (int)imageList.size())
// Trackbar callback: seek the video to the slider position (each slider unit = 100 frames)
void leviziSlideri(int, void*)
{
    cap.set(CV_CAP_PROP_POS_FRAMES, sliderPoz * 100);
}
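// This callback is registered when the position trackbar is created in the file-playback branch
// below; sliderPoz and cap are globals shared with main. The registration looks like:
//
//   createTrackbar("Position", "VideoDemo", &sliderPoz, NumriFrames / 100, leviziSlideri);
//
// Each slider step therefore represents 100 frames, which keeps the trackbar range manageable
// for long videos.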
int main(int argc, char **argv) { //Create a CMT object Config config("FAST", "BRISK","RANSAC",0.5); CMT cmt(config); //Initialization bounding box Rect rect; //Parse args int challenge_flag = 0; int loop_flag = 0; int verbose_flag = 0; int bbox_flag = 0; int skip_frames = 0; int skip_msecs = 0; int output_flag = 0; string input_path; string output_path; const int detector_cmd = 1000; const int descriptor_cmd = 1001; const int bbox_cmd = 1002; const int no_scale_cmd = 1003; const int with_rotation_cmd = 1004; const int skip_cmd = 1005; const int skip_msecs_cmd = 1006; const int output_file_cmd = 1007; struct option longopts[] = { //No-argument options {"challenge", no_argument, &challenge_flag, 1}, {"loop", no_argument, &loop_flag, 1}, {"verbose", no_argument, &verbose_flag, 1}, {"no-scale", no_argument, 0, no_scale_cmd}, {"with-rotation", no_argument, 0, with_rotation_cmd}, //Argument options {"bbox", required_argument, 0, bbox_cmd}, {"detector", required_argument, 0, detector_cmd}, {"descriptor", required_argument, 0, descriptor_cmd}, {"output-file", required_argument, 0, output_file_cmd}, {"skip", required_argument, 0, skip_cmd}, {"skip-msecs", required_argument, 0, skip_msecs_cmd}, {0, 0, 0, 0} }; int index = 0; int c; while((c = getopt_long(argc, argv, "v", longopts, &index)) != -1) { switch (c) { case 'v': verbose_flag = true; break; case bbox_cmd: { //TODO: The following also accepts strings of the form %f,%f,%f,%fxyz... string bbox_format = "%f,%f,%f,%f"; float x,y,w,h; int ret = sscanf(optarg, bbox_format.c_str(), &x, &y, &w, &h); if (ret != 4) { cerr << "bounding box must be given in format " << bbox_format << endl; return 1; } bbox_flag = 1; rect = Rect(x,y,w,h); } break; case detector_cmd: cmt.str_detector = optarg; break; case descriptor_cmd: cmt.str_descriptor = optarg; break; case output_file_cmd: output_path = optarg; output_flag = 1; break; case skip_cmd: { int ret = sscanf(optarg, "%d", &skip_frames); if (ret != 1) { skip_frames = 0; } } break; case skip_msecs_cmd: { int ret = sscanf(optarg, "%d", &skip_msecs); if (ret != 1) { skip_msecs = 0; } } break; case no_scale_cmd: cmt.consensus.estimate_scale = false; break; case with_rotation_cmd: cmt.consensus.estimate_rotation = true; break; case '?': return 1; } } // Can only skip frames or milliseconds, not both. if (skip_frames > 0 && skip_msecs > 0) { cerr << "You can only skip frames, or milliseconds, not both." << endl; return 1; } //One argument remains if (optind == argc - 1) { input_path = argv[optind]; } else if (optind < argc - 1) { cerr << "Only one argument is allowed." << endl; return 1; } //Set up logging FILELog::ReportingLevel() = verbose_flag ? 
logDEBUG : logINFO;
    Output2FILE::Stream() = stdout; //Log to stdout

    //Challenge mode
    if (challenge_flag)
    {
        //Read list of images
        ifstream im_file("images.txt");
        vector<string> files;
        string line;
        while (getline(im_file, line))
        {
            files.push_back(line);
        }

        //Read region
        ifstream region_file("region.txt");
        vector<float> coords = getNextLineAndSplitIntoFloats(region_file);

        if (coords.size() == 4) {
            rect = Rect(coords[0], coords[1], coords[2], coords[3]);
        }
        else if (coords.size() == 8)
        {
            //Split into x and y coordinates
            vector<float> xcoords;
            vector<float> ycoords;
            for (size_t i = 0; i < coords.size(); i++)
            {
                if (i % 2 == 0) xcoords.push_back(coords[i]);
                else ycoords.push_back(coords[i]);
            }
            float xmin = *min_element(xcoords.begin(), xcoords.end());
            float xmax = *max_element(xcoords.begin(), xcoords.end());
            float ymin = *min_element(ycoords.begin(), ycoords.end());
            float ymax = *max_element(ycoords.begin(), ycoords.end());
            rect = Rect(xmin, ymin, xmax - xmin, ymax - ymin);
            cout << "Found bounding box " << xmin << " " << ymin << " " << xmax - xmin << " " << ymax - ymin << endl;
        }
        else {
            cerr << "Invalid bounding box format" << endl;
            return 1; //an error path should not return 0 (success)
        }

        //Read first image
        Mat im0 = imread(files[0]);
        Mat im0_gray;
        cvtColor(im0, im0_gray, CV_BGR2GRAY);

        //Initialize cmt
        cmt.initialize(im0_gray, rect);

        //Write init region to output file
        ofstream output_file("output.txt");
        output_file << rect.x << ',' << rect.y << ',' << rect.width << ',' << rect.height << std::endl;

        //Process images, write output to file
        for (size_t i = 1; i < files.size(); i++)
        {
            FILE_LOG(logINFO) << "Processing frame " << i << "/" << files.size();
            Mat im = imread(files[i]);
            Mat im_gray;
            cvtColor(im, im_gray, CV_BGR2GRAY);
            cmt.processFrame(im_gray);
            if (verbose_flag)
            {
                display(im, cmt);
            }
            rect = cmt.bb_rot.boundingRect();
            output_file << rect.x << ',' << rect.y << ',' << rect.width << ',' << rect.height << std::endl;
        }
        output_file.close();
        return 0;
    }

    //Normal mode

    //Create window
    namedWindow(WIN_NAME);

    VideoCapture cap;
    bool show_preview = true;

    //If no input was specified
    if (input_path.length() == 0)
    {
        cap.open(0); //Open default camera device
    }
    //Else open the video specified by input_path
    else
    {
        cap.open(input_path);
        if (skip_frames > 0)
        {
            cap.set(CV_CAP_PROP_POS_FRAMES, skip_frames);
        }
        if (skip_msecs > 0)
        {
            cap.set(CV_CAP_PROP_POS_MSEC, skip_msecs);
            // Now which frame are we on?
            skip_frames = (int) cap.get(CV_CAP_PROP_POS_FRAMES);
        }
        show_preview = false;
    }

    //If it doesn't work, stop
    if (!cap.isOpened())
    {
        cerr << "Unable to open video capture." << endl;
        return -1;
    }

    //Show preview until key is pressed
    while (show_preview)
    {
        Mat preview;
        cap >> preview;
        screenLog(preview, "Press a key to start selecting an object.");
        imshow(WIN_NAME, preview);
        char k = waitKey(10);
        if (k != -1)
        {
            show_preview = false;
        }
    }

    //Get initial image
    Mat im0;
    cap >> im0;

    //If no bounding box was specified, get it from user
    if (!bbox_flag)
    {
        rect = getRect(im0, WIN_NAME);
    }

    FILE_LOG(logINFO) << "Using " << rect.x << "," << rect.y << "," << rect.width << "," << rect.height << " as initial bounding box.";

    //Convert im0 to grayscale
    Mat im0_gray;
    if (im0.channels() > 1) {
        cvtColor(im0, im0_gray, CV_BGR2GRAY);
    } else {
        im0_gray = im0;
    }

    //Initialize CMT
    cmt.initialize(im0_gray, rect);

    int frame = skip_frames;

    //Open output file.
ofstream output_file; if (output_flag) { int msecs = (int) cap.get(CV_CAP_PROP_POS_MSEC); output_file.open(output_path.c_str()); output_file << OUT_FILE_COL_HEADERS << endl; output_file << frame << "," << msecs << ","; output_file << cmt.points_active.size() << ","; output_file << write_rotated_rect(cmt.bb_rot) << endl; } //Main loop while (true) { frame++; Mat im; //If loop flag is set, reuse initial image (for debugging purposes) if (loop_flag) im0.copyTo(im); else cap >> im; //Else use next image in stream if (im.empty()) break; //Exit at end of video stream Mat im_gray; if (im.channels() > 1) { cvtColor(im, im_gray, CV_BGR2GRAY); } else { im_gray = im; } //Let CMT process the frame cmt.processFrame(im_gray); //Output. if (output_flag) { int msecs = (int) cap.get(CV_CAP_PROP_POS_MSEC); output_file << frame << "," << msecs << ","; output_file << cmt.points_active.size() << ","; output_file << write_rotated_rect(cmt.bb_rot) << endl; } else { //TODO: Provide meaningful output FILE_LOG(logINFO) << "#" << frame << " active: " << cmt.points_active.size(); } //Display image and then quit if requested. char key = display(im, cmt); if(key == 'q') break; } //Close output file. if (output_flag) output_file.close(); return 0; }
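// getNextLineAndSplitIntoFloats() is used in the challenge-mode branch above but defined
// elsewhere in the CMT sources. A minimal sketch of a compatible implementation (assuming
// comma-separated values, as in the VOT-challenge region.txt format, and that <sstream> and
// <cstdlib> are available):
vector<float> getNextLineAndSplitIntoFloats(istream& str)
{
    vector<float> result;
    string line, cell;
    getline(str, line);                    // one region per line
    stringstream lineStream(line);
    while (getline(lineStream, cell, ','))
        result.push_back((float)atof(cell.c_str()));
    return result;                         // 4 values = axis-aligned box, 8 = polygon corners
}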
int main(int argc, char* argv[])
{
    cout << "C++ Project, Image Processing  Endi Zhupani IE206" << endl;
    cout << "Press 1 to start execution or -1 to exit the program. ";
    int ekzekutimi;
    cin >> ekzekutimi;
    int f_v;                        // open a photo or a video
    int llojiIVideos;               // from the file system or from the webcam
    int zgjedhjaPerdoruesit = -1;
    Menu shfaqMenu;                 // object that shows the various menus
    ToneNgjyrash shtoTon;           // object that adds color tones
    Efekte shtoEfekt;
    Editim edit;
    while (ekzekutimi != -1)        // the user wants to keep running
    {
        // show the main menu
        cout << "MENU" << endl;
        cout << "Button" << setw(5) << " " << "Action" << endl;
        cout << setw(6) << "1" << setw(5) << " " << "Load Photo" << endl;
        cout << setw(6) << "2" << setw(5) << " " << "Load Video" << endl;
        namedWindow("Choose");      // needed so that waitKey() can be used
        f_v = waitKey(0);           // wait for a key press
        destroyWindow("Choose");
        while (true) {              // ends when the switch ends
            switch (f_v) {
            case 50:                // pressed '2' -> chose video
                llojiIVideos = shfaqMenu.zgjedhjaVideo();   // choose where to open the video from
                if (llojiIVideos == 49) {                   // pressed '1' -> chose the webcam
                    VideoCapture cap(0);
                    if (!cap.isOpened()) {                  // check whether it opened
                        cout << "Loading failed. The program is closing" << endl;
                        return -1;
                    }
                    else
                        cout << "Loading succeeded" << endl;
                    shfaqMenu.menuVideo();                  // show the menu listing the available actions + keys
                    namedWindow("VideoDemo", CV_WINDOW_NORMAL);   // graphics window
                    int vleraSliderBrightenes = 25;         // initial slider value
                    int vleraSliderContrast = 25;
                    // create the trackbars with values from 0 - 50
                    createTrackbar("Brightness", "VideoDemo", &vleraSliderBrightenes, 50);
                    createTrackbar("Contrast", "VideoDemo", &vleraSliderContrast, 50);
                    while (1)   // loop that shows, one after another, all frames read from the video
                    {
                        Mat frame;
                        Mat *framePtr = &frame;
                        bool uLexua = cap.read(frame);      // read the next frame of the video
                        if (!uLexua)                        // was the read successful or not
                        {
                            cout << "Could not read a frame. The program is closing!" << endl;
                            return -1;
                        }
                        // INTER_AREA for good quality; it must be the 6th argument - passed 4th, as in
                        // the original, it would be read as the fx scale factor and silently ignored
                        resize(*framePtr, *framePtr, Size(512, 412), 0, 0, CV_INTER_AREA);
                        if (zgjedhjaPerdoruesit == 49)      // pressed '1'
                        {
                            shtoTon.sepiaPerVideo(frame);
                        }
                        else if (zgjedhjaPerdoruesit == 50) // pressed '2'
                        {
                            cvtColor(frame, frame, CV_BGR2GRAY);  // convert from BGR (Blue Green Red) to grayscale
                        }
                        else if (zgjedhjaPerdoruesit == 51) // asked for color inversion
                        {
                            bitwise_not(frame, frame);
                        }
                        // gives the brightness change; takes values from -250 to 250
                        int ndryshimiBrightenes = vleraSliderBrightenes * 10 - 250;
                        // gives the contrast change; takes values from 0 to 2
                        double ndryshimiContrast = vleraSliderContrast * 10 / 250.0;
                        // the image is converted according to the requested brightness and contrast;
                        // if the trackbar sliders have not moved, the image stays as it is.
                        frame.convertTo(frame, -1, ndryshimiContrast, ndryshimiBrightenes);
                        int tastIShtypur = waitKey(10);     // give the user time to press a key
                        imshow("VideoDemo", frame);
                        if (tastIShtypur == 112)            // pressed 'p'
                            waitKey(0);                     // execution pauses until another key is pressed
                        else if (tastIShtypur == 49)
                            zgjedhjaPerdoruesit = tastIShtypur;   // pressed 1
                        else if (tastIShtypur == 50)
                            zgjedhjaPerdoruesit = tastIShtypur;   // 2
                        else if (tastIShtypur == 51)
                            zgjedhjaPerdoruesit = tastIShtypur;   // 3
                        // 'b' is pressed: the user's choice takes the value 98 and the original
                        // frames are shown; this implements the "back" action.
                        else if (tastIShtypur == 98)
                        {
                            zgjedhjaPerdoruesit = tastIShtypur;
                            setTrackbarPos("Brightness", "VideoDemo", 25);
                            setTrackbarPos("Contrast", "VideoDemo", 25);
                            vleraSliderBrightenes = 25;
                            vleraSliderContrast = 25;
                        }
                        else if (tastIShtypur == 27)        // pressed ESC
                        {
                            cout << "The window is closing" << endl;
                            break;
                        }
                    }
                    destroyAllWindows();
                }
                if (llojiIVideos == 50) {   // chose to open a video from the file system
                    /* Works the same way as the webcam video, except for the addition of a trackbar
                       that holds the position of the video, with which the video can be moved
                       forward or backward. */
                    destroyAllWindows();
                    cout << "Please give the FULL filepath of the file you want to open." << endl;
                    string filepath;
                    cin.ignore();   // flush the console, since leftover '\n' characters would end getline without reading the input
                    getline(cin, filepath);
                    cap.open(filepath);
                    if (!cap.isOpened()) {
                        cout << "Loading failed. The program is closing" << endl;
                        return -1;
                    }
                    else
                        cout << "Loading succeeded" << endl;
                    shfaqMenu.menuVideo();
                    namedWindow("VideoDemo", CV_WINDOW_NORMAL);
                    int vleraSliderBrightenes = 25;   // initial slider value
                    int vleraSliderContrast = 25;
                    int NumriFrames = cap.get(CV_CAP_PROP_FRAME_COUNT);   // get the number of frames
                    int framesTekaluara = 0;          // how many frames have been shown
                    // create the position trackbar, which updates the position of the video whenever the slider moves
                    createTrackbar("Position", "VideoDemo", &sliderPoz, NumriFrames / 100, leviziSlideri);
                    createTrackbar("Brightness", "VideoDemo", &vleraSliderBrightenes, 50);
                    createTrackbar("Contrast", "VideoDemo", &vleraSliderContrast, 50);
                    while (1)
                    {
                        Mat frame;
                        Mat *framePtr = &frame;
                        if (framesTekaluara >= NumriFrames)   // the end of the video is reached
                        {
                            cout << "The video has ended. Press 'p' to start it again, or any other key to continue" << endl;
                            int vazhdo = waitKey(0);
                            if (vazhdo == 112)
                                cap.set(CV_CAP_PROP_POS_FRAMES, 1);
                            else   // if the user chooses not to continue, leave the frame-display loop
                                break;
                        }
                        bool uLexua = cap.read(frame);
                        if (!uLexua)
                        {
                            cout << "Could not read a frame. The program is closing!" << endl;
                            return -1;
                        }
                        framesTekaluara = cap.get(CV_CAP_PROP_POS_FRAMES);   // update the number of frames shown so far
                        // move the slider for every 100 frames that pass
                        setTrackbarPos("Position", "VideoDemo", cap.get(CV_CAP_PROP_POS_FRAMES) / 100);
                        resize(*framePtr, *framePtr, Size(512, 412), 0, 0, CV_INTER_AREA);
                        int tastIShtypur = -1;
                        if (zgjedhjaPerdoruesit == 49) { shtoTon.sepiaPerVideo(frame); }
                        if (zgjedhjaPerdoruesit == 50) { cvtColor(frame, frame, CV_BGR2GRAY); }
                        if (zgjedhjaPerdoruesit == 51) { bitwise_not(frame, frame); }
                        double ndryshimiBrightenes = vleraSliderBrightenes * 10 - 250;
                        double ndryshimiContrast = vleraSliderContrast * 10 / 250.0;
                        frame.convertTo(frame, -1, ndryshimiContrast, ndryshimiBrightenes);
                        imshow("VideoDemo", frame);
                        tastIShtypur = waitKey(10);
                        if (tastIShtypur == 112)
                            waitKey(0);
                        else if (tastIShtypur == 49) zgjedhjaPerdoruesit = tastIShtypur;
                        else if (tastIShtypur == 50) zgjedhjaPerdoruesit = tastIShtypur;
                        else if (tastIShtypur == 51) zgjedhjaPerdoruesit = tastIShtypur;
                        else if (tastIShtypur == 98)
                        {
                            zgjedhjaPerdoruesit = tastIShtypur;
                            setTrackbarPos("Brightness", "VideoDemo", 25);
                            setTrackbarPos("Contrast", "VideoDemo", 25);
                            vleraSliderBrightenes = 25;
                            vleraSliderContrast = 25;
                        }
                        else if (tastIShtypur == 27)
                        {
                            cout << "The window is closing" << endl;
                            break;
                        }
                    }
                    destroyAllWindows();
                }
                break; //end case 2
            case 49:   // chose to open a photo
            {
                shfaqMenu.menuFoto();
                cout << "Please give the FULL filepath of the file you want to open." << endl;
                string filepath;
                cin.ignore();   // flush the console, since leftover '\n' characters would end getline without reading the input
                getline(cin, filepath);
                cout << filepath << endl;
                // the image is read into OpenCV's Mat structure. Mat - a matrix that can take up to 4
                // dimensions, depending on the number of channels per pixel and on other factors
                Mat img = imread(filepath);   // the full filepath is given
                Mat *imgptr = &img;
                if (img.empty())
                {
                    cout << "Loading failed. The program is closing" << endl;
                    cout << "Hint: check the filepath" << endl;
                    return -1;
                }
                else
                    cout << "Loading succeeded" << endl;
                img.convertTo(img, CV_8U);   // pixels are converted to unsigned char with 3 channels, 8 bits per channel
                resize(*imgptr, *imgptr, Size(512, 512), 0, 0, CV_INTER_AREA);
                string dritarja = "Demo Foto";   // name of the window where the photo is shown
                namedWindow(dritarja, CV_WINDOW_NORMAL);
                imshow(dritarja, img);
                // copy the original photo into the fotoOrigjinale data member of each class that will modify the photo
                shtoTon.fotoOrigjinale = img.clone();
                shtoEfekt.fotoOrigjinale = img.clone();
                edit.fotoOrigjinale = img.clone();
                int efektiParaardhes = -1;
                zgjedhjaPerdoruesit = -1;
                while (true) {   // enter a loop that lets the user modify the photo by pressing keys
                    int tastIshtypur = waitKey(20);
                    if (zgjedhjaPerdoruesit == 49)   // asks for sepia
                    {
                        // if the image was negative or grayscale, first restore the original;
                        // sepia cannot work on those formats
                        if (efektiParaardhes == 50) {
                            shtoEfekt.goBack(img);
                        }
                        shtoTon.sepia(*imgptr, dritarja);
                        tastIshtypur = waitKey(0);   // execution stops until another key is pressed
                        zgjedhjaPerdoruesit = -1;
                        efektiParaardhes = 49;
                    }
                    else if (zgjedhjaPerdoruesit == 50)   // chooses grayscale
                    {
                        if (efektiParaardhes == 51)
                            shtoEfekt.goBack(img);
                        if (efektiParaardhes == 50) {}
                        else {
                            cvtColor(img, img, CV_BGR2GRAY);
                            imshow(dritarja, img);
                            tastIshtypur = waitKey(0);
                            zgjedhjaPerdoruesit = -1;
                            efektiParaardhes = 50;
                        }
                    }
                    else if (zgjedhjaPerdoruesit == 51)   // chooses negative (color inversion)
                    {
                        if (efektiParaardhes == 50)
                            shtoEfekt.goBack(img);
                        bitwise_not(img, img);
                        imshow(dritarja, img);
                        tastIshtypur = waitKey(0);
                        zgjedhjaPerdoruesit = -1;
                        efektiParaardhes = 51;
                    }
                    else if (zgjedhjaPerdoruesit == 52)   // chooses blur / smooth
                    {
                        shtoEfekt.Blur_Or_Smooth(*imgptr, dritarja);
                        zgjedhjaPerdoruesit = -1;
                        destroyWindow(dritarja);
                    }
                    else if (zgjedhjaPerdoruesit == 98)   // chooses to return to the original photo
                    {
                        shtoEfekt.goBack(img);
                        imshow(dritarja, img);
                        zgjedhjaPerdoruesit = -1;
                    }
                    else if (zgjedhjaPerdoruesit == 99)   // chooses to change the contrast and the brightness
                    {
                        if (efektiParaardhes == 51)
                            shtoEfekt.goBack(img);
                        shtoEfekt.Contrast_Brightenes(*imgptr, dritarja);
                        zgjedhjaPerdoruesit = -1;
                        destroyWindow(dritarja);
                    }
                    else if (zgjedhjaPerdoruesit == 114)   // chooses to rotate the image
                    {
                        edit.rotullo(*imgptr, dritarja);
                        zgjedhjaPerdoruesit = -1;
                        destroyWindow(dritarja);
                    }
                    else if (zgjedhjaPerdoruesit == 115)   // chooses sharpen
                    {
                        if (efektiParaardhes == 51)   // check whether it was inverted
                            shtoEfekt.goBack(img);
                        shtoEfekt.Sharpen(*imgptr);
                        imshow(dritarja, img);
                        tastIshtypur = waitKey(0);
                        zgjedhjaPerdoruesit = -1;
                    }
                    else if (zgjedhjaPerdoruesit == 27)   // chooses to close the photo
                    {
                        cout << "Do you want to save the photo? ('y' = yes; 'n' = no)" << endl;
                        int ruaj = waitKey(0);
                        destroyWindow(dritarja);
                        if (ruaj == 121) {   // chooses y
                            cout << "Please give the filepath where you want to save the image." << endl;
                            string filepathRuajtje;
                            cin.ignore();
                            getline(cin, filepathRuajtje);
                            string s1 = "/";
                            filepathRuajtje = s1 + filepathRuajtje;
                            cout << filepathRuajtje << endl;
                            // specify how the image will be saved
                            vector<int> parametrat_e_ruajtjes;
                            parametrat_e_ruajtjes.push_back(CV_IMWRITE_JPEG_QUALITY);
                            parametrat_e_ruajtjes.push_back(97);   // 0 - 100; the higher, the better
                            // call imwrite, which is given the filepath where the image will be saved,
                            // the image to save, and the way it will be saved
                            bool uRuajt = imwrite(filepathRuajtje, img, parametrat_e_ruajtjes);
                            if (uRuajt)   // if imwrite returns true
                                cout << "The image was saved successfully." << endl;
                            else
                                cout << "The image was not saved." << endl;
                        }
                        cout << "The window is closing" << endl;
                        zgjedhjaPerdoruesit = -1;
                        break;   // after saving (or not), leave the loop
                    }
                    if (tastIshtypur == 49) zgjedhjaPerdoruesit = tastIshtypur;        // 1
                    else if (tastIshtypur == 27) zgjedhjaPerdoruesit = tastIshtypur;   // ESC
                    else if (tastIshtypur == 50) zgjedhjaPerdoruesit = tastIshtypur;   // 2
                    else if (tastIshtypur == 51) zgjedhjaPerdoruesit = tastIshtypur;   // 3
                    else if (tastIshtypur == 52) zgjedhjaPerdoruesit = tastIshtypur;   // 4
                    else if (tastIshtypur == 98) zgjedhjaPerdoruesit = tastIshtypur;   // b
                    else if (tastIshtypur == 99) zgjedhjaPerdoruesit = tastIshtypur;   // c
                    else if (tastIshtypur == 114) zgjedhjaPerdoruesit = tastIshtypur;  // r
                    else if (tastIshtypur == 115) zgjedhjaPerdoruesit = tastIshtypur;  // s
                    namedWindow(dritarja);   // recreate the same window, since some of the functions destroy it
                    imshow(dritarja, img);
                } //end while
            } //end case 1
                break;
            default:   // from the main menu the user chooses neither 1 nor 2
                ekzekutimi = -1;
                break;
            } //end switch
            cout << "Press 'M' to return to the main menu, 'ESC' to exit the program, or any other key to continue with the current action" << endl;
            namedWindow("Choose");
            int butShtypur = waitKey(0);
            if (butShtypur == 109) {   // 'm' is pressed
                destroyWindow("Choose");
                break;   // leave this while and continue execution at the first while (showing the main menu)
            }
            if (butShtypur == 27)      // ESC is pressed
            {
                ekzekutimi = -1;       // so that the first while does not run either
                cout << "The program is closing!" << endl;
                destroyWindow("Choose");
                break;                 // leave the while
            }
        } //end while 2
    } //end while 1
    return 0;
} //end main
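// The slider-to-parameter mapping in the program above feeds Mat::convertTo, which applies
// dst = alpha*src + beta per pixel. A compact view of the exact mapping used (values mirror the
// code above; the helper name is illustrative):
void applySliders(cv::Mat& frame, int brightnessSlider, int contrastSlider)
{
    double beta  = brightnessSlider * 10 - 250;   // slider 0..50 -> brightness offset -250..+250
    double alpha = contrastSlider * 10 / 250.0;   // slider 0..50 -> gain 0..2 (25 -> 1.0, i.e. unchanged)
    frame.convertTo(frame, -1, alpha, beta);      // -1 keeps the source depth
}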
int main(void)
{
    Parameters inParam;
    // DE_Airport2 -- US_Detroit -- DE_Lehre2
    bool readStatus = readParamRoadScan("../../../../Ford/inVehicle/inVehicle/config/DE_Airport.txt", inParam);
    if (!readStatus)   // check before inParam is used below
    {
        cout << "read parameters error" << endl;
        return -1;
    }
    // Calculate H and H inverse for road scan and traffic sign detection
    ns_roadScan::calHAndInvertH(inParam, H, invertH);
    int ChooseVideo = Airport;
    int videoIndex = 0;
    int locNum[2], holeNum[2];
    for (int kk = 0; kk < 6; kk++)   // Airport2
    {
        VideoCapture capture;
        FILE* gpsFile;
        if (ChooseVideo == Airport2)
        {
            // the Airport 2 clips differ only in their timestamp, so build the paths from a table
            // instead of the original eleven identical branches
            static const char* stamps[] = {
                "20150811111511", "20150811112010", "20150811112510", "20150811113010",
                "20150811113510", "20150811114010", "20150811114510", "20150811115010",
                "20150811115510", "20150811120010", "20150811120510"
            };
            if (videoIndex >= 0 && videoIndex <= 10)
            {
                char videoPath[128], gpsPath[128];
                sprintf(videoPath, "F:/roadDB/Airport 2/cam_%s.mp4", stamps[videoIndex]);
                sprintf(gpsPath, "F:/roadDB/Airport 2/gps/list_%s.txt", stamps[videoIndex]);
                capture.open(videoPath);
                gpsFile = fopen(gpsPath, "r");
                capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);   // jump to the end so the frame count can be read below
            }
        }
        else if (ChooseVideo == Airport)
        {
            if (videoIndex == 0)
            {
                capture.open("F:/roadDB/Airport/cam_20150806120920.mp4");
                gpsFile = fopen("F:/roadDB/Airport/gps/list_20150806120920.txt", "r");
                capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
            }
        }
        else if (ChooseVideo == Ford)
        {
            if (videoIndex == 0)
            {
                capture.open("F:/roadDB/Ford/NewcoData/MKS360_20130722_003_Uncompressed.avi");
                gpsFile = fopen("F:/roadDB/Ford/NewcoData/gps_003.txt", "r");
                capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
            }
        }
        else if (ChooseVideo == VW2)
        {
            if (videoIndex == 0)
            {
                capture.open("C:/Users/ypren/Documents/newco_demo/Demo/Ford/inVehicle/inVehicle/resource/Germany/Lehre2/reverse/cap_20150722110100_cut/cam_20150722110100.mp4");
                gpsFile = fopen("C:/Users/ypren/Documents/newco_demo/Demo/Ford/inVehicle/inVehicle/resource/Germany/Lehre2/reverse/cap_20150722110100_cut/list_20150722110100.txt", "r");
                capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
            }
        }
        int number_of_frames = capture.get(CV_CAP_PROP_POS_FRAMES);   // the position was moved to the end above, so this reads the frame count
        if (!capture.isOpened())   // if not success, exit program
        {
            cout << "error: could not open the video" << endl;
            return -1;
        }
        else
        {
            capture.set(CV_CAP_PROP_POS_AVI_RATIO, 0);   // rewind to the start
            double fps = capture.get(CV_CAP_PROP_FPS);   // get the frames per second of the video
        }
        Size S = Size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
        S.height *= inParam.imageScaleHeight;
        S.width *= inParam.imageScaleWidth;
        vector<dataEveryRow> roadPaintData;
        vector<dataEveryRow> roadPaintDataALL;
        vector<gpsInformationAndInterval> GPSAndInterval;
        ////////////////////////////////////////////////////////////////////////////
        Mat history = Mat::zeros(S.height * HH * SCALE, S.width, CV_8UC1);
        int rowIndex = 0;
        int IntervalTmp = 0;
        int Interval = 0;
        int GPStmp = 0;
        Point2d GPS_next;
        gpsInformationAndInterval gpsAndInterval;
        Mat image;
        int intrtmp = 0;
        int frames = 350;
        vector<Point2d> gps_points;
        // read "lat,lon" pairs; testing the fscanf return value avoids the classic feof() pitfall
        // (the original looped on !feof() and pushed the final pair twice)
        while (fscanf(gpsFile, "%lf,%lf\n", &GPS_next.x, &GPS_next.y) == 2)
        {
            gps_points.push_back(GPS_next);
        }
        for (int n = 0; n < 150; n++)
        {
            if ((n * frames + 1) > number_of_frames)
            {
                break;
            }
            capture.set(CV_CAP_PROP_POS_FRAMES, n * frames + 1);
            for (int index = 0; index < frames; index++)   // number_of_frames
            {
                capture >> image;
                if (image.data && ((n * frames + index + 1) < gps_points.size()))
                {
                    roadImageGen(image, history, &rowIndex, &gps_points[n * frames + index], &gps_points[n * frames + index + 1], &gpsAndInterval, &intrtmp, inParam);
                    if (gpsAndInterval.intervalOfInterception)
                    {
                        GPSAndInterval.push_back(gpsAndInterval);
                    }
                    if (((index == (frames - 1)) || ((n * frames + index + 1) == gps_points.size())) && (!GPSAndInterval.empty()))
                    {
                        rowIndex -= GPSAndInterval[GPSAndInterval.size() - 1].intervalOfInterception;
                    }
                }
                else
                {
                    break;
                }
            }
            Mat historyROI = history(Rect(0, rowIndex, history.cols, history.rows - rowIndex));
            imwrite("historyroi.png", historyROI);
            rowIndex = 0;
            intrtmp = 0;
            roadImageProc2(historyROI, GPSAndInterval, roadPaintData, inParam);
            history = Mat::zeros(S.height * HH * SCALE, S.width, CV_8UC1);
            int roiRows = historyROI.rows;   // renamed from 'H', which shadowed the global homography matrix H
            for (int i = 0; i < roadPaintData.size(); i++)
            {
                roadPaintDataALL.push_back(roadPaintData[i]);
            }
            roadPaintData.clear();
            GPSAndInterval.clear();
        }
        char texname[32];
        // every video source writes to the same file pattern, so the original per-source
        // branching (VW/Ford/Honda/.../Airport2, all with identical bodies) is unnecessary
        sprintf(texname, "dataStruct_%d.txt", videoIndex);
        ofstream dataStruct(texname);
        dataStruct << setprecision(20) << inParam.GPSref.x << " " << inParam.GPSref.y << endl;
        for (int i = 0; i < roadPaintDataALL.size(); i++)
        {
            dataStruct << setprecision(20) << roadPaintDataALL[i].Left_Middle_RelGPS.x << " " << roadPaintDataALL[i].Left_Middle_RelGPS.y << " " << roadPaintDataALL[i].isPaint_Left << " "
                << roadPaintDataALL[i].Left_Paint_Edge[0].x << " " << roadPaintDataALL[i].Left_Paint_Edge[0].y << " "
                << roadPaintDataALL[i].Left_Paint_Edge[1].x << " " << roadPaintDataALL[i].Left_Paint_Edge[1].y << " "
                << roadPaintDataALL[i].Left_Area_Pixel_Mean << " "
<<roadPaintDataALL[i].Middle_RelGPS.x<<" "<<roadPaintDataALL[i].Middle_RelGPS.y<<" "<<roadPaintDataALL[i].Middle_Area_Pixel_Mean<<" " <<roadPaintDataALL[i].Right_Middle_RelGPS.x<<" "<<roadPaintDataALL[i].Right_Middle_RelGPS.y<<" "<<roadPaintDataALL[i].isPaint_Right<<" " <<roadPaintDataALL[i].Right_Paint_Edge[0].x<<" "<<roadPaintDataALL[i].Right_Paint_Edge[0].y<<" " <<roadPaintDataALL[i].Right_Paint_Edge[1].x<<" "<<roadPaintDataALL[i].Right_Paint_Edge[1].y<<" " <<roadPaintDataALL[i].Right_Area_Pixel_Mean<<endl; } ////output real middle real GPS //vector<Point2d> actualGPS; //for(int index = 0; index < roadPaintDataALL.size(); index++) //{ // Point2d middleRealGPS = Point2d(0.0, 0.0); // //calculate real GPS // if(roadPaintDataALL[index].isPaint_Right == 1) // { // calActualGPSFromRef(roadPaintDataALL[index].Right_Middle_RelGPS, inParam.GPSref, middleRealGPS); // } // else // { // calActualGPSFromRef(roadPaintDataALL[index].Middle_RelGPS, inParam.GPSref, middleRealGPS); // } // actualGPS.push_back(middleRealGPS); //} //ofstream realGPS("realGPS.txt"); //for(int index = 0; index < actualGPS.size(); index++) //{ // realGPS<<setprecision(20)<<actualGPS[index].y<<","<<actualGPS[index].x<<","<<0<<endl; //} //realGPS.close(); ////end output cout<<"output finish"<<endl; dataStruct.close(); roadPaintDataALL.clear(); videoIndex++; } }
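// Both branches above obtain the clip length by seeking to the end (CV_CAP_PROP_POS_AVI_RATIO = 1)
// and reading CV_CAP_PROP_POS_FRAMES back. With most OpenCV 2.x backends the count can also be
// queried directly, without disturbing the playback position; a minimal sketch:
int countFrames(VideoCapture& cap)
{
    // CV_CAP_PROP_FRAME_COUNT can be approximate for some containers/codecs,
    // which is presumably why the code above seeks to the end instead.
    return (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
}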
int main(int argc, char* argv[]) { //some boolean variables for different functionality within this //program bool trackObjects = true; bool useMorphOps = true; calibrationMode = true; //Matrix to store each frame of the webcam feed Mat cameraFeed; //matrix storage for HSV image Mat HSV; //matrix storage for binary threshold image Mat threshold; //x and y values for the location of the object int x = 0, y = 0; //video capture object to acquire webcam feed VideoCapture capture; //open capture object at location zero (default location for webcam) capture.open(0); //set height and width of capture frame capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH); capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT); //must create a window before setting mouse callback cv::namedWindow(windowName); capture.read(cameraFeed); HSVMouseSelector hsvMouseSelector(&hsv, &cameraFeed); //set mouse callback function to be active on "Webcam Feed" window //we pass the handle to our "frame" matrix so that we can draw a rectangle to it //as the user clicks and drags the mouse setMouseCallback(windowName, ImageUtils::MouseCallback, &hsvMouseSelector); //initiate mouse move and drag to false //start an infinite loop where webcam feed is copied to cameraFeed matrix //all of our operations will be performed within this loop while (1){ //store image to matrix capture.read(cameraFeed); //convert frame from BGR to HSV colorspace cvtColor(cameraFeed, HSV, COLOR_BGR2HSV); //set HSV values from user selected region hsvMouseSelector.UpdateFrame(&HSV); //filter HSV image between values and store filtered image to //threshold matrix inRange(HSV, hsv.ToMin(), hsv.ToMax(), threshold); //perform morphological operations on thresholded image to eliminate noise //and emphasize the filtered object(s) if (useMorphOps) morphOps(threshold); //pass in thresholded frame to our object tracking function //this function will return the x and y coordinates of the //filtered object if (trackObjects) trackFilteredObject(x, y, threshold, cameraFeed); //show frames if (calibrationMode == true){ //create slider bars for HSV filtering createTrackbars(); imshow(windowName1, HSV); imshow(windowName2, threshold); } else{ destroyWindow(windowName1); destroyWindow(windowName2); destroyWindow(trackbarWindowName); } imshow(windowName, cameraFeed); //delay 30ms so that screen can refresh. //image will not appear without this waitKey() command //also use waitKey command to capture keyboard input switch (waitKey(30)) { case 99: calibrationMode = !calibrationMode;//if user presses 'c', toggle calibration mode break; case 27: return 0; } } return 0; }
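// morphOps() is called by this program and the next one but defined elsewhere. A typical
// implementation for this style of color tracker erodes away speckle noise and then dilates to
// restore blob size; this sketch is an assumption about its contents, not the author's code:
void morphOps(cv::Mat& thresh)
{
    // a 3x3 element erodes fine noise; an 8x8 dilation makes the remaining blobs solid
    cv::Mat erodeElement  = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::Mat dilateElement = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(8, 8));
    cv::erode(thresh, thresh, erodeElement);
    cv::erode(thresh, thresh, erodeElement);
    cv::dilate(thresh, thresh, dilateElement);
    cv::dilate(thresh, thresh, dilateElement);
}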
void TrackerTRETest::distanceTest() { Mat frame; bool initialized = false; int fc = ( startFrame - gtStartFrame ); Rect currentBBi = bbs.at( fc ); Rect2d currentBB(currentBBi); float sumDistance = 0; string folder = cvtest::TS::ptr()->get_data_path() + TRACKING_DIR + "/" + video + "/" + FOLDER_IMG; int frameTotal = 0; int frameTotalSucc = 0; VideoCapture c; c.open( cvtest::TS::ptr()->get_data_path() + "/" + TRACKING_DIR + "/" + video + "/" + FOLDER_IMG + "/" + video + ".webm" ); c.set( CAP_PROP_POS_FRAMES, startFrame ); for ( int frameCounter = startFrame; frameCounter < endFrame; frameCounter++ ) { c >> frame; if( frame.empty() ) { break; } if( !initialized ) { if( !tracker->init( frame, currentBB ) ) { FAIL()<< "Could not initialize tracker" << endl; return; } initialized = true; } else if( initialized ) { if( frameCounter >= (int) bbs.size() ) break; tracker->update( frame, currentBB ); } float curDistance = calcDistance( currentBB, bbs.at( fc ) ); if( curDistance <= threshold ) frameTotalSucc++; sumDistance += curDistance; fc++; frameTotal++; } float distance = sumDistance / ( fc - ( startFrame - gtStartFrame ) ); ratioSucc = (float) frameTotalSucc / (float) frameTotal; if( distance > threshold ) { FAIL()<< "Incorrect distance: curr = " << distance << ", min = " << threshold << endl; return; } }
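// calcDistance() is defined elsewhere in the test suite; for distance-based tracker evaluation it
// is commonly the Euclidean distance between bounding-box centers. A sketch under that assumption
// (signature matched to the call above):
float calcDistance(const Rect2d& a, const Rect& b)
{
    Point2f ca((float)(a.x + a.width / 2.0), (float)(a.y + a.height / 2.0));
    Point2f cb(b.x + b.width / 2.0f, b.y + b.height / 2.0f);
    // pixel distance between the two centers
    return std::sqrt((ca.x - cb.x) * (ca.x - cb.x) + (ca.y - cb.y) * (ca.y - cb.y));
}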
int main(int argc, char* argv[])
{
    //if we would like to calibrate our filter values, set to true.
    bool calibrationMode = true;
    //Matrix to store each frame of the webcam feed
    Mat cameraFeed;
    Mat threshold;
    Mat HSV;
    if (calibrationMode) {
        //create slider bars for HSV filtering
        createTrackbars();
    }
    //video capture object to acquire webcam feed
    VideoCapture capture;
    //open capture object at location zero (default location for webcam)
    capture.open(0);
    //set height and width of capture frame
    capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);
    //start an infinite loop where webcam feed is copied to cameraFeed matrix
    //all of our operations will be performed within this loop
    while (1) {
        //store image to matrix
        capture.read(cameraFeed);
        //convert frame from BGR to HSV colorspace, once per frame (the source image does not
        //change within an iteration, so the per-branch reconversions were redundant)
        cvtColor(cameraFeed, HSV, COLOR_BGR2HSV);
        if (calibrationMode == true) {
            //in calibration mode, track objects based on the HSV slider values.
            inRange(HSV, Scalar(H_MIN, S_MIN, V_MIN), Scalar(H_MAX, S_MAX, V_MAX), threshold);
            morphOps(threshold);
            imshow(windowName2, threshold);
            trackFilteredObject(threshold, HSV, cameraFeed);
        } else {
            //create some temp fruit objects so that
            //we can use their member functions/information
            Fruit apple("apple"), banana("banana"), cherry("cherry");
            //first find apples
            inRange(HSV, apple.getHSVmin(), apple.getHSVmax(), threshold);
            morphOps(threshold);
            trackFilteredObject(apple, threshold, HSV, cameraFeed);
            //then bananas
            inRange(HSV, banana.getHSVmin(), banana.getHSVmax(), threshold);
            morphOps(threshold);
            trackFilteredObject(banana, threshold, HSV, cameraFeed);
            //then cherries
            inRange(HSV, cherry.getHSVmin(), cherry.getHSVmax(), threshold);
            morphOps(threshold);
            trackFilteredObject(cherry, threshold, HSV, cameraFeed);
        }
        //show frames
        //imshow(windowName2,threshold);
        imshow(windowName, cameraFeed);
        //imshow(windowName1,HSV);
        //delay 30ms so that screen can refresh.
        //image will not appear without this waitKey() command
        waitKey(30);
    }
    return 0;
}
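// The Fruit class above supplies per-fruit HSV calibration ranges; its definition is not part of
// this listing. A minimal interface consistent with how it is used here (the stored values are
// placeholders, not real calibration data):
class Fruit
{
public:
    Fruit(std::string name) : type(name)
    {
        // hypothetical defaults; a real build would load calibrated per-fruit ranges
        if (name == "banana") { HSVmin = cv::Scalar(20, 100, 100); HSVmax = cv::Scalar(30, 255, 255); }
        else                  { HSVmin = cv::Scalar(0, 0, 0);      HSVmax = cv::Scalar(255, 255, 255); }
    }
    cv::Scalar getHSVmin() const { return HSVmin; }
    cv::Scalar getHSVmax() const { return HSVmax; }
    std::string getType() const { return type; }
private:
    std::string type;
    cv::Scalar HSVmin, HSVmax;
};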
int main()   // void main is non-standard; main must return int
{
    int num1 = 0;
    int num2 = 0;
    int result;
    char key;
    char command = '@';
    bool SecondNumPressed = false;
    VideoCapture cap;
    cap.open(0);
    if (!cap.isOpened()) {
        system("CLS");
        printf("\n\n\t\t\tcamera disconnected");
        system("PAUSE");
        return -1;   // the original bare 'exit;' only names the function and never terminates
    }
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    system("CLS");
    printf("\n\n\t\t\t0");
    while (true) {
        key = press(cap);
        // after the second number has started, accept only digits or '='
        if ((SecondNumPressed == true) && ((key != '=') && (((key - '0') < 0) || ((key - '0') > 9)))) {
            //cout << SecondNumPressed << " " << key << " " << key - '0' << "\n";
            continue;
        }
        if (key == '=') {
            system("CLS");
            if ((num2 == 0) && (command == '/'))
                printf("\n\n\t\t\tcannot divide by zero");
            else if (SecondNumPressed == true)
                printf("\n\n\t\t\t%d", result);
            else
                printf("\n\n\t\t\t%d", num1);
            num1 = 0;
            num2 = 0;
            command = '@';
            SecondNumPressed = false;
        }
        else if (key == 'c') {
            system("CLS");
            num1 = 0;
            num2 = 0;
            command = '@';
            printf("\n\n\t\t\t0");
            SecondNumPressed = false;
        }
        else if ((key == '+') || (key == '*') || (key == '-') || (key == '/')) {
            if (command != '@') {
                system("CLS");
                printf("\n\n\t\t\t%d", num1);
            }
            printf(" %c ", key);
            command = key;
        }
        else if (command == '@') {
            system("CLS");
            if (key == '<')
                num1 /= 10;   // backspace: drop the last digit (the original 'num1 - num1 % 10' only zeroed it)
            else
                num1 = num1 * 10 + (key - '0');
            printf("\n\n\t\t\t%d", num1);
        }
        else {
            system("CLS");
            if (key == '<')
                num2 /= 10;   // backspace on the second operand
            else {
                num2 = num2 * 10 + (key - '0');
                SecondNumPressed = true;
            }
            printf("\n\n\t\t\t%d %c %d", num1, command, num2);
            if (command == '+')
                result = num1 + num2;
            else if (command == '-')
                result = num1 - num2;
            else if (command == '*')
                result = num1 * num2;
            else if (command == '/')
                if (num2 != 0)
                    result = num1 / num2;
        }
    }
}
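// press() is the vision side of this calculator: it watches the camera and returns the key the
// user "pressed". Its implementation is not in this listing; below is a trivial keyboard-driven
// stand-in with a compatible signature, purely illustrative - the real function presumably
// detects a gesture or fingertip position in the frame:
char press(VideoCapture& cap)
{
    Mat frame;
    for (;;) {
        cap >> frame;                 // keep the camera preview live
        if (!frame.empty())
            imshow("calculator", frame);
        int k = waitKey(30);
        if (k > 0)
            return (char)k;           // forward whatever key the user actually typed
    }
}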
int main(int argc, char** argv)
{
    bool use_gui = false;
    double learningRate = -1;

    // time measurement
    timespec time_init;
    timespec time_now;
    timespec time_past;
    char fps[128] = "";   // the status line below needs far more than the original 10 bytes
    clock_gettime(CLOCK_MONOTONIC, &time_init);
    clock_gettime(CLOCK_MONOTONIC, &time_now);

    // video source
    VideoCapture cap;
    if (argc > 1) {
        for (int i = 1; i < argc; i++) {
            // -d <deviceid>
            if (string(argv[i]) == "-d") {
                int device_id = -1;
                sscanf(argv[i+1], "%i", &device_id);
                cap.open(device_id);
                i++;
                if (cap.isOpened() != true) {
                    cerr << "Error: Device " << device_id << " could not be opened.\n exiting..." << endl;
                    return -1;
                }
            }
            // -f <filename>
            else if (string(argv[i]) == "-f") {
                string filename = string(argv[i+1]);
                cap.open(filename);
                i++;
                if (cap.isOpened() != true) {
                    cerr << "Error: \"" << filename << "\" could not be opened.\n exiting..." << endl;
                    return -1;
                }
            }
            // -g (gui)
            else if (string(argv[i]) == "-g") {
                use_gui = true;
            }
            // -l <learning rate>
            else if (string(argv[i]) == "-l") {
                sscanf(argv[i+1], "%lf", &learningRate);
                i++;
            }
            else {
                cerr << "Error: unknown parameter \"" << string(argv[i]) << "\"\n";
                usage();
                return -1;
            }
        }
    }
    if (cap.isOpened() != true) {
        cap.open(0);
    }
    if (cap.isOpened() != true) {
        cerr << "Error: Cannot read device 0.\n exiting..." << endl;
        return -1;
    }

    Mat frame; // the current frame
    Mat foreground, background;
    BackgroundSubtractorMOG2 bg(300, 16, false);
    std::vector<std::vector<cv::Point> > contours;

    // vector<string> detectors, detector_names;
    // detectors.push_back("/home/thomas/cv/tarantula/person.xml");
    // detector_names.push_back("person");

    if (use_gui == true) {
        namedWindow("frame", CV_WINDOW_AUTOSIZE); // current frame
        // namedWindow("foreground", CV_WINDOW_NORMAL);
        namedWindow("background", CV_WINDOW_NORMAL);
    }

    // LatentSvmDetector detector = LatentSvmDetector(detectors, detector_names);
    // vector<LatentSvmDetector::ObjectDetection> detections;

    cout << cap.get(CV_CAP_PROP_FRAME_WIDTH) << " x " << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 768);
    // cap.set(CV_CAP_PROP_FPS, 30);

    // main loop
    for (int f = 0;; f++) {
        // write time
        clock_gettime(CLOCK_MONOTONIC, &time_past);

        if (!cap.read(frame)) {
            break;   // stop at end of stream; the original 'continue' would spin forever once a file ran out
        }
        bg.operator()(frame, foreground, learningRate);
        if (use_gui == true)
            bg.getBackgroundImage(background);
        erode(foreground, foreground, Mat(), Point(-1, -1), 3);
        dilate(foreground, foreground, Mat(), Point(-1, -1), 3);
        if (use_gui == true) {
            findContours(foreground, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
            drawContours(frame, contours, -1, Scalar(0, 0, 255), 1);
        }
        double area;
        int size = contours.size();
        vector<vector<Point> > contours_poly(contours.size());
        vector<RotatedRect> boundRect(contours.size());
        for (int i = 0; i < size; i++) {
            area = contourArea(contours[i]);
            if (area > 2000) {
                // cout << i+1 << "/" << size << ": " << area << endl;
                if (use_gui == true) {
                    drawContours(frame, contours, i, Scalar(0, 255, 255), 2);
                    approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
                    boundRect[i] = minAreaRect(contours_poly[i]);
                }
            }
        }
        // show images
        if (use_gui == true) {
            for (int i = 0; i < (int)contours.size(); i++) {
                //ellipse( frame, boundRect[i], Scalar(255,255,255), 2, 8 );
                // only contours large enough to have been processed above have a valid rect;
                // drawing them all put stray circles at the origin
                if (boundRect[i].size.width > 0)
                    circle(frame, boundRect[i].center, 6, Scalar(0, 255, 0), 3);
            }
            imshow("frame", frame);
            // imshow("foreground", foreground);
            imshow("background", background);
        }
        // calculate fps and display
        clock_gettime(CLOCK_MONOTONIC, &time_now);
        sprintf(fps, "%.2f fps, frame: %i, time: %.3f s, l: %.2e",
getFps(calcTimeDiff (time_past, time_now)), f, calcTimeDiff (time_init, time_now), learningRate); if (use_gui == true) { displayOverlay("frame", fps, 0); } cout << fps << endl; int c = waitKey(1); if (c == 'q' || c == 'Q' || (c & 255) == 27) { break; } } return 0; }
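// calcTimeDiff() and getFps() are small helpers defined elsewhere; given how they are called
// above, implementations along these lines would fit (assumed, not the author's originals):
double calcTimeDiff(const timespec& a, const timespec& b)
{
    // seconds elapsed from a to b, including the nanosecond part
    return (b.tv_sec - a.tv_sec) + (b.tv_nsec - a.tv_nsec) / 1e9;
}

double getFps(double secondsPerFrame)
{
    return secondsPerFrame > 0.0 ? 1.0 / secondsPerFrame : 0.0;
}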