/**
 * @brief Detects faces (and eyes within each face) in @p frame, draws the
 *        detections, shows the annotated frame and appends it to the
 *        global output video.
 *
 * Relies on file-level globals: face_cascade, eyes_cascade, window_name
 * and outputVideo (declared elsewhere in this file).
 *
 * @param frame BGR frame to process; it is drawn on in place before being
 *              displayed and recorded.
 */
void detectAndDisplay( Mat frame )
{
    std::vector<Rect> faces;
    Mat frame_gray;

    // Cascade detection works on an equalized grayscale copy of the input.
    cvtColor( frame, frame_gray, CV_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );

    //-- Detect faces
    // NOTE(review): scaleFactor=2 and minNeighbors=2 are unusually coarse
    // (typical values are ~1.1 and 3) — confirm these were tuned on purpose.
    face_cascade.detectMultiScale( frame_gray, faces, 2, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
    for( size_t i = 0; i < faces.size(); i++ )
    {
        // Each face is marked twice: a blue bounding box and a magenta ellipse.
        Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
        rectangle(frame,faces[i],Scalar(255,0,0));
        ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );

        // Restrict the eye search to the detected face region (grayscale).
        Mat faceROI = frame_gray( faces[i] );
        std::vector<Rect> eyes;

        //-- In each face, detect eyes
        eyes_cascade.detectMultiScale( faceROI, eyes, 2, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        for( size_t j = 0; j < eyes.size(); j++ )
        {
            // Eye centers are offset back into full-frame coordinates.
            Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
            int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
            circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
        }
    }
    //-- Show what you got
    imshow( window_name, frame );
    outputVideo.write(frame);
}
// function to record video taken by usb camera int record_video(VideoWriter &oVideoWriter, VideoCapture &cap){ namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo" if ( !oVideoWriter.isOpened() ) //if not initialize the VideoWriter successfully, exit the program { cout << "ERROR: Failed to write the video" << endl; return -1; } is_recording=true; while (is_recording) { Mat frame; bool bSuccess = cap.read(frame); // read a new frame from video if (!bSuccess) //if not success, break loop { cout << "ERROR: Cannot read a frame from video file" << endl; break; } oVideoWriter.write(frame); //writer the frame into the file imshow("MyVideo", frame); //show the frame in "MyVideo" window if (waitKey(10) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop { cout << "esc key is pressed by user" << endl; break; } } }
int main(int argc, char* argv[]) { int i; namedWindow("Frame"); //////////////////////////////////////////////////// Mat frame_4, frame; //double dWidth = 640; //get the width of frames of the video //double dHeight = 480; //get the height of frames of the video vector<Mat> layers; WebCore* web_core = WebCore::Initialize(WebConfig()); WebView* view = web_core->CreateWebView(WIDTH, HEIGHT); WebURL url(WSLit(URL_polar)); view->LoadURL(url); BitmapSurface* surface; Size frameSize(static_cast<int>(WIDTH), static_cast<int>(HEIGHT)); VideoWriter writer ("polarbearchillin.avi", CV_FOURCC('D','I','V','3'), 15, frameSize, true); //initialize the VideoWriter object for(int i=0; i<1000000; i++) //delay is added to pass advertisement on some of the URLs { web_core->Update(); } /////////////////////////////////////////////////// if ( !writer.isOpened() ) //if not initialize the VideoWriter successfully, exit the program { cout << "ERROR: Failed to write the video" << endl; return -1; } while(i != 27) { //////////////////////////////////////////////// web_core->Update(); surface = (BitmapSurface*)view->surface(); frame_4 = Mat(Size(WIDTH, HEIGHT), CV_8UC4, (unsigned char*)surface->buffer(), Mat::AUTO_STEP); split(frame_4, layers); layers.pop_back(); merge(layers, frame); //////////////////////////////////////////////// writer.write(frame); //writer the frame into the file imshow("Frame", frame); i=waitKey(30); } /////////////////////// view->Destroy(); WebCore::Shutdown(); /////////////////////// destroyAllWindows(); return 0; }
int main(int argc, char* argv[]) { VideoCapture cap(0); // open the video camera no. 0 if (!cap.isOpened()) // if not success, exit program { cout << "ERROR: Cannot open the video file" << endl; return -1; } namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo" double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video cout << "Frame Size = " << dWidth << "x" << dHeight << endl; Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight)); VideoWriter oVideoWriter ("myVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object if ( !oVideoWriter.isOpened() ) //if not initialize the VideoWriter successfully, exit the program { cout << "ERROR: Failed to write the video" << endl; return -1; } while (1) { Mat frame; bool bSuccess = cap.read(frame); // read a new frame from video if (!bSuccess) //if not success, break loop { cout << "ERROR: Cannot read a frame from video file" << endl; break; } oVideoWriter.write(frame); //writer the frame into the file imshow("MyVideo", frame); //show the frame in "MyVideo" window if (waitKey(10) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop { cout << "esc key is pressed by user" << endl; break; } } return 0; }
bool checkFrame(VideoCapture &cam, VideoWriter &video, std::vector<Vec3f> &circles) { Mat frame; //Mat to store current frame from camera Mat hsv; //Mat to store transformed HSV space image Mat upLim; //Mat to store HSV image with upper limit applied Mat downLim; //Mat to store HSV image with lower limit applied Mat redImg; //Mat to store HSV image with combined upper and lower limits //capture frame cam >> frame; resize(frame, frame, Size(640, 360), 0, 0, INTER_CUBIC); video.write(frame); //convert to HSV space cvtColor(frame, hsv, CV_BGR2HSV); // <TODO: remove hard coded limits> inRange(hsv, Scalar(0, 100, 100), Scalar(10, 255, 255), downLim); inRange(hsv, Scalar(160, 100, 100), Scalar(179, 255, 255), upLim); //combine two ranges into single image addWeighted(downLim, 1.0, upLim, 1.0, 0.0, redImg); //apply Gaussian blur to improve detection GaussianBlur(redImg, redImg, Size(9, 9), 2, 2); //apply Hough transform (configured to only really work at 7m) //inputArray, outputArray, method, dp, minDistance, param1, param2, minR, maxR //redImg is 320x240 HoughCircles(redImg, circles, CV_HOUGH_GRADIENT, 1, redImg.rows / 2, 50, 24, 5, 9); //if circle is found, save image and return true if (circles.size() > 0) { // clone original frame to draw circle on Mat endFrame = frame.clone(); // draw circle for (size_t i = 0; i < circles.size(); i++) { Point center(cvRound(circles[i][0]), cvRound(circles[i][1])); int radius = cvRound(circles[i][2]); // circle center circle(endFrame, center, 3, Scalar(0, 255, 0), -1, 8, 0); // circle outline circle(endFrame, center, radius, Scalar(0, 0, 255), 3, 8, 0); } // save images imwrite("/home/pi/NGCP/RPI_cpslo/Datalogs/OriginalImg.jpg", frame); imwrite("/home/pi/NGCP/RPI_cpslo/Datalogs/HSVImg.jpg", redImg); imwrite("/home/pi/NGCP/RPI_cpslo/Datalogs/FinalImg.jpg", endFrame); return true; } return false; }
// Opens camera 0, previews frames in the "Wideo" window and records them to
// wideo.avi (MPEG-1, 20 fps) until the user presses ESC.
// (Original comments were in Polish; translated to English below. The
// Polish console messages are runtime strings and are left untouched.)
int main(int argc, char* argv[])
{
    VideoCapture cap(0); // open the video camera
    if (!cap.isOpened()) // exit on failure
    {
        cout << "ERROR: Cannot open the video file" << endl;
        return -1;
    }

    namedWindow("Wideo",CV_WINDOW_AUTOSIZE); // create the video window

    double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); // get the frame width
    double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); // get the frame height
    cout << "Frame Size = " << dWidth << "x" << dHeight << endl;

    Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
    VideoWriter oVideoWriter ("wideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); // create the VideoWriter object

    if ( !oVideoWriter.isOpened() ) // if the VideoWriter does not work, quit
    {
        cout << "Blad nie mozna odczytac wideo" << endl;
        return -1;
    }

    while (1)
    {
        Mat frame;
        bool bSuccess = cap.read(frame); // read the video frame by frame
        if (!bSuccess) // on error leave the loop
        {
            cout << "Blad nie mozna odczytac kaltki wideo" << endl;
            break;
        }
        oVideoWriter.write(frame); // save the frame to the file
        imshow("Wideo", frame); // show the frame in the window
        if (waitKey(10) == 27) // wait for the exit key — leave the loop
        {
            cout << "esc wcisniety koniec" << endl;
            break;
        }
    }
    return 0;
}
int main(){ // Variables VideoCapture capture; VideoWriter writer; Mat frame; // Read from source capture.open(0); //capture.open("../Videos/chessboard-1.avi"); // Check if the source was opened correctly if (!capture.isOpened()){ cout << "Cannot open video device or file!" << endl; return -1; } // Read first frame (needed to configure VideoWriter) capture.read(frame); if (frame.empty()){ printf("VideoCapture failed getting the first frame!\n"); return -1; } // Open a video file for writing and check writer.open("./video.avi", CV_FOURCC('D','I','V','X'), 15, frame.size(), true); if( !writer.isOpened() ) { printf("VideoWriter failed to open!\n"); return -1; } // Read the video while(true){ // Read new frame capture.read(frame); if (frame.empty()) break; // Write frame to a file writer.write(frame); // Show frame imshow("video", frame); if ((cvWaitKey(10) & 255) == 27) break; } // Release memory capture.release(); frame.release(); return 0; }
int main(int, char**) { Mat src; // use default camera as video source VideoCapture cap(0); // check if we succeeded if (!cap.isOpened()) { cerr << "ERROR! Unable to open camera\n"; return -1; } // get one frame from camera to know frame size and type cap >> src; // check if we succeeded if (src.empty()) { cerr << "ERROR! blank frame grabbed\n"; return -1; } bool isColor = (src.type() == CV_8UC3); //--- INITIALIZE VIDEOWRITER VideoWriter writer; int codec = CV_FOURCC('M', 'J', 'P', 'G'); // select desired codec (must be available at runtime) double fps = 25.0; // framerate of the created video stream string filename = "./live.avi"; // name of the output video file writer.open(filename, codec, fps, src.size(), isColor); // check if we succeeded if (!writer.isOpened()) { cerr << "Could not open the output video file for write\n"; return -1; } //--- GRAB AND WRITE LOOP cout << "Writing videofile: " << filename << endl << "Press any key to terminate" << endl; for (;;) { // check if we succeeded if (!cap.read(src)) { cerr << "ERROR! blank frame grabbed\n"; break; } // encode the frame into the videofile stream writer.write(src); // show live and wait for a key with timeout long enough to show images imshow("Live", src); if (waitKey(5) >= 0) break; } // the videofile will be closed and released automatically in VideoWriter destructor return 0; }
// Reads a hand-gesture video, segments skin regions, runs SIFT/contour
// processing on them and, after a warm-up of 8 frames, overlays Kalman
// predictions and records the tracking view to an output AVI.
//
// Relies on file-level globals: noframes, skin, skin1, skin2, src_gray,
// thresh, max_thresh, keypoints, H, R, L, GetSkin(), Kalman_Perdict() and
// thresh_callback() (declared elsewhere in this file).
int main(int argc, const char *argv[])
{
    VideoCapture cap("/home/mac/Documents/PROJECT/NewDataSets/krithika/book_k3.wmv");
    noframes=1;

    double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
    double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
    Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
    VideoWriter oVideoWriter ("/home/mac/Documents/PROJECT/Output/3spaces/kalmanfilter.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object
    //notstarted=false;

    Mat3b frame;
    while(cap.read(frame))
    {
        // Skin segmentation -> grayscale -> binary mask (threshold at 50).
        skin = GetSkin(frame);
        //imshow("Skin",skin);
        cvtColor(skin,skin,CV_RGB2GRAY);
        skin1 = skin> 50;
        blur( skin1, skin1, Size(3,3) );

        char* source_window = "Source";
        namedWindow( source_window, CV_WINDOW_AUTOSIZE );
        src_gray=skin1;
        createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );

        // SIFT keypoints on the skin mask feed the per-frame processing.
        cv::SiftFeatureDetector detector;
        detector.detect(skin1, keypoints);
        Mat output;
        thresh_callback( 0, 0 );

        // After 8 warm-up frames, run the Kalman predictions (H, R, L
        // presumably correspond to tracked regions — TODO confirm against
        // their declarations) and record the previous frame's overlay.
        if(noframes>8)
        {
            Kalman_Perdict(H);Kalman_Perdict(R);Kalman_Perdict(L);
            imshow("Tracking",skin2);
            oVideoWriter.write(skin2);
        }
        drawKeypoints(skin1, keypoints, output);
        skin2=frame; // keep the current frame for the next iteration's display
        medianBlur( skin1, skin1, 5 );
        noframes+=1;
        waitKey(5);
        keypoints.clear();
    }
    return 0;
}
int main () { VideoCapture vcap (0); if (!vcap.isOpened ()) { cout << "Error opening video stream or file" << endl; return -1; } cout << "Starting ..." << endl; int frame_width = vcap.get (CV_CAP_PROP_FRAME_WIDTH); int frame_height = vcap.get (CV_CAP_PROP_FRAME_HEIGHT); cout << "frame width: " << frame_width << endl; cout << "frame height: " << frame_height << endl; VideoWriter video ("out.avi", CV_FOURCC ('M', 'J', 'P', 'G'), 10, Size (frame_width, frame_height), true); int i = 0; for (;;) { Mat frame; vcap >> frame; video.write (frame); imshow ("Frame", frame); if (i < 10) { ostringstream convert; convert << i; string name = "image_" + convert.str () + ".jpg"; imwrite (name, frame); i++; } char c = (char) waitKey (33); if (c == 27) break; } return 0; }
void Converter::mergeImgToVid(char* src, char* dst, double fps){ path p(src); VideoWriter output; bool opened = false; if (exists(p) && is_directory(p)){ for (directory_entry& x : directory_iterator(p)){ if (is_regular_file(x.path())){ string path = x.path().string(); if (path.find( ".DS_Store" ) != string::npos ) continue; cv::Mat inImg = imread(path, CV_LOAD_IMAGE_COLOR); if (!output.isOpened()){ output.open(dst, CV_FOURCC('M', 'J', 'P', 'G'), fps, inImg.size(), true); } output.write(inImg); } } } }
// Vision main loop: grabs frames from the FlyCapture camera, processes and
// previews them, and records them to demo.avi until 'q' is pressed or the
// vision-thread flag is cleared (e.g. by the SIGTERM handler).
//
// The commented-out code below is the remnant of an earlier design that
// ran this loop on a dedicated pthread.
void RunVision()
{
    // pthread_t visionThread;
    //
    // if(1 == pthread_create(&visionThread, NULL, VisionActionAsync, NULL))
    // {
    //     fprintf(stderr, "Couldn't create Vision thread\n");
    //     exit(1);
    // }
    //}
    //
    //void* VisionActionAsync(void*)
    //{
    Vision::GetInstance()->OpenFlyCapCamera();
    signal(SIGTERM, SigTermHandler);
    Vision::GetInstance()->IsVisionThreadRunning = true;

    const string outputFile = "/home/robot/workspace2/RoboCup2016/RoboCup2016/GoalKeeper2016/demo.avi";
    VideoWriter outputVideo;
    outputVideo.open(outputFile, CV_FOURCC('M', 'J', 'P', 'G'), 10, Size(FRAME_WIDTH, FRAME_HEIGHT), true);

    // capture loop
    char key = 0;
    while (key != 'q' && Vision::GetInstance()->IsVisionThreadRunning == true)
    {
        Mat currentFrame;
        Vision::GetInstance()->GetFrameFromFlyCap(currentFrame);
        Vision::GetInstance()->ProcessCurrentFrame(currentFrame);
        // NOTE(review): window title "Outout" looks like a typo for
        // "Output" — left unchanged since it is a runtime string.
        imshow("Outout", currentFrame);
        key = waitKey(30);
        outputVideo.write(currentFrame);
    }
    Vision::GetInstance()->CloseFlyCapCamera();
}
// Appends `image` to `writer`, lazily opening the output file on first use.
//
// The output name is built as <path><buffer>.<ext> and encoded with the
// XVID codec at 12 fps, sized to match `image`. If the writer cannot be
// opened, a diagnostic is printed and the write below is a no-op.
void Video::grabVideoAndData(string path, string ext, string buffer, VideoWriter &writer, const Mat &image)
{
    if (!writer.isOpened()) {
        // Compose the destination file name.
        const string destination = path + buffer + ("." + ext);

        // Open the output.
        writer.open(destination, CV_FOURCC('X', 'V', 'I', 'D'), 12,
                    cv::Size(image.size().width, image.size().height), true);

        if (!writer.isOpened()) {
            printf("Could not open the output video for write: %s", destination.c_str());
        }
    }
    writer.write(image);
}
// Plays a video file, records a copy to "Video.avi" (MPEG-1, 30 fps) and
// keeps the very first frame on display in a second window.
// (Original comments were in German; translated to English below. The
// German window titles are runtime strings and are left untouched.)
int main(){
    namedWindow("Video");
    namedWindow("erstes Video-Frame");

    VideoCapture videoCapture;
    // NOTE: adjust this path for your machine!
    videoCapture.open("C:/Users/Andreas/Desktop/Micro-dance_2_.avi");

    int width = videoCapture.get(CV_CAP_PROP_FRAME_WIDTH);
    int height = videoCapture.get(CV_CAP_PROP_FRAME_HEIGHT);

    // >>>>>>>>>> initialize the VideoWriter object
    VideoWriter videoWriter;
    //>>>>>>>>>> open the VideoWriter file
    videoWriter.open("Video.avi", CV_FOURCC('P','I','M','1'), 30, Size(width, height), true);

    Mat firstFrame;
    int frameNumber = 0;
    while(true){
        Mat videoFrame;
        if (false == videoCapture.read(videoFrame)){
            break; // end of stream
        }
        //>>>>>>>>>> write the frame with the VideoWriter
        videoWriter.write(videoFrame);
        frameNumber++;
        if (frameNumber == 1){
            videoFrame.copyTo(firstFrame); // copies the pixels of the first video frame
        }
        imshow("erstes Video-Frame", firstFrame);
        imshow("Video", videoFrame);
        waitKey(30);
    }
    return 0;
}
// Camera line-following demo: captures frames, (optionally) records and
// displays them, and feeds them to a LineFinder image-processing pipeline.
// NOTE(review): `recorder` is dereferenced below while uninitialized (its
// assignment is commented out) — as written this is undefined behavior.
// NOTE(review): this definition appears truncated in this file (the final
// closing brace of main is missing).
int main(){
    //Dimensions of Capture window
    int scale = 1;
    int width = 640/scale;
    int height = 480/scale;
    int lineSize;
    unsigned int start_time,stop_time;

    //Open capture device
    int device = 0; //assume we want first device
    bool gui = true;
    bool record = false;

    //create video capture device, set capture area
    VideoCapture capture = VideoCapture(device);
    capture.open(device);
    capture.set(CAP_PROP_FRAME_WIDTH,width);
    capture.set(CAP_PROP_FRAME_HEIGHT,height);

    //create recording object
    VideoWriter *recorder;
    //recorder = new VideoWriter ("test.avi",cv::CV_F FOURCC('D','I','V','X'), 30,Point(width,height));
    if (!recorder->isOpened() && record){
        return 0;
    }

    //Construct GUI object
    DebugGUI myGUI = DebugGUI(gui);

    //create image processing objects
    LineFinder imgproc = LineFinder(myGUI.getHSV(),scale);
    //imgproc.configWebcam("line");

    if(capture.isOpened()){ //check if we succeeded
        Mat raw;
        //main loop
        while(true){
            start_time = GetTimeMs64();
            //Pull a frame from the camera to the raw image
            // capture the current frame
            if (!capture.grab()){
                break;
            }
            capture >> raw;
            // Clone when the GUI is shown so drawing does not alter `raw`.
            if (gui){
                imgproc.setFrame(raw.clone());
            }else{
                imgproc.setFrame(raw);
            }
            imgproc.setHSV(&myGUI);
            /*//imgproc.getGray();
            imgproc.thresholdHSV();
            imgproc.fillHoles();
            //imgproc.findObjects();
            //imgproc.printBiggestObject(raw)
            imgproc.findLines();
            double size = imgproc.calculateBestGradient();
            LineObject* drawLine = imgproc.calculateErrorLine(height,width);
            if (drawLine != 0){
                lineSize = drawLine->size();
            }else{
                lineSize = 0;
            }
            if (gui){
                imgproc.drawErrorLine(raw,height,width);
                imgproc.printLines(raw);
            }
            //print (1/(time2-time1))
            #ifdef time
            stop_time = GetTimeMs64();
            cout << "FPS: " << 1000/(stop_time - start_time) << endl;
            #else
            cout << "Gradient: " << size << " " << "Offset: " << lineSize << endl;
            #endif
            */
            if (gui){
                imshow("Raw",raw);
            }
            if (record){
                recorder->write(raw);
            }
            if(waitKey(30) >= 0){
                return 0;
            }
        }
    }
// Frame sink called once per captured frame: keeps recording state in
// function-local statics, opens a new video file when needed, writes the
// frame, and (when enabled) periodically captures still images.
//
// Relies on file-level globals vidCodec, vidDir, imgDir, imgFmt and the
// helpers createVideoFile()/writeImageFile() (declared elsewhere).
//
// NOTE(review): most of the "configuration" locals below (vidNum, imgNum,
// vidTime, imgTime, ...) are re-initialized on every call, so only the
// static variables actually carry state between frames — e.g. imgNum is 0,
// which permanently disables image capture. Confirm this is intended.
void grabarVideo(Mat frame, VideoCapture cap)
{
    bool static isRecording = false;   // true once a video file is open
    VideoWriter static writer;         // current output file
    time_t static vidDelta = 0;        // seconds since the video was opened
    int vidFps = 10;
    int fourcc = CV_FOURCC(vidCodec[0],vidCodec[1],vidCodec[2], vidCodec[3]);
    int imgInterval = 60; // seconds
    int imgNum = 0;
    time_t sec;
    long static frameNum = 0;          // frames written to the current file
    bool isDisplayEnabled = false;
    // int delay = 1;
    int vidNum = 1;
    bool isRecordingEnabled = vidNum > 0 ? true : false;
    bool isImageCaptureEnabled = imgNum > 0 ? true : false;
    time_t vidTime = 20;
    int vidTotal = 0;
    time_t imgTime = 0;
    time_t imgDelta = 0;
    int imgTotal = 0;
    int vidInterval = 60; // seconds
    double fps = 0.0;

    sec = time(NULL);
    frameNum++;

    if (isDisplayEnabled) {
        if(!frame.empty())
            imshow("Current Frame", frame);
    }

    // Decide whether to create a new video file.
    if ((isRecordingEnabled) && (!isRecording)) {
        int width = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);
        int height = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
        writer = createVideoFile(vidDir, width, height, vidFps, fourcc, sec);
        if(writer.isOpened()) {
            vidTime = sec;
            isRecording = true;
            frameNum = 0;
        }
        else {
            cout<< "No se pudo abrir el directorio: "<<vidDir<<endl;
            isRecordingEnabled=false;
        }
    }

    // Write the frame to video, compute the elapsed interval and decide
    // whether the current recording segment is complete.
    if (isRecordingEnabled) {
        writer.write(frame);
        vidDelta = sec - vidTime;
        // cout << "vidDelta "<<vidDelta<<" >= "<<vidInterval<<endl;
        if (vidDelta >= vidInterval) {
            // isRecording = false;
            vidTotal = vidTotal + 1;
            // cout << "Videos recorded =" << vidTotal << "/" << vidNum << endl;
            // cout << "vidTotal="<<vidTotal<<" vidNum="<<vidNum<<endl;
            if (vidTotal >= vidNum) {
                isRecordingEnabled = false;
                if (vidDelta > 0) {
                    fps = frameNum / vidDelta;
                    frameNum = 0;
                }
                // cout << "Recording completed fps=" << fps << endl;
                if (isDisplayEnabled) {
                    writer = VideoWriter(); // replace the writer, closing the file
                }
            }
        }
    }

    // Periodic still-image capture (inactive while imgNum == 0 above).
    if (isImageCaptureEnabled) {
        imgDelta = (sec - imgTime);
        if (imgDelta >= imgInterval) {
            writeImageFile(imgDir, frame, imgFmt, sec);
            imgTime = sec;
            imgTotal = imgTotal + 1;
            if (imgTotal >= imgNum) {
                isImageCaptureEnabled = false;
            }
        }
    }
}
// Appends one frame to the file-scope videoWriter.
// NOTE(review): assumes videoWriter was opened elsewhere — OpenCV silently
// drops writes to an unopened VideoWriter.
void writeVideo(Mat frame)
{
    videoWriter.write(frame);
}
//function to write a frame void writeFrame(VideoWriter &output_cap, Mat &outFrame) { output_cap.write(outFrame); } //END writeFrame()
// Camera preview with on-demand recording: shows camera 0 with a frame
// counter overlay; 'p' saves a snapshot, 'v' toggles recording to
// video.avi (MJPG, 15 fps), ESC quits.
// NOTE(review): this definition appears truncated in this file — the final
// closing braces/return of main are missing.
int main()
{
    cout << "Built with OpenCV " << CV_VERSION << endl;
    Mat image;
    int frame = 0;
    int frameWidth = 0;
    int frameHeight = 0;
    int writeVideo = 0;  // recording toggle flipped by the 'v' key
    char keyValue = 0;
    char frameInfo[256]; // overlay text, reused as the snapshot file name
    VideoCapture capture;
    VideoWriter videosink;

    capture.open(0); // open camera
    if(capture.isOpened())
    {
        cout << "Capture opened" << endl;
        frameWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);
        frameHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
        videosink.open("video.avi", CV_FOURCC('M','J','P','G'), 15, Size(frameWidth,frameHeight), true);
        if (!videosink.isOpened())
        {
            printf("[ERR] Open Video Sink error! line:%d\n", __LINE__);
        }
        else
        {
            printf("[INFO] Open Video Sink OK!size %dx%d. \n", frameWidth, frameHeight);
        }

        for(;;)
        {
            capture >> image;
            if(image.empty())
            {
                printf("[ERR] Capture Image error line:%d\n", __LINE__);
                break;
            }
            // Overlay "Frame<N>.jpg" on the image.
            sprintf(frameInfo, "Frame%d.jpg", frame++);
            drawText(frameInfo, image);
            imshow("Sample", image);

            keyValue = (char)waitKey(10);
            //videosink << image;
            if(keyValue >= 0) //wait for key
            {
                if (keyValue == 27)
                {
                    printf("[INFO] Key %c ESC striked. EXIT.\n", keyValue);
                    break;
                }
                else if (keyValue == 'p')
                {
                    imwrite(frameInfo, image); //write frame to file.
                    printf("[INFO] Key %c striked. Save pic to %s!\n", keyValue, frameInfo);
                }
                else if ('v' == keyValue)
                {
                    writeVideo = writeVideo ^ 1; // toggle recording on/off
                    printf("[INFO] Key %c striked. writeVideo = %d \n", keyValue, writeVideo);
                }
                else
                {
                    printf("[INFO] Key %c striked. Do Nothing!\n", keyValue);
                }
            }
            if (1 == writeVideo)
            {
                videosink.write(image); //write frame to video.
                // printf("[INFO] writeVideo flag set. Save video to disk!\n");
            }
        }
    }
// Motion-history based motion tracker: shows one of four visualizations
// (selected by the "visual" trackbar) of camera 0's motion, draws boxes
// around moving segments with their global motion direction, and records
// the visualization to outputVideo.avi.
//
// Uses file-level globals: visual_trackbar, DEFAULT_THRESHOLD, MHI_DURATION,
// MIN_TIME_DELTA, MAX_TIME_DELTA and draw_motion_comp() (defined elsewhere).
int main(int argc, char** argv)
{
    namedWindow("Motion_tracking",CV_WINDOW_AUTOSIZE);
    string values[4] = {"input", "frame_diff", "motion_hist", "grad_orient"};
    createTrackbar( "visual", "Motion_tracking", &visual_trackbar, 3, NULL );
    createTrackbar("threshold", "Motion_tracking", &DEFAULT_THRESHOLD, 255, NULL);

    VideoCapture cap;
    cap.open(0);
    if ( !cap.isOpened() ) // if not success, exit program
    {
        cout << "Cannot open the video file" << endl;
        return -1;
    }

    Mat frame,ret,frame_diff,gray_diff,motion_mask;

    // Warm-up: grab a few frames so `frame` holds a valid image (the loop
    // actually stops after the 6th read because of the i==5 break).
    for(int i = 0; i<10; i++)
    {
        cap.read(frame);
        Size frame_size = frame.size();
        int h = frame_size.height;
        int w = frame_size.width;
        if(i==5)
            break;
    }

    // Re-open the camera for the main loop.
    cap.open(0);
    if (!cap.isOpened()) // if not success, exit program
    {
        cout << "Cannot open the video cam" << endl;
        return -1;
    }

    ret = frame.clone();
    Size frame_size = frame.size();
    int h = frame_size.height;
    int w = frame_size.width;
    double timestamp = 1000.0*clock()/CLOCKS_PER_SEC;
    Mat prev_frame = frame.clone();

    // Per-pixel state buffers for the motion-history pipeline.
    Mat motion_history(h,w, CV_32FC1,Scalar(0,0,0));
    Mat hsv(h,w, CV_8UC3,Scalar(0,255,0));
    Mat mg_mask(h,w, CV_8UC1,Scalar(0,0,0));
    Mat mg_orient(h,w, CV_32FC1,Scalar(0,0,0));
    Mat seg_mask(h,w, CV_32FC1,Scalar(0,0,0));
    vector<Rect> seg_bounds;
    String visual_name;
    Mat vis(h,w,CV_32FC3);
    Mat vis1(h,w,CV_8UC1);
    Mat silh_roi,orient_roi,mask_roi,mhi_roi;

    // NOTE(review): the writer is opened at a fixed 640x480 / 80 fps; if
    // the camera delivers a different frame size the written file may be
    // unplayable — confirm against the actual capture size.
    VideoWriter output;
    output.open ( "outputVideo.avi", CV_FOURCC('D','I','V','X'), 80, cv::Size ( 640,480), true );

    while(1)
    {
        // NOTE(review): retrieve() followed by read() grabs twice per
        // iteration; the retrieve() result is immediately overwritten.
        cap.retrieve(frame);
        cap.read(frame);
        ret = frame.clone();
        if (!ret.data) //if not success, break loop
        {
            cout << "video ended" << endl;
            break;
        }

        // Threshold the frame difference to get this iteration's silhouette.
        absdiff(frame, prev_frame, frame_diff);
        cvtColor(frame_diff,gray_diff, CV_BGR2GRAY);
        threshold(gray_diff,ret,DEFAULT_THRESHOLD,255,0);
        motion_mask = ret.clone();

        // Fold the silhouette into the motion history and derive gradients
        // and motion segments from it.
        double timestamp = 1000.0*clock()/CLOCKS_PER_SEC;
        updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION);
        calcMotionGradient(motion_history, mg_mask, mg_orient, MIN_TIME_DELTA, MAX_TIME_DELTA, 3);
        segmentMotion(motion_history, seg_mask, seg_bounds, timestamp, 32);

        // Build the visualization selected by the trackbar.
        visual_name = values[visual_trackbar];
        if(visual_name == "input")
            vis = frame.clone();
        else if(visual_name == "frame_diff")
            vis = frame_diff.clone();
        else if(visual_name == "motion_hist")
        {
            // Map motion-history age to gray levels.
            for(int i=0; i< motion_history.cols; i++)
            {
                for(int j=0; j< motion_history.rows ; j++)
                {
                    float a = motion_history.at<float>(j,i);
                    // cout << (a-timestamp-MHI_DURATION)/MHI_DURATION << endl;
                    if((a-timestamp-MHI_DURATION)/MHI_DURATION <= -5)
                        vis1.at<uchar>(j,i) = 0;
                    else
                        vis1.at<uchar>(j,i) = (a-timestamp-MHI_DURATION)/MHI_DURATION;
                }
            }
            cvtColor(vis1,vis,COLOR_GRAY2BGR);
        }
        else if(visual_name == "grad_orient")
        {
            // Encode gradient orientation as hue and the mask as value.
            for(int i=0; i< hsv.cols; i++)
            {
                for(int j=0; j< hsv.rows ; j++)
                {
                    float a = (mg_orient.at<float>(j,i))/2;
                    hsv.at<Vec3b>(j,i)[0] = a;
                    float b = (mg_mask.at<uchar>(j,i))*255;
                    hsv.at<Vec3b>(j,i)[2] = b;
                }
            }
            cvtColor(hsv,vis,COLOR_HSV2BGR);
        }

        // Box every moving segment of plausible size and, when it contains
        // enough silhouette pixels, draw its global motion direction.
        for(unsigned int h = 0; h < seg_bounds.size(); h++)
        {
            Rect rec = seg_bounds[h];
            if(rec.area() > 5000 && rec.area() < 70000)
            {
                rectangle(vis, rec,Scalar(0,0,255),3);
                silh_roi = motion_mask(rec);
                orient_roi = mg_orient(rec);
                mask_roi = mg_mask(rec);
                mhi_roi = motion_history(rec);
                if(norm(silh_roi, NORM_L2, noArray()) > rec.area()*0.5)
                {
                    double angle = calcGlobalOrientation(orient_roi, mask_roi, mhi_roi,timestamp, MHI_DURATION);
                    // cout << rec.width << endl;
                    draw_motion_comp(vis, rec.x, rec.y, rec.width, rec.height,angle,vis);
                }
            }
        }

        imshow("Motion_tracking",vis);
        prev_frame = frame.clone();
        // waitKey(30);
        output.write (vis);
        if(waitKey(30) >= 0) // wait 30ms for a key; any key breaks the loop
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
        // MHI_DURATION = 1000.0*clock()/CLOCKS_PER_SEC- timestamp;
    }
    // waitKey(30);
    return 0;
}
void test_video(const Size & size) { char key = 27; Mat img, draw; Ptr<SVM> svm; HOGDescriptor hog; hog.winSize = size; vector< Rect > locations; vector< Rect > found_filtered; // Load the trained SVM. svm = StatModel::load<SVM>(TRAINED_SVM); // Set the trained svm to my_hog vector< float > hog_detector; get_svm_detector(svm, hog_detector); hog.setSVMDetector(hog_detector); printHOGParams(hog); VideoCapture video; // Open the video file. video.open(TRAFFIC_VIDEO_FILE); if (!video.isOpened()) { cerr << "Unable to open the device" << endl; exit(-1); } // Get the frame rate double rate = video.get(CV_CAP_PROP_FPS); cout << " Frame rate : " << rate << endl; cout << " Input video codec :" << video.get(CV_CAP_PROP_FOURCC); // initilaize the video writer object to write the video output std::string outputFile(OUT_Video_File); VideoWriter writer; int codec = static_cast<int>(video.get(CV_CAP_PROP_FOURCC)); //int codec = CV_FOURCC('M', 'J', 'P', 'G'); bool isWriterInitialized = false; int num_of_vehicles = 0; bool end_of_process = false; while (!end_of_process) { video >> img; if (img.empty()) break; draw = img.clone(); Mat cropped; cv::resize(draw, cropped, Size(720, 560)); Mat temp, temp3; cvtColor(cropped, temp, COLOR_BGR2GRAY); /*Mat bgr[3]; //destination array split(temp3,bgr);//split source temp = bgr[0]+bgr[2]; */ if (isWriterInitialized) { //execute only once isWriterInitialized = true; /*writer.open(outputFile, capture.get(CV_CAP_PROP_FOURCC), capture.get(CV_CAP_PROP_FPS), Size(capture.get(CV_CAP_PROP_FRAME_WIDTH),capture.get(CV_CAP_PROP_FRAME_HEIGHT)), true);*/ writer.open(outputFile, codec, rate, cropped.size(), true); } locations.clear(); // Rect(x,y,w,h) w->width=cols;h->rows // first remove the upper 50% from height Original Cropped =size(720,560)=(cols,rows) Mat roi = temp(Rect(0, temp.rows*0.5, temp.cols, temp.rows - temp.rows*0.5)); //size(roi) = size(720,280) //cout<<"roi.size() = "<<roi.size()<<endl; int y_offset = temp.rows*0.5; //again crop the lower 10 
% to remove the images near dashboard-> remove false positives roi = roi(Rect(0, 0, roi.cols, roi.rows - 100)); //cout<<"roi.size() = "<<roi.size()<<endl; //no offset required as this is the lower row colums. //hog.detectMultiScale(roi, locations); //hog.detectMultiScale(roi, locations, 1, Size(50, 50), Size(32, 32), 1, 2);//对图片进行多尺度行人检测 hog.detectMultiScale(roi, locations, 0.00, Size(4, 8), Size(0, 0), 1.05, 2);//less false positive //hog.detectMultiScale(roi, locations, 0.00, Size(8,8), Size(0,0), 1.05, 2);// less true negative(missed) // add the offset std::vector<Rect>::iterator it = locations.begin(); std::vector<Rect>::iterator itend = locations.end(); vector<Rect> actuallocations; bool isVehicle = false; for (; it != itend; it++) { Rect current = *it; //cout<<" Rect current = "<< current<<endl; //cout<<" roi size= "<<roi.size()<<endl; Mat roi2Check = roi(Rect(current.x, current.y, current.width, current.height));//define a roi of 50x50 //cout<<" roi2Check size= "<<roi2Check.size()<<endl; isVehicle = checkIfpatchIsVehicle(roi2Check); if (isVehicle) actuallocations.push_back(Rect(current.x, current.y + y_offset, current.width, current.height)); } if (0 != actuallocations.size()) draw_locations(cropped, actuallocations, Scalar(0, 255, 0)); imshow(WINDOW_NAME, cropped); if (save_video) writer.write(cropped); //wait infinite fro keypress key = (char)waitKey(3); if (27 == key) end_of_process = true; } // Close the video file. // Not required since called by destructor writer.release(); video.release(); }
// One iteration of the tracking loop: grabs a frame, stabilizes it against
// the previous frame via sparse optical flow + rigid transform, combines a
// stabilized frame-difference mask with an HSV color mask, finds the object
// contour, reports its position, and renders debug/recording output.
//
// Uses file-level state: capture, curr_bgr_frame, curr_gray, prev_gray,
// current_transform, previous_transform, hsv_min/hsv_max, running, debug,
// recording, tracking_recorder, mouse state, and the helpers
// AddToDebugImages/Close/Blur/Threshold/WritePosition/ShowDebugImages.
void TrackingRoutine()
{
    int64 start, finish;
    start = getTickCount();

    capture.read(curr_bgr_frame);
    if (curr_bgr_frame.empty())
    {
        running = false;
        return; // I DON'T LIKE IT
    }

    Mat curr_hsv_frame;
    cvtColor(curr_bgr_frame, curr_hsv_frame, CV_BGR2HSV);
    cvtColor(curr_bgr_frame, curr_gray, CV_BGR2GRAY);

    // Track good features from the previous gray frame into the current one.
    vector <Point2f> prev_corner, cur_corner;
    vector <Point2f> prev_corner2, cur_corner2;
    vector <uchar> status;
    vector <float> err;
    goodFeaturesToTrack(prev_gray, prev_corner, 200, 0.1, 30);
    calcOpticalFlowPyrLK(prev_gray, curr_gray, prev_corner, cur_corner, status, err);

    // Keep only the correspondences the flow tracker validated.
    for (size_t i = 0; i < status.size(); i++)
    {
        if (status[i])
        {
            prev_corner2.push_back(prev_corner[i]);
            cur_corner2.push_back(cur_corner[i]);
        }
    }

    // translation + rotation only
    if (prev_corner2.size() > 0 && cur_corner2.size() > 0)
    {
        current_transform = estimateRigidTransform(prev_corner2, cur_corner2, false); // false = rigid transform, no scaling/shearing
    }
    if (current_transform.rows == 0) // estimation failed: reuse the last transform
    {
        current_transform = previous_transform.clone();
    }

    ///Diff Section
    // Warp the previous frame onto the current one and diff: moving objects
    // remain while camera motion cancels out.
    Mat stabilized, stab_diff;
    warpAffine(prev_gray, stabilized, current_transform, prev_gray.size());
    absdiff(stabilized, curr_gray, stab_diff);
    AddToDebugImages(stab_diff, "stab_diff");

    // Mask away the border band introduced by the warp (its thickness is
    // the magnitude of the estimated translation).
    Mat rotated_block(prev_gray.size(), prev_gray.type(), Scalar(255));
    int dx = current_transform.at<double>(0, 2);
    int dy = current_transform.at<double>(1, 2);
    int thickness = int(sqrt(dx*dx + dy*dy));
    rectangle(rotated_block, Rect(0, 0, rotated_block.cols, rotated_block.rows), Scalar(0), thickness);
    warpAffine(rotated_block, rotated_block, current_transform, rotated_block.size());
    bitwise_and(rotated_block, stab_diff, stab_diff);
    AddToDebugImages(rotated_block, "rotated-block");

    stab_diff = Close(stab_diff, "stab_diff");
    stab_diff = Blur(stab_diff, "stab_diff");
    stab_diff = Threshold(stab_diff, "stab_diff");

    //Color Section
    Mat hsv_in_range;
    inRange(curr_hsv_frame, hsv_min, hsv_max, hsv_in_range);
    AddToDebugImages(hsv_in_range, "hsv_in_range");
    hsv_in_range = Close(hsv_in_range, "hsv_in_range");
    hsv_in_range = Blur(hsv_in_range, "hsv_in_range");
    hsv_in_range = Threshold(hsv_in_range, "hsv_in_range");

    // A widened HSV window (currently only shown in the debug images).
    Mat hsv_in_expanded_range;
    Scalar hsv_min_expanded = hsv_min - HSV_RANGE_ADDER*(hsv_max - hsv_min);
    Scalar hsv_max_expanded = hsv_max + HSV_RANGE_ADDER*(hsv_max - hsv_min);
    inRange(curr_hsv_frame, hsv_min_expanded, hsv_max_expanded, hsv_in_expanded_range);
    AddToDebugImages(hsv_in_expanded_range, "hsv_in_expanded_range");

    // Canny contours (debug visualization only).
    Mat canny_output;
    vector<vector<Point> > canny_contours;
    vector<Vec4i> canny_hierarchy;
    Canny(curr_gray, canny_output, 80, 240, 3);
    /// Find contours
    findContours(canny_output, canny_contours, canny_hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    /// Draw contours
    Mat drawing = Mat::zeros(canny_output.size(), CV_8UC1);
    for (int i = 0; i< canny_contours.size(); i++)
    {
        Scalar color = Scalar(255, 255, 255);
        drawContours(drawing, canny_contours, i, color, 2, 8, canny_hierarchy, 0, Point());
    }
    AddToDebugImages(canny_output, "conrours");
    AddToDebugImages(drawing, "other_contours");

    //Union Section
    // Blend the motion mask and the color mask 50/50, then re-threshold.
    Mat raw_mask;
    //bitwise_and(diff_closed_blur_threshold, hsv_in_range, raw_mask);
    double lambda = 0.5;
    /*int corners_in_object = 0;
    for (Point2f corner : cur_corner2)
    {
        if ()
    }*/
    raw_mask = lambda*stab_diff + (1 - lambda)*hsv_in_range;
    AddToDebugImages(raw_mask, "raw_mask");
    raw_mask = Threshold(raw_mask, "raw_mask");
    raw_mask = Close(raw_mask, "raw_mask");
    //raw_mask = Blur(raw_mask, "raw_mask");
    raw_mask = Threshold(raw_mask, "raw_mask");

    // Outline every candidate region; the last contour is reported as the
    // tracked object's position.
    Rect object_bounding_rectangle;
    Point2d last_position;
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(raw_mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); // retrieves external contours
    for (vector<Point> contour : contours)
    {
        object_bounding_rectangle = boundingRect(contour);
        rectangle(curr_bgr_frame, object_bounding_rectangle, Scalar(0, 150, 0));
    }

    int x_pos = -1;
    int y_pos = -1;
    if (contours.size() > 0) //stalefix. //TODO:find a better solution
    {
        object_bounding_rectangle = boundingRect(contours.back());
        rectangle(curr_bgr_frame, object_bounding_rectangle, Scalar(0, 0, 0));
        x_pos = object_bounding_rectangle.x + object_bounding_rectangle.width / 2;
        y_pos = object_bounding_rectangle.y + object_bounding_rectangle.height / 2;
        WritePosition(x_pos, y_pos);
    }

    // Overlay the processing rate (tick frequency over this iteration).
    finish = getTickCount();
    double seconds = getTickFrequency() / (finish - start);
    putText(curr_bgr_frame, to_string(seconds), Point(10, 30), CV_FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 0));

    if (debug)
    {
        ShowDebugImages();
        // NOTE(review): `text` is never freed (leaks every frame) and 10
        // bytes can overflow for large coordinates — a fixed stack buffer
        // or std::string would be safer.
        char* text = new char[10];
        sprintf(text, "x:%d x:%d", x_pos, y_pos);
        putText(curr_bgr_frame, text, object_bounding_rectangle.tl(), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 0));
        for (int i = 0; i < prev_corner2.size(); i++)
        {
            arrowedLine(curr_bgr_frame, prev_corner2[i], cur_corner2[i], CV_RGB(0, 255, 0));
        }
    }
    else
    {
        cvDestroyWindow(DEBUG_WINDOW);
    }

    if (recording)
    {
        if (tracking_recorder.isOpened())
        {
            tracking_recorder.write(curr_bgr_frame);
        }
        else
            // NOTE(review): std::exception has no (const char*) constructor
            // in standard C++ — this compiles only with MSVC's extension.
            throw exception("well shit");
        circle(curr_bgr_frame, Point(10, 10), 8, Scalar(0, 0, 255), -1);
    }

    if (mouse_is_dragging)
    {
        rectangle(curr_bgr_frame, initial_click_point, current_mouse_point, Scalar(0, 0, 0));
    }

    imshow(MAIN_WINDOW, curr_bgr_frame);

    // Roll state forward for the next iteration.
    previous_transform = current_transform.clone();
    prev_gray = curr_gray.clone();
}
int main(int argc, const char **argv) { // create an image (3 channels, 16 bit image depth, // 650 high, 600 wide, (0, 50000, 50000)) assigned for // Blue, Green and Red plane respectively.) Mat img(650, 600, CV_16UC3, Scalar(0, 50000, 50000)); if (img.empty()) { cout << "ERROR : Image cannot be loaded..!!" << endl; return -1; } // vector that stores the compression parameters of the image vector<int> compression_params; // specify the compression technique compression_params.push_back(CV_IMWRITE_JPEG_QUALITY); // specify the compression quality compression_params.push_back(98); // write the image to file bool bSuccess = imwrite("./testImage.jpg", img, compression_params); if (!bSuccess) { cout << "ERROR : Failed to save the image" << endl; } // create a window with the name "MyWindow" namedWindow("MyWindow", CV_WINDOW_AUTOSIZE); // display the image which is stored in the 'img' in the "MyWindow" window imshow("MyWindow", img); waitKey(0); destroyWindow("MyWindow"); // write video to file VideoCapture cap(0); // open the video camera no. 
0 if (!cap.isOpened()) // if not success, exit program { cout << "ERROR: Cannot open the video file" << endl; return -1; } namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo" double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video cout << "Frame Size = " << dWidth << "x" << dHeight << endl; Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight)); VideoWriter oVideoWriter ("./MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object if ( !oVideoWriter.isOpened() ) //if not initialize the VideoWriter successfully, exit the program { cout << "ERROR: Failed to write the video" << endl; return -1; } while (1) { Mat frame; bool bSuccess = cap.read(frame); // read a new frame from video if (!bSuccess) //if not success, break loop { cout << "ERROR: Cannot read a frame from video file" << endl; break; } oVideoWriter.write(frame); //writer the frame into the file imshow("MyVideo", frame); //show the frame in "MyVideo" window if (waitKey(10) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop { cout << "esc key is pressed by user" << endl; break; } } return 0; }
int main(int argc, const char * argv[]){ Help(); String recorderName = ""; if (argc > 3) { recorderName = argv[1]; //recorderObject = argv[2]; sensorVectorSize = atoi(argv[2]);//TODO: handle error later samplingFrequency = atof(argv[3]);//TODO: handle error later if (argc > 4) { plot = true;//TODO: make it that the argument should be PLOT, rather than anything as it is now. } } else { printf("Too few arguments. Please enter the name of the recorder, then a string indicating what is being recorded, and finally the sensor data vector size (3 arguments)"); return 1; } //-------------------------------------- time_t session; char buffer [80]; struct tm * timeinfo; time (&session); timeinfo = localtime (&session); //%d%m%H%M%S strftime (buffer,80,"%d%m%H%M%S",timeinfo); String sessionName = String(buffer); int status = CreateDirectories(recorderName, sessionName); if (status == 1) { printf("The was an error creating the necessary folders for the recorder, the program will now exit.\n"); return 1; } int fps = 10; int frameCounter = 0; double timeIntegral_FPS = 1000/fps; VideoCapture vcap(0); if(!vcap.isOpened()){ cout << "Error opening video stream or file" << endl; return -1; } vcap.set(CV_CAP_PROP_CONVERT_RGB , false); //vcap.set(CV_CAP_PROP_FPS , fps); //allocate memory and start threads here after passing all cases in which program might exit int frame_width= vcap.get(CV_CAP_PROP_FRAME_WIDTH); int frame_height= vcap.get(CV_CAP_PROP_FRAME_HEIGHT); int plot_w = 250; int plot_h = frame_height; int r_Plot = 0; int g_Plot = 0; int b_Plot = 0; if (plot) { plotColors = new Scalar [sensorVectorSize]; RNG rng( 0xFFFFFFFF ); for (int i = 0; i < sensorVectorSize; i++) { plotColors[i] = Scalar(rng.uniform(0,255), rng.uniform(0, 255), rng.uniform(0, 255)); } //int bin_w = cvRound( (double) hist_w/histSize ); plotValuesArraySize = sensorVectorSize * plot_w; //int bin_w = cvRound( (double) hist_w/plotValuesArraySize ); plotValues = new double [plotValuesArraySize]; for (int i = 0; 
i < plotValuesArraySize; i++) { plotValues[i] = 0.0; } namedWindow("Plot Control Panel", WINDOW_NORMAL); //const char* trackBarObject = "Object"; //const char* trackBarRed = "Red"; //const char* trackBarGreen = "Green"; //const char* trackBarBlue = "Blue"; createTrackbar("Object", "Plot Control Panel", &selectedItem, sensorVectorSize, NULL ); createTrackbar("Red", "Plot Control Panel", &selectedR, 255, Color_Modifier ); createTrackbar("Green", "Plot Control Panel", &selectedG, 255, Color_Modifier ); createTrackbar("Blue", "Plot Control Panel", &selectedB, 255, Color_Modifier ); resizeWindow("Plot Control Panel", 500,150); } ifstream infile("Gestures"); string gestureString; thread ss (SensorStream); thread tr (Timer); ss.detach(); tr.detach(); //VideoWriter video(vidFileName,CV_FOURCC('M','J','P','G'),10, Size(frame_width,frame_height),true); //VideoWriter video("Folder/file.avi",CV_FOURCC('M','J','P','G'),10, Size(frame_width,frame_height),true); VideoWriter * video; int recordingsCounter = 1; unsigned long timeDiff; //SRT related variables bool readyForGesture = false; //this will be used to indicate whether the recorder is ready to record a gesture or not. 
bool writingSRT = false; int srtLineCounter = 1; String srtData = ""; //const char* control_window = "Plot Control Panel"; for(;;){ if (startedRecording) { //TODO: make the files naming also suitable for windows String vidFileName = "Videos/" + recorderName + "/" + sessionName + "/" + std::to_string(recordingsCounter) + ".avi"; String srtFileName = "Videos/" + recorderName + "/" + sessionName + "/" + std::to_string(recordingsCounter) + ".srt"; String audioFileName = "Videos/" + recorderName + "/" + sessionName + "/" + std::to_string(recordingsCounter) + ".wav"; //String trainingFileName = "Output/" + recorderName + "/" + sessionName + "/Training/" + std::to_string(recordingsCounter);//TODO: Delete later srtFile = fopen (srtFileName.c_str(),"w"); //trainingFile = fopen (trainingFileName.c_str() ,"w"); WriteWavHeader(audioFileName.c_str(), samplingFrequency, sensorVectorSize); if (plot) { video = new VideoWriter(vidFileName,CV_FOURCC('M','J','P','G'),fps, Size(frame_width + plot_w,frame_height),true); } else { video = new VideoWriter(vidFileName,CV_FOURCC('M','J','P','G'),fps, Size(frame_width,frame_height),true); } startedRecording = false; } if (stoppedRecording) { fclose (srtFile); //fclose (trainingFile); fclose (audioFile); srtLineCounter = 1; recordingsCounter++; stoppedRecording = false; } Mat frame; vcap >> frame; if (plot) { if (getTrackbarPos("Object", "Plot Control Panel") == 0)//check to see if user wants to change color of the background of the plot or not { r_Plot = getTrackbarPos("Red", "Plot Control Panel"); g_Plot = getTrackbarPos("Green", "Plot Control Panel"); b_Plot = getTrackbarPos("Blue", "Plot Control Panel"); } Mat plotImage( plot_h, plot_w, CV_8UC3, Scalar( b_Plot,g_Plot,r_Plot) ); //Plotting for( int i = 0; i < plot_w - 1; i++ ) { for (int j = 0; j < sensorVectorSize; j++) { line( plotImage, Point( i, plot_h/2 - cvRound(plotValues[i*sensorVectorSize+j]) ) , Point( i + 1, plot_h/2 - cvRound(plotValues[i*sensorVectorSize+j + sensorVectorSize]) 
), plotColors[j], 1, 8, 0 ); } } //namedWindow("Plot", CV_WINDOW_AUTOSIZE ); //imshow("Plot", plotImage ); Mat display = Mat::zeros ( MAX ( frame.rows, plotImage.rows ), frame.cols + plotImage.cols, frame.type() ); plotImage.copyTo ( Mat ( display, Rect ( frame.cols, 0, plotImage.cols, plotImage.rows ) ) ); if (startWriting) { timeDiff = timeNow - startTime; String disp1 = "Timer: " + FromMillisecondsToSRTFormat(timeDiff); putText(frame, disp1, Point(20, 40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255), 1); frame.copyTo ( Mat ( display, Rect ( 0, 0, frame.cols, frame.rows ) ) ); if (frameCounter * timeIntegral_FPS < timeDiff) { video->write(display); //Record the video till here. It is not needed to have a recording indicator in the recoring output frameCounter++; } //Recording Indicator circle(display, Point(26, 16), 8, Scalar(0, 0, 255), -1, 8, 0); putText(display, "RECORDING", Point(40, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 255), 1); } else { frame.copyTo ( Mat ( display, Rect ( 0, 0, frame.cols, frame.rows ) ) ); } imshow( "Recorder", display ); } else { if(streamComing) { circle(frame, Point(426, 16), 8, Scalar(255, 0, 0), -1, 8, 0); putText(frame, "SENSOR STREAMING", Point(440, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 0, 0), 1); } if (startWriting) { timeDiff = timeNow - startTime; String disp1 = "Timer: " + FromMillisecondsToSRTFormat(timeDiff); putText(frame, disp1, Point(20, 40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255), 1); if (frameCounter * timeIntegral_FPS < timeDiff) { video->write(frame); //Record the video till here. 
It is not needed to have a recording indicator in the recoring output frameCounter++; } //Recording Indicator circle(frame, Point(26, 16), 8, Scalar(0, 0, 255), -1, 8, 0); putText(frame, "RECORDING", Point(40, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 255), 1); } imshow( "Recorder", frame ); } //++blinks_number; char c = (char)waitKey(33);//TODO: Check if that is causing a problem with how long the video is if( c == 27 ) //Escape button pressed { if (recording) { //fclose (srtFile); //fclose (trainingFile); //fclose(audioFile); //recording = false; printf("Please end the recording session first.\n"); } else { terminating = true; streaming = false; break; } } else if (c == 'r') { if (recording) { //TODO: make a loop that will block untill our audio file reaches the time at which we decided to end the recording //take time, and get only the seconds part unsigned long endTime; struct timeval now; gettimeofday(&now, NULL); endTime = (unsigned long long)(now.tv_sec) * 1000 + (unsigned long long)(now.tv_usec) / 1000; endTime -= startTime; endTime /= 1000; printf("Please wait while the stream catches up with the video...\n"); int num_channels_sampling_frequency = sensorVectorSize*samplingFrequency; while (audioValuesWritten/(num_channels_sampling_frequency) <= endTime){} stoppedRecording = true; printf("Stopped recording!\n"); } else { startedRecording = true; printf("Started recording!\n"); } recording = !recording; } else if (c == 'n') { //TODO: if writingSRT == true if (recording && !writingSRT) { if(getline(infile, gestureString)) { printf("Recording gesture %s. 
Press s when ready to record the gesture, and then s again when finished.\n", gestureString.c_str()); readyForGesture = true; } else { printf("No more gestures to record, you can now end the recording session.\n"); } } else { printf("Please start a recording session before trying to record a gestrue.\n"); } } else if(c == 's') { if (readyForGesture && !writingSRT) { //record the time at this instance for the srt file, and write it there srtData += FromMillisecondsToSRTFormat(timeNow - startTime); srtData += " --> "; writingSRT = true; readyForGesture = false; printf("In\n"); } else if (!readyForGesture && writingSRT) { srtData += FromMillisecondsToSRTFormat(timeNow - startTime); srtData += "\n"; srtData += gestureString + "\n"; WriteSRT(srtLineCounter, srtData); srtData = ""; srtLineCounter++; writingSRT = false; readyForGesture = true; printf("Out\n"); } } //a failed attempt to restart streaming in case the stream feed was interrupted, because eitherway, the piping program has to be restarted. //else if (c == 's') //{ // if (!streaming) // { // thread ss (SensorStream); // ss.detach(); // streaming = true; // } //} } delete plotValues; delete plotColors; delete video; return 0; }
int main(int argc, char** argv) { if(argc >= 3) { VideoCapture inputVideo(argv[1]); // open the default camera if(!inputVideo.isOpened()) // check if we succeeded return -1; // Initialize VideoWriter outputVideo; // Open the output const string source = argv[2]; // the source file name const string NAME = source + ".mp4"; // Form the new name with container int ex = inputVideo.get(CV_CAP_PROP_FOURCC); // Get Codec Type- Int form std::cout << ex << "\n" << (int)inputVideo.get(CV_CAP_PROP_FOURCC) << "\n"; Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), //Acquire input size (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, false); char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0}; cout << "Input codec type: " << EXT << endl; if (!outputVideo.isOpened()) { cout << "Could not open the output video for write \n"; return -1; } namedWindow("Result Window", 1); // Mat declaration Mat prev_frame, prev_gray, cur_frame, cur_gray; Mat frame_blurred, frameHSV; // take the first frame inputVideo >> prev_frame; /* manual ball selection */ MouseParams mp; prev_frame.copyTo( mp.ori ); prev_frame.copyTo( mp.img ); setMouseCallback("Result Window", BallSelectFunc, &mp ); int enterkey = 0; while(enterkey != 32 && enterkey != 113) { enterkey = waitKey(30) & 0xFF; imshow("Result Window", mp.img); } outputVideo.write( mp.img ); /* Kalman Filter Kalman filter is a prediction-correction filter. It has two stages: predict and correct. In predict stage, the filter uses the states of previous frame to predict the state of current frame. In correct stage, the filter takes in current measurement to "correct" the prediction made in prediction stage. Here we are using an adaptive Kalman filter to do ball tracking. 
(noise matrix P, Q changes depending on the occulusion index) */ /* Initialization four parameters: x, y, vx, vy two measurements: mx, my Here we're implementing a constant velocity model. x_t = x_t-1 + vx_t-1; y_t = y_t-1 + vy_t-1; vx_t = vx_t-1; vy_t = vy_t-1; These linear equations can be written as transition matrix A. */ KalmanFilter KF(4, 2, 0); float transMatrixData[16] = {1,0,1,0, 0,1,0,1, 0,0,1,0, 0,0,0,1}; KF.transitionMatrix = Mat(4, 4, CV_32F, transMatrixData); Mat_<float> measurement(2,1); measurement.setTo(Scalar(0)); /* We put the first point in predicted state */ KF.statePost.at<float>(0) = mp.pt.x; KF.statePost.at<float>(1) = mp.pt.y; KF.statePost.at<float>(2) = 0; KF.statePost.at<float>(3) = 0; setIdentity(KF.measurementMatrix); // measurement matrix H setIdentity(KF.processNoiseCov, Scalar::all(1e-4)); // process noise covariance matrix Q setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1)); // measurement noise covariance matrix R // priori error estimate covariance matrix P'(t) /* KF.errorCovPre.at<float>(0) = 1; KF.errorCovPre.at<float>(5) = 1; KF.errorCovPre.at<float>(10) = 1; KF.errorCovPre.at<float>(15) = 1; */ setIdentity(KF.errorCovPre); // priori error estimate covariance matrix P'(t) setIdentity(KF.errorCovPost, Scalar::all(.1)); // posteriori error estimate cov matrix P(t) /* params related to previous frames */ Rect prev_box; Point2f prev_motion; Point noFoundStartPt; vector<cv::Point2f> prev_ball_centers; int noFoundCount = 0; /* start tracking */ setMouseCallback("Result Window", CallBackFunc, &frameHSV); for(int frame_num=1; frame_num < inputVideo.get(CAP_PROP_FRAME_COUNT); ++frame_num) { cout << "===FRAME #" << frame_num << "===" << endl; /* get current frame */ inputVideo >> cur_frame; // Blur & convert frame to HSV color space cv::GaussianBlur(cur_frame, frame_blurred, cv::Size(5, 5), 3.0, 3.0); cvtColor(frame_blurred, frameHSV, COLOR_BGR2HSV); // gray scale current frame cvtColor(prev_frame, prev_gray, CV_BGR2GRAY); 
cvtColor(cur_frame, cur_gray, CV_BGR2GRAY); // mask generation Mat mask; mask = getMask(frameHSV); // Hough Transform Mat frame_filtered, frame_filtered_gray; cur_frame.copyTo( frame_filtered, mask ); cv::cvtColor( frame_filtered, frame_filtered_gray, CV_BGR2GRAY ); vector<cv::Vec3f> circles; cv::GaussianBlur(frame_filtered_gray, frame_filtered_gray, cv::Size(5, 5), 3.0, 3.0); HoughCircles( frame_filtered_gray, circles, CV_HOUGH_GRADIENT, 1, frame_filtered_gray.rows/8, 120, 18, 5,300); // contour generation vector< vector<cv::Point> > contours_ball; cv::findContours(mask, contours_ball, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE); Mat result; cur_frame.copyTo( result ); // OpticalFlow vector<Point2f> optFlow_ball_centers; vector<uchar> featuresFound; Mat err; TermCriteria termcrit(TermCriteria::COUNT|TermCriteria::EPS, 20, 0.03); Size winSize(31, 31); if( prev_ball_centers.size() > 0 ) calcOpticalFlowPyrLK(prev_gray, cur_gray, prev_ball_centers, optFlow_ball_centers, featuresFound, err, winSize, 0, termcrit, 0, 0.001); // Kalman Filter: Extract previous point & prediction point Point2f statePt = Point( KF.statePost.at<float>(0), KF.statePost.at<float>(1) ); Mat prediction = KF.predict(); Point2f predictPt = Point2f( prediction.at<float>(0), prediction.at<float>(1) ); cout << "state:" << statePt << endl; cout << "predict:" << predictPt << endl; cout << "prev_motion: " << prev_motion << " sqr: " << prev_motion.x * prev_motion.x + prev_motion.y * prev_motion.y << endl; // Search current frame for good candidate measurements vector<Point2f> cur_contour_centers; vector<cv::Point> best_ball_contour; Point2f best_ball_center; Rect best_ball_box; bool ballFound = false; // TODO dynamic search range int closest_dist = (prev_motion.x * prev_motion.x + prev_motion.y * prev_motion.y) * 15; if(closest_dist == 0) closest_dist = 10000; //circle( result, predictPt, sqrt(closest_dist), CV_RGB(255,255,0), 2 ); for (size_t i = 0; i < contours_ball.size(); i++) { drawContours(result, 
contours_ball, i, CV_RGB(255,0,0), 1); // draw the area cv::Rect bBox; bBox = cv::boundingRect(contours_ball[i]); Point2f center; center.x = bBox.x + bBox.width / 2; center.y = bBox.y + bBox.height / 2; cur_contour_centers.push_back(center); // find corresponding optical flow center float optFlow_dist = 2500; int best_j = -1; for( size_t j=0; j < optFlow_ball_centers.size(); ++j ) { float diff_x = center.x - optFlow_ball_centers[j].x; float diff_y = center.y - optFlow_ball_centers[j].y; float distance = diff_x * diff_x + diff_y * diff_y; if(distance < optFlow_dist) { distance = optFlow_dist; best_j = j; } } /* TODO Point2f optPredictPt = center; if(best_j != -1) { Point2f motion = optFlow_ball_centers[best_j] - prev_ball_centers[best_j]; optPredictPt = center + motion; line( result, optPredictPt, center, CV_RGB(190,60,70), 2 ); } */ // If we find a contour that includes our prediction point, // it's the best choice then. // If we cannot found a contour to contain prediction point, // we search the rest contours. The one with closest distance // should be picked. 
if( pointPolygonTest( contours_ball[i], predictPt, false ) >= 0) { best_ball_contour = contours_ball[i]; best_ball_center = center; best_ball_box = bBox; ballFound = true; break; } else { float diff_x = center.x - predictPt.x; float diff_y = center.y - predictPt.y; float distance = diff_x * diff_x + diff_y * diff_y; //if( bBox.area() < 200 ) // continue; /* stringstream sstr; sstr << "(dot= " << dot_product << ")"; cv::putText(result, sstr.str(), cv::Point(center.x + 3, center.y - 3), cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(0,255,100), 2); */ //if(bBox.area() < 250) // continue; // if distance is close enough if( distance < closest_dist ) { best_ball_contour = contours_ball[i]; best_ball_center = center; best_ball_box = bBox; closest_dist = distance; ballFound = true; } } } if(ballFound) { // calculte occulusion rate float occ = fabs( (float)best_ball_box.area() / (float)prev_box.area() - 1.0 ); if(occ > 1.0) occ = 1.0; // check threshold float threshold = 0.3; if(occ < threshold) { setIdentity(KF.processNoiseCov, Scalar::all(1.0-occ)); // Q = 1 - occ setIdentity(KF.measurementNoiseCov, Scalar::all(occ)); // R = occ } else { setIdentity(KF.processNoiseCov, Scalar::all(0.0)); // Q = 0 setIdentity(KF.measurementNoiseCov, Scalar::all(1e10)); // R = infinite cout << "NON_CONFIDENTIAL_MEASUREMENT\n"; } // correction measurement.at<float>(0) = best_ball_center.x; measurement.at<float>(1) = best_ball_center.y; Mat estimated = KF.correct(measurement); cout << "measured:" << best_ball_center << endl; cout << "estimated:" << estimated.at<float>(0) << ", " << estimated.at<float>(1) << endl; // remember to update prev parameters prev_box = best_ball_box; prev_motion = best_ball_center - statePt; noFoundCount = 0; } else { // TODO prev_motion = predictPt - statePt; if( noFoundCount == 0 ) { noFoundStartPt = statePt; } circle( result, noFoundStartPt, 5, CV_RGB(255,255,255), 2 ); // if Kalman filter failed... 
we "CORRECT" the frame if(noFoundCount > 1) { closest_dist = 1e8; for( size_t i = 0; i < circles.size(); i++ ) { Point center_circle(cvRound(circles[i][0]), cvRound(circles[i][1])); int radius_circle = cvRound(circles[i][2]); if( radius_circle < 6 ) continue; /* cv::Rect bBox; bBox = cv::boundingRect(circles[i]); Point center; center.x = bBox.x + bBox.width / 2; center.y = bBox.y + bBox.height / 2; */ int diff_x = center_circle.x - noFoundStartPt.x; int diff_y = center_circle.y - noFoundStartPt.y; int distance = diff_x * diff_x + diff_y * diff_y; if( distance < closest_dist) { closest_dist = distance; best_ball_center = center_circle; //best_ball_box = bBox; ballFound = true; } } if(ballFound) { //measurement.at<float>(0) = best_ball_center.x; //measurement.at<float>(1) = best_ball_center.y; //Mat estimated = KF.correct(measurement); KF.statePost.at<float>(0) = best_ball_center.x; KF.statePost.at<float>(1) = best_ball_center.y; KF.statePost.at<float>(2) = 0; KF.statePost.at<float>(3) = 0; prev_box = best_ball_box; prev_motion = Point2f(0, 0); noFoundCount = 0; } else { cout << "UNABLE TO CORRECT..." 
<< endl; } } noFoundCount++; cout << "NO FOUND: " << noFoundCount << endl; } // rendering result line( result, statePt, predictPt, CV_RGB(255,0,255), 2 ); circle( result, predictPt, 2, CV_RGB(255,0,255), 2 ); circle( result, best_ball_center, 2, CV_RGB(255,255,255), 2 ); rectangle( result, best_ball_box, CV_RGB(0,255,0), 2 ); // Optical Flow /* for (size_t i = 0; i < optFlow_ball_centers.size(); i++) { line( result, prev_ball_centers[i], optFlow_ball_centers[i], CV_RGB(120,70,255), 2 ); circle( result, optFlow_ball_centers[i], 2, CV_RGB(120,70,255), 2 ); } */ // Hough /* for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ ) { Point center(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1])); int radius = cvRound(circles[circle_i][2]); circle( result, center, radius, Scalar(12,12,255), 2 ); } */ prev_ball_centers = cur_contour_centers; imshow("Result Window", result); outputVideo.write( result ); /* UPDATE FRAME */ cur_frame.copyTo( prev_frame ); /* KEY INPUTS */ int keynum = waitKey(30) & 0xFF; if(keynum == 113) // press q break; else if(keynum == 32) // press space { keynum = 0; while(keynum != 32 && keynum != 113) keynum = waitKey(30) & 0xFF; if(keynum == 113) break; } } inputVideo.release(); outputVideo.release(); }
void writeForegroundVideoWithoutNoise() { Mat matFrame(frameWithoutNoise, false); foregroundVideoWithoutNoise.write(matFrame); }
void main(int argc, char *argv[]) { Mat emptyFrame = Mat::zeros(Camera::reso_height, Camera::reso_width, CV_8UC3); Thesis::FastTracking fastTrack(20); //used to be 50, why? i dno Thesis::KalmanFilter kalman; kalman.initialise(CoordinateReal(0, 0, 0)); kalman.openFile(); // the two stereoscope images Camera one(0,-125,0,0,0,90); Camera two(2, 125,0,0,0,90); Camera three; // list of cameras and cameraLocs std::vector<Camera> cameraList; std::vector<CoordinateReal> locList; VideoWriter writeOne ; VideoWriter writeTwo; VideoWriter writeThree; VideoCapture capOne; VideoCapture capTwo; VideoCapture capThree; Thesis::Stats stat; cv::Point2d horizontalOne(0,Camera::reso_height/2); cv::Point2d horizontalTwo(Camera::reso_width, Camera::reso_height/2); cv::Point2d verticalOne(Camera::reso_width / 2, 0); cv::Point2d verticalTwo(Camera::reso_width / 2, Camera::reso_height); ofstream framesFile_; framesFile_.open("../../../../ThesisImages/fps_ABSDIFF.txt"); double framesPerSecond = 1 / 10.0; //open the recorders FeatureExtraction surf(5000); Stereoscope stereo; Util util; bool once = false; bool foundInBoth = false; bool foundInMono = false; std::vector<cv::Point2f> leftRect(4); cv::Rect leftRealRect; cv::Rect rightRealRect; std::vector<cv::Point2f> rightRect(4); cv::Mat frameLeft; cv::Mat frameRight; cv::Mat frameThree; cv::Mat prevFrameLeft; cv::Mat prevFrameRight; cv::Mat prevFrameThree; // check if you going to run simulation or not or record cout << " run simulation: 's' or normal: 'n' or record 'o' or threeCameras 'c' " << endl; imshow("main", emptyFrame); char command = waitKey(0); string left = "../../../../ThesisImages/leftTen.avi"; string right = "../../../../ThesisImages/rightTen.avi"; string mid = "../../../../ThesisImages/midTen.avi"; commands(command); emptyFrame = Mat::ones(10, 10, CV_64F); imshow("main", emptyFrame); command = waitKey(0); camCount(command); // checkt the cam count if (multiCams){ //load in all the cameras three = Camera(3, 175, -50, 585, 7.1, 
97);//Camera(3, 200, -60, 480, 7,111); } //==========hsv values======================= cv::Mat hsvFrame; cv::Mat threshold; int iLowH = 155; int iHighH = 179; int iLowS = 75; int iHighS = 255; int iLowV = 0; int iHighV = 255; //================================= double elapsedTime = 0; double waitDelta = 0; if (record){ writeOne.open("../../../../ThesisImages/leftTen.avi", 0, 10, cv::Size(864, 480), true); writeTwo.open("../../../../ThesisImages/rightTen.avi", 0, 10, cv::Size(864, 480), true); writeThree.open("../../../../ThesisImages/midTen.avi", 0, 10, cv::Size(864, 480), true); }else if (simulation){ capOne.open(left); capTwo.open(right); capThree.open(mid); assert(capOne.isOpened() && capTwo.isOpened()); } if (hsv){ //Create trackbars in "Control" window cvCreateTrackbar("LowH", "main", &iLowH, 179); //Hue (0 - 179) cvCreateTrackbar("HighH", "main", &iHighH, 179); cvCreateTrackbar("LowS", "main", &iLowS, 255); //Saturation (0 - 255) cvCreateTrackbar("HighS", "main", &iHighS, 255); cvCreateTrackbar("LowV", "main", &iLowV, 255); //Value (0 - 255) cvCreateTrackbar("HighV", "main", &iHighV, 255); } if(!simulation){ cout << " adding" << endl; surf.addImageToLib("backToTheFutureCover.jpg"); } CoordinateReal leftLoc; CoordinateReal rightLoc; CoordinateReal threeLoc; while (running){ clock_t beginTime = clock(); commands(command); if (found){ kalman.predictState(); kalman.printCurrentState(); } int thickness = -1; int lineType = 8; //normal running if (!simulation){ frameLeft = one.grabFrame(); frameRight = two.grabFrame(); if (multiCams){ frameThree = three.grabFrame(); } } else{ //if last frame, release then reopen if (capOne.get(CV_CAP_PROP_POS_FRAMES) == (capOne.get(CV_CAP_PROP_FRAME_COUNT) - 1)){ capOne.release(); capTwo.release(); capOne.open(left); capTwo.open(right); if (multiCams){ capThree.release(); capThree.open(mid); } } // means it is simulation: i.e frames come from a video capOne >> frameLeft; capTwo >> frameRight; if (multiCams){ capThree >> frameThree; 
} } if (hsv){ //convert the frame into hsv cvtColor(frameLeft, hsvFrame, COLOR_BGR2HSV); inRange(hsvFrame, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), threshold); blur(threshold, threshold, cv::Size(20, 20)); cv::threshold(threshold, threshold, 50, 255, THRESH_BINARY); //imshow("imageTwo", hsvFrame); imshow("hsv", threshold); } if (record){ writeOne.write(frameLeft); writeTwo.write(frameRight); if (multiCams){ writeThree.write(frameThree); } } if (command == ' '){ //left frame ============================= cout << "pressedSpace " << endl; std::vector<CoordinateReal> coordLeft = surf.detect(frameLeft, true, found, leftRealRect); if (!coordLeft.empty()){ int thickness = -1; int lineType = 8; cv::circle(frameLeft, cv::Point2f(coordLeft[0].x(), coordLeft[0].y()), 5, cv::Scalar(0, 0, 255), thickness, lineType); leftRect = surf.getSceneCorners(); line(frameLeft, leftRect[0], leftRect[1], cv::Scalar(0, 255, 0), 2); //TOP line line(frameLeft, leftRect[1], leftRect[2], cv::Scalar(0, 0, 255), 2); line(frameLeft, leftRect[2], leftRect[3], cv::Scalar(0, 255, 0), 2); line(frameLeft, leftRect[3], leftRect[0], cv::Scalar(0, 255, 0), 2); leftRealRect = util.getSizedRect(leftRect, one.reso_height, one.reso_width, 0.1); leftLoc = coordLeft[0]; } //right frame ================================== std::vector<CoordinateReal> coordRight = surf.detect(frameRight, true, found, rightRealRect); if (!coordRight.empty()){ int thickness = -1; int lineType = 8; cv::circle(frameRight, cv::Point2f(coordRight[0].x(), coordRight[0].y()), 5, cv::Scalar(0, 0, 255), thickness, lineType); rightRect = surf.getSceneCorners(); line(frameRight, rightRect[0], rightRect[1], cv::Scalar(0, 255, 0), 2); //TOP line line(frameRight, rightRect[1], rightRect[2], cv::Scalar(0, 0, 255), 2); line(frameRight, rightRect[2], rightRect[3], cv::Scalar(0, 255, 0), 2); line(frameRight, rightRect[3], rightRect[0], cv::Scalar(0, 255, 0), 2); rightRealRect = util.getSizedRect(rightRect, one.reso_height, 
one.reso_width, 0.1); rightLoc = coordRight[0]; } if (multiCams){ std::vector<CoordinateReal> coordThrees = surf.detect(frameThree, true, false, leftRealRect); CoordinateReal coordThree = coordThrees[0]; rightRect = surf.getSceneCorners(); line(frameThree, rightRect[0], rightRect[1], cv::Scalar(0, 255, 0), 2); //TOP line line(frameThree, rightRect[1], rightRect[2], cv::Scalar(0, 0, 255), 2); line(frameThree, rightRect[2], rightRect[3], cv::Scalar(0, 255, 0), 2); line(frameThree, rightRect[3], rightRect[0], cv::Scalar(0, 255, 0), 2); cout << " foundIN x: " << coordThree.x() << "found in y: " << coordThree.y() << endl; foundInMono = true; threeLoc = coordThree; } found = true; } else if(!record){ cout << " fastTracking " << endl; if (once){ CoordinateReal leftCameraLoc(0, 0, 0); CoordinateReal rightCameraLoc(0,0,0); if (found) { leftCameraLoc = kalman.expectedLocObs(one); rightCameraLoc = kalman.expectedLocObs(two); } leftLoc = fastTrack.findObject(frameLeft, prevFrameLeft, leftCameraLoc,leftDebug); rightLoc = fastTrack.findObject(frameRight, prevFrameRight, rightCameraLoc ,rightDebug); // go through the list of locations if (multiCams){ CoordinateReal miscCameraLoc(0, 0, 0); if (found){ miscCameraLoc = kalman.expectedLocObs(three); } threeLoc = fastTrack.findObject(frameThree, prevFrameThree, miscCameraLoc, threeDebug); } } frameLeft.copyTo(prevFrameLeft); frameRight.copyTo(prevFrameRight); if (multiCams){ frameThree.copyTo(prevFrameThree); } once = true; cv::circle(frameLeft, cv::Point2f(leftLoc.x(), leftLoc.y()), 5, cv::Scalar(0, 0, 255), thickness, lineType); cv::circle(frameRight, cv::Point2f(rightLoc.x(), rightLoc.y()), 5, cv::Scalar(0, 0, 255), thickness, lineType); cv::circle(frameThree, cv::Point2f(threeLoc.x(), threeLoc.y()), 5, cv::Scalar(0, 0, 255), thickness, lineType); } if (multiCams){ foundInMono = Util::isInFrame(threeLoc); } foundInBoth = Util::isInBothFrames(leftLoc, rightLoc); if (foundInBoth){ CoordinateReal real = stereo.getLocation(leftLoc, 
rightLoc); //print the current location cout << "x: " << real.x() << "y: " << real.y() << "z: " << real.z() << endl; //cout << "time in seconds" << float(clock() - beginTime) / CLOCKS_PER_SEC << endl; if (!found){ cout << "initialising kalman filter" << endl; kalman.initialise(real); } else { kalman.stereoObservation(real); } double curTime = double(clock())/CLOCKS_PER_SEC; cout << "curTime" << curTime << endl; stat.getVel(real, curTime); foundInBoth = false; found = true; } if (foundInMono){ // pass the observation cout << "found in mono" << endl; kalman.observation(threeLoc, three); foundInMono = false; } if (cross){ // add cross to all the frames line(frameRight, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2); line(frameRight, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2); line(frameLeft, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2); line(frameLeft, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2); //multi cam if (multiCams){ line(frameThree, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2); line(frameThree, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2); } } cv::imshow("left", frameLeft); cv::imshow("right", frameRight); if (multiCams){ cv::imshow("mid", frameThree); } command = waitKey(1); if (surfing){ cout << "wait" << endl; waitKey(0); surfing = false; } clock_t end = clock(); elapsedTime = double(end - beginTime) / CLOCKS_PER_SEC; waitDelta = framesPerSecond - elapsedTime; if (waitDelta > 0){ command = waitKey(waitDelta* 1000); } end = clock(); elapsedTime = double(end - beginTime) / CLOCKS_PER_SEC; cout << "fps" << 1 / elapsedTime << endl; //convert fps to string string fps = std::to_string(1 / elapsedTime); fps += "\n"; framesFile_ << fps; } framesFile_.close(); kalman.closeFile(); return; }
int main(int argc, char** argv ) { ofstream outfile; srand( time(NULL) ); if( argc < 2 || argc > 3 ) { cerr << "Argc " << argc << endl; cerr << "Usage: " << argv[0] << " VIDEO_CAPTURE_FILE " << "[VIDEO_OUTPUT_FILE.mp4]" << endl; cerr << "Matej Minarik (C) ElectroMeter " << VERSION << endl; cerr << "OpenCV " << CV_VERSION << endl; return 1; } outfile.open( "data.txt" ); VideoCapture cap( argv[1] ); if( cap.isOpened() == false && outfile.is_open() ) { cerr << "Cannot open file" << endl; return -1; } double frameWidth = cap.get( CV_CAP_PROP_FRAME_WIDTH ); double frameHeight = cap.get( CV_CAP_PROP_FRAME_HEIGHT ); double videoFPS = cap.get( CV_CAP_PROP_FPS ); VideoWriter vw; if( argc == 3 ) { bool open = vw.open( argv[2], CV_FOURCC('m', 'p', '4', 'v'), videoFPS, Size((int)frameWidth, (int)frameHeight)); if( false == open || false == vw.isOpened() ) { cerr << "Cannot open file " << argv[2] << endl; return -1; } } cout << " Width: " << frameWidth << endl; cout << " Height: " << frameHeight << endl; cout << " FPS: " << videoFPS << endl; int indicatorY = (int) ((float) frameHeight * 0.1); int indicatorX = (int) ((float) frameWidth * 0.8); namedWindow(WIN_TITLE); resizeWindow(WIN_TITLE, frameHeight, frameWidth); Mat currFrame, cloneFrame; Vec3b currPixel; Vec3b filterPixel; unsigned long sumR, sumG, sumB; unsigned long frameNo = 0; unsigned long lastR = 0; while( cap.read(currFrame) ) { sumR = sumG = sumB = 0; cloneFrame = currFrame.clone(); for( int i = 0; i < frameHeight; i++ ) { for( int j = 0; j < frameWidth; j++ ) { currPixel = currFrame.at<Vec3b>(Point(j, i)); sumR += currPixel[2]; if( cloneFrame.at<Vec3b>(Point(j, i))[0] + filterPixel[0] > 255 ) { cloneFrame.at<Vec3b>(Point(j, i))[0] = 255; } else { cloneFrame.at<Vec3b>(Point(j, i))[0] += filterPixel[0]; } if( cloneFrame.at<Vec3b>(Point(j, i))[1] + filterPixel[1] > 255 ) { cloneFrame.at<Vec3b>(Point(j, i))[1] = 255; } else { cloneFrame.at<Vec3b>(Point(j, i))[1] += filterPixel[1]; } if( cloneFrame.at<Vec3b>(Point(j, 
i))[2] + filterPixel[2] > 255 ) { cloneFrame.at<Vec3b>(Point(j, i))[2] = 255; } else { cloneFrame.at<Vec3b>(Point(j, i))[2] += filterPixel[2]; } } } vw.write( cloneFrame ); outfile << frameNo; outfile << " " << sumR; outfile << endl; ++frameNo; if( lastR != 0 ) { float ratio = (float) sumR / (float) lastR; if( ratio > 1.08 ) { showIndicator(indicatorX, indicatorY, cloneFrame); cout << "Dot " << frameNo << " " << indicatorX << ":" << indicatorY; cout << " ratio " << ratio << endl; if( true == filterShouldChange(frameNo) ) { filterPixel = getRandomFilter(); } } else { lastR = sumR; } } else { lastR = sumR; } imshow(WIN_TITLE, cloneFrame); if( waitKey(29) >= 0 ) break; } outfile.close(); cap.release(); if( argc == 3) { vw.release(); } return 0; }
void visionNode::run(){ //run initial calibration. If that fails, this node will shut down. if(!calibrate()) ros::shutdown(); VideoWriter outputVideo; Size S = cv::Size(cam->get_img_width(),cam->get_img_height()); outputVideo.open("/home/lcv/output.avi" , CV_FOURCC('M','P','2','V'), 30, S, true); //main loop while(ros::ok()){ //if calibration was manualy invoked by call on the service if(invokeCalibration) { invokeCalibration = false; calibrate(); } //grab frame from camera cam->get_frame(&camFrame); //correct the lens distortion rectifier->rectify(camFrame, rectifiedCamFrame); //create a duplicate grayscale frame cv::Mat gray; cv::cvtColor(rectifiedCamFrame, gray, CV_BGR2GRAY); //draw the calibration points for(point2f::point2fvector::iterator it=markers.begin(); it!=markers.end(); ++it) cv::circle(rectifiedCamFrame, cv::Point(cv::saturate_cast<int>(it->x), cv::saturate_cast<int>(it->y)), 1, cv::Scalar(0, 0, 255), 2); //detect crates std::vector<Crate> crates; qrDetector->detectCrates(gray, crates); //transform crate coordinates for(std::vector<Crate>::iterator it=crates.begin(); it!=crates.end(); ++it) { it->draw(rectifiedCamFrame); std::vector<cv::Point2f> points; for(int n = 0; n <3; n++){ point2f result = cordTransformer->to_rc(point2f(it->getPoints()[n].x, it->getPoints()[n].y)); points.push_back(cv::Point2f(result.x, result.y)); } it->setPoints(points); } //inform the crate tracker about the seen crates std::vector<CrateEvent> events = crateTracker->update(crates); //publish events for(std::vector<CrateEvent>::iterator it = events.begin(); it != events.end(); ++it) { vision::CrateEventMsg msg; msg.event = it->type; msg.crate.name = it->name; msg.crate.x = it->x; msg.crate.y = it->y; msg.crate.angle = it->angle; ROS_INFO(it->toString().c_str()); crateEventPublisher.publish(msg); } //update GUI outputVideo.write(rectifiedCamFrame); imshow("image",rectifiedCamFrame); waitKey(1000/30); //let ROS do it's magical things ros::spinOnce(); } }
/**
 * @brief Timer-driven per-frame handler for the face-cartooning app.
 *        Reads the next frame from the video, optionally draws cascade
 *        face-detection rectangles, matches detected faces (Luxand FaceSDK
 *        templates) against the user-selected faces with scene-change
 *        heuristics, draws the paired cartoon over the best match, handles
 *        the ESC / space / enter key flags, and hands the original and
 *        cartoonised frames to the MFC dialog (and the output video).
 *
 * @param theApp  owning MFC application; provides the dialog, timers,
 *                key-press flags and the cartoon-selection dialog.
 *
 * NOTE(review): this function reads and writes a large amount of file-scope
 * shared state (pos, grabFlag, base_test1, situationCount, changeBackArray,
 * vec_FaceCartoons, ...) — presumably it is only ever called from the UI
 * timer thread; confirm before adding other callers.
 */
void processVideo(CMFC_CartoonApp *theApp) {
	processedFrame = false;
	vector<Rect> vec_rect_facesCascade;
	Mat myFrame = cvQueryFrame(myCap); // current frame read from the AVI file behind myCap
	if (myFrame.empty()) // no more frames: end of video
	{
		cout << "视频文件播放完毕" << endl;
		cout << "nnn " << nnn << endl;
		theApp->dlg.KillTimer(1); // stop the playback timer
		return;
	}
	Mat targetImg, originalMat;
	targetImg = myFrame.clone();
	originalMat = myFrame.clone();
	// advance the seek slider
	theApp->dlg.m_slider.SetPos(++pos);
	// draw face-detection rectangles while cartooning is off
	if(detectFace && grabFlag == false)
	{
		//TFacePosition *FaceArray;
		//HImage hTargetImg;
		//int detectedCount = -1; // number of faces in the current frame
		//hTargetImg = loadImageFromMat(targetImg);
		//FSDK_FaceTemplate *detectedFaceTemplates = new FSDK_FaceTemplate[20];
		//FaceArray = detectUsingLuxand(hTargetImg, detectedCount, detectedFaceTemplates, NULL); // detect faces in the current frame
		//for (int i = 0; i < detectedCount; i++)
		//{
		//	TFacePosition facePosition = FaceArray[i];
		//	rectangle(targetImg, Rect(facePosition.xc - facePosition.w/2, facePosition.yc - facePosition.w/2, facePosition.w, facePosition.w), cvScalar(255, 0, 255), 2);
		//}
		detectCascade(targetImg, cascade1, 1, vec_rect_facesCascade);
		for(int j = 0; j < vec_rect_facesCascade.size(); j++)
		{
			rectangle(targetImg, vec_rect_facesCascade.at(j), cvScalar(255, 0, 255), 2);
		}
	}
	if (grabFlag == true && vec_FaceCartoons.size() > 0)
	{
		base_test1_str += "pos: " + toString(pos) + "\n";
		// detect whether the scene changed (histogram distance between consecutive frames)
		if(!preFrame.empty() && !myFrame.empty())
		{
			base_test1 = compareHist1(preFrame, myFrame); // similarity between the two frames
			base_test1_str += toString(base_test1) + "\n";
			cout<<"ffsd "<<base_test1<<endl;
			if(base_test1 > 6) // above threshold: treat as a scene cut
			{
				nnn++;
				situationCount = 0; // reset the per-scene frame counter
				selectFace = false;
				vec_formerTemplates.clear();
				//waitKey(0);
				for (int i = 0; i < 20; i++) // reset the per-person match counters for the new scene
				{
					changeBackArray[i] = 0;
				}
			}else{
				situationCount++; // same scene: one more frame
			}
		}
		preFrame = myFrame.clone(); // remember the current frame
		double t,tt,ttt;
		t = (double)cvGetTickCount();
		int detectedCount = -1; // number of faces in the current frame
		HImage hTargetImg;
		hTargetImg = loadImageFromMat(targetImg);
		/*imwrite("targetImg.jpg", targetImg);
		FSDK_LoadImageFromFile(&hTargetImg, "targetImg.jpg");*/
		FSDK_FaceTemplate *detectedFaceTemplates = new FSDK_FaceTemplate[20];
		TFacePosition *FaceArray;
		FaceArray = detectUsingLuxand(hTargetImg, detectedCount, detectedFaceTemplates, NULL); // detect faces in the current frame
		tt = (double)cvGetTickCount() - t;
		printf("detect time = %g ms\n", tt / ((double)cvGetTickFrequency()*1000.));
		if (detectedCount > 0) // only match/replace when the frame contains faces
		{
			// draw face-detection rectangles
			if(detectFace)
			{
				for (int i = 0; i < detectedCount; i++)
				{
					TFacePosition facePosition = FaceArray[i];
					rectangle(targetImg, Rect(facePosition.xc - facePosition.w/2, facePosition.yc - facePosition.w/2, facePosition.w, facePosition.w), cvScalar(255, 0, 255), 2);
				}
			}
			ttt = (double)cvGetTickCount();
			float temp_same_num_all = 0;
			float max_same_num_all = 0;
			int similiestFaceNum_all_cartoon = -1; // index of the best face in this frame
			int similiestFaceNum_all_man = -1;     // index of the best-matching selected person
			// among all faces in the frame, find the one most similar to a selected face
			for(int i = 0; i < vec_FaceCartoons.size(); i++)
			{
				FaceCartoon *faceCartoonObj = vec_FaceCartoons.at(i);
				float temp_same_num = 0;
				float max_same_num = 0;
				int similiestFaceNum = -1;
				for(int j = 0; j < detectedCount; j++)
				{
					for(int k = 0; k < faceCartoonObj->faceTemplates.size(); k++)
					{
						temp_same_num = getSimilarityBetweenTemplate(faceCartoonObj->faceTemplates.at(k), detectedFaceTemplates[j]);
						if(temp_same_num > max_same_num){
							similiestFaceNum = j;
							max_same_num = temp_same_num;
						}
					}
				}
				if(max_same_num > max_same_num_all)
				{
					max_same_num_all = max_same_num;
					similiestFaceNum_all_cartoon = similiestFaceNum;
					similiestFaceNum_all_man = i;
				}
				cout<<"相似度 "<<max_same_num<<endl;
				base_test1_str += "similirity: " + toString(max_same_num) + "\n";
			}
			base_test1_str += "sitCount: " + toString(situationCount) + "\n";
			base_test1_str += "sitCartNum: " + toString(situationCartoonNum) + "\n";
			// scene unchanged and already pasted for >=5 frames -> paste this frame too;
			// also paste directly in the first scene right after the user selected a face
			if ((base_test1 <= 6 && situationCartoonNum >= 5) || selectFace)
			{
				// after a scene change, the first 10 frames decide which face to show;
				// beyond 10 frames, always use the FaceCartoon matched most often
				if(situationCount > 10)
				{
					base_test1_str += ">10";
					base_test1_str += "\n";
					int mostMatchManNum = 0, maxMatchTime = 0;
					for(int k = 0; k < 20; k++) // pick the person matched most often in the first frames
					{
						if(changeBackArray[k] > maxMatchTime)
						{
							mostMatchManNum = k;
							maxMatchTime = changeBackArray[k];
						}
					}
					FaceCartoon *tempFaceObj = vec_FaceCartoons.at(mostMatchManNum);
					tempFaceObj->facePosition = FaceArray[similiestFaceNum_all_cartoon];
					detect_and_draw(myFrame, *tempFaceObj);
					// if this frame's similarity is below the threshold, keep it as a new template
					if(max_same_num_all < threshhold){
						tempFaceObj->faceTemplates.push_back(detectedFaceTemplates[similiestFaceNum_all_cartoon]);
						base_test1_str += "add Temp \n";
					}
					// once the FaceCartoon is decided, merge any templates saved from earlier unmatched frames
					if(!vec_formerTemplates.empty()){
						base_test1_str += "use former!! \n";
						for(int i = 0; i < vec_formerTemplates.size(); i++){
							tempFaceObj->faceTemplates.push_back(vec_formerTemplates.at(i));
						}
						// clear after use
						vec_formerTemplates.clear();
					}
				}else{ // within the first 10 frames: use the currently most similar FaceCartoon
					base_test1_str += "<10";
					base_test1_str += "\n";
					changeBackArray[similiestFaceNum_all_man]++;
					FaceCartoon *faceCartoonObj = vec_FaceCartoons.at(similiestFaceNum_all_man);
					//faceCartoonObj->faceTemplate = detectedFaceTemplates[similiestFaceNum_all_cartoon];
					// pick a paired cartoon for this FaceCartoon (fixed in semi-automatic mode)
					matchCartoonForFcObj(faceCartoonObj, frameCount, base_test1_str);
					double drawTime = (double)cvGetTickCount();
					faceCartoonObj->facePosition = FaceArray[similiestFaceNum_all_cartoon];
					detect_and_draw(myFrame, *faceCartoonObj);
					// if this frame's similarity is below the threshold, keep it as a new template
					if(max_same_num_all < threshhold){
						faceCartoonObj->faceTemplates.push_back(detectedFaceTemplates[similiestFaceNum_all_cartoon]);
						base_test1_str += "add temp \n";
					}
					printf("draw time = %g ms\n", ((double)cvGetTickCount() - drawTime) / ((double)cvGetTickFrequency()*1000.));
				}
				situationCartoonNum++;
			}else if((base_test1 > 6 || base_test1 == -1) || (situationCount < 10 ) || (base_test1 <= 6 && situationCartoonNum < 5 && situationCount < 20)){
				// scene changed, OR first 10 frames of a scene, OR same scene but
				// fewer than 5 pasted frames within the first 20
				cout<<"没吊用!"<<endl;
				if(base_test1 > 6 || base_test1 == -1){ // scene changed: restart the pasted-frame counter
					situationCartoonNum = 0;
				}
				if(max_same_num_all > threshhold) // similar enough: paste the cartoon
				{
					changeFlag = true;
					changeBackArray[similiestFaceNum_all_man]++;
					FaceCartoon *faceCartoonObj = vec_FaceCartoons.at(similiestFaceNum_all_man);
					//faceCartoonObj->faceTemplate = detectedFaceTemplates[similiestFaceNum_all_cartoon];
					// pick a paired cartoon for this FaceCartoon
					matchCartoonForFcObj(faceCartoonObj, frameCount, base_test1_str);
					double drawTime = (double)cvGetTickCount();
					faceCartoonObj->facePosition = FaceArray[similiestFaceNum_all_cartoon];
					detect_and_draw(myFrame, *faceCartoonObj);
					printf("draw time = %g ms\n", ((double)cvGetTickCount() - drawTime) / ((double)cvGetTickFrequency()*1000.));
					situationCartoonNum++;
				}else{ // not similar: do not paste on this frame
					changeFlag = false;
					// save this frame's template for later matching
					vec_formerTemplates.push_back(detectedFaceTemplates[similiestFaceNum_all_cartoon]);
					base_test1_str += "add to former \n";
				}
			}else if(base_test1 <= 6 && !changeFlag){ // same scene and the previous frame was not pasted
				base_test1_str += "enter: " + toString(38) + "\n";
			}
			base_test1_str += "changeFlag: " + toString((double)changeFlag) + "\n";
			base_test1_str += "simi: " + toString(simiAuto) + "\n";
			t = (double)cvGetTickCount() - t;
			totalTime = t / ((double)cvGetTickFrequency()*1000.);
			base_test1_str += "totalTime: " + toString(totalTime) + "\n";
			if(drawStringFlag)
				drawString(myFrame, base_test1_str);
			ttt = (double)cvGetTickCount() - ttt;
			printf("similarity time = %g ms\n", ttt / ((double)cvGetTickFrequency()*1000.));
			// show the best-matching face
			//Rect faceRegionRect = getROIFromTFacePosition(targetImg, FaceArray[similiestFaceNum]);
			//Mat selected_faceImg(targetImg, faceRegionRect);
			//imshow("检测出的人脸", selected_faceImg);
		}
		base_test1_str = ""; // reset the debug-overlay text
		FSDK_FreeImage(hTargetImg);
		// NOTE(review): detectedFaceTemplates was allocated with new[] but is
		// released with free() — mismatched allocator, undefined behaviour;
		// should be `delete[] detectedFaceTemplates;`.
		free(detectedFaceTemplates);
	}
	if (theApp->dlg.key_esc) { // ESC pressed: abort playback
		theApp->dlg.key_esc = false;
		theApp->dlg.KillTimer(1);
		if(saveSelectedTemplates) // flush and close the template output stream
		{
			fout.clear();
			fout.close();
		}
		return;
	}else if (theApp->dlg.key_space){ // space toggles cartooning on/off
		theApp->dlg.key_space = false;
		if(grabFlag)
		{
			grabFlag = false;
		}else{
			grabFlag = true;
		}
	} else if (theApp->dlg.key_enter){
		theApp->dlg.KillTimer(1); // pause playback
		cout << "按了enter" << endl;
		judge_Image = myFrame.clone();
		int grabReturn = grab(originalMat, name, selected_rect); // enter pauses and runs the matting tool; selected_rect is the user-chosen region
		if(grabReturn == GRAB_OK) // did the matting succeed?
		{
			int tempCount = -1; // number of faces in the current frame
			HImage h_judge_image;
			h_judge_image = loadImageFromMat(judge_Image);
			/*imwrite("judgeImg.jpg", judge_Image);
			FSDK_LoadImageFromFile(&h_judge_image, "judgeImg.jpg");*/
			//FSDK_SaveImageToFile(h_judge_image,"multiddddddddddddd.jpg");
			FSDK_FaceTemplate *judgeFaceTemplates = new FSDK_FaceTemplate[20];
			TFacePosition *judgeFaceArray;
			int genders[20];
			judgeFaceArray = detectUsingLuxand(h_judge_image, tempCount, judgeFaceTemplates, genders);
			int center_x = -1;
			int center_y = -1;
			if(tempCount > 0){
				selectFace = true;
				situationCount = 0;
				// centre of the selected region
				center_x = selected_rect.x + selected_rect.width / 2;
				center_y = selected_rect.y + selected_rect.height / 2;
				// the face whose centre is closest to the region centre is the selected one
				int minDist = INT_MAX, closestFaceNum = -1;
				for (int i = 0; i < tempCount; i++)
				{
					int dist = myDist(center_x, center_y, judgeFaceArray[i].xc, judgeFaceArray[i].yc);
					if(dist < minDist)
					{
						minDist = dist;
						closestFaceNum = i;
					}
				}
				//FaceCartoon faceCartoon; // a stack object won't do here: every call would reuse the same address
				FaceCartoon *faceCartoon = new FaceCartoon();
				faceCartoon->cartoonNumArray = new int[vec_faceTemplates.size()]; // one counter per available cartoon
				// zero the counters
				for(int i = 0; i < vec_faceTemplates.size(); i++)
				{
					faceCartoon->cartoonNumArray[i] = 0;
				}
				faceCartoon->faceTemplates.push_back(judgeFaceTemplates[closestFaceNum]); // template of the chosen face
				faceCartoon->gender = genders[closestFaceNum];
				// find the most similar person, and the cartoons ranked by similarity
				int *cartoonNums = findMatchCartoonNum(*faceCartoon);
				for(int i = 0; i< vec_faceTemplates.size(); i++)
					cout<< cartoonNums[i] << " ";
				cout<<"fwff"<<endl;
				int cartoonNum;
				if(simiAuto){
					theApp->selectCartoonDlg.cartoonNums = cartoonNums;
					theApp->selectCartoonDlg.DoModal();
					cartoonNum = theApp->selectCartoonDlg.getSelectedCartoonNum();
					//cartoonNum = showSelectCartoonWindow(cartoonNums, simiNum);
				}else{
					cartoonNum = cartoonNums[0]; // the most similar one
				}
				faceCartoon->cartoonMatchNum = cartoonNum;
				faceCartoon->cartoonNumArray[cartoonNum]++;
				grabFlag = true;
				vec_FaceCartoons.push_back(faceCartoon);
				if(saveSelectedTemplates) // optionally persist the selected template
				{
					fout.write((char *)(&judgeFaceTemplates[closestFaceNum]), sizeof(judgeFaceTemplates[closestFaceNum]));
				}
				// crop the selected face region
				Rect faceRegionRect = getROIFromTFacePosition(judge_Image, judgeFaceArray[closestFaceNum]);
				Mat selected_faceImg(judge_Image, faceRegionRect);
				FSDK_FreeImage(h_judge_image);
			}else{
				cout << "当前图像没有检测出人脸!!" << endl;
			}
			// NOTE(review): judgeFaceTemplates (new[]) is never released on any
			// path, and h_judge_image is only freed when tempCount > 0 — leaks.
		}
	}
	//imshow("视频播放", myFrame);
	IplImage *originalImg, *cartoonImg;
	// NOTE(review): taking the address of a temporary IplImage is an MSVC
	// extension (ill-formed in standard C++), and the temporaries die at the
	// end of each full-expression, so these pointers are formally dangling
	// when used below — confirm before porting off MSVC.
	originalImg = &IplImage(targetImg);
	cartoonImg = &IplImage(myFrame);
	theApp->dlg.DrawPicToHDC(cartoonImg, IDC_showcartoon);
	theApp->dlg.DrawPicToHDC(originalImg, IDC_showOriginal);
	if(createVideo)
	{
		// append the composited frame to the output video
		outputVideo.write(myFrame);
	}
	// restart the timer after an enter-pause
	if(theApp->dlg.key_enter == true){
		theApp->dlg.key_enter = false;
		theApp->dlg.SetTimer(1,1000/fps,NULL); // resume playback
		theApp->dlg.m_playBtn.SetWindowTextA("暂停");
		theApp->dlg.isPlay = true;
	}
	processedFrame = true;
}