void MakeVideoLocally() { Size init_size; Mat img1 = imread("D:/TDDOWNLOAD/GDown/473/473/1/1.bmp"); Mat img2 = imread("D:/TDDOWNLOAD/GDown/473/473/1/2.bmp"); Mat img3 = imread("D:/TDDOWNLOAD/GDown/473/473/1/3.bmp"); Mat img4 = imread("D:/TDDOWNLOAD/GDown/473/473/1/4.bmp"); init_size = img1.size(); namedWindow("img1", 1); imshow("img1", img1); namedWindow("img2", 1); imshow("img2", img2); namedWindow("img3", 1); imshow("img3", img3); namedWindow("img4", 1); imshow("img4", img4); waitKey(0); VideoWriter saveVideoWriter; saveVideoWriter.open("source.avi", CV_FOURCC('X', 'V', 'I', 'D'), 1, init_size, true); saveVideoWriter << img1; saveVideoWriter << img2; saveVideoWriter << img3; saveVideoWriter << img4; }
/* Function that computes the accumulated optical-flow histogram for each execution of an activity */ Mat HistogramaOF::calcularHistogramaAcumulatOF(String path, int num_imatges, String nom_activitat, int num_repeticio, string ruta, bool video) { String nomVideo, nomImatgeRGBa, nomImatgeDa, nomImatgeRGBb, nomImatgeDb; VideoWriter outputVideo; if(video) { double fps = 15; Size mida(399, 240); nomVideo = ruta+"/"+nom_activitat+"_acumulat_"+to_string(num_repeticio)+".avi"; outputVideo.open(nomVideo, 0, fps, mida, true); } Mat imageA, depthA, imageB, depthB, resultat; nomImatgeRGBa = path+"/c_0.png"; nomImatgeDa = path+"/d_0.png"; imageA = imread(nomImatgeRGBa, IMREAD_COLOR); depthA = imread(nomImatgeDa, IMREAD_GRAYSCALE); resultat = calcularHistogramaOF(imageA, imageA, depthA, depthA); if(video) outputVideo << resultat; for(int k = 1; k <= num_imatges; ++k) { cout << "Image: " << num_repeticio << " - " << k << "/" << num_imatges << endl; nomImatgeRGBb = path+"/c_"+to_string(k)+".png"; nomImatgeDb = path+"/d_"+to_string(k)+".png"; imageB = imread(nomImatgeRGBb, IMREAD_COLOR); depthB = imread(nomImatgeDb, IMREAD_GRAYSCALE); resultat = calcularHistogramaOF(imageA, imageB, depthA, depthB); if(video) outputVideo << resultat; nomImatgeRGBa = nomImatgeRGBb; nomImatgeDa = nomImatgeDb; imageB.copyTo(imageA); depthB.copyTo(depthA); } if(video) outputVideo.release(); return repr; }
/** start the camera and hand frames over to the function detectAndDisplay */ int main( int argc, const char** argv ) { Mat frame; /** 2D or multi-dimensional dense array holding the current frame */ char c; /** key used to close the video-surveillance GUI */ if( !LoadHaarClassifier() || cascade ) { VideoCapture capture(-1); /** open the default camera */ if( !capture.isOpened() ){ cout << "\nCamera couldn't be opened!" << endl; return -1; } capture >> frame; /** grab one frame to determine the frame size */ /** record video */ record.open("Record.avi", CV_FOURCC('D','I','V','X'), 30, frame.size(), true); if( !record.isOpened() ) { cout << "\nVideo writer couldn't be opened!"<< endl; return -1; } while(true) { /** new frame from the camera */ capture >> frame; /** apply the classifier to the frame */ if( !frame.empty() ) { /** detect, save and write detected faces */ detectAndDisplay( frame ); } /** press Esc to close face detection */ c = cvWaitKey(33); /** wait 33 ms, so a new frame is shown roughly every 33 ms */ if( c == 27 ) { break; } /** 27 is the ASCII code for Esc */ } } return 0; }
void HOGDetectorGPU::processVideo(char* ptrNameInput, char* ptrNameOutput){ VideoCapture inputVideo(ptrNameInput); VideoWriter outputVideo; Size sizeVideo = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH),(int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); Mat* ptrMatOut; outputVideo.open(ptrNameOutput, CV_FOURCC('x','v','i','d'), inputVideo.get(CV_CAP_PROP_FPS), sizeVideo, true); Mat* ptrMat; vector<Mat> spl; while(inputVideo.grab()){ ptrMat = new Mat(); inputVideo >> *ptrMat; // get a new frame from video Mat finalMat; split(*ptrMat, spl); cvtColor(*ptrMat, *ptrMat, CV_BGR2GRAY); ptrMatOut = detectPeople(ptrMat); spl[0] = *(ptrMatOut); spl[1] = *(ptrMatOut); spl[2] = *(ptrMatOut); merge(spl, finalMat); outputVideo << finalMat; imshow("edges", *(ptrMatOut)); if(waitKey(30) >= 0) break; //Deletes the processed frame and the input frame allocated above (avoids leaking one Mat per iteration) delete ptrMatOut; delete ptrMat; } outputVideo.release(); }
void Converter::saveTCMat2RGBVideo(char* resultDir, TCVectorizedVidMat& data){ VideoWriter outputVideo; outputVideo.open(resultDir, data.ex, data.fps, data.size, true); if (!outputVideo.isOpened()){ cout << "cannot save video." << endl; throw 1; } cout << "start save"<<endl; for (int i = 0 ; i < data.nMat ; ++i){ // Devectorization cout << i << endl; TCMat frame; frame.rows = data.rows; frame.cols = data.cols; frame.green = data.green.col(i); frame.green.reshape(data.rows, data.cols); frame.red = data.red.col(i); frame.red.reshape(data.rows, data.cols); frame.blue = data.blue.col(i); frame.blue.reshape(data.rows, data.cols); cv::Mat img = TCMat2RGB(frame); imshow("Video", img); int key = waitKey(10); if((char)key == 'q') { break; } outputVideo << img; } return; }
void run() { VideoWriter writer; Mat stabilizedFrame; int nframes = 0; // for each stabilized frame while (!(stabilizedFrame = stabilizedFrames->nextFrame()).empty()) { nframes++; // init writer (once) and save stabilized frame if (!outputPath.empty()) { if (!writer.isOpened()) writer.open(outputPath, CV_FOURCC('D','I','V','X'),//CV_FOURCC('X','V','I','D'), outputFps, stabilizedFrame.size(), true); writer << stabilizedFrame; } // show stabilized frame if (!quietMode) { imshow("stabilizedFrame", stabilizedFrame); char key = static_cast<char>(waitKey(3)); if (key == 27) { cout << endl; break; } } } cout << "processed frames: " << nframes << endl << "finished\n"; }
void MainWindow::on_cmdRun_clicked() { if (selectedFile != "") { VideoCapture vid(selectedFile.toStdString().c_str()); VideoWriter vout; vout.open((selectedFile + "out.mpeg").toStdString().c_str(),CV_FOURCC('M','P','E','G'),30,Size(vid.get(CV_CAP_PROP_FRAME_WIDTH), vid.get(CV_CAP_PROP_FRAME_HEIGHT))); qDebug() << "Frame cnt " << frame_count; QFileInfo filedir = QFileInfo(selectedFile); QDir currdir = filedir.dir(); int sample_size = frame_count; if (sample_size <= 0) return; int sample_interval = frame_count / sample_size; int sample_cnt = 0; ui->prog->setMaximum(frame_count); bool undistorted = ui->chkDistort->isChecked(); Mat cammatrix; Mat distortion; double rms; if (undistorted) { FileStorage file(calibFile.toStdString().c_str(),FileStorage::READ); file["rms"] >> rms; file["cameramatrix"] >> cammatrix; file["distortion"] >> distortion; } for (int i = 0; i < frame_count; i++) { Mat r; vid >> r; QString writepath = selectedFile + "-" + QString::number(i) + ".png"; Mat c; if (undistorted) { undistort(r, c, cammatrix, distortion); } else { c = r; } imwrite(writepath.toStdString().c_str(), c); vout << c; ui->prog->setValue(i); } ui->prog->setValue(frame_count); }
void initializeVideoWriter(const char* fileName, int width, int height, int fps) { Size size = Size(width, height); //int fourcc = VideoWriter::fourcc('F','L','V','1'); int fourcc = CV_FOURCC('F', 'L', 'V', '1'); bool opened = videoWriter.open(fileName, fourcc, fps, size, true); printf("video opened=%d %dx%d %d fps\n", opened, width, height, fps); videoWriterReleased=0; }
void initVideoWriter(CvCapture *inputVideo, string source, string addStr) { string::size_type pAt = source.find_last_of('.'); // Find extension point const string NAME = source.substr(0, pAt) + addStr + ".avi"; // Form the new name with container Size S = Size((int) cvGetCaptureProperty(inputVideo, CV_CAP_PROP_FRAME_WIDTH), //Acquire input size (int) cvGetCaptureProperty(inputVideo, CV_CAP_PROP_FRAME_HEIGHT)); outputVideo.open(NAME, -1, cvGetCaptureProperty(inputVideo, CV_CAP_PROP_FPS), S, true); CV_Assert(outputVideo.isOpened()); }
int main(int argc, char* argv[]) { VideoCapture cap("joker.avi"); // open the video file for reading if (!cap.isOpened()) // if not success, exit program { cout << "Cannot open the video file" << endl; return -1; } //cap.set(CV_CAP_PROP_POS_MSEC, 200); //start the video at 200 ms double fps = cap.get(CV_CAP_PROP_FPS); //get the frames per second of the video double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form cout << "Frames per second : " << fps << endl; cout << "Frame size : " << dWidth << " x " << dHeight << endl; cout << "Codec type of the video : " << ex << endl; int askFileTypeBox = cap.get(CV_CAP_PROP_FOURCC); //-1 is show box of codec int Color = 1; Size S = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH), (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT)); VideoWriter outputVideo; outputVideo.open(".\\outVideo.avi", -1, cap.get(CV_CAP_PROP_FPS), S, Color); //namedWindow("MyVideo", CV_WINDOW_AUTOSIZE); //create a window called "MyVideo" //while (1) //{ // Mat frame; // bool bSuccess = cap.read(frame); // read a new frame from video // if (!bSuccess) //if not success, break loop // { // cout << "Cannot read the frame from video file" << endl; // break; // } // imshow("MyVideo", frame); //show the frame in "MyVideo" window // if (waitKey(30) == 27) //wait for 'esc' key press for 30 ms; if 'esc' is pressed, break loop // { // cout << "esc key is pressed by user" << endl; // break; // } //} return 0; }
int main( int argc, char* argv[] ) { if(argc > 2) NOWRITE = 0; cout << "nowrite = " << NOWRITE << endl; namedWindow( "Example2_10", CV_WINDOW_AUTOSIZE ); namedWindow( "Log_Polar", CV_WINDOW_AUTOSIZE ); Mat bgr_frame; VideoCapture capture; if( argc < 2 || !capture.open( argv[1] ) ){ help(); cout << "Failed to open " << argv[1] << "\n" << endl; return -1; } double fps = capture.get(CV_CAP_PROP_FPS); cout << "fps = " << fps << endl; Size size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT)); cout << " frame (w, h) = (" << size.width << ", " << size.height << ")" <<endl; VideoWriter writer; if(! NOWRITE) { writer.open( // On Linux this will only work if you've installed the ffmpeg development files correctly, argv[2], // otherwise you get a segmentation fault. Windows is probably better. CV_FOURCC('M','J','P','G'), fps, size ); } Mat logpolar_frame(size,CV_8UC3); Mat gray_frame(size,CV_8UC1); for(;;) { capture >> bgr_frame; if( bgr_frame.empty() ) break; imshow( "Example2_10", bgr_frame ); cvtColor( // We never make use of this gray image bgr_frame, gray_frame, CV_BGR2GRAY); IplImage lp = logpolar_frame; IplImage bgrf = bgr_frame; cvLogPolar( &bgrf, &lp, // This is just a fun conversion that mimics the human visual system cvPoint2D32f(bgr_frame.cols/2, bgr_frame.rows/2), 40, CV_WARP_FILL_OUTLIERS ); imshow( "Log_Polar", logpolar_frame ); // Sigh, on Linux, depending on your ffmpeg, this often won't work ... if(! NOWRITE) writer << logpolar_frame; char c = waitKey(10); if( c == 27 ) break; } capture.release(); return 0; }
int main(){ // Variables VideoCapture capture; VideoWriter writer; Mat frame; // Read from source capture.open(0); //capture.open("../Videos/chessboard-1.avi"); // Check if the source was opened correctly if (!capture.isOpened()){ cout << "Cannot open video device or file!" << endl; return -1; } // Read first frame (needed to configure VideoWriter) capture.read(frame); if (frame.empty()){ printf("VideoCapture failed getting the first frame!\n"); return -1; } // Open a video file for writing and check writer.open("./video.avi", CV_FOURCC('D','I','V','X'), 15, frame.size(), true); if( !writer.isOpened() ) { printf("VideoWriter failed to open!\n"); return -1; } // Read the video while(true){ // Read new frame capture.read(frame); if (frame.empty()) break; // Write frame to a file writer.write(frame); // Show frame imshow("video", frame); if ((cvWaitKey(10) & 255) == 27) break; } // Release memory capture.release(); frame.release(); return 0; }
void openForegroundVideoWithoutNoise(const char* foregroundVideoWithoutNoisePath) { frameWithoutNoise = cvCreateImage(cvSize(foregroundVideoWidth, foregroundVideoHeight), IPL_DEPTH_8U, 3); Size frameSize; frameSize.width = foregroundVideoWidth; frameSize.height = foregroundVideoHeight; if(!foregroundVideoWithoutNoise.open(foregroundVideoWithoutNoisePath, CV_FOURCC('D', 'I', 'V', 'X'), foregroundVideoRate,frameSize, true)) { printf("open foregroundVideoWriter error ...\n"); return; } }
ImLogger::ImLogger(std::map<std::string,std::string> to_hook,double fps,cv::Size frame_size) : frame_size(frame_size) { for(auto x : to_hook) { VideoWriter wr; wr.open ( x.second , CV_FOURCC('F','M','P','4') , fps , frame_size); hooks.insert(make_pair(x.first,wr)); } }
GenericClassnameOneTracker9000(string filename, string log_name, string output_path, bool extern_debug, bool extern_recording) { debug = extern_debug; recording = extern_recording; capture = VideoCapture(filename); logger.open(log_name); logger << filename << endl; //int codec = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); Size frame_size = Size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT)); tracking_recorder.open(output_path, CV_FOURCC('M', 'P', '4', '3'), capture.get(CV_CAP_PROP_FPS), frame_size); }
int main(int, char**) { Mat src; // use default camera as video source VideoCapture cap(0); // check if we succeeded if (!cap.isOpened()) { cerr << "ERROR! Unable to open camera\n"; return -1; } // get one frame from camera to know frame size and type cap >> src; // check if we succeeded if (src.empty()) { cerr << "ERROR! blank frame grabbed\n"; return -1; } bool isColor = (src.type() == CV_8UC3); //--- INITIALIZE VIDEOWRITER VideoWriter writer; int codec = CV_FOURCC('M', 'J', 'P', 'G'); // select desired codec (must be available at runtime) double fps = 25.0; // framerate of the created video stream string filename = "./live.avi"; // name of the output video file writer.open(filename, codec, fps, src.size(), isColor); // check if we succeeded if (!writer.isOpened()) { cerr << "Could not open the output video file for write\n"; return -1; } //--- GRAB AND WRITE LOOP cout << "Writing videofile: " << filename << endl << "Press any key to terminate" << endl; for (;;) { // check if we succeeded if (!cap.read(src)) { cerr << "ERROR! blank frame grabbed\n"; break; } // encode the frame into the videofile stream writer.write(src); // show live and wait for a key with timeout long enough to show images imshow("Live", src); if (waitKey(5) >= 0) break; } // the videofile will be closed and released automatically in VideoWriter destructor return 0; }
// Write a frame to an output video // optionally, if dateAndTime is set, stamp the date, time and match information to the frame before writing void writeVideoToFile(VideoWriter &outputVideo, const char *filename, const Mat &frame, NetworkTable *netTable, bool dateAndTime) { if (!outputVideo.isOpened()) outputVideo.open(filename, CV_FOURCC('M','J','P','G'), 15, Size(frame.cols, frame.rows), true); WriteOnFrame textWriter(frame); if (dateAndTime) { string matchNum = netTable->GetString("Match Number", "No Match Number"); double matchTime = netTable->GetNumber("Match Time",-1); textWriter.writeMatchNumTime(matchNum,matchTime); textWriter.writeTime(); } textWriter.write(outputVideo); }
void startVideo(){ int ancho=40; MostrarClass mostrarImagen; cv::VideoCapture cap; // Open the video; pass 0 to use the webcam. std::string videoFile = "0"; cap.open(0); if (!cap.isOpened()) // Check whether the video was opened successfully. return; Mat frame; cap >> frame; // Grab the first frame of the video to display a base video. double width = cap.get(CV_CAP_PROP_FRAME_WIDTH);// Video width. double height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);// Video height. mostrarImagen.Define(width,height,ancho); VideoWriter video; video.open ( "outputVideo.avi", CV_FOURCC('D','I','V','X'), 30, Size(cap.get(CV_CAP_PROP_FRAME_WIDTH), cap.get (CV_CAP_PROP_FRAME_HEIGHT)), true ); int buffer [ancho*ancho*3]; while (true)// loop that reads every frame of the video until the user stops it. { if(mostrar){ cap >> frame; // Grab the next frame of the video. Mat a;// Create a "frame" to store it without referencing the frame that changes inside the loop. frame.copyTo(a);// Copy the video frame into the variable a. IplImage* imageOriginal; imageOriginal = cvCreateImage(cvSize(width,height), IPL_DEPTH_8U, 3); imageOriginal->imageData = (char *) a.data; for (int i = 0; i < height/ancho; i++){ for (int j = 0; j < width/ancho; j++){ for(int k =0;k<ancho;k++){ for(int z=0;z<ancho;z++){ CvScalar pixel = cvGet2D(imageOriginal, k+ancho*i ,z+ancho*j); buffer[k*(ancho*3)+z*3]=pixel.val[0]; buffer[k*(ancho*3)+z*3+1]=pixel.val[1]; buffer[k*(ancho*3)+z*3+2]=pixel.val[2]; int pos =k*(ancho*3)+z*3; //std::cout<<pos<<" "<<buffer[k*(ancho*3)+z*3+2]<<"\n"; } } mostrarImagen.MostrarImagen(i, j, buffer); } } video << a; // Write the created frame to the new file. } if (waitKey(1) >= 0) mostrar=!mostrar;//break;// if the user presses the Esc key the program ends. } return; }
VideoWriter setOutput(const VideoCapture &input) { // Reference from // http://docs.opencv.org/2.4/doc/tutorials/highgui/video-write/video-write.html // Acquire input size Size S = Size((int)input.get(CV_CAP_PROP_FRAME_WIDTH), (int)input.get(CV_CAP_PROP_FRAME_HEIGHT)); // Get Codec Type- Int form int ex = static_cast<int>(input.get(CV_CAP_PROP_FOURCC)); VideoWriter output; output.open("outputVideo.avi", CV_FOURCC('H', 'F', 'Y', 'U'), input.get(CV_CAP_PROP_FPS), S, true); return output; }
void skizImage::WriteVideo() { string fileName("/home/xuweirong/video/kakumeiki.mp4"); VideoCapture inputVideo(fileName); if(!inputVideo.isOpened()) { printf("capture is not opened\n"); return; } string::size_type pAt = fileName.find_last_of('.'); const string name = fileName.substr(0,pAt)+"R"+".avi"; int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); char EXT[5] = {(char)(ex & 0XFF), (char)((ex & 0XFF00) >> 8), (char)((ex & 0XFF0000) >> 16), (char)((ex & 0XFF000000) >> 24), 0}; Size S = Size((int)inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), (int)inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); printf("Input frame resolution: Width=%d, Height=%d, number of frames: %.0f\n", S.width, S.height, inputVideo.get(CV_CAP_PROP_FRAME_COUNT)); printf("Input codec type: %s\n", EXT); VideoWriter outputVideo; outputVideo.open(name, ex, inputVideo.get(CV_CAP_PROP_FPS), S, true); if(!outputVideo.isOpened()) { printf("outputVideo is not opened\n"); return; } Mat src, res; vector<Mat> spl; for(;;) { inputVideo >> src; if (src.empty()) break; if ((inputVideo.get(CV_CAP_PROP_POS_MSEC)/1000) > 20) break; split(src, spl); spl[0] = Mat::zeros(S, spl[0].type()); spl[1] = Mat::zeros(S, spl[0].type()); merge(spl, res); outputVideo << res; } cout << "Finished writing" << endl; }
int main() { //image decelerations Mat showImg,curFrame,lastFrame,lastGrey,curGrey; Mat motionCondensed; int kernelSize=7; VideoCapture video =VideoCapture("onionskin-1.MP4"); VideoWriter videoOut; videoOut.open("major_flow.avi", CV_FOURCC('X','V','I','D'), video.get(CV_CAP_PROP_FPS), Size(video.get(CV_CAP_PROP_FRAME_WIDTH),video.get(CV_CAP_PROP_FRAME_HEIGHT)), true); if (!videoOut.isOpened()) { cout << "Could not open the output video for write! " << endl; return -1; } double total_frames; total_frames=video.get(CV_CAP_PROP_FRAME_COUNT); video>>showImg; curFrame=showImg.clone(); cv::cvtColor(curFrame,lastGrey,CV_BGR2GRAY); //initialize the motion condensed image motionCondensed=Mat(showImg.rows,(int)total_frames,CV_8UC3); double displacementX=0, displacementY=0; int frameCounter=1; char key=0; columnsMean(curFrame,motionCondensed,0); while(key!=27) { video>>showImg; if (showImg.empty()) { break; } curFrame=showImg.clone(); cv::cvtColor(curFrame,curGrey,CV_BGR2GRAY); columnsMean(curFrame,motionCondensed,frameCounter); frameCounter++; } waitKey(); return 0; }
int main(int argc, char ** argv){ VideoWriter vidwriter; identify_mode(argc,argv); if (mode == "help"){ help_display(); } else if (mode == "detect"){ FaceDetect detector; Mat frame; double fps; vector <Rect> face_pos; vector <struct Eye> eye_pos; namedWindow("Face Detection"); cout << "Press ESC to end detection" << endl; if (input_type == "video" || input_type == "webcam"){ if (input_type == "video") cap.open(input_path); else cap.open(0); string vidout,extn; int write_flag = 0; if (input_type == "video" && output_type == "record"){ stringstream liness(input_path); getline(liness,vidout,'.'); getline(liness,extn); vidout = vidout + "_with_face_tag." + extn; waitKey(0); vidwriter.open(vidout.c_str(),CV_FOURCC('P','I','M','1'),cap.get(CV_CAP_PROP_FPS),Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH),(int)cap.get(CV_CAP_PROP_FRAME_HEIGHT))); write_flag = 1; } while (1) { cap >> frame; fps = 1.0/detector.detect_face(face_pos,eye_pos,frame); detector.draw_face(face_pos,eye_pos,frame,1); if (write_flag) vidwriter << frame; put_text_fps(frame,fps); imshow("Face Detection",frame); if (waitKey(10) == 27) break; } } else {
/** @function main */ int main( int argc, const char** argv ) { clock_t timer = clock(); CvCapture* capture; Mat frame; //-- 1. Load the cascades if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; } if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; } //-- 2. Read the video stream capture = cvCaptureFromCAM(-1); Size S = Size(640,480); const string NAME="1.avi"; outputVideo.open(NAME,CV_FOURCC('M','J','P','G'),24, S, true); // 24 FPS if( capture ) { while( true ) { timer = clock(); frame = cvQueryFrame( capture ); //-- 3. Apply the classifier to the frame if( !frame.empty() ) { detectAndDisplay( frame ); } else { printf(" --(!) No captured frame -- Break!"); break; } timer = clock() - timer; float time = (float)timer / CLOCKS_PER_SEC; // cast before dividing to avoid integer division cout<<"time "<<time<<"\n"; int c = waitKey(1); if( (char)c == 'c' ) { break; } } } return 0; }
void Converter::mergeImgToVid(char* src, char* dst, double fps){ path p(src); VideoWriter output; bool opened = false; if (exists(p) && is_directory(p)){ for (directory_entry& x : directory_iterator(p)){ if (is_regular_file(x.path())){ string path = x.path().string(); if (path.find( ".DS_Store" ) != string::npos ) continue; cv::Mat inImg = imread(path, CV_LOAD_IMAGE_COLOR); if (!output.isOpened()){ output.open(dst, CV_FOURCC('M', 'J', 'P', 'G'), fps, inImg.size(), true); } output.write(inImg); } } } }
void RunVision() { // pthread_t visionThread; // // if(1 == pthread_create(&visionThread, NULL, VisionActionAsync, NULL)) // { // fprintf(stderr, "Couldn't create Vision thread\n"); // exit(1); // } //} // //void* VisionActionAsync(void*) //{ Vision::GetInstance()->OpenFlyCapCamera(); signal(SIGTERM, SigTermHandler); Vision::GetInstance()->IsVisionThreadRunning = true; const string outputFile = "/home/robot/workspace2/RoboCup2016/RoboCup2016/GoalKeeper2016/demo.avi"; VideoWriter outputVideo; outputVideo.open(outputFile, CV_FOURCC('M', 'J', 'P', 'G'), 10, Size(FRAME_WIDTH, FRAME_HEIGHT), true); // capture loop char key = 0; while (key != 'q' && Vision::GetInstance()->IsVisionThreadRunning == true) { Mat currentFrame; Vision::GetInstance()->GetFrameFromFlyCap(currentFrame); Vision::GetInstance()->ProcessCurrentFrame(currentFrame); imshow("Output", currentFrame); key = waitKey(30); outputVideo.write(currentFrame); } Vision::GetInstance()->CloseFlyCapCamera(); }
void Video::grabVideoAndData(string path, string ext, string buffer, VideoWriter &writer, const Mat &image) { if (!writer.isOpened()) { string source = path; source.append(buffer); source.append("." + ext); // Open the output writer.open(source, CV_FOURCC('X', 'V', 'I', 'D'), 12, cv::Size(image.size().width, image.size().height), true); if (!writer.isOpened()) { printf("Could not open the output video for write: %s", source.c_str()); } } writer.write(image); }
int main(){ namedWindow("Video"); namedWindow("erstes Video-Frame"); VideoCapture videoCapture; // ACHTUNG: Pfad bitte anpassen! videoCapture.open("C:/Users/Andreas/Desktop/Micro-dance_2_.avi"); int width = videoCapture.get(CV_CAP_PROP_FRAME_WIDTH); int height = videoCapture.get(CV_CAP_PROP_FRAME_HEIGHT); // >>>>>>>>>> VideoWriter Objekt initialisieren VideoWriter videoWriter; //>>>>>>>>>> VideoWriter Datei öffnen videoWriter.open("Video.avi", CV_FOURCC('P','I','M','1'), 30, Size(width, height), true); Mat firstFrame; int frameNumber = 0; while(true){ Mat videoFrame; if (false == videoCapture.read(videoFrame)){ break; } //>>>>>>>>>> VideoWriter Frame schreiben videoWriter.write(videoFrame); frameNumber++; if (frameNumber == 1){ videoFrame.copyTo(firstFrame); // kopiert die Pixel des ersten Video Frames } imshow("erstes Video-Frame", firstFrame); imshow("Video", videoFrame); waitKey(30); } return 0; }
/** * Sets up a video writer for .AVI files with the given filename * * @param frameSize size of the frames * @param filenamePrefix prefix for the new video filename * @param configStruct OpenCvStereoConfig structure * for reading the directory things should be saved in * and other data. * @param increment_number Default is true. Set to false to * not increment the video number. Used for stereo vision * systems if you want to have a left and right video with * the same number (ie set to false for the second writer * you initialize.) * @param this_video_number (optional) if non-null, then the video number will be supplied * @param is_color true if it is color * @default false * * @retval VideoWriter object */ VideoWriter RecordingManager::SetupVideoWriterAVI(string filenamePrefix, Size frameSize, bool increment_number, bool is_color, int *this_video_number) { VideoWriter recorder; CheckOrCreateDirectory(stereo_config_.videoSaveDir); string filename = GetNextVideoFilename(filenamePrefix, false, increment_number, this_video_number); char fourcc1 = stereo_config_.fourcc.at(0); char fourcc2 = stereo_config_.fourcc.at(1); char fourcc3 = stereo_config_.fourcc.at(2); char fourcc4 = stereo_config_.fourcc.at(3); recorder.open(filename, CV_FOURCC(fourcc1, fourcc2, fourcc3, fourcc4), 30, frameSize, is_color); if (!recorder.isOpened()) { printf("VideoWriter failed to open!\n"); } else { cout << endl << "Opened " << filename << endl; } return recorder; }
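A minimal usage sketch for SetupVideoWriterAVI, following the doc comment above: the first call increments the shared video number and the second call reuses it, so a stereo left/right pair gets the same index. The RecordingManager construction, frame size and filename prefixes below are illustrative assumptions, not part of the original code.
RecordingManager recording_manager;   // assumes a configured, usable RecordingManager instance
Size frame_size(640, 480);            // example frame size
// Left writer: increments the video number (increment_number = true, grayscale video).
VideoWriter left_writer = recording_manager.SetupVideoWriterAVI("video-left", frame_size, true, false, nullptr);
// Right writer: reuses the same number (increment_number = false) so the pair shares one index.
VideoWriter right_writer = recording_manager.SetupVideoWriterAVI("video-right", frame_size, false, false, nullptr);
// Frames can then be written as usual: left_writer << left_frame; right_writer << right_frame;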
int main(){ namedWindow("Screen",WINDOW_NORMAL); resizeWindow("Screen", 1800, 1000); //Mat image, prevImage; cvSetMouseCallback("Screen", mouseHandlerL, NULL); const int start = 5; const int end = 10; if (end <= start){ return -1;} VideoWriter Vout; // multiFrame.avi Vout.open("junk.avi", CV_FOURCC('M', 'J', 'P', 'G'), 30, Size(640,480), 1); // This is made into a function so I can call many different images sets easily. // multiFrameTrack(start, end, Vout, "parallel_real/ParallelReal", "outParaRealStart.jpg", "outParaRealEnd.jpg", "paraRealStart.txt", "paraRealEnd.txt", 1); // At 15/39 search window multiFrameTrack(start, end, Vout, "turned_real/TurnReal", "outTurnRealStart.jpg", "outTurnRealEnd.jpg", "turnRealStart.txt", "turnRealEnd.txt", 2); // multiFrameTrack(start, end, Vout, "parallel_cube/ParallelCube", "outParaCubeStart.jpg", "outParaCubeEnd.jpg", "paraCubeStart.txt", "paraCubeEnd.txt", 1); // Just gets better points... // multiFrameTrack(start, end, Vout, "turned_cube/TurnCube", "outTurnCubeStart.jpg", "outTurnCubeEnd.jpg", "turnCubeStart.txt", "turnCubeEnd.txt", 1); // At 55/89 search window }
/// Program recording a video stream from camera int main(int argc,char *argv[]) { Catcher catcher; VideoCapture cap; VideoWriter writ; char cont='e'; Mat obr; long counter=0; if(argc<3) { cerr<<"Error: not enough parameters."<<endl<<argv[0] <<" Input_source_name output_target_name"<<endl; return 1; } //cap.open(atoi(argv[1])); catcher.init(atoi(argv[1])); writ.open(argv[2],CV_FOURCC('D','I','V','X'),10,Size(640,480)); namedWindow("input",CV_WINDOW_NORMAL); do { ++counter; try { //cap>>obr; catcher.catchFrame(obr); writ<<obr; cerr<<counter<<endl; imshow("input",obr); cont=waitKey(500); } catch(Exception &ex) { cerr<<"Exception passed up through "<<__FILE__<<':'<<__LINE__ <<" in function "<<__func__; cerr<<ex.code<<endl<<ex.err<<endl<<ex.func<<endl<<ex.line<<endl; } } while(cont!='q'); return 0; }