/* * Create pipe with either write- _or_ read-semantics. Fortunately for us, * on SYS_MSDOS, we don't need both at the same instant. */ int inout_popen(FILE **fr, FILE **fw, char *cmd) { char *type = (fw != 0) ? "w" : "r"; static FILE *pp[2] = {0, 0}; int fd; TRACE(("inout_popen(fr=%p, fw=%p, cmd='%s')\n", fr, fw, cmd)); ffstatus = file_is_pipe; fileeof = FALSE; append_libdir_to_path(); /* Create the file that will hold the pipe's content */ if ((fd = createTemp(type)) >= 0) { if (fw == 0) { *fr = pp[0] = readPipe(cmd, -1, fd); myWrtr = 0; myPipe = &pp[0]; /* "fr" may be stack-based. */ myCmds = 0; } else { *fw = pp[1] = fdopen(fd, type); myPipe = fr; myWrtr = &pp[1]; /* "fw" may be stack-based. */ myCmds = strmalloc(cmd); } } return TRUE; }
/*
 * If we were writing to a pipe, invoke the read-process with stdin set to the
 * temporary-file.  This is used in the filter-buffer code, which needs both
 * read- and write-pipes.
 *
 * Only acts when inout_popen() deferred a command ('myCmds' non-null); the
 * command string is freed afterwards so a second call is a no-op.
 */
void npflush(void)
{
    if (myCmds != 0) {
	if (myWrtr != 0) {
	    int fd;
	    /* Static so '&pp' stays valid after we return; published
	     * through the module-global 'myRead'.
	     */
	    static FILE *pp;

	    /* Make sure everything the caller wrote reaches the temp file
	     * before the child process reads it.
	     */
	    (void) fflush(*myWrtr);
#if 0
	    (void) fclose(*myWrtr);
	    *myWrtr = fopen(myName[0], "r");
#else
	    /* Reposition to the start instead of reopening the file. */
	    rewind(*myWrtr);
#endif
	    /* Second temp file collects the command's output. */
	    /* NOTE(review): neither createTemp() nor fdopen() results are
	     * checked here; fd < 0 or pp == NULL would go unnoticed — confirm
	     * callers can tolerate that.
	     */
	    fd = createTemp("r");
	    pp = fdopen(fd, "r");
	    myRead = &pp;
	    /* Spawn the command: stdin from the written temp file,
	     * stdout into the new one.
	     */
	    *myPipe = readPipe(myCmds, fileno(*myWrtr), fd);
	}
	FreeAndNull(myCmds);
    }
}
/**
 * colorMagnify - color magnification
 *
 * Eulerian color magnification pipeline: Gaussian spatial filtering of every
 * frame, ideal-bandpass temporal filtering across the whole clip, then the
 * amplified color signal is added back onto each frame and written to a temp
 * video which replaces the current input.  Progress and completion are
 * reported through Qt signals.  No-op if no capture device is open.
 */
void VideoProcessor::colorMagnify()
{
    // set filter
    setSpatialFilter(GAUSSIAN);
    setTemporalFilter(IDEAL);

    // create a temp file
    createTemp();

    // current frame
    cv::Mat input;
    // output frame
    cv::Mat output;
    // motion image
    cv::Mat motion;
    // temp image
    cv::Mat temp;

    // video frames
    std::vector<cv::Mat> frames;
    // down-sampled frames
    std::vector<cv::Mat> downSampledFrames;
    // filtered frames
    std::vector<cv::Mat> filteredFrames;

    // concatenate image of all the down-sample frames
    cv::Mat videoMat;
    // concatenate filtered image
    cv::Mat filtered;

    // if no capture device has been set
    if (!isOpened())
        return;

    // set the modify flag to be true
    modify = true;

    // is processing
    stop = false;

    // save the current position so we can restore it when done
    long pos = curPos;

    // jump to the first frame
    jumpTo(0);

    // 1. spatial filtering: keep the full-resolution frame and the lowest
    //    Gaussian-pyramid level of every frame
    while (getNextFrame(input) && !isStop()) {
        input.convertTo(temp, CV_32FC3);
        frames.push_back(temp.clone());
        // spatial filtering
        std::vector<cv::Mat> pyramid;
        spatialFilter(temp, pyramid);
        downSampledFrames.push_back(pyramid.at(levels-1));
        // update process
        std::string msg= "Spatial Filtering...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (isStop()){
        // user cancelled: reset the frame counter and bail out
        emit closeProgressDialog();
        fnumber = 0;
        return;
    }
    emit closeProgressDialog();

    // 2. concat all the frames into a single large Mat
    // where each column is a reshaped single frame
    // (for processing convenience)
    concat(downSampledFrames, videoMat);

    // 3. temporal filtering
    temporalFilter(videoMat, filtered);

    // 4. amplify color motion
    amplify(filtered, filtered);

    // 5. de-concat the filtered image into filtered frames
    deConcat(filtered, downSampledFrames.at(0).size(), filteredFrames);

    // 6. amplify each frame
    // by adding frame image and motions
    // and write into video
    // NOTE(review): the loop stops at length-1, so the last frame is never
    // written — confirm whether this is intentional.
    fnumber = 0;
    for (int i=0; i<length-1 && !isStop(); ++i) {
        // up-sample the motion image back to full frame resolution
        upsamplingFromGaussianPyramid(filteredFrames.at(i), levels, motion);
        resize(motion, motion, frames.at(i).size());
        temp = frames.at(i) + motion;
        output = temp.clone();
        double minVal, maxVal;
        minMaxLoc(output, &minVal, &maxVal); //find minimum and maximum intensities
        // stretch the intensity range to [0,255] while converting to 8-bit
        // NOTE(review): divides by (maxVal - minVal); a perfectly flat frame
        // would divide by zero — confirm inputs make that impossible.
        output.convertTo(output, CV_8UC3, 255.0/(maxVal - minVal),
                         -minVal * 255.0/(maxVal - minVal));
        tempWriter.write(output);
        std::string msg= "Amplifying...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (!isStop()) {
        emit revert();
    }
    emit closeProgressDialog();

    // release the temp writer
    tempWriter.release();

    // change the video to the processed video
    setInput(tempFile);

    // jump back to the original position
    jumpTo(pos);
}
/**
 * motionMagnify - eulerian motion magnification
 *
 * Frame-by-frame motion magnification: each frame is converted to Lab,
 * decomposed into a Laplacian pyramid, temporally filtered with an IIR
 * bandpass (lowpass1/lowpass2 carry state across frames), amplified per
 * pyramid level, reconstructed, and added back onto the frame.  The result
 * is written to a temp video which replaces the current input.  No-op if no
 * capture device is open.
 */
void VideoProcessor::motionMagnify()
{
    // set filter
    setSpatialFilter(LAPLACIAN);
    setTemporalFilter(IIR);

    // create a temp file
    createTemp();

    // current frame
    cv::Mat input;
    // output frame
    cv::Mat output;
    // motion image
    cv::Mat motion;

    std::vector<cv::Mat> pyramid;
    std::vector<cv::Mat> filtered;

    // if no capture device has been set
    if (!isOpened())
        return;

    // set the modify flag to be true
    modify = true;

    // is processing
    stop = false;

    // save the current position so we can restore it when done
    long pos = curPos;

    // jump to the first frame
    jumpTo(0);

    while (!isStop()) {
        // read next frame if any
        if (!getNextFrame(input))
            break;

        // scale to [0,1] before the color-space conversion
        input.convertTo(input, CV_32FC3, 1.0/255.0f);

        // 1. convert to Lab color space
        cv::cvtColor(input, input, CV_BGR2Lab);

        // 2. spatial filtering one frame
        cv::Mat s = input.clone();
        spatialFilter(s, pyramid);

        // 3. temporal filtering one frame's pyramid
        // and amplify the motion
        if (fnumber == 0){      // is first frame
            // seed the IIR filter state; the first frame passes through
            // unfiltered
            lowpass1 = pyramid;
            lowpass2 = pyramid;
            filtered = pyramid;
        } else {
            for (int i=0; i<levels; ++i) {
                curLevel = i;
                temporalFilter(pyramid.at(i), filtered.at(i));
            }

            // amplify each spatial frequency bands
            // according to Figure 6 of paper
            cv::Size filterSize = filtered.at(0).size();
            int w = filterSize.width;
            int h = filterSize.height;

            delta = lambda_c/8.0/(1.0+alpha);
            // the factor to boost alpha above the bound
            // (for better visualization)
            exaggeration_factor = 2.0;

            // compute the representative wavelength lambda
            // for the lowest spatial frequency band of Laplacian pyramid
            lambda = sqrt(w*w + h*h)/3;  // 3 is experimental constant

            // walk the pyramid from coarsest to finest level
            // NOTE(review): indexes filtered.at(levels) — assumes the
            // pyramid holds levels+1 images; confirm against spatialFilter.
            for (int i=levels; i>=0; i--) {
                curLevel = i;

                amplify(filtered.at(i), filtered.at(i));

                // go one level down on pyramid
                // representative lambda will reduce by factor of 2
                lambda /= 2.0;
            }
        }

        // 4. reconstruct motion image from filtered pyramid
        reconImgFromLaplacianPyramid(filtered, levels, motion);

        // 5. attenuate I, Q channels
        attenuate(motion, motion);

        // 6. combine source frame and motion image
        if (fnumber > 0)    // don't amplify first frame
            s += motion;

        // 7. convert back to rgb color space and CV_8UC3
        output = s.clone();
        cv::cvtColor(output, output, CV_Lab2BGR);
        output.convertTo(output, CV_8UC3, 255.0, 1.0/255.0);

        // write the frame to the temp file
        tempWriter.write(output);

        // update process
        std::string msg= "Processing...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (!isStop()){
        emit revert();
    }
    emit closeProgressDialog();

    // release the temp writer
    tempWriter.release();

    // change the video to the processed video
    setInput(tempFile);

    // jump back to the original position
    jumpTo(pos);
}