void MainWindow::slotShortcutUsed(int id) { if (id == BOOST1) { qDebug() << "Amplify 1"; amplify(1); } else if (id == BOOST2) { qDebug() << "Amplify 2"; amplify(3); } else if (id == BOOST3) { qDebug() << "Amplify 3"; amplify(9); } else if (id == PREV) { slotPrevFile(); } else if (id == NEXT) { slotNextFile(); } else if (id == OPEN) { slotOpenFlow(); } else if (id == SAVE) { slotSaveFlow(); } else if (id == HELP) { slotShowShortcuts(); } else if (id == QUIT) { close(); } else { qDebug() << "Shortcut with ID " << id << " has no action!"; Q_ASSERT(false); } }
/* Normalize data: scale the N-sample buffer in place so its peak
 * magnitude becomes 1.0.
 *
 * data_time: buffer of N samples (modified in place via amplify()).
 * Returns the peak magnitude found BEFORE scaling.
 */
float normalize(float * data_time) {
    float max = 0.0f;
    /* Track the peak squared magnitude; squaring avoids a per-sample
     * fabs() and is monotonic for the comparison. */
    for(int i = 0; i < N; i++) {
        float data = data_time[i];
        max = MAX(max, data * data);
    }
    max = sqrt(max);
    /* Guard against an all-silent buffer: amplifying by 1/0 would fill
     * the buffer with inf/NaN. A zero buffer is left untouched. */
    if (max > 0.0f) {
        amplify(data_time, 1.0f/max);
    }
    return max;
}
/* Read up to `bytes` bytes of interleaved stereo float samples from the
 * wrapped source, applying fade (volume ramp), panning and volume
 * processing in place before copying them to `buffer`.
 *
 * Returns the number of bytes written, 0 on clean end-of-stream, or -1
 * on error / after end-of-stream was already reported.
 *
 * Bug fix vs. previous revision: the panning-amplify factor used the
 * integer abs() on the float `pan` (range assumed -1..1 from the
 * surrounding math — TODO confirm against the setter), which truncated
 * every fractional pan to 0; replaced with a float absolute value.
 */
static int audiosourcefadepanvol_Read(struct audiosource* source, char* buffer, unsigned int bytes) {
    struct audiosourcefadepanvol_internaldata* idata = source->internaldata;
    if (idata->eof) {
        /* EOF already delivered once; every later read is an error. */
        return -1;
    }
    unsigned int byteswritten = 0;
    while (bytes > 0) {
        /* See how many stereo samples we need at minimum to satisfy the
         * request (round up to whole samples). */
        int stereosamples = bytes / (sizeof(float) * 2);
        if (stereosamples * sizeof(float) * 2 < bytes) {
            stereosamples++;
        }

        /* Fetch new unprocessed samples from the wrapped source into the
         * staging buffer, one stereo frame per read. */
        int unprocessedstart = idata->processedsamplesbytes;
        if (!idata->sourceeof) {
            while (idata->processedsamplesbytes + sizeof(float) * 2 <= sizeof(idata->processedsamplesbuf) && stereosamples > 0) {
                int i = idata->source->read(idata->source, idata->processedsamplesbuf + idata->processedsamplesbytes, sizeof(float) * 2);
                if (i < (int)sizeof(float)*2) {
                    if (i < 0) {
                        /* Read function returned an error: remember to
                         * report -1 once our buffered data runs out. */
                        idata->returnerroroneof = 1;
                    }
                    idata->sourceeof = 1;
                    break;
                } else {
                    idata->processedsamplesbytes += sizeof(float)*2;
                }
                stereosamples--;
            }
        }

        /* Process the freshly fetched samples in place. */
        unsigned int i = unprocessedstart;
        float faderange = (-idata->fadesamplestart + idata->fadesampleend);
        float fadeprogress = idata->fadesampleend;
        while ((int)i <= ((int)idata->processedsamplesbytes - ((int)sizeof(float) * 2))) {
            float leftchannel = *((float*)((char*)idata->processedsamplesbuf+i));
            float rightchannel = *((float*)((float*)((char*)idata->processedsamplesbuf+i))+1);

            if (idata->fadesamplestart < 0 || idata->fadesampleend > 0) {
                /* A fade is active: interpolate the volume along the
                 * remaining fade progress. */
                idata->vol = idata->fadevaluestart + (idata->fadevalueend - idata->fadevaluestart)*(1 - fadeprogress/faderange);
                /* Advance fade progress by one sample. */
                idata->fadesamplestart--;
                idata->fadesampleend--;
                fadeprogress = idata->fadesampleend;
                if (idata->fadesampleend < 0) {
                    /* Fade ended: pin the final volume and clear state. */
                    idata->vol = idata->fadevalueend;
                    idata->fadesamplestart = 0;
                    idata->fadesampleend = 0;
                    if (idata->terminateafterfade) {
                        idata->sourceeof = 1;
                        i = idata->processedsamplesbytes; /* stop processing */
                    }
                }
            }

            /* Apply volume. */
            if (!idata->noamplify) {
                leftchannel = amplify(leftchannel, idata->vol);
                rightchannel = amplify(rightchannel, idata->vol);
            } else {
                leftchannel *= idata->vol;
                rightchannel *= idata->vol;
            }

            /* Apply panning: attenuate the channel we pan away from. */
            if (idata->pan < 0) {
                leftchannel *= (1+idata->pan);
            }
            if (idata->pan > 0) {
                rightchannel *= (1-idata->pan);
            }

            /* Amplify the remaining channel when closer to the edges.
             * FIX: previously used integer abs(), which truncated the
             * float pan to 0 or 1 and disabled this for fractional pans. */
            float panningamplifyfactor = (idata->pan >= 0 ? idata->pan : -idata->pan);
            float amplifyamount = 1.3;
            if (!idata->noamplify) {
                if (idata->pan > 0) {
                    leftchannel = amplify(leftchannel, 1 + panningamplifyfactor * (amplifyamount-1));
                } else {
                    rightchannel = amplify(rightchannel, 1 + panningamplifyfactor * (amplifyamount-1));
                }
            }

            /* Write the processed floats back into the staging buffer. */
            memcpy(idata->processedsamplesbuf+i, &leftchannel, sizeof(float));
            memcpy(idata->processedsamplesbuf+i+sizeof(float), &rightchannel, sizeof(float));
            i += sizeof(float)*2;
        }

        /* Hand processed samples back to the caller. */
        unsigned int returnbytes = bytes;
        if (returnbytes > idata->processedsamplesbytes) {
            returnbytes = idata->processedsamplesbytes;
        }
        if (returnbytes == 0) {
            if (byteswritten == 0) {
                /* Nothing buffered and nothing written: end of stream. */
                idata->eof = 1;
                if (idata->returnerroroneof) {
                    return -1;
                }
                return 0;
            } else {
                return byteswritten;
            }
        } else {
            byteswritten += returnbytes;
            memcpy(buffer, idata->processedsamplesbuf, returnbytes);
            buffer += returnbytes;
            bytes -= returnbytes;
        }

        /* Shift away the processed & returned samples. */
        if (returnbytes > 0) {
            if (idata->processedsamplesbytes - returnbytes > 0) {
                memmove(idata->processedsamplesbuf, idata->processedsamplesbuf + returnbytes, sizeof(idata->processedsamplesbuf) - returnbytes);
            }
            idata->processedsamplesbytes -= returnbytes;
        }
    }
    return byteswritten;
}
/**
 * colorMagnify - color magnification
 *
 * Eulerian color magnification pipeline: spatially filter every frame
 * (Gaussian pyramid), temporally band-pass the whole sequence at once,
 * amplify, then add the magnified signal back onto each source frame
 * and write the result to a temp video file.
 */
void VideoProcessor::colorMagnify() {
    // set filter
    setSpatialFilter(GAUSSIAN);
    setTemporalFilter(IDEAL);
    // create a temp file
    createTemp();
    // current frame
    cv::Mat input;
    // output frame
    cv::Mat output;
    // motion image
    cv::Mat motion;
    // temp image
    cv::Mat temp;
    // video frames
    std::vector<cv::Mat> frames;
    // down-sampled frames
    std::vector<cv::Mat> downSampledFrames;
    // filtered frames
    std::vector<cv::Mat> filteredFrames;
    // concatenate image of all the down-sample frames
    cv::Mat videoMat;
    // concatenate filtered image
    cv::Mat filtered;

    // if no capture device has been set
    if (!isOpened())
        return;
    // set the modify flag to be true
    modify = true;
    // is processing
    stop = false;
    // save the current position so we can restore it at the end
    long pos = curPos;
    // jump to the first frame
    jumpTo(0);

    // 1. spatial filtering: keep each float frame plus the smallest
    //    pyramid level of its Gaussian pyramid
    while (getNextFrame(input) && !isStop()) {
        input.convertTo(temp, CV_32FC3);
        frames.push_back(temp.clone());
        // spatial filtering
        std::vector<cv::Mat> pyramid;
        spatialFilter(temp, pyramid);
        downSampledFrames.push_back(pyramid.at(levels-1));
        // update process
        std::string msg= "Spatial Filtering...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (isStop()){
        // user cancelled: reset and bail out
        emit closeProgressDialog();
        fnumber = 0;
        return;
    }
    emit closeProgressDialog();

    // 2. concat all the frames into a single large Mat
    // where each column is a reshaped single frame
    // (for processing convenience)
    concat(downSampledFrames, videoMat);

    // 3. temporal filtering
    temporalFilter(videoMat, filtered);

    // 4. amplify color motion
    amplify(filtered, filtered);

    // 5. de-concat the filtered image into filtered frames
    deConcat(filtered, downSampledFrames.at(0).size(), filteredFrames);

    // 6. amplify each frame
    // by adding frame image and motions
    // and write into video
    fnumber = 0;
    for (int i=0; i<length-1 && !isStop(); ++i) {
        // up-sample the motion image back to full resolution
        upsamplingFromGaussianPyramid(filteredFrames.at(i), levels, motion);
        resize(motion, motion, frames.at(i).size());
        temp = frames.at(i) + motion;
        output = temp.clone();
        double minVal, maxVal;
        minMaxLoc(output, &minVal, &maxVal); //find minimum and maximum intensities
        // stretch the intensity range to [0,255] before converting to 8-bit
        output.convertTo(output, CV_8UC3, 255.0/(maxVal - minVal), -minVal * 255.0/(maxVal - minVal));
        tempWriter.write(output);
        std::string msg= "Amplifying...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (!isStop()) {
        emit revert();
    }
    emit closeProgressDialog();

    // release the temp writer
    tempWriter.release();

    // change the video to the processed video
    setInput(tempFile);

    // jump back to the original position
    jumpTo(pos);
}
/**
 * motionMagnify - eulerian motion magnification
 *
 * Per-frame pipeline: convert to Lab, build a Laplacian pyramid, run an
 * IIR temporal filter on every pyramid level, amplify each band with a
 * wavelength-dependent gain, reconstruct the motion image, attenuate
 * chrominance, and add the motion back onto the frame. The processed
 * video is written to a temp file which then replaces the input.
 */
void VideoProcessor::motionMagnify() {
    // set filter
    setSpatialFilter(LAPLACIAN);
    setTemporalFilter(IIR);
    // create a temp file
    createTemp();
    // current frame
    cv::Mat input;
    // output frame
    cv::Mat output;
    // motion image
    cv::Mat motion;
    std::vector<cv::Mat> pyramid;
    std::vector<cv::Mat> filtered;

    // if no capture device has been set
    if (!isOpened())
        return;
    // set the modify flag to be true
    modify = true;
    // is processing
    stop = false;
    // save the current position so we can restore it at the end
    long pos = curPos;
    // jump to the first frame
    jumpTo(0);

    while (!isStop()) {
        // read next frame if any
        if (!getNextFrame(input))
            break;
        input.convertTo(input, CV_32FC3, 1.0/255.0f);
        // 1. convert to Lab color space
        cv::cvtColor(input, input, CV_BGR2Lab);
        // 2. spatial filtering one frame
        cv::Mat s = input.clone();
        spatialFilter(s, pyramid);
        // 3. temporal filtering one frame's pyramid
        // and amplify the motion
        if (fnumber == 0){
            // is first frame: seed the IIR low-pass state, no filtering yet
            lowpass1 = pyramid;
            lowpass2 = pyramid;
            filtered = pyramid;
        } else {
            for (int i=0; i<levels; ++i) {
                curLevel = i;
                temporalFilter(pyramid.at(i), filtered.at(i));
            }
            // amplify each spatial frequency bands
            // according to Figure 6 of paper
            cv::Size filterSize = filtered.at(0).size();
            int w = filterSize.width;
            int h = filterSize.height;
            delta = lambda_c/8.0/(1.0+alpha);
            // the factor to boost alpha above the bound
            // (for better visualization)
            exaggeration_factor = 2.0;
            // compute the representative wavelength lambda
            // for the lowest spatial frequency band of Laplacian pyramid
            lambda = sqrt(w*w + h*h)/3; // 3 is experimental constant
            // walk from coarsest to finest level; amplify() presumably
            // reads curLevel/lambda/delta set here — TODO confirm
            for (int i=levels; i>=0; i--) {
                curLevel = i;
                amplify(filtered.at(i), filtered.at(i));
                // go one level down on pyramid
                // representative lambda will reduce by factor of 2
                lambda /= 2.0;
            }
        }
        // 4. reconstruct motion image from filtered pyramid
        reconImgFromLaplacianPyramid(filtered, levels, motion);
        // 5. attenuate I, Q channels
        attenuate(motion, motion);
        // 6. combine source frame and motion image
        if (fnumber > 0)    // don't amplify first frame
            s += motion;
        // 7. convert back to rgb color space and CV_8UC3
        output = s.clone();
        cv::cvtColor(output, output, CV_Lab2BGR);
        output.convertTo(output, CV_8UC3, 255.0, 1.0/255.0);
        // write the frame to the temp file
        tempWriter.write(output);
        // update process
        std::string msg= "Processing...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    if (!isStop()){
        emit revert();
    }
    emit closeProgressDialog();

    // release the temp writer
    tempWriter.release();

    // change the video to the processed video
    setInput(tempFile);

    // jump back to the original position
    jumpTo(pos);
}