JNIEXPORT jintArray JNICALL Java_pris_videotest_JNIClient_detectWithReturn(
        JNIEnv *env, jclass, jintArray pixels, jint width, jint height) {
    jint *cPixels = env->GetIntArrayElements(pixels, 0);
    // Wrap the Java pixel buffer in a Mat header (no copy).
    cv::Mat imgData(height, width, CV_8UC4, (unsigned char *) cPixels);
    // Gaussian smoothing, 3x3 kernel, in place.
    cv::GaussianBlur(imgData, imgData, cv::Size(3, 3), 0);
    // Downscale: 640*480 -> m_nVideoResizeW * m_nVideoResizeH (30*40 working size).
    cv::resize(imgData, m_ResizeFrameMat,
               cv::Size(m_nVideoResizeW, m_nVideoResizeH), 0, 0, CV_INTER_LINEAR);
    // Update the MOG2 background model and fetch the foreground mask.
    m_pBGSubMOG2(m_ResizeFrameMat, foregroundMat, 0.001);
    m_ResizeFrameMat = foregroundMat;
    // Copy the 8-bit foreground mask into a jint buffer for the return value.
    std::vector<jint> result(m_nVideoResizeH * m_nVideoResizeW);
    for (int i = 0; i < m_nVideoResizeH * m_nVideoResizeW; i++) {
        result[i] = m_ResizeFrameMat.data[i];
    }
    env->ReleaseIntArrayElements(pixels, cPixels, 0);
    jintArray intArray = env->NewIntArray(m_nVideoResizeH * m_nVideoResizeW);
    env->SetIntArrayRegion(intArray, 0, m_nVideoResizeH * m_nVideoResizeW, result.data());
    return intArray;
}
JNIEXPORT jboolean JNICALL Java_pris_videotest_JNIClient_detect(
        JNIEnv *env, jclass, jbyteArray pixels, jint width, jint height) {
    jbyte *cPixels = env->GetByteArrayElements(pixels, 0);
    // Wrap the Java byte buffer in a single-channel Mat header (no copy).
    cv::Mat imgData(height, width, CV_8UC1, (unsigned char *) cPixels);
    // Gaussian smoothing, 3x3 kernel, in place.
    cv::GaussianBlur(imgData, imgData, cv::Size(3, 3), 0);
    // Downscale: 640*480 -> m_nVideoResizeW * m_nVideoResizeH (30*40 working size).
    cv::resize(imgData, m_ResizeFrameMat,
               cv::Size(m_nVideoResizeW, m_nVideoResizeH), 0, 0, CV_INTER_LINEAR);
    // Update the MOG2 background model and fetch the foreground mask.
    m_pBGSubMOG2(m_ResizeFrameMat, foregroundMat, 0.001);
    m_ResizeFrameMat = foregroundMat;
    // Count the non-zero pixels of the binary foreground mask.
    int k = 0;
    for (int i = 0; i < m_nVideoResizeH; i++) {
        for (int j = 0; j < m_nVideoResizeW; j++) {
            if (m_ResizeFrameMat.data[i * m_nVideoResizeW + j] != 0) {
                k++;
            }
        }
    }
    double k_ratio = (double) k / (double) (m_nVideoResizeW * m_nVideoResizeH);
    if (k_ratio <= 0.01) {
        env->ReleaseByteArrayElements(pixels, cPixels, 0);
        return false;
    }
    // Re-adapt the threshold when the foreground ratio jumps sharply.
    if (k_ratio / m_rFGSegThreshold > 1.5 || k_ratio / m_rFGSegThreshold < 0.79)
        m_rFGSegThreshold = k_ratio;
    // A moving video segment has been detected.
    if (k_ratio >= m_rFGSegThreshold) {
        env->ReleaseByteArrayElements(pixels, cPixels, 0);
        return true;
    }
    env->ReleaseByteArrayElements(pixels, cPixels, 0);
    return false;
}
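Both JNI functions above depend on file-scope state that is not shown. The following is a minimal sketch of what those declarations might look like, assuming the OpenCV 2.x API; the names are taken from their uses above, the 40x30 working size from the "30*40" comment, and the initial threshold value is a placeholder:

#include <jni.h>
#include <vector>
#include <opencv2/opencv.hpp>

// Hypothetical file-scope state assumed by detect() and detectWithReturn():
static const int m_nVideoResizeW = 40;            // working width (from the 30*40 comment)
static const int m_nVideoResizeH = 30;            // working height
static cv::Mat m_ResizeFrameMat;                  // downscaled input / foreground buffer
static cv::Mat foregroundMat;                     // MOG2 foreground mask
static cv::BackgroundSubtractorMOG2 m_pBGSubMOG2; // background model, learning rate 0.001 per call
static double m_rFGSegThreshold = 0.05;           // adaptive motion-ratio threshold (initial value is a guess)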
int getFirePixelNumber(Mat aFrame) {
    const int ROI_WIDTH = 40;
    const int ROI_HEIGHT = 30;
    unsigned int currentWidth = 0, currentHeight = 0;
    unsigned int width, height;
    std::vector<std::vector<cv::Point> > contours;
    Rect roi;

    width = aFrame.cols;
    height = aFrame.rows;

    Mat YCrCbFrame;
    Mat YChannel, CrChannel, CbChannel;
    Mat Y_Cb, Cr_Cb;
    Mat colorMask;

    // Check the input frame.
    if (aFrame.empty()) {
        return -1;
    }

    //---------------detect moving pixels-----------//
    // using BackgroundSubtractorMOG2               //
    //-----------------------------------------------//
    bg(aFrame, front);
    bg.getBackgroundImage(back);
    //cv::erode(front, front, cv::Mat());
    //cv::dilate(front, front, cv::Mat());
    cv::medianBlur(front, front, 5);
    cv::findContours(front, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    std::vector<std::vector<cv::Point> > contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    vector<Point2f> center(contours.size());
    vector<float> radius(contours.size());
    for (unsigned int i = 0; i < contours.size(); i++) {
        cv::approxPolyDP(contours[i], contours_poly[i], 3.0, true);
        boundRect[i] = boundingRect(Mat(contours_poly[i]));
        cv::minEnclosingCircle(contours_poly[i], center[i], radius[i]);
    }
    for (unsigned int i = 0; i < contours.size(); i++) {
        Scalar color = Scalar(255, 255, 255);
        // params: input/output image, contours, contourIdx, color, thickness
        drawContours(front, contours_poly, i, color, CV_FILLED, 8, vector<Vec4i>(), 0, Point());
    }

    //----detect fire-colored pixels----//
    //-------------------------------------------------------------------//
    // pixel = fire color when                                           //
    //   valueY > valueCb &&                                             //
    //   valueCr > valueCb &&                                            //
    //   (valueY > meanY && valueCr > meanCr && valueCb < meanCb)        //
    //-------------------------------------------------------------------//
    // Get the Y, Cr and Cb channels.
    cvtColor(aFrame, YCrCbFrame, CV_BGR2YCrCb);
    vector<Mat> channels(3);
    split(YCrCbFrame, channels);
    YChannel = channels[0];
    CrChannel = channels[1];
    CbChannel = channels[2];
    // Calculate the mean of the 3 channels (kept for further use):
    // unsigned char Y_mean, Cr_mean, Cb_mean;
    // Y_mean = (unsigned char) mean(YChannel)[0];
    // Cr_mean = (unsigned char) mean(CrChannel)[0];
    // Cb_mean = (unsigned char) mean(CbChannel)[0];

    colorMask = Mat(aFrame.rows, aFrame.cols, CV_8UC1);
    Y_Cb = Mat(aFrame.rows, aFrame.cols, CV_8UC1);  // YChannel minus CbChannel
    Cr_Cb = Mat(aFrame.rows, aFrame.cols, CV_8UC1); // CrChannel minus CbChannel
    subtract(YChannel, CbChannel, Y_Cb);
    threshold(Y_Cb, Y_Cb, 10, 255, THRESH_BINARY);
    subtract(CrChannel, CbChannel, Cr_Cb);
    threshold(Cr_Cb, Cr_Cb, 10, 255, THRESH_BINARY);
    // colorMask = front & Y_Cb & Cr_Cb
    bitwise_and(front, Y_Cb, colorMask);
    bitwise_and(colorMask, Cr_Cb, colorMask);

    // Highlight every ROI block that contains enough fire-colored pixels.
    // Note: this assumes the frame size is a multiple of the ROI size.
    for (currentWidth = 0; currentWidth < width; currentWidth += ROI_WIDTH) {
        for (currentHeight = 0; currentHeight < height; currentHeight += ROI_HEIGHT) {
            roi = Rect(currentWidth, currentHeight, ROI_WIDTH, ROI_HEIGHT);
            cv::Mat testArea = colorMask(roi);
            int fireCount = countNonZero(testArea);
            if (fireCount > 10) {
                cv::Mat roi_draw = aFrame(roi);
                cv::Mat color(roi_draw.size(), CV_8UC3, cv::Scalar(0, 125, 125));
                double alpha = 0.5;
                cv::addWeighted(color, alpha, roi_draw, 1.0 - alpha, 0.0, roi_draw);
            }
        }
    }

    int fireCount = countNonZero(colorMask);

    cvtColor(front, front, CV_GRAY2BGR);
    cvtColor(Y_Cb, Y_Cb, CV_GRAY2BGR);
    cvtColor(Cr_Cb, Cr_Cb, CV_GRAY2BGR);
    cvtColor(colorMask, colorMask, CV_GRAY2BGR);
    char wName[] = "Frames";
    cvShowManyImages(wName, aFrame.cols, aFrame.rows, 5,
                     (unsigned char *) aFrame.data, (unsigned char *) front.data,
                     (unsigned char *) Y_Cb.data, (unsigned char *) Cr_Cb.data,
                     (unsigned char *) colorMask.data);
    // imshow(wName, frame);
    // if (fireCount > fireThreshold) {
    //     // The number of fire pixels in this frame exceeds the threshold.
    //     std::cout << "Fired" << std::endl;
    // } else {
    //     std::cout << "Not fired" << std::endl;
    // }
    return fireCount;
}
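For context, getFirePixelNumber() would typically be driven by a capture loop like the sketch below. This is an assumed harness, not part of the original: bg, front and back are the file-scope MOG2 state the function uses, and fireThreshold (mentioned only in the commented-out block above) is given an arbitrary placeholder value:

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;

// File-scope state used by getFirePixelNumber() (OpenCV 2.x API):
BackgroundSubtractorMOG2 bg;
Mat front, back;

int getFirePixelNumber(Mat aFrame); // defined above

int main(int argc, char **argv) {
    VideoCapture capture(argv[1]);
    if (!capture.isOpened()) return 1;
    Mat frame;
    const int fireThreshold = 100; // placeholder value
    while (capture.read(frame)) {
        int firePixels = getFirePixelNumber(frame);
        std::cout << (firePixels > fireThreshold ? "Fired" : "Not fired") << std::endl;
        if (waitKey(30) == 27) break; // ESC quits
    }
    return 0;
}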
void WayFinderApp::update() {
    if (getElapsedFrames() % FRAME_COUNT_THRESHOLD == 0) {
        detected = false;

        // TODO: Consider converting the capture to grayscale, or blurring then thresholding, to improve performance.
        if (capture && capture->checkNewFrame()) {
            frame = toOcv(capture->getSurface());
            //cv::Mat frameGray, frameBlurred, frameThresh, foreGray, backGray;
            //cvtColor(frame, frameGray, CV_BGR2GRAY);
            //int blurAmount = 10;
            //cv::blur(frame, frameBlurred, cv::Size(blurAmount, blurAmount));
            //threshold(frameBlurred, frameThresh, 100, 255, CV_THRESH_BINARY);

            // Get all contours.
            //bg(frameThresh, fore);
            bg(frame, fore);
            bg.getBackgroundImage(back);
            cv::erode(fore, fore, cv::Mat());
            cv::dilate(fore, fore, cv::Mat());
            cv::findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

            // Get the largest contour: http://stackoverflow.com/questions/15012073/opencv-draw-draw-contours-of-2-largest-objects
            unsigned largestIndex = 0;
            unsigned largestContour = 0;
            for (unsigned i = 0; i < contours.size(); i++) {
                if (contours[i].size() > largestContour) {
                    largestContour = contours[i].size();
                    largestIndex = i;
                }
            }

            vector<std::vector<cv::Point>> hack;
            cv::Rect rect;
            cv::Point center;
            if (contours.size() > 0) {
                hack.push_back(contours[largestIndex]);

                // Find the bounding rectangle of the largest contour.
                rect = boundingRect(contours[largestIndex]);

                // Make sure the blob is large enough to be worth tracking.
                println("Rect area = " + boost::lexical_cast<std::string>(rect.area()));
                if (rect.area() >= 5000) { // TODO: Tweak this value.
                    // Get the center of the rectangle.
                    center = cv::Point(rect.x + (rect.width / 2), rect.y + (rect.height / 2));

                    // Show the guide.
                    spotlightCenter2D.x = (float) center.x;
                    spotlightCenter2D.y = (float) center.y;
                    spotlightCenter3D.x = (float) center.x;
                    spotlightCenter3D.y = (float) center.y;
                    //spotlightRadius = (rect.width + rect.y) / 2;
                    detected = true;
                }
            }

            // When debug mode is off, the background should be black.
            if (debugView) {
                if (contours.size() > 0) {
                    cv::drawContours(frame, contours, -1, cv::Scalar(0, 0, 255), 2);
                    cv::drawContours(frame, hack, -1, cv::Scalar(255, 0, 0), 2);
                    rectangle(frame, rect, cv::Scalar(0, 255, 0), 3);
                    circle(frame, center, 10, cv::Scalar(0, 255, 0), 3);
                }
                mTexture = gl::Texture(fromOcv(frame));
            }
        }

        // TODO: Create a control panel for all inputs.
    }
}
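Note that update() selects the "largest" contour by vertex count (contours[i].size()), which only approximates size: a jagged small contour can have more points than a smooth large one. If that proves unreliable, cv::contourArea is the more direct measure. A minimal self-contained sketch of that alternative (the helper name is hypothetical):

#include <vector>
#include <opencv2/opencv.hpp>

// Returns the index of the contour enclosing the largest area,
// or -1 when the list is empty.
static int largestContourByArea(const std::vector<std::vector<cv::Point>> &contours) {
    int largestIndex = -1;
    double largestArea = 0.0;
    for (size_t i = 0; i < contours.size(); i++) {
        double area = cv::contourArea(contours[i]);
        if (area >= largestArea) {
            largestArea = area;
            largestIndex = (int) i;
        }
    }
    return largestIndex;
}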
void processVideo(char* videoFilename) {
    // Create the capture object.
    IplImage *labelImg; // label image for blob extraction
    CTracker openTracker((float) 0.033, (float) 0.6, (double) 20.0, 10, 3000);
    CvTracks tracks;
    VideoCapture capture(videoFilename);
    if (!capture.isOpened()) {
        // Error opening the video input.
        cerr << "Unable to open video file: " << videoFilename << endl;
        exit(EXIT_FAILURE);
    }
    bool bInitialized = false;
    keyboard = 0; // make the first loop test well-defined
    // Read input data; ESC or 'q' quits.
    while ((char) keyboard != 'q' && (char) keyboard != 27) {
        // Read the current frame.
        if (!capture.read(frame)) {
            cerr << "Unable to read next frame." << endl;
            cerr << "Exiting..." << endl;
            exit(EXIT_FAILURE);
        }
        if (!bInitialized) {
            cv::Size frameSize(frame.cols, frame.rows);
            labelImg = cvCreateImage(frameSize, IPL_DEPTH_LABEL, 1);
            bInitialized = true;
        }

        // Update the background model.
        pMOG2(frame, fgMaskMOG2);
        // Morphological opening to remove noise.
        cv::erode(fgMaskMOG2, fgMaskMOG2, cv::Mat(), cv::Point(-1, -1), 1);
        cv::dilate(fgMaskMOG2, fgMaskMOG2, cv::Mat(), cv::Point(-1, -1), 4);

        // Step 2: blob analysis.
        CvBlobs blobs;
        IplImage iplMask = fgMaskMOG2;  // named temporaries: taking the address
        IplImage iplFrame = frame;      // of a cast temporary is not valid C++
        unsigned int result = cvLabel(&iplMask, labelImg, blobs);
        cvFilterByArea(blobs, 125, 10000);
        cvRenderBlobs(labelImg, blobs, &iplFrame, &iplFrame, CV_BLOB_RENDER_BOUNDING_BOX);
        //cvUpdateTracks(blobs, tracks, 200, 5);
        //cvRenderTracks(tracks, &iplFrame, &iplFrame, CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX | CV_TRACK_RENDER_TO_LOG);

        // Convert the blobs into the detection structure.
        vector<Detection*> detections;
        for (CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it) {
            CvBlob *blob = (*it).second;
            Detection *_detection = new Detection;
            _detection->centroid.x = blob->centroid.x;
            _detection->centroid.y = blob->centroid.y;
            _detection->brect.x = blob->minx;
            _detection->brect.y = blob->miny;
            _detection->brect.height = blob->maxy - blob->miny;
            _detection->brect.width = blob->maxx - blob->minx;
            detections.push_back(_detection);
        }

        // Step 3: hand the centroids of all detected contours to the tracker.
        // The tracker returns the trace of each track, whose values are Kalman-filtered.
        // NOTE: the tracker is assumed to take ownership of the Detection objects;
        // otherwise they must be deleted here to avoid a leak.
        if (blobs.size() > 0) {
            openTracker.Update(detections);
            int i, j;
            for (i = 0; i < openTracker.tracks.size(); i++) {
                // De-noise with a threshold: a contour that has only just appeared may be noise.
                if (openTracker.tracks[i]->trace.size() > 10) {
                    for (j = 0; j < (openTracker.tracks[i]->trace.size() - 2); j++) {
                        cv::rectangle(frame, openTracker.tracks[i]->brect, Scalar(255, 0, 0));
                        //line(fore, openTracker.tracks[i]->trace[j], openTracker.tracks[i]->trace[j + 1], Colors[openTracker.tracks[i]->track_id % 9], 1, CV_AA);
                        line(frame, openTracker.tracks[i]->trace[j], openTracker.tracks[i]->trace[j + 1], Scalar(255, 0, 0), 1, CV_AA);
                    }
                    stringstream ss;
                    ss << openTracker.tracks[i]->track_id;
                    putText(frame, ss.str(), openTracker.tracks[i]->trace[j], FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 0, 0), 1);
                }
            }
        }

        // Get the frame number and write it on the current frame.
        stringstream ss;
        rectangle(frame, cv::Point(10, 2), cv::Point(100, 20), cv::Scalar(255, 255, 255), -1);
        ss << capture.get(CV_CAP_PROP_POS_FRAMES);
        putText(frame, ss.str(), cv::Point(15, 15), FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));

        // Show the current frame and the foreground mask.
        imshow("Frame", frame);
        imshow("FG Mask MOG 2", fgMaskMOG2);
        // Get input from the keyboard.
        keyboard = waitKey(30);
    }
    // Release the capture object and the label image.
    capture.release();
    cvReleaseImage(&labelImg);
}
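processVideo() fills a custom Detection structure whose definition is not shown. From the fields accessed above it would be roughly the following; the exact member types expected by CTracker are an assumption:

#include <opencv2/opencv.hpp>

// Hypothetical definition inferred from the fields used in processVideo();
// the real structure shipped with CTracker may differ.
struct Detection {
    cv::Point2f centroid; // blob centroid handed to the Kalman tracker
    cv::Rect brect;       // bounding rectangle of the blob
};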