int getFirePixelNumber(Mat aFrame)
{
    const int ROI_WIDTH = 40;
    const int ROI_HEIGHT = 30;

    unsigned int currentWidth = 0, currentHeight = 0;
    unsigned int width, height;
    std::vector<std::vector<cv::Point> > contours;
    //Mat roi;
    Rect roi;

    width = aFrame.cols;
    height = aFrame.rows;

    Mat YCrCbFrame;
    Mat YChannel, CrChannel, CbChannel;
    Mat Y_Cb, Cr_Cb;
    Mat colorMask;

    // Check for an empty input frame.
    if(aFrame.empty())
    {
        return -1;
    }

    //---------------detect moving pixels----------//
    //       using BackgroundSubtractorMOG2        //
    //----------------------------------------------//
    bg.operator()(aFrame, front);
    bg.getBackgroundImage(back);
    //cv::erode(front, front, cv::Mat());
    //cv::dilate(front, front, cv::Mat());
    cv::medianBlur(front, front, 5);

    cv::findContours(front, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    std::vector<std::vector<cv::Point> > contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    vector<Point2f> center(contours.size());
    vector<float> radius(contours.size());

    for(unsigned int i = 0; i < contours.size(); i++)
    {
        cv::approxPolyDP(contours[i], contours_poly[i], 3.0, true);
        boundRect[i] = boundingRect(Mat(contours_poly[i]));
        cv::minEnclosingCircle(contours_poly[i], center[i], radius[i]);
    }

    // Fill the moving-object contours so they form a solid motion mask.
    for(unsigned int i = 0; i < contours.size(); i++)
    {
        Scalar color = Scalar(255, 255, 255);
        // params: image, contours, contourIdx, color, thickness
        drawContours(front, contours_poly, i, color, CV_FILLED, 8, vector<Vec4i>(), 0, Point());
    }

    //----detect fire-colored pixels----//
    //-------------------------------------------------------------------//
    // a pixel is fire-colored when                                      //
    //   valueY  > valueCb &&                                            //
    //   valueCr > valueCb &&                                            //
    //   (valueY > meanY && valueCr > meanCr && valueCb < meanCb)        //
    //-------------------------------------------------------------------//

    // Get the YCrCb channels.
    cvtColor(aFrame, YCrCbFrame, CV_BGR2YCrCb);
    vector<Mat> channels(3);
    split(YCrCbFrame, channels);
    YChannel = channels[0];
    CrChannel = channels[1];
    CbChannel = channels[2];

    // Calculate the mean of the 3 channels (for further use):
    // unsigned char Y_mean, Cr_mean, Cb_mean;
    // Y_mean  = (unsigned char)mean(YChannel)[0];
    // Cr_mean = (unsigned char)mean(CrChannel)[0];
    // Cb_mean = (unsigned char)mean(CbChannel)[0];

    colorMask = Mat(aFrame.rows, aFrame.cols, CV_8UC1);
    Y_Cb  = Mat(aFrame.rows, aFrame.cols, CV_8UC1); // YChannel minus CbChannel
    Cr_Cb = Mat(aFrame.rows, aFrame.cols, CV_8UC1); // CrChannel minus CbChannel

    subtract(YChannel, CbChannel, Y_Cb);
    threshold(Y_Cb, Y_Cb, 10, 255, THRESH_BINARY);
    subtract(CrChannel, CbChannel, Cr_Cb);
    threshold(Cr_Cb, Cr_Cb, 10, 255, THRESH_BINARY);

    // colorMask = front & Y_Cb & Cr_Cb
    bitwise_and(front, Y_Cb, colorMask);
    bitwise_and(colorMask, Cr_Cb, colorMask);

    // Highlight every ROI block that contains enough fire-colored moving pixels.
    // The loop bounds stop before the ROI would run off the frame edge.
    for(currentWidth = 0; currentWidth + ROI_WIDTH <= width; currentWidth += ROI_WIDTH)
    {
        for(currentHeight = 0; currentHeight + ROI_HEIGHT <= height; currentHeight += ROI_HEIGHT)
        {
            roi = Rect(currentWidth, currentHeight, ROI_WIDTH, ROI_HEIGHT);
            cv::Mat testArea = colorMask(roi);
            int fireCount = countNonZero(testArea);
            if(fireCount > 10)
            {
                cv::Mat roi_draw = aFrame(roi);
                cv::Mat color(roi_draw.size(), CV_8UC3, cv::Scalar(0, 125, 125));
                double alpha = 0.5;
                cv::addWeighted(color, alpha, roi_draw, 1.0 - alpha, 0.0, roi_draw);
            }
        }
    }

    int fireCount = countNonZero(colorMask);

    cvtColor(front, front, CV_GRAY2BGR);
    cvtColor(Y_Cb, Y_Cb, CV_GRAY2BGR);
    cvtColor(Cr_Cb, Cr_Cb, CV_GRAY2BGR);
    cvtColor(colorMask, colorMask, CV_GRAY2BGR);

    char wName[25];
    sprintf(wName, "Frames");
    cvShowManyImages(wName, aFrame.cols, aFrame.rows, 5,
                     (unsigned char*)aFrame.data, (unsigned char*)front.data,
                     (unsigned char*)Y_Cb.data, (unsigned char*)Cr_Cb.data,
                     (unsigned char*)colorMask.data);
    // imshow(wName, frame);

    // if(fireCount > fireThreshold)
    // {
    //     // count the frames in which the number of fire pixels exceeds the threshold
    //     std::cout << "Fired" << std::endl;
    // }
    // else
    // {
    //     std::cout << "Not fired" << std::endl;
    // }

    return fireCount;
}
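// Assumed context for getFirePixelNumber(): it relies on file-scope state that is not
// shown in the snippet. A minimal sketch (OpenCV 2.x API) of declarations that would
// sit above the function, plus a hypothetical per-frame driver. cvShowManyImages() is
// a user-supplied display helper rather than an OpenCV function, and fireThreshold in
// the commented-out block would be another such global.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

BackgroundSubtractorMOG2 bg;   // background model, updated on every call
Mat front, back;               // foreground mask and background image

// Hypothetical driver:
// VideoCapture cap("fire.avi");
// Mat frame;
// while (cap.read(frame)) {
//     if (getFirePixelNumber(frame) < 0) break;   // empty frame
//     if (waitKey(30) == 27) break;               // Esc to quit
// }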
void WayFinderApp::update()
{
    if(getElapsedFrames() % FRAME_COUNT_THRESHOLD == 0) {
        detected = false;

        // TODO: Consider converting capture to grayscale or blurring then thresholding to improve performance.
        if(capture && capture->checkNewFrame()) {
            frame = toOcv(capture->getSurface());
            //cv::Mat frameGray, frameBlurred, frameThresh, foreGray, backGray;
            //cvtColor(frame, frameGray, CV_BGR2GRAY);
            int blurAmount = 10;
            //cv::blur(frame, frameBlurred, cv::Size(blurAmount, blurAmount));
            //threshold(frameBlurred, frameThresh, 100, 255, CV_THRESH_BINARY);

            // Get all contours.
            //bg.operator()(frameThresh, fore);
            bg.operator()(frame, fore);
            bg.getBackgroundImage(back);
            cv::erode(fore, fore, cv::Mat());
            cv::dilate(fore, fore, cv::Mat());
            cv::findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

            // Get largest contour: http://stackoverflow.com/questions/15012073/opencv-draw-draw-contours-of-2-largest-objects
            unsigned largestIndex = 0;
            unsigned largestContour = 0;
            for(unsigned i = 0; i < contours.size(); i++) {
                if(contours[i].size() > largestContour) {
                    largestContour = contours[i].size();
                    largestIndex = i;
                }
            }

            vector<std::vector<cv::Point>> hack;
            cv::Rect rect;
            cv::Point center;

            if(contours.size() > 0) {
                hack.push_back(contours[largestIndex]);

                // Find the bounding rectangle of the largest contour.
                rect = boundingRect(contours[largestIndex]);

                // Make sure the blob is large enough to be track-worthy.
                println("Rect area = " + boost::lexical_cast<std::string>(rect.area()));
                if(rect.area() >= 5000) { // TODO: Tweak this value.
                    // Get the center of the rectangle.
                    center = cv::Point(
                        rect.x + (rect.width / 2),
                        rect.y + (rect.height / 2)
                    );

                    // Show guide.
                    spotlightCenter2D.x = (float)center.x;
                    spotlightCenter2D.y = (float)center.y;
                    spotlightCenter3D.x = (float)center.x;
                    spotlightCenter3D.y = (float)center.y;
                    //spotlightRadius = (rect.width + rect.y) / 2;
                    detected = true;
                }
            }

            // When debug mode is off, the background should be black.
            if(debugView) {
                if(contours.size() > 0) {
                    cv::drawContours(frame, contours, -1, cv::Scalar(0, 0, 255), 2);
                    cv::drawContours(frame, hack, -1, cv::Scalar(255, 0, 0), 2);
                    rectangle(frame, rect, cv::Scalar(0, 255, 0), 3);
                    circle(frame, center, 10, cv::Scalar(0, 255, 0), 3);
                }

                mTexture = gl::Texture(fromOcv(frame));
            }
        }

        // TODO: Create control panel for all inputs.
    }
}
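// update() above depends on members declared elsewhere in the WayFinderApp class.
// A rough sketch of what that header likely contains (Cinder 0.8-era API assumed;
// the exact member types, the FRAME_COUNT_THRESHOLD value, and the println() logging
// helper — presumably a wrapper around ci::app::console() — are guesses, not the
// original declarations):
//
// class WayFinderApp : public ci::app::AppNative {
//     static const int FRAME_COUNT_THRESHOLD = 5;   // assumed: process every Nth frame
//     ci::CaptureRef                       capture;          // camera input
//     cv::Mat                              frame, fore, back; // frame, foreground mask, background
//     std::vector<std::vector<cv::Point>>  contours;
//     cv::BackgroundSubtractorMOG2         bg;               // shared background model
//     ci::gl::Texture                      mTexture;         // debug-view texture
//     ci::Vec2f                            spotlightCenter2D;
//     ci::Vec3f                            spotlightCenter3D;
//     bool                                 detected, debugView;
// };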
extern "C" void getbg(int rows, int cols, unsigned char *bgD)
{
    // Wrap the caller-provided buffer (rows x cols, 3-channel 8-bit) in a Mat header
    // and fill it with the current background image from the MOG2 model.
    cv::Mat bg = cv::Mat(rows, cols, CV_8UC3, bgD);
    mog.getBackgroundImage(bg);
}
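// Minimal caller sketch (assumed usage, not part of the original source): the caller
// owns a rows*cols*3 byte buffer and getbg() fills it with the background estimate.
// Note that getBackgroundImage() only reuses the wrapped buffer when the Mat header
// already matches the model's size and type; with mismatched dimensions OpenCV
// reallocates internally and the caller's buffer is left untouched.
//
// std::vector<unsigned char> buffer(rows * cols * 3);
// getbg(rows, cols, buffer.data());
// cv::Mat background(rows, cols, CV_8UC3, buffer.data());  // view over the filled buffer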
/**
 * Detect people using background segmentation and contours
 * BSN2013
 */
static vector<cv::Mat> detectPeopleSegment(cv::Mat image)
{
    vector<cv::Mat> points;

    // convert to HSV
    cv::Mat imageHSV;
    cv::cvtColor(image, imageHSV, CV_BGR2HSV);
    vector<cv::Mat> imageHSVSlices;
    cv::split(imageHSV, imageHSVSlices);
    //cv::threshold(imageHSVSlices[0], imageHSVSlices[0], 160, 200, cv::THRESH_BINARY);

    // background subtraction
    cv::Mat fgMask;
    bgmodel(image, fgMask, learningRate);

    // tidy the foreground mask
    cv::GaussianBlur(fgMask, fgMask, cv::Size(1, 1), 0, 0);
    int erosionSize = 5;
    cv::Mat element = cv::getStructuringElement(
        cv::MORPH_ELLIPSE,
        cv::Size(2*erosionSize + 1, 2*erosionSize + 1),
        cv::Point(erosionSize, erosionSize));
    cv::dilate(fgMask, fgMask, element);
    cv::erode(fgMask, fgMask, element);
    cv::erode(fgMask, fgMask, element);
    cv::dilate(fgMask, fgMask, element);

    cv::Mat background;
    bgmodel.getBackgroundImage(background);
    //cv::imshow("back", background);

    // subtract background from the original image
    cv::Mat foreground;
    //cv::not
    cv::threshold(fgMask, fgMask, 128, 255, cv::THRESH_BINARY);
    image.copyTo(foreground, fgMask);
    cv::imshow("fg", fgMask);
    cv::imshow("fore", foreground);

    // edge information
    int lowThreshold = 100;
    int ratio = 3;
    int kernelSize = 3;
    cv::Mat imageCanny;
    cv::Canny(foreground, imageCanny, lowThreshold, lowThreshold*ratio, kernelSize);

    // weight map and weighted-gradient image:
    // apply a Gaussian filter (size = 9, sigma = 1.5) to the edge information from the foreground image

    // weight map
    cv::Mat weightMap;
    cv::GaussianBlur(imageCanny, weightMap, cv::Size(9, 9), 1.5, 1.5);

    // gradient image
    cv::Mat imageGray;
    cv::cvtColor(image, imageGray, CV_BGR2GRAY);
    cv::Mat imageGradient;
    cv::Mat imageGradientX;
    cv::Mat imageGradientY;
    cv::Mat imageAbsGradientX;
    cv::Mat imageAbsGradientY;
    cv::Sobel(imageGray, imageGradientX, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
    cv::Sobel(imageGray, imageGradientY, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
    cv::convertScaleAbs(imageGradientX, imageAbsGradientX);
    cv::convertScaleAbs(imageGradientY, imageAbsGradientY);
    cv::addWeighted(imageAbsGradientX, 0.5, imageAbsGradientY, 0.5, 0, imageGradient);

    // weighted-gradient image
    cv::Mat weightedGradient;
    cv::Mat colourWeightMap;
    weightedGradient = imageGradient.mul(weightMap);

    // object (body) contours
    vector< vector<cv::Point> > objectContours;
    vector<cv::Vec4i> objectHierarchy;
    cv::findContours(fgMask, objectContours, objectHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

    // bodies and heads:
    // store the index of each detected body contour and the position of its head
    vector<int> bodies;
    vector<cv::Point2f> headCenter;
    vector<float> headRadius;

    // detect big (possibly merged) bodies
    for (int i = 0; i < objectContours.size(); i++) {
        // if the contour is too big
        if (getContourRadius(objectContours[i]) > BODYSIZE*2) {
            // increment merged counter
            numMerged++;
            cout << "Merged object" << endl;

            // TODO cut down to size
            // TODO consider just slicing it
            // process the contour by eroding it
            cv::Mat largeContour = cv::Mat::zeros(imageCanny.size(), CV_8UC3);
            drawContours(largeContour, objectContours, i, colourRed, CV_FILLED, 8, objectHierarchy, 0, cv::Point());

            // erode until the large contour splits into 2+
            vector< vector<cv::Point> > largeContours;
            vector<cv::Vec4i> largeHierarchy;
            do {
                cv::erode(largeContour, largeContour, element);
                cv::Canny(largeContour, largeContour, lowThreshold, lowThreshold*ratio, kernelSize);
                cv::findContours(largeContour, largeContours, largeHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
            } while (largeContours.size() == 1);
            // || (largeContours.size() == 1 && getContourRadius(largeContours[0]) >= BODYSIZE));
            // TODO potential infinite loop here

            if (largeContours.size() > 1) {
                // increment split counter
                numSplit++;
                cout << "Split object" << endl;
            } else if (largeContours.size() == 1) {
                // increment unsplit counter
                numUnsplit++;
                cout << "No split - size still 1" << endl;
            }

            for (int j = 0; j < largeContours.size(); j++) {
                objectContours.push_back(largeContours[j]);
            }
        }
    }

    cv::Mat bodiesHeads = cv::Mat::zeros(image.size(), CV_8UC3);

    // detect bodies
    for (int i = 0; i < objectContours.size(); i++) {
        if (isBody(objectContours[i])) {
            // predict head position
            cv::Point2f defaultHeadCenter;

            // body bounding box
            cv::RotatedRect minBodyRect;
            minBodyRect = cv::minAreaRect(cv::Mat(objectContours[i]));

            // body bounding circle radius
            float headOffset = getContourRadius(objectContours[i]); //*0.7;

            // image centre
            cv::Point2f imageCentre(image.size().width/2, image.size().height/2);

            // find gradient
            float m = (minBodyRect.center.y - imageCentre.y)/(minBodyRect.center.x - imageCentre.x);

            // find angle
            double angle;
            if (minBodyRect.center.x <= imageCentre.x && minBodyRect.center.y < imageCentre.y) {
                // top left quad
                angle = atan((imageCentre.x - minBodyRect.center.x)/(imageCentre.y - minBodyRect.center.y));
            } else if (minBodyRect.center.x <= imageCentre.x) {
                // bottom left quad
                angle = PI - atan((imageCentre.x - minBodyRect.center.x)/(minBodyRect.center.y - imageCentre.y));
            } else if (minBodyRect.center.x > imageCentre.x && minBodyRect.center.y > imageCentre.y) {
                // bottom right quad
                angle = PI + atan((minBodyRect.center.x - imageCentre.x)/(minBodyRect.center.y - imageCentre.y));
            } else {
                // top right quad
                angle = 2*PI - atan((minBodyRect.center.x - imageCentre.x)/(imageCentre.y - minBodyRect.center.y));
            }

            // move the predicted head centre inwards until it falls inside the body contour
            do {
                headOffset *= 0.7;
                defaultHeadCenter = cv::Point2f(minBodyRect.center.x - headOffset * sin(angle),
                                                minBodyRect.center.y - headOffset * cos(angle));
            } while (cv::pointPolygonTest(objectContours[i], defaultHeadCenter, true) <= 0 && headOffset >= 1);

            // store the body and head if the body is big enough for a head
            if (headOffset >= 1) {
                // store body
                bodies.push_back(i);
                //angle = angle * 180/PI;
                headCenter.push_back(defaultHeadCenter);
                headRadius.push_back(0); // default head size

                // get detailed contours of the body
                cv::Mat bodyMask = cv::Mat::zeros(image.size(), CV_8UC1);
                drawContours(bodyMask, objectContours, i, colourWhite, CV_FILLED, 8, objectHierarchy, 0, cv::Point());
                //cv::floodFill(bodyMask, cv::Point2i(0, 0), cv::Scalar(1));
                cv::Mat body;
                image.copyTo(body, bodyMask);
                //cv::imshow("B", body);

                // body edges
                cv::Mat bodyCanny;
                cv::Canny(body, bodyCanny, lowThreshold, lowThreshold*ratio, kernelSize);

                // weight map
                cv::Mat bodyWeightMap;
                cv::GaussianBlur(bodyCanny, bodyWeightMap, cv::Size(9, 9), 1.5, 1.5);

                // gradient image
                cv::Mat bodyGray;
                cv::cvtColor(body, bodyGray, CV_BGR2GRAY);
                cv::Mat bodyGradient;
                cv::Mat bodyGradientX;
                cv::Mat bodyGradientY;
                cv::Mat bodyAbsGradientX;
                cv::Mat bodyAbsGradientY;
                cv::Sobel(bodyGray, bodyGradientX, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
                cv::Sobel(bodyGray, bodyGradientY, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
                cv::convertScaleAbs(bodyGradientX, bodyAbsGradientX);
                cv::convertScaleAbs(bodyGradientY, bodyAbsGradientY);
                cv::addWeighted(bodyAbsGradientX, 0.5, bodyAbsGradientY, 0.5, 0, bodyGradient);

                // weighted-gradient image
                cv::Mat bodyWeightedGradient;
                bodyWeightedGradient = bodyGradient.mul(bodyWeightMap);

                // body contours
                vector< vector<cv::Point> > bodyContours;
                vector<cv::Vec4i> bodyHierarchy;
                cv::findContours(bodyWeightedGradient, bodyContours, bodyHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

                // detect head
                for (int j = 0; j < bodyContours.size(); j++) {
                    // process the contour by eroding it
                    cv::Mat aContour = cv::Mat::zeros(image.size(), CV_8UC3);
                    drawContours(aContour, bodyContours, j, colourWhite, CV_FILLED, 8, bodyHierarchy, 0, cv::Point());
                    drawContours(bodiesHeads, bodyContours, j, colourWhite, 2, 8, bodyHierarchy, 0, cv::Point());
                    cv::erode(aContour, aContour, element);
                    //cv::erode(aContour, aContour, element);
                    //cv::dilate(aContour, aContour, element);
                    cv::Canny(aContour, aContour, lowThreshold, lowThreshold*ratio, kernelSize);

                    vector< vector<cv::Point> > subContours;
                    vector<cv::Vec4i> subHierarchy;
                    cv::findContours(aContour, subContours, subHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

                    for (int k = 0; k < subContours.size(); k++) {
                        //cv::drawContours(imageContours, subContours, k, cv::Scalar(0, 255, 0), 2, 8, subHierarchy, 0, cv::Point());
                        if (isHead(subContours[k], objectContours[i])) {
                            vector<cv::Point> contourPoly;
                            cv::Point2f center;
                            float radius;
                            if (subContours.size() > 1) {
                                approxPolyDP(cv::Mat(subContours[k]), contourPoly, 3, true);
                            } else {
                                approxPolyDP(cv::Mat(bodyContours[j]), contourPoly, 3, true);
                            }
                            minEnclosingCircle((cv::Mat)contourPoly, center, radius);

                            float distanceOld = euclideanDistance(headCenter[headCenter.size() - 1], defaultHeadCenter);
                            float distanceNew = euclideanDistance(center, defaultHeadCenter);
                            if (headRadius[headRadius.size() - 1] == 0 || (distanceOld > 0 && distanceNew < distanceOld)) {
                                // store the first detected head, or replace it if this is a better detection
                                headCenter[headCenter.size() - 1] = center;
                                headRadius[headRadius.size() - 1] = radius;
                            }
                        }
                    }
                }

                if (headRadius[headRadius.size() - 1] == 0) {
                    headRadius[headRadius.size() - 1] = 10;
                }
            }
        }
    }

    // draw bodies and heads
    //cv::Mat bodiesHeads = cv::Mat::zeros(image.size(), CV_8UC3);
    for (int i = 0; i < bodies.size(); i++) {
        // draw body
        cv::Scalar colour = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
        drawContours(foreground, objectContours, bodies[i], colour, 2, 8, objectHierarchy, 0, cv::Point());
        circle(foreground, headCenter[i], (int)headRadius[i], colour, 2, 8, 0);

        // body bounding box
        cv::RotatedRect bodyRect;
        bodyRect = cv::minAreaRect(cv::Mat(objectContours[bodies[i]]));

        // output
        cout << imageNum;
        cout << "," << headCenter[i].x << "," << headCenter[i].y << "," << headRadius[i]; // head info
        cout << "," << bodyRect.center.x << "," << bodyRect.center.y;
        cout << "," << cv::contourArea(objectContours[bodies[i]]);
        cout << endl;

        // output points
        cv::Mat point(2, 1, CV_32FC1);
        point.at<float>(0) = headCenter[i].x;
        point.at<float>(1) = headCenter[i].y;
        points.push_back(point);
    }

    // increment frame counter
    numFrames++;

    cv::imshow("Original", image);
    //cv::imshow("Hue", imageHSVSlices[0]);
    //cv::imshow("Saturation", imageHSVSlices[1]);
    //cv::imshow("Value", imageHSVSlices[2]);
    cv::imshow("fgMask", fgMask);
    cv::imshow("Foreground", foreground);
    cv::imshow("Canny", imageCanny);
    cv::imshow("WeightMap", weightMap);
    cv::imshow("Gradient Image", imageGradient);
    cv::imshow("Weighted-Gradient Image", weightedGradient);
    //cv::imshow("Contours", imageContours);
    cv::imshow("Body & Head", bodiesHeads);
    cvWaitKey(delay); //5

    return points;
}
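// detectPeopleSegment() depends on several file-scope helpers and globals that are not
// shown above. The sketch below lists plausible declarations inferred from how the names
// are used; the threshold values and helper bodies are assumptions, not the original code.
#include <opencv2/opencv.hpp>
#include <cmath>

static cv::BackgroundSubtractorMOG2 bgmodel;          // shared background model
static double learningRate = 0.001;                   // assumed MOG2 learning rate
static const float BODYSIZE = 40.0f;                  // assumed body-radius threshold (pixels)
static const double PI = 3.14159265358979323846;
static const cv::Scalar colourRed(0, 0, 255), colourWhite(255, 255, 255);
static cv::RNG rng(12345);
static int numMerged = 0, numSplit = 0, numUnsplit = 0, numFrames = 0, imageNum = 0, delay = 5;

// Plausible reading of getContourRadius(): radius of the contour's minimum enclosing circle.
static float getContourRadius(const std::vector<cv::Point> &contour)
{
    cv::Point2f center;
    float radius = 0.0f;
    cv::minEnclosingCircle(contour, center, radius);
    return radius;
}

// Plausible reading of euclideanDistance(): straight-line distance between two points.
static float euclideanDistance(const cv::Point2f &a, const cv::Point2f &b)
{
    return std::sqrt((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y));
}

// isBody(contour) and isHead(contour, bodyContour) are classification predicates whose
// criteria are not shown in this snippet; they presumably test contour size and shape.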