Example #1
void WayFinderApp::setup()
{
    println("WayFinderApp started.");

    // Load destinations from config file.
    destinations = Destination::getDestinations();
    if(destinations.size() == 0) {
        println("No destinations found, check the config file.");
        exit(EXIT_FAILURE);
    }
    println("Destinations loaded.");

    // Initialize state.
    spotlightRadius = (float)getWindowWidth() / 16.0f;
    arrowLength = (float)min(getWindowWidth(), getWindowHeight()) / 2.0f;
    spotlightCenter2D = Vec2f((float)getWindowWidth() / 2.0f, (float)getWindowHeight() / 2.0f);
    spotlightCenter3D = Vec3f((float)getWindowWidth() / 2.0f, (float)getWindowHeight() / 2.0f, 0.0f);
    detected = false;

    //capture = Capture::create(WayFinderApp::WIDTH, WayFinderApp::HEIGHT);
    capture = Capture::create(getWindowWidth(), getWindowHeight());
    capture->start();

    //bg.set("bShadowDetection", false);
    bg.set("nmixtures", 3);
    bg.setBool("detectShadows", true);

    debugView = false;
}
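
The examples on this page all revolve around the same OpenCV 2.x pattern: construct a cv::BackgroundSubtractorMOG2 once, feed it one frame at a time through its operator() (optionally with an explicit learning rate), then read back the binary foreground mask and the background estimate. A minimal self-contained sketch of that pattern, assuming OpenCV 2.4 and a camera at index 0:

#include <opencv2/opencv.hpp>

int main() {
    cv::VideoCapture cap(0);         // assumed: a camera at index 0
    cv::BackgroundSubtractorMOG2 bg; // the object the examples call "bg"/"mog"/"pMOG2"
    cv::Mat frame, fore, back;
    while (cap.read(frame)) {
        bg(frame, fore, 0.001);      // update the model, get the foreground mask
        bg.getBackgroundImage(back); // current background estimate
        cv::imshow("foreground", fore);
        if (cv::waitKey(30) == 27)   // ESC quits
            break;
    }
    return 0;
}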
Example #2
JNIEXPORT jintArray JNICALL Java_pris_videotest_JNIClient_detectWithReturn(
		JNIEnv * env, jclass, jintArray pixels, jint width, jint height) {
	jint * cPixels;
	cPixels = env->GetIntArrayElements(pixels, 0);

	cv::Mat imgData(height, width, CV_8UC4, (unsigned char*) cPixels);

	// Wrap the Mat's pixels in an IplImage header for the legacy C API
	// (no copy, and nothing to release; the original cvCreateImage buffer
	// was leaked as soon as its header was overwritten).
	IplImage frameHeader = imgData;
	IplImage *frame = &frameHeader;
	//imgData.release();

	cvSmooth(frame, frame, CV_GAUSSIAN, 3, 0, 0);     ///< Gaussian smoothing
	cv::Mat m_OriFrameMat = frame;
	//cvReleaseImage(&frame);
	resize(m_OriFrameMat, m_ResizeFrameMat,
			cv::Size(m_nVideoResizeW, m_nVideoResizeH), 0, 0, CV_INTER_LINEAR); ///< Downscale: 640*480 -> m_pResizeFrame = 30*40
	//m_OriFrameMat.release();
	//cvtColor(m_ResizeFrameMat, m_GrayFrameMat, CV_BGRA2GRAY, 1); ///< Convert to grayscale
	//m_ResizeFrameMat.release();
	m_pBGSubMOG2.operator()(m_ResizeFrameMat, foregroundMat, 0.001);
	m_ResizeFrameMat = foregroundMat;

	// Copy the 8-bit mask out, widening each byte to jint.
	// (std::vector replaces the original variable-length array, which is
	// non-standard C++; requires <vector>.)
	std::vector<jint> result(m_nVideoResizeH * m_nVideoResizeW);
	for (int i = 0; i < m_nVideoResizeH * m_nVideoResizeW; i++) {
		result[i] = m_ResizeFrameMat.data[i];
	}
	env->ReleaseIntArrayElements(pixels, cPixels, 0);
	jintArray intArray = env->NewIntArray(m_nVideoResizeH*m_nVideoResizeW);
	env->SetIntArrayRegion(intArray, 0, m_nVideoResizeH * m_nVideoResizeW, result.data());
	return intArray;
}
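
A note on the entry point above: by the JNI name-mangling convention, the symbol Java_pris_videotest_JNIClient_detectWithReturn binds to a native method on the Java class pris.videotest.JNIClient, and the jclass second parameter means the method is static. Reconstructed here in comment form (this page's examples are C++, so only the C++ side is shown as code):

// Java declaration implied by the mangled symbol (a reconstruction, not from the source):
//
//   package pris.videotest;
//   public class JNIClient {
//       public static native int[] detectWithReturn(int[] pixels, int width, int height);
//   }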
Example #3
JNIEXPORT jboolean JNICALL Java_pris_videotest_JNIClient_detect(JNIEnv * env,
		jclass, jbyteArray pixels, jint width, jint height) {
	jbyte * cPixels;
	cPixels = env->GetByteArrayElements(pixels, 0);

	cv::Mat imgData(height, width, CV_8UC1, (unsigned char*) cPixels);

	// Wrap the Mat's pixels in an IplImage header for the legacy C API
	// (no copy, and nothing to release; the original cvCreateImage buffer
	// was leaked as soon as its header was overwritten).
	IplImage frameHeader = imgData;
	IplImage *frame = &frameHeader;
	//imgData.release();

	cvSmooth(frame, frame, CV_GAUSSIAN, 3, 0, 0);     ///< Gaussian smoothing
	cv::Mat m_OriFrameMat = frame;
	//cvReleaseImage(&frame);
	resize(m_OriFrameMat, m_ResizeFrameMat,
			cv::Size(m_nVideoResizeW, m_nVideoResizeH), 0, 0, CV_INTER_LINEAR); ///< Downscale: 640*480 -> m_pResizeFrame = 30*40
	//m_OriFrameMat.release();
	//cvtColor(m_ResizeFrameMat, m_GrayFrameMat, CV_BGRA2GRAY, 1); ///< Convert to grayscale
	//m_ResizeFrameMat.release();
	m_pBGSubMOG2.operator()(m_ResizeFrameMat, foregroundMat, 0.001);
	m_ResizeFrameMat = foregroundMat;

	int i, j, k;
	k = 0;
	for (i = 0; i < m_nVideoResizeH; i++) {
		for (j = 0; j < m_nVideoResizeW; j++) {
			if (m_ResizeFrameMat.data[i * m_nVideoResizeW + j] != 0) {
				k++; ///< Count the nonzero pixels of the binary foreground image
			}
		}
	}
	//m_GrayFrameMat.release();
	//delete frame;
	double k_ratio = (double) k / (double) (m_nVideoResizeW * m_nVideoResizeH);
	if (k_ratio <= 0.01) {
		env->ReleaseByteArrayElements(pixels, cPixels, 0);
		return JNI_FALSE;
	}
	// Re-baseline the adaptive foreground threshold when the ratio drifts
	// far from it (more than 1.5x above, or below ~0.79x).
	if (k_ratio / m_rFGSegThreshold > 1.5 || k_ratio / m_rFGSegThreshold < 0.79)
		m_rFGSegThreshold = k_ratio;

	/// A moving video segment was detected
	if (k_ratio >= m_rFGSegThreshold) {
		env->ReleaseByteArrayElements(pixels, cPixels, 0);
		return JNI_TRUE;
	}
	env->ReleaseByteArrayElements(pixels, cPixels, 0);
	return JNI_FALSE;
}
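
Stripped of the JNI plumbing, the motion decision in this example is a small rule on the foreground ratio: frames with at most 1% foreground pixels are dismissed as noise, the threshold re-baselines itself whenever the ratio drifts far from it, and motion is reported when the ratio reaches the threshold. Pulled out as a standalone function (the function name is mine, not from the original):

// fgRatio: fraction of nonzero foreground pixels; threshold: m_rFGSegThreshold.
bool isMotionFrame(double fgRatio, double &threshold) {
    if (fgRatio <= 0.01)
        return false;            // at most 1% foreground: treat as noise
    if (fgRatio / threshold > 1.5 || fgRatio / threshold < 0.79)
        threshold = fgRatio;     // large drift: re-baseline the threshold
    return fgRatio >= threshold; // motion segment detected
}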
Example #4
int getFirePixelNumber(Mat aFrame) {
    const int ROI_WIDTH = 40;
    const int ROI_HEIGHT = 30;
    unsigned int currentWidth = 0, currentHeight = 0;
    unsigned int width, height;
    std::vector<std::vector<cv::Point> > contours;
    //Mat roi;
    Rect roi;
    width = aFrame.cols;
    height = aFrame.rows;

    Mat YCrCbFrame;
    Mat YChannel, CrChannel, CbChannel;
    Mat Y_Cb, Cr_Cb;
    Mat colorMask;

    //check for input frame
    if(aFrame.empty())
    {
        return -1;
    }
    //--------------detect moving pixels------------//
    //        using BackgroundSubtractorMOG2        //
    //-----------------------------------------------//
    bg.operator ()(aFrame, front);
    bg.getBackgroundImage(back);
    //cv::erode(front,front, cv::Mat());
    //cv::dilate(front, front, cv::Mat());
    cv::medianBlur(front, front, 5);
    cv::findContours(front,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
    std::vector<std::vector<cv::Point> > contours_poly( contours.size() );
    vector<Rect> boundRect( contours.size() );
    vector<Point2f>center( contours.size() );
    vector<float>radius( contours.size() );

    for(unsigned int i = 0; i < contours.size(); i++ )
    {
        cv::approxPolyDP( contours[i], contours_poly[i], 3.0, true );
        boundRect[i] = boundingRect( Mat(contours_poly[i]) );
        cv::minEnclosingCircle( contours_poly[i], center[i], radius[i] );
    }

    for(unsigned int i = 0; i< contours.size(); i++ )
    {
        Scalar color = Scalar( 255,255,255 );
        //params:	  input   output       contourIdx	color    thickness
        drawContours( front, contours_poly,    i,       color,   CV_FILLED, 8, vector<Vec4i>(), 0, Point() );
    }

    //----detect fire color----//
    //-------------------------------------------------------------------//
    //	pixel = fire color when											 //
    //			valueY > valueCb &&										 //
    //			valueCr > valueCb &&									 //
    //			(valueY > meanY && valueCr > meanCr && valueCb < meanCb) //
    //-------------------------------------------------------------------//

    //get YCrCb channel
    cvtColor(aFrame, YCrCbFrame, CV_BGR2YCrCb);
    vector<Mat> channels(3);
    split(YCrCbFrame, channels);
    YChannel = channels[0];
    CrChannel = channels[1];
    CbChannel = channels[2];

    //calculate mean of 3 channels: => for further use
//	unsigned char Y_mean, Cr_mean, Cb_mean;
//	Y_mean = (unsigned char)mean(YChannel)[0];
//	Cr_mean = (unsigned char)mean(CrChannel)[0];
//	Cb_mean = (unsigned char)mean(CbChannel)[0];

    colorMask = Mat(aFrame.rows, aFrame.cols, CV_8UC1);
    Y_Cb  = Mat(aFrame.rows, aFrame.cols, CV_8UC1);//YChannel minus CbChannel
    Cr_Cb = Mat(aFrame.rows, aFrame.cols, CV_8UC1);//CrChannel minus CbChannel
    subtract(YChannel, CbChannel, Y_Cb);
    threshold(Y_Cb, Y_Cb, 10, 255, THRESH_BINARY);
    subtract(CrChannel, CbChannel, Cr_Cb);
    threshold(Cr_Cb, Cr_Cb, 10, 255, THRESH_BINARY);

    //colorMask = front & Y_Cb & Cr_Cb
    bitwise_and(front, Y_Cb, colorMask);
    bitwise_and(colorMask, Cr_Cb, colorMask);

    for(currentWidth = 0; currentWidth < width; currentWidth += ROI_WIDTH)
    {
        for(currentHeight = 0; currentHeight < height; currentHeight += ROI_HEIGHT)
        {
            // Clamp tiles at the right/bottom edges so colorMask(roi) cannot
            // throw when the frame size is not a multiple of the tile size.
            roi = Rect(currentWidth, currentHeight,
                       std::min((unsigned int)ROI_WIDTH, width - currentWidth),
                       std::min((unsigned int)ROI_HEIGHT, height - currentHeight));
            cv::Mat testArea = colorMask(roi);
            int fireCount = countNonZero(testArea);
            if(fireCount > 10)
            {
                cv::Mat roi_draw = aFrame(roi);
                cv::Mat color(roi_draw.size(), CV_8UC3, cv::Scalar (0,125,125));
                double alpha = 0.5;
                cv::addWeighted(color, alpha, roi_draw, 1.0-alpha, 0.0, roi_draw);
            }
        }
    }

    int fireCount = countNonZero(colorMask);

    cvtColor(front, front, CV_GRAY2BGR);
    cvtColor(Y_Cb, Y_Cb, CV_GRAY2BGR);
    cvtColor(Cr_Cb, Cr_Cb, CV_GRAY2BGR);
    cvtColor(colorMask, colorMask, CV_GRAY2BGR);

    char wName[] = "Frames";
    cvShowManyImages(wName, aFrame.cols, aFrame.rows, 5, (unsigned char*)aFrame.data, (unsigned char*)front.data, (unsigned char*)Y_Cb.data, (unsigned char*)Cr_Cb.data, (unsigned char*)colorMask.data);
//	imshow(wName, frame);
//    if(fireCount>fireThreshold)
//    {
//        //count the frame that contains firePixel surpass threshold
//        std::cout << "Fired" << std::endl;
//    }
//    else
//    {
//        std::cout << "Not fired" << std::endl;
//    }
    return fireCount;
}
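
The comment block inside this example states a three-part fire-color rule, but the code only implements the first two comparisons (Y > Cb and Cr > Cb); the mean-based third condition is left commented out. A sketch of how it could be added with the same channel Mats (my completion, not code from the original):

#include <opencv2/opencv.hpp>

// Hypothetical third mask: Y > mean(Y) && Cr > mean(Cr) && Cb < mean(Cb).
cv::Mat fireMeanMask(const cv::Mat &Y, const cv::Mat &Cr, const cv::Mat &Cb) {
    cv::Mat yHigh, crHigh, cbLow, mask;
    cv::threshold(Y,  yHigh,  cv::mean(Y)[0],  255, cv::THRESH_BINARY);
    cv::threshold(Cr, crHigh, cv::mean(Cr)[0], 255, cv::THRESH_BINARY);
    cv::threshold(Cb, cbLow,  cv::mean(Cb)[0], 255, cv::THRESH_BINARY_INV);
    cv::bitwise_and(yHigh, crHigh, mask);
    cv::bitwise_and(mask, cbLow, mask);
    return mask; // AND this with colorMask before counting fire pixels
}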
Example #5
extern "C" void getbg(int rows, int cols, unsigned char *bgD) {
    cv::Mat bg = cv::Mat(rows, cols, CV_8UC3, bgD);
    mog.getBackgroundImage(bg);
}
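
This wrapper is meant to be called across a C FFI boundary; the caller owns the pixel buffer and must size it for a rows x cols, 3-channel, 8-bit image. A caller-side sketch (assuming the declaration below is visible to the caller):

#include <vector>

extern "C" void getbg(int rows, int cols, unsigned char *bgD);

void fetchBackground(int rows, int cols) {
    // rows*cols*3 bytes, matching the CV_8UC3 Mat wrapped inside getbg().
    std::vector<unsigned char> buffer((size_t)rows * cols * 3);
    getbg(rows, cols, buffer.data());
    // buffer now holds the BGR background estimate, row-major, 3 bytes per pixel.
}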
Example #6
void WayFinderApp::update()
{
    if(getElapsedFrames() % FRAME_COUNT_THRESHOLD == 0) {
        detected = false;

        // TODO: Consider converting capture to grayscale or blurring then thresholding to improve performance.
        if(capture && capture->checkNewFrame()) {
            frame = toOcv(capture->getSurface());
            //cv::Mat frameGray, frameBlurred, frameThresh, foreGray, backGray;
            //cvtColor(frame, frameGray, CV_BGR2GRAY);
            int blurAmount = 10;
            //cv::blur(frame, frameBlurred, cv::Size(blurAmount, blurAmount));
            //threshold(frameBlurred, frameThresh, 100, 255, CV_THRESH_BINARY);

            // Get all contours.
            //bg.operator()(frameThresh,fore);
            bg.operator()(frame, fore);
            bg.getBackgroundImage(back);
            cv::erode(fore, fore, cv::Mat());
            cv::dilate(fore, fore, cv::Mat());
            cv::findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

            // Get largest contour: http://stackoverflow.com/questions/15012073/opencv-draw-draw-contours-of-2-largest-objects
            unsigned largestIndex = 0;
            unsigned largestContour = 0;
            for(unsigned i = 0; i < contours.size(); i++) {
                if(contours[i].size() > largestContour) {
                    largestContour = contours[i].size();
                    largestIndex = i;
                }
            }

            vector<std::vector<cv::Point>> hack;
            cv::Rect rect;
            cv::Point center;

            if(contours.size() > 0) {
                hack.push_back(contours[largestIndex]);

                // Find bounding rectangle for the largest contour.
                rect = boundingRect(contours[largestIndex]);

                // Make sure the blob is large enough to be track-worthy.
                println("Rect area = " + boost::lexical_cast<std::string>(rect.area()));
                if(rect.area() >= 5000) { // TODO: Tweak this value.
                    // Get center of rectangle.
                    center = cv::Point(
                                 rect.x + (rect.width / 2),
                                 rect.y + (rect.height / 2)
                             );

                    // Show guide.
                    spotlightCenter2D.x = (float)center.x;
                    spotlightCenter2D.y = (float)center.y;
                    spotlightCenter3D.x = (float)center.x;
                    spotlightCenter3D.y = (float)center.y;
                    //spotlightRadius = (rect.width + rect.y) / 2;
                    detected = true;
                }
            }

            // When debug mode is off, the background should be black.
            if(debugView) {
                if(contours.size() > 0) {
                    cv::drawContours(frame, contours, -1, cv::Scalar(0, 0, 255), 2);
                    cv::drawContours(frame, hack, -1, cv::Scalar(255, 0, 0), 2);
                    rectangle(frame, rect, cv::Scalar(0, 255, 0), 3);
                    circle(frame, center, 10, cv::Scalar(0, 255, 0), 3);
                }
                mTexture = gl::Texture(fromOcv(frame));
            }
        }

        // TODO: Create control panel for all inputs.
    }
}
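
One thing to note in the update above: the "largest" contour is ranked by vertex count (contours[i].size()), not by enclosed area, so a long, thin, noisy contour can outrank a genuinely bigger blob. If area is the intended criterion, cv::contourArea is the usual measure; a minimal alternative sketch:

#include <vector>
#include <opencv2/opencv.hpp>

// Index of the contour with the largest enclosed area (an alternative to
// ranking by point count, as WayFinderApp::update does above).
size_t largestContourByArea(const std::vector<std::vector<cv::Point> > &contours) {
    size_t best = 0;
    double bestArea = 0.0;
    for (size_t i = 0; i < contours.size(); ++i) {
        double area = cv::contourArea(contours[i]);
        if (area > bestArea) { bestArea = area; best = i; }
    }
    return best;
}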
Example #7

void processVideo(char* videoFilename) {
    IplImage *labelImg = NULL; // foreground label image
    CTracker openTracker((float)0.033, (float)0.6, (double)20.0, 10, 3000);
    CvTracks tracks;
    //create the capture object
    VideoCapture capture(videoFilename);
    if(!capture.isOpened()){
        //error in opening the video input
        cerr << "Unable to open video file: " << videoFilename << endl;
        exit(EXIT_FAILURE);
    }
    bool bInitialized = false;
    //read input data. ESC or 'q' for quitting
    while( (char)keyboard != 'q' && (char)keyboard != 27 ){
        //read the current frame
        if(!capture.read(frame)) {
            cerr << "Unable to read next frame." << endl;
            cerr << "Exiting..." << endl;
            exit(EXIT_FAILURE);
        }

        if(!bInitialized)
        {
            cv::Size frameSize(frame.cols, frame.rows);
            labelImg = cvCreateImage(frameSize, IPL_DEPTH_LABEL, 1);
            bInitialized = true;
        }
        //update the background model
        pMOG2.operator ()(frame, fgMaskMOG2);

        //morphological opening to clean up the mask
        cv::erode(fgMaskMOG2, fgMaskMOG2, cv::Mat(), cv::Point(-1,-1), 1);
        cv::dilate(fgMaskMOG2, fgMaskMOG2, cv::Mat(), cv::Point(-1,-1), 4);

        // step 2: blob analysis
        // Take the address of named IplImage headers, not of casted
        // temporaries (the original &(IplImage)frame form is non-portable).
        CvBlobs blobs;
        IplImage fgIpl = fgMaskMOG2;
        IplImage frameIpl = frame;
        unsigned int result = cvLabel(&fgIpl, labelImg, blobs);
        cvFilterByArea(blobs, 125, 10000);
        cvRenderBlobs(labelImg, blobs, &frameIpl, &frameIpl, CV_BLOB_RENDER_BOUNDING_BOX);
        //cvUpdateTracks(blobs, tracks, 200, 5);
        //cvRenderTracks(tracks, &frameIpl, &frameIpl, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_LOG);

        //convert the blobs into the detection structure
        //(the tracker is assumed to take ownership; they are never freed here)
        vector<Detection*> detections;
        for (CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it)
        {
            CvBlob *blob = (*it).second;
            Detection *_detection = new Detection;
            _detection->centroid.x = blob->centroid.x;
            _detection->centroid.y = blob->centroid.y;
            _detection->brect.x = blob->minx;
            _detection->brect.y = blob->miny;
            _detection->brect.height = blob->maxy - blob->miny;
            _detection->brect.width  = blob->maxx - blob->minx;
            detections.push_back(_detection);
        }

        //Step 3: hand the centroids of all detected contours to the tracker;
        //the traces it returns are Kalman-filtered
        if(blobs.size() > 0)
        {
            openTracker.Update(detections);
            int i, j;
            for(i = 0; i < openTracker.tracks.size(); i++)
            {
                //threshold to de-noise: a contour that only just appeared may be noise
                if(openTracker.tracks[i]->trace.size() > 10)
                {
                    for(j = 0; j < (openTracker.tracks[i]->trace.size() - 2); j++)
                    {
                        cv::rectangle(frame, openTracker.tracks[i]->brect, Scalar(255,0,0));
                        //line(fore, openTracker.tracks[i]->trace[j], openTracker.tracks[i]->trace[j+1], Colors[openTracker.tracks[i]->track_id % 9], 1, CV_AA);
                        line(frame, openTracker.tracks[i]->trace[j], openTracker.tracks[i]->trace[j+1], Scalar(255,0,0), 1, CV_AA);
                    }

                    stringstream ss;
                    ss << openTracker.tracks[i]->track_id;
                    string str = ss.str();

                    putText(frame, str, openTracker.tracks[i]->trace[j], FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255,0,0), 1);
                }
            }
        }
        //get the frame number and write it on the current frame
        stringstream ss;
        rectangle(frame, cv::Point(10, 2), cv::Point(100, 20), cv::Scalar(255,255,255), -1);
        ss << capture.get(CV_CAP_PROP_POS_FRAMES);
        putText(frame, ss.str(), cv::Point(15, 15), FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0));
        //show the current frame and the fg masks
        imshow("Frame", frame);
        imshow("FG Mask MOG 2", fgMaskMOG2);
        //get the input from the keyboard
        keyboard = waitKey( 30 );
    }
    //delete capture object
    capture.release();
    if (labelImg)
        cvReleaseImage(&labelImg);
}
Example #8
int main(int argc, const char** argv) {
	int persons = 1;
	const bool recalibrate = true;

	// read args
	if (argc > 1)
		persons = atoi(argv[1]);

	// set up the path template for input frames
	// (string literals should bind to const char*)
	const char* filepath;
	filepath = "/home/charence/Workspace/biomotion-vision/images/set2/%d/10/frame%04d.jpg";
	// is it my mac?
	#ifdef MYMAC
	filepath = "/Users/charence/Workspace/biomotion-vision/images/set2/%d/10/frame%04d.jpg";
	#endif
	// is it on doc?
	#ifdef ONDOC
	filepath = "/media/Charence500/Data/20121221/10/%d/frame%04d.jpg";
	#endif

	int start = 0;
	int end = 2485;
	switch(persons) {
		case 1: end = 2485; break;
		case 2: end = 3621; break;
		case 3: end = 4489; break;
	}

	// setup background model
	bgmodel.set("history", history);
	bgmodel.set("varThreshold", varThreshold);
	bgmodel.set("detectShadows", true);

	// setup homography
	if (recalibrate) {
	}
	else {
	}

	// setup tracker
	pointTracker.setArguments(3.5, 50);

	//cout << "ImageNum,ContourArea,RectCentreX,RectCentreY,RectAngle,RectWidth,RectHeight,CircCentreX,CircCentreY,Radius" << endl;
	//cout << "ImageNum,ContourArea,CircCentreX,CircCentreY,Radius" << endl;

	// process sequence
	for (int i = start; i <= end; i++) {
		if (i > 200)
			i = 999; // debug scaffolding: pins processing to frame 999 (1050 was also tried)
		char filename [128];
		sprintf(filename, filepath, persons, i);
		//cout << "In: " << filename << endl;
		imageNum = i;
		// load image
		cv::Mat image = cv::imread(filename);
		if (image.empty()) {
			throw runtime_error("Could not load image");
		}
		// detect objects
		learningRate = (i > 200) ? 0.00005 : 0.01;
		//delay = (i > 900) ? 500 : 5; // slows down update so I can get screencaps
		//vector<ObjectInfo> detectedObjects = detectPeople(image);
		vector<cv::Mat> detectedObjects = detectPeople(image);
		// homography
		//detectedObjects = ho
		// track objects
		//objectTracker->update(detectedObjects);
		if (i > 200) {
			trackObjects(detectedObjects);
			predictObjects(image);
		}
		// translate coordinates
	}

	cout << "Total frames: " << numFrames << endl;
	cout << "Merged frames: " << numMerged << endl;
	cout << "Split frames: " << numSplit << endl;
	cout << "Unsplit frames: " << numUnsplit << endl;

	return 0;
}
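
The learning-rate schedule in the loop above is the interesting part: a high rate (0.01) for the first 200 frames lets MOG2 bootstrap a clean background quickly, after which the rate drops to 0.00005 so slow-moving people are not absorbed into the background model. As a tiny helper (the name is mine):

// Bootstrap the background fast, then nearly freeze the model.
double mog2LearningRate(int frameIndex) {
    return (frameIndex > 200) ? 0.00005 : 0.01;
}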
Example #9
/**
 * Detect people using background segmentation and contours
 * BSN2013
 */
static vector<cv::Mat> detectPeopleSegment(cv::Mat image) {
	vector<cv::Mat> points;

	// convert to HSV
	cv::Mat imageHSV;
	cv::cvtColor(image, imageHSV, CV_BGR2HSV);
	vector<cv::Mat> imageHSVSlices;
	cv::split(imageHSV, imageHSVSlices);
	//cv::threshold(imageHSVSlices[0], imageHSVSlices[0], 160, 200, cv::THRESH_BINARY);

	// background subtraction
	cv::Mat fgMask;
	bgmodel(image, fgMask, learningRate);
	// tidy foreground mask
	cv::GaussianBlur(fgMask, fgMask, cv::Size(1, 1), 0, 0);
	int erosionSize = 5;
	cv::Mat element = cv::getStructuringElement( cv::MORPH_ELLIPSE,
		cv::Size(2*erosionSize+1, 2*erosionSize+1),
		cv::Point( erosionSize, erosionSize ));
	cv::dilate(fgMask, fgMask, element);
	cv::erode(fgMask, fgMask, element);
	cv::erode(fgMask, fgMask, element);
	cv::dilate(fgMask, fgMask, element);
	cv::Mat background;
	bgmodel.getBackgroundImage(background);
	//cv::imshow("back", background);

	// subtract background from original image
	cv::Mat foreground;
	//cv::not
	cv::threshold(fgMask, fgMask, 128, 255, cv::THRESH_BINARY);
	image.copyTo(foreground, fgMask);
	cv::imshow("fg", fgMask);
	cv::imshow("fore", foreground);

	// edge information
	int lowThreshold = 100;
	int ratio = 3;
	int kernelSize = 3;
	cv::Mat imageCanny;
	cv::Canny(foreground, imageCanny, lowThreshold, lowThreshold*ratio, kernelSize);

	// weight map and weighted-gradient image
	// apply Gaussian filter (size = 9 and sigma = 1.5) to edge information from foreground image
	// create Gaussian filter
	// weight map
	cv::Mat weightMap;
	cv::GaussianBlur(imageCanny, weightMap, cv::Size(9, 9), 1.5, 1.5);
	// gradient image
	cv::Mat imageGray;
	cv::cvtColor(image, imageGray, CV_BGR2GRAY);
	cv::Mat imageGradient;
	cv::Mat imageGradientX;
	cv::Mat imageGradientY;
	cv::Mat imageAbsGradientX;
	cv::Mat imageAbsGradientY;
	cv::Sobel(imageGray, imageGradientX, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
	cv::Sobel(imageGray, imageGradientY, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
	cv::convertScaleAbs(imageGradientX, imageAbsGradientX);
	cv::convertScaleAbs(imageGradientY, imageAbsGradientY);
	cv::addWeighted(imageAbsGradientX, 0.5, imageAbsGradientY, 0.5, 0, imageGradient);
	// weighted-gradient image
	cv::Mat weightedGradient;
	cv::Mat colourWeightMap;
	weightedGradient = imageGradient.mul(weightMap);

	// object (body) contours
	vector< vector<cv::Point> > objectContours;
	vector<cv::Vec4i> objectHierarchy;
	cv::findContours(fgMask, objectContours, objectHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

	// bodies and heads
	// store index of detected body contours and position of head
	vector<int> bodies;
	vector<cv::Point2f> headCenter;
	vector<float> headRadius;

	// detect big bodies
	for (int i = 0; i < objectContours.size(); i++) {
		// if contour is too big
		if (getContourRadius(objectContours[i]) > BODYSIZE*2) {
			// increment merged counter
			numMerged++;
			cout << "Merged object" << endl;

			// TODO cut down to size
			// TODO consider just slicing it
			// process contour by eroding it
			cv::Mat largeContour = cv::Mat::zeros(imageCanny.size(), CV_8UC3);
			drawContours(largeContour, objectContours, i, colourRed, CV_FILLED, 8, objectHierarchy, 0, cv::Point());
			// erode until large contour becomes 2+
			vector< vector<cv::Point> > largeContours;
			vector<cv::Vec4i> largeHierarchy;
			do {
				cv::erode(largeContour, largeContour, element);
				cv::Canny(largeContour, largeContour, lowThreshold, lowThreshold*ratio, kernelSize);
				cv::findContours(largeContour, largeContours, largeHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
			} while (largeContours.size() == 1); // || (largeContours.size() == 1 && getContourRadius(largeContours[0]) >= BODYSIZE)); // TODO potential infinite-loop bug here
			if (largeContours.size() > 1) {
				// increment split counter
				numSplit++;
				cout << "Split object" << endl;
			}
			else if (largeContours.size() == 1) {
				// increment unsplit counter
				numUnsplit++;
				cout << "No split - size still 1" << endl;
			}
			for (int j = 0; j < largeContours.size(); j++) {
				objectContours.push_back(largeContours[j]);
			}
		}
	}
	
	cv::Mat bodiesHeads = cv::Mat::zeros(image.size(), CV_8UC3);
	// detect bodies
	for (int i = 0; i < objectContours.size(); i++) {
		if (isBody(objectContours[i])) {
			// predict head position
			cv::Point2f defaultHeadCenter;
			// body bounding box
			cv::RotatedRect minBodyRect;
			minBodyRect = cv::minAreaRect(cv::Mat(objectContours[i]));
			// body bounding circle radius
			float headOffset = getContourRadius(objectContours[i]); //*0.7;
			// image centre
			cv::Point2f imageCentre(image.size().width/2, image.size().height/2);
			// find gradient of the line to the image centre (computed but never used)
			float m = (minBodyRect.center.y - imageCentre.y)/(minBodyRect.center.x - imageCentre.x);
			// find angle
			double angle;
			if (minBodyRect.center.x <= imageCentre.x && minBodyRect.center.y < imageCentre.y) {
				// top left quad
				angle = atan((imageCentre.x - minBodyRect.center.x)/(imageCentre.y - minBodyRect.center.y));
			}
			else if (minBodyRect.center.x <= imageCentre.x) {
				// bottom left quad
				angle = PI - atan((imageCentre.x - minBodyRect.center.x)/(minBodyRect.center.y - imageCentre.y));
			}
			else if (minBodyRect.center.x > imageCentre.x && minBodyRect.center.y > imageCentre.y) {
				// bottom right quad
				angle = PI + atan((minBodyRect.center.x - imageCentre.x)/(minBodyRect.center.y - imageCentre.y));
			}
			else {
				// top right quad
				angle = 2*PI - atan((minBodyRect.center.x - imageCentre.x)/(imageCentre.y - minBodyRect.center.y));
			}
			do {
				headOffset *= 0.7;
				defaultHeadCenter = cv::Point2f(minBodyRect.center.x - headOffset * sin(angle), minBodyRect.center.y - headOffset * cos(angle));
			} while (cv::pointPolygonTest(objectContours[i], defaultHeadCenter, true) <= 0 && headOffset >= 1);
			// store body and head if body big enough for head
			if (headOffset >= 1) {
				// store body
				bodies.push_back(i);
				//angle = angle * 180/PI;
				headCenter.push_back(defaultHeadCenter);
				headRadius.push_back(0); // default head size
				// get detailed contours of body
				cv::Mat bodyMask = cv::Mat::zeros(image.size(), CV_8UC1);
				drawContours(bodyMask, objectContours, i, colourWhite, CV_FILLED, 8, objectHierarchy, 0, cv::Point());
				//cv::floodFill(bodyMask, cv::Point2i(0, 0), cv::Scalar(1));
				cv::Mat body;
				image.copyTo(body, bodyMask);
				//cv::imshow("B", body);
				// body edges
				cv::Mat bodyCanny;
				cv::Canny(body, bodyCanny, lowThreshold, lowThreshold*ratio, kernelSize);
				// weight map
				cv::Mat bodyWeightMap;
				cv::GaussianBlur(bodyCanny, bodyWeightMap, cv::Size(9, 9), 1.5, 1.5);
				// gradient image
				cv::Mat bodyGray;
				cv::cvtColor(body, bodyGray, CV_BGR2GRAY);
				cv::Mat bodyGradient;
				cv::Mat bodyGradientX;
				cv::Mat bodyGradientY;
				cv::Mat bodyAbsGradientX;
				cv::Mat bodyAbsGradientY;
				cv::Sobel(bodyGray, bodyGradientX, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
				cv::Sobel(bodyGray, bodyGradientY, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
				cv::convertScaleAbs(bodyGradientX, bodyAbsGradientX);
				cv::convertScaleAbs(bodyGradientY, bodyAbsGradientY);
				cv::addWeighted(bodyAbsGradientX, 0.5, bodyAbsGradientY, 0.5, 0, bodyGradient);
				// weighted-gradient image
				cv::Mat bodyWeightedGradient;
				bodyWeightedGradient = bodyGradient.mul(bodyWeightMap);
				// body contours
				vector< vector<cv::Point> > bodyContours;
				vector<cv::Vec4i> bodyHierarchy;
				cv::findContours(bodyWeightedGradient, bodyContours, bodyHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
				// detect head
				for (int j = 0; j < bodyContours.size(); j++) {
					// process contour by eroding it
					cv::Mat aContour = cv::Mat::zeros(image.size(), CV_8UC3);
					drawContours(aContour, bodyContours, j, colourWhite, CV_FILLED, 8, bodyHierarchy, 0, cv::Point());
					drawContours(bodiesHeads, bodyContours, j, colourWhite, 2, 8, bodyHierarchy, 0, cv::Point());
					cv::erode(aContour, aContour, element);
					//cv::erode(aContour, aContour, element);
					//cv::dilate(aContour, aContour, element);
					cv::Canny(aContour, aContour, lowThreshold, lowThreshold*ratio, kernelSize);
					vector< vector<cv::Point> > subContours;
					vector<cv::Vec4i> subHierarchy;
					cv::findContours(aContour, subContours, subHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
					//
					for (int k = 0; k < subContours.size(); k++) {
						//cv::drawContours(imageContours, subContours, k, cv::Scalar(0, 255, 0), 2, 8, subHierarchy, 0, cv::Point());
						if (isHead(subContours[k], objectContours[i])) {
							vector<cv::Point> contourPoly;
							cv::Point2f center;
							float radius;
							if (subContours.size() > 1) {
								approxPolyDP(cv::Mat(subContours[k]), contourPoly, 3, true);
							}
							else {
								approxPolyDP(cv::Mat(bodyContours[j]), contourPoly, 3, true);
							}
							minEnclosingCircle((cv::Mat)contourPoly, center, radius);
							float distanceOld = euclideanDistance(headCenter[headCenter.size() - 1], defaultHeadCenter);
							float distanceNew = euclideanDistance(center, defaultHeadCenter);
							if (headRadius[headRadius.size() - 1] == 0 || (distanceOld > 0 && distanceNew < distanceOld)) {
								// store first detected head or store if it is a better detection
								headCenter[headCenter.size() - 1] = center;
								headRadius[headRadius.size() - 1] = radius;
							}
						}
					}
				}
				if (headRadius[headRadius.size() - 1] == 0) {
					headRadius[headRadius.size() - 1] = 10;
				}
			}
		}
	}

	// draw bodies and heads
	//cv::Mat bodiesHeads = cv::Mat::zeros(image.size(), CV_8UC3);
	for (int i = 0; i < bodies.size(); i++) {
		// draw body
		cv::Scalar colour = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		drawContours(foreground, objectContours, bodies[i], colour, 2, 8, objectHierarchy, 0, cv::Point());
		circle(foreground, headCenter[i], (int)headRadius[i], colour, 2, 8, 0);
		// body bounding box
		cv::RotatedRect bodyRect;
		bodyRect = cv::minAreaRect(cv::Mat(objectContours[bodies[i]]));
		// output
		cout << imageNum;
		cout << "," << headCenter[i].x << "," << headCenter[i].y << "," << headRadius[i]; // head info
		cout << "," << bodyRect.center.x << "," << bodyRect.center.y;
		cout << "," << cv::contourArea(objectContours[bodies[i]]);
		cout << endl;
		// output points
		cv::Mat point(2, 1, CV_32FC1);
		point.at<float>(0) = headCenter[i].x;
		point.at<float>(1) = headCenter[i].y;
		points.push_back(point);
	}

	// increment frame counter
	numFrames++;

	cv::imshow("Original", image);
	//cv::imshow("Hue", imageHSVSlices[0]);
	//cv::imshow("Saturation", imageHSVSlices[1]);
	//cv::imshow("Value", imageHSVSlices[2]);
	cv::imshow("fgMask", fgMask);
	cv::imshow("Foreground", foreground);
	cv::imshow("Canny", imageCanny);
	cv::imshow("WeightMap", weightMap);
	cv::imshow("Gradient Image", imageGradient);
	cv::imshow("Weighted-Gradient Image", weightedGradient);
	//cv::imshow("Contours", imageContours);
	cv::imshow("Body & Head", bodiesHeads);
	cvWaitKey(delay); //5
	
	return points;
}
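
The four-branch quadrant logic inside the body loop of this example computes an angle a in [0, 2*pi) such that the vector from the body centre to the image centre is proportional to (sin a, cos a); the head is then predicted at bodyCentre - offset*(sin a, cos a), i.e. radially away from the image centre, which fits an overhead camera. The same angle falls out of a single atan2 call; a compact equivalent (my refactor, not from the original):

#include <cmath>
#include <opencv2/core/core.hpp>

// Angle a with (centre - body) proportional to (sin a, cos a), in [0, 2*pi).
// Equivalent to the four-quadrant if/else chain in detectPeopleSegment().
double headAngle(cv::Point2f body, cv::Point2f centre) {
    double a = std::atan2(centre.x - body.x, centre.y - body.y); // in (-pi, pi]
    return (a < 0.0) ? a + 2.0 * CV_PI : a;                      // wrap to [0, 2*pi)
}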