int main(int argc, char **argv)
   {
        ros::init(argc, argv, "nmpt_saliency");
        ros::NodeHandle nh("~");
        nh.getParam("gui",debug_mode);
        if (debug_mode) cvNamedWindow("view");
        cvStartWindowThread();
        image_transport::ImageTransport it(nh);

        image_transport::Subscriber sub = it.subscribe("/camera/image_raw", 1, imageCallback);
        pub = nh.advertise<ros_nmpt_saliency::targets>("/nmpt_saliency_point", 50);

        salientSpot.setTrackerTarget(lqrpt);
        bt.blockRestart(1);

        ros::spin();
   }
int main (int argc, char * const argv[]) 
{
	
	//Check if there are any command line arguments -- if not, use a movie. If so, 
	//open the video. 
	if (argc < 2) {
		cout << "An advanced example program illustrating the use of the MIPOMDP class that " << endl
		<< "    allows the user to select a single image input source and  view different aspects of the algorithm." << endl << endl
		<< "Usage: " << endl << "    >> FoveatedFaceTrackerImage [required-path-to-image-file]" << endl << endl 
		<< "Note: Requires the data files \"data/haarcascade_frontalface_alt2.xml\" and" << endl
		<< "    \"data/MIPOMDPData-21x21-3Scales-AllImages.txt\" to function properly." << endl << endl; 
		return 0; 
	} else {
		if (argv[1][0] == '-') {
			cout << "An advanced example program illustrating the use of the MIPOMDP class that " << endl
			<< "    allows the user to select a single image input source and  view different aspects of the algorithm." << endl << endl
			<< "Usage: " << endl << "    >> FoveatedFaceTrackerImage [required-path-to-image-file]" << endl << endl 
			<< "Note: Requires the data files \"data/haarcascade_frontalface_alt2.xml\" and" << endl
			<< "    \"data/MIPOMDPData-21x21-3Scales-AllImages.txt\" to function properly." << endl << endl; 
			return 0; 
		}
		cout << "Getting Image " << argv[1] << endl; 
		current_frame = cvLoadImage( argv[1]); 
	}
	
    if (! current_frame) {
		cout << "Failed to get input from image file." << endl << endl
		<< "Usage: " << endl << "    >> FoveatedFaceTrackerImage [required-path-to-image-file]" << endl << endl ; 
		return 0; 
	}
	

	
	cout << "*************************************************************************" << endl;
	cout << "*  Running Foveated Face Tracker Example." << endl; 
	cout << "*    TIPS: " << endl;
	cout << "*    -- Press 'q' to quit." << endl;
	cout << "*    -- Press 't' to toggle display of probabilities and face counts as text." << endl; 
	cout << "*    -- Press 'b' to toggle display of belief map." << endl; 
	cout << "*    -- Press 'f' to toggle display of framerate." << endl; 
	cout << "*    -- Press 'h' to toggle display of hi-res full-frame search." << endl; 
	cout << "*    -- Press 'r' to reset the belief about the face location." << endl; 
	cout << "*    -- Reducing video size will make the demo more responsive." << endl;  
	cout << "*    -- If the output is not changing, select the display window " << endl;
	cout << "*         and try moving the mouse." << endl; 
	cout << "*************************************************************************" << endl << endl;
	
	
	BlockTimer timer; //The timer is used to track the frame rate. 
	timer.blockStart(0); 
	
    cvNamedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE); //Create the graphical algorithm display
	CvFont font; 
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX, .25, .25); 
	if (showSlider)
		cvCreateTrackbar( "Reduce Video Size", WINDOW_NAME, &sizeSlider, maxReduce-1, &changeImSize); 
	
	//Get an initial frame so we know what size the image will be. 
	movieWidth = current_frame->width;//cvGetCaptureProperty(movie, CV_CAP_PROP_FRAME_WIDTH); 
	movieHeight= current_frame->height;//cvGetCaptureProperty(movie, CV_CAP_PROP_FRAME_HEIGHT); 
	
	int frameHeight = dispHeight/2; 
	int frameWidth = frameHeight*movieWidth/movieHeight; 
	
	//Create the initial saliency tracker with the default constructor
	facetracker = MIPOMDP::loadFromFile("data/MIPOMDPData-21x21-3Scales-AllImages.txt"); 
	
	//Tell the face tracker what size it needs to be. 
	changeImSize(0); 
	
	//Make some intermediate image representations: 
	
	
	//(1) The downsized representation of the original image frame
	
	
	IplImage* small_gray_int = cvCreateImage(cvSize (frameWidth, frameHeight ), 
											 IPL_DEPTH_8U, 1);
	IplImage* small_gray_float = cvCreateImage(cvSize (frameWidth,frameHeight ), 
											   IPL_DEPTH_32F, 1);
	IplImage* small_color_float = cvCreateImage(cvSize (frameWidth, frameHeight ), 
												IPL_DEPTH_32F, 3);
	IplImage* disp_window = cvCreateImage(cvSize (frameWidth, 2*frameHeight ), 
										  IPL_DEPTH_32F, 3);
	
	
	int showProb = 1; 
	int showMap = 1; 
	int showFPS = 1; 
	int showHiRes = 0; 
	char str[1000] = {'\0'};
	
	
	
	int key=0; 
    while (key != 'q' && key != 'Q') //Loop until user enters 'q'
    {
		if (sizeSlider > 0) {
			cvResize(current_frame, color_image, CV_INTER_NN); 
		} else {
			color_image = current_frame; 
		}
		
		if (key == 'F' || key == 'f') showFPS = !showFPS; 
		if (key == 'B' || key == 'b') showMap = !showMap; 
		if (key == 'T' || key == 't') showProb = !showProb; 
		if (key == 'H' || key == 'h') showHiRes = !showHiRes; 
		if (key == 'R' || key == 'r') facetracker->resetPrior(); 
		
		
		
		//Put the current frame the format expected by the MIPOMDP algorithm		
		cvCvtColor (color_image, gray_image, CV_BGR2GRAY);
		if (usingCamera) 	
			cvFlip( gray_image, NULL, 1);
		
		//Call the "searchNewFrame" method.
		timer.blockStart(1); 
		if (showHiRes) {
			facetracker->searchHighResImage(gray_image); 
		} else {
			facetracker->searchNewFrame(gray_image); 
		}
		timer.blockStop(1); 
		
		//Put the result into the display window
		if( current_frame->origin != IPL_ORIGIN_TL ) //on some systems, the image is upside down
			cvFlip(facetracker->foveaRepresentation, NULL, 0); 
		
		cvResize(facetracker->foveaRepresentation, small_gray_int); 
		cvCvtScale(small_gray_int, small_gray_float, 1.0/256); 
		cvCvtColor(small_gray_float, small_color_float, CV_GRAY2BGR);  
		cvSetImageROI(disp_window, cvRect(0, 0, frameWidth, frameHeight)); 
		cvCopy(small_color_float, disp_window); 
		
		
		if (showProb) {
			//sprintf(str,"FastSUN: %03.1f MS;   Total: %03.1f MS", 1000.0*fps, 1000.0*tot); 		
			for (int i = 0; i < facetracker->currentBelief->width; i++) {
				for (int j= 0; j < facetracker->currentBelief->height; j++) {
					int c = (int)cvGetReal2D(facetracker->getCounts(),i,j); 
					if (c > 0) {
						sprintf(str, "%3d", c); 	
						CvPoint loc=facetracker->pixelForGridPoint(cvPoint(j,i)); 
						loc.x = loc.x*frameWidth/imWidth-7; 
						loc.y = loc.y*frameHeight/imHeight+2; 
						cvPutText( disp_window, str,loc, &font, CV_RGB(255,0,255) );
					}
				}
			}
		}
		
		
		if (showMap && !showHiRes) {
			cvResetImageROI(disp_window); 
			
			cvResize(facetracker->currentBelief, small_gray_float, CV_INTER_NN); 
			double scale = .1; 
			//cvScale(small_gray_float, small_gray_float, scale); 
			cvLog(small_gray_float, small_gray_float); 
			cvScale(small_gray_float, small_gray_float, -scale); 
			//cvAddS(small_gray_float, cvRealScalar(scale), small_gray_float); 
			cvCvtColor(small_gray_float, small_color_float, CV_GRAY2BGR);  
			cvSetImageROI(disp_window,cvRect(0, frameHeight, frameWidth, frameHeight)); 
			cvCopy(small_color_float, disp_window); 
			cvResetImageROI(disp_window); 
			
			if (showProb) {
				//sprintf(str,"FastSUN: %03.1f MS;   Total: %03.1f MS", 1000.0*fps, 1000.0*tot); 		
				for (int i = 0; i < facetracker->currentBelief->width; i++) {
					for (int j= 0; j < facetracker->currentBelief->height; j++) {
						sprintf(str, "%5.2f", cvGetReal2D(facetracker->currentBelief,i,j)*100); 	
						CvPoint loc=facetracker->pixelForGridPoint(cvPoint(j,i)); 
						loc.x = loc.x*frameWidth/imWidth-10; 
						loc.y = loc.y*frameHeight/imHeight+frameHeight; 
						cvPutText( disp_window, str,loc, &font, CV_RGB(255,0,255) );
					}
				}
			}
			
		}
		
		
		timer.blockStop(0); 
		
		//Get the time for the saliency algorithm, and for everything in total, and
		//reset the timers. 
		double tot = timer.getTotTime(0); 
		double fps = timer.getTotTime(1); 
		
		timer.blockReset(0); 
		timer.blockReset(1); 
		timer.blockStart(0); 
		
		//Print the timing information to the display image
		if (showFPS) {
			char str[1000] = {'\0'};
			sprintf(str,"MIPOMDP: %03.1f MS;   Total: %03.1f MS", 1000.0*fps, 1000.0*tot); 			
			cvPutText( disp_window, str, cvPoint(20,10), &font, CV_RGB(255,0,255) );
		}
		
		//Display the result to the main window		
		cvShowImage (WINDOW_NAME, disp_window);
		
		//Check if any keys have been pressed, for quitting.
		key = cvWaitKey (5);
		
	} 
	
	// clean up
	return 0;
}
int main (int argc, char * const argv[]) 
{	
	
	
	string WINDOW_NAME = "Object Tracker"; 
	
	Size patchSize = Size(30,30); 
	
	string GENTLEBOOST_FILE= "data/GenkiSZSLCascade.txt"; 
	
	Size minSearchSize(0,0); 
	Size maxSearchSize(0,0); 
	int numFeaturesToUse = -1;//-1; //3; //-1 means "All"
	int NMSRadius = 15; //15; 
	double patchThresh = -INFINITY;  //-INFINITY means "All", 0 means "p>.5"
	int maxObjects = 20; 
	int skipFrames = 0; 
	int useFast = 1; 
	
	Size imSize(320,240); 
	int key=0;
	
	/* Open capture */ 
	VideoCapture capture; 
	int usingCamera = NMPTUtils::getVideoCaptureFromCommandLineArgs(capture, argc, (const char**) argv); 
	if (!usingCamera--) return 0;  
	
	/* Set capture to desired width/height */ 
	if (usingCamera) {
		capture.set(CV_CAP_PROP_FRAME_WIDTH, imSize.width); 
		capture.set(CV_CAP_PROP_FRAME_HEIGHT, imSize.height); 
	}

	
    namedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE); //Create the graphical algorithm display
    Mat current_frame, color_image, gray_image;
	
	BlockTimer bt; 
		
	GentleBoostCascadedClassifier* booster = new GentleBoostCascadedClassifier(); 
	
	ifstream in; 
	in.open(GENTLEBOOST_FILE.c_str()); 
	in >> booster; 
	in.close(); 
	booster->setSearchParams(useFast, minSearchSize, maxSearchSize,1.2, 1, 1); 
	booster->setNumFeaturesUsed(numFeaturesToUse); 
	
	int i = 0; 
    while (key != 'q' && key != 'Q') //Loop until user enters 'q'
    {
		
		//cout<< "Getting camera frame " << endl; 
		capture >> current_frame; 
		if (i++%(skipFrames+1) > 0 && !usingCamera) continue; 
		
		current_frame.copyTo(color_image); 
		cvtColor(current_frame, gray_image, CV_RGB2GRAY); 
				
		vector<SearchResult> boxes; 
		
		bt.blockRestart(2); 
		Mat img = gray_image; 
		booster->searchImage(img, boxes, NMSRadius, patchThresh); 
		cout << "Image Search Time was " << bt.getCurrTime(2)<< endl; 		
		
		if (boxes.size() > (unsigned int) maxObjects) {
			boxes.resize(maxObjects); 
		} 
		
		for (size_t i = 0; i < boxes.size(); i++) {
			Rect imgloc = boxes[i].imageLocation; 
			Point center = Point(imgloc.x + imgloc.width/2.0, imgloc.y + imgloc.height/2.0); 
			Scalar color; 
			if (boxes[i].value > 0 ) 
				color = (i== 0) ? Scalar(0,0,255): Scalar(0,255,255); 
			else color = Scalar(0,0,0); 
			circle(color_image, center, imgloc.width/2.0, color, 3); 
			circle(color_image, center, 2, color, 3); 
		}
		
		imshow(WINDOW_NAME, color_image);
		
		key = cvWaitKey(5);
	} 
	
	return 0;
}
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    // Per-frame saliency callback: finds the most salient point in the incoming
    // camera image, feeds it through the LQR tracker, publishes the tracked
    // point, and (optionally) shows a side-by-side visualization.
    double saltime, tottime, degree;
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(msg, "bgr8");
        im2 = cv_ptr->image;

        /* Downscale the input so saliency runs at a fixed working width. */
        double ratio = imSize.width * 1. / im2.cols;
        resize(im2, im, Size(0,0), ratio, ratio, INTER_NEAREST);

        // Visualization canvas: input frame on the left, saliency map on the right.
        viz.create(im.rows, im.cols*2, CV_32FC3);

        bt.blockRestart(0);
        vector<KeyPoint> pts;
        salTracker.detect(im, pts);
        saltime = bt.getCurrTime(0);

        salTracker.getSalImage(sal);

        // The brightest pixel of the saliency map is the raw target, expressed
        // in normalized [0,1] image coordinates.
        double min, max;
        Point minloc, maxloc;
        minMaxLoc(sal, &min, &max, &minloc, &maxloc);

        lqrpt[0] = maxloc.x*1.0 / sal.cols;
        lqrpt[1] = maxloc.y*1.0 / sal.rows;

        salientSpot.setTrackerTarget(lqrpt);

        // Left panel: the float-scaled input frame.
        Mat vizRect = viz(Rect(0, 0, im.cols, im.rows));
        im.convertTo(vizRect, CV_32F, 1./256.);

        // Smooth the raw target through the tracker and read back its estimate.
        salientSpot.updateTrackerPosition();
        lqrpt = salientSpot.getCurrentPosition();

        /* assign the geometric coordinates of the tracked salient point */
        pt.x = lqrpt[0];
        pt.y = lqrpt[1];
        pt.z = 0;
        // TODO(review): getdegree() exists but is never called here, so every
        // published target carries degree 0 and the marker is always red.
        degree = 0;

        /* visualize confidence on the left panel: green for degree >= 7,
           yellow for 4..6, red otherwise. Marker position is scaled by the
           actual working-image size (the original hardcoded 320x240). */
        double realx = im.cols*pt.x;
        double realy = im.rows*pt.y;

        if (degree >= 7)      { circle(vizRect, cvPoint(realx,realy), 5, CV_RGB(0,255,0), -1); }
        else if (degree >= 4) { circle(vizRect, cvPoint(realx,realy), 5, CV_RGB(128,128,0), -1); }
        else                  { circle(vizRect, cvPoint(realx,realy), 5, CV_RGB(255,0,0), -1); }

        ros_nmpt_saliency::targets trg;
        trg.positions.push_back(pt);
        trg.degree = degree;
        pub.publish(trg);

        // Right panel: the saliency map. Drawn once, here -- the original
        // filled this panel twice with identical content.
        vizRect = viz(Rect(im.cols, 0, im.cols, im.rows));
        cvtColor(sal, vizRect, CV_GRAY2BGR);

        tottime = bt.getCurrTime(1);
        bt.blockRestart(1);

        if (debug_mode){
            imshow("view", viz);
            waitKey(1);
        }
    }
    catch (...)
    {
        ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    }
}
/* TODO: move this into degree.cpp with a matching degree.h */
double getdegree(double x, double y)
{
    // Estimate how "persistently salient" the point (x, y) is, on a 1..10
    // scale. Points are clustered into groups (rows of the global
    // degreeHandler table); revisiting a group quickly raises its degree,
    // while a group that has been active past turn_around_time is reset.
    //
    // degreeHandler row layout: [0]=x, [1]=y, [2]=last-update time,
    // [3]=raw degree, [4]=turn-around reference time, [5]=creation time.
    //
    // NOTE(review): counter grows without bound and is never checked against
    // the capacity of degreeHandler -- confirm the table is large enough.
    timel += bt.getCurrTime(0);

    int flag = 0;            // set when (x, y) falls into an existing group
    double delay = 0;        // time since the matched group was last updated
    double Lturnaround = 0;  // time since the matched group's degree was reset
    double DegreeVal = 0;    // raw degree value, scaled into 1..10 at the end

    if (counter == 0)
    {
        /* First call ever: start group 0 at degree 1. */
        degreeHandler[counter][0] = x;
        degreeHandler[counter][1] = y;
        degreeHandler[counter][2] = timel;
        DegreeVal = degreeHandler[counter][3] = 1;
        degreeHandler[counter][4] = timel;
        degreeHandler[counter][5] = timel;
        ++counter;
    }
    else
    {
        /* Try to match (x, y) against each existing group. */
        for (int i = 0; i < counter; i++)
        {
            /* Bounding box (roughly a 27 x 21 pixel window, in normalized
               coordinates) around the group center, so close-enough salient
               points are treated as one group. L, R, T, B are globals and
               are intentionally updated here. */
            L = degreeHandler[i][0] - 0.09;  R = degreeHandler[i][0] + 0.09;
            T = degreeHandler[i][1] - 0.09;  B = degreeHandler[i][1] + 0.09;

            /* Inside-the-box test (the original spelled this as eight
               redundant comparisons; this is the equivalent simplification). */
            if (x >= L && x <= R && y >= T && y <= B)
            {
                flag = 1;
                Lturnaround = timel - degreeHandler[i][4];
                delay = timel - degreeHandler[i][2];
                // Ceiling tuned on a Core i7 @ 2.34 GHz with the pyface
                // tracker (cmt takes many resources, so the value is reduced).
                defaultMax = 10 * Lturnaround;

                if (Lturnaround < turn_around_time)
                {
                    /* Frequent revisits raise the degree: add the normalized
                       salience increment (approaches 1 for high-frequency
                       hits), weighted by how long the group has been active. */
                    DegreeVal = (degreeHandler[i][3] += (0.02 / delay)) * Lturnaround;
                }
                else
                {
                    /* The group went stale: reset its degree and reference. */
                    DegreeVal = degreeHandler[i][3] = 1;
                    degreeHandler[i][4] = timel;
                }
                degreeHandler[i][2] = timel;

                break; // group found -- stop searching
            }
        }

        /* No group matched: register (x, y) as a new group at degree 0.1.
           (The original also computed a delay from the uninitialized new
           slot here; the value was never used, so it has been removed.) */
        if (!flag)
        {
            degreeHandler[counter][0] = x;
            degreeHandler[counter][1] = y;
            degreeHandler[counter][2] = timel;
            DegreeVal = degreeHandler[counter][3] = 0.1;
            degreeHandler[counter][4] = timel;
            degreeHandler[counter][5] = timel;
            ++counter;
        }
    }

    /* Map the raw value into 1..10, growing the ceiling when exceeded. */
    if (((DegreeVal*10)/defaultMax) + 0.5 > defaultMax) { defaultMax = ((DegreeVal*10)/defaultMax) + 0.5; }
    int roundD = ((DegreeVal*10)/defaultMax) + 0.5;
    DegreeVal = roundD;
    return DegreeVal;
}