Example #1
bool ObjectReconstr::updateCloud()
{
    ImageOf<PixelRgb> *tmpL = imagePortInLeft.read(true);
    ImageOf<PixelRgb> *tmpR = imagePortInRight.read(true);

    IplImage* imgL;
    IplImage* imgR;
    if(tmpL!=NULL && tmpR!=NULL)
    {
        imgL= (IplImage*) tmpL->getIplImage();
        imgR= (IplImage*) tmpR->getIplImage();
    }
    else
    {
        fprintf(stdout, "Problem with image ports occurred\n");
        return false;
    }

    if (currentState==STATE_RECONSTRUCT)
    {
        Bottle pixelList=getPixelList();
        return recRoutine.reconstruct(imgL,imgR,pixelList);
    }
    else
        return recRoutine.updateDisparity(imgL,imgR);
}
Example #2
cv::Rect CalibModule::extractFingerTip(ImageOf<PixelMono> &imgIn, ImageOf<PixelBgr> &imgOut,
                                       const Vector &c, Vector &px)
{
    cv::Mat imgInMat=cv::cvarrToMat((IplImage*)imgIn.getIplImage());

    // produce a colored image
    imgOut.resize(imgIn);
    cv::Mat imgOutMat=cv::cvarrToMat((IplImage*)imgOut.getIplImage());
    
    // proceed iff the center is within the image plane
    if ((c[0]<10.0) || (c[0]>imgIn.width()-10) ||
        (c[1]<10.0) || (c[1]>imgIn.height()-10))
    {
        cv::cvtColor(imgInMat,imgOutMat,CV_GRAY2BGR);
        return cv::Rect();
    }

    // saturate the top-left and bottom-right corners
    int roi_side2=roi_side>>1;
    cv::Point ct((int)c[0],(int)c[1]);
    cv::Point tl((int)(c[0]-roi_side2),(int)(c[1]-roi_side2));
    cv::Point br((int)(c[0]+roi_side2),(int)(c[1]+roi_side2));
    tl.x=std::max(1,tl.x); tl.x=std::min(tl.x,imgIn.width()-1);
    tl.y=std::max(1,tl.y); tl.y=std::min(tl.y,imgIn.height()-1);
    br.x=std::max(1,br.x); br.x=std::min(br.x,imgIn.width()-1);
    br.y=std::max(1,br.y); br.y=std::min(br.y,imgIn.height()-1);
    cv::Rect rect(tl,br);

    // run Otsu algorithm to segment out the finger    
    cv::Mat imgInMatRoi(imgInMat,rect);
    cv::threshold(imgInMatRoi,imgInMatRoi,0,255,cv::THRESH_BINARY|cv::THRESH_OTSU);
    cv::cvtColor(imgInMat,imgOutMat,CV_GRAY2BGR);

    px.resize(2,0.0);
    bool ok=false;
    for (int y=tl.y; y<br.y; y++)
    {
        for (int x=tl.x; x<br.x; x++)
        {
            if (imgIn(x,y)>0)
            {
                // predict the center of the finger a bit shifted
                x+=3;    y+=5;
                px[0]=x; px[1]=y;
                cv::circle(imgOutMat,cv::Point(x,y),5,cv::Scalar(0,0,255),-1);
                ok=true;
                break;
            }
        }

        if (ok)
            break;
    }

    cv::circle(imgOutMat,ct,5,cv::Scalar(0,255,0),-1);
    cv::rectangle(imgOutMat,tl,br,cv::Scalar(255,255,255),4);
    return rect;
}
Example #3
void AcousticMap::renderGaussianDistribution(ImageOf<PixelFloat> &img,
        int posX, int posY,
        int sizex, int sizey,
        float sigmaX, float sigmaY,
        float height) {
    IplImage *ipl = (IplImage*)img.getIplImage();
    //cout << "going to render.." << posX << " " << posY << " " << sizex << " " << sizey << " " << sigmaX << " " << sigmaY << " " << height << endl;
    if (((sizex-1)%2 == 0) && ((sizey-1)%2 == 0)) {
        int extx = (sizex-1)/2;
        int exty = (sizey-1)/2;
        int currX, currY;
        for(int y=-exty; y<=exty; y++) {
            for(int x=-extx; x<=extx; x++) {
                currX = x+posX;
                currY = y+posY;
                // write the Gaussian sample only where (currX, currY) lies inside the acoustic map
                if (currX >= 0 && currX < img.width() &&
                        currY >= 0 && currY < img.height()) {
                    ((float*)(ipl->imageData + ipl->widthStep*currY))[currX] = height*(float)(exp(-0.5*(((float)x/sigmaX)*((float)x/sigmaX)+((float)y/sigmaY)*((float)y/sigmaY))));
                }
            }
        }
    }
    else
    {
        cout << "AcousticMap::renderGaussianDistribution not possible due to invalid size (has to be odd)" << endl;
    }
}
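Note that the size check above forces both sizex and sizey to be odd, so the kernel is centred exactly on (posX, posY). A minimal sketch of a valid call; the AcousticMap instance map and the image dimensions are assumptions:

// hypothetical usage: render a 21x21 Gaussian bump centred at (160, 120)
ImageOf<PixelFloat> img;
img.resize(320, 240);
img.zero();
map.renderGaussianDistribution(img, 160, 120, 21, 21, 3.0f, 3.0f, 1.0f);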
Example #4
void IntensitySalience::applyImpl(ImageOf<PixelRgb>& src, 
                          ImageOf<PixelRgb>& dest,
                          ImageOf<PixelFloat>& sal) {
    dest.resize(src);
    sal.resize(src);
    if (_sizeOld.width != src.width() || 
        _sizeOld.height != src.height())
        initImages(cvSize(src.width(), src.height()));
    
    cvCvtColor(src.getIplImage(), _imgGray, CV_RGB2GRAY);
    cvCvtColor(_imgGray, dest.getIplImage(), CV_GRAY2RGB);
    gray2Float(_imgGray, (IplImage*)sal.getIplImage());

    _sizeOld.width = src.width();
    _sizeOld.height = src.height();
}
Example #5
 /*! 
  * Receives a picture in the OpenCV native format IplImage 
  */
 IplImage* SamgarModule::RecivePictureOCVNative( void ) {
   if( YarpImagePort.getInputCount() > 0 ) {
     ImageOf<PixelBgr> *img = YarpImagePort.read();
     IplImage *frame2 = (IplImage*) img->getIplImage();
     return frame2;
   }
   return NULL;   // no image available (the function returns a pointer, not a bool)
 }
Example #6
bool KinectDriverOpenNI::readRgb(ImageOf<PixelRgb> &rgb, double &timestamp)
{
    const XnRGB24Pixel* pImage = imageGenerator.GetRGB24ImageMap();
    XnRGB24Pixel* ucpImage = const_cast<XnRGB24Pixel*> (pImage);
    cvSetData(rgb_big,ucpImage,this->def_image_width*3);
    cvResize(rgb_big,(IplImage*)rgb.getIplImage());
    int ts=(int)imageGenerator.GetTimestamp();
    timestamp=(double)ts/1000.0;
    return true;
}
Example #7
bool gazeEvaluatorThread::draw2DMotionField(double &_avg,ImageOf<PixelRgb> &_iFlow)
{
    cv::Mat iFlowMat((IplImage*)_iFlow.getIplImage());
    int xSpace=7;
    int ySpace=7;
    float cutoff=1;
    float multiplier=5;
    cv::Scalar color=cv::Scalar(125,255,0);

    CvPoint p0 = cvPoint(0,0);
    CvPoint p1 = cvPoint(0,0);

    float deltaX, deltaY, angle, hyp;
    float sum = 0;
    float cnt = 0;

    for(int i=IMG_CROP_SIZE; i<optFlow.rows-IMG_CROP_SIZE; i+=ySpace) 
    {
        for (int j=IMG_CROP_SIZE; j<optFlow.cols-IMG_CROP_SIZE; j+=xSpace)
        {
            p0.x = j;
            p0.y = i;
            deltaX = optFlow.ptr<float>(i)[2*j];
            deltaY = -optFlow.ptr<float>(i)[2*j+1];
            angle = atan2(deltaY, deltaX);
            hyp = sqrt(deltaX*deltaX + deltaY*deltaY);
            if (hyp > 1e-1)
            {
                sum = sum + hyp;
                cnt++;
            }

            if(hyp > cutoff)
            {
                p1.x = p0.x + cvRound(multiplier*hyp*cos(angle));
                p1.y = p0.y + cvRound(multiplier*hyp*sin(angle));
                cv::line(iFlowMat,p0,p1,color,1,CV_AA);
                p0.x = p1.x + cvRound(3*cos(angle-CV_PI + CV_PI/4));
                p0.y = p1.y + cvRound(3*sin(angle-CV_PI + CV_PI/4));
                cv::line(iFlowMat,p0,p1,color,1,CV_AA);
                p0.x = p1.x + cvRound(3*cos(angle-CV_PI - CV_PI/4));
                p0.y = p1.y + cvRound(3*sin(angle-CV_PI - CV_PI/4));
                cv::line(iFlowMat,p0,p1,color,1,CV_AA);
            }
        }
    }
    if (cnt!=0)
    {
        _avg = sum/cnt;
        return true;
    }

    return false;
}
Example #8
/**
 * Read an image from the grabber.
 *
 * @param image The image to read. The image will be resized to
 * the dimensions the grabber is using, and the captured image
 * data will be written to it.
 *
 * @return True if an image was successfully captured. If false
 * returned, the image will be resized to the dimensions used by
 * the grabber, but all pixels will be zeroed.
 */
bool OpenCVGrabber::sendImage(IplImage* iplFrame, ImageOf<PixelRgb> & image)
{
    // Resize the output image, this should not result in new
    // memory allocation if the image is already the correct size
    image.resize(iplFrame->width, iplFrame->height);

    if (!m_saidSize) {
        yDebug("Received image of size %dx%d\n", image.width(), image.height());
        m_saidSize = true;
    }

    // Get an IplImage, the Yarp Image owns the memory pointed to
    IplImage * iplImage = (IplImage*)image.getIplImage();
    // create the timestamp
    m_laststamp.update();

    // Copy the captured image to the output image, flipping it if
    // the coordinate origin is not the top left
    if (IPL_ORIGIN_TL == iplFrame->origin)
        cvCopy(iplFrame, iplImage, 0);
    else
        cvFlip(iplFrame, iplImage, 0);

    if (iplFrame->channelSeq[0] == 'B') {
        cvCvtColor(iplImage, iplImage, CV_BGR2RGB);
    }

    if (m_w <= 0) {
        m_w = image.width();
    }
    if (m_h <= 0) {
        m_h = image.height();
    }
    if (fromFile) {
        if (m_w>0 && m_h>0) {
            if (image.width() != m_w || image.height() != m_h) {
                if (!m_saidResize) {
                    yDebug("Software scaling from %dx%d to %dx%d",  image.width(), image.height(), m_w, m_h);
                    m_saidResize = true;
                }
                image.copy(image, m_w, m_h);
            }
        }
    }

    DBG yDebug("%d by %d %s image\n", image.width(), image.height(), iplFrame->channelSeq);

    return true;

}
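The comment above documents the contract a caller relies on: the image is resized on every call and zeroed when the capture fails. A minimal polling sketch against that contract, assuming YARP's opencv_grabber device and the IFrameGrabberImage interface; device availability and all names are assumptions:

#include <cstdio>
#include <yarp/os/all.h>
#include <yarp/dev/all.h>
#include <yarp/sig/all.h>
using namespace yarp::os;
using namespace yarp::dev;
using namespace yarp::sig;

int main() {
    Network yarp;

    Property cfg;
    cfg.put("device", "opencv_grabber");   // assumption: the OpenCV grabber device is compiled in
    PolyDriver driver(cfg);
    if (!driver.isValid())
        return 1;

    IFrameGrabberImage *grabber = NULL;
    driver.view(grabber);

    ImageOf<PixelRgb> img;
    if (grabber != NULL && grabber->getImage(img))   // img is resized and filled by the device
        printf("captured %dx%d\n", img.width(), img.height());
    // on failure img is still resized to the grabber dimensions, but zeroed

    driver.close();
    return 0;
}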
Example #9
    bool getBox()
    {
        // Crops the image based on user input and creates a template for the tracker with it.
        printf("Reading image!!\n");
        ImageOf<PixelRgb> *imgIn = imInPort.read();  // read an image
        cv::Mat img((IplImage*) imgIn->getIplImage());	   
     
        printf("Click first top left and then bottom right from the object !!\n");
        bool boxOK = false;
        //Bottle &out  = coordsOutPort.prepare();
        cv::Point tl, br;

        while (!boxOK){
            printf("Click on top left!\n");
            Bottle *point1 = coordsInPort.read(true);
            tl.x =  point1->get(0).asDouble();
            tl.y =  point1->get(1).asDouble();
            printf("Point read at %d, %d!!\n", tl.x, tl.y);

            printf("Click on bottom right!\n");
            Bottle *point2 = coordsInPort.read(true);            
            br.x =  point2->get(0).asDouble();
            br.y =  point2->get(1).asDouble();
            printf("Point read at %d, %d!!\n", br.x, br.y);

            BBox = cv::Rect(tl,br);            
            if (BBox.area() > 20) {
                printf("valid coordinates, cropping image!\n");
                boxOK = true;}
            else {printf("Coordinates not valid, click again!\n");}
        }

        printf("Prep out mat !!\n");
        ImageOf<PixelRgb> &templateOut  = tempOutPort.prepare();
        templateOut.resize(BBox.width, BBox.height);
        cv::Mat tempOut((IplImage*)templateOut.getIplImage(),false);
        img(BBox).copyTo(tempOut);
        //cv::GaussianBlur(img(BBox), imOut, cv::Size(1,1), 1, 1);

        double t0 = Time::now();
        while(Time::now()-t0 < 1) {  //send the template for one second
            printf("Writing Template!\n");
            tempOutPort.write();
            Time::delay(0.1);
        }

        tracking = true;
        return true;
    }
Example #10
void Manager::processMotionPoints(Bottle &b)
{
   // fprintf(stdout,"create mat\n");

    //create MAT image
    cv::Mat imgMat(Size(320,240),CV_8UC3);
    cv::Mat imgClean(Size(320,240),CV_8UC3);
    //vector<Point> points;
    imgMat = Scalar::all(0);
    imgClean = Scalar::all(255);
   // fprintf(stdout,"filling up the data\n");
    for (int x=1; x<b.size(); x++)
    {
        Point pt;
        pt.x = b.get(x).asList()->get(0).asInt();
        pt.y = b.get(x).asList()->get(1).asInt();
        imgMat.at<cv::Vec3b>(pt.y,pt.x)[0] = 255 ;
        imgMat.at<cv::Vec3b>(pt.y,pt.x)[1] = 0 ;
        imgMat.at<cv::Vec3b>(pt.y,pt.x)[2] = 0 ;
        imgClean.at<cv::Vec3b>(pt.y,pt.x)[0] = 255 ;
        imgClean.at<cv::Vec3b>(pt.y,pt.x)[1] = 0 ;
        imgClean.at<cv::Vec3b>(pt.y,pt.x)[2] = 0 ;
    }
    //imgClean = imgMat;
    int n = 10;
    int an = n > 0 ? n : -n;
    int element_shape = MORPH_RECT;
    Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
    morphologyEx(imgMat, imgMat, CV_MOP_CLOSE, element);

    Bottle data;
    //fprintf(stdout,"before process image\n");
    data = processImage(b, imgMat, imgClean, lineDetails); //image analysis and bottle cleaning
    //fprintf(stdout,"before process blobs\n");
    
    if (data.size() > 0)
        processBlobs(data, imgClean, lineDetails); // kmeans

    ImageOf<PixelRgb> outImg;// = new ImageOf<PixelRgb>;
    //fprintf(stdout,"done1 with data %d %d\n", imgClean.cols, imgClean.rows);
    outImg.resize( imgClean.cols, imgClean.rows );
    IplImage ipl_img = imgClean;
    cvCopy(&ipl_img, (IplImage*)outImg.getIplImage());
    imgOutPort.prepare() = outImg;
    imgOutPort.write();
    //delete[] outImg;
    //fprintf(stdout,"ok\n");
}
Example #11
void visionUtils::convertCvToYarp(cv::Mat MatImage, ImageOf<PixelRgb> & yarpImage)
{
    IplImage* IPLfromMat = new IplImage(MatImage);

    yarpImage.resize(IPLfromMat->width,IPLfromMat->height);

    IplImage * iplYarpImage = (IplImage*)yarpImage.getIplImage();

    if (IPL_ORIGIN_TL == IPLfromMat->origin) {
        cvCopy(IPLfromMat, iplYarpImage, 0);
    }
    else {
        cvFlip(IPLfromMat, iplYarpImage, 0);
    }

    if (IPLfromMat->channelSeq[0]=='B') {
        cvCvtColor(iplYarpImage, iplYarpImage, CV_BGR2RGB);
    }

    delete IPLfromMat;   // release the IplImage header allocated above (the pixel data still belongs to MatImage)
}
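A short usage sketch for the helper above; the visionUtils instance utils, the port name, and the file name are assumptions:

// hypothetical usage: convert an OpenCV Mat and publish it on a YARP port
Network yarp;
BufferedPort<ImageOf<PixelRgb> > outPort;
outPort.open("/demo/img:o");

cv::Mat frame = cv::imread("frame.png");   // loaded as BGR by default
ImageOf<PixelRgb> &out = outPort.prepare();
utils.convertCvToYarp(frame, out);         // handles the origin flip and BGR->RGB
outPort.write();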
Example #12
int main(int argc, char *argv[]) {
    printf("Show a circle for 3 seconds...\n");
    ImageOf<PixelRgb> yarpImage;


    printf("Creating a YARP image of a nice circle\n");
    yarpImage.resize(300,200);
    addCircle(yarpImage,PixelRgb(255,0,0),
              yarpImage.width()/2,yarpImage.height()/2,
              yarpImage.height()/4);
    addCircle(yarpImage,PixelRgb(255,50,50),
              yarpImage.width()/2,yarpImage.height()/2,
              yarpImage.height()/5);


    printf("Copying YARP image to an OpenCV/IPL image\n");
    IplImage *cvImage = cvCreateImage(cvSize(yarpImage.width(),  
                                             yarpImage.height()), 
                                      IPL_DEPTH_8U, 3 );
    cvCvtColor((IplImage*)yarpImage.getIplImage(), cvImage, CV_RGB2BGR);

    printf("Showing OpenCV/IPL image\n");
    cvNamedWindow("test",1);
    cvShowImage("test",cvImage);

    printf("Taking image back into YARP...\n");
    ImageOf<PixelBgr> yarpReturnImage;
    yarpReturnImage.wrapIplImage(cvImage);
    yarp::sig::file::write(yarpReturnImage,"test.ppm");
    printf("Saving YARP image to test.ppm\n");


    cvWaitKey(3000);

    cvDestroyWindow("test");

    cvReleaseImage(&cvImage);

    printf("...done\n");
    return 0;
}
Example #13
void SalienceModule::getPeak(ImageOf<PixelFloat> &img, int &i, int &j, float &v){
    //v = -FLT_MAX;
    v = 0.0f;
    //i = img.width()/2;
    //j = img.height()/2;
    // i and j should be left invalid if there is no clear peak (Alex 31/05/08)
    i = -1;
    j = -1;
    IplImage *image = (IplImage*)img.getIplImage();
    float *data = (float*)image->imageData;
    for (int y = 0; y < image->height; y++){
        for (int x = 0; x < image->width; x++){
            if (v < *data){
                v = *data;
                i = x;
                j = y;
            }
            data++;
        }
    }
}
Example #14
bool autobiographicalMemory::saveImageFromPort(const string &fullPath, const string &fromPort, BufferedPort<ImageOf<PixelRgb> >* imgPort)
{
    //Extract the incoming images from yarp
    ImageOf<PixelRgb> *yarpImage = imgPort->read(false);
    //yDebug() << "imgPort name : " << imgPort->getName();

    if (yarpImage != NULL) { // check we actually got something
        //use opencv to convert the image and save it
        IplImage *cvImage = cvCreateImage(cvSize(yarpImage->width(), yarpImage->height()), IPL_DEPTH_8U, 3);
        cvCvtColor((IplImage*)yarpImage->getIplImage(), cvImage, CV_RGB2BGR);
        cvSaveImage(fullPath.c_str(), cvImage);

        //yDebug() << "img created : " << fullPath;
        cvReleaseImage(&cvImage);
        return true;
    }
    else {
        //yWarning() << "[saveImageFromPort] No image received from: " << imgPort->getName();
        return false;
    }
}
Example #15
    void readAndShow(ImageOf<PixelRgb> &reconstructYarp, bool refreshGui)
    {
        if (reconstructWidth == -1 || reconstructHeight == -1)
        {
            //Sample the subdivision sizes
            std::cout << "Waiting for input..." << std::endl;
            ImageOf<PixelRgb>* testImg = ports[0][0]->read(true);
            reconstructWidth = testImg->width() * ports.size();
            reconstructHeight = testImg->height() * ports[0].size();
            std::cout << "Input detected with size : " << testImg->width() << "x" << testImg->height() << std::endl;
            std::cout << "Reconstruct size will: " << reconstructWidth << "x" << reconstructHeight << std::endl;
            reconstruct = cvCreateImage(cvSize(reconstructWidth, reconstructHeight), 8, 3);
        }

        for (unsigned int x = 0; x < ports.size(); x++)
        {
            for (unsigned int y = 0; y < ports[x].size(); y++)
            {
                ImageOf<PixelRgb>* imgYarp = ports[x][y]->read(true);
                if (imgYarp)
                {
                    IplImage* partImg = (IplImage*) imgYarp->getIplImage();
                    cvCvtColor(partImg, partImg, CV_BGR2RGB);
                    int rectW = reconstructWidth / ports.size();
                    int rectH = reconstructHeight / ports[0].size();
                    cvSetImageROI(reconstruct, cvRect(x*rectW, y*rectH, rectW, rectH));
                    cvCopy(partImg,reconstruct);
                    cvResetImageROI(reconstruct);
                }
            }
        }

        if (refreshGui)
            cvShowImage((name + "/Reconstruct").c_str(), reconstruct);
        reconstructYarp.wrapIplImage(reconstruct);
        cvWaitKey(1);
    }
Example #16
bool ObjectReconstr::respond(const Bottle& command, Bottle& reply) 
{
    if (command.get(0).asString()=="help")
    {
        reply.addVocab(ACK);
        reply.addString("To obtain a reconstruction, the module needs a segmentation algorithm that returns the set of pixels belonging to the object");
        reply.addString("It typically uses the graphBasedSegmentation module");
        reply.addString("Provide a point of the object first, in the format x y, for instance clicking on the segmentation image.");
        reply.addString("Then, when you send the command 3Drec, you will be provided with the reconstructed object along with the minimum enclosing bounding box on the output port of the module -- typically /object-reconstruction/mesh:o.");
        reply.addString("If you also want to visualize the result, the command is 3Drec visualize.");
        reply.addString("If you want to reconstruct a single pixel, the command is get point (x y).");
        return true;
    }

    if (command.get(0).asString()=="set")
    {
        if (command.get(1).asString()=="write")
        {
            if (command.size()>2)
            {
                if (command.get(2).asString()=="on")
                {
                    reply.addVocab(ACK);
                    write=true;
                }
                else if (command.get(2).asString()=="off")
                {
                    write=false;
                    reply.addVocab(ACK);
                }
                else
                    reply.addVocab(NACK);
                return true;
            }
        }
        else
        {
            reply.addVocab(NACK);
            return true;
        }
    }
    if (command.get(0).asString()=="3Drec")
    {
        if (middlex==-1 || middley==-1)
        {
            reply.addVocab(NACK);
            reply.addString("I need a pixel of the object");
            return true;
        }

        currentState=STATE_RECONSTRUCT;

        visualizationOn=false;

        if (command.size()==2)
            if (command.get(1).asString()=="visualize")
                visualizationOn=true;

        reply.addVocab(ACK);
        return true;
    }

    if (command.get(0).asString()=="name")
    {
        if (command.size()>=2)
        {
            fileName=command.get(1).asString().c_str();
            reply.addVocab(ACK);
        }
        else
        {
            reply.addVocab(NACK);
            reply.addString("No name was provided");
        }

        return true;
    }

    if (command.get(0).asString()=="get")
    {
        if (command.get(1).asString()=="point" && command.size()==3)
        {
            if (currentState!=STATE_RECONSTRUCT)
            {
                IplImage* imgL;
                IplImage* imgR;

                ImageOf<PixelRgb> *tmpL = imagePortInLeft.read(true);
                ImageOf<PixelRgb> *tmpR = imagePortInRight.read(true);

                if(tmpL!=NULL && tmpR!=NULL)
                {
                    imgL= (IplImage*) tmpL->getIplImage();
                    imgR= (IplImage*) tmpR->getIplImage();
                }
                else
                {
                    reply.addVocab(NACK);
                    return true;
                }

                yarp::sig::Vector point2D(2);
                Bottle *pixel=command.get(2).asList();
                point2D[0]=pixel->get(0).asDouble();
                point2D[1]=pixel->get(1).asDouble();

                yarp::sig::Vector point3D(3);

                bool done=recRoutine.triangulateSinglePoint(imgL,imgR,point2D,point3D);

                if (done)
                {
                    Bottle &result=reply.addList();
                    result.addDouble(point3D[0]);
                    result.addDouble(point3D[1]);
                    result.addDouble(point3D[2]);
                }
                else
                    reply.addVocab(NACK);
            }
            else
            {
                reply.addVocab(NACK);
                reply.addString("Still processing");
            }
            return true;
        }
        else
        {
            reply.addVocab(NACK);
            return true;
        }
    }

    if (command.size()==2)
    {
        if (command.get(0).asInt()!=0 && command.get(1).asInt()!=0)
        {
            middlex=(double)command.get(0).asInt();
            middley=(double)command.get(1).asInt();
            reply.addVocab(ACK);
            return true;
        }
        else
        {
            reply.addVocab(NACK);
            return true;
        }
    }

    reply.addVocab(NACK);
    return true;
}
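A stand-alone sketch of a client driving the RPC protocol documented in the help reply above; the client and server port names are assumptions:

#include <yarp/os/all.h>
using namespace yarp::os;

int main() {
    Network yarp;

    RpcClient rpc;
    rpc.open("/reconstructClient/rpc");
    if (!Network::connect(rpc.getName(), "/object-reconstruction/rpc"))
        return 1;

    Bottle cmd, reply;
    cmd.addInt(160);          // seed pixel of the object, handled by the
    cmd.addInt(120);          // command.size()==2 branch of respond()
    rpc.write(cmd, reply);

    cmd.clear(); reply.clear();
    cmd.addString("3Drec");   // trigger the reconstruction
    rpc.write(cmd, reply);

    return 0;
}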
Example #17
bool SCSPMClassifier::train(Bottle *locations, Bottle &reply)
{
    if(locations==NULL)
        return false;

    string object_name=locations->get(0).asList()->get(0).asString().c_str();

    if(burst)
        currObject=object_name.c_str();


    // Save Features
    if(doTrain)
    {
        Bottle cmdClass;
        cmdClass.addString("save");
        cmdClass.addString(object_name.c_str());

        Bottle classReply;
        printf("Sending training request: %s\n",cmdClass.toString().c_str());
        rpcClassifier.write(cmdClass,classReply);
        printf("Received reply: %s\n",classReply.toString().c_str());
    }


    // Read Image and Locations
    ImageOf<PixelRgb> *image = imgInput.read(true);
    if(image==NULL)
        return false;

    IplImage* img= (IplImage*) image->getIplImage();

    Bottle* bb=locations->get(0).asList()->get(1).asList();
    int x_min=bb->get(0).asInt();
    int y_min=bb->get(1).asInt();
    int x_max=bb->get(2).asInt();
    int y_max=bb->get(3).asInt();

    // pad the bounding box by 5 pixels on each side, staying inside the image
    if(x_min>5)
        x_min=x_min-5;

    if(y_min>5)
        y_min=y_min-5;

    if((x_max+5)<img->width)
        x_max=x_max+5;

    if((y_max+5)<img->height)
        y_max=y_max+5;

    int blobW=x_max-x_min;
    int blobH=y_max-y_min;
    //Crop Image

    cvSetImageROI(img,cvRect(x_min,y_min,blobW,blobH));
    IplImage* croppedImg=cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
    cvCopy(img, croppedImg);

   
    cvResetImageROI(img);

    /*cvShowImage("blob", croppedImg);
    cvShowImage("all", img);
    cvWaitKey(0);*/
    //Send Image to SC
    ImageOf<PixelBgr>& outim=imgOutput.prepare();
    outim.wrapIplImage(croppedImg);
    imgOutput.write();
    cvReleaseImage(&croppedImg);


    //Read Coded Feature
    Bottle fea;
    featureInput.read(fea);

    if(fea.size()==0)
        return false;

    //Send Feature to Classifier
    if(!burst)
    {
        featureOutput.write(fea);
        yarp::os::Time::delay(0.01);
    }
    else
    {
        trainingFeature.push_back(fea);
    }


    // get negatives
    /*if(burst)
    {

        int offW=-3*blobW;
        for (int w=0; w<4; w++)
        {

            int offH=-3*blobH;
            for (int h=0; h<4; h++)
            {

                int x_min_bg=x_min+(offW+1.5*blobW);
                int y_min_bg=y_min+(offH+1.5*blobH);

                if((x_min_bg==x_min) && (y_min_bg==y_min))
                {
                    offH=offH+1.5*blobH;
                    continue;
                }

                if((x_min_bg+blobW)>=img->width || (x_min_bg)<0 || (y_min_bg+blobH)>=img->height || (y_min_bg)<0)
                    continue;

                //printf("W: %d H: %d Limits: %d %d %d %d Size: %d %d \n", w,h,x_min_bg+blobW,x_min_bg,y_min_bg+blobH,y_min_bg,img->height,img->width);

                cvSetImageROI(img,cvRect(x_min_bg,y_min_bg,blobW,blobH));
                IplImage* croppedImg=cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
                cvCopy(img, croppedImg);
                cvResetImageROI(img);
                
                /*cvShowImage("blob", croppedImg);
                cvShowImage("all", img);
                cvWaitKey(0);

                ImageOf<PixelBgr>& outim=imgOutput.prepare();
                outim.wrapIplImage(croppedImg);
                imgOutput.write();
                cvReleaseImage(&croppedImg);


                //Read Coded Feature
                Bottle fea;
                featureInput.read(fea);

                negativeFeature.push_back(fea);
                offH=offH+1.5*blobH;
            }
            offW=offW+1.5*blobW;
        }

    }*/

    //Train Classifier
    if(doTrain)
    {
        Bottle cmdTr;
        cmdTr.addString("train");
        Bottle trReply;
        printf("Sending training request: %s\n",cmdTr.toString().c_str());
        rpcClassifier.write(cmdTr,trReply);
        printf("Received reply: %s\n",trReply.toString().c_str());
    }


    reply.addString("ack");

    return true;
}
Example #18
void AdvancedDirectionalSalience::applyImpl(ImageOf<PixelRgb>& src, 
                          ImageOf<PixelRgb>& dest,
                          ImageOf<PixelFloat>& sal) {
    // time measurement                              
    double tStart = yarp::os::Time::now();
    // block other threads (e.g. configuration)
    _mutex.wait();
    // resizing/init
    sal.resize(src);
    if (_w != src.width() || _h != src.height()){
        _w = src.width();
        _h = src.height();
        _ws = (int)(_w * _scale + 0.5);
        _hs = (int)(_h * _scale + 0.5);
        init(_ws, _hs);
    }
    // scaling if _scale != 1.0
    cvResize((IplImage*)src.getIplImage(), _imgInRgbResized, CV_INTER_LINEAR );
    // convert input to grayscale and float image
    cvCvtColor( _imgInRgbResized, _imgInGray, CV_BGR2GRAY );
    cvConvert(_imgInGray, _imgInFloat);
    // compute D-maps (24 maps: _numSizs*_numDirs * 2 (center & surround))
    for (int s = 0; s < _numSizs; s++){
        for (int d = 0; d < _numDirs; d++){
                // compute gabor maps
                _gabor[s][d]->GaborFiltZeroMean((float*)_imgInFloat->imageData, 
                    (float*)_imgGReal->imageData, 
                    (float*)_imgGImag->imageData);
                // combine real and imaginary parts
                _gabor[s][d]->ComputeMagnitude((float*)_imgGReal->imageData, 
                    (float*)_imgGImag->imageData, 
                    (float*)_imgGabor[s][d]->imageData);
                // center- & surround-maps
                _gaussCen[s]->GaussFilt((float*)_imgGabor[s][d]->imageData, (float*)_imgDCen[s][d]->imageData);
                _gaussSur[s]->GaussFilt((float*)_imgGabor[s][d]->imageData, (float*)_imgDSur[s][d]->imageData);
        }
    }
    // compute DPlus & DMinus maps
    float max = 0.0f;
    int maxX, maxY;
    for (int s = 0; s < _numSizs; s++){
        // sum all directions of size s
        cvZero(_imgDSumCen);
        cvZero(_imgDSumSur);
        for (int d = 0; d < _numDirs; d++){
            cvAdd(_imgDSumCen, _imgDCen[s][d], _imgDSumCen);
            cvAdd(_imgDSumSur, _imgDSur[s][d], _imgDSumSur);
        }
        // create DPlus & DMinus maps for size s
        for (int d = 0; d < _numDirs; d++){
            // subtract the current direction from the total sum (Sum(i!=j))
            cvSub(_imgDSumCen, _imgDCen[s][d], _imgDSumCenEx);
            cvSub(_imgDSumSur, _imgDSur[s][d], _imgDSumSurEx);
            // calculate DPlus
            cvSub(_imgDCen[s][d], _imgDSur[s][d], _imgTmp);
            cvAdd(_imgTmp, _imgDSumSurEx, _imgTmp);
            cvMul(_imgDCen[s][d], _imgTmp, _imgDPlu[s][d]);
            // calculate DMinus
            cvSub(_imgDCen[s][d], _imgDSumCenEx, _imgTmp);
            cvAdd(_imgTmp, _imgDSumSurEx, _imgTmp);
            cvMul(_imgDCen[s][d], _imgTmp, _imgDMin[s][d]);
            // discard negative values
            cvThreshold(_imgDPlu[s][d], _imgDPlu[s][d], 0.0, 0.0, CV_THRESH_TOZERO); // this is probably not necessary
            cvThreshold(_imgDMin[s][d], _imgDMin[s][d], 0.0, 0.0, CV_THRESH_TOZERO); 
            // scale to MAX_SALIENCE 
            // DPlus
            //calcGlobalMaximum(_imgDPlu[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDPlu[s][d], _imgDPlu[s][d], MAX_SALIENCE/max);
            // DMinus
            //calcGlobalMaximum(_imgDMin[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDMin[s][d], _imgDMin[s][d], MAX_SALIENCE/max);
        
            // old:
            // calc maximum, conspicuity factors and scale to MAX_SALIENCE
            // DPlus
            //calcGlobalMaximum(_imgDPlu[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDPlu[s][d], _imgDPlu[s][d], MAX_SALIENCE/max);
            //calcAdvancedConspicuityFactor(_imgDPlu[s][d], _sigs[s]*_sig2wav, maxX, maxY, _conspDPlu[s][d]); 
            //calcConspicuityFactor(_imgDPlu[s][d], _conspDPlu[s][d]); 
            // DMinus
            //calcGlobalMaximum(_imgDMin[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDMin[s][d], _imgDMin[s][d], MAX_SALIENCE/max);
            // calc conspicuity factors
            //calcAdvancedConspicuityFactor(_imgDMin[s][d], _sigs[s]*_sig2wav, maxX, maxY, _conspDMin[s][d]);
            //calcConspicuityFactor(_imgDMin[s][d], _conspDMin[s][d]);
        }
    }
    // center-surround competition to extract isolated peaks
    // M <- |M+M*DoG-Cinh|>0
    for (int s = 0; s < _numSizs; s++){   
        // gaussian filters for each size
        // cen = sigma
        // sur = 4*sigma
        for (int d = 0; d < _numDirs; d++){
            // copy to working imgDPlu/MinComp
            cvCopy(_imgDPlu[s][d], _imgDPluComp[s][d]);
            cvCopy(_imgDMin[s][d], _imgDMinComp[s][d]);
            for (int k = 0; k < 3; k++){
                // imgDPluComp
                // calculate M*DoG
                _gaussCompCen[s]->GaussFilt((float*)_imgDPluComp[s][d]->imageData, (float*)_imgTmpCen->imageData);
                _gaussCompSur[s]->GaussFilt((float*)_imgDPluComp[s][d]->imageData, (float*)_imgTmpSur->imageData);
                cvSub(_imgTmpCen, _imgTmpSur, _imgTmp); // calculate center surround (mexican hat) convolution
                // add M + M&DoG
                cvAdd(_imgDPluComp[s][d], _imgTmp, _imgDPluComp[s][d]);
                // subtract Cinh
                cvSub(_imgDPluComp[s][d], _imgCinh, _imgDPluComp[s][d]);
                // threshold to >0
                cvThreshold(_imgDPluComp[s][d], _imgDPluComp[s][d], 0.0, 0.0, CV_THRESH_TOZERO);
                // imgDMinComp
                // calculate M*DoG
                _gaussCompCen[s]->GaussFilt((float*)_imgDMinComp[s][d]->imageData, (float*)_imgTmpCen->imageData);
                _gaussCompSur[s]->GaussFilt((float*)_imgDMinComp[s][d]->imageData, (float*)_imgTmpSur->imageData);
                cvSub(_imgTmpCen, _imgTmpSur, _imgTmp); // calculate center surround (mexican hat) convolution
                //// add M + M&DoG
                cvAdd(_imgDMinComp[s][d], _imgTmp, _imgDMinComp[s][d]);
                //// subtract Cinh
                cvSub(_imgDMinComp[s][d], _imgCinh, _imgDMinComp[s][d]);
                //// threshold to >0
                cvThreshold(_imgDMinComp[s][d], _imgDMinComp[s][d], 0.0, 0.0, CV_THRESH_TOZERO);
            }
            // scale final _imgD*Comp's
            calcGlobalMaximum(_imgDPluComp[s][d], max, maxX, maxY);
            if (max > 0.0f)
                cvScale(_imgDPluComp[s][d], _imgDPluComp[s][d], MAX_SALIENCE/max);
            calcGlobalMaximum(_imgDMinComp[s][d], max, maxX, maxY);
            if (max > 0.0f)
                cvScale(_imgDMinComp[s][d], _imgDMinComp[s][d], MAX_SALIENCE/max);
        }
    }

    // summing up all maps (normalized)
    cvZero(_imgResult);
    float mapWeight = 1.0f/((float)(2*_numSizs*_numDirs));
    for (int s = 0; s < _numSizs; s++){
        for (int d = 0; d < _numDirs; d++){
            cvScaleAdd(_imgDPlu[s][d], cvScalar(mapWeight), _imgResult, _imgResult);
            cvScaleAdd(_imgDMin[s][d], cvScalar(mapWeight), _imgResult, _imgResult);
        }
    }

    // summing up all maps (normalized)
    /*cvZero(_imgResult);
    for (int s = 0; s < _numSizs; s++){
        for (int d = 0; d < _numDirs; d++){
            cvScaleAdd(_imgDPlu[s][d], cvScalar(_conspDPlu[s][d]), _imgResult, _imgResult);
            cvScaleAdd(_imgDMin[s][d], cvScalar(_conspDMin[s][d]), _imgResult, _imgResult);
        }
    }*/

    // resize/copy back to output image
    cvResize(_imgResult, (IplImage*)sal.getIplImage());
   
    // output debug/analysis of requested map [name, size, direction]
    if (_debug){
        IplImage ***ocvImgArrayDebug = _mapImgTable[_dbgImgArray];
        IplImage *ocvImgDebug = ocvImgArrayDebug[_dbgSizIndex][_dbgDirIndex];
        ImageOf<PixelFloat> &yrpImgDebug = _prtDebug.prepare();
        yrpImgDebug.resize(ocvImgDebug->width, ocvImgDebug->height);
        cvCopy(ocvImgDebug, (IplImage*)yrpImgDebug.getIplImage());
        _prtDebug.write();
    }
    
    // testing output
    //IplImage *test = _imgGImag;
    /*IplImage *test = _imgDCen[0][0];
    max = 0.0f;
    calcGlobalMaximum(test, max);
    cvScale(test, test, 255.0f/max);
    cvResize(test, (IplImage*)sal.getIplImage());*/
    _mutex.post();
    double tEnd = yarp::os::Time::now();
    if (_verbose)
        cout << "Update time: " << tEnd - tStart << " Processed image size: " << _ws << " x " << _hs << " original size: " << _w << " x " << _h << endl;
    // tmp
    //cvConvertScale(_result, (IplImage*)sal.getIplImage(), 1/_scale);
}
Example #19
bool faceTrackerModule::updateModule() {

    unsigned long AAtime=0, BBtime=0; //check processing time
    AAtime = cv::getTickCount(); //check processing time

    ImageOf<PixelRgb> *yarpImageLeft = imagePortLeft.read();
    if (yarpImageLeft == NULL)  // check we actually got something before dereferencing it
        return true;

    //printf("Copying YARP image to an OpenCV/IPL image\n");
    if ( cvIplImageLeft == NULL )
    {
        cvIplImageLeft = cvCreateImage(cvSize(yarpImageLeft->width(),yarpImageLeft->height()),
                            IPL_DEPTH_8U,3);
    }
    cvCvtColor((IplImage*)yarpImageLeft->getIplImage(), cvIplImageLeft, CV_RGB2BGR);

    cv::Mat cvMatImageLeft(cvIplImageLeft);

    //vector< cv::Mat > vImg;
    //cv::Mat rImg;
    //vImg.push_back(cvMatImageLeft);

    if(yarpImageLeft!=NULL)	// always true here thanks to the early return above
    {
        // resize images
        cv::resize(cvMatImageLeft, cvMatImageLeft, cv::Size(320, 240), 0,0,CV_INTER_NN);	//downsample 1/2x

        // convert captured frame to gray scale & equalize
        cv::Mat cvMatGrayImageLeft;
        cv::cvtColor(cvMatImageLeft, cvMatGrayImageLeft, CV_BGR2GRAY);
        cv::equalizeHist(cvMatGrayImageLeft, cvMatGrayImageLeft);

        // ==================================================================
        // face detection routine

        // a vector array to store the face found
        std::vector<cv::Rect> faces_left;

        face_classifier_left.detectMultiScale(cvMatGrayImageLeft, faces_left,
            1.1, // increase search scale by 10% each pass
            3, 	 // merge groups of three detections
            CV_HAAR_DO_CANNY_PRUNING, //CV_HAAR_FIND_BIGGEST_OBJECT, //|CV_HAAR_SCALE_IMAGE, CV_HAAR_DO_CANNY_PRUNING
            cv::Size(30,30),
            cv::Size(50,50));

        int biggest_face_left_idx = 0;
        int biggest_face_left_size_buf = 0;

        // choosing the biggest face
        for(unsigned int i=0; i<faces_left.size(); i++)
        {
            cv::Point flb(faces_left[i].x + faces_left[i].width,
                          faces_left[i].y + faces_left[i].height);
            cv::Point ftr(faces_left[i].x, faces_left[i].y);

            cv::rectangle(cvMatImageLeft, flb, ftr, cv::Scalar(0,255,0), 3,4,0);

            if(biggest_face_left_size_buf < faces_left[i].height)
            {
                biggest_face_left_size_buf = faces_left[i].height;
                biggest_face_left_idx = i;
            }
        }

        //======================================================================================
        // Mode
        prev_encoders = cur_encoders;
        enc->getEncoders(cur_encoders.data());
//			cout << "Encoder: " << cur_encoders[0] << " " << cur_encoders[1] << " "<< cur_encoders[2] << '\n';

        ///////////////////////////
        // To set position mode
        if (mode == 0)
        {
            //-------------------------------------------------------------
            // Going to the set position mode
            if (setpos_counter < 100)
            {
                //cout << setpos_counter << endl;

                setpoints[0] = (0-cur_encoders[0])*0.3;	// common tilt of head
                setpoints[1] = (0-cur_encoders[1])*0.3;	// common roll of head
                setpoints[2] = (0-cur_encoders[2])*0.3;	// common pan of head
                setpoints[3] = (0-cur_encoders[3])*0.3;	// common tilt of eyes
                setpoints[4] = (0-cur_encoders[4])*0.3;	// common pan of eyes

                setpos_counter++;
            }
            else
            {
                printf("Going to the set position is DONE!\n");

                setpoints[0] = 0;
                setpoints[2] = 0;
                setpoints[3] = 0;
                setpoints[4] = 0;

                mode = 1;
                printf("Face searching mode!\n");
                setpos_counter = 0;
            }
            vel->velocityMove(setpoints.data());
        }
        ///////////////////////////
        // Panning mode
        else if (mode == 1)
        {
            if(faces_left.size() > 0)
            {
                mode = 2;
                printf("I find a face!\n");
                panning_counter++;
            }
            else
            {
                //-------------------------------------------------------------
                // panning mode
                //printf("Face searching mode!\n");
                //cout << panning_target << endl;

                setpoints[0] = (tilt_target-cur_encoders[0])*0.3;	// common tilt of head
                setpoints[1] = (0-cur_encoders[1])*0.3;	// common roll of head
                setpoints[2] = (pan_target-cur_encoders[2])*0.3;	// common pan of head
                setpoints[3] = (0-cur_encoders[3])*0.3;	// common tilt of eyes
                setpoints[4] = (0-cur_encoders[4])*0.3;	// common pan of eyes

                if ((abs(tilt_target - cur_encoders[0]) < 1) && (abs(pan_target - cur_encoders[2]) < 1))
                {
                    pan_r = ((double)rand() / ((double)(RAND_MAX)+(double)(1)));
                    tilt_r = ((double)rand() / ((double)(RAND_MAX)+(double)(1)));

                    pan_target = (int)((pan_r*pan_max)-(pan_max/2));
                    tilt_target = (int)((tilt_r*tilt_max)-(tilt_max/2));

                    //cout << pan_target << ", " << tilt_target << endl;
                }
            }

            vel->velocityMove(setpoints.data());
        }
        else if (mode == 2)
        {
            //-------------------------------------------------------------
            // face tracking mode
            if(faces_left.size() > 0)
            {
                double x = 320-(faces_left[biggest_face_left_idx].x + faces_left[biggest_face_left_idx].width/2);
                double y = 240-(faces_left[biggest_face_left_idx].y + faces_left[biggest_face_left_idx].height/2);

                //cout << "x:" << x << " y:" << y << '\n';

                x -= 320/2;
                y -= 240/2;

                double vx = x*0.3;	// Not to move too fast
                double vy = y*0.3;

                /* prepare command */
                for(int i=0;i<jnts;i++)
                {
                    setpoints[i] = 0;
                }

                setpoints[0] = vy;	// common tilt of head
                setpoints[2] = vx;	// common pan of head
                setpoints[3] = vy;	// common tilt of eyes
                setpoints[4] = -vx;	// common pan of eyes

                x_buf = x;
                y_buf = y;
                counter = 0;

            // stopping smoothly
            }
            else if (faces_left.size() == 0 && counter < 10)
            {
                //cout << counter << endl;
                double vx = x_buf*0.3;	// Not to move too fast
                double vy = y_buf*0.3;

                /* prepare command */
                for(int i=0;i<jnts;i++)
                {
                    setpoints[i] = 0;
                }

                setpoints[0] = vy;	// common tilt of head
                setpoints[2] = vx;	// common pan of head
                setpoints[3] = vy;	// common tilt of eyes
                setpoints[4] = -vx;	// common pan of eyes

                counter++;
            }
            else
            {
                printf("Hey! I don't see any face.\n");

                setpoints[0] = 0;
                setpoints[2] = 0;
                setpoints[3] = 0;
                setpoints[4] = 0;

		        Time::delay(0.3);

                stuck_counter++;

                mode = 2;

                if(stuck_counter == 10)
                {
                    if(panning_counter > 5)
                    {
                        mode = 0;
                        printf("To a set position!\n");
                        stuck_counter = 0;
                    }
                    else
                    {
                        mode = 1;
                        printf("Face searching mode!\n");
                        stuck_counter = 0;
                    }
                }
            }
            vel->velocityMove(setpoints.data());

        }

        cv::imshow("cvImage_Left", cvMatImageLeft);

    }

    BBtime = cv::getTickCount(); //check processing time

    cv::waitKey(1);

    return true;
}
Example #20
void DispBlobberPort::onRead(ImageOf<PixelBgr> &input)
{
    mutex.wait();

    /* Get the envelope from the input image */

    Stamp stamp;
    BufferedPort<ImageOf<PixelBgr>  >::getEnvelope(stamp);

    /* Prepare output data structures */

    std::vector<int> centroid;
    std::vector<int> roi;
    cv::Mat blobMat;

    /* Prepare the buffer, call the extractor, clear the buffer */

    cv::Mat inputMat=cv::cvarrToMat((IplImage*)input.getIplImage());
    imagesMatBuffer.push_back(inputMat);

    double blobSize = blobExtractor->extractBlob(imagesMatBuffer, roi, centroid, blobMat);

    if (blobSize>0)
    {

        if (optOutPort.getOutputCount()>0)
        {
            yarp::sig::ImageOf<yarp::sig::PixelMono> &blobImage = optOutPort.prepare();
            blobImage.resize(blobMat.cols, blobMat.rows);

            cv::Mat blobImageMat=cv::cvarrToMat((IplImage*)blobImage.getIplImage());
            blobMat.copyTo(blobImageMat);

            optOutPort.setEnvelope(stamp);
            optOutPort.write();
        }

        int x = centroid[0];
        int y = centroid[1];

        int dx = ( (cropSize>0) ? cropSize : (roi[2]-roi[0]) );
        int dy = ( (cropSize>0) ? cropSize : (roi[3]-roi[1]) );

        int dx2 = dx>>1;
        int dy2 = dy>>1;

        int tlx = std::max(x-dx2,0);
        int tly = std::max(y-dy2,0);
        int brx = std::min(x+dx2,blobMat.cols-1);
        int bry = std::min(y+dy2,blobMat.rows-1);

        if (cropOutPort.getOutputCount()>0)
        {
            cv::Rect roiRegion = cv::Rect(cv::Point( tlx, tly ), cv::Point( brx, bry ));

            yarp::sig::ImageOf<yarp::sig::PixelBgr> &cropImage = cropOutPort.prepare();
            cropImage.resize(roiRegion.width, roiRegion.height);

            cv::Mat cropImageMat=cv::cvarrToMat((IplImage*)cropImage.getIplImage());
            imagesMatBuffer.back()(roiRegion).copyTo(cropImageMat);

            cropOutPort.setEnvelope(stamp);
            cropOutPort.write();
        }

        if (roiOutPort.getOutputCount()>0)
        {
            Bottle roisBottle;

            Bottle &roiBottle = roisBottle.addList();
            roiBottle.addInt(tlx);
            roiBottle.addInt(tly);
            roiBottle.addInt(brx);
            roiBottle.addInt(bry);

            roiOutPort.prepare() = roisBottle;

            roiOutPort.setEnvelope(stamp);
            roiOutPort.write();
        }

        if (blobsOutPort.getOutputCount()>0)
        {
            Bottle blobsBottle;

            Bottle &blobBottle = blobsBottle.addList();
            blobBottle.addInt(centroid[0]);
            blobBottle.addInt(centroid[1]);
            blobBottle.addInt((int)(blobSize+0.5f));

            blobsOutPort.prepare() = blobsBottle;

            blobsOutPort.setEnvelope(stamp);
            blobsOutPort.write();
        }

        if (roiOutPortRight.getOutputCount()>0)
        {
            Bottle cmd_sfm, reply_sfm;

            cmd_sfm.addInt(tlx);
            cmd_sfm.addInt(tly);
            sfmRpcPort.write(cmd_sfm,reply_sfm);

            Bottle roisBottle;
            Bottle &roiBottle = roisBottle.addList();

            if (reply_sfm.size()>0)
            {
                double tlX = reply_sfm.get(0).asDouble();
                double tlY = reply_sfm.get(1).asDouble();
                double tlZ = reply_sfm.get(2).asDouble();

                if (!(tlX==0 && tlY==0 && tlZ==0))
                {
                    int tlur = reply_sfm.get(3).asInt();
                    int tlvr = reply_sfm.get(4).asInt();

                    roiBottle.addInt(tlur);
                    roiBottle.addInt(tlvr);
                }
            }

            cmd_sfm.clear();
            reply_sfm.clear();

            cmd_sfm.addInt(brx);
            cmd_sfm.addInt(bry);
            sfmRpcPort.write(cmd_sfm,reply_sfm);

            if (reply_sfm.size()>0)
            {
                double brX = reply_sfm.get(0).asDouble();
                double brY = reply_sfm.get(1).asDouble();
                double brZ = reply_sfm.get(2).asDouble();

                if (!(brX==0 && brY==0 && brZ==0))
                {
                    int brur = reply_sfm.get(3).asInt();
                    int brvr = reply_sfm.get(4).asInt();

                    roiBottle.addInt(brur);
                    roiBottle.addInt(brvr);
                }
            }

            if (roiBottle.size()>0)
            {
                roiOutPortRight.prepare() = roisBottle;
                roiOutPortRight.setEnvelope(stamp);
                roiOutPortRight.write();
            }

        }

        if (blobsOutPortRight.getOutputCount()>0 || points3dOutPort.getOutputCount()>0)
        {
            Bottle cmd_sfm, reply_sfm;

            cmd_sfm.addInt(centroid[0]);
            cmd_sfm.addInt(centroid[1]);
            sfmRpcPort.write(cmd_sfm,reply_sfm);

            if (reply_sfm.size()>0)
            {
                double X = reply_sfm.get(0).asDouble();
                double Y = reply_sfm.get(1).asDouble();
                double Z = reply_sfm.get(2).asDouble();

                if (points3dOutPort.getOutputCount()>0)
                {
                    Bottle points3dBottle;
                    Bottle &point3dBottle = points3dBottle.addList();

                    point3dBottle.addDouble(X);
                    point3dBottle.addDouble(Y);
                    point3dBottle.addDouble(Z);

                    points3dOutPort.prepare() = points3dBottle;
                    points3dOutPort.setEnvelope(stamp);
                    points3dOutPort.write();
                }

                if (blobsOutPortRight.getOutputCount()>0)
                {
                    if (!(X==0.0 && Y==0.0 && Z==0.0))
                    {
                        int ur = reply_sfm.get(3).asInt();
                        int vr = reply_sfm.get(4).asInt();

                        Bottle blobsBottle;
                        Bottle &blobBottle = blobsBottle.addList();

                        blobBottle.addInt(ur);
                        blobBottle.addInt(vr);

                        blobsOutPortRight.prepare() = blobsBottle;
                        blobsOutPortRight.setEnvelope(stamp);
                        blobsOutPortRight.write();
                    }
                }
            }
        }

    }

    imagesMatBuffer.erase(imagesMatBuffer.begin());

    mutex.post();

}
Example #21
int main()
{
    Network yarp; // set up yarp
    BufferedPort<ImageOf<PixelRgb> > imagePort;
    BufferedPort<Vector> targetPort;
    imagePort.open("/tutorial/image/in");
    targetPort.open("/tutorial/target/out");
    //Network::connect("/icubSim/cam/left","/tutorial/image/in");
    Network::connect("/icub/cam/left","/tutorial/image/in");


    IplImage* object_color;
    while (1)
    {   // repeat forever
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL)
        {
            printf("We got an image of size %dx%d\n", image->width(), image->height());


            // the IplImage returned by getIplImage() is owned by the YARP image
            // and must not be released with cvReleaseImage
            IplImage *frame_temp = (IplImage*)image->getIplImage();

            IplImage *frames = cvCreateImage( cvSize(frame_temp->width,  frame_temp->height), frame_temp->depth, 3 );

            cvCvtColor(frame_temp,frames, CV_RGB2BGR);
            frames->origin=0;

            cvNamedWindow("cNameselect",CV_WINDOW_AUTOSIZE);
            cvShowImage("cNameselect",frames);

            char c = cvWaitKey(33);
            if(c==27)
            {
                // copy the selected frame before releasing the display buffer
                object_color = cvCreateImage( cvSize(frames->width,  frames->height), frames->depth, 3 );
                cvCopy(frames,object_color);
                cvReleaseImage(&frames);
                break;
            }
            else
            {
                cvReleaseImage(&frames);
            }
        }
    }
    cvNamedWindow("cNameoriginal",CV_WINDOW_AUTOSIZE);
    cvShowImage("cNameoriginal",object_color);


    while (1)
    {
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL)
        {
            printf("We got an image of size %dx%d\n", image->width(), image->height());

            // getIplImage() returns a header owned by the YARP image;
            // allocating a separate image here would only leak
            IplImage *frame_temp = (IplImage*)image->getIplImage();

            IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width,  frame_temp->height), IPL_DEPTH_8U, 1 );

            cvCvtColor(frame_temp,frame1, CV_RGB2GRAY);



            //send values to the lookatlocation
            Vector& target = targetPort.prepare();
            target.resize(3);
            target[0] = xmean;
            target[1] = ymean;
            target[2] = 1;
            targetPort.write();

            cvReleaseImage(&frame1);
        }

    }
    return 0;
}
Example #22
bool ARMarkerDetectorModule::updateModule()
{
    int i, j;

    ImageOf<PixelRgb> *yrpImgIn = _imgPort.read();

    if (yrpImgIn == NULL)   // this is the case if module is requested to quit while waiting for image
        return true;

    // the chroma computation below only reads pixels, so the same frame can be reused
    // instead of reading the port twice (which would fetch two different frames)
    ImageOf<PixelRgb> *yrpCopyImgIn = yrpImgIn;

	double auxtime, aux;

	auxtime = yarp::os::Time::now();

	aux = auxtime - timelastround;

	printf( "%g ms %g fps \n", aux*1000, 1/aux );

	timelastround = auxtime;



	IplImage *iplimg = (IplImage*)yrpImgIn->getIplImage();

	tracker.detectMarkers( iplimg );

	cvWaitKey( 5 );

	Bottle &b = _outPort.prepare();
	b.clear();
	bool detected = false;

	for(int cnt=0; cnt<tracker.nobjects(); cnt++)
	{
		if( tracker.isObjectDetected(cnt) )
		{    
				double T[3][4];
				tracker.getMatrix(cnt,T);
				char name[256];
				tracker.getObjectName(cnt, name);
				printf("%s\nT\n",name);
				b.addString( name );
				b.addString( "T");
				for(i = 0; i < 3; i++ )
				{
					b.addDouble( T[i][0]);
					b.addDouble( T[i][1]);
					b.addDouble( T[i][2]);
					b.addDouble( T[i][3]);
					printf("%g %g %g %g\n",T[i][0],T[i][1],T[i][2],T[i][3]);
				}

				int cx, cy;
				tracker.getCenter( cnt, &cx, &cy);
				b.addString("imgcoord");
				b.addDouble( cx);
				b.addDouble( cy);
                               
                                
				b.addString("size");
				b.addDouble( tracker.getSize(cnt));
				b.addString("range");
				b.addDouble(sqrt(T[0][3]*T[0][3] + T[1][3]*T[1][3] + T[2][3]*T[2][3]));
				
				b.addString("imgcoordnorm");
				b.addDouble( 2.0 * cx / (double)iplimg->width  - 1.0 );
				b.addDouble( - (2.0 * cy / (double)iplimg->height - 1.0) );

				printf("img coord\n %d %d\t size %d\n", cx, cy,  tracker.getSize(cnt));
				printf("range: %f\n",sqrt(T[0][3]*T[0][3] + T[1][3]*T[1][3] + T[2][3]*T[2][3]));
				printf("imgcoordnorm\n %1.3g %1.3g\n\n",
						2.0 * cx / (double)iplimg->width  - 1.0 ,
						- (2.0 * cy / (double)iplimg->height - 1.0));
                                // edges
                                int x0,y0,x1,y1,x2,y2,x3,y3;
                                tracker.getEdge(cnt,0,&x0,&y0);
                                tracker.getEdge(cnt,1,&x1,&y1);
                                tracker.getEdge(cnt,2,&x2,&y2);
                                tracker.getEdge(cnt,3,&x3,&y3);
                                printf("Edges:\n %d %d - %d %d - %d %d - %d %d\n\n",x0,y0,x1,y1,x2,y2,x3,y3);
                                
                                // compute the simple colour using  chroma comparison - yields Red, green or Blue only!
                                
                                float chromaTable[3];   // in this program we simply normalise for the 3 colours
     
                                for (int i=0;i<3;i++) 
                                {
                                   chromaTable[i] = 0;
                                }
                                
                                int l = sqrt((double)tracker.getSize(cnt))/2.0;        // this gives a slightly wider area than the ART central image - allows the bounding ART square to be coloured   

                                int X1,Y1,X2,Y2;
                                
                                X1 = cx - l;
                                Y1 = cy - l;
                                X2   = cx + l;
                                Y2   = cy + l;
                                             
                                
                     //           printf("x1 y1: %d %d   x2 y2: %d %d\n",X1,Y1,X2,Y2);
                    //            printf("=====================================\n");                                
                                // clamp the sampling square to the image bounds
                                if (X1 < 0) X1 = 0;
                                if (Y1 < 0) Y1 = 0;
                                if (X2 > yrpCopyImgIn->width())  X2 = yrpCopyImgIn->width();
                                if (Y2 > yrpCopyImgIn->height()) Y2 = yrpCopyImgIn->height();

                                for (int x = X1; x < X2; x++)
                                {
                                   for (int y = Y1; y < Y2; y++)
                                   {
                                      PixelRgb& pixel = yrpCopyImgIn->pixel(x,y);
                                      float sum = (float)pixel.r + (float)pixel.g + (float)pixel.b;
                                      if (sum <= 0.0f)
                                         continue;   // black pixel: chroma undefined, avoid division by zero

                                      chromaTable[0] += (float)pixel.r / sum;
                                      chromaTable[1] += (float)pixel.g / sum;
                                      chromaTable[2] += (float)pixel.b / sum;
                                   }
                                }
                      //          for (int i=0; i < 3; i++)
                      //          {
                      //             printf("%f ",chromaTable[i]); 
                      //          }
                      //          printf( " \n"); 
                      //          printf("=====================================\n");  
                                if ((chromaTable[0] > chromaTable[1]) && (chromaTable[0] > chromaTable[2]))
                                {
                                   printf("CHROMA: Red\n");
                                   b.addString("red");
                                   b.addInt(4);    // the numbers represent the position of red in a 3 bit colour space
                                                   // in case we expand the selection later
                                }
      
                                if ((chromaTable[1] > chromaTable[0]) && (chromaTable[1] > chromaTable[2]))
                                {
                                   printf("CHROMA: Green\n");
                                   b.addString("green");
                                   b.addInt(2);    // the numbers represent the position of green in a 3 bit colour space
                                                   // in case we expand the selection later
                                }      
      
                                if ((chromaTable[2] > chromaTable[0]) && (chromaTable[2] > chromaTable[1]))
                                {
                                   printf("CHROMA: Blue\n");
                                   b.addString("blue");
                                   b.addInt(1);    // the numbers represent the position of blue in 3 bit colour space
                                                   // in case we expand the selection later
                                }  
                                // append the acquisition timestamp taken from the image envelope
                                Bottle time;
                                _imgPort.getEnvelope(time);
                                b.addDouble(time.get(0).asDouble());

                                detected = true;
		}
	}
	

	_outPort.write();

//        optional image output (disabled):
//        imageOut.prepare() = *yrpCopyImgIn;
//        imageOut.write();
    return true;
}
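// ---------------------------------------------------------------------------
// Sketch (added for illustration, not part of the original example): the chroma
// test above, factored into a helper. It assumes a yarp::sig::ImageOf<PixelRgb>
// window whose corners (X1,Y1)-(X2,Y2) are already clamped to the image; the
// return codes 4/2/1 mirror the 3-bit colour codes written to the bottle above.
#include <yarp/sig/Image.h>

static int dominantChroma(yarp::sig::ImageOf<yarp::sig::PixelRgb> &img,
                          int X1, int Y1, int X2, int Y2)
{
    float chroma[3] = {0.0f, 0.0f, 0.0f};                 // accumulated r,g,b chromaticity
    for (int x = X1; x < X2; x++) {
        for (int y = Y1; y < Y2; y++) {
            yarp::sig::PixelRgb &p = img.pixel(x, y);
            float sum = (float)p.r + (float)p.g + (float)p.b;
            if (sum <= 0.0f)
                continue;                                 // black pixel: chroma undefined
            chroma[0] += (float)p.r / sum;
            chroma[1] += (float)p.g / sum;
            chroma[2] += (float)p.b / sum;
        }
    }
    if (chroma[0] > chroma[1] && chroma[0] > chroma[2]) return 4;  // red
    if (chroma[1] > chroma[0] && chroma[1] > chroma[2]) return 2;  // green
    return 1;                                                      // blue
}
// ---------------------------------------------------------------------------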
int main() {
  Network yarp; // set up yarp
  BufferedPort<ImageOf<PixelRgb> > imagePort;  // a BufferedPort instance that sends/receives an RGB image in the background without blocking the process
  BufferedPort<Vector> targetPort; // a BufferedPort instance that sends/receives a Vector

  imagePort.open("/tutorial/image/in");  // give the port a name
  targetPort.open("/tutorial/target/out");
  //Network::connect("/icubSim/cam/left","/tutorial/image/in");
  Network::connect("/icub/cam/left","/tutorial/image/in"); // the /icub/cam/left port is opened by the camera module

//training part

  while (1) { // repeat forever
    ImageOf<PixelRgb> *image = imagePort.read();  // read an image (blocking); 'image' points to the buffer owned by the port
    if (image!=NULL) { // check we actually got something
      printf("We got an image of size %dx%d\n", image->width(), image->height());

      // pramod added nov 15
      IplImage *frame_temp = (IplImage*)image->getIplImage();   // wraps the YARP buffer, no copy

      IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width, frame_temp->height), IPL_DEPTH_8U, 1 );
      cvCvtColor(frame_temp, frame1, CV_RGB2GRAY);
      cvSaveImage("object.pgm", frame1);   // grayscale training frame for the SIFT extractor

      cvNamedWindow("cNameselect", CV_WINDOW_AUTOSIZE);
      cvShowImage("cNameselect", frame1);
      cvReleaseImage(&frame1);
      char c = cvWaitKey(33);
      if (c==27) break;   // ESC stops capturing; the last saved frame is the training image
    }
}
printf("breaked");
		system("/home/gabriele/siftWin32.exe <object.pgm >tmp1.txt");
		IplImage *image1 = cvLoadImage("object.pgm");

		FILE* fp1=fopen("tmp1.txt","rb");
		float *arr1x; float *arr1y; int *arr2;float scale,ori;int temp1, temp2;

			fscanf(fp1,"%d %d \n",&temp1, &temp2);
			//printf("%d %d \n",temp1,temp2);
			//getchar();
			arr1x=(float*)malloc(sizeof(float)*temp1);
			arr1y=(float*)malloc(sizeof(float)*temp1);
			arr2=(int*)malloc(sizeof(int)*temp2*temp1);
			for(int i2=1;i2<=temp1;i2++)
			{
				//printf("temp1 %d \n",i2);
				fscanf(fp1,"%f %f %f %f \n", &arr1x[i2-1], &arr1y[i2-1], &scale,&ori);
				//printf("%f %f %f %f \n", arr1x[i2-1], arr1y[i2-1], scale,ori);
				//getchar();
				for(int i3=1;i3<=temp2;i3++)
				{
					fscanf(fp1,"%d ", &arr2[(i2-1)*temp2+(i3-1)]);
					//printf("%d ", arr2[(i2-1)*temp2+(i3-1)]);
					
				}
				fscanf(fp1,"\n");
				cvCircle(image1,cvPoint((int)arr1x[i2-1],(int)arr1y[i2-1]),3,cvScalar(0,255,255,255),1);
				//getchar();
			}


		cvNamedWindow("cNameobject",CV_WINDOW_AUTOSIZE);
	cvShowImage("cNameobject",image1);
		printf("first one finished \n");
		fclose(fp1);

//training part ends


  while (1) { // repeat forever
    ImageOf<PixelRgb> *image = imagePort.read();  // read an image (blocking), as above
    if (image!=NULL) { // check we actually got something
      printf("We got an image of size %dx%d\n", image->width(), image->height());

      // pramod added nov 15
      IplImage *frame_temp = (IplImage*)image->getIplImage();   // wraps the YARP buffer, no copy

      IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width, frame_temp->height), IPL_DEPTH_8U, 1 );
      cvCvtColor(frame_temp, frame1, CV_RGB2GRAY);
      cvSaveImage("object_test.pgm", frame1);   // current grayscale frame, matched against the training image

      cvNamedWindow("cNameoriginal", CV_WINDOW_AUTOSIZE);
      cvShowImage("cNameoriginal", frame_temp);

//    cvWaitKey(0);
//pramod added ends


		system("/home/gabriele/siftWin32.exe <object_test.pgm >tmp2.txt");
		IplImage *image2 = cvLoadImage("object_test.pgm");
		FILE* fp1new=fopen("tmp2.txt","rb");
		float *arr1xnew; float *arr1ynew; int *arr2new;float scalenew,orinew;int temp1new, temp2new;
		//while(fp1new!=NULL)
		{
			fscanf(fp1new,"%d %d \n",&temp1new, &temp2new);
			//printf("%d %d \n",temp1new,temp2new);
			//getchar();
			arr1xnew=(float*)malloc(sizeof(float)*temp1new);
			arr1ynew=(float*)malloc(sizeof(float)*temp1new);
			arr2new=(int*)malloc(sizeof(int)*temp2new*temp1new);
			for(int i2new=1;i2new<=temp1new;i2new++)
			{
				fscanf(fp1new,"%f %f %f %f \n", &arr1xnew[i2new-1], &arr1ynew[i2new-1], &scalenew,&orinew);
				//printf("%f %f %f %f \n", arr1xnew[i2new-1], arr1ynew[i2new-1], scalenew,orinew);
				//getchar();
				for(int i3new=1;i3new<=temp2new;i3new++)
				{
					fscanf(fp1new,"%d ", &arr2new[(i2new-1)*temp2new+(i3new-1)]);
					//printf("%d ", arr2new[(i2new-1)*temp2new+(i3new-1)]);
					
				}
				fscanf(fp1new,"\n");
				cvCircle(image2,cvPoint((int)arr1xnew[i2new-1],(int)arr1ynew[i2new-1]),3,cvScalar(0,255,255,255),1);
				//getchar();
				
			}

		}
	cvNamedWindow("cNametest",CV_WINDOW_AUTOSIZE);
	cvShowImage("cNametest",image2);
		//matching
		//IplImage *imagematch = cvLoadImage("blue2.pgm");
		float *bestmatchx = (float*)malloc(sizeof(float)*temp1);
		float *bestmatchy = (float*)malloc(sizeof(float)*temp1);
		for (int i4=1;i4<=temp1;i4++)
		{
			float bestval = 1000000.0;
			for(int i5=1;i5<=temp1new;i5++)
			{
				double value=0.0;
				for(int i6=1;i6<=temp2new;i6++)
				{
					double temp = arr2[(i4-1)*temp2+(i6-1)]-arr2new[(i5-1)*temp2new+(i6-1)];
					value+= temp*temp;
				}
				value = sqrt(value)/temp2new;
				if (value < bestval)
				{
					bestval=value;
					bestmatchx[i4-1]=arr1xnew[i5-1];
					bestmatchy[i4-1]=arr1ynew[i5-1];
				}
			}
			cvCircle(frame1,cvPoint((int)bestmatchx[i4-1],(int)bestmatchy[i4-1]),3,cvScalar(0,255,255,255),1);
		}
		double xmean = 0.0; double ymean = 0.0;
		for(int i7=1;i7<=temp1;i7++)
		{
			xmean += bestmatchx[i7-1];
			ymean += bestmatchy[i7-1];
		}
		xmean = xmean/temp1;
		ymean = ymean/temp1;
		printf("%lf %lf",xmean,ymean);
		//getchar();
		//cvSaveImage("savematch.jpg",imagematch);
		//cvReleaseImage(&imagematch);

    cvCircle(frame1,cvPoint((int)xmean,(int)ymean),10,cvScalar(0,255,255,0));
	cvNamedWindow("cNamematch",CV_WINDOW_AUTOSIZE);
	cvShowImage("cNamematch",frame1);
   cvWaitKey(33);   // give HighGUI a chance to refresh the windows

//int xmean=0;int ymean=0;
         Vector& target = targetPort.prepare();
         target.resize(3);
         target[0] = xmean;
         target[1] = ymean;
         target[2] = 1;
         targetPort.write();

cvReleaseImage(&image2);
free(arr1xnew); free(arr1ynew); free(arr2new);   // allocated each iteration
free(bestmatchx); free(bestmatchy);
cvReleaseImage(&frame1);                         // created each iteration
    }

  }
  cvReleaseImage(&image1);
  return 0;
}
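// ---------------------------------------------------------------------------
// Sketch (added for illustration, not part of the original program): matching
// one training descriptor against all test descriptors with Lowe's ratio test,
// over the same flat layout used above (desc[k*dim + d], 'dim' ints per
// keypoint as read from siftWin32's output, assuming temp2 == temp2new, i.e.
// 128 components for both files). The loop above keeps the closest descriptor
// unconditionally; rejecting matches whose best distance is not clearly below
// the second best discards most false correspondences.
#include <cmath>

static int matchWithRatioTest(const int *descTrain, int k,
                              const int *descTest, int nTest, int dim,
                              double ratio = 0.8)
{
    double best = 1e18, second = 1e18;
    int bestIdx = -1;
    for (int j = 0; j < nTest; j++) {
        double d2 = 0.0;
        for (int d = 0; d < dim; d++) {
            double diff = (double)descTrain[k*dim + d] - (double)descTest[j*dim + d];
            d2 += diff * diff;                 // squared Euclidean distance
        }
        if (d2 < best)        { second = best; best = d2; bestIdx = j; }
        else if (d2 < second) { second = d2; }
    }
    if (second > 0.0 && std::sqrt(best / second) < ratio)
        return bestIdx;                        // unambiguous match
    return -1;                                 // ambiguous: caller should skip it
}
// usage inside the matching loop above (hypothetical adaptation):
//   int j = matchWithRatioTest(arr2, i4-1, arr2new, temp1new, temp2, 0.8);
//   if (j >= 0) { bestmatchx[i4-1] = arr1xnew[j]; bestmatchy[i4-1] = arr1ynew[j]; }
// ---------------------------------------------------------------------------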
void TRACKERManager::onRead(ImageOf<yarp::sig::PixelRgb> &img)
{
    ImageOf<PixelRgb> &outImg = imageOutPort.prepare();
    outImg = img;
    orig = (IplImage*)img.getIplImage();

    mutex.wait();

    std::map< unsigned int, ParticleThread* >::iterator itr;
    for (itr = workerThreads.begin(); itr!=workerThreads.end(); itr++)
    {
        itr->second->update(orig);   // feed the new frame to each particle tracker
    }
    mutex.post();

    if (flag)
    {
        if (!deleted)
        {
            yDebug() << "[TRACKERManager::onRead] will now delete TRACKER";
            stopTracker(toDel[0]);
            
            toDel.clear();
            
            deleted = true;
            yDebug() << "[TRACKERManager::onRead] deleted TRACKER";
        }
    }

    Bottle &b = targetOutPort.prepare();
    b.clear();
    container.lock();

    for (TargetObjectRecord::iterator itr = container.targets.begin(); itr!=container.targets.end(); itr++)
    {
        TargetObject *obj = *itr;
        //send template and segmentation only once
        if (!obj->hasSent)
        {
            ImageOf<PixelBgr> &segImg = imageSegOutPort.prepare();
            segImg.resize( obj->seg->width, obj->seg->height );
            cvCopyImage(obj->seg, (IplImage*)segImg.getIplImage());
            imageSegOutPort.write();

            ImageOf<PixelBgr> &segCrop = imageTplOutPort.prepare();
            segCrop.resize(obj->tpl->width, obj->tpl->height);
            cvCopyImage(obj->tpl, (IplImage*)segCrop.getIplImage());
            imageTplOutPort.write();

            obj->hasSent = true;
        }
        if (obj->boundingBox.size())
        {
            bool send = true;
            for (int i = 0; i < pausedThreads.size(); i++  )
                if (obj->owner == pausedThreads[i])
                    send = false;
            
            if (send)
            {
                Bottle &t = b.addList();
                t.addDouble(obj->owner);
                for (int i=0; i<obj->boundingBox.size(); i++)
                    t.addDouble(obj->boundingBox[i]);

                t.addInt(obj->group);

                int x0 = obj->boundingBox[2];
                int y0 = obj->boundingBox[3];
                int x1 = obj->boundingBox[4];
                int y1 = obj->boundingBox[5];
                int cx = obj->boundingBox[0];
                int cy = obj->boundingBox[1];

                if (disParticles)
                {
                    for (int j = 0; j < obj->particlePoints.size(); j++ )
                        cvCircle ((IplImage*)outImg.getIplImage(), obj->particlePoints[j], 3,  obj->colour, 1);
                }

                cvRectangle( (IplImage*)outImg.getIplImage(), cvPoint( x0, y0 ), cvPoint( x1, y1 ), obj->colour, 1, 8, 0 );

                cvCircle ( (IplImage*)outImg.getIplImage(), cvPoint( cx, cy ), 3, CV_RGB(255, 0 , 0), 1 );
                cvCircle ( (IplImage*)outImg.getIplImage(), cvPoint( x0, y0 ), 3, CV_RGB(0, 255 , 0), 1 );
                cvCircle ( (IplImage*)outImg.getIplImage(), cvPoint( x1, y1 ), 3, CV_RGB(0, 255 , 0), 1 );

                int sampleCount = obj->particlePoints.size();
                int dimensions = 2;
                int clusterCount = 2;
                cv::Mat points(sampleCount, dimensions, CV_32F);
                cv::Mat labels;
                cv::Mat centers(clusterCount, dimensions, points.type());
                for(int i = 0; i<sampleCount;i++)
                {
                    points.at<float>(i,0) = obj->particlePoints[i].x;
                    points.at<float>(i,1) = obj->particlePoints[i].y;
                }

    #ifdef KMEANS_WITH_POINTER

                cv::kmeans(points, clusterCount, labels, cv::TermCriteria( cv::TermCriteria::EPS+cv::TermCriteria::COUNT, 1, 10.0), 3, cv::KMEANS_PP_CENTERS, &centers);
    #else
                cv::kmeans(points, clusterCount, labels, cv::TermCriteria( cv::TermCriteria::EPS+cv::TermCriteria::COUNT, 1, 10.0), 3, cv::KMEANS_PP_CENTERS, centers);
    #endif
                cv::Point pts[2];   // one point per cluster centre (clusterCount==2)

                for (int i = 0; i < clusterCount; i++)
                {
                    cv::Point ipt;
                    ipt.x = (int) centers.at<float>(i,0);
                    ipt.y = (int) centers.at<float>(i,1);

                    pts[i] = ipt;

                    //cvCircle ((IplImage*)outImg.getIplImage(), ipt, 5,  CV_RGB(0, 0 , 255), CV_FILLED, CV_AA);//obj->colour,  CV_FILLED, CV_AA);
                }
                int limits_x = abs(pts[0].x - pts[1].x );
                int limits_y = abs(pts[0].y - pts[1].y );

                int mutationThres = 30;

                if ( limits_x > mutationThres || limits_y > mutationThres)
                {
                    if (!flag)
                    {
                        cvCircle ((IplImage*)outImg.getIplImage(), pts[0], 5,  CV_RGB(255, 0 , 255), CV_FILLED, CV_AA);
                        cvCircle ((IplImage*)outImg.getIplImage(), pts[1], 5,  CV_RGB(255, 0 , 255), CV_FILLED, CV_AA);
                        //cvSaveImage("output.png", (IplImage*)outImg.getIplImage());
                        //cloneTracker(obj, pts);
                    }
                }
            }
        }
    }
    
    container.unlock();
    if (b.size())
        targetOutPort.write();

    imageOutPort.write();
}
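// ---------------------------------------------------------------------------
// Sketch (added for illustration only): the k-means "mutation" test used in
// onRead() above, isolated. Given the particle cloud of one tracker it returns
// true when the two cluster centres drift further apart than 'thres' pixels,
// i.e. when the particles likely straddle two distinct objects. The function
// name and threshold default are assumptions, not part of the module.
#include <vector>
#include <cmath>
#include <opencv2/core/core.hpp>

static bool particlesSplit(const std::vector<cv::Point> &pts, int thres = 30)
{
    if (pts.size() < 2)
        return false;                                   // nothing to cluster
    cv::Mat points((int)pts.size(), 2, CV_32F), labels, centers;
    for (size_t i = 0; i < pts.size(); i++) {
        points.at<float>((int)i, 0) = (float)pts[i].x;
        points.at<float>((int)i, 1) = (float)pts[i].y;
    }
    cv::kmeans(points, 2, labels,
               cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 1, 10.0),
               3, cv::KMEANS_PP_CENTERS, centers);
    float dx = std::fabs(centers.at<float>(0,0) - centers.at<float>(1,0));
    float dy = std::fabs(centers.at<float>(0,1) - centers.at<float>(1,1));
    return dx > thres || dy > thres;                    // centres too far apart
}
// ---------------------------------------------------------------------------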
Exemple #25
0
void CalibModule::onRead(ImageOf<PixelMono> &imgIn)
{
    mutex.lock();

    Vector kinPoint,o;
    iarm->getPose(kinPoint,o);

    Vector c,tipl(2,0.0),tipr(2,0.0);
    igaze->get2DPixel(0,kinPoint,c);

    ImageOf<PixelBgr> imgOut;
    cv::Rect rect=extractFingerTip(imgIn,imgOut,c,tipl);
    
    bool holdImg=false;
    if (motorExplorationState==motorExplorationStateLog)
    {
        cv::Scalar color=cv::Scalar(0,0,255);
        string tag="discarded";

        if (enabled && (depthRpcPort.getOutputCount()>0))
        {
            Vector depthPoint;
            if (getDepthAveraged(tipl,depthPoint,tipr))
            {
                // consistency check
                double dist=norm(depthPoint-kinPoint);
                if (dist<max_dist)
                {
                    color=cv::Scalar(0,255,0);
                    tag="matched";

                    if (exp_depth2kin)
                    {
                        calibrator->addPoints(depthPoint,kinPoint);
                        yInfo("collecting calibration points: p_depth=(%s); p_kin=(%s);",
                              depthPoint.toString(3,3).c_str(),kinPoint.toString(3,3).c_str());

                        tag="acquired";
                    }

                    if (exp_aligneyes)
                    {
                        Vector kinPoint4=kinPoint;
                        kinPoint4.push_back(1.0);

                        Vector xe,oe,kinPoint_e;
                        Matrix He;

                        igaze->getLeftEyePose(xe,oe);
                        He=axis2dcm(oe);
                        xe.push_back(1.0);
                        He.setCol(3,xe);
                        kinPoint_e=SE3inv(He)*kinPoint4;

                        aligner.addPoints(tipl,kinPoint_e);
                        yInfo("collecting points for aligning eye: tip=(%s); p_kin=(%s);",
                              tipl.toString(3,3).c_str(),kinPoint_e.toString(3,3).c_str());

                        tag="acquired";
                    }
                }
                else
                    yInfo("discarding calibration points: p_depth=(%s); p_kin=(%s); (|p_depth-p_kin|=%g)>%g",
                          depthPoint.toString(3,3).c_str(),kinPoint.toString(3,3).c_str(),dist,max_dist);
            }
            else
            {
                yInfo("unavailable depth; discarding...");
                tag="no-depth";
            }
        }

        cv::Mat img=cv::cvarrToMat((IplImage*)imgOut.getIplImage());
        cv::putText(img,tag.c_str(),cv::Point(rect.x+5,rect.y+15),CV_FONT_HERSHEY_SIMPLEX,0.5,color);

        motorExplorationState=motorExplorationStateTrigger;
        holdImg=true;
    }
    
    if (depthOutPort.getOutputCount()>0)
    {
        depthOutPort.prepare()=imgOut;
        depthOutPort.write();
        if (holdImg)
            Time::delay(0.5);
    }

    mutex.unlock();
}
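// ---------------------------------------------------------------------------
// Sketch (added for illustration, assuming yarp::math): the eye-frame change
// of coordinates performed in onRead() above. The left-eye pose (position xe,
// axis-angle oe) is packed into a homogeneous matrix He, and a point expressed
// in the root frame is mapped into the eye frame via the inverse transform.
// The helper name is an assumption, not part of the module.
#include <yarp/sig/all.h>
#include <yarp/math/Math.h>

static yarp::sig::Vector rootToEye(const yarp::sig::Vector &pRoot3,
                                   const yarp::sig::Vector &xe,
                                   const yarp::sig::Vector &oe)
{
    using namespace yarp::sig;
    using namespace yarp::math;

    Matrix He = axis2dcm(oe);      // 4x4 rotation from axis-angle
    Vector pos = xe;
    pos.push_back(1.0);
    He.setCol(3, pos);             // insert the translation -> full SE(3) matrix

    Vector p = pRoot3;
    p.push_back(1.0);              // homogeneous coordinates
    return SE3inv(He) * p;         // homogeneous point in the left-eye frame
}
// ---------------------------------------------------------------------------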
int main() {
  Network yarp; // set up yarp
  BufferedPort<ImageOf<PixelRgb> > imagePort;  // a BufferedPort instance that sends/receives an RGB image in the background without blocking the process
  BufferedPort<Vector> targetPort; // a BufferedPort instance that sends/receives a Vector

  imagePort.open("/tutorial/image/in");  // give the port a name
  targetPort.open("/tutorial/target/out");
  //Network::connect("/icubSim/cam/left","/tutorial/image/in");
  Network::connect("/icub/cam/left","/tutorial/image/in"); // the /icub/cam/left port is opened by the camera module
int cnt=0; bool temp=true;   // 'temp' drives the up/down ramp below; initialised to avoid reading an indeterminate value
  while (1) { // repeat forever
    ImageOf<PixelRgb> *image = imagePort.read();  // read an image (blocking); 'image' points to the buffer owned by the port
    if (image!=NULL) { // check we actually got something
       printf("We got an image of size %dx%d\n", image->width(), image->height());
		IplImage *frame_temp = (IplImage*)image->getIplImage();
		
		IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width,  frame_temp->height), frame_temp->depth, 3 );
	    	
  	  	cvCvtColor(frame_temp,frame1, CV_RGB2BGR);
 		frame1->origin=0;

	cvNamedWindow("cName",CV_WINDOW_AUTOSIZE);
	cvShowImage("cName",frame1);
    cvWaitKey(0);
       
       double xMean = 0;
       double yMean = 0;
       int ct = 0;
       for (int x=0; x<frame1->width; x++) {
         for (int y=0; y<frame1->height; y++) {
           // stride-aware read of the BGR triplet at (x,y) straight from the buffer
           unsigned char *p = (unsigned char*)frame1->imageData
                              + frame1->widthStep*y + frame1->nChannels*x;
           PixelRgb pixel;
           pixel.b = p[0]; pixel.g = p[1]; pixel.r = p[2];   // frame1 is BGR after CV_RGB2BGR
           //PixelRgb& pixel = image->pixel(x,y); // 'pixel' would be a reference: like a pointer it refers to another variable's memory location (here image's pixel(x,y)), but it needs no dereferencing operator
           // very simple test for blueishness: blue exceeds green
           if (pixel.b>pixel.g){
            // there's a blueish pixel at (x,y)!
            // let's find the average location of these pixels
            xMean += x;
            yMean += y;
            ct++;
           }
         }
       }
       if (ct>0) {
         xMean /= ct;
         yMean /= ct; 
       }
    printf("current value of ct %d \n",ct);
       if (ct>(frame1->width/20)*(frame1->height/20)) 
       {
         printf("Best guess at blue target: %d %d \n", cnt, cnt); //%g %g\n",xMean, yMean);
         Vector& target = targetPort.prepare();
         target.resize(3);

         if(cnt>25)
temp =false;

else if(cnt<0)
temp =true;
if (temp ==true)
cnt++;
else if (temp==false)
cnt--; 
       // { cnt =100;}         
  //         target[0] = xMean;
   //      target[1] = yMean;
          target[0] = cnt;
         target[1] = cnt;
         target[2] = 1;
         targetPort.write();
       } else {
         Vector& target = targetPort.prepare();
         target.resize(3);
         target[0] = 0;
         target[1] = 0;
         target[2] = 0;
         targetPort.write();
       }
      cvReleaseImage(&frame1);   // frame1 is created each iteration: release it to avoid leaking
    }
  }
  
  return 0;
}
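// ---------------------------------------------------------------------------
// Sketch (added for illustration, not in the original tutorial): stride-aware
// access to one pixel of an 8-bit IplImage, the pattern the per-pixel loop
// above relies on. widthStep must be used instead of width*nChannels because
// rows may be padded.
static unsigned char* pixelPtr(IplImage *img, int x, int y)
{
    return (unsigned char*)img->imageData
           + (size_t)y * img->widthStep
           + (size_t)x * img->nChannels;
}
// usage on a BGR frame:
//   unsigned char *p = pixelPtr(frame1, x, y);
//   unsigned char b = p[0], g = p[1], r = p[2];
// ---------------------------------------------------------------------------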
void SCSPMClassifier::classify(Bottle *blobs, Bottle &reply)
{
    if(blobs==NULL)
    {
        reply.addList();
        return;
    }

    if(blobs->size()==0)
    {
        reply.addList();
        return;
    }

    if(imgInput.getInputCount()==0 || imgSIFTInput.getInputCount()==0 || featureInput.getInputCount()==0 || scoresInput.getInputCount()==0 || rpcClassifier.getOutputCount()==0)
    {
        reply.addList();
        return;
    }
    //Read Object Classes
    Bottle cmdObjClass;
    cmdObjClass.addString("objList");
    Bottle objList;
    printf("Sending training request: %s\n",cmdObjClass.toString().c_str());
    rpcClassifier.write(cmdObjClass,objList);
    printf("Received reply: %s\n",objList.toString().c_str());

    if(objList.size()<=1)
    {
        for(int b=0; b<blobs->size(); b++)
        {
             Bottle &blob_scorelist=reply.addList();
             // name of the blob
             blob_scorelist.addString(blobs->get(b).asList()->get(0).asString().c_str());
             blob_scorelist.addList();
        }
        return;
    }


    // Start Recognition mode
    Bottle cmdClass;
    cmdClass.addString("recognize");

    Bottle classReply;
    //printf("Sending training request: %s\n",cmdClass.toString().c_str());
    rpcClassifier.write(cmdClass,classReply);
    //printf("Received reply: %s\n",classReply.toString().c_str());

    // Read Image
    //printf("Reading Image: \n");
    ImageOf<PixelRgb> *image = imgInput.read(true);
    //printf("Image Read \n");
    if(image==NULL)
        return;
    IplImage* imgC= (IplImage*) image->getIplImage();

   //printf("Valid Image \n");

    double t2=Time::now();

    // Classify each blob
    printf("Start Classification \n");
    for(int b=0; b<blobs->size(); b++)
    {
        // list of the scores
        Bottle &blob_scorelist=reply.addList();
        // name of the blob
        blob_scorelist.addString(blobs->get(b).asList()->get(0).asString().c_str());

        //list of scores
        Bottle &scores=blob_scorelist.addList();

        //retrieve bounding box
        Bottle* bb=blobs->get(b).asList()->get(1).asList();
        int x_min=(int) bb->get(0).asDouble();
        int y_min=(int) bb->get(1).asDouble();
        int x_max=(int) bb->get(2).asDouble();
        int y_max=(int) bb->get(3).asDouble();

        
        // expand the bounding box by a 5-pixel margin, staying inside the image
        if(x_min>5)
            x_min=x_min-5;

        if(y_min>5)
            y_min=y_min-5;

        if((x_max+5)<imgC->width)
            x_max=x_max+5;

        if((y_max+5)<imgC->height)
            y_max=y_max+5;

        //Crop Image
        cvSetImageROI(imgC,cvRect(x_min,y_min,x_max-x_min,y_max-y_min));
        IplImage* croppedImg=cvCreateImage(cvGetSize(imgC), imgC->depth , imgC->nChannels);

        cvCopy(imgC, croppedImg);
            
        cvResetImageROI(imgC);

        double t=Time::now();
        //Send Image to SC
        //printf("Sending Image to SC: \n");

        ImageOf<PixelBgr> *image = imgSIFTInput.read(false);
        while(image!=NULL)
            image = imgSIFTInput.read(false);

        ImageOf<PixelBgr>& outim=imgOutput.prepare();
        outim.wrapIplImage(croppedImg);
        imgOutput.write();
        

        //Read Coded Feature
        //printf("Reading Feature: \n");
        Bottle fea;
        featureInput.read(fea);

        image = imgSIFTInput.read(true);
        if(image!=NULL)
        {
            IplImage * imgBlob= (IplImage*) image->getIplImage();
            cvSetImageROI(imgC,cvRect(x_min,y_min,x_max-x_min,y_max-y_min));
            cvCopy(imgBlob,imgC);
             
        }
        cvResetImageROI(imgC);
        cvReleaseImage(&croppedImg);

        t=Time::now()-t;
        //fprintf(stdout, "Coding Time: %g \n", t);


        if(fea.size()==0)
            return;

        //Send Feature to Classifier
        //printf("Sending Feature to Classifier: \n");
         featureOutput.write(fea);

         // Read scores
        //printf("Reading Scores: \n");
         Bottle Class_scores;
         scoresInput.read(Class_scores);

        printf("Scores received: ");


         if(Class_scores.size()==0)
             return;

         // Fill the list of the b-th blob
         for (int i=0; i<objList.size()-1; i++)
         {
             Bottle *obj=Class_scores.get(i).asList();
             if(obj->get(0).asString()=="background")
                continue;
             
             Bottle &currObj_score=scores.addList();
             currObj_score.addString(obj->get(0).asString());
             double normalizedVal=((obj->get(1).asDouble())+1)/2;
             currObj_score.addDouble(normalizedVal);
             printf("%s %f ",obj->get(0).asString().c_str(),normalizedVal);
         }
    printf("\n");

    }

    if(imgSIFTOutput.getOutputCount()>0)
    {
        ImageOf<PixelRgb>& outim=imgSIFTOutput.prepare();
        outim.wrapIplImage(imgC);
        imgSIFTOutput.write();
    }

    t2=Time::now()-t2;
    //printf("%s \n",reply.toString().c_str());
    //fprintf(stdout, "All Time: %g \n", t2);
}
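// ---------------------------------------------------------------------------
// Sketch (added for illustration): the 5-pixel margin applied to each blob in
// classify() above, written as a single clamped helper. The function name is
// an assumption; the returned ROI can be fed straight to cvSetImageROI.
#include <opencv/cv.h>
#include <algorithm>

static CvRect padBox(int x_min, int y_min, int x_max, int y_max,
                     int margin, int imgW, int imgH)
{
    x_min = std::max(x_min - margin, 0);            // never leave the image...
    y_min = std::max(y_min - margin, 0);
    x_max = std::min(x_max + margin, imgW - 1);     // ...on any side
    y_max = std::min(y_max + margin, imgH - 1);
    return cvRect(x_min, y_min, x_max - x_min, y_max - y_min);
}
// ---------------------------------------------------------------------------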
bool EdisonSegmModule::updateModule()
{
    ImageOf<PixelRgb> *yrpImgIn;
    static int cycles = 0;

    yrpImgIn = _imgPort.read();
    if (yrpImgIn == NULL)   // this is the case if module is requested to quit while waiting for image
        return true;

    bool use_private_stamp;
    Stamp s;
    if(!_imgPort.getEnvelope(s))
    {
            cout << "No stamp found in input image. Will use private stamp" << endl;
            use_private_stamp = true;
    }
    else
    {
            cout << "Received image #" << s.getCount() << " generated at time " << s.getTime() << endl;
            use_private_stamp = false;
    }

    if(cycles == 0)
            _timestart = yarp::os::Time::now();
    cycles++;

    IplImage *iplimg = (IplImage*)yrpImgIn->getIplImage();

    //computing the ROI to crop the image
	/*struct _IplROI roi;
	roi.coi = 0; // all channels are selected
	roi.height = height_;
	roi.width = width_;
	roi.xOffset = ( orig_width_ - width_ ) / 2;
	roi.yOffset = ( orig_height_ - height_ ) / 2;*/
	
	//copying roi data to buffer
	/*iplimg->roi = &roi;
	cvCopy( iplimg, inputImage.getIplImage());*/

    //Rescale image if required
    if( (width_ != orig_width_) || (height_ != orig_height_ ) )
            cvResize(iplimg, inputImage.getIplImage(), CV_INTER_NN);
    else
            cvCopy( iplimg, inputImage.getIplImage());

    double edgetime = yarp::os::Time::now();
    //compute gradient and confidence maps
    BgEdgeDetect edgeDetector(gradWindRad);
    BgImage bgImage;
    bgImage.SetImage(inputImage_, width_, height_, true);
    edgeDetector.ComputeEdgeInfo(&bgImage, confMap_, gradMap_);
    //compute the weight map
    for(int i = 0; i < width_*height_; i++) {
      if(gradMap_[i] > 0.02) {
		weightMap_[i] = mixture*gradMap_[i] + (1 - mixture)*confMap_[i];
      } else {
		weightMap_[i] = 0;
      }
    }
	///////////////////////////// This block can be parallelized
	cout << "Edge computation Time (ms): " << (yarp::os::Time::now() - edgetime)*1000.0 << endl;

	msImageProcessor iProc;
	if( dim_ == 3 )
		iProc.DefineImage(inputImage_, COLOR, height_, width_);
	else
	{	
		cvCvtColor(inputImage.getIplImage(), inputHsv.getIplImage(), CV_RGB2HSV);
		cvSplit(inputHsv.getIplImage(), inputHue.getIplImage(), 0, 0, 0);
		iProc.DefineImage(inputHue_, GRAYSCALE, height_, width_);
	}
	if(iProc.ErrorStatus) {
		cout << "MeanShift Error" << endl;
		return false;
	}
	iProc.SetWeightMap(weightMap_, threshold);
	if(iProc.ErrorStatus) {
		cout << "MeanShift Error" << endl;
		return false;
	}


	double filtertime = yarp::os::Time::now();
	iProc.Filter(sigmaS, sigmaR, speedup);
        if(iProc.ErrorStatus) {
		cout << "MeanShift Error" << endl;
		return false;
	}
	cout << "Mean Shift Filter Computation Time (ms): " << (yarp::os::Time::now() - filtertime)*1000.0 << endl;


    //obtain the filtered image
    iProc.GetResults(filtImage_);
	if(iProc.ErrorStatus) {
		cout << "MeanShift Error" << endl;
		return false;
	}
    
    //fuse regions
	double fusetime = yarp::os::Time::now();
    iProc.FuseRegions(sigmaR, minRegion);
    if(iProc.ErrorStatus) {
		cout << "MeanShift Error" << endl;
		return false;
	}
	cout << "Region Fusion Computation Time (ms): " << (yarp::os::Time::now() - fusetime)*1000.0 << endl;

    //obtain the segmented image
    iProc.GetResults(segmImage_);
    if(iProc.ErrorStatus) {
		cout << "MeanShift Error" << endl;
		return false;
	}

	//define the boundaries - do not need this
	/*
	RegionList *regionList        = iProc.GetBoundaries();
	int        *regionIndeces     = regionList->GetRegionIndeces(0);
	int        numRegions         = regionList->GetNumRegions();
	numBoundaries_ = 0;
	for(int i = 0; i < numRegions; i++) {
		numBoundaries_ += regionList->GetRegionCount(i);
	}
	if(boundaries_) delete [] boundaries_;
	boundaries_ = new int [numBoundaries_];
	for(int i = 0; i < numBoundaries_; i++) {
		boundaries_[i] = regionIndeces[i];
	}*/
		
	int regionCount; // how many regions have been found
	int *labels = NULL; //pointer for the labels (should this be released in the end?) 
	float *modes; //pointer for the Luv values (should this be released in the end?) 
	int *modePointCounts; //the area of each region (should this be released in the end?) 

	regionCount = iProc.GetRegions(&labels, &modes, &modePointCounts);
	int *labelp = (int*)labelImage.getRawImage();
	for(int i = 0; i < width_*height_; i++)
		labelp[i] = labels[i];
	
	IplImage *labelint = (IplImage*)labelImage.getIplImage();
	IplImage *labelchar = (IplImage*)labelView.getIplImage();

	cvConvert(labelint, labelchar);

	//prepare timestamps
	if(use_private_stamp)
	{
		_stamp.update();
		_labelPort.setEnvelope(_stamp);
		_labelViewPort.setEnvelope(_stamp);
		_viewPort.setEnvelope(_stamp);
		_filtPort.setEnvelope(_stamp);
		_rawPort.setEnvelope(_stamp);
	}
	else
	{
		_labelPort.setEnvelope(s);
		_labelViewPort.setEnvelope(s);
		_viewPort.setEnvelope(s);
		_filtPort.setEnvelope(s);
		_rawPort.setEnvelope(s);
	}

	ImageOf<PixelInt> &yrpImgLabel = _labelPort.prepare();
	//Rescale image if required
	if( (width_ != orig_width_) || (height_ != orig_height_ ) )
	{
		yrpImgLabel.resize(orig_width_, orig_height_);
		cvResize(labelImage.getIplImage(), yrpImgLabel.getIplImage(), CV_INTER_NN);
	}
	else
		yrpImgLabel = labelImage;
	_labelPort.write();

	ImageOf<PixelMono> &yrpImgDebug = _labelViewPort.prepare();
	//Rescale image if required
	if( (width_ != orig_width_) || (height_ != orig_height_ ) )
	{
		yrpImgDebug.resize(orig_width_, orig_height_);
		cvResize(labelView.getIplImage(), yrpImgDebug.getIplImage(), CV_INTER_NN);
	}
	else
		yrpImgDebug = labelView;
	_labelViewPort.write();


	ImageOf<PixelRgb> &yrpFiltOut = _filtPort.prepare();
	//Rescale image if required
	if( (width_ != orig_width_) || (height_ != orig_height_ ) )
	{
		yrpFiltOut.resize(orig_width_, orig_height_);
		cvResize(filtImage.getIplImage(), yrpFiltOut.getIplImage(), CV_INTER_NN);
	}
	else
		yrpFiltOut = filtImage;
	_filtPort.write();

	ImageOf<PixelRgb> &yrpImgView = _viewPort.prepare();
	//Rescale image if required
	if( (width_ != orig_width_) || (height_ != orig_height_ ) )
	{
		yrpImgView.resize(orig_width_, orig_height_);
		cvResize(segmImage.getIplImage(), yrpImgView.getIplImage(), CV_INTER_NN);
	}
	else
		yrpImgView = segmImage;
	_viewPort.write();

	ImageOf<PixelRgb> &yrpImgOut = _rawPort.prepare();
	yrpImgOut = *yrpImgIn;
	_rawPort.write();


	//report the frame rate
	if(cycles % 100 == 0)
	{
		double cps = ((double)cycles)/(yarp::os::Time::now() - _timestart);
		printf("fps: %02.2f\n", cps);
	}
    return true;
}
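// ---------------------------------------------------------------------------
// Sketch (added for illustration): the stamp-forwarding pattern used by
// updateModule() above, isolated for a single output port. If the producer
// attached an envelope to the input image it is propagated verbatim, so
// downstream modules can associate every output with the same frame;
// otherwise a private Stamp is updated and used instead. The helper name is
// an assumption.
#include <yarp/os/all.h>
#include <yarp/sig/all.h>

static void forwardStamp(yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb> > &in,
                         yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb> > &out,
                         yarp::os::Stamp &privateStamp)
{
    yarp::os::Stamp s;
    if (in.getEnvelope(s)) {
        out.setEnvelope(s);            // reuse the producer's frame count and time
    } else {
        privateStamp.update();         // locally generated count/time
        out.setEnvelope(privateStamp);
    }
}
// ---------------------------------------------------------------------------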
Exemple #29
0
    bool updateModule()
    {
        ImageOf<PixelBgr> *img=imgInPort.read();
        if (img==NULL)
            return true;

        mutex.lock();

        ImageOf<PixelMono> imgMono;
        imgMono.resize(img->width(),img->height());

        cv::Mat imgMat=cv::cvarrToMat((IplImage*)img->getIplImage());
        cv::Mat imgMonoMat=cv::cvarrToMat((IplImage*)imgMono.getIplImage());
        cv::cvtColor(imgMat,imgMonoMat,CV_BGR2GRAY);

        if (initBoundingBox)
        {
            tracker->initialise(imgMonoMat,tl,br);
            initBoundingBox=false;
        }

        if (doCMT)
        {
            if (tracker->processFrame(imgMonoMat))
            {
                if (dataOutPort.getOutputCount()>0)
                {
                    Bottle &data=dataOutPort.prepare();
                    data.clear();

                    data.addInt((int)tracker->topLeft.x);
                    data.addInt((int)tracker->topLeft.y);
                    data.addInt((int)tracker->topRight.x);
                    data.addInt((int)tracker->topRight.y);
                    data.addInt((int)tracker->bottomRight.x);
                    data.addInt((int)tracker->bottomRight.y);
                    data.addInt((int)tracker->bottomLeft.x);
                    data.addInt((int)tracker->bottomLeft.y);

                    dataOutPort.write();
                }

                if (imgOutPort.getOutputCount()>0)
                {
                    cv::line(imgMat,tracker->topLeft,tracker->topRight,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->topRight,tracker->bottomRight,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->bottomRight,tracker->bottomLeft,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->bottomLeft,tracker->topLeft,cv::Scalar(255,0,0),2);

                    for (size_t i=0; i<tracker->trackedKeypoints.size(); i++) 
                        cv::circle(imgMat,tracker->trackedKeypoints[i].first.pt,3,cv::Scalar(0,255,0));
                }
            }            
        }

        if (imgOutPort.getOutputCount()>0)
        {
            imgOutPort.prepare()=*img;
            imgOutPort.write();
        }

        mutex.unlock();
        return true;
    }
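// ---------------------------------------------------------------------------
// Sketch (added for illustration, client side): decoding the Bottle written to
// dataOutPort above - eight integers, the four (x,y) corners in the order
// topLeft, topRight, bottomRight, bottomLeft. Port and function names here are
// assumptions, not part of the module.
#include <yarp/os/all.h>
#include <opencv2/core/core.hpp>

static bool readCorners(yarp::os::BufferedPort<yarp::os::Bottle> &port, cv::Point corners[4])
{
    yarp::os::Bottle *data = port.read(false);     // non-blocking read
    if (data == NULL || data->size() < 8)
        return false;                              // no (complete) message yet
    for (int i = 0; i < 4; i++) {
        corners[i].x = data->get(2*i).asInt();
        corners[i].y = data->get(2*i + 1).asInt();
    }
    return true;
}
// ---------------------------------------------------------------------------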
int main() {
  Network yarp; // set up yarp
  BufferedPort<ImageOf<PixelRgb> > imagePort;  // a BufferedPort instance that sends/receives an RGB image in the background without blocking the process
  BufferedPort<Vector> targetPort; // a BufferedPort instance that sends/receives a Vector

  imagePort.open("/tutorial/image/in");  // give the port a name
  targetPort.open("/tutorial/target/out");
  //Network::connect("/icubSim/cam/left","/tutorial/image/in");
  Network::connect("/icub/cam/left","/tutorial/image/in"); // the /icub/cam/left port is opened by the camera module

  while (1) { // repeat forever
    ImageOf<PixelRgb> *image = imagePort.read();  // read an image (blocking); 'image' points to the buffer owned by the port
    if (image!=NULL) { // check we actually got something
       printf("We got an image of size %dx%d\n", image->width(), image->height());

//pramod added nov 15       
    IplImage *frame_temp = (IplImage*)image->getIplImage();
		
		IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width,  frame_temp->height), frame_temp->depth, 3 );
	    	
  	  	cvCvtColor(frame_temp,frame1, CV_RGB2BGR);
 		frame1->origin=0;

	cvNamedWindow("cName",CV_WINDOW_AUTOSIZE);
	cvShowImage("cName",frame1);
 //   cvWaitKey(0);
//pramod added ends

      int nl= frame1->height; // number of lines
      int nc= frame1->width * frame1->nChannels; // total number of elements per line
      int step= frame1->widthStep; // effective width (row stride in bytes)

      // get the pointer to the image buffer
      unsigned char *data= reinterpret_cast<unsigned char*>(frame1->imageData);
      int ct=0; double xmean=0.0; double ymean=0.0;
      for (int i=0; i<nl; i++) {   // start at row 0: 'data' points to the first row
            for (int j=0; j<nc; j+= frame1->nChannels) {
            // finding blue pixels ---------------------
//printf("%d %d %d \n",(int)data[j],(int)data[j+1],(int)data[j+2]);

    if((int)data[j]>(int)data[j+1]&&(int)data[j]>(int)data[j+2])
                {
                   // printf("%d %d %d \n",(int)data[j],(int)data[j+1],(int)data[j+2]);
                    //getchar();
                    ct++;
                    xmean += (double)j/(double)frame1->nChannels;   // j counts bytes; divide by channels to get x
                    ymean += (double)i;
                   // data[j]=data[j+1]=data[j+2]=0;

data[j]=255;
                }
            // end of pixel processing ----------------
            } // end of line
            data+= step;  // next line
      }
	cvNamedWindow("cName1",CV_WINDOW_AUTOSIZE);
	cvShowImage("cName1",frame1);
    char c = cvWaitKey(33);
		if(c==27) break;
      if (ct>0) {
         xmean /= (double)ct;
         ymean /= (double)ct;
       }
    printf("current value of ct %d \n",ct);
       if (ct>(frame1->width/20)*(frame1->height/20))
       {
         printf("Best guess at blue target: %f %f \n", xmean, ymean);
    cvCircle(frame1,cvPoint((int)xmean,(int)ymean),5,cvScalar(0,0,255,0));
	cvNamedWindow("cName1",CV_WINDOW_AUTOSIZE);
	cvShowImage("cName1",frame1);
   char c = cvWaitKey(33);
		if(c==27) break;
         Vector& target = targetPort.prepare();
         target.resize(3);
         target[0] = xmean;
         target[1] = ymean;
         target[2] = 1;
         targetPort.write();
       } else {
         Vector& target = targetPort.prepare();
         target.resize(3);
         target[0] = 0;
         target[1] = 0;
         target[2] = 0;
         targetPort.write();
       }

/*     double xMean = 0;
       double yMean = 0;
       int ct = 0;
       for (int x=0; x<image->width(); x++) {
         for (int y=0; y<image->height(); y++) {
           PixelRgb& pixel = image->pixel(x,y); // 'pixel' is a reference: like a pointer it refers to another variable's memory location (here image's pixel(x,y)), but it needs no dereferencing operator
           // very simple test for blueishness
           // make sure blue level exceeds red and green by a factor of 2
           if (pixel.b>pixel.g){ //&& pixel.b>pixel.g){//*1.2+10) {
            // there's a blueish pixel at (x,y)!
            // let's find the average location of these pixels
            xMean += x;
            yMean += y;
            ct++;
           }
         }
       }
       if (ct>0) {
         xMean /= ct;
         yMean /= ct;
       }
    printf("current value of ct %d \n",ct);
       if (ct>(image->width()/20)*(image->height()/20)) {
         printf("Best guess at blue target: %g %g\n", xMean, yMean);
         Vector& target = targetPort.prepare();
         target.resize(3);
         target[0] = xMean;
         target[1] = yMean;
         target[2] = 1;
         targetPort.write();
       } else {
         Vector& target = targetPort.prepare();
         target.resize(3);
         target[0] = 0;
         target[1] = 0;
         target[2] = 0;
         targetPort.write();
       }*/
      cvReleaseImage(&frame1);   // frame1 is created each iteration: release it to avoid leaking
    }
  }
  
  return 0;
}
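// ---------------------------------------------------------------------------
// Sketch (added for illustration, a refactoring of the loop above, not in the
// original): the blue-pixel centroid over an 8-bit BGR IplImage, returning the
// hit count so the caller can apply the same area gate
// ((width/20)*(height/20)) before trusting the estimate.
static int blueCentroid(IplImage *img, double &xmean, double &ymean)
{
    int ct = 0;
    xmean = ymean = 0.0;
    for (int y = 0; y < img->height; y++) {
        unsigned char *row = (unsigned char*)img->imageData + (size_t)y * img->widthStep;
        for (int x = 0; x < img->width; x++) {
            unsigned char *p = row + (size_t)x * img->nChannels;
            if (p[0] > p[1] && p[0] > p[2]) {   // channel 0 is blue in BGR
                xmean += x;
                ymean += y;
                ct++;
            }
        }
    }
    if (ct > 0) { xmean /= ct; ymean /= ct; }
    return ct;
}
// usage: int ct = blueCentroid(frame1, xmean, ymean);
//        if (ct > (frame1->width/20)*(frame1->height/20)) { /* trust (xmean,ymean) */ }
// ---------------------------------------------------------------------------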