예제 #1
0
파일: ImageFile.cpp 프로젝트: BRKMYR/yarp
// Load a PPM/PGM file into an RGB image.  Grayscale (PGM) input is read
// into a temporary mono image and promoted to RGB via copy().
// Returns false on open failure, bad header, or truncated pixel data.
static bool ImageReadRGB(ImageOf<PixelRgb> &img, const char *filename)
{
    int width, height, color, num;
    FILE *fp=0;
    fp = fopen(filename, "rb");

    if(fp==0)
        {
            fprintf(stderr, "Error opening %s, check if file exists.\n", filename);
            return false;
        }

    if (!ReadHeader(fp, &height, &width, &color))
        {
            fclose (fp);
            fprintf(stderr, "Error reading header, is file a valid ppm/pgm?\n");
            return false;
        }

    if (!color)
        {
            // PGM: one byte per pixel; read row by row because rows in the
            // destination buffer may be padded (getRowSize() >= width).
            ImageOf<PixelMono> tmp;
            tmp.resize(width,height);

            const int w = tmp.width() * tmp.getPixelSize();
            const int h = tmp.height();
            const int pad = tmp.getRowSize();
            unsigned char *dst = tmp.getRawImage ();

            num = 0;
            for (int i = 0; i < h; i++)
                {
                    num += (int)fread((void *) dst, 1, (size_t) w, fp);
                    dst += pad;
                }
            fclose(fp);
            // BUGFIX: `num` was accumulated but never checked; fail on a
            // short read instead of silently returning a partial image.
            if (num != w * h)
                {
                    fprintf(stderr, "Error reading %s: unexpected end of data.\n", filename);
                    return false;
                }
            img.copy(tmp);
            return true;
        }

    img.resize(width,height);

    const int w = img.width() * img.getPixelSize();
    const int h = img.height();
    const int pad = img.getRowSize();
    unsigned char *dst = img.getRawImage ();

    num = 0;
    for (int i = 0; i < h; i++)
        {
            num += (int)fread((void *) dst, 1, (size_t) w, fp);
            dst += pad;
        }

    fclose(fp);

    // BUGFIX: same short-read check for the color branch.
    if (num != w * h)
        {
            fprintf(stderr, "Error reading %s: unexpected end of data.\n", filename);
            return false;
        }

    return true;
}
예제 #2
0
// Fuse a stereo pair into one downscaled pseudo-color image: channel 0 of
// each output pixel receives the LEFT eye's gray level, channel 1 the
// RIGHT eye's, sampling every `scale`-th pixel/row of the inputs.
// Assumes imgR and imgL have identical dimensions (file-scope `scale`
// controls the decimation factor).
// NOTE(review): channel 2 (blue) of `out` is never written, so it keeps
// whatever bytes the buffer previously held -- confirm this is intended.
void merge(const ImageOf<PixelRgb> &imgR, const ImageOf<PixelRgb> &imgL, ImageOf<PixelRgb> &out)
{
    // lazy resize: only reallocate when the output geometry is stale
    if (out.width()!=imgR.width()/scale)
        out.resize(imgR.width()/scale, imgR.height()/scale);

    int rr=0;   // source row index, advances `scale` rows per output row
    for(int r=0; r<out.height(); r++)
    {
        const unsigned char *tmpR=imgR.getRow(rr);
        const unsigned char *tmpL=imgL.getRow(rr);
        unsigned char *tmpO=out.getRow(r);

        for(int c=0; c<out.width(); c++)
        {
            // average each RGB triplet down to a single gray value
            tmpO[0]=(unsigned char) (1/3.0*(tmpL[0]+tmpL[1]+tmpL[2]));
            tmpO[1]=(unsigned char) (1/3.0*(tmpR[0]+tmpR[1]+tmpR[2]));

            // 3 bytes per pixel: output steps by one pixel, inputs by `scale`
            tmpO+=3;
            tmpL+=(3*scale);
            tmpR+=(3*scale);
        }

        rr+=scale;
    }
}
예제 #3
0
파일: ImageTest.cpp 프로젝트: apaikan/yarp
 void testScale() {
     report(0,"checking scaling...");
     ImageOf<PixelRgb> img;
     ImageOf<PixelMono> img2;
     ImageOf<PixelRgb> img3;
     img.resize(64,64);
     img.zero();
     for (int i=0; i<img.width()/2; i++) {
         for (int j=0; j<img.height()/2; j++) {
             img(i,j).r = 255;
             img(i,j).g = 255;
             img(i,j).b = 255;
         }
     }
     img2.copy(img,32,32);
     checkEqual(img2.width(),32,"dimension check");
     checkEqual(img2(0,0),255,"logic check");
     checkEqual(img2(img2.width()-2,0),0,"logic check");
     checkEqual(img2(0,img2.height()-2),0,"logic check");
     img3.copy(img,16,16);
     checkEqual(img3.width(),16,"dimension check");
     checkEqual(img3(0,0).r,255,"logic check");
     checkEqual(img3(img3.width()-2,0).r,0,"logic check");
     checkEqual(img3(0,img3.height()-2).r,0,"logic check");
     img.copy(img3,4,4);
     checkEqual(img.width(),4,"dimension check");
 }
예제 #4
0
  // Load the orientation catalog into `orient` (first 4 columns only),
  // at most once (guarded by `active`).  When fname is NULL the
  // compiled-in default ORIENT_SRC is used.  Exits the process if the
  // catalog file cannot be read.
  void Read(const char *fname = NULL)
    {
      if (!active)
	{
	  // DRY: the (fname!=NULL)?fname:ORIENT_SRC ternary was repeated
	  // three times; hoist it into a single local.
	  const char *src = (fname != NULL) ? fname : ORIENT_SRC;
	  printf("Catalog loading from %s...\n", src);
	  ImageOf<PixelFloat> orient_pre;
	  bool ok = yarp::sig::file::read(orient_pre, src);
	  if (!ok) {
	    printf("*** error: cannot find %s\n", src);
	    exit(1);
	  }

	  // keep only the first 4 columns of each catalog row
	  orient.resize(4,orient_pre.height());
	  for (int i=0; i<orient_pre.height(); i++)
	    {
	      for (int j=0; j<4; j++)
		{
		  orient(j,i) = orient_pre(j,i);
		}
	    }

	  printf("Catalog loaded\n");
	  active = 1;
	}
    }
// Intensity-based salience: the salience map is simply the luminance of
// the input, and the visual output is that luminance replicated into RGB.
void IntensitySalience::applyImpl(ImageOf<PixelRgb>& src, 
                          ImageOf<PixelRgb>& dest,
                          ImageOf<PixelFloat>& sal) {
    // keep the outputs in step with the input geometry
    dest.resize(src);
    sal.resize(src);

    const int curW = src.width();
    const int curH = src.height();
    // reallocate scratch buffers only when the input size changed
    if (_sizeOld.width != curW || _sizeOld.height != curH) {
        initImages(cvSize(curW, curH));
    }

    // RGB -> gray, gray replicated back to RGB for display,
    // gray -> float salience map
    cvCvtColor(src.getIplImage(), _imgGray, CV_RGB2GRAY);
    cvCvtColor(_imgGray, dest.getIplImage(), CV_GRAY2RGB);
    gray2Float(_imgGray, (IplImage*)sal.getIplImage());

    // remember the geometry we configured for
    _sizeOld.width = curW;
    _sizeOld.height = curH;
}
예제 #6
0
파일: module.cpp 프로젝트: CV-IP/icub-main
// Segment the finger tip inside a roi_side x roi_side window centered on
// c (pixel coordinates) using Otsu thresholding on the mono input; write
// a BGR visualization into imgOut and the detected tip pixel into px.
// Returns the ROI actually used, or an empty cv::Rect when c is within
// 10 px of the image border.
cv::Rect CalibModule::extractFingerTip(ImageOf<PixelMono> &imgIn, ImageOf<PixelBgr> &imgOut,
                                       const Vector &c, Vector &px)
{
    cv::Mat imgInMat=cv::cvarrToMat((IplImage*)imgIn.getIplImage());

    // produce a colored image
    imgOut.resize(imgIn);
    cv::Mat imgOutMat=cv::cvarrToMat((IplImage*)imgOut.getIplImage());
    
    // proceed iff the center is within the image plane
    if ((c[0]<10.0) || (c[0]>imgIn.width()-10) ||
        (c[1]<10.0) || (c[1]>imgIn.height()-10))
    {
        cv::cvtColor(imgInMat,imgOutMat,CV_GRAY2BGR);
        return cv::Rect();
    }

    // saturate the top-left and bottom-right corners
    int roi_side2=roi_side>>1;
    cv::Point ct((int)c[0],(int)c[1]);
    cv::Point tl((int)(c[0]-roi_side2),(int)(c[1]-roi_side2));
    cv::Point br((int)(c[0]+roi_side2),(int)(c[1]+roi_side2));
    tl.x=std::max(1,tl.x); tl.x=std::min(tl.x,imgIn.width()-1);
    tl.y=std::max(1,tl.y); tl.y=std::min(tl.y,imgIn.height()-1);
    br.x=std::max(1,br.x); br.x=std::min(br.x,imgIn.width()-1);
    br.y=std::max(1,br.y); br.y=std::min(br.y,imgIn.height()-1);
    cv::Rect rect(tl,br);

    // run Otsu algorithm to segment out the finger    
    // NOTE: this thresholds the ROI of imgIn IN PLACE, so the caller's
    // input image is modified by this call.
    cv::Mat imgInMatRoi(imgInMat,rect);
    cv::threshold(imgInMatRoi,imgInMatRoi,0,255,cv::THRESH_BINARY|cv::THRESH_OTSU);
    cv::cvtColor(imgInMat,imgOutMat,CV_GRAY2BGR);

    px.resize(2,0.0);
    bool ok=false;
    // scan the ROI top-down / left-right; the first foreground pixel is
    // taken as the tip (both loops exit as soon as one is found)
    for (int y=tl.y; y<br.y; y++)
    {
        for (int x=tl.x; x<br.x; x++)
        {
            if (imgIn(x,y)>0)
            {
                // predict the center of the finger a bit shifted
                x+=3;    y+=5;
                px[0]=x; px[1]=y;
                cv::circle(imgOutMat,cv::Point(x,y),5,cv::Scalar(0,0,255),-1);
                ok=true;
                break;
            }
        }

        if (ok)
            break;
    }

    // overlay: green dot = requested center, red dot = tip, white box = ROI
    cv::circle(imgOutMat,ct,5,cv::Scalar(0,255,0),-1);
    cv::rectangle(imgOutMat,tl,br,cv::Scalar(255,255,255),4);
    return rect;
}
예제 #7
0
// Convert a cv::Mat mask into the thread's mono image `mImage`, guarded
// by the semaphore.  Conversion goes through a temporary ImageOf so the
// routine stays reusable if mask and image types diverge.
void plotterThread::copyM(cv::Mat Mask) {        
    sem.wait();
    // Use an automatic (stack) temporary instead of new/delete: the old
    // heap allocation bought nothing and would leak if copy() threw.
    ImageOf<PixelMono> imagem;
    imagem.resize(width, height);
    convertMat2ImageOf(Mask, &imagem);
    mImage->copy(imagem);
    sem.post();
}
예제 #8
0
파일: ImageTest.cpp 프로젝트: apaikan/yarp
 void testRgba() {
     report(0,"checking rgba...");
     ImageOf<PixelRgba> img;
     ImageOf<PixelRgb> img2;
     img.resize(50,50);
     img(4,2) = PixelRgba(10,20,30,40);
     img2.copy(img);
     checkEqual(img(4,2).a,40,"a level");
     checkEqual(img2(4,2).r,10,"r level");
 }
예제 #9
0
파일: ImageTest.cpp 프로젝트: apaikan/yarp
 void testRgbInt() {
     report(0,"checking rgbi...");
     ImageOf<PixelRgbInt> img;
     ImageOf<PixelRgb> img2;
     img.resize(50,50);
     img(4,2) = PixelRgbInt(10,20,30);
     checkEqual(img(4,2).r,10,"r level original");
     img2.copy(img);
     checkEqual(img2(4,2).r,10,"r level copied");
 }
예제 #10
0
파일: main.cpp 프로젝트: AbuMussabRaja/yarp
 virtual void run() {
     printf("Fake framegrabber starting\n");
     while (!isStopping()) {
         ImageOf<PixelRgb> img;
         img.resize(40,20);
         img.zero();
         printf("Fake framegrabber wrote an image...\n");
         p.write(img);
         Time::delay(1);
     }
     printf("Fake framegrabber stopping\n");
 }
예제 #11
0
// From iCub staticgrabber device.
// DF2 bayer sequence.
// -- in staticgrabber: first row GBGBGB, second row RGRGRG.
// -- changed here to:  first row GRGRGR, second row BGBGBG.
// From iCub staticgrabber device.
// DF2 bayer sequence.
// -- in staticgrabber: first row GBGBGB, second row RGRGRG.
// -- changed here to:  first row GRGRGR, second row BGBGBG.
// (The original switch on j%4 paired cases {0,2} and {1,3}, which is
// exactly a test of column parity, so j%2 is used here instead.)
bool TestFrameGrabber::makeSimpleBayer(
        ImageOf<PixelRgb>& img, 
        ImageOf<PixelMono>& bayer) {

    bayer.resize(img.width(), img.height());

    const int w = img.width();
    const int h = img.height();

    for (int y = 0; y < h; y++) {
        const bool evenRow = (y % 2) == 0;
        PixelRgb *src = (PixelRgb *)img.getRow(y);
        PixelMono *dst = (PixelMono *)bayer.getRow(y);

        for (int x = 0; x < w; x++, src++) {
            const bool evenCol = (x % 2) == 0;
            // even rows: G R G R ...   odd rows: B G B G ...
            if (evenRow) {
                *dst++ = evenCol ? src->g : src->r;
            } else {
                *dst++ = evenCol ? src->b : src->g;
            }
        }
    }

    return true;
}
예제 #12
0
// Color-opponency salience: per pixel, compute luminance-normalized
// R/G/B, derive red/green/blue/yellow opponency responses, and take the
// strongest (clamped to [0,255]) as the salience value; dest shows the
// clamped opponency channels as an RGB image.
void ColorSalience::applyImpl(ImageOf<PixelRgb>& src, 
                          ImageOf<PixelRgb>& dest,
                          ImageOf<PixelFloat>& sal) {
    dest.resize(src);
    sal.resize(src);
    IMGFOR(src,x,y) {
        PixelRgb pix = src(x,y);
        float lum3 = (float)(pix.r+pix.g+pix.b);
        if (lum3/3.0<60) {
            // too dark to carry reliable chroma: suppress it
            pix.r = pix.g = pix.b = 0;
        }
        if (lum3<=0) {
            // BUGFIX: a pure black pixel used to compute 0/0 below,
            // yielding NaN and an undefined float->uchar cast.
            sal(x,y) = 0;
            dest(x,y) = PixelRgb(0,0,0);
            continue;
        }
        float rn = 255.0F * pix.r / lum3;
        float gn = 255.0F * pix.g / lum3;
        float bn = 255.0F * pix.b / lum3;  // BUGFIX: was pix.g (copy-paste)
        // in theory, apparently, yellow should be (rn+gn)/2 ...
        float yw = (rn+gn)-fabs(rn-gn)-bn;
        // yellow only counts when both r and g clearly dominate blue
        yw *= (pix.r>1.5*pix.b);
        yw *= (pix.g>1.5*pix.b);
        float s = 0;
        float r = rn - (gn+bn)/2;
        float g = gn - (rn+bn)/2;
        float b = bn - (rn+gn)/2;
        // salience = strongest opponency response, clamped to [0,255]
        if (r>s) s = r;
        if (g>s) s = g;
        if (yw>s) s = yw;
        if (b>s) s = b;
        if (s>255) s = 255;
        if (s<0) s = 0;
        if (r<0) r = 0;
        if (g<0) g = 0;
        if (b<0) b = 0;
        if (r>255) r = 255;
        if (g>255) g = 255;
        if (b>255) b = 255;
        sal(x,y) = (unsigned char)s;
        dest(x,y) = PixelRgb((unsigned char)r,
                             (unsigned char)g,
                             (unsigned char)b);
    }
    // NOTE(review): the scraped snippet was missing this closing brace;
    // restored to make the function well-formed.
}
예제 #13
0
/**
 * Read an image from the grabber.
 *
 * @param image The image to read. The image will be resized to
 * the dimensions the grabber is using, and the captured image
 * data will be written to it.
 *
 * @return True if an image was successfully captured. If false
 * returned, the image will be resized to the dimensions used by
 * the grabber, but all pixels will be zeroed.
 */
bool OpenCVGrabber::sendImage(IplImage* iplFrame, ImageOf<PixelRgb> & image)
{
    // Resize the output image, this should not result in new
    // memory allocation if the image is already the correct size
    image.resize(iplFrame->width, iplFrame->height);

    // log the frame size once only (m_saidSize latches)
    if (!m_saidSize) {
        yDebug("Received image of size %dx%d\n", image.width(), image.height());
        m_saidSize = true;
    }

    // Get an IplImage, the Yarp Image owns the memory pointed to
    IplImage * iplImage = (IplImage*)image.getIplImage();
    // create the timestamp
    m_laststamp.update();

    // Copy the captured image to the output image, flipping it if
    // the coordinate origin is not the top left
    if (IPL_ORIGIN_TL == iplFrame->origin)
        cvCopy(iplFrame, iplImage, 0);
    else
        cvFlip(iplFrame, iplImage, 0);

    // OpenCV captures are typically BGR; convert in place to RGB
    if (iplFrame->channelSeq[0] == 'B') {
        cvCvtColor(iplImage, iplImage, CV_BGR2RGB);
    }

    // latch the requested output size from the first frame when the
    // configuration did not specify one
    if (m_w <= 0) {
        m_w = image.width();
    }
    if (m_h <= 0) {
        m_h = image.height();
    }
    if (fromFile) {
        if (m_w>0 && m_h>0) {
            if (image.width() != m_w || image.height() != m_h) {
                if (!m_saidResize) {
                    yDebug("Software scaling from %dx%d to %dx%d",  image.width(), image.height(), m_w, m_h);
                    m_saidResize = true;
                }
                // self-copy with a target size performs the rescale
                // in place (supported by ImageOf::copy)
                image.copy(image, m_w, m_h);
            }
        }
    }

    DBG yDebug("%d by %d %s image\n", image.width(), image.height(), iplFrame->channelSeq);

    return true;

}
예제 #14
0
파일: ImageTest.cpp 프로젝트: apaikan/yarp
 void testStandard() {
     report(0,"checking standard compliance of description...");
     ImageOf<PixelRgb> img;
     img.resize(8,4);
     img.zero();
     BufferedConnectionWriter writer;
     img.write(writer);
     ConstString s = writer.toString();
     Bottle bot;
     bot.fromBinary(s.c_str(),s.length());
     checkEqual(bot.size(),4,"plausible bottle out");
     checkEqual(bot.get(0).toString().c_str(),"mat","good tag");
     YARP_DEBUG(Logger::get(),"an example image:");
     YARP_DEBUG(Logger::get(),bot.toString().c_str());
 }
예제 #15
0
파일: ImageTest.cpp 프로젝트: apaikan/yarp
 // Draw a circle that covers the whole image and verify every pixel took
 // the circle's color.
 void testDraw() {
     report(0,"checking draw tools...");
     ImageOf<PixelRgb> img;
     img.resize(64,64);
     img.zero();
     addCircle(img,PixelRgb(255,0,0),32,32,200);
     // radius 200 from the center covers all of a 64x64 image, so every
     // pixel should now be the circle color.
     // BUGFIX: the circle is RED (255,0,0) and the red channel is what
     // is checked; the old comment and check label wrongly said "blue".
     bool ok = true;
     IMGFOR(img,x,y) {
         if (img.pixel(x,y).r!=255) {
             ok = false;
         }
     }
     checkTrue(ok,"image is red");
 }
예제 #16
0
// Rasterize the motion points carried in `b` onto two fixed 320x240
// canvases (imgMat for analysis, imgClean for display), close small gaps
// morphologically, run line/blob analysis, and publish the annotated
// image.  Iteration starts at index 1, so element 0 of `b` is skipped --
// presumably a header entry; verify against the sender.
// NOTE(review): the list elements are dereferenced without NULL/bounds
// checks, and points are assumed to fit inside 320x240 -- confirm the
// producer guarantees this.
void Manager::processMotionPoints(Bottle &b)
{
   // fprintf(stdout,"create mat\n");

    //create MAT image
    cv::Mat imgMat(Size(320,240),CV_8UC3);
    cv::Mat imgClean(Size(320,240),CV_8UC3);
    //vector<Point> points;
    imgMat = Scalar::all(0);      // analysis canvas starts black
    imgClean = Scalar::all(255);  // display canvas starts white
   // fprintf(stdout,"filling up the data\n");
    for (int x=1; x<b.size(); x++)
    {
        Point pt;
        pt.x = b.get(x).asList()->get(0).asInt();
        pt.y = b.get(x).asList()->get(1).asInt();
        // mark each motion point in blue (BGR channel 0) on both canvases
        imgMat.at<cv::Vec3b>(pt.y,pt.x)[0] = 255 ;
        imgMat.at<cv::Vec3b>(pt.y,pt.x)[1] = 0 ;
        imgMat.at<cv::Vec3b>(pt.y,pt.x)[2] = 0 ;
        imgClean.at<cv::Vec3b>(pt.y,pt.x)[0] = 255 ;
        imgClean.at<cv::Vec3b>(pt.y,pt.x)[1] = 0 ;
        imgClean.at<cv::Vec3b>(pt.y,pt.x)[2] = 0 ;
    }
    //imgClean = imgMat;
    // morphological CLOSE with a 21x21 rectangle to merge nearby points
    int n = 10;
    int an = n > 0 ? n : -n;
    int element_shape = MORPH_RECT;
    Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
    morphologyEx(imgMat, imgMat, CV_MOP_CLOSE, element);

    Bottle data;
    //fprintf(stdout,"before process image\n");
    data = processImage(b, imgMat, imgClean, lineDetails); //image analisis and bottle cleaning
    //fprintf(stdout,"before process blobs\n");
    
    if (data.size() > 0)
        processBlobs(data, imgClean, lineDetails); // kmeans

    // publish the annotated clean image on the output port
    ImageOf<PixelRgb> outImg;// = new ImageOf<PixelRgb>;
    //fprintf(stdout,"done1 with data %d %d\n", imgClean.cols, imgClean.rows);
    outImg.resize( imgClean.cols, imgClean.rows );
    IplImage ipl_img = imgClean;
    cvCopyImage(&ipl_img, (IplImage*)outImg.getIplImage());
    imgOutPort.prepare() = outImg;
    imgOutPort.write();
    //delete[] outImg;
    //fprintf(stdout,"ok\n");
    }
예제 #17
0
/**
 * Read an image from the grabber.
 *
 * @param image The image to read. The image will be resized to
 * the dimensions the grabber is using, and the captured image
 * data will be written to it.
 *
 * @return True if an image was successfully captured. If false
 * returned, the image will be resized to the dimensions used by
 * the grabber, but all pixels will be zeroed.
 */
bool OpenCVGrabber::sendImage(const cv::Mat & frame, ImageOf<PixelRgb> & image)
{
    // Resize the output image, this should not result in new
    // memory allocation if the image is already the correct size
    image.resize(frame.cols, frame.rows);

    // log the frame size once only (m_saidSize latches)
    if (!m_saidSize) {
        yDebug("Received image of size %zux%zu\n", image.width(), image.height());
        m_saidSize = true;
    }

    // create the timestamp
    m_laststamp.update();

    // Convert to RGB color space
    // NOTE(review): unlike the IplImage overload, the conversion is
    // unconditional -- assumes `frame` is always BGR; confirm upstream.
    cv::Mat frame_rgb;
    cv::cvtColor(frame, frame_rgb, cv::COLOR_BGR2RGB);

    // Copy the captured image to the output image
    // NOTE(review): a flat memcpy assumes image.getRawImage() has no row
    // padding (row size == width*3) -- confirm for these dimensions.
    memcpy(image.getRawImage(), frame_rgb.data, sizeof(unsigned char) * frame_rgb.rows * frame_rgb.cols * frame_rgb.channels());

    // latch the requested output size from the first frame when the
    // configuration did not specify one
    if (m_w == 0) {
        m_w = image.width();
    }
    if (m_h == 0) {
        m_h = image.height();
    }
    if (fromFile) {
        if (m_w>0 && m_h>0) {
            if (image.width() != m_w || image.height() != m_h) {
                if (!m_saidResize) {
                    yDebug("Software scaling from %zux%zu to %zux%zu",  image.width(), image.height(), m_w, m_h);
                    m_saidResize = true;
                }
                // self-copy with a target size rescales in place
                image.copy(image, m_w, m_h);
            }
        }
    }

    DBG yDebug("%zu by %zu image\n", image.width(), image.height());

    return true;

}
예제 #18
0
파일: ImageFile.cpp 프로젝트: BRKMYR/yarp
// Load a color PPM file into a BGR image.  Data is read as RGB into a
// temporary image; img.copy() performs the RGB->BGR channel swap.
// Returns false on open failure, bad header, grayscale input, or
// truncated pixel data.
static bool ImageReadBGR(ImageOf<PixelBgr> &img, const char *filename)
{
    int width, height, color, num;
    FILE *fp=0;
    fp = fopen(filename, "rb");

    if(fp==0)
        {
            fprintf(stderr, "Error opening %s, check if file exists.\n", filename);
            return false;
        }

    if (!ReadHeader(fp, &height, &width, &color))
        {
            fclose (fp);
            fprintf(stderr, "Error reading header, is file a valid ppm/pgm?\n");
            return false;
        }
    if (!color)
        {
            fclose(fp);
            fprintf(stderr, "File is grayscale, conversion not yet supported\n");
            return false;
        }

    ImageOf<PixelRgb> tmpImg;
    tmpImg.resize(width, height);

    // BUGFIX: the row byte count must come from tmpImg (the image being
    // filled), not from img; the values coincide for Rgb/Bgr but mixing
    // the two images was fragile.
    const int w = tmpImg.width() * tmpImg.getPixelSize();
    const int h = tmpImg.height();
    const int pad = tmpImg.getRowSize();
    unsigned char *dst = tmpImg.getRawImage ();

    num = 0;
    for (int i = 0; i < h; i++)
        {
            num += (int)fread((void *) dst, 1, (size_t) w, fp);
            dst += pad;
        }

    fclose(fp);

    // fail on a short read instead of returning a partial image
    if (num != w * h)
        {
            fprintf(stderr, "Error reading %s: unexpected end of data.\n", filename);
            return false;
        }

    return img.copy(tmpImg);
}
예제 #19
0
// Copy a cv::Mat into a YARP RGB image, flipping vertically when the
// source origin is bottom-left and swapping BGR->RGB when needed.
void visionUtils::convertCvToYarp(cv::Mat MatImage, ImageOf<PixelRgb> & yarpImage)
{
    // BUGFIX: the old code did `new IplImage(MatImage)` and never deleted
    // it, leaking one IplImage header per call.  A stack header created
    // via cv::Mat's IplImage conversion does the same job with no leak.
    IplImage iplFromMat = MatImage;

    yarpImage.resize(iplFromMat.width, iplFromMat.height);

    IplImage * iplYarpImage = (IplImage*)yarpImage.getIplImage();

    // copy, flipping vertically when the origin is not top-left
    if (IPL_ORIGIN_TL == iplFromMat.origin) {
        cvCopy(&iplFromMat, iplYarpImage, 0);
    }
    else {
        cvFlip(&iplFromMat, iplYarpImage, 0);
    }

    // OpenCV Mats are typically BGR; YARP expects RGB
    if (iplFromMat.channelSeq[0]=='B') {
        cvCvtColor(iplYarpImage, iplYarpImage, CV_BGR2RGB);
    }
}
예제 #20
0
파일: main.cpp 프로젝트: AbuMussabRaja/yarp
int main(int argc, char *argv[]) {
    printf("Show a circle for 3 seconds...\n");
    ImageOf<PixelRgb> yarpImage;


    printf("Creating a YARP image of a nice circle\n");
    yarpImage.resize(300,200);
    addCircle(yarpImage,PixelRgb(255,0,0),
              yarpImage.width()/2,yarpImage.height()/2,
              yarpImage.height()/4);
    addCircle(yarpImage,PixelRgb(255,50,50),
              yarpImage.width()/2,yarpImage.height()/2,
              yarpImage.height()/5);


    printf("Copying YARP image to an OpenCV/IPL image\n");
    IplImage *cvImage = cvCreateImage(cvSize(yarpImage.width(),  
                                             yarpImage.height()), 
                                      IPL_DEPTH_8U, 3 );
    cvCvtColor((IplImage*)yarpImage.getIplImage(), cvImage, CV_RGB2BGR);

    printf("Showing OpenCV/IPL image\n");
    cvNamedWindow("test",1);
    cvShowImage("test",cvImage);

    printf("Taking image back into YARP...\n");
    ImageOf<PixelBgr> yarpReturnImage;
    yarpReturnImage.wrapIplImage(cvImage);
    yarp::sig::file::write(yarpReturnImage,"test.ppm");
    printf("Saving YARP image to test.ppm\n");


    cvWaitKey(3000);

    cvDestroyWindow("test");

    cvReleaseImage(&cvImage);

    printf("...done\n");
    return 0;
}
예제 #21
0
// Grab the current GL framebuffer into `target`, flipping it vertically
// (GL images are bottom-up), then clear the GL buffers.
bool OdeSdlSimulation::getImage(ImageOf<PixelRgb>& target) {
    const int w = cameraSizeWidth;
    const int h = cameraSizeHeight;
    const int bytesPerPixel = 3;

    // read the raw RGB framebuffer into a temporary buffer
    char *pixels = new char[w * h * bytesPerPixel];
    glReadPixels( 0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE, pixels);

    // wrap the buffer (no copy) so it can be addressed as an image
    ImageOf<PixelRgb> wrapped;
    wrapped.setQuantum(1);
    wrapped.setExternal(pixels,w,h);

    // inefficient flip!
    target.resize(wrapped);
    const int ww = wrapped.width();
    const int hh = wrapped.height();
    for (int row=0; row<hh; row++) {
        for (int col=0; col<ww; col++) {
            target(col,row) = wrapped(col,hh-1-row);
        }
    }
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    delete[] pixels;
    return true;
}
예제 #22
0
    /*
     * init the thread, instantiate the device driver and connet the ports appropriately.
     * @return true iff successful.
     */
    virtual bool threadInit() {
        // open a logpolarclient device connecting `local` to `remote`
        Property p;
        p.put("device", "logpolarclient");
        p.put("local", local.c_str());
        p.put("remote", remote.c_str());
        poly.open(p);
        if (poly.isValid()) {
            poly.view(lpImage);
            active = false;

            if (lpImage != 0) {
                // query the logpolar geometry advertised by the server
                const int nang = lpImage->nang();
                const int necc = lpImage->necc();
                const int fovea = lpImage->fovea();
                const double overlap = lpImage->overlap();

                if (necc != 0 && nang != 0) {
                    fprintf(stdout, "logpolar format with ang: %d ecc: %d fov: %d ovl: %f\n",
                        nang,
                        necc,
                        fovea,
                        overlap);

                    fprintf(stdout, "cartesian image of size %d %d\n", width, height);
                    // precompute the logpolar -> cartesian lookup tables
                    trsf.allocLookupTables(L2C, necc, nang, width, height, overlap);
                    active = true;

                    // scratch logpolar image sized to the server geometry
                    lp.resize(nang, necc);
                    
                    // open the out port.
                    out.open(outname.c_str());
                }
            }
        }
        // true iff the device opened and reported a sane geometry
        return active;
    }
예제 #23
0
파일: ImageTest.cpp 프로젝트: apaikan/yarp
 // Check image creation: a FlexImage with an RGB pixel code resizes to
 // the requested geometry, and integer pixels read back exactly what was
 // written (a running sum balances to zero).
 void testCreate() {
     report(0,"testing image creation...");
     FlexImage image;
     image.setPixelCode(VOCAB_PIXEL_RGB);
     image.resize(256,128);
     checkEqual(image.width(),256,"check width");
     checkEqual(image.height(),128,"check height");
     ImageOf<PixelInt> iint;
     iint.resize(256,128);
     long int balance = 0;
     // write a deterministic pattern, accumulating its sum...
     for (int y=0; y<iint.height(); y++) {
         for (int x=0; x<iint.width(); x++) {
             const int v = (x+y)%65537;
             iint.pixel(x,y) = v;
             balance += v;
         }
     }
     // ...then subtract every stored pixel back out
     for (int y=0; y<iint.height(); y++) {
         for (int x=0; x<iint.width(); x++) {
             balance -= iint.pixel(x,y);
         }
     }
     checkEqual(balance,0,"pixel assignment check");
 }
예제 #24
0
    // Per-cycle work: grab a frame, feed its grayscale version to the CMT
    // tracker, stream the tracked bounding box on dataOutPort and an
    // annotated copy of the frame on imgOutPort.
    // NOTE(review): mutex.unlock() is only reached on the normal path; an
    // exception between lock() and unlock() would leave it held -- a
    // scoped lock guard would be safer, confirm the mutex type supports it.
    bool updateModule()
    {
        ImageOf<PixelBgr> *img=imgInPort.read();
        if (img==NULL)
            return true;

        mutex.lock();

        // grayscale copy of the input for the tracker
        ImageOf<PixelMono> imgMono;
        imgMono.resize(img->width(),img->height());

        cv::Mat imgMat=cv::cvarrToMat((IplImage*)img->getIplImage());
        cv::Mat imgMonoMat=cv::cvarrToMat((IplImage*)imgMono.getIplImage());
        cv::cvtColor(imgMat,imgMonoMat,CV_BGR2GRAY);

        // one-shot (re)initialization from an externally supplied box
        if (initBoundingBox)
        {
            tracker->initialise(imgMonoMat,tl,br);
            initBoundingBox=false;
        }

        if (doCMT)
        {
            if (tracker->processFrame(imgMonoMat))
            {
                // publish the four tracked corners as 8 ints
                if (dataOutPort.getOutputCount()>0)
                {
                    Bottle &data=dataOutPort.prepare();
                    data.clear();

                    data.addInt((int)tracker->topLeft.x);
                    data.addInt((int)tracker->topLeft.y);
                    data.addInt((int)tracker->topRight.x);
                    data.addInt((int)tracker->topRight.y);
                    data.addInt((int)tracker->bottomRight.x);
                    data.addInt((int)tracker->bottomRight.y);
                    data.addInt((int)tracker->bottomLeft.x);
                    data.addInt((int)tracker->bottomLeft.y);

                    dataOutPort.write();
                }

                // draw the box and keypoints directly into imgMat, which
                // aliases *img -- so the annotations appear in the frame
                // forwarded below
                if (imgOutPort.getOutputCount()>0)
                {
                    cv::line(imgMat,tracker->topLeft,tracker->topRight,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->topRight,tracker->bottomRight,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->bottomRight,tracker->bottomLeft,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->bottomLeft,tracker->topLeft,cv::Scalar(255,0,0),2);

                    for (size_t i=0; i<tracker->trackedKeypoints.size(); i++) 
                        cv::circle(imgMat,tracker->trackedKeypoints[i].first.pt,3,cv::Scalar(0,255,0));
                }
            }            
        }

        // forward the (possibly annotated) frame
        if (imgOutPort.getOutputCount()>0)
        {
            imgOutPort.prepare()=*img;
            imgOutPort.write();
        }

        mutex.unlock();
        return true;
    }
예제 #25
0
파일: ImageFile.cpp 프로젝트: BRKMYR/yarp
// Read a whitespace-separated text matrix of floats into `dest`.
// Two passes over the file: the first counts rows (hh) and the maximum
// number of tokens per line (ww) to size the image; the second parses
// each token with atof and stores it at (column, row).
// Rows shorter than ww leave the remaining pixels untouched (zeroed by
// resize).  Returns false only if the file cannot be opened.
bool file::read(ImageOf<PixelFloat>& dest, const ConstString& src)
{
    int hh = 0, ww = 0;

    FILE *fp = fopen(src.c_str(), "r");
    if (fp==NULL) {
        return false;
    }
    // --- pass 1: measure the grid ---
    // blank tracks whether we are inside a run of separators; curr counts
    // tokens seen on the current line
    int blank = 1;
    int curr = 0;
    while (!feof(fp)) {
        int ch = fgetc(fp);
        if (ch==' ' || ch == '\t' || ch == '\r' ||ch == '\n'|| feof(fp)){
            if (!blank) {
                // first token of a line starts a new row
                if (curr==0) {
                    hh++;
                }
                curr++;
                if (curr>ww) {
                    ww = curr;
                }
            }
            blank = 1;
            if (ch=='\n') {
                curr = 0;
            }
        } else {
            blank = 0;
        }
    }
    fclose(fp);
    // --- pass 2: re-open and parse the values ---
    fp = fopen(src.c_str(), "rb");
    if (fp==NULL) {
        return false;
    }
    dest.resize(ww,hh);
    hh = 0; ww = 0;
    {
        char buf[256];   // token accumulator; yAssert below guards overflow
        int idx = 0;
        int blank = 1;
        int curr = 0;
        while (!feof(fp)) {
            int ch = fgetc(fp);
            if (ch==' ' || ch == '\t' ||ch == '\r'||ch == '\n' || feof(fp)){
                if (!blank) {
                    if (curr==0) {
                        hh++;
                    }
                    curr++;
                    if (curr>ww) {
                        ww = curr;
                    }
                    // terminate the token and store it at (col, row)
                    buf[idx] = '\0';
                    dest(curr-1,hh-1) = float(atof(buf));
                    idx = 0;
                }
                blank = 1;
                if (ch=='\n') {
                    curr = 0;
                }
            } else {
                buf[idx] = ch;
                idx++;
                yAssert(((unsigned int)idx)<sizeof(buf));
                blank = 0;
            }
        }
    }
    fclose(fp);
    return true;
}
예제 #26
0
void GlutOrientation::Apply(ImageOf<PixelRgb>& src,
				ImageOf<PixelRgb>& dest,
				ImageOf<PixelFloat>& xdata, 
				ImageOf<PixelFloat>& ydata, 
				ImageOf<PixelFloat>& mdata)
{
  int view = 1; //(dest.width()>0);
  ImageOf<PixelFloat>& orient = fine_orientation_data.Orient();
  static ImageOf<PixelMono> mask;
  static IntegralImage ii;
  if (view)
    {
      dest.resize(src);
    }
  for (int i=0;i<SHIFTS;i++)
    {
      shifts[i].resize(src);
    }
  mean.resize(src);

  static int shifts_x[SHIFTS], shifts_y[SHIFTS];
  
  int ct = 0;
  for (int dx=-1; dx<=2; dx++)
    {
      for (int dy=-1; dy<=2; dy++)
	{
	  assert(ct<SHIFTS);
	  ii.Offset(src,shifts[ct],dx,dy,1);
	  shifts_x[ct] = dx;
	  shifts_y[ct] = dy;
	  ct++;
	}
    }

  static ImageOf<PixelFloat> mean, var, lx, ly, agree;
  ImageOf<PixelRgb>& mono = src;
  SatisfySize(mono,mean);
  SatisfySize(mono,var);
  SatisfySize(mono,lx);
  SatisfySize(mono,ly);
  SatisfySize(mono,agree);
  SatisfySize(mono,xdata);
  SatisfySize(mono,ydata);
  SatisfySize(mono,mdata);
  int response_ct = 0;
  IMGFOR(mono,x,y)
    {
      float total = 0;
      float total2 = 0;
      PixelRgb& pix0 = src(x,y);
      for (int k=0; k<SHIFTS; k++)
	{
	  PixelRgb& pix1 = shifts[k](x,y);
	  float v = pdist(pix0,pix1);
	  total += v;
#ifdef USE_LUMINANCE_FILTER
	  total2 += v*v;
#endif
	}
      mean(x,y) = total/SHIFTS;
#ifdef USE_LUMINANCE_FILTER
      var(x,y) = total2/SHIFTS - (total/SHIFTS)*(total/SHIFTS);
#endif
    }
// Computes a directional-saliency map for `src` and writes it to `sal`.
// Pipeline: grayscale+float conversion -> Gabor filter bank over
// _numSizs sizes x _numDirs directions -> center/surround difference
// maps (DPlus/DMinus) -> iterative center-surround competition to
// isolate peaks -> normalized sum of all maps, resized back to the
// output resolution.  `dest` is unused in the visible body.
// NOTE(review): the _mutex.wait()/_mutex.post() pair is not
// exception-safe (no RAII guard) — a throw in between deadlocks other
// threads; confirm the cv* calls here cannot throw in this build.
void AdvancedDirectionalSalience::applyImpl(ImageOf<PixelRgb>& src, 
                          ImageOf<PixelRgb>& dest,
                          ImageOf<PixelFloat>& sal) {
    // time measurement                              
    double tStart = yarp::os::Time::now();
    // block other threads (e.g. configuration)
    _mutex.wait();
    // resizing/init
    sal.resize(src);
    // (Re)allocate all working images only when the input geometry
    // changes; _ws/_hs are the scaled working dimensions.
    if (_w != src.width() || _h != src.height()){
        _w = src.width();
        _h = src.height();
        _ws = (int)(_w * _scale + 0.5);
        _hs = (int)(_h * _scale + 0.5);
        init(_ws, _hs);
    }
    // scaling if _scale != 1.0
    cvResize((IplImage*)src.getIplImage(), _imgInRgbResized, CV_INTER_LINEAR );
    // convert input to grayscale and float image
    cvCvtColor( _imgInRgbResized, _imgInGray, CV_BGR2GRAY );
    cvConvert(_imgInGray, _imgInFloat);
    // compute D-maps (24 maps: _numSizs*_numDirs * 2 (center & surround))
    for (int s = 0; s < _numSizs; s++){
        for (int d = 0; d < _numDirs; d++){
                // compute gabor maps
                _gabor[s][d]->GaborFiltZeroMean((float*)_imgInFloat->imageData, 
                    (float*)_imgGReal->imageData, 
                    (float*)_imgGImag->imageData);
                // combine real and imaginary parts
                _gabor[s][d]->ComputeMagnitude((float*)_imgGReal->imageData, 
                    (float*)_imgGImag->imageData, 
                    (float*)_imgGabor[s][d]->imageData);
                // center- & surround-maps
                _gaussCen[s]->GaussFilt((float*)_imgGabor[s][d]->imageData, (float*)_imgDCen[s][d]->imageData);
                _gaussSur[s]->GaussFilt((float*)_imgGabor[s][d]->imageData, (float*)_imgDSur[s][d]->imageData);
        }
    }
    // compute DPlus & DMinus maps
    // NOTE(review): maxX/maxY are uninitialized here; presumably
    // calcGlobalMaximum writes them as outputs — confirm, since the
    // only uses below are after such calls.
    float max = 0.0f;
    int maxX, maxY;
    for (int s = 0; s < _numSizs; s++){
        // sum all directions of size s
        cvZero(_imgDSumCen);
        cvZero(_imgDSumSur);
        for (int d = 0; d < _numDirs; d++){
            cvAdd(_imgDSumCen, _imgDCen[s][d], _imgDSumCen);
            cvAdd(_imgDSumSur, _imgDSur[s][d], _imgDSumSur);
        }
        // create DPlus & DMinus maps for size s
        for (int d = 0; d < _numDirs; d++){
            // subtract the current direction from the total sum (Sum(i!=j))
            cvSub(_imgDSumCen, _imgDCen[s][d], _imgDSumCenEx);
            cvSub(_imgDSumSur, _imgDSur[s][d], _imgDSumSurEx);
            // calculate DPlus
            cvSub(_imgDCen[s][d], _imgDSur[s][d], _imgTmp);
            cvAdd(_imgTmp, _imgDSumSurEx, _imgTmp);
            cvMul(_imgDCen[s][d], _imgTmp, _imgDPlu[s][d]);
            // calculate DMinus
            cvSub(_imgDCen[s][d], _imgDSumCenEx, _imgTmp);
            cvAdd(_imgTmp, _imgDSumSurEx, _imgTmp);
            cvMul(_imgDCen[s][d], _imgTmp, _imgDMin[s][d]);
            // discard negative values
            cvThreshold(_imgDPlu[s][d], _imgDPlu[s][d], 0.0, 0.0, CV_THRESH_TOZERO); // this is probably not necessary
            cvThreshold(_imgDMin[s][d], _imgDMin[s][d], 0.0, 0.0, CV_THRESH_TOZERO); 
            // scale to MAX_SALIENCE 
            // DPlus
            //calcGlobalMaximum(_imgDPlu[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDPlu[s][d], _imgDPlu[s][d], MAX_SALIENCE/max);
            // DMinus
            //calcGlobalMaximum(_imgDMin[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDMin[s][d], _imgDMin[s][d], MAX_SALIENCE/max);
        
            // old:
            // calc maximum, conspicuity factors and scale to MAX_SALIENCE
            // DPlus
            //calcGlobalMaximum(_imgDPlu[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDPlu[s][d], _imgDPlu[s][d], MAX_SALIENCE/max);
            //calcAdvancedConspicuityFactor(_imgDPlu[s][d], _sigs[s]*_sig2wav, maxX, maxY, _conspDPlu[s][d]); 
            //calcConspicuityFactor(_imgDPlu[s][d], _conspDPlu[s][d]); 
            // DMinus
            //calcGlobalMaximum(_imgDMin[s][d], max, maxX, maxY);
            //if (max > 0.0f)
            //    cvScale(_imgDMin[s][d], _imgDMin[s][d], MAX_SALIENCE/max);
            // calc conspicuity factors
            //calcAdvancedConspicuityFactor(_imgDMin[s][d], _sigs[s]*_sig2wav, maxX, maxY, _conspDMin[s][d]);
            //calcConspicuityFactor(_imgDMin[s][d], _conspDMin[s][d]);
        }
    }
    // center-surround competition to extract isolated peaks
    // M <- |M+M*DoG-Cinh|>0
    // Three fixed iterations of difference-of-Gaussians competition,
    // applied independently to the DPlus and DMinus working copies.
    for (int s = 0; s < _numSizs; s++){   
        // gaussian filters for each size
        // cen = sigma
        // sur = 4*sigma
        for (int d = 0; d < _numDirs; d++){
            // copy to working imgDPlu/MinComp
            cvCopy(_imgDPlu[s][d], _imgDPluComp[s][d]);
            cvCopy(_imgDMin[s][d], _imgDMinComp[s][d]);
            for (int k = 0; k < 3; k++){
                // imgDPluComp
                // calculate M*DoG
                _gaussCompCen[s]->GaussFilt((float*)_imgDPluComp[s][d]->imageData, (float*)_imgTmpCen->imageData);
                _gaussCompSur[s]->GaussFilt((float*)_imgDPluComp[s][d]->imageData, (float*)_imgTmpSur->imageData);
                cvSub(_imgTmpCen, _imgTmpSur, _imgTmp); // calculate center surround (mexican hat) convolution
                // add M + M&DoG
                cvAdd(_imgDPluComp[s][d], _imgTmp, _imgDPluComp[s][d]);
                // subtract Cinh
                cvSub(_imgDPluComp[s][d], _imgCinh, _imgDPluComp[s][d]);
                // threshold to >0
                cvThreshold(_imgDPluComp[s][d], _imgDPluComp[s][d], 0.0, 0.0, CV_THRESH_TOZERO);
                // imgDMinComp
                // calculate M*DoG
                _gaussCompCen[s]->GaussFilt((float*)_imgDMinComp[s][d]->imageData, (float*)_imgTmpCen->imageData);
                _gaussCompSur[s]->GaussFilt((float*)_imgDMinComp[s][d]->imageData, (float*)_imgTmpSur->imageData);
                cvSub(_imgTmpCen, _imgTmpSur, _imgTmp); // calculate center surround (mexican hat) convolution
                //// add M + M&DoG
                cvAdd(_imgDMinComp[s][d], _imgTmp, _imgDMinComp[s][d]);
                //// subtract Cinh
                cvSub(_imgDMinComp[s][d], _imgCinh, _imgDMinComp[s][d]);
                //// threshold to >0
                cvThreshold(_imgDMinComp[s][d], _imgDMinComp[s][d], 0.0, 0.0, CV_THRESH_TOZERO);
            }
            // scale final _imgD*Comp's
            calcGlobalMaximum(_imgDPluComp[s][d], max, maxX, maxY);
            if (max > 0.0f)
                cvScale(_imgDPluComp[s][d], _imgDPluComp[s][d], MAX_SALIENCE/max);
            calcGlobalMaximum(_imgDMinComp[s][d], max, maxX, maxY);
            if (max > 0.0f)
                cvScale(_imgDMinComp[s][d], _imgDMinComp[s][d], MAX_SALIENCE/max);
        }
    }

    // summing up all maps (normalized)
    // NOTE(review): the sum uses the raw _imgDPlu/_imgDMin maps, not
    // the competed _imgD*Comp maps computed above — confirm that is
    // intentional.
    cvZero(_imgResult);
    float mapWeight = 1.0f/((float)(2*_numSizs*_numDirs));
    for (int s = 0; s < _numSizs; s++){
        for (int d = 0; d < _numDirs; d++){
            cvScaleAdd(_imgDPlu[s][d], cvScalar(mapWeight), _imgResult, _imgResult);
            cvScaleAdd(_imgDMin[s][d], cvScalar(mapWeight), _imgResult, _imgResult);
        }
    }

    // summing up all maps (normalized)
    /*cvZero(_imgResult);
    for (int s = 0; s < _numSizs; s++){
        for (int d = 0; d < _numDirs; d++){
            cvScaleAdd(_imgDPlu[s][d], cvScalar(_conspDPlu[s][d]), _imgResult, _imgResult);
            cvScaleAdd(_imgDMin[s][d], cvScalar(_conspDMin[s][d]), _imgResult, _imgResult);
        }
    }*/

    // resize/copy back to output image
    cvResize(_imgResult, (IplImage*)sal.getIplImage());
   
    // output debug/analysis of requested map [name, size, direction]
    if (_debug){
        IplImage ***ocvImgArrayDebug = _mapImgTable[_dbgImgArray];
        IplImage *ocvImgDebug = ocvImgArrayDebug[_dbgSizIndex][_dbgDirIndex];
        ImageOf<PixelFloat> &yrpImgDebug = _prtDebug.prepare();
        yrpImgDebug.resize(ocvImgDebug->width, ocvImgDebug->height);
        cvCopy(ocvImgDebug, (IplImage*)yrpImgDebug.getIplImage());
        _prtDebug.write();
    }
    
    // testing output
    //IplImage *test = _imgGImag;
    /*IplImage *test = _imgDCen[0][0];
    max = 0.0f;
    calcGlobalMaximum(test, max);
    cvScale(test, test, 255.0f/max);
    cvResize(test, (IplImage*)sal.getIplImage());*/
    _mutex.post();
    double tEnd = yarp::os::Time::now();
    if (_verbose)
        cout << "Update time: " << tEnd - tStart << " Processed image size: " << _ws << " x " << _hs << " original size: " << _w << " x " << _h << endl;
    // tmp
    //cvConvertScale(_result, (IplImage*)sal.getIplImage(), 1/_scale);
}
예제 #28
0
double oculomotorController::calculateEntropy(yarp::sig::ImageOf<yarp::sig::PixelBgr>* entImg,double& entropy, int& counter) {
    // Computes the Shannon entropy of the quantized HSV colour
    // distribution of `entImg`, accumulates it into `entropy`, and
    // returns the running average entropy/counter.
    //
    // @param entImg  input BGR image; assumed to be width x height
    //                (member dimensions) — TODO confirm at call sites
    // @param entropy running entropy accumulator, updated in place
    // @param counter number of accumulated samples, used as divisor
    // @return        running average entropy (entropy when counter==0)

    // Convert BGR -> HSV.  Stack allocation: the previous version
    // heap-allocated this image with `new` and never freed it, leaking
    // one full image per call.
    ImageOf<PixelRgb> hsv;
    hsv.resize(width, height);
    cvCvtColor(entImg->getIplImage(), hsv.getIplImage(), CV_BGR2HSV);

    // Quantize hue to 30 levels, saturation to 32, value to 30.
    int hbins = 30, sbins = 32,  vbins = 30;
    MatND hist;
    int histSize[] = {hbins, sbins, vbins};
    // hue varies from 0 to 179, see cvtColor
    float hranges[] = { 0, 180 };
    // saturation varies from 0 (black-gray-white) to
    // 255 (pure spectrum color)
    float sranges[] = { 0, 256 };
    float vranges[] = { 0, 256 } ;
    const float* ranges[] = { hranges, sranges, vranges };
    // histogram over all three channels
    int channels[] = {0, 1, 2};

    // Wrap the IplImage in a cv::Mat (second arg `true` = deep copy,
    // so the Mat does not dangle when `hsv` goes out of scope).
    const cv::Mat mat((const IplImage*) hsv.getIplImage(), true);
    calcHist(&mat,1,channels, Mat(), // do not use mask
        hist, 3, histSize, ranges,
        true, // the histogram is uniform
        false );

    yarp::sig::Matrix hsProb(hbins, sbins);

    // Accumulate -p*log2(p) over every (h,s,v) bin; probabilities are
    // normalized per (h,v) slice by the slice sum.
    double sumEntropyV = 0;
    for( int v = 0 ; v < vbins; v++) {
        double sumEntropyH = 0;
        for( int h = 0; h < hbins; h++ ) {
            // First pass: slice normalization constant.
            double sum = 0;
            for( int s = 0; s < sbins; s++ )
                {
                    sum += hist.at<float>(h, s, v);
                }
            // Second pass: entropy contribution of each bin.  When
            // sum==0 or binVal==0 the term is NaN and is skipped by
            // the isnan_fun guard below.
            double sumEntropyS = 0;
            for( int s = 0; s < sbins; s++ )
                {
                    float binVal = hist.at<float>(h, s, v);
                    hsProb(h,s) = binVal / sum; // probability of (h,s) within this v slice
                    double entropyS     =  -1 * hsProb(h,s) * Log2(hsProb(h,s)) ;
                    if(!isnan_fun(entropyS))
                        sumEntropyS += entropyS;
                }
            sumEntropyH += sumEntropyS;
        }
        sumEntropyV += sumEntropyH;
    }
    entropy += sumEntropyV;
    // Guard: avoid division by zero before any sample was counted.
    if (counter == 0)
        return entropy;
    return (entropy / counter);
}