/*
ImageOf<PixelBgr> ConvertImage(Mat& input)

Converts a cv::Mat image to a YARP PixelBgr image.
*/
ImageOf<PixelBgr> ConvertImage(Mat& input)
{
	IplImage ipback = static_cast<IplImage>(input);											// first convert the cv::Mat to an IplImage header
	ImageOf<PixelBgr> back;
	back.wrapIplImage(&ipback);																// then wrap the IplImage in our output format (no pixel copy: it shares the input's buffer)
	return back;
}
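A minimal, hypothetical usage sketch for ConvertImage (the function name loadAndConvert and the file name are illustrative, and the legacy pre-4.x OpenCV API used throughout these examples is assumed). Since wrapIplImage does not copy pixels, it is safest to keep the source cv::Mat alive while the returned image is in use.

#include <yarp/sig/Image.h>
#include <opencv2/opencv.hpp>
#include <cstdio>

using namespace yarp::sig;
using namespace cv;

void loadAndConvert()
{
    Mat frame = imread("frame.jpg");                        // illustrative file name (BGR data)
    if (!frame.empty())
    {
        ImageOf<PixelBgr> yarpFrame = ConvertImage(frame);  // helper defined above
        std::printf("wrapped %dx%d image\n", (int)yarpFrame.width(), (int)yarpFrame.height());
    }
}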
// Loads the JPG images listed in our 'database' file into the imgs vector
void ANTData::loadDatabase()
{

    string file;
    string databaseFolder;
    if (databaseContext == "")
        databaseFolder = databaseName;
    else
        databaseFolder = databaseContext + "/" + databaseName;

    cout << "ANTData::loadDatabase: trying to read from " << (databaseFolder + "/" + databaseName).c_str() << endl;

    ifstream datafile((databaseFolder + "/" + databaseName).c_str());
    if (datafile.is_open()) {
        while (getline(datafile,file)) {  //read the listing one line at a time
            if (file.size() <= 3) break;  //stop at an empty or too-short line
            file = databaseFolder + "/" + file;
            IplImage *thisImg = cvLoadImage(file.c_str());  //load image (OpenCV loads colour data as BGR)
            if (thisImg == NULL) continue;  //skip entries that fail to load
            ImageOf<PixelRgb> yarpImg;
            yarpImg.wrapIplImage(thisImg);
            imgs.push_back(yarpImg);
        }
    }
	else {
		cout << "ANTData::loadDatabase: unable to open " << (databaseFolder + "/" + databaseName).c_str() << endl;
	}
}
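From the loop above, the 'database' listing file is expected to contain one image file name per line, relative to the database folder. A minimal sketch of how the loaded images might then be inspected, assuming imgs is a std::vector<ImageOf<PixelRgb> > (the helper name is illustrative):

#include <yarp/sig/Image.h>
#include <vector>
#include <cstdio>

void dumpImageSizes(const std::vector<yarp::sig::ImageOf<yarp::sig::PixelRgb> > &imgs)
{
    for (size_t i = 0; i < imgs.size(); i++)
        std::printf("image %u: %dx%d\n", (unsigned)i, (int)imgs[i].width(), (int)imgs[i].height());
}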
Example n. 3
int main(int argc, char *argv[]) {
    printf("Show a circle for 3 seconds...\n");
    ImageOf<PixelRgb> yarpImage;


    printf("Creating a YARP image of a nice circle\n");
    yarpImage.resize(300,200);
    addCircle(yarpImage,PixelRgb(255,0,0),
              yarpImage.width()/2,yarpImage.height()/2,
              yarpImage.height()/4);
    addCircle(yarpImage,PixelRgb(255,50,50),
              yarpImage.width()/2,yarpImage.height()/2,
              yarpImage.height()/5);


    printf("Copying YARP image to an OpenCV/IPL image\n");
    IplImage *cvImage = cvCreateImage(cvSize(yarpImage.width(),  
                                             yarpImage.height()), 
                                      IPL_DEPTH_8U, 3 );
    cvCvtColor((IplImage*)yarpImage.getIplImage(), cvImage, CV_RGB2BGR);

    printf("Showing OpenCV/IPL image\n");
    cvNamedWindow("test",1);
    cvShowImage("test",cvImage);

    printf("Taking image back into YARP...\n");
    ImageOf<PixelBgr> yarpReturnImage;
    yarpReturnImage.wrapIplImage(cvImage);
    yarp::sig::file::write(yarpReturnImage,"test.ppm");
    printf("Saving YARP image to test.ppm\n");


    cvWaitKey(3000);

    cvDestroyWindow("test");

    cvReleaseImage(&cvImage);

    printf("...done\n");
    return 0;
}
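As an aside to the round trip above, the same YARP buffer can also be viewed through OpenCV's C++ API without copying. A minimal sketch (assumes the usual YARP and OpenCV headers, and that the Mat is only used while the YARP image stays alive):

#include <yarp/sig/Image.h>
#include <opencv2/opencv.hpp>

cv::Mat viewAsMat(yarp::sig::ImageOf<yarp::sig::PixelRgb> &img)
{
    // Wrap the YARP pixel buffer without copying; the channel order is still RGB,
    // so swap channels before handing it to BGR-based routines.
    return cv::Mat((int)img.height(), (int)img.width(), CV_8UC3,
                   img.getRawImage(), img.getRowSize());
}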
Example n. 4
    void readAndShow(ImageOf<PixelRgb> &reconstructYarp, bool refreshGui)
    {
        if (reconstructWidth == -1 || reconstructHeight == -1)
        {
            //Sample the subdivision sizes
            std::cout << "Waiting for input..." << std::endl;
            ImageOf<PixelRgb>* testImg = ports[0][0]->read(true);
            reconstructWidth = testImg->width() * ports.size();
            reconstructHeight = testImg->height() * ports[0].size();
            std::cout << "Input detected with size : " << testImg->width() << "x" << testImg->height() << std::endl;
            std::cout << "Reconstruct size will: " << reconstructWidth << "x" << reconstructHeight << std::endl;
            reconstruct = cvCreateImage(cvSize(reconstructWidth, reconstructHeight), 8, 3);
        }

        for (unsigned int x = 0; x < ports.size(); x++)
        {
            for (unsigned int y = 0; y < ports[x].size(); y++)
            {
                ImageOf<PixelRgb>* imgYarp = ports[x][y]->read(true);
                if (imgYarp)
                {
                    IplImage* partImg = (IplImage*) imgYarp->getIplImage();
                    cvCvtColor(partImg, partImg, CV_BGR2RGB);
                    int rectW = reconstructWidth / ports.size();
                    int rectH = reconstructHeight / ports[0].size();
                    cvSetImageROI(reconstruct, cvRect(x*rectW, y*rectH, rectW, rectH));
                    cvCopy(partImg,reconstruct);
                    cvResetImageROI(reconstruct);
                }
            }
        }

        if (refreshGui)
            cvShowImage((name + "/Reconstruct").c_str(), reconstruct);
        reconstructYarp.wrapIplImage(reconstruct);
        cvWaitKey(1);
    }
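A minimal sketch of how the input grid read above might be opened, assuming ports is a std::vector<std::vector<BufferedPort<ImageOf<PixelRgb> >*> > and name is the module name (both assumptions; the port naming scheme is illustrative):

#include <yarp/os/BufferedPort.h>
#include <yarp/sig/Image.h>
#include <sstream>
#include <string>
#include <vector>

typedef yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb> > ImgPort;

void openGrid(std::vector<std::vector<ImgPort*> > &ports,
              const std::string &name, unsigned int cols, unsigned int rows)
{
    ports.resize(cols);
    for (unsigned int x = 0; x < cols; x++)
    {
        for (unsigned int y = 0; y < rows; y++)
        {
            std::ostringstream portName;
            portName << "/" << name << "/part_" << x << "_" << y << ":i";  // illustrative naming
            ImgPort *p = new ImgPort();
            p->open(portName.str().c_str());
            ports[x].push_back(p);
        }
    }
}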
Example n. 5
bool KinectDriverOpenNI::readDepth(ImageOf<PixelMono16> &depth, double &timestamp)
{
    const XnDepthPixel* pDepthMap = depthGenerator.GetDepthMap();
    int ts=(int)depthGenerator.GetTimestamp();
    timestamp=(double)ts/1000.0;

    SceneMetaData smd;
    userGenerator.GetUserPixels(0,smd);

    for (int y=0; y<this->def_depth_height; y++)
    {
        for(int x=0;x<this->def_depth_width;x++)
        {
            int player=(smd[y * this->def_depth_width + x]);
            int rawDepth=(pDepthMap[y * this->def_depth_width + x]);
            //Store the depth in the upper 13 bits and the player identification in the lower 3 bits
            int finalValue=0;
            finalValue|=((rawDepth<<3)&0xFFF8);
            finalValue|=(player&0x0007);
            //if (x==320 && y==240)
            //    fprintf(stdout, "%d %d\n", ((finalValue&0xFFF8)>>3), finalValue&0x0007);
            depthMat->data.s[y * this->def_depth_width + x]=finalValue;
        }
    }

    if (device==KINECT_TAGS_DEVICE_KINECT)
    {
        cvGetImage(depthMat,depthTmp);
        resizeImage(depthTmp,depthImage);
    }
    else
        cvGetImage(depthMat,depthImage);
    depth.wrapIplImage(depthImage);

    return true;
}
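Since each output pixel packs the depth in the upper 13 bits and the player id in the lower 3 bits, a consumer of this image could unpack a value like this (a minimal sketch; the function name is illustrative, and the depth unit is whatever OpenNI's depth map uses, typically millimetres):

// Unpack one pixel produced by readDepth(): upper 13 bits = depth, lower 3 bits = player id.
inline void unpackDepthPixel(int packed, int &depthValue, int &playerId)
{
    depthValue = (packed & 0xFFF8) >> 3;
    playerId   =  packed & 0x0007;
}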
Example n. 6
 /*!
  * Sends a picture given in OpenCV's native IplImage format.
  */
 void SamgarModule::SendPictureOCVNative( IplImage* Image ) {
   ImageOf<PixelBgr> yarpReturnImage;
   yarpReturnImage.wrapIplImage( Image );
   SendPictureYarpNative( yarpReturnImage );
 }
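A minimal, hypothetical usage sketch for the call above: load a BGR IplImage with OpenCV and hand it to the module (the file name is illustrative, and SendPictureYarpNative is assumed to be provided by SamgarModule as the body implies):

#include <opencv/highgui.h>

void sendTestPicture(SamgarModule &module)
{
    IplImage *img = cvLoadImage("snapshot.jpg");   // illustrative file name; OpenCV loads colour as BGR
    if (img != NULL)
    {
        module.SendPictureOCVNative(img);
        cvReleaseImage(&img);
    }
}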