bool KinectDriverOpenNI::readDepth(ImageOf<PixelMono16> &depth, double &timestamp)
{
    const XnDepthPixel* pDepthMap = depthGenerator.GetDepthMap();
    int ts = (int)depthGenerator.GetTimestamp();
    timestamp = (double)ts / 1000.0;
    SceneMetaData smd;
    userGenerator.GetUserPixels(0, smd);
    for (int y = 0; y < this->def_depth_height; y++)
    {
        for (int x = 0; x < this->def_depth_width; x++)
        {
            int player = smd[y * this->def_depth_width + x];
            int depthVal = pDepthMap[y * this->def_depth_width + x];
            // We associate the depth with the first 13 bits, using the last 3
            // for the player identification
            int finalValue = 0;
            finalValue |= ((depthVal << 3) & 0xFFF8);
            finalValue |= (player & 0x0007);
            //if (x==320 && y==240)
            //    fprintf(stdout, "%d %d\n", ((finalValue&0xFFF8)>>3), finalValue&0x0007);
            depthMat->data.s[y * this->def_depth_width + x] = finalValue;
        }
    }
    if (device == KINECT_TAGS_DEVICE_KINECT)
    {
        cvGetImage(depthMat, depthTmp);
        resizeImage(depthTmp, depthImage);
    }
    else
        cvGetImage(depthMat, depthImage);
    depth.wrapIplImage(depthImage);
    return true;
}
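/* A minimal companion sketch: the loop above packs depth into the upper 13 bits
   and the player ID into the lower 3 bits of each 16-bit pixel; a receiver can
   unpack a pixel as below (the helper name unpackDepthPlayer is illustrative,
   not from the source). */
inline void unpackDepthPlayer(unsigned short packed, int &depthVal, int &player)
{
    depthVal = (packed & 0xFFF8) >> 3; // upper 13 bits: depth
    player   =  packed & 0x0007;       // lower 3 bits: player ID
}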
/*
 * Init the thread, instantiate the device driver and connect the ports appropriately.
 * @return true iff successful.
 */
virtual bool threadInit()
{
    Property p;
    p.put("device", "logpolarclient");
    p.put("local", local.c_str());
    p.put("remote", remote.c_str());
    poly.open(p);
    if (poly.isValid())
    {
        poly.view(lpImage);
        active = false;
        if (lpImage != 0)
        {
            const int nang = lpImage->nang();
            const int necc = lpImage->necc();
            const int fovea = lpImage->fovea();
            const double overlap = lpImage->overlap();
            if (necc != 0 && nang != 0)
            {
                fprintf(stdout, "logpolar format with ang: %d ecc: %d fov: %d ovl: %f\n",
                        nang, necc, fovea, overlap);
                fprintf(stdout, "cartesian image of size %d %d\n", width, height);
                trsf.allocLookupTables(L2C, necc, nang, width, height, overlap);
                active = true;
                lp.resize(nang, necc);
                // open the output port
                out.open(outname.c_str());
            }
        }
    }
    return active;
}
static bool ImageReadMono(ImageOf<PixelMono> &img, const char *filename) { int width, height, color, num; FILE *fp=0; fp = fopen(filename, "rb"); if(fp==0) { fprintf(stderr, "Error opening %s, check if file exists.\n", filename); return false; } if (!ReadHeader(fp, &height, &width, &color)) { fclose (fp); fprintf(stderr, "Error reading header, is file a valid ppm/pgm?\n"); return false; } if (color) { fclose(fp); fprintf(stderr, "File is color, conversion not yet supported\n"); return false; } img.resize(width,height); const int w = img.width() * img.getPixelSize(); const int h = img.height(); const int pad = img.getRowSize(); unsigned char *dst = img.getRawImage (); num = 0; for (int i = 0; i < h; i++) { num += (int)fread((void *) dst, 1, (size_t) w, fp); dst += pad; } fclose(fp); return true; }
void testCopy() { report(0,"testing image copying..."); ImageOf<PixelRgb> img1; img1.resize(128,64); for (int x=0; x<img1.width(); x++) { for (int y=0; y<img1.height(); y++) { PixelRgb& pixel = img1.pixel(x,y); pixel.r = x; pixel.g = y; pixel.b = 42; } } ImageOf<PixelRgb> result; result.copy(img1); checkEqual(img1.width(),result.width(),"width check"); checkEqual(img1.height(),result.height(),"height check"); if (img1.width()==result.width() && img1.height()==result.height()) { int mismatch = 0; for (int x=0; x<img1.width(); x++) { for (int y=0; y<img1.height(); y++) { PixelRgb& pix0 = img1.pixel(x,y); PixelRgb& pix1 = result.pixel(x,y); if (pix0.r!=pix1.r || pix0.g!=pix1.g || pix0.b!=pix1.b) { mismatch++; } } } checkTrue(mismatch==0,"pixel match check"); } }
void testCast() { report(0,"testing image casting..."); ImageOf<PixelRgb> img1; img1.resize(128,64); for (int x=0; x<img1.width(); x++) { for (int y=0; y<img1.height(); y++) { PixelRgb& pixel = img1.pixel(x,y); unsigned char v = x%30; pixel.r = v; pixel.g = v; pixel.b = v; } } ImageOf<PixelMono> result; result.copy(img1); checkEqual(img1.width(),result.width(),"width check"); checkEqual(img1.height(),result.height(),"height check"); if (img1.width()==result.width() && img1.height()==result.height()) { int mismatch = 0; for (int x=0; x<img1.width(); x++) { for (int y=0; y<img1.height(); y++) { PixelRgb& pix0 = img1.pixel(x,y); PixelMono& pix1 = result.pixel(x,y); if (pix0.r>pix1+1 || pix0.r<pix1-1) { mismatch++; } } } checkTrue(mismatch==0,"pixel match check"); } }
bool OdeSdlSimulation::getImage(ImageOf<PixelRgb>& target)
{
    int w = cameraSizeWidth;
    int h = cameraSizeHeight;
    int p = 3;
    char *buf = new char[w * h * p];
    glReadPixels(0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE, buf);
    ImageOf<PixelRgb> img;
    img.setQuantum(1);
    img.setExternal(buf, w, h);
    // inefficient flip!
    target.resize(img);
    int ww = img.width();
    int hh = img.height();
    for (int x=0; x<ww; x++) {
        for (int y=0; y<hh; y++) {
            target(x,y) = img(x, hh-1-y);
        }
    }
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    delete[] buf;
    return true;
}
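/* A minimal alternative to the manual flip loop above, assuming OpenCV is
   available (as it is elsewhere in this section) and the buffer is tightly
   packed RGB (quantum 1): glReadPixels returns rows bottom-up, so a single
   vertical flip restores top-down order. */
cv::Mat raw(h, w, CV_8UC3, buf);   // wraps buf, no copy
cv::Mat flipped;
cv::flip(raw, flipped, 0);         // flipCode 0: flip around the x-axis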
void CalibModule::onRead(ImageOf<PixelMono> &imgIn) { mutex.lock(); Vector kinPoint,o; iarm->getPose(kinPoint,o); Vector c,tipl(2,0.0),tipr(2,0.0); igaze->get2DPixel(0,kinPoint,c); ImageOf<PixelBgr> imgOut; cv::Rect rect=extractFingerTip(imgIn,imgOut,c,tipl); bool holdImg=false; if (motorExplorationState==motorExplorationStateLog) { cv::Scalar color=cv::Scalar(0,0,255); string tag="discarded"; if (enabled && (depthRpcPort.getOutputCount()>0)) { Vector depthPoint; if (getDepthAveraged(tipl,depthPoint,tipr)) { // consistency check double dist=norm(depthPoint-kinPoint); if (dist<max_dist) { color=cv::Scalar(0,255,0); tag="matched"; if (exp_depth2kin) { calibrator->addPoints(depthPoint,kinPoint); yInfo("collecting calibration points: p_depth=(%s); p_kin=(%s);", depthPoint.toString(3,3).c_str(),kinPoint.toString(3,3).c_str()); tag="acquired"; } if (exp_aligneyes) { Vector kinPoint4=kinPoint; kinPoint4.push_back(1.0); Vector xe,oe,kinPoint_e; Matrix He; igaze->getLeftEyePose(xe,oe); He=axis2dcm(oe); xe.push_back(1.0); He.setCol(3,xe); kinPoint_e=SE3inv(He)*kinPoint4; aligner.addPoints(tipl,kinPoint_e); yInfo("collecting points for aligning eye: tip=(%s); p_kin=(%s);", tipl.toString(3,3).c_str(),kinPoint_e.toString(3,3).c_str()); tag="acquired"; } } else yInfo("discarding calibration points: p_depth=(%s); p_kin=(%s); (|p_depth-p_kin|=%g)>%g", depthPoint.toString(3,3).c_str(),kinPoint.toString(3,3).c_str(),dist,max_dist); } else { yInfo("unavailable depth; discarding..."); tag="no-depth"; } } cv::Mat img=cv::cvarrToMat((IplImage*)imgOut.getIplImage()); cv::putText(img,tag.c_str(),cv::Point(rect.x+5,rect.y+15),CV_FONT_HERSHEY_SIMPLEX,0.5,color); motorExplorationState=motorExplorationStateTrigger; holdImg=true; } if (depthOutPort.getOutputCount()>0) { depthOutPort.prepare()=imgOut; depthOutPort.write(); if (holdImg) Time::delay(0.5); } mutex.unlock(); }
/*! * Sends a picture in the OpenCV native format IplImage */ void SamgarModule::SendPictureOCVNative( IplImage* Image ) { ImageOf<PixelBgr> yarpReturnImage; yarpReturnImage.wrapIplImage( Image ); SendPictureYarpNative( yarpReturnImage ); }
int main()
{
    Network yarp; // set up yarp
    // BufferedPort instance that sends/receives an RGB image in the background
    // without blocking the process
    BufferedPort<ImageOf<PixelRgb> > imagePort;
    // BufferedPort instance that sends/receives a vector
    BufferedPort<Vector> targetPort;
    imagePort.open("/tutorial/image/in");   // give the port a name
    targetPort.open("/tutorial/target/out");
    //Network::connect("/icubSim/cam/left","/tutorial/image/in");
    // the /icub/cam/left port is opened by the camera module
    Network::connect("/icub/cam/left","/tutorial/image/in");
    while (1) { // repeat forever
        // read an image: 'image' points to whatever arrives on imagePort
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL) { // check we actually got something
            printf("We got an image of size %dx%d\n", image->width(), image->height());
            //pramod added nov 15
            IplImage *frame_temp = (IplImage*)image->getIplImage();
            IplImage *frame1 = cvCreateImage(cvSize(frame_temp->width, frame_temp->height),
                                             frame_temp->depth, 3);
            cvCvtColor(frame_temp, frame1, CV_RGB2BGR);
            frame1->origin = 0;
            cvNamedWindow("cName", CV_WINDOW_AUTOSIZE);
            cvShowImage("cName", frame1);
            // cvWaitKey(0);
            //pramod added ends
            int nl = frame1->height;                    // number of lines
            int nc = frame1->width * frame1->nChannels; // total number of elements per line
            int step = frame1->widthStep;               // effective width
            // get the pointer to the image buffer
            unsigned char *data = reinterpret_cast<unsigned char*>(frame1->imageData);
            int ct = 0;
            double xmean = 0.0;
            double ymean = 0.0;
            for (int i=1; i<nl; i++) {
                for (int j=0; j<nc; j+=frame1->nChannels) {
                    // finding blue pixels ---------------------
                    //printf("%d %d %d \n",(int)data[j],(int)data[j+1],(int)data[j+2]);
                    if ((int)data[j]>(int)data[j+1] && (int)data[j]>(int)data[j+2]) {
                        ct++;
                        xmean += (double)j/3.0;
                        ymean += (double)i;
                        // data[j]=data[j+1]=data[j+2]=0;
                        data[j]=255;
                    }
                    // end of pixel processing ----------------
                } // end of line
                data += step; // next line
            }
            cvNamedWindow("cName1", CV_WINDOW_AUTOSIZE);
            cvShowImage("cName1", frame1);
            char c = cvWaitKey(33);
            if (c==27)
                break;
            if (ct>0) {
                xmean /= (double)ct;
                ymean /= (double)ct;
            }
            printf("current value of ct %d \n", ct);
            if (ct>(frame1->width/20)*(frame1->height/20)) {
                printf("Best guess at blue target: %f %f \n", xmean, ymean);
                cvCircle(frame1, cvPoint((int)xmean,(int)ymean), 5, cvScalar(0,0,255,0));
                cvNamedWindow("cName1", CV_WINDOW_AUTOSIZE);
                cvShowImage("cName1", frame1);
                char c = cvWaitKey(33);
                if (c==27)
                    break;
                Vector& target = targetPort.prepare();
                target.resize(3);
                target[0] = xmean;
                target[1] = ymean;
                target[2] = 1;
                targetPort.write();
            }
            else {
                Vector& target = targetPort.prepare();
                target.resize(3);
                target[0] = 0;
                target[1] = 0;
                target[2] = 0;
                targetPort.write();
            }
            /* alternative pixel-wise loop using the YARP accessors:
            double xMean = 0;
            double yMean = 0;
            int ct = 0;
            for (int x=0; x<image->width(); x++) {
                for (int y=0; y<image->height(); y++) {
                    // 'pixel' is a reference: like a pointer, it refers to the memory
                    // of another variable (here the pixel(x,y) of 'image'), but it is
                    // used like an ordinary variable, with no dereference operator
                    PixelRgb& pixel = image->pixel(x,y);
                    // very simple test for blueishness
                    // make sure the blue level exceeds the green level
                    if (pixel.b>pixel.g) {
                        // there's a blueish pixel at (x,y)!
                        // let's find the average location of these pixels
                        xMean += x;
                        yMean += y;
                        ct++;
                    }
                }
            }
            if (ct>0) {
                xMean /= ct;
                yMean /= ct;
            }
            printf("current value of ct %d \n", ct);
            if (ct>(image->width()/20)*(image->height()/20)) {
                printf("Best guess at blue target: %g %g\n", xMean, yMean);
                Vector& target = targetPort.prepare();
                target.resize(3);
                target[0] = xMean;
                target[1] = yMean;
                target[2] = 1;
                targetPort.write();
            }
            else {
                Vector& target = targetPort.prepare();
                target.resize(3);
                target[0] = 0;
                target[1] = 0;
                target[2] = 0;
                targetPort.write();
            }
            */
        }
    }
    return 0;
}
void RGBDSensorWrapper::shallowCopyImages(const ImageOf<PixelFloat>& src, ImageOf<PixelFloat>& dest)
{
    dest.setQuantum(src.getQuantum());
    dest.setExternal(src.getRawImage(), src.width(), src.height());
}
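/* Note: shallowCopyImages() copies no pixels; dest aliases src's buffer via
   setExternal(), so dest is valid only while src is alive and unchanged.
   A sketch of the distinction (variable names illustrative): */
ImageOf<PixelFloat> shallow, deep;
shallowCopyImages(src, shallow); // no allocation: shares src's memory
deep.copy(src);                  // allocates and copies the pixel data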
int main()
{
    Network yarp; // set up yarp
    BufferedPort<ImageOf<PixelRgb> > imagePort;
    BufferedPort<Vector> targetPort;
    imagePort.open("/tutorial/image/in");
    targetPort.open("/tutorial/target/out");
    //Network::connect("/icubSim/cam/left","/tutorial/image/in");
    Network::connect("/icub/cam/left","/tutorial/image/in");
    IplImage* object_color = NULL;
    while (1) { // repeat forever
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL) {
            printf("We got an image of size %dx%d\n", image->width(), image->height());
            // frame_temp is owned by the YARP image: do not release it
            IplImage *frame_temp = (IplImage*)image->getIplImage();
            IplImage *frames = cvCreateImage(cvSize(frame_temp->width, frame_temp->height),
                                             frame_temp->depth, 3);
            cvCvtColor(frame_temp, frames, CV_RGB2BGR);
            frames->origin = 0;
            cvNamedWindow("cNameselect", CV_WINDOW_AUTOSIZE);
            cvShowImage("cNameselect", frames);
            char c = cvWaitKey(33);
            if (c==27) {
                // keep a copy of the selected frame before releasing it
                object_color = cvCreateImage(cvSize(frame_temp->width, frame_temp->height),
                                             frame_temp->depth, 3);
                cvCopy(frames, object_color);
                cvReleaseImage(&frames);
                break;
            }
            else {
                cvReleaseImage(&frames);
            }
        }
    }
    cvNamedWindow("cNameoriginal", CV_WINDOW_AUTOSIZE);
    cvShowImage("cNameoriginal", object_color);
    while (1) {
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL) {
            printf("We got an image of size %dx%d\n", image->width(), image->height());
            IplImage *frame_temp = (IplImage*)image->getIplImage();
            IplImage *frame1 = cvCreateImage(cvSize(frame_temp->width, frame_temp->height),
                                             IPL_DEPTH_8U, 1);
            cvCvtColor(frame_temp, frame1, CV_RGB2GRAY);
            // (locating the target is omitted in the source; placeholders keep this compilable)
            double xmean = 0.0, ymean = 0.0;
            // send values to the lookatlocation
            Vector& target = targetPort.prepare();
            target.resize(3);
            target[0] = xmean;
            target[1] = ymean;
            target[2] = 1;
            targetPort.write();
            cvReleaseImage(&frame1);
        }
    }
    return 0;
}
static bool ImageWriteMono(ImageOf<PixelMono>& img, const char *filename)
{
    return SavePGM((char*)img.getRawImage(), filename,
                   img.height(), img.width(), img.getRowSize());
}
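/* A round-trip sketch combining the PGM reader earlier in this section with
   this writer (file names are placeholders): */
ImageOf<PixelMono> img;
if (ImageReadMono(img, "in.pgm"))   // grayscale PGM only; color files are rejected
    ImageWriteMono(img, "out.pgm");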
bool SCSPMClassifier::train(Bottle *locations, Bottle &reply)
{
    if (locations==NULL)
        return false;
    string object_name=locations->get(0).asList()->get(0).asString().c_str();
    if (burst)
        currObject=object_name.c_str();
    // Save Features
    if (doTrain)
    {
        Bottle cmdClass;
        cmdClass.addString("save");
        cmdClass.addString(object_name.c_str());
        Bottle classReply;
        printf("Sending training request: %s\n",cmdClass.toString().c_str());
        rpcClassifier.write(cmdClass,classReply);
        printf("Received reply: %s\n",classReply.toString().c_str());
    }
    // Read Image and Locations
    ImageOf<PixelRgb> *image = imgInput.read(true);
    if (image==NULL)
        return false;
    IplImage* img=(IplImage*)image->getIplImage();
    Bottle* bb=locations->get(0).asList()->get(1).asList();
    int x_min=bb->get(0).asInt();
    int y_min=bb->get(1).asInt();
    int x_max=bb->get(2).asInt();
    int y_max=bb->get(3).asInt();
    // grow the bounding box by 5 pixels per side, staying inside the image
    if (x_min>5)
        x_min=x_min-5;
    if (y_min>5)
        y_min=y_min-5;
    if ((x_max+5)<img->width)
        x_max=x_max+5;
    if ((y_max+5)<img->height)
        y_max=y_max+5;
    int blobW=x_max-x_min;
    int blobH=y_max-y_min;
    // Crop Image
    cvSetImageROI(img,cvRect(x_min,y_min,blobW,blobH));
    IplImage* croppedImg=cvCreateImage(cvGetSize(img),img->depth,img->nChannels);
    cvCopy(img,croppedImg);
    cvResetImageROI(img);
    /*cvShowImage("blob", croppedImg);
    cvShowImage("all", img);
    cvWaitKey(0);*/
    // Send Image to SC
    ImageOf<PixelBgr>& outim=imgOutput.prepare();
    outim.wrapIplImage(croppedImg);
    imgOutput.write();
    cvReleaseImage(&croppedImg);
    // Read Coded Feature
    Bottle fea;
    featureInput.read(fea);
    if (fea.size()==0)
        return false;
    // Send Feature to Classifier
    if (!burst)
    {
        featureOutput.write(fea);
        yarp::os::Time::delay(0.01);
    }
    else
        trainingFeature.push_back(fea);
    // get negatives (disabled in the source)
    /*
    if(burst)
    {
        int offW=-3*blobW;
        for (int w=0; w<4; w++)
        {
            int offH=-3*blobH;
            for (int h=0; h<4; h++)
            {
                int x_min_bg=x_min+(offW+1.5*blobW);
                int y_min_bg=y_min+(offH+1.5*blobH);
                if((x_min_bg==x_min) && (y_min_bg==y_min))
                {
                    offH=offH+1.5*blobH;
                    continue;
                }
                if((x_min_bg+blobW)>=img->width || (x_min_bg)<0 ||
                   (y_min_bg+blobH)>=img->height || (y_min_bg)<0)
                    continue;
                //printf("W: %d H: %d Limits: %d %d %d %d Size: %d %d \n",
                //       w,h,x_min_bg+blobW,x_min_bg,y_min_bg+blobH,y_min_bg,img->height,img->width);
                cvSetImageROI(img,cvRect(x_min_bg,y_min_bg,blobW,blobH));
                IplImage* croppedImg=cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
                cvCopy(img, croppedImg);
                cvResetImageROI(img);
                ImageOf<PixelBgr>& outim=imgOutput.prepare();
                outim.wrapIplImage(croppedImg);
                imgOutput.write();
                cvReleaseImage(&croppedImg);
                // Read Coded Feature
                Bottle fea;
                featureInput.read(fea);
                negativeFeature.push_back(fea);
                offH=offH+1.5*blobH;
            }
            offW=offW+1.5*blobW;
        }
    }
    */
    // Train Classifier
    if (doTrain)
    {
        Bottle cmdTr;
        cmdTr.addString("train");
        Bottle trReply;
        printf("Sending training request: %s\n",cmdTr.toString().c_str());
        rpcClassifier.write(cmdTr,trReply);
        printf("Received reply: %s\n",trReply.toString().c_str());
    }
    reply.addString("ack");
    return true;
}
int main()
{
    Network yarp; // set up yarp
    // BufferedPort instance that sends/receives an RGB image in the background
    // without blocking the process
    BufferedPort<ImageOf<PixelRgb> > imagePort;
    // BufferedPort instance that sends/receives a vector
    BufferedPort<Vector> targetPort;
    imagePort.open("/tutorial/image/in");   // give the port a name
    targetPort.open("/tutorial/target/out");
    //Network::connect("/icubSim/cam/left","/tutorial/image/in");
    // the /icub/cam/left port is opened by the camera module
    Network::connect("/icub/cam/left","/tutorial/image/in");
    // training part
    while (1) { // repeat forever
        // read an image from the port
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL) { // check we actually got something
            printf("We got an image of size %dx%d\n", image->width(), image->height());
            //pramod added nov 15
            // frame_temp is owned by the YARP image: no allocation needed
            IplImage *frame_temp = (IplImage*)image->getIplImage();
            IplImage *frame1 = cvCreateImage(cvSize(frame_temp->width, frame_temp->height),
                                             IPL_DEPTH_8U, 1);
            cvCvtColor(frame_temp, frame1, CV_RGB2GRAY);
            cvSaveImage("object.pgm", frame1);
            //frame1->origin=0;
            //cvNamedWindow("cNameoriginal",CV_WINDOW_AUTOSIZE);
            //cvShowImage("cNameoriginal",frame_temp);
            cvNamedWindow("cNameselect", CV_WINDOW_AUTOSIZE);
            cvShowImage("cNameselect", frame1);
            cvReleaseImage(&frame1);
            char c = cvWaitKey(33);
            if (c==27)
                break;
        }
    }
    printf("capture loop done\n");
    system("/home/gabriele/siftWin32.exe <object.pgm >tmp1.txt");
    IplImage *image1 = cvLoadImage("object.pgm");
    FILE* fp1=fopen("tmp1.txt","rb");
    float *arr1x;
    float *arr1y;
    int *arr2;
    float scale, ori;
    int temp1, temp2;
    // header: number of keypoints and descriptor length
    fscanf(fp1,"%d %d \n",&temp1,&temp2);
    arr1x=(float*)malloc(sizeof(float)*temp1);
    arr1y=(float*)malloc(sizeof(float)*temp1);
    arr2=(int*)malloc(sizeof(int)*temp2*temp1);
    for (int i2=1; i2<=temp1; i2++) {
        // keypoint location, scale and orientation
        fscanf(fp1,"%f %f %f %f \n",&arr1x[i2-1],&arr1y[i2-1],&scale,&ori);
        for (int i3=1; i3<=temp2; i3++) {
            fscanf(fp1,"%d ",&arr2[(i2-1)*temp2+(i3-1)]);
        }
        fscanf(fp1,"\n");
        cvCircle(image1,cvPoint((int)arr1x[i2-1],(int)arr1y[i2-1]),3,cvScalar(0,255,255,255),1);
    }
    cvNamedWindow("cNameobject",CV_WINDOW_AUTOSIZE);
    cvShowImage("cNameobject",image1);
    printf("first one finished \n");
    fclose(fp1);
    // training part ends
    while (1) { // repeat forever
        // read an image from the port
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL) { // check we actually got something
            printf("We got an image of size %dx%d\n", image->width(), image->height());
            //pramod added nov 15
            // frame_temp is owned by the YARP image: no allocation needed
            IplImage *frame_temp = (IplImage*)image->getIplImage();
            IplImage *frame1 = cvCreateImage(cvSize(frame_temp->width, frame_temp->height),
                                             IPL_DEPTH_8U, 1);
            cvCvtColor(frame_temp, frame1, CV_RGB2GRAY);
            cvSaveImage("object_test.pgm", frame1);
            //frame1->origin=0;
            cvNamedWindow("cNameoriginal", CV_WINDOW_AUTOSIZE);
            cvShowImage("cNameoriginal", frame_temp);
            // cvWaitKey(0);
            //pramod added ends
            system("/home/gabriele/siftWin32.exe <object_test.pgm >tmp2.txt");
            IplImage *image2 = cvLoadImage("object_test.pgm");
            FILE* fp1new=fopen("tmp2.txt","rb");
            float *arr1xnew;
            float *arr1ynew;
            int *arr2new;
            float scalenew, orinew;
            int temp1new, temp2new;
            fscanf(fp1new,"%d %d \n",&temp1new,&temp2new);
            arr1xnew=(float*)malloc(sizeof(float)*temp1new);
            arr1ynew=(float*)malloc(sizeof(float)*temp1new);
            arr2new=(int*)malloc(sizeof(int)*temp2new*temp1new);
            for (int i2new=1; i2new<=temp1new; i2new++) {
                fscanf(fp1new,"%f %f %f %f \n",&arr1xnew[i2new-1],&arr1ynew[i2new-1],
                       &scalenew,&orinew);
                for (int i3new=1; i3new<=temp2new; i3new++) {
                    fscanf(fp1new,"%d ",&arr2new[(i2new-1)*temp2new+(i3new-1)]);
                }
                fscanf(fp1new,"\n");
                cvCircle(image2,cvPoint((int)arr1xnew[i2new-1],(int)arr1ynew[i2new-1]),3,
                         cvScalar(0,255,255,255),1);
            }
            fclose(fp1new);
            cvNamedWindow("cNametest",CV_WINDOW_AUTOSIZE);
            cvShowImage("cNametest",image2);
            // matching: for each keypoint of the trained object, find the test
            // keypoint with the smallest descriptor distance
            float *bestmatchx=(float*)malloc(sizeof(float)*temp1);
            float *bestmatchy=(float*)malloc(sizeof(float)*temp1);
            for (int i4=1; i4<=temp1; i4++) {
                float bestval = 1000000.0;
                for (int i5=1; i5<=temp1new; i5++) {
                    double value=0.0;
                    for (int i6=1; i6<=temp2new; i6++) {
                        double temp = arr2[(i4-1)*temp2+(i6-1)]-arr2new[(i5-1)*temp2new+(i6-1)];
                        value += temp*temp;
                    }
                    value = sqrt(value)/temp2new;
                    if (value < bestval) {
                        bestval=value;
                        bestmatchx[i4-1]=arr1xnew[i5-1];
                        bestmatchy[i4-1]=arr1ynew[i5-1];
                    }
                }
                cvCircle(frame1,cvPoint((int)bestmatchx[i4-1],(int)bestmatchy[i4-1]),3,
                         cvScalar(0,255,255,255),1);
            }
            double xmean = 0.0;
            double ymean = 0.0;
            for (int i7=1; i7<=temp1; i7++) {
                xmean += bestmatchx[i7-1];
                ymean += bestmatchy[i7-1];
            }
            xmean = xmean/temp1;
            ymean = ymean/temp1;
            printf("%lf %lf",xmean,ymean);
            cvCircle(frame1,cvPoint((int)xmean,(int)ymean),10,cvScalar(0,255,255,0));
            cvNamedWindow("cNamematch",CV_WINDOW_AUTOSIZE);
            cvShowImage("cNamematch",frame1);
            char c = cvWaitKey(33);
            Vector& target = targetPort.prepare();
            target.resize(3);
            target[0] = xmean;
            target[1] = ymean;
            target[2] = 1;
            targetPort.write();
            free(arr1xnew); free(arr1ynew); free(arr2new);
            free(bestmatchx); free(bestmatchy);
            cvReleaseImage(&frame1);
            cvReleaseImage(&image2);
        }
    }
    cvReleaseImage(&image1);
    return 0;
}
bool updateModule()
{
    ImageOf<PixelBgr> *img=imgInPort.read();
    if (img==NULL)
        return true;
    mutex.lock();
    ImageOf<PixelMono> imgMono;
    imgMono.resize(img->width(),img->height());
    cv::Mat imgMat=cv::cvarrToMat((IplImage*)img->getIplImage());
    cv::Mat imgMonoMat=cv::cvarrToMat((IplImage*)imgMono.getIplImage());
    cv::cvtColor(imgMat,imgMonoMat,CV_BGR2GRAY);
    if (initBoundingBox)
    {
        tracker->initialise(imgMonoMat,tl,br);
        initBoundingBox=false;
    }
    if (doCMT)
    {
        if (tracker->processFrame(imgMonoMat))
        {
            if (dataOutPort.getOutputCount()>0)
            {
                Bottle &data=dataOutPort.prepare();
                data.clear();
                data.addInt((int)tracker->topLeft.x);
                data.addInt((int)tracker->topLeft.y);
                data.addInt((int)tracker->topRight.x);
                data.addInt((int)tracker->topRight.y);
                data.addInt((int)tracker->bottomRight.x);
                data.addInt((int)tracker->bottomRight.y);
                data.addInt((int)tracker->bottomLeft.x);
                data.addInt((int)tracker->bottomLeft.y);
                dataOutPort.write();
            }
            if (imgOutPort.getOutputCount()>0)
            {
                cv::line(imgMat,tracker->topLeft,tracker->topRight,cv::Scalar(255,0,0),2);
                cv::line(imgMat,tracker->topRight,tracker->bottomRight,cv::Scalar(255,0,0),2);
                cv::line(imgMat,tracker->bottomRight,tracker->bottomLeft,cv::Scalar(255,0,0),2);
                cv::line(imgMat,tracker->bottomLeft,tracker->topLeft,cv::Scalar(255,0,0),2);
                for (size_t i=0; i<tracker->trackedKeypoints.size(); i++)
                    cv::circle(imgMat,tracker->trackedKeypoints[i].first.pt,3,cv::Scalar(0,255,0));
            }
        }
    }
    if (imgOutPort.getOutputCount()>0)
    {
        imgOutPort.prepare()=*img;
        imgOutPort.write();
    }
    mutex.unlock();
    return true;
}
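/* A minimal consumer sketch for the corner bottle written above; the port name
   is an assumption. The eight integers are (x,y) pairs in the order written:
   top-left, top-right, bottom-right, bottom-left. */
BufferedPort<Bottle> cornersIn;
cornersIn.open("/cmt/data:i");
if (Bottle *b=cornersIn.read(false))
{
    int tlx=b->get(0).asInt(), tly=b->get(1).asInt();
    int brx=b->get(4).asInt(), bry=b->get(5).asInt();
    // (tlx,tly)-(brx,bry) spans the tracked quadrilateral's diagonal
}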
void CamCalibPort::onRead(ImageOf<PixelRgb> &yrpImgIn)
{
    double t=Time::now();
    yarp::os::Stamp s;
    this->getEnvelope(s);
    double time = s.getTime();
    if (!updatePose(time)) {
        return;
    }
    unsigned char *pixel = yrpImgIn.getPixelAddress(0, 0);
    double *stamp = reinterpret_cast<double*>(pixel);
    double backdoorTime = stamp[0];
    double backdoorRoll = stamp[1];
    double backdoorPitch = stamp[2];
    double backdoorYaw = stamp[3];
    if (time != backdoorTime) {
        yWarning() << "Backdoor time:" << backdoorTime << "Imu time:" << time
                   << "diff:" << (backdoorTime - time);
    }
    Bottle& b = rpyPort.prepare();
    b.clear();
    b.addDouble(roll);
    b.addDouble(pitch);
    b.addDouble(yaw);
    b.addDouble(backdoorRoll);
    b.addDouble(backdoorPitch);
    b.addDouble(backdoorYaw);
    b.addDouble(backdoorRoll - roll);
    b.addDouble(backdoorPitch - pitch);
    b.addDouble(backdoorYaw - yaw);
    rpyPort.write();
    // execute calibration
    if (portImgOut!=NULL) {
        yarp::sig::ImageOf<PixelRgb> &yrpImgOut=portImgOut->prepare();
        if (verbose) {
            yDebug("received input image after %g [s] ... ",t-t0);
        }
        double t1=Time::now();
        if (calibTool!=NULL) {
            calibTool->apply(yrpImgIn,yrpImgOut);
            for (int r=0; r<yrpImgOut.height(); r++) {
                for (int c=0; c<yrpImgOut.width(); c++) {
                    unsigned char *pixel = yrpImgOut.getPixelAddress(c,r);
                    double mean = (1.0/3.0)*(pixel[0]+pixel[1]+pixel[2]);
                    for (int i=0; i<3; i++) {
                        double s=pixel[i]-mean;
                        double sn=currSat*s;
                        sn+=mean;
                        if (sn<0.0)
                            sn=0.0;
                        else if (sn>255.0)
                            sn=255.0;
                        pixel[i]=(unsigned char)sn;
                    }
                }
            }
            if (verbose)
                yDebug("calibrated in %g [s]\n",Time::now()-t1);
        }
        else {
            yrpImgOut=yrpImgIn;
            if (verbose)
                yDebug("just copied in %g [s]\n",Time::now()-t1);
        }
        m.lock();
        // timestamp propagation
        //yarp::os::Stamp stamp;
        //BufferedPort<ImageOf<PixelRgb> >::getEnvelope(stamp);
        //portImgOut->setEnvelope(stamp);
        Bottle pose;
        pose.addDouble(roll);
        pose.addDouble(pitch);
        pose.addDouble(yaw);
        portImgOut->setEnvelope(pose);
        portImgOut->writeStrict();
        m.unlock();
    }
    t0=t;
}
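/* The per-pixel saturation step above, factored out for clarity: each channel
   is moved away from (currSat > 1) or toward (currSat < 1) the per-pixel
   channel mean, then clamped to [0,255]. A minimal sketch: */
inline unsigned char applySaturation(unsigned char p, double mean, double currSat)
{
    double sn = mean + currSat*(p - mean);
    return (unsigned char)(sn<0.0 ? 0.0 : (sn>255.0 ? 255.0 : sn));
}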
void ReadKernel(ImageOf<float> &kernelImg, const Parameters &params)
{
    printf("Reading %s ... ", params.pImKernelFile.c_str());
    if (params.pImKernelFile.find(".txt") != string::npos) {
        // kernel file is txt
        float val;
        ifstream inFile;
        inFile.open(params.pImKernelFile.c_str());
        if (!inFile) {
            printf("Error: Unable to open file %s\n", params.pImKernelFile.c_str());
            exit(0);
        }
        int lineCount = 0;
        int pCount = 0;
        int height = 0;
        int width = 0;
        while (inFile >> val) {
            if (lineCount == 0) {
                height = (int)val;
            }
            else if (lineCount == 1) {
                width = (int)val;
                kernelImg.ReAllocate(height, width, 1);
            }
            else {
                // values are stored column-major: fill down each column
                int y = pCount % height;
                int x = (int)floor((float)pCount / (float)height);
                kernelImg.Pixel(y,x) = val;
                pCount++;
                if (pCount >= height*width) {
                    break;
                }
            }
            lineCount++;
        }
        inFile.close();
        if (width <= 10) {
            printf("done:\n");
            for (int y = 0; y < height; y++) {
                printf("\t");
                for (int x = 0; x < width; x++) {
                    printf("%f ", kernelImg.Pixel(y,x));
                }
                printf("\n");
            }
        }
        else {
            printf("done\n");
        }
    }
}
void TRACKERManager::onRead(ImageOf<yarp::sig::PixelRgb> &img)
{
    ImageOf<PixelRgb> &outImg = imageOutPort.prepare();
    outImg = img;
    orig = (IplImage*)img.getIplImage();
    mutex.wait();
    std::map<unsigned int, ParticleThread*>::iterator itr;
    for (itr = workerThreads.begin(); itr!=workerThreads.end(); itr++) {
        workerThreads[itr->first]->update(orig);
    }
    mutex.post();
    if (flag) {
        if (!deleted) {
            yDebug() << "[TRACKERManager::onRead] will now delete TRACKER";
            stopTracker(toDel[0]);
            toDel.clear();
            deleted = true;
            yDebug() << "[TRACKERManager::onRead] will now delete TRACKER";
        }
    }
    Bottle &b = targetOutPort.prepare();
    b.clear();
    container.lock();
    for (TargetObjectRecord::iterator itr = container.targets.begin(); itr!=container.targets.end(); itr++) {
        TargetObject *obj = *itr;
        // send template and segmentation only once
        if (!obj->hasSent) {
            ImageOf<PixelBgr> &segImg = imageSegOutPort.prepare();
            segImg.resize(obj->seg->width, obj->seg->height);
            cvCopyImage(obj->seg, (IplImage*)segImg.getIplImage());
            imageSegOutPort.write();
            ImageOf<PixelBgr> &segCrop = imageTplOutPort.prepare();
            segCrop.resize(obj->tpl->width, obj->tpl->height);
            cvCopyImage(obj->tpl, (IplImage*)segCrop.getIplImage());
            imageTplOutPort.write();
            obj->hasSent = true;
        }
        if (obj->boundingBox.size()) {
            bool send = true;
            for (int i = 0; i < pausedThreads.size(); i++)
                if (obj->owner == pausedThreads[i])
                    send = false;
            if (send) {
                Bottle &t = b.addList();
                t.addDouble(obj->owner);
                for (int i=0; i<obj->boundingBox.size(); i++)
                    t.addDouble(obj->boundingBox[i]);
                t.addInt(obj->group);
                int x0 = obj->boundingBox[2];
                int y0 = obj->boundingBox[3];
                int x1 = obj->boundingBox[4];
                int y1 = obj->boundingBox[5];
                int cx = obj->boundingBox[0];
                int cy = obj->boundingBox[1];
                if (disParticles) {
                    for (int j = 0; j < obj->particlePoints.size(); j++)
                        cvCircle((IplImage*)outImg.getIplImage(), obj->particlePoints[j], 3, obj->colour, 1);
                }
                cvRectangle((IplImage*)outImg.getIplImage(), cvPoint(x0, y0), cvPoint(x1, y1), obj->colour, 1, 8, 0);
                cvCircle((IplImage*)outImg.getIplImage(), cvPoint(cx, cy), 3, CV_RGB(255, 0, 0), 1);
                cvCircle((IplImage*)outImg.getIplImage(), cvPoint(x0, y0), 3, CV_RGB(0, 255, 0), 1);
                cvCircle((IplImage*)outImg.getIplImage(), cvPoint(x1, y1), 3, CV_RGB(0, 255, 0), 1);
                // cluster the particle cloud to detect a possible target split
                int sampleCount = obj->particlePoints.size();
                int dimensions = 2;
                int clusterCount = 2;
                cv::Mat points(sampleCount, dimensions, CV_32F);
                cv::Mat labels;
                cv::Mat centers(clusterCount, dimensions, points.type());
                for (int i = 0; i<sampleCount; i++) {
                    points.at<float>(i,0) = obj->particlePoints[i].x;
                    points.at<float>(i,1) = obj->particlePoints[i].y;
                }
#ifdef KMEANS_WITH_POINTER
                cv::kmeans(points, clusterCount, labels,
                           cv::TermCriteria(cv::TermCriteria::EPS+cv::TermCriteria::COUNT, 1, 10.0),
                           3, cv::KMEANS_PP_CENTERS, &centers);
#else
                cv::kmeans(points, clusterCount, labels,
                           cv::TermCriteria(cv::TermCriteria::EPS+cv::TermCriteria::COUNT, 1, 10.0),
                           3, cv::KMEANS_PP_CENTERS, centers);
#endif
                cv::Point pts[3];
                for (int i = 0; i < clusterCount; i++) {
                    cv::Point ipt;
                    ipt.x = (int)centers.at<float>(i,0);
                    ipt.y = (int)centers.at<float>(i,1);
                    pts[i] = ipt;
                    //cvCircle((IplImage*)outImg.getIplImage(), ipt, 5, CV_RGB(0, 0, 255), CV_FILLED, CV_AA);
                }
                int limits_x = abs(pts[0].x - pts[1].x);
                int limits_y = abs(pts[0].y - pts[1].y);
                int mutationThres = 30;
                if (limits_x > mutationThres || limits_y > mutationThres) {
                    if (!flag) {
                        cvCircle((IplImage*)outImg.getIplImage(), pts[0], 5, CV_RGB(255, 0, 255), CV_FILLED, CV_AA);
                        cvCircle((IplImage*)outImg.getIplImage(), pts[1], 5, CV_RGB(255, 0, 255), CV_FILLED, CV_AA);
                        //cvSaveImage("output.png", (IplImage*)outImg.getIplImage());
                        //cloneTracker(obj, pts);
                    }
                }
            }
        }
    }
    container.unlock();
    if (b.size())
        targetOutPort.write();
    imageOutPort.write();
}
int main(int argc, char *argv[])
{
    printf("Show a circle for 3 seconds...\n");
    ImageOf<PixelRgb> yarpImage;
    printf("Creating a YARP image of a nice circle\n");
    yarpImage.resize(300,200);
    addCircle(yarpImage, PixelRgb(255,0,0),
              yarpImage.width()/2, yarpImage.height()/2,
              yarpImage.height()/4);
    addCircle(yarpImage, PixelRgb(255,50,50),
              yarpImage.width()/2, yarpImage.height()/2,
              yarpImage.height()/5);
    printf("Copying YARP image to an OpenCV/IPL image\n");
    IplImage *cvImage = cvCreateImage(cvSize(yarpImage.width(), yarpImage.height()),
                                      IPL_DEPTH_8U, 3);
    cvCvtColor((IplImage*)yarpImage.getIplImage(), cvImage, CV_RGB2BGR);
    printf("Showing OpenCV/IPL image\n");
    cvNamedWindow("test",1);
    cvShowImage("test",cvImage);
    printf("Taking image back into YARP...\n");
    ImageOf<PixelBgr> yarpReturnImage;
    yarpReturnImage.wrapIplImage(cvImage);
    yarp::sig::file::write(yarpReturnImage,"test.ppm");
    printf("Saving YARP image to test.ppm\n");
    cvWaitKey(3000);
    cvDestroyWindow("test");
    cvReleaseImage(&cvImage);
    printf("...done\n");
    return 0;
}
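/* The copy-based bridge above has a non-copying counterpart used elsewhere in
   this section: wrap the YARP buffer with cv::cvarrToMat. A minimal sketch;
   note that YARP images are RGB while most OpenCV routines assume BGR. */
cv::Mat rgb = cv::cvarrToMat((IplImage*)yarpImage.getIplImage()); // no copy
cv::Mat bgr;
cv::cvtColor(rgb, bgr, CV_RGB2BGR);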
int main()
{
    Network yarp; // set up yarp
    // BufferedPort instance that sends/receives an RGB image in the background
    // without blocking the process
    BufferedPort<ImageOf<PixelRgb> > imagePort;
    // BufferedPort instance that sends/receives a vector
    BufferedPort<Vector> targetPort;
    imagePort.open("/tutorial/image/in");   // give the port a name
    targetPort.open("/tutorial/target/out");
    //Network::connect("/icubSim/cam/left","/tutorial/image/in");
    // the /icub/cam/left port is opened by the camera module
    Network::connect("/icub/cam/left","/tutorial/image/in");
    int cnt=0;
    bool temp=true;
    while (1) { // repeat forever
        // read an image from the port
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image!=NULL) { // check we actually got something
            printf("We got an image of size %dx%d\n", image->width(), image->height());
            IplImage *frame_temp = (IplImage*)image->getIplImage();
            IplImage *frame1 = cvCreateImage(cvSize(frame_temp->width, frame_temp->height),
                                             frame_temp->depth, 3);
            cvCvtColor(frame_temp, frame1, CV_RGB2BGR);
            frame1->origin = 0;
            cvNamedWindow("cName", CV_WINDOW_AUTOSIZE);
            cvShowImage("cName", frame1);
            cvWaitKey(0);
            double xMean = 0;
            double yMean = 0;
            int ct = 0;
            for (int x=0; x<frame1->width; x++) {
                for (int y=0; y<frame1->height; y++) {
                    // frame1 is BGR after cvCvtColor: p[0]=blue, p[1]=green, p[2]=red
                    unsigned char *p = (unsigned char*)(frame1->imageData +
                                        frame1->widthStep*y + frame1->nChannels*x);
                    //PixelRgb& pixel = image->pixel(x,y);
                    // ('pixel' would be a reference: like a pointer, it refers to the
                    // memory of another variable, here pixel(x,y) of 'image', but is
                    // used like an ordinary variable, with no dereference operator)
                    // very simple test for blueishness:
                    // make sure the blue level exceeds the green level
                    if (p[0] > p[1]) {
                        // there's a blueish pixel at (x,y)!
                        // let's find the average location of these pixels
                        xMean += x;
                        yMean += y;
                        ct++;
                    }
                }
            }
            if (ct>0) {
                xMean /= ct;
                yMean /= ct;
            }
            printf("current value of ct %d \n", ct);
            if (ct>(frame1->width/20)*(frame1->height/20)) {
                printf("Best guess at blue target: %d %d \n", cnt, cnt); //%g %g xMean, yMean
                Vector& target = targetPort.prepare();
                target.resize(3);
                // sweep cnt back and forth between 0 and 25
                if (cnt>25)
                    temp=false;
                else if (cnt<0)
                    temp=true;
                if (temp)
                    cnt++;
                else
                    cnt--;
                // target[0] = xMean;
                // target[1] = yMean;
                target[0] = cnt;
                target[1] = cnt;
                target[2] = 1;
                targetPort.write();
            }
            else {
                Vector& target = targetPort.prepare();
                target.resize(3);
                target[0] = 0;
                target[1] = 0;
                target[2] = 0;
                targetPort.write();
            }
            cvReleaseImage(&frame1);
        }
    }
    return 0;
}
bool faceTrackerModule::updateModule() { unsigned long AAtime=0, BBtime=0; //check processing time AAtime = cv::getTickCount(); //check processing time ImageOf<PixelRgb> *yarpImageLeft = imagePortLeft.read(); //printf("Copying YARP image to an OpenCV/IPL image\n"); if ( cvIplImageLeft == NULL ) { cvIplImageLeft = cvCreateImage(cvSize(yarpImageLeft->width(),yarpImageLeft->height()), IPL_DEPTH_8U,3); } cvCvtColor((IplImage*)yarpImageLeft->getIplImage(), cvIplImageLeft, CV_RGB2BGR); cv::Mat cvMatImageLeft(cvIplImageLeft); //vector< cv::Mat > vImg; //cv::Mat rImg; //vImg.push_back(cvMatImageLeft); if(yarpImageLeft!=NULL) // check we actually got something { // resize images cv::resize(cvMatImageLeft, cvMatImageLeft, cv::Size(320, 240), 0,0,CV_INTER_NN); //downsample 1/2x // convert captured frame to gray scale & equalize cv::Mat cvMatGrayImageLeft; cv::cvtColor(cvMatImageLeft, cvMatGrayImageLeft, CV_BGR2GRAY); cv::equalizeHist(cvMatGrayImageLeft, cvMatGrayImageLeft); // ================================================================== // face detection routine // a vector array to store the face found std::vector<cv::Rect> faces_left;; face_classifier_left.detectMultiScale(cvMatGrayImageLeft, faces_left, 1.1, // increase search scale by 10% each pass 3, // merge groups of three detections CV_HAAR_DO_CANNY_PRUNING, //CV_HAAR_FIND_BIGGEST_OBJECT, //|CV_HAAR_SCALE_IMAGE, CV_HAAR_DO_CANNY_PRUNING cv::Size(30,30), cv::Size(50,50)); int biggest_face_left_idx = 0; int biggest_face_left_size_buf = 0; // choosing the biggest face for(unsigned int i=0; i<faces_left.size(); i++) { cv::Point flb(faces_left[i].x + faces_left[i].width, faces_left[i].y + faces_left[i].height); cv::Point ftr(faces_left[i].x, faces_left[i].y); cv::rectangle(cvMatImageLeft, flb, ftr, cv::Scalar(0,255,0), 3,4,0); if(biggest_face_left_size_buf < faces_left[i].height) { biggest_face_left_size_buf = faces_left[i].height; biggest_face_left_idx = i; } } //====================================================================================== // Mode prev_encoders = cur_encoders; enc->getEncoders(cur_encoders.data()); // cout << "Encoder: " << cur_encoders[0] << " " << cur_encoders[1] << " "<< cur_encoders[2] << '\n'; /////////////////////////// // To set position mode if (mode == 0) { //------------------------------------------------------------- // Going to the set position mode if (setpos_counter < 100) { //cout << setpos_counter << endl; setpoints[0] = (0-cur_encoders[0])*0.3; // common tilt of head setpoints[1] = (0-cur_encoders[1])*0.3; // common roll of head setpoints[2] = (0-cur_encoders[2])*0.3; // common pan of head setpoints[3] = (0-cur_encoders[3])*0.3; // common tilt of eyes setpoints[4] = (0-cur_encoders[4])*0.3; // common pan of eyes setpos_counter++; } else { printf("Going to the set position is DONE!\n"); setpoints[0] = 0; setpoints[2] = 0; setpoints[3] = 0; setpoints[4] = 0; mode = 1; printf("Face searching mode!\n"); setpos_counter = 0; } vel->velocityMove(setpoints.data()); } /////////////////////////// // Panning mode else if (mode == 1) { if(faces_left.size() > 0) { mode = 2; printf("I find a face!\n"); panning_counter++; } else { //------------------------------------------------------------- // panning mode //printf("Face searching mode!\n"); //cout << panning_target << endl; setpoints[0] = (tilt_target-cur_encoders[0])*0.3; // common tilt of head setpoints[1] = (0-cur_encoders[1])*0.3; // common roll of head setpoints[2] = (pan_target-cur_encoders[2])*0.3; // common pan of head setpoints[3] = 
(0-cur_encoders[3])*0.3; // common tilt of eyes setpoints[4] = (0-cur_encoders[4])*0.3; // common pan of eyes if ((abs(tilt_target - cur_encoders[0]) < 1) && (abs(pan_target - cur_encoders[2]) < 1)) { pan_r = ((double)rand() / ((double)(RAND_MAX)+(double)(1))); tilt_r = ((double)rand() / ((double)(RAND_MAX)+(double)(1))); pan_target = (int)((pan_r*pan_max)-(pan_max/2)); tilt_target = (int)((tilt_r*tilt_max)-(tilt_max/2)); //cout << pan_target << ", " << tilt_target << endl; } } vel->velocityMove(setpoints.data()); } else if (mode == 2) { //------------------------------------------------------------- // face tracking mode if(faces_left.size() > 0) { double x = 320-(faces_left[biggest_face_left_idx].x + faces_left[biggest_face_left_idx].width/2); double y = 240-(faces_left[biggest_face_left_idx].y + faces_left[biggest_face_left_idx].height/2); //cout << "x:" << x << " y:" << y << '\n'; x -= 320/2; y -= 240/2; double vx = x*0.3; // Not to move too fast double vy = y*0.3; /* prepare command */ for(int i=0;i<jnts;i++) { setpoints[i] = 0; } setpoints[0] = vy; // common tilt of head setpoints[2] = vx; // common pan of head setpoints[3] = vy; // common tilt of eyes setpoints[4] = -vx; // common pan of eyes x_buf = x; y_buf = y; counter = 0; // stopping smoothly } else if (faces_left.size() == 0 && counter < 10) { //cout << counter << endl; double vx = x_buf*0.3; // Not to move too fast double vy = y_buf*0.3; /* prepare command */ for(int i=0;i<jnts;i++) { setpoints[i] = 0; } setpoints[0] = vy; // common tilt of head setpoints[2] = vx; // common pan of head setpoints[3] = vy; // common tilt of eyes setpoints[4] = -vx; // common pan of eyes counter++; } else { printf("Hey! I don't see any face.\n"); setpoints[0] = 0; setpoints[2] = 0; setpoints[3] = 0; setpoints[4] = 0; Time::delay(0.3); stuck_counter++; mode = 2; if(stuck_counter == 10) { if(panning_counter > 5) { mode = 0; printf("To a set position!\n"); stuck_counter = 0; } else { mode = 1; printf("Face searching mode!\n"); stuck_counter = 0; } } } vel->velocityMove(setpoints.data()); } cv::imshow("cvImage_Left", cvMatImageLeft); } BBtime = cv::getTickCount(); //check processing time cv::waitKey(1); return true; }
bool GBSegmModule::updateModule()
{
    ImageOf<PixelRgb> *yrpImgIn;
    static int cycles = 0;
    yrpImgIn = _imgPort.read();
    if (yrpImgIn == NULL) // this is the case if module is requested to quit while waiting for image
        return true;
    bool use_private_stamp;
    Stamp s;
    if (!_imgPort.getEnvelope(s)) {
        cout << "No stamp found in input image. Will use private stamp" << endl;
        use_private_stamp = true;
    }
    else {
        cout << "Received image #" << s.getCount() << " generated at time " << s.getTime() << endl;
        use_private_stamp = false;
    }
    if (cycles == 0)
        _timestart = yarp::os::Time::now();
    cycles++;
    //IplImage *iplimg = (IplImage*)yrpImgIn->getIplImage();
    cout << "converting image of size " << yrpImgIn->width() << "x" << yrpImgIn->height()
         << " to size " << input->width() << "x" << input->height() << endl;
    YarpImageToRGBImage(input, yrpImgIn);
    cout << "converted" << endl;
    segMutex.wait();
    if (seg)
        delete seg;
    seg = segment_image(input, sigma, k, min_size, &num_components);
    segMutex.post();
    cout << "processed" << endl;
    // prepare timestamps
    if (use_private_stamp) {
        _stamp.update();
        _viewPort.setEnvelope(_stamp);
    }
    else {
        _viewPort.setEnvelope(s);
    }
    ImageOf<PixelRgb> &yrpImgView = _viewPort.prepare();
    // rescale image if required
    yrpImgView.resize(seg->width(), seg->height());
    RGBImageToYarpImage(seg, &yrpImgView);
    _viewPort.write();
    // report the frame rate
    if (cycles % 100 == 0) {
        double cps = ((double)cycles)/(yarp::os::Time::now() - _timestart);
        printf("fps: %02.2f\n", cps);
    }
    return true;
}
int main(int argc, char** argv)
{
    Network network;
    BufferedPort<ImageOf<PixelRgb> >* imageInputPort = new BufferedPort<ImageOf<PixelRgb> >();
    BufferedPort<ImageOf<PixelRgb> >* imageOutputPort = new BufferedPort<ImageOf<PixelRgb> >();
    BufferedPort<Bottle>* bbOutputPort = new BufferedPort<Bottle>();
    yarp::os::BufferedPort<yarp::os::Bottle> dataPortMec;
    imageInputPort->open("/img:i");
    imageOutputPort->open("/img:o");
    //bbOutputPort->open("/bb:o"); // edited VM
    network.connect("/icub/camcalib/left/out", "/img:i");
    dataPortMec.open("/Shapedata:o");
    //string para_yml_file = "data/para_cmp8toys.yml"; // modified VM
    string para_yml_file = "data_blocks/para_blocks.yml";
    /////////////////////////////////////////////////
    // STEP1: initiate
    /////////////////////////////////////////////////
    bool flag;
    CTLdetector detector;
    flag = detector.initiate(para_yml_file);
    if (!flag) return 0;
    /////////////////////////////////////////////////
    // STEP2: train
    /////////////////////////////////////////////////
    flag = detector.train();
    if (!flag) return 0;
    /////////////////////////////////////////////////
    // STEP3: detect
    /////////////////////////////////////////////////
    int key = 0;
    cv::Mat cvMatImage;
    std::cout << "***Detecting..." << std::endl;
    while (key != 27) {
        ImageOf<PixelRgb> *img = imageInputPort->read(true);
        cvMatImage.create(img->height(), img->width(), CV_8UC3);
        unsigned char* ptr = img->getRawImage();
        // note: assumes the YARP image rows are unpadded
        memcpy(cvMatImage.data, ptr, cvMatImage.cols * cvMatImage.rows * 3);
        cv::cvtColor(cvMatImage, cvMatImage, CV_RGB2BGR);
        detector.detect(cvMatImage);
        //detector.showDetObjs(cvMatImage,Scalar(0,255,0),Scalar(255,255,255),480);
        // changed 3.2.2013 Rea: changed to original size
        detector.showDetObjs(cvMatImage, Scalar(0,255,0), Scalar(255,255,255), 0);
        //detector.dispDetObjs();
        std::vector<DetObj> objects;
        objects = detector.getDetObjs();
        // sending the image out through the image port (disabled in the source)
        /*
        if(imageOutputPort->getOutputCount())
        {
            cv::Mat ppIm = detector.getPostProcessIm();
            //cv::cvtColor(ppIm, ppIm, CV_BGR2RGB);
            ImageOf<PixelRgb>& tmpImage = imageOutputPort->prepare();
            tmpImage.resize(img->width(),img->height());
            tmpImage.zero();
            unsigned char* ptrd = tmpImage.getRawImage();
            unsigned char* ptrs = ppIm.data;
            int padding = tmpImage.getPadding();
            for (int row = 0; row < img->height(); row++) {
                for(int col = 0; col < img->width(); col++) {
                    *ptrd = 255; ptrd++; ptrs++;
                    *ptrd = 255; ptrd++; ptrs++;
                    *ptrd = 255; ptrd++; ptrs++;
                }
                ptrd += padding;
            }
            //memcpy(ptrd,ptrs, cvMatImage.cols * cvMatImage.rows * 3 );
            imageOutputPort->write();
        }
        */
        Bottle& ShapOp = dataPortMec.prepare();
        ShapOp.clear();
        //Bottle output = bbOutputPort->prepare();
        for (int i = 0; i < objects.size(); i++) {
            ShapOp.addInt(objects[i].box_tight.x);
            ShapOp.addInt(objects[i].box_tight.y);
            ShapOp.addInt(objects[i].box_tight.width);
            ShapOp.addInt(objects[i].box_tight.height);
            ShapOp.addInt(objects[i].id_label);
            // to get the object name: detector.all_obj_cls[objects[i].id_label]
        }
        // free the memory of the objects
        for (int i = 0; i < objects.size(); i++) {
            objects[i].mat_edge_NN_tr.release();
        }
        objects.clear();
        //bbOutputPort->write();
        dataPortMec.write();
        key = cv::waitKey(100);
    }
    cvMatImage.release();
    std::cout << "***Done." << std::endl;
    return 0;
}
bool OpenCVGrabber::getImage(ImageOf<PixelRgb> & image)
{
    // Must have a capture object
    if (0 == m_capture) {
        image.zero();
        return false;
    }
    // Grab and retrieve a frame; OpenCV owns the returned image
    IplImage * iplFrame = cvQueryFrame((CvCapture*)m_capture);
    if (0 == iplFrame && m_loop) {
        bool ok = open(m_config);
        if (!ok)
            return false;
        iplFrame = cvQueryFrame((CvCapture*)m_capture);
    }
    if (0 == iplFrame) {
        image.zero();
        return false;
    }
    if (m_transpose == false && m_flip_x == false && m_flip_y == false) {
        return sendImage(iplFrame, image);
    }
    IplImage * iplFrame_out;
    if (m_transpose) {
        iplFrame_out = cvCreateImage(cvSize(iplFrame->height, iplFrame->width),
                                     iplFrame->depth, iplFrame->nChannels);
        cvTranspose(iplFrame, iplFrame_out);
    }
    else {
        iplFrame_out = cvCreateImage(cvSize(iplFrame->width, iplFrame->height),
                                     iplFrame->depth, iplFrame->nChannels);
        cvCopy(iplFrame, iplFrame_out);
    }
    if (m_flip_x && m_flip_y) {
        cvFlip(iplFrame_out, 0, -1);
    }
    else {
        if (m_flip_x) {
            cvFlip(iplFrame_out, 0, 0);
        }
        else if (m_flip_y) {
            cvFlip(iplFrame_out, 0, 1);
        }
    }
    bool ret = sendImage(iplFrame_out, image);
    cvReleaseImage(&iplFrame_out);
    return ret;
}
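/* For reference, the cvFlip() flip codes used above:
     0  -> flip around the x-axis (vertical flip, m_flip_x)
     1  -> flip around the y-axis (horizontal flip, m_flip_y)
    -1  -> flip around both axes (m_flip_x && m_flip_y)
   Transposition is applied first because it changes the image dimensions. */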
bool file::read(ImageOf<PixelFloat>& dest, const ConstString& src)
{
    int hh = 0, ww = 0;
    FILE *fp = fopen(src.c_str(), "r");
    if (fp==NULL) {
        return false;
    }
    // first pass: count rows and the maximum number of values per row
    int blank = 1;
    int curr = 0;
    while (!feof(fp)) {
        int ch = fgetc(fp);
        if (ch==' ' || ch=='\t' || ch=='\r' || ch=='\n' || feof(fp)) {
            if (!blank) {
                if (curr==0) {
                    hh++;
                }
                curr++;
                if (curr>ww) {
                    ww = curr;
                }
            }
            blank = 1;
            if (ch=='\n') {
                curr = 0;
            }
        }
        else {
            blank = 0;
        }
    }
    fclose(fp);
    // second pass: parse the values
    fp = fopen(src.c_str(), "rb");
    if (fp==NULL) {
        return false;
    }
    dest.resize(ww,hh);
    hh = 0;
    ww = 0;
    {
        char buf[256];
        int idx = 0;
        int blank = 1;
        int curr = 0;
        while (!feof(fp)) {
            int ch = fgetc(fp);
            if (ch==' ' || ch=='\t' || ch=='\r' || ch=='\n' || feof(fp)) {
                if (!blank) {
                    if (curr==0) {
                        hh++;
                    }
                    curr++;
                    if (curr>ww) {
                        ww = curr;
                    }
                    buf[idx] = '\0';
                    dest(curr-1,hh-1) = float(atof(buf));
                    idx = 0;
                }
                blank = 1;
                if (ch=='\n') {
                    curr = 0;
                }
            }
            else {
                buf[idx] = ch;
                idx++;
                yAssert(((unsigned int)idx)<sizeof(buf));
                blank = 0;
            }
        }
    }
    fclose(fp);
    return true;
}
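/* Usage sketch for the reader above: dimensions are inferred from the
   whitespace layout, so a file "matrix.txt" (name is a placeholder) containing
       1.0 2.0 3.0
       4.0 5.0 6.0
   yields a width-3, height-2 image:
       ImageOf<PixelFloat> m;
       yarp::sig::file::read(m, "matrix.txt");  */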
void DisparityTool::makeHistogram(ImageOf<PixelMono>& hImg) { int i,j; int height = hImg.height(); int width = hImg.width(); unsigned char bgColor = 0; unsigned char hColor = 190; unsigned char lineColor = 255; unsigned char * hist = new unsigned char [height*width*hImg.getPixelSize()]; img2unpaddedVect(hist, hImg); int offset = (width - _shiftLevels + 1)/2; for (j = 0; j < height*width; j++) hist[j] = bgColor; //design the area for (i = 0; i < _shiftLevels-1; i++) { if ((i+offset >=_shiftMin)&&(i+offset< _shiftMax)) { for (j = height-(int)(height*_corrFunct[i]); j < height; j++) hist[(j*width+i+offset)] = hColor; } } //design the maxes for (i = 0; i < 3; i++) { if ((_maxShifts[i].index+offset >=_shiftMin)&&(_maxShifts[i].index+offset<_shiftMax)) { for (j = height-(int)(height*_corrFunct[_maxShifts[i].index]); j < height; j++) hist[(j*width+_maxShifts[i].index+offset)] = lineColor; } } //Drawing Zero Reference for (j = 0; j < height; j += height/9) { for (int k = 0; k < height/18; k++) { hist[((j+k)*width + _shiftLevels/2 + offset)] = lineColor; } } //Partially inverted color for (int y = 0; y < height; y++) { for (int x1 = 0; x1 < _shiftMin; x1++) hist[(y*width+x1+offset)] = hist[(y*width+x1+offset)]+100; for (int x2 = width - 1; x2 > _shiftMax; x2--) hist[(y*width+x2+offset)] = hist[(y*width+x2+offset)]+100; } //Drawing Limits (inverted colors sides) /* for (int y = 0; y < height; y++) { for (int x1 = 0; x1 < _shiftMin; x1++) hist[(y*width+x1+offset)] = -hist[(y*width+x1+offset)]+255; for (int x2 = width - 1; x2 > _shiftMax; x2--) hist[(y*width+x2+offset)] = -hist[(y*width+x2+offset)]+255; }/***/ unpaddedVect2img(hist, hImg); delete [] hist; }
void SCSPMClassifier::classify(Bottle *blobs, Bottle &reply)
{
    if (blobs==NULL)
    {
        reply.addList();
        return;
    }
    if (blobs->size()==0)
    {
        reply.addList();
        return;
    }
    if (imgInput.getInputCount()==0 || imgSIFTInput.getInputCount()==0 ||
        featureInput.getInputCount()==0 || scoresInput.getInputCount()==0 ||
        rpcClassifier.getOutputCount()==0)
    {
        reply.addList();
        return;
    }
    // Read Object Classes
    Bottle cmdObjClass;
    cmdObjClass.addString("objList");
    Bottle objList;
    printf("Sending training request: %s\n",cmdObjClass.toString().c_str());
    rpcClassifier.write(cmdObjClass,objList);
    printf("Received reply: %s\n",objList.toString().c_str());
    if (objList.size()<=1)
    {
        for (int b=0; b<blobs->size(); b++)
        {
            Bottle &blob_scorelist=reply.addList();
            // name of the blob
            blob_scorelist.addString(blobs->get(b).asList()->get(0).asString().c_str());
            blob_scorelist.addList();
        }
        return;
    }
    // Start Recognition mode
    Bottle cmdClass;
    cmdClass.addString("recognize");
    Bottle classReply;
    rpcClassifier.write(cmdClass,classReply);
    // Read Image
    ImageOf<PixelRgb> *image = imgInput.read(true);
    if (image==NULL)
        return;
    IplImage* imgC=(IplImage*)image->getIplImage();
    double t2=Time::now();
    // Classify each blob
    printf("Start Classification \n");
    for (int b=0; b<blobs->size(); b++)
    {
        // list of the scores
        Bottle &blob_scorelist=reply.addList();
        // name of the blob
        blob_scorelist.addString(blobs->get(b).asList()->get(0).asString().c_str());
        // list of scores
        Bottle &scores=blob_scorelist.addList();
        // retrieve bounding box
        Bottle* bb=blobs->get(b).asList()->get(1).asList();
        int x_min=(int)bb->get(0).asDouble();
        int y_min=(int)bb->get(1).asDouble();
        int x_max=(int)bb->get(2).asDouble();
        int y_max=(int)bb->get(3).asDouble();
        // grow the bounding box by 5 pixels per side, staying inside the image
        if (x_min>5)
            x_min=x_min-5;
        if (y_min>5)
            y_min=y_min-5;
        if ((x_max+5)<imgC->width)
            x_max=x_max+5;
        if ((y_max+5)<imgC->height)
            y_max=y_max+5;
        // Crop Image
        cvSetImageROI(imgC,cvRect(x_min,y_min,x_max-x_min,y_max-y_min));
        IplImage* croppedImg=cvCreateImage(cvGetSize(imgC),imgC->depth,imgC->nChannels);
        cvCopy(imgC,croppedImg);
        cvResetImageROI(imgC);
        double t=Time::now();
        // Send Image to SC, flushing any stale SIFT image first
        ImageOf<PixelBgr> *imgSIFT = imgSIFTInput.read(false);
        while (imgSIFT!=NULL)
            imgSIFT = imgSIFTInput.read(false);
        ImageOf<PixelBgr>& outim=imgOutput.prepare();
        outim.wrapIplImage(croppedImg);
        imgOutput.write();
        // Read Coded Feature
        Bottle fea;
        featureInput.read(fea);
        imgSIFT = imgSIFTInput.read(true);
        if (imgSIFT!=NULL)
        {
            IplImage* imgBlob=(IplImage*)imgSIFT->getIplImage();
            cvSetImageROI(imgC,cvRect(x_min,y_min,x_max-x_min,y_max-y_min));
            cvCopy(imgBlob,imgC);
        }
        cvResetImageROI(imgC);
        cvReleaseImage(&croppedImg);
        t=Time::now()-t;
        //fprintf(stdout, "Coding Time: %g \n", t);
        if (fea.size()==0)
            return;
        // Send Feature to Classifier
        featureOutput.write(fea);
        // Read scores
        Bottle Class_scores;
        scoresInput.read(Class_scores);
        printf("Scores received: ");
        if (Class_scores.size()==0)
            return;
        // Fill the list of the b-th blob
        for (int i=0; i<objList.size()-1; i++)
        {
            Bottle *obj=Class_scores.get(i).asList();
            if (obj->get(0).asString()=="background")
                continue;
            Bottle &currObj_score=scores.addList();
            currObj_score.addString(obj->get(0).asString());
            // map the classifier margin from [-1,1] to [0,1]
            double normalizedVal=((obj->get(1).asDouble())+1)/2;
            currObj_score.addDouble(normalizedVal);
            printf("%s %f ",obj->get(0).asString().c_str(),normalizedVal);
        }
        printf("\n");
    }
    if (imgSIFTOutput.getOutputCount()>0)
    {
        ImageOf<PixelRgb>& outim=imgSIFTOutput.prepare();
        outim.wrapIplImage(imgC);
        imgSIFTOutput.write();
    }
    t2=Time::now()-t2;
    //fprintf(stdout, "All Time: %g \n", t2);
}
int main()
{
    Network yarp;
    BufferedPort<ImageOf<PixelRgb> > imagePort;
    BufferedPort<Vector> target;
    imagePort.open("/target/image/in");
    target.open("/target/out");
    Network::connect("/icubSim/cam/left", "/target/image/in");
    // gets the position of an object from the left eye of the iCub, and sends
    // this to the port in "search.cpp"
    while (true) {
        ImageOf<PixelRgb> *image = imagePort.read();
        if (image != NULL) {
            double xMean = 0;
            double yMean = 0;
            int ct = 0;
            for (int x = 0; x < image->width(); x++) {
                for (int y = 0; y < image->height(); y++) {
                    PixelRgb& pixel = image->pixel(x, y);
                    // selects the reddish object according to the threshold value
                    if (pixel.r > pixel.b * 1.2 + 20 && pixel.r > pixel.g * 1.2 + 20) {
                        xMean += x;
                        yMean += y;
                        ct++;
                    }
                }
            }
            if (ct > 0) {
                xMean /= ct;
                yMean /= ct;
            }
            // check whether the object is big enough to send its position to the port
            if (ct > (image->width() / 35) * (image->height() / 35)) {
                printf("Best guess at red target: %g %g\n", xMean, yMean);
                Vector& tar = target.prepare();
                tar.resize(3);
                tar[0] = xMean;
                tar[1] = yMean;
                tar[2] = 1;
                target.write();
            }
            else {
                Vector& tar = target.prepare();
                tar.resize(3);
                tar[0] = 0;
                tar[1] = 0;
                tar[2] = 0;
                target.write();
            }
        }
        else {
            cout << "image is not detected!" << endl;
        }
    }
    return 0;
}
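/* The "reddish" test above, factored out: red must exceed each other channel
   by 20% plus a fixed offset of 20 (thresholds taken from the loop above). */
inline bool isReddish(const PixelRgb &p)
{
    return p.r > p.b*1.2 + 20 && p.r > p.g*1.2 + 20;
}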
int main(int argc, char** argv)
{
    std::string imageInPortName;
    std::string stereoInPortName;
    std::string magnoInPortName;
    std::string saliencyOutPortName;
    std::string gazeOutPortName;
    std::string facesInPortName;
    Network yarp;
    if (argc < 7) { // six port names are read from argv[1..6]
        std::cout << "Not enough arguments. Must provide port name to the input and output ports" << endl;
        std::cout << "Exiting ..." << endl;
        return -1;
    }
    else {
        imageInPortName = argv[1];
        stereoInPortName = argv[2];
        magnoInPortName = argv[3];
        facesInPortName = argv[4];
        saliencyOutPortName = argv[5];
        gazeOutPortName = argv[6];
    }
    int numGPU = cuda::getCudaEnabledDeviceCount();
    if (numGPU == 0) {
        std::cout << "No GPU found or library compiled without GPU support" << endl;
        std::cout << "Exiting ..." << endl;
        return 0;
    }
    for (int i = 0; i < numGPU; i++) {
        cuda::setDevice(i);
        cuda::DeviceInfo GPUlist;
        bool compatible = GPUlist.isCompatible();
        if (compatible == false) {
            std::cout << "Library not compiled with appropriate architecture" << endl;
            std::cout << "Exiting ..." << endl;
            return 0;
        }
    }
    std::cout << "Found " << numGPU << " CUDA enabled device/s" << endl;
    cv::cuda::Stream cuStream[5];
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb> > imageInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgbFloat> > stereoInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelMono> > magnoInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelMono> > facesInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelFloat> > saliencyOutPort;
    yarp::os::Port gazeOutPort;
    bool inOpen = imageInPort.open(imageInPortName.c_str());
    //bool inOpen2 = stereoInPort.open(stereoInPortName.c_str());
    bool inOpen4 = facesInPort.open(facesInPortName.c_str());
    //bool inOpen3 = magnoInPort.open(magnoInPortName.c_str());
    bool outOpen = saliencyOutPort.open(saliencyOutPortName.c_str());
    bool outOpen2 = gazeOutPort.open(gazeOutPortName.c_str());
    if (!inOpen || !inOpen4 || !outOpen || !outOpen2) {
        std::cout << "Could not open ports. Exiting" << endl;
        return -1;
    }
#ifdef devDebug
    {
        //yarp.connect("/icub/cam/left","/imin","udp+mjpeg+recv.bayer+method.nearest");
        //yarp.connect("/icub/cam/left","/imin","udp+mjpeg");
        //yarp.connect("/icubcam", "/imin");
        yarp.connect("/icub/cam/left","/inputCalibImage", "udp+mjpeg+recv.bayer+method.nearest");
        //yarp.connect("/stereo/out","/inputStereoMap");
        //yarp.connect("/magno/out", "/magnoInput");
        yarp.connect("/salientOut", "/salientFacesIn");
        yarp.connect("/saliencyOut","/showSaliency");
        yarp.connect("/gazeOut", "/iKinGazeCtrl/mono:i");
    }
#endif
    // Using vision for stereo cameras: for all features except depth, stitch the
    // two images together to remove overlap, thus including peripheral vision.
    //   after stitching, process through the retina model and extract features;
    // - for depth, process only the overlapping section of the image; then,
    //   after obtaining a saliency map, reincorporate it into an image whose
    //   dimensions match the rest by setting the remaining pixels to 0.
    // Instead of optical flow, motion saliency comes from the magnocellular
    // pathway implemented in the retina module.
    // Several GPUs are used by listing the devices and assigning operations
    // per GPU: this requires either optimising the event sequence so that
    // each GPU has a separate task, or striping the image; separate tasks
    // are likely more efficient given the several 'independent' maps that
    // must be computed.
    bool setup = false;
    int count = 0;
    Size imageSize;
    int border = 100;
    int inCount = 0, inCount1 = 0, inCount2 = 0, inCount3 = 0;
    int outCount = 0;
    int step = 0;
    int stepMagno = 0;
    int stepStereo = 0;
    int stepFaces = 0;
    double t = 0.0, time = 0.0;
    while (setup == false) {
        inCount = imageInPort.getInputCount();
        //inCount1 = stereoInPort.getInputCount();
        //inCount2 = magnoInPort.getInputCount();
        inCount3 = facesInPort.getInputCount();
        if (inCount == 0) {
            cout << "Awaiting input images" << endl;
            pauseExec(100);
        } else {
            ImageOf<PixelRgb> *image = imageInPort.read();
            //ImageOf<PixelRgbFloat> *stereo = stereoInPort.read();
            //ImageOf<PixelMono> *magno = magnoInPort.read();
            ImageOf<PixelMono> *faces = facesInPort.read();
            if (image != NULL && faces != NULL) {
                count = 0;
                step = image->getRowSize() + image->getPadding();
                //stepMagno = magno->getRowSize() + magno->getPadding();
                //stepStereo = stereo->getRowSize() + stereo->getPadding();
                stepFaces = faces->getRowSize() + faces->getPadding();
                Mat left_cpuRGB(image->height(), image->width(), CV_8UC3, image->getRawImage(), step);
                imageSize = left_cpuRGB.size();
                setup = true;
            }
        }
    }
    int numPix = imageSize.height * imageSize.width;
    char mess[100];
    GpuMat Intensity_past(imageSize, CV_32F);
    Intensity_past.setTo(Scalar::all(0.0));
    // prepare Gabor filters
    int gaborWidth = 5;
    Size gaborSize = Size(gaborWidth, gaborWidth);
    Mat cgabor_0, cgabor_45, cgabor_90, cgabor_135;
    double sigma = 1, lambda = 2.0, gamma = 1.0, phase = 0;
    double Gstep = CV_PI / 4.0;
    double orien[4] = { (2 * Gstep), Gstep, 0, (3 * Gstep) };
    cgabor_0 = getGaborKernel(gaborSize, sigma, orien[0], lambda, gamma, phase, CV_32F);
    cgabor_45 = getGaborKernel(gaborSize, sigma, orien[1], lambda, gamma, phase, CV_32F);
    cgabor_90 = getGaborKernel(gaborSize, sigma, orien[2], lambda, gamma, phase, CV_32F);
    cgabor_135 = getGaborKernel(gaborSize, sigma, orien[3], lambda, gamma, phase, CV_32F);
    GpuMat Gabor0_0, Gabor0_90, Gabor0_45, Gabor0_135;
    Ptr<Filter> gaborFilt0 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_0, Point(-1, -1), BORDER_CONSTANT, 0);
    Ptr<Filter> gaborFilt90 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_90, Point(-1, -1), BORDER_CONSTANT, 0);
    Ptr<Filter> gaborFilt45 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_45, Point(-1, -1), BORDER_CONSTANT, 0);
    Ptr<Filter> gaborFilt135 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_135, Point(-1, -1), BORDER_CONSTANT, 0);
    // prepare pyramid sizes array
    const int numPyrLevels = 8;
    int numFMaps = 6;
    int conspMapLevel = 4;
    vector<int> centreVec;
    centreVec.push_back(2);
    centreVec.push_back(3);
    centreVec.push_back(4);
    vector<int> surroundOffsetVec;
    surroundOffsetVec.push_back(3);
    surroundOffsetVec.push_back(4);
    vector<GpuMat> gMagnoPYR, gMagnoFMaps, gIntensityPYR, gIntensityFMaps, RGantPYR,
        RGantFMaps, BYantPYR, BYantFMaps, Gabor0PYR, Gabor90PYR, Gabor45PYR, Gabor135PYR,
        Gabor0FMaps, Gabor90FMaps, Gabor45FMaps, Gabor135FMaps;
    vector<Mat> intensity;
    Mat intPast, IOR;
    GpuMat gTempY, gTempX, gRG;
    Size pyrSizes[numPyrLevels + 1];
    Rect pyrRect[numPyrLevels + 1];
    Point origin = Point(0, 0);
    int tempH, tempW;
    Intensity_past.download(intPast);
    for (int i = 0; i <= numPyrLevels; i++) {
        tempH = ceil(imageSize.height / pow(2, i));
        tempW = ceil(imageSize.width / pow(2, i));
        pyrSizes[i] = Size(tempW, tempH);
        pyrRect[i] = Rect(origin, pyrSizes[i]);
        gIntensityPYR.push_back(Intensity_past.clone());
        //gMagnoPYR.push_back(Intensity_past.clone());
        RGantPYR.push_back(Intensity_past.clone());
        BYantPYR.push_back(Intensity_past.clone());
        Gabor0PYR.push_back(Intensity_past.clone());
        Gabor90PYR.push_back(Intensity_past.clone());
        Gabor45PYR.push_back(Intensity_past.clone());
        Gabor135PYR.push_back(Intensity_past.clone());
        if (i < numFMaps) {
            gIntensityFMaps.push_back(Intensity_past.clone());
            //gMagnoFMaps.push_back(Intensity_past.clone());
            RGantFMaps.push_back(Intensity_past.clone());
            BYantFMaps.push_back(Intensity_past.clone());
            Gabor0FMaps.push_back(Intensity_past.clone());
            Gabor90FMaps.push_back(Intensity_past.clone());
            Gabor45FMaps.push_back(Intensity_past.clone());
            Gabor135FMaps.push_back(Intensity_past.clone());
        }
        intensity.push_back(intPast);
    }
    GpuMat gIntensityConspMap(pyrSizes[conspMapLevel], CV_32F);
    //GpuMat gMagnoConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat RGantConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat BYantConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor0ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor90ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor45ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor135ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat OrientationConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat IORMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat gMask(Size(3, numPix), CV_8UC1);
    IORMap.setTo(Scalar::all(0.0));
    // initialise maximum filtering
    //Ptr<Filter> maxFilt = cuda::createBoxMaxFilter(CV_8UC1, Size(5, 5), Point(-1, -1), BORDER_CONSTANT, 0);
    Mat kernel, cSaliencyInt;
    kernel = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));
    Ptr<Filter> maxFilt = cuda::createMorphologyFilter(MORPH_DILATE, CV_8UC1, kernel, Point(-1, -1), 1);
    double avgFps, cumuTime = 0, avgTime, minSalVal, maxSalVal;
    Point minSalPoint, maxSalPoint;
    Mat m1, m2, m3, m0, imageFloat, cSaliency, magnoFloat, facesFloat;
    GpuMat gTemp3, gMaxRGB, gMinRG, gMagnoDisparity, gRightInt, gFaces, gFacesConspMap,
        gMagno3D, gLeftInt, gLeftROI, gMagnoROI, gMagnoLeftCorrect1, gMagnoRightCorrect1,
        gMagnoRightInt, gMagnoLeftInt, gRight, gR, gMagnoLeft, gMagnoRight, gMagno0, gG,
        gB, gIntensityCurr, gIntensity0, gLeft, pano, RGant0, temp, BYant0, Flicker0,
        gTemp, gTemp2, gTemp4, gTemp5, gTemp6, gTemp7, gTemp8, gSaliency, ColorConspMap;
    vector<GpuMat> bgr, rg;
    int countFrames = 0, countGaze = 0;
    Scalar arrMean, arrStddev;
    int lenX = pow(2, conspMapLevel);
    int lenY = lenX;
    int salX = 0, salY = 0;
    int cumulativeX = 0, cumulativeY = 0;
    Rect salientRegion, saliencyMapRegion;
    Scalar meanPos;
    double decayFactor = 0.95;
    int gazePeriod = 6;
    int midX = floor(gazePeriod / 2);
    std::vector<int> vecX(gazePeriod);
    std::vector<int> vecY(gazePeriod);
    std::vector<int> sortX(gazePeriod);
    vector<int>::iterator medianIdx;
    int medianX;
    int salIdx;
    //std::deque<Point> saccadePts;
    //saccadePts.push_back(Point(0,0));
    //saccadePts.push_back(Point(0,0));
    //saccadePts.push_back(Point(0,0));
    Point2f avgPoint;
    while (true) {
        inCount = imageInPort.getInputCount();
        //inCount1 = stereoInPort.getInputCount();
        //inCount2 = magnoInPort.getInputCount();
        inCount3 = facesInPort.getInputCount();
        outCount = saliencyOutPort.getOutputCount();
        if (inCount == 0 || inCount3 == 0 || outCount == 0) {
            std::cout << "Awaiting input and output connections" << endl;
            pauseExec(100);
        } else {
            ImageOf<PixelRgb> *imageIn = imageInPort.read();
            //ImageOf<PixelRgbFloat> *stereoIn = stereoInPort.read();
            //ImageOf<PixelMono> *magnoIn = magnoInPort.read();
            ImageOf<PixelMono> *facesIn = facesInPort.read();
            if ((imageIn != NULL) && (facesIn != NULL)) {
                t = (double)getTickCount();
                Mat inImage(imageIn->height(), imageIn->width(), CV_8UC3, imageIn->getRawImage(), step);
                //Mat stereoImage(stereoIn->height(), stereoIn->width(), CV_32FC3, stereoIn->getRawImage(), stepStereo);
                //Mat magnoImage(magnoIn->height(), magnoIn->width(), CV_8UC1, magnoIn->getRawImage(), stepMagno);
                Mat facesImage(facesIn->height(), facesIn->width(), CV_8UC1, facesIn->getRawImage(), stepFaces);
                cv::cvtColor(inImage, inImage, COLOR_RGB2BGR);
                //magnoImage.convertTo(magnoFloat, CV_32FC1);
                //magnoFloat /= 255;
                inImage.convertTo(imageFloat, CV_32FC3);
                imageFloat /= 255;
                facesImage.convertTo(facesFloat, CV_32FC1);
                facesFloat /= 255;
                gLeft.upload(imageFloat);
                //gMagno0.upload(magnoFloat);
                gFaces.upload(facesFloat);
                //------------ start of saliency ---------------------//
                // create intensity map
                cuda::split(gLeft, bgr, cuStream[0]);
                cuda::add(bgr[1], bgr[0], gTemp2, noArray(), -1, cuStream[0]);
                cuda::addWeighted(gTemp2, 0.33333, bgr[2], 0.33333, 0, gIntensity0, -1, cuStream[0]);
                // gMask becomes a 0/1 mask of sufficiently bright pixels
                cuda::threshold(gIntensity0, gMask, 0.1, 1, THRESH_BINARY, cuStream[0]);
                cudaConspMap(&gIntensity0, &gIntensityPYR, &gIntensityFMaps, &gIntensityConspMap,
                             numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes,
                             maxFilt, cuStream[1]);
                // create colour antagonist maps
                cuda::max(bgr[0], bgr[1], gTemp2, cuStream[0]);
                cuda::max(gTemp2, bgr[2], gTemp3, cuStream[0]);
                cuda::multiply(gTemp3, gMask, gMaxRGB, 1.0, -1, cuStream[0]);
                cuda::min(bgr[2], bgr[1], gMinRG, cuStream[0]);
                cuda::subtract(bgr[2], bgr[1], RGant0, noArray(), -1, cuStream[0]);
                cuda::divide(RGant0, gMaxRGB, RGant0, 1, -1, cuStream[0]);
                cudaConspMap(&RGant0, &RGantPYR, &RGantFMaps, &RGantConspMap, numPyrLevels,
                             centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                cuda::add(gIntensityConspMap, RGantConspMap, gSaliency);
                cuda::subtract(bgr[0], gMinRG, BYant0, noArray(), -1, cuStream[0]);
                cuda::divide(BYant0, gMaxRGB, BYant0, 1, -1, cuStream[0]);
                cudaConspMap(&BYant0, &BYantPYR, &BYantFMaps, &BYantConspMap, numPyrLevels,
                             centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                cuda::add(gSaliency, BYantConspMap, gSaliency);
                cuda::resize(gFaces, gFacesConspMap, pyrSizes[conspMapLevel], 0.0, 0.0, 1.0, cuStream[1]);
                //--------------------------------------------------------//
                // flicker map (use retina magno channel)
                //cudaConspMap(&gMagno0, &gMagnoPYR, &gMagnoFMaps, &gMagnoConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                // Gabor filtering on the intensity map
                gaborFilt0->apply(gIntensity0, Gabor0_0, cuStream[0]);
                cudaConspMap(&Gabor0_0, &Gabor0PYR, &Gabor0FMaps, &Gabor0ConspMap, numPyrLevels,
                             centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                gaborFilt90->apply(gIntensity0, Gabor0_90, cuStream[0]);
                cudaConspMap(&Gabor0_90, &Gabor90PYR, &Gabor90FMaps, &Gabor90ConspMap, numPyrLevels,
                             centreVec, surroundOffsetVec,
                             conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                gaborFilt45->apply(gIntensity0, Gabor0_45, cuStream[0]);
                cudaConspMap(&Gabor0_45, &Gabor45PYR, &Gabor45FMaps, &Gabor45ConspMap, numPyrLevels,
                             centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                gaborFilt135->apply(gIntensity0, Gabor0_135, cuStream[0]);
                cudaConspMap(&Gabor0_135, &Gabor135PYR, &Gabor135FMaps, &Gabor135ConspMap, numPyrLevels,
                             centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                OrientationConspMap.setTo(Scalar::all(0.0));
                normImage(&Gabor0ConspMap, maxFilt, &Gabor0ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor0ConspMap, OrientationConspMap);
                normImage(&Gabor90ConspMap, maxFilt, &Gabor90ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor90ConspMap, OrientationConspMap);
                normImage(&Gabor45ConspMap, maxFilt, &Gabor45ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor45ConspMap, OrientationConspMap);
                normImage(&Gabor135ConspMap, maxFilt, &Gabor135ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor135ConspMap, OrientationConspMap);
                cuda::addWeighted(gSaliency, 0.333, OrientationConspMap, 0.333, 0, gSaliency, -1, cuStream[1]);
                //cuda::addWeighted(gSaliency, 0.75, gMagnoConspMap, 0.25, 0, gSaliency, -1, cuStream[1]);
                cuda::addWeighted(gSaliency, 0.5, gFacesConspMap, 0.5, 0, gSaliency, -1, cuStream[1]);
                cuda::multiply(gSaliency, 255, gSaliency, 1.0, -1, cuStream[1]);
                cuda::subtract(gSaliency, IORMap, gSaliency, noArray(), CV_32FC1, cuStream[1]);
                cuda::minMaxLoc(gSaliency, &minSalVal, &maxSalVal, &minSalPoint, &maxSalPoint, noArray());
                //--------------------------------------------------//
                gSaliency.download(cSaliency);
                salX = maxSalPoint.x;
                salY = maxSalPoint.y;
                if (salX == 0) salX = 1;
                if (salY == 0) salY = 1;
                saliencyMapRegion = Rect(salX, salY, 1, 1);
                salX = floor(salX * pow(2, conspMapLevel));
                salY = floor(salY * pow(2, conspMapLevel));
                //cumulativeX += salX;
                //cumulativeY += salY;
                vecX[countGaze] = salX;
                vecY[countGaze] = salY;
                salientRegion = Rect(salX - ceil(lenX / 2), salY - ceil(lenY / 2), lenX, lenY);
                rectangle(inImage, salientRegion, Scalar(0, 0, 200), 3, 8, 0);
                imshow("salientAttention", inImage);
                waitKey(1);
                countGaze++;
                if (countGaze == gazePeriod) {
                    countGaze = 0;
                    sortX = vecX;
                    nth_element(sortX.begin(), sortX.begin() + midX, sortX.end());
                    medianX = sortX[midX];
                    medianIdx = std::find(vecX.begin(), vecX.end(), medianX);
                    salIdx = medianIdx - vecX.begin();
                    Bottle gaze;
                    gaze.clear();
                    gaze.addString("left");
                    gaze.addDouble(vecX[salIdx]);
                    gaze.addDouble(vecY[salIdx]);
                    gaze.addDouble(1.0);
                    gazeOutPort.write(gaze);
                    cout << "gaze written" << endl;
                    cumulativeX = 0;
                    cumulativeY = 0;
                    cuda::multiply(IORMap, decayFactor, IORMap, 1.0, CV_32FC1, cuStream[1]);
                    cuda::add(IORMap(saliencyMapRegion), 500, IORMap(saliencyMapRegion), noArray(), CV_32FC1, cuStream[1]);
                }
                time = (1000 / (getTickFrequency() / ((double)getTickCount() - t)));
                cumuTime += time;
                countFrames++;
                avgTime = cumuTime / countFrames;
                avgFps = 1000 / avgTime;
                if (countFrames == 50) {
                    cumuTime = 0;
                    countFrames = 0;
                }
                //gDisparityBM.download(cDisparity);
                //g3D.download(cDisparity);
                //imshow("Disparity", cDisparity);
                //gMagnoDisparity.download(cMagnoDisparity);
                //gMagno3D.download(cMagnoDisparity);
                //waitKey(1);
                //gIntensityConspMap.download(cSaliency, cuStream[1]);
                std::sprintf(mess, "Avg Time= %3.4f, Avg FPS = %3.2f", avgTime, avgFps);
                //putText(cSaliency, mess, cvPoint(30, 30), FONT_HERSHEY_COMPLEX, 0.2, cvScalar(200, 0, 0), 1, CV_AA);
                std::cout << mess << endl;
                //for (int i = 0; i < numFMaps; i++) {
                //    gIntensityPYR[i].download(intensity[i]);
                //    if (i == 0) putText(intensity[i], mess, cvPoint(30, 30), FONT_HERSHEY_COMPLEX, 0.5, cvScalar(200, 0, 0), 1, CV_AA);
                //    sprintf(win, "window %d", i);
                //    namedWindow(win, WINDOW_NORMAL);
                //    imshow(win, intensity[i]);
                //}
                //cv::namedWindow("Saliency Map", WINDOW_NORMAL);
                //cv::imshow("Saliency Map", cSaliency);
                //cv::waitKey(1);
                //cSaliency.convertTo(cSaliencyInt, CV_8UC1, 1, 0);
                yarp::sig::ImageOf<yarp::sig::PixelFloat>& saliencyYarpOut = saliencyOutPort.prepare();
                CVtoYarp(cSaliency, saliencyYarpOut);
                saliencyOutPort.write();
            }
        }
    }
}
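/*
 * cudaConspMap and normImage are project helpers that are not shown in this
 * file. For reference, the sketch below is a plausible CPU-side
 * reconstruction, under stated assumptions, of the Itti-Koch style
 * centre-surround step they appear to implement: build a Gaussian pyramid of
 * a feature image, take absolute differences between each "centre" level and
 * a coarser "surround" level resized to match, and accumulate the results at
 * the conspicuity-map resolution. The function name conspMapCPU and the
 * closing min-max normalisation are illustrative assumptions, not the
 * project's actual code (the GPU version normalises with the dilation-based
 * normImage instead).
 */
#include <opencv2/imgproc.hpp>
#include <vector>
using namespace cv;

static void conspMapCPU(const Mat& feature, Mat& conspMap,
                        int numPyrLevels, const std::vector<int>& centres,
                        const std::vector<int>& surroundOffsets, int conspLevel)
{
    // Gaussian pyramid of the feature image (level 0 = full resolution)
    std::vector<Mat> pyr(numPyrLevels + 1);
    pyr[0] = feature;
    for (int i = 1; i <= numPyrLevels; i++)
        pyrDown(pyr[i - 1], pyr[i]);

    conspMap = Mat::zeros(pyr[conspLevel].size(), CV_32F);
    Mat surroundUp, diff, diffAtLevel;
    for (size_t c = 0; c < centres.size(); c++) {
        for (size_t s = 0; s < surroundOffsets.size(); s++) {
            int centre = centres[c];
            int surround = centre + surroundOffsets[s];   // e.g. centres {2,3,4}, offsets {3,4}
            if (surround > numPyrLevels) continue;
            // centre-surround difference: upsample the coarse level to the
            // centre level and take the absolute difference
            resize(pyr[surround], surroundUp, pyr[centre].size());
            absdiff(pyr[centre], surroundUp, diff);
            // accumulate the feature map at the conspicuity-map resolution
            resize(diff, diffAtLevel, pyr[conspLevel].size());
            conspMap += diffAtLevel;
        }
    }
    // simple range normalisation, standing in for normImage
    normalize(conspMap, conspMap, 0, 1, NORM_MINMAX);
}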
/**
 * Copy a captured frame into a YARP image.
 *
 * @param iplFrame The frame captured from the device or file.
 *
 * @param image The output image. It is resized to the dimensions
 * of the captured frame; the pixel data are copied, flipped and
 * colour-converted as needed, and software-scaled when a
 * different size was requested for file playback.
 *
 * @return True if the frame was successfully copied.
 */
bool OpenCVGrabber::sendImage(IplImage* iplFrame, ImageOf<PixelRgb> & image)
{
    // Resize the output image; this should not result in new
    // memory allocation if the image is already the correct size
    image.resize(iplFrame->width, iplFrame->height);

    if (!m_saidSize) {
        yDebug("Received image of size %dx%d\n", image.width(), image.height());
        m_saidSize = true;
    }

    // Get an IplImage; the YARP image owns the memory it points to
    IplImage * iplImage = (IplImage*)image.getIplImage();

    // create the timestamp
    m_laststamp.update();

    // Copy the captured image to the output image, flipping it if
    // the coordinate origin is not the top left
    if (IPL_ORIGIN_TL == iplFrame->origin)
        cvCopy(iplFrame, iplImage, 0);
    else
        cvFlip(iplFrame, iplImage, 0);

    if (iplFrame->channelSeq[0] == 'B') {
        cvCvtColor(iplImage, iplImage, CV_BGR2RGB);
    }

    if (m_w <= 0) {
        m_w = image.width();
    }
    if (m_h <= 0) {
        m_h = image.height();
    }

    if (fromFile) {
        if (m_w > 0 && m_h > 0) {
            if (image.width() != m_w || image.height() != m_h) {
                if (!m_saidResize) {
                    yDebug("Software scaling from %dx%d to %dx%d", image.width(), image.height(), m_w, m_h);
                    m_saidResize = true;
                }
                image.copy(image, m_w, m_h);
            }
        }
    }

    DBG yDebug("%d by %d %s image\n", image.width(), image.height(), iplFrame->channelSeq);

    return true;
}
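/*
 * For context, sendImage is the internal half of a frame grab: the grabber
 * queries the capture device for a raw frame and hands it to sendImage for
 * the copy/flip/colour handling above. The sketch below illustrates that
 * flow with the OpenCV 1.x C API this code uses. The free-standing function,
 * the header name, and direct access to sendImage (which may not be public
 * in the real class) are assumptions for illustration only.
 */
#include <opencv/highgui.h>
#include <yarp/sig/Image.h>
#include "OpenCVGrabber.h"   // assumed header for the class shown above
using yarp::sig::ImageOf;
using yarp::sig::PixelRgb;

static bool grabOnce(CvCapture* cap, OpenCVGrabber& grabber, ImageOf<PixelRgb>& image)
{
    // the returned frame buffer is owned by the capture; do not release it
    IplImage* frame = cvQueryFrame(cap);
    if (frame == 0) {
        return false;   // device closed or end of file reached
    }
    // sendImage resizes 'image', copies/flips the pixels, fixes BGR channel
    // order and applies software scaling when playing back from file
    return grabber.sendImage(frame, image);
}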