yarp::os::Things& ZfpMonitorObject::update(yarp::os::Things& thing)
{
    if (!shouldCompress) {
        // Receiver side: unpack [width, height, nBytes, blob] from the bottle
        // and inflate the blob back into a float image.
        Bottle* packet = thing.cast_as<Bottle>();
        const int w      = packet->get(0).asInt32();
        const int h      = packet->get(1).asInt32();
        const int nBytes = packet->get(2).asInt32();
        decompress((float*)packet->get(3).asBlob(), decompressed, nBytes, w, h, 1e-3);
        if (!decompressed) {
            yError()<<"ZfpMonitorObject:Failed to decompress, exiting...";
            return thing;
        }
        imageOut.resize(w, h);
        memcpy(imageOut.getRawImage(), decompressed, w*h*4);  // 4 bytes per PixelFloat
        th.setPortWriter(&imageOut);
        return th;
    }

    // Sender side: deflate the float image and ship it as
    // [width, height, nBytes, blob].
    ImageOf<PixelFloat>* floatImg = thing.cast_as< ImageOf<PixelFloat> >();
    int nBytes;
    compress((float*)floatImg->getRawImage(), compressed, nBytes,
             floatImg->width(), floatImg->height(), 1e-3);
    if (!compressed) {
        yError()<<"ZfpMonitorObject:Failed to compress, exiting...";
        return thing;
    }
    data.clear();
    data.addInt32(floatImg->width());
    data.addInt32(floatImg->height());
    data.addInt32(nBytes);
    Value v(compressed, nBytes);
    data.add(v);
    th.setPortWriter(&data);
    return th;
}
// Load a binary PGM (mono) file into `img`.
// Returns false on open failure, an unparsable header, a color (PPM) file,
// or — fixed here — a truncated pixel section.
static bool ImageReadMono(ImageOf<PixelMono> &img, const char *filename)
{
    int width, height, color, num;
    FILE *fp = fopen(filename, "rb");
    if (fp == 0) {
        fprintf(stderr, "Error opening %s, check if file exists.\n", filename);
        return false;
    }
    if (!ReadHeader(fp, &height, &width, &color)) {
        fclose(fp);
        fprintf(stderr, "Error reading header, is file a valid ppm/pgm?\n");
        return false;
    }
    if (color) {
        fclose(fp);
        fprintf(stderr, "File is color, conversion not yet supported\n");
        return false;
    }

    img.resize(width, height);

    const int w = img.width() * img.getPixelSize();  // payload bytes per row
    const int h = img.height();
    const int pad = img.getRowSize();                // row stride incl. padding
    unsigned char *dst = img.getRawImage();

    // Read row by row so the destination stride (pad) can differ from w.
    num = 0;
    for (int i = 0; i < h; i++) {
        num += (int)fread((void *) dst, 1, (size_t) w, fp);
        dst += pad;
    }
    fclose(fp);

    // BUG fix: previously a short read (truncated/corrupt file) still
    // returned true, leaving part of the image uninitialized.
    if (num != w * h) {
        fprintf(stderr, "Error reading pixel data, file %s appears truncated.\n", filename);
        return false;
    }
    return true;
}
void CRMainEstimation::estimate() { // get depth image int depth_width=320; int depth_height=240; ImageOf<PixelMono16>* img; IplImage* depthCV; depthCV=cvCreateImageHeader(cvSize(depth_width,depth_height),IPL_DEPTH_16U,1); if ((img=(ImageOf<PixelMono16>*)depthPort.read(false))) { unsigned short* pBuff=(unsigned short*)img->getRawImage(); for (int i=0; i<img->width()*img->height(); i++) { //We take only the first 13 bits, that contain the depth value in mm unsigned short realDepth = (pBuff[i]&0xFFF8)>>3; buf[i]=realDepth; } cvSetData(depthCV,buf,depth_width*2); }
// Port callback: forwards each received RGB frame to the Java side via JNI.
// NOTE(review): `dataReceived % 1 == 0` is always true, so every frame is
// forwarded; if frame subsampling was intended the modulus should be > 1.
virtual void onRead(ImageOf<PixelRgb>& img)
{
    if (dataReceived % 1 == 0) {
        // Attach this (native) thread to the JVM so JNI calls are legal here.
        JavaVMAttachArgs args;
        args.version = JNI_VERSION_1_6; // choose your JNI version
        args.name = NULL;   // you might want to give the java thread a name
        args.group = NULL;  // you might want to assign the java thread to a ThreadGroup
        gvm->AttachCurrentThread(&env, &args);

        unsigned char *imgRaw = img.getRawImage();
        // std::string imstr(reinterpret_cast<char*>(imgRaw));
        // jstring imgStr = env->NewStringUTF(imstr.c_str());
        jbyteArray imgByte = as_byte_array(env, imgRaw, img.getRawImageSize());

        // NOTE(review): sizeof(imgRaw) logs the pointer size, not the buffer
        // size — the real byte count is the getRawImageSize() value next to it.
        std::stringstream s;
        s << "DATA RECEIVED! Size of the imgRaw: " << sizeof(imgRaw) << " " << img.getRawImageSize()
          << " Img size:" << img.width() << "x" << img.height();
        // BUG fix: never pass variable text as the printf-style format string
        // (a '%' in the message would be interpreted); use an explicit "%s".
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "%s", s.str().c_str());

        // std::stringstream ss;
        // ss << std::hex << std::setfill('0');
        // for (int i=0;i<img.getRawImageSize();i++)
        // {
        //     ss << std::setw(2) << static_cast<unsigned>(imgRaw[i]);
        // }
        // __android_log_print(ANDROID_LOG_WARN, LOG_TAG, ss.str().c_str());

        // std::ofstream out;
        // out.open ("/sdcard/CiveTest/imgOrig.ppm", std::ios::out | std::ios::binary);
        // out<< "P6\n" << img.width() << " " << img.height() <<"\n255\n";
        // out.write (reinterpret_cast<char*>(imgRaw), img.getRawImageSize());
        // out.close ();

        getImgReceivedonPort(imgByte);
        // NOTE(review): the local ref to imgByte is never deleted; in a long
        // native attachment this can exhaust the JNI local-reference table —
        // confirm whether the Java side holds its own reference before enabling.
        // env->DeleteLocalRef(imgByte);
        gvm->DetachCurrentThread();
    }
    dataReceived++;
}
// Visual-saliency pipeline: reads an RGB camera image and a face-detection map
// from YARP ports, computes intensity / colour-antagonist / Gabor-orientation
// conspicuity maps on the GPU, combines them into a saliency map, picks a gaze
// target (median over a window of frames) and publishes saliency + gaze.
int main(int argc, char** argv)
{
    // Port names are taken positionally from argv[1]..argv[6].
    std::string imageInPortName;
    std::string stereoInPortName;
    std::string magnoInPortName;
    std::string saliencyOutPortName;
    std::string gazeOutPortName;
    std::string facesInPortName;
    Network yarp;   // initialises the YARP network for this process
    // BUG(review): six arguments argv[1]..argv[6] are read below, so the guard
    // should be `argc < 7`; with exactly six args (argc == 6) argv[6] is out of range.
    if(argc < 6)
    {
        std::cout << "Not enough arguments. Must provide port name to the input and output ports" << endl;
        std::cout << "Exiting ..." << endl;
        return -1;
    }
    else
    {
        imageInPortName = argv[1];
        stereoInPortName = argv[2];
        magnoInPortName = argv[3];
        facesInPortName = argv[4];
        saliencyOutPortName = argv[5];
        gazeOutPortName = argv[6];
    }
    // Require at least one CUDA device built for a compatible architecture.
    int numGPU = cuda::getCudaEnabledDeviceCount();
    if (numGPU == 0)
    {
        std::cout << "No GPU found or library compiled without GPU support" << endl;
        std::cout << "Exiting ..." << endl;
        return 0;
    }
    for (int i = 0; i < numGPU; i++)
    {
        cuda::setDevice(i);
        cuda::DeviceInfo GPUlist;
        bool compatible = GPUlist.isCompatible();
        if (compatible == false)
        {
            std::cout << "Library not compiled with appropriate architecture" << endl;
            std::cout << "Exiting ..." << endl;
            return 0;
        }
    }
    std::cout << "Found " << numGPU << " CUDA enabled device/s" << endl;
    cv::cuda::Stream cuStream[5];  // CUDA streams used to overlap GPU work

    // YARP I/O ports (stereo/magno paths are currently disabled).
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgb> > imageInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelRgbFloat> > stereoInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelMono> > magnoInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelMono> > facesInPort;
    yarp::os::BufferedPort<yarp::sig::ImageOf<yarp::sig::PixelFloat> > saliencyOutPort;
    yarp::os::Port gazeOutPort;
    bool inOpen = imageInPort.open(imageInPortName.c_str());
    //bool inOpen2 = stereoInPort.open(stereoInPortName.c_str());
    bool inOpen4 = facesInPort.open(facesInPortName.c_str());
    //bool inOpen3 = magnoInPort.open(magnoInPortName.c_str());
    bool outOpen = saliencyOutPort.open(saliencyOutPortName.c_str());
    bool outOpen2 = gazeOutPort.open(gazeOutPortName.c_str());
    // NOTE(review): bitwise `|` happens to work on bools, but `||` is intended.
    if(!inOpen | !inOpen4 |!outOpen | !outOpen2)
    {
        std::cout << "Could not open ports. Exiting" << endl;
        return -1;
    }
#ifdef devDebug
    {
        //yarp.connect("/icub/cam/left","/imin","udp+mjpeg+recv.bayer+method.nearest");
        //yarp.connect("/icub/cam/left","/imin","udp+mjpeg");
        //yarp.connect("/icubcam", "/imin");
        yarp.connect("/icub/cam/left","/inputCalibImage", "udp+mjpeg+recv.bayer+method.nearest");
        //yarp.connect("/stereo/out","/inputStereoMap");
        //yarp.connect("/magno/out", "/magnoInput");
        yarp.connect("/salientOut", "/salientFacesIn");
        yarp.connect("/saliencyOut","/showSaliency");
        yarp.connect("/gazeOut", "/iKinGazeCtrl/mono:i");
    }
#endif
    //using vision for stereo cameras:
    // for all features except for depth, stitch the two images together to remove overlap and thus including peripheral vision.
    // after stitching process through retina model and extract features
    // for depth, process the overlapping section of the image only then after getting a saliency map, reincorporate this into
    // an image whose dimensions are equal to the rest by setting the remaining pixels = 0
    //instead of using optical flow for motion detection use magnocellular pathway implemented in retina module to extract motion saliency
    //use of several gpus is done by getting a list of devices and setting which gpu does which operation:
    //- requires either optimisation of event sequences where each gpu has a separate task to implement or striping of image
    //separate tasks could possibly be more processing efficient due to the several 'independent' tasks that need to be carried out
    //
    bool setup = false;
    int count = 0;
    Size imageSize;
    int border = 100;
    int inCount = 0, inCount1 = 0, inCount2 = 0, inCount3 = 0;
    int outCount = 0;
    int step = 0;          // row stride (bytes) of the RGB input image
    int stepMagno = 0;
    int stepStereo = 0;
    int stepFaces = 0;     // row stride (bytes) of the faces image
    double t = 0.0, time = 0.0;
    // Setup loop: wait for the first image pair to learn the frame geometry.
    while(setup == false)
    {
        inCount = imageInPort.getInputCount();
        //inCount1 = stereoInPort.getInputCount();
        //inCount2 = magnoInPort.getInputCount();
        inCount2 = facesInPort.getInputCount();
        if (inCount == 0)
        {
            cout << "Awaiting input images" << endl;
            pauseExec(100);
        }
        else
        {
            ImageOf<PixelRgb> *image = imageInPort.read();
            //ImageOf<PixelRgbFloat> *stereo = stereoInPort.read();
            //ImageOf<PixelMono> *magno = magnoInPort.read();
            ImageOf<PixelMono> *faces = facesInPort.read();
            // NOTE(review): bitwise `&` on pointer comparisons — `&&` intended.
            if (image != NULL & faces != NULL)
            {
                count = 0;
                step = image->getRowSize() + image->getPadding();
                //stepMagno = magno->getRowSize() + magno->getPadding();
                //stepStereo = stereo->getRowSize() + stereo->getPadding();
                stepFaces = faces->getRowSize() + faces->getPadding();
                // Wrap the YARP buffer as a cv::Mat (no copy) just to get its size.
                Mat left_cpuRGB(image->height(), image->width(), CV_8UC3, image->getRawImage(), step);
                imageSize = left_cpuRGB.size();
                setup = true;
            }
        }
    }
    int numPix = imageSize.height*imageSize.width;
    char mess[100];
    GpuMat Intensity_past(imageSize, CV_32F);
    Intensity_past.setTo(Scalar::all(0.0));
    //prepare gabor filters
    int gaborWidth = 5;
    Size gaborSize = Size(gaborWidth, gaborWidth);
    Mat cgabor_0, cgabor_45, cgabor_90, cgabor_135;
    double sigma = 1, lambda = 2.0, gamma = 1.0, phase = 0;
    double Gstep = CV_PI / 4.0;
    // Four orientations, in steps of pi/4.
    double orien[4] = { (2 * Gstep), Gstep, 0, (3 * Gstep) };
    cgabor_0 = getGaborKernel(gaborSize, sigma, orien[0], lambda, gamma, phase, CV_32F);
    cgabor_45 = getGaborKernel(gaborSize, sigma, orien[1], lambda, gamma, phase, CV_32F);
    cgabor_90 = getGaborKernel(gaborSize, sigma, orien[2], lambda, gamma, phase, CV_32F);
    cgabor_135 = getGaborKernel(gaborSize, sigma, orien[3], lambda, gamma, phase, CV_32F);
    GpuMat Gabor0_0, Gabor0_90, Gabor0_45, Gabor0_135;
    Ptr<Filter> gaborFilt0 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_0, Point(-1, -1), BORDER_CONSTANT, 0);
    Ptr<Filter> gaborFilt90 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_90, Point(-1, -1), BORDER_CONSTANT, 0);
    Ptr<Filter> gaborFilt45 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_45, Point(-1, -1), BORDER_CONSTANT, 0);
    Ptr<Filter> gaborFilt135 = cuda::createLinearFilter(CV_32F, CV_32F, cgabor_135, Point(-1, -1), BORDER_CONSTANT, 0);
    //prepare pyramid sizes array
    const int numPyrLevels = 8;
    int numFMaps = 6;
    int conspMapLevel = 4;   // pyramid level at which conspicuity maps live
    vector<int> centreVec;
    centreVec.push_back(2);
    centreVec.push_back(3);
    centreVec.push_back(4);
    vector<int> surroundOffsetVec;
    surroundOffsetVec.push_back(3);
    surroundOffsetVec.push_back(4);
    vector<GpuMat> gMagnoPYR, gMagnoFMaps, gIntensityPYR, gIntensityFMaps, RGantPYR, RGantFMaps, BYantPYR, BYantFMaps, Gabor0PYR, Gabor90PYR, Gabor45PYR, Gabor135PYR, Gabor0FMaps, Gabor90FMaps, Gabor45FMaps, Gabor135FMaps;
    vector<Mat> intensity;
    Mat intPast, IOR;
    GpuMat gTempY, gTempX, gRG;
    Size pyrSizes[numPyrLevels+1];
    Rect pyrRect[numPyrLevels+1];
    Point origin = Point(0, 0);
    int tempH, tempW;
    Intensity_past.download(intPast);
    // Pre-allocate one GpuMat per pyramid level / feature map.
    for (int i = 0; i <= numPyrLevels; i++)
    {
        tempH = ceil(imageSize.height / pow(2,i));
        tempW = ceil(imageSize.width / pow(2, i));
        pyrSizes[i] = Size(tempW, tempH);
        pyrRect[i] = Rect(origin, pyrSizes[i]);
        gIntensityPYR.push_back(Intensity_past.clone());
        //gMagnoPYR.push_back(Intensity_past.clone());
        RGantPYR.push_back(Intensity_past.clone());
        BYantPYR.push_back(Intensity_past.clone());
        Gabor0PYR.push_back(Intensity_past.clone());
        Gabor90PYR.push_back(Intensity_past.clone());
        Gabor45PYR.push_back(Intensity_past.clone());
        Gabor135PYR.push_back(Intensity_past.clone());
        if (i < numFMaps)
        {
            gIntensityFMaps.push_back(Intensity_past.clone());
            //gMagnoFMaps.push_back(Intensity_past.clone());
            RGantFMaps.push_back(Intensity_past.clone());
            BYantFMaps.push_back(Intensity_past.clone());
            Gabor0FMaps.push_back(Intensity_past.clone());
            Gabor90FMaps.push_back(Intensity_past.clone());
            Gabor45FMaps.push_back(Intensity_past.clone());
            Gabor135FMaps.push_back(Intensity_past.clone());
        }
        intensity.push_back(intPast);
    }
    GpuMat gIntensityConspMap(pyrSizes[conspMapLevel], CV_32F);
    //GpuMat gMagnoConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat RGantConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat BYantConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor0ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor90ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor45ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat Gabor135ConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat OrientationConspMap(pyrSizes[conspMapLevel], CV_32F);
    GpuMat IORMap(pyrSizes[conspMapLevel], CV_32F);  // inhibition-of-return map
    GpuMat gMask(Size(3, numPix), CV_8UC1);
    IORMap.setTo(Scalar::all(0.0));
    //initialise maximum filtering
    //Ptr<Filter> maxFilt = cuda::createBoxMaxFilter(CV_8UC1, Size(5, 5), Point(-1, -1), BORDER_CONSTANT, 0);
    Mat kernel, cSaliencyInt;
    kernel = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));
    Ptr<Filter> maxFilt = cuda::createMorphologyFilter(MORPH_DILATE, CV_8UC1, kernel, Point(-1, -1), 1);
    double avgFps, cumuTime = 0, avgTime, minSalVal, maxSalVal;
    Point minSalPoint, maxSalPoint;
    Mat m1, m2, m3, m0, imageFloat, cSaliency, magnoFloat, facesFloat;
    GpuMat gTemp3, gMaxRGB, gMinRG, gMagnoDisparity, gRightInt, gFaces, gFacesConspMap, gMagno3D, gLeftInt, gLeftROI, gMagnoROI, gMagnoLeftCorrect1, gMagnoRightCorrect1, gMagnoRightInt, gMagnoLeftInt, gRight, gR, gMagnoLeft, gMagnoRight, gMagno0, gG, gB, gIntensityCurr, gIntensity0, gLeft, pano, RGant0, temp, BYant0, Flicker0, gTemp, gTemp2, gTemp4, gTemp5, gTemp6, gTemp7, gTemp8, gSaliency, ColorConspMap;
    vector<GpuMat> bgr, rg;
    int countFrames = 0, countGaze = 0;
    Scalar arrMean, arrStddev;
    int lenX = pow(2,conspMapLevel);  // side of the salient region at full resolution
    int lenY = lenX;
    int salX = 0, salY = 0;
    int cumulativeX = 0, cumulativeY = 0;
    Rect salientRegion, saliencyMapRegion;
    Scalar meanPos;
    double decayFactor = 0.95;  // per-gaze decay applied to the IOR map
    int gazePeriod = 6;         // frames between gaze commands
    int midX = floor(gazePeriod/2);
    std::vector<int> vecX(gazePeriod);
    std::vector<int> vecY(gazePeriod);
    std::vector<int> sortX(gazePeriod);
    vector<int>::iterator medianIdx;
    int medianX;
    int salIdx;
    //std::deque<Point> saccadePts;
    //saccadePts.push_back(Point(0,0));
    //saccadePts.push_back(Point(0,0));
    //saccadePts.push_back(Point(0,0));
    Point2f avgPoint;
    // Main processing loop.
    while(true)
    {
        inCount = imageInPort.getInputCount();
        //inCount1 = stereoInPort.getInputCount();
        //inCount2 = magnoInPort.getInputCount();
        inCount3 = facesInPort.getInputCount();
        outCount = saliencyOutPort.getOutputCount();
        if(inCount == 0 || inCount3 == 0 || outCount == 0)
        {
            std::cout << "Awaiting input and output connections" << endl;
            pauseExec(100);
        }
        else
        {
            ImageOf<PixelRgb> *imageIn = imageInPort.read();
            //ImageOf<PixelRgbFloat> *stereoIn = stereoInPort.read();
            //ImageOf<PixelMono> *magnoIn = magnoInPort.read();
            ImageOf<PixelMono> *facesIn = facesInPort.read();
            // NOTE(review): bitwise `&` — `&&` intended (works, but no short-circuit).
            if ((imageIn != NULL) & (facesIn != NULL))
            {
                t = (double)getTickCount();
                // Wrap YARP buffers as cv::Mat headers (no copy).
                Mat inImage(imageIn->height(), imageIn->width(), CV_8UC3, imageIn->getRawImage(), step);
                //Mat stereoImage(stereoIn->height(), stereoIn->width(), CV_32FC3, stereoIn->getRawImage(), stepStereo);
                //Mat magnoImage(magnoIn->height(), magnoIn->width(), CV_8UC1, magnoIn->getRawImage(), stepMagno);
                Mat facesImage(facesIn->height(), facesIn->width(), CV_8UC1, facesIn->getRawImage(), stepFaces);
                cv::cvtColor(inImage, inImage, COLOR_RGB2BGR, -1);
                //magnoImage.convertTo(magnoFloat, CV_32FC1);
                //magnoFloat /= 255;
                inImage.convertTo(imageFloat, CV_32FC3);
                imageFloat /= 255;
                facesImage.convertTo(facesFloat, CV_32FC1);
                facesFloat /= 255;
                gLeft.upload(imageFloat);
                //gMagno0.upload(magnoFloat);
                gFaces.upload(facesFloat);
                //------------ start of saliency---------------------//
                //create intensity map
                cuda::split(gLeft, bgr, cuStream[0]);
                cuda::add(bgr[1], bgr[0], gTemp2, noArray(), -1, cuStream[0]);
                cuda::addWeighted(gTemp2, 0.33333, bgr[2], 0.33333, 0, gIntensity0, -1, cuStream[0]);
                cuda::threshold(gIntensity0, gMask, 0.1, 1, THRESH_BINARY, cuStream[0]);
                cudaConspMap(&gIntensity0, &gIntensityPYR, &gIntensityFMaps, &gIntensityConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                //creating colour antagonist maps
                cuda::max(bgr[0], bgr[1], gTemp2, cuStream[0]);
                cuda::max(gTemp2, bgr[2], gTemp3, cuStream[0]);
                cuda::multiply(gTemp3, gMask, gMaxRGB, 1.0, -1, cuStream[0]);
                cuda::min(bgr[2], bgr[1], gMinRG, cuStream[0]);
                cuda::subtract(bgr[2], bgr[1], RGant0, noArray(), -1, cuStream[0]);
                cuda::divide(RGant0, gMaxRGB, RGant0, 1, -1, cuStream[0]);
                cudaConspMap(&RGant0, &RGantPYR, &RGantFMaps, &RGantConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                cuda::add(gIntensityConspMap, RGantConspMap, gSaliency);
                cuda::subtract(bgr[0], gMinRG, BYant0, noArray(), -1, cuStream[0]);
                cuda::divide(BYant0, gMaxRGB, BYant0, 1, -1, cuStream[0]);
                cudaConspMap(&BYant0, &BYantPYR, &BYantFMaps, &BYantConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                cuda::add(gSaliency, BYantConspMap, gSaliency);
                // Faces map is simply resized to the conspicuity-map resolution.
                cuda::resize(gFaces, gFacesConspMap, pyrSizes[conspMapLevel], 0.0, 0.0, 1.0, cuStream[1]);
                //--------------------------------------------------------//
                //flicker map (use retina magno channel)
                //cudaConspMap(&gMagno0, &gMagnoPYR, &gMagnoFMaps, &gMagnoConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                //gabor filtering on intensity map
                gaborFilt0->apply(gIntensity0, Gabor0_0, cuStream[0]);
                cudaConspMap(&Gabor0_0, &Gabor0PYR, &Gabor0FMaps, &Gabor0ConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                gaborFilt90->apply(gIntensity0, Gabor0_90, cuStream[0]);
                cudaConspMap(&Gabor0_90, &Gabor90PYR, &Gabor90FMaps, &Gabor90ConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                gaborFilt45->apply(gIntensity0, Gabor0_45, cuStream[0]);
                cudaConspMap(&Gabor0_45, &Gabor45PYR, &Gabor45FMaps, &Gabor45ConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                gaborFilt135->apply(gIntensity0, Gabor0_135, cuStream[0]);
                cudaConspMap(&Gabor0_135, &Gabor135PYR, &Gabor135FMaps, &Gabor135ConspMap, numPyrLevels, centreVec, surroundOffsetVec, conspMapLevel, pyrSizes, maxFilt, cuStream[1]);
                // Normalise each orientation conspicuity map and accumulate.
                OrientationConspMap.setTo(Scalar::all(0.0));
                normImage(&Gabor0ConspMap, maxFilt, &Gabor0ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor0ConspMap, OrientationConspMap);
                normImage(&Gabor90ConspMap, maxFilt, &Gabor90ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor90ConspMap, OrientationConspMap);
                normImage(&Gabor45ConspMap, maxFilt, &Gabor45ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor45ConspMap, OrientationConspMap);
                normImage(&Gabor135ConspMap, maxFilt, &Gabor135ConspMap, pyrSizes[conspMapLevel], cuStream[1]);
                cuda::add(OrientationConspMap, Gabor135ConspMap, OrientationConspMap);
                // Blend intensity+colour saliency with orientation, then faces.
                cuda::addWeighted(gSaliency, 0.333, OrientationConspMap, 0.333, 0, gSaliency, -1 ,cuStream[1]);
                //cuda::addWeighted(gSaliency, 0.75 , gMagnoConspMap, 0.25, 0, gSaliency, -1, cuStream[1]);
                cuda::addWeighted(gSaliency, 0.5, gFacesConspMap, 0.5, 0, gSaliency, -1, cuStream[1]);
                cuda::multiply(gSaliency,255, gSaliency, 1.0, -1, cuStream[1]);
                // Apply inhibition-of-return, then find the most salient point.
                cuda::subtract(gSaliency, IORMap, gSaliency, noArray(), CV_32FC1, cuStream[1]);
                cuda::minMaxLoc(gSaliency, &minSalVal, &maxSalVal, &minSalPoint, &maxSalPoint, noArray());
                //--------------------------------------------------//
                gSaliency.download(cSaliency);
                salX = maxSalPoint.x;
                salY = maxSalPoint.y;
                if(salX == 0) salX = 1;
                if(salY == 0) salY = 1;
                saliencyMapRegion = Rect(salX, salY, 1, 1);
                // Scale the conspicuity-map coordinates back to full resolution.
                salX = floor(salX*pow(2,conspMapLevel));
                salY = floor(salY*pow(2,conspMapLevel));
                //cumulativeX += salX;
                //cumulativeY += salY;
                vecX[countGaze] = salX;
                vecY[countGaze] = salY;
                salientRegion = Rect(salX-ceil(lenX/2), salY-ceil(lenY/2), lenX, lenY);
                rectangle(inImage, salientRegion, Scalar(0, 0, 200), 3, 8, 0);
                imshow("salientAttention", inImage);
                waitKey(1);
                countGaze++;
                // Every gazePeriod frames: command a gaze to the median-x target.
                if(countGaze == gazePeriod)
                {
                    countGaze = 0;
                    sortX = vecX;
                    nth_element(sortX.begin(), sortX.begin()+midX, sortX.end());
                    medianX = sortX[midX];
                    medianIdx = std::find(vecX.begin(), vecX.end(), medianX);
                    salIdx = medianIdx - vecX.begin();
                    //vecX.begin()
                    Bottle gaze;
                    gaze.clear();
                    gaze.addString("left");
                    gaze.addDouble(vecX[salIdx]);
                    gaze.addDouble(vecY[salIdx]);
                    gaze.addDouble(1.0);
                    gazeOutPort.write(gaze);
                    cout << "gaze written" << endl;
                    cumulativeX = 0;
                    cumulativeY = 0;
                    // Decay old inhibition and strongly inhibit the attended spot.
                    cuda::multiply(IORMap, decayFactor, IORMap, 1.0, CV_32FC1, cuStream[1]);
                    cuda::add(IORMap(saliencyMapRegion), 500, IORMap(saliencyMapRegion), noArray(), CV_32FC1, cuStream[1]);
                }
                // Frame timing statistics, reset every 50 frames.
                time = (1000/(getTickFrequency() / ((double)getTickCount() - t)));
                cumuTime += time;
                countFrames++;
                avgTime = cumuTime / countFrames;
                avgFps = 1000 / avgTime;
                if (countFrames == 50)
                {
                    cumuTime = 0;
                    countFrames = 0;
                }
                //gDisparityBM.download(cDisparity);
                //g3D.download(cDisparity);
                //imshow("Disparity", cDisparity);
                //gMagnoDisparity.download(cMagnoDisparity);
                //gMagno3D.download(cMagnoDisparity);
                //imshow("Magno Disparity", cMagnoDisparity);
                //waitKey(1);
                //gIntensityConspMap.download(cSaliency, cuStream[1]);
                std::sprintf(mess, "Avg Time= %3.4f, Avg FPS = %3.2f", avgTime, avgFps);
                //putText(cSaliency, mess, cvPoint(30, 30), FONT_HERSHEY_COMPLEX, 0.2, cvScalar(200, 0, 0), 1, CV_AA);
                std::cout << mess << endl;
                //for (int i = 0; i < numFMaps; i++)
                //{
                // gIntensityPYR[i].download(intensity[i]);
                // if (i == 0) putText(intensity[i], mess, cvPoint(30, 30), FONT_HERSHEY_COMPLEX, 0.5, cvScalar(200, 0, 0), 1, CV_AA);
                // sprintf(win, "window %d", i);
                // namedWindow(win, WINDOW_NORMAL);
                //imshow(win, intensity[i]);
                //}
                //cv::namedWindow("Saliency Map", WINDOW_NORMAL);
                //cv::imshow("Saliency Map", cSaliency);
                //cv::waitKey(1);
                //cSaliency.convertTo(cSaliencyInt, CV_8UC1, 1, 0);
                // Publish the saliency map on the YARP output port.
                yarp::sig::ImageOf<yarp::sig::PixelFloat>& saliencyYarpOut = saliencyOutPort.prepare();
                CVtoYarp(cSaliency, saliencyYarpOut);
                saliencyOutPort.write();
            }
        }
    }
}
// Save a mono image as a PGM file; thin wrapper around SavePGM.
static bool ImageWriteMono(ImageOf<PixelMono>& img, const char *filename)
{
    char *raw        = (char*)img.getRawImage();
    const int rows   = img.height();
    const int cols   = img.width();
    const int stride = img.getRowSize();  // bytes per row incl. padding
    return SavePGM(raw, filename, rows, cols, stride);
}
// Make `dest` wrap `src`'s pixel buffer directly — no pixel data is copied,
// so `dest` is only valid while `src` (and its underlying buffer) stays alive.
void RGBDSensorWrapper::shallowCopyImages(const ImageOf<PixelFloat>& src, ImageOf<PixelFloat>& dest)
{
    // Propagate the quantum (row-alignment) first so the external buffer's
    // layout is interpreted the same way as in the source image.
    dest.setQuantum(src.getQuantum());
    dest.setExternal(src.getRawImage(), src.width(), src.height());
}
int main(char** argv, int argc) { Network network; BufferedPort<ImageOf<PixelRgb> >* imageInputPort = new BufferedPort<ImageOf<PixelRgb> >(); BufferedPort<ImageOf<PixelRgb> >* imageOutputPort = new BufferedPort<ImageOf<PixelRgb> >(); BufferedPort<Bottle>* bbOutputPort = new BufferedPort<Bottle>(); yarp::os::BufferedPort<yarp::os::Bottle > dataPortMec; imageInputPort->open("/img:i"); imageOutputPort->open("/img:o"); //bbOutputPort->open("/bb:o"); editted VM network.connect("/icub/camcalib/left/out", "/img:i"); dataPortMec.open("/Shapedata:o"); //string para_yml_file = "data/para_cmp8toys.yml"; //modified VM string para_yml_file = "data_blocks/para_blocks.yml"; ///////////////////////////////////////////////// // STEP1: initiate ///////////////////////////////////////////////// bool flag; CTLdetector detector; flag = detector.initiate(para_yml_file); if (!flag) return 0; ///////////////////////////////////////////////// // STEP2: train ///////////////////////////////////////////////// flag = detector.train(); if (!flag) return 0; ///////////////////////////////////////////////// // STEP3: detect ///////////////////////////////////////////////// int key = 0; cv::Mat cvMatImage; std::cout<<"***Detecting..."<<std::endl; while(key != 27) { ImageOf<PixelRgb> *img = imageInputPort->read(true); cvMatImage.create(img->height(), img->width(), CV_8UC3); unsigned char* ptr = img->getRawImage(); memcpy(cvMatImage.data, ptr, cvMatImage.cols * cvMatImage.rows * 3); cv::cvtColor(cvMatImage, cvMatImage, CV_RGB2BGR); detector.detect(cvMatImage); //detector.showDetObjs(cvMatImage,Scalar(0,255,0),Scalar(255,255,255),480); //chaged 3.2.2013 Rea changed to orinal size detector.showDetObjs(cvMatImage,Scalar(0,255,0),Scalar(255,255,255),0); //detector.dispDetObjs(); std::vector<DetObj> objects; objects = detector.getDetObjs(); //sending out through image port the image out /* if(imageOutputPort->getOutputCount()) { cv::Mat ppIm = detector.getPostProcessIm(); //cv::cvtColor(ppIm, ppIm, 
CV_BGR2RGB); // printf("image %d %d \n", cvMatImage.cols, cvMatImage.rows); ImageOf<PixelRgb>& tmpImage = imageOutputPort->prepare(); tmpImage.resize(img->width(),img->height()); tmpImage.zero(); unsigned char* ptrd = tmpImage.getRawImage(); unsigned char* ptrs = ppIm.data; int padding = tmpImage.getPadding(); for (int row = 0; row < img->height(); row++) { for(int col = 0; col < img->width(); col++) { *ptrd = 255; ptrd++; ptrs++; *ptrd = 255; ptrd++; ptrs++; *ptrd = 255; ptrd++; ptrs++; //ptrs++; } ptrd += padding; } //memcpy(ptrd,ptrs, cvMatImage.cols * cvMatImage.rows * 3 ); imageOutputPort->write(); } */ Bottle& ShapOp = dataPortMec.prepare(); ShapOp.clear(); //Bottle output = bbOutputPort->prepare(); for (int i = 0; i < objects.size(); i++){ ShapOp.addInt(objects[i].box_tight.x); ShapOp.addInt(objects[i].box_tight.y); ShapOp.addInt(objects[i].box_tight.width); ShapOp.addInt(objects[i].box_tight.height); ShapOp.addInt(objects[i].id_label); //ShapOp.addInt(10); //if want to know the object name: detector.all_obj_cls[objects[i].id_label] } // free the memory of the objects. for (int i=0;i<objects.size();i++){ objects[i].mat_edge_NN_tr.release(); } objects.clear(); //bbOutputPort->write(); dataPortMec.write(); key = cv::waitKey(100); } cvMatImage.release(); std::cout<<"***Done."<<std::endl; return 0; }
// Split one side-by-side (horizontal) or top-bottom (vertical) stereo frame
// into separate left/right output images, copy with one of four methods
// (selected by `method`), forward the input timestamp, and publish both halves.
bool ImageSplitter::updateModule()
{
    // BUG(review): inputPort.read() can return NULL (port interrupted/closed);
    // inputImage is dereferenced below without a check — TODO confirm/fix.
    ImageOf<PixelRgb> *inputImage = inputPort.read();
    yarp::os::Stamp stamp;
    inputPort.getEnvelope(stamp);

    ImageOf<PixelRgb> &outLeftImage = outLeftPort.prepare();
    ImageOf<PixelRgb> &outRightImage = outRightPort.prepare();

    inWidth = inputImage->width();
    inHeight = inputImage->height();

    if(horizontal)    // input image is horizontally aligned
    {
        outWidth = inWidth/2;
        outHeight = inHeight;
    }
    else
    {
        outWidth = inWidth;
        outHeight = inHeight/2;
    }

    // Keep the output row alignment consistent with the input before resizing.
    outLeftImage.setQuantum(inputImage->getQuantum());
    outRightImage.setQuantum(inputImage->getQuantum());
    outLeftImage.resize(outWidth, outHeight);
    outRightImage.resize(outWidth, outHeight);

    // alloc and compute some vars for efficiency
    int h2, w2;
    unsigned char *pixelLeft, *pixelRight;
    unsigned char *pixelInputL, *pixelInputR;
    unsigned char *pixelInput = inputImage->getRawImage();
    int dualImage_rowSizeByte = inputImage->getRowSize();
    int singleImage_rowSizeByte = outLeftImage.getRowSize();
    int singleImage_wholeSizeByte = outWidth * outHeight * outLeftImage.getPixelSize();

    // Simple running-time instrumentation (reported every 100 frames below).
    static int counter = 0;
    static double start = 0;
    start = yarp::os::Time::now();

    switch(method)
    {
        case 0: // pixel by pixel
        {
            if(horizontal)
            {
                for(int h=0; h<outHeight; h++)
                {
                    for(int w1=0; w1<outWidth; w1++)
                    {
                        w2 = w1+outWidth;   // matching column in the right half
                        pixelLeft = outLeftImage.getPixelAddress(w1, h);
                        pixelLeft[0] = *(inputImage->getPixelAddress(w1, h)+0);
                        pixelLeft[1] = *(inputImage->getPixelAddress(w1, h)+1);
                        pixelLeft[2] = *(inputImage->getPixelAddress(w1, h)+2);
                        pixelRight = outRightImage.getPixelAddress(w1, h);
                        pixelRight[0] = *(inputImage->getPixelAddress(w2, h)+0);
                        pixelRight[1] = *(inputImage->getPixelAddress(w2, h)+1);
                        pixelRight[2] = *(inputImage->getPixelAddress(w2, h)+2);
                    }
                }
            }
            else
            {
                for(int h1=0; h1<outHeight; h1++)
                {
                    for(int w=0; w<outWidth; w++)
                    {
                        h2 = h1+outHeight;  // matching row in the bottom half
                        pixelLeft = outLeftImage.getPixelAddress(w, h1);
                        pixelLeft[0] = *(inputImage->getPixelAddress(w, h1)+0);
                        pixelLeft[1] = *(inputImage->getPixelAddress(w, h1)+1);
                        pixelLeft[2] = *(inputImage->getPixelAddress(w, h1)+2);
                        pixelRight = outRightImage.getPixelAddress(w, h1);
                        pixelRight[0] = *(inputImage->getPixelAddress(w, h2)+0);
                        pixelRight[1] = *(inputImage->getPixelAddress(w, h2)+1);
                        pixelRight[2] = *(inputImage->getPixelAddress(w, h2)+2);
                    }
                }
            }
        }
        break;
        case 1: // pixel by pixel, a bit better
        {
            if(horizontal)
            {
                pixelLeft = outLeftImage.getRawImage();
                pixelRight = outRightImage.getRawImage();
                pixelInputL = pixelInput;
                // Right half starts one output-row width into each input row.
                pixelInputR = pixelInput+singleImage_rowSizeByte;
                for(int h=0, idx=0, idx2=0; h<outHeight; h++)
                {
                    for(int w=0; w<outWidth; w++)
                    {
                        pixelLeft[idx++] = *(pixelInputL++);
                        pixelLeft[idx++] = *(pixelInputL++);
                        pixelLeft[idx++] = *(pixelInputL++);
                        pixelRight[idx2++] = *(pixelInputR++);
                        pixelRight[idx2++] = *(pixelInputR++);
                        pixelRight[idx2++] = *(pixelInputR++);
                    }
                    // Skip over the other half of the input row.
                    pixelInputL += singleImage_rowSizeByte;
                    pixelInputR += singleImage_rowSizeByte;
                }
            }
            else
            {
                // NOTE(review): vertical split is not implemented for method 1;
                // the prepared output images are left untouched in this case.
            }
        }
        break;
        case 2: // line by line
        {
            if(horizontal)
            {
                pixelLeft = outLeftImage.getRawImage();
                pixelRight = outRightImage.getRawImage();
                for(int h=0; h<inHeight; h++)
                {
                    memcpy(pixelLeft + h*singleImage_rowSizeByte, pixelInput, singleImage_rowSizeByte);
                    // NOTE: the second argument advances pixelInput as a side
                    // effect before the copy of the right half.
                    memcpy(pixelRight + h*singleImage_rowSizeByte, pixelInput+=singleImage_rowSizeByte, singleImage_rowSizeByte);
                    pixelInput+= dualImage_rowSizeByte/2;
                }
            }
            else
            {
                pixelLeft = outLeftImage.getRawImage();
                pixelRight = outRightImage.getRawImage();
                pixelInputL = pixelInput;
                pixelInputR = pixelInput+singleImage_wholeSizeByte;  // bottom half
                for(int h=0; h<outHeight; h++)
                {
                    memcpy(pixelLeft + h*singleImage_rowSizeByte, pixelInputL, singleImage_rowSizeByte);
                    memcpy(pixelRight + h*singleImage_rowSizeByte, pixelInputR, singleImage_rowSizeByte);
                    pixelInputL+= singleImage_rowSizeByte;
                    pixelInputR+= singleImage_rowSizeByte;
                }
            }
        }
        break;
        case 3: // whole image, only if input image is vertically aligned
        {
            if(horizontal)
            {
                yError() << "Cannot use this copy method with horizontally aligned source image.";
            }
            else
            {
                pixelLeft = outLeftImage.getRawImage();
                pixelRight = outRightImage.getRawImage();
                memcpy(pixelLeft, pixelInput, singleImage_wholeSizeByte);
                memcpy(pixelRight, pixelInput+ singleImage_wholeSizeByte, singleImage_wholeSizeByte);
            }
        }
        break;
        default:
        {
            yError() << " @line " << __LINE__ << "unhandled switch case, we should not be here!";
        }
    }

    static double end = 0;
    static double elapsed = 0;
    end = yarp::os::Time::now();
    elapsed += (end-start);
    counter++;
    if((counter % 100) == 0)
    {
        yInfo() << "Elapsed time: " << elapsed;
        elapsed = 0;
    }

    // Forward the source timestamp so both halves stay in sync downstream.
    outLeftPort.setEnvelope(stamp);
    outRightPort.setEnvelope(stamp);
    outLeftPort.write();
    outRightPort.write();
    return true;
}