/// Converts a cartesian image into its logpolar representation.
///
/// @param in   cartesian source; must be exactly _xsize x _ysize.
/// @param out  logpolar destination; must be exactly _stheta x _srho.
/// @return YARP_OK on completion.
///
/// The cartesian image is first resampled into the member buffer _outimage
/// via the precomputed cartesian-to-logpolar map (_cart2LP_Map); the copy
/// loop below then selects one byte per output pixel out of each 3-byte
/// group in _outimage, driven by _colormap.
int YARPLogpolarSampler::Cartesian2Logpolar (const YARPGenericImage& in, YARPGenericImage& out)
{
	using namespace _logpolarParams;
	ACE_ASSERT (in.GetWidth() == _xsize && in.GetHeight() == _ysize);
	ACE_ASSERT (out.GetWidth() == _stheta && out.GetHeight() == _srho);

	// Resample cartesian pixels into the intermediate logpolar buffer.
	Make_LP_Real (_outimage, (unsigned char *)in.GetRawBuffer(), &_img, _cart2LP_Map);

	char *de = out.GetRawBuffer();
	unsigned char * o = _outimage;
	char *cmap = _colormap;
	const int w = out.GetWidth();
	const int h = out.GetHeight();
	const int p = out.GetPadding();	// destination rows may be padded.
	int i, j;
	for (i = 0; i < h; i++)
	{
		for (j = 0; j < w; j++)
		{
			// Pick one component of the 3-byte source pixel.
			// NOTE(review): assumes every _colormap entry is a small
			// non-negative offset (presumably 0..2) — confirm; _colormap
			// is declared char*, so a negative value would index backwards.
			*de++ = *(o + *cmap++);
			o += 3;	// advance to the next 3-byte source pixel.
		}
		de += p;		// skip destination row padding.
		o += _padb;		// skip source row padding, if any.
	}
	return YARP_OK;
}
/// Writes an image to disk: PGM for MONO pixels, PPM otherwise.
/// Pixel types other than MONO/RGB are converted to RGB before saving.
/// Always returns 0.
static int ImageWrite(YARPGenericImage& img, const char *filename)
{
	if (img.GetID() == YARP_PIXEL_MONO)
	{
		// Grayscale: dump the raw buffer as a PGM.
		SavePGM((char*)img.GetRawBuffer(), filename, img.GetHeight(), img.GetWidth());
	}
	else if (img.GetID() == YARP_PIXEL_RGB)
	{
		// Already RGB: dump the raw buffer as a PPM.
		SavePPM((char*)img.GetRawBuffer(), filename, img.GetHeight(), img.GetWidth());
	}
	else
	{
		// Any other pixel type: convert to RGB first, then save as PPM.
		YARPImageOf<YarpPixelRGB> converted;
		converted.CastCopy(img);
		SavePPM((char*)converted.GetRawBuffer(), filename, converted.GetHeight(), converted.GetWidth());
	}
	return 0;
}
/// Builds a DIB converter sized and typed after the given image.
/// @param img source image; must have a valid IPL header attached.
YARPDIBConverter::YARPDIBConverter(const YARPGenericImage &img)
{
	// BUGFIX: this assert used to read "img.GetIplPointer != NULL", which
	// tests the member-function's address (always non-null) instead of
	// calling it — the check was a no-op. Call the accessor.
	ACE_ASSERT (img.GetIplPointer() != NULL);
	dimX = img.GetWidth();
	dimY = img.GetHeight();
	pixelType = img.GetID();
	_alloc_dib();	// allocate the DIB storage for dimX x dimY x pixelType.
}
void GetPlane (const YARPGenericImage& in, YARPImageOf<YarpPixelMono>& out, int shift) { ACE_ASSERT (in.GetIplPointer() != NULL && out.GetIplPointer() != NULL); ACE_ASSERT (in.GetWidth() == out.GetWidth()); ACE_ASSERT (in.GetHeight() == out.GetHeight()); const int width = in.GetWidth(); const int height = in.GetHeight(); unsigned char *src = NULL; unsigned char *dst = NULL; for (int i = 0; i < height; i++) { src = (unsigned char *)in.GetArray()[i] + shift; dst = (unsigned char *)out.GetArray()[i]; for (int j = 0; j < width; j++) { *dst++ = *src; src += 3; } } }
void YARPBlobFinder::_apply(const YARPGenericImage& is, YARPGenericImage& id) { // hsv_enhanced is already filled out. m_last_tag = SpecialTagging (m_tagged, m_hsv_enhanced); // I've got the tagged image now. assert (m_last_tag <= MaxBoxesBlobFinder); //printf ("last_tag is %d\n", m_last_tag); const int w = is.GetWidth (); const int h = is.GetHeight (); for(int i = 0; i <= m_last_tag; i++) { m_boxes[i].cmax = m_boxes[i].rmax = 0; m_boxes[i].cmax = m_boxes[i].rmax = 0; m_boxes[i].cmin = w; m_boxes[i].rmin = h; m_boxes[i].xmax = m_boxes[i].ymax = 0; m_boxes[i].xmin = m_boxes[i].ymin = m_lp.GetSize(); m_boxes[i].total_sal = 0; m_boxes[i].total_pixels = 0; m_boxes[i].xsum = m_boxes[i].ysum = 0; m_boxes[i].valid = false; } // special case for the null tag (0) m_boxes[0].rmax = m_boxes[0].rmin = h/2; m_boxes[0].cmax = m_boxes[0].cmin = w/2; m_boxes[0].xmax = m_boxes[0].xmin = m_lp.GetSize() / 2; m_boxes[0].ymax = m_boxes[0].ymin = m_lp.GetSize() / 2; m_boxes[0].valid = true; // build all possible bounding boxes out of the tagged image. // pixels are logpolar, averaging is done in cartesian. unsigned char *source = (unsigned char *)m_hsv_enhanced.GetArray()[0]; // Hue. short *tmp = m_tagged; for(int r = 0; r < h; r++) for(int c = 0; c < w; c++) { short tag_index = *tmp++; if (tag_index != 0) { m_boxes[tag_index].total_pixels++; // the saliency here is the average hue. m_boxes[tag_index].total_sal += *source; source += 3; m_boxes[tag_index].valid = true; // x,y. 
double x, y; m_lp.Lp2Cart (double(c), double(r), x, y); if (m_boxes[tag_index].ymax < int(y)) m_boxes[tag_index].ymax = int(y); if (m_boxes[tag_index].ymin > int(y)) m_boxes[tag_index].ymin = int(y); if (m_boxes[tag_index].xmax < int(x)) m_boxes[tag_index].xmax = int(x); if (m_boxes[tag_index].xmin > int(x)) m_boxes[tag_index].xmin = int(x); if (m_boxes[tag_index].rmax < r) m_boxes[tag_index].rmax = r; if (m_boxes[tag_index].rmin > r) m_boxes[tag_index].rmin = r; if (m_boxes[tag_index].cmax < c) m_boxes[tag_index].cmax = c; if (m_boxes[tag_index].cmin > c) m_boxes[tag_index].cmin = c; m_boxes[tag_index].ysum += int(y); m_boxes[tag_index].xsum += int(x); } } RemoveNonValid (); MergeBoxes (); //further clustering not needed. // merge boxes which are too close. // statically. Clearly this procedure could be more effective // if it takes time into account. // LATER: update also the logpolar coordinates during // the merger of the boxes. int max_tag, max_num; // create now the subset of attentional boxes. for (int box_num = 0; box_num < MaxBoxes; box_num++) { // find the most frequent tag, zero does not count max_tag = max_num = 0; for(int i = 1; i < m_last_tag; i++) { int area = TotalArea (m_boxes[i]); if (area > max_num && m_boxes[i].total_pixels > 0) //if(m_boxes[i].total_pixels > max_num) { max_num = area; //m_boxes[i].total_pixels; max_tag = i; } } if (max_tag != 0) { // compute saliency of region. // it cannot be done here. m_attn[box_num] = m_boxes[max_tag]; m_attn[box_num].valid = true; m_attn[box_num].centroid_y = m_boxes[max_tag].ysum / max_num; m_attn[box_num].centroid_x = m_boxes[max_tag].xsum / max_num; m_boxes[max_tag].total_pixels = 0; } else { // no motion, return the center m_attn[box_num].valid = false; m_attn[box_num].centroid_x = m_lp.GetSize() / 2; m_attn[box_num].centroid_y = m_lp.GetSize() / 2; } } //PrintBoxes (m_attn, MaxBoxes); // I've here MaxBoxes boxes accounting for the largest // regions in the image. 
// remember that there's a bias because of logpolar. // destination image is not changed. // should I store the result of the tagging process? }
/// LATER: this is NOT tested. static int ImageRead(YARPGenericImage& img, const char *filename) { int width, height, color, num, size; FILE *fp = ACE_OS::fopen(filename, "rb"); if (!fp) //die("cannot open file for reading"); { warn("cannot open file for reading"); return -1; } if (ReadHeader(fp, &height, &width, &color) < 0) { ACE_OS::fclose (fp); return -1; } if (!color) // img.GetID()==YARP_PIXEL_RGB || img.GetID() == YARP_PIXEL_MONO) { // img.SetID(color?YARP_PIXEL_RGB:YARP_PIXEL_MONO); img.SetID(YARP_PIXEL_MONO); img.Resize(width,height); ///ACE_ASSERT(img.GetPadding() == 0); ACE_ASSERT(img.GetRawBuffer()!=NULL); const int w = img.GetWidth() * img.GetPixelSize(); const int h = img.GetHeight(); const int pad = img.GetPadding() + w; char *dst = img.GetRawBuffer (); size = w * h; num = 0; for (int i = 0; i < h; i++) { num += ACE_OS::fread((void *) dst, 1, (size_t) w, fp); dst += pad; } } else if (img.GetID()==YARP_PIXEL_RGB) { img.SetID(YARP_PIXEL_RGB); img.Resize(width,height); ///ACE_ASSERT(img.GetPadding() == 0); ACE_ASSERT(img.GetRawBuffer()!=NULL); const int w = img.GetWidth() * img.GetPixelSize(); const int h = img.GetHeight(); const int pad = img.GetPadding() + w; char *dst = img.GetRawBuffer (); size = w * h; num = 0; for (int i = 0; i < h; i++) { num += ACE_OS::fread((void *) dst, 1, (size_t) w, fp); dst += pad; } } else { // image is color, nothing was specified, assume BGR img.SetID(YARP_PIXEL_BGR); img.Resize(width,height); ///ACE_ASSERT(img.GetPadding() == 0); ACE_ASSERT(img.GetRawBuffer()!=NULL); const int w = img.GetWidth() * img.GetPixelSize(); const int h = img.GetHeight(); const int pad = img.GetPadding() + w; size = w * h; YARPImageOf<YarpPixelRGB> img2; img2.Resize (width,height); char *dst = img2.GetRawBuffer (); num = 0; for (int i = 0; i < h; i++) { num += ACE_OS::fread((void *) dst, 1, (size_t) w, fp); dst += pad; } img.CastCopy(img2); } if (num != size) { ACE_OS::printf ( "%d versus %d\n", num, size ); //die("cannot read image data from 
file"); warn("cannot read image data from file"); ACE_OS::fclose (fp); return -1; } ACE_OS::fclose(fp); return 0; }