Ejemplo n.º 1
0
YARPDIBConverter::YARPDIBConverter(const YARPGenericImage &img)
{
	// Require a fully-initialized source image (valid IPL pointer).
	// BUGFIX: the original asserted on the member-function pointer itself
	// (img.GetIplPointer != NULL), which is always true and made the check
	// a no-op; the accessor must actually be called.
	ACE_ASSERT (img.GetIplPointer() != NULL);

	// Cache the source geometry and pixel format, then allocate the DIB
	// buffer to match.
	dimX = img.GetWidth();
	dimY = img.GetHeight();
	pixelType = img.GetID();
	_alloc_dib();
}
Ejemplo n.º 2
0
// Convert a cartesian image into its logpolar representation.
// @param in   source cartesian image; must be _xsize x _ysize.
// @param out  destination logpolar image; must be _stheta x _srho.
// @return YARP_OK on completion.
int YARPLogpolarSampler::Cartesian2Logpolar (const YARPGenericImage& in, YARPGenericImage& out)
{
	using namespace _logpolarParams;
	ACE_ASSERT (in.GetWidth() == _xsize && in.GetHeight() == _ysize);
	ACE_ASSERT (out.GetWidth() == _stheta && out.GetHeight() == _srho);

	// Run the cartesian->logpolar resampling into the intermediate buffer
	// _outimage (3 bytes per pixel, given the "o += 3" stride below --
	// TODO confirm channel layout).
	Make_LP_Real (_outimage, (unsigned char *)in.GetRawBuffer(), &_img, _cart2LP_Map);
	
	char *de = out.GetRawBuffer();		// destination, one byte per pixel
	unsigned char * o = _outimage;		// intermediate, 3 bytes per pixel
	char *cmap = _colormap;				// per-pixel channel selector (0..2, presumably)
	const int w = out.GetWidth();
	const int h = out.GetHeight();
	const int p = out.GetPadding();		// destination row padding in bytes

	int i, j;
	for (i = 0; i < h; i++)
	{
		for (j = 0; j < w; j++)
		{
			// Pick one channel of the 3-byte intermediate pixel, as
			// dictated by the colormap entry for this position.
			*de++ = *(o + *cmap++); 
			o += 3;
		}

		// Skip row padding in both destination and intermediate buffers.
		de += p;
		o += _padb;
	}

	return YARP_OK;
}
Ejemplo n.º 3
0
/// Write @a img to @a filename as PGM (mono) or PPM (color).
/// Images that are neither mono nor RGB are first cast-copied into an
/// RGB buffer and saved as PPM.
/// @return always 0 (the Save* helpers report no error here).
static int ImageWrite(YARPGenericImage& img, const char *filename)
{
	///ACE_ASSERT(img.GetPadding()==0);
	if (img.GetID()==YARP_PIXEL_MONO)
	{
		// Single-plane image: dump the raw buffer as a PGM.
		SavePGM((char*)img.GetRawBuffer(), filename, img.GetHeight(), img.GetWidth());
	}
	else if (img.GetID()==YARP_PIXEL_RGB)
	{
		// Already RGB: save directly, no conversion needed.
		// (The original re-asserted the condition just tested by this
		// branch and nested the check one level deeper; both removed.)
		SavePPM((char*)img.GetRawBuffer(),filename,img.GetHeight(),img.GetWidth());
	}
	else
	{
		// Any other pixel format: convert to RGB before saving.
		YARPImageOf<YarpPixelRGB> img2;
		img2.CastCopy(img);
		SavePPM((char*)img2.GetRawBuffer(),filename,img2.GetHeight(), img2.GetWidth());
	}

	return 0;
}
Ejemplo n.º 4
0
/// Non-blocking poll of an image input port.
/// When a new frame is available, @a out is made to refer to the port's
/// content and true is returned; otherwise @a out is left untouched.
inline bool pollPort(YARPInputPortOf<YARPGenericImage> &port, YARPGenericImage &out)
{
	// Read(0) is the non-blocking variant: it returns immediately when
	// there is no fresh data.
	if (!port.Read(0))
		return false;

	out.Refer(port.Content());
	return true;
}
Ejemplo n.º 5
0
/// Copy the mono image @a in into one channel of the interleaved image
/// @a out; @a shift selects the byte offset of the target channel within
/// each (3-byte -- see the "+= 3" stride) destination pixel.
void SetPlane (const YARPImageOf<YarpPixelMono>& in, YARPGenericImage& out, int shift)
{
	ACE_ASSERT (in.GetIplPointer() != NULL && out.GetIplPointer() != NULL);
	ACE_ASSERT (in.GetWidth() == out.GetWidth());
	ACE_ASSERT (in.GetHeight() == out.GetHeight());

	const int w = in.GetWidth();
	const int h = in.GetHeight();

	for (int row = 0; row < h; row++)
	{
		// Row pointers come from the images' line arrays; the destination
		// starts at the requested channel offset.
		const unsigned char *s = (const unsigned char *)in.GetArray()[row];
		unsigned char *d = (unsigned char *)out.GetArray()[row] + shift;

		for (int col = 0; col < w; col++)
		{
			*d = *s++;
			d += 3;	// destination pixels are interleaved 3 bytes apart
		}
	}
}
Ejemplo n.º 6
0
/// Tag the blobs of the (already prepared) m_hsv_enhanced image, build a
/// bounding box and statistics per tag, then select the MaxBoxes largest
/// regions into m_attn. The destination image @a id is not modified.
void YARPBlobFinder::_apply(const YARPGenericImage& is, YARPGenericImage& id)
{
	// hsv_enhanced is already filled out.
	m_last_tag = SpecialTagging (m_tagged, m_hsv_enhanced);

	// I've got the tagged image now.
	assert (m_last_tag <= MaxBoxesBlobFinder);

	//printf ("last_tag is %d\n", m_last_tag);

	const int w = is.GetWidth ();
	const int h = is.GetHeight ();

	// Reset all per-tag accumulators; extrema start at the opposite bound
	// so the first pixel of each tag initializes them.
	// (The original assigned "cmax = rmax = 0" twice in a row; the
	// duplicate statement was removed.)
	for(int i = 0; i <= m_last_tag; i++)
	{
		m_boxes[i].cmax = m_boxes[i].rmax = 0;
		m_boxes[i].cmin = w;
		m_boxes[i].rmin = h;

		m_boxes[i].xmax = m_boxes[i].ymax = 0;
		m_boxes[i].xmin = m_boxes[i].ymin = m_lp.GetSize();

		m_boxes[i].total_sal = 0;
		m_boxes[i].total_pixels = 0;
		m_boxes[i].xsum = m_boxes[i].ysum = 0;
		m_boxes[i].valid = false;
	}
  
	// special case for the null tag (0): a degenerate box at the center.
	m_boxes[0].rmax = m_boxes[0].rmin = h/2;
	m_boxes[0].cmax = m_boxes[0].cmin = w/2;
	m_boxes[0].xmax = m_boxes[0].xmin = m_lp.GetSize() / 2;
	m_boxes[0].ymax = m_boxes[0].ymin = m_lp.GetSize() / 2;
	m_boxes[0].valid = true;

	// build all possible bounding boxes out of the tagged image.

	// pixels are logpolar, averaging is done in cartesian.
	unsigned char *source = (unsigned char *)m_hsv_enhanced.GetArray()[0];	// Hue. 
	short *tmp = m_tagged;
	for(int r = 0; r < h; r++)
		for(int c = 0; c < w; c++)
		{
			short tag_index = *tmp++;
			if (tag_index != 0)
			{
				m_boxes[tag_index].total_pixels++;

				// the saliency here is the average hue.
				m_boxes[tag_index].total_sal += *source;

				m_boxes[tag_index].valid = true;

				// x,y: cartesian position of this logpolar pixel.
				double x, y;
				m_lp.Lp2Cart (double(c), double(r), x, y);

				if (m_boxes[tag_index].ymax < int(y)) m_boxes[tag_index].ymax = int(y);
				if (m_boxes[tag_index].ymin > int(y)) m_boxes[tag_index].ymin = int(y);
				if (m_boxes[tag_index].xmax < int(x)) m_boxes[tag_index].xmax = int(x);
				if (m_boxes[tag_index].xmin > int(x)) m_boxes[tag_index].xmin = int(x);

				if (m_boxes[tag_index].rmax < r) m_boxes[tag_index].rmax = r;
				if (m_boxes[tag_index].rmin > r) m_boxes[tag_index].rmin = r;
				if (m_boxes[tag_index].cmax < c) m_boxes[tag_index].cmax = c;
				if (m_boxes[tag_index].cmin > c) m_boxes[tag_index].cmin = c;

				m_boxes[tag_index].ysum += int(y);
				m_boxes[tag_index].xsum += int(x);
			}

			// BUGFIX: advance the hue pointer once per pixel (the image is
			// interleaved, 3 bytes per pixel). The original advanced it
			// only for tagged pixels, so the sampled hue drifted out of
			// sync with the pixel being accumulated. Assumes the rows of
			// m_hsv_enhanced are unpadded, as the original did -- TODO
			// confirm.
			source += 3;
		}
	
	RemoveNonValid ();
	MergeBoxes (); //further clustering not needed.

	// merge boxes which are too close.
	// statically. Clearly this procedure could be more effective 
	// if it takes time into account.
	// LATER: update also the logpolar coordinates during
	//	the merger of the boxes.

	int max_tag, max_num;

	// create now the subset of attentional boxes.
	for (int box_num = 0; box_num < MaxBoxes; box_num++)
    {
		// find the most frequent tag, zero does not count 
		// NOTE(review): this scans i < m_last_tag while the other loops
		// use <= m_last_tag, so the last tag can never be selected --
		// confirm whether that is intentional.
		max_tag = max_num = 0;
		for(int i = 1; i < m_last_tag; i++)
		{
			int area = TotalArea (m_boxes[i]);
			if (area > max_num && m_boxes[i].total_pixels > 0)
			//if(m_boxes[i].total_pixels > max_num)
			{
				max_num = area; //m_boxes[i].total_pixels;
				max_tag = i;
			}
		}

		if (max_tag != 0)
		{
			// compute saliency of region.
			// it cannot be done here.
			m_attn[box_num] = m_boxes[max_tag];

			m_attn[box_num].valid = true;
			// NOTE(review): the centroid divides the coordinate sums by
			// max_num (the value returned by TotalArea), not by
			// total_pixels; verify TotalArea is the intended divisor.
			m_attn[box_num].centroid_y = m_boxes[max_tag].ysum / max_num;
			m_attn[box_num].centroid_x = m_boxes[max_tag].xsum / max_num;
			
			// mark the tag as consumed so the next iteration picks the
			// next-largest region.
			m_boxes[max_tag].total_pixels = 0;
		}
		else
		{
			// no motion, return the center 
			m_attn[box_num].valid = false;
			m_attn[box_num].centroid_x = m_lp.GetSize() / 2;
			m_attn[box_num].centroid_y = m_lp.GetSize() / 2;
		}
	}

	//PrintBoxes (m_attn, MaxBoxes);

	// I've here MaxBoxes boxes accounting for the largest 
	// regions in the image.
	// remember that there's a bias because of logpolar.

	// destination image is not changed.
	// should I store the result of the tagging process?
}
Ejemplo n.º 7
0
// Read an image from file @a src into @a dest.
// For non-NUMERIC formats this delegates to ImageRead (the PGM/PPM
// loader). FORMAT_NUMERIC files are whitespace-separated ASCII numbers:
// the file is scanned twice -- first to measure the grid (hh rows by ww
// columns, ww being the widest row), then to parse the values into a
// float image, which is finally cast-copied into @a dest.
// @return 0 on success along the numeric path, or ImageRead's result.
int YARPImageFile::Read(const char *src, YARPGenericImage& dest, int format)
{
 if (format!=YARPImageFile::FORMAT_NUMERIC)
    {
      return ImageRead(dest,src);
    }
  int hh = 0, ww = 0;
  {
    // Pass 1: count rows (hh) and the maximum token count per row (ww).
    ifstream fin(src);
    int blank = 1;	// are we currently between tokens?
    int curr = 0;	// tokens seen so far on the current line
    while (!fin.eof())
      {
	int ch = fin.get();
	//if (ch!='\n') printf("[%c]",ch);
	// Whitespace or EOF terminates a pending token, if any.
	if (ch==' ' || ch == '\t' || ch == '\r' || ch == '\n' || fin.eof())
	  {
	    if (!blank)
	      {
		if (curr==0)
		  {
		    // first token of a new row
		    hh++;
		  }
		curr++;
		if (curr>ww)
		  {
		    ww = curr;
		    //printf("%d\n", ww);
		  }
	      }
	    blank = 1;
	    if (ch=='\n')
	      {
		curr = 0;
	      }
	  }
	else
	  {
	    blank = 0;
	  }
      }
  }
  //printf("yyy dim %d %d\n", hh, ww);
  YARPImageOf<YarpPixelFloat> flt;
  flt.Resize(ww,hh);
  hh = 0; ww = 0;
  {
    // Pass 2: same scan, this time collecting each token's characters in
    // buf and storing the parsed value at (column, row) = (curr-1, hh-1).
    char buf[256];
    int idx = 0;
    ifstream fin(src);
    int blank = 1;
    int curr = 0;
    while (!fin.eof())
      {
	int ch = fin.get();
	if (ch==' ' || ch == '\t' || ch == '\r' || ch == '\n' || fin.eof())
	  {
	    if (!blank)
	      {
		if (curr==0)
		  {
		    hh++;
		  }
		curr++;
		if (curr>ww)
		  {
		    ww = curr;
		  }
		buf[idx] = '\0';
		flt(curr-1,hh-1) = float(atof(buf));
		idx = 0;
	      }
	    blank = 1;
	    if (ch=='\n')
	      {
		curr = 0;
	      }
	  }
	else
	  {
	    buf[idx] = ch;
	    idx++;
	    // keeps room for the terminating '\0' written above
	    assert(idx<sizeof(buf));
	    blank = 0;
	  }
      }
  }
    
  dest.CastCopy(flt);

  return 0;

  //return ImageRead(dest,src);
  //return 0;
}
Ejemplo n.º 8
0
/// LATER: this is NOT tested.
static int ImageRead(YARPGenericImage& img, const char *filename)
{
	int width, height, color, num, size;

	FILE  *fp = ACE_OS::fopen(filename, "rb");

	if (!fp)    //die("cannot open file for reading");
	{
		warn("cannot open file for reading");
		return -1;
	}

	if (ReadHeader(fp, &height, &width, &color) < 0)
	{
		ACE_OS::fclose (fp);
		return -1;
	}

	if (!color)
		// img.GetID()==YARP_PIXEL_RGB || img.GetID() == YARP_PIXEL_MONO)
	{
		// img.SetID(color?YARP_PIXEL_RGB:YARP_PIXEL_MONO);
		img.SetID(YARP_PIXEL_MONO);
		img.Resize(width,height);
		///ACE_ASSERT(img.GetPadding() == 0);
		ACE_ASSERT(img.GetRawBuffer()!=NULL);

		const int w = img.GetWidth() * img.GetPixelSize();
		const int h = img.GetHeight();
		const int pad = img.GetPadding() + w;
		char *dst = img.GetRawBuffer ();
		size = w * h;

		num = 0;
		for (int i = 0; i < h; i++)
		{
			num += ACE_OS::fread((void *) dst, 1, (size_t) w, fp);
			dst += pad;
		}
	}
	else if (img.GetID()==YARP_PIXEL_RGB)
	{		
		img.SetID(YARP_PIXEL_RGB);
		img.Resize(width,height);
		///ACE_ASSERT(img.GetPadding() == 0);
		ACE_ASSERT(img.GetRawBuffer()!=NULL);

		const int w = img.GetWidth() * img.GetPixelSize();
		const int h = img.GetHeight();
		const int pad = img.GetPadding() + w;
		char *dst = img.GetRawBuffer ();
		size = w * h;

		num = 0;
		for (int i = 0; i < h; i++)
		{
			num += ACE_OS::fread((void *) dst, 1, (size_t) w, fp);
			dst += pad;
		}

	}
	else
	{
		// image is color, nothing was specified, assume BGR
		img.SetID(YARP_PIXEL_BGR);
		img.Resize(width,height);
		///ACE_ASSERT(img.GetPadding() == 0);
		ACE_ASSERT(img.GetRawBuffer()!=NULL);

		const int w = img.GetWidth() * img.GetPixelSize();
		const int h = img.GetHeight();
		const int pad = img.GetPadding() + w;
		size = w * h;

		YARPImageOf<YarpPixelRGB> img2;
		img2.Resize (width,height);
		char *dst = img2.GetRawBuffer ();

		num = 0;
		for (int i = 0; i < h; i++)
		{
			num += ACE_OS::fread((void *) dst, 1, (size_t) w, fp);
			dst += pad;
		}

		img.CastCopy(img2);
	}

	if (num != size) 
	{
		ACE_OS::printf ( "%d versus %d\n", num, size );
		//die("cannot read image data from file");
		warn("cannot read image data from file");
		ACE_OS::fclose (fp);
		return -1;
	}

	ACE_OS::fclose(fp);

	return 0;
}