Example #1
0
/* Replaces the central half of `destination` (from 1/4 to 3/4 in both axes)
 * with a 3x3 grayscale median filter of `source`; the rest of the image is
 * a verbatim copy.  Pixels are converted to luma (ITU-R BT.601 weights)
 * scaled to the nominal [16..235] range before the median is taken.
 * Assumes both images are 8-bit, 3-channel and the same size (not checked). */
void process(IplImage *source, IplImage *destination) {
  uchar *dataS, *dataD;
  int bpp = 3;      /* bytes per pixel (interleaved BGR) */
  int step;         /* row stride in bytes */
  CvSize size;
  int x, y;

  /* for outer region */
  cvCopy(source, destination, NULL);

  cvGetRawData(source, &dataS, &step, &size);
  cvGetRawData(destination, &dataD, NULL, NULL);

  for (y = size.height / 4; y < (size.height * 3 / 4); y++) {
    /* BUGFIX: the first processed pixel is at column size.width/4, i.e.
     * byte offset bpp * (size.width / 4).  The previous expression
     * (bpp * size.width) / 4 is off by up to two bytes whenever the width
     * is not a multiple of 4, which mis-aligns the color channels. */
    uchar* pD = dataD + step * y + bpp * (size.width / 4);
    for (x = size.width / 4; x < (size.width * 3 / 4); x++) {
      int i;
      int j;
      double temporary;
      double data[9];   /* luma values of the 3x3 neighborhood */

      for (j = 0; j < 3; j++) {
        for (i = 0; i < 3; i++) {
          uchar* pN = dataS + step * (y + j - 1) + bpp * (x + i - 1);
          uchar blueS = *pN;
          uchar greenS = *(pN + 1);
          uchar redS = *(pN + 2);
          double intensityS = (0.114 * blueS + 0.587 * greenS + 0.299 * redS)
              / 255.0;
          data[3 * j + i] = 219.0 * intensityS + 16;
        }
      }

      /* selection sort; only data[4] (the median) is actually used below */
      for (i = 0; i < 8; i++) {
        for (j = i + 1; j < 9; j++) {
          if (data[i] > data[j]) {
            temporary = data[i];
            data[i] = data[j];
            data[j] = temporary;
          }
        }
      }

      /* write the median as a gray BGR pixel */
      *pD = (uchar) data[4];
      *(pD + 1) = (uchar) data[4];
      *(pD + 2) = (uchar) data[4];
      pD += 3;
    }
  }
}
Example #2
0
/* Moves the contour (snake) so that its energy is minimized.

void cvSnakeImage( const IplImage* image, CvPoint* points, int length,
                   float* alpha, float* beta, float* gamma, int coeff_usage,
                   CvSize win, CvTermCriteria criteria, int calc_gradient=1 );
image
    Input image or external energy field.
points
    Contour points (the snake).
length
    Number of contour points.
alpha
    Weight[s] of the continuity energy: either a single float or an array of
    `length` floats, one weight per contour point.
beta
    Weight[s] of the curvature energy; same format as alpha.
gamma
    Weight[s] of the image energy; same format as alpha.
coeff_usage
    How the preceding three parameters are used:
    CV_VALUE means each of alpha, beta, gamma points to a single value that is
    used for all points;
    CV_ARRAY means each of alpha, beta, gamma points to an array of
    coefficients, so every snake point can have different coefficients.  Each
    coefficient array must therefore have the same size as the contour.
win
    Size of the neighborhood searched for the minimum around every point;
    both win.width and win.height must be odd.
criteria
    Termination criteria.
calc_gradient
    Gradient flag.  If non-zero, the function computes the gradient magnitude
    for every image pixel and treats it as the energy field; otherwise the
    input image itself is used.

The function cvSnakeImage updates the snake in order to minimize its total
energy, which is the sum of an internal energy that depends on the contour
shape (the smoother the contour, the smaller the internal energy) and an
external energy that depends on the energy field; the external energy usually
reaches its minimum at local energy extrema, which correspond to the image
edges indicated by the image gradient.

The parameter criteria.epsilon defines the minimum number of points that must
move during an iteration for the iteration process to keep running.

The function terminates when the number of points moved in an iteration is
smaller than criteria.epsilon or when the maximum iteration count
criteria.max_iter is reached.

 */
/* Validates the input image (must be single-channel 8-bit) and delegates the
   actual energy-minimization to the low-level icvSnake8uC1R routine. */
CV_IMPL void
cvSnakeImage( const IplImage* src, CvPoint* points,
              int length, float *alpha,
              float *beta, float *gamma,
              int coeffUsage, CvSize win,
              CvTermCriteria criteria, int calcGradient )
{

    CV_FUNCNAME( "cvSnakeImage" );

    __BEGIN__;

    uchar *data;
    CvSize size;
    int step;

    /* the snake implementation only supports 8-bit single-channel images */
    if( src->nChannels != 1 )
        CV_ERROR( CV_BadNumChannels, "input image has more than one channel" );

    if( src->depth != IPL_DEPTH_8U )
        CV_ERROR( CV_BadDepth, cvUnsupportedFormat );

    cvGetRawData( src, &data, &step, &size );

    /* calcGradient selects whether the gradient magnitude or the raw image
       intensity is used as the external energy field */
    IPPI_CALL( icvSnake8uC1R( data, step, size, points, length,
                              alpha, beta, gamma, coeffUsage, win, criteria,
                              calcGradient ? _CV_SNAKE_GRAD : _CV_SNAKE_IMAGE ));
    __END__;
}
Example #3
0
// ------------------------------------------------------------------------
// Serializes the histogram's raw bin values through the MFC archive:
// writes every bin when storing, reads every bin back when loading.
void LABHistogram2D::Serialize( CArchive& ar )
{
    if( !CV_IS_HIST(h))
        ASSERT(false);

    int size[CV_MAX_DIM];
    int dims = cvGetDims( this->h->bins, size );
    int total = 1;      // total number of bins across all dimensions
    for(int i = 0; i < dims; i++ )
        total *= size[i];

    float *ptr = 0;
    cvGetRawData( this->h->bins, (uchar**)&ptr);
    if (ar.IsStoring()) {
        for(int i = 0; i < total; i++ )
            ar << ptr[i];
    }
    else {
        // BUGFIX: the load path previously read every archived value into
        // ptr[0], leaving bins 1..total-1 untouched and corrupting the
        // deserialized histogram.
        for(int i = 0; i < total; i++ )
            ar >> ptr[i];
    }
}
Example #4
0
// ------------------------------------------------------------------------
// Formats the histogram as text: one row of comma-separated "%.4f" bin
// values per a-bin, queried through cvQueryHistValue_2D.
System::String^ LABHistogram2D::ToString()
{
	if( !CV_IS_HIST(h))
		ASSERT(false);

	// NOTE: the former cvGetDims/cvGetRawData preamble was dead code
	// (its results were never used) and has been removed.
	System::String ^s;
	for(int i=0; i<a_bins;i++ )
	{
		for(int j=0; j<b_bins;j++ )
		{
			float bin_val = cvQueryHistValue_2D( h, i, j );
			s += System::String::Format("{0:f4}, ", bin_val);
		}
		s += "\n";
	}
	return s;
}
Example #5
0
// Returns a newly allocated, tightly packed (no row padding) copy of the
// current image's pixel data, or NULL if there is no image or it is
// single-channel.  The caller owns the returned buffer (delete[]).
unsigned char* ImageHandler::getRGBImage()
{
    if (m_imgSrc->curImage == NULL)
    {
        return NULL;
    }

    if (m_imgSrc->curImage->nChannels == 1)
    {
        return NULL;
    }

    int rows, cols, iplCols;

    rows = m_imgSrc->curImage->height;
    cols = m_imgSrc->curImage->width;
    iplCols= m_imgSrc->curImage->widthStep;  // source row stride in bytes

    unsigned char *dataCh = new unsigned char[rows*cols*3];

    // BUGFIX: cvGetRawData used to overwrite `dataCh` with the image's
    // internal buffer, leaking the allocation above and handing the caller
    // a pointer it must not free.  Copy the pixels into the owned buffer
    // instead, dropping any per-row padding.
    unsigned char *raw = NULL;
    cvGetRawData(m_imgSrc->curImage, &raw);

    for (int r = 0; r < rows; r++)
        for (int c = 0; c < cols*3; c++)
            dataCh[r*cols*3 + c] = raw[r*iplCols + c];

    return dataCh;
}
Example #6
0
static void libopencv_(Main_opencvMat2torch)(CvMat *source, THTensor *dest) {

  int mat_step;
  CvSize mat_size;
  THTensor *tensor;
  // type dependent variables
  float * data_32F;
  float * data_32Fp;
  double * data_64F;
  double * data_64Fp;
  uchar * data_8U;
  uchar * data_8Up;
  char * data_8S;
  char * data_8Sp;
  unsigned int * data_16U;
  unsigned int * data_16Up;
  short * data_16S;
  short * data_16Sp;
  switch (CV_MAT_DEPTH(source->type))
    {
    case CV_32F:
      cvGetRawData(source, (uchar**)&data_32F, &mat_step, &mat_size);
      // Resize target
      THTensor_(resize3d)(dest, 1, source->rows, source->cols);
      tensor = THTensor_(newContiguous)(dest);
      data_32Fp = data_32F;
      // copy
      TH_TENSOR_APPLY(real, tensor,
                      *tensor_data = ((real)(*data_32Fp));
                      // step through channels of ipl
                      data_32Fp++;
                      );
      THTensor_(free)(tensor);
      break;
    case CV_64F:
      cvGetRawData(source, (uchar**)&data_64F, &mat_step, &mat_size);
      // Resize target
      THTensor_(resize3d)(dest, 1, source->rows, source->cols);
      tensor = THTensor_(newContiguous)(dest);

      data_64Fp = data_64F;
      // copy
      TH_TENSOR_APPLY(real, tensor,
                      *tensor_data = ((real)(*data_64Fp));
                      // step through channels of ipl
                      data_64Fp++;
                      );
	/* Loads a grayscale image from `filename` and copies its bytes into a
	   176x144 single-channel 32F image which is returned through `output`.
	   Returns false (and prints a message) when the file cannot be loaded. */
	bool DataIO::load_float_image(std::string filename, cv::Mat& output) {

		IplImage* imInt   = cvCreateImage( cvSize(176,144) , IPL_DEPTH_32F , 1);
		IplImage* img = 0;
		img = cvLoadImage(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
		bool res = (img!=0);

		if(res) {

			// data for input image
			uchar* indata;
			int stepIn;
			cvGetRawData( img, (uchar**)&indata, &stepIn);
			int si = sizeof(indata[0]);
			stepIn /= si;

			// data for output image
			int so = sizeof(float);
			float* outdata;
			int stepOut;
			cvGetRawData( imInt, (uchar**)&outdata, &stepOut);
			stepOut /= so;

			// copy float -> uchar
			// NOTE(review): this byte-interleaving loop assumes the loaded
			// image matches imInt's fixed 176x144 geometry — verify that
			// callers only pass files of that size, otherwise the copy
			// indexes out of range.
			for( int y = 0; y < img->height; y++, indata += stepIn, outdata += stepOut) {
				int m = 0;
				for( int k=0; k < so; k++)
					for( int l = k; l < imInt->width*so; l+=so ) {
						*((uchar*)(outdata)+l*si) = *((uchar*)(indata) + m*si);
						m++;
					}
			}

			cvReleaseImage(&img);

		} else {

			std::cout << "Could not find " << filename << std::endl;
		}

		// NOTE(review): cv::Mat(IplImage*) shares the pixel buffer without
		// taking ownership of the header, so imInt is never released here —
		// confirm whether this leak is accepted by the callers.
		output = cv::Mat(imInt);

		return res;
	}
// Refines every contour of `img` with the OpenCV snake algorithm, working on
// a (lazily created) grayscale copy of the original frame.  `gray` is
// reused/created in place so repeated calls avoid reallocation; `useBlur`
// applies a 3x3 median filter first.  Each refined contour replaces the
// original via img->ReplaceContour.
void ImproveContoursPlugin::ProcessImage_static(ImagePlus* img, IplImage* &gray, float alpha, float beta, float gamma, CvSize win, int scheme, bool useBlur, int max_iter, float epsilon)
{
	CvSeq *seq;
	CvPoint* ps;
	if (!gray)
		gray = cvCreateImage( cvSize(img->orig->width, img->orig->height), IPL_DEPTH_8U, 1 );
	cvCvtColor(img->orig, gray, CV_BGR2GRAY);
	if (useBlur)
	{
		// median-blur via a temporary copy (cvSmooth CV_MEDIAN is not in-place)
		IplImage* temp = cvCreateImage( cvSize(gray->width, gray->height), IPL_DEPTH_8U, 1 );
		cvCopyImage(gray, temp);
		cvSmooth(temp, gray, CV_MEDIAN, 3);
		cvReleaseImage(&temp);
	}
	for (int i=(int)img->contourArray.size()-1; i>=0; i--)
	{
		seq = img->contourArray[i];
		int np = seq->total;
		ps = (CvPoint*)malloc( np*sizeof(CvPoint) );
		cvCvtSeqToArray(seq, ps);

		uchar *data;
		CvSize size;
		int step;
		cvGetRawData( gray, &data, &step, &size );
		// Diagnostics mirroring cvSnakeImage's own argument validation.
		// BUGFIX: the old `gray == NULL` test ran *after* cvGetRawData had
		// already dereferenced it (and gray is guaranteed non-NULL above);
		// the `&alpha == NULL`-style and `CV_VALUE != CV_VALUE` checks were
		// tautologically false and have been removed.
		if( (size.height <= 0) || (size.width <= 0) )
			std::cout << "Err2" << std::endl;
		if( step < size.width )
			std::cout << "Err3" << std::endl;
		if( ps == NULL )
			std::cout << "Err4" << std::endl;
		if( np < 3 )
			std::cout << "Err5" << std::endl;
		if( (win.height <= 0) || (!(win.height & 1)))
			std::cout << "Err10 " << win.height << std::endl;
		if( (win.width <= 0) || (!(win.width & 1)))
			std::cout << "Err11" << std::endl;

		// epsilon is scaled by the point count: it is a minimum number of
		// moved points, not a fraction (see cvSnakeImage docs).
		CvTermCriteria term=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, max_iter, epsilon*np);
		cvSnakeImage( gray, ps, np, &alpha, &beta, &gamma, CV_VALUE, win, term, scheme );
		img->ReplaceContour(i, ps, np);
		free(ps); ps=NULL;
	}
}
Example #9
0
/**
 *  \brief	Returns all nonzero points in image into x- and y-coordinate sets
 *
 *  Scans a window around the global guess point (xGuess, yGuess) of the
 *  global image `zeta`, clipped to [0,width) x [0,height), and writes the
 *  coordinates of nonzero pixels to Px/Py, with the count in *numPoints.
 */
void getPointsFromImageHough(int *Px, int *Py, int *numPoints) {
	unsigned char* rawROI = NULL;
	int ssize;
	cvGetRawData(zeta, &rawROI, &ssize, NULL);
	// BUGFIX: the old code called cvCountNonZero(zeta) and immediately
	// discarded the result (num was reset to 0); the dead call is removed.
	// NOTE(review): pixels are addressed with `width` as the row stride; if
	// zeta's widthStep (ssize) differs from width, this indexing is wrong —
	// verify zeta has no row padding.
	int num = 0;
	int i, j;
	for (j = max_of_2(0, yGuess - height); j < min_of_2(height, yGuess + height); j++) {
		for (i = max_of_2(0, xGuess - width); i < min_of_2(width, xGuess + width); i++) {
			if (rawROI[i + j*width] != 0) { Px[num] = i; Py[num] = j; num++; }
		}
	}
	*numPoints = num;
}
Example #10
0
/* Undistorts `src` into `dst` using the intrinsic parameters and the four
   distortion coefficients previously packed into `undistortion_map` by
   cvUnDistortInit (fx, fy, cx, cy, k0..k3 as the first 8 floats). */
void  cvUnDistort( const CvArr* src, CvArr* dst,
                   const CvArr* undistortion_map, int )
{
    union { uchar* ptr; float* fl; } data;
    float a[9] = {0,0,0,0,0,0,0,0,1};
    CvSize sz;

    cvGetRawData( undistortion_map, &data.ptr, 0, &sz );
    assert( sz.width >= 8 );

    /* rebuild the 3x3 camera matrix from the packed intrinsics */
    const float* m = data.fl;
    a[0] = m[0];
    a[4] = m[1];
    a[2] = m[2];
    a[5] = m[3];

    cvUnDistortOnce( src, dst, a, m + 4, 1 );
}
Example #11
0
double LABHistogram2D::Compare(const LABHistogram2D* that) {
	if( !CV_IS_HIST(h) || !CV_IS_HIST(that->h) )
        ASSERT(false);
	
	int size1[CV_MAX_DIM], size2[CV_MAX_DIM];

    int dims1 = cvGetDims( this->h->bins, size1 );
    int dims2 = cvGetDims( that->h->bins, size2 );
	int total = 1;

    if( dims1 != dims2 )
        ASSERT(false);

	for(int i = 0; i < dims1; i++ )
	{
		if( size1[i] != size2[i] )
			ASSERT(false);
		total *= size1[i];
	}

	float *ptr1 = 0, *ptr2 = 0;
    cvGetRawData( this->h->bins, (uchar**)&ptr1 );
    cvGetRawData( that->h->bins, (uchar**)&ptr2 );
	float sum = 0, sum1 = 0, sum2 = 0;
    for(int i = 0; i < total; i++ )
    {
        float a = ptr1[i];
        float b = ptr2[i];
        sum += sqrt(a*b);
		sum1 += a; sum2 +=b;
    }

	// normalize both histograms so that all bins sum up to 1
	if (sum1 == 0 || sum2 == 0)
		return 1;
	return sum/sqrt(sum1*sum2);
}
Example #12
0
/* the two functions below have quite hackerish implementations, use with care
   (or, which is better, switch to cvUndistortInitMap and cvRemap instead */
void cvUnDistortInit( const CvArr*,
                      CvArr* undistortion_map,
                      const float* A, const float* k,
                      int)
{
    union { uchar* ptr; float* fl; } map;
    CvSize sz;

    cvGetRawData( undistortion_map, &map.ptr, 0, &sz );
    assert( sz.width >= 8 );

    /* just save the intrinsic parameters to the map:
       fx, fy, cx, cy followed by the four distortion coefficients */
    float* out = map.fl;
    out[0] = A[0];
    out[1] = A[4];
    out[2] = A[2];
    out[3] = A[5];
    out[4] = k[0];
    out[5] = k[1];
    out[6] = k[2];
    out[7] = k[3];
}
/* Grabs one frame (optionally scaled / converted to grayscale), stores its
   size in bytes into *imagesize, and returns a pointer to the raw pixel
   buffer (also kept in the file-scope `raw_data`).  Returns NULL on failure.
   NOTE(review): the IplImage from grab_image() is never released here; the
   returned pointer aliases its buffer, so releasing it would dangle — but as
   written every call leaks one image header.  Verify the intended ownership
   with grab_image()'s implementation. */
unsigned char*
grab_raw_data(const double scale, const int convert_grayscale, int* imagesize)
{
	IplImage *image = grab_image(scale, convert_grayscale);

	if(!image) {
		printf("Grab image failed.\n");
		return NULL;
	}
	
	*imagesize = image->imageSize;
	
	cvGetRawData(image, &raw_data, NULL, NULL);

	return raw_data;
}
Example #14
0
////////////////////////////////////////////////////////////////////
// Method:	OnDraw
// Class:	CCamView
// Purpose:	CCamView drawing — converts the captured OpenCV frame to a
//			wxBitmap (with a green border rectangle) and refreshes the view.
// Input:	pointer to the captured frame (may be NULL)
// Output:	nothing
////////////////////////////////////////////////////////////////////
void CCamView::DrawCam( IplImage* pImg )
{
	// re-entrancy guard
	if( m_bDrawing ) return;
	m_bDrawing = true;

	// if there was an image then we need to update view
	if( pImg )
	{
		IplImage* pDstImg = cvCloneImage( pImg ); //m_Canvas.GetImage();

		int nCamWidth = m_pCamera->m_nWidth;
		int nCamHeight = m_pCamera->m_nHeight;

		// draw a rectangle
		cvRectangle( pDstImg,
					cvPoint( 10, 10 ),
					cvPoint( nCamWidth-10, nCamHeight-10 ),
					CV_RGB( 0,255,0 ), 1 );

		// process image from opencv to wxwidgets
		unsigned char *rawData;
		// draw my stuff to output canvas
		CvSize roiSize;
		int step = 0;
		// get raw data from ipl image
		cvGetRawData( pDstImg, &rawData, &step, &roiSize );
		// convert data from raw image to wxImg (static data: wxImage does not
		// copy; safe because it is consumed before pDstImg is released)
		wxImage pWxImg = wxImage( nCamWidth, nCamHeight, rawData, TRUE );
		// convert to bitmap to be used by the window to draw
		m_pBitmap = wxBitmap( pWxImg.Scale(m_nWidth, m_nHeight) );

		m_bNewImage = true;
		m_bDrawing = false;

		Refresh( FALSE );

		Update( );

		cvReleaseImage( &pDstImg );

	}
	else
	{
		// BUGFIX: clear the guard even when no frame was supplied; previously
		// a NULL pImg left m_bDrawing set forever, so every subsequent call
		// returned immediately and the view never updated again.
		m_bDrawing = false;
	}

}
Example #15
0
/* Scans `image` (single-channel 32F, asserted below) in row-major order and
   collects coordinates of pixels that exceed the running maximum, keeping at
   most the `n` most recent ones in a GQueue of malloc'd CvPoint (newest at
   the head).  The caller owns the queue and its elements.
   NOTE(review): because `max` only ever increases along the scan, this
   returns the last n successive record-maxima in scan order, not the n
   globally largest pixels — confirm that this is the intended behavior. */
static GQueue*
get_n_max_coords (const IplImage *image,
                  int             n)
{
  GQueue *coords;
  float max = -1.0;
  CvSize size;
  CvPoint *new_elem, *old_elem;
  int stride, n_channels, depth;
  uchar *pixels;
  float *row;

  n_channels = image->nChannels;
  depth = image->depth;
  g_assert (n_channels == N_CHANNELS_GRAY &&
            depth == IPL_DEPTH_32F);

  cvGetRawData(image, &pixels, &stride, &size);
  coords = g_queue_new();

  for (int i = 0; i < size.height; ++i)
    {
      /* stride is in bytes, so advance on the uchar pointer before casting */
      row = (float *)(pixels + i * stride);
      for (int j = 0; j < size.width; ++j)
        {
          if (row[j] > max)
            {
              max = row[j];
              new_elem = (CvPoint*)malloc(sizeof(CvPoint));
              new_elem->x = j;
              new_elem->y = i;
              g_queue_push_head(coords, new_elem);
              /* cap the queue at n elements, dropping the oldest maxima */
              if (g_queue_get_length(coords) > n)
                {
                  old_elem = (CvPoint*)g_queue_pop_tail(coords);
                  free(old_elem);
                }
            }
        }
    }
  return coords;
}
Example #16
0
// ------------------------------------------------------------------------
double LABHistogram2D::GetNorm() {
    if( !CV_IS_HIST(h))
        ASSERT(false);
	
	int size[CV_MAX_DIM];

    int dims = cvGetDims( this->h->bins, size );
	int total = 1;

	for(int i = 0; i < dims; i++ )
		total *= size[i];

	float *ptr = 0;
    cvGetRawData( this->h->bins, (uchar**)&ptr);
	float sum = 0;
    for(int i = 0; i < total; i++ )
		sum += ptr[i];

	return sum;
}
/*************************************************
  Main body of the vision server:
    grabs camera frames, runs the image processing
    (red-blob extraction and labeling) and serves the
    results to socket clients via a listener thread.
************************************************/
int main (int argc, char **argv){
  CvSize size;
  int step;
  CvCapture *cap;
  IplImage *capture_image;
  IplImage *frame_image;
  IplImage *processed_image;
  IplImage *grayImage; 
  IplImage *binaryImage;
  unsigned char* binarydata;

  CvFont font;
  char text[50];
  char hostname[30];
  int s, i, port = 9000;
  pthread_t tid;

  /*** socket-communication setup (begin) ***/
  for (i=1;i<argc;i++){
    if (strcmp("-port", argv[i]) == 0) {
      port=atoi(argv[++i]);
    }}
  gethostname(hostname, sizeof(hostname));
  s = init_socket_server(hostname, &port);
  fprintf(stderr, "hostname %s\n", hostname);
  for (i=0; i< MAX_SOCKET ; i++) sockets[i].type=0;
  // accept connections in a separate thread
  fprintf(stderr, "Waiting connection...\n");
  pthread_create(&tid, NULL, acceptor, (void *)s);
  /*** socket-communication setup (end) ***/

  /** semaphore setup ***/
  raw_semaphore = semget((key_t)1111, 1, 0666|IPC_CREAT);
  if(raw_semaphore == -1){
    perror("semget failure");
    exit(EXIT_FAILURE);
  }
  /* NOTE(review): the same key 1111 is used for both semget calls, so
     raw_semaphore and process_semaphore refer to the SAME semaphore set —
     confirm whether a distinct key was intended for process_semaphore. */
  process_semaphore = semget((key_t)1111, 1, 0666|IPC_CREAT);
  if(process_semaphore == -1){
    perror("semget failure");
    exit(EXIT_FAILURE);
  }
  union semun semunion;
  semunion.val = 0;  // initial semaphore value
  if(semctl(raw_semaphore, 0, SETVAL, semunion) == -1){
    perror("semctl(init) failure");
    exit(EXIT_FAILURE);
  }
  if(semctl(process_semaphore, 0, SETVAL, semunion) == -1){
    perror("semctl(init) failure");
    exit(EXIT_FAILURE);
  }
  /** semaphore setup (end) ***/

  /** camera / image-capture setup (begin) ***/
  //camera initialization 
  if((cap = cvCreateCameraCapture(-1))==NULL){
    printf("Couldn't find any camera.\n");
    return -1;
  }
  capture_image = cvQueryFrame(cap);
  width = capture_image->width;
  height = capture_image->height;
  fprintf(stderr, "height %d, width %d\n", height, width);
  fprintf(stderr, "process height %d, process width %d\n", process_height, process_width);
  /** camera / image-capture setup (end) ***/

  /** preparation for image processing (red-color extraction) ***/
  // font setup (without this, cvPutText segfaults)
  float hscale = 1.0f;
  float vscale = 1.0f;
  float italicscale = 0.0f;
  int thickness = 3;
  cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, hscale, vscale, italicscale, thickness, CV_AA);
  // end of font setup
  // Set threshold
  rgb_thre[0] = R_MIN_THRE;
  rgb_thre[1] = R_MAX_THRE;
  rgb_thre[2] = G_MIN_THRE;
  rgb_thre[3] = G_MAX_THRE;
  rgb_thre[4] = B_MIN_THRE;
  rgb_thre[5] = B_MAX_THRE;


  // allocate the image buffers used for processing
  frame_image = cvCreateImage(cvSize(process_width, process_height), IPL_DEPTH_8U, 3);
  processed_image = cvCreateImage(cvSize(process_width, process_height), IPL_DEPTH_8U, 3);
  /** preparation for image processing (end) ***/

  
  /**** binarization used to measure blob areas ***/
  grayImage = cvCreateImage(cvGetSize(frame_image), IPL_DEPTH_8U, 1);
  binaryImage = cvCreateImage(cvGetSize(frame_image), IPL_DEPTH_8U, 1);
  
  //Labeling init
  label_buf = (int*)malloc(sizeof(int)*frame_image->width*frame_image->height);

  /**** main loop ****/
  while(1){
    CvPoint centroid;
    // capture a camera frame
    capture_image = cvQueryFrame(cap);
    if (capture_image==NULL) {
      fprintf(stderr, "capture_image is %p\n", capture_image);
      continue;
    }
    cvResize(capture_image, frame_image, CV_INTER_LINEAR);

    // process the camera frame
    maskRGB(frame_image, processed_image, rgb_thre);          // red extraction
    // Binarize
    myBinarize(processed_image, grayImage, binaryImage);
    cvDilate(binaryImage, grayImage, NULL, 10); // dilate
    cvErode(grayImage, binaryImage, NULL, 15);  // erode
    // Labeling
    cvGetRawData(binaryImage, &binarydata, &step, &size);
    labeling(binarydata, frame_image->height, frame_image->width, label_buf, step);
    label_num = labeling_result(&linfo, label_buf, frame_image->height, frame_image->width);
    // draw the results onto the processed image
    {
      int i,n;
      n=25;
      //fprintf(stderr, "num is %d\n", label_num);
      for(i=0; i<label_num; i++){
        //fprintf(stderr, "area %d, x %d y %d\n", linfo[i].area, (int)linfo[i].xpos, (int)linfo[i].ypos);
        centroid.x = (int) linfo[i].xpos;
        centroid.y = (int) linfo[i].ypos;
        drawCross(processed_image, &centroid, CV_RGB(0, 255, 0));                                 // draw a cross mark
        sprintf(text, "X: %d Y: %d AREA: %d", centroid.x, centroid.y, linfo[i].area);             // write the values
        cvPutText(processed_image, text, cvPoint(n, (height-n*(i+1))), &font, CV_RGB(0, 255, 0)); //
      }
    }
    // image -> rawdata (published to the socket thread under the semaphore)
    sema_wait(raw_semaphore);
    cvGetRawData(frame_image, &rawdata, &step, &size);
    
    // process image -> process data
    sema_wait(process_semaphore);
    cvGetRawData(processed_image, &processdata, &step, &size);

    //sleep
    usleep(30000);
  }
  //release the capture object
  cvReleaseCapture(&cap);
  return 0;
}
Example #18
0
/* Warps source into destination by a perspective transform.
   With the IPL library present it delegates to iplWarpPerspectiveQ;
   otherwise it rasterizes the destination quadrangle scanline by scanline,
   sampling the source with bilinear interpolation. */
static void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
{
    CV_FUNCNAME( "cvWarpPerspective" );

    __BEGIN__;

#ifdef __IPL_H__
    IplImage src_stub, dst_stub;
    IplImage* src_img;
    IplImage* dst_img;
    CV_CALL( src_img = cvGetImage( src, &src_stub ) );
    CV_CALL( dst_img = cvGetImage( dst, &dst_stub ) );
    iplWarpPerspectiveQ( src_img, dst_img, quad, IPL_WARP_R_TO_Q,
                         IPL_INTER_CUBIC | IPL_SMOOTH_EDGE );
#else

    int fill_value = 0;

    double c[3][3]; /* transformation coefficients */
    double q[4][2]; /* rearranged quad */

    int left = 0;
    int right = 0;
    int next_right = 0;
    int next_left = 0;
    double y_min = 0;
    double y_max = 0;
    double k_left, b_left, k_right, b_right;

    uchar* src_data;
    int src_step;
    CvSize src_size;

    uchar* dst_data;
    int dst_step;
    CvSize dst_size;

    double d = 0;
    int direction = 0;
    int i;

    if( !src || (!CV_IS_IMAGE( src ) && !CV_IS_MAT( src )) ||
        cvGetElemType( src ) != CV_8UC1 ||
        cvGetDims( src ) != 2 )
    {
        CV_ERROR( CV_StsBadArg,
            "Source must be two-dimensional array of CV_8UC1 type." );
    }
    if( !dst || (!CV_IS_IMAGE( dst ) && !CV_IS_MAT( dst )) ||
        cvGetElemType( dst ) != CV_8UC1 ||
        cvGetDims( dst ) != 2 )
    {
        CV_ERROR( CV_StsBadArg,
            "Destination must be two-dimensional array of CV_8UC1 type." );
    }

    CV_CALL( cvGetRawData( src, &src_data, &src_step, &src_size ) );
    CV_CALL( cvGetRawData( dst, &dst_data, &dst_step, &dst_size ) );

    CV_CALL( cvGetPerspectiveTransform( src_size, quad, c ) );

    /* if direction > 0 then vertices in quad follow in a CW direction,
       otherwise they follow in a CCW direction */
    direction = 0;
    for( i = 0; i < 4; ++i )
    {
        int ni = i + 1; if( ni == 4 ) ni = 0;
        int pi = i - 1; if( pi == -1 ) pi = 3;

        /* cross product of adjacent edges: its sign gives the turn direction
           at vertex i; all turns must agree for a convex quad */
        d = (quad[i][0] - quad[pi][0])*(quad[ni][1] - quad[i][1]) -
            (quad[i][1] - quad[pi][1])*(quad[ni][0] - quad[i][0]);
        int cur_direction = CV_SIGN(d);
        if( direction == 0 )
        {
            direction = cur_direction;
        }
        else if( direction * cur_direction < 0 )
        {
            direction = 0;
            break;
        }
    }
    if( direction == 0 )
    {
        CV_ERROR( CV_StsBadArg, "Quadrangle is nonconvex or degenerated." );
    }

    /* <left> is the index of the topmost quad vertice
       if there are two such vertices <left> is the leftmost one */
    left = 0;
    for( i = 1; i < 4; ++i )
    {
        if( (quad[i][1] < quad[left][1]) ||
            ((quad[i][1] == quad[left][1]) && (quad[i][0] < quad[left][0])) )
        {
            left = i;
        }
    }
    /* rearrange <quad> vertices in such way that they follow in a CW
       direction and the first vertice is the topmost one and put them
       into <q> */
    if( direction > 0 )
    {
        for( i = left; i < 4; ++i )
        {
            q[i-left][0] = quad[i][0];
            q[i-left][1] = quad[i][1];
        }
        for( i = 0; i < left; ++i )
        {
            q[4-left+i][0] = quad[i][0];
            q[4-left+i][1] = quad[i][1];
        }
    }
    else
    {
        for( i = left; i >= 0; --i )
        {
            q[left-i][0] = quad[i][0];
            q[left-i][1] = quad[i][1];
        }
        for( i = 3; i > left; --i )
        {
            q[4+left-i][0] = quad[i][0];
            q[4+left-i][1] = quad[i][1];
        }
    }

    left = right = 0;
    /* if there are two topmost points, <right> is the index of the rightmost one
       otherwise <right> */
    if( q[left][1] == q[left+1][1] )
    {
        right = 1;
    }

    /* <next_left> follows <left> in a CCW direction */
    next_left = 3;
    /* <next_right> follows <right> in a CW direction */
    next_right = right + 1;

    /* subtraction of 1 prevents skipping of the first row */
    y_min = q[left][1] - 1;

    /* left edge equation: y = k_left * x + b_left */
    k_left = (q[left][0] - q[next_left][0]) /
               (q[left][1] - q[next_left][1]);
    b_left = (q[left][1] * q[next_left][0] -
               q[left][0] * q[next_left][1]) /
                 (q[left][1] - q[next_left][1]);

    /* right edge equation: y = k_right * x + b_right */
    k_right = (q[right][0] - q[next_right][0]) /
               (q[right][1] - q[next_right][1]);
    b_right = (q[right][1] * q[next_right][0] -
               q[right][0] * q[next_right][1]) /
                 (q[right][1] - q[next_right][1]);

    for(;;)
    {
        int x, y;

        /* process the span of rows up to the nearer of the two next vertices */
        y_max = MIN( q[next_left][1], q[next_right][1] );

        int iy_min = MAX( cvRound(y_min), 0 ) + 1;
        int iy_max = MIN( cvRound(y_max), dst_size.height - 1 );

        double x_min = k_left * iy_min + b_left;
        double x_max = k_right * iy_min + b_right;

        /* walk through the destination quadrangle row by row */
        for( y = iy_min; y <= iy_max; ++y )
        {
            int ix_min = MAX( cvRound( x_min ), 0 );
            int ix_max = MIN( cvRound( x_max ), dst_size.width - 1 );

            for( x = ix_min; x <= ix_max; ++x )
            {
                /* calculate coordinates of the corresponding source array point */
                double div = (c[2][0] * x + c[2][1] * y + c[2][2]);
                double src_x = (c[0][0] * x + c[0][1] * y + c[0][2]) / div;
                double src_y = (c[1][0] * x + c[1][1] * y + c[1][2]) / div;

                int isrc_x = cvFloor( src_x );
                int isrc_y = cvFloor( src_y );
                double delta_x = src_x - isrc_x;
                double delta_y = src_y - isrc_y;

                uchar* s = src_data + isrc_y * src_step + isrc_x;

                int i00, i10, i01, i11;
                i00 = i10 = i01 = i11 = (int) fill_value;

                /* linear interpolation using 2x2 neighborhood
                   NOTE(review): the `<= src_size.width` / `<= src_size.height`
                   comparisons admit isrc_x == width (resp. height), which reads
                   one element past the last column/row — verify whether a
                   strict `<` was intended here. */
                if( isrc_x >= 0 && isrc_x <= src_size.width &&
                    isrc_y >= 0 && isrc_y <= src_size.height )
                {
                    i00 = s[0];
                }
                if( isrc_x >= -1 && isrc_x < src_size.width &&
                    isrc_y >= 0 && isrc_y <= src_size.height )
                {
                    i10 = s[1];
                }
                if( isrc_x >= 0 && isrc_x <= src_size.width &&
                    isrc_y >= -1 && isrc_y < src_size.height )
                {
                    i01 = s[src_step];
                }
                if( isrc_x >= -1 && isrc_x < src_size.width &&
                    isrc_y >= -1 && isrc_y < src_size.height )
                {
                    i11 = s[src_step+1];
                }

                double i0 = i00 + (i10 - i00)*delta_x;
                double i1 = i01 + (i11 - i01)*delta_x;

                ((uchar*)(dst_data + y * dst_step))[x] = (uchar) (i0 + (i1 - i0)*delta_y);
            }
            /* advance the span endpoints along the two active edges */
            x_min += k_left;
            x_max += k_right;
        }

        /* all four edges consumed: the quad is fully rasterized */
        if( (next_left == next_right) ||
            (next_left+1 == next_right && q[next_left][1] == q[next_right][1]) )
        {
            break;
        }

        /* switch to the next left edge when its lower vertex is reached */
        if( y_max == q[next_left][1] )
        {
            left = next_left;
            next_left = left - 1;

            k_left = (q[left][0] - q[next_left][0]) /
                       (q[left][1] - q[next_left][1]);
            b_left = (q[left][1] * q[next_left][0] -
                       q[left][0] * q[next_left][1]) /
                         (q[left][1] - q[next_left][1]);
        }
        /* likewise for the right edge */
        if( y_max == q[next_right][1] )
        {
            right = next_right;
            next_right = right + 1;

            k_right = (q[right][0] - q[next_right][0]) /
                       (q[right][1] - q[next_right][1]);
            b_right = (q[right][1] * q[next_right][0] -
                       q[right][0] * q[next_right][1]) /
                         (q[right][1] - q[next_right][1]);
        }
        y_min = y_max;
    }
#endif /* #ifndef __IPL_H__ */

    __END__;
}
Example #19
0
/* Grabs one frame from whichever camera backend was compiled in (CL Eye,
   PS3EYE, or generic OpenCV capture), optionally de-interlaces and
   undistorts it, and returns a pointer to an internally owned IplImage
   (the caller must NOT release it).  ts_grab / ts_retrieve, when non-NULL,
   receive timestamps around the grab (generic-capture path only). */
IplImage *
camera_control_query_frame(CameraControl* cc,
        PSMove_timestamp *ts_grab, PSMove_timestamp *ts_retrieve)
{
    IplImage* result;

#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
    // assign buffer-pointer to address of buffer
    cvGetRawData(cc->frame4ch, &cc->pCapBuffer, 0, 0);

    CLEyeCameraGetFrame(cc->camera, cc->pCapBuffer, 2000);

    // convert 4ch image to 3ch image
    const int from_to[] = { 0, 0, 1, 1, 2, 2 };
    const CvArr** src = (const CvArr**) &cc->frame4ch;
    CvArr** dst = (CvArr**) &cc->frame3ch;
    cvMixChannels(src, 1, dst, 1, from_to, 3);

    result = cc->frame3ch;
#elif defined(CAMERA_CONTROL_USE_PS3EYE_DRIVER)
    int stride = 0;
    unsigned char *pixels = ps3eye_grab_frame(cc->eye, &stride);

    // Convert pixels from camera to BGR
    unsigned char *cvpixels;
    cvGetRawData(cc->framebgr, &cvpixels, 0, 0);
    yuv422_to_bgr(pixels, stride, cvpixels, cc->width, cc->height);

    result = cc->framebgr;
#else
    cvGrabFrame(cc->capture);
    if (ts_grab != NULL) {
        *ts_grab = _psmove_timestamp();
    }
    result = cvRetrieveFrame(cc->capture, 0);
    if (ts_retrieve != NULL) {
        *ts_retrieve = _psmove_timestamp();
    }
#endif

    if (cc->deinterlace == PSMove_True) {
        /**
         * Dirty hack follows:
         *  - Clone image
         *  - Hack internal variables to make an image of all odd lines
         **/
        IplImage *tmp = cvCloneImage(result);
        tmp->imageData += tmp->widthStep; // odd lines
        tmp->widthStep *= 2;
        tmp->height /= 2;

        /**
         * Use nearest-neighbor to be faster. In my tests, this does not
         * cause a speed disadvantage, and tracking quality is still good.
         *
         * This will scale the half-height image "tmp" to the original frame
         * size by doubling lines (so we can still do normal circle tracking).
         **/
        cvResize(tmp, result, CV_INTER_NN);

        /**
         * Need to revert changes in tmp from above, otherwise the call
         * to cvReleaseImage would cause a crash.
         **/
        tmp->height = result->height;
        tmp->widthStep = result->widthStep;
        tmp->imageData -= tmp->widthStep; // odd lines
        cvReleaseImage(&tmp);
    }

    // undistort image (only when calibration maps have been loaded)
    if (cc->mapx && cc->mapy) {
        cvRemap(result, cc->frame3chUndistort,
                cc->mapx, cc->mapy,
                CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS,
                cvScalarAll(0));
        result = cc->frame3chUndistort;
    }


#if defined(CAMERA_CONTROL_DEBUG_CAPTURED_IMAGE)
    cvShowImage("camera input", result);
    cvWaitKey(1);
#endif

    return result;
}
Example #20
0
/**
 * Steps a per-pixel state machine that looks for a red -> green -> blue ->
 * yellow colour sequence in inputImg (3-channel 8U; channels are read as
 * H, S, V by the isRed/isGreen/isBlue/isYellow predicates).
 *
 * Per-pixel state lives in the three channels of stateImg:
 *   [0] state  : 0 = idle, 1..4 = colour phase currently being seen
 *   [1] sstate : frames spent in the current phase (expected near `period`)
 *   [2] bstate : consecutive frames of an unexpected colour (>1 resets)
 *
 * img (32F, single channel) holds a per-pixel score: set to 1.0 when a
 * full cycle completes (yellow back to red), decaying by 0.005 per frame
 * otherwise.  Emits img as the "output" event.
 */
void ColorSeqDetector::input(IplImage *inputImg)
{
  // Lazily allocate the score/state images; re-allocate on size change.
  if(!img)
  {
    img = cvCreateImage(cvGetSize(inputImg), IPL_DEPTH_32F, 1);
    cvZero(img);
  }
  if(!stateImg)
  {
    stateImg = cvCreateImage(cvGetSize(inputImg), IPL_DEPTH_8U, 3);
    cvZero(stateImg);
  }
  else if(img->width != inputImg->width
          || img->height != inputImg->height)
  {
    cvReleaseImage(&img);
    img = cvCreateImage(cvGetSize(inputImg), IPL_DEPTH_32F, 1);
    cvReleaseImage(&stateImg);
    stateImg = cvCreateImage(cvGetSize(inputImg), IPL_DEPTH_8U, 3);
    cvZero(stateImg);
    cvZero(img);
  }

  uchar *data;
  int step;
  CvSize size;  // was the confusing "CvSize(size);" -- same declaration
  uchar *state_data;
  int state_step;
  float *float_data;
  int float_step;

  // Raw pointers; steps converted from bytes to elements of each type.
  cvGetRawData(inputImg, (uchar**)&data, &step, &size);
  step /= sizeof(data[0]);
  cvGetRawData(img, (uchar**)&float_data, &float_step, &size);
  float_step /= sizeof(float_data[0]);
  cvGetRawData(stateImg, (uchar**)&state_data, &state_step, &size);
  state_step /= sizeof(state_data[0]);

  for(int y = 0; y < size.height;
      y++, data += step, state_data+=state_step, float_data+=float_step )
    for(int x = 0; x < size.width; x++ )
    {
      uchar h = data[3*x];
      uchar s = data[3*x+1];
      uchar v = data[3*x+2];
      uchar state = state_data[3*x];
      uchar sstate = state_data[3*x+1];
      // BUG FIX: bstate was read from channel 1 (the same byte as sstate)
      // and was never stored back, so the bad-frame counter had no effect.
      uchar bstate = state_data[3*x+2];
      float score = float_data[x];

      switch(state){
      case 0: // idle: wait for the first red frame
        if(isRed(h,s,v))
        {
          state = 1;
          sstate = 0;
          bstate = 0;
        }
        break;
      case 1: // red phase: expect green after roughly `period` frames
        if(isRed(h,s,v))
        {
          sstate++;
          if(sstate > period+2)
          {
            sstate = 0;
            state = 0;
          }
        }else if(sstate >= period-2 && isGreen(h,s,v))
        {
          state = 2;
          sstate = 0;
          bstate = 0;
        }else{
          bstate ++;
          if(bstate > 1)
          {
            state = 0;
            sstate = 0;
          }
        }
        break;
      case 2: // green phase: expect blue next
        if(isGreen(h,s,v))
        {
          sstate++;
          if(sstate > period+2)
          {
            sstate = 0;
            state = 0;
          }
        }else if(sstate >= period - 2 && isBlue(h,s,v))
        {
          state = 3;
          sstate = 0;
          bstate = 0;
        }else{
          bstate ++;
          if(bstate > 1)
          {
            state = 0;
            sstate = 0;
          }
        }
        break;
      case 3: // blue phase: expect yellow next
        if(isBlue(h,s,v))
        {
          sstate++;
          if(sstate > period+2)
          {
            sstate = 0;
            state = 0;
          }
        }else if(sstate >= period-2 && isYellow(h,s,v))
        {
          state = 4;
          sstate = 0;
          bstate = 0;
        }else{
          bstate ++;
          if(bstate > 1)
          {
            state = 0;
            sstate = 0;
          }
        }
        break;
      case 4: // yellow phase: red again closes the cycle -> full score
        if(isYellow(h,s,v))
        {
          sstate++;
          if(sstate > period+2)
          {
            sstate = 0;
            state = 0;
          }
        }else if(sstate >= period-2 && isRed(h,s,v))
        {
          state = 1;
          sstate = 0;
          bstate = 0;

          score = 1.;
        }else{
          bstate ++;
          if(bstate > 1)
          {
            state = 0;
            sstate = 0;
          }
        }
        break;
      default: // corrupt state byte: reset the pixel
        state = 0;
        sstate = 0;
        break;
      }

      state_data[3*x] = state;
      state_data[3*x+1] = sstate;
      state_data[3*x+2] = bstate;  // BUG FIX: persist the bad-frame counter

      // Slow decay so detections fade out over ~200 frames.
      score -= 0.005;
      if(score < 0. )
      {
        score = 0.;
      }
      float_data[x] = score;
    }

  emitNamedEvent("output", img);
}
Example #21
0
/**
 * Colour edge detector based on the Di Zenzo multi-channel structure
 * tensor.  For every interior pixel of the 3-channel 8U image srcArr the
 * per-channel horizontal/vertical finite differences are folded into the
 * 2x2 structure tensor [[m0, m1], [m1, m2]]; the pixel of the 1-channel
 * output dstArr is set to 1 when twice the tensor's largest eigenvalue
 * exceeds `thresh`, 0 otherwise.  Border rows/columns are zeroed.
 */
static void color_derv(const CvArr* srcArr, CvArr* dstArr, int thresh) {
    uchar* src = 0, *dst = 0;
    int dst_step, src_step;
    int x, y;
    CvSize size;

    cvGetRawData(srcArr, (uchar**)&src, &src_step, &size);
    cvGetRawData(dstArr, (uchar**)&dst, &dst_step, 0);

    /* zero the first and last output rows (no vertical neighbour there) */
    memset(dst, 0, size.width * sizeof(dst[0]));
    memset((uchar*)dst + dst_step*(size.height - 1), 0, size.width * sizeof(dst[0]));
    src += 3;  /* skip column 0 of every row (no left neighbour) */

/* kept: this macro is file-scope from here on and may be used below */
#define  CV_IABS(a)     (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))

    for (y = 1; y < size.height - 1; y++) {
        src += src_step;
        dst += dst_step;
        uchar* src0 = src;

        dst[0] = dst[size.width - 1] = 0;

        for (x = 1; x < size.width - 1; x++, src += 3) {
            int m[3];
            double val;
            int dh[3];  /* horizontal backward differences, per channel */
            int dv[3];  /* vertical backward differences, per channel */
            dh[0] = src[0] - src[-3];
            dv[0] = src[0] - src[-src_step];
            dh[1] = src[1] - src[-2];
            dv[1] = src[1] - src[1-src_step];
            dh[2] = src[2] - src[-1];
            dv[2] = src[2] - src[2-src_step];

            /* Structure tensor entries: m0 = dh.dh, m1 = dh.dv, m2 = dv.dv.
             * BUG FIX: the original stored the cross term in m[2] and put
             * dv.dv -- with a dh[2]*dh[2] typo in its last term -- in m[1],
             * so `val` below was not the tensor's largest eigenvalue. */
            m[0] = dh[0] * dh[0] + dh[1] * dh[1] + dh[2] * dh[2];
            m[1] = dh[0] * dv[0] + dh[1] * dv[1] + dh[2] * dv[2];
            m[2] = dv[0] * dv[0] + dv[1] * dv[1] + dv[2] * dv[2];

            /* 2 * lambda_max = trace + sqrt((m0 - m2)^2 + 4*m1^2) */
            val = (m[0] + m[2]) +
            sqrt(((double)((double)m[0] - m[2])) * (m[0] - m[2]) + (4.*m[1]) * m[1]);

            /* NOTE(review): this writes 0/1, not 0/255; the removed legacy
             * variant wrote 255 -- confirm what downstream code expects. */
            dst[x] = (uchar)(val > thresh);
        }

        src = src0;
    }

}
Example #22
0
/**
 * Tracking variant of cvSnakeImage: deforms the contour `points` on image
 * `src` to minimise its energy, additionally penalising deviation from a
 * reference contour `opoints` taken from a previous frame `orig` (weights
 * oalpha..oomega).  Common parameters mirror cvSnakeImage: coeffUsage is
 * CV_VALUE (scalar weights) or CV_ARRAY (per-point arrays), `win` is the
 * odd-sized search window, `criteria` the termination rule.  On success
 * the refined contour is written back into `points` and the iteration
 * count into `iterations`.
 */
void
cvSnakeImageTrack( const IplImage* src, CvPoint* points, int length,
		  float *alpha, float *beta, float *gamma,
		  const IplImage* orig,
		  CvPoint *opoints,
		  float *oalpha, float *obeta, float *ogamma, float *oteta, float *ozeta, float *oomega,
		  int coeffUsage, CvSize win,
		  CvTermCriteria criteria, int calcGradient,
		   float *oEarc_static, bool oEarc_static_ready,
		  int *iterations)
{

	CV_FUNCNAME( "cvSnakeImageTrack" );

	__BEGIN__;

	uchar *data;
	CvSize size;
	int step;

	/* only single-channel 8-bit images are supported */
	if( src->nChannels != 1 )
		CV_ERROR( CV_BadNumChannels, "input image has more than one channel" );

	if( src->depth != IPL_DEPTH_8U )
		CV_ERROR( CV_BadDepth, cvUnsupportedFormat );

	cvGetRawData( src, &data, &step, &size );
	uchar *odata;
	/* NOTE(review): `step` and `size` are overwritten here with the layout
	 * of `orig` -- this assumes `orig` matches `src`; confirm at call sites. */
	cvGetRawData( orig, &odata, &step, &size );

	IplImage *dux = NULL;
    IplImage *duy = NULL;
	IplImage *odux= NULL;
    IplImage *oduy= NULL;
	/* Gradient images are only needed (and allocated) when the omega
	 * term is in use; otherwise NULLs are passed down. */
	if(oomega!=0){
		dux = cvCreateImage(size, IPL_DEPTH_32F, 1 );
		duy = cvCreateImage(size, IPL_DEPTH_32F, 1 );
		odux = cvCreateImage(size, IPL_DEPTH_32F, 1 );
		oduy = cvCreateImage(size, IPL_DEPTH_32F, 1 );
		/* NOTE(review): order 2 requests *second* derivatives from cvSobel;
		 * confirm that is intended rather than first-order gradients. */
		cvSobel(src, dux, 2, 0, 5);
		cvSobel(src, duy, 0, 2, 5);
		cvSobel(orig, odux, 2, 0, 5);
		cvSobel(orig, oduy, 0, 2, 5);
	//	cvNamedWindow("t");
	//	cvShowImage("t",dux);
	//	cvShowImage("t",odux);
	}

	/* delegate the actual energy minimisation to the internal routine */
	CvStatus status = ( icvSnakeTrack8uC1R( data, step, size, points, length,
		alpha, beta, gamma, coeffUsage, win, criteria,
		calcGradient ? _CV_SNAKE_GRAD : _CV_SNAKE_IMAGE,
		odata, opoints, oalpha, obeta, ogamma, oteta, ozeta, oomega,
		dux, duy, odux, oduy,
		oEarc_static, oEarc_static_ready,
		iterations));
	if(oomega!=0){
		cvReleaseImage(&dux);
		cvReleaseImage(&duy);
		cvReleaseImage(&odux);
		cvReleaseImage(&oduy);
	}
	if (status != CV_OK) {
		char buff[64];
		sprintf(buff, "icvSnakeTrack8uC1R returned with error code %d", status);
		CV_ERROR(CV_StsBackTrace, buff);
	}
	__END__;
}
Example #23
0
/* process */
/* process: temporal-difference motion filter.
 *
 * The outer region of `destination` is a plain copy of `source`.  Inside
 * the central window (1/8..7/8 of the image in both directions) each
 * pixel's Rec.601 luma, mapped onto the 16..235 video range, is compared
 * against the two previously captured frames (globals previous0 and
 * previous1).  Where both successive frame differences exceed
 * DIFFERENCE_THRESHOLD the previous0 pixel colour is kept; otherwise the
 * output pixel is blanked.  Finally the frame history is rotated
 * (previous0 -> previous1, source -> previous0) for the next call.
 */
void process(IplImage *source, IplImage *destination) {
  uchar *dataS, *dataD, *dataP0, *dataP1;
  int bpp;
  int step;
  CvSize size;
  int x, y;

  /* outer region: pass the source through untouched */
  cvCopy( source, destination, NULL );

  bpp = bytes_per_pixel(source);   /* 3 for BGR */

  cvGetRawData(source, &dataS, &step, &size);
  cvGetRawData(destination, &dataD, NULL, NULL);
  cvGetRawData(previous0, &dataP0, NULL, NULL);
  cvGetRawData(previous1, &dataP1, NULL, NULL);

  {
    /* byte offset of the inner window's first pixel (top-left corner at
     * one eighth of the image in each direction) */
    const int inner = step*size.height/8 + bpp*size.width/8;
    const int y0 = size.height/8;

    for (y = y0; y < (size.height*7/8); y++) {
      const int row = step * (y - y0);
      uchar *pS  = dataS  + row + inner;
      uchar *pD  = dataD  + row + inner;
      uchar *pP0 = dataP0 + row + inner;
      uchar *pP1 = dataP1 + row + inner;

      for (x = size.width/8; x < (size.width*7/8);
           x++, pS += bpp, pD += bpp, pP0 += bpp, pP1 += bpp) {
        /* Rec.601 luma in [0,1], scaled to the 16..235 video range */
        int lumaS  = (int)(219.0 * ((0.114 * pS[0]  + 0.587 * pS[1]  + 0.299 * pS[2])  / 255.0)) + 16;
        int lumaP0 = (int)(219.0 * ((0.114 * pP0[0] + 0.587 * pP0[1] + 0.299 * pP0[2]) / 255.0)) + 16;
        int lumaP1 = (int)(219.0 * ((0.114 * pP1[0] + 0.587 * pP1[1] + 0.299 * pP1[2]) / 255.0)) + 16;

        int diff0 = lumaS - lumaP0;   /* |current - previous| */
        if (diff0 < 0) diff0 = -diff0;
        int diff1 = lumaP0 - lumaP1;  /* |previous - one before| */
        if (diff1 < 0) diff1 = -diff1;

        if (diff0 > DIFFERENCE_THRESHOLD && diff1 > DIFFERENCE_THRESHOLD) {
          /* moving pixel: keep the colour from the previous frame */
          pD[0] = pP0[0];
          pD[1] = pP0[1];
          pD[2] = pP0[2];
        } else {
          /* static pixel: blank it */
          pD[0] = 0;
          pD[1] = 0;
          pD[2] = 0;
        }
      }
    }
  }

  /* rotate the frame history for the next call */
  cvCopy( previous0, previous1, NULL );
  cvCopy( source, previous0, NULL );
}
Example #24
0
/**
 * FLTK timeout callback: fetches the next video frame, runs detection,
 * draws it into the video window and re-arms itself while playback is
 * active.  Also keeps the position slider and the capture position in
 * sync, records to AVI when enabled, and maintains an FPS estimate.
 */
static void get_next_frame(void*)
{
    static int repositioning = 0;
    IplImage* frame = 0;
    double new_pos = video_pos->value();
    
    /* If the user moved the slider (and we are not already mid-seek),
     * reposition the capture; otherwise copy the capture position back
     * onto the slider. */
    if( (new_pos-old_pos >= 1e-10 || new_pos-old_pos <= -1e-10) && !repositioning)
    {
        video_window->redraw();
        cvSetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO, new_pos );
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        printf("Repositioning\n");
        repositioning = 1;
    }
    else
    {
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        video_pos->value(new_pos);
        repositioning = 0;
    }
    old_pos = new_pos;
    frame = cvQueryFrame( capture );

    /* end of an AVI file: stop playback */
    if( frame == 0 && is_avi )
    {
        cb_Stop(0,0);
        return;
    }

    if( video_window && frame )
    {
        /* grow the window if the frame does not fit */
        if( video_window->w() < frame->width || video_window->h() < frame->height )
            root_window->size( (short)(frame->width + 40), (short)(frame->height + 150));

        CvRect rect = { video_window->x(), video_window->y(),
                        frame->width, frame->height };
        
        /* (re)allocate the working image if the frame grew */
        if( !video_image || video_image->width < rect.width ||
            video_image->height < rect.height )
        {
            cvReleaseImage( &video_image );
            video_image = cvCreateImage( cvSize( rect.width, rect.height ), 8, 3 );
        }

        cvSetImageROI( video_image, cvRect(0,0,rect.width, rect.height));
        /* origin == 1 means bottom-left origin: flip while copying */
        if( frame->origin == 1 )
            cvFlip( frame, video_image, 0 );
        else
            cvCopy( frame, video_image, 0 );

        DetectAndDrawFaces( video_image );
        if( writer && is_recorded )
        {
            cvWriteToAVI( writer, video_image );
        }
        /* fl_draw_image expects RGB order */
        cvCvtColor( video_image, video_image, CV_RGB2BGR );

        uchar* data = 0;
        int step = 0;
        CvSize size;
        cvGetRawData( video_image, &data, &step, &size );

        video_window->redraw();
        fl_draw_image( (uchar*)data, video_window->x(), video_window->y(),
                       size.width, size.height, 3, step );
    }

    if( started )
    {
        double cur_frame_stamp = get_time_accurate();
        // update fps -- exponential moving average with weight fps_alpha
        if( fps < 0 )
            fps = 1000/(cur_frame_stamp - prev_frame_stamp);
        else
            fps = (1-fps_alpha)*fps + fps_alpha*1000/(cur_frame_stamp - prev_frame_stamp);
        prev_frame_stamp = cur_frame_stamp;
        sprintf( fps_buffer, "FPS: %5.1f", fps );
        fps_box->label( fps_buffer );
        fps_box->redraw();
        if( total_frames > 0 )
        {
            /* NOTE: the else below binds to if(!is_loopy) -- exit when not
             * looping, otherwise rewind and restart the frame budget.
             * Braces around the inner if would make this explicit. */
            if( --total_frames == 0 )
                if( !is_loopy )
                    cb_Exit(0,0);
                else
                {
                    total_frames = total_frames0;
                    cvSetCaptureProperty( capture, CV_CAP_PROP_POS_FRAMES, start_pos );
                }
        }
        Fl::add_timeout( timeout, get_next_frame, 0 );
    }
}
Example #25
0
/// Exported pass-through to cvGetRawData: retrieves the raw data pointer,
/// the row step in bytes and the ROI size of an arbitrary CvArr.
/// Any of data/step/roiSize may be NULL to skip that output.
void cveGetRawData(CvArr* arr, uchar** data, int* step, CvSize* roiSize) {
    cvGetRawData(arr, data, step, roiSize);
}
Example #26
0
/**
 * Grabs and returns the next camera frame as a 3-channel IplImage.
 *
 * The returned image is owned by the CameraControl (or the capture
 * backend) and must not be released by the caller.  Depending on the
 * build configuration the frame comes either from the CL Eye driver
 * (4-channel capture mixed down to 3 channels) or from an OpenCV capture
 * device, optionally followed by a line-doubling deinterlace pass
 * (PSMOVE_USE_DEINTERLACE) and lens undistortion via cvRemap.
 */
IplImage *
camera_control_query_frame(CameraControl* cc)
{
    IplImage* result;

#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
    // assign buffer-pointer to address of buffer
    cvGetRawData(cc->frame4ch, &cc->pCapBuffer, 0, 0);

    // blocking grab with a 2-second timeout
    CLEyeCameraGetFrame(cc->camera, cc->pCapBuffer, 2000);

    // convert 4ch image to 3ch image
    const int from_to[] = { 0, 0, 1, 1, 2, 2 };
    const CvArr** src = (const CvArr**) &cc->frame4ch;
    CvArr** dst = (CvArr**) &cc->frame3ch;
    cvMixChannels(src, 1, dst, 1, from_to, 3);

    result = cc->frame3ch;
#else
    long start = psmove_util_get_ticks();
    result = cvQueryFrame(cc->capture);
    psmove_DEBUG("cvQueryFrame: %ld ms\n", psmove_util_get_ticks() - start);
#endif

#if defined(PSMOVE_USE_DEINTERLACE)
    /**
     * Dirty hack follows:
     *  - Clone image
     *  - Hack internal variables to make an image of all odd lines
     **/
    IplImage *tmp = cvCloneImage(result);
    tmp->imageData += tmp->widthStep; // odd lines
    tmp->widthStep *= 2;
    tmp->height /= 2;

    /**
     * Use nearest-neighbor to be faster. In my tests, this does not
     * cause a speed disadvantage, and tracking quality is still good.
     *
     * This will scale the half-height image "tmp" to the original frame
     * size by doubling lines (so we can still do normal circle tracking).
     **/
    cvResize(tmp, result, CV_INTER_NN);

    /**
     * Need to revert changes in tmp from above, otherwise the call
     * to cvReleaseImage would cause a crash.
     **/
    tmp->height = result->height;
    tmp->widthStep = result->widthStep;
    tmp->imageData -= tmp->widthStep; // odd lines
    cvReleaseImage(&tmp);
#endif

    // undistort image (only when a calibration/remap has been loaded)
    if (cc->mapx && cc->mapy) {
        cvRemap(result, cc->frame3chUndistort,
                cc->mapx, cc->mapy,
                CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS,
                cvScalarAll(0));
        result = cc->frame3chUndistort;
    }

    return result;
}
Example #27
0
 /**
  * Allocates and initialises a grey-level co-occurrence matrix (GLCM)
  * structure for the single-channel 8-bit image srcImage.
  *
  * stepMagnitude scales each of the numStepDirections (row,col) offsets in
  * srcStepDirections (defaults to 0/45/90/135 degrees when NULL).
  * optimizationType selects how grey levels are mapped: _LUT compacts the
  * matrix to the grey levels actually present, _NONE uses all 256, and
  * _HISTOGRAM is not implemented.  Returns NULL on error (via the
  * CV_ERROR/__BEGIN__ goto-style error handling).
  */
 CvGLCM*
cvCreateGLCM( const IplImage* srcImage,
              int stepMagnitude,
              const int* srcStepDirections,/* should be static array..
                                          or if not the user should handle de-allocation */
              int numStepDirections,
              int optimizationType )
{
    static const int defaultStepDirections[] = { 0,1, -1,1, -1,0, -1,-1 };

    int* memorySteps = 0;
    CvGLCM* newGLCM = 0;
    int* stepDirections = 0;

    CV_FUNCNAME( "cvCreateGLCM" );

    __BEGIN__;

    uchar* srcImageData = 0;
    CvSize srcImageSize;
    int srcImageStep;
    int stepLoop;
    const int maxNumGreyLevels8u = CV_MAX_NUM_GREY_LEVELS_8U;

    if( !srcImage )
        CV_ERROR( CV_StsNullPtr, "" );

    if( srcImage->nChannels != 1 )
        CV_ERROR( CV_BadNumChannels, "Number of channels must be 1");

    if( srcImage->depth != IPL_DEPTH_8U )
        CV_ERROR( CV_BadDepth, "Depth must be equal IPL_DEPTH_8U");

    // no Directions provided, use the default ones - 0 deg, 45, 90, 135
    if( !srcStepDirections )
    {
        srcStepDirections = defaultStepDirections;
    }

    // private copy so the caller's array is never modified below
    CV_CALL( stepDirections = (int*)cvAlloc( numStepDirections*2*sizeof(stepDirections[0])));
    memcpy( stepDirections, srcStepDirections, numStepDirections*2*sizeof(stepDirections[0]));

    cvGetRawData( srcImage, &srcImageData, &srcImageStep, &srcImageSize );

    // roll together Directions and magnitudes together with knowledge of image (step)
    CV_CALL( memorySteps = (int*)cvAlloc( numStepDirections*sizeof(memorySteps[0])));

    for( stepLoop = 0; stepLoop < numStepDirections; stepLoop++ )
    {
        stepDirections[stepLoop*2 + 0] *= stepMagnitude;
        stepDirections[stepLoop*2 + 1] *= stepMagnitude;

        // byte offset of the co-occurring pixel: rows * step + cols
        memorySteps[stepLoop] = stepDirections[stepLoop*2 + 0]*srcImageStep +
                                stepDirections[stepLoop*2 + 1];
    }

    CV_CALL( newGLCM = (CvGLCM*)cvAlloc(sizeof(newGLCM[0])));
    memset( newGLCM, 0, sizeof(newGLCM[0]) );
	//memset( newGLCM, 0, sizeof(newGLCM) );

    newGLCM->matrices = 0;
    newGLCM->numMatrices = numStepDirections;
    newGLCM->optimizationType = optimizationType;

    if( optimizationType <= CV_GLCM_OPTIMIZATION_LUT )
    {
        int lookupTableLoop, imageColLoop, imageRowLoop, lineOffset = 0;

        // if optimization type is set to lut, then make one for the image
        if( optimizationType == CV_GLCM_OPTIMIZATION_LUT )
        {
            // pass 1: mark every grey level that occurs in the image
            for( imageRowLoop = 0; imageRowLoop < srcImageSize.height;
                                   imageRowLoop++, lineOffset += srcImageStep )
            {
                for( imageColLoop = 0; imageColLoop < srcImageSize.width; imageColLoop++ )
                {
                    newGLCM->forwardLookupTable[srcImageData[lineOffset+imageColLoop]]=1;
                }
            }

            newGLCM->numLookupTableElements = 0;

            // pass 2: compact the marked levels into consecutive indices
            // and record the inverse mapping
            for( lookupTableLoop = 0; lookupTableLoop < maxNumGreyLevels8u; lookupTableLoop++ )
            {
                if( newGLCM->forwardLookupTable[ lookupTableLoop ] != 0 )
                {
                    newGLCM->forwardLookupTable[ lookupTableLoop ] =
                        newGLCM->numLookupTableElements;
                    newGLCM->reverseLookupTable[ newGLCM->numLookupTableElements ] =
                        lookupTableLoop;

                    newGLCM->numLookupTableElements++;
                }
            }
        }
        // otherwise make a "LUT" which contains all the gray-levels (for code-reuse)
        else if( optimizationType == CV_GLCM_OPTIMIZATION_NONE )
        {
            for( lookupTableLoop = 0; lookupTableLoop <maxNumGreyLevels8u; lookupTableLoop++ )
            {
                newGLCM->forwardLookupTable[ lookupTableLoop ] = lookupTableLoop;
                newGLCM->reverseLookupTable[ lookupTableLoop ] = lookupTableLoop;
            }
            newGLCM->numLookupTableElements = maxNumGreyLevels8u;
        }

        newGLCM->matrixSideLength = newGLCM->numLookupTableElements;
        icvCreateGLCM_LookupTable_8u_C1R( srcImageData, srcImageStep, srcImageSize,
                                          newGLCM, stepDirections,
                                          numStepDirections, memorySteps );
    }
    else if( optimizationType == CV_GLCM_OPTIMIZATION_HISTOGRAM )
    {
        CV_ERROR( CV_StsBadFlag, "Histogram-based method is not implemented" );

    /*  newGLCM->numMatrices *= 2;
        newGLCM->matrixSideLength = maxNumGreyLevels8u*2;

        icvCreateGLCM_Histogram_8uC1R( srcImageStep, srcImageSize, srcImageData,
                                       newGLCM, numStepDirections,
                                       stepDirections, memorySteps );
    */
    }

    __END__;

    // temporary work buffers are always freed, success or failure
    cvFree( &memorySteps );
    cvFree( &stepDirections );

    if( cvGetErrStatus() < 0 )
    {
        cvFree( &newGLCM );
    }

    return newGLCM;
}
Example #28
0
////////////////////////////////////////////////////////////////////
// Method:  OnDraw
// Class:   CCamView
// Purose:  CCamView drawing
// Input:   nothing
// Output:  nothing
////////////////////////////////////////////////////////////////////
/**
 * Renders one camera frame into the view: draws reference lines onto the
 * image in place, converts the raw pixel data into a wxBitmap scaled to
 * the view size, and schedules a repaint.  m_bDrawing serves as a simple
 * re-entrancy guard so overlapping draw requests are dropped.
 */
void CCamView::DrawCam( IplImage* pImg )
{
    // Re-entrancy guard: skip this frame if a draw is already in progress.
    if( m_bDrawing ) return;
    m_bDrawing = true;
    // if there was an image then we need to update view
    if( pImg )
    {
        // annotate the frame in place (no clone is made)
        IplImage *pDstImg = pImg;

        int nCamWidth = pImg->width;
        int nCamHeight = pImg->height;

        // draw a vertical line through the center of the image
        cvLine(pDstImg, cvPoint(nCamWidth/2, 0), cvPoint(nCamWidth/2, nCamHeight), CV_RGB( 0,255,0 ));

        // draw a horizontal line at pixel 25
        cvLine(pDstImg, cvPoint(0, 25), cvPoint(nCamWidth, 25), CV_RGB( 0,255,0 ));

        // process image from opencv to wxwidgets
        unsigned char *rawData;
        CvSize roiSize;
        int step = 0;

        // get raw data from ipl image
        cvGetRawData( pDstImg, &rawData, &step, &roiSize );

        // TRUE == static data: wxImage borrows rawData, which stays owned
        // by the IplImage and must outlive pWxImg (it does -- see below).
        wxImage *pWxImg = new wxImage( nCamWidth, nCamHeight, rawData, TRUE );

        // convert to bitmap to be used by the window to draw
        if (m_pBitmap)
        {
            delete m_pBitmap;
        }

        m_pBitmap = new wxBitmap( pWxImg->Scale(m_nWidth, m_nHeight) );

        m_bNewImage = true;
        m_bDrawing = false;

        Refresh( FALSE );

        delete pWxImg;
    }
    else
    {
        // BUG FIX: the guard flag was previously only cleared inside the
        // if(pImg) branch, so one NULL frame locked out all future draws.
        m_bDrawing = false;
    }
}
Example #29
0
/**
 * Computes windowed statistics of srcIm over a (2*size+1)^2 box filter,
 * writing a "dispersion" image into dispIm and a mean image into meanIm
 * (both scaled by 1/number^2).  Returns CV_NULLPTR_ERR on NULL inputs,
 * CV_OUTOFMEM_ERR if work buffers cannot be allocated, CV_NO_ERR on
 * success.
 *
 * NOTE(review): the cvSub result (Temp1_f, commented "dispersion powered
 * 2") is never used -- dispIm is converted from Temp2_f, i.e. the blurred
 * *squares*, not the variance.  Also the caller in
 * icvAdaptiveThreshold_StdDev passes (avgIm, dispIm) into the
 * (dispIm, meanIm) parameters, so the output naming appears crossed.
 * Both points should be confirmed against the intended algorithm before
 * any change.
 */
CvStatus
_CalcDispMean( const CvMat* srcIm, CvMat* dispIm, CvMat* meanIm, int size )
{
    //////////// check input parametrs ///////////////////
    if( (srcIm == NULL) || (dispIm == NULL) || (meanIm == NULL) )
    {
        return CV_NULLPTR_ERR;
    }

    //////////// variables ///////////////
    uchar *src;
    int step_srcIm;
    int number = 2 * size + 1;    // box-filter side length
    CvSize roi_srcIm;

    IplImage*     AverageIm_f;
    IplImage*     Temp1_f;
    IplImage*     Temp2_f;
    _CvConvState* convState;

    /////////////// initialising /////////////////////////
    cvGetRawData( srcIm, &src, &step_srcIm, &roi_srcIm );

    //////////// creating images ///////////////////////
    Temp2_f = cvCreateImage( roi_srcIm, IPL_DEPTH_32F, 1 );
    AverageIm_f = cvCreateImage( roi_srcIm, IPL_DEPTH_32F, 1 );
    Temp1_f = cvCreateImage( roi_srcIm, IPL_DEPTH_32F, 1 );
    icvBlurInitAlloc( roi_srcIm.width, cv32f, number, &convState );

    // release everything already allocated if any allocation failed
    if( (Temp2_f == NULL) || (AverageIm_f == NULL) || (Temp1_f == NULL) || !convState )
    {
        cvReleaseImage( &Temp2_f );
        cvReleaseImage( &AverageIm_f );
        cvReleaseImage( &Temp1_f );
        icvConvolFree( &convState );
        return CV_OUTOFMEM_ERR;
    }

    ////////////// calculating //////////////////////
    cvConvertScale( srcIm, Temp2_f, 1, 0 );               // src as float
    icvBlur_32f_C1R( (float*)Temp2_f->imageData,     Temp2_f->widthStep,
                     (float*)AverageIm_f->imageData, AverageIm_f->widthStep,
                     &roi_srcIm, convState, 0 );          // AverageIm_f = box sum of src
    cvMul( Temp2_f, Temp2_f, Temp1_f );      // square
    icvBlur_32f_C1R( (float*)Temp1_f->imageData, Temp1_f->widthStep,
                     (float*)Temp2_f->imageData, Temp2_f->widthStep,
                     &roi_srcIm, convState, 0 );          // Temp2_f = box sum of src^2

    cvSub( Temp2_f, AverageIm_f, Temp1_f );       // dispersion powered 2

    cvConvertScale( Temp2_f, dispIm, 1.0 / (number * number), 0 );    // convert to IPL_DEPTH_8U
    cvConvertScale( AverageIm_f, meanIm, 1.0 / (number * number), 0 );

    ///////////// relesing memory ///////////////////////////
    cvReleaseImage( &Temp1_f );
    cvReleaseImage( &Temp2_f );
    cvReleaseImage( &AverageIm_f );
    icvConvolFree( &convState );

    return CV_NO_ERR;

} // CvStatus _CalcDispMean( IplImage* srcIm, IplImage* dispIm, IplImage* meanIm , int size )
Example #30
0
/**
 * Adaptive thresholding driven by local standard deviation: where the
 * windowed dispersion exceeds epsilon^2 the local mean becomes the new
 * threshold, otherwise the most recent threshold is reused; each pixel is
 * then thresholded per `type` (binary, inverted binary, to-zero,
 * inverted to-zero), with binary results masked against maxValue.
 * `size` controls the (2*size+1) local window; errors are reported via
 * the CV_ERROR/__BEGIN__ mechanism.
 *
 * NOTE(review): _CalcDispMean's signature is (srcIm, dispIm, meanIm) but
 * the call below passes (srcIm, avgIm, dispIm) -- so `avg` ends up
 * holding the "dispersion" output and `disp` the "mean" output, the
 * opposite of what these variable names and the loop logic suggest.
 * Combined with the questionable dispersion computation inside
 * _CalcDispMean itself, this needs verification before touching.
 */
void
icvAdaptiveThreshold_StdDev( const CvMat* srcIm, CvMat* dstIm,
                             int maxValue, CvThreshType type,
                             int size, int epsilon )
{
    //////////////// Some variables  /////////////////////
    CvMat* avgIm = 0;
    CvMat* dispIm = 0;

    CvSize roi;

    uchar* src = 0;
    uchar* dst = 0;
    uchar* disp = 0;
    uchar* avg = 0;

    int  src_step;
    int  dst_step;
    int  disp_step;
    int  avg_step;

    int  thresh = 0;
    int  i, j;

    CV_FUNCNAME( "cvAdaptiveThreshold" );

    __BEGIN__;

    //////////////// Check for bad arguments ////////////////
    if( !srcIm || !dstIm )
        CV_ERROR( CV_StsNullPtr, "" );

    if( size < 1 || size > 4 )
        CV_ERROR( CV_StsBadSize, "" );

    cvGetRawData( srcIm, &src, &src_step, &roi );
    cvGetRawData( dstIm, &dst, &dst_step, 0 );

    CV_CALL( avgIm = cvCreateMat( roi.height, roi.width, CV_8UC1 ));
    CV_CALL( dispIm = cvCreateMat( roi.height, roi.width, CV_8UC1 ));

    // calc dispersion
    IPPI_CALL( _CalcDispMean( srcIm, avgIm, dispIm, size ));

    cvGetRawData( dispIm, &disp, &disp_step, 0 );
    cvGetRawData( avgIm, &avg, &avg_step, 0 );

    // all comparisons below are against epsilon squared
    epsilon = epsilon * epsilon;

    _FindInitiThreshold( src, src_step, roi, size, epsilon, &thresh );

    switch (type)
    {
    case CV_THRESH_BINARY:
        for( i = 0; i < roi.height; i++, src += src_step,
             dst += dst_step, disp += disp_step, avg += avg_step )
        {
            for( j = 0; j < roi.width; j++ )
            {
                int tdisp = disp[j];

                // high local variation: adopt the local average as threshold
                if( tdisp > epsilon )
                    thresh = avg[j];

                // (cond ? -1 : 0) & maxValue == maxValue when cond holds, else 0
                dst[j] = (uchar)((thresh < src[j] ? -1 : 0) & maxValue);
            }
        }
        break;

    case CV_THRESH_BINARY_INV:
        for( i = 0; i < roi.height; i++, src += src_step,
             dst += dst_step, disp += disp_step, avg += avg_step )
        {
            for( j = 0; j < roi.width; j++ )
            {
                int tdisp = disp[j];

                if( tdisp > epsilon )
                    thresh = avg[j];

                dst[j] = (uchar)((src[j] < thresh ? -1 : 0) & maxValue);
            }
        }
        break;

    case CV_THRESH_TOZERO:
        for( i = 0; i < roi.height; i++, src += src_step,
             dst += dst_step, disp += disp_step, avg += avg_step )
        {
            for( j = 0; j < roi.width; j++ )
            {
                int t;
                int tdisp = disp[j];

                if( tdisp > epsilon )
                    thresh = avg[j];

                // keep the source value above threshold, zero otherwise
                t = src[j];
                dst[j] = (uchar) ((thresh < t ? -1 : 0) & t);
            }
        }
        break;

    case CV_THRESH_TOZERO_INV:
        for( i = 0; i < roi.height; i++, src += src_step,
             dst += dst_step, disp += disp_step, avg += avg_step )
        {
            for( j = 0; j < roi.width; j++ )
            {
                int tdisp = disp[j];

                if( tdisp > epsilon )
                    thresh = avg[j];

                dst[j] = (uchar)((src[j] < thresh ? -1 : 0) & src[j]);
            }
        }
        break;

    default:
        CV_ERROR( CV_StsBadFlag, "" );
    }

    __END__;

    // scratch matrices are released on both success and error paths
    cvReleaseMat( &avgIm );
    cvReleaseMat( &dispIm );
}