Example #1
static NODE_IMPLEMENTATION(resize, Pointer)
{
    MuLangContext* context = static_cast<MuLangContext*>(NODE_THREAD.context());
    const Class*   c       = static_cast<const ImageType*>(NODE_THIS.type());
    ClassInstance* inObj   = NODE_ARG_OBJECT(0, ClassInstance);
    int            width   = NODE_ARG(1, int);
    int            height  = NODE_ARG(2, int);
    ClassInstance* outObj  = makeImage(context, c, width, height);
    ImageStruct*   inIm    = inObj->data<ImageStruct>();
    ImageStruct*   outIm   = outObj->data<ImageStruct>();

    CvMat inMat;
    CvMat outMat;

    cvInitMatHeader(&inMat,
                    inIm->height,
                    inIm->width,
                    CV_32FC(4),
                    inIm->data->data<float>(),
                    0);

    cvInitMatHeader(&outMat,
                    outIm->height,
                    outIm->width,
                    CV_32FC(4),
                    outIm->data->data<float>(),
                    0);

    cvResize(&inMat, &outMat, CV_INTER_AREA);

    NODE_RETURN(outObj);
}
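The pattern above leans on a key property of cvInitMatHeader: it only fills in a header over memory the caller already owns, copying no pixel data. A minimal standalone sketch of that contract (buffer name and sizes are illustrative only):

float pixels[16 * 16 * 4];      /* 16x16 image, 4 float channels */
CvMat header;
/* header borrows `pixels`; the caller keeps ownership and lifetime */
cvInitMatHeader(&header, 16, 16, CV_32FC(4), pixels, CV_AUTOSTEP);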
Example #2
void
condensPose::init(vpHomogeneousMatrix& cMo, float rotPerturb, float transPerturb)
{
	vpPoseVector pv;
	pv.buildFrom(cMo);
	float minRange[] = {
		pv[0] - rotPerturb, 
		pv[1] - rotPerturb, 
		pv[2] - rotPerturb, 
		pv[3] - transPerturb, 
		pv[4] - transPerturb, 
		pv[5] - transPerturb};
	float maxRange[] = {
		pv[0] + rotPerturb, 
		pv[1] + rotPerturb, 
		pv[2] + rotPerturb, 
		pv[3] + transPerturb, 
		pv[4] + transPerturb, 
		pv[5] + transPerturb};
	CvMat LB, UB;
	cvInitMatHeader(&LB, this->dim, 1, CV_32FC1, minRange);
	cvInitMatHeader(&UB, this->dim, 1, CV_32FC1, maxRange);

	cvConDensInitSampleSet(condens, &LB, &UB);
}
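This init() assumes a `dim` member and a `condens` filter created elsewhere. A hedged sketch of that setup (the sample count is arbitrary; names follow the snippet above):

int dim = 6;   /* 3 rotation + 3 translation components, matching the pose vector */
CvConDensation* condens = cvCreateConDensation(dim, dim, 100 /* samples */);
/* ... init(cMo, rotPerturb, transPerturb) and tracking happen here ... */
cvReleaseConDensation(&condens);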
Example #3
/* Draws a single OXFD feature point on an image.
Parameters:
img: pointer to the image
feat: the feature to draw
color: the color to draw in
*/
static void draw_oxfd_feature(IplImage* img, struct feature* feat,
	CvScalar color)
{
	double m[4] = { feat->a, feat->b, feat->b, feat->c };
	double v[4] = { 0 };		// eigenvector data
	double e[2] = { 0 };		// eigenvalue data
	CvMat M, V, E;
	double alpha, l1, l2;

	// compute the axes and orientation of the ellipse
	cvInitMatHeader(&M, 2, 2, CV_64FC1, m, CV_AUTOSTEP);		// the 2x2 matrix
	cvInitMatHeader(&V, 2, 2, CV_64FC1, v, CV_AUTOSTEP);		// matrix of two 2x1 eigenvectors
	cvInitMatHeader(&E, 2, 1, CV_64FC1, e, CV_AUTOSTEP);		// the eigenvalues
	cvEigenVV(&M, &V, &E, DBL_EPSILON, 0, 0);					// compute eigenvalues and eigenvectors
	l1 = 1 / sqrt(e[1]);
	l2 = 1 / sqrt(e[0]);
	alpha = -atan2(v[1], v[0]);
	alpha *= 180 / CV_PI;

	// draw the ellipse and a crosshair
	cvEllipse(img, cvPoint(feat->x, feat->y), cvSize(l2, l1), alpha,
		0, 360, CV_RGB(0, 0, 0), 3, 8, 0);
	cvEllipse(img, cvPoint(feat->x, feat->y), cvSize(l2, l1), alpha,
		0, 360, color, 1, 8, 0);
	cvLine(img, cvPoint(feat->x + 2, feat->y), cvPoint(feat->x - 2, feat->y),
		color, 1, 8, 0);
	cvLine(img, cvPoint(feat->x, feat->y + 2), cvPoint(feat->x, feat->y - 2),
		color, 1, 8, 0);
}
Example #4
/*
Draws a single Oxford-type feature

@param img image on which to draw
@param feat feature to be drawn
@param color color in which to draw
*/
void draw_oxfd_feature( IplImage* img, struct feature* feat, CvScalar color )
{
	double m[4] = { feat->a, feat->b, feat->b, feat->c };
	double v[4] = { 0 };
	double e[2] = { 0 };
	CvMat M, V, E;
	double alpha, l1, l2;

	/* compute axes and orientation of ellipse surrounding affine region */
	cvInitMatHeader( &M, 2, 2, CV_64FC1, m, CV_AUTOSTEP );
	cvInitMatHeader( &V, 2, 2, CV_64FC1, v, CV_AUTOSTEP );
	cvInitMatHeader( &E, 2, 1, CV_64FC1, e, CV_AUTOSTEP );
#if CV_MAJOR_VERSION==1
	cvEigenVV( &M, &V, &E, DBL_EPSILON );
#else
	cvEigenVV( &M, &V, &E, DBL_EPSILON, -1,-1 );
#endif
	l1 = 1 / sqrt( e[1] );
	l2 = 1 / sqrt( e[0] );
	alpha = -atan2( v[1], v[0] );
	alpha *= 180 / CV_PI;

	cvEllipse( img, cvPoint( feat->x, feat->y ), cvSize( l2, l1 ), alpha,
				0, 360, CV_RGB(0,0,0), 3, 8, 0 );
	cvEllipse( img, cvPoint( feat->x, feat->y ), cvSize( l2, l1 ), alpha,
				0, 360, color, 1, 8, 0 );
	cvLine( img, cvPoint( feat->x+2, feat->y ), cvPoint( feat->x-2, feat->y ),
			color, 1, 8, 0 );
	cvLine( img, cvPoint( feat->x, feat->y+2 ), cvPoint( feat->x, feat->y-2 ),
			color, 1, 8, 0 );
}
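A hedged usage sketch; `struct feature` is assumed to follow the Oxford layout of Rob Hess's SIFT library (x, y plus affine parameters a, b, c describing the ellipse a*x^2 + 2*b*x*y + c*y^2 = 1):

IplImage* canvas = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
struct feature f = { 0 };
f.x = 160; f.y = 120;               /* ellipse centre */
f.a = 0.01; f.b = 0.0; f.c = 0.02;  /* conic coefficients */
draw_oxfd_feature(canvas, &f, CV_RGB(255, 0, 0));
cvReleaseImage(&canvas);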
Example #5
void particle::genParticles(glm::vec3 particleV)
{

    particleCenterM.setTo(cv::Scalar(0));
    // range of the particle spread
    setRanges(particleV.x, particleV.y, particleV.z, 0.5);

    CvMat LB, UB;
    cvInitMatHeader(&LB, 3, 1, CV_32FC1, minRange);
    cvInitMatHeader(&UB, 3, 1, CV_32FC1, maxRange);

      CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);

      cvConDensInitSampleSet(condens, &LB, &UB);

      // identity matrix as the dynamics model
      condens->DynamMatr[0] = 1.0;
      condens->DynamMatr[1] = 0.0;
      condens->DynamMatr[2] = 0.0;

      condens->DynamMatr[3] = 0.0;
      condens->DynamMatr[4] = 1.0;
      condens->DynamMatr[5] = 0.0;

      condens->DynamMatr[6] = 0.0;
      condens->DynamMatr[7] = 0.0;
      condens->DynamMatr[8] = 1.0;

      cameraV.clear();
      newCameraV.clear();

      for (int i = 0; i < condens->SamplesNum; i++) {

         // computation of the deviation
//         float diffX = (particleV.x - condens->flSamples[i][0])/xRange;
//         float diffY = (particleV.y - condens->flSamples[i][1])/yRange;
//         float diffZ = (particleV.z - condens->flSamples[i][2])/zRange;
//         condens->flConfidence[i] = 1.0 / (sqrt(diffX * diffX + diffY * diffY + diffZ * diffZ));

         // the particle spread will be needed
         //cv::Point3f partPt(condens->flSamples[i][0], condens->flSamples[i][1], condens->flSamples[i][2]);
         glm::vec3 partCenter(condens->flSamples[i][0], condens->flSamples[i][1], condens->flSamples[i][2]);

         particleCenterM(i,0) = partCenter.x;
         particleCenterM(i,1) = partCenter.y;
         particleCenterM(i,2) = partCenter.z;
         genParticles(lookAtCamera, partCenter, i);
         //cout << "PartikelPos: X-Achse: " << condens->flSamples[i][0] << "/" << lastCam(0) << " Y-Achse: " << condens->flSamples[i][1] << "/" << lastCam(1)<< " Z-Achse: " << condens->flSamples[i][2] << "/" << lastCam(2)<< endl;
         //writeFile(condens->flSamples[i][0], condens->flSamples[i][1], condens->flSamples[i][2], "particlePos.txt");

       }

       //cvConDensUpdateByTime(condens);

       // best particle, though not one of the actual particle positions
       //cv::Point3f statePt(condens->State[0], condens->State[1], condens->State[2]);
       //newCameraV.push_back(statePt);
       //cout << "NeuePose: X-Achse: " << condens->State[0] << "/" << lastCam(0) << " Y-Achse: " << condens->State[1] << "/" << lastCam(1)<< " Z-Achse: " << condens->State[2] << "/" << lastCam(2)<< endl;
}
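The commented-out blocks sketch the rest of the condensation cycle: score each sample, then let OpenCV resample. A minimal version of that missing step (the weights here are placeholders; note also that `condens` is never released in this snippet, so a matching cvReleaseConDensation(&condens) would normally follow):

for (int i = 0; i < condens->SamplesNum; i++)
    condens->flConfidence[i] = 1.0f;   /* placeholder: uniform likelihoods */
cvConDensUpdateByTime(condens);        /* resample and apply DynamMatr */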
Example #6
	void BazARTracker::show_result(CamAugmentation &augment, IplImage *video, IplImage **dst)
	{
		if (getDebugMode()){
			if (*dst==0) *dst=cvCloneImage(video);
			else cvCopy(video, *dst);
		}

		CvMat *m  = augment.GetProjectionMatrix(0);
		// Flip the y axis (the OpenGL image origin and the camera image origin differ).
		CvMat *coordinateTrans = cvCreateMat(3, 3, CV_64F);
		cvmSetIdentity(coordinateTrans);
		cvmSet(coordinateTrans, 1, 1, -1);
		cvmSet(coordinateTrans, 1, 2, m_cparam->cparam.ysize);
		// cvMatMul must not write into one of its inputs, so go through a copy.
		CvMat *flipped = cvCreateMat(m->rows, m->cols, cvGetElemType(m));
		cvMatMul(coordinateTrans, m, flipped);
		cvCopy(flipped, m);
		cvReleaseMat(&flipped);
	
		// extract intrinsic camera parameters from bazar's projection matrix..
		GetARToolKitRTfromBAZARProjMat(g_matIntrinsic, m, matCameraRT4_4);
			
		cvTranspose(matCameraRT4_4, matCameraRT4_4);
		cvReleaseMat(&coordinateTrans);

		// Debug
		if (getDebugMode()) {
			// draw the coordinate system axes
			double w =video->width/2.0;
			double h =video->height/2.0;

			// 3D coordinates of an object
			double pts[4][4] = {
				{w,h,0, 1}, // 0,0,0,1
				{w*2,h,0, 1}, // w, 0
				{w,h*2,0, 1}, // 0, h
				{w,h,-w-h, 1} // 0, 0, -
			};

			double projected[3][4];	// buffer for the projected homogeneous points (the original snippet omitted this declaration)
			CvMat ptsMat, projectedMat;
			cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
			cvInitMatHeader(&projectedMat, 3, 4, CV_64FC1, projected);
		
			cvGEMM(m, &ptsMat, 1, 0, 0, &projectedMat, CV_GEMM_B_T );

			for (int i=0; i<4; i++) 
			{
				projected[0][i] /= projected[2][i];
				projected[1][i] /= projected[2][i];
			}

			// draw the projected lines
			cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
				cvPoint((int)projected[0][1], (int)projected[1][1]), CV_RGB(255,0,0), 2);
			cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
				cvPoint((int)projected[0][2], (int)projected[1][2]), CV_RGB(0,255,0), 2);
			cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
				cvPoint((int)projected[0][3], (int)projected[1][3]), CV_RGB(0,0,255), 2);
		}
	}
Example #7
static void augment_scene(CalibModel &model, IplImage *frame, IplImage *display)
{
  cvCopy(frame, display);

  if (!model.detector.object_is_detected) 
    return;

  CvMat *m = model.augm.GetProjectionMatrix(0);
  if (!m) return;

  double pts[4][4];
  double proj[4][4];
  CvMat ptsMat, projMat;
  cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
  cvInitMatHeader(&projMat, 3, 4, CV_64FC1, proj);
  for (int i=0; i<4; i++) {
    pts[0][i] = model.corners[i].x;
    pts[1][i] = model.corners[i].y;
    pts[2][i] = 0;
    pts[3][i] = 1;
  }
  cvMatMul(m, &ptsMat, &projMat);
  cvReleaseMat(&m);

  CvPoint projPts[4];
  for (int i=0;i<4; i++) {
    projPts[i].x = cvRound(proj[0][i]/proj[2][i]);
    projPts[i].y = cvRound(proj[1][i]/proj[2][i]);
  }

  CvMat *o2w = model.augm.GetObjectToWorld();
  float normal[3];
  for (int j=0;j<3;j++)
    normal[j] = cvGet2D(o2w, j, 2).val[0];
  cvReleaseMat(&o2w);

  // we want to relight a color present on the model image
  // with an irradiance coming from the irradiance map
  CvScalar color = cvGet2D(model.image, model.image->height/2, model.image->width/2);
  CvScalar irradiance = model.map.readMap(normal);

  // the camera has some gain and bias
  const float *g = model.map.getGain(0);
  const float *b = model.map.getBias(0);

  // relight the 3 RGB channels. The bias value expects 0 = black, 1 = white,
  // but images are stored with a white value of 255, so conversion is required.
  for (int i=0; i<3; i++) {
    color.val[i] = 255.0*(g[i]*(color.val[i]/255.0)*irradiance.val[i] + b[i]);
  }

  // draw a filled polygon with the relighted color
  cvFillConvexPoly(display, projPts, 4, color);
}
Example #8
// get diagonal of input array
CV_IMPL  CvMat*
cvGetDiag( const CvArr* arr, CvMat* submat, int diag )
{
    CvMat* res = 0;
    
    CV_FUNCNAME( "cvGetDiag" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;
    int pix_size; 

    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    pix_size = icvPixSize[CV_ARR_TYPE(mat->type)];

    if( diag >= 0 )
    {
        int len = mat->width - diag;
        
        if( len <= 0 )
            CV_ERROR( CV_StsOutOfRange, "" );

        len = CV_IMIN( len, mat->height );

        CV_CALL( cvInitMatHeader( submat, len, 1, mat->type,
                                  mat->data.ptr + diag*pix_size,
                                  mat->step + pix_size ));
    }
    else
    {
        int len = mat->height + diag;
        diag = -diag;
        
        if( len <= 0 )
            CV_ERROR( CV_StsOutOfRange, "" );

        len = CV_IMIN( len, mat->width );

        CV_CALL( cvInitMatHeader( submat, len, 1, mat->type,
                                  mat->data.ptr + diag*mat->step,
                                  mat->step + pix_size ));
    }

    res = submat;
    
    __END__;

    return res;
}
Example #9
/*
  Performs a perspective transformation on a single point.  That is, for a
  point (x, y) and a 3 x 3 matrix T this function returns the point
  (u, v), where
  
  [x' y' w']^T = T * [x y 1]^T,
  
  and
  
  (u, v) = (x'/w', y'/w').

  Note that affine transforms are a subset of perspective transforms.
  
  @param pt a 2D point
  @param T a perspective transformation matrix
  
  @return Returns the point (u, v) as above.
*/
CvPoint2D64f persp_xform_pt( CvPoint2D64f pt, CvMat* T )
{
  CvMat XY, UV;
  double xy[3] = { pt.x, pt.y, 1.0 }, uv[3] = { 0 };
  CvPoint2D64f rslt;

  cvInitMatHeader( &XY, 3, 1, CV_64FC1, xy, CV_AUTOSTEP );
  cvInitMatHeader( &UV, 3, 1, CV_64FC1, uv, CV_AUTOSTEP );
  cvMatMul( T, &XY, &UV );
  rslt = cvPoint2D64f( uv[0] / uv[2], uv[1] / uv[2] );

  return rslt;
}
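Usage sketch with an identity homography (any 3x3 CV_64FC1 matrix works, e.g. one estimated by cvFindHomography):

double h[9] = { 1, 0, 0,
                0, 1, 0,
                0, 0, 1 };
CvMat H;
cvInitMatHeader(&H, 3, 3, CV_64FC1, h, CV_AUTOSTEP);
CvPoint2D64f p = persp_xform_pt(cvPoint2D64f(10.0, 20.0), &H);
/* p == (10, 20) under the identity transform */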
Example #10
/* Computes the perspective transform of point pt: given a point pt and a
perspective transform matrix T, for a point (x,y) and matrix M this computes
[x',y',w']^T = M * [x,y,1]^T (^T denotes transpose),
and the transformed point is (u,v) = (x'/w', y'/w').
Note: affine transforms are a special case of perspective transforms.
Parameters:
pt: a 2D point
T: perspective transform matrix
Return value: the transformed point
*/
CvPoint2D64f persp_xform_pt(CvPoint2D64f pt, CvMat* T)
{
	CvMat XY, UV;		 // XY: 3x1 column vector for pt; UV: 3x1 column vector for the transformed point
	double xy[3] = { pt.x, pt.y, 1.0 }, uv[3] = { 0 };		// the underlying data
	CvPoint2D64f rslt;		// the result

	// initialize the matrix headers
	cvInitMatHeader(&XY, 3, 1, CV_64FC1, xy, CV_AUTOSTEP);
	cvInitMatHeader(&UV, 3, 1, CV_64FC1, uv, CV_AUTOSTEP);
	cvMatMul(T, &XY, &UV);		// matrix product T * XY, result stored in UV
	rslt = cvPoint2D64f(uv[0] / uv[2], uv[1] / uv[2]);		// recover the transformed point

	return rslt;
}
Example #11
/*
Calculates interpolated pixel contrast.  Based on Eqn. (3) in Lowe's paper.

@param dog_pyr difference of Gaussians scale space pyramid
@param octv octave of scale space
@param intvl within-octave interval
@param r pixel row
@param c pixel column
@param xi interpolated subpixel increment to interval
@param xr interpolated subpixel increment to row
@param xc interpolated subpixel increment to col

@return the interpolated contrast.
*/
double interp_contr( IplImage*** dog_pyr, int octv, int intvl, int r,
					int c, double xi, double xr, double xc )
{
	CvMat* dD, X, T;
	double t[1], x[3] = { xc, xr, xi };

	cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
	cvInitMatHeader( &T, 1, 1, CV_64FC1, t, CV_AUTOSTEP );
	dD = deriv_3D( dog_pyr, octv, intvl, r, c );
	cvGEMM( dD, &X, 1, NULL, 0, &T,  CV_GEMM_A_T );
	cvReleaseMat( &dD );

	return pixval32f( dog_pyr[octv][intvl], r, c ) + t[0] * 0.5;
}
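For reference, the value returned is Lowe's Eqn. (3), the DoG response at the interpolated extremum: D_hat = D + 0.5 * (dD/dx)^T * x_hat, where x_hat = (xc, xr, xi)^T is the subpixel offset and dD (from deriv_3D) holds the gradient dD/dx.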
Example #12
void	margBlobCorrector::init() {
	camera_matrix = cvCreateMat(3, 3, CV_32FC1);
	dist_coeffs	  = cvCreateMat(1, 4, CV_32FC1);
	
	float cam_mat[] = { 0, 0, 0,
						0, 0, 0,
						0, 0, 1 };
	
	// Copy the values instead of re-pointing the headers at stack arrays:
	// cvInitMatHeader on camera_matrix would leak the buffer allocated by
	// cvCreateMat and leave the matrix dangling once cam_mat goes out of scope.
	CvMat cam_hdr;
	cvInitMatHeader(&cam_hdr, 3, 3, CV_32FC1, cam_mat);
	cvCopy(&cam_hdr, camera_matrix);
	
	float dist_c[] = {0, 0, 0, 0};
	
	CvMat dist_hdr;
	cvInitMatHeader(&dist_hdr, 1, 4, CV_32FC1, dist_c);
	cvCopy(&dist_hdr, dist_coeffs);
}
Example #13
// get ROI (or minor) of input array
CV_IMPL  CvMat*
cvGetSubArr( const CvArr* arr, CvMat* submat, CvRect rect )
{
    CvMat* res = 0;
    
    CV_FUNCNAME( "cvGetSubArr" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    if( (rect.x|rect.y|rect.width|rect.height) < 0 )
        CV_ERROR( CV_StsBadSize, "" );

    if( rect.x + rect.width > mat->width ||
        rect.y + rect.height > mat->height )
        CV_ERROR( CV_StsBadSize, "" );

    CV_CALL( cvInitMatHeader( submat, rect.height, rect.width, mat->type,
                             mat->data.ptr + rect.y*mat->step +
                             rect.x*icvPixSize[CV_ARR_TYPE(mat->type)],
                             mat->step ));
    res = submat;
    
    __END__;

    return res;
}
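Usage sketch: the submatrix header shares data with the parent, so writes through the view land in the original matrix:

CvMat* big = cvCreateMat(4, 4, CV_32FC1);
cvSetZero(big);
CvMat view;
cvGetSubArr(big, &view, cvRect(1, 1, 2, 2));   /* 2x2 block at (1,1) */
cvSet(&view, cvRealScalar(7), NULL);           /* modifies `big` */
cvReleaseMat(&big);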
Example #14
/**
 * Calculates the pose of the camera. Since no translation is applied,
 * only the rotation is computed.
 *
 * [R|T]
 */
CvMat* MultipleViewGeomOld::calculateRotationMatrix(float angle) {

    // | R T |
    // | 0 1 |
    // 1 	 0 	    0 	0
    // 0 cos() -sin()   0
    // 0 sin()  cos()   0
    // 0     0      0   1

    float sinTeta = sin(angle);
    float cosTeta = cos(angle);

    float a[] = { 1, 0, 0, 0, 0, cosTeta, -sinTeta, 0, 0, sinTeta, cosTeta, 0,
                  0, 0, 0, 1
                };

    // Build the header over the stack array and clone it, so the returned
    // matrix owns its data; calling cvInitMatHeader on a matrix allocated
    // with cvCreateMat leaks the allocated block and borrows the stack array.
    CvMat rtHeader;
    cvInitMatHeader(&rtHeader, 4, 4, CV_32F, a);
    CvMat* rtMat = cvCloneMat(&rtHeader);

    LOG4CPLUS_DEBUG(myLogger,"Rotation R|T matrix for angle: " << angle << endl << printCvMat(rtMat));

    return rtMat;

}
Example #15
int main(int argc, char* argv[])
{

	// Creation method 1: create the matrix directly
	CvMat* pmat1;
	pmat1 = cvCreateMat(8, 9, CV_32FC1);

	// Creation method 2: create the header first, then allocate the data block
	CvMat* pmat2;
	pmat2 = cvCreateMatHeader(4, 5, CV_8UC1);
	cvCreateData(pmat2);
	
	// Creation method 3: build a matrix header over existing data
	float data[4] = { 3, 4, 6, 0 };
	CvMat pmat3;
	cvInitMatHeader(&pmat3, 2, 2, CV_32FC1, data);

	// Creation method 4: clone an existing matrix
	CvMat* pmat4;
	pmat4 = cvCloneMat(pmat2);

	// access the matrix's attributes (see the test() sketch below)
	test(pmat2);


	// release the matrices (pmat3 wraps a stack array, so it is not released)
	cvReleaseMat(&pmat1);
	cvReleaseMat(&pmat2);
	cvReleaseMat(&pmat4);
	
	return 0;
}
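The helper test() is not shown above; a hedged sketch of what such an attribute-access function might look like for the CV_8UC1 matrix passed in (the data left by cvCreateData is uninitialized, so the printed values are arbitrary):

void test(CvMat* m)
{
	printf("rows=%d cols=%d step=%d\n", m->rows, m->cols, m->step);
	for (int r = 0; r < m->rows; r++)
		for (int c = 0; c < m->cols; c++)
			printf("%d ", CV_MAT_ELEM(*m, uchar, r, c));
	printf("\n");
}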
Example #16
// TODO: Would fail if m_nChannels != 3
// RGB to LAB
bool CFeatureExtraction::GetColorChannels(CvMat * pChannels, CvMat * pColorChannelsArr[])
{
	printf("\nCFeatureExtraction::GetColorChannels in\n");
	int nSize = COLOR_CHANNEL_NUM;
	
	// Convert to LAB color space
	IplImage *pLabImg = cvCreateImage(cvSize(m_pSrcImg->width,m_pSrcImg->height), IPL_DEPTH_32F, nSize);
	cvCvtColor(m_pSrcImgFloat,pLabImg,CV_BGR2Lab);	

	// Put the 32F lab image data in a matrix header
	CvMat srcMat;
	cvInitMatHeader(&srcMat, m_nWidth*m_nHeight, nSize , CV_32F, (float*)pLabImg->imageData );

	// This matrix would hold the values represented in the new basis we've found
	//CvMat * pResultMat = cvCreateMat( m_nWidth*m_nHeight, nSize , CV_32F );
	CvMat * pResultMat = pChannels;
	
	// Actual calculation
	DoPCA(&srcMat, pResultMat, nSize, COLOR_CHANNEL_NUM);
	// Extracting the 3 primary channels
	//GetChannels(pResultMat, pColorChannelsArr, nSize, COLOR_CHANNEL_NUM);

	// Release temporaries
	cvReleaseImage(&pLabImg);
	printf("CFeatureExtraction::GetColorChannels out\n");
	return true;	
}
Example #17
// get column of input array
CV_IMPL  CvMat*
cvGetCol( const CvArr* arr, CvMat* submat, int col )
{
    CvMat* res = 0;
    
    CV_FUNCNAME( "cvGetCol" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    if( (unsigned)col >= (unsigned)mat->width )
        CV_ERROR( CV_StsOutOfRange, "" );

    CV_CALL( cvInitMatHeader( submat, mat->height, 1, mat->type,
                              mat->data.ptr + col*icvPixSize[CV_ARR_TYPE(mat->type)],
                              mat->step ));
    res = submat;
    
    __END__;

    return res;
}
Example #18
void	margBlobCorrector::setCameraMatrix(float _fX, float _fY, float _cX, float _cY){
	float _camera_matrix[9];
		
	_camera_matrix[0] = _fX;
	_camera_matrix[1] = 0;
	_camera_matrix[2] = _cX;
	_camera_matrix[3] = 0;
	_camera_matrix[4] = _fY;
	_camera_matrix[5] = _cY;
	_camera_matrix[6] = 0;
	_camera_matrix[7] = 0;
	_camera_matrix[8] = 1;
	
	fX = _fX;
	cX = _cX;
	fY = _fY;
	cY = _cY;
	
	ifX= 1.0/fX;
	ifY= 1.0/fY;
	
	// Copy the values into the persistent matrix instead of re-pointing its
	// header: cvInitMatHeader(camera_matrix, ...) would leave camera_matrix
	// referencing a buffer that no longer exists after this function returns.
	CvMat tmp;
	cvInitMatHeader(&tmp, 3, 3, CV_32FC1, _camera_matrix);
	cvCopy(&tmp, camera_matrix);
	
	calculateLensUndistBounds();
}
Example #19
// get row of input array
CV_IMPL  CvMat*
cvGetRow( const CvArr* arr, CvMat* submat, int row )
{
    CvMat* res = 0;
    
    CV_FUNCNAME( "cvGetRow" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    if( (unsigned)row >= (unsigned)mat->height )
        CV_ERROR( CV_StsOutOfRange, "" );

    CV_CALL( cvInitMatHeader( submat, 1, mat->width, mat->type,
                             mat->data.ptr + row*mat->step,
                             CV_AUTOSTEP ));
    res = submat;
    
    __END__;

    return res;
}
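Usage sketch: the row header is a borrowed view, so in-place operations on it modify the parent matrix:

CvMat* M = cvCreateMat(3, 3, CV_32FC1);
cvSetIdentity(M, cvRealScalar(1));
CvMat row;
cvGetRow(M, &row, 1);
cvConvertScale(&row, &row, 2.0, 0);   /* M's middle row is now [0 2 0] */
cvReleaseMat(&M);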
Example #20
/**
 * Projects a point in real world coordinates against the image
 * Output: image coordinate in pixels
 */
CvPoint MultipleViewGeomOld::getProjectionOf(float angle, CvPoint3D32f point) {

    // Look up the cached projection matrix for this angle.
    map<float, CvMat*>::iterator iter = projMatList.find(angle);

    CvMat *projMat = 0;   // assigned from the cache or computed below

    if (iter == projMatList.end()) {
        // projection matrix is not cached yet

        // Calculate rotation matrix
        CvMat* rtMat = calculateRotationMatrix(angle);

        // Calculate projection matrix
        projMat = calculateProjectionMatrix(rtMat);

        projMat = cvCloneMat(projMat);

        projMatList.insert(pair<float, CvMat*> (angle, projMat));

    } else {
        // otherwise it exists
        projMat = iter->second;
    }

    LOG4CPLUS_DEBUG(myLogger,"Projection matrix for angle: " << radToDegree(angle) << " and points: " << point << endl <<  printCvMat(projMat));

    //  [u v 1] = proj * [X Y Z 1]

    // Build stack headers over the local arrays; the original
    // cvCreateMat-then-cvInitMatHeader pattern leaked the allocated data.
    float uvContents[3];
    CvMat uvMat;
    cvInitMatHeader(&uvMat, 3, 1, CV_32F, uvContents);

    float xyzContents[] = { point.x, point.y, point.z, 1 };
    CvMat xyzMat;
    cvInitMatHeader(&xyzMat, 4, 1, CV_32F, xyzContents);

    cvMatMul(projMat, &xyzMat, &uvMat);

    LOG4CPLUS_DEBUG(myLogger, "Result [u v 1] = proj * [X Y Z 1]: " << endl << printCvMat(&uvMat));

    return cvPoint(cvRound(cvmGet(&uvMat, 0, 0)), cvRound(cvmGet(&uvMat, 1, 0)));

}
Example #21
/*
Calculates interpolated pixel contrast.  Based on Eqn. (3) in Lowe's paper.

@param dog_pyr difference of Gaussians scale space pyramid
@param octv octave of scale space
@param intvl within-octave interval
@param r pixel row
@param c pixel column
@param xi interpolated subpixel increment to interval
@param xr interpolated subpixel increment to row
@param xc interpolated subpixel increment to col

@return the interpolated contrast.
*/
 float SiftGPU::InterpContr( IplImage*** dog_pyr, int octv, int intvl, int r,
							int c, float xi, float xr, float xc )
{
	CvMat* dD, X, T;
	float t[1], x[3] = { xc, xr, xi };

	cvInitMatHeader( &X, 3, 1, CV_32FC1, x, CV_AUTOSTEP );	// x[] holds floats, so the header must be CV_32FC1 (not CV_64FC1)
	cvInitMatHeader( &T, 1, 1, CV_32FC1, t, CV_AUTOSTEP );
	dD = Deriv3D( dog_pyr, octv, intvl, r, c );

	//cvGEMM( dD, &X, 1, NULL, 0, &T,  CV_GEMM_A_T );
	t[0] = cvGetReal2D(dD, 0, 0) * x[0] + cvGetReal2D(dD, 1, 0) * x[1] + cvGetReal2D(dD, 2, 0) * x[2];

	cvReleaseMat( &dD );

	return pixval32f( dog_pyr[octv][intvl], r, c ) + t[0] * 0.5;
}
Example #22
int main()
{
	double datin[14][5]={
			10,11,12,13,14,
			20,21,22,23,24,
			30,31,32,33,34,
			40,41,42,43,44,
			50,51,52,53,54,
			60,61,62,63,64,
			70,71,72,73,74,
			80,81,82,83,84,
			90,91,92,93,94,
			100,101,102,103,104,
			110,111,112,113,114,
			120,121,122,123,124,
			130,131,132,133,134,
			140,141,142,143,144
			};
	double m_k[5][3]={
			30,32,34,
			40,42,44,
			50,52,54,
			60,62,64,
			70,72,74};

	CvMat Mdatin;
	CvMat Mk;
	CvMat *Mdatout = cvCreateMat(14, 3, CV_64FC1);

	// Wrap the stack arrays directly in matrix headers; calling cvCreateMat
	// first and then cvInitMatHeader would leak the allocated data blocks.
	cvInitMatHeader(&Mdatin, 14, 5, CV_64FC1, datin);
	cvInitMatHeader(&Mk,      5, 3, CV_64FC1, m_k);
	cvMatMul(&Mdatin, &Mk, Mdatout);

	imprimematriz(Mdatout);
	printf("\nFIN");
	getchar();getchar();

	cvReleaseMat(&Mdatout);   // the original released Mdatin twice and leaked Mdatout

	return 0;
}
Example #23
void Pattern::getExtrinsics(int patternSize, const Mat& cameraMatrix, const Mat& distortions)
{

	CvMat objectPts;//header for 3D points of pat3Dpts
	CvMat imagePts;//header for 2D image points of pat2Dpts
	CvMat intrinsics = cameraMatrix;
	CvMat distCoeff = distortions;
	CvMat rot = rotVec;
	CvMat tra = transVec;
	//		CvMat rotationMatrix = rotMat; // projectionMatrix = [rotMat tra];

	CvPoint2D32f pat2DPts[4];
	for (int i = 0; i<4; i++){
		pat2DPts[i].x = this->vertices.at(i).x;
		pat2DPts[i].y = this->vertices.at(i).y;
	}

	//3D points in pattern coordinate system
	CvPoint3D32f pat3DPts[4];
	pat3DPts[0].x = 0.0;
	pat3DPts[0].y = 0.0;
	pat3DPts[0].z = 0.0;
	pat3DPts[1].x = patternSize;
	pat3DPts[1].y = 0.0;
	pat3DPts[1].z = 0.0;
	pat3DPts[2].x = patternSize;
	pat3DPts[2].y = patternSize;
	pat3DPts[2].z = 0.0;
	pat3DPts[3].x = 0.0;
	pat3DPts[3].y = patternSize;
	pat3DPts[3].z = 0.0;

	cvInitMatHeader(&objectPts, 4, 3, CV_32FC1, pat3DPts);
	cvInitMatHeader(&imagePts, 4, 2, CV_32FC1, pat2DPts);

	//find extrinsic parameters
	/*cout << "objectPts is mat : " << CV_IS_MAT(&objectPts) << endl;
		cout << "imagePts is mat : " << CV_IS_MAT(&imagePts) << endl;
		cout << "intrinsics is mat : " << CV_IS_MAT(&intrinsics) << endl;
		cout << "distCoeff is mat : " << CV_IS_MAT(&distCoeff) << endl;
		cout << "rot is mat : " << CV_IS_MAT(&rot) << endl;
		cout << "tra is mat : " << CV_IS_MAT(&tra) << endl;*/
	cvFindExtrinsicCameraParams2(&objectPts, &imagePts, &intrinsics, &distCoeff, &rot, &tra);
}
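For reference, a hedged sketch of the equivalent call in the OpenCV 2.x C++ API, where solvePnP superseded cvFindExtrinsicCameraParams2 (names reuse the locals of the function above):

std::vector<cv::Point3f> objPts;
objPts.push_back(cv::Point3f(0, 0, 0));
objPts.push_back(cv::Point3f((float)patternSize, 0, 0));
objPts.push_back(cv::Point3f((float)patternSize, (float)patternSize, 0));
objPts.push_back(cv::Point3f(0, (float)patternSize, 0));
std::vector<cv::Point2f> imgPts;
for (int i = 0; i < 4; i++)
	imgPts.push_back(cv::Point2f(pat2DPts[i].x, pat2DPts[i].y));
cv::Mat rvec, tvec;
cv::solvePnP(objPts, imgPts, cameraMatrix, distortions, rvec, tvec);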
Example #24
/*
Calculates interpolated pixel contrast.  Based on Eqn. (3) in Lowe's paper.
@param dog_pyr difference of Gaussians scale space pyramid
@param octv octave of scale space
@param intvl within-octave interval
@param r pixel row
@param c pixel column
@param xi interpolated subpixel increment to interval
@param xr interpolated subpixel increment to row
@param xc interpolated subpixel increment to col
@return the interpolated contrast.
*/
static double interp_contr( IplImage*** dog_pyr, int octv, int intvl, int r,
							int c, double xi, double xr, double xc )
{
	CvMat* dD, X, T;
	double t[1], x[3] = { xc, xr, xi };

    // column vector X of the subpixel offsets in the x, y and σ directions
	cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
    // T holds the result of the matrix product: a single scalar
	cvInitMatHeader( &T, 1, 1, CV_64FC1, t, CV_AUTOSTEP );
    // partial derivatives of the DoG pyramid at the point, in the x, y and
    // scale directions, stored in the column vector dD
	dD = deriv_3D( dog_pyr, octv, intvl, r, c );
    // matrix product: T = dD^T * X
	cvGEMM( dD, &X, 1, NULL, 0, &T,  CV_GEMM_A_T );
	cvReleaseMat( &dD );

    // return the interpolated contrast: D + 0.5 * dD^T * X
    // (see the SIFT paper for the derivation)
	return pixval32f( dog_pyr[octv][intvl], r, c ) + t[0] * 0.5;
}
Example #25
bool CFeatureExtraction::DoPCA(CvMat * pMat, CvMat * pResultMat, int nSize, int nExpectedSize)
{
	printf("\nCFeatureExtraction::DoPCA in\n");
	int i;	

	printf("DoPCA: Sort our data sets in a vector each\n");	
	// Sort our data sets in a vector each
	CvMat ** pDataSet = new CvMat*[m_nWidth*m_nHeight];
	float * pData = pMat->data.fl;
	for (i=0;i<m_nWidth*m_nHeight;i++)
	{
		pDataSet[i] = (CvMat*) malloc(sizeof(CvMat));
		cvInitMatHeader(pDataSet[i], 1, nSize, CV_32FC1, &pData[i*nSize]);
	}
	
	printf("DoPCA: Calc covariance matrix\n");
	// Calc covariance matrix
	CvMat* pCovMat = cvCreateMat( nSize, nSize, CV_32F );
	CvMat* pMeanVec = cvCreateMat( 1, nSize, CV_32F );
	
	cvCalcCovarMatrix( (const void **)pDataSet, m_nWidth*m_nHeight, pCovMat, pMeanVec, CV_COVAR_SCALE | CV_COVAR_NORMAL );
	
	cvReleaseMat(&pMeanVec);
	
	printf("DoPCA: Do the SVD decomposition\n");
	// Do the SVD decomposition
	CvMat* pMatW = cvCreateMat( nSize, 1, CV_32F );
	CvMat* pMatV = cvCreateMat( nSize, nSize, CV_32F );
	CvMat* pMatU = cvCreateMat( nSize, nSize, CV_32F );
	
	cvSVD(pCovMat, pMatW, pMatU, pMatV, CV_SVD_MODIFY_A+CV_SVD_V_T);
	
	cvReleaseMat(&pCovMat);
	cvReleaseMat(&pMatW);
	cvReleaseMat(&pMatV);

	printf("DoPCA: Extract the requested number of dominant eigen vectors\n");
	// Extract the requested number of dominant eigen vectors
	CvMat* pEigenVecs = cvCreateMat( nSize, nExpectedSize, CV_32F );
	for (i=0;i<nSize;i++)
		memcpy(&pEigenVecs->data.fl[i*nExpectedSize], &pMatU->data.fl[i*nSize], nExpectedSize*sizeof(float));

	printf("DoPCA: Transform to the new basis\n");
	// Transform to the new basis	
	cvMatMul(pMat, pEigenVecs, pResultMat);
	cvReleaseMat(&pMatU);
	cvReleaseMat(&pEigenVecs);
	
	for (i = 0; i < m_nHeight * m_nWidth; i++)
		free(pDataSet[i]);	// allocated with malloc above, so free() (not delete[])
	delete [] pDataSet;

	printf("CFeatureExtraction::DoPCA out\n");
	return true;
}
Example #26
CvMatrix::CvMatrix( int _rows, int _cols, int _type, CvMemStorage* storage, bool alloc_data )
{
    if( storage )
    {
        matrix = (CvMat*)cvMemStorageAlloc( storage, sizeof(*matrix) );
        cvInitMatHeader( matrix, _rows, _cols, _type, alloc_data ?
            cvMemStorageAlloc( storage, _rows*_cols*CV_ELEM_SIZE(_type) ) : 0 );
    }
    else
        matrix = 0;
}
Example #27
// Rotate around the y axis
void yRotate(CvMat *vec, float angle, CvMat *res)
{
    angle = D2R(angle);
    float vals[] = { cos(angle), 0, -sin(angle), 0,
                     0, 1, 0, 0,
                     sin(angle), 0, cos(angle), 0,
                     0, 0, 0, 1};    
    CvMat mat;
    cvInitMatHeader( &mat, 4, 4, CV_32FC1, vals );
    // Route the product through a temporary so callers may pass the same
    // 4x1 vector as vec and res (as Measurement_Inverse does below);
    // cvMatMul gives wrong results when the output aliases an input.
    float tmp[4];
    CvMat tmpMat;
    cvInitMatHeader( &tmpMat, 4, 1, CV_32FC1, tmp );
    cvMatMul(&mat, vec, &tmpMat);
    cvCopy(&tmpMat, res);
}
Example #28
/*
 * Constructor
 *
 * (u0,v0) = image centre
 * alfaU = f*ku = pixels per unit of measurement along u (e.g. 1 pixel/micrometre)
 * alfaV = f*kv = pixels per unit of measurement along v
 */
MultipleViewGeomOld::MultipleViewGeomOld(int u0, int v0, float alfaU, float alfaV) :
    myLogger(log4cplus::Logger::getInstance("multipleViewGeom")) {

    // Orthographic projection. Build the header on the stack and clone it so
    // pMat owns its data; cvCreateMat followed by cvInitMatHeader would leak
    // the allocated block and borrow the stack array instead.
    float pContents[] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1 };
    CvMat pHeader;
    cvInitMatHeader(&pHeader, 3, 4, CV_32F, pContents);
    pMat = cvCloneMat(&pHeader);

    calculateCameraMatrix(u0, v0, alfaU, alfaV);

}
Example #29
// Measurement inverse. Given a measurement ( {x,y} pixel coordinates ) and pose calculate the 
// world coordinates of the feature
int    Measurement_Inverse(IplImage *img, int px, int py, float focal_length,
                            float pitch, float roll, float alt,
                            CvMat *feature_point_res)
{
    struct line  l;
    struct plane pl;
    float plane_point[] = { 0, 0,  alt, 1 };
    float tmps[] = { px, py, focal_length, 1 };
    float def_bear[] = { 0, 1, 0, 0 };
    CvMat pix, def_bear_mat;
    
    cvInitMatHeader(&def_bear_mat, 4, 1, CV_32FC1, def_bear);
    cvInitMatHeader(&pix, 4, 1, CV_32FC1, tmps);
    
    // Init line 
    cvInitMatHeader(&(l.p0), 4, 1, CV_32FC1, line_p0);
    
    // Init plane
    cvInitMatHeader(&(pl.p), 4, 1, CV_32FC1, plane_point);
    cvInitMatHeader(&(pl.norm), 4, 1, CV_32FC1, plane_norm);
    
    
    // Convert the pixels to meters units
    pixelTometer(img, &pix);
    
    // Rotate with pitch and roll
    yRotate(&pix, roll, &pix);
    xRotate(&pix, pitch, &pix);
    
    l.p1 = &pix;
    
    // Check for no solution to plane intersection
    if ( !plane_intersection(&pl, &l, &pix) )
        return -1;
    
    M_INV_X(feature_point_res) = FLOAT_MAT_ELEM(&pix, 0, 0);
    M_INV_Y(feature_point_res) = FLOAT_MAT_ELEM(&pix, 1, 0);
    
    return 1;
}
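For reference, the plane_intersection() step (not shown) presumably solves the standard line/plane intersection: for the ray p(t) = p0 + t*(p1 - p0) and the ground plane n . (p - pl) = 0, the hit point uses t = n . (pl - p0) / n . (p1 - p0); pitch and roll are applied to the pixel ray before intersecting.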
Example #30
grow_mat::grow_mat(int r, int c, int type, void* data, int step)
{
	clear_all();
	if (data)
	{
		CvMat temp_mat;
		cvInitMatHeader(&temp_mat, r, c, type, data, step );
		_mat = cvCreateMat(r, c, type);
		cvCopy(&temp_mat,_mat);
		cvGetSubRect(_mat, this, cvRect(0,0,c,r));
		return;
	}
	init(r,c,type,0,0);
}