Example No. 1
// TODO: Would fail if m_nChannels != 3
// BGR to Lab
bool CFeatureExtraction::GetColorChannels(CvMat * pChannels, CvMat * pColorChannelsArr[])
{
	printf("\nCFeatureExtraction::GetColorChannels in\n");
	int nSize = COLOR_CHANNEL_NUM;
	
	// Convert to LAB color space
	IplImage *pLabImg = cvCreateImage(cvSize(m_pSrcImg->width,m_pSrcImg->height), IPL_DEPTH_32F, nSize);
	cvCvtColor(m_pSrcImgFloat,pLabImg,CV_BGR2Lab);	

	// Put the 32F lab image data in a matrix header
	CvMat srcMat;
	cvInitMatHeader(&srcMat, m_nWidth*m_nHeight, nSize , CV_32F, (float*)pLabImg->imageData );

	// This matrix will hold the values represented in the new basis we've found
	//CvMat * pResultMat = cvCreateMat( m_nWidth*m_nHeight, nSize , CV_32F );
	CvMat * pResultMat = pChannels;
	
	// Actual calculation
	DoPCA(&srcMat, pResultMat, nSize, COLOR_CHANNEL_NUM);
	// Extracting the 3 primary channels
	//GetChannels(pResultMat, pColorChannelsArr, nSize, COLOR_CHANNEL_NUM);

	// Release the temporary Lab image
	cvReleaseImage(&pLabImg);
	printf("CFeatureExtraction::GetColorChannels out\n");
	return true;	
}
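DoPCA itself is not part of this snippet. Below is a minimal sketch of what a row-wise PCA projection could look like with the old OpenCV C API; the function name and parameter order simply mirror the call above and are an assumption, not the original implementation.

// Hypothetical DoPCA sketch: treat each row of pMat as one observation and
// project it onto the nResultDim leading principal components.
void DoPCA_sketch(CvMat* pMat, CvMat* pResultMat, int nDim, int nResultDim)
{
	CvMat* pMean = cvCreateMat(1, nDim, CV_32F);
	CvMat* pEigVals = cvCreateMat(1, nDim, CV_32F);
	CvMat* pEigVecs = cvCreateMat(nDim, nDim, CV_32F);

	// Rows are observations, columns are variables
	cvCalcPCA(pMat, pMean, pEigVals, pEigVecs, CV_PCA_DATA_AS_ROW);

	// Keep only the strongest components and project the data onto them
	CvMat subVecs;
	cvGetRows(pEigVecs, &subVecs, 0, nResultDim);
	cvProjectPCA(pMat, pMean, &subVecs, pResultMat);

	cvReleaseMat(&pMean);
	cvReleaseMat(&pEigVals);
	cvReleaseMat(&pEigVecs);
}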
Example No. 2
bool CFeatureExtraction::GetTextureChannels(CvMat * pChannels, CvMat * pTextureChannelsArr[])
{
	printf("\nCFeatureExtraction::GetTextureChannels in\n");
	int gaborSize = 24;
	int histSize = 30;
	int vectorSize = gaborSize+histSize;
	
	// Calc the full histogram vectors
	CvMat * pHistMat = cvCreateMat( m_nWidth*m_nHeight, histSize , CV_32F );
	CalcHistogram(m_pSrcImg, pHistMat);
	// Do we need to normalize?
	cvNormalize(pHistMat, pHistMat, 0, 1, CV_MINMAX);

	CvMat * pGaborMat = cvCreateMat (m_nWidth * m_nHeight, gaborSize, CV_32F);
	GetGaborResponse(pGaborMat);
	// Do we need to normalize?
	//cvNormalize(pGaborMat, pGaborMat, 0, 1, CV_MINMAX);

	// Our merged matrix
	CvMat * pTextureMat = cvCreateMat( m_nWidth*m_nHeight, vectorSize , CV_32F );
	
	// Combine into a single matrix
	MergeMatrices(pGaborMat, pHistMat, pTextureMat);

	// This matrix will hold the values represented in the new basis we've found
	//CvMat * pResultMat = cvCreateMat( m_nWidth*m_nHeight, TEXTURE_CHANNEL_NUM , CV_32F );
	CvMat * pResultMat = pChannels;
	
	// Actual calculation
	DoPCA(pTextureMat, pResultMat, vectorSize, TEXTURE_CHANNEL_NUM);
	// Extracting the 3 primary channels
	//GetChannels(pResultMat, pTextureChannelsArr, TEXTURE_CHANNEL_NUM, TEXTURE_CHANNEL_NUM);

	cvReleaseMat(&pHistMat);
	cvReleaseMat(&pGaborMat);
	cvReleaseMat(&pTextureMat);
	
	printf("CFeatureExtraction::GetTextureChannels out\n");

	return true;
}
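MergeMatrices is likewise not shown in the snippet. Assuming it simply concatenates the two inputs column-wise (Gabor responses on the left, histogram features on the right), a sketch could look like this:

// Hypothetical MergeMatrices sketch: copy pMatA into the left columns of pDst
// and pMatB into the right columns. Assumes matching row counts and that pDst
// has pMatA->cols + pMatB->cols columns. Not the original implementation.
void MergeMatrices_sketch(CvMat* pMatA, CvMat* pMatB, CvMat* pDst)
{
	CvMat sub;
	cvGetCols(pDst, &sub, 0, pMatA->cols);
	cvCopy(pMatA, &sub);
	cvGetCols(pDst, &sub, pMatA->cols, pMatA->cols + pMatB->cols);
	cvCopy(pMatB, &sub);
}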
Example No. 3
//============================================================================
void AAM_TDM::Train(const file_lists& pts_files, const file_lists& img_files, 
					const AAM_PAW& m_warp, 
					double texture_percentage /* = 0.975 */, 
					bool registration /* = true */)
{
	int nPoints = m_warp.nPoints();
	int nPixels = m_warp.nPix()*3;
	int nSamples = pts_files.size();
	
	CvMat *AllTextures = cvCreateMat(nSamples, nPixels, CV_64FC1);
	
	CvMat * matshape = cvCreateMat(1, nPoints*2, CV_64FC1);
	for(int i = 0; i < nSamples; i++)
	{
		IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
		
		AAM_Shape trueshape;
		if(!trueshape.ReadAnnotations(pts_files[i]))
			trueshape.ScaleXY(image->width, image->height);
		trueshape.Point2Mat(matshape);
		AAM_Common::CheckShape(matshape, image->width, image->height);
		
		CvMat t;
		cvGetRow(AllTextures, &t, i);
		m_warp.CalcWarpTexture(matshape, image, &t);
		
		cvReleaseImage(&image);
	}
	cvReleaseMat(&matshape);
	
	// Align textures to minimize lighting variation
	AAM_TDM::AlignTextures(AllTextures);
	
	// Now do PCA
	DoPCA(AllTextures, texture_percentage);

	if(registration) SaveSeriesTemplate(AllTextures, m_warp);

	cvReleaseMat(&AllTextures);
}
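AAM_TDM::AlignTextures and DoPCA are defined elsewhere in the library. As an illustration only, the photometric alignment step is commonly a per-sample normalization of each texture vector; a simplified, non-iterative sketch of that idea (not the library's actual routine) is:

// Hypothetical sketch of photometric normalization: shift every texture row
// to zero mean and scale it to unit L2 norm so global lighting differences
// are removed. The real AlignTextures (iterative alignment to the mean
// texture) is not reproduced here.
static void AlignTextures_sketch(CvMat* AllTextures)
{
	for(int i = 0; i < AllTextures->rows; i++)
	{
		CvMat row;
		cvGetRow(AllTextures, &row, i);

		CvScalar mean = cvAvg(&row);
		cvSubS(&row, mean, &row);

		double norm = cvNorm(&row, NULL, CV_L2);
		if(norm > 1e-12)
			cvConvertScale(&row, &row, 1.0/norm);
	}
}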
Example No. 4
//============================================================================
void AAM_CAM::Train(const file_lists& pts_files, 
					const file_lists& img_files, 
					double scale /* = 1.0 */,
					double shape_percentage /* = 0.975 */, 
					double texture_percentage /* = 0.975 */, 
					double appearance_percentage /* = 0.975 */)
{
	//building shape and texture distribution model
	std::vector<AAM_Shape> AllShapes;
	for(int ii = 0; ii < pts_files.size(); ii++)
	{
		AAM_Shape Shape;
		bool flag = Shape.ReadAnnotations(pts_files[ii]);
		if(!flag)
		{
			IplImage* image = cvLoadImage(img_files[ii].c_str(), -1);
			Shape.ScaleXY(image->width, image->height);
			cvReleaseImage(&image);
		}
		AllShapes.push_back(Shape);
	}

	printf("Build point distribution model...\n");
	__shape.Train(AllShapes, scale, shape_percentage);
	
	printf("Build warp information of mean shape mesh...");
	__Points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
	__Storage = cvCreateMemStorage(0);
	AAM_Shape refShape = __shape.__AAMRefShape/* * scale */;
	//if(refShape.GetWidth() > 50)
	//	refShape.Scale(50/refShape.GetWidth());
	
	__paw.Train(refShape, __Points, __Storage);
	printf("[%d by %d, %d triangles, %d*3 pixels]\n",
		__paw.Width(), __paw.Height(), __paw.nTri(), __paw.nPix());
	
	printf("Build texture distribution model...\n");
	__texture.Train(pts_files, img_files, __paw, texture_percentage, true);
	__pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);	

	printf("Build combined appearance model...\n");	
	int nsamples = pts_files.size();
	int npointsby2 = __shape.nPoints()*2;
	int npixels = __texture.nPixels();
	int nfeatures = __shape.nModes() + __texture.nModes();
	CvMat* AllAppearances = cvCreateMat(nsamples, nfeatures, CV_64FC1);
	CvMat* s = cvCreateMat(1, npointsby2, CV_64FC1);
	CvMat* t = cvCreateMat(1, npixels, CV_64FC1);
	__MeanS = cvCreateMat(1, npointsby2, CV_64FC1);
	__MeanG = cvCreateMat(1, npixels, CV_64FC1);
	cvCopy(__shape.GetMean(), __MeanS);
	cvCopy(__texture.GetMean(), __MeanG);

	// Weight that makes the shape parameters commensurate with the texture parameters
	CvScalar Sum1 = cvSum(__shape.__ShapesEigenValues);
	CvScalar Sum2 = cvSum(__texture.__TextureEigenValues);
	__WeightsS2T = sqrt(Sum2.val[0] / Sum1.val[0]);

	printf("Combine shape and texture parameters...\n");	
	for(int i = 0; i < nsamples; i++)
	{
		//Get Shape and Texture respectively
		IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
		
		AAM_Shape Shape;
		if(!Shape.ReadAnnotations(pts_files[i]))
			Shape.ScaleXY(image->width, image->height);
		Shape.Point2Mat(s);
		AAM_Common::CheckShape(s, image->width, image->height);
		
		__paw.CalcWarpTexture(s, image, t);
		__texture.NormalizeTexture(__MeanG, t);

		//combine shape and texture parameters
		CvMat OneAppearance;
		cvGetRow(AllAppearances, &OneAppearance, i);
		ShapeTexture2Combined(s, t, &OneAppearance);

		cvReleaseImage(&image);
	}

	//Do PCA of appearances
	DoPCA(AllAppearances, appearance_percentage);

	int np = __AppearanceEigenVectors->rows;

	printf("Extracting the shape and texture part of the combined eigen vectors..\n");
	
	// extract the shape part of the combined eigen vectors
	CvMat Ps;
	cvGetCols(__AppearanceEigenVectors, &Ps, 0, __shape.nModes());
	__Qs = cvCreateMat(np, npointsby2, CV_64FC1);
	cvMatMul(&Ps, __shape.GetBases(), __Qs);
	cvConvertScale(__Qs, __Qs, 1.0/__WeightsS2T);

	// extract the texture part of the combined eigen vectors
	CvMat Pg;
	cvGetCols(__AppearanceEigenVectors, &Pg, __shape.nModes(), nfeatures);
	__Qg = cvCreateMat(np, npixels, CV_64FC1);
	cvMatMul(&Pg, __texture.GetBases(), __Qg);

	__a = cvCreateMat(1, __AppearanceEigenVectors->cols, CV_64FC1);
}
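For reference, the matrices computed at the end (__MeanS, __MeanG, __Qs, __Qg) are what turn a combined appearance parameter vector c back into a shape and a texture, i.e. s = MeanS + c*Qs and g = MeanG + c*Qg in the row-vector convention used here. A hypothetical helper that only illustrates this relationship (it is not part of the library) might be:

// Hypothetical synthesis sketch: map an appearance parameter row vector c
// (1 x nModes) to a shape instance s and a texture instance g.
//   s = MeanS + c * Qs,   g = MeanG + c * Qg
void SynthesizeInstance_sketch(const CvMat* c,
                               const CvMat* Qs, const CvMat* MeanS, CvMat* s,
                               const CvMat* Qg, const CvMat* MeanG, CvMat* g)
{
	cvMatMul(c, Qs, s);      // s <- c * Qs
	cvAdd(s, MeanS, s);      // s <- s + MeanS
	cvMatMul(c, Qg, g);      // g <- c * Qg
	cvAdd(g, MeanG, g);      // g <- g + MeanG
}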