Example #1
//============================================================================
void AAM_Basic::Train(const file_lists& pts_files,
					  const file_lists& img_files,
					  double scale /* = 1.0 */,
					  double shape_percentage /* = 0.975 */,
					  double texture_percentage /* = 0.975 */,
					  double appearance_percentage /* = 0.975 */)
{
	if(pts_files.size() != img_files.size())
	{
		fprintf(stderr, "ERROE(%s, %d): #Shapes != #Images\n",
			__FILE__, __LINE__);
		exit(-1);
	}

	printf("################################################\n");
	printf("Build Fixed Jocobian Active Appearance Model ...\n");

	__cam.Train(pts_files, img_files, scale, shape_percentage,
		texture_percentage, appearance_percentage);

	printf("Build Jacobian Matrix...\n");
	__G = cvCreateMat(__cam.nModes()+4, __cam.__texture.nPixels(), CV_64FC1);
	CalcJacobianMatrix(pts_files, img_files);

	//allocate memory for on-line fitting
	__current_c_q = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
	__update_c_q = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
	__delta_c_q = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
	__c = cvCreateMat(1, __cam.nModes(), CV_64FC1);
	__p = cvCreateMat(1, __cam.__shape.nModes(), CV_64FC1);
	__q = cvCreateMat(1, 4, CV_64FC1);
	__lamda = cvCreateMat(1, __cam.__texture.nModes(), CV_64FC1);
	__s = cvCreateMat(1, __cam.__shape.nPoints()*2, CV_64FC1);
	__t_s = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
	__t_m = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
	__delta_t = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);

	printf("################################################\n\n");
}
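
//----------------------------------------------------------------------------
// Usage sketch (illustrative, not taken from the library): a minimal driver
// for the routine above. TrainBasicExample is a hypothetical wrapper; only
// AAM_Basic, file_lists and the Train() signature come from the code shown.
static void TrainBasicExample(const file_lists& pts_files,
							  const file_lists& img_files)
{
	AAM_Basic aam;
	// defaults shown above: scale = 1.0, shape/texture/appearance
	// PCA cut-offs = 0.975
	aam.Train(pts_files, img_files);
}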
Example #2
//============================================================================
void AAM_TDM::Train(const file_lists& pts_files, const file_lists& img_files, 
					const AAM_PAW& m_warp, 
					double texture_percentage /* = 0.975 */, 
					bool registration /* = true */)
{
	int nPoints = m_warp.nPoints();
	int nPixels = m_warp.nPix()*3;
	int nSamples = pts_files.size();
	
	CvMat *AllTextures = cvCreateMat(nSamples, nPixels, CV_64FC1);
	
	CvMat * matshape = cvCreateMat(1, nPoints*2, CV_64FC1);
	for(int i = 0; i < nSamples; i++)
	{
		IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
		
		AAM_Shape trueshape;
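		// A false return from ReadAnnotations() is taken to mean the annotation
		// stores relative coordinates, so the points are scaled up to pixel
		// units using the image size (assumed semantics of AAM_Shape).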
		if(!trueshape.ReadAnnotations(pts_files[i]))
			trueshape.ScaleXY(image->width, image->height);
		trueshape.Point2Mat(matshape);
		AAM_Common::CheckShape(matshape, image->width, image->height);
		
		CvMat t;	cvGetRow(AllTextures, &t, i);
		m_warp.CalcWarpTexture(matshape, image, &t);
		
		cvReleaseImage(&image);
	}
	cvReleaseMat(&matshape);
	
	// align texture so as to minimize the lighting variation
	AAM_TDM::AlignTextures(AllTextures);
	
	//now do pca
	DoPCA(AllTextures, texture_percentage);

	if(registration) SaveSeriesTemplate(AllTextures, m_warp);

	cvReleaseMat(&AllTextures);
}
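
//----------------------------------------------------------------------------
// Note (sketch, based on the callers in Examples #3 and #4 below):
// AAM_TDM::Train is normally driven by a higher-level trainer that first
// builds the shape model and the piecewise-affine warp, then passes the
// trained AAM_PAW in, e.g.
//
//	AAM_TDM texture;
//	texture.Train(pts_files, img_files, paw, 0.975, true);
//
// where `paw` stands for an already-trained AAM_PAW such as __paw below.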
Example #3
//============================================================================
void AAM_IC::Train(const file_lists& pts_files, 
				   const file_lists& img_files, 
				   double scale /* = 1.0 */, 
				   double shape_percentage /* = 0.975 */, 
				   double texture_percentage /* = 0.975 */)
{
	if(pts_files.size() != img_files.size())
	{
		fprintf(stderr, "ERROE(%s, %d): #Shapes != #Images\n",
			__FILE__, __LINE__);
		exit(-1);
	}

	printf("################################################\n");
	printf("Build Inverse Compositional Image Alignmennt Model...\n");

	std::vector<AAM_Shape> AllShapes;
	for(int ii = 0; ii < pts_files.size(); ii++)
	{
		AAM_Shape Shape;
		bool flag = Shape.ReadAnnotations(pts_files[ii]);
		if(!flag)
		{
			IplImage* image = cvLoadImage(img_files[ii].c_str(), -1);
			Shape.ScaleXY(image->width, image->height);
			cvReleaseImage(&image);
		}
		AllShapes.push_back(Shape);
	}

	//building shape and texture distribution model
	printf("Build point distribution model...\n");
	__shape.Train(AllShapes, scale, shape_percentage);
	
	printf("Build warp information of mean shape mesh...");
	__Points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
	__Storage = cvCreateMemStorage(0);

	double sp = 1.0;
	//if(__shape.GetMeanShape().GetWidth() > 48)
	//	sp = 48/__shape.GetMeanShape().GetWidth();

	__paw.Train(__shape.GetMeanShape()*sp, __Points, __Storage);
	printf("[%d by %d, triangles #%d, pixels #%d*3]\n",
		__paw.Width(), __paw.Height(), __paw.nTri(), __paw.nPix());

	printf("Build texture distribution model...\n");
	__texture.Train(pts_files, img_files, __paw, texture_percentage, true);

	//calculate gradient of texture
	printf("Calculating texture gradient...\n");
	CvMat* dTx = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	CvMat* dTy = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	CalcTexGrad(__texture.GetMean(), dTx, dTy);
	
	// save gradient image
	mkdir("Modes");
	__paw.SaveWarpTextureToImage("Modes/dTx.jpg", dTx);
	__paw.SaveWarpTextureToImage("Modes/dTy.jpg", dTy);
	
	//calculate warp Jacobian at base shape
	printf("Calculating warp Jacobian...\n");
	CvMat* Jx = cvCreateMat(__paw.nPix(), __shape.nModes()+4, CV_64FC1);
	CvMat* Jy = cvCreateMat(__paw.nPix(), __shape.nModes()+4, CV_64FC1);
	CalcWarpJacobian(Jx,Jy);
	
	//calculate modified steepest descent image
	printf("Calculating steepest descent images...\n");
	CvMat* SD = cvCreateMat(__shape.nModes()+4, __texture.nPixels(), CV_64FC1);
	CalcModifiedSD(SD, dTx, dTy, Jx, Jy);

	//calculate inverse Hessian matrix
	printf("Calculating Hessian inverse matrix...\n");
	CvMat* H = cvCreateMat(__shape.nModes()+4, __shape.nModes()+4, CV_64FC1);
	CalcHessian(H, SD);

	//calculate update matrix (multiply inverse Hessian by modified steepest descent image)
	__G = cvCreateMat(__shape.nModes()+4, __texture.nPixels(), CV_64FC1);
	cvMatMul(H, SD, __G);

	//release
	cvReleaseMat(&Jx);
	cvReleaseMat(&Jy);
	cvReleaseMat(&dTx);
	cvReleaseMat(&dTy);
	cvReleaseMat(&SD);
	cvReleaseMat(&H);

	//allocate memory for on-line fitting stuff
	__update_s0 = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
	__inv_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
	__warp_t = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	__error_t = cvCreateMat(1, __texture.nPixels(), CV_64FC1);
	__search_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
	__delta_pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);
	__current_s = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
	__update_s = cvCreateMat(1, __shape.nPoints()*2, CV_64FC1);
	__lamda  = cvCreateMat(1, __texture.nModes(), CV_64FC1);

	printf("################################################\n\n");
}
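
//----------------------------------------------------------------------------
// Usage sketch (illustrative, not taken from the library): TrainICExample is
// a hypothetical wrapper around the routine above. After training, __G
// ((nModes+4) x nPixels) is the precomputed update matrix (inverse Hessian
// times the modified steepest-descent images) used to turn the texture error
// into a parameter update at fit time.
static void TrainICExample(const file_lists& pts_files,
						   const file_lists& img_files)
{
	AAM_IC ic;
	// defaults: scale = 1.0, shape and texture PCA cut-offs = 0.975
	ic.Train(pts_files, img_files);
}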
Example #4
//============================================================================
void AAM_CAM::Train(const file_lists& pts_files, 
					const file_lists& img_files, 
					double scale /* = 1.0 */,
					double shape_percentage /* = 0.975 */, 
					double texture_percentage /* = 0.975 */, 
					double appearance_percentage /* = 0.975 */)
{
	//building shape and texture distribution model
	std::vector<AAM_Shape> AllShapes;
	for(int ii = 0; ii < pts_files.size(); ii++)
	{
		AAM_Shape Shape;
		bool flag = Shape.ReadAnnotations(pts_files[ii]);
		if(!flag)
		{
			IplImage* image = cvLoadImage(img_files[ii].c_str(), -1);
			Shape.ScaleXY(image->width, image->height);
			cvReleaseImage(&image);
		}
		AllShapes.push_back(Shape);
	}

	printf("Build point distribution model...\n");
	__shape.Train(AllShapes, scale, shape_percentage);
	
	printf("Build warp information of mean shape mesh...");
	__Points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
	__Storage = cvCreateMemStorage(0);
	AAM_Shape refShape = __shape.__AAMRefShape/* * scale */;
	//if(refShape.GetWidth() > 50)
	//	refShape.Scale(50/refShape.GetWidth());
	
	__paw.Train(refShape, __Points, __Storage);
	printf("[%d by %d, %d triangles, %d*3 pixels]\n",
		__paw.Width(), __paw.Height(), __paw.nTri(), __paw.nPix());
	
	printf("Build texture distribution model...\n");
	__texture.Train(pts_files, img_files, __paw, texture_percentage, true);
	__pq = cvCreateMat(1, __shape.nModes()+4, CV_64FC1);	

	printf("Build combined appearance model...\n");	
	int nsamples = pts_files.size();
	int npointsby2 = __shape.nPoints()*2;
	int npixels = __texture.nPixels();
	int nfeatures = __shape.nModes() + __texture.nModes();
	CvMat* AllAppearances = cvCreateMat(nsamples, nfeatures, CV_64FC1);
	CvMat* s = cvCreateMat(1, npointsby2, CV_64FC1);
	CvMat* t = cvCreateMat(1, npixels, CV_64FC1);
	__MeanS = cvCreateMat(1, npointsby2, CV_64FC1);
	__MeanG = cvCreateMat(1, npixels, CV_64FC1);
	cvCopy(__shape.GetMean(), __MeanS);
	cvCopy(__texture.GetMean(), __MeanG);

	//calculate ratio of shape to appearance
	CvScalar Sum1 = cvSum(__shape.__ShapesEigenValues);
	CvScalar Sum2 = cvSum(__texture.__TextureEigenValues);
	__WeightsS2T = sqrt(Sum2.val[0] / Sum1.val[0]);

	printf("Combine shape and texture parameters...\n");	
	for(int i = 0; i < nsamples; i++)
	{
		//Get Shape and Texture respectively
		IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
		
		AAM_Shape Shape;
		if(!Shape.ReadAnnotations(pts_files[i]))
			Shape.ScaleXY(image->width, image->height);
		Shape.Point2Mat(s);
		AAM_Common::CheckShape(s, image->width, image->height);
		
		__paw.CalcWarpTexture(s, image, t);
		__texture.NormalizeTexture(__MeanG, t);

		//combine shape and texture parameters
		CvMat OneAppearance;
		cvGetRow(AllAppearances, &OneAppearance, i);
		ShapeTexture2Combined(s, t, &OneAppearance);

		cvReleaseImage(&image);
	}

	//Do PCA of appearances
	DoPCA(AllAppearances, appearance_percentage);

	int np = __AppearanceEigenVectors->rows;

	printf("Extracting the shape and texture part of the combined eigen vectors..\n");
	
	// extract the shape part of the combined eigen vectors
	CvMat Ps;
	cvGetCols(__AppearanceEigenVectors, &Ps, 0, __shape.nModes());
	__Qs = cvCreateMat(np, npointsby2, CV_64FC1);
	cvMatMul(&Ps, __shape.GetBases(), __Qs);
	cvConvertScale(__Qs, __Qs, 1.0/__WeightsS2T);

	// extract the texture part of the combined eigen vectors
	CvMat Pg;
	cvGetCols(__AppearanceEigenVectors, &Pg, __shape.nModes(), nfeatures);
	__Qg = cvCreateMat(np, npixels, CV_64FC1);
	cvMatMul(&Pg, __texture.GetBases(), __Qg);

	__a = cvCreateMat(1, __AppearanceEigenVectors->cols, CV_64FC1);
}
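
//----------------------------------------------------------------------------
// Reconstruction sketch (assumes the row-vector convention used throughout
// this file): for a combined appearance parameter vector c of size
// 1 x __AppearanceEigenVectors->rows, the projected bases built above give,
// roughly,
//
//	s = __MeanS + c * __Qs	(__Qs already folds in the 1/__WeightsS2T scaling)
//	g = __MeanG + c * __Qg
//
// so a single parameter vector c drives both shape and normalized texture.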
Example #5
//============================================================================
void AAM_Basic::CalcJacobianMatrix(const file_lists& pts_files,
								   const file_lists& img_files,
								   double disp_scale /* = 0.2 */,
								   double disp_angle /* = 20 */,
								   double disp_trans /* = 5.0 */,
								   double disp_std /* = 1.0 */,
								   int nExp /* = 30 */)
{
	CvMat* J = cvCreateMat(__cam.nModes()+4, __cam.__texture.nPixels(), CV_64FC1);
	CvMat* d = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
	CvMat* o = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
	CvMat* oo = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
	CvMat* t = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
	CvMat* t_m = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
	CvMat* t_s = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
	CvMat* t1 = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
	CvMat* t2 = cvCreateMat(1, __cam.__texture.nPixels(), CV_64FC1);
	CvMat* u = cvCreateMat(1, __cam.nModes()+4, CV_64FC1);
	CvMat* c = cvCreateMat(1, __cam.nModes(), CV_64FC1);
	CvMat* s = cvCreateMat(1, __cam.__shape.nPoints()*2, CV_64FC1);
	CvMat* q = cvCreateMat(1, 4, CV_64FC1);
	CvMat* p = cvCreateMat(1, __cam.__shape.nModes(),CV_64FC1);
	CvMat* lamda = cvCreateMat(1, __cam.__texture.nModes(), CV_64FC1);

	double theta = disp_angle * CV_PI / 180;
	double aa = MAX(fabs(disp_scale*cos(theta)), fabs(disp_scale*sin(theta)));
	cvmSet(d,0,0,aa); cvmSet(d,0,1,aa); cvmSet(d,0,2,disp_trans); cvmSet(d,0,3,disp_trans);
	for(int nmode = 0; nmode < __cam.nModes(); nmode++)
		cvmSet(d,0,4+nmode,disp_std*sqrt(__cam.Var(nmode)));

	srand(unsigned(time(0)));
	cvSetZero(u);cvSetZero(J);
	for(int i = 0; i < pts_files.size(); i++)
	{
		IplImage* image = cvLoadImage(img_files[i].c_str(), -1);
		AAM_Shape Shape;
		if(!Shape.ReadAnnotations(pts_files[i]))
			Shape.ScaleXY(image->width, image->height);
		Shape.Point2Mat(s);

		//calculate current texture vector
		__cam.__paw.CalcWarpTexture(s, image, t);
		__cam.__texture.NormalizeTexture(__cam.__MeanG, t);

		//calculate appearance parameters
		__cam.__shape.CalcParams(s, p, q);
		__cam.__texture.CalcParams(t, lamda);
		__cam.CalcParams(c, p, lamda);

		//update appearance and pose parameters
		CvMat subo;
		cvGetCols(o, &subo, 0, 4); cvCopy(q, &subo);
		cvGetCols(o, &subo, 4, 4+__cam.nModes()); cvCopy(c, &subo);

		//get optimal EstResidual
		EstResidual(image, o, s, t_m, t_s, t1);

		for(int j = 0; j < nExp; j++)
		{
			printf("Pertubing (%d/%d) for image (%d/%d)...\r", j, nExp, i, pts_files.size());

			for(int l = 0; l < 4+__cam.nModes(); l++)
			{
				double D = cvmGet(d,0,l);
				double v = rand_in_between(-D, D);
				cvCopy(o, oo); CV_MAT_ELEM(*oo,double,0,l) += v;
				EstResidual(image, oo, s, t_m, t_s, t2);

				cvSub(t1, t2, t2);
				cvConvertScale(t2, t2, 1.0/v);

				//accumulate into l-th row
				CvMat Jl; cvGetRow(J, &Jl, l);
				cvAdd(&Jl, t2, &Jl);

				CV_MAT_ELEM(*u, double, 0, l) += 1.0;
			}
		}
		cvReleaseImage(&image);
	}

	//normalize
	for(int l = 0; l < __cam.nModes()+4; l++)
	{
		CvMat Jl; cvGetRow(J, &Jl, l);
		cvConvertScale(&Jl, &Jl, 1.0/cvmGet(u,0,l));
	}

	CvMat* JtJ = cvCreateMat(__cam.nModes()+4, __cam.nModes()+4, CV_64FC1);
	CvMat* InvJtJ = cvCreateMat(__cam.nModes()+4, __cam.nModes()+4, CV_64FC1);
	cvGEMM(J, J, 1, NULL, 0, JtJ, CV_GEMM_B_T);
	cvInvert(JtJ, InvJtJ, CV_SVD);
	cvMatMul(InvJtJ, J, __G);

	cvReleaseMat(&J);	cvReleaseMat(&d); 	cvReleaseMat(&o);
	cvReleaseMat(&oo); 	cvReleaseMat(&t);	cvReleaseMat(&t_s);
	cvReleaseMat(&t_m);	cvReleaseMat(&t1); 	cvReleaseMat(&t2);
	cvReleaseMat(&u); 	cvReleaseMat(&c);	cvReleaseMat(&s);
	cvReleaseMat(&q); 	cvReleaseMat(&p);	cvReleaseMat(&lamda);
	cvReleaseMat(&JtJ); cvReleaseMat(&InvJtJ);
}
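
//----------------------------------------------------------------------------
// Summary sketch of the estimation above: each row l of J accumulates
// finite-difference estimates (residual difference divided by the random
// perturbation v) over nExp perturbations of parameter l per training image,
// and u counts the samples so each row can be averaged. The final step forms
// G = inv(J*J^T) * J, the least-squares regression that is presumably used at
// fitting time to map the texture residual onto an update of the pose and
// appearance parameters (see the on-line fitting buffers allocated in
// Example #1).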