Example 1
int AlignPair(int argc, const char *argv[])
{
    // Align two images using feature matching
    if (argc < 7) {
        printf("usage: %s input1.f input2.f matchfile nRANSAC RANSACthresh [sift]\n", argv[1]);
        return -1;
    }
    const char *infile1 = argv[2];
    const char *infile2 = argv[3];
    const char *matchfile = argv[4];
    int nRANSAC         = atoi(argv[5]);
    double RANSACthresh = atof(argv[6]);

    FeatureSet f1, f2;

    // Read in the feature sets
    if ((argc >= 8) && (strcmp(argv[7], "sift") == 0)) {
        f1.load_sift(infile1);
        f2.load_sift(infile2);
    }
    else {
        f1.load(infile1);
        f2.load(infile2);
    }

    CTransform3x3 M;

    // Read in the feature matches
    vector<FeatureMatch> matches;
    bool success = ReadFeatureMatches(matchfile, matches);

    if (!success) {
        printf("Error opening match file %s for reading\n", matchfile);
        return -1;
    }

    // Perform the alignment.
	if (strcmp(argv[1], "alignPairHomography") == 0) {
		alignPair(f1, f2, matches, eHomography, nRANSAC, RANSACthresh, M);
	} else {
		alignPair(f1, f2, matches, eTranslate, nRANSAC, RANSACthresh, M);
	}

    // Print out the result
    CTransform3x3 Mi = M.Inverse();

    /*printf("% 10.3e %10.3e %10.3e\n %10.3e %10.3e %10.3e\n %10.3e %10.3e %10.3e\n",
            Mi[0][0], Mi[0][1], Mi[0][2],
            Mi[1][0], Mi[1][1], Mi[1][2],
            Mi[2][0], Mi[2][1], Mi[2][2]);*/
	printf("%0.8e %0.8e %0.8e %0.8e %0.8e %0.8e %0.8e %0.8e %0.8e",
            Mi[0][0], Mi[0][1], Mi[0][2],
            Mi[1][0], Mi[1][1], Mi[1][2],
            Mi[2][0], Mi[2][1], Mi[2][2]);

    return 0;
}
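For reference, the usage string above corresponds to an invocation of the form (the executable name and file names here are placeholders, not taken from the listing):

    Panorama alignPairHomography img1.f img2.f matches.txt 200 4.0 sift

where argv[1] selects the motion model (homography vs. translation-only), 200 is the number of RANSAC iterations, 4.0 is the RANSAC inlier threshold, and the optional trailing "sift" makes the code read the feature files with load_sift() instead of load().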
void rotation()
{	
		CFloatImage matrixImage = GetImageFromMatrix((float *)featureMatrix, 10, 10);
		CTransform3x3 translationNegative;
		CTransform3x3 translationPositive;
		CTransform3x3 rotation;
		CFloatImage postHomography;

		Feature f;
		f.x = 6;
		f.y = 5;
		f.angleRadians = PI;

		translationNegative = translationNegative.Translation(f.x,f.y);
		translationPositive = translationPositive.Translation(-f.x,-f.y);

		rotation = rotation.Rotation(-f.angleRadians * 180/ PI);


		WarpGlobal(matrixImage, postHomography, translationNegative*rotation*translationPositive, eWarpInterpLinear, eWarpInterpNearest);
		for (int i = 0; i < postHomography.Shape().height; i++)
		{
			for (int j = 0; j < postHomography.Shape().width; j++)
			{
				printf("%.0f\t", postHomography.Pixel(j, i, 0));
			}
			printf("\n");
		}
}
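The composite passed to WarpGlobal above is the usual rotate-about-a-point construction: translate the point of interest to the origin, rotate, translate back. With the feature at (f.x, f.y) = (6, 5) and f.angleRadians = PI,

    M = T(6, 5) · R(-theta) · T(-6, -5)

which rotates homogeneous pixel coordinates by -theta about (6, 5); here theta is 180 degrees, so the sign is immaterial. Rotation() takes its argument in degrees, hence the 180/PI conversion, and the nested loop simply prints the warped 10x10 test matrix row by row.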
Example 3
//
// TODO 3: ComputeHomography()
//		Computes the homography H from the plane specified by "points" to the image plane,
//		and its inverse Hinv.
//		If the plane is the reference plane (isRefPlane == true), don't convert the
//		coordinate system to the plane. Only do this for polygon patches where
//		texture mapping is necessary.
//		Coordinate system conversion is to be implemented in a separate routine
//		ConvertToPlaneCoordinate.
//		For a more detailed explanation, see
//		http://www.cs.cornell.edu/courses/cs4670/2012fa/projects/p4/homography.pdf.
//
void ComputeHomography(CTransform3x3 &H, CTransform3x3 &Hinv, const vector<SVMPoint> &points, vector<Vec3d> &basisPts, bool isRefPlane)
{
    int i;
    int numPoints = (int) points.size();
    assert( numPoints >= 4 );

    basisPts.clear();
    if (isRefPlane) // reference plane
    {
        for (i=0; i < numPoints; i++) {
            Vec3d tmp = Vec3d(points[i].X, points[i].Y, points[i].W); // was Z, not W
            basisPts.push_back(tmp);
        }
    } 
    else // arbitrary polygon
    {
        double uScale, vScale; // unused in this function
        ConvertToPlaneCoordinate(points, basisPts, uScale, vScale);
    }

    // A: 2n x 9 matrix where n is the number of points on the plane
    //    as discussed in lecture
    int numRows = 2 * numPoints;
    const int numCols = 9;

    typedef Matrix<double, Dynamic, 9, RowMajor> MatrixType;
    MatrixType A = MatrixType::Zero(numRows, numCols);

    /******** BEGIN TODO ********/
    /* Fill in the A matrix for the call to MinEig */
	for (i = 0; i < numPoints; i++)
	{
		A(2*i,0) = basisPts[i][0];
		A(2*i,1) = basisPts[i][1];
		A(2*i,2) = 1;
		A(2*i,6) = - points[i].u * basisPts[i][0];
		A(2*i,7) = - points[i].u * basisPts[i][1];
		A(2*i,8) = - points[i].u;

		A(2*i+1,3) = basisPts[i][0];
		A(2*i+1,4) = basisPts[i][1];
		A(2*i+1,5) = 1;
		A(2*i+1,6) = - points[i].v * basisPts[i][0];
		A(2*i+1,7) = - points[i].v * basisPts[i][1];
		A(2*i+1,8) = - points[i].v;
	}

    double eval, h[9];
    MinEig(A, eval, h);

    H[0][0] = h[0];
    H[0][1] = h[1];
    H[0][2] = h[2];

    H[1][0] = h[3];
    H[1][1] = h[4];
    H[1][2] = h[5];

    H[2][0] = h[6];
    H[2][1] = h[7];
    H[2][2] = h[8];

    /******** END TODO ********/

    // compute inverse of H
    if (H.Determinant() == 0)
        fl_alert("Computed homography matrix is uninvertible \n");
    else
        Hinv = H.Inverse();

    int ii;
    printf("\nH=[\n");
    for (ii=0; ii<3; ii++)
        printf("%e\t%e\t%e;\n", H[ii][0]/H[2][2], H[ii][1]/H[2][2], H[ii][2]/H[2][2]);
    printf("]\nHinv=[\n");

    for (ii=0; ii<3; ii++)
        printf("%e\t%e\t%e;\n", Hinv[ii][0]/Hinv[2][2], Hinv[ii][1]/Hinv[2][2], Hinv[ii][2]/Hinv[2][2]);

    printf("]\n\n");
}
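Each row pair of A above comes from one correspondence between a plane point (x, y) (taken from basisPts) and its image (u, v). Writing the homography as

    u = (h0·x + h1·y + h2) / (h6·x + h7·y + h8)
    v = (h3·x + h4·y + h5) / (h6·x + h7·y + h8)

and cross-multiplying gives the two homogeneous linear equations

    x·h0 + y·h1 + h2 − u·x·h6 − u·y·h7 − u·h8 = 0
    x·h3 + y·h4 + h5 − v·x·h6 − v·y·h7 − v·h8 = 0

which are exactly the entries written into rows 2i and 2i+1. MinEig then returns the unit-norm h minimizing ||A·h|| (the standard DLT solution), and the code copies h back into H row by row.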
/******************* TO DO 4 *********************
 * AccumulateBlend:
 *	INPUT:
 *		img: a new image to be added to acc
 *		acc: portion of the accumulated image where img is to be added
 *		M: translation matrix for calculating a bounding box
 *		blendWidth: width of the blending function (horizontal hat function;
 *	    try other blending functions for extra credit)
 *	OUTPUT:
 *		add a weighted copy of img to the subimage specified in acc
 *		the first 3 bands of acc record the weighted sum of pixel colors
 *		the fourth band of acc records the sum of weights
 */
static void AccumulateBlend(CByteImage& img, CFloatImage& acc, CTransform3x3 M, float blendWidth)
{
    /* Compute the bounding box of the warped image in the accumulator */
    int bb_min_x, bb_min_y, bb_max_x, bb_max_y;
    ImageBoundingBox(img, M, bb_min_x, bb_min_y, bb_max_x, bb_max_y);

	int imgWidth = img.Shape().width;
	int imgHeight = img.Shape().height;

    CTransform3x3 Minv = M.Inverse();

    for (int y = bb_min_y; y <= bb_max_y; y++) {
        for (int x = bb_min_x; x <= bb_max_x; x++) {
            /* Check bounds in destination */
            if (x < 0 || x >= acc.Shape().width || 
                y < 0 || y >= acc.Shape().height)
                continue;

            /* Compute source pixel and check bounds in source */
            CVector3 p_dest, p_src;
            p_dest[0] = x;
            p_dest[1] = y;
            p_dest[2] = 1.0;

            p_src = Minv * p_dest;

            float x_src = (float) (p_src[0] / p_src[2]);
            float y_src = (float) (p_src[1] / p_src[2]);

            if (x_src < 0.0 || x_src >= img.Shape().width - 1 ||
                y_src < 0.0 || y_src >= img.Shape().height - 1)
                continue;

            int xf = (int) floor(x_src);
            int yf = (int) floor(y_src);
            int xc = xf + 1;
            int yc = yf + 1;

            /* Skip black pixels */
            if (img.Pixel(xf, yf, 0) == 0x0 && 
                img.Pixel(xf, yf, 1) == 0x0 && 
                img.Pixel(xf, yf, 2) == 0x0)
                continue;

            if (img.Pixel(xc, yf, 0) == 0x0 && 
                img.Pixel(xc, yf, 1) == 0x0 && 
                img.Pixel(xc, yf, 2) == 0x0)
                continue;

            if (img.Pixel(xf, yc, 0) == 0x0 && 
                img.Pixel(xf, yc, 1) == 0x0 && 
                img.Pixel(xf, yc, 2) == 0x0)
                continue;

            if (img.Pixel(xc, yc, 0) == 0x0 && 
                img.Pixel(xc, yc, 1) == 0x0 && 
                img.Pixel(xc, yc, 2) == 0x0)
                continue;

            
            double weight = 1.0;

			// *** BEGIN TODO ***
			// Set the feathering weight (see the mosaics lecture slides on "feathering"):
			// a horizontal hat function that ramps linearly from 0 to 1 over
			// blendWidth pixels at the left and right edges of the warped image's
			// bounding box in the accumulator.
			double distLeft  = x - bb_min_x;
			double distRight = bb_max_x - x;
			double dist = (distLeft < distRight) ? distLeft : distRight;
			if (dist < blendWidth)
			{
				weight = dist / blendWidth;
			}

            
			// *** END TODO ***	

			acc.Pixel(x, y, 0) += (float) (weight * img.PixelLerp(x_src, y_src, 0));
            acc.Pixel(x, y, 1) += (float) (weight * img.PixelLerp(x_src, y_src, 1));
            acc.Pixel(x, y, 2) += (float) (weight * img.PixelLerp(x_src, y_src, 2));
            acc.Pixel(x, y, 3) += (float) weight;
        }
    }
}
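Since the fourth band of acc accumulates the blending weights, a companion normalization pass is needed before the panorama can be written out. The routine below is only a minimal sketch of that step (its name and signature are assumptions, not taken from the listing above): it divides the accumulated colors by the accumulated weight and clamps to byte range.

static void NormalizeBlendSketch(CFloatImage& acc, CByteImage& img)
{
    CShape sh = acc.Shape();
    for (int y = 0; y < sh.height; y++) {
        for (int x = 0; x < sh.width; x++) {
            float w = acc.Pixel(x, y, 3);                 // accumulated weight
            for (int c = 0; c < 3; c++) {
                float v = (w > 0.0f) ? acc.Pixel(x, y, c) / w : 0.0f;
                if (v < 0.0f)   v = 0.0f;                 // clamp to byte range
                if (v > 255.0f) v = 255.0f;
                img.Pixel(x, y, c) = (unsigned char) v;
            }
            img.Pixel(x, y, 3) = 255;                     // assumes a 4-band (RGBA) output image
        }
    }
}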
Example 5
/******************* TO DO *********************
* AccumulateBlend:
*	INPUT:
*		img: a new image to be added to acc
*		acc: portion of the accumulated image where img is to be added
*       M: the transformation mapping the input image 'img' into the output panorama 'acc'
*		blendWidth: width of the blending function (horizontal hat function;
*	    try other blending functions for extra credit)
*	OUTPUT:
*		add a weighted copy of img to the subimage specified in acc
*		the first 3 bands of acc record the weighted sum of pixel colors
*		the fourth band of acc records the sum of weights
*/
static void AccumulateBlend(CByteImage& img, CFloatImage& acc, CTransform3x3 M, float blendWidth)
{
    // BEGIN TODO
    // Fill in this routine
	// get shape of acc and img
	CShape sh = img.Shape();
    int width = sh.width;
    int height = sh.height;
	CShape shacc = acc.Shape();
    int widthacc = shacc.width;
    int heightacc = shacc.height;
	
	// get the bounding box of img in acc
	int min_x, min_y, max_x, max_y;
	ImageBoundingBox(img, M, min_x, min_y, max_x, max_y);

	CVector3 p;
	CTransform3x3 Minv = M.Inverse();	// invert once rather than once per pixel
	double newx, newy;

	// Exposure Compensation
	double lumaScale = 0.0;	// accumulates luminance ratios; averaged below (falls back to 1.0 if no samples)
	double lumaAcc = 0.0;
	double lumaImg = 0.0;
	int cnt = 0;

	for (int ii = min_x; ii < max_x; ii++)
		for (int jj = min_y; jj < max_y; jj++)
		{
			// flag: current pixel black or not
			bool flag = false;
			p[0] = ii; p[1] = jj; p[2] = 1;
			p = Minv * p;
			newx = p[0] / p[2];
			newy = p[1] / p[2];
			// If in the overlapping region
			if (newx >=0 && newx < width && newy >=0 && newy < height)
			{
				if (acc.Pixel(ii,jj,0) == 0 &&
					acc.Pixel(ii,jj,1) == 0 &&
					acc.Pixel(ii,jj,2) == 0)
					flag = true;
				if (img.PixelLerp(newx,newy,0) == 0 &&
					img.PixelLerp(newx,newy,1) == 0 &&
					img.PixelLerp(newx,newy,2) == 0)
					flag = true;
				if (!flag)
				{
					// Compute Y using RGB (RGB -> YUV)
					lumaAcc = 0.299 * acc.Pixel(ii,jj,0) +
							   0.587 * acc.Pixel(ii,jj,1) +
							   0.114 * acc.Pixel(ii,jj,2);
					lumaImg = 0.299 * img.PixelLerp(newx,newy,0) +
							   0.587 * img.PixelLerp(newx,newy,1) +
							   0.114 * img.PixelLerp(newx,newy,2);
					
					if (lumaImg != 0)
					{
						double scale = lumaAcc / lumaImg;
						if (scale > 0.5 && scale < 2)
						{
							lumaScale += lumaAcc / lumaImg;
							cnt++;
						}
					}
				}
			}
		}

	if (cnt != 0)
		lumaScale = lumaScale / (double)cnt;
	else lumaScale = 1.0;

	// Add every pixel of img to acc, feathering the region within blendWidth of the
	// bounding box edges; pure black pixels (caused by warping) are not added.
	double weight;
	
	for (int ii = min_x; ii < max_x; ii++)
		for (int jj = min_y; jj < max_y; jj++)
		{
			p[0] = ii; p[1] = jj; p[2] = 1;
			p = Minv * p;
			newx = p[0] / p[2];
			newy = p[1] / p[2];
			if ((newx >= 0) && (newx < width-1) && (newy >= 0) && (newy < height-1))
			{
				weight = 1.0;
				if ( (ii >= min_x) && (ii < (min_x+blendWidth)) )
					weight = (ii-min_x) / blendWidth;
				if ( (ii <= max_x) && (ii > (max_x-blendWidth)) )
					weight = (max_x-ii) / blendWidth;
				if (img.Pixel(iround(newx),iround(newy),0) == 0 &&
					img.Pixel(iround(newx),iround(newy),1) == 0 &&
					img.Pixel(iround(newx),iround(newy),2) == 0)
					weight = 0.0;

				double LerpR = img.PixelLerp(newx, newy, 0);
				double LerpG = img.PixelLerp(newx, newy, 1);
				double LerpB = img.PixelLerp(newx, newy, 2);
				
				double r = LerpR*lumaScale > 255.0 ? 255.0 : LerpR*lumaScale;
				double g = LerpG*lumaScale > 255.0 ? 255.0 : LerpG*lumaScale;
				double b = LerpB*lumaScale > 255.0 ? 255.0 : LerpB*lumaScale;
				acc.Pixel(ii,jj,0) += r * weight;
				acc.Pixel(ii,jj,1) += g * weight;
				acc.Pixel(ii,jj,2) += b * weight;
				acc.Pixel(ii,jj,3) += weight;
			}
		}
	
	printf("AccumulateBlend\n"); 

    // END TODO
}
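The exposure-compensation pass above estimates a single gain for the incoming image from the overlap region: with luminance Y = 0.299·R + 0.587·G + 0.114·B, it averages the per-pixel ratios

    lumaScale = (1/cnt) · Σ (Y_acc / Y_img)

over overlapping, non-black pixels whose individual ratio lies in (0.5, 2), then multiplies the warped source colors by lumaScale (clamped to 255) before applying the feathering weight.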
// Compute MOPS descriptors.
void ComputeMOPSDescriptors(CFloatImage &image, FeatureSet &features)
{
	CFloatImage grayImage=ConvertToGray(image);
	CFloatImage blurredImage;
	
	Convolve(grayImage, blurredImage, ConvolveKernel_7x7);

	CFloatImage postHomography = CFloatImage();
	CFloatImage gaussianImage = GetImageFromMatrix((float *)gaussian5x5Float, 5, 5);


	//first make the image invariant to changes in illumination by subtracting off the mean
	int grayHeight = grayImage.Shape().height;
	int grayWidth = grayImage.Shape().width;

	// now make this rotation invariant
    vector<Feature>::iterator featureIterator = features.begin();
    while (featureIterator != features.end()) {
		Feature &f = *featureIterator;

		CTransform3x3 scaleTransform = CTransform3x3();
		CTransform3x3 translationNegative;
		CTransform3x3 translationPositive;
		CTransform3x3 rotation;

		double scaleFactor = 41.0 / 8.0;	// floating-point division: scale the 8x8 descriptor grid to a 41x41 window
		scaleTransform[0][0] = scaleFactor;
		scaleTransform[1][1] = scaleFactor;

		translationNegative = translationNegative.Translation(f.x,f.y);
		translationPositive = translationPositive.Translation(-4, -4);
		rotation = rotation.Rotation(f.angleRadians * 180/ PI);
		
		CTransform3x3 finalTransformation = translationNegative * rotation * scaleTransform * translationPositive;
		//CFloatImage sample61x61Window = 
		//CFloatImage pixelWindow = GetXWindowAroundPixel(grayImage, f.x, f.y, 61);

		WarpGlobal(blurredImage, postHomography, finalTransformation, eWarpInterpLinear, 1.0f);

		// sample the 8x8 descriptor; the warp above maps this grid onto a 41x41 window around the feature
		for(int row=0; row< 8; row++)
		{
			for(int col=0;col< 8;col++)
			{
				f.data.push_back(postHomography.Pixel(col, row, 0));
			}
		}
		/*
		// now we do the subsampling first round to reduce to a 20x20
		int imgSize = 41;
		subsample(&f, imgSize, gaussianImage);

		//second round of subsampling to get it to a 10x10
		imgSize = 20;
		subsample(&f, imgSize, gaussianImage);	

		imgSize = 10;
		CFloatImage img = featureToImage(f, imgSize, imgSize);
		CFloatImage blurredImg(img.Shape());
		Convolve(img, blurredImg, gaussianImage);
		featuresFromImage(&f,blurredImg,imgSize,imgSize);
		
		int count = 0;
		for(int y=0; y<imgSize; y++)
		{
			for(int x=0; x<imgSize; x++)
			{
				if(x == 3 || x == 7 || y == 3 || y == 7)
				{
					f.data.erase(f.data.begin() + count);
				}
				else
				{
					count++;
				}
			}
		}
		*/
		normalizeIntensities(&f, 8, 8);
		featureIterator++;

	}
}
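Read right to left, finalTransformation maps descriptor coordinates to image coordinates: a descriptor pixel (c, r), with the 8x8 grid centred at (4, 4), is sent through

    p_img = T(f.x, f.y) · R(f.angleRadians) · S(41/8) · T(-4, -4) · (c, r, 1)^T

so the 8x8 patch sampled by WarpGlobal corresponds to a 41x41 window centred on the feature and oriented along its dominant angle; normalizeIntensities() then normalizes the 64 sampled intensities.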
Example 7
HOGFeatureExtractor::HOGFeatureExtractor(int nAngularBins, bool unsignedGradients, int cellSize):
_nAngularBins(nAngularBins),
_unsignedGradients(unsignedGradients),
_cellSize(cellSize)
{
    _kernelDx.ReAllocate(CShape(3, 1, 1), derivKvals, false, 1);
    _kernelDx.origin[0] = 1;

    _kernelDy.ReAllocate(CShape(1, 3, 1), derivKvals, false, 1);
    _kernelDy.origin[0] = 1;

    // For visualization
    // A set of patches representing the bin orientations. When drawing a hog cell 
    // we multiply each patch by the hog bin value and add all contributions up to 
    // form the visual representation of one cell. Full HOG is achieved by stacking 
    // the viz for individual cells horizontally and vertically.
    _oriMarkers.resize(_nAngularBins);
    const int ms = 11;
    CShape markerShape(ms, ms, 1);

    // First patch is a horizontal line
    _oriMarkers[0].ReAllocate(markerShape, true);
    _oriMarkers[0].ClearPixels();
    for(int i = 1; i < ms - 1; i++) _oriMarkers[0].Pixel(/*floor(*/ ms/2 /*)*/, i, 0) = 1;

#if 0 // debug
	std::cout << "DEBUG:" << __FILE__ << ":" << __LINE__ << std::endl;
	for(int i = 0; i < ms; i++) {
		for(int j = 0; j < ms; j++) {
			std::cout << _oriMarkers[0].Pixel(j, i, 0) << " ";
		}
		std::cout << std::endl;
	}
	std::cout << std::endl;

	char debugFName[2000];
	sprintf(debugFName, "/tmp/debug%03d.tga", 0);
	PRINT_EXPR(debugFName);
	WriteFile(_oriMarkers[0], debugFName);
#endif

	// The other patches are obtained by rotating the first one
	CTransform3x3 T = CTransform3x3::Translation((ms - 1) / 2.0, (ms - 1) / 2.0);
    for(int angBin = 1; angBin < _nAngularBins; angBin++) {
    	double theta;
    	if(unsignedGradients) theta = 180.0 * (double(angBin) / _nAngularBins);
    	else theta = 360.0 * (double(angBin) / _nAngularBins);
   		CTransform3x3 R  = T * CTransform3x3::Rotation(theta) * T.Inverse();

   		_oriMarkers[angBin].ReAllocate(markerShape, true);
   		_oriMarkers[angBin].ClearPixels();

		WarpGlobal(_oriMarkers[0], _oriMarkers[angBin], R, eWarpInterpLinear);

#if 0 // debug
		char debugFName[2000];
		sprintf(debugFName, "/tmp/debug%03d.tga", angBin);
		PRINT_EXPR(debugFName);
		WriteFile(_oriMarkers[angBin], debugFName);
#endif
    }
}
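As the comment in the constructor describes, one cell of the HOG visualization is formed by scaling each orientation marker by its bin value and summing the contributions. Below is a minimal sketch of that compositing step; the histogram vector cellHist and the function itself are hypothetical, and only marker patches like _oriMarkers above are assumed as input.

static CFloatImage VisualizeCellSketch(const std::vector<float>& cellHist,
                                       std::vector<CFloatImage>& oriMarkers)
{
    CShape shape = oriMarkers[0].Shape();        // ms x ms x 1 marker patches
    CFloatImage viz(shape);
    viz.ClearPixels();
    for (int b = 0; b < (int) cellHist.size(); b++)
        for (int y = 0; y < shape.height; y++)
            for (int x = 0; x < shape.width; x++)
                viz.Pixel(x, y, 0) += cellHist[b] * oriMarkers[b].Pixel(x, y, 0);
    return viz;
}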
Example 8
//
// TODO 3: ComputeHomography()
//		Computes the homography H from the plane specified by "points" to the image plane,
//		and its inverse Hinv.
//		If the plane is the reference plane (isRefPlane == true), don't convert the
//		coordinate system to the plane. Only do this for polygon patches where
//		texture mapping is necessary.
//		Coordinate system conversion is to be implemented in a separate routine
//		ConvertToPlaneCoordinate.
//		For a more detailed explanation, see
//		http://www.cs.cornell.edu/courses/cs4670/2012fa/projects/p4/homography.pdf.
//
void ComputeHomography(CTransform3x3 &H, CTransform3x3 &Hinv, const vector<SVMPoint> &points, vector<Vec3d> &basisPts, bool isRefPlane)
{
	int i,j;
	int numPoints = (int) points.size();
	printf("pts: %d\n", numPoints);
	assert( numPoints >= 4 );

	basisPts.clear();
	if (isRefPlane) // reference plane
	{
		printf("ref plane\n");
		for (i=0; i < numPoints; i++)
		{
			Vec3d tmp = Vec3d(points[i].X, points[i].Y, points[i].W); // was Z, not W
			basisPts.push_back(tmp);
		}
	} 
	else // arbitrary polygon
	{
		printf("polygon\n");
        double uScale, vScale; // unused in this function
		ConvertToPlaneCoordinate(points, basisPts, uScale, vScale);
	}

	// A: 2n x 9 matrix where n is the number of points on the plane
	//    as discussed in lecture
	int numRows = 2 * numPoints;
	const int numCols = 9;

	typedef Matrix<double, Dynamic, 9, RowMajor> MatrixType;
	MatrixType A = MatrixType::Zero(numRows, numCols);

	/******** BEGIN TODO ********/
	// Fill in the entries of A: two rows per point correspondence, as in the DLT setup
	int n = 0;
	for(j=0;j<numRows; j+=2){
		
		A(j,0)=basisPts[n][0]; //x1
		A(j,1)=basisPts[n][1]; //y1
		A(j,2)=1;
		A(j,3)=0;
		A(j,4)=0;
		A(j,5)=0;

		A(j,6)=-points[n].u*basisPts[n][0]; //-x1'*x1
		A(j,7)=-points[n].u*basisPts[n][1]; //-x1'*y1
		A(j,8)=-points[n].u; //-x1'

		//next row
		A(j+1,0)=0;
		A(j+1,1)=0;
		A(j+1,2)=0;
		A(j+1,3)=basisPts[n][0];
		A(j+1,4)=basisPts[n][1];
		A(j+1,5)=1;
		A(j+1,6)=-points[n].v*basisPts[n][0];
		A(j+1,7)=-points[n].v*basisPts[n][1];
		A(j+1,8)=-points[n].v;
		n++;

	}
	

	/******** END TODO ********/

	double eval, h[9];
	MinEig(A, eval, h);

	H[0][0] = h[0];
	H[0][1] = h[1];
	H[0][2] = h[2];

	H[1][0] = h[3];
	H[1][1] = h[4];
	H[1][2] = h[5];

	H[2][0] = h[6];
	H[2][1] = h[7];
	H[2][2] = h[8];

	// compute inverse of H
	if (H.Determinant() == 0)
		fl_alert("Computed homography matrix is uninvertible \n");
	else
		Hinv = H.Inverse();

	int ii;
	printf("\nH=[\n");
	for (ii=0; ii<3; ii++)
		printf("%e\t%e\t%e;\n", H[ii][0]/H[2][2], H[ii][1]/H[2][2], H[ii][2]/H[2][2]);
	printf("]\nHinv=[\n");

	for (ii=0; ii<3; ii++)
		printf("%e\t%e\t%e;\n", Hinv[ii][0]/Hinv[2][2], Hinv[ii][1]/Hinv[2][2], Hinv[ii][2]/Hinv[2][2]);

	printf("]\n\n");
}
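A quick way to sanity-check the matrices printed above is to push each basis point back through H and compare against the clicked image coordinates. The helper below is a sketch for that purpose (the function itself is hypothetical, not part of the listing; it reuses only the types already visible above and treats the basis points as (x, y, 1), matching the way A is filled in):

static void CheckHomography(CTransform3x3 &H, const vector<SVMPoint> &points, const vector<Vec3d> &basisPts)
{
    for (int k = 0; k < (int) basisPts.size(); k++) {
        Vec3d b = basisPts[k];
        CVector3 q;
        q[0] = b[0];  q[1] = b[1];  q[2] = 1.0;
        CVector3 r = H * q;                              // reproject into the image
        printf("point %d: H gives (%g, %g), clicked (%g, %g)\n",
               k, r[0] / r[2], r[1] / r[2], points[k].u, points[k].v);
    }
}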