// Compute Simple descriptors. void ComputeSimpleDescriptors(CFloatImage &image, FeatureSet &features) { //Create grayscale image used for Harris detection CFloatImage grayImage=ConvertToGray(image); vector<Feature>::iterator i = features.begin(); while (i != features.end()) { Feature &f = *i; //these fields should already be set in the computeFeatures function int x = f.x; int y = f.y; // now get the 5x5 window surrounding the feature and store them in the features for(int row=(y-2); row<=(y+2); row++) { for(int col=(x-2); col<=(x+2); col++) { //if the pixel is out of bounds, assume it is black if(row<0 || row>=grayImage.Shape().height || col<0 || col>=grayImage.Shape().width) { f.data.push_back(0.0); } else { f.data.push_back(grayImage.Pixel(col,row,0)); } } } printf("feature num %d\n", i->id); i++; } }
// Draw every fading feature in the set, using each entry's remaining alpha
// (fi->second) for transparency; modelType selects whether the GL material
// color needs to be set explicitly (non-3DO models).
// NOTE(review): the iterator is advanced manually because a failed
// DrawFeatureNow() erases the current entry via set_erase(), which returns
// the next valid iterator.
void CFeatureDrawer::DrawFadeFeaturesSet(FeatureSet& fadeFeatures, int modelType)
{
	for (FeatureSet::iterator fi = fadeFeatures.begin(); fi != fadeFeatures.end(); ) {
		// RGBA color: opaque white modulated by the feature's fade alpha.
		const float cols[] = {1.0f, 1.0f, 1.0f, fi->second};

		if (modelType != MODELTYPE_3DO) {
			glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, cols);
		}

		// hack, sorting objects by distance would look better
		glAlphaFunc(GL_GREATER, fi->second / 2.0f);
		glColor4fv(cols);

		// Drop the feature from the fade set once it can no longer be drawn.
		if (!DrawFeatureNow(fi->first, fi->second)) {
			fi = set_erase(fadeFeatures, fi);
		} else {
			++fi;
		}
	}
}
// Compute MOPs descriptors. void ComputeMOPSDescriptors(CFloatImage &image, FeatureSet &features) { CFloatImage grayImage=ConvertToGray(image); CFloatImage blurredImage; Convolve(grayImage, blurredImage, ConvolveKernel_7x7); CFloatImage postHomography = CFloatImage(); CFloatImage gaussianImage = GetImageFromMatrix((float *)gaussian5x5Float, 5, 5); //first make the image invariant to changes in illumination by subtracting off the mean int grayHeight = grayImage.Shape().height; int grayWidth = grayImage.Shape().width; // now make this rotation invariant vector<Feature>::iterator featureIterator = features.begin(); while (featureIterator != features.end()) { Feature &f = *featureIterator; CTransform3x3 scaleTransform = CTransform3x3(); CTransform3x3 translationNegative; CTransform3x3 translationPositive; CTransform3x3 rotation; double scaleFactor = 41/8; scaleTransform[0][0] = scaleFactor; scaleTransform[1][1] = scaleFactor; translationNegative = translationNegative.Translation(f.x,f.y); translationPositive = translationPositive.Translation(-4, -4); rotation = rotation.Rotation(f.angleRadians * 180/ PI); CTransform3x3 finalTransformation = translationNegative * rotation * scaleTransform * translationPositive; //CFloatImage sample61x61Window = //CFloatImage pixelWindow = GetXWindowAroundPixel(grayImage, f.x, f.y, 61); WarpGlobal(blurredImage, postHomography, finalTransformation, eWarpInterpLinear, 1.0f); //now we get the 41x41 box around the feature for(int row=0; row< 8; row++) { for(int col=0;col< 8;col++) { f.data.push_back(postHomography.Pixel(col, row, 0)); } } /* // now we do the subsampling first round to reduce to a 20x20 int imgSize = 41; subsample(&f, imgSize, gaussianImage); //second round of subsampling to get it to a 10x10 imgSize = 20; subsample(&f, imgSize, gaussianImage); imgSize = 10; CFloatImage img = featureToImage(f, imgSize, imgSize); CFloatImage blurredImg(img.Shape()); Convolve(img, blurredImg, gaussianImage); 
featuresFromImage(&f,blurredImg,imgSize,imgSize); int count = 0; for(int y=0; y<imgSize; y++) { for(int x=0; x<imgSize; x++) { if(x == 3 || x == 7 || y == 3 || y == 7) { f.data.erase(f.data.begin() + count); } else { count++; } } } */ normalizeIntensities(&f, 8, 8); featureIterator++; } }
// Compute MOPs descriptors. void ComputeMOPSDescriptors(CFloatImage &image, FeatureSet &features) { int w = image.Shape().width; // image width int h = image.Shape().height; // image height // Create grayscale image used for Harris detection CFloatImage grayImage=ConvertToGray(image); // Apply a 7x7 gaussian blur to the grayscale image CFloatImage blurImage(w,h,1); Convolve(grayImage, blurImage, ConvolveKernel_7x7); // Transform matrices CTransform3x3 xform; CTransform3x3 trans1; CTransform3x3 rotate; CTransform3x3 scale; CTransform3x3 trans2; // Declare additional variables float pxl; // pixel value double mean, sq_sum, stdev; // variables for normailizing data set // This image represents the window around the feature you need to compute to store as the feature descriptor const int windowSize = 8; CFloatImage destImage(windowSize, windowSize, 1); for (vector<Feature>::iterator i = features.begin(); i != features.end(); i++) { Feature &f = *i; // Compute the transform from each pixel in the 8x8 image to sample from the appropriate // pixels in the 40x40 rotated window surrounding the feature trans1 = CTransform3x3::Translation(f.x, f.y); // translate window to feature point rotate = CTransform3x3::Rotation(f.angleRadians * 180.0 / PI); // rotate window by angle scale = CTransform3x3::Scale(5.0); // scale window by 5 trans2 = CTransform3x3::Translation(-windowSize/2, -windowSize/2); // translate window to origin // transform resulting from combining above transforms xform = trans1*scale*rotate*trans2; //Call the Warp Global function to do the mapping WarpGlobal(blurImage, destImage, xform, eWarpInterpLinear); // Resize data field for a 8x8 square window f.data.resize(windowSize * windowSize); // Find mean of window mean = 0; for (int y = 0; y < windowSize; y++) { for (int x = 0; x < windowSize; x++) { pxl = destImage.Pixel(x, y, 0); f.data[y*windowSize + x] = pxl; mean += pxl/(windowSize*windowSize); } } // Find standard deviation of window sq_sum = 0; for (int k 
= 0; k < windowSize*windowSize; k++) { sq_sum += (mean - f.data[k]) * (mean - f.data[k]); } stdev = sqrt(sq_sum/(windowSize*windowSize)); // Normalize window to have 0 mean and unit variance by subtracting // by mean and dividing by standard deviation for (int k = 0; k < windowSize*windowSize; k++) { f.data[k] = (f.data[k]-mean)/stdev; } } }
// Compute Simple descriptors. void ComputeSimpleDescriptors(CFloatImage &image, FeatureSet &features) { // Create grayscale image used for Harris detection CFloatImage grayImage=ConvertToGray(image); int w = grayImage.Shape().width; // image width int h = grayImage.Shape().height; // image height // Declare additional variables int newX, newY; // (x,y) coordinate for pixel in 5x5 sample window int padType = 0; // select variable for what type of padding to use: , 0->zero, 1->edge, 2->reflect // Iterate through feature set and store simple descriptors for each feature into // corresponding feature for (vector<Feature>::iterator i = features.begin(); i != features.end(); i++) { Feature &f = *i; // Set angle to 0 since simple descriptors do not include orientation f.angleRadians = 0; // Resize data field for a 5x5 square window f.data.resize(5 * 5); // The descriptor is a 5x5 window of intensities sampled centered on the feature point for (int j = 0; j < 25; j++) { find5x5Index(f.x,f.y,j,&newX,&newY); if(grayImage.Shape().InBounds(newX, newY)) { f.data[j] = grayImage.Pixel(newX, newY, 0); } else { // Depending on value of padType, perform different types of border padding switch (padType) { case 1: // 1 -> replicate border values if (newX < 0) { newX = 0; } else if (newX >= w) { newX = w-1; } if (newY < 0) { newY = 0; } else if (newY >= h) { newY = h-1; } f.data[j] = grayImage.Pixel(newX, newY, 0); break; case 2: // 2 -> reflect border pixels if (newX < 0) { newX = -newX; } else if (newX >= w) { newX = w-(newX%w)-1; } if (newY < 0) { newY = -newY; } else if (newY >= h) { newY = h-(newY%h)-1; } f.data[j] = grayImage.Pixel(newX, newY, 0); break; default: // 0 -> zero padding f.data[j] = 0; break; } } } } }