Example #1
0
KernelInit::KernelInit()
{
    // Precomputed 1-D separable convolution kernels.
    //
    // The arrays are function-local statics because ReAllocate() is called
    // with its copy flag set to false — presumably the CFloatImage keeps
    // pointing at this storage, which therefore must outlive the
    // constructor (TODO confirm against ReAllocate's documentation).
    static float k_121[3]   = {0.25f, 0.5f, 0.25f};
    static float k_14641[5] = {0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f};

    // 8-tap low-pass filter. The taps are /256 fixed-point quantizations of
    // the floating-point design
    //   {-0.044734f, -0.059009f, 0.156544f, 0.449199f,
    //     0.449199f,  0.156544f, -0.059009f, -0.044734f}
    // i.e. the mirrored integer taps -12, -15, 40, 115 (divided by 256).
    static float k_8ptI[8] = {-0.04687500f, -0.05859375f,  0.15625000f,  0.44921875f,
                               0.44921875f,  0.15625000f, -0.05859375f, -0.04687500f};

    // Wrap each kernel in its image and center it. origin[0] is the offset
    // of the kernel's hot spot relative to its first tap; this example uses
    // negative offsets — NOTE(review): confirm the sign convention against
    // the Convolve() implementation.
    ConvolveKernel_121.ReAllocate(CShape(3, 1, 1), k_121, false, 3);
    ConvolveKernel_121.origin[0] = -1;
    ConvolveKernel_14641.ReAllocate(CShape(5, 1, 1), k_14641, false, 5);
    ConvolveKernel_14641.origin[0] = -2;
    ConvolveKernel_8tapLowPass.ReAllocate(CShape(8, 1, 1), k_8ptI, false, 8);
    ConvolveKernel_8tapLowPass.origin[0] = -4;
}
Example #2
0
KernelInit::KernelInit()
{
    // Precomputed convolution kernels.
    //
    // The arrays are function-local statics because ReAllocate() is called
    // with its copy flag set to false — presumably the CFloatImage keeps
    // pointing at this storage, which therefore must outlive the
    // constructor (TODO confirm against ReAllocate's documentation).
    static float k_121[3]   = {0.25f, 0.5f, 0.25f};
    static float k_14641[5] = {0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f};

    // 8-tap low-pass filter. The taps are /256 fixed-point quantizations of
    // the floating-point design
    //   {-0.044734f, -0.059009f, 0.156544f, 0.449199f,
    //     0.449199f,  0.156544f, -0.059009f, -0.044734f}
    // i.e. the mirrored integer taps -12, -15, 40, 115 (divided by 256).
    static float k_8ptI[8] = {-0.04687500f, -0.05859375f,  0.15625000f,  0.44921875f,
                               0.44921875f,  0.15625000f, -0.05859375f, -0.04687500f};

    // 7x7 smoothing kernel, given as integer taps; the tap sum is 1115.
    static float k_7x7[49] = { 1.0, 4.0, 7.0, 10.0, 7.0, 4.0, 1.0,
                               4.0, 12.0, 26.0, 33.0, 26.0, 12.0, 4.0,
                               7.0, 26.0, 55.0, 71.0, 55.0, 26.0, 7.0,
                               10.0, 33.0, 71.0, 91.0, 71.0, 33.0, 10.0,
                               7.0, 26.0, 55.0, 71.0, 55.0, 26.0, 7.0,
                               4.0, 12.0, 26.0, 33.0, 26.0, 12.0, 4.0,
                               1.0, 4.0, 7.0, 10.0, 7.0, 4.0, 1.0 };

    // Normalize k_7x7 to unit sum exactly once. The array is static, so
    // running this division on every construction would shrink the kernel
    // by another factor of 1115 if a second KernelInit were ever created.
    static bool k7x7Normalized = false;
    if (!k7x7Normalized) {
        for (int i = 0; i < 49; i++) {
            k_7x7[i] /= 1115.0;   // 1115 == sum of all 49 taps
        }
        k7x7Normalized = true;
    }

    // Wrap each kernel in its image and set the kernel origin (the hot-spot
    // offset within the kernel).
    ConvolveKernel_121.ReAllocate(CShape(3, 1, 1), k_121, false, 3);
    ConvolveKernel_121.origin[0] = 1;
    ConvolveKernel_14641.ReAllocate(CShape(5, 1, 1), k_14641, false, 5);
    ConvolveKernel_14641.origin[0] = 2;
    ConvolveKernel_8tapLowPass.ReAllocate(CShape(8, 1, 1), k_8ptI, false, 8);
    ConvolveKernel_8tapLowPass.origin[0] = 4;
    // NOTE(review): no origin is set for the 7x7 kernel, unlike every other
    // kernel here — confirm whether origin (3, 3) is intended.
    ConvolveKernel_7x7.ReAllocate(CShape(7, 7, 1), k_7x7, false, 7);

    /* Sobel filters: standard 3x3 horizontal/vertical gradient kernels */
    static float k_SobelX[9] = { -1, 0, 1,
                                 -2, 0, 2,
                                 -1, 0, 1 };

    static float k_SobelY[9] = { -1, -2, -1,
                                  0,  0,  0,
                                  1,  2,  1 };

    ConvolveKernel_SobelX.ReAllocate(CShape(3, 3, 1), k_SobelX, false, 3);
    ConvolveKernel_SobelX.origin[0] = 1;
    ConvolveKernel_SobelX.origin[1] = 1;
    ConvolveKernel_SobelY.ReAllocate(CShape(3, 3, 1), k_SobelY, false, 3);
    ConvolveKernel_SobelY.origin[0] = 1;
    ConvolveKernel_SobelY.origin[1] = 1;
}
void
SupportVectorMachine::predictSlidingWindow(const Feature &feat, CFloatImage &response) const
{
    response.ReAllocate(CShape(feat.Shape().width, feat.Shape().height, 1));
    response.ClearPixels();

    /******** BEGIN TODO ********/
    // Sliding window prediction.
    //
    // In this project we are using a linear SVM. This means that
    // it's classification function is very simple, consisting of a
    // dot product of the feature vector with a set of weights learned
    // during training, followed by a subtraction of a bias term
    //
    //          pred <- dot(feat, weights) - bias term
    //
    // Now this is very simple to compute when we are dealing with
    // cropped images, our computed features have the same dimensions
    // as the SVM weights. Things get a little more tricky when you
    // want to evaluate this function over all possible subwindows of
    // a larger feature, one that we would get by running our feature
    // extraction on an entire image.
    //
    // Here you will evaluate the above expression by breaking
    // the dot product into a series of convolutions (remember that
    // a convolution can be though of as a point wise dot product with
    // the convolution kernel), each one with a different band.
    //
    // Convolve each band of the SVM weights with the corresponding
    // band in feat, and add the resulting score image. The final
    // step is to subtract the SVM bias term given by this->getBiasTerm().
    //
    // Hint: you might need to set the origin for the convolution kernel
    // in order to get the result from convoltion to be correctly centered
    //
    // Useful functions:
    // Convolve, BandSelect, this->getWeights(), this->getBiasTerm()

Feature weights = this->getWeights();
	int nWtBands = weights.Shape().nBands;
	
	// Set the center of the window as the origin for the conv. kernel
	for (int band = 0; band < nWtBands; band++)
	{
		// Select a band
		CFloatImage featBand;
		CFloatImage weightBand;
		BandSelect(feat, featBand, band, 0);
		BandSelect(weights, weightBand, band, 0);

		// Set the origin of the kernel
		weightBand.origin[0] = weights.Shape().width / 2;
		weightBand.origin[1] = weights.Shape().height / 2;
		
		// Compute the dot product
		CFloatImage dotproduct;
		dotproduct.ClearPixels();
		Convolve(featBand, dotproduct, weightBand);

		// Add the resulting score image
		for (int y = 0; y < feat.Shape().height; y++)
		{
			for (int x = 0; x < feat.Shape().width; x++)
			{
				response.Pixel(x, y, 0) += dotproduct.Pixel(x, y, 0);
			}
			// End of x loop
		}
		// End of y loop
	}
	// End of band loop
	
	// Substract the SVM bias term
	for (int y = 0; y < feat.Shape().height; y++)
	{
		for (int x = 0; x < feat.Shape().width; x++)
		{
			response.Pixel(x, y, 0) -= this->getBiasTerm();
		}
		// End of x loop
	}
	// End of y loop

    /******** END TODO ********/
}