Example #1
cv::Mat SmartBlurFilter::computeBlur(cv::Mat inputMat, int threshold)
{
    cv::Mat blurMat;
    //  Kernel size is based on the picture size
    cv::GaussianBlur(inputMat, blurMat, cv::Size(BLUR_KERNEL, BLUR_KERNEL), 0);
    //cv::medianBlur(inputMat, blurMat, 5);
    //cv::blur(inputMat, blurMat, cv::Size(5,5));

    //  Convert to HSV so each pixel's hue/saturation can be tested (assumes BGR input, OpenCV's default)
    cv::Mat hsvMat;
    cv::cvtColor(inputMat, hsvMat, cv::COLOR_BGR2HSV);

    for (int j = PADDING; j < hsvMat.rows - PADDING; j++)
    {
        for (int i = PADDING; i < hsvMat.cols - PADDING; i++)
        {
            int h = hsvMat.at<cv::Vec3b>(j, i)[0];
            int s = hsvMat.at<cv::Vec3b>(j, i)[1];
            
            if (h > 90 && h < 130 && s > 30 && s < 180)
            {
                //  FACE: hue/saturation fall inside the target band, so smooth this pixel
                cv::Rect rect(i - HALF_WINDOWSIZE, j - HALF_WINDOWSIZE, WINDOWSIZE, WINDOWSIZE);
                cv::Mat roiMat = inputMat(rect);
                float rms = rmsError(roiMat);
                float amt = getAmount(rms, threshold);
                inputMat.at<cv::Vec3b>(j,i) = lerpPixel(inputMat.at<cv::Vec3b>(j,i), blurMat.at<cv::Vec3b>(j,i), amt);
                
            }
        }
    }
    return inputMat;
}
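
The helper routines called above (rmsError, getAmount and lerpPixel) are not part of the snippet. Below is a minimal sketch of what they could look like, assuming rmsError measures local contrast as the RMS deviation of the ROI from its mean and getAmount maps that contrast to a blend weight in [0, 1]; the names and signatures come from the call sites, but the bodies are hypothetical.

#include <opencv2/opencv.hpp>
#include <algorithm>

// Hypothetical sketches of the helpers used by computeBlur() -- not the original code.

// rmsError: RMS deviation of the ROI's intensity from its mean (i.e. local contrast).
static float rmsError(const cv::Mat& roi)
{
    cv::Mat gray;
    cv::cvtColor(roi, gray, cv::COLOR_BGR2GRAY);
    cv::Scalar mean, stddev;
    cv::meanStdDev(gray, mean, stddev);
    return static_cast<float>(stddev[0]);   // standard deviation == RMS deviation from the mean
}

// getAmount: blend fully in flat regions, not at all once contrast reaches the threshold.
static float getAmount(float rms, int threshold)
{
    if (threshold <= 0) return 0.0f;
    return std::max(0.0f, 1.0f - rms / static_cast<float>(threshold));
}

// lerpPixel: per-channel linear interpolation between the sharp and blurred pixel.
static cv::Vec3b lerpPixel(const cv::Vec3b& sharp, const cv::Vec3b& blurred, float amt)
{
    cv::Vec3b out;
    for (int c = 0; c < 3; ++c)
        out[c] = cv::saturate_cast<uchar>(sharp[c] + amt * (blurred[c] - sharp[c]));
    return out;
}

With these definitions, high-contrast windows (edges, eyes, hair) keep their original pixels, while flat skin regions are pulled towards the Gaussian-blurred image.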
Example #2
// Modified Steepest Descent Minimiser
bool FitKernel::sdModMinimise(Array<double>& alpha, double randomMin, double randomMax)
{
	// Control variables
	double gradientDelta = 0.01;
	const int maxIterations = 100;
	const double factor = 0.50;
	Array<double> gradient(alpha.nItems()), tempAlpha(alpha.nItems());
	double currentRMSE, deltaRMSE = 0.0, lambda = 1.0;
	bool converged = false;
	int n, i;

	// Loop over whole routine (up to maxSteps)
	int step = 0;
	do
	{
		// Get initial cost from supplied alpha
		double oldRMSE = rmsError(alpha);
		msg.print("Initial RMSE = %e\n", oldRMSE);

		// Check for RMSE being less than current tolerance
		if (oldRMSE < tolerance_)
		{
			msg.print("Current RMSE (%e) is below tolerance value (%e) so no point continuing minimisation.\n", oldRMSE, tolerance_);
			break;
		}

		// Randomise initial parameters to try to find a better starting set
		if ((modSDNRandomTrials_ > 0) && (step < modSDNRandomTrials_))
		{
			msg.print("Generating %i random values per alpha, within the range %e < alpha < %e.\n", modSDNRandomTrials_, randomMin, randomMax);
			for (n=0; n<alpha.nItems(); ++n)
			{
				tempAlpha = alpha;
				for (i=0; i<modSDNRandomTrials_; ++i)
				{
					tempAlpha[n] = UChromaMath::random() * (randomMax - randomMin) + randomMin;
					currentRMSE = rmsError(tempAlpha);
					if (currentRMSE < oldRMSE)
					{
						msg.print("Replaced current alpha with random parameters (deltaRMSE = %f).\n", currentRMSE - oldRMSE);
						oldRMSE = currentRMSE;
						alpha = tempAlpha;
					}
				}
			}
		}

		// Create initial gradient
		for (n=0; n<alpha.nItems(); ++n)
		{
			tempAlpha = alpha;
			tempAlpha[n] = (1.0+gradientDelta) * alpha[n];
			gradient[n] = rmsError(tempAlpha);
			tempAlpha[n] = (1.0-gradientDelta) * alpha[n];
			gradient[n] -= rmsError(tempAlpha);
		}
		gradient /= (2.0*gradientDelta);

		// Main iteration loop
		do
		{
			++step;

			// Normalise the gradient by its largest component (tiny components are zeroed just below)
			double maxGrad = fabs(gradient[0]);
			for (n=1; n<alpha.nItems(); ++n) if (fabs(gradient[n]) > maxGrad) maxGrad = fabs(gradient[n]);
			gradient /= maxGrad;
			for (n=0; n<alpha.nItems(); ++n) if (fabs(gradient[n]) < 0.01) gradient[n] = 0.0;

			bool reduced = false;
			double lastDeltaRMSE = 1.0;
			int nSwaps = 0;
			bool foundPoint = false;
			for (i=0; i<maxIterations; ++i)
			{
				if ((fabs(lambda) < 1.0e-4) && (deltaRMSE > 0.0))
				{
					lambda = (lambda < 0.0 ? 1.0 : -1.0);
					if (++nSwaps > 2) break;
				}
				// Update alpha and get RMSE
				for (n=0; n<alpha.nItems(); ++n) tempAlpha[n] = alpha[n] - gradient[n]*lambda;
				currentRMSE = rmsError(tempAlpha);

				// Current RMSE higher than old RMSE?
				deltaRMSE = currentRMSE - oldRMSE;
				if (deltaRMSE > 0.0)
				{
					lambda *= factor;
					reduced = true;
	// 				printf("   -- %03i Reduce (deltaRMSE = %f), lambda = %f \n", i, deltaRMSE, lambda);
				}
				else if (!reduced)
				{
					// The improvement has started to shrink, so step lambda back and accept this point
					if (deltaRMSE > lastDeltaRMSE)
					{
						lambda *= factor;
	// 					printf("   -- %03i Worse delta (%f), so reverting to lambda = %f \n", i, deltaRMSE, lambda);
						foundPoint = true;
						break;
					}
					// Still improving, so increase lambda (dividing by factor = 0.5 doubles it) and try again...
					lambda /= factor;
					lastDeltaRMSE = deltaRMSE;
	// 				printf("   -- %03i Incrse (deltaRMSE = %f), lambda = %f \n", i, deltaRMSE, lambda);
				}
				else
				{
					foundPoint = true;
					break;
				}
			}

			// Did we actually manage to reduce the RMSE?
			if (!foundPoint)
			{
				msg.print("Step %04i RMSE = %e (no better step found)\n", step, oldRMSE);
				break;
			}

			// Calculate new RMSE from current lambda
			for (n=0; n<alpha.nItems(); ++n) alpha[n] = alpha[n] - gradient[n]*lambda;
			currentRMSE = rmsError(alpha);
			deltaRMSE = currentRMSE - oldRMSE;

			// Check on convergence tolerance
			if (fabs(deltaRMSE) < tolerance_)
			{
				msg.print("Step %04i RMSE = %e (delta = %e) [CONVERGED, tolerance = %e]\n", step, currentRMSE, deltaRMSE, tolerance_);
				converged = true;
				break;
			}

			// Generate new gradient
			for (n=0; n<alpha.nItems(); ++n)
			{
				tempAlpha = alpha;
				tempAlpha[n] = (1.0+gradientDelta) * alpha[n];
				gradient[n] = rmsError(tempAlpha);
				tempAlpha[n] = (1.0-gradientDelta) * alpha[n];
				gradient[n] -= rmsError(tempAlpha);
			}
			gradient /= (2.0*gradientDelta);

			oldRMSE = currentRMSE;
			msg.print("Step %04i RMSE = %e (delta = %e)\n", step, oldRMSE, deltaRMSE);
		} while (step < maxSteps_);

		// Check for convergence
		if (converged) break;

	} while (step < maxSteps_);

	// Get final cost
	msg.print("Final RMSE = %e\n", rmsError(alpha));
	
	return converged;
}
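
The gradient in this minimiser is estimated numerically: each parameter is perturbed by a relative amount (1 ± gradientDelta) and a central difference of the cost is taken. Below is a standalone sketch of that scheme, using std::vector and std::function in place of the Array and rmsError types from the snippet (which are not shown here).

#include <vector>
#include <functional>

// Central-difference gradient with a relative perturbation of each parameter,
// mirroring the loops above. Note that dividing by 2*delta (rather than
// 2*delta*alpha[n]) yields the derivative scaled by the parameter magnitude,
// which is what the snippet computes.
std::vector<double> numericalGradient(const std::function<double(const std::vector<double>&)>& cost,
                                      const std::vector<double>& alpha, double delta = 0.01)
{
	std::vector<double> gradient(alpha.size());
	std::vector<double> temp = alpha;
	for (size_t n = 0; n < alpha.size(); ++n)
	{
		temp[n] = (1.0 + delta) * alpha[n];
		double plus = cost(temp);
		temp[n] = (1.0 - delta) * alpha[n];
		double minus = cost(temp);
		temp[n] = alpha[n];                 // restore before perturbing the next parameter
		gradient[n] = (plus - minus) / (2.0 * delta);
	}
	return gradient;
}

In the routine itself this gradient is then normalised by its largest component (with tiny components zeroed), so that lambda directly controls the size of the largest single-parameter step.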