Code example #1
void ImagePyramid::update() {
    if (sourceImage) {
        if (version != sourceImage->getVersion()) {
            layers.clear();
            Mat filteredImage = imageFilter->applyTo(sourceImage->getData());
            // TODO if maxScale <= 0.5 -> apply pyrDown to the image first (etc.)
            for (size_t i = 0; i < octaveLayerCount; ++i) {
                double scaleFactor = pow(incrementalScaleFactor, i);
                Mat scaledImage;
                Size scaledImageSize(cvRound(filteredImage.cols * scaleFactor), cvRound(filteredImage.rows * scaleFactor));
                resize(filteredImage, scaledImage, scaledImageSize, 0, 0, cv::INTER_LINEAR);
                if (scaleFactor <= maxScaleFactor && scaleFactor >= minScaleFactor)
                    layers.push_back(make_shared<ImagePyramidLayer>(i, scaleFactor, layerFilter->applyTo(scaledImage)));
                Mat previousScaledImage = scaledImage;
                scaleFactor *= 0.5;
                for (size_t j = 1; scaleFactor >= minScaleFactor && previousScaledImage.cols > 1; ++j, scaleFactor *= 0.5) {
                    Mat downSampledImage;
                    // Halve the previous layer with pyrDown; the result becomes the source of the next octave step.
                    pyrDown(previousScaledImage, downSampledImage);
                    if (scaleFactor <= maxScaleFactor)
                        layers.push_back(make_shared<ImagePyramidLayer>(i + j * octaveLayerCount, scaleFactor, layerFilter->applyTo(downSampledImage)));
                    previousScaledImage = downSampledImage;
                }
            }
            std::sort(layers.begin(), layers.end(), [](const shared_ptr<ImagePyramidLayer>& a, const shared_ptr<ImagePyramidLayer>& b) {
                return a->getIndex() < b->getIndex();
            });
            if (!layers.empty())
                firstLayer = layers.front()->getIndex();
            version = sourceImage->getVersion();
        }
    } else if (sourcePyramid) {
        if (version != sourcePyramid->getVersion()) {
            incrementalScaleFactor = sourcePyramid->incrementalScaleFactor;
            layers.clear();
            for (const shared_ptr<ImagePyramidLayer>& layer : sourcePyramid->layers) {
                if (layer->getScaleFactor() > maxScaleFactor)
                    continue;
                if (layer->getScaleFactor() < minScaleFactor)
                    break;
                layers.push_back(make_shared<ImagePyramidLayer>(
                                     layer->getIndex(), layer->getScaleFactor(), layerFilter->applyTo(layer->getScaledImage())));
            }
            if (!layers.empty())
                firstLayer = layers.front()->getIndex();
            version = sourcePyramid->getVersion();
        }
    } else { // neither a source pyramid nor a source image is set, so the other parameters are missing as well
        Loggers->getLogger("ImageProcessing").warn("ImagePyramid: could not update because there is no source (image or pyramid)");
    }
}
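
The layer produced in octave step j from incremental layer i gets index i + j * octaveLayerCount and scale incrementalScaleFactor^i * 0.5^j. A minimal standalone sketch of that progression; the parameter values below are assumptions for illustration, with incrementalScaleFactor set to 2^(-1/octaveLayerCount) so that the indices enumerate the scales in descending order:

#include <cmath>
#include <cstdio>

int main() {
    // Assumed parameters: 4 incremental layers per octave, 3 octaves.
    const int octaveLayerCount = 4;
    const int octaveCount = 3;
    const double incrementalScaleFactor = std::pow(2.0, -1.0 / octaveLayerCount);
    for (int i = 0; i < octaveLayerCount; ++i) {
        for (int j = 0; j < octaveCount; ++j) {
            int index = i + j * octaveLayerCount;
            double scale = std::pow(incrementalScaleFactor, i) * std::pow(0.5, j);
            std::printf("index %2d  scale %.4f\n", index, scale);
        }
    }
    return 0;
}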
Code example #2
void ImagePyramidFeatureExtractor::init(const Mat& image) {
	clearLevels();
	Mat grayImage = image;
	if (grayImage.channels() > 1) {
		Mat temp;
		cvtColor(grayImage, temp, CV_BGR2GRAY);
		grayImage = temp;
	}
	Size minSize;
	minSize.height = max(featureSize.height, cvRound(minHeight * image.rows));
	minSize.width = max(featureSize.width, cvRound(minSize.height * featureSize.width / featureSize.height));
	Size maxSize;
	maxSize.height = min(image.rows, cvRound(maxHeight * image.rows));
	maxSize.width = min(image.cols, cvRound(maxSize.height * featureSize.width / featureSize.height));
	double factor = 1;
	for (int i = 0; ; ++i, factor *= scaleFactor) {
		Size scaledFeatureSize(cvRound(factor * featureSize.width), cvRound(factor * featureSize.height));
		if (scaledFeatureSize.width < minSize.width || scaledFeatureSize.height < minSize.height)
			continue;
		if (scaledFeatureSize.width > maxSize.width || scaledFeatureSize.height > maxSize.height)
			break;

		// All but the first scaled image use the previous scaled image as their base,
		// so the repeated scaling itself adds more and more blur to the image
		// and no additional Gaussian blur is applied.
		// When scaling the image down a lot, bilinear interpolation would introduce artefacts (aliasing of higher frequencies),
		// so the first down-scaling uses area interpolation, which produces much better results.
		// Bilinear interpolation is used for the subsequent down-scalings because it is faster and gives results similar to area interpolation.
		Mat scaledImage;
		Size scaledImageSize(cvRound(image.cols / factor), cvRound(image.rows / factor));
		if (levels.empty()) {
			firstLevel = i;
			resize(grayImage, scaledImage, scaledImageSize, 0, 0, cv::INTER_AREA);
		} else {
			resize(levels[levels.size() - 1]->getScaledImage(), scaledImage, scaledImageSize, 0, 0, cv::INTER_LINEAR);
		}
		initScale(scaledImage);
		levels.push_back(new PyramidLevel(factor, scaledImage));
	}
}
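
The comment above motivates the interpolation choice: cv::INTER_AREA averages over all source pixels covered by a destination pixel, which avoids aliasing on the first, large downscale, while cv::INTER_LINEAR samples only a 2x2 neighbourhood and is therefore faster but prone to aliasing at strong scale changes. A minimal sketch to compare the two on a single image (the file names are placeholders):

#include <opencv2/opencv.hpp>

int main() {
	cv::Mat image = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // placeholder file name
	if (image.empty())
		return 1;
	cv::Size smallSize(image.cols / 8, image.rows / 8);
	cv::Mat areaScaled, linearScaled;
	// Area interpolation integrates over all covered source pixels: slower, but no aliasing.
	cv::resize(image, areaScaled, smallSize, 0, 0, cv::INTER_AREA);
	// Bilinear interpolation samples only the four nearest pixels: faster, but can alias at large factors.
	cv::resize(image, linearScaled, smallSize, 0, 0, cv::INTER_LINEAR);
	cv::imwrite("area.png", areaScaled);
	cv::imwrite("linear.png", linearScaled);
	return 0;
}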
Code example #3
QRectF ImageEditorScene::scaledImageRect() const {
	QRectF rect;
	rect.setTopLeft(m_pixmap->pos());
	rect.setSize(scaledImageSize());
	return rect;
}
Code example #4
File: cascadedetect.cpp  Project: MarkBelmont/opencv
void CascadeClassifier::detectMultiScaleNoGrouping( const Mat& image, std::vector<Rect>& candidates,
                                                    std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
                                                    double scaleFactor, Size minObjectSize, Size maxObjectSize,
                                                    bool outputRejectLevels )
{
    candidates.clear();

    if (!maskGenerator.empty())
        maskGenerator->initializeMask(image);

    if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
        maxObjectSize = image.size();

    Mat grayImage = image;
    if( grayImage.channels() > 1 )
    {
        Mat temp;
        cvtColor(grayImage, temp, COLOR_BGR2GRAY);
        grayImage = temp;
    }

    Mat imageBuffer(image.rows + 1, image.cols + 1, CV_8U);

    for( double factor = 1; ; factor *= scaleFactor )
    {
        Size originalWindowSize = getOriginalWindowSize();

        Size windowSize( cvRound(originalWindowSize.width*factor), cvRound(originalWindowSize.height*factor) );
        Size scaledImageSize( cvRound( grayImage.cols/factor ), cvRound( grayImage.rows/factor ) );
        Size processingRectSize( scaledImageSize.width - originalWindowSize.width, scaledImageSize.height - originalWindowSize.height );

        if( processingRectSize.width <= 0 || processingRectSize.height <= 0 )
            break;
        if( windowSize.width > maxObjectSize.width || windowSize.height > maxObjectSize.height )
            break;
        if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
            continue;

        Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
        resize( grayImage, scaledImage, scaledImageSize, 0, 0, INTER_LINEAR );

        int yStep;
        if( getFeatureType() == cv::FeatureEvaluator::HOG )
        {
            yStep = 4;
        }
        else
        {
            yStep = factor > 2. ? 1 : 2;
        }

        int stripCount, stripSize;

        const int PTS_PER_THREAD = 1000;
        stripCount = ((processingRectSize.width/yStep)*(processingRectSize.height + yStep-1)/yStep + PTS_PER_THREAD/2)/PTS_PER_THREAD;
        stripCount = std::min(std::max(stripCount, 1), 100);
        stripSize = (((processingRectSize.height + stripCount - 1)/stripCount + yStep-1)/yStep)*yStep;

        if( !detectSingleScale( scaledImage, stripCount, processingRectSize, stripSize, yStep, factor, candidates,
            rejectLevels, levelWeights, outputRejectLevels ) )
            break;
    }
}
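
The stripCount/stripSize arithmetic above partitions the scaled image into horizontal strips of roughly PTS_PER_THREAD candidate window positions each, so that detectSingleScale can hand one strip per task to the parallel framework. The same computation in isolation, with made-up processing-rectangle and step values:

#include <algorithm>
#include <cstdio>

int main() {
    // Assumed values for illustration: a 640x480 processing rectangle and a yStep of 2.
    const int width = 640, height = 480, yStep = 2;
    const int PTS_PER_THREAD = 1000;
    int stripCount = ((width/yStep)*(height + yStep - 1)/yStep + PTS_PER_THREAD/2)/PTS_PER_THREAD;
    stripCount = std::min(std::max(stripCount, 1), 100);
    // The strip height is rounded up to a multiple of yStep so rows of candidate windows are not split across strips.
    int stripSize = (((height + stripCount - 1)/stripCount + yStep - 1)/yStep)*yStep;
    std::printf("stripCount = %d, stripSize = %d\n", stripCount, stripSize);
    return 0;
}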