Example #1
void imageHist(const Mat& input, Mat& histVector){
    Mat input_tmp;
    unsigned char *input_data;
    float *hist_data;
    Size inputSize;
    // Check whether the input is float or uchar
    if(input.type() == CV_32FC(1)){
        //printf("CV_32FC1 detected in imageHist!!\n");
        input.convertTo(input_tmp, CV_8UC(1), 255.0);
    }
    else if(input.type() == CV_8UC(1)){
        //printf("CV_8UC1 detected in imageHist!!\n");
        input_tmp = input;
    }
    else{
        printf("Error Type in imageHist!!!\n");
        exit(0);
    }
    input_data = input_tmp.data; // get input data
    inputSize = input_tmp.size(); // get input size
    histVector = Mat::zeros(1, 256, CV_32FC(1)); // get a zero mat
    //printf("%d\n", histVector.type()==CV_32FC(1));
    hist_data = (float*)histVector.data;
    // calculate the histogram
    for(int i=0; i<inputSize.width * inputSize.height; i++){
        int tmp = (int)input_data[i];
        hist_data[tmp]++;
    }
    return;
}
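A minimal usage sketch (not part of the original snippet), assuming imageHist from Example #1 is in scope and "input.png" is a hypothetical grayscale file:

#include <opencv2/opencv.hpp>
using namespace cv;

int main(){
    Mat img = imread("input.png", IMREAD_GRAYSCALE); // loads as CV_8UC1
    Mat hist;
    imageHist(img, hist);        // hist: 1x256, CV_32FC1, raw pixel counts
    hist /= (double)img.total(); // optional: turn counts into probabilities
    return 0;
}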
Example #2
static NODE_IMPLEMENTATION(resize, Pointer)
{
    MuLangContext* context = static_cast<MuLangContext*>(NODE_THREAD.context());
    const Class*   c       = static_cast<const ImageType*>(NODE_THIS.type());
    ClassInstance* inObj   = NODE_ARG_OBJECT(0, ClassInstance);
    int            width   = NODE_ARG(1, int);
    int            height  = NODE_ARG(2, int);
    ClassInstance* outObj  = makeImage(context, c, width, height);
    ImageStruct*   inIm    = inObj->data<ImageStruct>();
    ImageStruct*   outIm   = outObj->data<ImageStruct>();

    CvMat inMat;
    CvMat outMat;

    cvInitMatHeader(&inMat,
                    inIm->height,
                    inIm->width,
                    CV_32FC(4),
                    inIm->data->data<float>(),
                    0);

    cvInitMatHeader(&outMat,
                    outIm->height,
                    outIm->width,
                    CV_32FC(4),
                    outIm->data->data<float>(),
                    0);

    cvResize(&inMat, &outMat, CV_INTER_AREA);

    NODE_RETURN(outObj);
}
Example #3
Surf::Surf(Size size)
{
	W = size.width;
	H = size.height;
	C = 0;
	step = 0;
	grad = Mat(H, W, CV_32FC(L));
	zero = Mat(1, L, CV_32FC1);
	flag = Mat(H, W, CV_32SC1);
	hist = Mat(H, W, CV_32FC(L));
	norm = Mat(H, W, CV_32FC1);
}
Example #4
int main(int argc, char *argv[])
{
	clock_t start, end;

	start = clock();
    //Load the image from the args
    cv::Mat image = cv::imread(argv[1]);
    HOGFeatures(image).convertTo(image, CV_32FC(32));

    cv::Mat myTemplate = cv::imread(argv[2]);
	HOGFeatures(myTemplate).convertTo(myTemplate, CV_32FC(32));

    cv::Mat output;

    cv::matchTemplate(image, myTemplate, output, CV_TM_CCORR);
	 

	float minNum = 1000000000;
	float maxNum = -1000000000;

	for(int row = 0; row < output.rows; row++)
    {
        float* rowPtr = output.ptr<float>(row);

        for(int col = 0; col < output.cols; col++)
        {
            if(rowPtr[col] < minNum){
                minNum = rowPtr[col];
            }
            if(rowPtr[col] > maxNum){
                maxNum = rowPtr[col];
            }
        }
    }

    output -= minNum;
    output *= (1 / (maxNum - minNum));

    resize(output, output, cv::Size(), 8, 8);
	
	end = clock();

	std::cout << "Time required for execution: " << (double)(end-start)/CLOCKS_PER_SEC << " seconds." << std::endl;

    // show the window
    cv::imshow("output", output);
    cv::waitKey(0);

    // CV_64FC(32) == cv::DataType<cv::Vec<double, 32> >::type
    // CV_32FC(32) == cv::DataType<cv::Vec<float, 32> >::type
}
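The equivalences noted in the closing comment can be verified at compile time; a small sketch (assuming C++11 and the OpenCV core headers), since CV_32FC(n) is just CV_MAKETYPE(CV_32F, n):

#include <opencv2/core.hpp>
static_assert(CV_32FC(32) == CV_MAKETYPE(CV_32F, 32), "CV_32FC expands to CV_MAKETYPE");
static_assert(CV_32FC(32) == cv::DataType<cv::Vec<float, 32> >::type, "matches DataType for Vec<float, 32>");
static_assert(CV_64FC(32) == cv::DataType<cv::Vec<double, 32> >::type, "likewise for double");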
Example #5
Mat* convertToMat(const float* data, int height, int width, int channels,
		int misalign){
    Mat* output = new Mat(Size_<int>(width, height), CV_32FC(channels));

    int x, y, c;

    if (channels == 1) {
	for(c=0; c < channels; c++)
	    for(y=0;y<height;y++)
		for(x=0;x<width;x++)
		    output->at< Vec<float, 1> >(y, x)[c] = 
			data[x*height+y + c*height*width];

    }else if (channels == 2) {
	for(c=0; c < channels; c++)
	    for(y=0;y<height;y++)
		for(x=0;x<width;x++)
		    output->at< Vec<float, 2> >(y, x)[c] =
			data[x*height+y + c*height*width];

    }else if (channels == 3) {
	for(c=0; c < channels; c++)
	    for(y=0;y<height;y++)
		for(x=0;x<width;x++)
		    output->at< Vec<float, 3> >(y, x)[c] =
			data[x*height+y + c*height*width];
    }
    
    return output;
};
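The three branches above differ only in the Vec length, and both channels > 3 and the misalign parameter are silently ignored. A channel-generic sketch of the same copy (same plane-major, column-major source layout assumed):

Mat* convertToMatGeneric(const float* data, int height, int width, int channels){
    Mat* output = new Mat(Size_<int>(width, height), CV_32FC(channels));
    for(int y = 0; y < height; y++){
        float* row = output->ptr<float>(y); // interleaved: 'channels' floats per pixel
        for(int x = 0; x < width; x++)
            for(int c = 0; c < channels; c++)
                row[x*channels + c] = data[x*height + y + c*height*width];
    }
    return output;
}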
Example #6
void addGaussNoise(cv::Mat& image, double sigma)
{
    cv::Mat noise(image.size(), CV_32FC(image.channels()));
    cvtest::TS::ptr()->get_rng().fill(noise, cv::RNG::NORMAL, 0.0, sigma);

    cv::addWeighted(image, 1.0, noise, 1.0, 0.0, image, image.depth());
}
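Outside the test framework, the same noise injection can be sketched with cv::theRNG() as a stand-in for cvtest's RNG (a hypothetical variant, not the author's code):

void addGaussNoisePlain(cv::Mat& image, double sigma)
{
    cv::Mat noise(image.size(), CV_32FC(image.channels()));
    cv::theRNG().fill(noise, cv::RNG::NORMAL, 0.0, sigma);
    cv::addWeighted(image, 1.0, noise, 1.0, 0.0, image, image.depth());
}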
Example #7
void cv::viz::vtkTrajectorySource::SetTrajectory(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat traj;
    _traj.getMat().convertTo(traj, CV_64F);
    const Affine3d* dpath = traj.ptr<Affine3d>();
    size_t total = traj.total();

    points = vtkSmartPointer<vtkPoints>::New();
    points->SetDataType(VTK_DOUBLE);
    points->SetNumberOfPoints((vtkIdType)total);

    tensors = vtkSmartPointer<vtkDoubleArray>::New();
    tensors->SetNumberOfComponents(9);
    tensors->SetNumberOfTuples((vtkIdType)total);

    for(size_t i = 0; i < total; ++i, ++dpath)
    {
        Matx33d R = dpath->rotation().t();  // transposed because of
        tensors->SetTuple((vtkIdType)i, R.val);        // column major order

        Vec3d p = dpath->translation();
        points->SetPoint((vtkIdType)i, p.val);
    }
}
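A sketch of building an input that satisfies the CV_64FC(16) assert above: OpenCV's core/affine.hpp defines DataType for Affine3d, so a std::vector<cv::Affine3d> is viewed as a Mat whose elements are 16-channel doubles (the poses here are hypothetical):

std::vector<cv::Affine3d> path;
path.push_back(cv::Affine3d::Identity());
path.push_back(cv::Affine3d(cv::Vec3d(0, 0, 0.1),  // rvec: small rotation about z
                            cv::Vec3d(1, 0, 0)));  // translation
cv::Mat traj(path); // Nx1, CV_64FC(16): one 4x4 pose matrix per element
CV_Assert(traj.type() == CV_64FC(16));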
Example #8
void OpenCVPicture::loadDataWithoutScalingRemoveMeanColor(int flags) {
    cv::Mat temp = cv::imread(filename, flags);
    if (temp.empty()) {
        std::cout << "Error : Image " << filename << " cannot be loaded..."
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    std::vector<float> meanColor(temp.channels());
    for (int i = 0; i < temp.channels(); ++i) {
        for (int y = 0; y < temp.rows; y++) {
            for (int x = 0; x < temp.cols; x++) {
                int c = temp.ptr()[i + x * temp.channels() +
                                   y * temp.channels() * temp.cols];
                meanColor[i] += c;
            }
        }
    }
    for (int i = 0; i < temp.channels(); ++i)
        meanColor[i] /= temp.rows * temp.cols;
    temp.convertTo(mat, CV_32FC(temp.channels()));
    float *matData = ((float *)(mat.data));
    for (int i = 0; i < mat.channels(); ++i)
        for (int y = 0; y < temp.rows; y++)
            for (int x = 0; x < temp.cols; x++)
                // matData[i + x * mat.channels() + y * mat.channels() * mat.cols] -=
                //     meanColor[i];
                matData[i + x * mat.channels() + y * mat.channels() * mat.cols] =
                    128 +
                    (matData[i + x * mat.channels() + y * mat.channels() * mat.cols] -
                     meanColor[i]) /
                    2;
    backgroundColor = 128;
    xOffset = -mat.cols / 2;
    yOffset = -mat.rows / 2;
}
Example #9
OpenCVPicture::OpenCVPicture(int xSize, int ySize, int nInputFeatures,
                             unsigned char backgroundColor, int label)
    : Picture(label), backgroundColor(backgroundColor) {
  xOffset = -xSize / 2;
  yOffset = -ySize / 2;
  mat.create(xSize, ySize, CV_32FC(nInputFeatures));
}
Example #10
void Detector::TrainDetector(const Mat& trainSample, const Mat& trainLabel,
	const int* featureSizes, const CvSVMParams& params) {
	this->train(trainSample, trainLabel, Mat(), Mat(), params);
	Mat tem1 = TransSV2Detector();
	Mat tmp2(featureSizes[0], featureSizes[1], CV_32FC(featureSizes[2]), tem1.data);
	tmp2.copyTo(detector);
}
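Note that the tmp2 constructor above copies no pixels: it lays a featureSizes[0] x featureSizes[1] header with featureSizes[2] channels over tem1's existing buffer. A minimal sketch of the idiom with hypothetical sizes:

cv::Mat flat(1, 8*8*31, CV_32FC1);          // e.g. a flattened 8x8x31 template
cv::Mat cube(8, 8, CV_32FC(31), flat.data); // header over the same memory
CV_Assert(cube.data == flat.data);          // nothing is copied until copyTo()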
Example #11
cv::Mat PhotometricStereo::readCalibratedLights() {
    
    cv::Mat lightsInv = cv::Mat(height, width, CV_32FC(24), cv::Scalar::all(0));
    
    std::stringstream lmp;
    lmp << PATH_ASSETS << "lightMat.kaw";
    
    FILE *kawFile = fopen(lmp.str().c_str(), "rb");
    if (kawFile == NULL) {
        std::cerr << "ERROR: Could not open calibrated light matrix." << std::endl;
        return lightsInv;
    }
    
    /* get file size */
    long fSize;
    size_t res;
    fseek(kawFile, 0, SEEK_END);
    fSize = ftell(kawFile);
    rewind(kawFile);
    
    /* reading data */
    res = fread(lightsInv.data, 1, sizeof(float)*height*width*lightsInv.channels(), kawFile);
    if (res != (size_t)fSize) {
        std::cerr << "ERROR: Error while reading calibrated light matrix." << std::endl;
    }
    fclose(kawFile);
    
    return lightsInv;
}
Example #12
void OpenCVPicture::loadDataWithoutScaling(int flags) {
    if (!rawData.empty()) {
        cv::Mat temp = cv::imdecode(rawData, flags);
        temp.convertTo(mat, CV_32FC(temp.channels()));
    } else {
        cv::Mat temp = cv::imread(filename, flags);
        if (temp.empty()) {
            std::cout << "Error : Image " << filename << " cannot be loaded..."
                      << std::endl;
            exit(EXIT_FAILURE);
        }
        temp.convertTo(mat, CV_32FC(temp.channels()));
    }
    xOffset = -mat.cols / 2;
    yOffset = -mat.rows / 2;
}
Example #13
void PixelStabilizer::init(const cv::Mat& frame)
{
    int width = frame.cols;
    int height = frame.rows;

    // compute derivative of reference image
    cv::Mat frame_blurred, frame_blurred0;
    cv::Mat gradIx, gradIy;
    cv::GaussianBlur(frame, frame_blurred0, cv::Size(5, 5), 1.);
    frame_blurred0.convertTo(frame_blurred, CV_32F);
    cv::Sobel(frame_blurred, gradIx, CV_32F, 1, 0);
    cv::Sobel(frame_blurred, gradIy, CV_32F, 0, 1);

    // compute A
    float one_over_N = 1./(width * height);
    Matx88f A = 0;
    Ji_ = cv::Mat::zeros(height, width, CV_32FC(8));
    for (int i=0; i<height; ++i) {
        for (int j=0; j<width; ++j) {
            float x = static_cast<float> (j);
            float y = static_cast<float> (i);
            cv::Matx<float, 2, 8> Jx = {x,   y,   1.f, 0.f, 0.f, 0.f, -x*x, -x*y,
                                        0.f, 0.f, 0.f, x,   y,   1.f, -x*y, -y*y};
            cv::Vec2f gradI (gradIx.at<float>(i, j), gradIy.at<float>(i, j));

            A += one_over_N * Jx.t() * gradI * gradI.t() * Jx;
            Ji_.at<Vec8f>(i, j) =  Jx.t() * gradI;
        }
    }

    Ainv_ = A.inv();
}
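Ji_ pairs CV_32FC(8) storage with Vec8f element access; assuming Vec8f is the project's typedef for cv::Vec<float, 8> (OpenCV itself stops at Vec6f), the match can be checked at compile time:

typedef cv::Vec<float, 8> Vec8f; // presumably how the project defines it
static_assert(CV_32FC(8) == cv::DataType<Vec8f>::type,
              "Vec8f element access matches CV_32FC(8) storage");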
Example #14
/**
* Convert gdal type to opencv type
*/
int KGDAL2CV::gdal2opencv(const GDALDataType& gdalType, const int& channels){

	switch (gdalType){

		/// UInt8
	case GDT_Byte:
		if (channels == 1){ return CV_8UC1; }
		if (channels == 3){ return CV_8UC3; }
		if (channels == 4){ return CV_8UC4; }
		else { return CV_8UC(channels); }
		return -1;

		/// UInt16
	case GDT_UInt16:
		if (channels == 1){ return CV_16UC1; }
		if (channels == 3){ return CV_16UC3; }
		if (channels == 4){ return CV_16UC4; }
		else { return CV_16UC(channels); }
		return -1;

		/// Int16
	case GDT_Int16:
		if (channels == 1){ return CV_16SC1; }
		if (channels == 3){ return CV_16SC3; }
		if (channels == 4){ return CV_16SC4; }
		else { return CV_16SC(channels); }
		return -1;

		/// UInt32
	case GDT_UInt32:
	case GDT_Int32:
		if (channels == 1){ return CV_32SC1; }
		if (channels == 3){ return CV_32SC3; }
		if (channels == 4){ return CV_32SC4; }
		else { return CV_32SC(channels); }
		return -1;

	case GDT_Float32:
		if (channels == 1){ return CV_32FC1; }
		if (channels == 3){ return CV_32FC3; }
		if (channels == 4){ return CV_32FC4; }
		else { return CV_32FC(channels); }
		return -1;

	case GDT_Float64:
		if (channels == 1){ return CV_64FC1; }
		if (channels == 3){ return CV_64FC3; }
		if (channels == 4){ return CV_64FC4; }
		else { return CV_64FC(channels); }
		return -1;

	default:
		std::cout << "Unknown GDAL Data Type" << std::endl;
		std::cout << "Type: " << GDALGetDataTypeName(gdalType) << std::endl;
		return -1;
	}

	return -1;
}
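Each CV_xxC(n) macro is CV_MAKETYPE(depth, n), so the per-channel branches above collapse into one lookup per depth; a behavior-equivalent sketch (minus the logging), assuming the same GDAL and OpenCV headers:

int gdal2opencvCompact(const GDALDataType& gdalType, int channels){
    switch (gdalType){
    case GDT_Byte:    return CV_MAKETYPE(CV_8U,  channels);
    case GDT_UInt16:  return CV_MAKETYPE(CV_16U, channels);
    case GDT_Int16:   return CV_MAKETYPE(CV_16S, channels);
    case GDT_UInt32:
    case GDT_Int32:   return CV_MAKETYPE(CV_32S, channels);
    case GDT_Float32: return CV_MAKETYPE(CV_32F, channels);
    case GDT_Float64: return CV_MAKETYPE(CV_64F, channels);
    default:          return -1; // unknown GDAL data type
    }
}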
Example #15
Mat IntegralChannelHistogramFilter::applyTo(const Mat& image, Mat& filtered) const {
	if (image.depth() != CV_32S)
		throw invalid_argument("IntegralChannelHistogramFilter: the image must have a depth of CV_32S");

	unsigned int bins = image.channels();
	double shift = 1.0 - overlap;
	double blockWidth = (image.cols - 1) / (1 + (blockColumns - 1) * shift);
	double blockHeight = (image.rows - 1) / (1 + (blockRows - 1) * shift);
	filtered.create(blockRows, blockColumns, CV_32FC(bins));
	float* histogramValues = filtered.ptr<float>();
	float factor = 1.f / 255.f;
	for (unsigned int i = 0; i < blockRows; ++i) {
		for (unsigned int j = 0; j < blockColumns; ++j) {
			int top = cvRound(i * shift * blockHeight);
			int bottom = cvRound(i * shift * blockHeight + blockHeight);
			int left = cvRound(j * shift * blockWidth);
			int right = cvRound(j * shift * blockWidth + blockWidth);
			int* tl = reinterpret_cast<int*>(image.data + top * image.step[0] + left * image.step[1]);
			int* tr = reinterpret_cast<int*>(image.data + top * image.step[0] + right * image.step[1]);
			int* bl = reinterpret_cast<int*>(image.data + bottom * image.step[0] + left * image.step[1]);
			int* br = reinterpret_cast<int*>(image.data + bottom * image.step[0] + right * image.step[1]);
			float l1norm = eps;
			float l2normSquared = eps * eps;
			for (unsigned int b = 0; b < bins; ++b) {
				histogramValues[b] = factor * (tl[b] + br[b] - tr[b] - bl[b]);
				l1norm += histogramValues[b];
				l2normSquared += histogramValues[b] * histogramValues[b];
			}
			if (normalization == Normalization::L2HYS) {
				float normalizer = 1.f / sqrt(l2normSquared);
				l2normSquared = eps * eps;
				for (unsigned int b = 0; b < bins; ++b) {
					histogramValues[b] = std::min(0.2f, normalizer * histogramValues[b]);
					l2normSquared += histogramValues[b] * histogramValues[b];
				}
				normalizer = 1.f / sqrt(l2normSquared);
				for (unsigned int b = 0; b < bins; ++b)
					histogramValues[b] *= normalizer;
			} else {
				float normalizer = 1;
				if (normalization == Normalization::L2NORM)
					normalizer = 1.f / sqrt(l2normSquared);
				else // normalization == Normalization::L1NORM || normalization == Normalization::L1SQRT
					normalizer = 1.f / l1norm;
				if (normalization == Normalization::L1SQRT) {
					for (unsigned int b = 0; b < bins; ++b)
						histogramValues[b] = sqrt(normalizer * histogramValues[b]);
				} else {
					for (unsigned int b = 0; b < bins; ++b)
						histogramValues[b] *= normalizer;
				}
			}
			histogramValues += bins;
		}
	}
	return filtered;
}
Example #16
TEST(blobFromImage, allocated)
{
    int size[] = {1, 3, 4, 5};
    Mat img(size[2], size[3], CV_32FC(size[1]));
    Mat blob(4, size, CV_32F);
    void* blobData = blob.data;
    dnn::blobFromImage(img, blob, 1.0 / 255, Size(), Scalar(), false, false);
    ASSERT_EQ(blobData, blob.data);
}
Example #17
Mat CompleteExtendedHogFilter::applyTo(const Mat& image, Mat& filtered) const {
	size_t cellRowCount = image.rows / cellSize;
	size_t cellColumnCount = image.cols / cellSize;
	size_t descriptorSize = binCount + (signedGradients && unsignedGradients ? binCount / 2 : 0) + 4;
	filtered = Mat::zeros(cellRowCount, cellColumnCount, CV_32FC(descriptorSize));
	buildInitialHistograms(filtered, image, cellRowCount, cellColumnCount);
	buildDescriptors(filtered, cellRowCount, cellColumnCount, descriptorSize);
	return filtered;
}
Example #18
void unsharpFiltering(const Mat& input, const Mat& boxMask, float scale, Mat& output, Mat& scaledUnsharp, Mat& blurredInput){
    Mat input_tmp;
    float* input_data;
    float* blur_data;
    float* scaled_data;
    float* output_data;
    Size inputSize;
    double t = (double)getTickCount();
    // Check whether the input is float or uchar
    if(input.type() == CV_8UC(1)){
        input.convertTo(input_tmp, CV_32FC(1), 1/255.0);
    }
    else if(input.type() == CV_32FC(1)){
        input_tmp = input;
    }
    else{
        printf("Error Type in unsharpFiltering!!!\n");
        exit(0);
    }
    // blur the input
    spatialFiltering(input_tmp, boxMask, blurredInput);
    inputSize = input.size();
    output.create(inputSize, CV_32FC(1));
    scaledUnsharp.create(inputSize, CV_32FC(1));
    input_data = (float*)input_tmp.data;
    blur_data = (float*)blurredInput.data;
    output_data = (float*)output.data;
    scaled_data = (float*)scaledUnsharp.data;
    // start doing unsharp
    for(int i=0; i<inputSize.width*inputSize.height; i++){
        float buf;
        scaled_data[i] = scale * (input_data[i] - blur_data[i]);// get scaledUnsharp
        buf = input_data[i] + scaled_data[i];
        if(buf > 1) // clamp to [0,1]: the input was normalized by 1/255 above
            buf = 1;
        else if(buf < 0)
            buf = 0;
        output_data[i] = buf; //write output data
    }
    t = ((double)getTickCount() - t)*1000/getTickFrequency();
    printf("unsharpFiltering total consume %gms\n", t);// get the processing time
    return;
}
Example #19
Mat* convertToMatChannel(const float* data, int height, int width, int channels,
		int channel, int misalign){
    Mat* output = new Mat(Size_<int>(width, height), CV_32FC(1));

    int x, y, c = channel;
    for(y=0;y<height;y++)
	for(x=0;x<width;x++)
	    output->at< float >(y, x) = data[x*height + y + c*height*width];

    return output;
};
Example #20
    //! re-initialization method
    void initialize(Size _frameSize, int _frameType)
    {
        frameSize = _frameSize;
        frameType = _frameType;
        nframes = 0;

        int nchannels = CV_MAT_CN(frameType);
        CV_Assert( nchannels <= CV_CN_MAX );
        CV_Assert( nmixtures <= 255);

#ifdef HAVE_OPENCL
        if (ocl::isOpenCLActivated() && opencl_ON)
        {
            create_ocl_apply_kernel();

            bool isFloat = CV_MAKETYPE(CV_32F,nchannels) == frameType;
            kernel_getBg.create("getBackgroundImage2_kernel", ocl::video::bgfg_mog2_oclsrc, format( "-D CN=%d -D FL=%d -D NMIXTURES=%d", nchannels, isFloat, nmixtures));

            if (kernel_apply.empty() || kernel_getBg.empty())
                opencl_ON = false;
        }
        else opencl_ON = false;

        if (opencl_ON)
        {
            u_weight.create(frameSize.height * nmixtures, frameSize.width, CV_32FC1);
            u_weight.setTo(Scalar::all(0));

            u_variance.create(frameSize.height * nmixtures, frameSize.width, CV_32FC1);
            u_variance.setTo(Scalar::all(0));

            if (nchannels==3)
                nchannels=4;
            u_mean.create(frameSize.height * nmixtures, frameSize.width, CV_32FC(nchannels)); //4 channels
            u_mean.setTo(Scalar::all(0));

            //make the array for keeping track of the used modes per pixel - all zeros at start
            u_bgmodelUsedModes.create(frameSize, CV_8UC1);
            u_bgmodelUsedModes.setTo(cv::Scalar::all(0));
        }
        else
#endif
        {
            // for each gaussian mixture of each pixel bg model we store ...
            // the mixture weight (w),
            // the mean (nchannels values) and
            // the covariance
            bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );
            //make the array for keeping track of the used modes per pixel - all zeros at start
            bgmodelUsedModes.create(frameSize,CV_8U);
            bgmodelUsedModes = Scalar::all(0);
        }
    }
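The nchannels == 3 -> 4 bump above pads three-channel float pixels out to 16 bytes, presumably so the OpenCL kernels can use aligned float4 loads; the element sizes involved can be checked at compile time:

static_assert(CV_ELEM_SIZE(CV_32FC(3)) == 12 && CV_ELEM_SIZE(CV_32FC(4)) == 16,
              "a CV_32FC(3) pixel is 12 bytes; padding to 4 channels gives 16");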
Example #21
void laplacianFiltering(const Mat& input, const Mat& laplacianMask, float scale, Mat& output, Mat& scaledLaplacian){
    Mat input_tmp,tmp;
    float* input_data;
    float* output_data;
    float* scaleLap_data;
    double t = (double)getTickCount();
    Size inputSize;
    spatialFiltering(input, laplacianMask, scaledLaplacian); // get scaledLaplacian (not yet multiplied by scale)
    // Check whether the input is float or uchar
    if(input.type() == CV_8UC(1)){
        input.convertTo(input_tmp, CV_32FC(1), 1/255.0);
    }
    else if(input.type() == CV_32FC(1)){
        input_tmp = input;
    }
    else{
        printf("Error Type in laplacianFiltering!!!\n");
        exit(0);
    }
    inputSize = input.size();
    output.create(inputSize, CV_32FC(1));
    scaleLap_data = (float*)scaledLaplacian.data;
    input_data = (float*)input_tmp.data;
    output_data = (float*)output.data;
    // start doing laplace transform
    for(int p=0; p<inputSize.width*inputSize.height; p++){
        float buf;
        scaleLap_data[p] = scaleLap_data[p]*scale; // multiply by scale
        buf = input_data[p] + scaleLap_data[p]; // add to the original input
        if(buf>1) // clamp to [0,1]
            buf = 1;
        else if(buf<0)
            buf = 0;
        output_data[p] = buf;
    }
    t = (double)getTickCount()-t;
    printf("Laplacian total consume %gms\n", t*1000/getTickFrequency());// get processing time
    return;
}
Example #22
void cv::viz::writeTrajectory(InputArray _traj, const String& files_format, int start, const String& tag)
{
    if (_traj.kind() == _InputArray::STD_VECTOR_MAT)
    {
#if CV_MAJOR_VERSION < 3
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.obj;
#else
        std::vector<Mat>& v = *(std::vector<Mat>*)_traj.getObj();
#endif

        for(size_t i = 0, index = max(0, start); i < v.size(); ++i, ++index)
        {
            Affine3d affine;
            Mat pose = v[i];
            CV_Assert(pose.type() == CV_32FC(16) || pose.type() == CV_64FC(16));
            pose.copyTo(affine.matrix);
            writePose(cv::format(files_format.c_str(), index), affine, tag);
        }
        return;
    }

    if (_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT)
    {
        CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

        Mat traj = _traj.getMat();

        if (traj.depth() == CV_32F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3f>((int)i), tag);

        if (traj.depth() == CV_64F)
            for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index)
                writePose(cv::format(files_format.c_str(), index), traj.at<Affine3d>((int)i), tag);
        return;
    }

    CV_Error(Error::StsError, "Unsupported array kind");
}
Example #23
PERF_TEST_P(Size_CvtMode32F, DISABLED_cvtColor_32f,
            testing::Combine(
                testing::Values(::perf::szODD, ::perf::szVGA, ::perf::sz1080p),
                CvtMode32F::all()
                )
            )
{
    Size sz = get<0>(GetParam());
    int _mode = get<1>(GetParam()), mode = _mode;
    ChPair ch = getConversionInfo(mode);
    mode %= COLOR_COLORCVT_MAX;
    Mat src(sz, CV_32FC(ch.scn));
    Mat dst(sz, CV_32FC(ch.scn));

    declare.time(100);
    declare.in(src, WARMUP_RNG).out(dst);

    int runs = sz.width <= 320 ? 100 : 5;
    TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn);

    SANITY_CHECK_NOTHING();
}
Example #24
void ColorTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);

    if (m_curBrightnessRadius == 0 && m_curContrastRadius == 0 && m_curSaturationRadius == 0)
        return;

    if (mat.type() == CV_64FC(mat.channels()))
        Apply<double>(mat);
    else if (mat.type() == CV_32FC(mat.channels()))
        Apply<float>(mat);
    else
        RuntimeError("Unsupported type");
}
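Because mat.type() always equals CV_MAKETYPE(mat.depth(), mat.channels()), the two comparisons above are effectively depth tests; a hypothetical helper sketching the equivalence:

bool isFloatMat(const cv::Mat &mat)
{
    // same truth value as mat.type() == CV_32FC(mat.channels())
    return mat.depth() == CV_32F;
}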
Example #25
void IntensityTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);

    if (m_eigVal.empty() || m_eigVec.empty() || m_curStdDev == 0)
        return;

    if (mat.type() == CV_64FC(mat.channels()))
        Apply<double>(mat);
    else if (mat.type() == CV_32FC(mat.channels()))
        Apply<float>(mat);
    else
        RuntimeError("Unsupported type");
}
Example #26
void ColorTransformer::Apply(uint8_t, cv::Mat &mat)
{
    if (m_brightnessRadius == 0.0 && m_contrastRadius == 0.0 && m_saturationRadius == 0.0)
        return;

    // Have to convert to float
    ConvertToFloatingPointIfRequired(mat);

    if (mat.type() == CV_64FC(mat.channels()))
        Apply<double>(mat);
    else if (mat.type() == CV_32FC(mat.channels()))
        Apply<float>(mat);
    else
        RuntimeError("Unsupported type");
}
Example #27
void histEqualization(const Mat& input, Mat& output, Mat& T){
    Mat input_tmp;
    Mat histVector;
    unsigned char* input_data;
    unsigned char* output_data;
    unsigned char* T_data;
    float* hist_data;
    Size inputSize;
    double time_count;
    // Check whether the input is float or uchar
    if(input.type() == CV_32FC(1)){
        //printf("CV_32FC1 detected in histEqualization!!\n");
        input.convertTo(input_tmp, CV_8UC(1), 255.0);
    }
    else if(input.type() == CV_8UC(1)){
        //printf("CV_8UC1 detected in histEqualization!!\n");
        input_tmp = input;
    }
    else{
        printf("Error Type in histEqualization!!!\n");
        exit(0);
    }
    time_count = (double)getTickCount();
    T.create(1,256,CV_8UC(1));
    T_data = (unsigned char*)T.data;
    output.create(input.size(), CV_8UC(1));
    input_data = (unsigned char*)input_tmp.data;
    inputSize = input_tmp.size();
    output_data = (unsigned char*)output.data;
    imageHist(input, histVector); // get the image hist
    hist_data = (float*)histVector.data;

    float accum = 0; // running accumulated count
    int height = inputSize.height;
    int width = inputSize.width;
    // To get the transform function
    for(int i=0; i<256; i++){
        T_data[i] = (unsigned char)((255.0/(height*width))* (accum + hist_data[i]) + 0.5);
        accum += hist_data[i];
    }
    // To write the output by T
    for(int i=0; i<inputSize.width*inputSize.height; i++){
        output_data[i] = T_data[(int)input_data[i]];
    }
    time_count = (double)getTickCount() - time_count;
    printf("histEqualization Total consume %gms\n", time_count*1000/getTickFrequency());// get processing time
    return;
}
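A minimal usage sketch (assuming the functions above and using namespace cv are in scope, and "gray.png" is a hypothetical 8-bit image):

int main(){
    Mat src = imread("gray.png", IMREAD_GRAYSCALE);
    Mat dst, T;
    histEqualization(src, dst, T); // dst: equalized CV_8UC1; T: 1x256 lookup table
    imwrite("gray_eq.png", dst);
    return 0;
}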
Example #28
cv::Mat HogIntegralImageComputer::create_hog_maps(const cv::Mat &mags, const cv::Mat &qangles)
{
    cv::Mat maps(mags.rows, mags.cols, CV_32FC(9), cv::Scalar(0.0f));
    for (int i = 0; i < mags.rows; i++) {
        for (int j = 0; j < mags.cols; j++) {
            cv::Vec2b ang = qangles.at<cv::Vec2b>(i,j);
            cv::Vec2f mag = mags.at<cv::Vec2f>(i,j);
            cv::VecHogf v(0.0f);
            v[ang[0]] += mag[0];
            v[ang[1]] += mag[1];
            v[CC_HOG_CHANS] = mag[0] + mag[1]; // gradient magnitude...
            maps.at<cv::VecHogf>(i,j) = v;
        }
    }
    return maps;
}
Example #29
void CornerDetection::imageCB(const sensor_msgs::ImageConstPtr& msg)
{
	if(blockSize_harris == 0)
		blockSize_harris = 1;
	if(blockSize_shi == 0)
		blockSize_shi = 1;

	cv::Mat img, img_gray, myHarris_dst, myShiTomasi_dst;
	cv_bridge::CvImagePtr cvPtr;

	try
	{
		cvPtr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
	}
	catch (cv_bridge::Exception& e) 
	{
		ROS_ERROR("cv_bridge exception: %s", e.what());
		return;
	}
	cvPtr->image.copyTo(img);
	cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);

  myHarris_dst = cv::Mat::zeros( img_gray.size(), CV_32FC(6) );
  Mc = cv::Mat::zeros( img_gray.size(), CV_32FC1 );

  cv::cornerEigenValsAndVecs( img_gray, myHarris_dst, blockSize_harris, apertureSize, cv::BORDER_DEFAULT );

  for( int j = 0; j < img_gray.rows; j++ )
		for( int i = 0; i < img_gray.cols; i++ )
		 {
			float lambda_1 = myHarris_dst.at<cv::Vec6f>(j, i)[0];
			float lambda_2 = myHarris_dst.at<cv::Vec6f>(j, i)[1];
			Mc.at<float>(j,i) = lambda_1*lambda_2 - 0.04f*pow( ( lambda_1 + lambda_2 ), 2 );
		 }

  cv::minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, cv::Mat() );

  this->myHarris_function(img, img_gray);

  myShiTomasi_dst = cv::Mat::zeros( img_gray.size(), CV_32FC1 );
  cv::cornerMinEigenVal( img_gray, myShiTomasi_dst, blockSize_shi, apertureSize, cv::BORDER_DEFAULT );
  cv::minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, cv::Mat() );

  this->myShiTomasi_function(img, img_gray, myShiTomasi_dst);

  cv::waitKey(2);
}
Example #30
void BackgroundSubtractorLOBSTER::getBackgroundImage(cv::OutputArray backgroundImage) const {
	CV_DbgAssert(m_bInitialized);
	cv::Mat oAvgBGImg = cv::Mat::zeros(m_oImgSize,CV_32FC((int)m_nImgChannels));
	for(size_t s=0; s<m_nBGSamples; ++s) {
		for(int y=0; y<m_oImgSize.height; ++y) {
			for(int x=0; x<m_oImgSize.width; ++x) {
				const size_t idx_nimg = m_voBGColorSamples[s].step.p[0]*y + m_voBGColorSamples[s].step.p[1]*x;
				const size_t idx_flt32 = idx_nimg*4;
				float* oAvgBgImgPtr = (float*)(oAvgBGImg.data+idx_flt32);
				const uchar* const oBGImgPtr = m_voBGColorSamples[s].data+idx_nimg;
				for(size_t c=0; c<m_nImgChannels; ++c)
					oAvgBgImgPtr[c] += ((float)oBGImgPtr[c])/m_nBGSamples;
			}
		}
	}
	oAvgBGImg.convertTo(backgroundImage,CV_8U);
}
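The idx_flt32 = idx_nimg*4 step above hard-codes sizeof(float); an alternative sketch that lets Mat do the stride arithmetic instead (a hypothetical helper, not part of the original class):

static float* pixelPtr(cv::Mat& m, int y, int x)
{
    CV_DbgAssert(m.depth() == CV_32F);
    return m.ptr<float>(y) + x * m.channels(); // first channel of pixel (x, y)
}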