Example #1
	template<class T>
	void blurRemoveMinMax_(const Mat& src, Mat& dest, const int r)
	{
		const Size ksize = Size(2 * r + 1, 2 * r + 1);
		if (src.data != dest.data)src.copyTo(dest);

		Mat xv;
		Mat nv;
		Mat element = Mat::ones(2 * r + 1, 2 * r + 1, CV_8U);
		dilate(src, xv, element);
		erode(src, nv, element);

		Mat mind;
		Mat maxd;
		Mat mask;
		absdiff(src, nv, mind);// these three whole-image operations could instead be fused into the per-pixel loop below
		absdiff(src, xv, maxd);
		min(mind, maxd, mask);

		T* n = nv.ptr<T>(0);
		T* x = xv.ptr<T>(0);
		T* d = dest.ptr<T>(0);
		T* nd = mind.ptr<T>(0);
		T* mk = mask.ptr<T>(0);

		int remsize = src.size().area();

#if CV_SSE4_1
		if (src.depth() == CV_8U)
		{

			const int ssesize = src.size().area() / 16;
			remsize = src.size().area() - ssesize * 16;
			for (int i = 0; i < ssesize; i++)
			{
				__m128i mmk = _mm_load_si128((__m128i*)mk);
				__m128i mnd = _mm_load_si128((__m128i*)nd);

				__m128i mmn = _mm_load_si128((__m128i*)n);
				__m128i mmx = _mm_load_si128((__m128i*)x);
				__m128i msk = _mm_cmpeq_epi8(mnd, mmk);
				_mm_stream_si128((__m128i*)d, _mm_blendv_epi8(mmx, mmn, msk));
				nd += 16;
				mk += 16;
				d += 16;
				n += 16;
				x += 16;
			}
		}
		else if (src.depth() == CV_16S || src.depth() == CV_16U)
		{

			const int ssesize = src.size().area() / 8;
			remsize = src.size().area() - ssesize * 8;
			for (int i = 0; i < ssesize; i++)
			{
				__m128i mmk = _mm_load_si128((__m128i*)mk);
				__m128i mnd = _mm_load_si128((__m128i*)nd);

				__m128i mmn = _mm_load_si128((__m128i*)n);
				__m128i mmx = _mm_load_si128((__m128i*)x);
				__m128i msk = _mm_cmpeq_epi16(mnd, mmk);
				_mm_stream_si128((__m128i*)d, _mm_blendv_epi8(mmx, mmn, msk));
				nd += 8;
				mk += 8;
				d += 8;
				n += 8;
				x += 8;
			}
		}
		else if (src.depth() == CV_32F)
		{

			const int ssesize = src.size().area() / 4;
			remsize = src.size().area() - ssesize * 4;
			for (int i = 0; i < ssesize; i++)
			{
				__m128 mmk = _mm_load_ps((float*)mk);
				__m128 mnd = _mm_load_ps((float*)nd);

				__m128 mmn = _mm_load_ps((float*)n);
				__m128 mmx = _mm_load_ps((float*)x);
				__m128 msk = _mm_cmpeq_ps(mnd, mmk);
				_mm_stream_ps((float*)d, _mm_blendv_ps(mmx, mmn, msk));
				nd += 4;
				mk += 4;
				d += 4;
				n += 4;
				x += 4;
			}
		}
		else if (src.depth() == CV_64F)
		{
			const int ssesize = src.size().area() / 2;
			remsize = src.size().area() - ssesize * 2;
			for (int i = 0; i < ssesize; i++)
			{
				__m128d mmk = _mm_load_pd((double*)mk);
				__m128d mnd = _mm_load_pd((double*)nd);

				__m128d mmn = _mm_load_pd((double*)n);
				__m128d mmx = _mm_load_pd((double*)x);
				__m128d msk = _mm_cmpeq_pd(mnd, mmk);
				_mm_stream_pd((double*)d, _mm_blendv_pd(mmx, mmn, msk));
				nd += 2;
				mk += 2;
				d += 2;
				n += 2;
				x += 2;
			}
		}
#endif
		for (int i = 0; i < remsize; i++)
		{
			{
				if (nd[i] == mk[i])
				{
					d[i] = n[i];
				}
				else
				{
					d[i] = x[i];
				}
			}
		}
	}
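	// Hedged usage sketch (not part of the original source): a small dispatcher
	// that picks the template parameter from src.depth(), which is what the SIMD
	// paths above assume; the wrapper name is illustrative only.
	void blurRemoveMinMaxDispatch(const Mat& src, Mat& dest, const int r)
	{
		switch (src.depth())
		{
		case CV_8U:  blurRemoveMinMax_<uchar>(src, dest, r);  break;
		case CV_16U: blurRemoveMinMax_<ushort>(src, dest, r); break;
		case CV_16S: blurRemoveMinMax_<short>(src, dest, r);  break;
		case CV_32F: blurRemoveMinMax_<float>(src, dest, r);  break;
		case CV_64F: blurRemoveMinMax_<double>(src, dest, r); break;
		default:     CV_Error(CV_StsUnsupportedFormat, "unsupported depth");
		}
	}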
	float getMaxVal(const Mat& mat) {
		return getMaxVal(mat.depth());
	}
Example #3
bool  Jpeg2KDecoder::readData(Mat& img) {
    bool result = false;
    int color = img.channels() > 1;
    uchar* data = img.data;
    int step = img.step;
    jas_stream_t* stream = (jas_stream_t*)m_stream;
    jas_image_t* image = (jas_image_t*)m_image;

    if (stream && image) {
        bool convert;
        int colorspace;
        if (color) {
            convert = (jas_image_clrspc(image) != JAS_CLRSPC_SRGB);
            colorspace = JAS_CLRSPC_SRGB;
        } else {
            convert = (jas_clrspc_fam(jas_image_clrspc(image)) != JAS_CLRSPC_FAM_GRAY);
            colorspace = JAS_CLRSPC_SGRAY; // TODO GENGRAY or SGRAY?
        }

        // convert to the desired colorspace
        if (convert) {
            jas_cmprof_t* clrprof = jas_cmprof_createfromclrspc(colorspace);
            if (clrprof) {
                jas_image_t* _img = jas_image_chclrspc(image, clrprof, JAS_CMXFORM_INTENT_RELCLR);
                if (_img) {
                    jas_image_destroy(image);
                    m_image = image = _img;
                    result = true;
                } else {
                    fprintf(stderr, "JPEG 2000 LOADER ERROR: cannot convert colorspace\n");
                }
                jas_cmprof_destroy(clrprof);
            } else {
                fprintf(stderr, "JPEG 2000 LOADER ERROR: unable to create colorspace\n");
            }
        } else {
            result = true;
        }

        if (result) {
            int ncmpts;
            int cmptlut[3];
            if (color) {
                cmptlut[0] = jas_image_getcmptbytype(image, JAS_IMAGE_CT_RGB_B);
                cmptlut[1] = jas_image_getcmptbytype(image, JAS_IMAGE_CT_RGB_G);
                cmptlut[2] = jas_image_getcmptbytype(image, JAS_IMAGE_CT_RGB_R);
                if (cmptlut[0] < 0 || cmptlut[1] < 0 || cmptlut[2] < 0) {
                    result = false;
                }
                ncmpts = 3;
            } else {
                cmptlut[0] = jas_image_getcmptbytype(image, JAS_IMAGE_CT_GRAY_Y);
                if (cmptlut[0] < 0) {
                    result = false;
                }
                ncmpts = 1;
            }

            if (result) {
                for (int i = 0; i < ncmpts; i++) {
                    int maxval = 1 << jas_image_cmptprec(image, cmptlut[i]);
                    int offset =  jas_image_cmptsgnd(image, cmptlut[i]) ? maxval / 2 : 0;

                    int yend = jas_image_cmptbry(image, cmptlut[i]);
                    int ystep = jas_image_cmptvstep(image, cmptlut[i]);
                    int xend = jas_image_cmptbrx(image, cmptlut[i]);
                    int xstep = jas_image_cmpthstep(image, cmptlut[i]);

                    jas_matrix_t* buffer = jas_matrix_create(yend / ystep, xend / xstep);
                    if (buffer) {
                        if (!jas_image_readcmpt(image, cmptlut[i], 0, 0, xend / xstep, yend / ystep, buffer)) {
                            if (img.depth() == CV_8U) {
                                result = readComponent8u(data + i, buffer, step, cmptlut[i], maxval, offset, ncmpts);
                            } else {
                                result = readComponent16u(((unsigned short*)data) + i, buffer, step / 2, cmptlut[i], maxval, offset, ncmpts);
                            }
                            if (!result) {
                                i = ncmpts;
                                result = false;
                            }
                        }
                        jas_matrix_destroy(buffer);
                    }
                }
            }
        } else {
            fprintf(stderr, "JPEG2000 LOADER ERROR: colorspace conversion failed\n");
        }
    }

    close();

    return result;
}
Example #4
bool cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, double confidence,
                        OutputArray _inliers, int flags)
{

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();

    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F || opoints.depth() == CV_64F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F || ipoints.depth() == CV_64F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);

    Mat rvec = useExtrinsicGuess ? _rvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
    cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, flags, useExtrinsicGuess, rvec, tvec);

    int model_points = 4;                             // minimum number of model points
    if( flags == cv::SOLVEPNP_ITERATIVE ) model_points = 6;
    else if( flags == cv::SOLVEPNP_UPNP ) model_points = 6;
    else if( flags == cv::SOLVEPNP_EPNP ) model_points = 5;

    double param1 = reprojectionError;                // reprojection error
    double param2 = confidence;                       // confidence
    int param3 = iterationsCount;                     // maximum number of iterations

    cv::Mat _local_model(3, 2, CV_64FC1);
    cv::Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);

    // call Ransac
    int result = createRANSACPointSetRegistrator(cb, model_points, param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);

    if( result <= 0 || _local_model.rows <= 0)
    {
        _rvec.assign(rvec);    // output rotation vector
        _tvec.assign(tvec);    // output translation vector

        if( _inliers.needed() )
            _inliers.release();

        return false;
    }
    else
    {
        _rvec.assign(_local_model.col(0));    // output rotation vector
        _tvec.assign(_local_model.col(1));    // output translation vector
    }

    if(_inliers.needed())
    {
        Mat _local_inliers;
        int count = 0;
        for (int i = 0; i < _mask_local_inliers.rows; ++i)
        {
            if((int)_mask_local_inliers.at<uchar>(i) == 1) // inliers mask
            {
                _local_inliers.push_back(i);    // output inliers vector
                count++;
            }
        }
        _local_inliers.copyTo(_inliers);
    }
    return true;
}
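// Hedged usage sketch (not part of the original source): a thin wrapper showing
// how the RANSAC solver above is typically called; the parameter values mirror
// the defaults implied by the parameter list and are illustrative only.
static bool solvePnPRansacExample(const std::vector<cv::Point3f>& objectPoints,
                                  const std::vector<cv::Point2f>& imagePoints,
                                  const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                                  cv::Mat& rvec, cv::Mat& tvec, std::vector<int>& inliers)
{
    return cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                              rvec, tvec, false /*useExtrinsicGuess*/,
                              100 /*iterationsCount*/, 8.0f /*reprojectionError*/,
                              0.99 /*confidence*/, inliers, cv::SOLVEPNP_ITERATIVE);
}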
static void test_threshold( const Mat& _src, Mat& _dst,
                            float thresh, float maxval, int thresh_type )
{
    int i, j;
    int depth = _src.depth(), cn = _src.channels();
    int width_n = _src.cols*cn, height = _src.rows;
    int ithresh = cvFloor(thresh);
    int imaxval, ithresh2;
    
    if( depth == CV_8U )
    {
        ithresh2 = saturate_cast<uchar>(ithresh);
        imaxval = saturate_cast<uchar>(maxval);
    }
    else if( depth == CV_16S )
    {
        ithresh2 = saturate_cast<short>(ithresh);
        imaxval = saturate_cast<short>(maxval);
    }
    else
    {
        ithresh2 = cvRound(ithresh);
        imaxval = cvRound(maxval);
    }

    assert( depth == CV_8U || depth == CV_16S || depth == CV_32F );
    
    switch( thresh_type )
    {
    case CV_THRESH_BINARY:
        for( i = 0; i < height; i++ )
        {
            if( depth == CV_8U )
            {
                const uchar* src = _src.ptr<uchar>(i);
                uchar* dst = _dst.ptr<uchar>(i);
                for( j = 0; j < width_n; j++ )
                    dst[j] = (uchar)(src[j] > ithresh ? imaxval : 0);
            }
            else if( depth == CV_16S )
            {
                const short* src = _src.ptr<short>(i);
                short* dst = _dst.ptr<short>(i);
                for( j = 0; j < width_n; j++ )
                    dst[j] = (short)(src[j] > ithresh ? imaxval : 0);
            }
            else
            {
                const float* src = _src.ptr<float>(i);
                float* dst = _dst.ptr<float>(i);
                for( j = 0; j < width_n; j++ )
                    dst[j] = src[j] > thresh ? maxval : 0.f;
            }
        }
        break;
    case CV_THRESH_BINARY_INV:
        for( i = 0; i < height; i++ )
        {
            if( depth == CV_8U )
            {
                const uchar* src = _src.ptr<uchar>(i);
                uchar* dst = _dst.ptr<uchar>(i);
                for( j = 0; j < width_n; j++ )
                    dst[j] = (uchar)(src[j] > ithresh ? 0 : imaxval);
            }
            else if( depth == CV_16S )
            {
                const short* src = _src.ptr<short>(i);
                short* dst = _dst.ptr<short>(i);
                for( j = 0; j < width_n; j++ )
                    dst[j] = (short)(src[j] > ithresh ? 0 : imaxval);
            }
            else
            {
                const float* src = _src.ptr<float>(i);
                float* dst = _dst.ptr<float>(i);
                for( j = 0; j < width_n; j++ )
                    dst[j] = src[j] > thresh ? 0.f : maxval;
            }
        }
        break;
    case CV_THRESH_TRUNC:
        for( i = 0; i < height; i++ )
        {
            if( depth == CV_8U )
            {
                const uchar* src = _src.ptr<uchar>(i);
                uchar* dst = _dst.ptr<uchar>(i);
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (uchar)(s > ithresh ? ithresh2 : s);
                }
            }
            else if( depth == CV_16S )
            {
                const short* src = _src.ptr<short>(i);
                short* dst = _dst.ptr<short>(i);
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (short)(s > ithresh ? ithresh2 : s);
                }
            }
            else
            {
                const float* src = _src.ptr<float>(i);
                float* dst = _dst.ptr<float>(i);
                for( j = 0; j < width_n; j++ )
                {
                    float s = src[j];
                    dst[j] = s > thresh ? thresh : s;
                }
            }
        }
        break;
    case CV_THRESH_TOZERO:
        for( i = 0; i < height; i++ )
        {
            if( depth == CV_8U )
            {
                const uchar* src = _src.ptr<uchar>(i);
                uchar* dst = _dst.ptr<uchar>(i);
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (uchar)(s > ithresh ? s : 0);
                }
            }
            else if( depth == CV_16S )
            {
                const short* src = _src.ptr<short>(i);
                short* dst = _dst.ptr<short>(i);
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (short)(s > ithresh ? s : 0);
                }
            }
            else
            {
                const float* src = _src.ptr<float>(i);
                float* dst = _dst.ptr<float>(i);
                for( j = 0; j < width_n; j++ )
                {
                    float s = src[j];
                    dst[j] = s > thresh ? s : 0.f;
                }
            }
        }
        break;
    case CV_THRESH_TOZERO_INV:
        for( i = 0; i < height; i++ )
        {
            if( depth == CV_8U )
            {
                const uchar* src = _src.ptr<uchar>(i);
                uchar* dst = _dst.ptr<uchar>(i);
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (uchar)(s > ithresh ? 0 : s);
                }
            }
            else if( depth == CV_16S )
            {
                const short* src = _src.ptr<short>(i);
                short* dst = _dst.ptr<short>(i);
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (short)(s > ithresh ? 0 : s);
                }
            }
            else
            {
                const float* src = _src.ptr<float>(i);
                float* dst = _dst.ptr<float>(i);
                for( j = 0; j < width_n; j++ )
                {
                    float s = src[j];
                    dst[j] = s > thresh ? 0.f : s;
                }
            }
        }
        break;
    default:
        assert(0);
    }
}
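// Hedged usage sketch (not part of the original test code): the reference
// routine above can be checked against cv::threshold on random CV_32F data;
// for CV_THRESH_BINARY the two outputs should match exactly.
static void test_threshold_example()
{
    Mat src(16, 16, CV_32FC1), ref(16, 16, CV_32FC1), dst;
    cv::randu(src, 0.0, 1.0);
    test_threshold(src, ref, 0.5f, 1.f, CV_THRESH_BINARY);
    cv::threshold(src, dst, 0.5, 1.0, CV_THRESH_BINARY);
    CV_Assert(cv::countNonZero(ref != dst) == 0);
}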
// prepare a photo with CLBP_M (Completed LBP-Magnitude)
void expression_recognizer::localMeanThreshold(Mat& face, int A, int B, int P, float phase) {
	CV_Assert( face.depth() == CV_8U );
	CV_Assert( face.size().width == 48 && face.size().height == 60 );
	CV_Assert( A > 0 && B > 0 && P > 0 );

	if (A != currA || B != currB || P != currP || currPhase != phase) updateELBPkernel( A,B,P,phase );

	unsigned int fw = face.size().width;
	unsigned int fh = face.size().height;
	unsigned int marginal = (currA >= currB) ? currA : currB;

	Mat local_mean = Mat(face.size(), CV_32F);
	float total_mean=0;

	copyMakeBorder(face,face,marginal,marginal,marginal,marginal,BORDER_REPLICATE); //replication difference = 0

	// local difference mean per pixel
	for (unsigned int y = marginal; y<fh+marginal; y++) {
		for (unsigned int x = marginal; x<fw+marginal; x++) {
			float sum=0;
			access(uchar, face,y, x);
			uchar center = face.at<uchar>(y,x);

			for (unsigned int i=0; i<currP; i++) {
				unsigned int relx = ELBP_coords[i].x+x;
				unsigned int rely = ELBP_coords[i].y+y;

				access( uchar, face, rely, relx )
				uchar cmp = face.at<uchar>(rely,relx);
				sum += (float)abs(center - cmp);
			}

			sum /= (float)currP;
			access( float, local_mean, y-marginal, x-marginal )
			local_mean.at<float>(y-marginal,x-marginal) = sum;
			total_mean += sum;
		}
	}
	total_mean /= local_mean.size().area();
	//LOGD("debug: total mean %f", total_mean);

	// ELBP
	face.convertTo(face, CV_16U);
	unsigned short ELBP_code = 0;
	copyMakeBorder(local_mean,local_mean,marginal,marginal,marginal,marginal,BORDER_REPLICATE);

	for (unsigned int y=marginal;y<fh+marginal;y++) {
		for (unsigned int x=marginal;x<fw+marginal;x++) {
			ELBP_code = 0;

			for (unsigned int i=0; i<currP; i++) {
				unsigned int relx = ELBP_coords[i].x+x;
				unsigned int rely = ELBP_coords[i].y+y;

				access( float, local_mean, rely, relx )
				if (local_mean.at<float>(rely,relx) >= total_mean) ELBP_code += powtable[i];
			}

			access( unsigned short, face, y-marginal, x-marginal )
			face.at<unsigned short>(y-marginal,x-marginal) = ELBP_code;
		}
	}

	face = face.colRange(0, fw).rowRange(0, fh);
}
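// Hedged usage sketch (not part of the original source): the routine above
// rewrites "face" in place with CLBP_M codes (the result is CV_16U); the 48x60
// input size comes from the asserts above, while the recognizer instance and
// the A/B/P/phase values are illustrative assumptions.
void localMeanThresholdExample(expression_recognizer& rec, Mat grayFace48x60)
{
	rec.localMeanThreshold(grayFace48x60, 2 /*A*/, 2 /*B*/, 8 /*P*/, 0.f /*phase*/);
	CV_Assert(grayFace48x60.type() == CV_16U);
}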
// Filters a face picture with 40 Gabor filters, arranges the results into a 3D-array-like
// structure and runs a 3D-LBP operation on them. Outputs an identification histogram after
// segmentation and concatenation in left-right, top-down order.
// If step is 0 or 1, returns a debug picture in "face". ind = index, computed as (freq*orientation)+rot
void expression_recognizer::gaborLBPHistograms(Mat& face, Mat& hist, Mat& lut, int N, int step, int ind) {
	CV_Assert( face.depth() == CV_8U );
	CV_Assert( face.channels() == 1 );

	CV_Assert( lut.depth() == CV_8U );
	CV_Assert( lut.channels() == 1 );
	CV_Assert( lut.total() == 256 );
	CV_Assert( lut.isContinuous() );

	Mat tmppic = Mat( Size(64, 64), CV_64FC2);
	Mat holder = Mat( Size(64, 64), CV_64FC1);

	vector<Mat> planes;
	resize(face, face, Size(64,64));
	//face = face.colRange(8,80);//.clone();
	vector<Mat> doubleface;
	face.convertTo(face, CV_64FC2);

	int m = getOptimalDFTSize( face.size().height );
	int n = getOptimalDFTSize( face.size().width );
	copyMakeBorder(face, face, 0, m - face.size().height, 0, n - face.size().width, BORDER_CONSTANT, Scalar::all(0));

	doubleface.clear();
	doubleface.push_back(face);
	doubleface.push_back(face);
	merge( doubleface, face );

	dft(face, face, DFT_COMPLEX_OUTPUT + DFT_SCALE, 0);

	vector<Mat> gaborCube(scale*orientation);
	vector<Mat> binaryGaborVolume;
	for (unsigned int freq=0;freq<scale;freq++) {
		for (unsigned int rot=0;rot<orientation;rot++) {
			unsigned int index = (freq*orientation)+rot;

			Mat tmp = gaborKernels[index];
			mulSpectrums(face, tmp, tmppic, 0, false);
			idft(tmppic, tmppic, DFT_SCALE, 0);

			planes.clear();
			split(tmppic, planes);
			Mat p0=planes[0];
			Mat p1=planes[1];
			magnitude(p0,p1,holder);
			//holder = holder.colRange(0, 64).rowRange(0,64);
			// From real and imaginary parts we can get the magnitude for identification
			// add 1px borders for later, store in gabor-cube

			copyMakeBorder(holder, holder,1, 1, 1, 1, BORDER_CONSTANT, Scalar::all(0));
			gaborCube[index] = holder.clone();
		}
	}

	if (step == 0) face = gaborCube[ind];

	vector<Mat> LBP;
	Mat lbp = Mat(64,64,CV_8U);

	for (unsigned int freq=0;freq<scale;freq++) {
		for (unsigned int rot=0;rot<orientation;rot++) {

			unsigned int index = rot+(freq*orientation);
			Mat thiz = gaborCube[index];
			uchar pix = 0;

			for (unsigned int y=1;y<thiz.size().height-1;y++) {
				for (unsigned int x=1;x<thiz.size().width-1;x++) {
					pix = 0;
					double center = thiz.at<double>(y,x);

					// indices 1,3,5 and 7 are normal closest neighbor LBP
					if (thiz.at<double>(y-1,x) >= center ) pix += powtable[1];
					if (thiz.at<double>(y,x+1) >= center ) pix += powtable[3];
					if (thiz.at<double>(y+1,x) >= center ) pix += powtable[5];
					if (thiz.at<double>(y,x-1) >= center ) pix += powtable[7];

					// orientation neighbors are indices 2 and 6
					if (rot > 0) {
						Mat back = gaborCube[index-1];
						if ( back.at<double>(y,x) >= center ) pix += powtable[2];
					}
					if (rot < orientation-1) {
						Mat front = gaborCube[index+1];
						if ( front.at<double>(y,x) >= center ) pix += powtable[6];
					}
					//scale neighbors, indices 0,4
					if (freq > 0 ) {
						Mat back = gaborCube[index-orientation];
						if ( back.at<double>(y,x) >= center) pix += powtable[0];
					}

					if (freq < scale-1) {
						Mat front = gaborCube[index+orientation];
						if ( front.at<double>(y,x) >= center) pix += powtable[4];
					}

					lbp.at<uchar>(y-1,x-1) = pix;
				}
			}

			// 59 uniform patterns
			if (N>0) LUT(lbp, lut, lbp);

			LBP.push_back(lbp.clone());
		}
	}

	if (step == 1) face = LBP[ind];

	int histSize[] = {256};
	float range[] = {0, 256};
	const float* histRange[] = {range};
	int channels[]={0};

	static double areaWeights[] = { 1,1,1,1,1,1,1,1,
	                                1,1,1,1,1,1,1,1,
	                                1,4,4,3,3,4,4,1,
	                                1,4,4,3,3,4,4,1,
	                                0,1,1,1,1,1,1,0,
	                                0,1,2,2,2,2,1,0,
	                                0,1,2,2,2,2,1,0,
	                                0,0,1,1,1,1,0,0 };


	static unsigned int xstep=8, ystep=8;
	static unsigned int xsize=8, ysize=8;

	for (unsigned int y = 0;y<ystep;y++) {
		for (unsigned int x = 0;x<xstep;x++) {
			Mat accuhist = Mat::zeros(256,1,CV_32F);
			unsigned int weight = areaWeights[x+(y*xsize)];

			if (weight != 0) {
				for (unsigned int i=0;i<scale*orientation;i++) {
					Mat tempHist = Mat::zeros(256,1,CV_32F);
					lbp = LBP[i];

					Mat roi = lbp.rowRange(y*ysize, (y+1)*ysize).colRange(x*xsize,(x+1)*xsize);
					calcHist(&roi, 1, 0, Mat(), tempHist, 1, histSize, histRange, true, false );
					scaleAdd(tempHist, 1, accuhist, accuhist);
				}

				if (N>0) accuhist = accuhist.rowRange(0, N);
				// cut from 256 values per 8x8 area to 8 values per area
				//dump( accuhist );
				 hist.push_back(accuhist.clone());
				//cuts the ID vector length even more
			}
		}
	}

	normalize( hist, hist, 0, 1, NORM_MINMAX, -1, noArray());
}
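// Hedged usage sketch (not part of the original source): computing the
// concatenated Gabor-LBP histogram for one grayscale face. The recognizer
// instance and the 256-entry uniform-pattern LUT are assumptions; N = 59
// keeps only the uniform-pattern bins and step = 2 skips the debug pictures.
void gaborLBPHistogramsExample(expression_recognizer& rec, const Mat& grayFace, Mat& uniformLut)
{
	Mat face = grayFace.clone();	// the routine resizes and overwrites its input
	Mat hist;						// receives the weighted, concatenated region histograms
	rec.gaborLBPHistograms(face, hist, uniformLut, 59, 2, 0);
}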
Example #8
void SIFT::operator()(InputArray _image, InputArray _mask,
                      vector<KeyPoint>& keypoints,
                      OutputArray _descriptors,
                      bool useProvidedKeypoints) const
{
    int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
    Mat image = _image.getMat(), mask = _mask.getMat();

    if( image.empty() || image.depth() != CV_8U )
        CV_Error( CV_StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );

    if( !mask.empty() && mask.type() != CV_8UC1 )
        CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" );

    if( useProvidedKeypoints )
    {
        firstOctave = 0;
        int maxOctave = INT_MIN;
        for( size_t i = 0; i < keypoints.size(); i++ )
        {
            int octave, layer;
            float scale;
            unpackOctave(keypoints[i], octave, layer, scale);
            firstOctave = std::min(firstOctave, octave);
            maxOctave = std::max(maxOctave, octave);
            actualNLayers = std::max(actualNLayers, layer-2);
        }

        firstOctave = std::min(firstOctave, 0);
        CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
        actualNOctaves = maxOctave - firstOctave + 1;
    }

    Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
    vector<Mat> gpyr, dogpyr;
    int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2) - firstOctave;

    //double t, tf = getTickFrequency();
    //t = (double)getTickCount();
    buildGaussianPyramid(base, gpyr, nOctaves);
    buildDoGPyramid(gpyr, dogpyr);

    //t = (double)getTickCount() - t;
    //printf("pyramid construction time: %g\n", t*1000./tf);

    if( !useProvidedKeypoints )
    {
        //t = (double)getTickCount();
        findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
        KeyPointsFilter::removeDuplicated( keypoints );

        if( nfeatures > 0 )
            KeyPointsFilter::retainBest(keypoints, nfeatures);
        //t = (double)getTickCount() - t;
        //printf("keypoint detection time: %g\n", t*1000./tf);

        if( firstOctave < 0 )
            for( size_t i = 0; i < keypoints.size(); i++ )
            {
                KeyPoint& kpt = keypoints[i];
                float scale = 1.f/(float)(1 << -firstOctave);
                kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
                kpt.pt *= scale;
                kpt.size *= scale;
            }

        if( !mask.empty() )
            KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }
    else
    {
        // filter keypoints by mask
        //KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }

    if( _descriptors.needed() )
    {
        //t = (double)getTickCount();
        int dsize = descriptorSize();
        _descriptors.create((int)keypoints.size(), dsize, CV_32F);
        Mat descriptors = _descriptors.getMat();

        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
        //t = (double)getTickCount() - t;
        //printf("descriptor extraction time: %g\n", t*1000./tf);
    }
}
bool  PngDecoder::readData( Mat& img )
{
    bool result = false;
    AutoBuffer<uchar*> _buffer(m_height);
    uchar** buffer = _buffer;
    int color = img.channels() > 1;
    uchar* data = img.data;
    int step = (int)img.step;

    if( m_png_ptr && m_info_ptr && m_end_info && m_width && m_height )
    {
        png_structp png_ptr = (png_structp)m_png_ptr;
        png_infop info_ptr = (png_infop)m_info_ptr;
        png_infop end_info = (png_infop)m_end_info;

        if( setjmp( png_jmpbuf ( png_ptr ) ) == 0 )
        {
            int y;

            if( img.depth() == CV_8U && m_bit_depth == 16 )
                png_set_strip_16( png_ptr );
            else if( !isBigEndian() )
                png_set_swap( png_ptr );

            if(img.channels() < 4)
            {
                /* observation: png_read_image() writes 400 bytes beyond
                 * end of data when reading a 400x118 color png
                 * "mpplus_sand.png".  OpenCV crashes even with demo
                 * programs.  Looking at the loaded image I'd say we get 4
                 * bytes per pixel instead of 3 bytes per pixel.  Tests
                 * indicate that it is a good idea to always ask for
                 * stripping alpha..  18.11.2004 Axel Walthelm
                 */
                 png_set_strip_alpha( png_ptr );
            }

            if( m_color_type == PNG_COLOR_TYPE_PALETTE )
                png_set_palette_to_rgb( png_ptr );

            if( m_color_type == PNG_COLOR_TYPE_GRAY && m_bit_depth < 8 )
#if (PNG_LIBPNG_VER_MAJOR*10000 + PNG_LIBPNG_VER_MINOR*100 + PNG_LIBPNG_VER_RELEASE >= 10209) || \
    (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR == 0 && PNG_LIBPNG_VER_RELEASE >= 18)
                png_set_expand_gray_1_2_4_to_8( png_ptr );
#else
                png_set_gray_1_2_4_to_8( png_ptr );
#endif

            if( CV_MAT_CN(m_type) > 1 && color )
                png_set_bgr( png_ptr ); // convert RGB to BGR
            else if( color )
                png_set_gray_to_rgb( png_ptr ); // Gray->RGB
            else
                png_set_rgb_to_gray( png_ptr, 1, 0.299, 0.587 ); // RGB->Gray

            png_read_update_info( png_ptr, info_ptr );

            for( y = 0; y < m_height; y++ )
                buffer[y] = data + y*step;

            png_read_image( png_ptr, buffer );
            png_read_end( png_ptr, end_info );

            result = true;
        }
    }

    close();
    return result;
}
Example #10
static void convertFromCCS( const Mat& _src0, const Mat& _src1, Mat& _dst, int flags )
{
    if( _dst.rows > 1 && (_dst.cols > 1 || (flags & DFT_ROWS)) )
    {
        int i, count = _dst.rows, len = _dst.cols;
        bool is2d = (flags & DFT_ROWS) == 0;
        Mat src0row, src1row, dstrow;
        for( i = 0; i < count; i++ )
        {
            int j = !is2d || i == 0 ? i : count - i;
            src0row = _src0.row(i);
            src1row = _src1.row(j);
            dstrow = _dst.row(i);
            convertFromCCS( src0row, src1row, dstrow, 0 );
        }

        if( is2d )
        {
            src0row = _src0.col(0);
            dstrow = _dst.col(0);
            convertFromCCS( src0row, src0row, dstrow, 0 );
            if( (len & 1) == 0 )
            {
                src0row = _src0.col(_src0.cols - 1);
                dstrow = _dst.col(len/2);
                convertFromCCS( src0row, src0row, dstrow, 0 );
            }
        }
    }
    else
    {
        int i, n = _dst.cols + _dst.rows - 1, n2 = (n+1) >> 1;
        int cn = _src0.channels();
        int srcstep = cn, dststep = 1;

        if( !_dst.isContinuous() )
            dststep = (int)(_dst.step/_dst.elemSize());

        if( !_src0.isContinuous() )
            srcstep = (int)(_src0.step/_src0.elemSize1());

        if( _dst.depth() == CV_32F )
        {
            Complexf* dst = _dst.ptr<Complexf>();
            const float* src0 = _src0.ptr<float>();
            const float* src1 = _src1.ptr<float>();
            int delta0, delta1;

            dst->re = src0[0];
            dst->im = 0;

            if( (n & 1) == 0 )
            {
                dst[n2*dststep].re = src0[(cn == 1 ? n-1 : n2)*srcstep];
                dst[n2*dststep].im = 0;
            }

            delta0 = srcstep;
            delta1 = delta0 + (cn == 1 ? srcstep : 1);
            if( cn == 1 )
                srcstep *= 2;

            for( i = 1; i < n2; i++, delta0 += srcstep, delta1 += srcstep )
            {
                float t0 = src0[delta0];
                float t1 = src0[delta1];

                dst[i*dststep].re = t0;
                dst[i*dststep].im = t1;

                t0 = src1[delta0];
                t1 = -src1[delta1];

                dst[(n-i)*dststep].re = t0;
                dst[(n-i)*dststep].im = t1;
            }
        }
        else
        {
            Complexd* dst = _dst.ptr<Complexd>();
            const double* src0 = _src0.ptr<double>();
            const double* src1 = _src1.ptr<double>();
            int delta0, delta1;

            dst->re = src0[0];
            dst->im = 0;

            if( (n & 1) == 0 )
            {
                dst[n2*dststep].re = src0[(cn == 1 ? n-1 : n2)*srcstep];
                dst[n2*dststep].im = 0;
            }

            delta0 = srcstep;
            delta1 = delta0 + (cn == 1 ? srcstep : 1);
            if( cn == 1 )
                srcstep *= 2;

            for( i = 1; i < n2; i++, delta0 += srcstep, delta1 += srcstep )
            {
                double t0 = src0[delta0];
                double t1 = src0[delta1];

                dst[i*dststep].re = t0;
                dst[i*dststep].im = t1;

                t0 = src1[delta0];
                t1 = -src1[delta1];

                dst[(n-i)*dststep].re = t0;
                dst[(n-i)*dststep].im = t1;
            }
        }
    }
}
Example #11
static void mulComplex( const Mat& src1, const Mat& src2, Mat& dst, int flags )
{
    dst.create(src1.rows, src1.cols, src1.type());
    int i, j, depth = src1.depth(), cols = src1.cols*2;

    CV_Assert( src1.size == src2.size && src1.type() == src2.type() &&
              (src1.type() == CV_32FC2 || src1.type() == CV_64FC2) );

    for( i = 0; i < dst.rows; i++ )
    {
        if( depth == CV_32F )
        {
            const float* a = src1.ptr<float>(i);
            const float* b = src2.ptr<float>(i);
            float* c = dst.ptr<float>(i);

            if( !(flags & CV_DXT_MUL_CONJ) )
                for( j = 0; j < cols; j += 2 )
                {
                    double re = (double)a[j]*(double)b[j] - (double)a[j+1]*(double)b[j+1];
                    double im = (double)a[j+1]*(double)b[j] + (double)a[j]*(double)b[j+1];

                    c[j] = (float)re;
                    c[j+1] = (float)im;
                }
            else
                for( j = 0; j < cols; j += 2 )
                {
                    double re = (double)a[j]*(double)b[j] + (double)a[j+1]*(double)b[j+1];
                    double im = (double)a[j+1]*(double)b[j] - (double)a[j]*(double)b[j+1];

                    c[j] = (float)re;
                    c[j+1] = (float)im;
                }
        }
        else
        {
            const double* a = src1.ptr<double>(i);
            const double* b = src2.ptr<double>(i);
            double* c = dst.ptr<double>(i);

            if( !(flags & CV_DXT_MUL_CONJ) )
                for( j = 0; j < cols; j += 2 )
                {
                    double re = a[j]*b[j] - a[j+1]*b[j+1];
                    double im = a[j+1]*b[j] + a[j]*b[j+1];

                    c[j] = re;
                    c[j+1] = im;
                }
            else
                for( j = 0; j < cols; j += 2 )
                {
                    double re = a[j]*b[j] + a[j+1]*b[j+1];
                    double im = a[j+1]*b[j] - a[j]*b[j+1];

                    c[j] = re;
                    c[j+1] = im;
                }
        }
    }
}
Example #12
//Prints basic info about image
void imgInfo(const Mat& img){
	std::cout << "Type: " << img.type()
					<<"  Channels: " << img.channels()
					<< "   Depth:" << img.depth()
					<< std::endl;
}
void replaceFace(Mat &faceFrame,FaceCartoon faceCartoon){

	stringstream ss;
	ss.str("");
	string tempCartoonNum;
	ss << (faceCartoon.cartoonMatchNum + 1);
	ss >> tempCartoonNum;
	Mat srcImg = imread(prefixTest + tempCartoonNum + subfix);
	Mat src_mask = Mat::zeros(srcImg.rows, srcImg.cols, srcImg.depth());
	Mat srcResize;
	Mat src_mask_resize;
	Point center(faceCartoon.facePosition.xc, faceCartoon.facePosition.yc);	// center point where the cartoon is pasted

	vector<Point> vec_points = vec_vec_points.at(faceCartoon.cartoonMatchNum);
	int contourPointNum = vec_points.size() - 1;	// drop the last point
	int minx = INT_MAX, miny = INT_MAX, maxx = INT_MIN, maxy = INT_MIN;
	// extract the contour points of this cartoon figure
	Point *polyyy = new Point[contourPointNum];
	for (int i = 0; i < contourPointNum; i++)
	{
		int x = vec_points.at(i).x;
		int y = vec_points.at(i).y;
		polyyy[i] = Point(x, y);
		minx = std::min(minx, x);
		maxx = std::max(maxx, x);
		miny = std::min(miny, y);
		maxy = std::max(maxy, y);
	}
	const Point* polygons1[1] = { polyyy };
	int num_points1[] = { contourPointNum };
	fillPoly(src_mask, polygons1, num_points1, 1, Scalar(255,255,255));
	delete[] polyyy;
	//imshow("cartoon contour", src_mask);
	//imshow("cartoon figure", srcImg);

	float wScale = 1.2, hScale = 1.6;
	int width, height;	// target width and height of the cartoon face
	int cartoonWidth, cartoonHeight;	// actual width and height of the cartoon face

	width = faceCartoon.facePosition.w * wScale;	
	height = faceCartoon.facePosition.w * hScale;	

	// compute the width and height occupied by the cartoon face
	cartoonWidth = maxx - minx;
	cartoonHeight = maxy -miny;

	int minxd = center.x - width / 2;
	int maxxd = center.x + width / 2;
	int minyd = center.y - height / 2;
	int maxyd = center.y + height / 2;

	// compute the scale factors needed for the cartoon face
	double widthScale = (double)width / (double)cartoonWidth;
	double heightScale = (double)height / (double)cartoonHeight;

	Size reSize(srcImg.cols * widthScale, srcImg.rows * heightScale);
	// resize
	resize(srcImg, srcResize, reSize, 0, 0, CV_INTER_LINEAR);
	resize(src_mask, src_mask_resize, reSize, 0, 0, CV_INTER_LINEAR);

	int minxS = minx * widthScale;
	int maxxS = maxx * widthScale;
	int minyS = miny * heightScale;
	int maxyS = maxy * heightScale;

	// if the cartoon face extends past the frame horizontally, keep only the part inside the boundary
	int roiMask_x = minxS, roiMask_y = minyS, roiMask_width = width, roiMask_height = height;
	if (minxd < 0)
	{
		cout << "width less than 0" << endl;
		roiMask_x = roiMask_x + (-1) * minxd;
		roiMask_width = width + minxd;	// minxd is negative
		center.x += (-1) * minxd / 2;
	}
	if (maxxd > faceFrame.cols)
	{
		roiMask_width = width - (maxxd - faceFrame.cols);
		center.x -= (maxxd - faceFrame.cols) / 2;
	}
	// height exceeds the boundary
	if (minyd < 0)
	{
		cout << "height less than 0" << endl;
		roiMask_y = roiMask_y + (-1) * minyd;
		roiMask_height = height + minyd;
		center.y += (-1) * minyd / 2;
	}
	// height exceeds the boundary
	if (maxyd > faceFrame.rows)
	{
		roiMask_height = height - (maxyd - faceFrame.rows);
		center.y -= (maxyd - faceFrame.rows) / 2;
	}
	// crop the ROI
	Rect roiMask(roiMask_x, roiMask_y, roiMask_width, roiMask_height);
	Mat src_mask_resize_roi = src_mask_resize(roiMask);
	Mat srcResize_roi = srcResize(roiMask);
	//Mat src_downScale;
	//pyrDown(srcResize_roi, src_downScale);
	// Seamlessly clone src into dst and put the results in output
	Mat output;
	customCV::seamlessClone(srcResize_roi, faceFrame, src_mask_resize_roi, center, output, 1);
	faceFrame = output;
}
int main5ggg()
{
	// Read images : src image will be cloned into dst
	Mat src = imread("airplane.jpg");
	Mat dst = imread("sky2.jpg");
	Mat img1 = imread("manhua.jpg");

	// Create a rough mask around the airplane.
	Mat src_mask = Mat::zeros(src.rows, src.cols, src.depth());

	// Define the mask as a closed polygon
	Point poly[1][7];
	poly[0][0] = Point(4, 80);
	poly[0][1] = Point(30, 54);
	poly[0][2] = Point(151,63);
	poly[0][3] = Point(254,37);
	poly[0][4] = Point(298,90);
	poly[0][5] = Point(272,134);
	poly[0][6] = Point(43,122);

	const Point* polygons[1] = { poly[0] };
	int num_points[] = { 7 };

	// Create mask by filling the polygon

	fillPoly(src_mask, polygons, num_points, 1, Scalar(255,255,255));

	// The location of the center of the src in the dst
	Point center(250,140);

	// Seamlessly clone src into dst and put the results in output
	Mat output;
	double t = (double)cvGetTickCount();
	//printf("Histogram time = %g ms\n", t / ((double)cvGetTickFrequency()*1000.));
	customCV::seamlessClone(src, dst, src_mask, center, output, 2);
	t = (double)cvGetTickCount() - t;
	printf("Histogram time = %g ms\n", t / ((double)cvGetTickFrequency()*1000.));


	Point polyyy[1][12];
	polyyy[0][0] = Point(124, 347);
	polyyy[0][1] = Point(149, 46);
	polyyy[0][2] = Point(408,43);
	polyyy[0][3] = Point(447,350);
	polyyy[0][4] = Point(544,323);
	polyyy[0][5] = Point(517,542);
	polyyy[0][6] = Point(432,534);
	polyyy[0][7] = Point(334,656);
	polyyy[0][8] = Point(214,652);
	polyyy[0][9] = Point(136,532);
	polyyy[0][10] = Point(17,526);
	polyyy[0][11] = Point(19,328);
	const Point* polygons1[1] = { polyyy[0] };
	int num_points1[] = { 12 };
	fillPoly(img1, polygons1, num_points1, 1, Scalar(255,255,255));
	imshow("img1", img1);
	// draw the polygon outline
	fillPoly(src, polygons, num_points, 1, Scalar(255,255,255));

	imshow("src", src);
	imshow("output",output);
	waitKey(0);
	// Save result
	imwrite("opencv-seamless-cloning-example111.jpg", output);
	return 0;
}
Example #15
/**
 * Runs the fast, single-disparity stereo algorithm and populates the output
 * vectors with the points where it found disparity matches in the image.
 *
 * @param leftImage left camera image as a CV_8UC1
 * @param rightImage right camera image as a CV_8UC1
 * @param pointVector3d cv::vector populated with the 3D coordinates of each match
 * @param pointColors cv::vector populated with one intensity value per match
 * @param pointVector2d cv::vector populated with the image-plane coordinates of each
 *      match (filled only when state.show_display is set)
 * @param state set of configuration parameters for the function.
 *      You can change these on each run of the function if you'd like.
 */
void PushbroomStereo::ProcessImages(Mat leftImage, Mat rightImage, cv::vector<Point3f> *pointVector3d, cv::vector<uchar> *pointColors, cv::vector<Point3i> *pointVector2d, PushbroomStereoState state) {

    //cout << "[main] entering process images" << endl;

    // make sure that the inputs are of the right type
    CV_Assert(leftImage.type() == CV_8UC1 && rightImage.type() == CV_8UC1);

    // we want to use the sum-of-absolute-differences (SAD) algorithm
    // on a single disparity

    // split things up so we can parallelize
    int rows = leftImage.rows;

	
	StopWatchInterface	*timer;
	sdkCreateTimer( &timer );

	sdkResetTimer( &timer );
	sdkStartTimer( &timer );

    // first parallelize remaping

    // we split these arrays up and send them into each
    // thread so at the end, each thread has written to the
    // appropriate spot in the array
    Mat remapped_left(state.mapxL.rows, state.mapxL.cols, leftImage.depth());
    Mat remapped_right(state.mapxR.rows, state.mapxR.cols, rightImage.depth());

    remap( leftImage, remapped_left, state.mapxL, Mat(), INTER_NEAREST);
    remap( rightImage, remapped_right, state.mapxR, Mat(), INTER_NEAREST);

	sdkStopTimer( &timer );
	//printf("remap timer: %.2f ms \n", sdkGetTimerValue( &timer) );

	
	sdkResetTimer( &timer );
	sdkStartTimer( &timer );

    Mat laplacian_left(remapped_left.rows, remapped_left.cols, remapped_left.depth());
    Mat laplacian_right(remapped_right.rows, remapped_right.cols, remapped_right.depth());

	    // apply interest operator
		Laplacian( remapped_left, laplacian_left, -1, 3, 1, 0, BORDER_DEFAULT);

		Laplacian( remapped_right, laplacian_right, -1, 3, 1, 0, BORDER_DEFAULT);

	sdkStopTimer( &timer );
	//printf("laplacian timer: %.2f ms \n", sdkGetTimerValue( &timer) );

	sdkResetTimer( &timer );
	sdkStartTimer( &timer );

    cv::vector<Point3f> pointVector3dArray;
    cv::vector<Point3i> pointVector2dArray;
    cv::vector<uchar> pointColorsArray;

    //cout << "[main] firing worker threads..." << endl;

    if (state.lastValidPixelRow > 0) {

        // crop image to be only include valid pixels
        rows = state.lastValidPixelRow;
    }


    int rows_round = RoundUp(rows, state.blockSize);

    RunStereoPushbroomStereo( remapped_left, remapped_right, laplacian_left, laplacian_right,
                              pointVector3d, pointVector2d, pointColors,
                              0, rows_round - 1, state );

		
	sdkStopTimer( &timer );
	//printf("RunStereo timer: %.2f ms \n", sdkGetTimerValue( &timer) );

    int numPoints = 0;
    // compute the required size of our return vector
    // this prevents multiple memory allocations
    numPoints = pointVector3dArray.size();
    
    pointVector3d->reserve(numPoints);
    pointColors->reserve(numPoints);

	pointVector3d->insert( pointVector3d->end(), pointVector3dArray.begin(), pointVector3dArray.end() );

	pointColors->insert( pointColors->end(), pointColorsArray.begin(), pointColorsArray.end() );

	 if (state.show_display)
	{
		pointVector2d->insert( pointVector2d->end(), pointVector2dArray.begin(), pointVector2dArray.end() );
	}
}
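// Hedged usage sketch (not part of the original project): only the state fields
// referenced in ProcessImages above matter here; a real PushbroomStereoState
// carries the full calibration and matching configuration, so treat this as a skeleton.
void processImagesExample(PushbroomStereo& stereo, const Mat& left, const Mat& right,
                          PushbroomStereoState state) {
    cv::vector<Point3f> points3d;   // 3D location of each disparity hit
    cv::vector<uchar> pointColors;  // one intensity value per hit
    cv::vector<Point3i> points2d;   // image-plane hits, filled when state.show_display is set

    stereo.ProcessImages(left, right, &points3d, &pointColors, &points2d, state);
}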
bool  PngEncoder::write( const Mat& img, const vector<int>& params )
{
    png_structp png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );
    png_infop info_ptr = 0;
    FILE* f = 0;
    int y, width = img.cols, height = img.rows;
    int depth = img.depth(), channels = img.channels();
    bool result = false;
    AutoBuffer<uchar*> buffer;

    if( depth != CV_8U && depth != CV_16U )
        return false;

    if( png_ptr )
    {
        info_ptr = png_create_info_struct( png_ptr );

        if( info_ptr )
        {
            if( setjmp( png_jmpbuf ( png_ptr ) ) == 0 )
            {
                if( m_buf )
                {
                    png_set_write_fn(png_ptr, this,
                        (png_rw_ptr)writeDataToBuf, (png_flush_ptr)flushBuf);
                }
                else
                {
                    f = fopen( m_filename.c_str(), "wb" );
                    if( f )
                        png_init_io( png_ptr, f );
                }

                int compression_level = 0;
                int compression_strategy = Z_RLE;

                for( size_t i = 0; i < params.size(); i += 2 )
                {
                    if( params[i] == CV_IMWRITE_PNG_COMPRESSION )
                    {
                        compression_level = params[i+1];
                        compression_level = MIN(MAX(compression_level, 0), MAX_MEM_LEVEL);
                    }
                    if( params[i] == CV_IMWRITE_PNG_STRATEGY )
                    {
                        compression_strategy = params[i+1];
                        compression_strategy = MIN(MAX(compression_strategy, 0), Z_FIXED);
                    }
                }

                if( m_buf || f )
                {
                    if( compression_level > 0 )
                    {
                        png_set_compression_mem_level( png_ptr, compression_level );
                    }
                    else
                    {
                        // tune parameters for speed
                        // (see http://wiki.linuxquestions.org/wiki/Libpng)
                        png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_SUB);
                        png_set_compression_level(png_ptr, Z_BEST_SPEED);
                    }
                    png_set_compression_strategy(png_ptr, compression_strategy);

                    png_set_IHDR( png_ptr, info_ptr, width, height, depth == CV_8U ? 8 : 16,
                        channels == 1 ? PNG_COLOR_TYPE_GRAY :
                        channels == 3 ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGBA,
                        PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
                        PNG_FILTER_TYPE_DEFAULT );

                    png_write_info( png_ptr, info_ptr );

                    png_set_bgr( png_ptr );
                    if( !isBigEndian() )
                        png_set_swap( png_ptr );

                    buffer.allocate(height);
                    for( y = 0; y < height; y++ )
                        buffer[y] = img.data + y*img.step;

                    png_write_image( png_ptr, buffer );
                    png_write_end( png_ptr, info_ptr );

                    result = true;
                }
            }
        }
    }

    png_destroy_write_struct( &png_ptr, &info_ptr );
    if(f) fclose( f );

    return result;
}
Example #17
 Ptr<Formatted> format(const Mat& mtx) const
 {
     char braces[5] = {'\0', '\0', ',', '\0', '\0'};
     return makePtr<FormattedImpl>("{", "}", mtx, &*braces,
         mtx.rows == 1 || !multiline, false, mtx.depth() == CV_64F ? prec64f : prec32f );
 }
int Pretreatment::OTSU(const Mat &grayMat, int &liminal)
{
    int retVal = 0;

	long size = grayMat.rows * grayMat.cols;

	// Check the mat is empty or not.
	if (grayMat.empty())
	{
        printf("The Matrix is Empty!\r\n");
		retVal = -1;
		return (retVal);
	}

	// Accept only char type matrices.
	CV_Assert(grayMat.depth() == CV_8U);

	// Histogram.
	float histogram[256] = {0};

	// Find the Max Value of the image.
	for( int row = 0; row < grayMat.rows; row++ )
	{
		const uchar *imageData = grayMat.ptr<uchar>(row);

		for( int col = 0; col < grayMat.cols; col++ )
		{
			histogram[ int( *imageData++ ) ]++;
		}
	}

	// The sum of the foreground gray values.
	long sum0 = 0;

	// The sum of the background gray values.
	long sum1 = 0;

	// The number of foreground pixels.
	long cnt0 = 0;

	// The number of background pixels.
	long cnt1 = 0;

	// The proportion of the foreground.
	double w0 = 0;

	// The proportion of the background.
	double w1 = 0;

	// The average gray level in the foreground.
	double u0 = 0;

	// The average gray level in the background.
	double u1 = 0;

	// OTSU.
	double variance = 0;

	//double u = 0;

	double maxVariance = 0;

	for( int i = 1; i < 256; i++)
	{
		sum0 = 0;
		sum1 = 0;
		cnt0 = 0;
		cnt1 = 0;
		w0 = 0;
		w1 = 0;

		for( int j = 0; j < i; j++)
		{
			cnt0 += static_cast<long>(histogram[j]);
			sum0 += static_cast<long>(j * histogram[j]);
		}

		u0 = (double)sum0 / cnt0;
		w0 = (double)cnt0 / size;

		for(int j = i ; j <= 255; j++)
		{
			cnt1 +=static_cast<long>(histogram[j]);
			sum1 += static_cast<long>(j * histogram[j]);
		}

		u1 = (double)sum1 / cnt1;

		w1 = 1 - w0;					// (double)cnt1 / size;

		//u = u0 * w0 + u1 * w1;			// Image average values

		variance =  w0 * w1 *  (u0 - u1) * (u0 - u1);

		if(variance > maxVariance)
		{
			maxVariance = variance;
			liminal = i;
		}
	}
    DEBUG_PRINT("liminal = %d\r\n", liminal);
	return 0;
}
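// Hedged usage sketch (not part of the original source): the threshold found by
// OTSU() above can be fed straight into cv::threshold; "pre" stands for a
// Pretreatment instance and is an assumption for illustration.
int otsuExample(Pretreatment& pre, const Mat& gray, Mat& binary)
{
	int liminal = 0;
	if (pre.OTSU(gray, liminal) != 0)
		return -1;	// empty input image
	cv::threshold(gray, binary, liminal, 255, CV_THRESH_BINARY);
	return liminal;
}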
// This function calculates the different kinds of LBP descriptors used in most of the
// recognition methods in this thesis. "face" is the incoming face picture. "hist" returns the
// result of ARLBP filtering, where the result is split into hdivs*vdivs parts; the total length
// of the histogram is therefore 256*hdivs*vdivs. "sHist" is the regular 3x3 LBP-filtered image
// histogram cut down to only uniform patterns, so its length is (number of uniform
// patterns)*hdivs*vdivs, with the patterns ordered index-wise from smallest to largest.
void expression_recognizer::ARLBP(Mat& face, Mat& hist, Mat& sHist, int hdivs, int vdivs) {
	CV_Assert( face.depth() == CV_8U);
	CV_Assert( face.channels() == 1);
	CV_Assert( face.size() == cv::Size(64, 64) );
	CV_Assert( hdivs > 0 );
	CV_Assert( vdivs > 0 );

	// if height and width are set to 1 the function will only calculate simple, full-sized LBP-histograms
	bool smallLBP = ((height!=1) || (width!=1));

	// temporary holder for LBP picture
	Mat tempLBP;
	Mat tempLBPsmall;
	tempLBP.create(face.size(), CV_8UC1);
	if (smallLBP) tempLBPsmall.create(face.size(), CV_8UC1);

	copyMakeBorder(face,face,height,height,width,width,BORDER_REPLICATE,Scalar::all(0));

	// 1st. calculate the ARLBP picture with sliding window. Height and width parameters
	// are set by "setFilterSize" defined in the header file.
	uchar pixel=0;
	for (unsigned int y=0;y<64;y++) {
		 for (unsigned int x=0;x<64;x++) {
			 Mat roi = face.rowRange(Range(y, y+height+height+1)).colRange(Range(x, x+width+width+1));

			 // opencv/core.hpp MIN
			 int bb = MIN(face.rows-y-1,height);
			 int rb = MIN(face.cols-x-1,width);
			 int tb = MIN(y, height);
			 int lb = MIN(x, width); //border sizes

			 uchar center = roi.at<uchar>(height,width); // = face.at<uchar>(y+h,x+w);

			 for (unsigned int dx=1;dx<=width;dx++) {
				 for (unsigned int dy=1;dy<=height;dy++) {
					 area[0] += roi.at<uchar>(height-dy,width-dx);
					 area[2] += roi.at<uchar>(height-dy,width+dx);
					 area[4] += roi.at<uchar>(height+dy,width+dx);
					 area[6] += roi.at<uchar>(height+dy,width-dx);
					 if (dx == 1) { // any dx= 1 < constant < width
						 area[1] += roi.at<uchar>(height-dy,width);
						 area[5] += roi.at<uchar>(height+dy,width);
					 }
				 }
				 area[3] += roi.at<uchar>(height,width+dx);
				 area[7] += roi.at<uchar>(height,width-dx);
			 }
			 area[0] = cvRound((double)area[0] / MAX(1,(tb*lb)));
			 area[1] = cvRound((double)area[1] / MAX(1,tb));
			 area[2] = cvRound((double)area[2] / MAX(1,(rb*tb)));
			 area[3] = cvRound((double)area[3] / MAX(1,rb));
			 area[4] = cvRound((double)area[4] / MAX(1,(rb*bb)));
			 area[5] = cvRound((double)area[5] / MAX(1,bb));
			 area[6] = cvRound((double)area[6] / MAX(1,(lb*bb)));
			 area[7] = cvRound((double)area[7] / MAX(1,lb));

			 pixel=0;
			 for (unsigned int i=0;i<8;i++) {
				if (center <= area[i]) pixel += powtable[i];
				area[i] = 0;
			 }
			 tempLBP.at<uchar>(y,x) = pixel;

			 // calculate 3x3 sized LBP
			 if (smallLBP) {
				 pixel=0;
				 if (center <= roi.at<uchar>(height-1,width-1))	pixel += powtable[0];
				 if (center <= roi.at<uchar>(height-1,width))	pixel += powtable[1];
				 if (center <= roi.at<uchar>(height-1,width+1))	pixel += powtable[2];
				 if (center <= roi.at<uchar>(height,width+1))	pixel += powtable[3];
				 if (center <= roi.at<uchar>(height+1,width+1))	pixel += powtable[4];
				 if (center <= roi.at<uchar>(height+1,width))	pixel += powtable[5];
				 if (center <= roi.at<uchar>(height+1,width-1))	pixel += powtable[6];
				 if (center <= roi.at<uchar>(height,width-1))	pixel += powtable[7];
				 tempLBPsmall.at<uchar>(y,x) = pixel;
			 }
		 } // end x
	} // end y
	// for debugging
	//if (smallLBP) face = tempLBP.clone();
	face = face.colRange(width, 64+width).rowRange(height, 64+height);

	// Then concatenated histogram from the LBP pic.. 64x64 picture is split into 16 16x16 pictures.
	// Histograms are added in top->bottom, left->right order, as usual.

	//convert the small LBP to uniform patterns, every other value is set to 255 for easy removal
	if (smallLBP) LUT(tempLBPsmall, uniforms_8, tempLBPsmall);

	int histSize[] = {256};
	float range[] = {0, 256};
	const float* histRange[] = {range};
	int channels[]={0};
	Mat tempHist, histSmall;
	int hskip = (int)(tempLBP.size().width / hdivs);
	int vskip = (int)(tempLBP.size().height / vdivs);

	for (unsigned int y=0;y<vdivs;y++) {
		for (unsigned int x=0;x<hdivs;x++) {
			// select sub-region as region of interest
			Mat roi=tempLBP.rowRange(vskip*y, vskip*(y+1)).colRange(hskip*x,hskip*(x+1));
			// calculate the histogram of that ROI
			cv::calcHist(&roi, 1, 0, Mat(), tempHist, 1, histSize, histRange, true, false );
			// append to the histogram to be returned.
			hist.push_back(tempHist.clone());

			//and the same for 3x3 LBP
			if (smallLBP) {
				roi = tempLBPsmall.rowRange(vskip*y,vskip*(y+1)).colRange(hskip*x,hskip*(x+1));
				cv::calcHist(&roi, 1, 0, Mat(), histSmall, 1, histSize, histRange, true, false);
				histSmall = histSmall.rowRange(0,cutoffPoint_8); // clip off the non uniforms
				sHist.push_back(histSmall.clone());
			}
		}
	}

	normalize( hist, hist, 0, 1, NORM_MINMAX, -1, noArray());
	if (smallLBP) normalize( sHist, sHist, 0, 1, NORM_MINMAX, -1, noArray());
	// ..and leave without a fuss.
}
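// Hedged usage sketch (not part of the original source): with hdivs = vdivs = 4
// the concatenated ARLBP histogram has 256*4*4 rows, matching the layout described
// in the comment above ARLBP(); the recognizer instance and the 64x64 CV_8U input
// are assumptions taken from the function's asserts.
void ARLBPExample(expression_recognizer& rec, const Mat& grayFace64x64)
{
	Mat face = grayFace64x64.clone();	// the routine pads and then crops its input in place
	Mat hist, sHist;
	rec.ARLBP(face, hist, sHist, 4, 4);
	CV_Assert(hist.rows == 256 * 4 * 4);
}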
Example #20
void SelfSimDescriptor::compute(const Mat& img, vector<float>& descriptors, Size winStride,
                                const vector<Point>& locations) const
{
    CV_Assert( img.depth() == CV_8U );

    winStride.width = std::max(winStride.width, 1);
    winStride.height = std::max(winStride.height, 1);
    Size gridSize = getGridSize(img.size(), winStride);
    int i, nwindows = locations.empty() ? gridSize.width*gridSize.height : (int)locations.size();
    int border = largeSize/2 + smallSize/2;
    int fsize = (int)getDescriptorSize();
    vector<float> tempFeature(fsize+1);
    descriptors.resize(fsize*nwindows + 1);
    Mat ssd(largeSize, largeSize, CV_32F), mappingMask;
    computeLogPolarMapping(mappingMask);

#if 0 //def _OPENMP
    int nthreads = cvGetNumThreads();
    #pragma omp parallel for num_threads(nthreads)
#endif
    for( i = 0; i < nwindows; i++ )
    {
        Point pt;
        float* feature0 = &descriptors[fsize*i];
        float* feature = &tempFeature[0];
        int x, y, j;

        if( !locations.empty() )
        {
            pt = locations[i];
            if( pt.x < border || pt.x >= img.cols - border ||
                pt.y < border || pt.y >= img.rows - border )
            {
                for( j = 0; j < fsize; j++ )
                    feature0[j] = 0.f;
                continue;
            }
        }
        else
            pt = Point((i % gridSize.width)*winStride.width + border,
                       (i / gridSize.width)*winStride.height + border);

        SSD(img, pt, ssd);

        // Determine in the local neighborhood the largest difference and use for normalization
        float var_noise = 1000.f;
        for( y = -1; y <= 1 ; y++ )
            for( x = -1 ; x <= 1 ; x++ )
                var_noise = std::max(var_noise, ssd.at<float>(largeSize/2+y, largeSize/2+x));

        for( j = 0; j <= fsize; j++ )
            feature[j] = FLT_MAX;

        // Derive feature vector before exp(-x) computation
        // Idea: for all  x,a >= 0, a=const.   we have:
        //       max [ exp( -x / a) ] = exp ( -min(x) / a )
        // Thus, determine min(ssd) and store in feature[...]
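        // (For a > 0, exp(-x/a) decreases monotonically in x, so keeping the
        //  minimum SSD per log-polar bin before the exp() below is the same as
        //  keeping the maximum similarity over that bin.)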
        for( y = 0; y < ssd.rows; y++ )
        {
            const schar *mappingMaskPtr = mappingMask.ptr<schar>(y);
            const float *ssdPtr = ssd.ptr<float>(y);
            for( x = 0 ; x < ssd.cols; x++ )
            {
                int index = mappingMaskPtr[x];
                feature[index] = std::min(feature[index], ssdPtr[x]);
            }
        }

        var_noise = -1.f/var_noise;
        for( j = 0; j < fsize; j++ )
            feature0[j] = feature[j]*var_noise;
        Mat _f(1, fsize, CV_32F, feature0);
        cv::exp(_f, _f);
    }
}
void expression_recognizer::edgeHistogram( Mat& face, Mat& hist ) {
	CV_Assert( face.depth() == CV_8U );
	CV_Assert( face.size() == Size(64, 64));
	CV_Assert( face.channels() == 1 );

	static unsigned int M = 10, N = 8;

	// calculate edge images
	Mat h_edges = Mat(face.size(), CV_16S);
	Mat v_edges = Mat(face.size(), CV_16S);
	Mat h_edges_pow = Mat( face.size(), CV_32F );
	Mat v_edges_pow = Mat( face.size(), CV_32F );

	Mat vertical = Mat(1,3,CV_16S);
	vertical.at<signed short>(0,0) = -1;
	vertical.at<signed short>(0,1) = 0;
	vertical.at<signed short>(0,2) = 1;
	Mat horizontal = vertical.t();

	filter2D( face, v_edges, CV_16S, vertical, Point(-1,-1), 0, BORDER_DEFAULT );
	filter2D( face, h_edges, CV_16S, horizontal, Point(-1,-1), 0, BORDER_DEFAULT );

	Mat magnitude = Mat( face.size(), CV_32F);
	Mat direction = Mat::zeros( face.size(), CV_8U );

	double angle = 0.0f;
	for (unsigned int y=0;y<face.size().height;y++) {
		for (unsigned int x=0;x<face.size().width;x++) {

			if (h_edges.at<signed short>(y,x) != 0) {
				if (v_edges.at<signed short>(y,x) != 0) {
					angle = atan2( v_edges.at<signed short>(y,x), h_edges.at<signed short>(y,x));
				} else angle = 0.0f;
				if (angle < 0.0f) angle += (2*M_PI);

				for (unsigned int n = N; n > 1; n--) {
					if (angle < (2*M_PI*n/N)) direction.at<uchar>(y,x) = n-1;
				}
			} else {
				direction.at<uchar>(y,x) = 0;
			}

			//if (x == 32) LOGD("angle: %u, %f", direction.at<uchar>(y,x), angle);
		}
	}

	v_edges.convertTo(v_edges, CV_32F);
	h_edges.convertTo(h_edges, CV_32F);
	pow( v_edges, 2, v_edges_pow );
	pow( h_edges, 2, h_edges_pow );
	add( h_edges_pow, v_edges_pow, magnitude );
	sqrt(magnitude, magnitude);

	normalize( magnitude, magnitude, 0, M+1-0.001, NORM_MINMAX, CV_8U);
	//normalize( direction, direction, 0, N-0.001, NORM_MINMAX, CV_8U); // so everything rounds down to 0-(N-1)

	/*LOGD("dir mag");
	dump( direction );
	dump( magnitude );*/

	unsigned int bin = 0;
	for (unsigned int y=0;y<face.size().height;y++) {
		for (unsigned int x=0;x<face.size().width;x++) {

			unsigned int m = magnitude.at<uchar>(y,x);
			unsigned int n = direction.at<uchar>(y,x);

			if (m == 0) bin = 0; else bin = ((m-1)*N)+n+1;

			face.at<uchar>(y,x) = bin; //histogram bins 0-81
			//if (x == 32) LOGD("debug: m:%u, n:%u = bin = %u", m, n, bin);
		}
	}

	int histSize[] = {256};
	float range[] = {0, 256};
	const float* histRange[] = {range};
	int channels[]={0};

	// split into 8x8 subimages
	static unsigned int ystep = 8, xstep = 8, xsize = 8, ysize = 8;
	for (unsigned int y = 0;y<ystep;y++) {
		for (unsigned int x = 0;x<xstep;x++) {
			// take histograms
			Mat tempHist; // calcHist allocates this as a 256-bin CV_32F histogram

			Mat roi = face.rowRange(y*ysize, (y+1)*ysize).colRange(x*xsize,(x+1)*xsize);
			calcHist(&roi, 1, 0, Mat(), tempHist, 1, histSize, histRange, true, false );

			tempHist = tempHist.rowRange(0, 82);
			hist.push_back(tempHist);
		}
	}

	normalize( hist, hist, 0, 1, NORM_MINMAX);
}
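
A hedged driver sketch for edgeHistogram (hypothetical: 'recognizer' is an assumed expression_recognizer instance; the 64x64, CV_8U, single-channel requirements come from the asserts at the top of the function):

	cv::Mat face = cv::imread("face.png", 0);      // 8-bit grayscale
	cv::resize(face, face, cv::Size(64, 64));      // satisfy the 64x64 assert
	cv::Mat hist;
	recognizer.edgeHistogram(face, hist);          // note: 'face' is overwritten with per-pixel bin indices
	// hist stacks an 82-bin histogram for each of the 8x8 blocks, min-max normalized to [0,1]
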
static void test_threshold( const Mat& _src, Mat& _dst,
                            float thresh, float maxval, int thresh_type )
{
    int i, j;
    int depth = _src.depth(), cn = _src.channels();
    int width_n = _src.cols*cn, height = _src.rows;
    int ithresh = cvFloor(thresh), ithresh2, imaxval = cvRound(maxval);
    const uchar* src = _src.data;
    uchar* dst = _dst.data;
    size_t srcstep = _src.step, dststep = _dst.step;
    
    ithresh2 = saturate_cast<uchar>(ithresh);
    imaxval = saturate_cast<uchar>(imaxval);

    assert( depth == CV_8U || depth == CV_32F );
    
    switch( thresh_type )
    {
    case CV_THRESH_BINARY:
        for( i = 0; i < height; i++, src += srcstep, dst += dststep )
        {
            if( depth == CV_8U )
                for( j = 0; j < width_n; j++ )
                    dst[j] = (uchar)(src[j] > ithresh ? imaxval : 0);
            else
                for( j = 0; j < width_n; j++ )
                    ((float*)dst)[j] = ((const float*)src)[j] > thresh ? maxval : 0.f;
        }
        break;
    case CV_THRESH_BINARY_INV:
        for( i = 0; i < height; i++, src += srcstep, dst += dststep )
        {
            if( depth == CV_8U )
                for( j = 0; j < width_n; j++ )
                    dst[j] = (uchar)(src[j] > ithresh ? 0 : imaxval);
            else
                for( j = 0; j < width_n; j++ )
                    ((float*)dst)[j] = ((const float*)src)[j] > thresh ? 0.f : maxval;
        }
        break;
    case CV_THRESH_TRUNC:
        for( i = 0; i < height; i++, src += srcstep, dst += dststep )
        {
            if( depth == CV_8U )
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (uchar)(s > ithresh ? ithresh2 : s);
                }
            else
                for( j = 0; j < width_n; j++ )
                {
                    float s = ((const float*)src)[j];
                    ((float*)dst)[j] = s > thresh ? thresh : s;
                }
        }
        break;
    case CV_THRESH_TOZERO:
        for( i = 0; i < height; i++, src += srcstep, dst += dststep )
        {
            if( depth == CV_8U )
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (uchar)(s > ithresh ? s : 0);
                }
            else
                for( j = 0; j < width_n; j++ )
                {
                    float s = ((const float*)src)[j];
                    ((float*)dst)[j] = s > thresh ? s : 0.f;
                }
        }
        break;
    case CV_THRESH_TOZERO_INV:
        for( i = 0; i < height; i++, src += srcstep, dst += dststep )
        {
            if( depth == CV_8U )
                for( j = 0; j < width_n; j++ )
                {
                    int s = src[j];
                    dst[j] = (uchar)(s > ithresh ? 0 : s);
                }
            else
                for( j = 0; j < width_n; j++ )
                {
                    float s = ((const float*)src)[j];
                    ((float*)dst)[j] = s > thresh ? 0.f : s;
                }
        }
        break;
    default:
        assert(0);
    }
}
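
test_threshold reads like a scalar reference implementation for cv::threshold; a quick cross-check sketch (illustrative values, output matrix pre-allocated because the reference writes through raw pointers):

    cv::Mat src(64, 64, CV_8UC1), ref(64, 64, CV_8UC1), dst;
    cv::randu(src, cv::Scalar(0), cv::Scalar(256));
    test_threshold(src, ref, 100.f, 255.f, CV_THRESH_BINARY);   // reference result
    cv::threshold(src, dst, 100, 255, cv::THRESH_BINARY);       // library result
    CV_Assert(cv::norm(ref, dst, cv::NORM_INF) == 0);           // the two should match exactly
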
Example #23
0
void solvePlanarPnP(const Mat& objectPoints, const Mat& imagePoints, const Mat& cameraMatrix, const Mat& distCoeffs,
                    Mat& _rvec, Mat& _tvec, bool useExtrinsicGuess)
{
    CV_Assert(objectPoints.depth() == CV_32F && imagePoints.depth() == CV_32F);

    if(useExtrinsicGuess == false)
    {
        solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, _rvec, _tvec, false);
        return;
    }

    Mat rvec, tvec;
    _rvec.convertTo(rvec, CV_32FC1);
    _tvec.convertTo(tvec, CV_32FC1);

    // calculate rotation matrix
    Mat R(3, 3, CV_32FC1);
    Rodrigues(rvec, R);
    CV_Assert(R.type() == CV_32FC1);

    // calculate object normal
    Point3f normal0 = getPlanarObjectNormal(objectPoints);
//    printf("Normal0: %f %f %f\n", normal0.x, normal0.y, normal0.z);

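    // The 3x1 CV_32F matrices below are accessed through at<Point3f>(0, 0),
    // i.e. their three consecutive floats are read/written as one point.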
    Mat Normal0(3, 1, CV_32F);
    Normal0.at<Point3f>(0, 0) = normal0;
    Mat Normal = R*Normal0;
    Point3f normal = Normal.at<Point3f>(0, 0);
    normal = normal*(1.0/norm(normal));
    if(normal.z < 0) normal = -normal; // z points from the camera
//    printf("Normal: %f %f %f\n", normal.x, normal.y, normal.z);

    vector<Point3f> rotated_object_points;
    rotated_object_points.resize(objectPoints.rows);
    for(size_t i = 0; i < rotated_object_points.size(); i++)
    {
        Mat p = objectPoints.rowRange(i, i + 1);
        p = p.reshape(1, 3);        
        Mat res = R*p;
        rotated_object_points[i] = res.at<Point3f>(0, 0);
    }

    double alpha, C;
    vector<Point3f> object_points_crf;
    findPlanarObjectPose(rotated_object_points, imagePoints, normal, cameraMatrix, distCoeffs, alpha, C, object_points_crf);

    Mat rp(3, 1, CV_32FC1);
    rp.at<Point3f>(0, 0) = normal*alpha;

    Mat Rp;
    Rodrigues(rp, Rp);

    R = Rp*R;
    Rodrigues(R, rvec);

    Point3f center1 = massCenter(rotated_object_points);
    Mat mcenter1(3, 1, CV_32FC1, &center1);
    Mat mcenter1_alpha = Rp*mcenter1;
    Point3f center1_alpha = mcenter1_alpha.at<Point3f>(0, 0);

    Point3f center2 = massCenter(object_points_crf);
    tvec.at<Point3f>(0, 0) = center2 - center1_alpha;

    Mat mobj;
    objectPoints.copyTo(mobj);
    mobj = mobj.reshape(1);

    CV_Assert(R.type() == CV_32FC1 && mobj.type() == CV_32FC1);
    Mat mrobj = R*mobj.t();
    mrobj = mrobj.t();
    Point3f p1 = mrobj.at<Point3f>(0, 0) + center2 - center1;
    Point3f p2 = object_points_crf[0];
//    printf("point1: %f %f %f\n", p1.x, p1.y, p1.z);
//    printf("point2: %f %f %f\n", p2.x, p2.y, p2.z);

    rvec.convertTo(_rvec, _rvec.depth());
    tvec.convertTo(_tvec, _tvec.depth());
}
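
A hedged calling sketch (coordinates are illustrative only, not a real projection; with useExtrinsicGuess=false the call reduces to a plain solvePnP):

    float obj[] = { 0,0,0,  1,0,0,  1,1,0,  0,1,0 };          // coplanar square, z = 0
    float pix[] = { 320,240,  420,240,  420,340,  320,340 };
    cv::Mat objectPoints(4, 3, CV_32F, obj), imagePoints(4, 2, CV_32F, pix);
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,  0, 800, 240,  0, 0, 1);
    cv::Mat dist = cv::Mat::zeros(5, 1, CV_64F);
    cv::Mat rvec = cv::Mat::zeros(3, 1, CV_32F), tvec = cv::Mat::zeros(3, 1, CV_32F);
    solvePlanarPnP(objectPoints, imagePoints, K, dist, rvec, tvec, false);
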
Example #24
0
bool  PxMDecoder::readData( Mat& img )
{
    int color = img.channels() > 1;
    uchar* data = img.data;
    int step = (int)img.step;
    PaletteEntry palette[256];
    bool   result = false;
    int  bit_depth = CV_ELEM_SIZE1(m_type)*8;
    int  src_pitch = (m_width*m_bpp*bit_depth/8 + 7)/8;
    int  nch = CV_MAT_CN(m_type);
    int  width3 = m_width*nch;
    int  i, x, y;

    if( m_offset < 0 || !m_strm.isOpened())
        return false;

    AutoBuffer<uchar> _src(src_pitch + 32);
    uchar* src = _src;
    AutoBuffer<uchar> _gray_palette;
    uchar* gray_palette = _gray_palette;

    // create LUT for converting colors
    if( bit_depth == 8 )
    {
        _gray_palette.allocate(m_maxval + 1);
        gray_palette = _gray_palette;

        for( i = 0; i <= m_maxval; i++ )
            gray_palette[i] = (uchar)((i*255/m_maxval)^(m_bpp == 1 ? 255 : 0));

        FillGrayPalette( palette, m_bpp==1 ? 1 : 8 , m_bpp == 1 );
    }

    try
    {
        m_strm.setPos( m_offset );

        switch( m_bpp )
        {
        ////////////////////////// 1 BPP /////////////////////////
        case 1:
            if( !m_binary )
            {
                for( y = 0; y < m_height; y++, data += step )
                {
                    for( x = 0; x < m_width; x++ )
                        src[x] = ReadNumber( m_strm, 1 ) != 0;

                    if( color )
                        FillColorRow8( data, src, m_width, palette );
                    else
                        FillGrayRow8( data, src, m_width, gray_palette );
                }
            }
            else
            {
                for( y = 0; y < m_height; y++, data += step )
                {
                    m_strm.getBytes( src, src_pitch );

                    if( color )
                        FillColorRow1( data, src, m_width, palette );
                    else
                        FillGrayRow1( data, src, m_width, gray_palette );
                }
            }
            result = true;
            break;

        ////////////////////////// 8 BPP /////////////////////////
        case 8:
        case 24:
            for( y = 0; y < m_height; y++, data += step )
            {
                if( !m_binary )
                {
                    for( x = 0; x < width3; x++ )
                    {
                        int code = ReadNumber( m_strm, INT_MAX );
                        if( (unsigned)code > (unsigned)m_maxval ) code = m_maxval;
                        if( bit_depth == 8 )
                            src[x] = gray_palette[code];
                        else
                            ((ushort *)src)[x] = (ushort)code;
                    }
                }
                else
                {
                    m_strm.getBytes( src, src_pitch );
                    if( bit_depth == 16 && !isBigEndian() )
                    {
                        for( x = 0; x < width3; x++ )
                        {
                            uchar v = src[x * 2];
                            src[x * 2] = src[x * 2 + 1];
                            src[x * 2 + 1] = v;
                        }
                    }
                }

                if( img.depth() == CV_8U && bit_depth == 16 )
                {
                    for( x = 0; x < width3; x++ )
                    {
                        int v = ((ushort *)src)[x];
                        src[x] = (uchar)(v >> 8);
                    }
                }

                if( m_bpp == 8 ) // image has one channel
                {
                    if( color )
                    {
                        if( img.depth() == CV_8U ) {
                            uchar *d = data, *s = src, *end = src + m_width;
                            for( ; s < end; d += 3, s++)
                                d[0] = d[1] = d[2] = *s;
                        } else {
                            ushort *d = (ushort *)data, *s = (ushort *)src, *end = ((ushort *)src) + m_width;
                            for( ; s < end; s++, d += 3)
                                d[0] = d[1] = d[2] = *s;
                        }
                    }
                    else
                        memcpy( data, src, m_width*(bit_depth/8) );
                }
                else
                {
                    if( color )
                    {
                        if( img.depth() == CV_8U )
                            icvCvt_RGB2BGR_8u_C3R( src, 0, data, 0, cvSize(m_width,1) );
                        else
                            icvCvt_RGB2BGR_16u_C3R( (ushort *)src, 0, (ushort *)data, 0, cvSize(m_width,1) );
                    }
                    else if( img.depth() == CV_8U )
                        icvCvt_BGR2Gray_8u_C3C1R( src, 0, data, 0, cvSize(m_width,1), 2 );
                    else
                        icvCvt_BGRA2Gray_16u_CnC1R( (ushort *)src, 0, (ushort *)data, 0, cvSize(m_width,1), 3, 2 );
                }
            }
            result = true;
            break;
        default:
            assert(0);
        }
    }
    catch(...)
    {
    }

    return result;
}
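
In normal use this decoder is reached through the generic image I/O entry point; a trivial sketch (file name is illustrative, constant name as in the 2.x C API):

    cv::Mat img = cv::imread("frame.pgm", CV_LOAD_IMAGE_UNCHANGED);   // dispatches to PxMDecoder for PNM (P1-P6) files
    if (img.empty()) { /* handle read failure */ }
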
Example #25
0
        void l0Smooth(InputArray src, OutputArray dst, double lambda, double kappa)
        {
            Mat S = src.getMat();

            CV_Assert(!S.empty());
            CV_Assert(S.depth() == CV_8U || S.depth() == CV_16U
            || S.depth() == CV_32F || S.depth() == CV_64F);

            dst.create(src.size(), src.type());

            if(S.data == dst.getMat().data){
                S = S.clone();
            }

            if(S.depth() == CV_8U)
            {
                S.convertTo(S, CV_32F, 1/255.0f);
            }
            else if(S.depth() == CV_16U)
            {
                S.convertTo(S, CV_32F, 1/65535.0f);
            }else if(S.depth() == CV_64F){
                S.convertTo(S, CV_32F);
            }

            const double betaMax = 100000;

            // gradient operators in frequency domain
            Mat otfFx, otfFy;
            float kernel[2] = {-1, 1};
            float kernel_inv[2] = {1,-1};
            psf2otf(Mat(1,2,CV_32FC1, kernel_inv), otfFx, S.rows, S.cols);
            psf2otf(Mat(2,1,CV_32FC1, kernel_inv), otfFy, S.rows, S.cols);

            vector<Mat> denomConst;
            Mat tmp = pow2absComplex(otfFx) + pow2absComplex(otfFy);

            for(int i = 0; i < S.channels(); i++){
                denomConst.push_back(tmp);
            }

            // input image in frequency domain
            vector<Mat> numerConst;
            dftMultiChannel(S, numerConst);
            /*********************************
            * solver
            *********************************/
            double beta = 2 * lambda;
            while(beta < betaMax){
                // h, v subproblem
                Mat h, v;

                filter2D(S, h, -1, Mat(1, 2, CV_32FC1, kernel), Point(0, 0),
                0, BORDER_REPLICATE);
                filter2D(S, v, -1, Mat(2, 1, CV_32FC1, kernel), Point(0, 0),
                0, BORDER_REPLICATE);

                Mat hvMag = h.mul(h) + v.mul(v);

                Mat mask;
                if(S.channels() == 1)
                {
                    threshold(hvMag, mask, lambda/beta, 1, THRESH_BINARY);
                }
                else if(S.channels() > 1)
                {
                    Mat *channels = new Mat[S.channels()];
                    split(hvMag, channels);
                    hvMag = channels[0];

                    for(int i = 1; i < S.channels(); i++){
                        hvMag = hvMag + channels[i];
                    }

                    threshold(hvMag, mask, lambda/beta, 1, THRESH_BINARY);

                    Mat in[] = {mask, mask, mask};
                    merge(in, 3, mask);

                    delete[] channels;
                }

                h = h.mul(mask);
                v = v.mul(mask);

                // S subproblem
                vector<Mat> denom(S.channels());
                for(int i = 0; i < S.channels(); i++){
                    denom[i] = beta * denomConst[i] + 1;
                }

                Mat hGrad, vGrad;
                filter2D(h, hGrad, -1, Mat(1, 2, CV_32FC1, kernel_inv));
                filter2D(v, vGrad, -1, Mat(2, 1, CV_32FC1, kernel_inv));

                vector<Mat> hvGradFreq;
                dftMultiChannel(hGrad+vGrad, hvGradFreq);

                vector<Mat> numer(S.channels());
                for(int i = 0; i < S.channels(); i++){
                    numer[i] = numerConst[i] + hvGradFreq[i] * beta;
                }

                vector<Mat> sFreq(S.channels());
                divComplexByRealMultiChannel(numer, denom, sFreq);

                idftMultiChannel(sFreq, S);

                beta = beta * kappa;
            }

            Mat D = dst.getMat();
            if(D.depth() == CV_8U)
            {
                S.convertTo(D, CV_8U, 255);
            }
            else if(D.depth() == CV_16U)
            {
                S.convertTo(D, CV_16U, 65535);
            }else if(D.depth() == CV_64F){
                S.convertTo(D, CV_64F);
            }else{
                S.copyTo(D);
            }
        }
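
A typical call of the smoother above (0.02 and 2.0 follow the parameter values usually quoted for L0 gradient minimization; treat them as a reasonable starting point rather than guaranteed defaults):

    cv::Mat src = cv::imread("input.png");
    cv::Mat smoothed;
    l0Smooth(src, smoothed, 0.02, 2.0);   // lambda: smoothing strength, kappa: growth rate of beta
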
Example #26
0
bool  PxMEncoder::write( const Mat& img, const std::vector<int>& params )
{
    bool isBinary = true;

    int  width = img.cols, height = img.rows;
    int  _channels = img.channels(), depth = (int)img.elemSize1()*8;
    int  channels = _channels > 1 ? 3 : 1;
    int  fileStep = width*(int)img.elemSize();
    int  x, y;

    for( size_t i = 0; i < params.size(); i += 2 )
        if( params[i] == CV_IMWRITE_PXM_BINARY )
            isBinary = params[i+1] != 0;

    WLByteStream strm;

    if( m_buf )
    {
        if( !strm.open(*m_buf) )
            return false;
        int t = CV_MAKETYPE(img.depth(), channels);
        m_buf->reserve( alignSize(256 + (isBinary ? fileStep*height :
            ((t == CV_8UC1 ? 4 : t == CV_8UC3 ? 4*3+2 :
            t == CV_16UC1 ? 6 : 6*3+2)*width+1)*height), 256));
    }
    else if( !strm.open(m_filename) )
        return false;

    int  lineLength;
    int  bufferSize = 128; // buffer that should fit a header

    if( isBinary )
        lineLength = width * (int)img.elemSize();
    else
        lineLength = (6 * channels + (channels > 1 ? 2 : 0)) * width + 32;

    if( bufferSize < lineLength )
        bufferSize = lineLength;

    AutoBuffer<char> _buffer(bufferSize);
    char* buffer = _buffer;

    // write header;
    sprintf( buffer, "P%c\n%d %d\n%d\n",
             '2' + (channels > 1 ? 1 : 0) + (isBinary ? 3 : 0),
             width, height, (1 << depth) - 1 );

    strm.putBytes( buffer, (int)strlen(buffer) );

    for( y = 0; y < height; y++ )
    {
        uchar* data = img.data + img.step*y;
        if( isBinary )
        {
            if( _channels == 3 )
            {
                if( depth == 8 )
                    icvCvt_BGR2RGB_8u_C3R( (uchar*)data, 0,
                        (uchar*)buffer, 0, cvSize(width,1) );
                else
                    icvCvt_BGR2RGB_16u_C3R( (ushort*)data, 0,
                        (ushort*)buffer, 0, cvSize(width,1) );
            }

            // swap endianness if necessary
            if( depth == 16 && !isBigEndian() )
            {
                if( _channels == 1 )
                    memcpy( buffer, data, fileStep );
                for( x = 0; x < width*channels*2; x += 2 )
                {
                    uchar v = buffer[x];
                    buffer[x] = buffer[x + 1];
                    buffer[x + 1] = v;
                }
            }
            strm.putBytes( (channels > 1 || depth > 8) ? buffer : (char*)data, fileStep );
        }
        else
        {
            char* ptr = buffer;

            if( channels > 1 )
            {
                if( depth == 8 )
                {
                    for( x = 0; x < width*channels; x += channels )
                    {
                        sprintf( ptr, "% 4d", data[x + 2] );
                        ptr += 4;
                        sprintf( ptr, "% 4d", data[x + 1] );
                        ptr += 4;
                        sprintf( ptr, "% 4d", data[x] );
                        ptr += 4;
                        *ptr++ = ' ';
                        *ptr++ = ' ';
                    }
                }
                else
                {
                    for( x = 0; x < width*channels; x += channels )
                    {
                        sprintf( ptr, "% 6d", ((ushort *)data)[x + 2] );
                        ptr += 6;
                        sprintf( ptr, "% 6d", ((ushort *)data)[x + 1] );
                        ptr += 6;
                        sprintf( ptr, "% 6d", ((ushort *)data)[x] );
                        ptr += 6;
                        *ptr++ = ' ';
                        *ptr++ = ' ';
                    }
                }
            }
            else
            {
                if( depth == 8 )
                {
                    for( x = 0; x < width; x++ )
                    {
                        sprintf( ptr, "% 4d", data[x] );
                        ptr += 4;
                    }
                }
                else
                {
                    for( x = 0; x < width; x++ )
                    {
                        sprintf( ptr, "% 6d", ((ushort *)data)[x] );
                        ptr += 6;
                    }
                }
            }

            *ptr++ = '\n';

            strm.putBytes( buffer, (int)(ptr - buffer) );
        }
    }

    strm.close();
    return true;
}
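
The encoder above is normally exercised through imwrite; a hedged sketch of selecting ASCII vs. binary PNM output (constant name as in the 2.x C API):

    cv::Mat gray(480, 640, CV_8UC1, cv::Scalar(128));
    std::vector<int> params;
    params.push_back(CV_IMWRITE_PXM_BINARY);
    params.push_back(0);                      // 0 -> plain (ASCII) PNM, nonzero -> binary
    cv::imwrite("out.pgm", gray, params);
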
Example #27
0
void crossCorr( const Mat& img, const Mat& _templ, Mat& corr,
                Size corrsize, int ctype,
                Point anchor, double delta, int borderType )
{
    const double blockScale = 4.5;
    const int minBlockSize = 256;
    std::vector<uchar> buf;

    Mat templ = _templ;
    int depth = img.depth(), cn = img.channels();
    int tdepth = templ.depth(), tcn = templ.channels();
    int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype);

    CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 );

    if( depth != tdepth && tdepth != std::max(CV_32F, depth) )
    {
        _templ.convertTo(templ, std::max(CV_32F, depth));
        tdepth = templ.depth();
    }

    CV_Assert( depth == tdepth || tdepth == CV_32F);
    CV_Assert( corrsize.height <= img.rows + templ.rows - 1 &&
               corrsize.width <= img.cols + templ.cols - 1 );

    CV_Assert( ccn == 1 || delta == 0 );

    corr.create(corrsize, ctype);

    int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth);
    Size blocksize, dftsize;

    blocksize.width = cvRound(templ.cols*blockScale);
    blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 );
    blocksize.width = std::min( blocksize.width, corr.cols );
    blocksize.height = cvRound(templ.rows*blockScale);
    blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 );
    blocksize.height = std::min( blocksize.height, corr.rows );

    dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2);
    dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ.cols + 1;
    blocksize.width = MIN( blocksize.width, corr.cols );
    blocksize.height = dftsize.height - templ.rows + 1;
    blocksize.height = MIN( blocksize.height, corr.rows );

    Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth );
    Mat dftImg( dftsize, maxDepth );

    int i, k, bufSize = 0;
    if( tcn > 1 && tdepth != maxDepth )
        bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth);

    if( cn > 1 && depth != maxDepth )
        bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)*
            (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth));

    if( (ccn > 1 || cn > 1) && cdepth != maxDepth )
        bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth));

    buf.resize(bufSize);

    // compute DFT of each template plane
    for( k = 0; k < tcn; k++ )
    {
        int yofs = k*dftsize.height;
        Mat src = templ;
        Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));
        Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));

        if( tcn > 1 )
        {
            src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]);
            int pairs[] = {k, 0};
            mixChannels(&templ, 1, &src, 1, pairs, 1);
        }

        if( dst1.data != src.data )
            src.convertTo(dst1, dst1.depth());

        if( dst.cols > templ.cols )
        {
            Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));
            part = Scalar::all(0);
        }
        dft(dst, dst, 0, templ.rows);
    }

    int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;
    int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;
    int tileCount = tileCountX * tileCountY;

    Size wholeSize = img.size();
    Point roiofs(0,0);
    Mat img0 = img;

    if( !(borderType & BORDER_ISOLATED) )
    {
        img.locateROI(wholeSize, roiofs);
        img0.adjustROI(roiofs.y, wholeSize.height-img.rows-roiofs.y,
                       roiofs.x, wholeSize.width-img.cols-roiofs.x);
    }
    borderType |= BORDER_ISOLATED;

    // calculate correlation by blocks
    for( i = 0; i < tileCount; i++ )
    {
        int x = (i%tileCountX)*blocksize.width;
        int y = (i/tileCountX)*blocksize.height;

        Size bsz(std::min(blocksize.width, corr.cols - x),
                 std::min(blocksize.height, corr.rows - y));
        Size dsz(bsz.width + templ.cols - 1, bsz.height + templ.rows - 1);
        int x0 = x - anchor.x + roiofs.x, y0 = y - anchor.y + roiofs.y;
        int x1 = std::max(0, x0), y1 = std::max(0, y0);
        int x2 = std::min(img0.cols, x0 + dsz.width);
        int y2 = std::min(img0.rows, y0 + dsz.height);
        Mat src0(img0, Range(y1, y2), Range(x1, x2));
        Mat dst(dftImg, Rect(0, 0, dsz.width, dsz.height));
        Mat dst1(dftImg, Rect(x1-x0, y1-y0, x2-x1, y2-y1));
        Mat cdst(corr, Rect(x, y, bsz.width, bsz.height));

        for( k = 0; k < cn; k++ )
        {
            Mat src = src0;
            dftImg = Scalar::all(0);

            if( cn > 1 )
            {
                src = depth == maxDepth ? dst1 : Mat(y2-y1, x2-x1, depth, &buf[0]);
                int pairs[] = {k, 0};
                mixChannels(&src0, 1, &src, 1, pairs, 1);
            }

            if( dst1.data != src.data )
                src.convertTo(dst1, dst1.depth());

            if( x2 - x1 < dsz.width || y2 - y1 < dsz.height )
                copyMakeBorder(dst1, dst, y1-y0, dst.rows-dst1.rows-(y1-y0),
                               x1-x0, dst.cols-dst1.cols-(x1-x0), borderType);

            dft( dftImg, dftImg, 0, dsz.height );
            Mat dftTempl1(dftTempl, Rect(0, tcn > 1 ? k*dftsize.height : 0,
                                         dftsize.width, dftsize.height));
            mulSpectrums(dftImg, dftTempl1, dftImg, 0, true);
            dft( dftImg, dftImg, DFT_INVERSE + DFT_SCALE, bsz.height );

            src = dftImg(Rect(0, 0, bsz.width, bsz.height));

            if( ccn > 1 )
            {
                if( cdepth != maxDepth )
                {
                    Mat plane(bsz, cdepth, &buf[0]);
                    src.convertTo(plane, cdepth, 1, delta);
                    src = plane;
                }
                int pairs[] = {0, k};
                mixChannels(&src, 1, &cdst, 1, pairs, 1);
            }
            else
            {
                if( k == 0 )
                    src.convertTo(cdst, cdepth, 1, delta);
                else
                {
                    if( maxDepth != cdepth )
                    {
                        Mat plane(bsz, cdepth, &buf[0]);
                        src.convertTo(plane, cdepth);
                        src = plane;
                    }
                    add(src, cdst, cdst);
                }
            }
        }
    }
}
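
crossCorr is the kind of block-DFT correlation engine that matchTemplate builds on; a direct-call sketch (sizes and parameters are illustrative):

    cv::Mat img(240, 320, CV_8UC1), templ(16, 16, CV_8UC1), corr;
    cv::randu(img, cv::Scalar(0), cv::Scalar(256));
    cv::randu(templ, cv::Scalar(0), cv::Scalar(256));
    cv::Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1);
    crossCorr(img, templ, corr, corrSize, CV_32F, cv::Point(0, 0), 0, cv::BORDER_REFLECT_101);
    // corr(y, x) now holds the cross-correlation of templ with img at top-left offset (x, y)
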
void conv2(const Mat &img, const Mat &kernel, Mat& dest) {
	// filter2D computes correlation; for true convolution the kernel must also be
	// flipped around its center in addition to mirroring the anchor.
	Mat flipped;
	flip(kernel, flipped, -1);
	Point anchor(kernel.cols - kernel.cols / 2 - 1, kernel.rows - kernel.rows / 2 - 1);
	filter2D(img, dest, img.depth(), flipped, anchor);
}
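
A quick sanity sketch for conv2 with an asymmetric kernel, where true convolution and plain filter2D correlation differ (values are illustrative):

    cv::Mat a = (cv::Mat_<float>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9);
    cv::Mat k = (cv::Mat_<float>(1, 3) << 1, 0, -1);
    cv::Mat conv, corr;
    conv2(a, k, conv);                      // convolution (kernel flipped)
    cv::filter2D(a, corr, a.depth(), k);    // correlation, for comparison
    // here the two results have opposite signs, since flipping [1 0 -1] negates it
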
Example #29
0
void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) indices;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty() && !indices.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        const int kind = indices.kind();

        switch (kind)
        {
        case _InputArray::OPENGL_BUFFER :
            {
                ogl::Buffer buf = indices.getOGlBuffer();

                const int depth = buf.depth();

                CV_Assert( buf.channels() == 1 );
                CV_Assert( depth <= CV_32S );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, buf.size().area(), type, 0);

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                break;
            }

        default:
            {
                Mat mat = indices.getMat();

                const int depth = mat.depth();

                CV_Assert( mat.channels() == 1 );
                CV_Assert( depth <= CV_32S );
                CV_Assert( mat.isContinuous() );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, mat.size().area(), type, mat.data);
            }
        }
    }
#endif
}
DLLEXPORT MyMat* LoadFromCIFAR10Test(string path)
{
	MyMat* imgs = new MyMat[10000];
	MyMat* imgs200 = new MyMat[200];
	int id[10000];
	int indexOfType[10][1000];
	int countOfType[10] = {0};
	int planeId[3]={2,1,0};
	int width=32;
	int height=32;
	int type=CV_8UC3;
	int imgSize=width*height;
	int nChannels=3;
	int imgDataSize = 1+imgSize*nChannels;
	int n = 0;
	Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
	Mat img;
	Mat rs;
	ifstream fin(path + "test_batch.bin", ios::binary);
	// create the matrices with the Mat constructor
	while (!fin.eof())
	{
		if(n >= 10000)
			break;
		img.create(width, height, type);
		rs.create(64, 64, type);
		imgs[n].create(64, 64, type);
		uchar *buf=(uchar*)calloc(imgDataSize,sizeof(uchar));
		fin.read((char *)buf,(imgDataSize)*sizeof(uchar));
		imgs[n].type = buf[0];
		indexOfType[buf[0]][countOfType[buf[0]]++] = n;
		imgs[n].id = n;
		for(int i = 0; i < height; i++) 
		{
			for(int j = 0; j < width; j++ ) {
				uchar* dataIJ = img.data + i * img.step + j * img.elemSize();// img.at(i, j)
				for(int k = 0; k < nChannels; k++)
					dataIJ[k] = buf[1 + planeId[k] * imgSize + i * width + j];
			}
		}
		id[n] = n;
		// Laplacian sharpening
		resize(img, rs, rs.size(), 0, 0, INTER_CUBIC);
		filter2D(rs, imgs[n], rs.depth(), kernel );
		n++;
		free(buf);
	}
	fin.close();
	srand(time(NULL));
	int modnum = 10000;
	for(int i = 0; i < 200; i++)
	{
		//int r = rand() % modnum;
		int j = id[i];
		imgs200[i] = imgs[j];
		//id[r] = id[modnum - 1];
		modnum--;
	}
	delete[] imgs;
	return imgs200;
}
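
A hypothetical caller for the loader above (MyMat is the project's own Mat wrapper; id and type are the fields assigned inside the loop, and the path needs a trailing separator because the file name is appended directly):

	MyMat* test200 = LoadFromCIFAR10Test("cifar-10-batches-bin/");
	for (int i = 0; i < 200; i++)
		printf("img %d: class label %d\n", (int)test200[i].id, (int)test200[i].type);
	delete[] test200;                      // the loader allocates the returned array with new[]
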